#![allow(clippy::type_complexity)]

use alloc::boxed::Box;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use alloc::sync::Arc;
#[cfg(feature = "smp")]
use alloc::vec::Vec;
use core::cell::RefCell;
use core::ptr;
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
use core::sync::atomic::AtomicBool;
use core::sync::atomic::{AtomicI32, AtomicU32, Ordering};

use ahash::RandomState;
use crossbeam_utils::Backoff;
use hashbrown::{HashMap, hash_map};
use hermit_sync::*;
#[cfg(target_arch = "riscv64")]
use riscv::register::sstatus;

use crate::arch::core_local::*;
#[cfg(target_arch = "riscv64")]
use crate::arch::switch::switch_to_task;
#[cfg(target_arch = "x86_64")]
use crate::arch::switch::{switch_to_fpu_owner, switch_to_task};
use crate::arch::{get_processor_count, interrupts};
use crate::errno::Errno;
use crate::fd::{ObjectInterface, RawFd};
use crate::kernel::scheduler::TaskStacks;
use crate::scheduler::task::*;
use crate::{arch, io};

pub mod task;

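/// Number of tasks that are currently managed by the schedulers.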
static NO_TASKS: AtomicU32 = AtomicU32::new(0);
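/// Maps a core ID to a reference to that core's scheduler input queues.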
#[cfg(feature = "smp")]
static SCHEDULER_INPUTS: SpinMutex<Vec<&InterruptTicketMutex<SchedulerInput>>> =
    SpinMutex::new(Vec::new());
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
static CORE_HLT_STATE: SpinMutex<Vec<&AtomicBool>> = SpinMutex::new(Vec::new());
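/// Maps a task ID to the queue of tasks waiting in `join` for that task to finish.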
static WAITING_TASKS: InterruptTicketMutex<BTreeMap<TaskId, VecDeque<TaskHandle>>> =
    InterruptTicketMutex::new(BTreeMap::new());
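/// Maps a task ID to its `TaskHandle`.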
static TASKS: InterruptTicketMutex<BTreeMap<TaskId, TaskHandle>> =
    InterruptTicketMutex::new(BTreeMap::new());

pub type CoreId = u32;

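/// Queues for handing newly spawned and woken-up tasks over to another core's scheduler.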
#[cfg(feature = "smp")]
pub(crate) struct SchedulerInput {
    new_tasks: VecDeque<NewTask>,
    wakeup_tasks: VecDeque<TaskHandle>,
}

#[cfg(feature = "smp")]
impl SchedulerInput {
    pub fn new() -> Self {
        Self {
            new_tasks: VecDeque::new(),
            wakeup_tasks: VecDeque::new(),
        }
    }
}

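/// Scheduler state that belongs to a single core. The struct is over-aligned so that two cores' schedulers do not share a cache line.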
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
pub(crate) struct PerCoreScheduler {
    #[cfg(feature = "smp")]
    core_id: CoreId,
    current_task: Rc<RefCell<Task>>,
    idle_task: Rc<RefCell<Task>>,
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    fpu_owner: Rc<RefCell<Task>>,
    ready_queue: PriorityTaskQueue,
    finished_tasks: VecDeque<Rc<RefCell<Task>>>,
    blocked_tasks: BlockedTaskQueue,
}

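/// Scheduler operations implemented on `&mut PerCoreScheduler`; `reschedule` may switch to another task and `exit` never returns.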
pub(crate) trait PerCoreSchedulerExt {
    fn reschedule(self);

    #[cfg(feature = "net")]
    fn add_network_timer(self, wakeup_time: Option<u64>);

    fn exit(self, exit_code: i32) -> !;
}

impl PerCoreSchedulerExt for &mut PerCoreScheduler {
    #[cfg(target_arch = "x86_64")]
    fn reschedule(self) {
        without_interrupts(|| {
            if let Some(last_stack_pointer) = self.scheduler() {
                let (new_stack_pointer, is_idle) = {
                    let borrowed = self.current_task.borrow();
                    (
                        borrowed.last_stack_pointer,
                        borrowed.status == TaskStatus::Idle,
                    )
                };

                if is_idle || Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
                    unsafe {
                        switch_to_fpu_owner(
                            last_stack_pointer,
                            new_stack_pointer.as_u64() as usize,
                        );
                    }
                } else {
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_u64() as usize);
                    }
                }
            }
        });
    }

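    /// On aarch64, a reschedule is requested by sending the `SGI_RESCHED` software-generated interrupt to the current core.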
    #[cfg(target_arch = "aarch64")]
    fn reschedule(self) {
        use aarch64_cpu::asm::barrier::{NSH, SY, dsb, isb};
        use arm_gic::IntId;
        use arm_gic::gicv3::{GicV3, SgiTarget, SgiTargetGroup};

        use crate::interrupts::SGI_RESCHED;

        dsb(NSH);
        isb(SY);

        let reschedid = IntId::sgi(SGI_RESCHED.into());
        #[cfg(feature = "smp")]
        let core_id = self.core_id;
        #[cfg(not(feature = "smp"))]
        let core_id = 0;

        GicV3::send_sgi(
            reschedid,
            SgiTarget::List {
                affinity3: 0,
                affinity2: 0,
                affinity1: 0,
                target_list: 1 << core_id,
            },
            SgiTargetGroup::CurrentGroup1,
        );

        interrupts::enable();
    }

    #[cfg(target_arch = "riscv64")]
    fn reschedule(self) {
        without_interrupts(|| self.scheduler());
    }

    #[cfg(feature = "net")]
    fn add_network_timer(self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks.add_network_timer(wakeup_time);
        });
    }

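    /// Terminates the current task: marks it as finished, wakes all tasks waiting on it in `join`, and reschedules away for good.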
    fn exit(self, exit_code: i32) -> ! {
        without_interrupts(|| {
            let mut current_task_borrowed = self.current_task.borrow_mut();
            assert_ne!(
                current_task_borrowed.status,
                TaskStatus::Idle,
                "Trying to terminate the idle task"
            );

            debug!(
                "Finishing task {} with exit code {}",
                current_task_borrowed.id, exit_code
            );
            current_task_borrowed.status = TaskStatus::Finished;
            NO_TASKS.fetch_sub(1, Ordering::SeqCst);

            let current_id = current_task_borrowed.id;
            drop(current_task_borrowed);

            if let Some(mut queue) = WAITING_TASKS.lock().remove(&current_id) {
                while let Some(task) = queue.pop_front() {
                    self.custom_wakeup(task);
                }
            }
        });

        self.reschedule();
        unreachable!()
    }
}

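/// Everything required to build a `Task`, so that tasks can be created for (and handed over to) an arbitrary core.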
struct NewTask {
    tid: TaskId,
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    core_id: CoreId,
    stacks: TaskStacks,
    object_map:
        Arc<RwSpinLock<HashMap<RawFd, Arc<async_lock::RwLock<dyn ObjectInterface>>, RandomState>>>,
}

impl From<NewTask> for Task {
    fn from(value: NewTask) -> Self {
        let NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map,
        } = value;
        let mut task = Self::new(tid, core_id, TaskStatus::Ready, prio, stacks, object_map);
        task.create_stack_frame(func, arg);
        task
    }
}

impl PerCoreScheduler {
    pub unsafe fn spawn(
        func: unsafe extern "C" fn(usize),
        arg: usize,
        prio: Priority,
        core_id: CoreId,
        stack_size: usize,
    ) -> TaskId {
        let tid = get_tid();
        let stacks = TaskStacks::new(stack_size);
        let new_task = NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map: core_scheduler().get_current_task_object_map(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);

            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                input_locked.new_tasks.push_back(new_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id)
            }
        };

        debug!("Creating task {tid} with priority {prio} on core {core_id}");

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

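    /// newlib only: clones the current task and distributes the clones over the cores in round-robin order.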
    #[cfg(feature = "newlib")]
    fn clone_impl(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);

        let core_id: CoreId = {
            let id = NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst);

            if id == arch::get_processor_count() {
                NEXT_CORE_ID.store(0, Ordering::SeqCst);
                0
            } else {
                id
            }
        };

        let current_task_borrowed = self.current_task.borrow();

        let tid = get_tid();
        let clone_task = NewTask {
            tid,
            func,
            arg,
            prio: current_task_borrowed.prio,
            core_id,
            stacks: TaskStacks::new(current_task_borrowed.stacks.get_user_stack_size()),
            object_map: current_task_borrowed.object_map.clone(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    current_task_borrowed.prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);
            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                input_locked.new_tasks.push_back(clone_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id);
            }
        };

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

    #[cfg(feature = "newlib")]
    pub fn clone(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        without_interrupts(|| self.clone_impl(func, arg))
    }

    #[inline]
    #[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
    pub fn is_scheduling(&self) -> bool {
        self.current_task.borrow().prio < self.ready_queue.get_highest_priority()
    }

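    /// Polls the executor and moves tasks whose wakeup time has passed back into the ready queue.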
    #[inline]
    pub fn handle_waiting_tasks(&mut self) {
        without_interrupts(|| {
            crate::executor::run();
            self.blocked_tasks
                .handle_waiting_tasks(&mut self.ready_queue);
        });
    }

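    /// Wakes the given blocked task. If it belongs to another core, its handle is pushed into that core's input queue and the core is notified.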
    #[cfg(not(feature = "smp"))]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        without_interrupts(|| {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        });
    }

    #[cfg(feature = "smp")]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        if task.get_core_id() == self.core_id {
            without_interrupts(|| {
                let task = self.blocked_tasks.custom_wakeup(task);
                self.ready_queue.push(task);
            });
        } else {
            get_scheduler_input(task.get_core_id())
                .lock()
                .wakeup_tasks
                .push_back(task);
            arch::wakeup_core(task.get_core_id());
        }
    }

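    /// Blocks the current task, optionally only until the given wakeup time.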
    #[inline]
    pub fn block_current_task(&mut self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks
                .add(self.current_task.clone(), wakeup_time);
        });
    }

    #[inline]
    pub fn get_current_task_handle(&self) -> TaskHandle {
        without_interrupts(|| {
            let current_task_borrowed = self.current_task.borrow();

            TaskHandle::new(
                current_task_borrowed.id,
                current_task_borrowed.prio,
                #[cfg(feature = "smp")]
                current_task_borrowed.core_id,
            )
        })
    }

    #[inline]
    pub fn get_current_task_id(&self) -> TaskId {
        without_interrupts(|| self.current_task.borrow().id)
    }

    #[inline]
    pub fn get_current_task_object_map(
        &self,
    ) -> Arc<RwSpinLock<HashMap<RawFd, Arc<async_lock::RwLock<dyn ObjectInterface>>, RandomState>>>
    {
        without_interrupts(|| self.current_task.borrow().object_map.clone())
    }

    #[inline]
    pub fn get_object(
        &self,
        fd: RawFd,
    ) -> io::Result<Arc<async_lock::RwLock<dyn ObjectInterface>>> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let object_map = current_task.object_map.read();
            object_map.get(&fd).cloned().ok_or(Errno::Badf)
        })
    }

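    /// common-os only: gives the current task a fresh object map that retains only the standard descriptors 0, 1, and 2.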
    #[cfg(feature = "common-os")]
    #[cfg_attr(not(target_arch = "x86_64"), expect(dead_code))]
    pub fn recreate_objmap(&self) -> io::Result<()> {
        let mut map = HashMap::<
            RawFd,
            Arc<async_lock::RwLock<dyn ObjectInterface>>,
            RandomState,
        >::with_hasher(RandomState::with_seeds(0, 0, 0, 0));

        without_interrupts(|| {
            let mut current_task = self.current_task.borrow_mut();
            let object_map = current_task.object_map.read();

            for i in 0..3 {
                if let Some(obj) = object_map.get(&i) {
                    map.insert(i, obj.clone());
                }
            }

            drop(object_map);
            current_task.object_map = Arc::new(RwSpinLock::new(map));
        });

        Ok(())
    }

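    /// Inserts the object into the current task's object map under the lowest free file descriptor and returns that descriptor.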
    pub fn insert_object(
        &self,
        obj: Arc<async_lock::RwLock<dyn ObjectInterface>>,
    ) -> io::Result<RawFd> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let new_fd = || -> io::Result<RawFd> {
                let mut fd: RawFd = 0;
                loop {
                    if !object_map.contains_key(&fd) {
                        break Ok(fd);
                    } else if fd == RawFd::MAX {
                        break Err(Errno::Overflow);
                    }

                    fd = fd.saturating_add(1);
                }
            };

            let fd = new_fd()?;
            let _ = object_map.insert(fd, obj.clone());
            Ok(fd)
        })
    }

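    /// Duplicates the object behind `fd` under the lowest free file descriptor.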
    pub fn dup_object(&self, fd: RawFd) -> io::Result<RawFd> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let obj = (*(object_map.get(&fd).ok_or(Errno::Inval)?)).clone();

            let new_fd = || -> io::Result<RawFd> {
                let mut fd: RawFd = 0;
                loop {
                    if !object_map.contains_key(&fd) {
                        break Ok(fd);
                    } else if fd == RawFd::MAX {
                        break Err(Errno::Overflow);
                    }

                    fd = fd.saturating_add(1);
                }
            };

            let fd = new_fd()?;
            match object_map.entry(fd) {
                hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
                hash_map::Entry::Vacant(vacant_entry) => {
                    vacant_entry.insert(obj);
                    Ok(fd)
                }
            }
        })
    }

    pub fn dup_object2(&self, fd1: RawFd, fd2: RawFd) -> io::Result<RawFd> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let obj = object_map.get(&fd1).cloned().ok_or(Errno::Badf)?;

            match object_map.entry(fd2) {
                hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
                hash_map::Entry::Vacant(vacant_entry) => {
                    vacant_entry.insert(obj);
                    Ok(fd2)
                }
            }
        })
    }

    pub fn remove_object(
        &self,
        fd: RawFd,
    ) -> io::Result<Arc<async_lock::RwLock<dyn ObjectInterface>>> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            object_map.remove(&fd).ok_or(Errno::Badf)
        })
    }

    #[inline]
    pub fn get_current_task_prio(&self) -> Priority {
        without_interrupts(|| self.current_task.borrow().prio)
    }

    #[allow(dead_code)]
    #[inline]
    pub fn get_priority_bitmap(&self) -> &u64 {
        self.ready_queue.get_priority_bitmap()
    }

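    /// x86_64: points the TSS and the core-local kernel stack pointer at the current task's kernel and interrupt stacks.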
    #[cfg(target_arch = "x86_64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();
        let tss = unsafe { &mut *CoreLocal::get().tss.get() };

        let rsp = current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.privilege_stack_table[0] = rsp.into();
        CoreLocal::get().kernel_stack.set(rsp.as_mut_ptr());
        let ist_start = current_task_borrowed.stacks.get_interrupt_stack()
            + current_task_borrowed.stacks.get_interrupt_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.interrupt_stack_table[0] = ist_start.into();
    }

    pub fn set_current_task_priority(&mut self, prio: Priority) {
        without_interrupts(|| {
            trace!("Change priority of the current task");
            self.current_task.borrow_mut().prio = prio;
        });
    }

    pub fn set_priority(&mut self, id: TaskId, prio: Priority) -> Result<(), ()> {
        trace!("Change priority of task {id} to priority {prio}");

        without_interrupts(|| {
            let task = get_task_handle(id).ok_or(())?;
            #[cfg(feature = "smp")]
            let other_core = task.get_core_id() != self.core_id;
            #[cfg(not(feature = "smp"))]
            let other_core = false;

            if other_core {
                warn!("Have to change the priority on another core");
            } else if self.current_task.borrow().id == task.get_id() {
                self.current_task.borrow_mut().prio = prio;
            } else {
                self.ready_queue
                    .set_priority(task, prio)
                    .expect("Could not find a valid task in the ready queue");
            }

            Ok(())
        })
    }

    #[cfg(target_arch = "riscv64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();

        let stack = (current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64)
            .as_u64();
        CoreLocal::get().kernel_stack.set(stack);
    }

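    /// Switches FPU ownership lazily: only when the current task is not the last FPU owner are the owner's FPU registers saved and the current task's restored.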
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    pub fn fpu_switch(&mut self) {
        if !Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
            debug!(
                "Switching FPU owner from task {} to {}",
                self.fpu_owner.borrow().id,
                self.current_task.borrow().id
            );

            self.fpu_owner.borrow_mut().last_fpu_state.save();
            self.current_task.borrow().last_fpu_state.restore();
            self.fpu_owner = self.current_task.clone();
        }
    }

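    /// Drops all finished tasks, which releases their stacks and remaining resources.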
    fn cleanup_tasks(&mut self) {
        while let Some(finished_task) = self.finished_tasks.pop_front() {
            debug!("Cleaning up task {}", finished_task.borrow().id);
        }
    }

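    /// smp: drains this core's input queues, waking handed-over tasks and enqueueing tasks spawned by other cores.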
    #[cfg(feature = "smp")]
    pub fn check_input(&mut self) {
        let mut input_locked = CoreLocal::get().scheduler_input.lock();

        while let Some(task) = input_locked.wakeup_tasks.pop_front() {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        }

        while let Some(new_task) = input_locked.new_tasks.pop_front() {
            let task = Rc::new(RefCell::new(Task::from(new_task)));
            self.ready_queue.push(task.clone());
        }
    }

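    /// Idle loop of a core: polls the executor and the input queues, reschedules as soon as a task is ready, and otherwise backs off into a wait state.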
    pub fn run() -> ! {
        let backoff = Backoff::new();

        loop {
            let core_scheduler = core_scheduler();
            interrupts::disable();

            crate::executor::run();

            #[cfg(feature = "smp")]
            core_scheduler.check_input();
            core_scheduler.cleanup_tasks();

            if core_scheduler.ready_queue.is_empty() {
                if backoff.is_completed() {
                    interrupts::enable_and_wait();
                    backoff.reset();
                } else {
                    interrupts::enable();
                    backoff.snooze();
                }
            } else {
                interrupts::enable();
                core_scheduler.reschedule();
                backoff.reset();
            }
        }
    }

    #[inline]
    #[cfg(target_arch = "aarch64")]
    pub fn get_last_stack_pointer(&self) -> memory_addresses::VirtAddr {
        self.current_task.borrow().last_stack_pointer
    }

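    /// Picks the next task to run. Returns a pointer to the slot holding the outgoing task's stack pointer when a context switch is required; the caller performs the switch, except on riscv64, where it happens inside this function.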
    pub fn scheduler(&mut self) -> Option<*mut usize> {
        crate::executor::run();

        self.cleanup_tasks();

        let (id, last_stack_pointer, prio, status) = {
            let mut borrowed = self.current_task.borrow_mut();
            (
                borrowed.id,
                ptr::from_mut(&mut borrowed.last_stack_pointer).cast::<usize>(),
                borrowed.prio,
                borrowed.status,
            )
        };

        let mut new_task = None;

        if status == TaskStatus::Running {
            if let Some(task) = self.ready_queue.pop_with_prio(prio) {
                new_task = Some(task);
            }
        } else {
            if status == TaskStatus::Finished {
                self.current_task.borrow_mut().status = TaskStatus::Invalid;
                self.finished_tasks.push_back(self.current_task.clone());
            }

            if let Some(task) = self.ready_queue.pop() {
                debug!("Task is available.");
                new_task = Some(task);
            } else if status != TaskStatus::Idle {
                debug!("Only Idle Task is available.");
                new_task = Some(self.idle_task.clone());
            }
        }

        if let Some(task) = new_task {
            if status == TaskStatus::Running {
                self.current_task.borrow_mut().status = TaskStatus::Ready;
                self.ready_queue.push(self.current_task.clone());
            }

            let (new_id, new_stack_pointer) = {
                let mut borrowed = task.borrow_mut();
                if borrowed.status != TaskStatus::Idle {
                    borrowed.status = TaskStatus::Running;
                }

                (borrowed.id, borrowed.last_stack_pointer)
            };

            if id != new_id {
                debug!(
                    "Switching task from {} to {} (stack {:#X} => {:p})",
                    id,
                    new_id,
                    unsafe { *last_stack_pointer },
                    new_stack_pointer
                );
                #[cfg(not(target_arch = "riscv64"))]
                {
                    self.current_task = task;
                }

                #[cfg(not(target_arch = "riscv64"))]
                return Some(last_stack_pointer);

                #[cfg(target_arch = "riscv64")]
                {
                    if sstatus::read().fs() == sstatus::FS::Dirty {
                        self.current_task.borrow_mut().last_fpu_state.save();
                    }
                    task.borrow().last_fpu_state.restore();
                    self.current_task = task;
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_usize());
                    }
                }
            }
        }

        None
    }
}

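/// Returns a task ID that is not yet in use.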
fn get_tid() -> TaskId {
    static TID_COUNTER: AtomicI32 = AtomicI32::new(0);
    let guard = TASKS.lock();

    loop {
        let id = TaskId::from(TID_COUNTER.fetch_add(1, Ordering::SeqCst));
        if !guard.contains_key(&id) {
            return id;
        }
    }
}

#[inline]
pub(crate) fn abort() -> ! {
    core_scheduler().exit(-1)
}

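/// Registers the current core with the scheduler: creates its idle task and per-core scheduler and publishes the core's input queue (and, on x86_64, its HLT flag).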
pub(crate) fn add_current_core() {
    let core_id = core_id();
    let tid = get_tid();
    let idle_task = Rc::new(RefCell::new(Task::new_idle(tid, core_id)));

    WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
    TASKS.lock().insert(
        tid,
        TaskHandle::new(
            tid,
            IDLE_PRIO,
            #[cfg(feature = "smp")]
            core_id,
        ),
    );
    debug!("Initializing scheduler for core {core_id} with idle task {tid}");
    let boxed_scheduler = Box::new(PerCoreScheduler {
        #[cfg(feature = "smp")]
        core_id,
        current_task: idle_task.clone(),
        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
        fpu_owner: idle_task.clone(),
        idle_task,
        ready_queue: PriorityTaskQueue::new(),
        finished_tasks: VecDeque::new(),
        blocked_tasks: BlockedTaskQueue::new(),
    });

    let scheduler = Box::into_raw(boxed_scheduler);
    set_core_scheduler(scheduler);
    #[cfg(feature = "smp")]
    {
        SCHEDULER_INPUTS.lock().insert(
            core_id.try_into().unwrap(),
            &CoreLocal::get().scheduler_input,
        );
        #[cfg(target_arch = "x86_64")]
        CORE_HLT_STATE
            .lock()
            .insert(core_id.try_into().unwrap(), &CoreLocal::get().hlt);
    }
}

#[inline]
#[cfg(all(target_arch = "x86_64", feature = "smp", not(feature = "idle-poll")))]
pub(crate) fn take_core_hlt_state(core_id: CoreId) -> bool {
    CORE_HLT_STATE.lock()[usize::try_from(core_id).unwrap()].swap(false, Ordering::Acquire)
}

#[inline]
#[cfg(feature = "smp")]
fn get_scheduler_input(core_id: CoreId) -> &'static InterruptTicketMutex<SchedulerInput> {
    SCHEDULER_INPUTS.lock()[usize::try_from(core_id).unwrap()]
}

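/// Spawns a task on the core chosen by `selector`; a negative selector distributes tasks over the cores in round-robin order.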
pub unsafe fn spawn(
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    stack_size: usize,
    selector: isize,
) -> TaskId {
    static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

    let core_id = if selector < 0 {
        CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % get_processor_count()
    } else {
        selector as u32
    };

    unsafe { PerCoreScheduler::spawn(func, arg, prio, core_id, stack_size) }
}

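/// Blocks the calling task until the task with the given ID has finished.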
#[allow(clippy::result_unit_err)]
pub fn join(id: TaskId) -> Result<(), ()> {
    let core_scheduler = core_scheduler();

    debug!(
        "Task {} is waiting for task {}",
        core_scheduler.get_current_task_id(),
        id
    );

    loop {
        let mut waiting_tasks_guard = WAITING_TASKS.lock();

        if let Some(queue) = waiting_tasks_guard.get_mut(&id) {
            queue.push_back(core_scheduler.get_current_task_handle());
            core_scheduler.block_current_task(None);

            drop(waiting_tasks_guard);
            core_scheduler.reschedule();
        } else {
            return Ok(());
        }
    }
}

pub fn shutdown(arg: i32) -> ! {
    crate::syscalls::shutdown(arg)
}

fn get_task_handle(id: TaskId) -> Option<TaskHandle> {
    TASKS.lock().get(&id).copied()
}

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) static BOOT_ROOT_PAGE_TABLE: OnceCell<usize> = OnceCell::new();

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) fn get_root_page_table() -> usize {
    let current_task_borrowed = core_scheduler().current_task.borrow_mut();
    current_task_borrowed.root_page_table
}