#![allow(clippy::type_complexity)]

use alloc::boxed::Box;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use alloc::sync::Arc;
#[cfg(feature = "smp")]
use alloc::vec::Vec;
use core::cell::RefCell;
use core::ptr;
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
use core::sync::atomic::AtomicBool;
use core::sync::atomic::{AtomicI32, AtomicU32, Ordering};

use ahash::RandomState;
use crossbeam_utils::Backoff;
use hashbrown::{HashMap, hash_map};
use hermit_sync::*;
#[cfg(target_arch = "riscv64")]
use riscv::register::sstatus;

use crate::arch::core_local::*;
#[cfg(target_arch = "riscv64")]
use crate::arch::switch::switch_to_task;
#[cfg(target_arch = "x86_64")]
use crate::arch::switch::{switch_to_fpu_owner, switch_to_task};
use crate::arch::{get_processor_count, interrupts};
use crate::errno::Errno;
use crate::fd::{FileDescriptor, ObjectInterface};
use crate::kernel::scheduler::TaskStacks;
use crate::scheduler::task::*;
use crate::{arch, io};

pub mod task;

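/// Number of spawned tasks that have not yet finished; the per-core idle tasks are not counted.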
static NO_TASKS: AtomicU32 = AtomicU32::new(0);
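/// Per-core scheduler input queues, indexed by core ID; used to hand new and woken tasks to other cores.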
#[cfg(feature = "smp")]
static SCHEDULER_INPUTS: SpinMutex<Vec<&InterruptTicketMutex<SchedulerInput>>> =
    SpinMutex::new(Vec::new());
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
static CORE_HLT_STATE: SpinMutex<Vec<&AtomicBool>> = SpinMutex::new(Vec::new());
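/// Map between a task ID and the queue of tasks waiting (via `join`) for it to finish.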
static WAITING_TASKS: InterruptTicketMutex<BTreeMap<TaskId, VecDeque<TaskHandle>>> =
    InterruptTicketMutex::new(BTreeMap::new());
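/// Map between a task ID and its `TaskHandle`.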
static TASKS: InterruptTicketMutex<BTreeMap<TaskId, TaskHandle>> =
    InterruptTicketMutex::new(BTreeMap::new());

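/// Unique identifier of a processor core.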
pub type CoreId = u32;

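/// Work queues used by other cores to pass tasks to this core's scheduler.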
#[cfg(feature = "smp")]
pub(crate) struct SchedulerInput {
    new_tasks: VecDeque<NewTask>,
    wakeup_tasks: VecDeque<TaskHandle>,
}

#[cfg(feature = "smp")]
impl SchedulerInput {
    pub fn new() -> Self {
        Self {
            new_tasks: VecDeque::new(),
            wakeup_tasks: VecDeque::new(),
        }
    }
}

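/// Scheduler state owned by a single core.
///
/// The structure is aligned to a cache line (128 bytes on x86_64/aarch64, 64 bytes
/// elsewhere) so that per-core schedulers do not share cache lines.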
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
pub(crate) struct PerCoreScheduler {
    #[cfg(feature = "smp")]
    core_id: CoreId,
    current_task: Rc<RefCell<Task>>,
    idle_task: Rc<RefCell<Task>>,
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    fpu_owner: Rc<RefCell<Task>>,
    ready_queue: PriorityTaskQueue,
    finished_tasks: VecDeque<Rc<RefCell<Task>>>,
    blocked_tasks: BlockedTaskQueue,
}

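/// Scheduler operations that are implemented on `&mut PerCoreScheduler` and take the reference by value.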
pub(crate) trait PerCoreSchedulerExt {
    fn reschedule(self);

    #[cfg(feature = "net")]
    fn add_network_timer(self, wakeup_time: Option<u64>);

    fn exit(self, exit_code: i32) -> !;
}

impl PerCoreSchedulerExt for &mut PerCoreScheduler {
    #[cfg(target_arch = "x86_64")]
    fn reschedule(self) {
        without_interrupts(|| {
            if let Some(last_stack_pointer) = self.scheduler() {
                let (new_stack_pointer, is_idle) = {
                    let borrowed = self.current_task.borrow();
                    (
                        borrowed.last_stack_pointer,
                        borrowed.status == TaskStatus::Idle,
                    )
                };

                if is_idle || Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
                    unsafe {
                        switch_to_fpu_owner(
                            last_stack_pointer,
                            new_stack_pointer.as_u64() as usize,
                        );
                    }
                } else {
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_u64() as usize);
                    }
                }
            }
        });
    }

    #[cfg(target_arch = "aarch64")]
    fn reschedule(self) {
        use aarch64_cpu::asm::barrier::{NSH, SY, dsb, isb};
        use arm_gic::IntId;
        use arm_gic::gicv3::{GicV3, SgiTarget, SgiTargetGroup};

        use crate::interrupts::SGI_RESCHED;

        dsb(NSH);
        isb(SY);

        let reschedid = IntId::sgi(SGI_RESCHED.into());
        #[cfg(feature = "smp")]
        let core_id = self.core_id;
        #[cfg(not(feature = "smp"))]
        let core_id = 0;

        GicV3::send_sgi(
            reschedid,
            SgiTarget::List {
                affinity3: 0,
                affinity2: 0,
                affinity1: 0,
                target_list: 1 << core_id,
            },
            SgiTargetGroup::CurrentGroup1,
        );

        interrupts::enable();
    }

    #[cfg(target_arch = "riscv64")]
    fn reschedule(self) {
        without_interrupts(|| self.scheduler());
    }

    #[cfg(feature = "net")]
    fn add_network_timer(self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks.add_network_timer(wakeup_time);
        });
    }

    fn exit(self, exit_code: i32) -> ! {
        without_interrupts(|| {
            let mut current_task_borrowed = self.current_task.borrow_mut();
            assert_ne!(
                current_task_borrowed.status,
                TaskStatus::Idle,
                "Trying to terminate the idle task"
            );

            debug!(
                "Finishing task {} with exit code {}",
                current_task_borrowed.id, exit_code
            );
            current_task_borrowed.status = TaskStatus::Finished;
            NO_TASKS.fetch_sub(1, Ordering::SeqCst);

            let current_id = current_task_borrowed.id;
            drop(current_task_borrowed);

            if let Some(mut queue) = WAITING_TASKS.lock().remove(&current_id) {
                while let Some(task) = queue.pop_front() {
                    self.custom_wakeup(task);
                }
            }
        });

        self.reschedule();
        unreachable!()
    }
}

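/// Everything needed to create a new task on a (possibly remote) core.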
struct NewTask {
    tid: TaskId,
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    core_id: CoreId,
    stacks: TaskStacks,
    object_map: Arc<
        RwSpinLock<
            HashMap<FileDescriptor, Arc<async_lock::RwLock<dyn ObjectInterface>>, RandomState>,
        >,
    >,
}

impl From<NewTask> for Task {
    fn from(value: NewTask) -> Self {
        let NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map,
        } = value;
        let mut task = Self::new(tid, core_id, TaskStatus::Ready, prio, stacks, object_map);
        task.create_stack_frame(func, arg);
        task
    }
}

impl PerCoreScheduler {
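    /// Spawns a new task that starts at `func(arg)` with the given priority and stack size
    /// on core `core_id`; returns the ID of the new task.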
    pub unsafe fn spawn(
        func: unsafe extern "C" fn(usize),
        arg: usize,
        prio: Priority,
        core_id: CoreId,
        stack_size: usize,
    ) -> TaskId {
        let tid = get_tid();
        let stacks = TaskStacks::new(stack_size);
        let new_task = NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map: core_scheduler().get_current_task_object_map(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);

            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                input_locked.new_tasks.push_back(new_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id)
            }
        };

        debug!("Creating task {tid} with priority {prio} on core {core_id}");

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

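    /// Creates a task that inherits the current task's priority and object map
    /// (backing for the `newlib` clone support); target cores are picked round-robin.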
    #[cfg(feature = "newlib")]
    fn clone_impl(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);

        let core_id: CoreId = {
            let id = NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst);

            if id == arch::get_processor_count() {
                NEXT_CORE_ID.store(0, Ordering::SeqCst);
                0
            } else {
                id
            }
        };

        let current_task_borrowed = self.current_task.borrow();

        let tid = get_tid();
        let clone_task = NewTask {
            tid,
            func,
            arg,
            prio: current_task_borrowed.prio,
            core_id,
            stacks: TaskStacks::new(current_task_borrowed.stacks.get_user_stack_size()),
            object_map: current_task_borrowed.object_map.clone(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    current_task_borrowed.prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);
            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                input_locked.new_tasks.push_back(clone_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id);
            }
        };

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

    #[cfg(feature = "newlib")]
    pub fn clone(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        without_interrupts(|| self.clone_impl(func, arg))
    }

    #[inline]
    #[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
    pub fn is_scheduling(&self) -> bool {
        self.current_task.borrow().prio < self.ready_queue.get_highest_priority()
    }

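    /// Polls the kernel executor and lets the blocked-task queue move tasks whose
    /// wakeup time has passed back into the ready queue.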
    #[inline]
    pub fn handle_waiting_tasks(&mut self) {
        without_interrupts(|| {
            crate::executor::run();
            self.blocked_tasks
                .handle_waiting_tasks(&mut self.ready_queue);
        });
    }

    #[cfg(not(feature = "smp"))]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        without_interrupts(|| {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        });
    }

    #[cfg(feature = "smp")]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        if task.get_core_id() == self.core_id {
            without_interrupts(|| {
                let task = self.blocked_tasks.custom_wakeup(task);
                self.ready_queue.push(task);
            });
        } else {
            get_scheduler_input(task.get_core_id())
                .lock()
                .wakeup_tasks
                .push_back(task);
            arch::wakeup_core(task.get_core_id());
        }
    }

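    /// Moves the current task into the blocked-task queue, optionally with a wakeup time.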
    #[inline]
    pub fn block_current_task(&mut self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks
                .add(self.current_task.clone(), wakeup_time);
        });
    }

    #[inline]
    pub fn get_current_task_handle(&self) -> TaskHandle {
        without_interrupts(|| {
            let current_task_borrowed = self.current_task.borrow();

            TaskHandle::new(
                current_task_borrowed.id,
                current_task_borrowed.prio,
                #[cfg(feature = "smp")]
                current_task_borrowed.core_id,
            )
        })
    }

    #[inline]
    pub fn get_current_task_id(&self) -> TaskId {
        without_interrupts(|| self.current_task.borrow().id)
    }

    #[inline]
    pub fn get_current_task_object_map(
        &self,
    ) -> Arc<
        RwSpinLock<
            HashMap<FileDescriptor, Arc<async_lock::RwLock<dyn ObjectInterface>>, RandomState>,
        >,
    > {
        without_interrupts(|| self.current_task.borrow().object_map.clone())
    }

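    /// Looks up the object behind file descriptor `fd` in the current task's object map.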
    #[inline]
    pub fn get_object(
        &self,
        fd: FileDescriptor,
    ) -> io::Result<Arc<async_lock::RwLock<dyn ObjectInterface>>> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let object_map = current_task.object_map.read();
            object_map.get(&fd).cloned().ok_or(Errno::Badf)
        })
    }

    #[cfg(feature = "common-os")]
    #[cfg_attr(not(target_arch = "x86_64"), expect(dead_code))]
    pub fn recreate_objmap(&self) -> io::Result<()> {
        let mut map = HashMap::<
            FileDescriptor,
            Arc<async_lock::RwLock<dyn ObjectInterface>>,
            RandomState,
        >::with_hasher(RandomState::with_seeds(0, 0, 0, 0));

        without_interrupts(|| {
            let mut current_task = self.current_task.borrow_mut();
            let object_map = current_task.object_map.read();

            for i in 0..3 {
                if let Some(obj) = object_map.get(&i) {
                    map.insert(i, obj.clone());
                }
            }

            drop(object_map);
            current_task.object_map = Arc::new(RwSpinLock::new(map));
        });

        Ok(())
    }

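    /// Inserts the object into the current task's descriptor table and returns the
    /// lowest file descriptor that was still free.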
    pub fn insert_object(
        &self,
        obj: Arc<async_lock::RwLock<dyn ObjectInterface>>,
    ) -> io::Result<FileDescriptor> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let new_fd = || -> io::Result<FileDescriptor> {
                let mut fd: FileDescriptor = 0;
                loop {
                    if !object_map.contains_key(&fd) {
                        break Ok(fd);
                    } else if fd == FileDescriptor::MAX {
                        break Err(Errno::Overflow);
                    }

                    fd = fd.saturating_add(1);
                }
            };

            let fd = new_fd()?;
            let _ = object_map.insert(fd, obj.clone());
            Ok(fd)
        })
    }

    pub fn dup_object(&self, fd: FileDescriptor) -> io::Result<FileDescriptor> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let obj = (*(object_map.get(&fd).ok_or(Errno::Inval)?)).clone();

            let new_fd = || -> io::Result<FileDescriptor> {
                let mut fd: FileDescriptor = 0;
                loop {
                    if !object_map.contains_key(&fd) {
                        break Ok(fd);
                    } else if fd == FileDescriptor::MAX {
                        break Err(Errno::Overflow);
                    }

                    fd = fd.saturating_add(1);
                }
            };

            let fd = new_fd()?;
            match object_map.entry(fd) {
                hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
                hash_map::Entry::Vacant(vacant_entry) => {
                    vacant_entry.insert(obj);
                    Ok(fd)
                }
            }
        })
    }

    pub fn dup_object2(
        &self,
        fd1: FileDescriptor,
        fd2: FileDescriptor,
    ) -> io::Result<FileDescriptor> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let obj = object_map.get(&fd1).cloned().ok_or(Errno::Badf)?;

            match object_map.entry(fd2) {
                hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
                hash_map::Entry::Vacant(vacant_entry) => {
                    vacant_entry.insert(obj);
                    Ok(fd2)
                }
            }
        })
    }

    pub fn remove_object(
        &self,
        fd: FileDescriptor,
    ) -> io::Result<Arc<async_lock::RwLock<dyn ObjectInterface>>> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            object_map.remove(&fd).ok_or(Errno::Badf)
        })
    }

    #[inline]
    pub fn get_current_task_prio(&self) -> Priority {
        without_interrupts(|| self.current_task.borrow().prio)
    }

    #[allow(dead_code)]
    #[inline]
    pub fn get_priority_bitmap(&self) -> &u64 {
        self.ready_queue.get_priority_bitmap()
    }

    #[cfg(target_arch = "x86_64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();
        let tss = unsafe { &mut *CoreLocal::get().tss.get() };

        let rsp = current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.privilege_stack_table[0] = rsp.into();
        CoreLocal::get().kernel_stack.set(rsp.as_mut_ptr());
        let ist_start = current_task_borrowed.stacks.get_interrupt_stack()
            + current_task_borrowed.stacks.get_interrupt_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.interrupt_stack_table[0] = ist_start.into();
    }

    pub fn set_current_task_priority(&mut self, prio: Priority) {
        without_interrupts(|| {
            trace!("Change priority of the current task");
            self.current_task.borrow_mut().prio = prio;
        });
    }

    pub fn set_priority(&mut self, id: TaskId, prio: Priority) -> Result<(), ()> {
        trace!("Change priority of task {id} to priority {prio}");

        without_interrupts(|| {
            let task = get_task_handle(id).ok_or(())?;
            #[cfg(feature = "smp")]
            let other_core = task.get_core_id() != self.core_id;
            #[cfg(not(feature = "smp"))]
            let other_core = false;

            if other_core {
                warn!("Have to change the priority on another core");
            } else if self.current_task.borrow().id == task.get_id() {
                self.current_task.borrow_mut().prio = prio;
            } else {
                self.ready_queue
                    .set_priority(task, prio)
                    .expect("Did not find a valid task in the ready queue");
            }

            Ok(())
        })
    }

    #[cfg(target_arch = "riscv64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();

        let stack = (current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64)
            .as_u64();
        CoreLocal::get().kernel_stack.set(stack);
    }

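    /// Lazy FPU switching: if the current task does not own the FPU state, saves the
    /// previous owner's state, restores the current task's state, and takes ownership.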
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    pub fn fpu_switch(&mut self) {
        if !Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
            debug!(
                "Switching FPU owner from task {} to {}",
                self.fpu_owner.borrow().id,
                self.current_task.borrow().id
            );

            self.fpu_owner.borrow_mut().last_fpu_state.save();
            self.current_task.borrow().last_fpu_state.restore();
            self.fpu_owner = self.current_task.clone();
        }
    }

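    /// Drops the queued finished tasks so that their resources are released.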
    fn cleanup_tasks(&mut self) {
        while let Some(finished_task) = self.finished_tasks.pop_front() {
            debug!("Cleaning up task {}", finished_task.borrow().id);
        }
    }

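    /// Drains this core's scheduler input queues: wakes tasks that other cores woke up
    /// and enqueues tasks that other cores spawned on this core.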
    #[cfg(feature = "smp")]
    pub fn check_input(&mut self) {
        let mut input_locked = CoreLocal::get().scheduler_input.lock();

        while let Some(task) = input_locked.wakeup_tasks.pop_front() {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        }

        while let Some(new_task) = input_locked.new_tasks.pop_front() {
            let task = Rc::new(RefCell::new(Task::from(new_task)));
            self.ready_queue.push(task.clone());
        }
    }

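    /// Main loop of the idle task: repeatedly polls the executor, takes over work queued
    /// by other cores, cleans up finished tasks, and either reschedules or waits for an
    /// interrupt, backing off while the ready queue stays empty.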
    pub fn run() -> ! {
        let backoff = Backoff::new();

        loop {
            let core_scheduler = core_scheduler();
            interrupts::disable();

            crate::executor::run();

            #[cfg(feature = "smp")]
            core_scheduler.check_input();
            core_scheduler.cleanup_tasks();

            if core_scheduler.ready_queue.is_empty() {
                if backoff.is_completed() {
                    interrupts::enable_and_wait();
                    backoff.reset();
                } else {
                    interrupts::enable();
                    backoff.snooze();
                }
            } else {
                interrupts::enable();
                core_scheduler.reschedule();
                backoff.reset();
            }
        }
    }

    #[inline]
    #[cfg(target_arch = "aarch64")]
    pub fn get_last_stack_pointer(&self) -> memory_addresses::VirtAddr {
        self.current_task.borrow().last_stack_pointer
    }

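    /// Picks the next task to run and prepares the context switch.
    ///
    /// On x86_64 and aarch64 it returns a pointer to the location where the current
    /// task's stack pointer must be saved, or `None` if no switch is necessary; on
    /// riscv64 the switch is performed directly inside this function.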
    pub fn scheduler(&mut self) -> Option<*mut usize> {
        crate::executor::run();

        self.cleanup_tasks();

        let (id, last_stack_pointer, prio, status) = {
            let mut borrowed = self.current_task.borrow_mut();
            (
                borrowed.id,
                ptr::from_mut(&mut borrowed.last_stack_pointer).cast::<usize>(),
                borrowed.prio,
                borrowed.status,
            )
        };

        let mut new_task = None;

        if status == TaskStatus::Running {
            if let Some(task) = self.ready_queue.pop_with_prio(prio) {
                new_task = Some(task);
            }
        } else {
            if status == TaskStatus::Finished {
                self.current_task.borrow_mut().status = TaskStatus::Invalid;
                self.finished_tasks.push_back(self.current_task.clone());
            }

            if let Some(task) = self.ready_queue.pop() {
                debug!("Task is available.");
                new_task = Some(task);
            } else if status != TaskStatus::Idle {
                debug!("Only Idle Task is available.");
                new_task = Some(self.idle_task.clone());
            }
        }

        if let Some(task) = new_task {
            if status == TaskStatus::Running {
                self.current_task.borrow_mut().status = TaskStatus::Ready;
                self.ready_queue.push(self.current_task.clone());
            }

            let (new_id, new_stack_pointer) = {
                let mut borrowed = task.borrow_mut();
                if borrowed.status != TaskStatus::Idle {
                    borrowed.status = TaskStatus::Running;
                }

                (borrowed.id, borrowed.last_stack_pointer)
            };

            if id != new_id {
                debug!(
                    "Switching task from {} to {} (stack {:#X} => {:p})",
                    id,
                    new_id,
                    unsafe { *last_stack_pointer },
                    new_stack_pointer
                );
                #[cfg(not(target_arch = "riscv64"))]
                {
                    self.current_task = task;
                }

                #[cfg(not(target_arch = "riscv64"))]
                return Some(last_stack_pointer);

                #[cfg(target_arch = "riscv64")]
                {
                    if sstatus::read().fs() == sstatus::FS::Dirty {
                        self.current_task.borrow_mut().last_fpu_state.save();
                    }
                    task.borrow().last_fpu_state.restore();
                    self.current_task = task;
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_usize());
                    }
                }
            }
        }

        None
    }
}

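/// Returns a task ID that is not yet used by any existing task.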
fn get_tid() -> TaskId {
    static TID_COUNTER: AtomicI32 = AtomicI32::new(0);
    let guard = TASKS.lock();

    loop {
        let id = TaskId::from(TID_COUNTER.fetch_add(1, Ordering::SeqCst));
        if !guard.contains_key(&id) {
            return id;
        }
    }
}

#[inline]
pub(crate) fn abort() -> ! {
    core_scheduler().exit(-1)
}

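/// Creates the idle task and the `PerCoreScheduler` for the current core and registers
/// them with the global scheduler structures.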
pub(crate) fn add_current_core() {
    let core_id = core_id();
    let tid = get_tid();
    let idle_task = Rc::new(RefCell::new(Task::new_idle(tid, core_id)));

    WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
    TASKS.lock().insert(
        tid,
        TaskHandle::new(
            tid,
            IDLE_PRIO,
            #[cfg(feature = "smp")]
            core_id,
        ),
    );
    debug!("Initializing scheduler for core {core_id} with idle task {tid}");
    let boxed_scheduler = Box::new(PerCoreScheduler {
        #[cfg(feature = "smp")]
        core_id,
        current_task: idle_task.clone(),
        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
        fpu_owner: idle_task.clone(),
        idle_task,
        ready_queue: PriorityTaskQueue::new(),
        finished_tasks: VecDeque::new(),
        blocked_tasks: BlockedTaskQueue::new(),
    });

    let scheduler = Box::into_raw(boxed_scheduler);
    set_core_scheduler(scheduler);
    #[cfg(feature = "smp")]
    {
        SCHEDULER_INPUTS.lock().insert(
            core_id.try_into().unwrap(),
            &CoreLocal::get().scheduler_input,
        );
        #[cfg(target_arch = "x86_64")]
        CORE_HLT_STATE
            .lock()
            .insert(core_id.try_into().unwrap(), &CoreLocal::get().hlt);
    }
}

#[inline]
#[cfg(all(target_arch = "x86_64", feature = "smp", not(feature = "idle-poll")))]
pub(crate) fn take_core_hlt_state(core_id: CoreId) -> bool {
    CORE_HLT_STATE.lock()[usize::try_from(core_id).unwrap()].swap(false, Ordering::Acquire)
}

#[inline]
#[cfg(feature = "smp")]
fn get_scheduler_input(core_id: CoreId) -> &'static InterruptTicketMutex<SchedulerInput> {
    SCHEDULER_INPUTS.lock()[usize::try_from(core_id).unwrap()]
}

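/// Spawns a new task on the core chosen by `selector`: a negative value lets the
/// scheduler pick a core round-robin, otherwise `selector` is used as the core ID.
///
/// Usage sketch (illustrative only; the priority constant and stack size below are assumptions):
///
/// ```ignore
/// unsafe extern "C" fn worker(arg: usize) {
///     // task body
/// }
///
/// // Let the scheduler pick a core (negative selector) and use a 64 KiB stack.
/// let tid = unsafe { scheduler::spawn(worker, 0, NORMAL_PRIO, 0x1_0000, -1) };
/// ```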
pub unsafe fn spawn(
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    stack_size: usize,
    selector: isize,
) -> TaskId {
    static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

    let core_id = if selector < 0 {
        CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % get_processor_count()
    } else {
        selector as u32
    };

    unsafe { PerCoreScheduler::spawn(func, arg, prio, core_id, stack_size) }
}

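/// Blocks the current task until the task with ID `id` has finished.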
#[allow(clippy::result_unit_err)]
pub fn join(id: TaskId) -> Result<(), ()> {
    let core_scheduler = core_scheduler();

    debug!(
        "Task {} is waiting for task {}",
        core_scheduler.get_current_task_id(),
        id
    );

    loop {
        let mut waiting_tasks_guard = WAITING_TASKS.lock();

        if let Some(queue) = waiting_tasks_guard.get_mut(&id) {
            queue.push_back(core_scheduler.get_current_task_handle());
            core_scheduler.block_current_task(None);

            drop(waiting_tasks_guard);
            core_scheduler.reschedule();
        } else {
            return Ok(());
        }
    }
}

pub fn shutdown(arg: i32) -> ! {
    crate::syscalls::shutdown(arg)
}

fn get_task_handle(id: TaskId) -> Option<TaskHandle> {
    TASKS.lock().get(&id).copied()
}

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) static BOOT_ROOT_PAGE_TABLE: OnceCell<usize> = OnceCell::new();

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) fn get_root_page_table() -> usize {
    let current_task_borrowed = core_scheduler().current_task.borrow_mut();
    current_task_borrowed.root_page_table
}