#![allow(clippy::type_complexity)]

use alloc::boxed::Box;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use alloc::sync::Arc;
#[cfg(feature = "smp")]
use alloc::vec::Vec;
use core::cell::RefCell;
use core::ptr;
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
use core::sync::atomic::AtomicBool;
use core::sync::atomic::{AtomicI32, AtomicU32, Ordering};

use ahash::RandomState;
use crossbeam_utils::Backoff;
use hashbrown::HashMap;
use hermit_sync::*;
#[cfg(target_arch = "riscv64")]
use riscv::register::sstatus;

use crate::arch::core_local::*;
#[cfg(target_arch = "riscv64")]
use crate::arch::switch::switch_to_task;
#[cfg(target_arch = "x86_64")]
use crate::arch::switch::{switch_to_fpu_owner, switch_to_task};
use crate::arch::{get_processor_count, interrupts};
use crate::errno::Errno;
use crate::fd::{FileDescriptor, ObjectInterface};
use crate::kernel::scheduler::TaskStacks;
use crate::scheduler::task::*;
use crate::{arch, io};

pub mod task;

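/// Number of tasks spawned on all cores (idle tasks are not counted).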
static NO_TASKS: AtomicU32 = AtomicU32::new(0);
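/// References to the per-core scheduler input queues, indexed by core ID.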
#[cfg(feature = "smp")]
static SCHEDULER_INPUTS: SpinMutex<Vec<&InterruptTicketMutex<SchedulerInput>>> =
    SpinMutex::new(Vec::new());
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
static CORE_HLT_STATE: SpinMutex<Vec<&AtomicBool>> = SpinMutex::new(Vec::new());
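/// Maps a task ID to the queue of tasks waiting for that task to finish (see `join`).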
static WAITING_TASKS: InterruptTicketMutex<BTreeMap<TaskId, VecDeque<TaskHandle>>> =
    InterruptTicketMutex::new(BTreeMap::new());
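/// Maps a task ID to its task handle.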
static TASKS: InterruptTicketMutex<BTreeMap<TaskId, TaskHandle>> =
    InterruptTicketMutex::new(BTreeMap::new());

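/// Unique identifier for a core.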
pub type CoreId = u32;

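/// Queues through which other cores hand newly spawned and woken-up tasks to this core's scheduler.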
#[cfg(feature = "smp")]
pub(crate) struct SchedulerInput {
    new_tasks: VecDeque<NewTask>,
    wakeup_tasks: VecDeque<TaskHandle>,
}

#[cfg(feature = "smp")]
impl SchedulerInput {
    pub fn new() -> Self {
        Self {
            new_tasks: VecDeque::new(),
            wakeup_tasks: VecDeque::new(),
        }
    }
}

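/// Scheduler state of a single core. The struct is over-aligned so that different cores' schedulers do not share a cache line.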
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
pub(crate) struct PerCoreScheduler {
    #[cfg(feature = "smp")]
    core_id: CoreId,
    current_task: Rc<RefCell<Task>>,
    idle_task: Rc<RefCell<Task>>,
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    fpu_owner: Rc<RefCell<Task>>,
    ready_queue: PriorityTaskQueue,
    finished_tasks: VecDeque<Rc<RefCell<Task>>>,
    blocked_tasks: BlockedTaskQueue,
}

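/// Scheduling entry points of the per-core scheduler.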
pub(crate) trait PerCoreSchedulerExt {
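    /// Triggers a scheduling decision and switches to the next ready task, if any.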
    fn reschedule(self);

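    /// Sets (or clears, with `None`) the wakeup time of the network polling task.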
    #[cfg(any(feature = "tcp", feature = "udp"))]
    fn add_network_timer(self, wakeup_time: Option<u64>);

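    /// Terminates the current task with `exit_code`, wakes all tasks waiting on it, and never returns.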
    fn exit(self, exit_code: i32) -> !;
}

impl PerCoreSchedulerExt for &mut PerCoreScheduler {
    #[cfg(target_arch = "x86_64")]
    fn reschedule(self) {
        without_interrupts(|| {
            if let Some(last_stack_pointer) = self.scheduler() {
                let (new_stack_pointer, is_idle) = {
                    let borrowed = self.current_task.borrow();
                    (
                        borrowed.last_stack_pointer,
                        borrowed.status == TaskStatus::Idle,
                    )
                };

                if is_idle || Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
                    unsafe {
                        switch_to_fpu_owner(
                            last_stack_pointer,
                            new_stack_pointer.as_u64() as usize,
                        );
                    }
                } else {
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_u64() as usize);
                    }
                }
            }
        });
    }

    #[cfg(target_arch = "aarch64")]
    fn reschedule(self) {
        use core::arch::asm;

        use arm_gic::IntId;
        use arm_gic::gicv3::{GicV3, SgiTarget, SgiTargetGroup};

        use crate::interrupts::SGI_RESCHED;

        unsafe {
            asm!("dsb nsh", "isb", options(nostack, nomem, preserves_flags));
        }

        let reschedid = IntId::sgi(SGI_RESCHED.into());
        #[cfg(feature = "smp")]
        let core_id = self.core_id;
        #[cfg(not(feature = "smp"))]
        let core_id = 0;

        GicV3::send_sgi(
            reschedid,
            SgiTarget::List {
                affinity3: 0,
                affinity2: 0,
                affinity1: 0,
                target_list: 1 << core_id,
            },
            SgiTargetGroup::CurrentGroup1,
        );

        interrupts::enable();
    }

    #[cfg(target_arch = "riscv64")]
    fn reschedule(self) {
        without_interrupts(|| self.scheduler());
    }

    #[cfg(any(feature = "tcp", feature = "udp"))]
    fn add_network_timer(self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks.add_network_timer(wakeup_time);
        });
    }

    fn exit(self, exit_code: i32) -> ! {
        without_interrupts(|| {
            let mut current_task_borrowed = self.current_task.borrow_mut();
            assert_ne!(
                current_task_borrowed.status,
                TaskStatus::Idle,
                "Trying to terminate the idle task"
            );

            debug!(
                "Finishing task {} with exit code {}",
                current_task_borrowed.id, exit_code
            );
            current_task_borrowed.status = TaskStatus::Finished;
            NO_TASKS.fetch_sub(1, Ordering::SeqCst);

            let current_id = current_task_borrowed.id;
            drop(current_task_borrowed);

            if let Some(mut queue) = WAITING_TASKS.lock().remove(&current_id) {
                while let Some(task) = queue.pop_front() {
                    self.custom_wakeup(task);
                }
            }
        });

        self.reschedule();
        unreachable!()
    }
}

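/// All data needed to create a new task; converted into a `Task` on the core that will run it.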
struct NewTask {
    tid: TaskId,
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    core_id: CoreId,
    stacks: TaskStacks,
    object_map: Arc<RwSpinLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>>,
}

impl From<NewTask> for Task {
    fn from(value: NewTask) -> Self {
        let NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map,
        } = value;
        let mut task = Self::new(tid, core_id, TaskStatus::Ready, prio, stacks, object_map);
        task.create_stack_frame(func, arg);
        task
    }
}

impl PerCoreScheduler {
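    /// Creates a new task with the given entry point, argument, priority, and stack size
    /// and enqueues it on the scheduler of `core_id`, waking that core if necessary.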
    pub unsafe fn spawn(
        func: unsafe extern "C" fn(usize),
        arg: usize,
        prio: Priority,
        core_id: CoreId,
        stack_size: usize,
    ) -> TaskId {
        let tid = get_tid();
        let stacks = TaskStacks::new(stack_size);
        let new_task = NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map: core_scheduler().get_current_task_object_map(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);

            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                input_locked.new_tasks.push_back(new_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id)
            }
        };

        debug!("Creating task {tid} with priority {prio} on core {core_id}");

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

    #[cfg(feature = "newlib")]
    fn clone_impl(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);

        let core_id: CoreId = {
            let id = NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst);

            if id == arch::get_processor_count() {
                NEXT_CORE_ID.store(0, Ordering::SeqCst);
                0
            } else {
                id
            }
        };

        let current_task_borrowed = self.current_task.borrow();

        let tid = get_tid();
        let clone_task = NewTask {
            tid,
            func,
            arg,
            prio: current_task_borrowed.prio,
            core_id,
            stacks: TaskStacks::new(current_task_borrowed.stacks.get_user_stack_size()),
            object_map: current_task_borrowed.object_map.clone(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    current_task_borrowed.prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);
            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                input_locked.new_tasks.push_back(clone_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id);
            }
        };

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

    #[cfg(feature = "newlib")]
    pub fn clone(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        without_interrupts(|| self.clone_impl(func, arg))
    }

    #[inline]
    #[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
    pub fn is_scheduling(&self) -> bool {
        self.current_task.borrow().prio < self.ready_queue.get_highest_priority()
    }

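    /// Polls the async executor and moves blocked tasks whose wakeup time has expired back to the ready queue.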
    #[inline]
    pub fn handle_waiting_tasks(&mut self) {
        without_interrupts(|| {
            crate::executor::run();
            self.blocked_tasks
                .handle_waiting_tasks(&mut self.ready_queue);
        });
    }

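    /// Wakes up the blocked task `task`. If the task belongs to another core, the request is forwarded to that core's input queue.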
    #[cfg(not(feature = "smp"))]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        without_interrupts(|| {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        });
    }

    #[cfg(feature = "smp")]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        if task.get_core_id() == self.core_id {
            without_interrupts(|| {
                let task = self.blocked_tasks.custom_wakeup(task);
                self.ready_queue.push(task);
            });
        } else {
            get_scheduler_input(task.get_core_id())
                .lock()
                .wakeup_tasks
                .push_back(task);
            arch::wakeup_core(task.get_core_id());
        }
    }

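    /// Blocks the current task, either until it is explicitly woken up or until `wakeup_time` has passed.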
    #[inline]
    pub fn block_current_task(&mut self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks
                .add(self.current_task.clone(), wakeup_time);
        });
    }

    #[inline]
    pub fn get_current_task_handle(&self) -> TaskHandle {
        without_interrupts(|| {
            let current_task_borrowed = self.current_task.borrow();

            TaskHandle::new(
                current_task_borrowed.id,
                current_task_borrowed.prio,
                #[cfg(feature = "smp")]
                current_task_borrowed.core_id,
            )
        })
    }

    #[inline]
    pub fn get_current_task_id(&self) -> TaskId {
        without_interrupts(|| self.current_task.borrow().id)
    }

    #[inline]
    pub fn get_current_task_object_map(
        &self,
    ) -> Arc<RwSpinLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>> {
        without_interrupts(|| self.current_task.borrow().object_map.clone())
    }

    #[inline]
    pub fn get_object(&self, fd: FileDescriptor) -> io::Result<Arc<dyn ObjectInterface>> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let object_map = current_task.object_map.read();
            object_map.get(&fd).cloned().ok_or(Errno::Badf)
        })
    }

    #[cfg(feature = "common-os")]
    #[cfg_attr(not(target_arch = "x86_64"), expect(dead_code))]
    pub fn recreate_objmap(&self) -> io::Result<()> {
        let mut map = HashMap::<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>::with_hasher(
            RandomState::with_seeds(0, 0, 0, 0),
        );

        without_interrupts(|| {
            let mut current_task = self.current_task.borrow_mut();
            let object_map = current_task.object_map.read();

            // Keep only the standard descriptors 0..=2.
            for i in 0..3 {
                if let Some(obj) = object_map.get(&i) {
                    map.insert(i, obj.clone());
                }
            }

            drop(object_map);
            current_task.object_map = Arc::new(RwSpinLock::new(map));
        });

        Ok(())
    }

    pub fn insert_object(&self, obj: Arc<dyn ObjectInterface>) -> io::Result<FileDescriptor> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            // Find the lowest file descriptor that is not yet in use.
            let new_fd = || -> io::Result<FileDescriptor> {
                let mut fd: FileDescriptor = 0;
                loop {
                    if !object_map.contains_key(&fd) {
                        break Ok(fd);
                    } else if fd == FileDescriptor::MAX {
                        break Err(Errno::Overflow);
                    }

                    fd = fd.saturating_add(1);
                }
            };

            let fd = new_fd()?;
            let _ = object_map.insert(fd, obj.clone());
            Ok(fd)
        })
    }

    pub fn dup_object(&self, fd: FileDescriptor) -> io::Result<FileDescriptor> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let obj = (*(object_map.get(&fd).ok_or(Errno::Inval)?)).clone();

            let new_fd = || -> io::Result<FileDescriptor> {
                let mut fd: FileDescriptor = 0;
                loop {
                    if !object_map.contains_key(&fd) {
                        break Ok(fd);
                    } else if fd == FileDescriptor::MAX {
                        break Err(Errno::Overflow);
                    }

                    fd = fd.saturating_add(1);
                }
            };

            let fd = new_fd()?;
            if object_map.try_insert(fd, obj).is_err() {
                Err(Errno::Mfile)
            } else {
                Ok(fd)
            }
        })
    }

    pub fn dup_object2(
        &self,
        fd1: FileDescriptor,
        fd2: FileDescriptor,
    ) -> io::Result<FileDescriptor> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let obj = object_map.get(&fd1).cloned().ok_or(Errno::Badf)?;

            if object_map.try_insert(fd2, obj).is_err() {
                Err(Errno::Mfile)
            } else {
                Ok(fd2)
            }
        })
    }

    pub fn remove_object(&self, fd: FileDescriptor) -> io::Result<Arc<dyn ObjectInterface>> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            object_map.remove(&fd).ok_or(Errno::Badf)
        })
    }

    #[inline]
    pub fn get_current_task_prio(&self) -> Priority {
        without_interrupts(|| self.current_task.borrow().prio)
    }

    #[allow(dead_code)]
    #[inline]
    pub fn get_priority_bitmap(&self) -> &u64 {
        self.ready_queue.get_priority_bitmap()
    }

    #[cfg(target_arch = "x86_64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();
        let tss = unsafe { &mut *CoreLocal::get().tss.get() };

        let rsp = current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.privilege_stack_table[0] = rsp.into();
        CoreLocal::get().kernel_stack.set(rsp.as_mut_ptr());
        let ist_start = current_task_borrowed.stacks.get_interrupt_stack()
            + current_task_borrowed.stacks.get_interrupt_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.interrupt_stack_table[0] = ist_start.into();
    }

    pub fn set_current_task_priority(&mut self, prio: Priority) {
        without_interrupts(|| {
            trace!("Change priority of the current task");
            self.current_task.borrow_mut().prio = prio;
        });
    }

    pub fn set_priority(&mut self, id: TaskId, prio: Priority) -> Result<(), ()> {
        trace!("Change priority of task {id} to priority {prio}");

        without_interrupts(|| {
            let task = get_task_handle(id).ok_or(())?;
            #[cfg(feature = "smp")]
            let other_core = task.get_core_id() != self.core_id;
            #[cfg(not(feature = "smp"))]
            let other_core = false;

            if other_core {
                warn!("Have to change the priority on another core");
            } else if self.current_task.borrow().id == task.get_id() {
                self.current_task.borrow_mut().prio = prio;
            } else {
                self.ready_queue
                    .set_priority(task, prio)
                    .expect("Could not find a valid task in the ready queue");
            }

            Ok(())
        })
    }

    #[cfg(target_arch = "riscv64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();

        let stack = (current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64)
            .as_u64();
        CoreLocal::get().kernel_stack.set(stack);
    }

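    /// Performs a lazy FPU switch: saves the previous owner's FPU state and restores the current task's state when the owner changes.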
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    pub fn fpu_switch(&mut self) {
        if !Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
            debug!(
                "Switching FPU owner from task {} to {}",
                self.fpu_owner.borrow().id,
                self.current_task.borrow().id
            );

            self.fpu_owner.borrow_mut().last_fpu_state.save();
            self.current_task.borrow().last_fpu_state.restore();
            self.fpu_owner = self.current_task.clone();
        }
    }

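    /// Drops all tasks that have finished since the last scheduling decision and releases their resources.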
    fn cleanup_tasks(&mut self) {
        while let Some(finished_task) = self.finished_tasks.pop_front() {
            debug!("Cleaning up task {}", finished_task.borrow().id);
        }
    }

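    /// Drains this core's input queues: wakes up tasks that other cores unblocked and enqueues tasks that were spawned for this core.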
    #[cfg(feature = "smp")]
    pub fn check_input(&mut self) {
        let mut input_locked = CoreLocal::get().scheduler_input.lock();

        while let Some(task) = input_locked.wakeup_tasks.pop_front() {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        }

        while let Some(new_task) = input_locked.new_tasks.pop_front() {
            let task = Rc::new(RefCell::new(Task::from(new_task)));
            self.ready_queue.push(task.clone());
        }
    }

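    /// Scheduler idle loop: polls the executor, drains the input queues, and either switches to a ready task or waits until new work arrives.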
698 pub fn run() -> ! {
702 let backoff = Backoff::new();
703
704 loop {
705 let core_scheduler = core_scheduler();
706 interrupts::disable();
707
708 crate::executor::run();
710
711 #[cfg(feature = "smp")]
713 core_scheduler.check_input();
714 core_scheduler.cleanup_tasks();
715
716 if core_scheduler.ready_queue.is_empty() {
717 if backoff.is_completed() {
718 interrupts::enable_and_wait();
719 backoff.reset();
720 } else {
721 interrupts::enable();
722 backoff.snooze();
723 }
724 } else {
725 interrupts::enable();
726 core_scheduler.reschedule();
727 backoff.reset();
728 }
729 }
730 }
731
732 #[inline]
733 #[cfg(target_arch = "aarch64")]
734 pub fn get_last_stack_pointer(&self) -> memory_addresses::VirtAddr {
735 self.current_task.borrow().last_stack_pointer
736 }
737
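    /// Picks the next task to run. Returns a pointer to the slot in which the stack pointer of
    /// the outgoing task has to be stored, or `None` if no task switch is necessary.
    /// On RISC-V the context switch is performed directly within this function.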
738 pub fn scheduler(&mut self) -> Option<*mut usize> {
741 crate::executor::run();
743
744 self.cleanup_tasks();
747
748 let (id, last_stack_pointer, prio, status) = {
750 let mut borrowed = self.current_task.borrow_mut();
751 (
752 borrowed.id,
753 ptr::from_mut(&mut borrowed.last_stack_pointer).cast::<usize>(),
754 borrowed.prio,
755 borrowed.status,
756 )
757 };
758
759 let mut new_task = None;
760
761 if status == TaskStatus::Running {
762 if let Some(task) = self.ready_queue.pop_with_prio(prio) {
765 new_task = Some(task);
766 }
767 } else {
768 if status == TaskStatus::Finished {
769 self.current_task.borrow_mut().status = TaskStatus::Invalid;
771 self.finished_tasks.push_back(self.current_task.clone());
772 }
773
774 if let Some(task) = self.ready_queue.pop() {
777 debug!("Task is available.");
779 new_task = Some(task);
780 } else if status != TaskStatus::Idle {
781 debug!("Only Idle Task is available.");
783 new_task = Some(self.idle_task.clone());
784 }
785 }
786
787 if let Some(task) = new_task {
788 if status == TaskStatus::Running {
792 self.current_task.borrow_mut().status = TaskStatus::Ready;
794 self.ready_queue.push(self.current_task.clone());
795 }
796
797 let (new_id, new_stack_pointer) = {
799 let mut borrowed = task.borrow_mut();
800 if borrowed.status != TaskStatus::Idle {
801 borrowed.status = TaskStatus::Running;
803 }
804
805 (borrowed.id, borrowed.last_stack_pointer)
806 };

            if id != new_id {
                debug!(
                    "Switching task from {} to {} (stack {:#X} => {:p})",
                    id,
                    new_id,
                    unsafe { *last_stack_pointer },
                    new_stack_pointer
                );
                #[cfg(not(target_arch = "riscv64"))]
                {
                    self.current_task = task;
                }

                #[cfg(not(target_arch = "riscv64"))]
                return Some(last_stack_pointer);

                #[cfg(target_arch = "riscv64")]
                {
                    if sstatus::read().fs() == sstatus::FS::Dirty {
                        self.current_task.borrow_mut().last_fpu_state.save();
                    }
                    task.borrow().last_fpu_state.restore();
                    self.current_task = task;
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_usize());
                    }
                }
            }
        }

        None
    }
}

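/// Returns a task ID that is not yet used by any task.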
fn get_tid() -> TaskId {
    static TID_COUNTER: AtomicI32 = AtomicI32::new(0);
    let guard = TASKS.lock();

    loop {
        let id = TaskId::from(TID_COUNTER.fetch_add(1, Ordering::SeqCst));
        if !guard.contains_key(&id) {
            return id;
        }
    }
}

#[inline]
pub(crate) fn abort() -> ! {
    core_scheduler().exit(-1)
}

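/// Creates the idle task and the per-core scheduler for the current core and registers them in the global tables.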
pub(crate) fn add_current_core() {
    // Create an idle task for this core.
    let core_id = core_id();
    let tid = get_tid();
    let idle_task = Rc::new(RefCell::new(Task::new_idle(tid, core_id)));

    // Register the idle task in the global task tables.
    WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
    TASKS.lock().insert(
        tid,
        TaskHandle::new(
            tid,
            IDLE_PRIO,
            #[cfg(feature = "smp")]
            core_id,
        ),
    );
    debug!("Initializing scheduler for core {core_id} with idle task {tid}");
    let boxed_scheduler = Box::new(PerCoreScheduler {
        #[cfg(feature = "smp")]
        core_id,
        current_task: idle_task.clone(),
        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
        fpu_owner: idle_task.clone(),
        idle_task,
        ready_queue: PriorityTaskQueue::new(),
        finished_tasks: VecDeque::new(),
        blocked_tasks: BlockedTaskQueue::new(),
    });

    let scheduler = Box::into_raw(boxed_scheduler);
    set_core_scheduler(scheduler);
    #[cfg(feature = "smp")]
    {
        SCHEDULER_INPUTS.lock().insert(
            core_id.try_into().unwrap(),
            &CoreLocal::get().scheduler_input,
        );
        #[cfg(target_arch = "x86_64")]
        CORE_HLT_STATE
            .lock()
            .insert(core_id.try_into().unwrap(), &CoreLocal::get().hlt);
    }
}

#[inline]
#[cfg(all(target_arch = "x86_64", feature = "smp", not(feature = "idle-poll")))]
pub(crate) fn take_core_hlt_state(core_id: CoreId) -> bool {
    CORE_HLT_STATE.lock()[usize::try_from(core_id).unwrap()].swap(false, Ordering::Acquire)
}

#[inline]
#[cfg(feature = "smp")]
fn get_scheduler_input(core_id: CoreId) -> &'static InterruptTicketMutex<SchedulerInput> {
    SCHEDULER_INPUTS.lock()[usize::try_from(core_id).unwrap()]
}

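/// Spawns a new task. A negative `selector` picks the target core round-robin; otherwise `selector` is used as the core ID.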
pub unsafe fn spawn(
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    stack_size: usize,
    selector: isize,
) -> TaskId {
    static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

    let core_id = if selector < 0 {
        CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % get_processor_count()
    } else {
        selector as u32
    };

    unsafe { PerCoreScheduler::spawn(func, arg, prio, core_id, stack_size) }
}

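/// Blocks the calling task until the task with the given `id` has finished.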
#[allow(clippy::result_unit_err)]
pub fn join(id: TaskId) -> Result<(), ()> {
    let core_scheduler = core_scheduler();

    debug!(
        "Task {} is waiting for task {}",
        core_scheduler.get_current_task_id(),
        id
    );

    loop {
        let mut waiting_tasks_guard = WAITING_TASKS.lock();

        if let Some(queue) = waiting_tasks_guard.get_mut(&id) {
            queue.push_back(core_scheduler.get_current_task_handle());
            core_scheduler.block_current_task(None);

            drop(waiting_tasks_guard);
            core_scheduler.reschedule();
        } else {
            return Ok(());
        }
    }
}

pub fn shutdown(arg: i32) -> ! {
    crate::syscalls::shutdown(arg)
}

fn get_task_handle(id: TaskId) -> Option<TaskHandle> {
    TASKS.lock().get(&id).copied()
}

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) static BOOT_ROOT_PAGE_TABLE: OnceCell<usize> = OnceCell::new();

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) fn get_root_page_table() -> usize {
    let current_task_borrowed = core_scheduler().current_task.borrow_mut();
    current_task_borrowed.root_page_table
}