#![allow(clippy::type_complexity)]

use alloc::boxed::Box;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use alloc::sync::Arc;
#[cfg(feature = "smp")]
use alloc::vec::Vec;
use core::cell::RefCell;
use core::ptr;
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
use core::sync::atomic::AtomicBool;
use core::sync::atomic::{AtomicI32, AtomicU32, Ordering};

use ahash::RandomState;
use crossbeam_utils::Backoff;
use hashbrown::{HashMap, hash_map};
use hermit_sync::*;
#[cfg(target_arch = "riscv64")]
use riscv::register::sstatus;
use timer_interrupts::TimerList;

use crate::arch::core_local::*;
#[cfg(target_arch = "riscv64")]
use crate::arch::switch::switch_to_task;
#[cfg(target_arch = "x86_64")]
use crate::arch::switch::{switch_to_fpu_owner, switch_to_task};
use crate::arch::{get_processor_count, interrupts};
use crate::errno::Errno;
use crate::fd::{Fd, RawFd};
use crate::kernel::scheduler::TaskStacks;
use crate::scheduler::task::*;
use crate::{arch, io};

pub mod task;
pub mod timer_interrupts;

static NO_TASKS: AtomicU32 = AtomicU32::new(0);
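/// Per-core scheduler input queues, indexed by core ID.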
#[cfg(feature = "smp")]
static SCHEDULER_INPUTS: SpinMutex<Vec<&InterruptTicketMutex<SchedulerInput>>> =
	SpinMutex::new(Vec::new());
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
static CORE_HLT_STATE: SpinMutex<Vec<&AtomicBool>> = SpinMutex::new(Vec::new());
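/// Map between a task ID and the queue of tasks waiting for it to finish.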
static WAITING_TASKS: InterruptTicketMutex<BTreeMap<TaskId, VecDeque<TaskHandle>>> =
	InterruptTicketMutex::new(BTreeMap::new());
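/// Map between a task ID and its task handle.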
static TASKS: InterruptTicketMutex<BTreeMap<TaskId, TaskHandle>> =
	InterruptTicketMutex::new(BTreeMap::new());

/// Unique identifier for a core.
pub type CoreId = u32;

#[cfg(feature = "smp")]
pub(crate) struct SchedulerInput {
	/// Queue of newly created tasks.
	new_tasks: VecDeque<NewTask>,
	/// Queue of tasks woken up by another core.
	wakeup_tasks: VecDeque<TaskHandle>,
}

#[cfg(feature = "smp")]
impl SchedulerInput {
	pub fn new() -> Self {
		Self {
			new_tasks: VecDeque::new(),
			wakeup_tasks: VecDeque::new(),
		}
	}
}

// Align the per-core scheduler to the cache line to avoid false sharing
// between cores.
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
	not(any(target_arch = "x86_64", target_arch = "aarch64")),
	repr(align(64))
)]
pub(crate) struct PerCoreScheduler {
	/// Core ID of this per-core scheduler
	#[cfg(feature = "smp")]
	core_id: CoreId,
	/// Task which is currently running
	current_task: Rc<RefCell<Task>>,
	/// Idle task of this core
	idle_task: Rc<RefCell<Task>>,
	/// Task which currently owns the FPU
	#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
	fpu_owner: Rc<RefCell<Task>>,
	/// Queue of tasks which are ready to run
	ready_queue: PriorityTaskQueue,
	/// Queue of finished tasks waiting for cleanup
	finished_tasks: VecDeque<Rc<RefCell<Task>>>,
	/// Queue of blocked tasks, sorted by wakeup time
	blocked_tasks: BlockedTaskQueue,
	/// Pending wakeup timers of this core
	pub timers: TimerList,
}

pub(crate) trait PerCoreSchedulerExt {
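	/// Triggers the scheduler to reschedule the tasks.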
	fn reschedule(self);

	/// Terminates the current task on the current core and never returns.
	fn exit(self, exit_code: i32) -> !;
}

impl PerCoreSchedulerExt for &mut PerCoreScheduler {
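	// On x86_64, the reschedule happens in place: `scheduler()` selects the
	// next task and the matching context-switch routine is called directly.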
	#[cfg(target_arch = "x86_64")]
	fn reschedule(self) {
		without_interrupts(|| {
			let Some(last_stack_pointer) = self.scheduler() else {
				return;
			};

			let (new_stack_pointer, is_idle) = {
				let borrowed = self.current_task.borrow();
				(
					borrowed.last_stack_pointer,
					borrowed.status == TaskStatus::Idle,
				)
			};

			if is_idle || Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
				unsafe {
					switch_to_fpu_owner(last_stack_pointer, new_stack_pointer.as_u64() as usize);
				}
			} else {
				unsafe {
					switch_to_task(last_stack_pointer, new_stack_pointer.as_u64() as usize);
				}
			}
		});
	}

	/// Triggers a reschedule by sending a software-generated interrupt (SGI)
	/// to the current core; the actual task switch happens in the SGI handler.
	#[cfg(target_arch = "aarch64")]
	fn reschedule(self) {
		use aarch64_cpu::asm::barrier::{NSH, SY, dsb, isb};
		use arm_gic::IntId;
		use arm_gic::gicv3::{GicV3, SgiTarget, SgiTargetGroup};

		use crate::interrupts::SGI_RESCHED;

		dsb(NSH);
		isb(SY);

		let reschedid = IntId::sgi(SGI_RESCHED.into());
		#[cfg(feature = "smp")]
		let core_id = self.core_id;
		#[cfg(not(feature = "smp"))]
		let core_id = 0;

		GicV3::send_sgi(
			reschedid,
			SgiTarget::List {
				affinity3: 0,
				affinity2: 0,
				affinity1: 0,
				target_list: 1 << core_id,
			},
			SgiTargetGroup::CurrentGroup1,
		);

		interrupts::enable();
	}

	#[cfg(target_arch = "riscv64")]
	fn reschedule(self) {
		without_interrupts(|| self.scheduler());
	}

	fn exit(self, exit_code: i32) -> ! {
		without_interrupts(|| {
			let mut current_task_borrowed = self.current_task.borrow_mut();
			assert_ne!(
				current_task_borrowed.status,
				TaskStatus::Idle,
				"Trying to terminate the idle task"
			);

			// Mark the task as finished; its resources are released later
			// by `cleanup_tasks`.
			debug!(
				"Finishing task {} with exit code {}",
				current_task_borrowed.id, exit_code
			);
			current_task_borrowed.status = TaskStatus::Finished;
			NO_TASKS.fetch_sub(1, Ordering::SeqCst);

			let current_id = current_task_borrowed.id;
			drop(current_task_borrowed);

			// Wake up all tasks that are waiting (joined) on this one.
			if let Some(mut queue) = WAITING_TASKS.lock().remove(&current_id) {
				while let Some(task) = queue.pop_front() {
					self.custom_wakeup(task);
				}
			}
		});

		self.reschedule();
		unreachable!()
	}
}

/// All information needed to create a new task.
struct NewTask {
	tid: TaskId,
	func: unsafe extern "C" fn(usize),
	arg: usize,
	prio: Priority,
	core_id: CoreId,
	stacks: TaskStacks,
	object_map: Arc<RwSpinLock<HashMap<RawFd, Arc<async_lock::RwLock<Fd>>, RandomState>>>,
}

impl From<NewTask> for Task {
	fn from(value: NewTask) -> Self {
		let NewTask {
			tid,
			func,
			arg,
			prio,
			core_id,
			stacks,
			object_map,
		} = value;
		let mut task = Self::new(tid, core_id, TaskStatus::Ready, prio, stacks, object_map);
		task.create_stack_frame(func, arg);
		task
	}
}

impl PerCoreScheduler {
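	/// Spawns a new task with the given entry point, priority, and stack
	/// size on the given core.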
	pub unsafe fn spawn(
		func: unsafe extern "C" fn(usize),
		arg: usize,
		prio: Priority,
		core_id: CoreId,
		stack_size: usize,
	) -> TaskId {
		let tid = get_tid();
		let stacks = TaskStacks::new(stack_size);
		let new_task = NewTask {
			tid,
			func,
			arg,
			prio,
			core_id,
			stacks,
			object_map: core_scheduler().get_current_task_object_map(),
		};

		// Add the new task to the task lists and decide whether the target
		// core has to be woken up.
		let wakeup = {
			#[cfg(feature = "smp")]
			let mut input_locked = get_scheduler_input(core_id).lock();
			WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
			TASKS.lock().insert(
				tid,
				TaskHandle::new(
					tid,
					prio,
					#[cfg(feature = "smp")]
					core_id,
				),
			);
			NO_TASKS.fetch_add(1, Ordering::SeqCst);

			#[cfg(feature = "smp")]
			if core_id == core_scheduler().core_id {
				let task = Rc::new(RefCell::new(Task::from(new_task)));
				core_scheduler().ready_queue.push(task);
				false
			} else {
				input_locked.new_tasks.push_back(new_task);
				true
			}
			#[cfg(not(feature = "smp"))]
			if core_id == 0 {
				let task = Rc::new(RefCell::new(Task::from(new_task)));
				core_scheduler().ready_queue.push(task);
				false
			} else {
				panic!("Invalid core_id {core_id}!")
			}
		};

		debug!("Creating task {tid} with priority {prio} on core {core_id}");

		if wakeup {
			arch::wakeup_core(core_id);
		}

		tid
	}

	#[cfg(feature = "newlib")]
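	/// Helper for `clone`: creates a copy of the current task, distributing
	/// new tasks round-robin over the available cores.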
	fn clone_impl(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
		static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);

		// Distribute the cloned tasks round-robin over all cores.
		let core_id: CoreId = {
			let id = NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst);

			if id == arch::get_processor_count() {
				// All cores are used; wrap around and start again at core 0.
				NEXT_CORE_ID.store(0, Ordering::SeqCst);
				0
			} else {
				id
			}
		};

		let current_task_borrowed = self.current_task.borrow();

		let tid = get_tid();
		let clone_task = NewTask {
			tid,
			func,
			arg,
			prio: current_task_borrowed.prio,
			core_id,
			stacks: TaskStacks::new(current_task_borrowed.stacks.get_user_stack_size()),
			object_map: current_task_borrowed.object_map.clone(),
		};

		// Add the cloned task to the task lists and decide whether the
		// target core has to be woken up.
		let wakeup = {
			#[cfg(feature = "smp")]
			let mut input_locked = get_scheduler_input(core_id).lock();
			WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
			TASKS.lock().insert(
				tid,
				TaskHandle::new(
					tid,
					current_task_borrowed.prio,
					#[cfg(feature = "smp")]
					core_id,
				),
			);
			NO_TASKS.fetch_add(1, Ordering::SeqCst);
			#[cfg(feature = "smp")]
			if core_id == core_scheduler().core_id {
				let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
				core_scheduler().ready_queue.push(clone_task);
				false
			} else {
				input_locked.new_tasks.push_back(clone_task);
				true
			}
			#[cfg(not(feature = "smp"))]
			if core_id == 0 {
				let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
				core_scheduler().ready_queue.push(clone_task);
				false
			} else {
				panic!("Invalid core_id {core_id}!");
			}
		};

		if wakeup {
			arch::wakeup_core(core_id);
		}

		tid
	}

	#[cfg(feature = "newlib")]
	pub fn clone(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
		without_interrupts(|| self.clone_impl(func, arg))
	}

	#[inline]
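	/// Returns `true` if the ready queue contains a task with a higher
	/// priority than the currently running task.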
	#[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
	pub fn is_scheduling(&self) -> bool {
		self.current_task.borrow().prio < self.ready_queue.get_highest_priority()
	}

	#[inline]
	pub fn handle_waiting_tasks(&mut self) {
		without_interrupts(|| {
			crate::executor::run();
			self.blocked_tasks
				.handle_waiting_tasks(&mut self.ready_queue);
		});
	}

	#[cfg(not(feature = "smp"))]
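	/// Wakes up the given blocked task and moves it into the ready queue.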
	pub fn custom_wakeup(&mut self, task: TaskHandle) {
		without_interrupts(|| {
			let task = self.blocked_tasks.custom_wakeup(task);
			self.ready_queue.push(task);
		});
	}

	/// Wakes up the given blocked task; if it belongs to another core, it is
	/// forwarded to that core's input queue.
	#[cfg(feature = "smp")]
	pub fn custom_wakeup(&mut self, task: TaskHandle) {
		if task.get_core_id() == self.core_id {
			without_interrupts(|| {
				let task = self.blocked_tasks.custom_wakeup(task);
				self.ready_queue.push(task);
			});
		} else {
			get_scheduler_input(task.get_core_id())
				.lock()
				.wakeup_tasks
				.push_back(task);
			// Make sure the other core is awake and processes its input queue.
			arch::wakeup_core(task.get_core_id());
		}
	}

	#[inline]
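	/// Blocks the current task, optionally until the given wakeup time.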
	pub fn block_current_task(&mut self, wakeup_time: Option<u64>) {
		without_interrupts(|| {
			self.blocked_tasks
				.add(self.current_task.clone(), wakeup_time);
		});
	}

	/// Returns a handle to the currently running task.
	#[inline]
	pub fn get_current_task_handle(&self) -> TaskHandle {
		without_interrupts(|| {
			let current_task_borrowed = self.current_task.borrow();

			TaskHandle::new(
				current_task_borrowed.id,
				current_task_borrowed.prio,
				#[cfg(feature = "smp")]
				current_task_borrowed.core_id,
			)
		})
	}

	/// Returns the ID of the currently running task.
	#[inline]
	pub fn get_current_task_id(&self) -> TaskId {
		without_interrupts(|| self.current_task.borrow().id)
	}

	/// Returns the object map (file-descriptor table) of the current task.
	#[inline]
	pub fn get_current_task_object_map(
		&self,
	) -> Arc<RwSpinLock<HashMap<RawFd, Arc<async_lock::RwLock<Fd>>, RandomState>>> {
		without_interrupts(|| self.current_task.borrow().object_map.clone())
	}

	/// Maps a file descriptor to its object and returns a shared reference
	/// to it, or `EBADF` if the descriptor is not in use.
	#[inline]
	pub fn get_object(&self, fd: RawFd) -> io::Result<Arc<async_lock::RwLock<Fd>>> {
		without_interrupts(|| {
			let current_task = self.current_task.borrow();
			let object_map = current_task.object_map.read();
			object_map.get(&fd).cloned().ok_or(Errno::Badf)
		})
	}

	#[cfg(feature = "common-os")]
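	/// Creates a fresh object map for the current task, retaining only the
	/// standard descriptors 0, 1, and 2.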
	#[cfg_attr(not(target_arch = "x86_64"), expect(dead_code))]
	pub fn recreate_objmap(&self) -> io::Result<()> {
		let mut map = HashMap::<RawFd, Arc<async_lock::RwLock<Fd>>, RandomState>::with_hasher(
			RandomState::with_seeds(0, 0, 0, 0),
		);

		without_interrupts(|| {
			let mut current_task = self.current_task.borrow_mut();
			let object_map = current_task.object_map.read();

			// Copy only stdin (0), stdout (1), and stderr (2) into the new map.
			for i in 0..3 {
				if let Some(obj) = object_map.get(&i) {
					map.insert(i, obj.clone());
				}
			}

			drop(object_map);
			current_task.object_map = Arc::new(RwSpinLock::new(map));
		});

		Ok(())
	}

	/// Inserts the given object into the task's object map and returns the
	/// newly assigned file descriptor.
	pub fn insert_object(&self, obj: Arc<async_lock::RwLock<Fd>>) -> io::Result<RawFd> {
		without_interrupts(|| {
			let current_task = self.current_task.borrow();
			let mut object_map = current_task.object_map.write();

			// Find the lowest file descriptor that is not yet in use.
			let new_fd = || -> io::Result<RawFd> {
				let mut fd: RawFd = 0;
				loop {
					if !object_map.contains_key(&fd) {
						break Ok(fd);
					} else if fd == RawFd::MAX {
						break Err(Errno::Overflow);
					}

					fd = fd.saturating_add(1);
				}
			};

			let fd = new_fd()?;
			let _ = object_map.insert(fd, obj.clone());
			Ok(fd)
		})
	}

	/// Duplicates the object referenced by `fd` under a new, unused file
	/// descriptor (cf. POSIX `dup`).
	pub fn dup_object(&self, fd: RawFd) -> io::Result<RawFd> {
		without_interrupts(|| {
			let current_task = self.current_task.borrow();
			let mut object_map = current_task.object_map.write();

			let obj = (*(object_map.get(&fd).ok_or(Errno::Inval)?)).clone();

			// Find the lowest file descriptor that is not yet in use.
			let new_fd = || -> io::Result<RawFd> {
				let mut fd: RawFd = 0;
				loop {
					if !object_map.contains_key(&fd) {
						break Ok(fd);
					} else if fd == RawFd::MAX {
						break Err(Errno::Overflow);
					}

					fd = fd.saturating_add(1);
				}
			};

			let fd = new_fd()?;
			match object_map.entry(fd) {
				hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
				hash_map::Entry::Vacant(vacant_entry) => {
					vacant_entry.insert(obj);
					Ok(fd)
				}
			}
		})
	}

	/// Duplicates the object referenced by `fd1` under the descriptor `fd2`.
	/// Unlike POSIX `dup2`, this fails with `EMFILE` if `fd2` is already in
	/// use instead of replacing it.
	pub fn dup_object2(&self, fd1: RawFd, fd2: RawFd) -> io::Result<RawFd> {
		without_interrupts(|| {
			let current_task = self.current_task.borrow();
			let mut object_map = current_task.object_map.write();

			let obj = object_map.get(&fd1).cloned().ok_or(Errno::Badf)?;

			match object_map.entry(fd2) {
				hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
				hash_map::Entry::Vacant(vacant_entry) => {
					vacant_entry.insert(obj);
					Ok(fd2)
				}
			}
		})
	}

	/// Removes the object referenced by `fd` from the object map and
	/// returns it.
	pub fn remove_object(&self, fd: RawFd) -> io::Result<Arc<async_lock::RwLock<Fd>>> {
		without_interrupts(|| {
			let current_task = self.current_task.borrow();
			let mut object_map = current_task.object_map.write();

			object_map.remove(&fd).ok_or(Errno::Badf)
		})
	}

	/// Returns the priority of the currently running task.
	#[inline]
	pub fn get_current_task_prio(&self) -> Priority {
		without_interrupts(|| self.current_task.borrow().prio)
	}

	/// Returns a reference to the bitmap of priorities that currently have
	/// ready tasks queued.
	#[allow(dead_code)]
	#[inline]
	pub fn get_priority_bitmap(&self) -> &u64 {
		self.ready_queue.get_priority_bitmap()
	}

	#[cfg(target_arch = "x86_64")]
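	/// Loads the current task's kernel and interrupt stack pointers into the
	/// TSS and the core-local storage.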
	pub fn set_current_kernel_stack(&self) {
		let current_task_borrowed = self.current_task.borrow();
		let tss = unsafe { &mut *CoreLocal::get().tss.get() };

		let rsp = current_task_borrowed.stacks.get_kernel_stack()
			+ current_task_borrowed.stacks.get_kernel_stack_size() as u64
			- TaskStacks::MARKER_SIZE as u64;
		tss.privilege_stack_table[0] = rsp.into();
		CoreLocal::get().kernel_stack.set(rsp.as_mut_ptr());
		let ist_start = current_task_borrowed.stacks.get_interrupt_stack()
			+ current_task_borrowed.stacks.get_interrupt_stack_size() as u64
			- TaskStacks::MARKER_SIZE as u64;
		tss.interrupt_stack_table[0] = ist_start.into();
	}

	/// Sets the priority of the currently running task.
	pub fn set_current_task_priority(&mut self, prio: Priority) {
		without_interrupts(|| {
			trace!("Change priority of the current task");
			self.current_task.borrow_mut().prio = prio;
		});
	}

	/// Sets the priority of the task with the given ID.
	pub fn set_priority(&mut self, id: TaskId, prio: Priority) -> Result<(), ()> {
		trace!("Change priority of task {id} to priority {prio}");

		without_interrupts(|| {
			let task = get_task_handle(id).ok_or(())?;
			#[cfg(feature = "smp")]
			let other_core = task.get_core_id() != self.core_id;
			#[cfg(not(feature = "smp"))]
			let other_core = false;

			if other_core {
				warn!("Have to change the priority on another core");
			} else if self.current_task.borrow().id == task.get_id() {
				self.current_task.borrow_mut().prio = prio;
			} else {
				self.ready_queue
					.set_priority(task, prio)
					.expect("could not find task in ready queue");
			}

			Ok(())
		})
	}

	#[cfg(target_arch = "riscv64")]
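	/// Stores the top of the current task's kernel stack in the core-local
	/// storage.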
	pub fn set_current_kernel_stack(&self) {
		let current_task_borrowed = self.current_task.borrow();

		let stack = (current_task_borrowed.stacks.get_kernel_stack()
			+ current_task_borrowed.stacks.get_kernel_stack_size() as u64
			- TaskStacks::MARKER_SIZE as u64)
			.as_u64();
		CoreLocal::get().kernel_stack.set(stack);
	}

	#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
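	/// Lazily switches FPU ownership: saves the FPU state of the previous
	/// owner and restores the state of the current task.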
	pub fn fpu_switch(&mut self) {
		if !Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
			debug!(
				"Switching FPU owner from task {} to {}",
				self.fpu_owner.borrow().id,
				self.current_task.borrow().id
			);

			self.fpu_owner.borrow_mut().last_fpu_state.save();
			self.current_task.borrow().last_fpu_state.restore();
			self.fpu_owner = self.current_task.clone();
		}
	}

	/// Releases all tasks that finished since the last call; dropping the
	/// last `Rc` frees the task's resources.
	fn cleanup_tasks(&mut self) {
		while let Some(finished_task) = self.finished_tasks.pop_front() {
			debug!("Cleaning up task {}", finished_task.borrow().id);
		}
	}

	#[cfg(feature = "smp")]
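	/// Drains this core's input queues: wakes tasks woken by other cores and
	/// enqueues newly spawned tasks.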
	pub fn check_input(&mut self) {
		let mut input_locked = CoreLocal::get().scheduler_input.lock();

		while let Some(task) = input_locked.wakeup_tasks.pop_front() {
			let task = self.blocked_tasks.custom_wakeup(task);
			self.ready_queue.push(task);
		}

		while let Some(new_task) = input_locked.new_tasks.pop_front() {
			let task = Rc::new(RefCell::new(Task::from(new_task)));
			self.ready_queue.push(task.clone());
		}
	}

	/// Idle loop of the scheduler: polls the executor and the input queues
	/// and puts the core to sleep when no task is ready.
	pub fn run() -> ! {
		let backoff = Backoff::new();

		loop {
			let core_scheduler = core_scheduler();
			interrupts::disable();

			// Run the async tasks of the executor.
			crate::executor::run();

			// Check if another core sent us new tasks or wakeup requests.
			#[cfg(feature = "smp")]
			core_scheduler.check_input();
			core_scheduler.cleanup_tasks();

			if core_scheduler.ready_queue.is_empty() {
				if backoff.is_completed() {
					// Nothing to do for a while: sleep until the next interrupt.
					interrupts::enable_and_wait();
					backoff.reset();
				} else {
					interrupts::enable();
					backoff.snooze();
				}
			} else {
				interrupts::enable();
				core_scheduler.reschedule();
				backoff.reset();
			}
		}
	}

	#[inline]
	#[cfg(target_arch = "aarch64")]
	pub fn get_last_stack_pointer(&self) -> memory_addresses::VirtAddr {
		self.current_task.borrow().last_stack_pointer
	}

	/// Picks the next task and prepares the task switch. Returns a pointer
	/// to the slot where the outgoing task's stack pointer is saved, or
	/// `None` if no switch is needed (on riscv64 the switch is performed
	/// directly and `None` is always returned).
	pub fn scheduler(&mut self) -> Option<*mut usize> {
		// Run the async executor before scheduling.
		crate::executor::run();

		// Clean up tasks that finished since the last reschedule.
		self.cleanup_tasks();

		// Get information about the current task.
		let (id, last_stack_pointer, prio, status) = {
			let mut borrowed = self.current_task.borrow_mut();
			(
				borrowed.id,
				ptr::from_mut(&mut borrowed.last_stack_pointer).cast::<usize>(),
				borrowed.prio,
				borrowed.status,
			)
		};

		let mut new_task = None;

		if status == TaskStatus::Running {
			// A task is currently running: only switch if a task of equal or
			// higher priority is ready.
			if let Some(task) = self.ready_queue.pop_with_prio(prio) {
				new_task = Some(task);
			}
		} else {
			if status == TaskStatus::Finished {
				// Mark the finished task as invalid and queue it for cleanup.
				self.current_task.borrow_mut().status = TaskStatus::Invalid;
				self.finished_tasks.push_back(self.current_task.clone());
			}

			// No task is running: take the ready task with the highest
			// priority, falling back to the idle task.
			if let Some(task) = self.ready_queue.pop() {
				debug!("Task is available.");
				new_task = Some(task);
			} else if status != TaskStatus::Idle {
				debug!("Only Idle Task is available.");
				new_task = Some(self.idle_task.clone());
			}
		}

		let task = new_task?;
		if status == TaskStatus::Running {
			// The previous task stays ready; put it back into the queue.
			self.current_task.borrow_mut().status = TaskStatus::Ready;
			self.ready_queue.push(self.current_task.clone());
		}

		let (new_id, new_stack_pointer) = {
			let mut borrowed = task.borrow_mut();
			if borrowed.status != TaskStatus::Idle {
				borrowed.status = TaskStatus::Running;
			}

			(borrowed.id, borrowed.last_stack_pointer)
		};

		if id == new_id {
			return None;
		}

		debug!(
			"Switching task from {} to {} (stack {:#X} => {:p})",
			id,
			new_id,
			unsafe { *last_stack_pointer },
			new_stack_pointer
		);
		#[cfg(not(target_arch = "riscv64"))]
		{
			self.current_task = task;
		}

		#[cfg(not(target_arch = "riscv64"))]
		return Some(last_stack_pointer);

		#[cfg(target_arch = "riscv64")]
		{
			// On riscv64, the FPU state is saved lazily and the switch is
			// performed right here.
			if sstatus::read().fs() == sstatus::FS::Dirty {
				self.current_task.borrow_mut().last_fpu_state.save();
			}
			task.borrow().last_fpu_state.restore();
			self.current_task = task;
			unsafe {
				switch_to_task(last_stack_pointer, new_stack_pointer.as_usize());
			}
			None
		}
	}
}

/// Returns a task ID that is not yet in use.
fn get_tid() -> TaskId {
	static TID_COUNTER: AtomicI32 = AtomicI32::new(0);
	let guard = TASKS.lock();

	loop {
		let id = TaskId::from(TID_COUNTER.fetch_add(1, Ordering::SeqCst));
		if !guard.contains_key(&id) {
			return id;
		}
	}
}

#[inline]
pub(crate) fn abort() -> ! {
	core_scheduler().exit(-1)
}

/// Registers the current core in the scheduler and creates its idle task.
pub(crate) fn add_current_core() {
	// Create an idle task for this core.
	let core_id = core_id();
	let tid = get_tid();
	let idle_task = Rc::new(RefCell::new(Task::new_idle(tid, core_id)));

	// Add the idle task to the task lists.
	WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
	TASKS.lock().insert(
		tid,
		TaskHandle::new(
			tid,
			IDLE_PRIO,
			#[cfg(feature = "smp")]
			core_id,
		),
	);
	debug!("Initializing scheduler for core {core_id} with idle task {tid}");
	// Initialize this core's scheduler and register it in the core-local storage.
	let boxed_scheduler = Box::new(PerCoreScheduler {
		#[cfg(feature = "smp")]
		core_id,
		current_task: idle_task.clone(),
		#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
		fpu_owner: idle_task.clone(),
		idle_task,
		ready_queue: PriorityTaskQueue::new(),
		finished_tasks: VecDeque::new(),
		blocked_tasks: BlockedTaskQueue::new(),
		timers: TimerList::new(),
	});

	let scheduler = Box::into_raw(boxed_scheduler);
	set_core_scheduler(scheduler);
	#[cfg(feature = "smp")]
	{
		SCHEDULER_INPUTS.lock().insert(
			core_id.try_into().unwrap(),
			&CoreLocal::get().scheduler_input,
		);
		#[cfg(target_arch = "x86_64")]
		CORE_HLT_STATE
			.lock()
			.insert(core_id.try_into().unwrap(), &CoreLocal::get().hlt);
	}
}

#[inline]
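/// Atomically clears the halt flag of the given core and returns whether it
/// was set.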
#[cfg(all(target_arch = "x86_64", feature = "smp", not(feature = "idle-poll")))]
pub(crate) fn take_core_hlt_state(core_id: CoreId) -> bool {
	CORE_HLT_STATE.lock()[usize::try_from(core_id).unwrap()].swap(false, Ordering::Acquire)
}

#[inline]
#[cfg(feature = "smp")]
fn get_scheduler_input(core_id: CoreId) -> &'static InterruptTicketMutex<SchedulerInput> {
	SCHEDULER_INPUTS.lock()[usize::try_from(core_id).unwrap()]
}
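/// Spawns a new task with the given entry point, priority, and stack size.
///
/// A negative `selector` lets the scheduler distribute tasks round-robin
/// over all cores; otherwise `selector` is used as the target core ID.
///
/// A minimal usage sketch (`entry`, the stack size, and the use of
/// `NORMAL_PRIO` from the task module are illustrative):
///
/// ```ignore
/// unsafe extern "C" fn entry(arg: usize) {
///     // task body
/// }
///
/// // Let the scheduler pick the core (selector -1) and use a 64 KiB stack.
/// let tid = unsafe { spawn(entry, 0, NORMAL_PRIO, 0x10000, -1) };
/// ```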
pub unsafe fn spawn(
	func: unsafe extern "C" fn(usize),
	arg: usize,
	prio: Priority,
	stack_size: usize,
	selector: isize,
) -> TaskId {
	static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

	let core_id = if selector < 0 {
		// Distribute the tasks round-robin over all cores.
		CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % get_processor_count()
	} else {
		selector as u32
	};

	unsafe { PerCoreScheduler::spawn(func, arg, prio, core_id, stack_size) }
}

#[allow(clippy::result_unit_err)]
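/// Blocks the current task until the task with the given `id` has finished.
///
/// Returns `Ok(())` once the target task no longer has a queue of waiting
/// tasks, i.e. it has terminated.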
pub fn join(id: TaskId) -> Result<(), ()> {
	let core_scheduler = core_scheduler();

	debug!(
		"Task {} is waiting for task {}",
		core_scheduler.get_current_task_id(),
		id
	);

	loop {
		let mut waiting_tasks_guard = WAITING_TASKS.lock();

		// If the task has no waiting queue anymore, it has already finished.
		let Some(queue) = waiting_tasks_guard.get_mut(&id) else {
			return Ok(());
		};

		// Otherwise enqueue ourselves, block, and switch to another task.
		queue.push_back(core_scheduler.get_current_task_handle());
		core_scheduler.block_current_task(None);

		drop(waiting_tasks_guard);
		core_scheduler.reschedule();
	}
}

/// Shuts down the system with the given exit code.
pub fn shutdown(arg: i32) -> ! {
	crate::syscalls::shutdown(arg)
}

fn get_task_handle(id: TaskId) -> Option<TaskHandle> {
	TASKS.lock().get(&id).copied()
}

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) static BOOT_ROOT_PAGE_TABLE: OnceCell<usize> = OnceCell::new();

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) fn get_root_page_table() -> usize {
	let current_task_borrowed = core_scheduler().current_task.borrow_mut();
	current_task_borrowed.root_page_table
}