#![allow(clippy::type_complexity)]

use alloc::boxed::Box;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use alloc::sync::Arc;
#[cfg(feature = "smp")]
use alloc::vec::Vec;
use core::cell::RefCell;
use core::ptr;
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
use core::sync::atomic::AtomicBool;
use core::sync::atomic::{AtomicI32, AtomicU32, Ordering};

use ahash::RandomState;
use crossbeam_utils::Backoff;
use hashbrown::{HashMap, hash_map};
use hermit_sync::*;
#[cfg(target_arch = "riscv64")]
use riscv::register::sstatus;
use timer_interrupts::TimerList;

use crate::arch::core_local::*;
#[cfg(target_arch = "riscv64")]
use crate::arch::switch::switch_to_task;
#[cfg(target_arch = "x86_64")]
use crate::arch::switch::{switch_to_fpu_owner, switch_to_task};
use crate::arch::{get_processor_count, interrupts};
use crate::errno::Errno;
use crate::fd::{Fd, RawFd};
use crate::kernel::scheduler::TaskStacks;
use crate::scheduler::task::*;
use crate::{arch, io};

pub mod task;
pub mod timer_interrupts;

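/// Number of tasks currently alive in the system (incremented on spawn, decremented on exit).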
static NO_TASKS: AtomicU32 = AtomicU32::new(0);
/// Maps core IDs to the scheduler input queues of the corresponding cores.
#[cfg(feature = "smp")]
static SCHEDULER_INPUTS: SpinMutex<Vec<&InterruptTicketMutex<SchedulerInput>>> =
    SpinMutex::new(Vec::new());
/// References to the per-core halt flags.
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
static CORE_HLT_STATE: SpinMutex<Vec<&AtomicBool>> = SpinMutex::new(Vec::new());
/// Maps task IDs to the queues of tasks waiting for them to finish.
static WAITING_TASKS: InterruptTicketMutex<BTreeMap<TaskId, VecDeque<TaskHandle>>> =
    InterruptTicketMutex::new(BTreeMap::new());
/// Maps task IDs to their task handles.
static TASKS: InterruptTicketMutex<BTreeMap<TaskId, TaskHandle>> =
    InterruptTicketMutex::new(BTreeMap::new());

/// Unique identifier of a core.
pub type CoreId = u32;

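/// Queues through which other cores submit work to a core's scheduler.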
#[cfg(feature = "smp")]
pub(crate) struct SchedulerInput {
    /// Tasks that were spawned for this core.
    new_tasks: VecDeque<NewTask>,
    /// Tasks that were woken up by another core.
    wakeup_tasks: VecDeque<TaskHandle>,
}

#[cfg(feature = "smp")]
impl SchedulerInput {
    pub fn new() -> Self {
        Self {
            new_tasks: VecDeque::new(),
            wakeup_tasks: VecDeque::new(),
        }
    }
}

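/// Scheduler state that is local to a single core. Aligned to 128 bytes on
/// x86_64 and aarch64 and to 64 bytes elsewhere to avoid false sharing
/// between cores.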
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
pub(crate) struct PerCoreScheduler {
    /// ID of the core this scheduler is running on.
    #[cfg(feature = "smp")]
    core_id: CoreId,
    /// Task which is currently running.
    current_task: Rc<RefCell<Task>>,
    /// Idle task of this core.
    idle_task: Rc<RefCell<Task>>,
    /// Task that currently owns the FPU.
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    fpu_owner: Rc<RefCell<Task>>,
    /// Queue of tasks which are ready to run.
    ready_queue: PriorityTaskQueue,
    /// Queue of tasks which are finished and whose resources can be released.
    finished_tasks: VecDeque<Rc<RefCell<Task>>>,
    /// Queue of blocked tasks, sorted by wakeup time.
    blocked_tasks: BlockedTaskQueue,
    /// Timers armed on this core.
    pub timers: TimerList,
}

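/// Operations that consume the scheduler reference, such as switching to
/// another task or terminating the current one.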
pub(crate) trait PerCoreSchedulerExt {
    /// Triggers the scheduler to reschedule tasks.
    fn reschedule(self);

    /// Terminates the current task with the given exit code and never returns.
    fn exit(self, exit_code: i32) -> !;
}

impl PerCoreSchedulerExt for &mut PerCoreScheduler {
    #[cfg(target_arch = "x86_64")]
    fn reschedule(self) {
        without_interrupts(|| {
            let Some(last_stack_pointer) = self.scheduler() else {
                return;
            };

            let (new_stack_pointer, is_idle) = {
                let borrowed = self.current_task.borrow();
                (
                    borrowed.last_stack_pointer,
                    borrowed.status == TaskStatus::Idle,
                )
            };

            if is_idle || Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
                unsafe {
                    switch_to_fpu_owner(last_stack_pointer, new_stack_pointer.as_u64() as usize);
                }
            } else {
                unsafe {
                    switch_to_task(last_stack_pointer, new_stack_pointer.as_u64() as usize);
                }
            }
        });
    }

    /// Triggers a reschedule by sending a software-generated interrupt (SGI)
    /// to this core; the actual task switch happens in the interrupt handler.
    #[cfg(target_arch = "aarch64")]
    fn reschedule(self) {
        use aarch64_cpu::asm::barrier::{NSH, SY, dsb, isb};
        use arm_gic::IntId;
        use arm_gic::gicv3::{GicCpuInterface, SgiTarget, SgiTargetGroup};

        use crate::interrupts::SGI_RESCHED;

        dsb(NSH);
        isb(SY);

        let reschedid = IntId::sgi(SGI_RESCHED.into());
        #[cfg(feature = "smp")]
        let core_id = self.core_id;
        #[cfg(not(feature = "smp"))]
        let core_id = 0;

        GicCpuInterface::send_sgi(
            reschedid,
            SgiTarget::List {
                affinity3: 0,
                affinity2: 0,
                affinity1: 0,
                target_list: 1 << core_id,
            },
            SgiTargetGroup::CurrentGroup1,
        )
        .unwrap();

        interrupts::enable();
    }

    #[cfg(target_arch = "riscv64")]
    fn reschedule(self) {
        without_interrupts(|| self.scheduler());
    }

    fn exit(self, exit_code: i32) -> ! {
        without_interrupts(|| {
            let mut current_task_borrowed = self.current_task.borrow_mut();
            assert_ne!(
                current_task_borrowed.status,
                TaskStatus::Idle,
                "Trying to terminate the idle task"
            );

            debug!(
                "Finishing task {} with exit code {}",
                current_task_borrowed.id, exit_code
            );
            current_task_borrowed.status = TaskStatus::Finished;
            NO_TASKS.fetch_sub(1, Ordering::SeqCst);

            let current_id = current_task_borrowed.id;
            drop(current_task_borrowed);

            // Wake up all tasks that are waiting (joining) on this one.
            if let Some(mut queue) = WAITING_TASKS.lock().remove(&current_id) {
                while let Some(task) = queue.pop_front() {
                    self.custom_wakeup(task);
                }
            }
        });

        self.reschedule();
        unreachable!()
    }
}

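/// Everything needed to construct a task, possibly on a different core.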
struct NewTask {
    tid: TaskId,
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    core_id: CoreId,
    stacks: TaskStacks,
    object_map: Arc<RwSpinLock<HashMap<RawFd, Arc<async_lock::RwLock<Fd>>, RandomState>>>,
}

impl From<NewTask> for Task {
    fn from(value: NewTask) -> Self {
        let NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map,
        } = value;
        let mut task = Self::new(tid, core_id, TaskStatus::Ready, prio, stacks, object_map);
        task.create_stack_frame(func, arg);
        task
    }
}

impl PerCoreScheduler {
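    /// Spawns a new task with the given entry point, argument, priority, and
    /// stack size on the given core.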
    pub unsafe fn spawn(
        func: unsafe extern "C" fn(usize),
        arg: usize,
        prio: Priority,
        core_id: CoreId,
        stack_size: usize,
    ) -> TaskId {
        let tid = get_tid();
        let stacks = TaskStacks::new(stack_size);
        let new_task = NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map: core_scheduler().get_current_task_object_map(),
        };

        // Hand the task over to the target core. `wakeup` is true if that
        // core has to be woken up to notice its new input.
        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);

            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                input_locked.new_tasks.push_back(new_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                panic!("Invalid core_id {core_id}!")
            }
        };

        debug!("Creating task {tid} with priority {prio} on core {core_id}");

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

    #[cfg(feature = "newlib")]
    fn clone_impl(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);

        // Distribute cloned tasks round-robin over all cores.
        let core_id: CoreId = {
            let id = NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst);

            if id == arch::get_processor_count() {
                NEXT_CORE_ID.store(0, Ordering::SeqCst);
                0
            } else {
                id
            }
        };

        let current_task_borrowed = self.current_task.borrow();

        let tid = get_tid();
        let clone_task = NewTask {
            tid,
            func,
            arg,
            prio: current_task_borrowed.prio,
            core_id,
            stacks: TaskStacks::new(current_task_borrowed.stacks.get_user_stack_size()),
            object_map: current_task_borrowed.object_map.clone(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    current_task_borrowed.prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);
            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                input_locked.new_tasks.push_back(clone_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                panic!("Invalid core_id {core_id}!");
            }
        };

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

    #[cfg(feature = "newlib")]
    pub fn clone(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        without_interrupts(|| self.clone_impl(func, arg))
    }

    /// Returns `true` if a ready task has a higher priority than the current task.
    #[inline]
    #[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
    pub fn is_scheduling(&self) -> bool {
        self.current_task.borrow().prio < self.ready_queue.get_highest_priority()
    }

    /// Moves blocked tasks whose wakeup time has passed back to the ready queue.
    #[inline]
    pub fn handle_waiting_tasks(&mut self) {
        without_interrupts(|| {
            crate::executor::run();
            self.blocked_tasks
                .handle_waiting_tasks(&mut self.ready_queue);
        });
    }

    #[cfg(not(feature = "smp"))]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        without_interrupts(|| {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        });
    }

    /// Wakes up the given task. If the task belongs to another core, the
    /// request is queued on that core and the core is woken up.
    #[cfg(feature = "smp")]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        if task.get_core_id() == self.core_id {
            without_interrupts(|| {
                let task = self.blocked_tasks.custom_wakeup(task);
                self.ready_queue.push(task);
            });
        } else {
            get_scheduler_input(task.get_core_id())
                .lock()
                .wakeup_tasks
                .push_back(task);
            arch::wakeup_core(task.get_core_id());
        }
    }

    /// Blocks the current task, optionally until the given wakeup time.
    #[inline]
    pub fn block_current_task(&mut self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks
                .add(self.current_task.clone(), wakeup_time);
        });
    }

    /// Returns a handle to the currently running task.
    #[inline]
    pub fn get_current_task_handle(&self) -> TaskHandle {
        without_interrupts(|| {
            let current_task_borrowed = self.current_task.borrow();

            TaskHandle::new(
                current_task_borrowed.id,
                current_task_borrowed.prio,
                #[cfg(feature = "smp")]
                current_task_borrowed.core_id,
            )
        })
    }

    #[inline]
    pub fn get_current_task_id(&self) -> TaskId {
        without_interrupts(|| self.current_task.borrow().id)
    }

    #[inline]
    pub fn get_current_task_object_map(
        &self,
    ) -> Arc<RwSpinLock<HashMap<RawFd, Arc<async_lock::RwLock<Fd>>, RandomState>>> {
        without_interrupts(|| self.current_task.borrow().object_map.clone())
    }

    /// Maps a file descriptor to its IO interface, returning `Errno::Badf`
    /// if the descriptor is unknown.
    #[inline]
    pub fn get_object(&self, fd: RawFd) -> io::Result<Arc<async_lock::RwLock<Fd>>> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let object_map = current_task.object_map.read();
            object_map.get(&fd).cloned().ok_or(Errno::Badf)
        })
    }

    /// Rebuilds the object map of the current task, keeping only the
    /// standard descriptors 0, 1, and 2 (stdin, stdout, stderr).
    #[cfg(feature = "common-os")]
    #[cfg_attr(not(target_arch = "x86_64"), expect(dead_code))]
    pub fn recreate_objmap(&self) -> io::Result<()> {
        let mut map = HashMap::<RawFd, Arc<async_lock::RwLock<Fd>>, RandomState>::with_hasher(
            RandomState::with_seeds(0, 0, 0, 0),
        );

        without_interrupts(|| {
            let mut current_task = self.current_task.borrow_mut();
            let object_map = current_task.object_map.read();

            // Copy only the standard descriptors into the new map.
            for i in 0..3 {
                if let Some(obj) = object_map.get(&i) {
                    map.insert(i, obj.clone());
                }
            }

            drop(object_map);
            current_task.object_map = Arc::new(RwSpinLock::new(map));
        });

        Ok(())
    }

    /// Inserts the given object into the object map of the current task and
    /// returns the lowest file descriptor that was still free.
    pub fn insert_object(&self, obj: Arc<async_lock::RwLock<Fd>>) -> io::Result<RawFd> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            // Find the lowest free file descriptor.
            let new_fd = || -> io::Result<RawFd> {
                let mut fd: RawFd = 0;
                loop {
                    if !object_map.contains_key(&fd) {
                        break Ok(fd);
                    } else if fd == RawFd::MAX {
                        break Err(Errno::Overflow);
                    }

                    fd = fd.saturating_add(1);
                }
            };

            let fd = new_fd()?;
            let _ = object_map.insert(fd, obj.clone());
            Ok(fd)
        })
    }

    /// Duplicates the object referenced by `fd` under a newly allocated file
    /// descriptor and returns the new descriptor.
    pub fn dup_object(&self, fd: RawFd) -> io::Result<RawFd> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let obj = (*(object_map.get(&fd).ok_or(Errno::Inval)?)).clone();

            // Find the lowest free file descriptor.
            let new_fd = || -> io::Result<RawFd> {
                let mut fd: RawFd = 0;
                loop {
                    if !object_map.contains_key(&fd) {
                        break Ok(fd);
                    } else if fd == RawFd::MAX {
                        break Err(Errno::Overflow);
                    }

                    fd = fd.saturating_add(1);
                }
            };

            let fd = new_fd()?;
            match object_map.entry(fd) {
                hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
                hash_map::Entry::Vacant(vacant_entry) => {
                    vacant_entry.insert(obj);
                    Ok(fd)
                }
            }
        })
    }

    /// Duplicates the object referenced by `fd1` under the descriptor `fd2`.
    /// In contrast to POSIX `dup2`, this fails if `fd2` is already in use.
    pub fn dup_object2(&self, fd1: RawFd, fd2: RawFd) -> io::Result<RawFd> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            let obj = object_map.get(&fd1).cloned().ok_or(Errno::Badf)?;

            match object_map.entry(fd2) {
                hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
                hash_map::Entry::Vacant(vacant_entry) => {
                    vacant_entry.insert(obj);
                    Ok(fd2)
                }
            }
        })
    }

    /// Removes the object associated with the given file descriptor from the
    /// object map and returns it.
    pub fn remove_object(&self, fd: RawFd) -> io::Result<Arc<async_lock::RwLock<Fd>>> {
        without_interrupts(|| {
            let current_task = self.current_task.borrow();
            let mut object_map = current_task.object_map.write();

            object_map.remove(&fd).ok_or(Errno::Badf)
        })
    }

    #[inline]
    pub fn get_current_task_prio(&self) -> Priority {
        without_interrupts(|| self.current_task.borrow().prio)
    }

    /// Returns the priority bitmap of the ready queue.
    #[allow(dead_code)]
    #[inline]
    pub fn get_priority_bitmap(&self) -> &u64 {
        self.ready_queue.get_priority_bitmap()
    }

    /// Points the TSS privilege and interrupt stack entries at the stacks of
    /// the current task.
    #[cfg(target_arch = "x86_64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();
        let tss = unsafe { &mut *CoreLocal::get().tss.get() };

        let rsp = current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.privilege_stack_table[0] = rsp.into();
        CoreLocal::get().kernel_stack.set(rsp.as_mut_ptr());
        let ist_start = current_task_borrowed.stacks.get_interrupt_stack()
            + current_task_borrowed.stacks.get_interrupt_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.interrupt_stack_table[0] = ist_start.into();
    }

    pub fn set_current_task_priority(&mut self, prio: Priority) {
        without_interrupts(|| {
            trace!("Change priority of the current task");
            self.current_task.borrow_mut().prio = prio;
        });
    }

    pub fn set_priority(&mut self, id: TaskId, prio: Priority) -> Result<(), ()> {
        trace!("Change priority of task {id} to priority {prio}");

        without_interrupts(|| {
            let task = get_task_handle(id).ok_or(())?;
            #[cfg(feature = "smp")]
            let other_core = task.get_core_id() != self.core_id;
            #[cfg(not(feature = "smp"))]
            let other_core = false;

            if other_core {
                warn!("Have to change the priority on another core");
            } else if self.current_task.borrow().id == task.get_id() {
                self.current_task.borrow_mut().prio = prio;
            } else {
                self.ready_queue
                    .set_priority(task, prio)
                    .expect("Unable to find the task in the ready queue");
            }

            Ok(())
        })
    }

    #[cfg(target_arch = "riscv64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();

        let stack = (current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64)
            .as_u64();
        CoreLocal::get().kernel_stack.set(stack);
    }

    /// Hands the FPU over to the current task: if the current task does not
    /// own the FPU, the owner's FPU state is saved and the current task's
    /// state is restored.
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    pub fn fpu_switch(&mut self) {
        if !Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
            debug!(
                "Switching FPU owner from task {} to {}",
                self.fpu_owner.borrow().id,
                self.current_task.borrow().id
            );

            self.fpu_owner.borrow_mut().last_fpu_state.save();
            self.current_task.borrow().last_fpu_state.restore();
            self.fpu_owner = self.current_task.clone();
        }
    }

    /// Drops all finished tasks, releasing their resources.
    fn cleanup_tasks(&mut self) {
        while let Some(finished_task) = self.finished_tasks.pop_front() {
            debug!("Cleaning up task {}", finished_task.borrow().id);
        }
    }

    /// Integrates requests from other cores: wakes up queued tasks and adds
    /// newly spawned tasks to the ready queue.
    #[cfg(feature = "smp")]
    pub fn check_input(&mut self) {
        let mut input_locked = CoreLocal::get().scheduler_input.lock();

        while let Some(task) = input_locked.wakeup_tasks.pop_front() {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        }

        while let Some(new_task) = input_locked.new_tasks.pop_front() {
            let task = Rc::new(RefCell::new(Task::from(new_task)));
            self.ready_queue.push(task.clone());
        }
    }

    /// Core scheduling loop: drives the async executor, integrates input
    /// from other cores, and either switches to a ready task or waits for
    /// interrupts while idle.
    pub fn run() -> ! {
        let backoff = Backoff::new();

        loop {
            let core_scheduler = core_scheduler();
            interrupts::disable();

            // Run async tasks on this core.
            crate::executor::run();

            // Check if other cores have sent new tasks or wakeup requests.
            #[cfg(feature = "smp")]
            core_scheduler.check_input();
            core_scheduler.cleanup_tasks();

            if core_scheduler.ready_queue.is_empty() {
                if backoff.is_completed() {
                    interrupts::enable_and_wait();
                    backoff.reset();
                } else {
                    interrupts::enable();
                    backoff.snooze();
                }
            } else {
                interrupts::enable();
                core_scheduler.reschedule();
                backoff.reset();
            }
        }
    }

    #[inline]
    #[cfg(target_arch = "aarch64")]
    pub fn get_last_stack_pointer(&self) -> memory_addresses::VirtAddr {
        self.current_task.borrow().last_stack_pointer
    }

    /// Picks the next task to run. Returns a pointer to the slot in which the
    /// outgoing task's stack pointer must be saved, or `None` if no task
    /// switch is necessary.
    pub fn scheduler(&mut self) -> Option<*mut usize> {
        // Run any pending async tasks first.
        crate::executor::run();

        // Release the resources of finished tasks.
        self.cleanup_tasks();

        let (id, last_stack_pointer, prio, status) = {
            let mut borrowed = self.current_task.borrow_mut();
            (
                borrowed.id,
                ptr::from_mut(&mut borrowed.last_stack_pointer).cast::<usize>(),
                borrowed.prio,
                borrowed.status,
            )
        };

        let mut new_task = None;

        if status == TaskStatus::Running {
            // Only switch if the ready queue contains a task of sufficient priority.
            if let Some(task) = self.ready_queue.pop_with_prio(prio) {
                new_task = Some(task);
            }
        } else {
            if status == TaskStatus::Finished {
                // Mark the finished task as invalid and defer its cleanup.
                self.current_task.borrow_mut().status = TaskStatus::Invalid;
                self.finished_tasks.push_back(self.current_task.clone());
            }

            if let Some(task) = self.ready_queue.pop() {
                debug!("Task is available.");
                new_task = Some(task);
            } else if status != TaskStatus::Idle {
                debug!("Only the idle task is available.");
                new_task = Some(self.idle_task.clone());
            }
        }

        let task = new_task?;
        if status == TaskStatus::Running {
            // The current task stays runnable; put it back into the ready queue.
            self.current_task.borrow_mut().status = TaskStatus::Ready;
            self.ready_queue.push(self.current_task.clone());
        }

        let (new_id, new_stack_pointer) = {
            let mut borrowed = task.borrow_mut();
            if borrowed.status != TaskStatus::Idle {
                borrowed.status = TaskStatus::Running;
            }

            (borrowed.id, borrowed.last_stack_pointer)
        };

        if id == new_id {
            return None;
        }

        debug!(
            "Switching task from {} to {} (stack {:#X} => {:p})",
            id,
            new_id,
            unsafe { *last_stack_pointer },
            new_stack_pointer
        );
        #[cfg(not(target_arch = "riscv64"))]
        {
            self.current_task = task;
        }

        #[cfg(not(target_arch = "riscv64"))]
        return Some(last_stack_pointer);

        // On RISC-V, the FPU state is handled here and the task switch is
        // performed directly instead of returning the stack pointer slot.
        #[cfg(target_arch = "riscv64")]
        {
            if sstatus::read().fs() == sstatus::FS::Dirty {
                // The FPU state was modified; save it before switching.
                self.current_task.borrow_mut().last_fpu_state.save();
            }
            task.borrow().last_fpu_state.restore();
            self.current_task = task;
            unsafe {
                switch_to_task(last_stack_pointer, new_stack_pointer.as_usize());
            }
            None
        }
    }
}

/// Returns a task identifier that is not yet in use.
fn get_tid() -> TaskId {
    static TID_COUNTER: AtomicI32 = AtomicI32::new(0);
    let guard = TASKS.lock();

    loop {
        let id = TaskId::from(TID_COUNTER.fetch_add(1, Ordering::SeqCst));
        if !guard.contains_key(&id) {
            return id;
        }
    }
}

/// Terminates the current task with exit code `-1`.
#[inline]
pub(crate) fn abort() -> ! {
    core_scheduler().exit(-1)
}

/// Initializes the scheduler of the current core and registers its idle task.
pub(crate) fn add_current_core() {
    // Create an idle task for this core.
    let core_id = core_id();
    let tid = get_tid();
    let idle_task = Rc::new(RefCell::new(Task::new_idle(tid, core_id)));

    WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
    TASKS.lock().insert(
        tid,
        TaskHandle::new(
            tid,
            IDLE_PRIO,
            #[cfg(feature = "smp")]
            core_id,
        ),
    );
    debug!("Initializing scheduler for core {core_id} with idle task {tid}");

    let boxed_scheduler = Box::new(PerCoreScheduler {
        #[cfg(feature = "smp")]
        core_id,
        current_task: idle_task.clone(),
        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
        fpu_owner: idle_task.clone(),
        idle_task,
        ready_queue: PriorityTaskQueue::new(),
        finished_tasks: VecDeque::new(),
        blocked_tasks: BlockedTaskQueue::new(),
        timers: TimerList::new(),
    });

    let scheduler = Box::into_raw(boxed_scheduler);
    set_core_scheduler(scheduler);
    #[cfg(feature = "smp")]
    {
        // Make this core's input queues reachable from other cores.
        SCHEDULER_INPUTS.lock().insert(
            core_id.try_into().unwrap(),
            &CoreLocal::get().scheduler_input,
        );
        #[cfg(target_arch = "x86_64")]
        CORE_HLT_STATE
            .lock()
            .insert(core_id.try_into().unwrap(), &CoreLocal::get().hlt);
    }
}

/// Atomically takes and clears the halt flag of the given core.
#[inline]
#[cfg(all(target_arch = "x86_64", feature = "smp", not(feature = "idle-poll")))]
pub(crate) fn take_core_hlt_state(core_id: CoreId) -> bool {
    CORE_HLT_STATE.lock()[usize::try_from(core_id).unwrap()].swap(false, Ordering::Acquire)
}

/// Returns the scheduler input queues of the given core.
#[inline]
#[cfg(feature = "smp")]
fn get_scheduler_input(core_id: CoreId) -> &'static InterruptTicketMutex<SchedulerInput> {
    SCHEDULER_INPUTS.lock()[usize::try_from(core_id).unwrap()]
}

/// Spawns a new task. A negative `selector` picks the target core
/// round-robin; a non-negative `selector` names the core directly.
pub unsafe fn spawn(
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    stack_size: usize,
    selector: isize,
) -> TaskId {
    static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

    let core_id = if selector < 0 {
        // Distribute tasks round-robin over all cores.
        CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % get_processor_count()
    } else {
        selector as u32
    };

    unsafe { PerCoreScheduler::spawn(func, arg, prio, core_id, stack_size) }
}

/// Blocks the current task until the task with the given ID has finished.
#[allow(clippy::result_unit_err)]
pub fn join(id: TaskId) -> Result<(), ()> {
    let core_scheduler = core_scheduler();

    debug!(
        "Task {} is waiting for task {}",
        core_scheduler.get_current_task_id(),
        id
    );

    loop {
        let mut waiting_tasks_guard = WAITING_TASKS.lock();

        // If the entry is gone, the task has already finished.
        let Some(queue) = waiting_tasks_guard.get_mut(&id) else {
            return Ok(());
        };

        queue.push_back(core_scheduler.get_current_task_handle());
        core_scheduler.block_current_task(None);

        drop(waiting_tasks_guard);
        core_scheduler.reschedule();
    }
}

pub fn shutdown(arg: i32) -> ! {
    crate::syscalls::shutdown(arg)
}

fn get_task_handle(id: TaskId) -> Option<TaskHandle> {
    TASKS.lock().get(&id).copied()
}

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) static BOOT_ROOT_PAGE_TABLE: OnceCell<usize> = OnceCell::new();

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) fn get_root_page_table() -> usize {
    let current_task_borrowed = core_scheduler().current_task.borrow_mut();
    current_task_borrowed.root_page_table
}