1#![allow(clippy::type_complexity)]
2
3use alloc::boxed::Box;
4use alloc::collections::{BTreeMap, VecDeque};
5use alloc::rc::Rc;
6use alloc::sync::Arc;
7#[cfg(feature = "smp")]
8use alloc::vec::Vec;
9use core::cell::RefCell;
10use core::ptr;
11#[cfg(all(target_arch = "x86_64", feature = "smp"))]
12use core::sync::atomic::AtomicBool;
13use core::sync::atomic::{AtomicI32, AtomicU32, Ordering};
14
15use ahash::RandomState;
16use crossbeam_utils::Backoff;
17use hashbrown::{HashMap, hash_map};
18use hermit_sync::*;
19#[cfg(target_arch = "riscv64")]
20use riscv::register::sstatus;
21
22use crate::arch::core_local::*;
23#[cfg(target_arch = "riscv64")]
24use crate::arch::switch::switch_to_task;
25#[cfg(target_arch = "x86_64")]
26use crate::arch::switch::{switch_to_fpu_owner, switch_to_task};
27use crate::arch::{get_processor_count, interrupts};
28use crate::errno::Errno;
29use crate::fd::{ObjectInterface, RawFd};
30use crate::kernel::scheduler::TaskStacks;
31use crate::scheduler::task::*;
32use crate::{arch, io};
33
34pub mod task;
35
/// Total number of user tasks managed by all per-core schedulers
/// (incremented on spawn/clone, decremented on exit; idle tasks excluded).
static NO_TASKS: AtomicU32 = AtomicU32::new(0);
/// Per-core scheduler input queues, indexed by `CoreId`
/// (see `get_scheduler_input` / `add_current_core`).
#[cfg(feature = "smp")]
static SCHEDULER_INPUTS: SpinMutex<Vec<&InterruptTicketMutex<SchedulerInput>>> =
	SpinMutex::new(Vec::new());
/// Per-core halt flags, indexed by `CoreId` (see `take_core_hlt_state`).
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
static CORE_HLT_STATE: SpinMutex<Vec<&AtomicBool>> = SpinMutex::new(Vec::new());
/// For every live task id, the handles of tasks blocked in `join`
/// waiting for that task to finish; the entry is removed on exit.
static WAITING_TASKS: InterruptTicketMutex<BTreeMap<TaskId, VecDeque<TaskHandle>>> =
	InterruptTicketMutex::new(BTreeMap::new());
/// Handles of all tasks known to any scheduler, keyed by task id.
static TASKS: InterruptTicketMutex<BTreeMap<TaskId, TaskHandle>> =
	InterruptTicketMutex::new(BTreeMap::new());

/// Identifier of a CPU core.
pub type CoreId = u32;
52
/// Queues through which other cores hand work to a core's scheduler.
#[cfg(feature = "smp")]
pub(crate) struct SchedulerInput {
	// Tasks spawned for this core but not yet enqueued into its ready queue.
	new_tasks: VecDeque<NewTask>,
	// Blocked tasks that another core asked this core to wake up.
	wakeup_tasks: VecDeque<TaskHandle>,
}
60
#[cfg(feature = "smp")]
impl SchedulerInput {
	/// Creates a scheduler input channel with empty task queues.
	pub fn new() -> Self {
		let new_tasks = VecDeque::new();
		let wakeup_tasks = VecDeque::new();
		Self {
			new_tasks,
			wakeup_tasks,
		}
	}
}
70
// Align each scheduler to its own (presumed) cache-line granule so that
// per-core schedulers do not share cache lines — TODO confirm sizes per arch.
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
	not(any(target_arch = "x86_64", target_arch = "aarch64")),
	repr(align(64))
)]
pub(crate) struct PerCoreScheduler {
	// ID of the core this scheduler is running on.
	#[cfg(feature = "smp")]
	core_id: CoreId,
	// Task which is currently running on this core.
	current_task: Rc<RefCell<Task>>,
	// Idle task to run when no other task is ready.
	idle_task: Rc<RefCell<Task>>,
	// Task whose state currently resides in the FPU registers
	// (FPU switching is done lazily in `fpu_switch`).
	#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
	fpu_owner: Rc<RefCell<Task>>,
	// Queue of ready tasks, ordered by priority.
	ready_queue: PriorityTaskQueue,
	// Finished tasks waiting to be released by `cleanup_tasks`.
	finished_tasks: VecDeque<Rc<RefCell<Task>>>,
	// Blocked tasks, each with an optional wakeup time.
	blocked_tasks: BlockedTaskQueue,
}
94
/// Scheduler operations that consume the scheduler reference because they
/// may switch away from the current task.
pub(crate) trait PerCoreSchedulerExt {
	/// Triggers the scheduler to reschedule tasks on this core.
	fn reschedule(self);

	/// Forwards `wakeup_time` to the blocked-task queue's network timer.
	#[cfg(feature = "net")]
	fn add_network_timer(self, wakeup_time: Option<u64>);

	/// Terminates the current task with `exit_code`; never returns.
	fn exit(self, exit_code: i32) -> !;
}
106
107impl PerCoreSchedulerExt for &mut PerCoreScheduler {
108 #[cfg(target_arch = "x86_64")]
109 fn reschedule(self) {
110 without_interrupts(|| {
111 let Some(last_stack_pointer) = self.scheduler() else {
112 return;
113 };
114
115 let (new_stack_pointer, is_idle) = {
116 let borrowed = self.current_task.borrow();
117 (
118 borrowed.last_stack_pointer,
119 borrowed.status == TaskStatus::Idle,
120 )
121 };
122
123 if is_idle || Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
124 unsafe {
125 switch_to_fpu_owner(last_stack_pointer, new_stack_pointer.as_u64() as usize);
126 }
127 } else {
128 unsafe {
129 switch_to_task(last_stack_pointer, new_stack_pointer.as_u64() as usize);
130 }
131 }
132 });
133 }
134
135 #[cfg(target_arch = "aarch64")]
137 fn reschedule(self) {
138 use aarch64_cpu::asm::barrier::{NSH, SY, dsb, isb};
139 use arm_gic::IntId;
140 use arm_gic::gicv3::{GicV3, SgiTarget, SgiTargetGroup};
141
142 use crate::interrupts::SGI_RESCHED;
143
144 dsb(NSH);
145 isb(SY);
146
147 let reschedid = IntId::sgi(SGI_RESCHED.into());
148 #[cfg(feature = "smp")]
149 let core_id = self.core_id;
150 #[cfg(not(feature = "smp"))]
151 let core_id = 0;
152
153 GicV3::send_sgi(
154 reschedid,
155 SgiTarget::List {
156 affinity3: 0,
157 affinity2: 0,
158 affinity1: 0,
159 target_list: 1 << core_id,
160 },
161 SgiTargetGroup::CurrentGroup1,
162 );
163
164 interrupts::enable();
165 }
166
167 #[cfg(target_arch = "riscv64")]
168 fn reschedule(self) {
169 without_interrupts(|| self.scheduler());
170 }
171
172 #[cfg(feature = "net")]
173 fn add_network_timer(self, wakeup_time: Option<u64>) {
174 without_interrupts(|| {
175 self.blocked_tasks.add_network_timer(wakeup_time);
176 });
177 }
178
179 fn exit(self, exit_code: i32) -> ! {
180 without_interrupts(|| {
181 let mut current_task_borrowed = self.current_task.borrow_mut();
183 assert_ne!(
184 current_task_borrowed.status,
185 TaskStatus::Idle,
186 "Trying to terminate the idle task"
187 );
188
189 debug!(
191 "Finishing task {} with exit code {}",
192 current_task_borrowed.id, exit_code
193 );
194 current_task_borrowed.status = TaskStatus::Finished;
195 NO_TASKS.fetch_sub(1, Ordering::SeqCst);
196
197 let current_id = current_task_borrowed.id;
198 drop(current_task_borrowed);
199
200 if let Some(mut queue) = WAITING_TASKS.lock().remove(¤t_id) {
202 while let Some(task) = queue.pop_front() {
203 self.custom_wakeup(task);
204 }
205 }
206 });
207
208 self.reschedule();
209 unreachable!()
210 }
211}
212
/// Description of a task to create; turned into a `Task` via `From`.
struct NewTask {
	// Pre-allocated id of the new task (from `get_tid`).
	tid: TaskId,
	// Entry point of the task.
	func: unsafe extern "C" fn(usize),
	// Argument passed to `func`.
	arg: usize,
	// Scheduling priority of the task.
	prio: Priority,
	// Core the task is assigned to.
	core_id: CoreId,
	// Kernel/user stacks allocated for the task.
	stacks: TaskStacks,
	// File-descriptor table, shared with (or cloned from) the creator.
	object_map:
		Arc<RwSpinLock<HashMap<RawFd, Arc<async_lock::RwLock<dyn ObjectInterface>>, RandomState>>>,
}
223
224impl From<NewTask> for Task {
225 fn from(value: NewTask) -> Self {
226 let NewTask {
227 tid,
228 func,
229 arg,
230 prio,
231 core_id,
232 stacks,
233 object_map,
234 } = value;
235 let mut task = Self::new(tid, core_id, TaskStatus::Ready, prio, stacks, object_map);
236 task.create_stack_frame(func, arg);
237 task
238 }
239}
240
241impl PerCoreScheduler {
	/// Creates a new task with the given entry point, priority and stack
	/// size and schedules it on `core_id`; returns the new task's id.
	///
	/// If the target core is not the current one, the task is handed over
	/// via that core's `SchedulerInput` and the core is woken up.
	///
	/// # Safety
	///
	/// `func` must be safe to call with `arg`.
	pub unsafe fn spawn(
		func: unsafe extern "C" fn(usize),
		arg: usize,
		prio: Priority,
		core_id: CoreId,
		stack_size: usize,
	) -> TaskId {
		// Get a unique id and allocate the stacks for the new task.
		let tid = get_tid();
		let stacks = TaskStacks::new(stack_size);
		let new_task = NewTask {
			tid,
			func,
			arg,
			prio,
			core_id,
			stacks,
			// The new task shares the creator's file-descriptor table.
			object_map: core_scheduler().get_current_task_object_map(),
		};

		// Register the task globally; `wakeup` says whether the remote
		// core must be notified afterwards.
		let wakeup = {
			// Hold the target core's input lock across the bookkeeping.
			#[cfg(feature = "smp")]
			let mut input_locked = get_scheduler_input(core_id).lock();
			WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
			TASKS.lock().insert(
				tid,
				TaskHandle::new(
					tid,
					prio,
					#[cfg(feature = "smp")]
					core_id,
				),
			);
			NO_TASKS.fetch_add(1, Ordering::SeqCst);

			#[cfg(feature = "smp")]
			if core_id == core_scheduler().core_id {
				// Local spawn: enqueue directly into our ready queue.
				let task = Rc::new(RefCell::new(Task::from(new_task)));
				core_scheduler().ready_queue.push(task);
				false
			} else {
				// Remote spawn: hand over via the target core's input queue.
				input_locked.new_tasks.push_back(new_task);
				true
			}
			#[cfg(not(feature = "smp"))]
			if core_id == 0 {
				let task = Rc::new(RefCell::new(Task::from(new_task)));
				core_scheduler().ready_queue.push(task);
				false
			} else {
				// Without SMP only core 0 exists.
				panic!("Invalid core_id {}!", core_id)
			}
		};

		debug!("Creating task {tid} with priority {prio} on core {core_id}");

		if wakeup {
			arch::wakeup_core(core_id);
		}

		tid
	}
306
	/// newlib `clone` backend: creates a task that inherits the caller's
	/// priority, user-stack size and file-descriptor table. The target
	/// core is chosen round-robin via `NEXT_CORE_ID`.
	#[cfg(feature = "newlib")]
	fn clone_impl(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
		static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);

		// Get the id of the next core (simple round-robin).
		let core_id: CoreId = {
			let id = NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst);

			if id == arch::get_processor_count() {
				// Wrap around to core 0.
				// NOTE(review): concurrent callers may briefly skip cores
				// here; presumably benign for load balancing — confirm.
				NEXT_CORE_ID.store(0, Ordering::SeqCst);
				0
			} else {
				id
			}
		};

		// Get the current task to inherit its attributes.
		let current_task_borrowed = self.current_task.borrow();

		// Clone the task with a new id.
		let tid = get_tid();
		let clone_task = NewTask {
			tid,
			func,
			arg,
			prio: current_task_borrowed.prio,
			core_id,
			stacks: TaskStacks::new(current_task_borrowed.stacks.get_user_stack_size()),
			object_map: current_task_borrowed.object_map.clone(),
		};

		// Register the task globally; `wakeup` says whether the remote
		// core must be notified afterwards (mirrors `spawn`).
		let wakeup = {
			#[cfg(feature = "smp")]
			let mut input_locked = get_scheduler_input(core_id).lock();
			WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
			TASKS.lock().insert(
				tid,
				TaskHandle::new(
					tid,
					current_task_borrowed.prio,
					#[cfg(feature = "smp")]
					core_id,
				),
			);
			NO_TASKS.fetch_add(1, Ordering::SeqCst);
			#[cfg(feature = "smp")]
			if core_id == core_scheduler().core_id {
				// Local clone: enqueue directly into our ready queue.
				let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
				core_scheduler().ready_queue.push(clone_task);
				false
			} else {
				input_locked.new_tasks.push_back(clone_task);
				true
			}
			#[cfg(not(feature = "smp"))]
			if core_id == 0 {
				let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
				core_scheduler().ready_queue.push(clone_task);
				false
			} else {
				panic!("Invalid core_id {}!", core_id);
			}
		};

		if wakeup {
			arch::wakeup_core(core_id);
		}

		tid
	}
381
	/// Interrupt-safe wrapper around `clone_impl`.
	#[cfg(feature = "newlib")]
	pub fn clone(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
		without_interrupts(|| self.clone_impl(func, arg))
	}
386
	/// Returns `true` if the ready queue's highest priority exceeds the
	/// current task's priority, i.e. a reschedule would switch tasks.
	#[inline]
	#[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
	pub fn is_scheduling(&self) -> bool {
		self.current_task.borrow().prio < self.ready_queue.get_highest_priority()
	}
393
	/// Runs the async executor and moves blocked tasks whose wakeup time
	/// has elapsed back into the ready queue.
	#[inline]
	pub fn handle_waiting_tasks(&mut self) {
		without_interrupts(|| {
			crate::executor::run();
			self.blocked_tasks
				.handle_waiting_tasks(&mut self.ready_queue);
		});
	}
402
	/// Wakes `task` from the blocked queue and makes it ready again.
	#[cfg(not(feature = "smp"))]
	pub fn custom_wakeup(&mut self, task: TaskHandle) {
		without_interrupts(|| {
			let task = self.blocked_tasks.custom_wakeup(task);
			self.ready_queue.push(task);
		});
	}
410
	/// Wakes `task`. A task owned by this core is made ready directly;
	/// otherwise the wakeup is forwarded to the owning core's input queue
	/// and that core is woken up to process it.
	#[cfg(feature = "smp")]
	pub fn custom_wakeup(&mut self, task: TaskHandle) {
		if task.get_core_id() == self.core_id {
			without_interrupts(|| {
				let task = self.blocked_tasks.custom_wakeup(task);
				self.ready_queue.push(task);
			});
		} else {
			get_scheduler_input(task.get_core_id())
				.lock()
				.wakeup_tasks
				.push_back(task);
			// Wake up the core to let it drain its scheduler input.
			arch::wakeup_core(task.get_core_id());
		}
	}
427
	/// Marks the current task as blocked, with an optional wakeup time;
	/// the actual switch away happens on the next reschedule.
	#[inline]
	pub fn block_current_task(&mut self, wakeup_time: Option<u64>) {
		without_interrupts(|| {
			self.blocked_tasks
				.add(self.current_task.clone(), wakeup_time);
		});
	}
435
	/// Returns a handle (id, priority and, with SMP, core id) for the
	/// currently running task.
	#[inline]
	pub fn get_current_task_handle(&self) -> TaskHandle {
		without_interrupts(|| {
			let current_task_borrowed = self.current_task.borrow();

			TaskHandle::new(
				current_task_borrowed.id,
				current_task_borrowed.prio,
				#[cfg(feature = "smp")]
				current_task_borrowed.core_id,
			)
		})
	}
449
	/// Returns the id of the currently running task.
	#[inline]
	pub fn get_current_task_id(&self) -> TaskId {
		without_interrupts(|| self.current_task.borrow().id)
	}
454
	/// Returns a shared reference to the current task's file-descriptor
	/// table (a cheap `Arc` clone, not a copy of the map).
	#[inline]
	pub fn get_current_task_object_map(
		&self,
	) -> Arc<RwSpinLock<HashMap<RawFd, Arc<async_lock::RwLock<dyn ObjectInterface>>, RandomState>>>
	{
		without_interrupts(|| self.current_task.borrow().object_map.clone())
	}
462
	/// Returns the object behind file descriptor `fd` of the current task.
	///
	/// # Errors
	///
	/// `Errno::Badf` if `fd` is not in the descriptor table.
	#[inline]
	pub fn get_object(
		&self,
		fd: RawFd,
	) -> io::Result<Arc<async_lock::RwLock<dyn ObjectInterface>>> {
		without_interrupts(|| {
			let current_task = self.current_task.borrow();
			let object_map = current_task.object_map.read();
			object_map.get(&fd).cloned().ok_or(Errno::Badf)
		})
	}
476
	/// Replaces the current task's file-descriptor table with a fresh one
	/// that retains only descriptors 0..=2 (presumably the standard I/O
	/// descriptors — TODO confirm against callers).
	#[cfg(feature = "common-os")]
	#[cfg_attr(not(target_arch = "x86_64"), expect(dead_code))]
	pub fn recreate_objmap(&self) -> io::Result<()> {
		// Fixed seeds keep the hasher deterministic across recreations.
		let mut map = HashMap::<
			RawFd,
			Arc<async_lock::RwLock<dyn ObjectInterface>>,
			RandomState,
		>::with_hasher(RandomState::with_seeds(0, 0, 0, 0));

		without_interrupts(|| {
			let mut current_task = self.current_task.borrow_mut();
			let object_map = current_task.object_map.read();

			// Copy only the standard descriptors 0, 1 and 2.
			for i in 0..3 {
				if let Some(obj) = object_map.get(&i) {
					map.insert(i, obj.clone());
				}
			}

			// Release the read lock before swapping in the new table.
			drop(object_map);
			current_task.object_map = Arc::new(RwSpinLock::new(map));
		});

		Ok(())
	}
505
	/// Inserts `obj` into the current task's descriptor table under the
	/// lowest free file descriptor and returns that descriptor.
	///
	/// # Errors
	///
	/// `Errno::Overflow` if every descriptor up to `RawFd::MAX` is taken.
	pub fn insert_object(
		&self,
		obj: Arc<async_lock::RwLock<dyn ObjectInterface>>,
	) -> io::Result<RawFd> {
		without_interrupts(|| {
			let current_task = self.current_task.borrow();
			let mut object_map = current_task.object_map.write();

			// Linear scan for the lowest unused descriptor.
			let new_fd = || -> io::Result<RawFd> {
				let mut fd: RawFd = 0;
				loop {
					if !object_map.contains_key(&fd) {
						break Ok(fd);
					} else if fd == RawFd::MAX {
						break Err(Errno::Overflow);
					}

					fd = fd.saturating_add(1);
				}
			};

			let fd = new_fd()?;
			let _ = object_map.insert(fd, obj.clone());
			Ok(fd)
		})
	}
534
	/// Duplicates the object behind `fd` under the lowest free descriptor
	/// and returns the new descriptor.
	///
	/// # Errors
	///
	/// `Errno::Inval` if `fd` is not open; `Errno::Overflow` if no
	/// descriptor is free.
	pub fn dup_object(&self, fd: RawFd) -> io::Result<RawFd> {
		without_interrupts(|| {
			let current_task = self.current_task.borrow();
			let mut object_map = current_task.object_map.write();

			let obj = (*(object_map.get(&fd).ok_or(Errno::Inval)?)).clone();

			// Linear scan for the lowest unused descriptor
			// (same scheme as `insert_object`).
			let new_fd = || -> io::Result<RawFd> {
				let mut fd: RawFd = 0;
				loop {
					if !object_map.contains_key(&fd) {
						break Ok(fd);
					} else if fd == RawFd::MAX {
						break Err(Errno::Overflow);
					}

					fd = fd.saturating_add(1);
				}
			};

			let fd = new_fd()?;
			// `new_fd` only yields vacant descriptors, so the occupied
			// branch is defensive.
			match object_map.entry(fd) {
				hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
				hash_map::Entry::Vacant(vacant_entry) => {
					vacant_entry.insert(obj);
					Ok(fd)
				}
			}
		})
	}
567
568 pub fn dup_object2(&self, fd1: RawFd, fd2: RawFd) -> io::Result<RawFd> {
569 without_interrupts(|| {
570 let current_task = self.current_task.borrow();
571 let mut object_map = current_task.object_map.write();
572
573 let obj = object_map.get(&fd1).cloned().ok_or(Errno::Badf)?;
574
575 match object_map.entry(fd2) {
576 hash_map::Entry::Occupied(_occupied_entry) => Err(Errno::Mfile),
577 hash_map::Entry::Vacant(vacant_entry) => {
578 vacant_entry.insert(obj);
579 Ok(fd2)
580 }
581 }
582 })
583 }
584
585 pub fn remove_object(
587 &self,
588 fd: RawFd,
589 ) -> io::Result<Arc<async_lock::RwLock<dyn ObjectInterface>>> {
590 without_interrupts(|| {
591 let current_task = self.current_task.borrow();
592 let mut object_map = current_task.object_map.write();
593
594 object_map.remove(&fd).ok_or(Errno::Badf)
595 })
596 }
597
	/// Returns the priority of the currently running task.
	#[inline]
	pub fn get_current_task_prio(&self) -> Priority {
		without_interrupts(|| self.current_task.borrow().prio)
	}
602
	/// Returns the ready queue's priority bitmap (one bit per priority
	/// level that currently has ready tasks).
	#[allow(dead_code)]
	#[inline]
	pub fn get_priority_bitmap(&self) -> &u64 {
		self.ready_queue.get_priority_bitmap()
	}
609
	/// Publishes the current task's kernel and interrupt stack tops in the
	/// TSS and core-local storage, leaving room for the stack marker.
	#[cfg(target_arch = "x86_64")]
	pub fn set_current_kernel_stack(&self) {
		let current_task_borrowed = self.current_task.borrow();
		let tss = unsafe { &mut *CoreLocal::get().tss.get() };

		// Top of the kernel stack minus the marker region.
		let rsp = current_task_borrowed.stacks.get_kernel_stack()
			+ current_task_borrowed.stacks.get_kernel_stack_size() as u64
			- TaskStacks::MARKER_SIZE as u64;
		tss.privilege_stack_table[0] = rsp.into();
		CoreLocal::get().kernel_stack.set(rsp.as_mut_ptr());
		// Top of the interrupt stack minus the marker region.
		let ist_start = current_task_borrowed.stacks.get_interrupt_stack()
			+ current_task_borrowed.stacks.get_interrupt_stack_size() as u64
			- TaskStacks::MARKER_SIZE as u64;
		tss.interrupt_stack_table[0] = ist_start.into();
	}
625
	/// Changes the priority of the currently running task to `prio`.
	pub fn set_current_task_priority(&mut self, prio: Priority) {
		without_interrupts(|| {
			trace!("Change priority of the current task");
			self.current_task.borrow_mut().prio = prio;
		});
	}
632
	/// Changes the priority of task `id` to `prio`.
	///
	/// Returns `Err(())` if no task with that id exists.
	/// NOTE(review): for a task on another core this only warns and
	/// returns `Ok` without actually changing the priority.
	pub fn set_priority(&mut self, id: TaskId, prio: Priority) -> Result<(), ()> {
		trace!("Change priority of task {id} to priority {prio}");

		without_interrupts(|| {
			let task = get_task_handle(id).ok_or(())?;
			#[cfg(feature = "smp")]
			let other_core = task.get_core_id() != self.core_id;
			#[cfg(not(feature = "smp"))]
			let other_core = false;

			if other_core {
				warn!("Have to change the priority on another core");
			} else if self.current_task.borrow().id == task.get_id() {
				// The current task is not in the ready queue; update it
				// directly.
				self.current_task.borrow_mut().prio = prio;
			} else {
				// Requeue the task under its new priority.
				self.ready_queue
					.set_priority(task, prio)
					.expect("Do not find valid task in ready queue");
			}

			Ok(())
		})
	}
656
	/// Publishes the current task's kernel stack top (minus the marker)
	/// in core-local storage.
	#[cfg(target_arch = "riscv64")]
	pub fn set_current_kernel_stack(&self) {
		let current_task_borrowed = self.current_task.borrow();

		let stack = (current_task_borrowed.stacks.get_kernel_stack()
			+ current_task_borrowed.stacks.get_kernel_stack_size() as u64
			- TaskStacks::MARKER_SIZE as u64)
			.as_u64();
		CoreLocal::get().kernel_stack.set(stack);
	}
667
	/// Lazily hands the FPU over to the current task: if it is not already
	/// the owner, saves the previous owner's FPU state, restores the
	/// current task's state and records the new ownership.
	#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
	pub fn fpu_switch(&mut self) {
		if !Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
			debug!(
				"Switching FPU owner from task {} to {}",
				self.fpu_owner.borrow().id,
				self.current_task.borrow().id
			);

			self.fpu_owner.borrow_mut().last_fpu_state.save();
			self.current_task.borrow().last_fpu_state.restore();
			self.fpu_owner = self.current_task.clone();
		}
	}
684
	/// Pops all queued finished tasks, dropping this scheduler's
	/// references and thereby releasing their resources.
	fn cleanup_tasks(&mut self) {
		// Each popped `Rc` is dropped at the end of the iteration.
		while let Some(finished_task) = self.finished_tasks.pop_front() {
			debug!("Cleaning up task {}", finished_task.borrow().id);
		}
	}
692
693 #[cfg(feature = "smp")]
694 pub fn check_input(&mut self) {
695 let mut input_locked = CoreLocal::get().scheduler_input.lock();
696
697 while let Some(task) = input_locked.wakeup_tasks.pop_front() {
698 let task = self.blocked_tasks.custom_wakeup(task);
699 self.ready_queue.push(task);
700 }
701
702 while let Some(new_task) = input_locked.new_tasks.pop_front() {
703 let task = Rc::new(RefCell::new(Task::from(new_task)));
704 self.ready_queue.push(task.clone());
705 }
706 }
707
	/// Idle loop of a core's scheduler: polls the executor and input
	/// queues and, while the ready queue stays empty, backs off from
	/// spinning to waiting for an interrupt. Never returns.
	pub fn run() -> ! {
		let backoff = Backoff::new();

		loop {
			let core_scheduler = core_scheduler();
			// Work on the queues with interrupts disabled.
			interrupts::disable();

			// Run background/async work (e.g. the executor's futures).
			crate::executor::run();

			// Check if other cores have handed over tasks, and release
			// any finished tasks.
			#[cfg(feature = "smp")]
			core_scheduler.check_input();
			core_scheduler.cleanup_tasks();

			if core_scheduler.ready_queue.is_empty() {
				if backoff.is_completed() {
					// Backoff exhausted: sleep until the next interrupt.
					interrupts::enable_and_wait();
					backoff.reset();
				} else {
					interrupts::enable();
					backoff.snooze();
				}
			} else {
				interrupts::enable();
				core_scheduler.reschedule();
				backoff.reset();
			}
		}
	}
741
	/// Returns the saved stack pointer of the currently running task.
	#[inline]
	#[cfg(target_arch = "aarch64")]
	pub fn get_last_stack_pointer(&self) -> memory_addresses::VirtAddr {
		self.current_task.borrow().last_stack_pointer
	}
747
	/// Selects the next task to run and prepares the context switch.
	///
	/// Returns a pointer to the slot where the outgoing task's stack
	/// pointer must be stored by the switch routine, or `None` when no
	/// switch is needed. On riscv64 the switch is performed here directly
	/// and `None` is always returned.
	pub fn scheduler(&mut self) -> Option<*mut usize> {
		// Run background/async work first.
		crate::executor::run();

		// Someone is giving up the CPU, so there is time to release
		// finished tasks.
		self.cleanup_tasks();

		// Snapshot the current task's key attributes; `last_stack_pointer`
		// is a raw pointer into the task so the switch code can write the
		// outgoing stack pointer there.
		let (id, last_stack_pointer, prio, status) = {
			let mut borrowed = self.current_task.borrow_mut();
			(
				borrowed.id,
				ptr::from_mut(&mut borrowed.last_stack_pointer).cast::<usize>(),
				borrowed.prio,
				borrowed.status,
			)
		};

		let mut new_task = None;

		if status == TaskStatus::Running {
			// A task is running; only preempt it if `pop_with_prio` yields
			// a ready task for this priority level.
			if let Some(task) = self.ready_queue.pop_with_prio(prio) {
				new_task = Some(task);
			}
		} else {
			if status == TaskStatus::Finished {
				// Mark the finished task invalid and defer its release to
				// the next `cleanup_tasks` run.
				self.current_task.borrow_mut().status = TaskStatus::Invalid;
				self.finished_tasks.push_back(self.current_task.clone());
			}

			// The current task is not runnable: take any ready task, or
			// fall back to the idle task.
			if let Some(task) = self.ready_queue.pop() {
				debug!("Task is available.");
				new_task = Some(task);
			} else if status != TaskStatus::Idle {
				debug!("Only Idle Task is available.");
				new_task = Some(self.idle_task.clone());
			}
		}

		// No candidate: keep running the current task.
		let task = new_task?;
		if status == TaskStatus::Running {
			// The preempted task stays runnable; requeue it.
			self.current_task.borrow_mut().status = TaskStatus::Ready;
			self.ready_queue.push(self.current_task.clone());
		}

		// Mark the incoming task as running and fetch its stack pointer.
		let (new_id, new_stack_pointer) = {
			let mut borrowed = task.borrow_mut();
			if borrowed.status != TaskStatus::Idle {
				borrowed.status = TaskStatus::Running;
			}

			(borrowed.id, borrowed.last_stack_pointer)
		};

		if id == new_id {
			// Switching to the same task is a no-op.
			return None;
		}

		debug!(
			"Switching task from {} to {} (stack {:#X} => {:p})",
			id,
			new_id,
			unsafe { *last_stack_pointer },
			new_stack_pointer
		);
		#[cfg(not(target_arch = "riscv64"))]
		{
			self.current_task = task;
		}

		// The caller performs the actual switch using this slot.
		#[cfg(not(target_arch = "riscv64"))]
		return Some(last_stack_pointer);

		#[cfg(target_arch = "riscv64")]
		{
			// Save the FPU state only if it was modified (dirty).
			if sstatus::read().fs() == sstatus::FS::Dirty {
				self.current_task.borrow_mut().last_fpu_state.save();
			}
			task.borrow().last_fpu_state.restore();
			self.current_task = task;
			unsafe {
				switch_to_task(last_stack_pointer, new_stack_pointer.as_usize());
			}
			None
		}
	}
852}
853
/// Returns a task id not currently present in `TASKS`.
///
/// The monotonically increasing counter is probed against the task table
/// so wrapped-around ids of still-living tasks are skipped.
fn get_tid() -> TaskId {
	static TID_COUNTER: AtomicI32 = AtomicI32::new(0);
	// Hold the table lock while probing candidate ids.
	let guard = TASKS.lock();

	loop {
		let id = TaskId::from(TID_COUNTER.fetch_add(1, Ordering::SeqCst));
		if !guard.contains_key(&id) {
			return id;
		}
	}
}
865
/// Terminates the current task with exit code `-1`; never returns.
#[inline]
pub(crate) fn abort() -> ! {
	core_scheduler().exit(-1)
}
870
/// Initializes scheduling for the calling core: creates its idle task,
/// registers it in the global task tables, installs a `PerCoreScheduler`
/// in core-local storage and (with SMP) registers the core's input queue
/// and halt flag.
pub(crate) fn add_current_core() {
	// Create an idle task for this core.
	let core_id = core_id();
	let tid = get_tid();
	let idle_task = Rc::new(RefCell::new(Task::new_idle(tid, core_id)));

	// Add the idle task to the global tables.
	WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
	TASKS.lock().insert(
		tid,
		TaskHandle::new(
			tid,
			IDLE_PRIO,
			#[cfg(feature = "smp")]
			core_id,
		),
	);
	debug!("Initializing scheduler for core {core_id} with idle task {tid}");
	let boxed_scheduler = Box::new(PerCoreScheduler {
		#[cfg(feature = "smp")]
		core_id,
		// The idle task starts out as the current task (and FPU owner).
		current_task: idle_task.clone(),
		#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
		fpu_owner: idle_task.clone(),
		idle_task,
		ready_queue: PriorityTaskQueue::new(),
		finished_tasks: VecDeque::new(),
		blocked_tasks: BlockedTaskQueue::new(),
	});

	// The scheduler lives for the kernel's whole lifetime; it is
	// intentionally leaked into core-local storage.
	let scheduler = Box::into_raw(boxed_scheduler);
	set_core_scheduler(scheduler);
	#[cfg(feature = "smp")]
	{
		// Register this core's input queue and halt flag at index
		// `core_id` (assumes cores initialize in ascending order —
		// TODO confirm; `Vec::insert` would panic otherwise).
		SCHEDULER_INPUTS.lock().insert(
			core_id.try_into().unwrap(),
			&CoreLocal::get().scheduler_input,
		);
		#[cfg(target_arch = "x86_64")]
		CORE_HLT_STATE
			.lock()
			.insert(core_id.try_into().unwrap(), &CoreLocal::get().hlt);
	}
}
917
/// Atomically reads and clears the halt flag of `core_id`, returning the
/// previous value.
#[inline]
#[cfg(all(target_arch = "x86_64", feature = "smp", not(feature = "idle-poll")))]
pub(crate) fn take_core_hlt_state(core_id: CoreId) -> bool {
	CORE_HLT_STATE.lock()[usize::try_from(core_id).unwrap()].swap(false, Ordering::Acquire)
}
923
/// Returns the scheduler input queue of `core_id`.
///
/// Panics if the core has not registered via `add_current_core` yet.
#[inline]
#[cfg(feature = "smp")]
fn get_scheduler_input(core_id: CoreId) -> &'static InterruptTicketMutex<SchedulerInput> {
	SCHEDULER_INPUTS.lock()[usize::try_from(core_id).unwrap()]
}
929
/// Spawns a new task on the core chosen by `selector`: a non-negative
/// selector names the core directly; a negative one picks a core
/// round-robin across all processors.
///
/// # Safety
///
/// `func` must be safe to call with `arg`.
pub unsafe fn spawn(
	func: unsafe extern "C" fn(usize),
	arg: usize,
	prio: Priority,
	stack_size: usize,
	selector: isize,
) -> TaskId {
	static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

	let core_id = if selector < 0 {
		// Distribute tasks round-robin over all available cores.
		CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % get_processor_count()
	} else {
		// The user selected a specific core.
		selector as u32
	};

	unsafe { PerCoreScheduler::spawn(func, arg, prio, core_id, stack_size) }
}
948
/// Blocks the calling task until task `id` has finished.
///
/// Returns `Ok(())` once `id` no longer has a wait queue in
/// `WAITING_TASKS`, i.e. it has exited (or never existed).
#[allow(clippy::result_unit_err)]
pub fn join(id: TaskId) -> Result<(), ()> {
	let core_scheduler = core_scheduler();

	debug!(
		"Task {} is waiting for task {}",
		core_scheduler.get_current_task_id(),
		id
	);

	loop {
		let mut waiting_tasks_guard = WAITING_TASKS.lock();

		// No wait queue anymore: the task has already exited.
		let Some(queue) = waiting_tasks_guard.get_mut(&id) else {
			return Ok(());
		};

		// Enqueue ourselves and mark us blocked while still holding the
		// lock, so the exiting task cannot miss our wakeup.
		queue.push_back(core_scheduler.get_current_task_handle());
		core_scheduler.block_current_task(None);

		// Release the lock, then switch away; we re-check on wakeup.
		drop(waiting_tasks_guard);
		core_scheduler.reschedule();
	}
}
974
/// Shuts the system down with exit code `arg`; never returns.
pub fn shutdown(arg: i32) -> ! {
	crate::syscalls::shutdown(arg)
}
978
979fn get_task_handle(id: TaskId) -> Option<TaskHandle> {
980 TASKS.lock().get(&id).copied()
981}
982
/// Root page table of the boot processor; initialized elsewhere
/// (presumably during early boot — TODO confirm initialization site).
#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) static BOOT_ROOT_PAGE_TABLE: OnceCell<usize> = OnceCell::new();
985
/// Returns the root page table of the currently running task.
#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) fn get_root_page_table() -> usize {
	// A shared borrow suffices for this read-only access; the previous
	// `borrow_mut` could panic if any shared borrow were active.
	let current_task_borrowed = core_scheduler().current_task.borrow();
	current_task_borrowed.root_page_table
}