#![allow(clippy::type_complexity)]

use alloc::boxed::Box;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use alloc::sync::Arc;
#[cfg(feature = "smp")]
use alloc::vec::Vec;
use core::cell::RefCell;
use core::future::{self, Future};
use core::ptr;
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
use core::sync::atomic::AtomicBool;
use core::sync::atomic::{AtomicI32, AtomicU32, Ordering};
use core::task::Poll::Ready;
use core::task::ready;

use ahash::RandomState;
use crossbeam_utils::Backoff;
use hashbrown::HashMap;
use hermit_sync::*;
#[cfg(target_arch = "riscv64")]
use riscv::register::sstatus;

use crate::arch::core_local::*;
#[cfg(target_arch = "riscv64")]
use crate::arch::switch::switch_to_task;
#[cfg(target_arch = "x86_64")]
use crate::arch::switch::{switch_to_fpu_owner, switch_to_task};
use crate::arch::{get_processor_count, interrupts};
use crate::fd::{FileDescriptor, ObjectInterface};
use crate::kernel::scheduler::TaskStacks;
use crate::scheduler::task::*;
use crate::{arch, io};

pub mod task;

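/// Total number of tasks currently managed by all per-core schedulers.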
static NO_TASKS: AtomicU32 = AtomicU32::new(0);
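/// Per-core scheduler input queues, indexed by core ID.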
#[cfg(feature = "smp")]
static SCHEDULER_INPUTS: SpinMutex<Vec<&InterruptTicketMutex<SchedulerInput>>> =
    SpinMutex::new(Vec::new());
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
static CORE_HLT_STATE: SpinMutex<Vec<&AtomicBool>> = SpinMutex::new(Vec::new());
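/// Map between a task ID and the queue of tasks waiting for it to finish.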
static WAITING_TASKS: InterruptTicketMutex<BTreeMap<TaskId, VecDeque<TaskHandle>>> =
    InterruptTicketMutex::new(BTreeMap::new());
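/// Map between a task ID and its task handle.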
static TASKS: InterruptTicketMutex<BTreeMap<TaskId, TaskHandle>> =
    InterruptTicketMutex::new(BTreeMap::new());

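/// Unique identifier of a CPU core.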
pub type CoreId = u32;

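/// Queues through which other cores hand tasks over to a core's scheduler.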
#[cfg(feature = "smp")]
pub(crate) struct SchedulerInput {
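    /// Queue of new tasks spawned for this core.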
    new_tasks: VecDeque<NewTask>,
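    /// Queue of tasks that were woken up by another core.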
    wakeup_tasks: VecDeque<TaskHandle>,
}

#[cfg(feature = "smp")]
impl SchedulerInput {
    pub fn new() -> Self {
        Self {
            new_tasks: VecDeque::new(),
            wakeup_tasks: VecDeque::new(),
        }
    }
}

#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
pub(crate) struct PerCoreScheduler {
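    /// Core ID of this per-core scheduler.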
    #[cfg(feature = "smp")]
    core_id: CoreId,
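    /// Task which is currently running.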
    current_task: Rc<RefCell<Task>>,
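    /// Idle task.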
    idle_task: Rc<RefCell<Task>>,
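    /// Task that currently owns the FPU.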
    #[cfg(target_arch = "x86_64")]
    fpu_owner: Rc<RefCell<Task>>,
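    /// Queue of tasks which are ready to run.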
    ready_queue: PriorityTaskQueue,
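    /// Queue of tasks which are finished and can be released.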
    finished_tasks: VecDeque<Rc<RefCell<Task>>>,
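    /// Queue of blocked tasks, sorted by their wakeup time.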
    blocked_tasks: BlockedTaskQueue,
}

pub(crate) trait PerCoreSchedulerExt {
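    /// Triggers the scheduler to switch to another task if one is ready.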
    fn reschedule(self);

    #[cfg(any(feature = "tcp", feature = "udp"))]
    fn add_network_timer(self, wakeup_time: Option<u64>);

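    /// Terminates the current task on the current core; never returns.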
    fn exit(self, exit_code: i32) -> !;
}

impl PerCoreSchedulerExt for &mut PerCoreScheduler {
    #[cfg(target_arch = "x86_64")]
    fn reschedule(self) {
        without_interrupts(|| {
            if let Some(last_stack_pointer) = self.scheduler() {
                let (new_stack_pointer, is_idle) = {
                    let borrowed = self.current_task.borrow();
                    (
                        borrowed.last_stack_pointer,
                        borrowed.status == TaskStatus::Idle,
                    )
                };

                if is_idle || Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
                    unsafe {
                        switch_to_fpu_owner(
                            last_stack_pointer,
                            new_stack_pointer.as_u64() as usize,
                        );
                    }
                } else {
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_u64() as usize);
                    }
                }
            }
        });
    }

    #[cfg(target_arch = "aarch64")]
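    /// Triggers a software-generated interrupt (SGI) on this core so that the
    /// interrupt handler performs the reschedule.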
    fn reschedule(self) {
        use core::arch::asm;

        use arm_gic::IntId;
        use arm_gic::gicv3::{GicV3, SgiTarget};

        use crate::interrupts::SGI_RESCHED;

        unsafe {
            asm!("dsb nsh", "isb", options(nostack, nomem, preserves_flags));
        }

        let reschedid = IntId::sgi(SGI_RESCHED.into());
        #[cfg(feature = "smp")]
        let core_id = self.core_id;
        #[cfg(not(feature = "smp"))]
        let core_id = 0;

        GicV3::send_sgi(
            reschedid,
            SgiTarget::List {
                affinity3: 0,
                affinity2: 0,
                affinity1: 0,
                target_list: 1 << core_id,
            },
        );

        interrupts::enable();
    }

    #[cfg(target_arch = "riscv64")]
    fn reschedule(self) {
        without_interrupts(|| self.scheduler());
    }

    #[cfg(any(feature = "tcp", feature = "udp"))]
    fn add_network_timer(self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks.add_network_timer(wakeup_time);
        });
    }

    fn exit(self, exit_code: i32) -> ! {
        without_interrupts(|| {
            let mut current_task_borrowed = self.current_task.borrow_mut();
            assert_ne!(
                current_task_borrowed.status,
                TaskStatus::Idle,
                "Trying to terminate the idle task"
            );

            debug!(
                "Finishing task {} with exit code {}",
                current_task_borrowed.id, exit_code
            );
            current_task_borrowed.status = TaskStatus::Finished;
            NO_TASKS.fetch_sub(1, Ordering::SeqCst);

            let current_id = current_task_borrowed.id;
            drop(current_task_borrowed);

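            // Wake up all tasks waiting for this task to finish.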
            if let Some(mut queue) = WAITING_TASKS.lock().remove(&current_id) {
                while let Some(task) = queue.pop_front() {
                    self.custom_wakeup(task);
                }
            }
        });

        self.reschedule();
        unreachable!()
    }
}

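/// All data needed to construct a new task; the conversion into a `Task`
/// happens via `Task::from` on the core that owns the task.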
struct NewTask {
    tid: TaskId,
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    core_id: CoreId,
    stacks: TaskStacks,
    object_map:
        Arc<async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>>,
}

impl From<NewTask> for Task {
    fn from(value: NewTask) -> Self {
        let NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map,
        } = value;
        let mut task = Self::new(tid, core_id, TaskStatus::Ready, prio, stacks, object_map);
        task.create_stack_frame(func, arg);
        task
    }
}

impl PerCoreScheduler {
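    /// Creates a new task with the given entry point `func`, argument `arg`,
    /// priority, and stack size, and schedules it on the given core.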
    pub unsafe fn spawn(
        func: unsafe extern "C" fn(usize),
        arg: usize,
        prio: Priority,
        core_id: CoreId,
        stack_size: usize,
    ) -> TaskId {
        let tid = get_tid();
        let stacks = TaskStacks::new(stack_size);
        let new_task = NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map: core_scheduler().get_current_task_object_map(),
        };

        let wakeup = {
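            // Register the task globally; if the target core is remote, hand
            // the task over through that core's scheduler input queue.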
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);

            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                input_locked.new_tasks.push_back(new_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id)
            }
        };

        debug!("Creating task {tid} with priority {prio} on core {core_id}");

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

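    /// Creates a copy of the current task on another core, reusing its
    /// priority and file-descriptor table (newlib builds only).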
    #[cfg(feature = "newlib")]
    fn clone_impl(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);

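        // Pick the target core in round-robin fashion.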
        let core_id: CoreId = {
            let id = NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst);

            if id == arch::get_processor_count() {
                NEXT_CORE_ID.store(0, Ordering::SeqCst);
                0
            } else {
                id
            }
        };

        let current_task_borrowed = self.current_task.borrow();

        let tid = get_tid();
        let clone_task = NewTask {
            tid,
            func,
            arg,
            prio: current_task_borrowed.prio,
            core_id,
            stacks: TaskStacks::new(current_task_borrowed.stacks.get_user_stack_size()),
            object_map: current_task_borrowed.object_map.clone(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    current_task_borrowed.prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);
            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                input_locked.new_tasks.push_back(clone_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id);
            }
        };

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

    #[cfg(feature = "newlib")]
    pub fn clone(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        without_interrupts(|| self.clone_impl(func, arg))
    }

    #[inline]
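    /// Returns `true` if a ready task has a higher priority than the current task.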
    #[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
    pub fn is_scheduling(&self) -> bool {
        self.current_task.borrow().prio < self.ready_queue.get_highest_priority()
    }

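    /// Runs the async executor and moves blocked tasks whose wakeup time has
    /// passed back into the ready queue.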
    #[inline]
    pub fn handle_waiting_tasks(&mut self) {
        without_interrupts(|| {
            crate::executor::run();
            self.blocked_tasks
                .handle_waiting_tasks(&mut self.ready_queue);
        });
    }

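    /// Wakes up the given blocked task and pushes it onto the ready queue.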
    #[cfg(not(feature = "smp"))]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        without_interrupts(|| {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        });
    }

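    /// Wakes up the given blocked task. A task belonging to another core is
    /// handed over through that core's scheduler input queue, and the core is
    /// woken up.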
    #[cfg(feature = "smp")]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        if task.get_core_id() == self.core_id {
            without_interrupts(|| {
                let task = self.blocked_tasks.custom_wakeup(task);
                self.ready_queue.push(task);
            });
        } else {
            get_scheduler_input(task.get_core_id())
                .lock()
                .wakeup_tasks
                .push_back(task);
            arch::wakeup_core(task.get_core_id());
        }
    }

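    /// Blocks the current task, optionally only until the given wakeup time.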
    #[inline]
    pub fn block_current_task(&mut self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks
                .add(self.current_task.clone(), wakeup_time);
        });
    }

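    /// Returns a handle to the task that is currently running on this core.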
    #[inline]
    pub fn get_current_task_handle(&self) -> TaskHandle {
        without_interrupts(|| {
            let current_task_borrowed = self.current_task.borrow();

            TaskHandle::new(
                current_task_borrowed.id,
                current_task_borrowed.prio,
                #[cfg(feature = "smp")]
                current_task_borrowed.core_id,
            )
        })
    }

    #[inline]
    pub fn get_current_task_id(&self) -> TaskId {
        without_interrupts(|| self.current_task.borrow().id)
    }

    #[inline]
    pub fn get_current_task_object_map(
        &self,
    ) -> Arc<async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>> {
        without_interrupts(|| self.current_task.borrow().object_map.clone())
    }

    #[inline]
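    /// Returns the object associated with the file descriptor `fd` of the current task.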
    pub async fn get_object(&self, fd: FileDescriptor) -> io::Result<Arc<dyn ObjectInterface>> {
        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.read());

                let guard = ready!(pinned_obj.as_mut().poll(cx));
                Ready(guard.get(&fd).cloned().ok_or(io::Error::EBADF))
            })
        })
        .await
    }

    #[allow(dead_code)]
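    /// Replaces the object map of the current task by a fresh one that keeps
    /// only the standard I/O descriptors (file descriptors 0 through 2).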
    pub async fn recreate_objmap(&self) -> io::Result<()> {
        let mut map = HashMap::<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>::with_hasher(
            RandomState::with_seeds(0, 0, 0, 0),
        );

        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.read());

                let guard = ready!(pinned_obj.as_mut().poll(cx));
                for i in 0..3 {
                    if let Some(obj) = guard.get(&i) {
                        map.insert(i, obj.clone());
                    }
                }

                Ready(io::Result::Ok(()))
            })
        })
        .await?;

        without_interrupts(|| {
            self.current_task.borrow_mut().object_map = Arc::new(async_lock::RwLock::new(map));
        });

        Ok(())
    }

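    /// Inserts `obj` at the lowest free file descriptor of the current task
    /// and returns that descriptor.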
    pub async fn insert_object(&self, obj: Arc<dyn ObjectInterface>) -> io::Result<FileDescriptor> {
        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.write());

                let mut guard = ready!(pinned_obj.as_mut().poll(cx));
                let new_fd = || -> io::Result<FileDescriptor> {
                    let mut fd: FileDescriptor = 0;
                    loop {
                        if !guard.contains_key(&fd) {
                            break Ok(fd);
                        } else if fd == FileDescriptor::MAX {
                            break Err(io::Error::EOVERFLOW);
                        }

                        fd = fd.saturating_add(1);
                    }
                };

                let fd = new_fd()?;
                let _ = guard.insert(fd, obj.clone());
                Ready(Ok(fd))
            })
        })
        .await
    }

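    /// Duplicates the object referenced by `fd` under the lowest free file
    /// descriptor and returns the new descriptor.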
    pub async fn dup_object(&self, fd: FileDescriptor) -> io::Result<FileDescriptor> {
        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.write());

                let mut guard = ready!(pinned_obj.as_mut().poll(cx));
                let obj = (*(guard.get(&fd).ok_or(io::Error::EINVAL)?)).clone();

                let new_fd = || -> io::Result<FileDescriptor> {
                    let mut fd: FileDescriptor = 0;
                    loop {
                        if !guard.contains_key(&fd) {
                            break Ok(fd);
                        } else if fd == FileDescriptor::MAX {
                            break Err(io::Error::EOVERFLOW);
                        }

                        fd = fd.saturating_add(1);
                    }
                };

                let fd = new_fd()?;
                if guard.try_insert(fd, obj).is_err() {
                    Ready(Err(io::Error::EMFILE))
                } else {
                    Ready(Ok(fd))
                }
            })
        })
        .await
    }

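    /// Duplicates the object referenced by `fd1` under the descriptor `fd2`.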
    pub async fn dup_object2(
        &self,
        fd1: FileDescriptor,
        fd2: FileDescriptor,
    ) -> io::Result<FileDescriptor> {
        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.write());
                let mut guard = ready!(pinned_obj.as_mut().poll(cx));
                let obj = guard.get(&fd1).cloned().ok_or(io::Error::EBADF)?;

                if guard.try_insert(fd2, obj).is_err() {
                    Ready(Err(io::Error::EMFILE))
                } else {
                    Ready(Ok(fd2))
                }
            })
        })
        .await
    }

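    /// Removes the object associated with `fd` from the current task and returns it.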
    pub async fn remove_object(&self, fd: FileDescriptor) -> io::Result<Arc<dyn ObjectInterface>> {
        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.write());
                let mut guard = ready!(pinned_obj.as_mut().poll(cx));
                Ready(guard.remove(&fd).ok_or(io::Error::EBADF))
            })
        })
        .await
    }

    #[inline]
    pub fn get_current_task_prio(&self) -> Priority {
        without_interrupts(|| self.current_task.borrow().prio)
    }

    #[allow(dead_code)]
    #[inline]
    pub fn get_priority_bitmap(&self) -> &u64 {
        self.ready_queue.get_priority_bitmap()
    }

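    /// Loads the kernel and interrupt stacks of the current task into the TSS
    /// and the core-local storage.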
    #[cfg(target_arch = "x86_64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();
        let tss = unsafe { &mut *CoreLocal::get().tss.get() };

        let rsp = current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.privilege_stack_table[0] = rsp.into();
        CoreLocal::get().kernel_stack.set(rsp.as_mut_ptr());
        let ist_start = current_task_borrowed.stacks.get_interrupt_stack()
            + current_task_borrowed.stacks.get_interrupt_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.interrupt_stack_table[0] = ist_start.into();
    }

    pub fn set_current_task_priority(&mut self, prio: Priority) {
        without_interrupts(|| {
            trace!("Change priority of the current task");
            self.current_task.borrow_mut().prio = prio;
        });
    }

    pub fn set_priority(&mut self, id: TaskId, prio: Priority) -> Result<(), ()> {
        trace!("Change priority of task {id} to priority {prio}");

        without_interrupts(|| {
            let task = get_task_handle(id).ok_or(())?;
            #[cfg(feature = "smp")]
            let other_core = task.get_core_id() != self.core_id;
            #[cfg(not(feature = "smp"))]
            let other_core = false;

            if other_core {
                warn!("Have to change the priority on another core");
            } else if self.current_task.borrow().id == task.get_id() {
                self.current_task.borrow_mut().prio = prio;
            } else {
                self.ready_queue
                    .set_priority(task, prio)
                    .expect("Could not find task in the ready queue");
            }

            Ok(())
        })
    }

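    /// Stores the kernel stack of the current task in the core-local storage.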
    #[cfg(target_arch = "riscv64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();

        let stack = (current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64)
            .as_u64();
        CoreLocal::get().kernel_stack.set(stack);
    }

    #[cfg(target_arch = "x86_64")]
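    /// Saves the FPU context of the previous FPU owner and restores the
    /// context of the current task, which becomes the new owner.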
    pub fn fpu_switch(&mut self) {
        if !Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
            debug!(
                "Switching FPU owner from task {} to {}",
                self.fpu_owner.borrow().id,
                self.current_task.borrow().id
            );

            self.fpu_owner.borrow_mut().last_fpu_state.save();
            self.current_task.borrow().last_fpu_state.restore();
            self.fpu_owner = self.current_task.clone();
        }
    }

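    /// Releases all tasks that finished since the last call.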
    fn cleanup_tasks(&mut self) {
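        // Dropping the popped `Rc` releases the task and its stacks.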
        while let Some(finished_task) = self.finished_tasks.pop_front() {
            debug!("Cleaning up task {}", finished_task.borrow().id);
        }
    }

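    /// Drains this core's scheduler input queues: wakes up tasks woken by
    /// other cores and enqueues tasks spawned by other cores.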
    #[cfg(feature = "smp")]
    pub fn check_input(&mut self) {
        let mut input_locked = CoreLocal::get().scheduler_input.lock();

        while let Some(task) = input_locked.wakeup_tasks.pop_front() {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        }

        while let Some(new_task) = input_locked.new_tasks.pop_front() {
            let task = Rc::new(RefCell::new(Task::from(new_task)));
            self.ready_queue.push(task.clone());
        }
    }

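    /// Scheduling loop of the idle task: runs the async executor, takes over
    /// new and woken tasks, and halts the core (after a backoff) while no task
    /// is ready.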
    pub fn run() -> ! {
        let backoff = Backoff::new();

        loop {
            let core_scheduler = core_scheduler();
            interrupts::disable();

            crate::executor::run();

            #[cfg(feature = "smp")]
            core_scheduler.check_input();
            core_scheduler.cleanup_tasks();

            if core_scheduler.ready_queue.is_empty() {
                if backoff.is_completed() {
                    interrupts::enable_and_wait();
                    backoff.reset();
                } else {
                    interrupts::enable();
                    backoff.snooze();
                }
            } else {
                interrupts::enable();
                core_scheduler.reschedule();
                backoff.reset();
            }
        }
    }

    #[inline]
    #[cfg(target_arch = "aarch64")]
    pub fn get_last_stack_pointer(&self) -> memory_addresses::VirtAddr {
        self.current_task.borrow().last_stack_pointer
    }

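    /// Determines the next task to run. On most targets it returns a pointer
    /// to the stack-pointer slot of the previous task if a context switch is
    /// required; on riscv64 the switch is performed inside this function.
    /// Interrupts must be disabled while calling it.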
    pub fn scheduler(&mut self) -> Option<*mut usize> {
        crate::executor::run();

        self.cleanup_tasks();

        let (id, last_stack_pointer, prio, status) = {
            let mut borrowed = self.current_task.borrow_mut();
            (
                borrowed.id,
                ptr::from_mut(&mut borrowed.last_stack_pointer).cast::<usize>(),
                borrowed.prio,
                borrowed.status,
            )
        };

        let mut new_task = None;

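        // The current task is still runnable: preempt it only for a ready task
        // of at least the same priority.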
        if status == TaskStatus::Running {
            if let Some(task) = self.ready_queue.pop_with_prio(prio) {
                new_task = Some(task);
            }
        } else {
            if status == TaskStatus::Finished {
                self.current_task.borrow_mut().status = TaskStatus::Invalid;
                self.finished_tasks.push_back(self.current_task.clone());
            }

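            // The current task gave up the CPU: take the ready task with the
            // highest priority, or fall back to the idle task.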
            if let Some(task) = self.ready_queue.pop() {
                debug!("Task is available.");
                new_task = Some(task);
            } else if status != TaskStatus::Idle {
                debug!("Only Idle Task is available.");
                new_task = Some(self.idle_task.clone());
            }
        }

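        // Perform the switch: a task that was still running goes back into the
        // ready queue before the new task takes over.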
        if let Some(task) = new_task {
            if status == TaskStatus::Running {
                self.current_task.borrow_mut().status = TaskStatus::Ready;
                self.ready_queue.push(self.current_task.clone());
            }

            let (new_id, new_stack_pointer) = {
                let mut borrowed = task.borrow_mut();
                if borrowed.status != TaskStatus::Idle {
                    borrowed.status = TaskStatus::Running;
                }

                (borrowed.id, borrowed.last_stack_pointer)
            };

            if id != new_id {
                debug!(
                    "Switching task from {} to {} (stack {:#X} => {:p})",
                    id,
                    new_id,
                    unsafe { *last_stack_pointer },
                    new_stack_pointer
                );
                #[cfg(not(target_arch = "riscv64"))]
                {
                    self.current_task = task;
                }

                #[cfg(not(target_arch = "riscv64"))]
                return Some(last_stack_pointer);

                #[cfg(target_arch = "riscv64")]
                {
                    if sstatus::read().fs() == sstatus::FS::Dirty {
                        self.current_task.borrow_mut().last_fpu_state.save();
                    }
                    task.borrow().last_fpu_state.restore();
                    self.current_task = task;
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_usize());
                    }
                }
            }
        }

        None
    }
}

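/// Returns a task ID that is not yet in use.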
fn get_tid() -> TaskId {
    static TID_COUNTER: AtomicI32 = AtomicI32::new(0);
    let guard = TASKS.lock();

    loop {
        let id = TaskId::from(TID_COUNTER.fetch_add(1, Ordering::SeqCst));
        if !guard.contains_key(&id) {
            return id;
        }
    }
}

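/// Terminates the current task with exit code `-1`.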
#[inline]
pub(crate) fn abort() -> ! {
    core_scheduler().exit(-1)
}

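/// Creates the idle task and the per-core scheduler for the current core and
/// registers them in the global scheduler state.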
pub(crate) fn add_current_core() {
    let core_id = core_id();
    let tid = get_tid();
    let idle_task = Rc::new(RefCell::new(Task::new_idle(tid, core_id)));

    WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
    TASKS.lock().insert(
        tid,
        TaskHandle::new(
            tid,
            IDLE_PRIO,
            #[cfg(feature = "smp")]
            core_id,
        ),
    );
    debug!("Initializing scheduler for core {core_id} with idle task {tid}");
    let boxed_scheduler = Box::new(PerCoreScheduler {
        #[cfg(feature = "smp")]
        core_id,
        current_task: idle_task.clone(),
        #[cfg(target_arch = "x86_64")]
        fpu_owner: idle_task.clone(),
        idle_task,
        ready_queue: PriorityTaskQueue::new(),
        finished_tasks: VecDeque::new(),
        blocked_tasks: BlockedTaskQueue::new(),
    });

    let scheduler = Box::into_raw(boxed_scheduler);
    set_core_scheduler(scheduler);
    #[cfg(feature = "smp")]
    {
        SCHEDULER_INPUTS.lock().insert(
            core_id.try_into().unwrap(),
            &CoreLocal::get().scheduler_input,
        );
        #[cfg(target_arch = "x86_64")]
        CORE_HLT_STATE
            .lock()
            .insert(core_id.try_into().unwrap(), &CoreLocal::get().hlt);
    }
}

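/// Returns the halt flag of the given core and clears it.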
#[inline]
#[cfg(all(target_arch = "x86_64", feature = "smp", not(feature = "idle-poll")))]
pub(crate) fn take_core_hlt_state(core_id: CoreId) -> bool {
    CORE_HLT_STATE.lock()[usize::try_from(core_id).unwrap()].swap(false, Ordering::Acquire)
}

#[inline]
#[cfg(feature = "smp")]
fn get_scheduler_input(core_id: CoreId) -> &'static InterruptTicketMutex<SchedulerInput> {
    SCHEDULER_INPUTS.lock()[usize::try_from(core_id).unwrap()]
}

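/// Spawns a new task with the given entry point, argument, priority, and stack
/// size. A negative `selector` lets the scheduler pick the core in round-robin
/// fashion; a non-negative `selector` is used as the core ID.
///
/// A minimal usage sketch (the entry point and the chosen priority and stack
/// size are illustrative):
///
/// ```ignore
/// unsafe extern "C" fn task_entry(arg: usize) {
///     // task body
/// }
///
/// let tid = unsafe { scheduler::spawn(task_entry, 0, NORMAL_PRIO, 32_768, -1) };
/// ```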
pub unsafe fn spawn(
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    stack_size: usize,
    selector: isize,
) -> TaskId {
    static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

    let core_id = if selector < 0 {
        CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % get_processor_count()
    } else {
        selector as u32
    };

    unsafe { PerCoreScheduler::spawn(func, arg, prio, core_id, stack_size) }
}

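/// Blocks the current task until the task with the given `id` has finished.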
#[allow(clippy::result_unit_err)]
pub fn join(id: TaskId) -> Result<(), ()> {
    let core_scheduler = core_scheduler();

    debug!(
        "Task {} is waiting for task {}",
        core_scheduler.get_current_task_id(),
        id
    );

    loop {
        let mut waiting_tasks_guard = WAITING_TASKS.lock();

        if let Some(queue) = waiting_tasks_guard.get_mut(&id) {
            queue.push_back(core_scheduler.get_current_task_handle());
            core_scheduler.block_current_task(None);

            drop(waiting_tasks_guard);
            core_scheduler.reschedule();
        } else {
            return Ok(());
        }
    }
}

pub fn shutdown(arg: i32) -> ! {
    crate::syscalls::shutdown(arg)
}

fn get_task_handle(id: TaskId) -> Option<TaskHandle> {
    TASKS.lock().get(&id).copied()
}

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) static BOOT_ROOT_PAGE_TABLE: OnceCell<usize> = OnceCell::new();

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) fn get_root_page_table() -> usize {
    let current_task_borrowed = core_scheduler().current_task.borrow_mut();
    current_task_borrowed.root_page_table
}