// hermit/scheduler/task.rs

1#![allow(clippy::type_complexity)]
2
3#[cfg(not(feature = "common-os"))]
4use alloc::boxed::Box;
5use alloc::collections::{LinkedList, VecDeque};
6use alloc::rc::Rc;
7use alloc::sync::Arc;
8use alloc::vec::Vec;
9use core::cell::RefCell;
10use core::num::NonZeroU64;
11use core::{cmp, fmt};
12
13use ahash::RandomState;
14use crossbeam_utils::CachePadded;
15use hashbrown::HashMap;
16use hermit_sync::OnceCell;
17use memory_addresses::VirtAddr;
18
19use crate::arch::core_local::*;
20use crate::arch::scheduler::TaskStacks;
21#[cfg(not(feature = "common-os"))]
22use crate::arch::scheduler::TaskTLS;
23use crate::executor::poll_on;
24use crate::fd::stdio::*;
25use crate::fd::{FileDescriptor, ObjectInterface, STDERR_FILENO, STDIN_FILENO, STDOUT_FILENO};
26use crate::scheduler::CoreId;
27use crate::{arch, env, io};
28
/// Returns the most significant bit.
///
/// Returns `None` for `0`, since zero has no set bits.
///
/// # Examples
///
/// ```
/// assert_eq!(msb(0), None);
/// assert_eq!(msb(1), Some(0));
/// assert_eq!(msb(u64::MAX), Some(63));
/// ```
#[inline]
fn msb(n: u64) -> Option<u32> {
	// `leading_zeros` of a non-zero u64 is at most 63, so the subtraction cannot underflow.
	NonZeroU64::new(n).map(|n| u64::BITS - 1 - n.leading_zeros())
}
42
/// The status of the task - used for scheduling
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum TaskStatus {
	/// Placeholder for an uninitialized or invalid task.
	Invalid,
	/// Ready to be scheduled.
	Ready,
	/// Currently running on a core.
	Running,
	/// Waiting (e.g. in the `BlockedTaskQueue`) until woken up.
	Blocked,
	/// The task has terminated.
	Finished,
	/// The per-core idle task (see `Task::new_idle`).
	Idle,
}
53
/// Unique identifier for a task (i.e. `pid`).
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
pub struct TaskId(i32);

impl TaskId {
	/// Returns the raw numeric id.
	pub const fn into(self) -> i32 {
		self.0
	}

	/// Wraps a raw numeric id in a `TaskId`.
	pub const fn from(x: i32) -> Self {
		Self(x)
	}
}

impl fmt::Display for TaskId {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		let Self(id) = self;
		write!(f, "{id}")
	}
}
73
/// Priority of a task
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
pub struct Priority(u8);

impl Priority {
	/// Returns the raw priority value.
	pub const fn into(self) -> u8 {
		self.0
	}

	/// Wraps a raw value in a `Priority`.
	pub const fn from(x: u8) -> Self {
		Self(x)
	}
}

impl fmt::Display for Priority {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		let Self(prio) = self;
		write!(f, "{prio}")
	}
}
93
/// High task priority.
#[allow(dead_code)]
pub const HIGH_PRIO: Priority = Priority::from(3);
/// Normal task priority.
pub const NORMAL_PRIO: Priority = Priority::from(2);
/// Low task priority.
#[allow(dead_code)]
pub const LOW_PRIO: Priority = Priority::from(1);
/// Lowest priority; used by the idle task (see `Task::new_idle`).
pub const IDLE_PRIO: Priority = Priority::from(0);

/// Maximum number of priorities
///
/// This sizes the per-priority queue arrays below, and each priority maps to
/// one bit of the `u64` bitmaps tracking non-empty queues.
pub const NO_PRIORITIES: usize = 31;
103
/// Lightweight, copyable reference to a task, used by the scheduler queues.
#[derive(Copy, Clone, Debug)]
pub(crate) struct TaskHandle {
	/// Unique id of the referenced task.
	id: TaskId,
	/// Priority of the referenced task.
	priority: Priority,
	/// Core the referenced task belongs to (SMP builds only).
	#[cfg(feature = "smp")]
	core_id: CoreId,
}
111
impl TaskHandle {
	/// Creates a handle from the task's id and priority
	/// (plus its core id on SMP builds).
	pub fn new(id: TaskId, priority: Priority, #[cfg(feature = "smp")] core_id: CoreId) -> Self {
		Self {
			id,
			priority,
			#[cfg(feature = "smp")]
			core_id,
		}
	}

	/// Returns the id of the core the task belongs to.
	#[cfg(feature = "smp")]
	pub fn get_core_id(&self) -> CoreId {
		self.core_id
	}

	/// Returns the unique id of the referenced task.
	pub fn get_id(&self) -> TaskId {
		self.id
	}

	/// Returns the priority recorded in this handle.
	pub fn get_priority(&self) -> Priority {
		self.priority
	}
}
135
// Ordering and equality of task handles are based solely on the task id;
// `priority` (and `core_id` on SMP builds) are ignored for comparisons.
impl Ord for TaskHandle {
	fn cmp(&self, other: &Self) -> cmp::Ordering {
		self.id.cmp(&other.id)
	}
}

impl PartialOrd for TaskHandle {
	fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
		Some(self.cmp(other))
	}
}

impl PartialEq for TaskHandle {
	fn eq(&self, other: &Self) -> bool {
		self.id == other.id
	}
}

impl Eq for TaskHandle {}
155
/// Realize a priority queue for task handles
#[derive(Default)]
pub(crate) struct TaskHandlePriorityQueue {
	/// One FIFO queue per priority; each queue is allocated lazily on first use.
	queues: [Option<VecDeque<TaskHandle>>; NO_PRIORITIES],
	/// Bit `i` is set iff `queues[i]` is non-empty.
	prio_bitmap: CachePadded<u64>,
}
162
163impl TaskHandlePriorityQueue {
164	/// Creates an empty priority queue for tasks
165	pub const fn new() -> Self {
166		Self {
167			queues: [
168				None, None, None, None, None, None, None, None, None, None, None, None, None, None,
169				None, None, None, None, None, None, None, None, None, None, None, None, None, None,
170				None, None, None,
171			],
172			prio_bitmap: CachePadded::new(0),
173		}
174	}
175
176	/// Checks if the queue is empty.
177	pub fn is_empty(&self) -> bool {
178		self.prio_bitmap.into_inner() == 0
179	}
180
181	/// Checks if the given task is in the queue. Returns `true` if the task
182	/// was found.
183	pub fn contains(&self, task: TaskHandle) -> bool {
184		matches!(self.queues[task.priority.into() as usize]
185			.as_ref(), Some(queue) if queue.iter().any(|queued| queued.id == task.id))
186	}
187
188	/// Add a task handle by its priority to the queue
189	pub fn push(&mut self, task: TaskHandle) {
190		let i = task.priority.into() as usize;
191		//assert!(i < NO_PRIORITIES, "Priority {} is too high", i);
192
193		*self.prio_bitmap |= (1 << i) as u64;
194		if let Some(queue) = &mut self.queues[i] {
195			queue.push_back(task);
196		} else {
197			let mut queue = VecDeque::new();
198			queue.push_back(task);
199			self.queues[i] = Some(queue);
200		}
201	}
202
203	fn pop_from_queue(&mut self, queue_index: usize) -> Option<TaskHandle> {
204		if let Some(queue) = &mut self.queues[queue_index] {
205			let task = queue.pop_front();
206
207			if queue.is_empty() {
208				*self.prio_bitmap &= !(1 << queue_index as u64);
209			}
210
211			task
212		} else {
213			None
214		}
215	}
216
217	/// Pop the task handle with the highest priority from the queue
218	pub fn pop(&mut self) -> Option<TaskHandle> {
219		if let Some(i) = msb(self.prio_bitmap.into_inner()) {
220			return self.pop_from_queue(i as usize);
221		}
222
223		None
224	}
225
226	/// Remove a specific task handle from the priority queue. Returns `true` if
227	/// the handle was in the queue.
228	pub fn remove(&mut self, task: TaskHandle) -> bool {
229		let queue_index = task.priority.into() as usize;
230		//assert!(queue_index < NO_PRIORITIES, "Priority {} is too high", queue_index);
231
232		let mut success = false;
233		if let Some(queue) = &mut self.queues[queue_index] {
234			let mut i = 0;
235			while i != queue.len() {
236				if queue[i].id == task.id {
237					queue.remove(i);
238					success = true;
239				} else {
240					i += 1;
241				}
242			}
243
244			if queue.is_empty() {
245				*self.prio_bitmap &= !(1 << queue_index as u64);
246			}
247		}
248
249		success
250	}
251}
252
/// Realize a priority queue for tasks
pub(crate) struct PriorityTaskQueue {
	/// One FIFO list per priority.
	queues: [LinkedList<Rc<RefCell<Task>>>; NO_PRIORITIES],
	/// Bit `i` is set iff `queues[i]` is non-empty.
	prio_bitmap: u64,
}
258
259impl PriorityTaskQueue {
260	/// Creates an empty priority queue for tasks
261	pub const fn new() -> PriorityTaskQueue {
262		const EMPTY_LIST: LinkedList<Rc<RefCell<Task>>> = LinkedList::new();
263		PriorityTaskQueue {
264			queues: [EMPTY_LIST; NO_PRIORITIES],
265			prio_bitmap: 0,
266		}
267	}
268
269	/// Add a task by its priority to the queue
270	pub fn push(&mut self, task: Rc<RefCell<Task>>) {
271		let i = task.borrow().prio.into() as usize;
272		//assert!(i < NO_PRIORITIES, "Priority {} is too high", i);
273
274		self.prio_bitmap |= (1 << i) as u64;
275		let queue = &mut self.queues[i];
276		queue.push_back(task);
277	}
278
279	fn pop_from_queue(&mut self, queue_index: usize) -> Option<Rc<RefCell<Task>>> {
280		let task = self.queues[queue_index].pop_front();
281		if self.queues[queue_index].is_empty() {
282			self.prio_bitmap &= !(1 << queue_index as u64);
283		}
284
285		task
286	}
287
288	/// Remove the task at index from the queue and return that task,
289	/// or None if the index is out of range or the list is empty.
290	fn remove_from_queue(
291		&mut self,
292		task_index: usize,
293		queue_index: usize,
294	) -> Option<Rc<RefCell<Task>>> {
295		//assert!(prio < NO_PRIORITIES, "Priority {} is too high", prio);
296
297		let queue = &mut self.queues[queue_index];
298		if task_index <= queue.len() {
299			// Calling remove is unstable: https://github.com/rust-lang/rust/issues/69210
300			let mut split_list = queue.split_off(task_index);
301			let element = split_list.pop_front();
302			queue.append(&mut split_list);
303			if queue.is_empty() {
304				self.prio_bitmap &= !(1 << queue_index as u64);
305			}
306			element
307		} else {
308			None
309		}
310	}
311
312	/// Returns true if the queue is empty.
313	pub fn is_empty(&self) -> bool {
314		self.prio_bitmap == 0
315	}
316
317	/// Returns reference to prio_bitmap
318	#[allow(dead_code)]
319	#[inline]
320	pub fn get_priority_bitmap(&self) -> &u64 {
321		&self.prio_bitmap
322	}
323
324	/// Pop the task with the highest priority from the queue
325	pub fn pop(&mut self) -> Option<Rc<RefCell<Task>>> {
326		if let Some(i) = msb(self.prio_bitmap) {
327			return self.pop_from_queue(i as usize);
328		}
329
330		None
331	}
332
333	/// Pop the next task, which has a higher or the same priority as `prio`
334	pub fn pop_with_prio(&mut self, prio: Priority) -> Option<Rc<RefCell<Task>>> {
335		if let Some(i) = msb(self.prio_bitmap) {
336			if i >= u32::from(prio.into()) {
337				return self.pop_from_queue(i as usize);
338			}
339		}
340
341		None
342	}
343
344	/// Returns the highest priority of all available task
345	#[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
346	pub fn get_highest_priority(&self) -> Priority {
347		if let Some(i) = msb(self.prio_bitmap) {
348			Priority::from(i.try_into().unwrap())
349		} else {
350			IDLE_PRIO
351		}
352	}
353
354	/// Change priority of specific task
355	pub fn set_priority(&mut self, handle: TaskHandle, prio: Priority) -> Result<(), ()> {
356		let old_priority = handle.get_priority().into() as usize;
357		if let Some(index) = self.queues[old_priority]
358			.iter()
359			.position(|current_task| current_task.borrow().id == handle.id)
360		{
361			let Some(task) = self.remove_from_queue(index, old_priority) else {
362				return Err(());
363			};
364			task.borrow_mut().prio = prio;
365			self.push(task);
366			return Ok(());
367		}
368
369		Err(())
370	}
371}
372
/// A task control block, which identifies either a process or a thread
// Aligned to 128 bytes on x86_64/aarch64 and 64 bytes elsewhere —
// presumably one cache line per TCB; NOTE(review): confirm intent.
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
	not(any(target_arch = "x86_64", target_arch = "aarch64")),
	repr(align(64))
)]
pub(crate) struct Task {
	/// The ID of this context
	pub id: TaskId,
	/// Status of a task, e.g. if the task is ready or blocked
	pub status: TaskStatus,
	/// Task priority
	pub prio: Priority,
	/// Last stack pointer before a context switch to another task
	pub last_stack_pointer: VirtAddr,
	/// Last stack pointer on the user stack before jumping to kernel space
	pub user_stack_pointer: VirtAddr,
	/// Last FPU state before a context switch to another task using the FPU
	#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
	pub last_fpu_state: arch::processor::FPUState,
	/// ID of the core this task is running on
	pub core_id: CoreId,
	/// Stack of the task
	pub stacks: TaskStacks,
	/// Mapping between file descriptor and the referenced IO interface
	pub object_map:
		Arc<async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>>,
	/// Task Thread-Local-Storage (TLS)
	#[cfg(not(feature = "common-os"))]
	pub tls: Option<Box<TaskTLS>>,
	/// Physical address of the 1st level page table
	#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
	pub root_page_table: usize,
}
407
/// Creation of a task's initial stack frame.
pub(crate) trait TaskFrame {
	/// Create the initial stack frame for a new task
	///
	/// `func` is the entry function the task starts in and `arg` the value
	/// passed to it as its single argument.
	fn create_stack_frame(&mut self, func: unsafe extern "C" fn(usize), arg: usize);
}
412
impl Task {
	/// Creates a new task control block with the given scheduling parameters
	/// and a (shared) file-descriptor table `object_map`.
	///
	/// Stack pointers start at zero and are filled in later (e.g. when the
	/// initial stack frame is created, see `TaskFrame`).
	pub fn new(
		tid: TaskId,
		core_id: CoreId,
		task_status: TaskStatus,
		task_prio: Priority,
		stacks: TaskStacks,
		object_map: Arc<
			async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>,
		>,
	) -> Task {
		debug!("Creating new task {tid} on core {core_id}");

		Task {
			id: tid,
			status: task_status,
			prio: task_prio,
			last_stack_pointer: VirtAddr::zero(),
			user_stack_pointer: VirtAddr::zero(),
			#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
			last_fpu_state: arch::processor::FPUState::new(),
			core_id,
			stacks,
			object_map,
			#[cfg(not(feature = "common-os"))]
			tls: None,
			#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
			root_page_table: arch::create_new_root_page_table(),
		}
	}

	/// Creates the idle task for `core_id` (`IDLE_PRIO`, boot stacks).
	///
	/// On core 0 this also initializes the global file-descriptor table and
	/// registers stdin/stdout/stderr. Other cores reuse that table; the
	/// `OBJECT_MAP.get().unwrap()` below panics if core 0 has not run first —
	/// NOTE(review): boot ordering must guarantee this.
	pub fn new_idle(tid: TaskId, core_id: CoreId) -> Task {
		debug!("Creating idle task {tid}");

		/// All cores use the same mapping between file descriptor and the referenced object
		static OBJECT_MAP: OnceCell<
			Arc<async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>>,
		> = OnceCell::new();

		// Only the first core sets up the shared object map and the standard streams.
		if core_id == 0 {
			OBJECT_MAP
				.set(Arc::new(async_lock::RwLock::new(HashMap::<
					FileDescriptor,
					Arc<dyn ObjectInterface>,
					RandomState,
				>::with_hasher(
					// Fixed seeds: keys are kernel-internal file descriptors.
					RandomState::with_seeds(0, 0, 0, 0),
				))))
				.unwrap();
			let objmap = OBJECT_MAP.get().unwrap().clone();
			// The result is discarded: failures to register the standard
			// streams are ignored here.
			let _ = poll_on(async {
				let mut guard = objmap.write().await;
				// Under Uhyve the standard streams use the hypervisor backends;
				// otherwise the generic ones.
				if env::is_uhyve() {
					guard
						.try_insert(STDIN_FILENO, Arc::new(UhyveStdin::new()))
						.map_err(|_| io::Error::EIO)?;
					guard
						.try_insert(STDOUT_FILENO, Arc::new(UhyveStdout::new()))
						.map_err(|_| io::Error::EIO)?;
					guard
						.try_insert(STDERR_FILENO, Arc::new(UhyveStderr::new()))
						.map_err(|_| io::Error::EIO)?;
				} else {
					guard
						.try_insert(STDIN_FILENO, Arc::new(GenericStdin::new()))
						.map_err(|_| io::Error::EIO)?;
					guard
						.try_insert(STDOUT_FILENO, Arc::new(GenericStdout::new()))
						.map_err(|_| io::Error::EIO)?;
					guard
						.try_insert(STDERR_FILENO, Arc::new(GenericStderr::new()))
						.map_err(|_| io::Error::EIO)?;
				}

				Ok(())
			});
		}

		Task {
			id: tid,
			status: TaskStatus::Idle,
			prio: IDLE_PRIO,
			last_stack_pointer: VirtAddr::zero(),
			user_stack_pointer: VirtAddr::zero(),
			#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
			last_fpu_state: arch::processor::FPUState::new(),
			core_id,
			stacks: TaskStacks::from_boot_stacks(),
			object_map: OBJECT_MAP.get().unwrap().clone(),
			#[cfg(not(feature = "common-os"))]
			tls: None,
			#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
			root_page_table: *crate::scheduler::BOOT_ROOT_PAGE_TABLE.get().unwrap(),
		}
	}
}
509
510/*impl Drop for Task {
511	fn drop(&mut self) {
512		debug!("Drop task {}", self.id);
513	}
514}*/
515
/// A blocked task together with its optional wakeup deadline.
struct BlockedTask {
	/// The blocked task.
	task: Rc<RefCell<Task>>,
	/// Absolute wakeup time in timer ticks (compared against
	/// `arch::processor::get_timer_ticks()`), or `None` to block indefinitely.
	wakeup_time: Option<u64>,
}

impl BlockedTask {
	/// Bundles a task with its optional wakeup deadline.
	pub fn new(task: Rc<RefCell<Task>>, wakeup_time: Option<u64>) -> Self {
		Self { task, wakeup_time }
	}
}
526
/// Queue of blocked tasks on the current core.
pub(crate) struct BlockedTaskQueue {
	/// Blocked tasks, kept sorted by ascending wakeup time (see `add`);
	/// tasks without a wakeup time sort last.
	list: LinkedList<BlockedTask>,
	/// Next absolute tick at which the network stack wants to be polled.
	#[cfg(any(feature = "tcp", feature = "udp"))]
	network_wakeup_time: Option<u64>,
}
532
impl BlockedTaskQueue {
	/// Creates an empty queue of blocked tasks.
	pub const fn new() -> Self {
		Self {
			list: LinkedList::new(),
			#[cfg(any(feature = "tcp", feature = "udp"))]
			network_wakeup_time: None,
		}
	}

	/// Marks a blocked task as `Ready` again.
	///
	/// Panics if the task belongs to another core or is not in the
	/// `Blocked` state.
	fn wakeup_task(task: Rc<RefCell<Task>>) {
		let mut borrowed = task.borrow_mut();
		debug!(
			"Waking up task {} on core {}",
			borrowed.id, borrowed.core_id
		);

		assert!(
			borrowed.core_id == core_id(),
			"Try to wake up task {} on the wrong core {} != {}",
			borrowed.id,
			borrowed.core_id,
			core_id()
		);

		assert!(
			borrowed.status == TaskStatus::Blocked,
			"Trying to wake up task {} which is not blocked",
			borrowed.id
		);
		borrowed.status = TaskStatus::Ready;
	}

	/// Records the network stack's next poll deadline and reprograms the
	/// one-shot timer to the earlier of that deadline and the first sleeping
	/// task's wakeup time.
	#[cfg(any(feature = "tcp", feature = "udp"))]
	pub fn add_network_timer(&mut self, wakeup_time: Option<u64>) {
		self.network_wakeup_time = wakeup_time;

		// The list is sorted by wakeup time, so the front holds the earliest
		// task deadline (if any).
		let next = self.list.front().and_then(|t| t.wakeup_time);

		let time = match (wakeup_time, next) {
			(Some(a), Some(b)) => Some(a.min(b)),
			(a, b) => a.or(b),
		};

		arch::set_oneshot_timer(time);
	}

	/// Blocks the given task until the absolute timer tick `wakeup_time`,
	/// or indefinitely if `None` is given.
	///
	/// The task must currently be `Running`. Tasks with a deadline are kept
	/// sorted by wakeup time; tasks without one go to the back of the list.
	pub fn add(&mut self, task: Rc<RefCell<Task>>, wakeup_time: Option<u64>) {
		{
			// Set the task status to Blocked.
			let mut borrowed = task.borrow_mut();
			debug!("Blocking task {}", borrowed.id);

			assert_eq!(
				borrowed.status,
				TaskStatus::Running,
				"Trying to block task {} which is not running",
				borrowed.id
			);
			borrowed.status = TaskStatus::Blocked;
		}

		let new_node = BlockedTask::new(task, wakeup_time);

		// Shall the task automatically be woken up after a certain time?
		if let Some(wt) = wakeup_time {
			let mut cursor = self.list.cursor_front_mut();
			// Program the timer for the new deadline, capped by the network
			// poll deadline if that is earlier.
			// NOTE(review): this runs even when the new task is not inserted
			// at the front, i.e. when an earlier task deadline is already
			// queued — confirm `set_oneshot_timer` tolerates this.
			let set_oneshot_timer = || {
				#[cfg(not(any(feature = "tcp", feature = "udp")))]
				arch::set_oneshot_timer(wakeup_time);
				#[cfg(any(feature = "tcp", feature = "udp"))]
				match self.network_wakeup_time {
					Some(time) => {
						if time > wt {
							arch::set_oneshot_timer(wakeup_time);
						} else {
							arch::set_oneshot_timer(self.network_wakeup_time);
						}
					}
					_ => arch::set_oneshot_timer(wakeup_time),
				}
			};

			// Insert before the first task that wakes up later (or never);
			// this keeps the list sorted by wakeup time.
			while let Some(node) = cursor.current() {
				let node_wakeup_time = node.wakeup_time;
				if node_wakeup_time.is_none() || wt < node_wakeup_time.unwrap() {
					cursor.insert_before(new_node);

					set_oneshot_timer();
					return;
				}

				cursor.move_next();
			}

			// All queued tasks wake up earlier; append the new one at the end.
			set_oneshot_timer();
		}

		self.list.push_back(new_node);
	}

	/// Manually wake up a blocked task.
	///
	/// Removes the task from the list, reprograms the one-shot timer if the
	/// removed task was at the front, and returns the woken task.
	///
	/// Panics (`unreachable!`) if the handle is not in this queue.
	pub fn custom_wakeup(&mut self, task: TaskHandle) -> Rc<RefCell<Task>> {
		let mut first_task = true;
		let mut cursor = self.list.cursor_front_mut();

		// Drop a network poll deadline that has already passed.
		#[cfg(any(feature = "tcp", feature = "udp"))]
		if let Some(wakeup_time) = self.network_wakeup_time {
			if wakeup_time <= arch::processor::get_timer_ticks() {
				self.network_wakeup_time = None;
			}
		}

		// Loop through all blocked tasks to find it.
		while let Some(node) = cursor.current() {
			if node.task.borrow().id == task.get_id() {
				// Remove it from the list of blocked tasks.
				let task_ref = node.task.clone();
				cursor.remove_current();

				// If this is the first task, adjust the One-Shot Timer to fire at the
				// next task's wakeup time (if any).
				#[cfg(any(feature = "tcp", feature = "udp"))]
				if first_task {
					// Next deadline = min(next task's wakeup time, network poll time).
					arch::set_oneshot_timer(cursor.current().map_or_else(
						|| self.network_wakeup_time,
						|node| match node.wakeup_time {
							Some(wt) => {
								if let Some(timer) = self.network_wakeup_time {
									if wt < timer { Some(wt) } else { Some(timer) }
								} else {
									Some(wt)
								}
							}
							None => self.network_wakeup_time,
						},
					));
				}
				#[cfg(not(any(feature = "tcp", feature = "udp")))]
				if first_task {
					arch::set_oneshot_timer(
						cursor
							.current()
							.map_or_else(|| None, |node| node.wakeup_time),
					);
				}

				// Wake it up.
				Self::wakeup_task(task_ref.clone());

				return task_ref;
			}

			first_task = false;
			cursor.move_next();
		}

		// The handle must refer to a task blocked on this queue.
		unreachable!();
	}

	/// Wakes up all tasks whose wakeup time has elapsed.
	///
	/// Should be called by the One-Shot Timer interrupt handler when the wakeup time for
	/// at least one task has elapsed. Returns the woken tasks so the caller
	/// can reschedule them.
	pub fn handle_waiting_tasks(&mut self) -> Vec<Rc<RefCell<Task>>> {
		// Get the current time.
		let time = arch::processor::get_timer_ticks();

		// Poll the network stack (if initialized and the lock is free) and
		// remember its next poll deadline. Note that the poll delay in
		// microseconds is added directly to the tick count.
		#[cfg(any(feature = "tcp", feature = "udp"))]
		if let Some(mut guard) = crate::executor::network::NIC.try_lock() {
			if let crate::executor::network::NetworkState::Initialized(nic) = &mut *guard {
				let now = crate::executor::network::now();
				nic.poll_common(now);
				self.network_wakeup_time = nic.poll_delay(now).map(|d| d.total_micros() + time);
			}
		}

		let mut tasks = vec![];

		// Loop through all blocked tasks.
		let mut cursor = self.list.cursor_front_mut();
		while let Some(node) = cursor.current() {
			// Get the wakeup time of this task and check if we have reached the first task
			// that hasn't elapsed yet or waits indefinitely. The list is sorted,
			// so all following tasks wake up later as well.
			let node_wakeup_time = node.wakeup_time;
			if node_wakeup_time.is_none() || time < node_wakeup_time.unwrap() {
				break;
			}

			// Otherwise, this task has elapsed, so remove it from the list and wake it up.
			tasks.push(node.task.clone());
			cursor.remove_current();
		}

		// Reprogram the timer to the earlier of the next task's wakeup time
		// and the network poll deadline.
		#[cfg(any(feature = "tcp", feature = "udp"))]
		arch::set_oneshot_timer(cursor.current().map_or_else(
			|| self.network_wakeup_time,
			|node| match node.wakeup_time {
				Some(wt) => {
					if let Some(timer) = self.network_wakeup_time {
						if wt < timer { Some(wt) } else { Some(timer) }
					} else {
						Some(wt)
					}
				}
				None => self.network_wakeup_time,
			},
		));
		#[cfg(not(any(feature = "tcp", feature = "udp")))]
		arch::set_oneshot_timer(
			cursor
				.current()
				.map_or_else(|| None, |node| node.wakeup_time),
		);

		for task in tasks.iter().cloned() {
			Self::wakeup_task(task);
		}

		tasks
	}
}