hermit/scheduler/task.rs

#![allow(clippy::type_complexity)]

#[cfg(not(feature = "common-os"))]
use alloc::boxed::Box;
use alloc::collections::{LinkedList, VecDeque};
use alloc::rc::Rc;
use alloc::sync::Arc;
use core::cell::RefCell;
use core::num::NonZeroU64;
use core::{cmp, fmt};

use ahash::RandomState;
use crossbeam_utils::CachePadded;
use hashbrown::HashMap;
use hermit_sync::OnceCell;
use memory_addresses::VirtAddr;

use crate::arch::core_local::*;
use crate::arch::scheduler::TaskStacks;
#[cfg(not(feature = "common-os"))]
use crate::arch::scheduler::TaskTLS;
use crate::executor::poll_on;
use crate::fd::stdio::*;
use crate::fd::{FileDescriptor, ObjectInterface, STDERR_FILENO, STDIN_FILENO, STDOUT_FILENO};
use crate::scheduler::CoreId;
use crate::{arch, env, io};

/// Returns the most significant bit.
///
/// # Examples
///
/// ```
/// assert_eq!(msb(0), None);
/// assert_eq!(msb(1), Some(0));
/// assert_eq!(msb(u64::MAX), Some(63));
/// ```
#[inline]
fn msb(n: u64) -> Option<u32> {
	NonZeroU64::new(n).map(|n| u64::BITS - 1 - n.leading_zeros())
}
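
// The priority queues below keep one bit per priority level in a `prio_bitmap`;
// bit `i` is set whenever the queue for priority `i` is non-empty, so `msb`
// yields the highest ready priority in O(1). A minimal sketch of that relationship:
//
//     let prio_bitmap: u64 = (1 << 2) | (1 << 0); // priorities 2 and 0 are queued
//     assert_eq!(msb(prio_bitmap), Some(2));      // serve priority 2 first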

/// The status of a task, used for scheduling
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum TaskStatus {
	Invalid,
	Ready,
	Running,
	Blocked,
	Finished,
	Idle,
}

/// Unique identifier for a task (i.e. `pid`).
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
pub struct TaskId(i32);

impl TaskId {
	pub const fn into(self) -> i32 {
		self.0
	}

	pub const fn from(x: i32) -> Self {
		TaskId(x)
	}
}

impl fmt::Display for TaskId {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		write!(f, "{}", self.0)
	}
}

/// Priority of a task
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
pub struct Priority(u8);

impl Priority {
	pub const fn into(self) -> u8 {
		self.0
	}

	pub const fn from(x: u8) -> Self {
		Priority(x)
	}
}

impl fmt::Display for Priority {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		write!(f, "{}", self.0)
	}
}

#[allow(dead_code)]
pub const HIGH_PRIO: Priority = Priority::from(3);
pub const NORMAL_PRIO: Priority = Priority::from(2);
#[allow(dead_code)]
pub const LOW_PRIO: Priority = Priority::from(1);
pub const IDLE_PRIO: Priority = Priority::from(0);

/// Number of supported priority levels
pub const NO_PRIORITIES: usize = 31;
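
// Priorities compare by their numeric value, and the `u8` value doubles as the
// queue index in the priority queues below. For example:
//
//     assert!(HIGH_PRIO > NORMAL_PRIO && NORMAL_PRIO > IDLE_PRIO);
//     assert_eq!(NORMAL_PRIO.into(), 2);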

#[derive(Copy, Clone, Debug)]
pub(crate) struct TaskHandle {
	id: TaskId,
	priority: Priority,
	#[cfg(feature = "smp")]
	core_id: CoreId,
}

impl TaskHandle {
	pub fn new(id: TaskId, priority: Priority, #[cfg(feature = "smp")] core_id: CoreId) -> Self {
		Self {
			id,
			priority,
			#[cfg(feature = "smp")]
			core_id,
		}
	}

	#[cfg(feature = "smp")]
	pub fn get_core_id(&self) -> CoreId {
		self.core_id
	}

	pub fn get_id(&self) -> TaskId {
		self.id
	}

	pub fn get_priority(&self) -> Priority {
		self.priority
	}
}

impl Ord for TaskHandle {
	fn cmp(&self, other: &Self) -> cmp::Ordering {
		self.id.cmp(&other.id)
	}
}

impl PartialOrd for TaskHandle {
	fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
		Some(self.cmp(other))
	}
}

impl PartialEq for TaskHandle {
	fn eq(&self, other: &Self) -> bool {
		self.id == other.id
	}
}

impl Eq for TaskHandle {}

/// A priority queue for task handles
#[derive(Default)]
pub(crate) struct TaskHandlePriorityQueue {
	queues: [Option<VecDeque<TaskHandle>>; NO_PRIORITIES],
	prio_bitmap: CachePadded<u64>,
}

impl TaskHandlePriorityQueue {
	/// Creates an empty priority queue for task handles
	pub const fn new() -> Self {
		Self {
			queues: [const { None }; NO_PRIORITIES],
			prio_bitmap: CachePadded::new(0),
		}
	}

	/// Checks if the queue is empty.
	pub fn is_empty(&self) -> bool {
		self.prio_bitmap.into_inner() == 0
	}

	/// Checks if the given task is in the queue. Returns `true` if the task
	/// was found.
	pub fn contains(&self, task: TaskHandle) -> bool {
		matches!(self.queues[task.priority.into() as usize]
			.as_ref(), Some(queue) if queue.iter().any(|queued| queued.id == task.id))
	}

	/// Add a task handle by its priority to the queue
	pub fn push(&mut self, task: TaskHandle) {
		let i = task.priority.into() as usize;
		//assert!(i < NO_PRIORITIES, "Priority {} is too high", i);

		*self.prio_bitmap |= (1 << i) as u64;
		if let Some(queue) = &mut self.queues[i] {
			queue.push_back(task);
		} else {
			let mut queue = VecDeque::new();
			queue.push_back(task);
			self.queues[i] = Some(queue);
		}
	}

	fn pop_from_queue(&mut self, queue_index: usize) -> Option<TaskHandle> {
		if let Some(queue) = &mut self.queues[queue_index] {
			let task = queue.pop_front();

			if queue.is_empty() {
				*self.prio_bitmap &= !(1 << queue_index as u64);
			}

			task
		} else {
			None
		}
	}

	/// Pop the task handle with the highest priority from the queue
	pub fn pop(&mut self) -> Option<TaskHandle> {
		if let Some(i) = msb(self.prio_bitmap.into_inner()) {
			return self.pop_from_queue(i as usize);
		}

		None
	}

	/// Remove a specific task handle from the priority queue. Returns `true` if
	/// the handle was in the queue.
	pub fn remove(&mut self, task: TaskHandle) -> bool {
		let queue_index = task.priority.into() as usize;
		//assert!(queue_index < NO_PRIORITIES, "Priority {} is too high", queue_index);

		let mut success = false;
		if let Some(queue) = &mut self.queues[queue_index] {
			let mut i = 0;
			while i != queue.len() {
				if queue[i].id == task.id {
					queue.remove(i);
					success = true;
				} else {
					i += 1;
				}
			}

			if queue.is_empty() {
				*self.prio_bitmap &= !(1 << queue_index as u64);
			}
		}

		success
	}
}
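
// A minimal usage sketch, assuming the "smp" feature is disabled so that
// `TaskHandle::new` takes no core id:
//
//     let mut queue = TaskHandlePriorityQueue::new();
//     queue.push(TaskHandle::new(TaskId::from(1), LOW_PRIO));
//     queue.push(TaskHandle::new(TaskId::from(2), HIGH_PRIO));
//     assert_eq!(queue.pop().unwrap().get_id(), TaskId::from(2)); // highest priority first
//     assert_eq!(queue.pop().unwrap().get_id(), TaskId::from(1));
//     assert!(queue.is_empty());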

/// A priority queue for tasks
pub(crate) struct PriorityTaskQueue {
	queues: [LinkedList<Rc<RefCell<Task>>>; NO_PRIORITIES],
	prio_bitmap: u64,
}

impl PriorityTaskQueue {
	/// Creates an empty priority queue for tasks
	pub const fn new() -> PriorityTaskQueue {
		const EMPTY_LIST: LinkedList<Rc<RefCell<Task>>> = LinkedList::new();
		PriorityTaskQueue {
			queues: [EMPTY_LIST; NO_PRIORITIES],
			prio_bitmap: 0,
		}
	}

	/// Add a task by its priority to the queue
	pub fn push(&mut self, task: Rc<RefCell<Task>>) {
		let i = task.borrow().prio.into() as usize;
		//assert!(i < NO_PRIORITIES, "Priority {} is too high", i);

		self.prio_bitmap |= (1 << i) as u64;
		let queue = &mut self.queues[i];
		queue.push_back(task);
	}

	fn pop_from_queue(&mut self, queue_index: usize) -> Option<Rc<RefCell<Task>>> {
		let task = self.queues[queue_index].pop_front();
		if self.queues[queue_index].is_empty() {
			self.prio_bitmap &= !(1 << queue_index as u64);
		}

		task
	}

	/// Remove the task at `task_index` from the queue and return that task,
	/// or `None` if the index is out of range or the list is empty.
	fn remove_from_queue(
		&mut self,
		task_index: usize,
		queue_index: usize,
	) -> Option<Rc<RefCell<Task>>> {
		//assert!(prio < NO_PRIORITIES, "Priority {} is too high", prio);

		let queue = &mut self.queues[queue_index];
		if task_index <= queue.len() {
			// Calling remove is unstable: https://github.com/rust-lang/rust/issues/69210
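			// Instead, split the list at `task_index`: the element to remove becomes
			// the head of the split-off tail, which is popped before the remainder is
			// re-appended to the original queue.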
			let mut split_list = queue.split_off(task_index);
			let element = split_list.pop_front();
			queue.append(&mut split_list);
			if queue.is_empty() {
				self.prio_bitmap &= !(1 << queue_index as u64);
			}
			element
		} else {
			None
		}
	}

	/// Returns true if the queue is empty.
	pub fn is_empty(&self) -> bool {
		self.prio_bitmap == 0
	}

	/// Returns a reference to the priority bitmap
	#[allow(dead_code)]
	#[inline]
	pub fn get_priority_bitmap(&self) -> &u64 {
		&self.prio_bitmap
	}

	/// Pop the task with the highest priority from the queue
	pub fn pop(&mut self) -> Option<Rc<RefCell<Task>>> {
		if let Some(i) = msb(self.prio_bitmap) {
			return self.pop_from_queue(i as usize);
		}

		None
	}

	/// Pop the next task whose priority is higher than or equal to `prio`
	pub fn pop_with_prio(&mut self, prio: Priority) -> Option<Rc<RefCell<Task>>> {
		if let Some(i) = msb(self.prio_bitmap) {
			if i >= u32::from(prio.into()) {
				return self.pop_from_queue(i as usize);
			}
		}

		None
	}

	/// Returns the highest priority of all available tasks
	#[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
	pub fn get_highest_priority(&self) -> Priority {
		if let Some(i) = msb(self.prio_bitmap) {
			Priority::from(i.try_into().unwrap())
		} else {
			IDLE_PRIO
		}
	}

	/// Changes the priority of a specific task
	pub fn set_priority(&mut self, handle: TaskHandle, prio: Priority) -> Result<(), ()> {
		let old_priority = handle.get_priority().into() as usize;
		if let Some(index) = self.queues[old_priority]
			.iter()
			.position(|current_task| current_task.borrow().id == handle.id)
		{
			let Some(task) = self.remove_from_queue(index, old_priority) else {
				return Err(());
			};
			task.borrow_mut().prio = prio;
			self.push(task);
			return Ok(());
		}

		Err(())
	}
}
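
// A brief sketch of the two pop flavors, assuming `queue` currently holds one
// NORMAL_PRIO task and one LOW_PRIO task:
//
//     assert!(queue.pop_with_prio(HIGH_PRIO).is_none()); // nothing at or above HIGH_PRIO
//     let next = queue.pop().unwrap();                   // yields the NORMAL_PRIO task
//     assert_eq!(next.borrow().prio, NORMAL_PRIO);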

/// A task control block, which identifies either a process or a thread
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
	not(any(target_arch = "x86_64", target_arch = "aarch64")),
	repr(align(64))
)]
pub(crate) struct Task {
	/// The ID of this context
	pub id: TaskId,
	/// Status of a task, e.g. if the task is ready or blocked
	pub status: TaskStatus,
	/// Task priority
	pub prio: Priority,
	/// Last stack pointer before a context switch to another task
	pub last_stack_pointer: VirtAddr,
	/// Last stack pointer on the user stack before jumping to kernel space
	pub user_stack_pointer: VirtAddr,
	/// Last FPU state before a context switch to another task using the FPU
	#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
	pub last_fpu_state: arch::processor::FPUState,
	/// ID of the core this task is running on
	pub core_id: CoreId,
	/// Stack of the task
	pub stacks: TaskStacks,
	/// Mapping between file descriptor and the referenced IO interface
	pub object_map:
		Arc<async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>>,
	/// Task Thread-Local-Storage (TLS)
	#[cfg(not(feature = "common-os"))]
	pub tls: Option<Box<TaskTLS>>,
	// Physical address of the 1st level page table
	#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
	pub root_page_table: usize,
}

pub(crate) trait TaskFrame {
	/// Create the initial stack frame for a new task
	fn create_stack_frame(&mut self, func: unsafe extern "C" fn(usize), arg: usize);
}

impl Task {
	pub fn new(
		tid: TaskId,
		core_id: CoreId,
		task_status: TaskStatus,
		task_prio: Priority,
		stacks: TaskStacks,
		object_map: Arc<
			async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>,
		>,
	) -> Task {
		debug!("Creating new task {tid} on core {core_id}");

		Task {
			id: tid,
			status: task_status,
			prio: task_prio,
			last_stack_pointer: VirtAddr::zero(),
			user_stack_pointer: VirtAddr::zero(),
			#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
			last_fpu_state: arch::processor::FPUState::new(),
			core_id,
			stacks,
			object_map,
			#[cfg(not(feature = "common-os"))]
			tls: None,
			#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
			root_page_table: arch::create_new_root_page_table(),
		}
	}

	pub fn new_idle(tid: TaskId, core_id: CoreId) -> Task {
		debug!("Creating idle task {tid}");

		/// All cores use the same mapping between file descriptor and the referenced object
		static OBJECT_MAP: OnceCell<
			Arc<async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>>,
		> = OnceCell::new();

		if core_id == 0 {
			OBJECT_MAP
				.set(Arc::new(async_lock::RwLock::new(HashMap::<
					FileDescriptor,
					Arc<dyn ObjectInterface>,
					RandomState,
				>::with_hasher(
					RandomState::with_seeds(0, 0, 0, 0),
				))))
				.unwrap();
			let objmap = OBJECT_MAP.get().unwrap().clone();
			let _ = poll_on(async {
				let mut guard = objmap.write().await;
				if env::is_uhyve() {
					guard
						.try_insert(STDIN_FILENO, Arc::new(UhyveStdin::new()))
						.map_err(|_| io::Error::EIO)?;
					guard
						.try_insert(STDOUT_FILENO, Arc::new(UhyveStdout::new()))
						.map_err(|_| io::Error::EIO)?;
					guard
						.try_insert(STDERR_FILENO, Arc::new(UhyveStderr::new()))
						.map_err(|_| io::Error::EIO)?;
				} else {
					guard
						.try_insert(STDIN_FILENO, Arc::new(GenericStdin::new()))
						.map_err(|_| io::Error::EIO)?;
					guard
						.try_insert(STDOUT_FILENO, Arc::new(GenericStdout::new()))
						.map_err(|_| io::Error::EIO)?;
					guard
						.try_insert(STDERR_FILENO, Arc::new(GenericStderr::new()))
						.map_err(|_| io::Error::EIO)?;
				}

				Ok(())
			});
		}

		Task {
			id: tid,
			status: TaskStatus::Idle,
			prio: IDLE_PRIO,
			last_stack_pointer: VirtAddr::zero(),
			user_stack_pointer: VirtAddr::zero(),
			#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
			last_fpu_state: arch::processor::FPUState::new(),
			core_id,
			stacks: TaskStacks::from_boot_stacks(),
			object_map: OBJECT_MAP.get().unwrap().clone(),
			#[cfg(not(feature = "common-os"))]
			tls: None,
			#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
			root_page_table: *crate::scheduler::BOOT_ROOT_PAGE_TABLE.get().unwrap(),
		}
	}
}

/*impl Drop for Task {
	fn drop(&mut self) {
		debug!("Drop task {}", self.id);
	}
}*/

struct BlockedTask {
	task: Rc<RefCell<Task>>,
	wakeup_time: Option<u64>,
}

impl BlockedTask {
	pub fn new(task: Rc<RefCell<Task>>, wakeup_time: Option<u64>) -> Self {
		Self { task, wakeup_time }
	}
}

pub(crate) struct BlockedTaskQueue {
	list: LinkedList<BlockedTask>,
	#[cfg(any(feature = "tcp", feature = "udp"))]
	network_wakeup_time: Option<u64>,
}

impl BlockedTaskQueue {
	pub const fn new() -> Self {
		Self {
			list: LinkedList::new(),
			#[cfg(any(feature = "tcp", feature = "udp"))]
			network_wakeup_time: None,
		}
	}

	fn mark_ready(task: &RefCell<Task>) {
		let mut borrowed = task.borrow_mut();
		debug!(
			"Waking up task {} on core {}",
			borrowed.id, borrowed.core_id
		);

		assert!(
			borrowed.core_id == core_id(),
			"Trying to wake up task {} on the wrong core {} != {}",
			borrowed.id,
			borrowed.core_id,
			core_id()
		);

		assert!(
			borrowed.status == TaskStatus::Blocked,
			"Trying to wake up task {} which is not blocked",
			borrowed.id
		);
		borrowed.status = TaskStatus::Ready;
	}

	#[cfg(any(feature = "tcp", feature = "udp"))]
	pub fn add_network_timer(&mut self, wakeup_time: Option<u64>) {
		self.network_wakeup_time = wakeup_time;

		let next = self.list.front().and_then(|t| t.wakeup_time);

		let time = match (wakeup_time, next) {
			(Some(a), Some(b)) => Some(a.min(b)),
			(a, b) => a.or(b),
		};

		arch::set_oneshot_timer(time);
	}
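
	// For example, if the network stack asks to be polled at tick 500 while the
	// first blocked task wants to wake up at tick 300, the one-shot timer is
	// programmed for tick 300; if either side has no deadline, the other one wins.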

	/// Blocks the given task until the timer tick `wakeup_time` is reached, or
	/// indefinitely if `None` is given.
	pub fn add(&mut self, task: Rc<RefCell<Task>>, wakeup_time: Option<u64>) {
		{
			// Set the task status to Blocked.
			let mut borrowed = task.borrow_mut();
			debug!("Blocking task {}", borrowed.id);

			assert_eq!(
				borrowed.status,
				TaskStatus::Running,
				"Trying to block task {} which is not running",
				borrowed.id
			);
			borrowed.status = TaskStatus::Blocked;
		}

		let new_node = BlockedTask::new(task, wakeup_time);
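
		// The list is kept sorted by ascending wakeup time; tasks that block
		// indefinitely (no wakeup time) end up at the back.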

		// Should the task be woken up automatically at a certain time?
		if let Some(wt) = wakeup_time {
			let mut cursor = self.list.cursor_front_mut();
			let set_oneshot_timer = || {
				#[cfg(not(any(feature = "tcp", feature = "udp")))]
				arch::set_oneshot_timer(wakeup_time);
				#[cfg(any(feature = "tcp", feature = "udp"))]
				match self.network_wakeup_time {
					Some(time) => {
						if time > wt {
							arch::set_oneshot_timer(wakeup_time);
						} else {
							arch::set_oneshot_timer(self.network_wakeup_time);
						}
					}
					_ => arch::set_oneshot_timer(wakeup_time),
				}
			};

			while let Some(node) = cursor.current() {
				let node_wakeup_time = node.wakeup_time;
				if node_wakeup_time.is_none() || wt < node_wakeup_time.unwrap() {
					cursor.insert_before(new_node);

					set_oneshot_timer();
					return;
				}

				cursor.move_next();
			}

			set_oneshot_timer();
		}

		self.list.push_back(new_node);
	}

	/// Manually wake up a blocked task.
	pub fn custom_wakeup(&mut self, task: TaskHandle) -> Rc<RefCell<Task>> {
		let mut first_task = true;
		let mut cursor = self.list.cursor_front_mut();

		#[cfg(any(feature = "tcp", feature = "udp"))]
		if let Some(wakeup_time) = self.network_wakeup_time {
			if wakeup_time <= arch::processor::get_timer_ticks() {
				self.network_wakeup_time = None;
			}
		}

		// Loop through all blocked tasks to find it.
		while let Some(node) = cursor.current() {
			if node.task.borrow().id == task.get_id() {
				// Remove it from the list of blocked tasks.
				let task_ref = node.task.clone();
				cursor.remove_current();

				// If this is the first task, adjust the One-Shot Timer to fire at the
				// next task's wakeup time (if any).
				#[cfg(any(feature = "tcp", feature = "udp"))]
				if first_task {
					arch::set_oneshot_timer(cursor.current().map_or_else(
						|| self.network_wakeup_time,
						|node| match node.wakeup_time {
							Some(wt) => {
								if let Some(timer) = self.network_wakeup_time {
									if wt < timer { Some(wt) } else { Some(timer) }
								} else {
									Some(wt)
								}
							}
							None => self.network_wakeup_time,
						},
					));
				}
				#[cfg(not(any(feature = "tcp", feature = "udp")))]
				if first_task {
					arch::set_oneshot_timer(
						cursor
							.current()
							.map_or_else(|| None, |node| node.wakeup_time),
					);
				}

				// Wake it up.
				Self::mark_ready(&task_ref);

				return task_ref;
			}

			first_task = false;
			cursor.move_next();
		}

		unreachable!();
	}

	/// Wakes up all tasks whose wakeup time has elapsed.
	///
	/// Should be called by the One-Shot Timer interrupt handler when the wakeup time for
	/// at least one task has elapsed.
	pub fn handle_waiting_tasks(&mut self, ready_queue: &mut PriorityTaskQueue) {
		// Get the current time.
		let time = arch::processor::get_timer_ticks();

		#[cfg(any(feature = "tcp", feature = "udp"))]
		if let Some(mut guard) = crate::executor::network::NIC.try_lock() {
			if let crate::executor::network::NetworkState::Initialized(nic) = &mut *guard {
				let now = crate::executor::network::now();
				nic.poll_common(now);
				self.network_wakeup_time = nic.poll_delay(now).map(|d| d.total_micros() + time);
			}
		}

		// Get the wakeup time of each task and check if we have reached the first task
		// that hasn't elapsed yet or waits indefinitely.
		// This iterator has to be consumed to actually remove the elements.
		let newly_ready_tasks = self.list.extract_if(|blocked_task| {
			blocked_task
				.wakeup_time
				.is_some_and(|wakeup_time| wakeup_time < time)
		});

		for task in newly_ready_tasks {
			Self::mark_ready(&task.task);
			ready_queue.push(task.task);
		}

		let new_task_wakeup_time = self.list.front().and_then(|task| task.wakeup_time);
		cfg_if::cfg_if! {
			if #[cfg(any(feature = "tcp", feature = "udp"))] {
				let network_wakeup_time = self.network_wakeup_time;
			} else {
				let network_wakeup_time = None;
			}
		};
		let timer_wakeup_time = match (new_task_wakeup_time, network_wakeup_time) {
			(None, None) => None,
			(None, Some(network_wt)) => Some(network_wt),
			(Some(task_wt), None) => Some(task_wt),
			(Some(task_wt), Some(network_wt)) => Some(u64::min(task_wt, network_wt)),
		};

		arch::set_oneshot_timer(timer_wakeup_time);
	}
}