hermit/drivers/virtio/virtqueue/
mod.rs

//! This module contains Virtio's virtqueue.
//!
//! The virtqueue is available in two forms:
//! [split::SplitVq] and [packed::PackedVq].
//! Both implement the [Virtq] trait, which
//! provides a unified interface.
//!
//! Drivers that need more fine-grained access to a specific queue must
//! use the respective virtqueue structs directly.
#![allow(dead_code)]

pub mod packed;
pub mod split;

use alloc::boxed::Box;
use alloc::collections::vec_deque::VecDeque;
use alloc::vec::Vec;
use core::any::Any;
use core::mem::MaybeUninit;
use core::{mem, ptr};

use memory_addresses::VirtAddr;
use virtio::{le32, le64, pvirtq, virtq};

use self::error::VirtqError;
#[cfg(not(feature = "pci"))]
use super::transport::mmio::{ComCfg, NotifCfg};
#[cfg(feature = "pci")]
use super::transport::pci::{ComCfg, NotifCfg};
use crate::arch::mm::paging;
use crate::mm::device_alloc::DeviceAlloc;

/// A `u16` newtype. If instantiated via ``VqIndex::from(T)``, the value is guaranteed to be
/// less than or equal to `min(u16::MAX, T::MAX)`.
///
/// Currently implements `From<u16>` and `From<u32>`.
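///
/// A minimal sketch of the saturating conversion (illustrative only):
/// ```ignore
/// let idx = VqIndex::from(0x1_0000_u32); // exceeds u16::MAX
/// assert_eq!(u16::from(idx), u16::MAX);
/// ```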
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq, Eq)]
pub struct VqIndex(u16);

impl From<u16> for VqIndex {
	fn from(val: u16) -> Self {
		VqIndex(val)
	}
}

impl From<VqIndex> for u16 {
	fn from(i: VqIndex) -> Self {
		i.0
	}
}

impl From<u32> for VqIndex {
	fn from(val: u32) -> Self {
		if val > u32::from(u16::MAX) {
			VqIndex(u16::MAX)
		} else {
			VqIndex(val as u16)
		}
	}
}

/// A `u16` newtype. If instantiated via ``VqSize::from(T)``, the value is guaranteed to be
/// less than or equal to `min(u16::MAX, T::MAX)`.
///
/// Currently implements `From<u16>` and `From<u32>`.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq, Eq)]
pub struct VqSize(u16);

impl From<u16> for VqSize {
	fn from(val: u16) -> Self {
		VqSize(val)
	}
}

impl From<u32> for VqSize {
	fn from(val: u32) -> Self {
		if val > u32::from(u16::MAX) {
			VqSize(u16::MAX)
		} else {
			VqSize(val as u16)
		}
	}
}

impl From<VqSize> for u16 {
	fn from(val: VqSize) -> Self {
		val.0
	}
}

// Public interface of Virtq

/// The Virtq trait unifies access to the two different virtqueue types
/// [packed::PackedVq] and [split::SplitVq].
///
/// The trait provides a common interface for both types, which in some cases
/// might not expose the complete feature set of each queue. Drivers that
/// do need these features should refrain from providing support for both
/// virtqueue types and use the structs directly instead.
pub trait Virtq: Send {
	/// The `notif` parameter indicates whether the driver wants a notification for this specific
	/// transfer. This is only a performance optimization, as it is NOT guaranteed that the device sees the
	/// updated notification flags before finishing transfers!
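	///
	/// A minimal usage sketch (illustrative; `vq` is assumed to be some type implementing
	/// [Virtq], and the token construction is elided):
	/// ```ignore
	/// // Hand the buffer over to the device without requesting a notification.
	/// vq.dispatch(tkn, false, BufferType::Direct)?;
	/// ```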
	fn dispatch(
		&mut self,
		tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError>;

	/// Dispatches the provided [AvailBufferToken] to the respective queue and only
	/// returns when the queue has finished the transfer.
	///
	/// The returned [UsedBufferToken] can be copied from
	/// or return the underlying buffers.
	///
	/// **INFO:**
	/// Currently this function constantly polls the queue while keeping notifications disabled.
	/// Upon finish, notifications are enabled again.
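	///
	/// Illustrative blocking round trip (placeholder names):
	/// ```ignore
	/// let used = vq.dispatch_blocking(tkn, BufferType::Direct)?;
	/// let bytes = used.used_recv_buff.pop_front_vec();
	/// ```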
	fn dispatch_blocking(
		&mut self,
		tkn: AvailBufferToken,
		buffer_type: BufferType,
	) -> Result<UsedBufferToken, VirtqError> {
		self.dispatch(tkn, false, buffer_type)?;

		self.disable_notifs();

		let result: UsedBufferToken;
		// Keep spinning until the used ring contains the buffer we made available.
		loop {
			// TODO: normally, we should check if the used buffer in question is the one
			// we just made available. However, this shouldn't be a problem as the queue this
			// function is called on makes use of this blocking dispatch function exclusively
			// and thus dispatches cannot be interleaved.
			if let Ok(buffer_tkn) = self.try_recv() {
				result = buffer_tkn;
				break;
			}
		}

		self.enable_notifs();

		Ok(result)
	}

	/// Enables interrupts for this virtqueue upon receiving a transfer
	fn enable_notifs(&mut self);

	/// Disables interrupts for this virtqueue upon receiving a transfer
	fn disable_notifs(&mut self);

	/// Checks if new used descriptors have been written by the device.
	/// This activates the queue and polls the descriptor ring of the queue.
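	///
	/// Illustrative polling sketch (`handle_used_buffer` is a placeholder):
	/// ```ignore
	/// while let Ok(used) = vq.try_recv() {
	///     handle_used_buffer(used);
	/// }
	/// ```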
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError>;

	/// Dispatches a batch of [AvailBufferToken]s. The buffers are provided to the queue in
	/// sequence. After the last buffer has been written, the queue marks the first buffer as available and triggers
	/// a device notification if wanted by the device.
	///
	/// The `notif` parameter indicates whether the driver wants a notification for this specific
	/// transfer. This is only a performance optimization, as it is NOT guaranteed that the device sees the
	/// updated notification flags before finishing transfers!
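	///
	/// Illustrative sketch (placeholder tokens):
	/// ```ignore
	/// vq.dispatch_batch(
	///     vec![(tkn_a, BufferType::Direct), (tkn_b, BufferType::Indirect)],
	///     false,
	/// )?;
	/// ```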
	fn dispatch_batch(
		&mut self,
		tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError>;

	/// Dispatches a batch of [AvailBufferToken]s. The tokens will be placed into the await queue
	/// upon finish.
	///
	/// The `notif` parameter indicates whether the driver wants a notification for this specific
	/// transfer. This is only a performance optimization, as it is NOT guaranteed that the device sees the
	/// updated notification flags before finishing transfers!
	///
	/// The buffers are provided to the queue in
	/// sequence. After the last buffer has been written, the queue marks the first buffer as available and triggers
	/// a device notification if wanted by the device.
	fn dispatch_batch_await(
		&mut self,
		tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError>;

	/// Creates a new Virtq of the specified [VqSize] and [VqIndex].
	/// The index represents the "ID" of the virtqueue.
	/// Upon creation the virtqueue is "registered" at the device via the `ComCfg` struct.
	///
	/// Be aware that devices define a maximum number of queues and a maximum size they can handle.
	fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		size: VqSize,
		index: VqIndex,
		features: virtio::F,
	) -> Result<Self, VirtqError>
	where
		Self: Sized;

	/// Returns the size of a Virtqueue. This represents the overall size and not the capacity the
	/// queue currently has for new descriptors.
	fn size(&self) -> VqSize;

	/// Returns the index (ID) of a Virtqueue.
	fn index(&self) -> VqIndex;

	fn has_used_buffers(&self) -> bool;
}

/// These methods are an implementation detail and are meant only for consumption by the default method
/// implementations in [Virtq].
trait VirtqPrivate {
	type Descriptor: VirtqDescriptor;

	fn create_indirect_ctrl(
		buffer_tkn: &AvailBufferToken,
	) -> Result<Box<[Self::Descriptor]>, VirtqError>;

	fn indirect_desc(table: &[Self::Descriptor]) -> Self::Descriptor {
		Self::Descriptor::incomplete_desc(
			paging::virt_to_phys(VirtAddr::from_ptr(table.as_ptr()))
				.as_u64()
				.into(),
			(mem::size_of_val(table) as u32).into(),
			virtq::DescF::INDIRECT,
		)
	}

	/// Consumes the [AvailBufferToken] and returns a [TransferToken] that can be used to actually start the transfer.
	///
	/// After this call, the buffers are no longer writable.
	fn transfer_token_from_buffer_token(
		buff_tkn: AvailBufferToken,
		buffer_type: BufferType,
	) -> TransferToken<Self::Descriptor> {
		let ctrl_desc = match buffer_type {
			BufferType::Direct => None,
			BufferType::Indirect => Some(Self::create_indirect_ctrl(&buff_tkn).unwrap()),
		};

		TransferToken {
			buff_tkn,
			ctrl_desc,
		}
	}

	// The descriptors returned by the iterator will be incomplete, as they do not
	// have all the information necessary.
	fn descriptor_iter(
		buffer_tkn: &AvailBufferToken,
	) -> Result<impl DoubleEndedIterator<Item = Self::Descriptor>, VirtqError> {
		let send_desc_iter = buffer_tkn
			.send_buff
			.iter()
			.map(|elem| (elem, elem.len(), virtq::DescF::empty()));
		let recv_desc_iter = buffer_tkn
			.recv_buff
			.iter()
			.map(|elem| (elem, elem.capacity(), virtq::DescF::WRITE));
		let mut all_desc_iter =
			send_desc_iter
				.chain(recv_desc_iter)
				.map(|(mem_descr, len, incomplete_flags)| {
					Self::Descriptor::incomplete_desc(
						paging::virt_to_phys(VirtAddr::from_ptr(mem_descr.addr()))
							.as_u64()
							.into(),
						len.into(),
						incomplete_flags | virtq::DescF::NEXT,
					)
				});

		// The last descriptor of the chain must not carry the NEXT flag.
		let mut last_desc = all_desc_iter
			.next_back()
			.ok_or(VirtqError::BufferNotSpecified)?;
		*last_desc.flags_mut() -= virtq::DescF::NEXT;

		Ok(all_desc_iter.chain([last_desc]))
	}
}

trait VirtqDescriptor {
	fn flags_mut(&mut self) -> &mut virtq::DescF;

	fn incomplete_desc(addr: virtio::le64, len: virtio::le32, flags: virtq::DescF) -> Self;
}

impl VirtqDescriptor for virtq::Desc {
	fn flags_mut(&mut self) -> &mut virtq::DescF {
		&mut self.flags
	}

	fn incomplete_desc(addr: le64, len: le32, flags: virtq::DescF) -> Self {
		Self {
			addr,
			len,
			flags,
			next: 0.into(),
		}
	}
}

impl VirtqDescriptor for pvirtq::Desc {
	fn flags_mut(&mut self) -> &mut virtq::DescF {
		&mut self.flags
	}

	fn incomplete_desc(addr: le64, len: le32, flags: virtq::DescF) -> Self {
		Self {
			addr,
			len,
			flags,
			id: 0.into(),
		}
	}
}

/// The struct represents buffers which are ready to be sent via the
/// virtqueue. Buffers can no longer be written to or retrieved.
pub struct TransferToken<Descriptor> {
	/// The buffers to be transferred.
	buff_tkn: AvailBufferToken,
	/// Contains the descriptor table if the transfer is indirect.
	ctrl_desc: Option<Box<[Descriptor]>>,
}

/// Public Interface for TransferToken
impl<Descriptor> TransferToken<Descriptor> {
	/// Returns the number of descriptors that will be placed in the queue.
	/// This number can differ from the [AvailBufferToken::num_descr] value,
	/// as indirect buffers only consume one descriptor in the queue, but can have
	/// more descriptors that are accessible via the descriptor in the queue.
	fn num_consuming_descr(&self) -> u16 {
		if self.ctrl_desc.is_some() {
			1
		} else {
			self.buff_tkn.num_descr()
		}
	}
}

#[derive(Debug)]
pub enum BufferElem {
	Sized(Box<dyn Any + Send, DeviceAlloc>),
	Vector(Vec<u8, DeviceAlloc>),
}

impl BufferElem {
	// Returns the initialized length of the element. Assumes [Self::Sized] to
	// be initialized, since the type of the object is erased and we cannot
	// detect if the content is actually a [MaybeUninit]. However, this function
	// should only be relevant for read buffer elements, which should not be uninit.
	// If the element belongs to a write buffer, it is likely that [Self::capacity]
	// is more appropriate.
	pub fn len(&self) -> u32 {
		match self {
			BufferElem::Sized(sized) => mem::size_of_val(sized.as_ref()),
			BufferElem::Vector(vec) => vec.len(),
		}
		.try_into()
		.unwrap()
	}

	pub fn capacity(&self) -> u32 {
		match self {
			BufferElem::Sized(sized) => mem::size_of_val(sized.as_ref()),
			BufferElem::Vector(vec) => vec.capacity(),
		}
		.try_into()
		.unwrap()
	}

	pub fn addr(&self) -> *const u8 {
		match self {
			BufferElem::Sized(sized) => ptr::from_ref(sized.as_ref()).cast::<u8>(),
			BufferElem::Vector(vec) => vec.as_ptr(),
		}
	}
}

/// The struct represents buffers which are ready to be written to or to be sent.
///
/// BufferTokens can be written in two ways:
/// * in one step via `BufferToken::write()`
///   * consumes the BufferToken and returns a TransferToken
/// * sequentially via `BufferToken::write_seq()`
///
/// # Structure of the Token
/// The token can potentially hold both a *send* and a *recv* buffer, but MUST hold
/// at least one.
/// The *send* buffer is the data the device will read during a transfer, the *recv* buffer
/// is the data the device will write to during a transfer.
///
/// # What are Buffers
/// A buffer represents multiple chunks of memory, where each chunk can be of a different size.
/// The chunks are called descriptors in the following.
///
/// **For Example:**
/// A buffer could consist of 3 descriptors:
/// 1. First descriptor of 30 bytes
/// 2. Second descriptor of 10 bytes
/// 3. Third descriptor of 100 bytes
///
/// Each of these descriptors consumes one "element" of the
/// respective virtqueue.
/// The maximum number of descriptors per buffer is bounded by the size of the virtqueue.
pub struct AvailBufferToken {
	pub(crate) send_buff: Vec<BufferElem>,
	pub(crate) recv_buff: Vec<BufferElem>,
}

pub(crate) struct UsedDeviceWritableBuffer {
	elems: VecDeque<BufferElem>,
	remaining_written_len: u32,
}

impl UsedDeviceWritableBuffer {
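	/// Pops the first element and tries to downcast it to a `Box<T>`.
	///
	/// Returns `None` if the first element is not a [BufferElem::Sized] of type `T`
	/// or if the device has not written enough bytes to cover a complete `T`.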
	pub fn pop_front_downcast<T>(&mut self) -> Option<Box<T, DeviceAlloc>>
	where
		T: Any,
	{
		if self.remaining_written_len < u32::try_from(size_of::<T>()).unwrap() {
			return None;
		}

		let elem = self.elems.pop_front()?;
		if let BufferElem::Sized(sized) = elem {
			match sized.downcast::<MaybeUninit<T>>() {
				Ok(cast) => {
					self.remaining_written_len -= u32::try_from(size_of::<T>()).unwrap();
					Some(unsafe { cast.assume_init() })
				}
				Err(sized) => {
					self.elems.push_front(BufferElem::Sized(sized));
					None
				}
			}
		} else {
			self.elems.push_front(elem);
			None
		}
	}

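	/// Pops the first element and returns it as a byte vector.
	///
	/// Returns `None` if the first element is not a [BufferElem::Vector]. The length of
	/// the returned vector is capped by the number of bytes the device has written.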
	pub fn pop_front_vec(&mut self) -> Option<Vec<u8, DeviceAlloc>> {
		let elem = self.elems.pop_front()?;
		if let BufferElem::Vector(mut vector) = elem {
			let new_len = u32::min(
				vector.capacity().try_into().unwrap(),
				self.remaining_written_len,
			);
			self.remaining_written_len -= new_len;
			unsafe { vector.set_len(new_len.try_into().unwrap()) };
			Some(vector)
		} else {
			self.elems.push_front(elem);
			None
		}
	}
}

pub(crate) struct UsedBufferToken {
	pub send_buff: Vec<BufferElem>,
	pub used_recv_buff: UsedDeviceWritableBuffer,
}

impl UsedBufferToken {
	fn from_avail_buffer_token(tkn: AvailBufferToken, written_len: u32) -> Self {
		Self {
			send_buff: tkn.send_buff,
			used_recv_buff: UsedDeviceWritableBuffer {
				elems: tkn.recv_buff.into(),
				remaining_written_len: written_len,
			},
		}
	}
}

// Private interface of AvailBufferToken
impl AvailBufferToken {
	/// Returns the overall number of descriptors.
	fn num_descr(&self) -> u16 {
		u16::try_from(self.send_buff.len() + self.recv_buff.len()).unwrap()
	}
}

// Public interface of AvailBufferToken
impl AvailBufferToken {
	/// **Parameters**
	/// * send: The slices that will make up the elements of the driver-writable buffer.
	/// * recv: The slices that will make up the elements of the device-writable buffer.
	///
	/// **Reasons for Failure:**
	/// * Both `send` and `recv` are empty, which is not allowed by Virtio.
	///
	/// **Note:** If one wants to have a structure in the style of:
	/// ```
	/// struct send_recv_struct {
	///     // send_part: ...
	///     // recv_part: ...
	/// }
	/// ```
	/// it must be split after the send part, with the send part provided via the `send`
	/// argument and the other part via the `recv` argument.
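	///
	/// A minimal construction sketch (illustrative; assumes `request` and `response_buf`
	/// are `Vec<u8, DeviceAlloc>` allocations):
	/// ```ignore
	/// let tkn = AvailBufferToken::new(
	///     vec![BufferElem::Vector(request)],
	///     vec![BufferElem::Vector(response_buf)],
	/// )?;
	/// ```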
	pub fn new(send_buff: Vec<BufferElem>, recv_buff: Vec<BufferElem>) -> Result<Self, VirtqError> {
		if send_buff.is_empty() && recv_buff.is_empty() {
			return Err(VirtqError::BufferNotSpecified);
		}

		Ok(Self {
			send_buff,
			recv_buff,
		})
	}
}

pub enum BufferType {
	/// As many descriptors get consumed in the descriptor table as the sum of the numbers of slices in [AvailBufferToken::send_buff] and [AvailBufferToken::recv_buff].
	Direct,
	/// Results in one descriptor in the queue, hence consumes one element in the main descriptor table. The queue will merge the send and recv buffers as follows:
	/// ```text
	/// +++++++++++++++++++++++
	/// +        Queue        +
	/// +++++++++++++++++++++++
	/// + Indirect descriptor + -> refers to a descriptor list in the form of ->  ++++++++++++++++++++++++++
	/// +         ...         +                                                   +  Descriptors for send  +
	/// +++++++++++++++++++++++                                                   +  Descriptors for recv  +
	///                                                                           ++++++++++++++++++++++++++
	/// ```
	/// As a result, indirect descriptors consume only a single descriptor in the actual queue.
	Indirect,
}

/// A newtype for descriptor ids, for better readability.
#[derive(Clone, Copy)]
struct MemDescrId(pub u16);

/// MemPool allows easy control, request, and provision of memory for virtqueues.
///
/// The struct is initialized with a limit of freely usable "tracked"
/// memory descriptor ids. As virtqueues only allow a limited number of descriptors in their queue,
/// the independent queues can control their number of descriptors via this pool.
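///
/// Illustrative sketch of the id lifecycle (internal API; `queue_size` is a placeholder):
/// ```ignore
/// let mut pool = MemPool::new(queue_size);
/// let id = pool.pool.pop().unwrap(); // take a free descriptor id
/// pool.ret_id(id); // hand it back once the descriptor is no longer in use
/// ```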
struct MemPool {
	pool: Vec<MemDescrId>,
	limit: u16,
}

impl MemPool {
	/// Returns a given id to the id pool
	fn ret_id(&mut self, id: MemDescrId) {
		self.pool.push(id);
	}

	/// Returns a new instance, with a pool of the specified size.
	fn new(size: u16) -> MemPool {
		MemPool {
			pool: (0..size).map(MemDescrId).collect(),
			limit: size,
		}
	}
}

/// Virtqueue error module.
///
/// This module unifies errors provided to users of a virtqueue, independent of the underlying
/// virtqueue implementation, realized via the different enum variants.
pub mod error {
	use crate::io;

	// Internal error handling for buffers
	#[derive(Debug)]
	pub enum BufferError {
		WriteToLarge,
		ToManyWrites,
	}

	// External error handling for users of the virtqueue.
	pub enum VirtqError {
		General,
		/// Call to create a BufferToken or TransferToken without
		/// any buffers to be inserted
		BufferNotSpecified,
		/// Selected queue does not exist or
		/// is not known to the device and hence cannot be used
		QueueNotExisting(u16),
		/// Signals that the queue does not have any free descriptors
		/// left.
		/// Typically this means that the driver either has to provide
		/// "unsent" `TransferToken`s to the queue (see docs for details)
		/// or the device needs to process available descriptors in the queue.
		NoDescrAvail,
		/// Indicates that a Bytes::new() call failed or generally that a buffer is too large to
		/// be transferred as one. The maximum size is `u32::MAX`. This also is the maximum for indirect
		/// descriptors (both the one placed in the queue, as well as the ones the indirect descriptor is
		/// referring to).
		BufferToLarge,
		QueueSizeNotAllowed(u16),
		FeatureNotSupported(virtio::F),
		AllocationError,
		IncompleteWrite,
		NoNewUsed,
	}

	impl core::fmt::Debug for VirtqError {
		fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
			match self {
				VirtqError::General => write!(f, "Virtq failure due to unknown reasons!"),
				VirtqError::BufferNotSpecified => {
					write!(f, "Virtq detected creation of Token, without a BuffSpec")
				}
				VirtqError::QueueNotExisting(_) => {
					write!(f, "Virtq does not exist and can not be used!")
				}
				VirtqError::NoDescrAvail => write!(f, "Virtqs memory pool is exhausted!"),
				VirtqError::BufferToLarge => {
					write!(f, "Buffer too large for queue! u32::MAX exceeded.")
				}
				VirtqError::QueueSizeNotAllowed(_) => {
					write!(f, "The requested queue size is not valid.")
				}
				VirtqError::FeatureNotSupported(_) => {
					write!(f, "An unsupported feature was requested from the queue.")
				}
				VirtqError::AllocationError => write!(
					f,
					"An error was encountered during the allocation of the queue structures."
				),
				VirtqError::IncompleteWrite => {
					write!(f, "A sized object was partially initialized.")
				}
				VirtqError::NoNewUsed => {
					write!(f, "The queue does not contain any new used buffers.")
				}
			}
		}
	}

	impl core::convert::From<VirtqError> for io::Error {
		fn from(_: VirtqError) -> Self {
			io::Error::EIO
		}
	}
}