hermit/drivers/virtio/virtqueue/packed.rs

//! This module contains Virtio's packed virtqueue.
//! See Virtio specification v1.1. - 2.7
#![allow(dead_code)]

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::sync::atomic::{Ordering, fence};
use core::{ops, ptr};

use align_address::Align;
use memory_addresses::VirtAddr;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
	AvailBufferToken, BufferType, MemDescrId, MemPool, TransferToken, UsedBufferToken, Virtq,
	VirtqPrivate, VqIndex, VqSize,
};
use crate::arch::mm::paging;
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

#[derive(Default, PartialEq, Eq, Clone, Copy, Debug)]
struct RingIdx {
	off: u16,
	wrap: u8,
}

trait RingIndexRange {
	fn wrapping_contains(&self, item: &RingIdx) -> bool;
}

impl RingIndexRange for ops::Range<RingIdx> {
	fn wrapping_contains(&self, item: &RingIdx) -> bool {
		let ops::Range { start, end } = self;

		if start.wrap == end.wrap {
			item.wrap == start.wrap && start.off <= item.off && item.off < end.off
		} else if item.wrap == start.wrap {
			start.off <= item.off
		} else {
			debug_assert!(item.wrap == end.wrap);
			item.off < end.off
		}
	}
}
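
// The following test module is an illustrative sketch and not part of the original
// driver: it demonstrates the intended semantics of `wrapping_contains` for a range
// that spans a wrap of the ring. The module name, test name, and the offset/wrap
// values are arbitrary examples; running it would require a test harness for this crate.
#[cfg(test)]
mod ring_idx_range_tests {
	use super::*;

	#[test]
	fn contains_across_wrap() {
		// Range from offset 3 (wrap 0) up to, but excluding, offset 2 (wrap 1).
		let range = RingIdx { off: 3, wrap: 0 }..RingIdx { off: 2, wrap: 1 };

		// Before the wrap: offsets >= 3 with wrap counter 0 are inside.
		assert!(range.wrapping_contains(&RingIdx { off: 3, wrap: 0 }));
		assert!(range.wrapping_contains(&RingIdx { off: 7, wrap: 0 }));
		// After the wrap: offsets below 2 with wrap counter 1 are inside.
		assert!(range.wrapping_contains(&RingIdx { off: 1, wrap: 1 }));
		// The end index itself is excluded.
		assert!(!range.wrapping_contains(&RingIdx { off: 2, wrap: 1 }));
	}
}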

/// A newtype of bool used for convenience in the context of the
/// packed queue's wrap counter.
///
/// For more details see Virtio specification v1.1. - 2.7.1
#[derive(Copy, Clone, Debug)]
struct WrapCount(bool);

impl WrapCount {
	/// Masks all other bits, besides the wrap count specific ones.
	fn flag_mask() -> virtq::DescF {
		virtq::DescF::AVAIL | virtq::DescF::USED
	}

	/// Returns a new WrapCount struct initialized to true or 1.
	///
	/// See virtio specification v1.1. - 2.7.1
	fn new() -> Self {
		WrapCount(true)
	}

	/// Toggles a given wrap count to the respective other value.
	///
	/// If WrapCount(true) returns WrapCount(false),
	/// if WrapCount(false) returns WrapCount(true).
	fn wrap(&mut self) {
		self.0 = !self.0;
	}
}
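
// Illustrative sketch only, not from the original driver: a tiny test showing that a
// fresh wrap counter starts at 1 (true) per Virtio specification v1.1. - 2.7.1 and that
// `wrap` toggles it. Module and test names are arbitrary; running it would require a
// test harness for this crate.
#[cfg(test)]
mod wrap_count_tests {
	use super::*;

	#[test]
	fn new_is_true_and_wrap_toggles() {
		let mut wc = WrapCount::new();
		assert!(wc.0);
		wc.wrap();
		assert!(!wc.0);
		wc.wrap();
		assert!(wc.0);
	}
}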

/// Structure which allows controlling the raw ring and operating on it easily
struct DescriptorRing {
	ring: Box<[pvirtq::Desc], DeviceAlloc>,
	tkn_ref_ring: Box<[Option<Box<TransferToken<pvirtq::Desc>>>]>,

	// Controlling variables for the ring
	//
	/// Where to insert available descriptors next
	write_index: u16,
	/// How many descriptors can be inserted
	capacity: u16,
	/// Where to expect the next used descriptor by the device
	poll_index: u16,
	/// See Virtio specification v1.1. - 2.7.1
	drv_wc: WrapCount,
	dev_wc: WrapCount,
	/// Memory pool controls the amount of "free floating" descriptors
	/// See [MemPool] docs for detail.
	mem_pool: MemPool,
}

impl DescriptorRing {
	fn new(size: u16) -> Self {
		let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

		// `Box` is not Clone, so neither is `None::<Box<_>>`. Hence, we need to produce `None`s with a closure.
		let tkn_ref_ring = core::iter::repeat_with(|| None)
			.take(size.into())
			.collect::<Vec<_>>()
			.into_boxed_slice();

		DescriptorRing {
			ring,
			tkn_ref_ring,
			write_index: 0,
			capacity: size,
			poll_index: 0,
			drv_wc: WrapCount::new(),
			dev_wc: WrapCount::new(),
			mem_pool: MemPool::new(size),
		}
	}

	/// Polls the poll index and converts any finished [TransferToken] into a [UsedBufferToken].
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		let mut ctrl = self.get_read_ctrler();

		ctrl.poll_next()
			.map(|(tkn, written_len)| {
				UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
			})
			.ok_or(VirtqError::NoNewUsed)
	}

	fn push_batch(
		&mut self,
		tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
	) -> Result<RingIdx, VirtqError> {
		// The first token is handled separately: its descriptor is written first but
		// only made available after all other tokens, so its settings are kept around.

		let first_ctrl_settings;
		let first_buffer;
		let mut ctrl;

		let mut tkn_iterator = tkn_lst.into_iter();
		if let Some(first_tkn) = tkn_iterator.next() {
			ctrl = self.push_without_making_available(&first_tkn)?;
			first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
			first_buffer = Some(Box::new(first_tkn));
		} else {
			// Empty batches are an error
			return Err(VirtqError::BufferNotSpecified);
		}
		// Push the remaining tokens (if any)
		for tkn in tkn_iterator {
			ctrl.make_avail(Box::new(tkn));
		}

		// Manually make the first buffer available last
		//
		// Providing the first buffer in the list manually
		self.make_avail_with_state(
			first_buffer.unwrap(),
			first_ctrl_settings.0,
			first_ctrl_settings.1,
			first_ctrl_settings.2,
		);
		Ok(RingIdx {
			off: self.write_index,
			wrap: self.drv_wc.0.into(),
		})
	}

	fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<RingIdx, VirtqError> {
		self.push_batch([tkn])
	}

	fn push_without_making_available(
		&mut self,
		tkn: &TransferToken<pvirtq::Desc>,
	) -> Result<WriteCtrl<'_>, VirtqError> {
		if tkn.num_consuming_descr() > self.capacity {
			return Err(VirtqError::NoDescrAvail);
		}

		// Create a counter that wraps to the first element
		// after reaching the end of the ring
		let mut ctrl = self.get_write_ctrler()?;

		// The important points here are:
		// * distinguish between indirect and direct buffers
		// * make them available in the right order (the first descriptor last) (VIRTIO Spec. v1.2 section 2.8.6)

		// The buffer uses indirect descriptors if the ctrl_desc field is Some.
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
			ctrl.write_desc(desc);
		} else {
			for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
				ctrl.write_desc(incomplete_desc);
			}
		}
		Ok(ctrl)
	}

	/// # Unsafe
	/// Returns the memory address of the first element of the descriptor ring
	fn raw_addr(&self) -> usize {
		self.ring.as_ptr() as usize
	}

	/// Returns an initialized write controller in order
	/// to write the queue correctly.
	fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
		let desc_id = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
		Ok(WriteCtrl {
			start: self.write_index,
			position: self.write_index,
			modulo: u16::try_from(self.ring.len()).unwrap(),
			first_flags: DescF::empty(),
			buff_id: desc_id,

			desc_ring: self,
		})
	}

	/// Returns an initialized read controller in order
	/// to read the queue correctly.
	fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
		ReadCtrl {
			position: self.poll_index,
			modulo: u16::try_from(self.ring.len()).unwrap(),

			desc_ring: self,
		}
	}

	fn make_avail_with_state(
		&mut self,
		raw_tkn: Box<TransferToken<pvirtq::Desc>>,
		start: u16,
		buff_id: MemDescrId,
		first_flags: DescF,
	) {
		// Store the token reference, so it can be retrieved once the device marks the buffer as used.
		self.tkn_ref_ring[usize::from(buff_id.0)] = Some(raw_tkn);
		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
		// See Virtio specification v1.1. - 2.7.21
		fence(Ordering::SeqCst);
		self.ring[usize::from(start)].flags = first_flags;
	}

	/// Returns the [DescF] with the avail and used flags set in accordance
	/// with the VIRTIO specification v1.2 - 2.8.1 (i.e. avail flag set to match
	/// the driver WrapCount and the used flag set to NOT match the WrapCount).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. device wrap counter) by accident.
	///
	/// A copy of the flags is taken instead of a mutable reference
	/// for the cases in which the modification of the flags needs to be
	/// deferred (e.g. batched dispatches, chained buffers).
	fn to_marked_avail(&self, mut flags: DescF) -> DescF {
		flags.set(virtq::DescF::AVAIL, self.drv_wc.0);
		flags.set(virtq::DescF::USED, !self.drv_wc.0);
		flags
	}

	/// Checks the avail and used flags to see if the descriptor is marked
	/// as used by the device in accordance with the
	/// VIRTIO specification v1.2 - 2.8.1 (i.e. they match the device WrapCount).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. driver wrap counter) by accident.
	fn is_marked_used(&self, flags: DescF) -> bool {
		if self.dev_wc.0 {
			flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
		} else {
			!flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
		}
	}
}
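
// Illustrative sketch only, not part of the original driver: standalone helpers mirroring
// the logic of `to_marked_avail` and `is_marked_used` to show how the AVAIL/USED flag pair
// encodes the wrap counters (VIRTIO specification v1.2 - 2.8.1). Module, helper, and test
// names are arbitrary; running it would require a test harness for this crate.
#[cfg(test)]
mod wrap_flag_tests {
	use super::*;

	// Mirrors `DescriptorRing::to_marked_avail` for a given driver wrap counter.
	fn marked_avail(mut flags: DescF, drv_wc: bool) -> DescF {
		flags.set(DescF::AVAIL, drv_wc);
		flags.set(DescF::USED, !drv_wc);
		flags
	}

	// Mirrors `DescriptorRing::is_marked_used` for a given device wrap counter.
	fn marked_used(flags: DescF, dev_wc: bool) -> bool {
		if dev_wc {
			flags.contains(DescF::AVAIL | DescF::USED)
		} else {
			!flags.intersects(DescF::AVAIL | DescF::USED)
		}
	}

	#[test]
	fn avail_becomes_used_only_after_device_flips_used() {
		// In the first ring pass both wrap counters are 1 (true). Making a descriptor
		// available sets AVAIL to the wrap counter and USED to its inverse.
		let avail = marked_avail(DescF::empty(), true);
		assert!(avail.contains(DescF::AVAIL) && !avail.contains(DescF::USED));
		// The device has not used the descriptor yet: AVAIL and USED differ.
		assert!(!marked_used(avail, true));
		// Once the device sets USED equal to AVAIL, the descriptor counts as used.
		assert!(marked_used(avail | DescF::USED, true));
	}
}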

struct ReadCtrl<'a> {
	/// Poll index of the ring at init of ReadCtrl
	position: u16,
	modulo: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
	/// Polls the ring for a new finished buffer. If the buffer is marked as finished, takes care of
	/// updating the queue and returns the respective TransferToken.
	fn poll_next(&mut self) -> Option<(Box<TransferToken<pvirtq::Desc>>, u32)> {
		// Check if descriptor has been marked used.
		let desc = &self.desc_ring.ring[usize::from(self.position)];
		if self.desc_ring.is_marked_used(desc.flags) {
			let buff_id = desc.id.to_ne();
			let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
				.take()
				.expect(
					"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
				);

			// Check whether anything has been written to the queue. If this is the case, we calculate the overall length.
			// This is necessary in order to provide the drivers with correct access to the usable data.
			//
			// According to the standard the device signals solely via the first written descriptor if anything has been written to
			// the write descriptors of a buffer.
			// See Virtio specification v1.1. - 2.7.4
			//                                - 2.7.5
			//                                - 2.7.6
			// let mut write_len = if self.desc_ring.ring[self.position].flags & DescrFlags::VIRTQ_DESC_F_WRITE == DescrFlags::VIRTQ_DESC_F_WRITE {
			//      self.desc_ring.ring[self.position].len
			//  } else {
			//      0
			//  };
			//
			// INFO:
			// Due to the behavior of the currently used devices and the virtio code from the Linux kernel, we assume that devices do NOT set this
			// flag correctly upon writes. Hence we omit the check, in order to receive data.

			// We need to read the written length before advancing the position.
			let write_len = desc.len.to_ne();

			for _ in 0..tkn.num_consuming_descr() {
				self.incrmt();
			}
			self.desc_ring.mem_pool.ret_id(MemDescrId(buff_id));

			Some((tkn, write_len))
		} else {
			None
		}
	}

	fn incrmt(&mut self) {
		if self.desc_ring.poll_index + 1 == self.modulo {
			self.desc_ring.dev_wc.wrap();
		}

		// Increment capacity as we have one more free slot now!
		assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
		self.desc_ring.capacity += 1;

		self.desc_ring.poll_index = (self.desc_ring.poll_index + 1) % self.modulo;
		self.position = self.desc_ring.poll_index;
	}
}

/// Convenience struct that allows writing descriptors into the queue.
/// The struct takes care of updating the state of the queue correctly and of
/// writing the correct flags.
struct WriteCtrl<'a> {
	/// Where the write of the buffer started in the descriptor ring.
	/// This is important, as we must make this descriptor available
	/// last.
	start: u16,
	/// Where to write next. This should always be equal to the ring's
	/// write_index field.
	position: u16,
	modulo: u16,
	/// The [pvirtq::Desc::flags] value for the first descriptor, the write of which is deferred.
	first_flags: DescF,
	/// Buff ID of this write
	buff_id: MemDescrId,

	desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
	/// **This function MUST only be used within the WriteCtrl.write_desc() function!**
	///
	/// Increments the index by one. The index wraps around to zero after
	/// reaching (modulo - 1).
	///
	/// Also takes care of wrapping the WrapCount of the associated
	/// DescriptorRing.
	fn incrmt(&mut self) {
		// First check if we are at all allowed to write a descriptor
		assert!(self.desc_ring.capacity != 0);
		self.desc_ring.capacity -= 1;
		// Check if the increment wrapped around the end of the ring;
		// if so, also wrap the wrap counter.
		if self.position + 1 == self.modulo {
			self.desc_ring.drv_wc.wrap();
		}
		// Also update the write_index
		self.desc_ring.write_index = (self.desc_ring.write_index + 1) % self.modulo;

		self.position = (self.position + 1) % self.modulo;
	}

	/// Completes the descriptor flags and id, and writes into the queue at the correct position.
	fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
		incomplete_desc.id = self.buff_id.0.into();
		if self.start == self.position {
			// We save what the flags value for the first descriptor will be to be able
			// to write it later when all the other descriptors are written (so that
			// the device does not see an incomplete chain).
			self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		} else {
			// Set avail and used according to the current WrapCount.
			incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		}
		self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
		self.incrmt();
	}

	fn make_avail(&mut self, raw_tkn: Box<TransferToken<pvirtq::Desc>>) {
		// We fail if one wants to make a buffer available without having inserted a single element!
		assert!(self.start != self.position);
		self.desc_ring
			.make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
	}
}

/// A newtype in order to implement the correct functionality upon
/// the `EventSuppr` structure for driver notification settings.
/// The Driver Event Suppression structure is read-only by the device
/// and controls the used buffer notifications sent by the device to the driver.
struct DrvNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure to write to, in order to control the device's used buffer notifications
	raw: &'static mut pvirtq::EventSuppress,
}

/// A newtype in order to implement the correct functionality upon
/// the `EventSuppr` structure for device notification settings.
/// The Device Event Suppression structure is read-only by the driver
/// and controls the available buffer notifications sent by the driver to the device.
struct DevNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure to read from, in order to check if the device wants notifications
	raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
	/// Enables notifications by unsetting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn enable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
	}

	/// Disables notifications by setting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn disable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
	}

	/// Enables a notification by the device for a specific descriptor.
	fn enable_specific(&mut self, idx: RingIdx) {
		// Check if VIRTIO_F_RING_EVENT_IDX has been negotiated
		if self.f_notif_idx {
			self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
			self.raw.desc = EventSuppressDesc::new()
				.with_desc_event_off(idx.off)
				.with_desc_event_wrap(idx.wrap);
		}
	}
}

impl DevNotif {
	/// Enables the notification capability for a specific buffer.
	pub fn enable_notif_specific(&mut self) {
		self.f_notif_idx = true;
	}

	/// Reads the notification bit (i.e. LSB) and returns its value.
	/// If notifications are enabled returns true, else false.
	fn is_notif(&self) -> bool {
		self.raw.flags.desc_event_flags() == RingEventFlags::Enable
	}

	fn notif_specific(&self) -> Option<RingIdx> {
		if !self.f_notif_idx {
			return None;
		}

		if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
			return None;
		}

		let off = self.raw.desc.desc_event_off();
		let wrap = self.raw.desc.desc_event_wrap();

		Some(RingIdx { off, wrap })
	}
}

/// Packed virtqueue which provides the functionality as described in the
/// virtio specification v1.1. - 2.7
pub struct PackedVq {
	/// Ring which allows easy access to the raw ring structure of the
	/// specification
	descr_ring: DescriptorRing,
	/// Allows telling the device if notifications are wanted
	drv_event: DrvNotif,
	/// Allows checking if the device wants a notification
	dev_event: DevNotif,
	/// Used to actually notify the device about available buffers
	notif_ctrl: NotifCtrl,
	/// The size of the queue, equals the number of descriptors which can
	/// be used
	size: VqSize,
	/// The virtqueue's index. This identifies the virtqueue to the
	/// device and is unique on a per device basis.
	index: VqIndex,
	last_next: Cell<RingIdx>,
}

// Public interface of PackedVq
// This interface is also public in order to allow people to use the PackedVq directly!
impl Virtq for PackedVq {
	fn enable_notifs(&mut self) {
		self.drv_event.enable_notif();
	}

	fn disable_notifs(&mut self) {
		self.drv_event.disable_notif();
	}

	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.descr_ring.try_recv()
	}

	fn dispatch_batch(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		// Notify the device if it requested a notification for a specific descriptor
		// index that lies within the range we just made available.
		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch_batch_await(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.descr_ring.push(transfer_tkn)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let notif_specific = self.dev_event.notif_specific() == Some(self.last_next.get());

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn index(&self) -> VqIndex {
		self.index
	}

	fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		size: VqSize,
		index: VqIndex,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Currently we do not have support for in-order use.
		// This stems from the fact that the PackedVq ReadCtrl is currently not
		// able to derive other finished transfers from a used-buffer notification.
		// In order to allow this, the queue MUST track the sequence in which
		// TransferTokens are inserted into the queue. Furthermore, the queue should
		// carry a feature u64 in order to check which features are currently in use
		// and adjust its ReadCtrl accordingly.
		if features.contains(virtio::F::IN_ORDER) {
			info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
			return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
		}

		// Get a handler to the queue's configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index.into()) else {
			return Err(VirtqError::QueueNotExisting(index.into()));
		};

		// Must catch a size of zero, as it is not allowed for packed queues.
		// Must catch sizes larger than 0x8000 (2^15), as they are not allowed for packed queues.
		//
		// See Virtio specification v1.1. - 4.1.4.3.2
		let vq_size = if (size.0 == 0) || (size.0 > 0x8000) {
			return Err(VirtqError::QueueSizeNotAllowed(size.0));
		} else {
			vq_handler.set_vq_size(size.0)
		};

		let descr_ring = DescriptorRing::new(vq_size);
		// Allocate page-aligned memory for the driver and device event suppression structures.
		let _mem_len =
			core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

		let drv_event_ptr =
			ptr::with_exposed_provenance_mut(crate::mm::allocate(_mem_len, true).as_usize());
		let dev_event_ptr =
			ptr::with_exposed_provenance_mut(crate::mm::allocate(_mem_len, true).as_usize());

		// Provide memory areas of the queue's data structures to the device
		vq_handler.set_ring_addr(paging::virt_to_phys(VirtAddr::from(
			descr_ring.raw_addr() as u64
		)));
		// Casting to usize is safe here, as the *mut EventSuppress raw pointer is a thin pointer of size usize
		vq_handler.set_drv_ctrl_addr(paging::virt_to_phys(VirtAddr::from(drv_event_ptr as u64)));
		vq_handler.set_dev_ctrl_addr(paging::virt_to_phys(VirtAddr::from(dev_event_ptr as u64)));

		let drv_event: &'static mut pvirtq::EventSuppress = unsafe { &mut *(drv_event_ptr) };

		let dev_event: &'static mut pvirtq::EventSuppress = unsafe { &mut *(dev_event_ptr) };

		let mut drv_event = DrvNotif {
			f_notif_idx: false,
			raw: drv_event,
		};

		let dev_event = DevNotif {
			f_notif_idx: false,
			raw: dev_event,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		if features.contains(virtio::F::EVENT_IDX) {
			drv_event.f_notif_idx = true;
		}

		vq_handler.enable_queue();

		info!("Created PackedVq: idx={}, size={}", index.0, vq_size);

		Ok(PackedVq {
			descr_ring,
			drv_event,
			dev_event,
			notif_ctrl,
			size: VqSize::from(vq_size),
			index,
			last_next: Cell::default(),
		})
	}

	fn size(&self) -> VqSize {
		self.size
	}

	fn has_used_buffers(&self) -> bool {
		let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index)];
		self.descr_ring.is_marked_used(desc.flags)
	}
}

impl VirtqPrivate for PackedVq {
	type Descriptor = pvirtq::Desc;

	fn create_indirect_ctrl(
		buffer_tkn: &AvailBufferToken,
	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
		Ok(Self::descriptor_iter(buffer_tkn)?
			.collect::<Vec<_>>()
			.into_boxed_slice())
	}
}