hermit/drivers/virtio/virtqueue/packed.rs

//! `pvirtq` infrastructure.
//!
//! The main type of this module is [`PackedVq`].
//!
//! For details on packed virtqueues, see [Packed Virtqueues].
//! For details on the Rust definitions, see [`virtio::pvirtq`].
//!
//! [Packed Virtqueues]: https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-720008
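//!
//! A rough usage sketch (illustrative only; the variables and their setup are
//! assumptions, the actual wiring is done by the individual device drivers):
//!
//! ```ignore
//! let mut vq = PackedVq::new(&mut com_cfg, &notif_cfg, 256, 0, features)?;
//! vq.dispatch(buffer_tkn, false, buffer_type)?;
//! if let Ok(used_tkn) = vq.try_recv() {
//!     // Process the used buffer token.
//! }
//! ```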

#![allow(dead_code)]

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::ops;
use core::sync::atomic::{Ordering, fence};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::index_alloc::IndexAlloc;
use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

trait RingIndexRange {
	fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool;
}

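// Illustrative sketch of the wrapping semantics (values are made up): a range
// from offset 6 (wrap counter 1) to offset 2 (wrap counter 0) wraps around the
// end of the ring and contains offset 7 of the old wrap iteration as well as
// offsets 0 and 1 of the new one:
//
//     let start = EventSuppressDesc::new().with_desc_event_off(6).with_desc_event_wrap(1);
//     let end = EventSuppressDesc::new().with_desc_event_off(2).with_desc_event_wrap(0);
//     let item = EventSuppressDesc::new().with_desc_event_off(7).with_desc_event_wrap(1);
//     assert!((start..end).wrapping_contains(&item));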
impl RingIndexRange for ops::Range<EventSuppressDesc> {
	fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool {
		let start_off = self.start.desc_event_off();
		let start_wrap = self.start.desc_event_wrap();
		let end_off = self.end.desc_event_off();
		let end_wrap = self.end.desc_event_wrap();
		let item_off = item.desc_event_off();
		let item_wrap = item.desc_event_wrap();

		if start_wrap == end_wrap {
			item_wrap == start_wrap && start_off <= item_off && item_off < end_off
		} else if item_wrap == start_wrap {
			start_off <= item_off
		} else {
			debug_assert!(item_wrap == end_wrap);
			item_off < end_off
		}
	}
}

/// Structure which allows controlling the raw ring and operating on it conveniently.
struct DescriptorRing {
	ring: Box<[pvirtq::Desc], DeviceAlloc>,
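	/// Transfer tokens of in-flight buffers, indexed by their buffer ID, so that
	/// they can be recovered once the device marks the corresponding descriptor as used.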
	tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

	// Controlling variables for the ring
	//
	/// Where to insert available descriptors next
	///
	/// See Virtio specification v1.1. - 2.7.1
	write_index: EventSuppressDesc,
	/// How many descriptors can still be inserted
	capacity: u16,
	/// Where to expect the next used descriptor by the device
	///
	/// See Virtio specification v1.1. - 2.7.1
	poll_index: EventSuppressDesc,
	/// This allocates available descriptors.
	indexes: IndexAlloc,
}

impl DescriptorRing {
	fn new(size: u16) -> Self {
		let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

		// `Box` is not Clone, so neither is `None::<Box<_>>`. Hence, we need to produce `None`s with a closure.
		let tkn_ref_ring = core::iter::repeat_with(|| None)
			.take(size.into())
			.collect::<Vec<_>>()
			.into_boxed_slice();

		let write_index = EventSuppressDesc::new()
			.with_desc_event_off(0)
			.with_desc_event_wrap(1);

		let poll_index = write_index;

		DescriptorRing {
			ring,
			tkn_ref_ring,
			write_index,
			capacity: size,
			poll_index,
			indexes: IndexAlloc::new(size.into()),
		}
	}

	/// Polls the poll index and, if the descriptor there has been used, returns the
	/// corresponding [`UsedBufferToken`].
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		let mut ctrl = self.get_read_ctrler();

		ctrl.poll_next()
			.map(|(tkn, written_len)| {
				UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
			})
			.ok_or(VirtqError::NoNewUsed)
	}

	fn push_batch(
		&mut self,
		tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
	) -> Result<EventSuppressDesc, VirtqError> {
		// The first token is handled separately: its control settings are recorded
		// so that its first descriptor can be made available last (see below).
		// Empty batches are rejected early.

		let first_ctrl_settings;
		let first_buffer;
		let mut ctrl;

		let mut tkn_iterator = tkn_lst.into_iter();
		if let Some(first_tkn) = tkn_iterator.next() {
			ctrl = self.push_without_making_available(&first_tkn)?;
			first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
			first_buffer = first_tkn;
		} else {
			// Empty batches are an error
			return Err(VirtqError::BufferNotSpecified);
		}
		// Push the remaining tokens (if any) and make them available right away
		for tkn in tkn_iterator {
			ctrl = self.push_without_making_available(&tkn)?;
			ctrl.make_avail(tkn);
		}

		// Make the first buffer of the batch available last by only now
		// publishing the flags of its first descriptor.
		self.make_avail_with_state(
			first_buffer,
			first_ctrl_settings.0,
			first_ctrl_settings.1,
			first_ctrl_settings.2,
		);

		Ok(self.write_index)
	}

	fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<EventSuppressDesc, VirtqError> {
		self.push_batch([tkn])
	}

	fn push_without_making_available(
		&mut self,
		tkn: &TransferToken<pvirtq::Desc>,
	) -> Result<WriteCtrl<'_>, VirtqError> {
		if tkn.num_consuming_descr() > self.capacity {
			return Err(VirtqError::NoDescrAvail);
		}

		// Create a counter that wraps to the first element
		// after reaching the end of the ring.
		let mut ctrl = self.get_write_ctrler()?;

		// Important here is to:
		// * distinguish between indirect and direct buffers
		// * make them available in the right order (the first descriptor last) (VIRTIO Spec. v1.2 section 2.8.6)

		// The buffer uses indirect descriptors if the ctrl_desc field is Some.
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
			ctrl.write_desc(desc);
		} else {
			for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
				ctrl.write_desc(incomplete_desc);
			}
		}
		Ok(ctrl)
	}

	fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
		self.ring.as_mut_ptr()
	}

	/// Returns an initialized write controller in order
	/// to write the queue correctly.
	fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
		let desc_id = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
		Ok(WriteCtrl {
			start: self.write_index.desc_event_off(),
			position: self.write_index.desc_event_off(),
			modulo: u16::try_from(self.ring.len()).unwrap(),
			first_flags: DescF::empty(),
			buff_id: u16::try_from(desc_id).unwrap(),

			desc_ring: self,
		})
	}

	/// Returns an initialized read controller in order
	/// to read the queue correctly.
	fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
		ReadCtrl {
			position: self.poll_index.desc_event_off(),
			modulo: u16::try_from(self.ring.len()).unwrap(),

			desc_ring: self,
		}
	}

	fn make_avail_with_state(
		&mut self,
		raw_tkn: TransferToken<pvirtq::Desc>,
		start: u16,
		buff_id: u16,
		first_flags: DescF,
	) {
		// Provide the reference, so that the TransferToken can be recovered upon finish.
		self.tkn_ref_ring[usize::from(buff_id)] = Some(raw_tkn);
		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
		// See Virtio specification v1.1. - 2.7.21
		fence(Ordering::SeqCst);
		self.ring[usize::from(start)].flags = first_flags;
	}

	/// Returns the [DescF] with the avail and used flags set in accordance
	/// with the VIRTIO specification v1.2 - 2.8.1 (i.e. avail flag set to match
	/// the driver wrap counter and the used flag set to NOT match the wrap counter).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. device wrap counter) by accident.
	///
	/// A copy of the flag is taken instead of a mutable reference
	/// for the cases in which the modification of the flag needs to be
	/// deferred (e.g. patched dispatches, chained buffers).
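	///
	/// A minimal illustrative sketch (assuming `ring` is a [`DescriptorRing`] whose
	/// driver wrap counter is currently 1; not a doctest that is run):
	///
	/// ```ignore
	/// let flags = ring.to_marked_avail(virtq::DescF::empty());
	/// assert!(flags.contains(virtq::DescF::AVAIL));
	/// assert!(!flags.contains(virtq::DescF::USED));
	/// ```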
	fn to_marked_avail(&self, mut flags: DescF) -> DescF {
		let avail = self.write_index.desc_event_wrap() != 0;
		flags.set(virtq::DescF::AVAIL, avail);
		flags.set(virtq::DescF::USED, !avail);
		flags
	}

	/// Checks the avail and used flags to see if the descriptor is marked
	/// as used by the device in accordance with the
	/// VIRTIO specification v1.2 - 2.8.1 (i.e. they match the device wrap counter).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. driver wrap counter) by accident.
	fn is_marked_used(&self, flags: DescF) -> bool {
		if self.poll_index.desc_event_wrap() != 0 {
			flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
		} else {
			!flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
		}
	}
}

struct ReadCtrl<'a> {
	/// Poll index of the ring at init of ReadCtrl
	position: u16,
	modulo: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
	/// Polls the ring for a newly finished buffer. If the buffer is marked as finished,
	/// updates the queue and returns the respective TransferToken.
	fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
		// Check if descriptor has been marked used.
		let desc = &self.desc_ring.ring[usize::from(self.position)];
		if self.desc_ring.is_marked_used(desc.flags) {
			let buff_id = desc.id.to_ne();
			let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
				.take()
				.expect(
					"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
				);

			// Check whether the device has written anything back. If so, we read the overall
			// written length, which the driver needs in order to know how much of the buffer
			// holds usable data.
			//
			// According to the standard, the device signals solely via the first written descriptor
			// whether anything has been written to the write descriptors of a buffer.
			// See Virtio specification v1.1. - 2.7.4
			//                                - 2.7.5
			//                                - 2.7.6
			// let mut write_len = if self.desc_ring.ring[self.position].flags & DescrFlags::VIRTQ_DESC_F_WRITE == DescrFlags::VIRTQ_DESC_F_WRITE {
			//      self.desc_ring.ring[self.position].len
			//  } else {
			//      0
			//  };
			//
			// INFO:
			// Due to the behavior of the currently used devices and the virtio code in the Linux
			// kernel, we assume that devices do NOT set this flag correctly upon writes. Hence,
			// we omit the check in order to receive data.

			// We need to read the written length before advancing the position.
			let write_len = desc.len.to_ne();

			for _ in 0..tkn.num_consuming_descr() {
				self.incrmt();
			}
			unsafe {
				self.desc_ring.indexes.deallocate(buff_id.into());
			}

			Some((tkn, write_len))
		} else {
			None
		}
	}

	fn incrmt(&mut self) {
		let mut desc = self.desc_ring.poll_index;

		if desc.desc_event_off() + 1 == self.modulo {
			let wrap = desc.desc_event_wrap() ^ 1;
			desc.set_desc_event_wrap(wrap);
		}

		let off = (desc.desc_event_off() + 1) % self.modulo;
		desc.set_desc_event_off(off);

		self.desc_ring.poll_index = desc;

		self.position = desc.desc_event_off();

		// Increment capacity as we have one more free now!
		assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
		self.desc_ring.capacity += 1;
	}
}

/// Convenience struct for writing descriptors into the queue.
/// It takes care of updating the state of the queue correctly and of writing
/// the correct flags.
struct WriteCtrl<'a> {
	/// Where the write of the buffer started in the descriptor ring.
	/// This is important, as we must make this descriptor available
	/// last.
	start: u16,
	/// Where to write next. This should always be equal to the offset of the
	/// ring's `write_index` field.
	position: u16,
	modulo: u16,
	/// The [pvirtq::Desc::flags] value for the first descriptor, the write of which is deferred.
	first_flags: DescF,
	/// Buffer ID of this write
	buff_id: u16,

	desc_ring: &'a mut DescriptorRing,
}

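// Illustrative usage sketch (simplified and assumed; the real call sites are
// `push_without_making_available` and `make_avail_with_state`): for a chain of
// three descriptors, the flags of the first descriptor are deferred and only
// published at the very end, so the device never observes an incomplete chain.
//
//     let mut ctrl = desc_ring.get_write_ctrler()?;
//     ctrl.write_desc(desc0); // flags captured in `first_flags`, not yet visible
//     ctrl.write_desc(desc1); // written with avail/used marking
//     ctrl.write_desc(desc2); // written with avail/used marking
//     ctrl.make_avail(transfer_token); // publishes `first_flags` last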
impl WriteCtrl<'_> {
	/// **This function MUST only be used within the WriteCtrl.write_desc() function!**
	///
	/// Increments the index by one. The index wraps around to zero
	/// after reaching (modulo - 1).
	///
	/// Also takes care of flipping the wrap counter of the associated
	/// DescriptorRing.
	fn incrmt(&mut self) {
		// First, check if we are at all allowed to write a descriptor.
		assert!(self.desc_ring.capacity != 0);
		self.desc_ring.capacity -= 1;

		let mut desc = self.desc_ring.write_index;

		// Check if the increment wrapped around the end of the ring;
		// if so, also flip the wrap counter.
		if self.position + 1 == self.modulo {
			let wrap = desc.desc_event_wrap() ^ 1;
			desc.set_desc_event_wrap(wrap);
		}

		// Also update the write_index
		let off = (desc.desc_event_off() + 1) % self.modulo;
		desc.set_desc_event_off(off);

		self.desc_ring.write_index = desc;

		self.position = (self.position + 1) % self.modulo;
	}

	/// Completes the descriptor flags and id, and writes into the queue at the correct position.
	fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
		incomplete_desc.id = self.buff_id.into();
		if self.start == self.position {
			// We save what the flags value for the first descriptor will be to be able
			// to write it later when all the other descriptors are written (so that
			// the device does not see an incomplete chain).
			self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		} else {
			// Set avail and used according to the current wrap counter.
			incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		}
		self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
		self.incrmt();
	}

	fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
		// We fail if one wants to make a buffer available without having written a single descriptor!
		assert!(self.start != self.position);
		self.desc_ring
			.make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
	}
}

/// A type implementing the driver-side handling of the `EventSuppress`
/// structure for driver notification settings.
/// The Driver Event Suppression structure is read-only by the device
/// and controls the used buffer notifications sent by the device to the driver.
struct DrvNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure the device reads the driver's notification settings from
	raw: &'static mut pvirtq::EventSuppress,
}

/// A type implementing the driver-side handling of the `EventSuppress`
/// structure for device notification settings.
/// The Device Event Suppression structure is read-only by the driver
/// and controls the available buffer notifications sent by the driver to the device.
struct DevNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure the driver reads the device's notification settings from
	raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
	/// Enables notifications by unsetting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn enable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
	}

	/// Disables notifications by setting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn disable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
	}

	/// Enables a notification by the device for a specific descriptor.
	fn enable_specific(&mut self, desc: EventSuppressDesc) {
		// Check if VIRTIO_F_RING_EVENT_IDX has been negotiated
		if self.f_notif_idx {
			self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
			self.raw.desc = desc;
		}
	}
}

impl DevNotif {
	/// Enables honoring device requests for notifications about specific descriptors.
	pub fn enable_notif_specific(&mut self) {
		self.f_notif_idx = true;
	}

	/// Reads the notification flags and returns `true` if notifications
	/// are enabled, else `false`.
	fn is_notif(&self) -> bool {
		self.raw.flags.desc_event_flags() == RingEventFlags::Enable
	}

	fn notif_specific(&self) -> Option<EventSuppressDesc> {
		if !self.f_notif_idx {
			return None;
		}

		if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
			return None;
		}

		Some(self.raw.desc)
	}
}

/// Packed virtqueue which provides the functionality described in the
/// virtio specification v1.1. - 2.7
pub struct PackedVq {
	/// Ring which allows easy access to the raw ring structure of the
	/// specification
	descr_ring: DescriptorRing,
	/// Allows telling the device whether notifications are wanted
	drv_event: DrvNotif,
	/// Allows checking whether the device wants a notification
	dev_event: DevNotif,
	/// Used to actually notify the device about available buffers
	notif_ctrl: NotifCtrl,
	/// The size of the queue, i.e., the number of descriptors which can
	/// be used
	size: u16,
	/// The virtqueue's index. This identifies the virtqueue to the
	/// device and is unique on a per-device basis.
	index: u16,
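	/// The ring position (offset and wrap counter) right after the descriptors
	/// that the device was last notified about.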
	last_next: Cell<EventSuppressDesc>,
}

// Public interface of PackedVq
// This interface is also public in order to allow people to use the PackedVq directly!
impl Virtq for PackedVq {
	fn enable_notifs(&mut self) {
		self.drv_event.enable_notif();
	}

	fn disable_notifs(&mut self) {
		self.drv_event.disable_notif();
	}

	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.descr_ring.try_recv()
	}

	fn dispatch_batch(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

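		// Check whether the device asked to be notified about a specific descriptor
		// position and whether that position lies within the range of descriptors
		// that were just made available.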
		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch_batch_await(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.descr_ring.push(transfer_tkn)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		// FIXME: impl PartialEq for EventSuppressDesc in virtio-spec instead of converting into bits.
		let notif_specific = self
			.dev_event
			.notif_specific()
			.map(EventSuppressDesc::into_bits)
			== Some(self.last_next.get().into_bits());

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn index(&self) -> u16 {
		self.index
	}

	fn size(&self) -> u16 {
		self.size
	}

	fn has_used_buffers(&self) -> bool {
		let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index.desc_event_off())];
		self.descr_ring.is_marked_used(desc.flags)
	}
}

impl VirtqPrivate for PackedVq {
	type Descriptor = pvirtq::Desc;

	fn create_indirect_ctrl(
		buffer_tkn: &AvailBufferToken,
	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
		Ok(Self::descriptor_iter(buffer_tkn)?
			.collect::<Vec<_>>()
			.into_boxed_slice())
	}
}

impl PackedVq {
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		size: u16,
		index: u16,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Currently we do not support in-order use.
		// This stems from the fact that the PackedVq ReadCtrl is currently not
		// able to derive other finished transfers from a used-buffer notification.
		// In order to allow this, the queue MUST track the sequence in which
		// TransferTokens are inserted into the queue. Furthermore, the queue should
		// carry a feature u64 in order to check which features are currently in use
		// and adjust its ReadCtrl accordingly.
		if features.contains(virtio::F::IN_ORDER) {
			info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
			return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
		}

		// Get a handle to the queue's configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index) else {
			return Err(VirtqError::QueueNotExisting(index));
		};

		// A zero size and sizes larger than 0x8000 (2^15) must be caught,
		// as they are not allowed for packed queues.
		//
		// See Virtio specification v1.1. - 4.1.4.3.2
		let vq_size = if (size == 0) || (size > 0x8000) {
			return Err(VirtqError::QueueSizeNotAllowed(size));
		} else {
			vq_handler.set_vq_size(size)
		};

		let mut descr_ring = DescriptorRing::new(vq_size);

		// Allocate the event suppression structures in device-accessible memory
		// and leak them to obtain 'static references.
		let _mem_len =
			core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

		let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		// TODO: make this safe using zerocopy
		let drv_event = unsafe { drv_event.assume_init() };
		let dev_event = unsafe { dev_event.assume_init() };
		let drv_event = Box::leak(drv_event);
		let dev_event = Box::leak(dev_event);

		// Provide the memory areas of the queue's data structures to the device.
		vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
		// The `*mut EventSuppress` references are thin pointers, so deriving their
		// physical addresses is safe here.
		vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
		vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));

		let mut drv_event = DrvNotif {
			f_notif_idx: false,
			raw: drv_event,
		};

		let dev_event = DevNotif {
			f_notif_idx: false,
			raw: dev_event,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		if features.contains(virtio::F::EVENT_IDX) {
			drv_event.f_notif_idx = true;
		}

		vq_handler.enable_queue();

		info!("Created PackedVq: idx={index}, size={vq_size}");

		Ok(PackedVq {
			descr_ring,
			drv_event,
			dev_event,
			notif_ctrl,
			size: vq_size,
			index,
			last_next: Cell::default(),
		})
	}
}