hermit/drivers/virtio/virtqueue/packed.rs

//! `pvirtq` infrastructure.
//!
//! The main type of this module is [`PackedVq`].
//!
//! For details on packed virtqueues, see [Packed Virtqueues].
//! For details on the Rust definitions, see [`virtio::pvirtq`].
//!
//! [Packed Virtqueues]: https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-720008

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::sync::atomic::{Ordering, fence};
use core::{mem, ops};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::index_alloc::IndexAlloc;
use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;
trait RingIndexRange {
	fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool;
}

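// A half-open range of ring positions, where each position is a pair of descriptor
// offset and ring wrap counter. Because the offset wraps around the end of the ring
// (toggling the wrap counter), a plain `start <= item < end` comparison is not
// sufficient; the wrap counters decide which of the three cases below applies.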
impl RingIndexRange for ops::Range<EventSuppressDesc> {
	fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool {
		let start_off = self.start.desc_event_off();
		let start_wrap = self.start.desc_event_wrap();
		let end_off = self.end.desc_event_off();
		let end_wrap = self.end.desc_event_wrap();
		let item_off = item.desc_event_off();
		let item_wrap = item.desc_event_wrap();

		if start_wrap == end_wrap {
			item_wrap == start_wrap && start_off <= item_off && item_off < end_off
		} else if item_wrap == start_wrap {
			start_off <= item_off
		} else {
			debug_assert!(item_wrap == end_wrap);
			item_off < end_off
		}
	}
}

/// Structure which allows controlling the raw ring and operating on it easily.
struct DescriptorRing {
	ring: Box<[pvirtq::Desc], DeviceAlloc>,
	tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

	// Controlling variables for the ring
	//
	/// Where to insert available descriptors next
	///
	/// See Virtio specification v1.1. - 2.7.1
	write_index: EventSuppressDesc,
	/// How many descriptors can be inserted
	capacity: u16,
	/// Where to expect the next used descriptor by the device
	///
	/// See Virtio specification v1.1. - 2.7.1
	poll_index: EventSuppressDesc,
	/// This allocates available descriptors.
	indexes: IndexAlloc,
}

impl DescriptorRing {
	fn new(size: u16) -> Self {
		let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

		// `Box` is not Clone, so neither is `None::<Box<_>>`. Hence, we need to produce `None`s with a closure.
		let tkn_ref_ring = core::iter::repeat_with(|| None)
			.take(size.into())
			.collect::<Vec<_>>()
			.into_boxed_slice();

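		// Both ring wrap counters start at 1 and descriptors start at offset 0.
		// See Virtio specification v1.1. - 2.7.1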
		let write_index = EventSuppressDesc::new()
			.with_desc_event_off(0)
			.with_desc_event_wrap(1);

		let poll_index = write_index;

		DescriptorRing {
			ring,
			tkn_ref_ring,
			write_index,
			capacity: size,
			poll_index,
			indexes: IndexAlloc::new(size.into()),
		}
	}

	/// Polls the ring at the poll index and returns the next finished buffer, if any.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		let mut ctrl = self.get_read_ctrler();

		ctrl.poll_next()
			.map(|(tkn, written_len)| {
				UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
			})
			.ok_or(VirtqError::NoNewUsed)
	}

	fn push_batch(
		&mut self,
		tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
	) -> Result<EventSuppressDesc, VirtqError> {
		// Catch empty pushes up front, since empty batches are not allowed.
		let mut tkn_iterator = tkn_lst.into_iter();
		let Some(first_tkn) = tkn_iterator.next() else {
			// Empty batches are an error
			return Err(VirtqError::BufferNotSpecified);
		};

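		// Write the descriptors of the first buffer, but defer exposing it to the
		// device: the first descriptor of a batch must be made available last
		// (see VIRTIO Spec. v1.2 section 2.8.6).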
		let mut ctrl = self.push_without_making_available(&first_tkn)?;
		let first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
		let first_buffer = first_tkn;

		// Push the remaining tokens (if any)
		for tkn in tkn_iterator {
			ctrl.make_avail(tkn);
		}

		// Manually make the first buffer of the batch available last.
		self.make_avail_with_state(
			first_buffer,
			first_ctrl_settings.0,
			first_ctrl_settings.1,
			first_ctrl_settings.2,
		);

		Ok(self.write_index)
	}

	fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<EventSuppressDesc, VirtqError> {
		self.push_batch([tkn])
	}

	fn push_without_making_available(
		&mut self,
		tkn: &TransferToken<pvirtq::Desc>,
	) -> Result<WriteCtrl<'_>, VirtqError> {
		if tkn.num_consuming_descr() > self.capacity {
			return Err(VirtqError::NoDescrAvail);
		}

		// Create a counter that wraps to the first element
		// after reaching the end of the ring.
		let mut ctrl = self.get_write_ctrler()?;

		// The important points here are:
		// * distinguish between indirect and direct buffers
		// * make them available in the right order (the first descriptor last) (VIRTIO Spec. v1.2 section 2.8.6)

		// The buffer uses indirect descriptors if the ctrl_desc field is Some.
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
			ctrl.write_desc(desc);
		} else {
			for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
				ctrl.write_desc(incomplete_desc);
			}
		}
		Ok(ctrl)
	}

	fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
		self.ring.as_mut_ptr()
	}

	/// Returns an initialized write controller in order
	/// to write to the queue correctly.
	fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
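		// Reserve a free buffer id for this transfer. It is released again in
		// ReadCtrl::poll_next once the device has marked the buffer as used.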
		let desc_id = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
		Ok(WriteCtrl {
			start: self.write_index.desc_event_off(),
			position: self.write_index.desc_event_off(),
			modulo: u16::try_from(self.ring.len()).unwrap(),
			first_flags: DescF::empty(),
			buff_id: u16::try_from(desc_id).unwrap(),

			desc_ring: self,
		})
	}

	/// Returns an initialized read controller in order
	/// to read from the queue correctly.
	fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
		ReadCtrl {
			position: self.poll_index.desc_event_off(),
			modulo: u16::try_from(self.ring.len()).unwrap(),

			desc_ring: self,
		}
	}

	fn make_avail_with_state(
		&mut self,
		raw_tkn: TransferToken<pvirtq::Desc>,
		start: u16,
		buff_id: u16,
		first_flags: DescF,
	) {
		// Store the token so that it can be retrieved again once the device marks the buffer as used.
		self.tkn_ref_ring[usize::from(buff_id)] = Some(raw_tkn);
		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
		// See Virtio specification v1.1. - 2.7.21
		fence(Ordering::SeqCst);
		self.ring[usize::from(start)].flags = first_flags;
	}

	/// Returns the [DescF] with the avail and used flags set in accordance
	/// with the VIRTIO specification v1.2 - 2.8.1 (i.e. avail flag set to match
	/// the driver wrap counter and the used flag set to NOT match the wrap counter).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. device wrap counter) by accident.
	///
	/// A copy of the flag is taken instead of a mutable reference
	/// for the cases in which the modification of the flag needs to be
	/// deferred (e.g. patched dispatches, chained buffers).
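	///
	/// For example, while the driver ring wrap counter is 1, `AVAIL` is set and
	/// `USED` is cleared; once the write index wraps around the ring, the two
	/// flags are inverted.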
	fn to_marked_avail(&self, mut flags: DescF) -> DescF {
		let avail = self.write_index.desc_event_wrap() != 0;
		flags.set(virtq::DescF::AVAIL, avail);
		flags.set(virtq::DescF::USED, !avail);
		flags
	}

	/// Checks the avail and used flags to see if the descriptor is marked
	/// as used by the device in accordance with the
	/// VIRTIO specification v1.2 - 2.8.1 (i.e. they match the device wrap counter)
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. driver wrap counter) by accident.
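	///
	/// For example, while the device ring wrap counter is 1, a used descriptor has
	/// both `AVAIL` and `USED` set; after the device has wrapped, both flags are clear.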
	fn is_marked_used(&self, flags: DescF) -> bool {
		if self.poll_index.desc_event_wrap() != 0 {
			flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
		} else {
			!flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
		}
	}
}

struct ReadCtrl<'a> {
	/// Poll index of the ring at init of ReadCtrl
	position: u16,
	modulo: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
	/// Polls the ring for a new finished buffer. If a buffer is marked as finished, takes care of
	/// updating the queue and returns the respective TransferToken.
	fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
		// Check if the descriptor has been marked used.
		let desc = &self.desc_ring.ring[usize::from(self.position)];
		if !self.desc_ring.is_marked_used(desc.flags) {
			return None;
		}

		let buff_id = desc.id.to_ne();
		let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
			.take()
			.expect(
				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
			);

		// Check whether anything has been written to the queue. If this is the case, we calculate the overall length.
		// This is necessary in order to provide the drivers with correct access to the usable data.
		//
		// According to the standard, the device signals solely via the first written descriptor if anything has been written to
		// the write descriptors of a buffer.
		// See Virtio specification v1.1. - 2.7.4
		//                                - 2.7.5
		//                                - 2.7.6
		// let mut write_len = if self.desc_ring.ring[self.position].flags & DescrFlags::VIRTQ_DESC_F_WRITE == DescrFlags::VIRTQ_DESC_F_WRITE {
		//      self.desc_ring.ring[self.position].len
		//  } else {
		//      0
		//  };
		//
		// INFO:
		// Due to the behavior of the currently used devices and the virtio code from the Linux kernel, we assume that devices do NOT set this
		// flag correctly upon writes. Hence, we omit the check in order to receive data.

		// We need to read the written length before advancing the position.
		let write_len = desc.len.to_ne();

		for _ in 0..tkn.num_consuming_descr() {
			self.incrmt();
		}
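		// SAFETY: `buff_id` was handed out by `IndexAlloc::allocate` in
		// `get_write_ctrler`, and the `take` on `tkn_ref_ring` above ensures it is
		// released only once.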
		unsafe {
			self.desc_ring.indexes.deallocate(buff_id.into());
		}

		Some((tkn, write_len))
	}

	fn incrmt(&mut self) {
		let mut desc = self.desc_ring.poll_index;

		if desc.desc_event_off() + 1 == self.modulo {
			let wrap = desc.desc_event_wrap() ^ 1;
			desc.set_desc_event_wrap(wrap);
		}

		let off = (desc.desc_event_off() + 1) % self.modulo;
		desc.set_desc_event_off(off);

		self.desc_ring.poll_index = desc;

		self.position = desc.desc_event_off();

		// Increment capacity as we have one more free descriptor now!
		assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
		self.desc_ring.capacity += 1;
	}
}

/// Convenience struct that allows writing descriptors into the queue.
/// The struct takes care of updating the state of the queue correctly and
/// of writing the correct flags.
struct WriteCtrl<'a> {
	/// Where the write of the buffer started in the descriptor ring.
	/// This is important, as we must make this descriptor available
	/// last.
	start: u16,
	/// Where to write next. This should always be equal to the offset of the
	/// ring's write_index field.
	position: u16,
	modulo: u16,
	/// The [pvirtq::Desc::flags] value for the first descriptor, the write of which is deferred.
	first_flags: DescF,
	/// Buff ID of this write
	buff_id: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
	/// **This function MUST only be used within the WriteCtrl.write_desc() function!**
	///
	/// Increments the index by one. The index wraps around to zero when
	/// reaching (modulo - 1).
	///
	/// Also takes care of wrapping the wrap counter of the associated
	/// DescriptorRing.
	fn incrmt(&mut self) {
		// Firstly, check if we are at all allowed to write a descriptor.
		assert!(self.desc_ring.capacity != 0);
		self.desc_ring.capacity -= 1;

		let mut desc = self.desc_ring.write_index;

		// Check if the increment wrapped around the end of the ring;
		// if so, also wrap the wrap counter.
		if self.position + 1 == self.modulo {
			let wrap = desc.desc_event_wrap() ^ 1;
			desc.set_desc_event_wrap(wrap);
		}

		// Also update the write_index.
		let off = (desc.desc_event_off() + 1) % self.modulo;
		desc.set_desc_event_off(off);

		self.desc_ring.write_index = desc;

		self.position = (self.position + 1) % self.modulo;
	}

	/// Completes the descriptor flags and id, and writes into the queue at the correct position.
	fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
		incomplete_desc.id = self.buff_id.into();
		if self.start == self.position {
			// We save what the flags value for the first descriptor will be to be able
			// to write it later when all the other descriptors are written (so that
			// the device does not see an incomplete chain).
			self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		} else {
			// Set avail and used according to the current wrap counter.
			incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		}
		self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
		self.incrmt();
	}

	fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
		// We fail if one wants to make a buffer available without having inserted at least one descriptor!
		assert!(self.start != self.position);
		self.desc_ring
			.make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
	}
}

/// A type implementing the correct functionality upon
/// the `EventSuppr` structure for the driver's notification settings.
/// The Driver Event Suppression structure is read-only by the device
/// and controls the used buffer notifications sent by the device to the driver.
struct DrvNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure to read from, if device wants notifs
	raw: &'static mut pvirtq::EventSuppress,
}

/// A type implementing the correct functionality upon
/// the `EventSuppr` structure for the device's notification settings.
/// The Device Event Suppression structure is read-only by the driver
/// and controls the available buffer notifications sent by the driver to the device.
struct DevNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure to read from, if device wants notifs
	raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
	/// Enables notifications by unsetting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn enable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
	}

	/// Disables notifications by setting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn disable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
	}

	/// Enables a notification by the device for a specific descriptor.
	fn enable_specific(&mut self, desc: EventSuppressDesc) {
		// Check if VIRTIO_F_RING_EVENT_IDX has been negotiated.
		if self.f_notif_idx {
			self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
			self.raw.desc = desc;
		}
	}
}

impl DevNotif {
	/// Enables the notification capability for a specific buffer.
	#[expect(dead_code)]
	pub fn enable_notif_specific(&mut self) {
		self.f_notif_idx = true;
	}

	/// Reads the notification bit (i.e. the LSB) and returns its value.
	/// Returns true if notifications are enabled, else false.
	fn is_notif(&self) -> bool {
		self.raw.flags.desc_event_flags() == RingEventFlags::Enable
	}

	fn notif_specific(&self) -> Option<EventSuppressDesc> {
		if !self.f_notif_idx {
			return None;
		}

		if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
			return None;
		}

		Some(self.raw.desc)
	}
}

/// Packed virtqueue which provides the functionality described in the
/// virtio specification v1.1. - 2.7
pub struct PackedVq {
	/// Ring which allows easy access to the raw ring structure of the
	/// specification
	descr_ring: DescriptorRing,
	/// Allows telling the device whether notifications are wanted
	drv_event: DrvNotif,
	/// Allows checking whether the device wants a notification
	dev_event: DevNotif,
	/// Used to actually notify the device about available buffers
	notif_ctrl: NotifCtrl,
	/// The size of the queue, i.e. the number of descriptors that can
	/// be used
	size: u16,
	/// The virtqueue's index. This identifies the virtqueue to the
	/// device and is unique on a per-device basis.
	index: u16,
	last_next: Cell<EventSuppressDesc>,
}

// Public interface of PackedVq
// This interface is also public in order to allow people to use the PackedVq directly!
impl Virtq for PackedVq {
	fn enable_notifs(&mut self) {
		self.drv_event.enable_notif();
	}

	fn disable_notifs(&mut self) {
		self.drv_event.disable_notif();
	}

	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.descr_ring.try_recv()
	}

	fn dispatch_batch(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

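		// The device may have asked to be notified only once a specific descriptor
		// becomes available (VIRTIO_F_RING_EVENT_IDX). Check whether that descriptor
		// lies within the range of descriptors that was just made available.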
		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch_batch_await(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

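		// As above, check whether the device requested a notification for a specific
		// descriptor within the range that was just made available.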
		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.descr_ring.push(transfer_tkn)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		// FIXME: impl PartialEq for EventSuppressDesc in virtio-spec instead of converting into bits.
		let notif_specific = self
			.dev_event
			.notif_specific()
			.map(EventSuppressDesc::into_bits)
			== Some(self.last_next.get().into_bits());

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn index(&self) -> u16 {
		self.index
	}

	fn size(&self) -> u16 {
		self.size
	}

	fn has_used_buffers(&self) -> bool {
		let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index.desc_event_off())];
		self.descr_ring.is_marked_used(desc.flags)
	}
}

impl VirtqPrivate for PackedVq {
	type Descriptor = pvirtq::Desc;

	fn create_indirect_ctrl(
		buffer_tkn: &AvailBufferToken,
	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
		Ok(Self::descriptor_iter(buffer_tkn)?
			.collect::<Vec<_>>()
			.into_boxed_slice())
	}
}

impl PackedVq {
	#[allow(dead_code)]
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		max_size: u16,
		index: u16,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Currently we do not have support for in-order use. This stems from the
		// fact that the PackedVq ReadCtrl is currently not able to derive other
		// finished transfers from a used-buffer notification.
		// In order to allow this, the queue MUST track the sequence in which
		// TransferTokens are inserted into the queue. Furthermore, the queue should
		// carry a feature u64 in order to check which features are currently in use
		// and adjust its ReadCtrl accordingly.
		if features.contains(virtio::F::IN_ORDER) {
			info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
			return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
		}

		// Get a handler to the queue's configuration area.
		let mut vq_handler = com_cfg
			.select_vq(index)
			.ok_or(VirtqError::QueueNotExisting(index))?;

		// Must catch a zero size, as it is not allowed for packed queues.
		// Must also catch sizes larger than 0x8000 (2^15), as they are not allowed for packed queues.
		//
		// See Virtio specification v1.1. - 4.1.4.3.2
		let vq_size = if (max_size == 0) || (max_size > 0x8000) {
			return Err(VirtqError::QueueSizeNotAllowed(max_size));
		} else {
			vq_handler.set_vq_size(max_size)
		};

		let mut descr_ring = DescriptorRing::new(vq_size);
		// Allocate the event suppression structures in device-accessible memory and
		// leak them, as the device needs access to them for the queue's lifetime.
		let _mem_len =
			mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

		let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		// TODO: make this safe using zerocopy
		let drv_event = unsafe { drv_event.assume_init() };
		let dev_event = unsafe { dev_event.assume_init() };
		let drv_event = Box::leak(drv_event);
		let dev_event = Box::leak(dev_event);

		// Provide the memory areas of the queue's data structures to the device.
		vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
		vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
		vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));

		let mut drv_event = DrvNotif {
			f_notif_idx: false,
			raw: drv_event,
		};

		let dev_event = DevNotif {
			f_notif_idx: false,
			raw: dev_event,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		if features.contains(virtio::F::EVENT_IDX) {
			drv_event.f_notif_idx = true;
		}

		vq_handler.enable_queue();

		info!("Created PackedVq: idx={index}, size={vq_size}");

		Ok(PackedVq {
			descr_ring,
			drv_event,
			dev_event,
			notif_ctrl,
			size: vq_size,
			index,
			last_next: Cell::default(),
		})
	}
}