hermit/drivers/virtio/virtqueue/packed.rs

//! `pvirtq` infrastructure.
//!
//! The main type of this module is [`PackedVq`].
//!
//! For details on packed virtqueues, see [Packed Virtqueues].
//! For details on the Rust definitions, see [`virtio::pvirtq`].
//!
//! [Packed Virtqueues]: https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-720008

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::ops;
use core::sync::atomic::{Ordering, fence};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::index_alloc::IndexAlloc;
use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

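/// Wrap-aware containment check for a range of descriptor ring positions.
///
/// A position on the packed ring is a descriptor offset paired with a wrap
/// counter (see VIRTIO v1.2 - 2.8.1), so a range of positions may span the
/// point at which the ring wraps around and the wrap counter toggles.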
trait RingIndexRange {
	fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool;
}

impl RingIndexRange for ops::Range<EventSuppressDesc> {
	fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool {
		let start_off = self.start.desc_event_off();
		let start_wrap = self.start.desc_event_wrap();
		let end_off = self.end.desc_event_off();
		let end_wrap = self.end.desc_event_wrap();
		let item_off = item.desc_event_off();
		let item_wrap = item.desc_event_wrap();

		if start_wrap == end_wrap {
			item_wrap == start_wrap && start_off <= item_off && item_off < end_off
		} else if item_wrap == start_wrap {
			start_off <= item_off
		} else {
			debug_assert!(item_wrap == end_wrap);
			item_off < end_off
		}
	}
}

/// Structure which allows to control the raw ring and to operate easily on it
struct DescriptorRing {
	ring: Box<[pvirtq::Desc], DeviceAlloc>,
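	/// Tokens of in-flight transfers, indexed by buffer id. A token is taken
	/// out again once the device marks the corresponding descriptor as used.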
	tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

	// Controlling variables for the ring
	//
	/// Where to insert available descriptors next
	///
	/// See Virtio specification v1.1. - 2.7.1
	write_index: EventSuppressDesc,
	/// How many descriptors can be inserted
	capacity: u16,
	/// Where to expect the next used descriptor by the device
	///
	/// See Virtio specification v1.1. - 2.7.1
	poll_index: EventSuppressDesc,
	/// This allocates available descriptors.
	indexes: IndexAlloc,
}

impl DescriptorRing {
	fn new(size: u16) -> Self {
		let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

		// `Box` is not Clone, so neither is `None::<Box<_>>`. Hence, we need to produce `None`s with a closure.
		let tkn_ref_ring = core::iter::repeat_with(|| None)
			.take(size.into())
			.collect::<Vec<_>>()
			.into_boxed_slice();

		let write_index = EventSuppressDesc::new()
			.with_desc_event_off(0)
			.with_desc_event_wrap(1);
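		// The wrap counter is initialized to 1 (VIRTIO v1.2 - 2.8.1) and is
		// toggled each time the write position wraps past the end of the ring.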

		let poll_index = write_index;

		DescriptorRing {
			ring,
			tkn_ref_ring,
			write_index,
			capacity: size,
			poll_index,
			indexes: IndexAlloc::new(size.into()),
		}
	}

	/// Polls the poll index and, if the device has marked the corresponding
	/// descriptor as used, returns the finished transfer as a [`UsedBufferToken`].
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		let mut ctrl = self.get_read_ctrler();

		ctrl.poll_next()
			.map(|(tkn, written_len)| {
				UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
			})
			.ok_or(VirtqError::NoNewUsed)
	}

	fn push_batch(
		&mut self,
		tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
	) -> Result<EventSuppressDesc, VirtqError> {
		// Handle the first token separately: its control settings are captured
		// so that making it available can be deferred until all remaining
		// buffers of the batch have been made available.

		let first_ctrl_settings;
		let first_buffer;
		let mut ctrl;

		let mut tkn_iterator = tkn_lst.into_iter();
		if let Some(first_tkn) = tkn_iterator.next() {
			ctrl = self.push_without_making_available(&first_tkn)?;
			first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
			first_buffer = first_tkn;
		} else {
			// Empty batches are an error
			return Err(VirtqError::BufferNotSpecified);
		}
		// Push the remaining tokens (if any)
		for tkn in tkn_iterator {
			ctrl.make_avail(tkn);
		}

		// Manually make the first buffer available last, so that the device
		// does not see an incomplete batch (VIRTIO Spec. v1.2 section 2.8.6).
		self.make_avail_with_state(
			first_buffer,
			first_ctrl_settings.0,
			first_ctrl_settings.1,
			first_ctrl_settings.2,
		);

		Ok(self.write_index)
	}

	fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<EventSuppressDesc, VirtqError> {
		self.push_batch([tkn])
	}

	fn push_without_making_available(
		&mut self,
		tkn: &TransferToken<pvirtq::Desc>,
	) -> Result<WriteCtrl<'_>, VirtqError> {
		if tkn.num_consuming_descr() > self.capacity {
			return Err(VirtqError::NoDescrAvail);
		}

		// Create a counter that wraps to the first element
		// after reaching the end of the ring.
		let mut ctrl = self.get_write_ctrler()?;

		// Important here is to:
		// * distinguish between indirect and direct buffers
		// * make them available in the right order (the first descriptor last) (VIRTIO Spec. v1.2 section 2.8.6)

		// The buffer uses indirect descriptors if the ctrl_desc field is Some.
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
			ctrl.write_desc(desc);
		} else {
			for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
				ctrl.write_desc(incomplete_desc);
			}
		}
		Ok(ctrl)
	}

	fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
		self.ring.as_mut_ptr()
	}

	/// Returns an initialized write controller in order
	/// to write the queue correctly.
	fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
		let desc_id = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
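		// The allocated id is written into every descriptor of this transfer
		// and echoed back by the device in the used descriptor, which lets us
		// find the matching token in `tkn_ref_ring` when polling.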
		Ok(WriteCtrl {
			start: self.write_index.desc_event_off(),
			position: self.write_index.desc_event_off(),
			modulo: u16::try_from(self.ring.len()).unwrap(),
			first_flags: DescF::empty(),
			buff_id: u16::try_from(desc_id).unwrap(),

			desc_ring: self,
		})
	}

	/// Returns an initialized read controller in order
	/// to read the queue correctly.
	fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
		ReadCtrl {
			position: self.poll_index.desc_event_off(),
			modulo: u16::try_from(self.ring.len()).unwrap(),

			desc_ring: self,
		}
	}

	fn make_avail_with_state(
		&mut self,
		raw_tkn: TransferToken<pvirtq::Desc>,
		start: u16,
		buff_id: u16,
		first_flags: DescF,
	) {
		// Store the token so that it can be retrieved again once the transfer finishes.
		self.tkn_ref_ring[usize::from(buff_id)] = Some(raw_tkn);
		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
		// See Virtio specification v1.1. - 2.7.21
		fence(Ordering::SeqCst);
		self.ring[usize::from(start)].flags = first_flags;
	}

	/// Returns the [DescF] with the avail and used flags set in accordance
	/// with the VIRTIO specification v1.2 - 2.8.1 (i.e. avail flag set to match
	/// the driver wrap counter and the used flag set to NOT match the wrap counter).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. device wrap counter) by accident.
	///
	/// A copy of the flags is taken instead of a mutable reference
	/// for the cases in which the modification of the flags needs to be
	/// deferred (e.g. batched dispatches, chained buffers).
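	///
	/// For example: while the driver wrap counter is 1, a descriptor made
	/// available has AVAIL set and USED cleared; after the write position
	/// wraps, the counter becomes 0 and a cleared AVAIL together with a set
	/// USED marks a descriptor as available.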
	fn to_marked_avail(&self, mut flags: DescF) -> DescF {
		let avail = self.write_index.desc_event_wrap() != 0;
		flags.set(virtq::DescF::AVAIL, avail);
		flags.set(virtq::DescF::USED, !avail);
		flags
	}

	/// Checks the avail and used flags to see if the descriptor is marked
	/// as used by the device in accordance with the
	/// VIRTIO specification v1.2 - 2.8.1 (i.e. they match the device wrap counter)
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. driver wrap counter) by accident.
	fn is_marked_used(&self, flags: DescF) -> bool {
		if self.poll_index.desc_event_wrap() != 0 {
			flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
		} else {
			!flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
		}
	}
}

struct ReadCtrl<'a> {
	/// Poll index of the ring at init of ReadCtrl
	position: u16,
	modulo: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
	/// Polls the ring for a new finished buffer. If a buffer is marked as finished, takes care of
	/// updating the queue and returns the respective TransferToken.
	fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
		// Check if the descriptor has been marked used.
		let desc = &self.desc_ring.ring[usize::from(self.position)];
		if self.desc_ring.is_marked_used(desc.flags) {
			let buff_id = desc.id.to_ne();
			let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
				.take()
				.expect(
					"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
				);

			// Check whether anything has been written to the queue. If this is the case, we calculate the overall length.
			// This is necessary in order to provide the drivers with correct access to the usable data.
			//
			// According to the standard, the device signals solely via the first written descriptor if anything has been written to
			// the write descriptors of a buffer.
			// See Virtio specification v1.1. - 2.7.4
			//                                - 2.7.5
			//                                - 2.7.6
			// let mut write_len = if self.desc_ring.ring[self.position].flags & DescrFlags::VIRTQ_DESC_F_WRITE == DescrFlags::VIRTQ_DESC_F_WRITE {
			//      self.desc_ring.ring[self.position].len
			//  } else {
			//      0
			//  };
			//
			// INFO:
			// Due to the behavior of the currently used devices and the virtio code from the Linux kernel, we assume that devices do NOT set this
			// flag correctly upon writes. Hence, we omit the check in order to receive data.

			// We need to read the written length before advancing the position.
			let write_len = desc.len.to_ne();

			for _ in 0..tkn.num_consuming_descr() {
				self.incrmt();
			}
			unsafe {
				self.desc_ring.indexes.deallocate(buff_id.into());
			}

			Some((tkn, write_len))
		} else {
			None
		}
	}

	fn incrmt(&mut self) {
		let mut desc = self.desc_ring.poll_index;

		if desc.desc_event_off() + 1 == self.modulo {
			let wrap = desc.desc_event_wrap() ^ 1;
			desc.set_desc_event_wrap(wrap);
		}

		let off = (desc.desc_event_off() + 1) % self.modulo;
		desc.set_desc_event_off(off);

		self.desc_ring.poll_index = desc;

		self.position = desc.desc_event_off();

		// Increment capacity as we have one more free now!
		assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
		self.desc_ring.capacity += 1;
	}
}

/// Helper struct for conveniently writing descriptors into the queue.
/// The struct takes care of updating the state of the queue correctly and of
/// writing the correct flags.
struct WriteCtrl<'a> {
	/// Where the write of the buffer started in the descriptor ring.
	/// This is important, as we must make this descriptor available
	/// last.
	start: u16,
	/// Where to write next. This should always be equal to the offset of the
	/// ring's write_index field.
	position: u16,
	modulo: u16,
	/// The [pvirtq::Desc::flags] value for the first descriptor, the write of which is deferred.
	first_flags: DescF,
	/// Buffer ID of this write
	buff_id: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
	/// **This function MUST only be used within the WriteCtrl.write_desc() function!**
	///
	/// Increments the index by one. The index wraps around to zero when
	/// reaching (modulo - 1).
	///
	/// Also takes care of wrapping the wrap counter of the associated
	/// DescriptorRing.
	fn incrmt(&mut self) {
		// First, check if we are at all allowed to write a descriptor.
		assert!(self.desc_ring.capacity != 0);
		self.desc_ring.capacity -= 1;

		let mut desc = self.desc_ring.write_index;

		// Check if the increment wrapped around the end of the ring;
		// then also wrap the wrap counter.
		if self.position + 1 == self.modulo {
			let wrap = desc.desc_event_wrap() ^ 1;
			desc.set_desc_event_wrap(wrap);
		}

		// Also update the write_index
		let off = (desc.desc_event_off() + 1) % self.modulo;
		desc.set_desc_event_off(off);

		self.desc_ring.write_index = desc;

		self.position = (self.position + 1) % self.modulo;
	}

	/// Completes the descriptor flags and id, and writes into the queue at the correct position.
	fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
		incomplete_desc.id = self.buff_id.into();
		if self.start == self.position {
			// We save what the flags value for the first descriptor will be to be able
			// to write it later when all the other descriptors are written (so that
			// the device does not see an incomplete chain).
			self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		} else {
			// Set avail and used according to the current wrap counter.
			incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		}
		self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
		self.incrmt();
	}

	fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
		// We fail if one wants to make a buffer available without inserting one element!
		assert!(self.start != self.position);
		self.desc_ring
			.make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
	}
}

/// A type implementing the correct functionality on top of the
/// `EventSuppress` structure for the driver's notification settings.
/// The Driver Event Suppression structure is read-only by the device
/// and controls the used buffer notifications sent by the device to the driver.
struct DrvNotif {
	/// Indicates if VIRTIO_F_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// The underlying suppression structure, written by the driver and read by the device
	raw: &'static mut pvirtq::EventSuppress,
}

/// A type implementing the correct functionality on top of the
/// `EventSuppress` structure for the device's notification settings.
/// The Device Event Suppression structure is read-only by the driver
/// and controls the available buffer notifications sent by the driver to the device.
struct DevNotif {
	/// Indicates if VIRTIO_F_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// The underlying suppression structure, written by the device and read by the driver
	raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
	/// Enables notifications by unsetting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn enable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
	}

	/// Disables notifications by setting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn disable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
	}

	/// Enables a notification by the device for a specific descriptor.
	fn enable_specific(&mut self, desc: EventSuppressDesc) {
		// Check if VIRTIO_F_EVENT_IDX has been negotiated
		if self.f_notif_idx {
			self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
			self.raw.desc = desc;
		}
	}
}

impl DevNotif {
	/// Enables the notification capability for a specific buffer.
	#[expect(dead_code)]
	pub fn enable_notif_specific(&mut self) {
		self.f_notif_idx = true;
	}

	/// Reads the notification flags and returns true if notifications
	/// are enabled, else false.
	fn is_notif(&self) -> bool {
		self.raw.flags.desc_event_flags() == RingEventFlags::Enable
	}

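	/// Returns the descriptor position for which the device has requested a
	/// notification, if descriptor-specific notifications are in use.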
	fn notif_specific(&self) -> Option<EventSuppressDesc> {
		if !self.f_notif_idx {
			return None;
		}

		if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
			return None;
		}

		Some(self.raw.desc)
	}
}

/// Packed virtqueue which provides the functionality as described in the
/// virtio specification v1.1. - 2.7
pub struct PackedVq {
	/// Ring which allows easy access to the raw ring structure of the
	/// specification
	descr_ring: DescriptorRing,
	/// Allows telling the device whether notifications are wanted
	drv_event: DrvNotif,
	/// Allows checking whether the device wants a notification
	dev_event: DevNotif,
	/// Actually notifies the device about available buffers
	notif_ctrl: NotifCtrl,
	/// The size of the queue, which equals the number of descriptors that can
	/// be used
	size: u16,
	/// The virtqueue's index. This identifies the virtqueue to the
	/// device and is unique on a per-device basis.
	index: u16,
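	/// The ring position (offset and wrap counter) that was last communicated
	/// to the device in a notification. It is used to decide whether a
	/// descriptor-specific notification request by the device falls into the
	/// range of descriptors made available since then.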
	last_next: Cell<EventSuppressDesc>,
}

// Public interface of PackedVq
// This interface is also public in order to allow people to use the PackedVq directly!
impl Virtq for PackedVq {
	fn enable_notifs(&mut self) {
		self.drv_event.enable_notif();
	}

	fn disable_notifs(&mut self) {
		self.drv_event.disable_notif();
	}

	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.descr_ring.try_recv()
	}

	fn dispatch_batch(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

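		// Notify the device if it either wants notifications unconditionally
		// or has requested a notification for a specific descriptor position
		// that lies within the range of descriptors just made available.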
		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch_batch_await(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.descr_ring.push(transfer_tkn)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		// FIXME: impl PartialEq for EventSuppressDesc in virtio-spec instead of converting into bits.
		let notif_specific = self
			.dev_event
			.notif_specific()
			.map(EventSuppressDesc::into_bits)
			== Some(self.last_next.get().into_bits());

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn index(&self) -> u16 {
		self.index
	}

	fn size(&self) -> u16 {
		self.size
	}

	fn has_used_buffers(&self) -> bool {
		let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index.desc_event_off())];
		self.descr_ring.is_marked_used(desc.flags)
	}
}

impl VirtqPrivate for PackedVq {
	type Descriptor = pvirtq::Desc;

	fn create_indirect_ctrl(
		buffer_tkn: &AvailBufferToken,
	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
		Ok(Self::descriptor_iter(buffer_tkn)?
			.collect::<Vec<_>>()
			.into_boxed_slice())
	}
}

impl PackedVq {
	#[allow(dead_code)]
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		max_size: u16,
		index: u16,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Currently, we do not have support for in-order use.
		// This stems from the fact that the PackedVq ReadCtrl is currently not
		// able to derive other finished transfers from a used-buffer notification.
		// In order to allow this, the queue MUST track the sequence in which
		// TransferTokens are inserted into the queue. Furthermore, the queue should
		// carry a feature u64 in order to check which features are currently used
		// and adjust its ReadCtrl accordingly.
		if features.contains(virtio::F::IN_ORDER) {
			info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
			return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
		}

		// Get a handler to the queue's configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index) else {
			return Err(VirtqError::QueueNotExisting(index));
		};

		// Must catch a zero size, as it is not allowed for packed queues.
		// Must catch sizes larger than 0x8000 (2^15), as they are not allowed for packed queues.
		//
		// See Virtio specification v1.1. - 4.1.4.3.2
		let vq_size = if (max_size == 0) || (max_size > 0x8000) {
			return Err(VirtqError::QueueSizeNotAllowed(max_size));
		} else {
			vq_handler.set_vq_size(max_size)
		};

		let mut descr_ring = DescriptorRing::new(vq_size);
		// Allocate the event suppression structures in device-accessible memory and leak them.
		let _mem_len =
			core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

		let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		// TODO: make this safe using zerocopy
		let drv_event = unsafe { drv_event.assume_init() };
		let dev_event = unsafe { dev_event.assume_init() };
		let drv_event = Box::leak(drv_event);
		let dev_event = Box::leak(dev_event);

		// Provide the memory areas of the queue's data structures to the device
		vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
		// Casting to usize is safe here, as the *mut EventSuppress raw pointer is a thin pointer of size usize
		vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
		vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));

		let mut drv_event = DrvNotif {
			f_notif_idx: false,
			raw: drv_event,
		};

		let dev_event = DevNotif {
			f_notif_idx: false,
			raw: dev_event,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		if features.contains(virtio::F::EVENT_IDX) {
			drv_event.f_notif_idx = true;
		}

		vq_handler.enable_queue();

		info!("Created PackedVq: idx={index}, size={vq_size}");

		Ok(PackedVq {
			descr_ring,
			drv_event,
			dev_event,
			notif_ctrl,
			size: vq_size,
			index,
			last_next: Cell::default(),
		})
	}
}