hermit/drivers/virtio/virtqueue/packed.rs

//! This module contains Virtio's packed virtqueue.
//! See Virtio specification v1.1. - 2.7
#![allow(dead_code)]

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::ops;
use core::sync::atomic::{Ordering, fence};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
	AvailBufferToken, BufferType, MemDescrId, MemPool, TransferToken, UsedBufferToken, Virtq,
	VirtqPrivate, VqIndex, VqSize,
};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

#[derive(Default, PartialEq, Eq, Clone, Copy, Debug)]
struct RingIdx {
	off: u16,
	wrap: u8,
}

trait RingIndexRange {
	fn wrapping_contains(&self, item: &RingIdx) -> bool;
}

impl RingIndexRange for ops::Range<RingIdx> {
	fn wrapping_contains(&self, item: &RingIdx) -> bool {
		let ops::Range { start, end } = self;

		if start.wrap == end.wrap {
			item.wrap == start.wrap && start.off <= item.off && item.off < end.off
		} else if item.wrap == start.wrap {
			start.off <= item.off
		} else {
			debug_assert!(item.wrap == end.wrap);
			item.off < end.off
		}
	}
}
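// Illustrative example (values are hypothetical, not taken from the driver):
// a range that spans the ring's wrap-around point, e.g.
// `RingIdx { off: 6, wrap: 1 }..RingIdx { off: 2, wrap: 0 }`,
// contains `RingIdx { off: 7, wrap: 1 }` and `RingIdx { off: 1, wrap: 0 }`
// according to `wrapping_contains`, but not `RingIdx { off: 3, wrap: 0 }`,
// since the latter lies past the exclusive end.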

/// A newtype over bool used for convenience in the context of
/// the packed queue's wrap counters.
///
/// For more details see Virtio specification v1.1. - 2.7.1
#[derive(Copy, Clone, Debug)]
struct WrapCount(bool);

impl WrapCount {
	/// Returns a mask that selects only the wrap count specific flag bits.
	fn flag_mask() -> virtq::DescF {
		virtq::DescF::AVAIL | virtq::DescF::USED
	}

	/// Returns a new WrapCount struct initialized to true or 1.
	///
	/// See virtio specification v1.1. - 2.7.1
	fn new() -> Self {
		WrapCount(true)
	}

	/// Toggles the wrap count to the respective other value.
	///
	/// WrapCount(true) becomes WrapCount(false) and vice versa.
	fn wrap(&mut self) {
		self.0 = !self.0;
	}
}
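// For illustration: both the driver's and the device's wrap counter start at
// true (1). Each time the respective side steps past the last ring slot, its
// counter flips, so descriptors written in the new "lap" around the ring can
// be told apart from stale ones of the previous lap via the AVAIL/USED flag
// encoding used further below.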

/// Structure which allows controlling the raw ring and operating on it easily.
struct DescriptorRing {
	ring: Box<[pvirtq::Desc], DeviceAlloc>,
	tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

	// Controlling variables for the ring
	//
	/// Where to insert available descriptors next
	write_index: u16,
	/// How many descriptors can still be inserted
	capacity: u16,
	/// Where to expect the next used descriptor by the device
	poll_index: u16,
	/// See Virtio specification v1.1. - 2.7.1
	drv_wc: WrapCount,
	dev_wc: WrapCount,
	/// Memory pool controls the amount of "free floating" descriptors
	/// See [MemPool] docs for details.
	mem_pool: MemPool,
}

impl DescriptorRing {
	fn new(size: u16) -> Self {
		let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

		// `Box` is not Clone, so neither is `None::<Box<_>>`. Hence, we need to produce `None`s with a closure.
		let tkn_ref_ring = core::iter::repeat_with(|| None)
			.take(size.into())
			.collect::<Vec<_>>()
			.into_boxed_slice();

		DescriptorRing {
			ring,
			tkn_ref_ring,
			write_index: 0,
			capacity: size,
			poll_index: 0,
			drv_wc: WrapCount::new(),
			dev_wc: WrapCount::new(),
			mem_pool: MemPool::new(size),
		}
	}

	/// Polls the ring at the current poll index and converts a finished
	/// transfer into a [UsedBufferToken], if there is one.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		let mut ctrl = self.get_read_ctrler();

		ctrl.poll_next()
			.map(|(tkn, written_len)| {
				UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
			})
			.ok_or(VirtqError::NoNewUsed)
	}

	fn push_batch(
		&mut self,
		tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
	) -> Result<RingIdx, VirtqError> {
		// The first token is handled separately: its control settings are saved
		// so that its first descriptor can be made available last (see below).
		// Empty batches are rejected.

		let first_ctrl_settings;
		let first_buffer;
		let mut ctrl;

		let mut tkn_iterator = tkn_lst.into_iter();
		if let Some(first_tkn) = tkn_iterator.next() {
			ctrl = self.push_without_making_available(&first_tkn)?;
			first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
			first_buffer = first_tkn;
		} else {
			// Empty batches are an error
			return Err(VirtqError::BufferNotSpecified);
		}
		// Push the remaining tokens (if any)
		for tkn in tkn_iterator {
			ctrl.make_avail(tkn);
		}

		// Manually make the first buffer of the batch available last, so that
		// the device never observes a partially written chain.
		self.make_avail_with_state(
			first_buffer,
			first_ctrl_settings.0,
			first_ctrl_settings.1,
			first_ctrl_settings.2,
		);
		Ok(RingIdx {
			off: self.write_index,
			wrap: self.drv_wc.0.into(),
		})
	}

	fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<RingIdx, VirtqError> {
		self.push_batch([tkn])
	}

	fn push_without_making_available(
		&mut self,
		tkn: &TransferToken<pvirtq::Desc>,
	) -> Result<WriteCtrl<'_>, VirtqError> {
		if tkn.num_consuming_descr() > self.capacity {
			return Err(VirtqError::NoDescrAvail);
		}

		// Create a counter that wraps to the first element
		// after reaching the end of the ring.
		let mut ctrl = self.get_write_ctrler()?;

		// The important points here are:
		// * distinguish between indirect and direct buffers
		// * make the descriptors available in the right order (the first descriptor last) (VIRTIO spec v1.2 section 2.8.6)

		// The buffer uses indirect descriptors if the ctrl_desc field is Some.
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
			ctrl.write_desc(desc);
		} else {
			for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
				ctrl.write_desc(incomplete_desc);
			}
		}
		Ok(ctrl)
	}

	fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
		self.ring.as_mut_ptr()
	}

	/// Returns an initialized write controller in order
	/// to write to the queue correctly.
	fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
		let desc_id = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
		Ok(WriteCtrl {
			start: self.write_index,
			position: self.write_index,
			modulo: u16::try_from(self.ring.len()).unwrap(),
			first_flags: DescF::empty(),
			buff_id: desc_id,

			desc_ring: self,
		})
	}

	/// Returns an initialized read controller in order
	/// to read from the queue correctly.
	fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
		ReadCtrl {
			position: self.poll_index,
			modulo: u16::try_from(self.ring.len()).unwrap(),

			desc_ring: self,
		}
	}

	fn make_avail_with_state(
		&mut self,
		raw_tkn: TransferToken<pvirtq::Desc>,
		start: u16,
		buff_id: MemDescrId,
		first_flags: DescF,
	) {
		// Store the token so it can be retrieved again once the transfer is finished.
		self.tkn_ref_ring[usize::from(buff_id.0)] = Some(raw_tkn);
		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
		// See Virtio specification v1.1. - 2.7.21
		fence(Ordering::SeqCst);
		self.ring[usize::from(start)].flags = first_flags;
	}

	/// Returns the [DescF] with the avail and used flags set in accordance
	/// with the VIRTIO specification v1.2 - 2.8.1 (i.e. the avail flag set to match
	/// the driver WrapCount and the used flag set to NOT match the WrapCount).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. the device wrap counter) by accident.
	///
	/// A copy of the flags is taken instead of a mutable reference
	/// for the cases in which the modification of the flags needs to be
	/// deferred (e.g. patched dispatches, chained buffers).
	fn to_marked_avail(&self, mut flags: DescF) -> DescF {
		flags.set(virtq::DescF::AVAIL, self.drv_wc.0);
		flags.set(virtq::DescF::USED, !self.drv_wc.0);
		flags
	}

	/// Checks the avail and used flags to see if the descriptor is marked
	/// as used by the device in accordance with the
	/// VIRTIO specification v1.2 - 2.8.1 (i.e. they match the device WrapCount).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. the driver wrap counter) by accident.
	fn is_marked_used(&self, flags: DescF) -> bool {
		if self.dev_wc.0 {
			flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
		} else {
			!flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
		}
	}
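	// For illustration (the values are assumed for the example, not read from a
	// device): with drv_wc.0 == true, to_marked_avail() yields AVAIL=1, USED=0
	// on the descriptor flags. Once the device processes the descriptor, it sets
	// both flags to its own wrap counter value (AVAIL=1, USED=1 while
	// dev_wc.0 == true), which is exactly what is_marked_used() checks for.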
}

struct ReadCtrl<'a> {
	/// Poll index of the ring at init of ReadCtrl
	position: u16,
	modulo: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
	/// Polls the ring for a new finished buffer. If a buffer is marked as finished,
	/// takes care of updating the queue and returns the respective TransferToken.
	fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
		// Check if the descriptor has been marked used.
		let desc = &self.desc_ring.ring[usize::from(self.position)];
		if self.desc_ring.is_marked_used(desc.flags) {
			let buff_id = desc.id.to_ne();
			let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
				.take()
				.expect(
					"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
				);

			// Check whether the device has written anything to the buffer. If so, we determine
			// the overall written length, which is necessary to provide the driver with correct
			// access to the usable data.
			//
			// According to the standard the device signals solely via the first written descriptor
			// whether anything has been written to the write descriptors of a buffer.
			// See Virtio specification v1.1. - 2.7.4
			//                                - 2.7.5
			//                                - 2.7.6
			// let mut write_len = if self.desc_ring.ring[self.position].flags & DescrFlags::VIRTQ_DESC_F_WRITE == DescrFlags::VIRTQ_DESC_F_WRITE {
			//      self.desc_ring.ring[self.position].len
			//  } else {
			//      0
			//  };
			//
			// INFO:
			// Due to the behavior of the currently used devices and the virtio code from the Linux
			// kernel, we assume that devices do NOT set this flag correctly upon writes. Hence we
			// omit the check in order to receive data.

			// We need to read the written length before advancing the position.
			let write_len = desc.len.to_ne();

			for _ in 0..tkn.num_consuming_descr() {
				self.incrmt();
			}
			self.desc_ring.mem_pool.ret_id(MemDescrId(buff_id));

			Some((tkn, write_len))
		} else {
			None
		}
	}

	fn incrmt(&mut self) {
		if self.desc_ring.poll_index + 1 == self.modulo {
			self.desc_ring.dev_wc.wrap();
		}

		// Increment capacity as we have one more free slot now!
		assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
		self.desc_ring.capacity += 1;

		self.desc_ring.poll_index = (self.desc_ring.poll_index + 1) % self.modulo;
		self.position = self.desc_ring.poll_index;
	}
}

/// Convenience struct which allows descriptors to be written into the queue.
/// The struct takes care of updating the queue state correctly and of
/// writing the correct flags.
struct WriteCtrl<'a> {
	/// Where the write of the buffer started in the descriptor ring.
	/// This is important, as this descriptor must be made available
	/// last.
	start: u16,
	/// Where to write next. This should always be equal to the ring's
	/// write_index field.
	position: u16,
	modulo: u16,
	/// The [pvirtq::Desc::flags] value for the first descriptor, the write of which is deferred.
	first_flags: DescF,
	/// Buffer ID of this write
	buff_id: MemDescrId,

	desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
	/// **This function MUST only be used within the WriteCtrl.write_desc() function!**
	///
	/// Increments the index by one. The index wraps around to zero
	/// after reaching (modulo - 1).
	///
	/// Also takes care of wrapping the WrapCount of the associated
	/// DescriptorRing.
	fn incrmt(&mut self) {
		// First check if we are at all allowed to write a descriptor
		assert!(self.desc_ring.capacity != 0);
		self.desc_ring.capacity -= 1;
		// Check if the increment wrapped around the end of the ring;
		// if so, also wrap the wrap counter.
		if self.position + 1 == self.modulo {
			self.desc_ring.drv_wc.wrap();
		}
		// Also update the write_index
		self.desc_ring.write_index = (self.desc_ring.write_index + 1) % self.modulo;

		self.position = (self.position + 1) % self.modulo;
	}

	/// Completes the descriptor flags and id, and writes into the queue at the correct position.
	fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
		incomplete_desc.id = self.buff_id.0.into();
		if self.start == self.position {
			// We save what the flags value for the first descriptor will be, to be able
			// to write it later when all the other descriptors are written (so that
			// the device does not see an incomplete chain).
			self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		} else {
			// Set avail and used according to the current WrapCount.
			incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		}
		self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
		self.incrmt();
	}

	fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
		// We fail if one wants to make a buffer available without having inserted a single element!
		assert!(self.start != self.position);
		self.desc_ring
			.make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
	}
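	// Sketch of the deferred flag write (slot numbers are only illustrative):
	// for a chain of three descriptors starting at slot 4, write_desc() fills
	// slots 4, 5 and 6, but only slots 5 and 6 receive their avail/used flags
	// immediately. The flags for slot 4 are kept in `first_flags` and are only
	// written by make_avail()/make_avail_with_state() after the memory fence,
	// so the device never sees a partially written chain.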
}

/// A wrapper to implement the correct functionality on top of the
/// `EventSuppress` structure for the driver's notification settings.
/// The Driver Event Suppression structure is read-only for the device
/// and controls the used buffer notifications sent by the device to the driver.
struct DrvNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure to write to; it is read by the device to decide
	/// whether to send used buffer notifications.
	raw: &'static mut pvirtq::EventSuppress,
}

/// A wrapper to implement the correct functionality on top of the
/// `EventSuppress` structure for the device's notification settings.
/// The Device Event Suppression structure is read-only for the driver
/// and controls the available buffer notifications sent by the driver to the device.
struct DevNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure to read from, to check whether the device wants
	/// available buffer notifications.
	raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
	/// Enables notifications by unsetting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn enable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
	}

	/// Disables notifications by setting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn disable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
	}

	/// Enables a notification by the device for a specific descriptor.
	fn enable_specific(&mut self, idx: RingIdx) {
		// Check if VIRTIO_F_RING_EVENT_IDX has been negotiated
		if self.f_notif_idx {
			self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
			self.raw.desc = EventSuppressDesc::new()
				.with_desc_event_off(idx.off)
				.with_desc_event_wrap(idx.wrap);
		}
	}
}

impl DevNotif {
	/// Enables the notification capability for a specific buffer.
	pub fn enable_notif_specific(&mut self) {
		self.f_notif_idx = true;
	}

	/// Reads the notification bit (i.e. LSB) and returns its value.
	/// Returns true if notifications are enabled, else false.
	fn is_notif(&self) -> bool {
		self.raw.flags.desc_event_flags() == RingEventFlags::Enable
	}

	fn notif_specific(&self) -> Option<RingIdx> {
		if !self.f_notif_idx {
			return None;
		}

		if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
			return None;
		}

		let off = self.raw.desc.desc_event_off();
		let wrap = self.raw.desc.desc_event_wrap();

		Some(RingIdx { off, wrap })
	}
}
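// For illustration: with VIRTIO_F_RING_EVENT_IDX negotiated, the device may
// publish e.g. RingIdx { off: 5, wrap: 1 } via its event suppression area.
// notif_specific() then returns that index, and dispatch_batch() below only
// sends an available buffer notification once a pushed batch actually covers
// that ring position (see its wrapping_contains() range check), unless
// notifications are enabled globally via is_notif().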

/// Packed virtqueue which provides the functionality as described in the
/// virtio specification v1.1. - 2.7
pub struct PackedVq {
	/// Ring which allows easy access to the raw ring structure of the
	/// specification
	descr_ring: DescriptorRing,
	/// Allows telling the device whether notifications are wanted
	drv_event: DrvNotif,
	/// Allows checking whether the device wants a notification
	dev_event: DevNotif,
	/// Actually notifies the device about available buffers
	notif_ctrl: NotifCtrl,
	/// The size of the queue, equals the number of descriptors which can
	/// be used
	size: VqSize,
	/// The virtqueue's index. This identifies the virtqueue to the
	/// device and is unique on a per-device basis.
	index: VqIndex,
	last_next: Cell<RingIdx>,
}

// Public interface of PackedVq
// This interface is also public in order to allow people to use the PackedVq directly!
impl Virtq for PackedVq {
	fn enable_notifs(&mut self) {
		self.drv_event.enable_notif();
	}

	fn disable_notifs(&mut self) {
		self.drv_event.disable_notif();
	}

	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.descr_ring.try_recv()
	}

	fn dispatch_batch(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch_batch_await(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.descr_ring.push(transfer_tkn)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let notif_specific = self.dev_event.notif_specific() == Some(self.last_next.get());

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn index(&self) -> VqIndex {
		self.index
	}

	fn size(&self) -> VqSize {
		self.size
	}

	fn has_used_buffers(&self) -> bool {
		let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index)];
		self.descr_ring.is_marked_used(desc.flags)
	}
}
645
646impl VirtqPrivate for PackedVq {
647	type Descriptor = pvirtq::Desc;
648
649	fn create_indirect_ctrl(
650		buffer_tkn: &AvailBufferToken,
651	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
652		Ok(Self::descriptor_iter(buffer_tkn)?
653			.collect::<Vec<_>>()
654			.into_boxed_slice())
655	}
656}
657
658impl PackedVq {
659	pub(crate) fn new(
660		com_cfg: &mut ComCfg,
661		notif_cfg: &NotifCfg,
662		size: VqSize,
663		index: VqIndex,
664		features: virtio::F,
665	) -> Result<Self, VirtqError> {
666		// Currently we do not have support for in order use.
667		// This steems from the fact, that the packedVq ReadCtrl currently is not
668		// able to derive other finished transfer from a used-buffer notification.
669		// In order to allow this, the queue MUST track the sequence in which
670		// TransferTokens are inserted into the queue. Furthermore the Queue should
671		// carry a feature u64 in order to check which features are used currently
672		// and adjust its ReadCtrl accordingly.
673		if features.contains(virtio::F::IN_ORDER) {
674			info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
675			return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
676		}
677
678		// Get a handler to the queues configuration area.
679		let Some(mut vq_handler) = com_cfg.select_vq(index.into()) else {
680			return Err(VirtqError::QueueNotExisting(index.into()));
681		};
682
683		// Must catch zero size as it is not allowed for packed queues.
684		// Must catch size larger 0x8000 (2^15) as it is not allowed for packed queues.
685		//
686		// See Virtio specification v1.1. - 4.1.4.3.2
687		let vq_size = if (size.0 == 0) | (size.0 > 0x8000) {
688			return Err(VirtqError::QueueSizeNotAllowed(size.0));
689		} else {
690			vq_handler.set_vq_size(size.0)
691		};
692
693		let mut descr_ring = DescriptorRing::new(vq_size);
694		// Allocate heap memory via a vec, leak and cast
695		let _mem_len =
696			core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);
697
698		let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
699		let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
700		// TODO: make this safe using zerocopy
701		let drv_event = unsafe { drv_event.assume_init() };
702		let dev_event = unsafe { dev_event.assume_init() };
703		let drv_event = Box::leak(drv_event);
704		let dev_event = Box::leak(dev_event);
705
706		// Provide memory areas of the queues data structures to the device
707		vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
708		// As usize is safe here, as the *mut EventSuppr raw pointer is a thin pointer of size usize
709		vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
710		vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));
711
712		let mut drv_event = DrvNotif {
713			f_notif_idx: false,
714			raw: drv_event,
715		};
716
717		let dev_event = DevNotif {
718			f_notif_idx: false,
719			raw: dev_event,
720		};
721
722		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));
723
724		if features.contains(virtio::F::NOTIFICATION_DATA) {
725			notif_ctrl.enable_notif_data();
726		}
727
728		if features.contains(virtio::F::EVENT_IDX) {
729			drv_event.f_notif_idx = true;
730		}
731
732		vq_handler.enable_queue();
733
734		info!("Created PackedVq: idx={}, size={}", index.0, vq_size);
735
736		Ok(PackedVq {
737			descr_ring,
738			drv_event,
739			dev_event,
740			notif_ctrl,
741			size: VqSize::from(vq_size),
742			index,
743			last_next: Cell::default(),
744		})
745	}
746}