hermit/drivers/virtio/virtqueue/packed.rs

//! `pvirtq` infrastructure.
//!
//! The main type of this module is [`PackedVq`].
//!
//! For details on packed virtqueues, see [Packed Virtqueues].
//! For details on the Rust definitions, see [`virtio::pvirtq`].
//!
//! [Packed Virtqueues]: https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-720008

#![allow(dead_code)]

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::ops;
use core::sync::atomic::{Ordering, fence};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
	AvailBufferToken, BufferType, MemPool, TransferToken, UsedBufferToken, Virtq, VirtqPrivate,
};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;
/// A position in the descriptor ring together with the wrap counter value at that position.
#[derive(Default, PartialEq, Eq, Clone, Copy, Debug)]
struct RingIdx {
	off: u16,
	wrap: u8,
}

trait RingIndexRange {
	/// Checks whether `item` lies in a range of ring indices that may wrap
	/// around the end of the ring.
	fn wrapping_contains(&self, item: &RingIdx) -> bool;
}

impl RingIndexRange for ops::Range<RingIdx> {
	fn wrapping_contains(&self, item: &RingIdx) -> bool {
		let ops::Range { start, end } = self;

		if start.wrap == end.wrap {
			item.wrap == start.wrap && start.off <= item.off && item.off < end.off
		} else if item.wrap == start.wrap {
			start.off <= item.off
		} else {
			debug_assert!(item.wrap == end.wrap);
			item.off < end.off
		}
	}
}
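
// Illustrative note (not from the original source): `wrapping_contains` treats
// the range as wrapping around the end of the ring. For example, assuming the
// driver wrap counter flipped from 1 to 0 between `start` and `end`:
//
//     let range = RingIdx { off: 5, wrap: 1 }..RingIdx { off: 2, wrap: 0 };
//     range.wrapping_contains(&RingIdx { off: 7, wrap: 1 }); // true
//     range.wrapping_contains(&RingIdx { off: 1, wrap: 0 }); // true
//     range.wrapping_contains(&RingIdx { off: 3, wrap: 0 }); // false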

/// Structure which allows controlling the raw ring and operating on it easily.
struct DescriptorRing {
	ring: Box<[pvirtq::Desc], DeviceAlloc>,
	tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

	// Controlling variables for the ring
	//
	/// Where to insert available descriptors next
	write_index: u16,
	/// How many descriptors can be inserted
	capacity: u16,
	/// Where to expect the next used descriptor by the device
	poll_index: u16,
	/// Driver wrap counter.
	///
	/// See Virtio specification v1.1. - 2.7.1
	drv_wc: bool,
	/// Device wrap counter.
	dev_wc: bool,
	/// Memory pool controls the number of "free floating" descriptors.
	/// See [MemPool] docs for details.
	mem_pool: MemPool,
}

impl DescriptorRing {
	fn new(size: u16) -> Self {
		let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

		// `Box` is not Clone, so neither is `None::<Box<_>>`. Hence, we need to produce `None`s with a closure.
		let tkn_ref_ring = core::iter::repeat_with(|| None)
			.take(size.into())
			.collect::<Vec<_>>()
			.into_boxed_slice();

		DescriptorRing {
			ring,
			tkn_ref_ring,
			write_index: 0,
			capacity: size,
			poll_index: 0,
			drv_wc: true,
			dev_wc: true,
			mem_pool: MemPool::new(size),
		}
	}

	/// Polls the ring at the poll index and, if the device has marked the
	/// buffer as used, returns the corresponding [UsedBufferToken].
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		let mut ctrl = self.get_read_ctrler();

		ctrl.poll_next()
			.map(|(tkn, written_len)| {
				UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
			})
			.ok_or(VirtqError::NoNewUsed)
	}

	fn push_batch(
		&mut self,
		tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
	) -> Result<RingIdx, VirtqError> {
		// Catch empty pushes early, so that first_ctrl_settings and first_buffer
		// are always initialized before they are used below.

		let first_ctrl_settings;
		let first_buffer;
		let mut ctrl;

		let mut tkn_iterator = tkn_lst.into_iter();
		if let Some(first_tkn) = tkn_iterator.next() {
			ctrl = self.push_without_making_available(&first_tkn)?;
			first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
			first_buffer = first_tkn;
		} else {
			// Empty batches are an error
			return Err(VirtqError::BufferNotSpecified);
		}
		// Push the remaining tokens (if any)
		for tkn in tkn_iterator {
			ctrl.make_avail(tkn);
		}

		// Manually make the first buffer of the batch available last,
		// as required by the specification.
		self.make_avail_with_state(
			first_buffer,
			first_ctrl_settings.0,
			first_ctrl_settings.1,
			first_ctrl_settings.2,
		);
		Ok(RingIdx {
			off: self.write_index,
			wrap: self.drv_wc.into(),
		})
	}
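
	// Illustrative note (not from the original source): the flags of the first
	// token's head descriptor are not written together with the rest of the
	// chain. They are kept in `first_ctrl_settings` and only written by
	// make_avail_with_state after a memory fence, so the device never observes
	// the batch before all of its descriptors have been fully written
	// (see VIRTIO spec v1.2 section 2.8.6).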

	fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<RingIdx, VirtqError> {
		self.push_batch([tkn])
	}

	fn push_without_making_available(
		&mut self,
		tkn: &TransferToken<pvirtq::Desc>,
	) -> Result<WriteCtrl<'_>, VirtqError> {
		if tkn.num_consuming_descr() > self.capacity {
			return Err(VirtqError::NoDescrAvail);
		}

		// Create a counter that wraps to the first element
		// after reaching the end of the ring.
		let mut ctrl = self.get_write_ctrler()?;

		// What is important here:
		// * distinguish between indirect and direct buffers
		// * make them available in the right order (the first descriptor last) (VIRTIO Spec. v1.2 section 2.8.6)

		// The buffer uses indirect descriptors if the ctrl_desc field is Some.
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
			ctrl.write_desc(desc);
		} else {
			for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
				ctrl.write_desc(incomplete_desc);
			}
		}
		Ok(ctrl)
	}

	fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
		self.ring.as_mut_ptr()
	}

	/// Returns an initialized write controller in order
	/// to write to the queue correctly.
	fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
		let desc_id = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
		Ok(WriteCtrl {
			start: self.write_index,
			position: self.write_index,
			modulo: u16::try_from(self.ring.len()).unwrap(),
			first_flags: DescF::empty(),
			buff_id: desc_id,

			desc_ring: self,
		})
	}

	/// Returns an initialized read controller in order
	/// to read from the queue correctly.
	fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
		ReadCtrl {
			position: self.poll_index,
			modulo: u16::try_from(self.ring.len()).unwrap(),

			desc_ring: self,
		}
	}

	fn make_avail_with_state(
		&mut self,
		raw_tkn: TransferToken<pvirtq::Desc>,
		start: u16,
		buff_id: u16,
		first_flags: DescF,
	) {
		// Store a reference to the token, so it can be retrieved once the transfer has finished.
		self.tkn_ref_ring[usize::from(buff_id)] = Some(raw_tkn);
		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
		// See Virtio specification v1.1. - 2.7.21
		fence(Ordering::SeqCst);
		self.ring[usize::from(start)].flags = first_flags;
	}

	/// Returns the [DescF] with the avail and used flags set in accordance
	/// with the VIRTIO specification v1.2 - 2.8.1 (i.e. avail flag set to match
	/// the driver wrap counter and the used flag set to NOT match the wrap counter).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. device wrap counter) by accident.
	///
	/// A copy of the flag is taken instead of a mutable reference
	/// for the cases in which the modification of the flag needs to be
	/// deferred (e.g. patched dispatches, chained buffers).
	fn to_marked_avail(&self, mut flags: DescF) -> DescF {
		flags.set(virtq::DescF::AVAIL, self.drv_wc);
		flags.set(virtq::DescF::USED, !self.drv_wc);
		flags
	}

	/// Checks the avail and used flags to see if the descriptor is marked
	/// as used by the device in accordance with the
	/// VIRTIO specification v1.2 - 2.8.1 (i.e. they match the device wrap counter)
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. driver wrap counter) by accident.
	fn is_marked_used(&self, flags: DescF) -> bool {
		if self.dev_wc {
			flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
		} else {
			!flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
		}
	}
}
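
// Illustrative note (not from the original source): per VIRTIO v1.2 - 2.8.1,
// a descriptor made available by the driver carries AVAIL == drv_wc and
// USED == !drv_wc, while the device marks it used by making both flags equal
// to its own wrap counter. With drv_wc == true, to_marked_avail behaves
// roughly like this:
//
//     let mut flags = DescF::empty();
//     flags.set(DescF::AVAIL, true);  // matches drv_wc
//     flags.set(DescF::USED, false);  // inverse of drv_wc
//     // The device later rewrites the flags such that
//     // AVAIL == USED == dev_wc, which is what is_marked_used() checks.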

struct ReadCtrl<'a> {
	/// Poll index of the ring at init of ReadCtrl
	position: u16,
	modulo: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
	/// Polls the ring for a new finished buffer. If a buffer is marked as used, takes care of
	/// updating the queue and returns the respective TransferToken.
	fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
		// Check if descriptor has been marked used.
		let desc = &self.desc_ring.ring[usize::from(self.position)];
		if self.desc_ring.is_marked_used(desc.flags) {
			let buff_id = desc.id.to_ne();
			let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
				.take()
				.expect(
					"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
				);

			// Check whether anything has been written to the buffer. If so, we determine the overall
			// written length, which is necessary to give the driver correct access to the usable data.
			//
			// According to the standard, the device signals solely via the first written descriptor whether
			// anything has been written to the write descriptors of a buffer.
			// See Virtio specification v1.1. - 2.7.4
			//                                - 2.7.5
			//                                - 2.7.6
			// let mut write_len = if self.desc_ring.ring[self.position].flags & DescrFlags::VIRTQ_DESC_F_WRITE == DescrFlags::VIRTQ_DESC_F_WRITE {
			//      self.desc_ring.ring[self.position].len
			//  } else {
			//      0
			//  };
			//
			// INFO:
			// Due to the behavior of the currently used devices and the virtio code from the Linux kernel, we assume that devices do NOT set this
			// flag correctly upon writes. Hence we omit the check, in order to receive data.

			// We need to read the written length before advancing the position.
			let write_len = desc.len.to_ne();

			for _ in 0..tkn.num_consuming_descr() {
				self.incrmt();
			}
			self.desc_ring.mem_pool.ret_id(buff_id);

			Some((tkn, write_len))
		} else {
			None
		}
	}

	fn incrmt(&mut self) {
		if self.desc_ring.poll_index + 1 == self.modulo {
			self.desc_ring.dev_wc ^= true;
		}

		// Increment capacity as we have one more free descriptor now!
		assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
		self.desc_ring.capacity += 1;

		self.desc_ring.poll_index = (self.desc_ring.poll_index + 1) % self.modulo;
		self.position = self.desc_ring.poll_index;
	}
}

/// Helper struct that allows to conveniently write descriptors into the queue.
/// The struct takes care of updating the state of the queue correctly and of
/// writing the correct flags.
struct WriteCtrl<'a> {
	/// Where the write of the buffer started in the descriptor ring.
	/// This is important, as we must make this descriptor available
	/// last.
	start: u16,
	/// Where to write next. This should always be equal to the ring's
	/// write_index field.
	position: u16,
	modulo: u16,
	/// The [pvirtq::Desc::flags] value for the first descriptor, the write of which is deferred.
	first_flags: DescF,
	/// Buffer ID of this write
	buff_id: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
	/// **This function MUST only be used within the WriteCtrl.write_desc() function!**
	///
	/// Increments the index by one. The index wraps around to zero
	/// after reaching (modulo - 1).
	///
	/// Also takes care of wrapping the wrap counter of the associated
	/// DescriptorRing.
	fn incrmt(&mut self) {
		// First, check if we are at all allowed to write a descriptor
		assert!(self.desc_ring.capacity != 0);
		self.desc_ring.capacity -= 1;
		// Check if the increment wrapped around the end of the ring;
		// if so, also flip the wrap counter.
		if self.position + 1 == self.modulo {
			self.desc_ring.drv_wc ^= true;
		}
		// Also update the write_index
		self.desc_ring.write_index = (self.desc_ring.write_index + 1) % self.modulo;

		self.position = (self.position + 1) % self.modulo;
	}

	/// Completes the descriptor flags and id, and writes into the queue at the correct position.
	fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
		incomplete_desc.id = self.buff_id.into();
		if self.start == self.position {
			// We save what the flags value for the first descriptor will be to be able
			// to write it later when all the other descriptors are written (so that
			// the device does not see an incomplete chain).
			self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		} else {
			// Set avail and used according to the current wrap counter.
			incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		}
		self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
		self.incrmt();
	}

	fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
		// We fail if one wants to make a buffer available without inserting one element!
		assert!(self.start != self.position);
		self.desc_ring
			.make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
	}
}

/// A type implementing the correct functionality upon
/// the `EventSuppress` structure for driver notification settings.
/// The Driver Event Suppression structure is read-only by the device
/// and controls the used buffer notifications sent by the device to the driver.
struct DrvNotif {
	/// Indicates if VIRTIO_F_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure to write to, in order to tell the device which notifications the driver wants
	raw: &'static mut pvirtq::EventSuppress,
}

/// A type implementing the correct functionality upon
/// the `EventSuppress` structure for device notification settings.
/// The Device Event Suppression structure is read-only by the driver
/// and controls the available buffer notifications sent by the driver to the device.
struct DevNotif {
	/// Indicates if VIRTIO_F_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure to read from, if the device wants notifications
	raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
	/// Enables notifications by unsetting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn enable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
	}

	/// Disables notifications by setting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn disable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
	}

	/// Enables a notification by the device for a specific descriptor.
	fn enable_specific(&mut self, idx: RingIdx) {
		// Check if VIRTIO_F_EVENT_IDX has been negotiated
		if self.f_notif_idx {
			self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
			self.raw.desc = EventSuppressDesc::new()
				.with_desc_event_off(idx.off)
				.with_desc_event_wrap(idx.wrap);
		}
	}
}

impl DevNotif {
	/// Enables the notification capability for a specific buffer.
	pub fn enable_notif_specific(&mut self) {
		self.f_notif_idx = true;
	}

	/// Reads the notification bit (i.e. LSB) and returns its value:
	/// `true` if notifications are enabled, else `false`.
	fn is_notif(&self) -> bool {
		self.raw.flags.desc_event_flags() == RingEventFlags::Enable
	}

	fn notif_specific(&self) -> Option<RingIdx> {
		if !self.f_notif_idx {
			return None;
		}

		if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
			return None;
		}

		let off = self.raw.desc.desc_event_off();
		let wrap = self.raw.desc.desc_event_wrap();

		Some(RingIdx { off, wrap })
	}
}
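
// Illustrative note (not from the original source): the two event suppression
// structures are used symmetrically. The driver writes to DrvNotif to tell the
// device whether (and for which descriptor) it wants used-buffer notifications,
// e.g. `drv_event.enable_specific(next_idx)` after a dispatch. Conversely, it
// reads DevNotif to decide whether the device must be notified about newly
// available buffers, either unconditionally (`is_notif()`) or only when a
// specific ring index (`notif_specific()`) falls into the freshly pushed range.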

/// Packed virtqueue which provides the functionality as described in the
/// Virtio specification v1.1. - 2.7
pub struct PackedVq {
	/// Ring which allows easy access to the raw ring structure of the
	/// specification
	descr_ring: DescriptorRing,
	/// Allows telling the device if notifications are wanted
	drv_event: DrvNotif,
	/// Allows checking if the device wants a notification
	dev_event: DevNotif,
	/// Actually notifies the device about available buffers
	notif_ctrl: NotifCtrl,
	/// The size of the queue, equals the number of descriptors which can
	/// be used
	size: u16,
	/// The virtqueue's index. This identifies the virtqueue to the
	/// device and is unique on a per-device basis.
	index: u16,
	last_next: Cell<RingIdx>,
}

// Public interface of PackedVq
// This interface is also public in order to allow people to use the PackedVq directly!
impl Virtq for PackedVq {
	fn enable_notifs(&mut self) {
		self.drv_event.enable_notif();
	}

	fn disable_notifs(&mut self) {
		self.drv_event.disable_notif();
	}

	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.descr_ring.try_recv()
	}

	fn dispatch_batch(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch_batch_await(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.descr_ring.push(transfer_tkn)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let notif_specific = self.dev_event.notif_specific() == Some(self.last_next.get());

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn index(&self) -> u16 {
		self.index
	}

	fn size(&self) -> u16 {
		self.size
	}

	fn has_used_buffers(&self) -> bool {
		let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index)];
		self.descr_ring.is_marked_used(desc.flags)
	}
}
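
// Illustrative usage sketch (assumption, not from the original source): a
// device driver owning a `PackedVq` typically dispatches an available buffer
// and later polls for its completion roughly like this:
//
//     vq.dispatch(avail_buffer_token, false, BufferType::Direct)?;
//     // ... after the device has processed the buffer:
//     let used_buffer_token = vq.try_recv()?;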

impl VirtqPrivate for PackedVq {
	type Descriptor = pvirtq::Desc;

	fn create_indirect_ctrl(
		buffer_tkn: &AvailBufferToken,
	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
		Ok(Self::descriptor_iter(buffer_tkn)?
			.collect::<Vec<_>>()
			.into_boxed_slice())
	}
}

impl PackedVq {
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		size: u16,
		index: u16,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Currently we do not have support for in-order use.
		// This stems from the fact that the PackedVq ReadCtrl currently is not
		// able to derive other finished transfers from a used-buffer notification.
		// In order to allow this, the queue MUST track the sequence in which
		// TransferTokens are inserted into the queue. Furthermore, the queue should
		// carry a feature u64 in order to check which features are currently in use
		// and adjust its ReadCtrl accordingly.
		if features.contains(virtio::F::IN_ORDER) {
			info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
			return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
		}

		// Get a handler to the queue's configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index) else {
			return Err(VirtqError::QueueNotExisting(index));
		};

		// Must catch a zero size, as it is not allowed for packed queues.
		// Must catch sizes larger than 0x8000 (2^15), as they are not allowed for packed queues.
		//
		// See Virtio specification v1.1. - 4.1.4.3.2
		let vq_size = if (size == 0) || (size > 0x8000) {
			return Err(VirtqError::QueueSizeNotAllowed(size));
		} else {
			vq_handler.set_vq_size(size)
		};

		let mut descr_ring = DescriptorRing::new(vq_size);
		// Allocate the event suppression structures in device-accessible memory.
		let _mem_len =
			core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

		let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		// TODO: make this safe using zerocopy
		let drv_event = unsafe { drv_event.assume_init() };
		let dev_event = unsafe { dev_event.assume_init() };
		let drv_event = Box::leak(drv_event);
		let dev_event = Box::leak(dev_event);

		// Provide memory areas of the queue's data structures to the device
		vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
		// Casting to usize is safe here, as the *mut EventSuppress raw pointer is a thin pointer of size usize
		vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
		vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));

		let mut drv_event = DrvNotif {
			f_notif_idx: false,
			raw: drv_event,
		};

		let dev_event = DevNotif {
			f_notif_idx: false,
			raw: dev_event,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		if features.contains(virtio::F::EVENT_IDX) {
			drv_event.f_notif_idx = true;
		}

		vq_handler.enable_queue();

		info!("Created PackedVq: idx={index}, size={vq_size}");

		Ok(PackedVq {
			descr_ring,
			drv_event,
			dev_event,
			notif_ctrl,
			size: vq_size,
			index,
			last_next: Cell::default(),
		})
	}
}