hermit/drivers/virtio/virtqueue/packed.rs

//! This module contains Virtio's packed virtqueue.
//! See Virtio specification v1.1. - 2.7
#![allow(dead_code)]

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::sync::atomic::{Ordering, fence};
use core::{ops, ptr};

use align_address::Align;
use memory_addresses::PhysAddr;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
	AvailBufferToken, BufferType, MemDescrId, MemPool, TransferToken, UsedBufferToken, Virtq,
	VirtqPrivate, VqIndex, VqSize,
};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

#[derive(Default, PartialEq, Eq, Clone, Copy, Debug)]
struct RingIdx {
	off: u16,
	wrap: u8,
}

trait RingIndexRange {
	fn wrapping_contains(&self, item: &RingIdx) -> bool;
}

impl RingIndexRange for ops::Range<RingIdx> {
	fn wrapping_contains(&self, item: &RingIdx) -> bool {
		let ops::Range { start, end } = self;

		if start.wrap == end.wrap {
			item.wrap == start.wrap && start.off <= item.off && item.off < end.off
		} else if item.wrap == start.wrap {
			start.off <= item.off
		} else {
			debug_assert!(item.wrap == end.wrap);
			item.off < end.off
		}
	}
}

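// Illustrative sketch (not part of the original source): a range that crosses the ring's
// wrap boundary still contains indices on both sides of the wrap. Values are hypothetical.
//
//     let range = RingIdx { off: 250, wrap: 0 }..RingIdx { off: 4, wrap: 1 };
//     range.wrapping_contains(&RingIdx { off: 255, wrap: 0 }); // true, before the wrap
//     range.wrapping_contains(&RingIdx { off: 2, wrap: 1 });   // true, after the wrap
//     range.wrapping_contains(&RingIdx { off: 10, wrap: 1 });  // false, past `end`
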
/// A newtype of bool used for convenience in the context of
/// the packed queue's wrap counter.
///
/// For more details see Virtio specification v1.1. - 2.7.1
#[derive(Copy, Clone, Debug)]
struct WrapCount(bool);

impl WrapCount {
	/// Masks all other bits, besides the wrap count specific ones.
	fn flag_mask() -> virtq::DescF {
		virtq::DescF::AVAIL | virtq::DescF::USED
	}

	/// Returns a new WrapCount struct initialized to true or 1.
	///
	/// See virtio specification v1.1. - 2.7.1
	fn new() -> Self {
		WrapCount(true)
	}

	/// Toggles a given wrap count to the respective other value.
	///
	/// If WrapCount(true) returns WrapCount(false),
	/// if WrapCount(false) returns WrapCount(true).
	fn wrap(&mut self) {
		self.0 = !self.0;
	}
}

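// Hedged illustration (assumption, mirroring `DescriptorRing::to_marked_avail` below):
// the driver marks a descriptor available by setting AVAIL equal to its wrap counter and
// USED to the inverse; the device later marks it used by making both flags match its own
// wrap counter (Virtio specification v1.1. - 2.7.1).
//
//     drv_wc = true  ->  AVAIL = 1, USED = 0   // available to the device
//     drv_wc = false ->  AVAIL = 0, USED = 1
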
/// Structure that allows controlling the raw ring and operating on it easily.
struct DescriptorRing {
	ring: Box<[pvirtq::Desc], DeviceAlloc>,
	tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

	// Controlling variables for the ring
	//
	/// Where to insert available descriptors next
	write_index: u16,
	/// How many descriptors can be inserted
	capacity: u16,
	/// Where to expect the next used descriptor by the device
	poll_index: u16,
	/// See Virtio specification v1.1. - 2.7.1
	drv_wc: WrapCount,
	dev_wc: WrapCount,
	/// Memory pool controls the amount of "free floating" descriptors
	/// See [MemPool] docs for detail.
	mem_pool: MemPool,
}

impl DescriptorRing {
	fn new(size: u16) -> Self {
		let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

		// `Box` is not Clone, so neither is `None::<Box<_>>`. Hence, we need to produce `None`s with a closure.
		let tkn_ref_ring = core::iter::repeat_with(|| None)
			.take(size.into())
			.collect::<Vec<_>>()
			.into_boxed_slice();

		DescriptorRing {
			ring,
			tkn_ref_ring,
			write_index: 0,
			capacity: size,
			poll_index: 0,
			drv_wc: WrapCount::new(),
			dev_wc: WrapCount::new(),
			mem_pool: MemPool::new(size),
		}
	}

	/// Polls the poll index and sets the state of any finished TransferTokens.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		let mut ctrl = self.get_read_ctrler();

		ctrl.poll_next()
			.map(|(tkn, written_len)| {
				UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
			})
			.ok_or(VirtqError::NoNewUsed)
	}

	fn push_batch(
		&mut self,
		tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
	) -> Result<RingIdx, VirtqError> {
		// The first token is handled separately so that empty batches can be rejected
		// and so that the first buffer can be made available last (see below).

		let first_ctrl_settings;
		let first_buffer;
		let mut ctrl;

		let mut tkn_iterator = tkn_lst.into_iter();
		if let Some(first_tkn) = tkn_iterator.next() {
			ctrl = self.push_without_making_available(&first_tkn)?;
			first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
			first_buffer = first_tkn;
		} else {
			// Empty batches are an error
			return Err(VirtqError::BufferNotSpecified);
		}
		// Push the remaining tokens (if any)
		for tkn in tkn_iterator {
			ctrl.make_avail(tkn);
		}

		// Manually make the first buffer available last
		//
		// Providing the first buffer in the list manually
		self.make_avail_with_state(
			first_buffer,
			first_ctrl_settings.0,
			first_ctrl_settings.1,
			first_ctrl_settings.2,
		);
		Ok(RingIdx {
			off: self.write_index,
			wrap: self.drv_wc.0.into(),
		})
	}

	fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<RingIdx, VirtqError> {
		self.push_batch([tkn])
	}

	fn push_without_making_available(
		&mut self,
		tkn: &TransferToken<pvirtq::Desc>,
	) -> Result<WriteCtrl<'_>, VirtqError> {
		if tkn.num_consuming_descr() > self.capacity {
			return Err(VirtqError::NoDescrAvail);
		}

		// Create a counter that wraps to the first element
		// after reaching the end of the ring.
		let mut ctrl = self.get_write_ctrler()?;

		// Important here is to:
		// * distinguish between indirect and direct buffers
		// * make them available in the right order (the first descriptor last) (VIRTIO Spec. v1.2 section 2.8.6)

		// The buffer uses indirect descriptors if the ctrl_desc field is Some.
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
			ctrl.write_desc(desc);
		} else {
			for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
				ctrl.write_desc(incomplete_desc);
			}
		}
		Ok(ctrl)
	}

	/// # Unsafe
	/// Returns the memory address of the first element of the descriptor ring
	fn raw_addr(&self) -> usize {
		self.ring.as_ptr() as usize
	}

	/// Returns an initialized write controller in order
	/// to write the queue correctly.
	fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
		let desc_id = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
		Ok(WriteCtrl {
			start: self.write_index,
			position: self.write_index,
			modulo: u16::try_from(self.ring.len()).unwrap(),
			first_flags: DescF::empty(),
			buff_id: desc_id,

			desc_ring: self,
		})
	}

	/// Returns an initialized read controller in order
	/// to read the queue correctly.
	fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
		ReadCtrl {
			position: self.poll_index,
			modulo: u16::try_from(self.ring.len()).unwrap(),

			desc_ring: self,
		}
	}

	fn make_avail_with_state(
		&mut self,
		raw_tkn: TransferToken<pvirtq::Desc>,
		start: u16,
		buff_id: MemDescrId,
		first_flags: DescF,
	) {
		// Provide a reference, so that the TransferToken can be retrieved upon finish.
		self.tkn_ref_ring[usize::from(buff_id.0)] = Some(raw_tkn);
		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
		// See Virtio specification v1.1. - 2.7.21
		fence(Ordering::SeqCst);
		self.ring[usize::from(start)].flags = first_flags;
	}

	/// Returns the [DescF] with the avail and used flags set in accordance
	/// with the VIRTIO specification v1.2 - 2.8.1 (i.e. avail flag set to match
	/// the driver WrapCount and the used flag set to NOT match the WrapCount).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. device wrap counter) by accident.
	///
	/// A copy of the flag is taken instead of a mutable reference
	/// for the cases in which the modification of the flag needs to be
	/// deferred (e.g. patched dispatches, chained buffers).
	fn to_marked_avail(&self, mut flags: DescF) -> DescF {
		flags.set(virtq::DescF::AVAIL, self.drv_wc.0);
		flags.set(virtq::DescF::USED, !self.drv_wc.0);
		flags
	}

	/// Checks the avail and used flags to see if the descriptor is marked
	/// as used by the device in accordance with the
	/// VIRTIO specification v1.2 - 2.8.1 (i.e. they match the device WrapCount).
	///
	/// This function is defined on the whole ring rather than only the
	/// wrap counter to ensure that it is not called on the incorrect
	/// wrap counter (i.e. driver wrap counter) by accident.
	fn is_marked_used(&self, flags: DescF) -> bool {
		if self.dev_wc.0 {
			flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
		} else {
			!flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
		}
	}
}

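// Hedged usage sketch (illustrative only; `tkn` stands for a TransferToken<pvirtq::Desc>
// built by the surrounding virtqueue code):
//
//     let mut ring = DescriptorRing::new(256);
//     let next_idx = ring.push(tkn)?;  // writes the chain, first descriptor made available last
//     /* ...notify the device using `next_idx`, then later... */
//     let used = ring.try_recv()?;     // polls `poll_index` for a used descriptor chain
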
struct ReadCtrl<'a> {
	/// Poll index of the ring at init of ReadCtrl
	position: u16,
	modulo: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
	/// Polls the ring for a new finished buffer. If a buffer is marked as finished, takes care of
	/// updating the queue and returns the respective TransferToken.
	fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
		// Check if descriptor has been marked used.
		let desc = &self.desc_ring.ring[usize::from(self.position)];
		if self.desc_ring.is_marked_used(desc.flags) {
			let buff_id = desc.id.to_ne();
			let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
				.take()
				.expect(
					"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
				);

			// Check whether anything has been written to the queue. If this is the case, we calculate the overall length.
			// This is necessary in order to provide the drivers with correct access to the usable data.
			//
			// According to the standard, the device signals solely via the first written descriptor whether anything has been written to
			// the write descriptors of a buffer.
			// See Virtio specification v1.1. - 2.7.4
			//                                - 2.7.5
			//                                - 2.7.6
			// let mut write_len = if self.desc_ring.ring[self.position].flags & DescrFlags::VIRTQ_DESC_F_WRITE == DescrFlags::VIRTQ_DESC_F_WRITE {
			//      self.desc_ring.ring[self.position].len
			//  } else {
			//      0
			//  };
			//
			// INFO:
			// Due to the behavior of the currently used devices and the virtio code from the Linux kernel, we assume that devices do NOT set this
			// flag correctly upon writes. Hence, we omit the check in order to receive data.

			// We need to read the written length before advancing the position.
			let write_len = desc.len.to_ne();

			for _ in 0..tkn.num_consuming_descr() {
				self.incrmt();
			}
			self.desc_ring.mem_pool.ret_id(MemDescrId(buff_id));

			Some((tkn, write_len))
		} else {
			None
		}
	}

	fn incrmt(&mut self) {
		if self.desc_ring.poll_index + 1 == self.modulo {
			self.desc_ring.dev_wc.wrap();
		}

		// Increment capacity as we have one more free slot now!
		assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
		self.desc_ring.capacity += 1;

		self.desc_ring.poll_index = (self.desc_ring.poll_index + 1) % self.modulo;
		self.position = self.desc_ring.poll_index;
	}
}

/// Convenience struct that allows descriptors to be written into the queue.
/// The struct takes care of updating the state of the queue correctly and
/// of writing the correct flags.
struct WriteCtrl<'a> {
	/// Where the write of the buffer started in the descriptor ring.
	/// This is important, as we must make this descriptor available
	/// last.
	start: u16,
	/// Where to write next. This should always be equal to the ring's
	/// write_index field.
	position: u16,
	modulo: u16,
	/// The [pvirtq::Desc::flags] value for the first descriptor, the write of which is deferred.
	first_flags: DescF,
	/// Buff ID of this write
	buff_id: MemDescrId,

	desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
	/// **This function MUST only be used within the WriteCtrl.write_desc() function!**
	///
	/// Increments the index by one. The index wraps around to zero when
	/// reaching (modulo - 1).
	///
	/// Also takes care of wrapping the WrapCount of the associated
	/// DescriptorRing.
	fn incrmt(&mut self) {
		// First, check if we are at all allowed to write a descriptor
		assert!(self.desc_ring.capacity != 0);
		self.desc_ring.capacity -= 1;
		// Check if the increment wrapped around the end of the ring;
		// then also wrap the wrap counter.
		if self.position + 1 == self.modulo {
			self.desc_ring.drv_wc.wrap();
		}
		// Also update the write_index
		self.desc_ring.write_index = (self.desc_ring.write_index + 1) % self.modulo;

		self.position = (self.position + 1) % self.modulo;
	}

	/// Completes the descriptor flags and id, and writes into the queue at the correct position.
	fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
		incomplete_desc.id = self.buff_id.0.into();
		if self.start == self.position {
			// We save what the flags value for the first descriptor will be to be able
			// to write it later when all the other descriptors are written (so that
			// the device does not see an incomplete chain).
			self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		} else {
			// Set avail and used according to the current WrapCount.
			incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		}
		self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
		self.incrmt();
	}

	fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
		// We fail if one wants to make a buffer available without inserting one element!
		assert!(self.start != self.position);
		self.desc_ring
			.make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
	}
}

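// Hedged illustration (assumption) of how a three-descriptor chain moves through WriteCtrl:
//
//     write_desc(d0)  // flags cached in `first_flags`; d0 is written but NOT yet marked available
//     write_desc(d1)  // written and marked available immediately
//     write_desc(d2)  // written and marked available immediately
//     make_avail(tkn) // stores the token, fences, then writes `first_flags` into d0,
//                     // so the device never observes an incomplete chain
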
/// A newtype in order to implement the correct functionality upon
/// the `EventSuppress` structure for the driver's notification settings.
/// The Driver Event Suppression structure is read-only by the device
/// and controls the used buffer notifications sent by the device to the driver.
struct DrvNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure the driver writes to in order to control device-to-driver notifications
	raw: &'static mut pvirtq::EventSuppress,
}

/// A newtype in order to implement the correct functionality upon
/// the `EventSuppress` structure for the device's notification settings.
/// The Device Event Suppression structure is read-only by the driver
/// and controls the available buffer notifications sent by the driver to the device.
struct DevNotif {
	/// Indicates if VIRTIO_F_RING_EVENT_IDX has been negotiated
	f_notif_idx: bool,
	/// Actual structure the driver reads from to check whether the device wants notifications
	raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
	/// Enables notifications by unsetting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn enable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
	}

	/// Disables notifications by setting the LSB.
	/// See Virtio specification v1.1. - 2.7.10
	fn disable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
	}

	/// Enables a notification by the device for a specific descriptor.
	fn enable_specific(&mut self, idx: RingIdx) {
		// Check if VIRTIO_F_RING_EVENT_IDX has been negotiated
		if self.f_notif_idx {
			self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
			self.raw.desc = EventSuppressDesc::new()
				.with_desc_event_off(idx.off)
				.with_desc_event_wrap(idx.wrap);
		}
	}
}

impl DevNotif {
	/// Enables the notification capability for a specific buffer.
	pub fn enable_notif_specific(&mut self) {
		self.f_notif_idx = true;
	}

	/// Reads the notification bit (i.e. LSB) and returns the value.
	/// If notifications are enabled returns true, else false.
	fn is_notif(&self) -> bool {
		self.raw.flags.desc_event_flags() == RingEventFlags::Enable
	}

	fn notif_specific(&self) -> Option<RingIdx> {
		if !self.f_notif_idx {
			return None;
		}

		if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
			return None;
		}

		let off = self.raw.desc.desc_event_off();
		let wrap = self.raw.desc.desc_event_wrap();

		Some(RingIdx { off, wrap })
	}
}

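// Hedged note (assumption, based on Virtio specification v1.1. - 2.7.10): with
// RingEventFlags::Desc the device asks to be notified only once the descriptor at a
// specific RingIdx has been made available. The dispatch methods below check this via
// `wrapping_contains`, e.g.:
//
//     let range = self.last_next.get()..next_idx;
//     let must_notify = self
//         .dev_event
//         .notif_specific()
//         .is_some_and(|idx| range.wrapping_contains(&idx));
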
/// Packed virtqueue which provides the functionality as described in the
/// virtio specification v1.1. - 2.7
pub struct PackedVq {
	/// Ring which allows easy access to the raw ring structure of the
	/// specification
	descr_ring: DescriptorRing,
	/// Allows telling the device whether notifications are wanted
	drv_event: DrvNotif,
	/// Allows checking whether the device wants a notification
	dev_event: DevNotif,
	/// Used to actually notify the device about available buffers
	notif_ctrl: NotifCtrl,
	/// The size of the queue, equal to the number of descriptors which can
	/// be used
	size: VqSize,
	/// The virtqueue's index. This identifies the virtqueue to the
	/// device and is unique on a per device basis.
	index: VqIndex,
	last_next: Cell<RingIdx>,
}

// Public interface of PackedVq
// This interface is also public in order to allow people to use the PackedVq directly!
impl Virtq for PackedVq {
	fn enable_notifs(&mut self) {
		self.drv_event.enable_notif();
	}

	fn disable_notifs(&mut self) {
		self.drv_event.disable_notif();
	}

	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.descr_ring.try_recv()
	}

	fn dispatch_batch(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch_batch_await(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfers are not allowed
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.descr_ring.push(transfer_tkn)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let notif_specific = self.dev_event.notif_specific() == Some(self.last_next.get());

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_off(next_idx.off)
				.with_next_wrap(next_idx.wrap);
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn index(&self) -> VqIndex {
		self.index
	}

	fn size(&self) -> VqSize {
		self.size
	}

	fn has_used_buffers(&self) -> bool {
		let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index)];
		self.descr_ring.is_marked_used(desc.flags)
	}
}

impl VirtqPrivate for PackedVq {
	type Descriptor = pvirtq::Desc;

	fn create_indirect_ctrl(
		buffer_tkn: &AvailBufferToken,
	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
		Ok(Self::descriptor_iter(buffer_tkn)?
			.collect::<Vec<_>>()
			.into_boxed_slice())
	}
}

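// Hedged usage sketch (illustrative only; `com_cfg`, `notif_cfg`, `size`, `index`,
// `features`, `buffer_tkn` and `buffer_type` are assumed to be provided by the caller):
//
//     let mut vq = PackedVq::new(&mut com_cfg, &notif_cfg, size, index, features)?;
//     vq.dispatch(buffer_tkn, /* notif = */ false, buffer_type)?;
//     while let Ok(used) = vq.try_recv() {
//         /* hand the UsedBufferToken back to the driver */
//     }
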
impl PackedVq {
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		size: VqSize,
		index: VqIndex,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Currently we do not have support for in-order use.
		// This stems from the fact that the PackedVq ReadCtrl is currently not
		// able to derive other finished transfers from a used-buffer notification.
		// In order to allow this, the queue MUST track the sequence in which
		// TransferTokens are inserted into the queue. Furthermore, the queue should
		// carry a feature u64 in order to check which features are currently used
		// and adjust its ReadCtrl accordingly.
		if features.contains(virtio::F::IN_ORDER) {
			info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
			return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
		}

		// Get a handle to the queue's configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index.into()) else {
			return Err(VirtqError::QueueNotExisting(index.into()));
		};

		// Must catch a zero size as it is not allowed for packed queues.
		// Must catch sizes larger than 0x8000 (2^15) as they are not allowed for packed queues.
		//
		// See Virtio specification v1.1. - 4.1.4.3.2
		let vq_size = if (size.0 == 0) || (size.0 > 0x8000) {
			return Err(VirtqError::QueueSizeNotAllowed(size.0));
		} else {
			vq_handler.set_vq_size(size.0)
		};

		let descr_ring = DescriptorRing::new(vq_size);
		// The event suppression structures are allocated in device-accessible memory and leaked below.
		let _mem_len =
			core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

		let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		// TODO: make this safe using zerocopy
		let drv_event = unsafe { drv_event.assume_init() };
		let dev_event = unsafe { dev_event.assume_init() };
		let drv_event = Box::leak(drv_event);
		let dev_event = Box::leak(dev_event);

		// Provide memory areas of the queue's data structures to the device
		vq_handler.set_ring_addr(PhysAddr::from(descr_ring.raw_addr()));
		// Using usize is safe here, as the *mut EventSuppress raw pointer is a thin pointer of size usize
		vq_handler.set_drv_ctrl_addr(PhysAddr::from(ptr::from_mut(drv_event).expose_provenance()));
		vq_handler.set_dev_ctrl_addr(PhysAddr::from(ptr::from_mut(dev_event).expose_provenance()));

		let mut drv_event = DrvNotif {
			f_notif_idx: false,
			raw: drv_event,
		};

		let dev_event = DevNotif {
			f_notif_idx: false,
			raw: dev_event,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		if features.contains(virtio::F::EVENT_IDX) {
			drv_event.f_notif_idx = true;
		}

		vq_handler.enable_queue();

		info!("Created PackedVq: idx={}, size={}", index.0, vq_size);

		Ok(PackedVq {
			descr_ring,
			drv_event,
			dev_event,
			notif_ctrl,
			size: VqSize::from(vq_size),
			index,
			last_next: Cell::default(),
		})
	}
}