hermit/drivers/virtio/virtqueue/
split.rs

1//! `virtq` infrastructure.
2//!
3//! The main type of this module is [`SplitVq`].
4//!
5//! For details, see [Split Virtqueues].
6//! For details on the Rust definitions, see [`virtio::virtq`].
7//!
8//! [Split Virtqueues]: https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-350007
9
10use alloc::boxed::Box;
11use alloc::vec::Vec;
12use core::cell::UnsafeCell;
13use core::mem::{self, MaybeUninit};
14
15use mem_barrier::BarrierType;
16#[cfg(not(feature = "pci"))]
17use virtio::mmio::NotificationData;
18#[cfg(feature = "pci")]
19use virtio::pci::NotificationData;
20use virtio::{le16, virtq};
21
22#[cfg(not(feature = "pci"))]
23use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
24#[cfg(feature = "pci")]
25use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
26use super::error::VirtqError;
27use super::index_alloc::IndexAlloc;
28use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
29use crate::mm::device_alloc::DeviceAlloc;
30
/// Driver-side state of a single split virtqueue: the descriptor table,
/// available ring, and used ring shared with the device, plus bookkeeping
/// for in-flight transfers.
struct DescrRing {
	/// Index of the next used-ring entry to be processed by the driver.
	read_idx: u16,
	/// In-flight transfer tokens, stored under the head descriptor index of
	/// each transfer's chain (see `push`/`try_recv`).
	token_ring: Box<[Option<TransferToken<virtq::Desc>>]>,
	/// Allocator for free slots in the descriptor table.
	indexes: IndexAlloc,

	// The descriptor table and both rings are shared with the device, hence
	// the `UnsafeCell` wrappers and the allocation via `DeviceAlloc`.
	descr_table_cell: Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
	avail_ring_cell: Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
	used_ring_cell: Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
	/// Whether `VIRTIO_F_ORDER_PLATFORM` was negotiated, i.e. whether
	/// platform-strength memory barriers are required when synchronizing
	/// with the device (forwarded to `virtio_mem_barrier`).
	order_platform: bool,
}
41
impl DescrRing {
	/// Returns a mutable view of the device-shared descriptor table.
	//
	// `&mut self` guarantees exclusive driver-side access; the `UnsafeCell`
	// exists only because the memory is shared with the device.
	fn descr_table_mut(&mut self) -> &mut [MaybeUninit<virtq::Desc>] {
		unsafe { &mut *self.descr_table_cell.get() }
	}
	/// Returns a shared view of the available ring.
	#[expect(dead_code)]
	fn avail_ring(&self) -> &virtq::Avail {
		unsafe { &*self.avail_ring_cell.get() }
	}
	/// Returns a mutable view of the available ring (driver-written).
	fn avail_ring_mut(&mut self) -> &mut virtq::Avail {
		unsafe { &mut *self.avail_ring_cell.get() }
	}
	/// Returns a shared view of the used ring (device-written).
	fn used_ring(&self) -> &virtq::Used {
		unsafe { &*self.used_ring_cell.get() }
	}

	/// Writes the descriptor chain of `tkn` into the descriptor table and
	/// publishes its head index on the available ring.
	///
	/// Returns the available index *after* the increment, i.e. the value
	/// needed for `VIRTIO_F_NOTIFICATION_DATA` notifications.
	///
	/// Returns [`VirtqError::NoDescrAvail`] if no free descriptor slot is
	/// available for (part of) the chain.
	//
	// NOTE(review): if `indexes.allocate()` fails in the middle of a
	// multi-descriptor chain, the indices already allocated for this chain
	// are not returned to `indexes` — looks like a descriptor-slot leak;
	// confirm and fix separately.
	fn push(&mut self, tkn: TransferToken<virtq::Desc>) -> Result<u16, VirtqError> {
		let mut index;
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			// Indirect case: the whole chain lives in `ctrl_desc`; only a
			// single table entry pointing at it is required.
			let descriptor = SplitVq::indirect_desc(ctrl_desc.as_ref());

			index = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
			self.descr_table_mut()[index] = MaybeUninit::new(descriptor);
		} else {
			// Direct case: write the chain back to front so that each
			// descriptor's `next` field can point at its already-written
			// successor.
			let mut rev_all_desc_iter = SplitVq::descriptor_iter(&tkn.buff_tkn)?.rev();

			// We need to handle the last descriptor (the first for the reversed iterator) specially to not set the next flag.
			{
				// If the [AvailBufferToken] is empty, we panic
				let descriptor = rev_all_desc_iter.next().unwrap();

				index = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
				self.descr_table_mut()[index] = MaybeUninit::new(descriptor);
			}
			for mut descriptor in rev_all_desc_iter {
				// We have not updated `index` yet, so it is at this point the index of the previous descriptor that had been written.
				descriptor.next = le16::from_ne(index.try_into().unwrap());

				index = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
				self.descr_table_mut()[index] = MaybeUninit::new(descriptor);
			}
			// At this point, `index` is the index of the last element of the reversed iterator,
			// thus the head of the descriptor chain.
		}

		// Remember the token under the head index so `try_recv` can hand the
		// buffers back once the device marks the chain as used.
		self.token_ring[index] = Some(tkn);

		let len = self.token_ring.len();
		let idx = self.avail_ring_mut().idx.to_ne();
		self.avail_ring_mut().ring_mut(true)[idx as usize % len] =
			le16::from_ne(index.try_into().unwrap());

		// The descriptor and ring-entry writes must be visible to the device
		// before the available index is advanced.
		super::virtio_mem_barrier(BarrierType::Write, self.order_platform);
		let next_idx = idx.wrapping_add(1);
		self.avail_ring_mut().idx = next_idx.into();

		Ok(next_idx)
	}

	/// Tries to take the next used buffer from the used ring and frees the
	/// descriptor slots of its chain.
	///
	/// Returns [`VirtqError::NoNewUsed`] if the device has not marked any new
	/// buffer as used since the last call.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		// Make the device's writes to the used ring visible before reading
		// the ring contents.
		super::virtio_mem_barrier(BarrierType::Read, self.order_platform);
		if self.read_idx == self.used_ring().idx.to_ne() {
			return Err(VirtqError::NoNewUsed);
		}
		let cur_ring_index = self.read_idx as usize % self.token_ring.len();
		let used_elem = self.used_ring().ring()[cur_ring_index];

		let tkn = self.token_ring[used_elem.id.to_ne() as usize]
			.take()
			.expect(
				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
			);

		// We return the indices of the now freed ring slots back to `mem_pool.`
		let mut id_ret_idx = u16::try_from(used_elem.id.to_ne()).unwrap();
		loop {
			// SAFETY: `id_ret_idx` was handed out by `indexes.allocate()` in
			// `push` for this chain and has not been freed since.
			unsafe {
				self.indexes.deallocate(id_ret_idx.into());
			}
			// SAFETY: the slot was initialized with `MaybeUninit::new` in
			// `push` when the chain was written.
			let cur_chain_elem =
				unsafe { self.descr_table_mut()[usize::from(id_ret_idx)].assume_init() };
			// Follow the NEXT flag to free every slot of this transfer.
			if cur_chain_elem.flags.contains(virtq::DescF::NEXT) {
				id_ret_idx = cur_chain_elem.next.to_ne();
			} else {
				break;
			}
		}

		self.read_idx = self.read_idx.wrapping_add(1);
		Ok(UsedBufferToken::from_avail_buffer_token(
			tkn.buff_tkn,
			used_elem.len.to_ne(),
		))
	}

	/// Allows device interrupts by clearing `NO_INTERRUPT` in the available
	/// ring's flags.
	fn drv_enable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.remove(virtq::AvailF::NO_INTERRUPT);
	}

	/// Requests interrupt suppression by setting `NO_INTERRUPT` in the
	/// available ring's flags.
	fn drv_disable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.insert(virtq::AvailF::NO_INTERRUPT);
	}

	/// Returns whether the device wants to be notified about newly available
	/// buffers (i.e. `NO_NOTIFY` is not set in the used ring's flags).
	fn dev_is_notif(&self) -> bool {
		!self.used_ring().flags.contains(virtq::UsedF::NO_NOTIFY)
	}
}
152
/// Virtio's split virtqueue structure
pub struct SplitVq {
	/// The shared descriptor table and rings plus driver bookkeeping.
	ring: DescrRing,
	/// Number of entries in the queue (as negotiated in [`SplitVq::new`]).
	size: u16,
	/// The queue's index within the device.
	index: u16,

	/// Interface for notifying the device about newly available buffers.
	notif_ctrl: NotifCtrl,
}
161
162impl Virtq for SplitVq {
163	fn enable_notifs(&mut self) {
164		self.ring.drv_enable_notif();
165	}
166
167	fn disable_notifs(&mut self) {
168		self.ring.drv_disable_notif();
169	}
170
171	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
172		self.ring.try_recv()
173	}
174
175	fn dispatch_batch(
176		&mut self,
177		_tkns: Vec<(AvailBufferToken, BufferType)>,
178		_notif: bool,
179	) -> Result<(), VirtqError> {
180		unimplemented!();
181	}
182
183	fn dispatch_batch_await(
184		&mut self,
185		_tkns: Vec<(AvailBufferToken, BufferType)>,
186		_notif: bool,
187	) -> Result<(), VirtqError> {
188		unimplemented!()
189	}
190
191	fn dispatch(
192		&mut self,
193		buffer_tkn: AvailBufferToken,
194		notif: bool,
195		buffer_type: BufferType,
196	) -> Result<(), VirtqError> {
197		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
198		let next_idx = self.ring.push(transfer_tkn)?;
199
200		if notif {
201			// TODO: Check whether the splitvirtquue has notifications for specific descriptors
202			// I believe it does not.
203			unimplemented!();
204		}
205
206		if self.ring.dev_is_notif() {
207			let notification_data = NotificationData::new()
208				.with_vqn(self.index)
209				.with_next_idx(next_idx);
210			self.notif_ctrl.notify_dev(notification_data);
211		}
212		Ok(())
213	}
214
215	fn index(&self) -> u16 {
216		self.index
217	}
218
219	fn size(&self) -> u16 {
220		self.size
221	}
222
223	fn has_used_buffers(&self) -> bool {
224		self.ring.read_idx != self.ring.used_ring().idx.to_ne()
225	}
226}
227
228impl VirtqPrivate for SplitVq {
229	type Descriptor = virtq::Desc;
230	fn create_indirect_ctrl(
231		buffer_tkn: &AvailBufferToken,
232	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
233		Ok(Self::descriptor_iter(buffer_tkn)?
234			.zip(1..)
235			.map(|(descriptor, next_id)| Self::Descriptor {
236				next: next_id.into(),
237				..descriptor
238			})
239			.collect::<Vec<_>>()
240			.into_boxed_slice())
241	}
242}
243
impl SplitVq {
	/// Creates a new split virtqueue with queue index `index` and at most
	/// `max_size` entries and registers its memory with the device.
	///
	/// The actual queue size is negotiated with the device via the queue
	/// configuration handler and may be smaller than `max_size`.
	///
	/// # Errors
	///
	/// * [`VirtqError::QueueNotExisting`] if the device does not expose a
	///   queue with the given `index`.
	/// * [`VirtqError::AllocationError`] if the available or used ring
	///   cannot be allocated.
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		max_size: u16,
		index: u16,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Get a handler to the queues configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index) else {
			return Err(VirtqError::QueueNotExisting(index));
		};

		let size = vq_handler.set_vq_size(max_size);

		// The queue structures are shared with the device, so they are
		// allocated via `DeviceAlloc` and wrapped in `UnsafeCell`s.
		//
		// SAFETY: `UnsafeCell<T>` is `repr(transparent)` and therefore has
		// the same layout as `T`; the transmute only changes the pointee type
		// of the box, not the allocation.
		let mut descr_table_cell = unsafe {
			core::mem::transmute::<
				Box<[MaybeUninit<virtq::Desc>], DeviceAlloc>,
				Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
			>(Box::new_uninit_slice_in(size.into(), DeviceAlloc))
		};

		let mut avail_ring_cell = {
			let avail = virtq::Avail::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same layout argument as for the descriptor table
			// transmute above (`UnsafeCell` is `repr(transparent)`).
			unsafe {
				mem::transmute::<
					Box<virtq::Avail, DeviceAlloc>,
					Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
				>(avail)
			}
		};

		let mut used_ring_cell = {
			let used = virtq::Used::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same layout argument as for the descriptor table
			// transmute above (`UnsafeCell` is `repr(transparent)`).
			unsafe {
				mem::transmute::<
					Box<virtq::Used, DeviceAlloc>,
					Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
				>(used)
			}
		};

		// Provide memory areas of the queues data structures to the device
		vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_table_cell.as_mut()));
		// The available ring is the driver control area, the used ring the
		// device control area of the split queue.
		vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(avail_ring_cell.as_mut()));
		vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(used_ring_cell.as_mut()));

		let order_platform = features.contains(virtio::F::ORDER_PLATFORM);

		let descr_ring = DescrRing {
			read_idx: 0,
			token_ring: core::iter::repeat_with(|| None)
				.take(size.into())
				.collect::<Vec<_>>()
				.into_boxed_slice(),
			indexes: IndexAlloc::new(size.into()),

			descr_table_cell,
			avail_ring_cell,
			used_ring_cell,
			order_platform,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		// Enable the queue only after all memory areas have been registered.
		vq_handler.enable_queue();

		info!("Created SplitVq: idx={index}, size={size}");

		Ok(SplitVq {
			ring: descr_ring,
			notif_ctrl,
			size,
			index,
		})
	}
}