hermit/drivers/virtio/virtqueue/
split.rs

1//! `virtq` infrastructure.
2//!
3//! The main type of this module is [`SplitVq`].
4//!
5//! For details, see [Split Virtqueues].
6//! For details on the Rust definitions, see [`virtio::virtq`].
7//!
8//! [Split Virtqueues]: https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-350007
9
10use alloc::boxed::Box;
11use alloc::vec::Vec;
12use core::cell::UnsafeCell;
13use core::mem::{self, MaybeUninit};
14
15#[cfg(not(feature = "pci"))]
16use virtio::mmio::NotificationData;
17#[cfg(feature = "pci")]
18use virtio::pci::NotificationData;
19use virtio::{le16, virtq};
20
21#[cfg(not(feature = "pci"))]
22use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
23#[cfg(feature = "pci")]
24use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
25use super::error::VirtqError;
26use super::index_alloc::IndexAlloc;
27use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
28use crate::arch::memory_barrier;
29use crate::mm::device_alloc::DeviceAlloc;
30
/// Driver-side state of a split virtqueue: the descriptor table plus the
/// available (driver → device) and used (device → driver) rings.
struct DescrRing {
	/// Index of the next used-ring entry the driver has not yet consumed
	/// (compared against the device-written `used.idx` in `try_recv`).
	read_idx: u16,
	/// In-flight transfer tokens, indexed by the head descriptor index of
	/// their chain. `None` means the slot is not in use.
	token_ring: Box<[Option<TransferToken<virtq::Desc>>]>,
	/// Allocator handing out free descriptor-table slots.
	indexes: IndexAlloc,

	// The structures below are shared with the device (allocated via
	// `DeviceAlloc`), hence the `UnsafeCell` wrappers: the device may read
	// or write them concurrently with the driver.
	descr_table_cell: Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
	avail_ring_cell: Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
	used_ring_cell: Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
}
40
impl DescrRing {
	/// Mutable view of the device-shared descriptor table.
	fn descr_table_mut(&mut self) -> &mut [MaybeUninit<virtq::Desc>] {
		// SAFETY: `&mut self` gives the driver exclusive access on its side;
		// coordination with device accesses relies on the `memory_barrier`
		// calls in `push`/`try_recv` — NOTE(review): confirm this discipline
		// covers all device-visible writes.
		unsafe { &mut *self.descr_table_cell.get() }
	}
	/// Shared view of the available ring.
	// NOTE(review): currently unused in this file; kept for symmetry with
	// `used_ring`.
	fn avail_ring(&self) -> &virtq::Avail {
		// SAFETY: the available ring is written by the driver only, so a
		// shared borrow derived from `&self` is sound on the driver side.
		unsafe { &*self.avail_ring_cell.get() }
	}
	/// Mutable view of the available ring.
	fn avail_ring_mut(&mut self) -> &mut virtq::Avail {
		// SAFETY: `&mut self` gives the driver exclusive access on its side.
		unsafe { &mut *self.avail_ring_cell.get() }
	}
	/// Shared view of the device-written used ring.
	fn used_ring(&self) -> &virtq::Used {
		// SAFETY: the driver only reads the used ring; the device writes it.
		unsafe { &*self.used_ring_cell.get() }
	}

	/// Writes the descriptor chain for `tkn` into the descriptor table and
	/// publishes its head index on the available ring.
	///
	/// Returns the incremented available-ring index (used as notification
	/// data by the caller), or [`VirtqError::NoDescrAvail`] if the table has
	/// no free slot.
	fn push(&mut self, tkn: TransferToken<virtq::Desc>) -> Result<u16, VirtqError> {
		let mut index;
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			// Indirect case: the whole chain lives in `ctrl_desc`; only one
			// table entry pointing at it is needed.
			let descriptor = SplitVq::indirect_desc(ctrl_desc.as_ref());

			index = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
			self.descr_table_mut()[index] = MaybeUninit::new(descriptor);
		} else {
			// Direct case: write the chain back to front, so each descriptor
			// can record the already-allocated index of its successor.
			let mut rev_all_desc_iter = SplitVq::descriptor_iter(&tkn.buff_tkn)?.rev();

			// We need to handle the last descriptor (the first for the reversed iterator) specially to not set the next flag.
			{
				// If the [AvailBufferToken] is empty, we panic
				let descriptor = rev_all_desc_iter.next().unwrap();

				index = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
				self.descr_table_mut()[index] = MaybeUninit::new(descriptor);
			}
			for mut descriptor in rev_all_desc_iter {
				// We have not updated `index` yet, so it is at this point the index of the previous descriptor that had been written.
				descriptor.next = le16::from_ne(index.try_into().unwrap());

				index = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
				self.descr_table_mut()[index] = MaybeUninit::new(descriptor);
			}
			// At this point, `index` is the index of the last element of the reversed iterator,
			// thus the head of the descriptor chain.
		}

		// Remember the token under the head index so `try_recv` can hand it
		// back when the device marks the chain as used.
		self.token_ring[index] = Some(tkn);

		let len = self.token_ring.len();
		let idx = self.avail_ring_mut().idx.to_ne();
		self.avail_ring_mut().ring_mut(true)[idx as usize % len] =
			le16::from_ne(index.try_into().unwrap());

		// Make the descriptor and ring-slot writes visible to the device
		// before the index update below publishes them.
		memory_barrier();
		let next_idx = idx.wrapping_add(1);
		self.avail_ring_mut().idx = next_idx.into();

		Ok(next_idx)
	}

	/// Pops the next entry from the used ring, frees the descriptor-table
	/// slots of its chain, and returns the completed buffer token.
	///
	/// Returns [`VirtqError::NoNewUsed`] if the device has not published any
	/// new used elements since the last call.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		if self.read_idx == self.used_ring().idx.to_ne() {
			return Err(VirtqError::NoNewUsed);
		}
		let cur_ring_index = self.read_idx as usize % self.token_ring.len();
		let used_elem = self.used_ring().ring()[cur_ring_index];

		let tkn = self.token_ring[used_elem.id.to_ne() as usize]
			.take()
			.expect(
				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
			);

		// We return the indices of the now freed descriptor-table slots back
		// to `indexes` by walking the chain via the `next` links.
		let mut id_ret_idx = u16::try_from(used_elem.id.to_ne()).unwrap();
		loop {
			// SAFETY: `id_ret_idx` originates from the device's used element
			// or from a `next` link written in `push`, so the slot was
			// previously allocated from `indexes`.
			unsafe {
				self.indexes.deallocate(id_ret_idx.into());
			}
			// SAFETY: every descriptor in the chain was initialized by `push`
			// before the buffer was exposed to the device.
			let cur_chain_elem =
				unsafe { self.descr_table_mut()[usize::from(id_ret_idx)].assume_init() };
			if cur_chain_elem.flags.contains(virtq::DescF::NEXT) {
				id_ret_idx = cur_chain_elem.next.to_ne();
			} else {
				break;
			}
		}

		memory_barrier();
		self.read_idx = self.read_idx.wrapping_add(1);
		Ok(UsedBufferToken::from_avail_buffer_token(
			tkn.buff_tkn,
			used_elem.len.to_ne(),
		))
	}

	/// Asks the device to send used-buffer interrupts (clears
	/// `NO_INTERRUPT` on the available ring).
	fn drv_enable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.remove(virtq::AvailF::NO_INTERRUPT);
	}

	/// Asks the device to suppress used-buffer interrupts (sets
	/// `NO_INTERRUPT` on the available ring).
	fn drv_disable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.insert(virtq::AvailF::NO_INTERRUPT);
	}

	/// Returns `true` if the device has NOT suppressed driver → device
	/// notifications (i.e. `NO_NOTIFY` is clear on the used ring).
	fn dev_is_notif(&self) -> bool {
		!self.used_ring().flags.contains(virtq::UsedF::NO_NOTIFY)
	}
}
150
/// Virtio's split virtqueue structure
pub struct SplitVq {
	/// Descriptor table plus available/used rings and their bookkeeping.
	ring: DescrRing,
	/// Number of entries in the queue, as negotiated with the device.
	size: u16,
	/// Index of this queue within the device (used as `vqn` in notifications).
	index: u16,

	/// Handle for notifying the device about newly available buffers.
	notif_ctrl: NotifCtrl,
}
159
impl Virtq for SplitVq {
	/// Allows the device to send used-buffer interrupts again.
	fn enable_notifs(&mut self) {
		self.ring.drv_enable_notif();
	}

	/// Asks the device to suppress used-buffer interrupts.
	fn disable_notifs(&mut self) {
		self.ring.drv_disable_notif();
	}

	/// Non-blocking receive of the next completed buffer, if any.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.ring.try_recv()
	}

	/// Batch dispatch is not implemented for split virtqueues.
	fn dispatch_batch(
		&mut self,
		_tkns: Vec<(AvailBufferToken, BufferType)>,
		_notif: bool,
	) -> Result<(), VirtqError> {
		unimplemented!();
	}

	/// Batch dispatch with await is not implemented for split virtqueues.
	fn dispatch_batch_await(
		&mut self,
		_tkns: Vec<(AvailBufferToken, BufferType)>,
		_notif: bool,
	) -> Result<(), VirtqError> {
		unimplemented!()
	}

	/// Exposes `buffer_tkn` to the device and, if the device has not
	/// suppressed notifications, notifies it about the new buffer.
	///
	/// `notif` (per-descriptor notification) is unsupported and panics.
	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.ring.push(transfer_tkn)?;

		if notif {
			// TODO: Check whether the split virtqueue has notifications for specific descriptors
			// I believe it does not.
			unimplemented!();
		}

		if self.ring.dev_is_notif() {
			// Device wants to be notified: send queue index and the new
			// available-ring index.
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_idx(next_idx);
			self.notif_ctrl.notify_dev(notification_data);
		}
		Ok(())
	}

	/// The queue's index within the device.
	fn index(&self) -> u16 {
		self.index
	}

	/// The queue's size (number of descriptor-table entries).
	fn size(&self) -> u16 {
		self.size
	}

	/// Returns `true` if the device has published used elements that the
	/// driver has not yet consumed.
	fn has_used_buffers(&self) -> bool {
		self.ring.read_idx != self.ring.used_ring().idx.to_ne()
	}
}
225
226impl VirtqPrivate for SplitVq {
227	type Descriptor = virtq::Desc;
228	fn create_indirect_ctrl(
229		buffer_tkn: &AvailBufferToken,
230	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
231		Ok(Self::descriptor_iter(buffer_tkn)?
232			.zip(1..)
233			.map(|(descriptor, next_id)| Self::Descriptor {
234				next: next_id.into(),
235				..descriptor
236			})
237			.collect::<Vec<_>>()
238			.into_boxed_slice())
239	}
240}
241
impl SplitVq {
	/// Creates and enables a split virtqueue with the given `index` on the
	/// device behind `com_cfg`.
	///
	/// Allocates the descriptor table and the available/used rings in
	/// device-accessible memory, registers their physical addresses with the
	/// device, and sets up the notification mechanism. The requested `size`
	/// may be reduced by the device (see [`set_vq_size`]).
	///
	/// Returns [`VirtqError::QueueNotExisting`] if the device has no queue
	/// with this index and [`VirtqError::AllocationError`] if a ring
	/// allocation fails.
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		size: u16,
		index: u16,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Get a handler to the queues configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index) else {
			return Err(VirtqError::QueueNotExisting(index));
		};

		// The device may clamp the requested queue size.
		let size = vq_handler.set_vq_size(size);

		// SAFETY: `UnsafeCell<T>` is `repr(transparent)`, so
		// `[MaybeUninit<Desc>]` and `UnsafeCell<[MaybeUninit<Desc>]>` have
		// the same layout; the transmute only changes the type of the box.
		let mut descr_table_cell = unsafe {
			core::mem::transmute::<
				Box<[MaybeUninit<virtq::Desc>], DeviceAlloc>,
				Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
			>(Box::new_uninit_slice_in(size.into(), DeviceAlloc))
		};

		let mut avail_ring_cell = {
			let avail = virtq::Avail::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same `repr(transparent)` layout argument as above.
			unsafe {
				mem::transmute::<
					Box<virtq::Avail, DeviceAlloc>,
					Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
				>(avail)
			}
		};

		let mut used_ring_cell = {
			let used = virtq::Used::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same `repr(transparent)` layout argument as above.
			unsafe {
				mem::transmute::<
					Box<virtq::Used, DeviceAlloc>,
					Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
				>(used)
			}
		};

		// Provide memory areas of the queues data structures to the device
		vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_table_cell.as_mut()));
		// NOTE(review): for the split queue the "driver control" and "device
		// control" areas are the available and used rings, respectively (the
		// original comment referred to `EventSuppr`, which is packed-queue
		// terminology).
		vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(avail_ring_cell.as_mut()));
		vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(used_ring_cell.as_mut()));

		let descr_ring = DescrRing {
			read_idx: 0,
			// All slots start empty; nothing is in flight yet.
			token_ring: core::iter::repeat_with(|| None)
				.take(size.into())
				.collect::<Vec<_>>()
				.into_boxed_slice(),
			indexes: IndexAlloc::new(size.into()),

			descr_table_cell,
			avail_ring_cell,
			used_ring_cell,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		vq_handler.enable_queue();

		info!("Created SplitVq: idx={index}, size={size}");

		Ok(SplitVq {
			ring: descr_ring,
			notif_ctrl,
			size,
			index,
		})
	}
}