hermit/drivers/virtio/virtqueue/
split.rs

1//! This module contains Virtio's split virtqueue.
//! See Virtio specification v1.1. - 2.6
3
4use alloc::boxed::Box;
5use alloc::vec::Vec;
6use core::cell::UnsafeCell;
7use core::mem::{self, MaybeUninit};
8use core::ptr;
9
10use memory_addresses::PhysAddr;
11#[cfg(not(feature = "pci"))]
12use virtio::mmio::NotificationData;
13#[cfg(feature = "pci")]
14use virtio::pci::NotificationData;
15use virtio::{le16, virtq};
16
17#[cfg(not(feature = "pci"))]
18use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
19#[cfg(feature = "pci")]
20use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
21use super::error::VirtqError;
22use super::{
23	AvailBufferToken, BufferType, MemPool, TransferToken, UsedBufferToken, Virtq, VirtqPrivate,
24	VqIndex, VqSize,
25};
26use crate::arch::memory_barrier;
27use crate::mm::device_alloc::DeviceAlloc;
28
/// State of one split virtqueue: the three device-shared rings plus the
/// driver-side bookkeeping needed to match completions to transfers.
struct DescrRing {
	/// Driver-side index of the next used-ring entry to inspect.
	read_idx: u16,
	/// In-flight transfer tokens, indexed by the head descriptor id of their
	/// chain; `Some` while the corresponding buffer is exposed to the device.
	token_ring: Box<[Option<Box<TransferToken<virtq::Desc>>>]>,
	/// Pool of free descriptor-table indices.
	mem_pool: MemPool,

	/// Descriptor Tables
	///
	/// # Safety
	///
	/// These tables may only be accessed via volatile operations.
	/// See the corresponding method for a safe wrapper.
	descr_table_cell: Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
	/// Available (driver → device) ring.
	avail_ring_cell: Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
	/// Used (device → driver) ring.
	used_ring_cell: Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
}
44
impl DescrRing {
	/// Returns a mutable view of the descriptor table.
	///
	/// NOTE(review): the field documentation above demands volatile-only
	/// access to these tables, yet this wrapper hands out a plain reference —
	/// confirm this is the intended "safe wrapper" mentioned there.
	fn descr_table_mut(&mut self) -> &mut [MaybeUninit<virtq::Desc>] {
		unsafe { &mut *self.descr_table_cell.get() }
	}
	/// Returns a shared view of the available ring.
	fn avail_ring(&self) -> &virtq::Avail {
		unsafe { &*self.avail_ring_cell.get() }
	}
	/// Returns a mutable view of the available ring.
	fn avail_ring_mut(&mut self) -> &mut virtq::Avail {
		unsafe { &mut *self.avail_ring_cell.get() }
	}
	/// Returns a shared view of the used ring (written by the device).
	fn used_ring(&self) -> &virtq::Used {
		unsafe { &*self.used_ring_cell.get() }
	}

	/// Writes the descriptor chain of `tkn` into the descriptor table and
	/// publishes the chain head in the available ring.
	///
	/// If `tkn` carries an indirect descriptor table (`ctrl_desc`), a single
	/// table entry pointing at it is consumed; otherwise one entry per buffer
	/// element is consumed, linked back-to-front so each descriptor can refer
	/// to the index of its successor.
	///
	/// Returns the new available-ring index on success, or
	/// [`VirtqError::NoDescrAvail`] once the free-descriptor pool is
	/// exhausted (errors from descriptor iteration are propagated as-is).
	fn push(&mut self, tkn: TransferToken<virtq::Desc>) -> Result<u16, VirtqError> {
		let mut index;
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			// Indirect case: one descriptor that points at the out-of-band table.
			let descriptor = SplitVq::indirect_desc(ctrl_desc.as_ref());

			index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?.0;
			self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
		} else {
			let mut rev_all_desc_iter = SplitVq::descriptor_iter(&tkn.buff_tkn)?.rev();

			// We need to handle the last descriptor (the first for the reversed iterator) specially to not set the next flag.
			{
				// If the [AvailBufferToken] is empty, we panic
				let descriptor = rev_all_desc_iter.next().unwrap();

				index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?.0;
				self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
			}
			for mut descriptor in rev_all_desc_iter {
				// We have not updated `index` yet, so it is at this point the index of the previous descriptor that had been written.
				descriptor.next = le16::from(index);

				index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?.0;
				self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
			}
			// At this point, `index` is the index of the last element of the reversed iterator,
			// thus the head of the descriptor chain.
		}

		// Keep the token so `try_recv` can hand the buffer back on completion.
		self.token_ring[usize::from(index)] = Some(Box::new(tkn));

		let len = self.token_ring.len();
		let idx = self.avail_ring_mut().idx.to_ne();
		self.avail_ring_mut().ring_mut(true)[idx as usize % len] = index.into();

		// Make the descriptor and ring-slot writes visible to the device
		// before publishing the incremented available index.
		memory_barrier();
		let next_idx = idx.wrapping_add(1);
		self.avail_ring_mut().idx = next_idx.into();

		Ok(next_idx)
	}

	/// Polls the used ring for one completed transfer.
	///
	/// On success, returns the ids of the completed descriptor chain to
	/// `mem_pool`, advances `read_idx`, and yields the corresponding
	/// [`UsedBufferToken`]. Returns [`VirtqError::NoNewUsed`] when the device
	/// has not published any new used entry.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		if self.read_idx == self.used_ring().idx.to_ne() {
			return Err(VirtqError::NoNewUsed);
		}
		let cur_ring_index = self.read_idx as usize % self.token_ring.len();
		let used_elem = self.used_ring().ring()[cur_ring_index];

		let tkn = self.token_ring[used_elem.id.to_ne() as usize]
			.take()
			.expect(
				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
			);

		// We return the indices of the now freed ring slots back to `mem_pool.`
		let mut id_ret_idx = u16::try_from(used_elem.id.to_ne()).unwrap();
		loop {
			self.mem_pool.ret_id(super::MemDescrId(id_ret_idx));
			let cur_chain_elem =
				// SAFETY: this slot was initialized by `push` before its id
				// was exposed to the device, and the device returned the id.
				unsafe { self.descr_table_mut()[usize::from(id_ret_idx)].assume_init() };
			// Follow the chain until a descriptor without the NEXT flag ends it.
			if cur_chain_elem.flags.contains(virtq::DescF::NEXT) {
				id_ret_idx = cur_chain_elem.next.to_ne();
			} else {
				break;
			}
		}

		// Complete all reads of the used element before freeing the slot by
		// advancing `read_idx`.
		memory_barrier();
		self.read_idx = self.read_idx.wrapping_add(1);
		Ok(UsedBufferToken::from_avail_buffer_token(
			tkn.buff_tkn,
			used_elem.len.to_ne(),
		))
	}

	/// Asks the device to send interrupts for used buffers again.
	fn drv_enable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.remove(virtq::AvailF::NO_INTERRUPT);
	}

	/// Asks the device to suppress interrupts for used buffers.
	fn drv_disable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.insert(virtq::AvailF::NO_INTERRUPT);
	}

	/// Returns `true` if the device wants to be notified about new available
	/// buffers (i.e. `NO_NOTIFY` is not set in the used-ring flags).
	fn dev_is_notif(&self) -> bool {
		!self.used_ring().flags.contains(virtq::UsedF::NO_NOTIFY)
	}
}
151
/// Virtio's split virtqueue structure
pub struct SplitVq {
	/// Shared rings and driver-side bookkeeping.
	ring: DescrRing,
	/// Number of entries in the queue (as negotiated in [`SplitVq::new`]).
	size: VqSize,
	/// Index of this queue within its device.
	index: VqIndex,

	/// Transport-specific channel for notifying the device.
	notif_ctrl: NotifCtrl,
}
160
161impl Virtq for SplitVq {
162	fn enable_notifs(&mut self) {
163		self.ring.drv_enable_notif();
164	}
165
166	fn disable_notifs(&mut self) {
167		self.ring.drv_disable_notif();
168	}
169
170	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
171		self.ring.try_recv()
172	}
173
174	fn dispatch_batch(
175		&mut self,
176		_tkns: Vec<(AvailBufferToken, BufferType)>,
177		_notif: bool,
178	) -> Result<(), VirtqError> {
179		unimplemented!();
180	}
181
182	fn dispatch_batch_await(
183		&mut self,
184		_tkns: Vec<(AvailBufferToken, BufferType)>,
185		_notif: bool,
186	) -> Result<(), VirtqError> {
187		unimplemented!()
188	}
189
190	fn dispatch(
191		&mut self,
192		buffer_tkn: AvailBufferToken,
193		notif: bool,
194		buffer_type: BufferType,
195	) -> Result<(), VirtqError> {
196		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
197		let next_idx = self.ring.push(transfer_tkn)?;
198
199		if notif {
200			// TODO: Check whether the splitvirtquue has notifications for specific descriptors
201			// I believe it does not.
202			unimplemented!();
203		}
204
205		if self.ring.dev_is_notif() {
206			let notification_data = NotificationData::new()
207				.with_vqn(self.index.0)
208				.with_next_idx(next_idx);
209			self.notif_ctrl.notify_dev(notification_data);
210		}
211		Ok(())
212	}
213
214	fn index(&self) -> VqIndex {
215		self.index
216	}
217
218	fn size(&self) -> VqSize {
219		self.size
220	}
221
222	fn has_used_buffers(&self) -> bool {
223		self.ring.read_idx != self.ring.used_ring().idx.to_ne()
224	}
225}
226
227impl VirtqPrivate for SplitVq {
228	type Descriptor = virtq::Desc;
229	fn create_indirect_ctrl(
230		buffer_tkn: &AvailBufferToken,
231	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
232		Ok(Self::descriptor_iter(buffer_tkn)?
233			.zip(1..)
234			.map(|(descriptor, next_id)| Self::Descriptor {
235				next: next_id.into(),
236				..descriptor
237			})
238			.collect::<Vec<_>>()
239			.into_boxed_slice())
240	}
241}
242
impl SplitVq {
	/// Creates and enables the split virtqueue with the given `index` on the
	/// device configured through `com_cfg`/`notif_cfg`.
	///
	/// The requested `size` is passed to the device via `set_vq_size`; the
	/// value it returns is the one used for all ring allocations and stored
	/// in the resulting queue.
	///
	/// # Errors
	///
	/// * [`VirtqError::QueueNotExisting`] if the device has no queue `index`.
	/// * [`VirtqError::AllocationError`] if the available or used ring cannot
	///   be allocated.
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		size: VqSize,
		index: VqIndex,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Get a handler to the queues configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index.into()) else {
			return Err(VirtqError::QueueNotExisting(index.into()));
		};

		// `size` is shadowed by the value the queue handler reports back.
		let size = vq_handler.set_vq_size(size.0);

		// SAFETY: `UnsafeCell<T>` is `#[repr(transparent)]` over `T`, so both
		// box types share the same layout; the transmute only adds interior
		// mutability at the type level.
		let descr_table_cell = unsafe {
			core::mem::transmute::<
				Box<[MaybeUninit<virtq::Desc>], DeviceAlloc>,
				Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
			>(Box::new_uninit_slice_in(size.into(), DeviceAlloc))
		};

		let avail_ring_cell = {
			let avail = virtq::Avail::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same layout argument as for `descr_table_cell` above.
			unsafe {
				mem::transmute::<
					Box<virtq::Avail, DeviceAlloc>,
					Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
				>(avail)
			}
		};

		let used_ring_cell = {
			let used = virtq::Used::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same layout argument as for `descr_table_cell` above.
			unsafe {
				mem::transmute::<
					Box<virtq::Used, DeviceAlloc>,
					Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
				>(used)
			}
		};

		// Provide memory areas of the queues data structures to the device
		// NOTE(review): the pointers' addresses are used directly as physical
		// addresses — this presumes `DeviceAlloc` memory is identity-mapped;
		// confirm against the allocator's contract.
		vq_handler.set_ring_addr(PhysAddr::from(
			ptr::from_ref(descr_table_cell.as_ref()).expose_provenance(),
		));
		// As usize is safe here, as the *mut EventSuppr raw pointer is a thin pointer of size usize
		vq_handler.set_drv_ctrl_addr(PhysAddr::from(
			ptr::from_ref(avail_ring_cell.as_ref()).expose_provenance(),
		));
		vq_handler.set_dev_ctrl_addr(PhysAddr::from(
			ptr::from_ref(used_ring_cell.as_ref()).expose_provenance(),
		));

		let descr_ring = DescrRing {
			read_idx: 0,
			// No transfers are in flight yet, so every token slot starts empty.
			token_ring: core::iter::repeat_with(|| None)
				.take(size.into())
				.collect::<Vec<_>>()
				.into_boxed_slice(),
			mem_pool: MemPool::new(size),

			descr_table_cell,
			avail_ring_cell,
			used_ring_cell,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		// With VIRTIO_F_NOTIFICATION_DATA negotiated, extra data is sent
		// along with device notifications.
		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		// Mark the queue ready only after all addresses have been handed over.
		vq_handler.enable_queue();

		info!("Created SplitVq: idx={}, size={}", index.0, size);

		Ok(SplitVq {
			ring: descr_ring,
			notif_ctrl,
			size: VqSize(size),
			index,
		})
	}
}
331}