hermit/drivers/virtio/virtqueue/
split.rs

1//! This module contains Virtio's split virtqueue.
//! See Virtio specification v1.1 - 2.6
3
4use alloc::boxed::Box;
5use alloc::vec::Vec;
6use core::cell::UnsafeCell;
7use core::mem::{self, MaybeUninit};
8use core::ptr;
9
10use memory_addresses::VirtAddr;
11#[cfg(not(feature = "pci"))]
12use virtio::mmio::NotificationData;
13#[cfg(feature = "pci")]
14use virtio::pci::NotificationData;
15use virtio::{le16, virtq};
16
17#[cfg(not(feature = "pci"))]
18use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
19#[cfg(feature = "pci")]
20use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
21use super::error::VirtqError;
22use super::{
23	AvailBufferToken, BufferType, MemPool, TransferToken, UsedBufferToken, Virtq, VirtqPrivate,
24	VqIndex, VqSize,
25};
26use crate::arch::memory_barrier;
27use crate::arch::mm::paging;
28use crate::mm::device_alloc::DeviceAlloc;
29
/// Driver-side state of a split virtqueue: the device-shared descriptor
/// table, available ring, and used ring, plus driver-only bookkeeping for
/// handing buffers to the device and reclaiming them.
struct DescrRing {
	/// Index of the next used-ring entry to consume; compared against the
	/// device-written `used_ring().idx` to detect new used buffers.
	read_idx: u16,
	/// One slot per descriptor-table entry. A chain's [TransferToken] is
	/// stored under the index of its head descriptor (the id the device
	/// echoes back in the used ring) and taken out again in `try_recv`.
	token_ring: Box<[Option<Box<TransferToken<virtq::Desc>>>]>,
	/// Pool of currently free descriptor-table indices.
	mem_pool: MemPool,

	/// Descriptor Tables
	///
	/// # Safety
	///
	/// These tables may only be accessed via volatile operations.
	/// See the corresponding method for a safe wrapper.
	descr_table_cell: Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
	/// Available ring (driver → device), allocated in device-visible memory.
	avail_ring_cell: Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
	/// Used ring (device → driver), allocated in device-visible memory.
	used_ring_cell: Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
}
45
impl DescrRing {
	// NOTE(review): the safety comment on `descr_table_cell` requires volatile
	// access, yet these wrappers hand out plain (non-volatile) references.
	// Presumably correctness relies on the explicit `memory_barrier()` calls
	// in `push`/`try_recv` — TODO confirm against the driver's memory model.
	fn descr_table_mut(&mut self) -> &mut [MaybeUninit<virtq::Desc>] {
		unsafe { &mut *self.descr_table_cell.get() }
	}
	/// Shared view of the available ring (driver → device).
	fn avail_ring(&self) -> &virtq::Avail {
		unsafe { &*self.avail_ring_cell.get() }
	}
	/// Mutable view of the available ring (driver → device).
	fn avail_ring_mut(&mut self) -> &mut virtq::Avail {
		unsafe { &mut *self.avail_ring_cell.get() }
	}
	/// Shared view of the used ring (device → driver).
	fn used_ring(&self) -> &virtq::Used {
		unsafe { &*self.used_ring_cell.get() }
	}

	/// Writes the descriptor chain for `tkn` into the descriptor table and
	/// publishes the chain's head index on the available ring.
	///
	/// Returns the available ring's new `idx` value on success, or
	/// [VirtqError::NoDescrAvail] when the descriptor pool is exhausted.
	fn push(&mut self, tkn: TransferToken<virtq::Desc>) -> Result<u16, VirtqError> {
		let mut index;
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			// Indirect case: the whole chain lives in `ctrl_desc`; only a
			// single table entry pointing at that indirect table is needed.
			let descriptor = SplitVq::indirect_desc(ctrl_desc.as_ref());

			index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?.0;
			self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
		} else {
			// Direct case: write the descriptors back to front so each one can
			// link to its already-written successor via `next`.
			let mut rev_all_desc_iter = SplitVq::descriptor_iter(&tkn.buff_tkn)?.rev();

			// We need to handle the last descriptor (the first for the reversed iterator) specially to not set the next flag.
			{
				// If the [AvailBufferToken] is empty, we panic
				let descriptor = rev_all_desc_iter.next().unwrap();

				index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?.0;
				self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
			}
			for mut descriptor in rev_all_desc_iter {
				// We have not updated `index` yet, so it is at this point the index of the previous descriptor that had been written.
				descriptor.next = le16::from(index);

				index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?.0;
				self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
			}
			// At this point, `index` is the index of the last element of the reversed iterator,
			// thus the head of the descriptor chain.
		}

		// Keep the token so `try_recv` can hand the buffer back later; it is
		// stored under the head index that the device echoes in the used ring.
		self.token_ring[usize::from(index)] = Some(Box::new(tkn));

		let len = self.token_ring.len();
		let idx = self.avail_ring_mut().idx.to_ne();
		// Place the head index into the next free available-ring slot.
		self.avail_ring_mut().ring_mut(true)[idx as usize % len] = index.into();

		// Make the descriptor and ring-slot writes visible to the device
		// before publishing the incremented available index.
		memory_barrier();
		let next_idx = idx.wrapping_add(1);
		self.avail_ring_mut().idx = next_idx.into();

		Ok(next_idx)
	}

	/// Tries to reclaim one finished buffer from the used ring.
	///
	/// Returns [VirtqError::NoNewUsed] when the device has not published a new
	/// used element. On success, all descriptor indices of the chain are
	/// returned to `mem_pool` and the buffer token is handed back.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		if self.read_idx == self.used_ring().idx.to_ne() {
			return Err(VirtqError::NoNewUsed);
		}
		let cur_ring_index = self.read_idx as usize % self.token_ring.len();
		let used_elem = self.used_ring().ring()[cur_ring_index];

		let tkn = self.token_ring[used_elem.id.to_ne() as usize]
			.take()
			.expect(
				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
			);

		// We return the indices of the now freed ring slots back to `mem_pool.`
		// The chain is walked via each entry's `next` field until an entry
		// without the NEXT flag terminates it.
		let mut id_ret_idx = u16::try_from(used_elem.id.to_ne()).unwrap();
		loop {
			self.mem_pool.ret_id(super::MemDescrId(id_ret_idx));
			// SAFETY: every index reached here was written by `push` before the
			// chain was exposed to the device, so the entry is initialized.
			let cur_chain_elem =
				unsafe { self.descr_table_mut()[usize::from(id_ret_idx)].assume_init() };
			if cur_chain_elem.flags.contains(virtq::DescF::NEXT) {
				id_ret_idx = cur_chain_elem.next.to_ne();
			} else {
				break;
			}
		}

		// NOTE(review): barrier before advancing `read_idx`; presumably this
		// orders the used-ring reads above against subsequent reuse of the
		// freed slots — TODO confirm the intended ordering.
		memory_barrier();
		self.read_idx = self.read_idx.wrapping_add(1);
		Ok(UsedBufferToken::from_avail_buffer_token(
			tkn.buff_tkn,
			used_elem.len.to_ne(),
		))
	}

	/// Clears `NO_INTERRUPT`, allowing the device to interrupt on used buffers.
	fn drv_enable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.remove(virtq::AvailF::NO_INTERRUPT);
	}

	/// Sets `NO_INTERRUPT`, asking the device to suppress used-buffer interrupts.
	fn drv_disable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.insert(virtq::AvailF::NO_INTERRUPT);
	}

	/// Whether the device currently accepts notifications (`NO_NOTIFY` clear).
	fn dev_is_notif(&self) -> bool {
		!self.used_ring().flags.contains(virtq::UsedF::NO_NOTIFY)
	}
}
152
/// Virtio's split virtqueue structure
pub struct SplitVq {
	/// Descriptor table plus available/used rings and their bookkeeping.
	ring: DescrRing,
	/// Queue size as negotiated with the device in [Virtq::new].
	size: VqSize,
	/// Index of this queue within the device.
	index: VqIndex,

	/// Controls how the device is notified about new available buffers.
	notif_ctrl: NotifCtrl,
}
161
impl Virtq for SplitVq {
	/// Allows the device to send used-buffer interrupts for this queue again.
	fn enable_notifs(&mut self) {
		self.ring.drv_enable_notif();
	}

	/// Asks the device to suppress used-buffer interrupts for this queue.
	fn disable_notifs(&mut self) {
		self.ring.drv_disable_notif();
	}

	/// Polls the used ring for a finished buffer without blocking.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.ring.try_recv()
	}

	/// Batched dispatch is not implemented for the split virtqueue.
	fn dispatch_batch(
		&mut self,
		_tkns: Vec<(AvailBufferToken, BufferType)>,
		_notif: bool,
	) -> Result<(), VirtqError> {
		unimplemented!();
	}

	/// Batched awaitable dispatch is not implemented for the split virtqueue.
	fn dispatch_batch_await(
		&mut self,
		_tkns: Vec<(AvailBufferToken, BufferType)>,
		_notif: bool,
	) -> Result<(), VirtqError> {
		unimplemented!()
	}

	/// Exposes a single buffer to the device and, unless the device has set
	/// `NO_NOTIFY`, notifies it about the new available index.
	///
	/// `notif == true` (per-descriptor notification) is unsupported here and
	/// hits `unimplemented!`.
	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.ring.push(transfer_tkn)?;

		if notif {
			// TODO: Check whether the splitvirtquue has notifications for specific descriptors
			// I believe it does not.
			unimplemented!();
		}

		// Only notify if the device did not suppress notifications.
		if self.ring.dev_is_notif() {
			let notification_data = NotificationData::new()
				.with_vqn(self.index.0)
				.with_next_idx(next_idx);
			self.notif_ctrl.notify_dev(notification_data);
		}
		Ok(())
	}

	/// The queue's index within the device.
	fn index(&self) -> VqIndex {
		self.index
	}

	/// Creates the split virtqueue: negotiates the queue size, allocates the
	/// descriptor table and both rings in device-reachable memory, hands their
	/// physical addresses to the device, and enables the queue.
	///
	/// Returns [VirtqError::QueueNotExisting] when the device does not expose
	/// a queue with `index`, or [VirtqError::AllocationError] when a ring
	/// allocation fails.
	fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		size: VqSize,
		index: VqIndex,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Get a handler to the queues configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index.into()) else {
			return Err(VirtqError::QueueNotExisting(index.into()));
		};

		// The device may negotiate the size down; use the returned value everywhere below.
		let size = vq_handler.set_vq_size(size.0);

		// SAFETY: `UnsafeCell<T>` is guaranteed to have the same in-memory
		// representation as `T`, so this transmute only changes the type-level
		// aliasing rules, not the layout or the allocator.
		let descr_table_cell = unsafe {
			core::mem::transmute::<
				Box<[MaybeUninit<virtq::Desc>], DeviceAlloc>,
				Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
			>(Box::new_uninit_slice_in(size.into(), DeviceAlloc))
		};

		let avail_ring_cell = {
			let avail = virtq::Avail::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same layout argument as for `descr_table_cell` above.
			unsafe {
				mem::transmute::<
					Box<virtq::Avail, DeviceAlloc>,
					Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
				>(avail)
			}
		};

		let used_ring_cell = {
			let used = virtq::Used::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same layout argument as for `descr_table_cell` above.
			unsafe {
				mem::transmute::<
					Box<virtq::Used, DeviceAlloc>,
					Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
				>(used)
			}
		};

		// Provide memory areas of the queues data structures to the device
		vq_handler.set_ring_addr(paging::virt_to_phys(VirtAddr::from(
			ptr::from_ref(descr_table_cell.as_ref()).expose_provenance(),
		)));
		// NOTE(review): the original comment here referred to `*mut EventSuppr`,
		// a packed-queue type; these pointers are to `virtq::Avail`/`virtq::Used`.
		// `expose_provenance` yields the address as a plain `usize`.
		vq_handler.set_drv_ctrl_addr(paging::virt_to_phys(VirtAddr::from(
			ptr::from_ref(avail_ring_cell.as_ref()).expose_provenance(),
		)));
		vq_handler.set_dev_ctrl_addr(paging::virt_to_phys(VirtAddr::from(
			ptr::from_ref(used_ring_cell.as_ref()).expose_provenance(),
		)));

		let descr_ring = DescrRing {
			read_idx: 0,
			token_ring: core::iter::repeat_with(|| None)
				.take(size.into())
				.collect::<Vec<_>>()
				.into_boxed_slice(),
			mem_pool: MemPool::new(size),

			descr_table_cell,
			avail_ring_cell,
			used_ring_cell,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		vq_handler.enable_queue();

		info!("Created SplitVq: idx={}, size={}", index.0, size);

		Ok(SplitVq {
			ring: descr_ring,
			notif_ctrl,
			size: VqSize(size),
			index,
		})
	}

	/// The negotiated queue size.
	fn size(&self) -> VqSize {
		self.size
	}

	/// Whether the device has published used buffers not yet consumed.
	fn has_used_buffers(&self) -> bool {
		self.ring.read_idx != self.ring.used_ring().idx.to_ne()
	}
}
315
316impl VirtqPrivate for SplitVq {
317	type Descriptor = virtq::Desc;
318	fn create_indirect_ctrl(
319		buffer_tkn: &AvailBufferToken,
320	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
321		Ok(Self::descriptor_iter(buffer_tkn)?
322			.zip(1..)
323			.map(|(descriptor, next_id)| Self::Descriptor {
324				next: next_id.into(),
325				..descriptor
326			})
327			.collect::<Vec<_>>()
328			.into_boxed_slice())
329	}
330}