hermit/drivers/virtio/virtqueue/
split.rs

//! This module contains Virtio's split virtqueue.
//! See Virtio specification v1.1. - 2.6
3
4use alloc::boxed::Box;
5use alloc::vec::Vec;
6use core::cell::UnsafeCell;
7use core::mem::{self, MaybeUninit};
8
9#[cfg(not(feature = "pci"))]
10use virtio::mmio::NotificationData;
11#[cfg(feature = "pci")]
12use virtio::pci::NotificationData;
13use virtio::{le16, virtq};
14
15#[cfg(not(feature = "pci"))]
16use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
17#[cfg(feature = "pci")]
18use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
19use super::error::VirtqError;
20use super::{
21	AvailBufferToken, BufferType, MemPool, TransferToken, UsedBufferToken, Virtq, VirtqPrivate,
22	VqIndex, VqSize,
23};
24use crate::arch::memory_barrier;
25use crate::mm::device_alloc::DeviceAlloc;
26
/// Driver-side state of one split virtqueue: the descriptor table plus the
/// available and used rings (Virtio spec v1.1, sec. 2.6).
struct DescrRing {
	/// Next entry to consume from the used ring; compared against the
	/// device-written `used.idx` to detect new used buffers.
	read_idx: u16,
	/// In-flight transfer tokens, indexed by the head descriptor index of
	/// their chain; `None` marks a free slot.
	token_ring: Box<[Option<Box<TransferToken<virtq::Desc>>>]>,
	/// Pool of free descriptor-table indices.
	mem_pool: MemPool,

	/// Descriptor Tables
	///
	/// # Safety
	///
	/// These tables may only be accessed via volatile operations.
	/// See the corresponding method for a safe wrapper.
	descr_table_cell: Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
	avail_ring_cell: Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
	used_ring_cell: Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
}
42
impl DescrRing {
	/// Returns a mutable reference to the descriptor table.
	///
	/// SAFETY of the dereference: `&mut self` guarantees exclusive access
	/// from the driver side.
	/// NOTE(review): the struct-level safety comment asks for volatile
	/// access, but this wrapper hands out a plain reference — confirm that
	/// non-volatile access is acceptable here.
	fn descr_table_mut(&mut self) -> &mut [MaybeUninit<virtq::Desc>] {
		unsafe { &mut *self.descr_table_cell.get() }
	}
	/// Returns a shared reference to the available ring.
	fn avail_ring(&self) -> &virtq::Avail {
		unsafe { &*self.avail_ring_cell.get() }
	}
	/// Returns a mutable reference to the available ring.
	fn avail_ring_mut(&mut self) -> &mut virtq::Avail {
		unsafe { &mut *self.avail_ring_cell.get() }
	}
	/// Returns a shared reference to the used ring (written by the device).
	fn used_ring(&self) -> &virtq::Used {
		unsafe { &*self.used_ring_cell.get() }
	}

	/// Writes the descriptor chain of `tkn` into the descriptor table and
	/// publishes its head in the available ring.
	///
	/// Indirect transfers (`tkn.ctrl_desc` is `Some`) occupy a single table
	/// slot pointing at the out-of-table chain; direct transfers occupy one
	/// slot per element, linked via the descriptors' `next` fields.
	///
	/// Returns the incremented available index on success, or
	/// [`VirtqError::NoDescrAvail`] if the table has no free slots left.
	fn push(&mut self, tkn: TransferToken<virtq::Desc>) -> Result<u16, VirtqError> {
		let mut index;
		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			// Indirect case: only one entry referencing `ctrl_desc` goes
			// into the main table.
			let descriptor = SplitVq::indirect_desc(ctrl_desc.as_ref());

			index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?.0;
			self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
		} else {
			// Direct case: write the chain back to front so every descriptor
			// can link to its already-written successor.
			let mut rev_all_desc_iter = SplitVq::descriptor_iter(&tkn.buff_tkn)?.rev();

			// We need to handle the last descriptor (the first for the reversed iterator) specially to not set the next flag.
			{
				// If the [AvailBufferToken] is empty, we panic
				let descriptor = rev_all_desc_iter.next().unwrap();

				index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?.0;
				self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
			}
			for mut descriptor in rev_all_desc_iter {
				// We have not updated `index` yet, so it is at this point the index of the previous descriptor that had been written.
				descriptor.next = le16::from(index);

				index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?.0;
				self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
			}
			// At this point, `index` is the index of the last element of the reversed iterator,
			// thus the head of the descriptor chain.
		}

		// Remember the token under the chain head's index so `try_recv` can
		// find it again once the device reports the chain as used.
		self.token_ring[usize::from(index)] = Some(Box::new(tkn));

		let len = self.token_ring.len();
		let idx = self.avail_ring_mut().idx.to_ne();
		self.avail_ring_mut().ring_mut(true)[idx as usize % len] = index.into();

		// Ensure the ring entry written above is visible before the device
		// can observe the incremented available index.
		memory_barrier();
		let next_idx = idx.wrapping_add(1);
		self.avail_ring_mut().idx = next_idx.into();

		Ok(next_idx)
	}

	/// Tries to take the next finished transfer from the used ring.
	///
	/// Returns [`VirtqError::NoNewUsed`] when the device has not marked any
	/// new buffers as used since the last successful call.
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		if self.read_idx == self.used_ring().idx.to_ne() {
			return Err(VirtqError::NoNewUsed);
		}
		let cur_ring_index = self.read_idx as usize % self.token_ring.len();
		let used_elem = self.used_ring().ring()[cur_ring_index];

		let tkn = self.token_ring[used_elem.id.to_ne() as usize]
			.take()
			.expect(
				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
			);

		// We return the indices of the now freed ring slots back to `mem_pool.`
		let mut id_ret_idx = u16::try_from(used_elem.id.to_ne()).unwrap();
		loop {
			self.mem_pool.ret_id(super::MemDescrId(id_ret_idx));
			// NOTE(review): `assume_init` relies on the entry having been
			// written by `push` — i.e. the device only reports ids that were
			// previously made available. Confirm this invariant holds.
			let cur_chain_elem =
				unsafe { self.descr_table_mut()[usize::from(id_ret_idx)].assume_init() };
			if cur_chain_elem.flags.contains(virtq::DescF::NEXT) {
				id_ret_idx = cur_chain_elem.next.to_ne();
			} else {
				break;
			}
		}

		memory_barrier();
		self.read_idx = self.read_idx.wrapping_add(1);
		Ok(UsedBufferToken::from_avail_buffer_token(
			tkn.buff_tkn,
			used_elem.len.to_ne(),
		))
	}

	/// Clears `NO_INTERRUPT` so the device may send used-buffer interrupts.
	fn drv_enable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.remove(virtq::AvailF::NO_INTERRUPT);
	}

	/// Sets `NO_INTERRUPT`, asking the device to suppress interrupts
	/// (advisory per the Virtio spec).
	fn drv_disable_notif(&mut self) {
		self.avail_ring_mut()
			.flags
			.insert(virtq::AvailF::NO_INTERRUPT);
	}

	/// Whether the device currently wants to be notified about new available
	/// buffers (i.e. it has not set `NO_NOTIFY` in the used ring).
	fn dev_is_notif(&self) -> bool {
		!self.used_ring().flags.contains(virtq::UsedF::NO_NOTIFY)
	}
}
149
/// Virtio's split virtqueue structure
pub struct SplitVq {
	/// Ring state: descriptor table, available ring and used ring.
	ring: DescrRing,
	/// Actual queue size as negotiated with the device.
	size: VqSize,
	/// Index of this queue among the device's virtqueues.
	index: VqIndex,

	/// Transport-specific mechanism for notifying the device.
	notif_ctrl: NotifCtrl,
}
158
159impl Virtq for SplitVq {
160	fn enable_notifs(&mut self) {
161		self.ring.drv_enable_notif();
162	}
163
164	fn disable_notifs(&mut self) {
165		self.ring.drv_disable_notif();
166	}
167
168	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
169		self.ring.try_recv()
170	}
171
172	fn dispatch_batch(
173		&mut self,
174		_tkns: Vec<(AvailBufferToken, BufferType)>,
175		_notif: bool,
176	) -> Result<(), VirtqError> {
177		unimplemented!();
178	}
179
180	fn dispatch_batch_await(
181		&mut self,
182		_tkns: Vec<(AvailBufferToken, BufferType)>,
183		_notif: bool,
184	) -> Result<(), VirtqError> {
185		unimplemented!()
186	}
187
188	fn dispatch(
189		&mut self,
190		buffer_tkn: AvailBufferToken,
191		notif: bool,
192		buffer_type: BufferType,
193	) -> Result<(), VirtqError> {
194		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
195		let next_idx = self.ring.push(transfer_tkn)?;
196
197		if notif {
198			// TODO: Check whether the splitvirtquue has notifications for specific descriptors
199			// I believe it does not.
200			unimplemented!();
201		}
202
203		if self.ring.dev_is_notif() {
204			let notification_data = NotificationData::new()
205				.with_vqn(self.index.0)
206				.with_next_idx(next_idx);
207			self.notif_ctrl.notify_dev(notification_data);
208		}
209		Ok(())
210	}
211
212	fn index(&self) -> VqIndex {
213		self.index
214	}
215
216	fn size(&self) -> VqSize {
217		self.size
218	}
219
220	fn has_used_buffers(&self) -> bool {
221		self.ring.read_idx != self.ring.used_ring().idx.to_ne()
222	}
223}
224
225impl VirtqPrivate for SplitVq {
226	type Descriptor = virtq::Desc;
227	fn create_indirect_ctrl(
228		buffer_tkn: &AvailBufferToken,
229	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
230		Ok(Self::descriptor_iter(buffer_tkn)?
231			.zip(1..)
232			.map(|(descriptor, next_id)| Self::Descriptor {
233				next: next_id.into(),
234				..descriptor
235			})
236			.collect::<Vec<_>>()
237			.into_boxed_slice())
238	}
239}
240
impl SplitVq {
	/// Creates a new split virtqueue with the given `size` at queue `index`
	/// and registers its rings with the device via `com_cfg`.
	///
	/// # Errors
	///
	/// Returns [`VirtqError::QueueNotExisting`] if the device exposes no
	/// queue at `index`, and [`VirtqError::AllocationError`] if a ring
	/// cannot be allocated in device-accessible memory.
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		size: VqSize,
		index: VqIndex,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// Get a handler to the queues configuration area.
		let Some(mut vq_handler) = com_cfg.select_vq(index.into()) else {
			return Err(VirtqError::QueueNotExisting(index.into()));
		};

		// The handler may report a different (negotiated) queue size than
		// requested; use its return value from here on.
		let size = vq_handler.set_vq_size(size.0);

		// Descriptor table, allocated with the device allocator.
		// SAFETY: wrapping the boxed slice in `UnsafeCell` via transmute is
		// layout-compatible because `UnsafeCell<T>` is `repr(transparent)`
		// over `T`.
		let mut descr_table_cell = unsafe {
			core::mem::transmute::<
				Box<[MaybeUninit<virtq::Desc>], DeviceAlloc>,
				Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
			>(Box::new_uninit_slice_in(size.into(), DeviceAlloc))
		};

		// Available ring (driver -> device).
		let mut avail_ring_cell = {
			let avail = virtq::Avail::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same `repr(transparent)` layout argument as above.
			unsafe {
				mem::transmute::<
					Box<virtq::Avail, DeviceAlloc>,
					Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
				>(avail)
			}
		};

		// Used ring (device -> driver).
		let mut used_ring_cell = {
			let used = virtq::Used::try_new_in(size, true, DeviceAlloc)
				.map_err(|_| VirtqError::AllocationError)?;

			// SAFETY: same `repr(transparent)` layout argument as above.
			unsafe {
				mem::transmute::<
					Box<virtq::Used, DeviceAlloc>,
					Box<UnsafeCell<virtq::Used>, DeviceAlloc>,
				>(used)
			}
		};

		// Provide memory areas of the queues data structures to the device
		vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_table_cell.as_mut()));
		vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(avail_ring_cell.as_mut()));
		vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(used_ring_cell.as_mut()));

		let descr_ring = DescrRing {
			read_idx: 0,
			// Start with all token slots empty.
			token_ring: core::iter::repeat_with(|| None)
				.take(size.into())
				.collect::<Vec<_>>()
				.into_boxed_slice(),
			mem_pool: MemPool::new(size),

			descr_table_cell,
			avail_ring_cell,
			used_ring_cell,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		// Tell the device the queue is ready for use.
		vq_handler.enable_queue();

		info!("Created SplitVq: idx={}, size={}", index.0, size);

		Ok(SplitVq {
			ring: descr_ring,
			notif_ctrl,
			size: VqSize(size),
			index,
		})
	}
}