hermit/drivers/virtio/virtqueue/packed.rs

#![allow(dead_code)]

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::sync::atomic::{Ordering, fence};
use core::{ops, ptr};

use align_address::Align;
use memory_addresses::VirtAddr;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
    AvailBufferToken, BufferType, MemDescrId, MemPool, TransferToken, UsedBufferToken, Virtq,
    VirtqPrivate, VqIndex, VqSize,
};
use crate::arch::mm::paging;
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;
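/// A position in the descriptor ring: the descriptor offset paired with the value of
/// the wrap counter at that offset.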
#[derive(Default, PartialEq, Eq, Clone, Copy, Debug)]
struct RingIdx {
    off: u16,
    wrap: u8,
}
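/// Range check that is aware of the ring wrapping around. For example, with a queue of
/// size 8, the range `{off: 6, wrap: 1}..{off: 2, wrap: 0}` contains `{off: 7, wrap: 1}`
/// and `{off: 1, wrap: 0}`, but not `{off: 3, wrap: 0}`.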
trait RingIndexRange {
    fn wrapping_contains(&self, item: &RingIdx) -> bool;
}

impl RingIndexRange for ops::Range<RingIdx> {
    fn wrapping_contains(&self, item: &RingIdx) -> bool {
        let ops::Range { start, end } = self;

        if start.wrap == end.wrap {
            item.wrap == start.wrap && start.off <= item.off && item.off < end.off
        } else if item.wrap == start.wrap {
            start.off <= item.off
        } else {
            debug_assert!(item.wrap == end.wrap);
            item.off < end.off
        }
    }
}
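/// Wrap counter of the packed virtqueue. It is initialized to `true` (1) and flipped
/// every time the ring index wraps around, as required by the packed virtqueue layout.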
#[derive(Copy, Clone, Debug)]
struct WrapCount(bool);

impl WrapCount {
    fn flag_mask() -> virtq::DescF {
        virtq::DescF::AVAIL | virtq::DescF::USED
    }

    fn new() -> Self {
        WrapCount(true)
    }

    fn wrap(&mut self) {
        self.0 = !self.0;
    }
}
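/// The packed descriptor ring together with the driver-side bookkeeping: a shadow ring
/// of `TransferToken`s indexed by buffer id, the next write and poll positions, the
/// driver and device wrap counters, and the pool of free buffer ids. `capacity` tracks
/// how many descriptor slots are currently free.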
struct DescriptorRing {
    ring: Box<[pvirtq::Desc], DeviceAlloc>,
    tkn_ref_ring: Box<[Option<Box<TransferToken<pvirtq::Desc>>>]>,

    write_index: u16,
    capacity: u16,
    poll_index: u16,
    drv_wc: WrapCount,
    dev_wc: WrapCount,
    mem_pool: MemPool,
}

impl DescriptorRing {
    fn new(size: u16) -> Self {
        let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

        let tkn_ref_ring = core::iter::repeat_with(|| None)
            .take(size.into())
            .collect::<Vec<_>>()
            .into_boxed_slice();

        DescriptorRing {
            ring,
            tkn_ref_ring,
            write_index: 0,
            capacity: size,
            poll_index: 0,
            drv_wc: WrapCount::new(),
            dev_wc: WrapCount::new(),
            mem_pool: MemPool::new(size),
        }
    }
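    /// Polls the ring once and converts a freshly used descriptor chain back into a
    /// `UsedBufferToken`; returns `VirtqError::NoNewUsed` if the device has not marked
    /// anything as used.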
    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        let mut ctrl = self.get_read_ctrler();

        ctrl.poll_next()
            .map(|(tkn, written_len)| {
                UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
            })
            .ok_or(VirtqError::NoNewUsed)
    }
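    /// Writes a batch of transfer tokens into the ring. The flags of the very first
    /// descriptor are held back and only written by `make_avail_with_state` once all
    /// other descriptors of the batch are in place, so the device cannot observe a
    /// partially written chain.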
    fn push_batch(
        &mut self,
        tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
    ) -> Result<RingIdx, VirtqError> {
        let first_ctrl_settings;
        let first_buffer;
        let mut ctrl;

        let mut tkn_iterator = tkn_lst.into_iter();
        if let Some(first_tkn) = tkn_iterator.next() {
            ctrl = self.push_without_making_available(&first_tkn)?;
            first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
            first_buffer = Some(Box::new(first_tkn));
        } else {
            return Err(VirtqError::BufferNotSpecified);
        }
        for tkn in tkn_iterator {
            ctrl.make_avail(Box::new(tkn));
        }

        self.make_avail_with_state(
            first_buffer.unwrap(),
            first_ctrl_settings.0,
            first_ctrl_settings.1,
            first_ctrl_settings.2,
        );
        Ok(RingIdx {
            off: self.write_index,
            wrap: self.drv_wc.0.into(),
        })
    }

    fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<RingIdx, VirtqError> {
        self.push_batch([tkn])
    }
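    /// Writes the descriptors of a single token into the ring without publishing them.
    /// If the token carries a controlling (indirect) descriptor, only a single indirect
    /// descriptor referring to it is written instead of the whole chain.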
    fn push_without_making_available(
        &mut self,
        tkn: &TransferToken<pvirtq::Desc>,
    ) -> Result<WriteCtrl<'_>, VirtqError> {
        if tkn.num_consuming_descr() > self.capacity {
            return Err(VirtqError::NoDescrAvail);
        }

        let mut ctrl = self.get_write_ctrler()?;

        if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
            let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
            ctrl.write_desc(desc);
        } else {
            for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
                ctrl.write_desc(incomplete_desc);
            }
        }
        Ok(ctrl)
    }

    fn raw_addr(&self) -> usize {
        self.ring.as_ptr() as usize
    }
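    /// Reserves a free buffer id from the memory pool and returns a write controller
    /// positioned at the current write index. Fails if no buffer id is available.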
    fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
        let desc_id = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
        Ok(WriteCtrl {
            start: self.write_index,
            position: self.write_index,
            modulo: u16::try_from(self.ring.len()).unwrap(),
            first_flags: DescF::empty(),
            buff_id: desc_id,

            desc_ring: self,
        })
    }

    fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
        ReadCtrl {
            position: self.poll_index,
            modulo: u16::try_from(self.ring.len()).unwrap(),

            desc_ring: self,
        }
    }
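    /// Stores the token in the shadow ring and only then, after a full memory fence,
    /// writes the first descriptor's flags, which is what makes the chain visible to
    /// the device.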
    fn make_avail_with_state(
        &mut self,
        raw_tkn: Box<TransferToken<pvirtq::Desc>>,
        start: u16,
        buff_id: MemDescrId,
        first_flags: DescF,
    ) {
        self.tkn_ref_ring[usize::from(buff_id.0)] = Some(raw_tkn);
        fence(Ordering::SeqCst);
        self.ring[usize::from(start)].flags = first_flags;
    }
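    /// Marks the flags as available from the driver's side: `AVAIL` is set to the
    /// driver wrap counter and `USED` to its inverse.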
    fn to_marked_avail(&self, mut flags: DescF) -> DescF {
        flags.set(virtq::DescF::AVAIL, self.drv_wc.0);
        flags.set(virtq::DescF::USED, !self.drv_wc.0);
        flags
    }
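    /// A descriptor counts as used when its `AVAIL` and `USED` flags both match the
    /// device wrap counter: both set while the counter is 1, both clear while it is 0.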
    fn is_marked_used(&self, flags: DescF) -> bool {
        if self.dev_wc.0 {
            flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
        } else {
            !flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
        }
    }
}
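/// Cursor for reading used descriptor chains out of the ring, starting at the ring's
/// current poll index.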
struct ReadCtrl<'a> {
    position: u16,
    modulo: u16,

    desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
    fn poll_next(&mut self) -> Option<(Box<TransferToken<pvirtq::Desc>>, u32)> {
        let desc = &self.desc_ring.ring[usize::from(self.position)];
        if self.desc_ring.is_marked_used(desc.flags) {
            let buff_id = desc.id.to_ne();
            let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
                .take()
                .expect(
                    "The buff_id is incorrect or the reference to the TransferToken was misplaced.",
                );

            let write_len = desc.len.to_ne();

            for _ in 0..tkn.num_consuming_descr() {
                self.incrmt();
            }
            self.desc_ring.mem_pool.ret_id(MemDescrId(buff_id));

            Some((tkn, write_len))
        } else {
            None
        }
    }

    fn incrmt(&mut self) {
        if self.desc_ring.poll_index + 1 == self.modulo {
            self.desc_ring.dev_wc.wrap();
        }

        assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
        self.desc_ring.capacity += 1;

        self.desc_ring.poll_index = (self.desc_ring.poll_index + 1) % self.modulo;
        self.position = self.desc_ring.poll_index;
    }
}
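/// Cursor for writing a descriptor chain into the ring. `start` is the slot of the
/// chain's first descriptor (whose flags are deferred into `first_flags`), `position`
/// is the next slot to write, and `modulo` is the ring size used for index wrapping.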
struct WriteCtrl<'a> {
    start: u16,
    position: u16,
    modulo: u16,
    first_flags: DescF,
    buff_id: MemDescrId,

    desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
    fn incrmt(&mut self) {
        assert!(self.desc_ring.capacity != 0);
        self.desc_ring.capacity -= 1;
        if self.position + 1 == self.modulo {
            self.desc_ring.drv_wc.wrap();
        }
        self.desc_ring.write_index = (self.desc_ring.write_index + 1) % self.modulo;

        self.position = (self.position + 1) % self.modulo;
    }
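    /// Writes one descriptor into the ring. For the first descriptor of a chain the
    /// availability flags are only recorded in `first_flags`; all later descriptors get
    /// their flags written immediately, since the device does not process the chain
    /// before the first descriptor is flipped to available.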
    fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
        incomplete_desc.id = self.buff_id.0.into();
        if self.start == self.position {
            self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        } else {
            incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        }
        self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
        self.incrmt();
    }

    fn make_avail(&mut self, raw_tkn: Box<TransferToken<pvirtq::Desc>>) {
        assert!(self.start != self.position);
        self.desc_ring
            .make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
    }
}
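/// Driver-controlled event suppression area: used by the driver to tell the device
/// whether (and for which ring position) it wants to be notified about used buffers.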
struct DrvNotif {
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}
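/// Device-controlled event suppression area: read by the driver to decide whether the
/// device wants to be notified about newly available buffers.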
struct DevNotif {
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
    fn enable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
    }

    fn disable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
    }

    fn enable_specific(&mut self, idx: RingIdx) {
        if self.f_notif_idx {
            self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
            self.raw.desc = EventSuppressDesc::new()
                .with_desc_event_off(idx.off)
                .with_desc_event_wrap(idx.wrap);
        }
    }
}

impl DevNotif {
    pub fn enable_notif_specific(&mut self) {
        self.f_notif_idx = true;
    }

    fn is_notif(&self) -> bool {
        self.raw.flags.desc_event_flags() == RingEventFlags::Enable
    }

    fn notif_specific(&self) -> Option<RingIdx> {
        if !self.f_notif_idx {
            return None;
        }

        if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
            return None;
        }

        let off = self.raw.desc.desc_event_off();
        let wrap = self.raw.desc.desc_event_wrap();

        Some(RingIdx { off, wrap })
    }
}
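/// A packed virtqueue. Combines the descriptor ring, both event suppression
/// structures, the notification interface of the transport, and `last_next`, the ring
/// index reported in the most recent notification sent to the device.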
pub struct PackedVq {
    descr_ring: DescriptorRing,
    drv_event: DrvNotif,
    dev_event: DevNotif,
    notif_ctrl: NotifCtrl,
    size: VqSize,
    index: VqIndex,
    last_next: Cell<RingIdx>,
}
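// Notification policy shared by the dispatch methods below: after pushing descriptors,
// the device is only notified if it has notifications enabled or if it requested a
// notification for a specific descriptor position that this submission covers.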
impl Virtq for PackedVq {
    fn enable_notifs(&mut self) {
        self.drv_event.enable_notif();
    }

    fn disable_notifs(&mut self) {
        self.drv_event.disable_notif();
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        self.descr_ring.try_recv()
    }

    fn dispatch_batch(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index.0)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch_batch_await(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index.0)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch(
        &mut self,
        buffer_tkn: AvailBufferToken,
        notif: bool,
        buffer_type: BufferType,
    ) -> Result<(), VirtqError> {
        let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
        let next_idx = self.descr_ring.push(transfer_tkn)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let notif_specific = self.dev_event.notif_specific() == Some(self.last_next.get());

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index.0)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn index(&self) -> VqIndex {
        self.index
    }
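    /// Creates the queue: checks the feature set, negotiates the queue size with the
    /// transport, allocates the descriptor ring and the two event suppression
    /// structures, hands their physical addresses to the device, and finally enables
    /// the queue.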
    fn new(
        com_cfg: &mut ComCfg,
        notif_cfg: &NotifCfg,
        size: VqSize,
        index: VqIndex,
        features: virtio::F,
    ) -> Result<Self, VirtqError> {
        if features.contains(virtio::F::IN_ORDER) {
            info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
            return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
        }

        let Some(mut vq_handler) = com_cfg.select_vq(index.into()) else {
            return Err(VirtqError::QueueNotExisting(index.into()));
        };

        let vq_size = if (size.0 == 0) || (size.0 > 0x8000) {
            return Err(VirtqError::QueueSizeNotAllowed(size.0));
        } else {
            vq_handler.set_vq_size(size.0)
        };

        let descr_ring = DescriptorRing::new(vq_size);
        let mem_len =
            core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

        let drv_event_ptr =
            ptr::with_exposed_provenance_mut(crate::mm::allocate(mem_len, true).as_usize());
        let dev_event_ptr =
            ptr::with_exposed_provenance_mut(crate::mm::allocate(mem_len, true).as_usize());

        vq_handler.set_ring_addr(paging::virt_to_phys(VirtAddr::from(
            descr_ring.raw_addr() as u64
        )));
        vq_handler.set_drv_ctrl_addr(paging::virt_to_phys(VirtAddr::from(drv_event_ptr as u64)));
        vq_handler.set_dev_ctrl_addr(paging::virt_to_phys(VirtAddr::from(dev_event_ptr as u64)));

        let drv_event: &'static mut pvirtq::EventSuppress = unsafe { &mut *(drv_event_ptr) };

        let dev_event: &'static mut pvirtq::EventSuppress = unsafe { &mut *(dev_event_ptr) };

        let mut drv_event = DrvNotif {
            f_notif_idx: false,
            raw: drv_event,
        };

        let dev_event = DevNotif {
            f_notif_idx: false,
            raw: dev_event,
        };

        let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

        if features.contains(virtio::F::NOTIFICATION_DATA) {
            notif_ctrl.enable_notif_data();
        }

        if features.contains(virtio::F::EVENT_IDX) {
            drv_event.f_notif_idx = true;
        }

        vq_handler.enable_queue();

        info!("Created PackedVq: idx={}, size={}", index.0, vq_size);

        Ok(PackedVq {
            descr_ring,
            drv_event,
            dev_event,
            notif_ctrl,
            size: VqSize::from(vq_size),
            index,
            last_next: Cell::default(),
        })
    }

    fn size(&self) -> VqSize {
        self.size
    }

    fn has_used_buffers(&self) -> bool {
        let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index)];
        self.descr_ring.is_marked_used(desc.flags)
    }
}
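// For indirect transfers, the control descriptor table is simply the flattened list of
// packed descriptors produced by `descriptor_iter`.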
impl VirtqPrivate for PackedVq {
    type Descriptor = pvirtq::Desc;

    fn create_indirect_ctrl(
        buffer_tkn: &AvailBufferToken,
    ) -> Result<Box<[Self::Descriptor]>, VirtqError> {
        Ok(Self::descriptor_iter(buffer_tkn)?
            .collect::<Vec<_>>()
            .into_boxed_slice())
    }
}