hermit/drivers/virtio/virtqueue/packed.rs

#![allow(dead_code)]
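//! Driver-side implementation of Virtio's packed virtqueue
//! (the packed ring layout introduced with the Virtio 1.1 specification).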

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::sync::atomic::{Ordering, fence};
use core::{ops, ptr};

use align_address::Align;
use memory_addresses::PhysAddr;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
    AvailBufferToken, BufferType, MemDescrId, MemPool, TransferToken, UsedBufferToken, Virtq,
    VirtqPrivate, VqIndex, VqSize,
};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

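/// A position in the descriptor ring: the ring offset paired with the wrap
/// counter that is valid at that offset.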
#[derive(Default, PartialEq, Eq, Clone, Copy, Debug)]
struct RingIdx {
    off: u16,
    wrap: u8,
}

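/// Wrap-aware containment check for ranges of [`RingIdx`].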
trait RingIndexRange {
    fn wrapping_contains(&self, item: &RingIdx) -> bool;
}

impl RingIndexRange for ops::Range<RingIdx> {
    fn wrapping_contains(&self, item: &RingIdx) -> bool {
        let ops::Range { start, end } = self;

        if start.wrap == end.wrap {
            // The range does not cross the end of the ring.
            item.wrap == start.wrap && start.off <= item.off && item.off < end.off
        } else if item.wrap == start.wrap {
            // The range wraps around; the item lies in the part before the wrap.
            start.off <= item.off
        } else {
            // The range wraps around; the item lies in the part after the wrap.
            debug_assert!(item.wrap == end.wrap);
            item.off < end.off
        }
    }
}

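/// A ring wrap counter (driver or device side) of the packed virtqueue.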
#[derive(Copy, Clone, Debug)]
struct WrapCount(bool);

impl WrapCount {
    /// Mask of the AVAIL and USED flags, which encode the wrap counters in a descriptor.
    fn flag_mask() -> virtq::DescF {
        virtq::DescF::AVAIL | virtq::DescF::USED
    }

    /// Returns a wrap counter initialized to `true`, as required for a freshly created queue.
    fn new() -> Self {
        WrapCount(true)
    }

    /// Toggles the counter when the ring wraps around.
    fn wrap(&mut self) {
        self.0 = !self.0;
    }
}

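/// The packed descriptor ring together with the driver-side state needed to
/// drive it: token references, write and poll indices, wrap counters and the
/// buffer ID pool.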
struct DescriptorRing {
    ring: Box<[pvirtq::Desc], DeviceAlloc>,
    tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

    /// Position at which the next descriptor is written.
    write_index: u16,
    /// Number of currently free descriptor slots.
    capacity: u16,
    /// Position at which the device is polled for used descriptors.
    poll_index: u16,
    /// Driver wrap counter.
    drv_wc: WrapCount,
    /// Device wrap counter.
    dev_wc: WrapCount,
    mem_pool: MemPool,
}

impl DescriptorRing {
    fn new(size: u16) -> Self {
        let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

        let tkn_ref_ring = core::iter::repeat_with(|| None)
            .take(size.into())
            .collect::<Vec<_>>()
            .into_boxed_slice();

        DescriptorRing {
            ring,
            tkn_ref_ring,
            write_index: 0,
            capacity: size,
            poll_index: 0,
            drv_wc: WrapCount::new(),
            dev_wc: WrapCount::new(),
            mem_pool: MemPool::new(size),
        }
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        let mut ctrl = self.get_read_ctrler();

        ctrl.poll_next()
            .map(|(tkn, written_len)| {
                UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
            })
            .ok_or(VirtqError::NoNewUsed)
    }

    fn push_batch(
        &mut self,
        tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
    ) -> Result<RingIdx, VirtqError> {
        // The first descriptor of the batch is made available last, so the
        // device does not start processing before the whole batch is written.
        let first_ctrl_settings;
        let first_buffer;
        let mut ctrl;

        let mut tkn_iterator = tkn_lst.into_iter();
        if let Some(first_tkn) = tkn_iterator.next() {
            ctrl = self.push_without_making_available(&first_tkn)?;
            first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
            first_buffer = first_tkn;
        } else {
            return Err(VirtqError::BufferNotSpecified);
        }
        // Make the remaining transfers available.
        for tkn in tkn_iterator {
            ctrl.make_avail(tkn);
        }

        // Publish the first transfer, making the batch visible to the device.
        self.make_avail_with_state(
            first_buffer,
            first_ctrl_settings.0,
            first_ctrl_settings.1,
            first_ctrl_settings.2,
        );
        Ok(RingIdx {
            off: self.write_index,
            wrap: self.drv_wc.0.into(),
        })
    }

    fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<RingIdx, VirtqError> {
        self.push_batch([tkn])
    }

    fn push_without_making_available(
        &mut self,
        tkn: &TransferToken<pvirtq::Desc>,
    ) -> Result<WriteCtrl<'_>, VirtqError> {
        if tkn.num_consuming_descr() > self.capacity {
            return Err(VirtqError::NoDescrAvail);
        }

        let mut ctrl = self.get_write_ctrler()?;

        // If a control descriptor is present, the transfer is indirect and only a
        // single descriptor is placed in the ring; otherwise one descriptor is
        // written per buffer element.
        if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
            let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
            ctrl.write_desc(desc);
        } else {
            for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
                ctrl.write_desc(incomplete_desc);
            }
        }
        Ok(ctrl)
    }

    fn raw_addr(&self) -> usize {
        self.ring.as_ptr() as usize
    }

    fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
        let desc_id = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
        Ok(WriteCtrl {
            start: self.write_index,
            position: self.write_index,
            modulo: u16::try_from(self.ring.len()).unwrap(),
            first_flags: DescF::empty(),
            buff_id: desc_id,

            desc_ring: self,
        })
    }

    fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
        ReadCtrl {
            position: self.poll_index,
            modulo: u16::try_from(self.ring.len()).unwrap(),

            desc_ring: self,
        }
    }

    fn make_avail_with_state(
        &mut self,
        raw_tkn: TransferToken<pvirtq::Desc>,
        start: u16,
        buff_id: MemDescrId,
        first_flags: DescF,
    ) {
        self.tkn_ref_ring[usize::from(buff_id.0)] = Some(raw_tkn);
        // Ensure that the token reference and all descriptor writes are visible
        // before the first descriptor of the transfer is flagged as available.
        fence(Ordering::SeqCst);
        self.ring[usize::from(start)].flags = first_flags;
    }

    /// Returns the given flags with the AVAIL and USED bits set according to the
    /// current driver wrap counter, marking the descriptor as available.
    fn to_marked_avail(&self, mut flags: DescF) -> DescF {
        flags.set(virtq::DescF::AVAIL, self.drv_wc.0);
        flags.set(virtq::DescF::USED, !self.drv_wc.0);
        flags
    }

    /// Checks whether the given flags mark the descriptor as used by the device,
    /// according to the current device wrap counter.
    fn is_marked_used(&self, flags: DescF) -> bool {
        if self.dev_wc.0 {
            flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
        } else {
            !flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
        }
    }
}

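/// Control struct for consuming used descriptors from the ring, starting at
/// the ring's poll index.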
struct ReadCtrl<'a> {
    /// Position of the next descriptor to poll.
    position: u16,
    /// Size of the ring, used for wrapping arithmetic.
    modulo: u16,

    desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
    /// Polls the ring at the current position. If the descriptor there has been
    /// marked as used by the device, the corresponding transfer token and the
    /// number of bytes written by the device are returned.
    fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
        let desc = &self.desc_ring.ring[usize::from(self.position)];
        if self.desc_ring.is_marked_used(desc.flags) {
            let buff_id = desc.id.to_ne();
            let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
                .take()
                .expect(
                    "The buff_id is incorrect or the reference to the TransferToken was misplaced.",
                );

            let write_len = desc.len.to_ne();

            // Skip over all descriptors that belong to this transfer and return
            // the buffer ID to the pool.
            for _ in 0..tkn.num_consuming_descr() {
                self.incrmt();
            }
            self.desc_ring.mem_pool.ret_id(MemDescrId(buff_id));

            Some((tkn, write_len))
        } else {
            None
        }
    }

    fn incrmt(&mut self) {
        if self.desc_ring.poll_index + 1 == self.modulo {
            self.desc_ring.dev_wc.wrap();
        }

        // One descriptor slot has been handed back by the device.
        assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
        self.desc_ring.capacity += 1;

        self.desc_ring.poll_index = (self.desc_ring.poll_index + 1) % self.modulo;
        self.position = self.desc_ring.poll_index;
    }
}

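/// Control struct for writing the descriptors of a transfer into the ring and
/// making them available to the device.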
struct WriteCtrl<'a> {
    /// Position of the first descriptor of the transfer.
    start: u16,
    /// Position at which the next descriptor will be written.
    position: u16,
    /// Size of the ring, used for wrapping arithmetic.
    modulo: u16,
    /// Flags of the first descriptor, withheld until the transfer is made available.
    first_flags: DescF,
    /// Buffer ID stored in every descriptor of the transfer.
    buff_id: MemDescrId,

    desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
    fn incrmt(&mut self) {
        // Each written descriptor consumes one free slot.
        assert!(self.desc_ring.capacity != 0);
        self.desc_ring.capacity -= 1;
        // Toggle the driver wrap counter when stepping past the end of the ring.
        if self.position + 1 == self.modulo {
            self.desc_ring.drv_wc.wrap();
        }
        self.desc_ring.write_index = (self.desc_ring.write_index + 1) % self.modulo;

        self.position = (self.position + 1) % self.modulo;
    }

    /// Writes a descriptor into the ring at the current position. The available
    /// flags of the first descriptor are withheld in `first_flags`, so the chain
    /// only becomes visible to the device once `make_avail` is called.
    fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
        incomplete_desc.id = self.buff_id.0.into();
        if self.start == self.position {
            self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        } else {
            incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        }
        self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
        self.incrmt();
    }

    fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
        // Descriptors must have been written before the transfer can be made available.
        assert!(self.start != self.position);
        self.desc_ring
            .make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
    }
}

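/// Driver event suppression structure: written by the driver to control when
/// the device should send used-buffer notifications.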
struct DrvNotif {
    /// Indicates whether descriptor-specific notification requests may be used
    /// (i.e. the `EVENT_IDX` feature has been negotiated).
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}

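/// Device event suppression structure: read by the driver to decide whether
/// the device needs to be notified about newly available buffers.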
struct DevNotif {
    /// Indicates whether the device may request descriptor-specific notifications.
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
    /// Enables notifications from the device for all buffers.
    fn enable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
    }

    /// Disables notifications from the device.
    fn disable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
    }

    /// Requests a notification for the given descriptor position only.
    fn enable_specific(&mut self, idx: RingIdx) {
        // Descriptor-specific notifications require the EVENT_IDX feature.
        if self.f_notif_idx {
            self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
            self.raw.desc = EventSuppressDesc::new()
                .with_desc_event_off(idx.off)
                .with_desc_event_wrap(idx.wrap);
        }
    }
}

impl DevNotif {
    /// Enables descriptor-specific notification suppression by the device.
    pub fn enable_notif_specific(&mut self) {
        self.f_notif_idx = true;
    }

    /// Checks whether the device wants to be notified about every new buffer.
    fn is_notif(&self) -> bool {
        self.raw.flags.desc_event_flags() == RingEventFlags::Enable
    }

    /// Returns the ring index for which the device requested a notification, if
    /// descriptor-specific notifications are in use.
    fn notif_specific(&self) -> Option<RingIdx> {
        if !self.f_notif_idx {
            return None;
        }

        if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
            return None;
        }

        let off = self.raw.desc.desc_event_off();
        let wrap = self.raw.desc.desc_event_wrap();

        Some(RingIdx { off, wrap })
    }
}

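/// Driver-side representation of a Virtio packed virtqueue.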
pub struct PackedVq {
    /// The descriptor ring and its driver-side state.
    descr_ring: DescriptorRing,
    /// Driver event suppression structure.
    drv_event: DrvNotif,
    /// Device event suppression structure.
    dev_event: DevNotif,
    /// Notification interface of the device.
    notif_ctrl: NotifCtrl,
    /// Size of the queue.
    size: VqSize,
    /// Index (i.e. identifier) of the queue.
    index: VqIndex,
    /// Ring index used when checking for descriptor-specific notification requests.
    last_next: Cell<RingIdx>,
}

impl Virtq for PackedVq {
    fn enable_notifs(&mut self) {
        self.drv_event.enable_notif();
    }

    fn disable_notifs(&mut self) {
        self.drv_event.disable_notif();
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        self.descr_ring.try_recv()
    }

    fn dispatch_batch(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        // Zero transfers are not allowed.
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        // Notify the device if it asked for notifications in general or
        // specifically for a descriptor inside the newly pushed range.
        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index.0)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch_batch_await(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        // Zero transfers are not allowed.
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        // Notify the device if it asked for notifications in general or
        // specifically for a descriptor inside the newly pushed range.
        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index.0)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch(
        &mut self,
        buffer_tkn: AvailBufferToken,
        notif: bool,
        buffer_type: BufferType,
    ) -> Result<(), VirtqError> {
        let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
        let next_idx = self.descr_ring.push(transfer_tkn)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let notif_specific = self.dev_event.notif_specific() == Some(self.last_next.get());

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index.0)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn index(&self) -> VqIndex {
        self.index
    }

    fn size(&self) -> VqSize {
        self.size
    }

    fn has_used_buffers(&self) -> bool {
        let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index)];
        self.descr_ring.is_marked_used(desc.flags)
    }
}

impl VirtqPrivate for PackedVq {
    type Descriptor = pvirtq::Desc;

    fn create_indirect_ctrl(
        buffer_tkn: &AvailBufferToken,
    ) -> Result<Box<[Self::Descriptor]>, VirtqError> {
        Ok(Self::descriptor_iter(buffer_tkn)?
            .collect::<Vec<_>>()
            .into_boxed_slice())
    }
}

impl PackedVq {
    pub(crate) fn new(
        com_cfg: &mut ComCfg,
        notif_cfg: &NotifCfg,
        size: VqSize,
        index: VqIndex,
        features: virtio::F,
    ) -> Result<Self, VirtqError> {
        // This implementation does not support VIRTIO_F_IN_ORDER.
        if features.contains(virtio::F::IN_ORDER) {
            info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
            return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
        }

        // Get a handler to the queue's configuration area.
        let Some(mut vq_handler) = com_cfg.select_vq(index.into()) else {
            return Err(VirtqError::QueueNotExisting(index.into()));
        };

        // The queue size must lie in (0, 0x8000]; negotiate the actual size with the device.
        let vq_size = if (size.0 == 0) || (size.0 > 0x8000) {
            return Err(VirtqError::QueueSizeNotAllowed(size.0));
        } else {
            vq_handler.set_vq_size(size.0)
        };

        let descr_ring = DescriptorRing::new(vq_size);
        let _mem_len =
            core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

        // Allocate the event suppression structures in device-accessible memory.
        let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let drv_event = unsafe { drv_event.assume_init() };
        let dev_event = unsafe { dev_event.assume_init() };
        let drv_event = Box::leak(drv_event);
        let dev_event = Box::leak(dev_event);

        // Provide the device with the addresses of the ring and the event suppression structures.
        vq_handler.set_ring_addr(PhysAddr::from(descr_ring.raw_addr()));
        vq_handler.set_drv_ctrl_addr(PhysAddr::from(ptr::from_mut(drv_event).expose_provenance()));
        vq_handler.set_dev_ctrl_addr(PhysAddr::from(ptr::from_mut(dev_event).expose_provenance()));

        let mut drv_event = DrvNotif {
            f_notif_idx: false,
            raw: drv_event,
        };

        let dev_event = DevNotif {
            f_notif_idx: false,
            raw: dev_event,
        };

        let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

        if features.contains(virtio::F::NOTIFICATION_DATA) {
            notif_ctrl.enable_notif_data();
        }

        if features.contains(virtio::F::EVENT_IDX) {
            drv_event.f_notif_idx = true;
        }

        vq_handler.enable_queue();

        info!("Created PackedVq: idx={}, size={}", index.0, vq_size);

        Ok(PackedVq {
            descr_ring,
            drv_event,
            dev_event,
            notif_ctrl,
            size: VqSize::from(vq_size),
            index,
            last_next: Cell::default(),
        })
    }
}