hermit/drivers/virtio/virtqueue/packed.rs

#![allow(dead_code)]

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::ops;
use core::sync::atomic::{Ordering, fence};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
    AvailBufferToken, BufferType, MemDescrId, MemPool, TransferToken, UsedBufferToken, Virtq,
    VirtqPrivate, VqIndex, VqSize,
};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

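/// A position in the descriptor ring: the ring offset together with the wrap
/// counter value at that offset. The pair identifies a slot unambiguously even
/// across wrap-arounds of the ring.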
#[derive(Default, PartialEq, Eq, Clone, Copy, Debug)]
struct RingIdx {
    off: u16,
    wrap: u8,
}

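/// Containment check for ranges of [`RingIdx`] values that takes the wrap
/// counter into account, since a range may span the point where the ring
/// wraps around.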
trait RingIndexRange {
    fn wrapping_contains(&self, item: &RingIdx) -> bool;
}

impl RingIndexRange for ops::Range<RingIdx> {
    fn wrapping_contains(&self, item: &RingIdx) -> bool {
        let ops::Range { start, end } = self;

        if start.wrap == end.wrap {
            item.wrap == start.wrap && start.off <= item.off && item.off < end.off
        } else if item.wrap == start.wrap {
            start.off <= item.off
        } else {
            debug_assert!(item.wrap == end.wrap);
            item.off < end.off
        }
    }
}

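/// Ring wrap counter as defined for packed virtqueues in the Virtio
/// specification. The counter is toggled on every wrap-around of the ring and
/// decides how the AVAIL and USED descriptor flags have to be set and
/// interpreted.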
#[derive(Copy, Clone, Debug)]
struct WrapCount(bool);

impl WrapCount {
    fn flag_mask() -> virtq::DescF {
        virtq::DescF::AVAIL | virtq::DescF::USED
    }

    fn new() -> Self {
        WrapCount(true)
    }

    fn wrap(&mut self) {
        self.0 = !self.0;
    }
}

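/// The descriptor ring of a packed virtqueue plus the driver-side bookkeeping:
/// a shadow ring holding the `TransferToken`s of in-flight buffers (indexed by
/// buffer ID), the next write and poll positions, the driver and device wrap
/// counters, and the pool of free buffer IDs.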
struct DescriptorRing {
    ring: Box<[pvirtq::Desc], DeviceAlloc>,
    tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

    write_index: u16,
    capacity: u16,
    poll_index: u16,
    drv_wc: WrapCount,
    dev_wc: WrapCount,
    mem_pool: MemPool,
}

impl DescriptorRing {
    fn new(size: u16) -> Self {
        let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

        let tkn_ref_ring = core::iter::repeat_with(|| None)
            .take(size.into())
            .collect::<Vec<_>>()
            .into_boxed_slice();

        DescriptorRing {
            ring,
            tkn_ref_ring,
            write_index: 0,
            capacity: size,
            poll_index: 0,
            drv_wc: WrapCount::new(),
            dev_wc: WrapCount::new(),
            mem_pool: MemPool::new(size),
        }
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        let mut ctrl = self.get_read_ctrler();

        ctrl.poll_next()
            .map(|(tkn, written_len)| {
                UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
            })
            .ok_or(VirtqError::NoNewUsed)
    }

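    /// Writes the descriptors of all given tokens into the ring and returns the
    /// ring index right after the batch, which the driver needs for notifying
    /// the device. The first buffer of the batch is made available to the device
    /// only after all other buffers, so the device cannot start processing a
    /// partially written batch.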
    fn push_batch(
        &mut self,
        tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
    ) -> Result<RingIdx, VirtqError> {
        let first_ctrl_settings;
        let first_buffer;
        let mut ctrl;

        let mut tkn_iterator = tkn_lst.into_iter();
        if let Some(first_tkn) = tkn_iterator.next() {
            ctrl = self.push_without_making_available(&first_tkn)?;
            first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
            first_buffer = first_tkn;
        } else {
            return Err(VirtqError::BufferNotSpecified);
        }
        // Write the remaining tokens and make them available right away;
        // the first buffer of the batch is only published at the very end.
        for tkn in tkn_iterator {
            ctrl = self.push_without_making_available(&tkn)?;
            ctrl.make_avail(tkn);
        }

        self.make_avail_with_state(
            first_buffer,
            first_ctrl_settings.0,
            first_ctrl_settings.1,
            first_ctrl_settings.2,
        );
        Ok(RingIdx {
            off: self.write_index,
            wrap: self.drv_wc.0.into(),
        })
    }

    fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<RingIdx, VirtqError> {
        self.push_batch([tkn])
    }

    fn push_without_making_available(
        &mut self,
        tkn: &TransferToken<pvirtq::Desc>,
    ) -> Result<WriteCtrl<'_>, VirtqError> {
        if tkn.num_consuming_descr() > self.capacity {
            return Err(VirtqError::NoDescrAvail);
        }

        let mut ctrl = self.get_write_ctrler()?;

        if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
            let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
            ctrl.write_desc(desc);
        } else {
            for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
                ctrl.write_desc(incomplete_desc);
            }
        }
        Ok(ctrl)
    }

    fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
        self.ring.as_mut_ptr()
    }

    fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
        let desc_id = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
        Ok(WriteCtrl {
            start: self.write_index,
            position: self.write_index,
            modulo: u16::try_from(self.ring.len()).unwrap(),
            first_flags: DescF::empty(),
            buff_id: desc_id,

            desc_ring: self,
        })
    }

    fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
        ReadCtrl {
            position: self.poll_index,
            modulo: u16::try_from(self.ring.len()).unwrap(),

            desc_ring: self,
        }
    }

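    /// Stores the token in the reference ring and then publishes the first
    /// descriptor of the buffer by writing its flags. The fence ensures that
    /// the token and the descriptor contents are written before the descriptor
    /// can be observed as available by the device.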
    fn make_avail_with_state(
        &mut self,
        raw_tkn: TransferToken<pvirtq::Desc>,
        start: u16,
        buff_id: MemDescrId,
        first_flags: DescF,
    ) {
        self.tkn_ref_ring[usize::from(buff_id.0)] = Some(raw_tkn);
        fence(Ordering::SeqCst);
        self.ring[usize::from(start)].flags = first_flags;
    }

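    /// Sets the AVAIL and USED bits of the given flags according to the
    /// driver's wrap counter so that the descriptor is marked as available to
    /// the device.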
    fn to_marked_avail(&self, mut flags: DescF) -> DescF {
        flags.set(virtq::DescF::AVAIL, self.drv_wc.0);
        flags.set(virtq::DescF::USED, !self.drv_wc.0);
        flags
    }

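    /// Checks whether the given descriptor flags mark the descriptor as used
    /// by the device, taking the device's wrap counter into account.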
    fn is_marked_used(&self, flags: DescF) -> bool {
        if self.dev_wc.0 {
            flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
        } else {
            !flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
        }
    }
}

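/// Helper that walks the ring from the current poll position and reclaims
/// descriptors the device has marked as used.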
struct ReadCtrl<'a> {
    position: u16,
    modulo: u16,

    desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
    fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
        let desc = &self.desc_ring.ring[usize::from(self.position)];
        if self.desc_ring.is_marked_used(desc.flags) {
            let buff_id = desc.id.to_ne();
            let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
                .take()
                .expect(
                    "The buff_id is incorrect or the reference to the TransferToken was misplaced.",
                );

            let write_len = desc.len.to_ne();

            for _ in 0..tkn.num_consuming_descr() {
                self.incrmt();
            }
            self.desc_ring.mem_pool.ret_id(MemDescrId(buff_id));

            Some((tkn, write_len))
        } else {
            None
        }
    }

    fn incrmt(&mut self) {
        if self.desc_ring.poll_index + 1 == self.modulo {
            self.desc_ring.dev_wc.wrap();
        }

        assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
        self.desc_ring.capacity += 1;

        self.desc_ring.poll_index = (self.desc_ring.poll_index + 1) % self.modulo;
        self.position = self.desc_ring.poll_index;
    }
}

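/// Helper for writing the descriptors of one buffer into the ring. The flags
/// of the buffer's first descriptor are held back in `first_flags` so the
/// buffer only becomes visible to the device once `make_avail` is called.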
struct WriteCtrl<'a> {
    start: u16,
    position: u16,
    modulo: u16,
    first_flags: DescF,
    buff_id: MemDescrId,

    desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
    fn incrmt(&mut self) {
        assert!(self.desc_ring.capacity != 0);
        self.desc_ring.capacity -= 1;
        if self.position + 1 == self.modulo {
            self.desc_ring.drv_wc.wrap();
        }
        self.desc_ring.write_index = (self.desc_ring.write_index + 1) % self.modulo;

        self.position = (self.position + 1) % self.modulo;
    }

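    /// Writes one descriptor into the ring at the current position. For the
    /// first descriptor of the buffer, the availability flags are only computed
    /// and stored in `first_flags`; for all following descriptors they are
    /// written directly.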
    fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
        incomplete_desc.id = self.buff_id.0.into();
        if self.start == self.position {
            self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        } else {
            incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        }
        self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
        self.incrmt();
    }

    fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
        assert!(self.start != self.position);
        self.desc_ring
            .make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
    }
}

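/// Wrapper around the driver event suppression structure, which the driver
/// writes to tell the device which used-buffer notifications it wants to
/// receive.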
struct DrvNotif {
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}

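/// Wrapper around the device event suppression structure, written by the
/// device and read by the driver to decide whether the device has to be
/// notified about newly available buffers.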
struct DevNotif {
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
    fn enable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
    }

    fn disable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
    }

    fn enable_specific(&mut self, idx: RingIdx) {
        if self.f_notif_idx {
            self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
            self.raw.desc = EventSuppressDesc::new()
                .with_desc_event_off(idx.off)
                .with_desc_event_wrap(idx.wrap);
        }
    }
}

impl DevNotif {
    pub fn enable_notif_specific(&mut self) {
        self.f_notif_idx = true;
    }

    fn is_notif(&self) -> bool {
        self.raw.flags.desc_event_flags() == RingEventFlags::Enable
    }

    fn notif_specific(&self) -> Option<RingIdx> {
        if !self.f_notif_idx {
            return None;
        }

        if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
            return None;
        }

        let off = self.raw.desc.desc_event_off();
        let wrap = self.raw.desc.desc_event_wrap();

        Some(RingIdx { off, wrap })
    }
}

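/// A packed virtqueue as defined in the Virtio specification, consisting of
/// the descriptor ring, the two event suppression structures and the
/// notification configuration of this queue.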
pub struct PackedVq {
    descr_ring: DescriptorRing,
    drv_event: DrvNotif,
    dev_event: DevNotif,
    notif_ctrl: NotifCtrl,
    size: VqSize,
    index: VqIndex,
    last_next: Cell<RingIdx>,
}

impl Virtq for PackedVq {
    fn enable_notifs(&mut self) {
        self.drv_event.enable_notif();
    }

    fn disable_notifs(&mut self) {
        self.drv_event.disable_notif();
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        self.descr_ring.try_recv()
    }

    fn dispatch_batch(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index.0)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch_batch_await(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index.0)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch(
        &mut self,
        buffer_tkn: AvailBufferToken,
        notif: bool,
        buffer_type: BufferType,
    ) -> Result<(), VirtqError> {
        let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
        let next_idx = self.descr_ring.push(transfer_tkn)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let notif_specific = self.dev_event.notif_specific() == Some(self.last_next.get());

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index.0)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn index(&self) -> VqIndex {
        self.index
    }

    fn size(&self) -> VqSize {
        self.size
    }

    fn has_used_buffers(&self) -> bool {
        let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index)];
        self.descr_ring.is_marked_used(desc.flags)
    }
}

impl VirtqPrivate for PackedVq {
    type Descriptor = pvirtq::Desc;

    fn create_indirect_ctrl(
        buffer_tkn: &AvailBufferToken,
    ) -> Result<Box<[Self::Descriptor]>, VirtqError> {
        Ok(Self::descriptor_iter(buffer_tkn)?
            .collect::<Vec<_>>()
            .into_boxed_slice())
    }
}

impl PackedVq {
    pub(crate) fn new(
        com_cfg: &mut ComCfg,
        notif_cfg: &NotifCfg,
        size: VqSize,
        index: VqIndex,
        features: virtio::F,
    ) -> Result<Self, VirtqError> {
        if features.contains(virtio::F::IN_ORDER) {
            info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
            return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
        }

        let Some(mut vq_handler) = com_cfg.select_vq(index.into()) else {
            return Err(VirtqError::QueueNotExisting(index.into()));
        };

        let vq_size = if (size.0 == 0) | (size.0 > 0x8000) {
            return Err(VirtqError::QueueSizeNotAllowed(size.0));
        } else {
            vq_handler.set_vq_size(size.0)
        };

        let mut descr_ring = DescriptorRing::new(vq_size);
        let _mem_len =
            core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

        let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let drv_event = unsafe { drv_event.assume_init() };
        let dev_event = unsafe { dev_event.assume_init() };
        let drv_event = Box::leak(drv_event);
        let dev_event = Box::leak(dev_event);

        vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
        vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
        vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));

        let mut drv_event = DrvNotif {
            f_notif_idx: false,
            raw: drv_event,
        };

        let dev_event = DevNotif {
            f_notif_idx: false,
            raw: dev_event,
        };

        let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

        if features.contains(virtio::F::NOTIFICATION_DATA) {
            notif_ctrl.enable_notif_data();
        }

        if features.contains(virtio::F::EVENT_IDX) {
            drv_event.f_notif_idx = true;
        }

        vq_handler.enable_queue();

        info!("Created PackedVq: idx={}, size={}", index.0, vq_size);

        Ok(PackedVq {
            descr_ring,
            drv_event,
            dev_event,
            notif_ctrl,
            size: VqSize::from(vq_size),
            index,
            last_next: Cell::default(),
        })
    }
}