hermit/drivers/virtio/virtqueue/packed.rs
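
//! Packed virtqueue implementation (VIRTIO spec v1.1, sec. 2.7 "Packed Virtqueues").
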
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::sync::atomic::{Ordering, fence};
use core::{mem, ops};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::index_alloc::IndexAlloc;
use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

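/// Helper for checking whether a descriptor event index falls into a ring
/// range, taking the wrap counter of the packed ring into account.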
trait RingIndexRange {
	fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool;
}

impl RingIndexRange for ops::Range<EventSuppressDesc> {
	fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool {
		let start_off = self.start.desc_event_off();
		let start_wrap = self.start.desc_event_wrap();
		let end_off = self.end.desc_event_off();
		let end_wrap = self.end.desc_event_wrap();
		let item_off = item.desc_event_off();
		let item_wrap = item.desc_event_wrap();

		if start_wrap == end_wrap {
			item_wrap == start_wrap && start_off <= item_off && item_off < end_off
		} else if item_wrap == start_wrap {
			start_off <= item_off
		} else {
			debug_assert!(item_wrap == end_wrap);
			item_off < end_off
		}
	}
}

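/// The packed descriptor ring and its driver-side bookkeeping.
///
/// `ring` holds the descriptors in device-accessible memory, `tkn_ref_ring`
/// keeps the in-flight [`TransferToken`]s indexed by buffer id, `write_index`
/// and `poll_index` track the next slot to write respectively poll (offset
/// plus wrap counter), `capacity` counts the free descriptor slots, and
/// `indexes` hands out free buffer ids.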
struct DescriptorRing {
	ring: Box<[pvirtq::Desc], DeviceAlloc>,
	tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

	write_index: EventSuppressDesc,
	capacity: u16,
	poll_index: EventSuppressDesc,
	indexes: IndexAlloc,
}

impl DescriptorRing {
	fn new(size: u16) -> Self {
		let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

		let tkn_ref_ring = core::iter::repeat_with(|| None)
			.take(size.into())
			.collect::<Vec<_>>()
			.into_boxed_slice();

		let write_index = EventSuppressDesc::new()
			.with_desc_event_off(0)
			.with_desc_event_wrap(1);

		let poll_index = write_index;

		DescriptorRing {
			ring,
			tkn_ref_ring,
			write_index,
			capacity: size,
			poll_index,
			indexes: IndexAlloc::new(size.into()),
		}
	}

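	/// Polls the ring for the next used buffer and converts it into a
	/// [`UsedBufferToken`], or returns [`VirtqError::NoNewUsed`] if the device
	/// has not marked any further descriptor as used.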
	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		let mut ctrl = self.get_read_ctrler();

		ctrl.poll_next()
			.map(|(tkn, written_len)| {
				UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
			})
			.ok_or(VirtqError::NoNewUsed)
	}

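	/// Writes the descriptors of all given transfer tokens into the ring and
	/// makes them available to the device. Returns the new write index, which
	/// the caller may use for descriptor-specific event suppression.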
	fn push_batch(
		&mut self,
		tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
	) -> Result<EventSuppressDesc, VirtqError> {
		let mut tkn_iterator = tkn_lst.into_iter();
		let Some(first_tkn) = tkn_iterator.next() else {
			return Err(VirtqError::BufferNotSpecified);
		};

		let mut ctrl = self.push_without_making_available(&first_tkn)?;
		let first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
		let first_buffer = first_tkn;

		// Write and make available the descriptors of every remaining token.
		for tkn in tkn_iterator {
			ctrl = self.push_without_making_available(&tkn)?;
			ctrl.make_avail(tkn);
		}

		// The first buffer is made available last, so the device does not see
		// any part of the batch before all descriptors have been written.
		self.make_avail_with_state(
			first_buffer,
			first_ctrl_settings.0,
			first_ctrl_settings.1,
			first_ctrl_settings.2,
		);

		Ok(self.write_index)
	}

	fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<EventSuppressDesc, VirtqError> {
		self.push_batch([tkn])
	}

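	/// Writes the descriptors of a single token into the ring without marking
	/// the first descriptor as available. If the token carries a control
	/// descriptor, only one descriptor referring to the indirect table is
	/// written; otherwise one descriptor per buffer element is placed in the
	/// ring. The returned [`WriteCtrl`] is used to make the chain available.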
	fn push_without_making_available(
		&mut self,
		tkn: &TransferToken<pvirtq::Desc>,
	) -> Result<WriteCtrl<'_>, VirtqError> {
		if tkn.num_consuming_descr() > self.capacity {
			return Err(VirtqError::NoDescrAvail);
		}

		let mut ctrl = self.get_write_ctrler()?;

		if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
			let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
			ctrl.write_desc(desc);
		} else {
			for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
				ctrl.write_desc(incomplete_desc);
			}
		}
		Ok(ctrl)
	}

	fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
		self.ring.as_mut_ptr()
	}

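	/// Allocates a free buffer id and returns a [`WriteCtrl`] positioned at the
	/// current write index, or fails if no descriptor slot is available.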
	fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
		let desc_id = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
		Ok(WriteCtrl {
			start: self.write_index.desc_event_off(),
			position: self.write_index.desc_event_off(),
			modulo: u16::try_from(self.ring.len()).unwrap(),
			first_flags: DescF::empty(),
			buff_id: u16::try_from(desc_id).unwrap(),

			desc_ring: self,
		})
	}

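	/// Returns a [`ReadCtrl`] positioned at the current poll index.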
	fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
		ReadCtrl {
			position: self.poll_index.desc_event_off(),
			modulo: u16::try_from(self.ring.len()).unwrap(),

			desc_ring: self,
		}
	}

	fn make_avail_with_state(
		&mut self,
		raw_tkn: TransferToken<pvirtq::Desc>,
		start: u16,
		buff_id: u16,
		first_flags: DescF,
	) {
		self.tkn_ref_ring[usize::from(buff_id)] = Some(raw_tkn);

		// Ensure all descriptor writes are visible to the device before the
		// first descriptor's flags mark the chain as available.
		fence(Ordering::SeqCst);

		self.ring[usize::from(start)].flags = first_flags;
	}

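	/// Sets the AVAIL and USED bits of `flags` so that the descriptor is marked
	/// available for the current driver-side wrap counter: AVAIL matches the
	/// wrap counter and USED is its inverse (VIRTIO spec v1.1, sec. 2.7.1).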
	fn to_marked_avail(&self, mut flags: DescF) -> DescF {
		let avail = self.write_index.desc_event_wrap() != 0;
		flags.set(virtq::DescF::AVAIL, avail);
		flags.set(virtq::DescF::USED, !avail);
		flags
	}

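	/// Checks whether a descriptor has been marked as used by the device, i.e.
	/// whether its AVAIL and USED bits both match the driver's current
	/// poll-side wrap counter.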
	fn is_marked_used(&self, flags: DescF) -> bool {
		if self.poll_index.desc_event_wrap() != 0 {
			flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
		} else {
			!flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
		}
	}
}

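/// Read-side cursor over the descriptor ring, used to poll for used buffers
/// and to advance the poll index.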
struct ReadCtrl<'a> {
	position: u16,
	modulo: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
	fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
		let desc = &self.desc_ring.ring[usize::from(self.position)];
		if !self.desc_ring.is_marked_used(desc.flags) {
			return None;
		}

		let buff_id = desc.id.to_ne();
		let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
			.take()
			.expect(
				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
			);

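		// The device reports the total number of bytes it wrote to the
		// device-writable part of the buffer via the `len` field of the used
		// descriptor.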
		let write_len = desc.len.to_ne();

		for _ in 0..tkn.num_consuming_descr() {
			self.incrmt();
		}
		unsafe {
			self.desc_ring.indexes.deallocate(buff_id.into());
		}

		Some((tkn, write_len))
	}

	fn incrmt(&mut self) {
		let mut desc = self.desc_ring.poll_index;

		if desc.desc_event_off() + 1 == self.modulo {
			let wrap = desc.desc_event_wrap() ^ 1;
			desc.set_desc_event_wrap(wrap);
		}

		let off = (desc.desc_event_off() + 1) % self.modulo;
		desc.set_desc_event_off(off);

		self.desc_ring.poll_index = desc;

		self.position = desc.desc_event_off();

		assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
		self.desc_ring.capacity += 1;
	}
}

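/// Write-side cursor over the descriptor ring. `start` is the slot of the
/// first descriptor of the chain, `position` the slot to write next,
/// `first_flags` the deferred flags for the first descriptor, and `buff_id`
/// the buffer id shared by all descriptors of the chain.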
struct WriteCtrl<'a> {
	start: u16,
	position: u16,
	modulo: u16,
	first_flags: DescF,
	buff_id: u16,

	desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
	fn incrmt(&mut self) {
		// One free descriptor slot is consumed.
		assert!(self.desc_ring.capacity != 0);
		self.desc_ring.capacity -= 1;

		let mut desc = self.desc_ring.write_index;

		if self.position + 1 == self.modulo {
			let wrap = desc.desc_event_wrap() ^ 1;
			desc.set_desc_event_wrap(wrap);
		}

		let off = (desc.desc_event_off() + 1) % self.modulo;
		desc.set_desc_event_off(off);

		self.desc_ring.write_index = desc;

		self.position = (self.position + 1) % self.modulo;
	}

	fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
		incomplete_desc.id = self.buff_id.into();
		if self.start == self.position {
			// Defer the flags of the chain's first descriptor; they are only
			// written once the whole chain is in place (see `make_avail`).
			self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		} else {
			// Later descriptors can be marked available right away, as the
			// device does not process them before the chain's first descriptor
			// becomes available.
			incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
		}
		self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
		self.incrmt();
	}

	fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
		// At least one descriptor must have been written beforehand.
		assert!(self.start != self.position);
		self.desc_ring
			.make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
	}
}

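/// Driver-side event suppression structure: controls when the device sends
/// used-buffer notifications to the driver.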
struct DrvNotif {
	/// Indicates whether VIRTIO_F_EVENT_IDX (descriptor-specific events) is in use.
	f_notif_idx: bool,
	raw: &'static mut pvirtq::EventSuppress,
}

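/// Device-side event suppression structure: read by the driver to decide
/// whether the device wants to be notified about newly available buffers.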
struct DevNotif {
	/// Indicates whether VIRTIO_F_EVENT_IDX (descriptor-specific events) is in use.
	f_notif_idx: bool,
	raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
	/// Enables notifications from the device for all used buffers.
	fn enable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
	}

	/// Disables notifications from the device.
	fn disable_notif(&mut self) {
		self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
	}

	/// Requests a notification for the given descriptor position only.
	/// Has no effect unless VIRTIO_F_EVENT_IDX has been negotiated.
	fn enable_specific(&mut self, desc: EventSuppressDesc) {
		if self.f_notif_idx {
			self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
			self.raw.desc = desc;
		}
	}
}

impl DevNotif {
	#[expect(dead_code)]
	pub fn enable_notif_specific(&mut self) {
		self.f_notif_idx = true;
	}

	/// Returns `true` if the device currently wants to be notified about every
	/// newly available buffer.
	fn is_notif(&self) -> bool {
		self.raw.flags.desc_event_flags() == RingEventFlags::Enable
	}

	/// Returns the descriptor position for which the device requested a
	/// notification, if descriptor-specific events are in use.
	fn notif_specific(&self) -> Option<EventSuppressDesc> {
		if !self.f_notif_idx {
			return None;
		}

		if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
			return None;
		}

		Some(self.raw.desc)
	}
}

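/// A packed virtqueue as defined in the VIRTIO specification (v1.1, sec. 2.7).
///
/// `last_next` caches the write index after the most recent notification, so
/// that descriptor-specific notification requests from the device can be
/// matched against the range of newly available descriptors.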
pub struct PackedVq {
	descr_ring: DescriptorRing,
	drv_event: DrvNotif,
	dev_event: DevNotif,
	notif_ctrl: NotifCtrl,
	size: u16,
	index: u16,
	last_next: Cell<EventSuppressDesc>,
}

impl Virtq for PackedVq {
	fn enable_notifs(&mut self) {
		self.drv_event.enable_notif();
	}

	fn disable_notifs(&mut self) {
		self.drv_event.disable_notif();
	}

	fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
		self.descr_ring.try_recv()
	}

	fn dispatch_batch(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfer tokens are not allowed.
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		// Notify the device if it has notifications enabled, or if it asked for
		// a notification at a descriptor position this batch made available.
		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch_batch_await(
		&mut self,
		buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
		notif: bool,
	) -> Result<(), VirtqError> {
		// Zero transfer tokens are not allowed.
		assert!(!buffer_tkns.is_empty());

		let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
			Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
		});

		let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let range = self.last_next.get()..next_idx;
		let notif_specific = self
			.dev_event
			.notif_specific()
			.is_some_and(|idx| range.wrapping_contains(&idx));

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn dispatch(
		&mut self,
		buffer_tkn: AvailBufferToken,
		notif: bool,
		buffer_type: BufferType,
	) -> Result<(), VirtqError> {
		let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
		let next_idx = self.descr_ring.push(transfer_tkn)?;

		if notif {
			self.drv_event.enable_specific(next_idx);
		}

		let notif_specific = self
			.dev_event
			.notif_specific()
			.map(EventSuppressDesc::into_bits)
			== Some(self.last_next.get().into_bits());

		if self.dev_event.is_notif() || notif_specific {
			let notification_data = NotificationData::new()
				.with_vqn(self.index)
				.with_next_off(next_idx.desc_event_off())
				.with_next_wrap(next_idx.desc_event_wrap());
			self.notif_ctrl.notify_dev(notification_data);
			self.last_next.set(next_idx);
		}
		Ok(())
	}

	fn index(&self) -> u16 {
		self.index
	}

	fn size(&self) -> u16 {
		self.size
	}

	fn has_used_buffers(&self) -> bool {
		let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index.desc_event_off())];
		self.descr_ring.is_marked_used(desc.flags)
	}
}

impl VirtqPrivate for PackedVq {
	type Descriptor = pvirtq::Desc;

	fn create_indirect_ctrl(
		buffer_tkn: &AvailBufferToken,
	) -> Result<Box<[Self::Descriptor]>, VirtqError> {
		Ok(Self::descriptor_iter(buffer_tkn)?
			.collect::<Vec<_>>()
			.into_boxed_slice())
	}
}

impl PackedVq {
	#[allow(dead_code)]
	pub(crate) fn new(
		com_cfg: &mut ComCfg,
		notif_cfg: &NotifCfg,
		max_size: u16,
		index: u16,
		features: virtio::F,
	) -> Result<Self, VirtqError> {
		// This queue implementation does not support the VIRTIO_F_IN_ORDER feature.
		if features.contains(virtio::F::IN_ORDER) {
			info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
			return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
		}

		let mut vq_handler = com_cfg
			.select_vq(index)
			.ok_or(VirtqError::QueueNotExisting(index))?;

		// Packed virtqueues support at most 2^15 descriptors (VIRTIO spec v1.1, sec. 2.7).
		let vq_size = if (max_size == 0) || (max_size > 0x8000) {
			return Err(VirtqError::QueueSizeNotAllowed(max_size));
		} else {
			vq_handler.set_vq_size(max_size)
		};

		let mut descr_ring = DescriptorRing::new(vq_size);
		let _mem_len =
			mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

		let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
		let drv_event = unsafe { drv_event.assume_init() };
		let dev_event = unsafe { dev_event.assume_init() };
		let drv_event = Box::leak(drv_event);
		let dev_event = Box::leak(dev_event);

		vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
		vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
		vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));

		let mut drv_event = DrvNotif {
			f_notif_idx: false,
			raw: drv_event,
		};

		let dev_event = DevNotif {
			f_notif_idx: false,
			raw: dev_event,
		};

		let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

		if features.contains(virtio::F::NOTIFICATION_DATA) {
			notif_ctrl.enable_notif_data();
		}

		if features.contains(virtio::F::EVENT_IDX) {
			drv_event.f_notif_idx = true;
		}

		vq_handler.enable_queue();

		info!("Created PackedVq: idx={index}, size={vq_size}");

		Ok(PackedVq {
			descr_ring,
			drv_event,
			dev_event,
			notif_ctrl,
			size: vq_size,
			index,
			last_next: Cell::default(),
		})
	}
}