hermit/drivers/virtio/virtqueue/packed.rs

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::ops;
use core::sync::atomic::{Ordering, fence};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::index_alloc::IndexAlloc;
use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

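/// Helper for testing whether a ring position (offset plus wrap counter, as
/// encoded in an `EventSuppressDesc`) lies within a half-open range that may
/// wrap around the end of the descriptor ring.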
trait RingIndexRange {
    fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool;
}

impl RingIndexRange for ops::Range<EventSuppressDesc> {
    fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool {
        let start_off = self.start.desc_event_off();
        let start_wrap = self.start.desc_event_wrap();
        let end_off = self.end.desc_event_off();
        let end_wrap = self.end.desc_event_wrap();
        let item_off = item.desc_event_off();
        let item_wrap = item.desc_event_wrap();

        if start_wrap == end_wrap {
            item_wrap == start_wrap && start_off <= item_off && item_off < end_off
        } else if item_wrap == start_wrap {
            start_off <= item_off
        } else {
            debug_assert!(item_wrap == end_wrap);
            item_off < end_off
        }
    }
}

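/// The descriptor ring of a packed virtqueue (Virtio spec v1.1, sec. 2.7)
/// together with the driver-side bookkeeping needed to drive it.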
struct DescriptorRing {
    /// The descriptor table shared with the device.
    ring: Box<[pvirtq::Desc], DeviceAlloc>,
    /// Tokens of outstanding transfers, indexed by their buffer ID.
    tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

    /// Offset and wrap counter at which the next descriptor will be written.
    write_index: EventSuppressDesc,
    /// Number of currently free descriptor slots.
    capacity: u16,
    /// Offset and wrap counter at which the next used descriptor is polled.
    poll_index: EventSuppressDesc,
    /// Allocator for buffer IDs.
    indexes: IndexAlloc,
}

impl DescriptorRing {
    fn new(size: u16) -> Self {
        // A zeroed descriptor table is valid: all-zero flags mark a
        // descriptor as neither available nor used.
        let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

        let tkn_ref_ring = core::iter::repeat_with(|| None)
            .take(size.into())
            .collect::<Vec<_>>()
            .into_boxed_slice();

        // Both the write and the poll position start at offset 0 with the
        // wrap counter set to 1, as required for packed virtqueues.
        let write_index = EventSuppressDesc::new()
            .with_desc_event_off(0)
            .with_desc_event_wrap(1);

        let poll_index = write_index;

        DescriptorRing {
            ring,
            tkn_ref_ring,
            write_index,
            capacity: size,
            poll_index,
            indexes: IndexAlloc::new(size.into()),
        }
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        let mut ctrl = self.get_read_ctrler();

        ctrl.poll_next()
            .map(|(tkn, written_len)| {
                UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
            })
            .ok_or(VirtqError::NoNewUsed)
    }

    fn push_batch(
        &mut self,
        tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
    ) -> Result<EventSuppressDesc, VirtqError> {
        // The first buffer of the batch is made available last, so its
        // write-controller state has to be remembered until then.
        let first_ctrl_settings;
        let first_buffer;
        let mut ctrl;

        let mut tkn_iterator = tkn_lst.into_iter();
        if let Some(first_tkn) = tkn_iterator.next() {
            ctrl = self.push_without_making_available(&first_tkn)?;
            first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
            first_buffer = first_tkn;
        } else {
            // Empty batches are not allowed.
            return Err(VirtqError::BufferNotSpecified);
        }
        // Write and make available all remaining buffers of the batch.
        for tkn in tkn_iterator {
            ctrl = self.push_without_making_available(&tkn)?;
            ctrl.make_avail(tkn);
        }

        // Make the first buffer available last. The device only starts
        // processing the batch once the first descriptor is marked available.
        self.make_avail_with_state(
            first_buffer,
            first_ctrl_settings.0,
            first_ctrl_settings.1,
            first_ctrl_settings.2,
        );

        Ok(self.write_index)
    }

    fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<EventSuppressDesc, VirtqError> {
        self.push_batch([tkn])
    }

    fn push_without_making_available(
        &mut self,
        tkn: &TransferToken<pvirtq::Desc>,
    ) -> Result<WriteCtrl<'_>, VirtqError> {
        if tkn.num_consuming_descr() > self.capacity {
            return Err(VirtqError::NoDescrAvail);
        }

        let mut ctrl = self.get_write_ctrler()?;

        if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
            // Indirect buffers are published through a single descriptor that
            // references the separately allocated descriptor table.
            let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
            ctrl.write_desc(desc);
        } else {
            for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
                ctrl.write_desc(incomplete_desc);
            }
        }
        Ok(ctrl)
    }

    fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
        self.ring.as_mut_ptr()
    }

    fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
        let desc_id = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
        Ok(WriteCtrl {
            start: self.write_index.desc_event_off(),
            position: self.write_index.desc_event_off(),
            modulo: u16::try_from(self.ring.len()).unwrap(),
            first_flags: DescF::empty(),
            buff_id: u16::try_from(desc_id).unwrap(),

            desc_ring: self,
        })
    }

    fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
        ReadCtrl {
            position: self.poll_index.desc_event_off(),
            modulo: u16::try_from(self.ring.len()).unwrap(),

            desc_ring: self,
        }
    }

    fn make_avail_with_state(
        &mut self,
        raw_tkn: TransferToken<pvirtq::Desc>,
        start: u16,
        buff_id: u16,
        first_flags: DescF,
    ) {
        // Remember the token so it can be returned once the device marks the
        // buffer as used.
        self.tkn_ref_ring[usize::from(buff_id)] = Some(raw_tkn);
        // Make sure all descriptor writes are visible to the device before
        // the first descriptor's flags mark the chain as available.
        fence(Ordering::SeqCst);
        self.ring[usize::from(start)].flags = first_flags;
    }

    /// Sets the AVAIL and USED bits of `flags` according to the driver's
    /// wrap counter: AVAIL equals the wrap counter and USED is its inverse,
    /// which marks the descriptor as available to the device.
    fn to_marked_avail(&self, mut flags: DescF) -> DescF {
        let avail = self.write_index.desc_event_wrap() != 0;
        flags.set(virtq::DescF::AVAIL, avail);
        flags.set(virtq::DescF::USED, !avail);
        flags
    }

    /// A descriptor is used once the device has set AVAIL and USED to the
    /// same value; which polarity counts as used depends on the wrap counter
    /// of the poll index.
    fn is_marked_used(&self, flags: DescF) -> bool {
        if self.poll_index.desc_event_wrap() != 0 {
            flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
        } else {
            !flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
        }
    }
}

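/// Helper for polling used descriptors. Operates on the ring's poll index
/// and advances it past all descriptors of a finished transfer.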
struct ReadCtrl<'a> {
    /// Position of the next descriptor to be polled.
    position: u16,
    modulo: u16,

    desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
    /// Polls the ring for the next used buffer. Returns the corresponding
    /// transfer token and the number of bytes written by the device.
    fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
        let desc = &self.desc_ring.ring[usize::from(self.position)];
        if self.desc_ring.is_marked_used(desc.flags) {
            let buff_id = desc.id.to_ne();
            let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
                .take()
                .expect(
                    "The buff_id is incorrect or the reference to the TransferToken was misplaced.",
                );

            let write_len = desc.len.to_ne();

            // Skip over all descriptors consumed by this transfer.
            for _ in 0..tkn.num_consuming_descr() {
                self.incrmt();
            }
            // The buffer ID is no longer in use and can be handed out again.
            unsafe {
                self.desc_ring.indexes.deallocate(buff_id.into());
            }

            Some((tkn, write_len))
        } else {
            None
        }
    }

    fn incrmt(&mut self) {
        let mut desc = self.desc_ring.poll_index;

        // Toggle the wrap counter when wrapping around the end of the ring.
        if desc.desc_event_off() + 1 == self.modulo {
            let wrap = desc.desc_event_wrap() ^ 1;
            desc.set_desc_event_wrap(wrap);
        }

        let off = (desc.desc_event_off() + 1) % self.modulo;
        desc.set_desc_event_off(off);

        self.desc_ring.poll_index = desc;

        self.position = desc.desc_event_off();

        // Reclaiming a used descriptor frees one slot again.
        assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
        self.desc_ring.capacity += 1;
    }
}

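/// Helper for writing a descriptor chain into the ring. Descriptors are
/// written at consecutive positions; the first descriptor's flags are
/// withheld until `make_avail` so that the device never sees a partially
/// written chain.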
struct WriteCtrl<'a> {
    /// Position of the first descriptor of the chain being written.
    start: u16,
    /// Position at which the next descriptor will be written.
    position: u16,
    modulo: u16,
    /// Flags of the first descriptor; written last, in `make_avail`.
    first_flags: DescF,
    /// Buffer ID of the chain.
    buff_id: u16,

    desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
    fn incrmt(&mut self) {
        // Writing a descriptor consumes one free slot.
        assert!(self.desc_ring.capacity != 0);
        self.desc_ring.capacity -= 1;

        let mut desc = self.desc_ring.write_index;

        // Toggle the wrap counter when wrapping around the end of the ring.
        if self.position + 1 == self.modulo {
            let wrap = desc.desc_event_wrap() ^ 1;
            desc.set_desc_event_wrap(wrap);
        }

        let off = (desc.desc_event_off() + 1) % self.modulo;
        desc.set_desc_event_off(off);

        self.desc_ring.write_index = desc;

        self.position = (self.position + 1) % self.modulo;
    }

    fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
        incomplete_desc.id = self.buff_id.into();
        if self.start == self.position {
            // Hold back the first descriptor's marked flags; they are only
            // written in make_avail, which publishes the whole chain at once.
            self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        } else {
            incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        }
        self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
        self.incrmt();
    }

    fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
        // Making a buffer available without having written any descriptors is a bug.
        assert!(self.start != self.position);
        self.desc_ring
            .make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
    }
}

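/// Driver event suppression structure. Written by the driver to control
/// which used-buffer notifications (interrupts) the device sends.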
struct DrvNotif {
    /// True if VIRTIO_F_EVENT_IDX was negotiated and descriptor-specific
    /// events may be requested.
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}

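/// Device event suppression structure. Read by the driver to decide whether
/// the device currently wants to be notified about available buffers.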
struct DevNotif {
    /// True if the device may request notifications for specific descriptor
    /// positions.
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
    /// Enables used-buffer notifications from the device.
    fn enable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
    }

    /// Disables used-buffer notifications from the device.
    fn disable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
    }

    /// Requests a notification for the given ring position only. Has no
    /// effect unless VIRTIO_F_EVENT_IDX was negotiated.
    fn enable_specific(&mut self, desc: EventSuppressDesc) {
        if self.f_notif_idx {
            self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
            self.raw.desc = desc;
        }
    }
}

impl DevNotif {
    #[expect(dead_code)]
    pub fn enable_notif_specific(&mut self) {
        self.f_notif_idx = true;
    }

    /// Returns true if the device wants to be notified about every newly
    /// available buffer.
    fn is_notif(&self) -> bool {
        self.raw.flags.desc_event_flags() == RingEventFlags::Enable
    }

    /// Returns the ring position for which the device requested a
    /// notification, if descriptor-specific notifications are enabled.
    fn notif_specific(&self) -> Option<EventSuppressDesc> {
        if !self.f_notif_idx {
            return None;
        }

        if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
            return None;
        }

        Some(self.raw.desc)
    }
}

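/// A packed virtqueue (Virtio spec v1.1, sec. 2.7), consisting of the
/// descriptor ring, the two event suppression structures, and the
/// notification configuration of the queue.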
pub struct PackedVq {
    /// The actual descriptor ring.
    descr_ring: DescriptorRing,
    /// Driver event suppression structure; controls which used-buffer
    /// notifications the device sends.
    drv_event: DrvNotif,
    /// Device event suppression structure; tells the driver when the device
    /// wants to be notified about available buffers.
    dev_event: DevNotif,
    notif_ctrl: NotifCtrl,
    /// The negotiated size of the queue.
    size: u16,
    /// The index of the queue within the device.
    index: u16,
    /// Write position at the time of the most recent device notification.
    last_next: Cell<EventSuppressDesc>,
}

impl Virtq for PackedVq {
    fn enable_notifs(&mut self) {
        self.drv_event.enable_notif();
    }

    fn disable_notifs(&mut self) {
        self.drv_event.disable_notif();
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        self.descr_ring.try_recv()
    }

    fn dispatch_batch(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index)
                .with_next_off(next_idx.desc_event_off())
                .with_next_wrap(next_idx.desc_event_wrap());
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch_batch_await(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index)
                .with_next_off(next_idx.desc_event_off())
                .with_next_wrap(next_idx.desc_event_wrap());
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch(
        &mut self,
        buffer_tkn: AvailBufferToken,
        notif: bool,
        buffer_type: BufferType,
    ) -> Result<(), VirtqError> {
        let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
        let next_idx = self.descr_ring.push(transfer_tkn)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let notif_specific = self
            .dev_event
            .notif_specific()
            .map(EventSuppressDesc::into_bits)
            == Some(self.last_next.get().into_bits());

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index)
                .with_next_off(next_idx.desc_event_off())
                .with_next_wrap(next_idx.desc_event_wrap());
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn index(&self) -> u16 {
        self.index
    }

    fn size(&self) -> u16 {
        self.size
    }

    fn has_used_buffers(&self) -> bool {
        let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index.desc_event_off())];
        self.descr_ring.is_marked_used(desc.flags)
    }
}

impl VirtqPrivate for PackedVq {
    type Descriptor = pvirtq::Desc;

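    /// Collects the descriptors of the given buffer token into a separate
    /// table that can be referenced by a single indirect descriptor.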
    fn create_indirect_ctrl(
        buffer_tkn: &AvailBufferToken,
    ) -> Result<Box<[Self::Descriptor]>, VirtqError> {
        Ok(Self::descriptor_iter(buffer_tkn)?
            .collect::<Vec<_>>()
            .into_boxed_slice())
    }
}

impl PackedVq {
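    /// Creates a new packed virtqueue with the given index, negotiates its
    /// size via the common configuration, registers the ring and event
    /// suppression structures with the device, and enables the queue.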
    #[allow(dead_code)]
    pub(crate) fn new(
        com_cfg: &mut ComCfg,
        notif_cfg: &NotifCfg,
        max_size: u16,
        index: u16,
        features: virtio::F,
    ) -> Result<Self, VirtqError> {
        // This implementation does not support VIRTIO_F_IN_ORDER.
        if features.contains(virtio::F::IN_ORDER) {
            info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
            return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
        }

        let Some(mut vq_handler) = com_cfg.select_vq(index) else {
            return Err(VirtqError::QueueNotExisting(index));
        };

        // The queue size must be non-zero and at most 2^15 (32768).
        let vq_size = if (max_size == 0) || (max_size > 0x8000) {
            return Err(VirtqError::QueueSizeNotAllowed(max_size));
        } else {
            vq_handler.set_vq_size(max_size)
        };

        let mut descr_ring = DescriptorRing::new(vq_size);
        let _mem_len =
            core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

        // Zeroed event suppression structures are valid initial values.
        let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let drv_event = unsafe { drv_event.assume_init() };
        let dev_event = unsafe { dev_event.assume_init() };
        let drv_event = Box::leak(drv_event);
        let dev_event = Box::leak(dev_event);

        // Communicate the physical addresses of the ring and the event
        // suppression structures to the device.
        vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
        vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
        vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));

        let mut drv_event = DrvNotif {
            f_notif_idx: false,
            raw: drv_event,
        };

        let dev_event = DevNotif {
            f_notif_idx: false,
            raw: dev_event,
        };

        let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

        if features.contains(virtio::F::NOTIFICATION_DATA) {
            notif_ctrl.enable_notif_data();
        }

        if features.contains(virtio::F::EVENT_IDX) {
            drv_event.f_notif_idx = true;
        }

        vq_handler.enable_queue();

        info!("Created PackedVq: idx={index}, size={vq_size}");

        Ok(PackedVq {
            descr_ring,
            drv_event,
            dev_event,
            notif_ctrl,
            size: vq_size,
            index,
            last_next: Cell::default(),
        })
    }
}