hermit/drivers/virtio/virtqueue/packed.rs

#![allow(dead_code)]
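//! Driver-side implementation of Virtio's packed virtqueue
//! (see the packed virtqueue section of the Virtio specification).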

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::ops;
use core::sync::atomic::{Ordering, fence};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::index_alloc::IndexAlloc;
use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

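// A ring position in a packed queue is an offset plus a wrap counter. This
// helper checks whether such a position lies within a half-open range of
// positions, even when the range crosses a wrap of the ring. The dispatch
// methods use it to decide whether the device's "notify me at this descriptor"
// request falls into the batch that was just made available.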
trait RingIndexRange {
    fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool;
}

impl RingIndexRange for ops::Range<EventSuppressDesc> {
    fn wrapping_contains(&self, item: &EventSuppressDesc) -> bool {
        let start_off = self.start.desc_event_off();
        let start_wrap = self.start.desc_event_wrap();
        let end_off = self.end.desc_event_off();
        let end_wrap = self.end.desc_event_wrap();
        let item_off = item.desc_event_off();
        let item_wrap = item.desc_event_wrap();

        if start_wrap == end_wrap {
            item_wrap == start_wrap && start_off <= item_off && item_off < end_off
        } else if item_wrap == start_wrap {
            start_off <= item_off
        } else {
            debug_assert!(item_wrap == end_wrap);
            item_off < end_off
        }
    }
}

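/// The driver-side state of one packed descriptor ring: the ring itself (in
/// device-accessible memory), a shadow ring of `TransferToken`s indexed by
/// buffer ID, the current write and poll positions with their wrap counters,
/// the number of free descriptor slots, and an allocator for buffer IDs.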
struct DescriptorRing {
    ring: Box<[pvirtq::Desc], DeviceAlloc>,
    tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

    /// Position and wrap counter at which the next descriptor will be written.
    write_index: EventSuppressDesc,
    /// Number of currently unused descriptor slots in the ring.
    capacity: u16,
    /// Position and wrap counter at which the next used descriptor is polled.
    poll_index: EventSuppressDesc,
    /// Allocator for buffer IDs, which index into `tkn_ref_ring`.
    indexes: IndexAlloc,
}

impl DescriptorRing {
    fn new(size: u16) -> Self {
        let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

        let tkn_ref_ring = core::iter::repeat_with(|| None)
            .take(size.into())
            .collect::<Vec<_>>()
            .into_boxed_slice();

        let write_index = EventSuppressDesc::new()
            .with_desc_event_off(0)
            .with_desc_event_wrap(1);

        let poll_index = write_index;

        DescriptorRing {
            ring,
            tkn_ref_ring,
            write_index,
            capacity: size,
            poll_index,
            indexes: IndexAlloc::new(size.into()),
        }
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        let mut ctrl = self.get_read_ctrler();

        ctrl.poll_next()
            .map(|(tkn, written_len)| {
                UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
            })
            .ok_or(VirtqError::NoNewUsed)
    }

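    /// Writes the descriptors of all tokens into the ring. The first
    /// descriptor of the first token is only made available to the device at
    /// the very end (via `make_avail_with_state`), so the device does not see
    /// a partially written batch. Returns the ring position at which the next
    /// write will happen, for use in event suppression.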
    fn push_batch(
        &mut self,
        tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
    ) -> Result<EventSuppressDesc, VirtqError> {
        let first_ctrl_settings;
        let first_buffer;
        let mut ctrl;

        let mut tkn_iterator = tkn_lst.into_iter();
        if let Some(first_tkn) = tkn_iterator.next() {
            ctrl = self.push_without_making_available(&first_tkn)?;
            first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
            first_buffer = first_tkn;
        } else {
            // Empty batches are not allowed.
            return Err(VirtqError::BufferNotSpecified);
        }
        // Write the remaining tokens and make them available right away; only
        // the first buffer of the batch is held back.
        for tkn in tkn_iterator {
            ctrl = self.push_without_making_available(&tkn)?;
            ctrl.make_avail(tkn);
        }

        // Publish the first buffer last.
        self.make_avail_with_state(
            first_buffer,
            first_ctrl_settings.0,
            first_ctrl_settings.1,
            first_ctrl_settings.2,
        );

        Ok(self.write_index)
    }

    fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<EventSuppressDesc, VirtqError> {
        self.push_batch([tkn])
    }

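    /// Writes the token's descriptors into the ring without marking the first
    /// descriptor as available to the device. The returned `WriteCtrl` carries
    /// the state needed to publish the buffer later.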
    fn push_without_making_available(
        &mut self,
        tkn: &TransferToken<pvirtq::Desc>,
    ) -> Result<WriteCtrl<'_>, VirtqError> {
        if tkn.num_consuming_descr() > self.capacity {
            return Err(VirtqError::NoDescrAvail);
        }

        let mut ctrl = self.get_write_ctrler()?;

        // If a controlling descriptor table is attached, only a single
        // indirect descriptor pointing to it goes into the ring; otherwise
        // every buffer element gets its own descriptor.
        if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
            let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
            ctrl.write_desc(desc);
        } else {
            for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
                ctrl.write_desc(incomplete_desc);
            }
        }
        Ok(ctrl)
    }

    fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
        self.ring.as_mut_ptr()
    }

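    /// Allocates a buffer ID and returns a `WriteCtrl` positioned at the
    /// current write index, through which descriptors can be written and then
    /// made available.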
    fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
        let desc_id = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
        Ok(WriteCtrl {
            start: self.write_index.desc_event_off(),
            position: self.write_index.desc_event_off(),
            modulo: u16::try_from(self.ring.len()).unwrap(),
            first_flags: DescF::empty(),
            buff_id: u16::try_from(desc_id).unwrap(),

            desc_ring: self,
        })
    }

    fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
        ReadCtrl {
            position: self.poll_index.desc_event_off(),
            modulo: u16::try_from(self.ring.len()).unwrap(),

            desc_ring: self,
        }
    }

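    /// Publishes a written buffer to the device: stores the token under its
    /// buffer ID and only then, after a memory fence, updates the flags of the
    /// buffer's first descriptor, so the device cannot observe the descriptor
    /// before the token and descriptor contents are in place.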
    fn make_avail_with_state(
        &mut self,
        raw_tkn: TransferToken<pvirtq::Desc>,
        start: u16,
        buff_id: u16,
        first_flags: DescF,
    ) {
        self.tkn_ref_ring[usize::from(buff_id)] = Some(raw_tkn);
        fence(Ordering::SeqCst);
        self.ring[usize::from(start)].flags = first_flags;
    }

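    /// Marks descriptor flags as available for the current write position: in
    /// a packed ring, AVAIL is set to the ring's wrap counter and USED to its
    /// inverse.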
    fn to_marked_avail(&self, mut flags: DescF) -> DescF {
        let avail = self.write_index.desc_event_wrap() != 0;
        flags.set(virtq::DescF::AVAIL, avail);
        flags.set(virtq::DescF::USED, !avail);
        flags
    }

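    /// Checks whether the device has marked a descriptor as used: both AVAIL
    /// and USED must match the driver's current poll-side wrap counter.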
    fn is_marked_used(&self, flags: DescF) -> bool {
        if self.poll_index.desc_event_wrap() != 0 {
            flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
        } else {
            !flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
        }
    }
}

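/// Cursor for reading used buffers off the ring: tracks the poll position and
/// hands back the `TransferToken`s of completed transfers while freeing their
/// descriptor slots and buffer IDs.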
struct ReadCtrl<'a> {
    /// Ring position at which the next used descriptor is polled.
    position: u16,
    modulo: u16,

    desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
    fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
        let desc = &self.desc_ring.ring[usize::from(self.position)];
        if self.desc_ring.is_marked_used(desc.flags) {
            let buff_id = desc.id.to_ne();
            let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
                .take()
                .expect(
                    "The buff_id is incorrect or the reference to the TransferToken was misplaced.",
                );

            // Number of bytes the device wrote, as reported in the used descriptor.
            let write_len = desc.len.to_ne();

            for _ in 0..tkn.num_consuming_descr() {
                self.incrmt();
            }
            unsafe {
                self.desc_ring.indexes.deallocate(buff_id.into());
            }

            Some((tkn, write_len))
        } else {
            None
        }
    }

    fn incrmt(&mut self) {
        let mut desc = self.desc_ring.poll_index;

        if desc.desc_event_off() + 1 == self.modulo {
            let wrap = desc.desc_event_wrap() ^ 1;
            desc.set_desc_event_wrap(wrap);
        }

        let off = (desc.desc_event_off() + 1) % self.modulo;
        desc.set_desc_event_off(off);

        self.desc_ring.poll_index = desc;

        self.position = desc.desc_event_off();

        // One more descriptor slot is free again.
        assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
        self.desc_ring.capacity += 1;
    }
}

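/// Cursor for writing one buffer's descriptors into the ring. The flags of the
/// buffer's first descriptor are held back in `first_flags`, so the buffer
/// only becomes visible to the device once `make_avail` is called.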
struct WriteCtrl<'a> {
    /// Ring position at which this buffer's first descriptor was written.
    start: u16,
    /// Ring position at which the next descriptor will be written.
    position: u16,
    /// Length of the ring, used for wrapping the position.
    modulo: u16,
    /// Flags of the first descriptor, held back until `make_avail`.
    first_flags: DescF,
    /// Buffer ID under which the transfer token is stored.
    buff_id: u16,

    desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
    fn incrmt(&mut self) {
        // One descriptor slot is consumed.
        assert!(self.desc_ring.capacity != 0);
        self.desc_ring.capacity -= 1;

        let mut desc = self.desc_ring.write_index;

        if self.position + 1 == self.modulo {
            let wrap = desc.desc_event_wrap() ^ 1;
            desc.set_desc_event_wrap(wrap);
        }

        let off = (desc.desc_event_off() + 1) % self.modulo;
        desc.set_desc_event_off(off);

        self.desc_ring.write_index = desc;

        self.position = (self.position + 1) % self.modulo;
    }

    fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
        incomplete_desc.id = self.buff_id.into();
        if self.start == self.position {
            // Hold back the first descriptor's flags; they are written last in `make_avail`.
            self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        } else {
            incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        }
        self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
        self.incrmt();
    }

    fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
        // Making a buffer available without having written any descriptor is a bug.
        assert!(self.start != self.position);
        self.desc_ring
            .make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
    }
}

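/// Driver-controlled event suppression area: the driver writes these flags to
/// tell the device whether (and for which descriptor) it wants to receive
/// used-buffer notifications.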
struct DrvNotif {
    /// Whether VIRTIO_F_EVENT_IDX was negotiated, i.e. descriptor-specific events may be used.
    f_notif_idx: bool,
    /// The driver event suppression structure shared with the device.
    raw: &'static mut pvirtq::EventSuppress,
}

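/// Device-controlled event suppression area: the driver reads these flags to
/// decide whether the device currently wants to be notified about newly
/// available buffers.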
struct DevNotif {
    /// Whether the device may request notifications for specific descriptors.
    f_notif_idx: bool,
    /// The device event suppression structure shared with the device.
    raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
    fn enable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
    }

    fn disable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
    }

    fn enable_specific(&mut self, desc: EventSuppressDesc) {
        // Only possible if VIRTIO_F_EVENT_IDX has been negotiated.
        if self.f_notif_idx {
            self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
            self.raw.desc = desc;
        }
    }
}

impl DevNotif {
    pub fn enable_notif_specific(&mut self) {
        self.f_notif_idx = true;
    }

    fn is_notif(&self) -> bool {
        self.raw.flags.desc_event_flags() == RingEventFlags::Enable
    }

    fn notif_specific(&self) -> Option<EventSuppressDesc> {
        if !self.f_notif_idx {
            return None;
        }

        if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
            return None;
        }

        Some(self.raw.desc)
    }
}

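/// A packed virtqueue as seen by the driver: the descriptor ring, both event
/// suppression structures, and the notification interface of the device.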
pub struct PackedVq {
    /// The descriptor ring and its driver-side bookkeeping.
    descr_ring: DescriptorRing,
    /// Driver event suppression structure.
    drv_event: DrvNotif,
    /// Device event suppression structure.
    dev_event: DevNotif,
    /// Interface for notifying the device about newly available buffers.
    notif_ctrl: NotifCtrl,
    /// The queue size as negotiated with the device.
    size: u16,
    /// The virtqueue's index within the device.
    index: u16,
    /// Write position at the time of the last device notification.
    last_next: Cell<EventSuppressDesc>,
}

impl Virtq for PackedVq {
    fn enable_notifs(&mut self) {
        self.drv_event.enable_notif();
    }

    fn disable_notifs(&mut self) {
        self.drv_event.disable_notif();
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        self.descr_ring.try_recv()
    }

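    /// Writes the buffers into the descriptor ring as one batch and notifies
    /// the device if necessary: either because the device has notifications
    /// enabled, or because it asked for a notification at a specific
    /// descriptor that lies within the range just made available.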
    fn dispatch_batch(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index)
                .with_next_off(next_idx.desc_event_off())
                .with_next_wrap(next_idx.desc_event_wrap());
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch_batch_await(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index)
                .with_next_off(next_idx.desc_event_off())
                .with_next_wrap(next_idx.desc_event_wrap());
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn dispatch(
        &mut self,
        buffer_tkn: AvailBufferToken,
        notif: bool,
        buffer_type: BufferType,
    ) -> Result<(), VirtqError> {
        let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
        let next_idx = self.descr_ring.push(transfer_tkn)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let notif_specific = self
            .dev_event
            .notif_specific()
            .map(EventSuppressDesc::into_bits)
            == Some(self.last_next.get().into_bits());

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index)
                .with_next_off(next_idx.desc_event_off())
                .with_next_wrap(next_idx.desc_event_wrap());
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn index(&self) -> u16 {
        self.index
    }

    fn size(&self) -> u16 {
        self.size
    }

    fn has_used_buffers(&self) -> bool {
        let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index.desc_event_off())];
        self.descr_ring.is_marked_used(desc.flags)
    }
}

impl VirtqPrivate for PackedVq {
    type Descriptor = pvirtq::Desc;

    fn create_indirect_ctrl(
        buffer_tkn: &AvailBufferToken,
    ) -> Result<Box<[Self::Descriptor]>, VirtqError> {
        Ok(Self::descriptor_iter(buffer_tkn)?
            .collect::<Vec<_>>()
            .into_boxed_slice())
    }
}

impl PackedVq {
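    /// Creates a new packed virtqueue with the given index and size, places
    /// the ring and both event suppression structures in device-accessible
    /// memory, hands their physical addresses to the device, and enables the
    /// queue. Fails if the device does not know the queue index or the
    /// requested size is not acceptable.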
    pub(crate) fn new(
        com_cfg: &mut ComCfg,
        notif_cfg: &NotifCfg,
        size: u16,
        index: u16,
        features: virtio::F,
    ) -> Result<Self, VirtqError> {
        // This implementation does not support packed queues together with VIRTIO_F_IN_ORDER.
        if features.contains(virtio::F::IN_ORDER) {
            info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
            return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
        }

        let Some(mut vq_handler) = com_cfg.select_vq(index) else {
            return Err(VirtqError::QueueNotExisting(index));
        };

        // A packed virtqueue must not be empty and may hold at most 2^15 entries.
        let vq_size = if (size == 0) | (size > 0x8000) {
            return Err(VirtqError::QueueSizeNotAllowed(size));
        } else {
            vq_handler.set_vq_size(size)
        };

        let mut descr_ring = DescriptorRing::new(vq_size);
        let _mem_len =
            core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

        // Allocate both zero-initialized event suppression structures in
        // device-accessible memory and leak them to obtain the `&'static mut`
        // references stored below.
        let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let drv_event = unsafe { drv_event.assume_init() };
        let dev_event = unsafe { dev_event.assume_init() };
        let drv_event = Box::leak(drv_event);
        let dev_event = Box::leak(dev_event);

        // Hand the physical addresses of the ring and both event suppression
        // structures to the device.
        vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
        vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
        vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));

        let mut drv_event = DrvNotif {
            f_notif_idx: false,
            raw: drv_event,
        };

        let dev_event = DevNotif {
            f_notif_idx: false,
            raw: dev_event,
        };

        let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

        if features.contains(virtio::F::NOTIFICATION_DATA) {
            notif_ctrl.enable_notif_data();
        }

        if features.contains(virtio::F::EVENT_IDX) {
            drv_event.f_notif_idx = true;
        }

        vq_handler.enable_queue();

        info!("Created PackedVq: idx={index}, size={vq_size}");

        Ok(PackedVq {
            descr_ring,
            drv_event,
            dev_event,
            notif_ctrl,
            size: vq_size,
            index,
            last_next: Cell::default(),
        })
    }
}