hermit/drivers/virtio/virtqueue/packed.rs

#![allow(dead_code)]

use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cell::Cell;
use core::ops;
use core::sync::atomic::{Ordering, fence};

use align_address::Align;
#[cfg(not(feature = "pci"))]
use virtio::mmio::NotificationData;
#[cfg(feature = "pci")]
use virtio::pci::NotificationData;
use virtio::pvirtq::{EventSuppressDesc, EventSuppressFlags};
use virtio::virtq::DescF;
use virtio::{RingEventFlags, pvirtq, virtq};

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
    AvailBufferToken, BufferType, MemPool, TransferToken, UsedBufferToken, Virtq, VirtqPrivate,
};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

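/// A position in the descriptor ring together with the wrap counter at that
/// position. Packed virtqueues use this pair to identify a ring slot
/// unambiguously across wrap-arounds.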
#[derive(Default, PartialEq, Eq, Clone, Copy, Debug)]
struct RingIdx {
    off: u16,
    wrap: u8,
}

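/// Containment test for a half-open `Range<RingIdx>` that may wrap around the
/// end of the descriptor ring.
///
/// Illustrative example (hypothetical values): the range
/// `RingIdx { off: 6, wrap: 1 }..RingIdx { off: 2, wrap: 0 }` contains
/// `RingIdx { off: 7, wrap: 1 }` and `RingIdx { off: 1, wrap: 0 }`, but not
/// `RingIdx { off: 3, wrap: 0 }`.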
trait RingIndexRange {
    fn wrapping_contains(&self, item: &RingIdx) -> bool;
}

impl RingIndexRange for ops::Range<RingIdx> {
    fn wrapping_contains(&self, item: &RingIdx) -> bool {
        let ops::Range { start, end } = self;

        if start.wrap == end.wrap {
            item.wrap == start.wrap && start.off <= item.off && item.off < end.off
        } else if item.wrap == start.wrap {
            start.off <= item.off
        } else {
            debug_assert!(item.wrap == end.wrap);
            item.off < end.off
        }
    }
}

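/// The descriptor ring of a packed virtqueue together with the driver-side
/// bookkeeping needed to use it.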
struct DescriptorRing {
    ring: Box<[pvirtq::Desc], DeviceAlloc>,
    tkn_ref_ring: Box<[Option<TransferToken<pvirtq::Desc>>]>,

    /// Position of the next free slot to write a descriptor into.
    write_index: u16,
    /// Number of currently unused descriptor slots.
    capacity: u16,
    /// Position of the next descriptor to poll for completion.
    poll_index: u16,
    /// Driver wrap counter.
    drv_wc: bool,
    /// Device wrap counter.
    dev_wc: bool,
    mem_pool: MemPool,
}

impl DescriptorRing {
    fn new(size: u16) -> Self {
        let ring = unsafe { Box::new_zeroed_slice_in(size.into(), DeviceAlloc).assume_init() };

        let tkn_ref_ring = core::iter::repeat_with(|| None)
            .take(size.into())
            .collect::<Vec<_>>()
            .into_boxed_slice();

        DescriptorRing {
            ring,
            tkn_ref_ring,
            write_index: 0,
            capacity: size,
            poll_index: 0,
            drv_wc: true,
            dev_wc: true,
            mem_pool: MemPool::new(size),
        }
    }

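    /// Polls the ring for the next used buffer and, if one is available,
    /// converts it back into a [`UsedBufferToken`].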
    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        let mut ctrl = self.get_read_ctrler();

        ctrl.poll_next()
            .map(|(tkn, written_len)| {
                UsedBufferToken::from_avail_buffer_token(tkn.buff_tkn, written_len)
            })
            .ok_or(VirtqError::NoNewUsed)
    }

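    /// Writes a batch of transfer tokens into the ring and makes them
    /// available to the device. The first descriptor's flags are written last
    /// so the device cannot observe a partially published batch. Returns the
    /// ring index right after the last written descriptor.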
    fn push_batch(
        &mut self,
        tkn_lst: impl IntoIterator<Item = TransferToken<pvirtq::Desc>>,
    ) -> Result<RingIdx, VirtqError> {
        let first_ctrl_settings;
        let first_buffer;
        let mut ctrl;

        let mut tkn_iterator = tkn_lst.into_iter();
        if let Some(first_tkn) = tkn_iterator.next() {
            ctrl = self.push_without_making_available(&first_tkn)?;
            first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.first_flags);
            first_buffer = first_tkn;
        } else {
            // An empty batch is not allowed.
            return Err(VirtqError::BufferNotSpecified);
        }
        for tkn in tkn_iterator {
            ctrl.make_avail(tkn);
        }

        // Make the first buffer available last, so the device only sees the
        // batch once it is complete.
        self.make_avail_with_state(
            first_buffer,
            first_ctrl_settings.0,
            first_ctrl_settings.1,
            first_ctrl_settings.2,
        );
        Ok(RingIdx {
            off: self.write_index,
            wrap: self.drv_wc.into(),
        })
    }

    fn push(&mut self, tkn: TransferToken<pvirtq::Desc>) -> Result<RingIdx, VirtqError> {
        self.push_batch([tkn])
    }

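    /// Writes the descriptors of a transfer token into the ring without
    /// marking the first descriptor as available. The returned [`WriteCtrl`]
    /// carries the deferred flags so the caller can publish the buffer later.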
    fn push_without_making_available(
        &mut self,
        tkn: &TransferToken<pvirtq::Desc>,
    ) -> Result<WriteCtrl<'_>, VirtqError> {
        if tkn.num_consuming_descr() > self.capacity {
            return Err(VirtqError::NoDescrAvail);
        }

        let mut ctrl = self.get_write_ctrler()?;

        if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
            // An indirect descriptor consumes only a single slot in the ring.
            let desc = PackedVq::indirect_desc(ctrl_desc.as_ref());
            ctrl.write_desc(desc);
        } else {
            for incomplete_desc in PackedVq::descriptor_iter(&tkn.buff_tkn)? {
                ctrl.write_desc(incomplete_desc);
            }
        }
        Ok(ctrl)
    }

    fn as_mut_ptr(&mut self) -> *mut pvirtq::Desc {
        self.ring.as_mut_ptr()
    }

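    /// Reserves a free buffer ID from the memory pool and returns a
    /// [`WriteCtrl`] positioned at the current write index.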
    fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
        let desc_id = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
        Ok(WriteCtrl {
            start: self.write_index,
            position: self.write_index,
            modulo: u16::try_from(self.ring.len()).unwrap(),
            first_flags: DescF::empty(),
            buff_id: desc_id,

            desc_ring: self,
        })
    }

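    /// Returns a [`ReadCtrl`] positioned at the next descriptor to poll.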
    fn get_read_ctrler(&mut self) -> ReadCtrl<'_> {
        ReadCtrl {
            position: self.poll_index,
            modulo: u16::try_from(self.ring.len()).unwrap(),

            desc_ring: self,
        }
    }

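    /// Publishes a buffer to the device: stores the transfer token for later
    /// retrieval and then writes the first descriptor's flags. The fence
    /// ensures the token is visible before the descriptor becomes available.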
    fn make_avail_with_state(
        &mut self,
        raw_tkn: TransferToken<pvirtq::Desc>,
        start: u16,
        buff_id: u16,
        first_flags: DescF,
    ) {
        self.tkn_ref_ring[usize::from(buff_id)] = Some(raw_tkn);
        fence(Ordering::SeqCst);
        self.ring[usize::from(start)].flags = first_flags;
    }

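    /// Marks the flags as available from the driver's point of view: per the
    /// packed virtqueue layout, AVAIL is set to the driver's wrap counter and
    /// USED to its inverse.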
    fn to_marked_avail(&self, mut flags: DescF) -> DescF {
        flags.set(virtq::DescF::AVAIL, self.drv_wc);
        flags.set(virtq::DescF::USED, !self.drv_wc);
        flags
    }

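    /// Checks whether the device has marked a descriptor as used: AVAIL and
    /// USED must both match the device wrap counter the driver expects.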
    fn is_marked_used(&self, flags: DescF) -> bool {
        if self.dev_wc {
            flags.contains(virtq::DescF::AVAIL | virtq::DescF::USED)
        } else {
            !flags.intersects(virtq::DescF::AVAIL | virtq::DescF::USED)
        }
    }
}

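/// Cursor for reading used buffers back out of the descriptor ring.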
struct ReadCtrl<'a> {
    /// Position of the descriptor that is polled next.
    position: u16,
    modulo: u16,

    desc_ring: &'a mut DescriptorRing,
}

impl ReadCtrl<'_> {
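    /// Checks whether the descriptor at the current poll position has been
    /// used by the device. If so, takes the corresponding transfer token out
    /// of the reference ring, frees the buffer ID, advances past all
    /// descriptors of the transfer, and returns the token together with the
    /// number of bytes the device wrote.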
    fn poll_next(&mut self) -> Option<(TransferToken<pvirtq::Desc>, u32)> {
        let desc = &self.desc_ring.ring[usize::from(self.position)];
        if self.desc_ring.is_marked_used(desc.flags) {
            let buff_id = desc.id.to_ne();
            let tkn = self.desc_ring.tkn_ref_ring[usize::from(buff_id)]
                .take()
                .expect(
                    "The buff_id is incorrect or the reference to the TransferToken was misplaced.",
                );

            let write_len = desc.len.to_ne();

            for _ in 0..tkn.num_consuming_descr() {
                self.incrmt();
            }
            self.desc_ring.mem_pool.ret_id(buff_id);

            Some((tkn, write_len))
        } else {
            None
        }
    }

    fn incrmt(&mut self) {
        // Wrapping around the end of the ring toggles the device wrap counter.
        if self.desc_ring.poll_index + 1 == self.modulo {
            self.desc_ring.dev_wc ^= true;
        }

        // One descriptor slot is free again.
        assert!(self.desc_ring.capacity <= u16::try_from(self.desc_ring.ring.len()).unwrap());
        self.desc_ring.capacity += 1;

        self.desc_ring.poll_index = (self.desc_ring.poll_index + 1) % self.modulo;
        self.position = self.desc_ring.poll_index;
    }
}

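/// Cursor for writing the descriptors of one transfer token into the ring.
/// The flags of the first descriptor are held back in `first_flags` until the
/// whole transfer has been written.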
struct WriteCtrl<'a> {
    /// Position of the first descriptor of the transfer.
    start: u16,
    /// Position of the next descriptor to write.
    position: u16,
    modulo: u16,
    /// Deferred flags for the first descriptor.
    first_flags: DescF,
    /// Buffer ID shared by all descriptors of this transfer.
    buff_id: u16,

    desc_ring: &'a mut DescriptorRing,
}

impl WriteCtrl<'_> {
    fn incrmt(&mut self) {
        // One more descriptor slot is in use.
        assert!(self.desc_ring.capacity != 0);
        self.desc_ring.capacity -= 1;
        // Wrapping around the end of the ring toggles the driver wrap counter.
        if self.position + 1 == self.modulo {
            self.desc_ring.drv_wc ^= true;
        }
        self.desc_ring.write_index = (self.desc_ring.write_index + 1) % self.modulo;

        self.position = (self.position + 1) % self.modulo;
    }

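    /// Writes a single descriptor at the current position and advances the
    /// cursor. For the first descriptor of a transfer, the availability flags
    /// are only recorded in `first_flags`; all following descriptors are
    /// marked available immediately.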
    fn write_desc(&mut self, mut incomplete_desc: pvirtq::Desc) {
        incomplete_desc.id = self.buff_id.into();
        if self.start == self.position {
            // Hold the first descriptor's flags back; they are written last in
            // `make_avail_with_state`.
            self.first_flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        } else {
            // Descriptors after the first one can be marked available right away.
            incomplete_desc.flags = self.desc_ring.to_marked_avail(incomplete_desc.flags);
        }
        self.desc_ring.ring[usize::from(self.position)] = incomplete_desc;
        self.incrmt();
    }

    fn make_avail(&mut self, raw_tkn: TransferToken<pvirtq::Desc>) {
        // At least one descriptor must have been written before the buffer can
        // be published.
        assert!(self.start != self.position);
        self.desc_ring
            .make_avail_with_state(raw_tkn, self.start, self.buff_id, self.first_flags);
    }
}

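/// Wrapper around the driver event suppression area, written by the driver to
/// control whether and when the device sends used-buffer notifications.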
struct DrvNotif {
    /// Indicates whether VIRTIO_F_EVENT_IDX was negotiated, i.e. whether
    /// descriptor-specific events may be used.
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}

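/// Wrapper around the device event suppression area, read by the driver to
/// decide whether the device currently wants available-buffer notifications.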
struct DevNotif {
    /// Indicates whether the device may request descriptor-specific
    /// notifications.
    f_notif_idx: bool,
    raw: &'static mut pvirtq::EventSuppress,
}

impl DrvNotif {
    /// Requests a notification for every used buffer.
    fn enable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Enable);
    }

    /// Suppresses used-buffer notifications entirely.
    fn disable_notif(&mut self) {
        self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Disable);
    }

    /// Requests a notification only for the given ring index. This is only
    /// possible if descriptor-specific events were negotiated.
    fn enable_specific(&mut self, idx: RingIdx) {
        if self.f_notif_idx {
            self.raw.flags = EventSuppressFlags::new().with_desc_event_flags(RingEventFlags::Desc);
            self.raw.desc = EventSuppressDesc::new()
                .with_desc_event_off(idx.off)
                .with_desc_event_wrap(idx.wrap);
        }
    }
}

impl DevNotif {
    /// Allows the device to request descriptor-specific notifications.
    pub fn enable_notif_specific(&mut self) {
        self.f_notif_idx = true;
    }

    /// Returns whether the device currently wants to be notified for every
    /// available buffer.
    fn is_notif(&self) -> bool {
        self.raw.flags.desc_event_flags() == RingEventFlags::Enable
    }

    /// Returns the ring index for which the device has requested a
    /// descriptor-specific notification, if any.
    fn notif_specific(&self) -> Option<RingIdx> {
        if !self.f_notif_idx {
            return None;
        }

        if self.raw.flags.desc_event_flags() != RingEventFlags::Desc {
            return None;
        }

        let off = self.raw.desc.desc_event_off();
        let wrap = self.raw.desc.desc_event_wrap();

        Some(RingIdx { off, wrap })
    }
}

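/// A packed virtqueue as defined by the Virtio specification, consisting of
/// the descriptor ring and the two event suppression structures.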
pub struct PackedVq {
    descr_ring: DescriptorRing,
    drv_event: DrvNotif,
    dev_event: DevNotif,
    notif_ctrl: NotifCtrl,
    /// The size of the queue as negotiated with the device.
    size: u16,
    /// The virtqueue's index within the device.
    index: u16,
    /// The ring index right after the last batch the device was notified about.
    last_next: Cell<RingIdx>,
}

impl Virtq for PackedVq {
    fn enable_notifs(&mut self) {
        self.drv_event.enable_notif();
    }

    fn disable_notifs(&mut self) {
        self.drv_event.disable_notif();
    }

    fn try_recv(&mut self) -> Result<UsedBufferToken, VirtqError> {
        self.descr_ring.try_recv()
    }

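    /// Pushes a batch of available buffers and notifies the device if it has
    /// asked for notifications, either unconditionally or for a descriptor
    /// that falls within the newly pushed range.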
    fn dispatch_batch(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        // Zero transfers are not allowed.
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

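    /// Like `dispatch_batch`, but for transfers whose completion is awaited;
    /// the descriptor and notification handling is the same.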
    fn dispatch_batch_await(
        &mut self,
        buffer_tkns: Vec<(AvailBufferToken, BufferType)>,
        notif: bool,
    ) -> Result<(), VirtqError> {
        // Zero transfers are not allowed.
        assert!(!buffer_tkns.is_empty());

        let transfer_tkns = buffer_tkns.into_iter().map(|(buffer_tkn, buffer_type)| {
            Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type)
        });

        let next_idx = self.descr_ring.push_batch(transfer_tkns)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let range = self.last_next.get()..next_idx;
        let notif_specific = self
            .dev_event
            .notif_specific()
            .is_some_and(|idx| range.wrapping_contains(&idx));

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

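    /// Pushes a single available buffer. The device is notified if it has
    /// enabled notifications or has requested a descriptor-specific
    /// notification at the position where this buffer was placed.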
    fn dispatch(
        &mut self,
        buffer_tkn: AvailBufferToken,
        notif: bool,
        buffer_type: BufferType,
    ) -> Result<(), VirtqError> {
        let transfer_tkn = Self::transfer_token_from_buffer_token(buffer_tkn, buffer_type);
        let next_idx = self.descr_ring.push(transfer_tkn)?;

        if notif {
            self.drv_event.enable_specific(next_idx);
        }

        let notif_specific = self.dev_event.notif_specific() == Some(self.last_next.get());

        if self.dev_event.is_notif() || notif_specific {
            let notification_data = NotificationData::new()
                .with_vqn(self.index)
                .with_next_off(next_idx.off)
                .with_next_wrap(next_idx.wrap);
            self.notif_ctrl.notify_dev(notification_data);
            self.last_next.set(next_idx);
        }
        Ok(())
    }

    fn index(&self) -> u16 {
        self.index
    }

    fn size(&self) -> u16 {
        self.size
    }

    fn has_used_buffers(&self) -> bool {
        let desc = &self.descr_ring.ring[usize::from(self.descr_ring.poll_index)];
        self.descr_ring.is_marked_used(desc.flags)
    }
}

impl VirtqPrivate for PackedVq {
    type Descriptor = pvirtq::Desc;

    fn create_indirect_ctrl(
        buffer_tkn: &AvailBufferToken,
    ) -> Result<Box<[Self::Descriptor]>, VirtqError> {
        Ok(Self::descriptor_iter(buffer_tkn)?
            .collect::<Vec<_>>()
            .into_boxed_slice())
    }
}

impl PackedVq {
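    /// Creates a new packed virtqueue of the given size for the queue with the
    /// given index and registers it with the device via the common
    /// configuration. Fails if the size is invalid, the queue does not exist,
    /// or an unsupported feature was negotiated.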
    pub(crate) fn new(
        com_cfg: &mut ComCfg,
        notif_cfg: &NotifCfg,
        size: u16,
        index: u16,
        features: virtio::F,
    ) -> Result<Self, VirtqError> {
        // The packed virtqueue implementation does not support VIRTIO_F_IN_ORDER.
        if features.contains(virtio::F::IN_ORDER) {
            info!("PackedVq has no support for VIRTIO_F_IN_ORDER. Aborting...");
            return Err(VirtqError::FeatureNotSupported(virtio::F::IN_ORDER));
        }

        let Some(mut vq_handler) = com_cfg.select_vq(index) else {
            return Err(VirtqError::QueueNotExisting(index));
        };

        // A packed virtqueue must not be empty and must not exceed 2^15 entries.
        let vq_size = if (size == 0) || (size > 0x8000) {
            return Err(VirtqError::QueueSizeNotAllowed(size));
        } else {
            vq_handler.set_vq_size(size)
        };

        let mut descr_ring = DescriptorRing::new(vq_size);
        let _mem_len =
            core::mem::size_of::<pvirtq::EventSuppress>().align_up(BasePageSize::SIZE as usize);

        let drv_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let dev_event = Box::<pvirtq::EventSuppress, _>::new_zeroed_in(DeviceAlloc);
        let drv_event = unsafe { drv_event.assume_init() };
        let dev_event = unsafe { dev_event.assume_init() };
        let drv_event = Box::leak(drv_event);
        let dev_event = Box::leak(dev_event);

        // Provide the device with the physical addresses of the ring and the
        // two event suppression structures.
        vq_handler.set_ring_addr(DeviceAlloc.phys_addr_from(descr_ring.as_mut_ptr()));
        vq_handler.set_drv_ctrl_addr(DeviceAlloc.phys_addr_from(drv_event));
        vq_handler.set_dev_ctrl_addr(DeviceAlloc.phys_addr_from(dev_event));

        let mut drv_event = DrvNotif {
            f_notif_idx: false,
            raw: drv_event,
        };

        let dev_event = DevNotif {
            f_notif_idx: false,
            raw: dev_event,
        };

        let mut notif_ctrl = NotifCtrl::new(notif_cfg.notification_location(&mut vq_handler));

        if features.contains(virtio::F::NOTIFICATION_DATA) {
            notif_ctrl.enable_notif_data();
        }

        if features.contains(virtio::F::EVENT_IDX) {
            drv_event.f_notif_idx = true;
        }

        vq_handler.enable_queue();

        info!("Created PackedVq: idx={index}, size={vq_size}");

        Ok(PackedVq {
            descr_ring,
            drv_event,
            dev_event,
            notif_ctrl,
            size: vq_size,
            index,
            last_next: Cell::default(),
        })
    }
}