virtio_spec/virtq/mod.rs

//! Virtqueue definitions

#[cfg(feature = "alloc")]
mod alloc;

use core::alloc::Layout;
use core::ptr::{addr_of_mut, NonNull};
use core::{mem, ptr};

use crate::{le16, le32, le64};

/// Split Virtqueue Descriptor
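///
/// # Examples
///
/// A minimal sketch of filling in one device-writable descriptor, assuming
/// the crate is imported as `virtio_spec` and that the `le*` types convert
/// from native integers via `Into`:
///
/// ```rust,ignore
/// use virtio_spec::virtq::{Desc, DescF};
///
/// let desc = Desc {
///     addr: 0x8000_0000_u64.into(), // guest-physical buffer address
///     len: 512_u32.into(),          // buffer length in bytes
///     flags: DescF::WRITE,          // the device may write to this buffer
///     next: 0_u16.into(),           // ignored, since NEXT is not set
/// };
/// ```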
#[doc(alias = "virtq_desc")]
#[derive(Clone, Copy, Debug)]
#[repr(C)]
pub struct Desc {
    /// Address (guest-physical).
    pub addr: le64,

    /// Length.
    pub len: le32,

    /// The flags as indicated in [`DescF`].
    pub flags: DescF,

    /// The index of the next descriptor in the chain, if `flags` contains
    /// [`DescF::NEXT`].
    pub next: le16,
}

endian_bitflags! {
    /// Virtqueue descriptor flags
    #[doc(alias = "VIRTQ_DESC_F")]
    pub struct DescF: le16 {
        /// This marks a buffer as continuing via the next field.
        #[doc(alias = "VIRTQ_DESC_F_NEXT")]
        const NEXT = 1;

        /// This marks a buffer as device write-only (otherwise device read-only).
        #[doc(alias = "VIRTQ_DESC_F_WRITE")]
        const WRITE = 2;

        /// This means the buffer contains a list of buffer descriptors.
        #[doc(alias = "VIRTQ_DESC_F_INDIRECT")]
        const INDIRECT = 4;

        /// This marks a descriptor as available in a packed virtqueue.
        #[doc(alias = "VIRTQ_DESC_F_AVAIL")]
        const AVAIL = 1 << 7;

        /// This marks a descriptor as used in a packed virtqueue.
        #[doc(alias = "VIRTQ_DESC_F_USED")]
        const USED = 1 << 15;
    }
}

/// The Virtqueue Available Ring
#[doc(alias = "virtq_avail")]
#[derive(Debug)]
#[repr(C)]
pub struct Avail {
    /// The flags as indicated in [`AvailF`].
    pub flags: AvailF,

    /// Indicates where the driver would put the next descriptor entry in the
    /// ring (modulo the queue size).
    pub idx: le16,

    /// The ring, followed by the optional `used_event` field.
    ring_and_used_event: [le16],
}

impl Avail {
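    /// Returns the memory layout of an available ring for a queue of
    /// `queue_size` entries, including the trailing `used_event` field if
    /// `has_event_idx` is true (i.e. if `VIRTIO_F_EVENT_IDX` was negotiated).
    ///
    /// # Examples
    ///
    /// A quick size check, assuming the crate is imported as `virtio_spec`:
    /// `flags` and `idx` take one `le16` each, the ring takes `queue_size`
    /// more, and `used_event` adds a final one:
    ///
    /// ```rust,ignore
    /// use virtio_spec::virtq::Avail;
    ///
    /// let layout = Avail::layout(256, true);
    /// assert_eq!(layout.size(), (2 + 256 + 1) * 2);
    /// assert_eq!(layout.align(), 2);
    /// ```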
    pub fn layout(queue_size: u16, has_event_idx: bool) -> Layout {
        Layout::array::<le16>(2 + usize::from(queue_size) + usize::from(has_event_idx)).unwrap()
    }

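    /// Creates an [`Avail`] pointer from a pointer to its backing memory,
    /// returning [`None`] if the memory is not aligned to [`le16`] or its
    /// length is not a multiple of `size_of::<le16>()`.
    ///
    /// # Examples
    ///
    /// A sketch that backs the ring with a `u16` buffer (which guarantees
    /// the required 2-byte alignment), assuming the crate is imported as
    /// `virtio_spec`:
    ///
    /// ```rust,ignore
    /// use core::ptr::{self, NonNull};
    /// use virtio_spec::virtq::Avail;
    ///
    /// let layout = Avail::layout(8, false);
    /// let mut backing = vec![0_u16; layout.size() / 2];
    /// let ptr = NonNull::new(ptr::slice_from_raw_parts_mut(
    ///     backing.as_mut_ptr().cast::<u8>(),
    ///     layout.size(),
    /// ))
    /// .unwrap();
    /// let avail = Avail::from_ptr(ptr).unwrap();
    /// assert_eq!(unsafe { avail.as_ref() }.ring(false).len(), 8);
    /// ```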
    pub fn from_ptr(ptr: NonNull<[u8]>) -> Option<NonNull<Self>> {
        let len = ptr.as_ptr().len();
        // FIXME: use ptr::as_mut_ptr once stable
        // https://github.com/rust-lang/rust/issues/74265
        let ptr = ptr.as_ptr() as *mut u8;

        if !ptr.cast::<le16>().is_aligned() {
            return None;
        }

        if len % mem::size_of::<le16>() != 0 {
            return None;
        }

        // Subtract `flags` and `idx` to get the length of the slice tail.
        let len = len / mem::size_of::<le16>() - 2;
        let ptr = ptr::slice_from_raw_parts_mut(ptr, len) as *mut Self;
        Some(NonNull::new(ptr).unwrap())
    }

    /// Returns a pointer to the ring, excluding the trailing `used_event`
    /// field if `has_event_idx` is true.
    pub fn ring_ptr(this: NonNull<Self>, has_event_idx: bool) -> NonNull<[le16]> {
        let ptr = unsafe { addr_of_mut!((*this.as_ptr()).ring_and_used_event) };
        let len = if cfg!(debug_assertions) {
            ptr.len()
                .checked_sub(usize::from(has_event_idx))
                .expect("`has_event_idx` cannot be true if it was not true at creation")
        } else {
            ptr.len().saturating_sub(usize::from(has_event_idx))
        };
        let ptr = NonNull::new(ptr).unwrap().cast::<le16>();
        NonNull::slice_from_raw_parts(ptr, len)
    }

    /// Returns the ring as a shared slice.
    pub fn ring(&self, has_event_idx: bool) -> &[le16] {
        let ptr = Self::ring_ptr(NonNull::from(self), has_event_idx);
        unsafe { ptr.as_ref() }
    }

    /// Returns the ring as a mutable slice.
    pub fn ring_mut(&mut self, has_event_idx: bool) -> &mut [le16] {
        let mut ptr = Self::ring_ptr(NonNull::from(self), has_event_idx);
        unsafe { ptr.as_mut() }
    }

    /// Returns a pointer to the `used_event` field, or [`None`] if
    /// `has_event_idx` is false.
    pub fn used_event_ptr(this: NonNull<Self>, has_event_idx: bool) -> Option<NonNull<le16>> {
        if !has_event_idx {
            return None;
        }

        let ptr = unsafe { addr_of_mut!((*this.as_ptr()).ring_and_used_event) };
        let len = ptr.len();

        if len == 0 {
            return None;
        }

        // `used_event` is the last element of the slice tail.
        let ptr = NonNull::new(ptr).unwrap().cast::<le16>();
        let ptr = unsafe { ptr.add(len - 1) };
        Some(ptr)
    }

    /// Returns a shared reference to the `used_event` field, if present.
    pub fn used_event(&self, has_event_idx: bool) -> Option<&le16> {
        Self::used_event_ptr(NonNull::from(self), has_event_idx).map(|ptr| unsafe { ptr.as_ref() })
    }

    /// Returns a mutable reference to the `used_event` field, if present.
    pub fn used_event_mut(&mut self, has_event_idx: bool) -> Option<&mut le16> {
        Self::used_event_ptr(NonNull::from(self), has_event_idx)
            .map(|mut ptr| unsafe { ptr.as_mut() })
    }
}

endian_bitflags! {
    /// Virtqueue available ring flags
    #[doc(alias = "VIRTQ_AVAIL_F")]
    pub struct AvailF: le16 {
        /// The driver uses this in avail->flags to advise the device: don’t
        /// interrupt me when you consume a buffer.  It’s unreliable, so it’s
        /// simply an optimization.
        #[doc(alias = "VIRTQ_AVAIL_F_NO_INTERRUPT")]
        const NO_INTERRUPT = 1;
    }
}

/// The Virtqueue Used Ring
#[doc(alias = "virtq_used")]
#[derive(Debug)]
#[repr(C)]
#[repr(align(4))] // mem::align_of::<UsedElem>
pub struct Used {
    /// The flags as indicated in [`UsedF`].
    pub flags: UsedF,

    /// Indicates where the device would put the next descriptor entry in the
    /// ring (modulo the queue size).
    pub idx: le16,

    /// The ring, followed by the optional `avail_event` field.
    ring_and_avail_event: [le16],
}

impl Used {
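    /// Returns the memory layout of a used ring for a queue of `queue_size`
    /// entries, including the trailing `avail_event` field if
    /// `has_event_idx` is true. The result is padded to the alignment of
    /// [`UsedElem`].
    ///
    /// # Examples
    ///
    /// A quick size check, assuming the crate is imported as `virtio_spec`:
    /// `flags` and `idx` take 4 bytes, each [`UsedElem`] takes 8, and the
    /// 2-byte `avail_event` is padded out to the struct's 4-byte alignment:
    ///
    /// ```rust,ignore
    /// use virtio_spec::virtq::Used;
    ///
    /// let layout = Used::layout(256, true);
    /// assert_eq!(layout.size(), 4 + 256 * 8 + 4);
    /// assert_eq!(layout.align(), 4);
    /// ```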
    pub fn layout(queue_size: u16, has_event_idx: bool) -> Layout {
        let event_idx_layout = if has_event_idx {
            Layout::new::<le16>()
        } else {
            Layout::new::<()>()
        };

        Layout::array::<le16>(2)
            .unwrap()
            .extend(Layout::array::<UsedElem>(queue_size.into()).unwrap())
            .unwrap()
            .0
            .extend(event_idx_layout)
            .unwrap()
            .0
            .pad_to_align()
    }

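    /// Creates a [`Used`] pointer from a pointer to its backing memory,
    /// returning [`None`] if the memory is not aligned to [`UsedElem`] or if
    /// its length is inconsistent with `has_event_idx`.
    ///
    /// # Examples
    ///
    /// A sketch that backs the ring with a `u32` buffer (which guarantees
    /// the required 4-byte alignment), assuming the crate is imported as
    /// `virtio_spec`:
    ///
    /// ```rust,ignore
    /// use core::ptr::{self, NonNull};
    /// use virtio_spec::virtq::Used;
    ///
    /// let layout = Used::layout(8, true);
    /// let mut backing = vec![0_u32; layout.size() / 4];
    /// let ptr = NonNull::new(ptr::slice_from_raw_parts_mut(
    ///     backing.as_mut_ptr().cast::<u8>(),
    ///     layout.size(),
    /// ))
    /// .unwrap();
    /// let used = Used::from_ptr(ptr, true).unwrap();
    /// assert_eq!(unsafe { used.as_ref() }.ring().len(), 8);
    /// ```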
    pub fn from_ptr(ptr: NonNull<[u8]>, has_event_idx: bool) -> Option<NonNull<Self>> {
        let len = ptr.len();
        let ptr = ptr.cast::<u8>().as_ptr();

        if !ptr.cast::<UsedElem>().is_aligned() {
            return None;
        }

        // `flags` and `idx` take half a `UsedElem`; the padded `avail_event`,
        // if present, takes the other half. So a valid length has an
        // `le32`-sized remainder exactly when the event field is absent.
        if len % mem::size_of::<UsedElem>() != usize::from(!has_event_idx) * mem::size_of::<le32>()
        {
            return None;
        }

        // Convert the byte length into the `le16` element count of the slice
        // tail: subtract `flags` and `idx`, plus the trailing padding after
        // `avail_event` if present (the field itself belongs to the tail).
        let len = len / mem::size_of::<le16>() - 2 - usize::from(has_event_idx);
        let ptr = ptr::slice_from_raw_parts_mut(ptr, len) as *mut Self;
        Some(NonNull::new(ptr).unwrap())
    }

    /// Returns a pointer to the ring, excluding the trailing `avail_event`
    /// field if present.
    pub fn ring_ptr(this: NonNull<Self>) -> NonNull<[UsedElem]> {
        let ptr = unsafe { addr_of_mut!((*this.as_ptr()).ring_and_avail_event) };
        let len = ptr.len() * mem::size_of::<le16>() / mem::size_of::<UsedElem>();
        let ptr = NonNull::new(ptr).unwrap().cast::<UsedElem>();
        NonNull::slice_from_raw_parts(ptr, len)
    }

    /// Returns the ring as a shared slice.
    pub fn ring(&self) -> &[UsedElem] {
        let ptr = Self::ring_ptr(NonNull::from(self));
        unsafe { ptr.as_ref() }
    }

    /// Returns the ring as a mutable slice.
    pub fn ring_mut(&mut self) -> &mut [UsedElem] {
        let mut ptr = Self::ring_ptr(NonNull::from(self));
        unsafe { ptr.as_mut() }
    }

    /// Returns a pointer to the `avail_event` field, or [`None`] if it was
    /// not included at creation.
    pub fn avail_event_ptr(this: NonNull<Self>) -> Option<NonNull<le16>> {
        let ptr = unsafe { addr_of_mut!((*this.as_ptr()).ring_and_avail_event) };
        let len = ptr.len();

        // With `avail_event`, the slice tail holds one `le16` more than the
        // ring entries account for.
        if len * mem::size_of::<le16>() % mem::size_of::<UsedElem>() != mem::size_of::<le16>() {
            return None;
        }

        let ptr = NonNull::new(ptr).unwrap().cast::<le16>();
        let ptr = unsafe { ptr.add(len - 1) };
        Some(ptr)
    }

    /// Returns a shared reference to the `avail_event` field, if present.
    pub fn avail_event(&self) -> Option<&le16> {
        Self::avail_event_ptr(NonNull::from(self)).map(|ptr| unsafe { ptr.as_ref() })
    }

    /// Returns a mutable reference to the `avail_event` field, if present.
    pub fn avail_event_mut(&mut self) -> Option<&mut le16> {
        Self::avail_event_ptr(NonNull::from(self)).map(|mut ptr| unsafe { ptr.as_mut() })
    }
}

endian_bitflags! {
    /// Virtqueue used ring flags
    #[doc(alias = "VIRTQ_USED_F")]
    pub struct UsedF: le16 {
        /// The device uses this in used->flags to advise the driver: don’t kick me
        /// when you add a buffer.  It’s unreliable, so it’s simply an
        /// optimization.
        #[doc(alias = "VIRTQ_USED_F_NO_NOTIFY")]
        const NO_NOTIFY = 1;
    }
}

/// Used Ring Entry
#[doc(alias = "virtq_used_elem")]
#[derive(Clone, Copy, Debug)]
#[repr(C)]
pub struct UsedElem {
    /// Index of start of used descriptor chain.
    ///
    /// [`le32`] is used here for ids for padding reasons.
    pub id: le32,

    /// The number of bytes written into the device writable portion of
    /// the buffer described by the descriptor chain.
    pub len: le32,
}