#![allow(dead_code)]

use alloc::vec::Vec;
use core::ptr::NonNull;
use core::{mem, ptr};

use memory_addresses::PhysAddr;
use pci_types::capability::PciCapability;
use virtio::pci::{
    CapCfgType, CapData, CommonCfg, CommonCfgVolatileFieldAccess, CommonCfgVolatileWideFieldAccess,
    IsrStatus as IsrStatusRaw, NotificationData,
};
use virtio::{DeviceStatus, le16, le32};
use volatile::access::ReadOnly;
use volatile::{VolatilePtr, VolatileRef};

use crate::arch::memory_barrier;
use crate::arch::pci::PciConfigRegion;
use crate::drivers::error::DriverError;
#[cfg(feature = "fuse")]
use crate::drivers::fs::virtio_fs::VirtioFsDriver;
#[cfg(all(
    not(all(target_arch = "x86_64", feature = "rtl8139")),
    any(feature = "tcp", feature = "udp")
))]
use crate::drivers::net::virtio::VirtioNetDriver;
use crate::drivers::pci::PciDevice;
use crate::drivers::pci::error::PciError;
use crate::drivers::virtio::error::VirtioError;
use crate::drivers::virtio::transport::pci::PciBar as VirtioPciBar;
#[cfg(feature = "vsock")]
use crate::drivers::vsock::VirtioVsockDriver;

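/// Maps the device-specific configuration structure described by the given
/// capability as a `&'static mut T`. Returns `None` if the capability is not a
/// device config capability or if the structure does not fit into its BAR.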
pub fn map_dev_cfg<T>(cap: &PciCap) -> Option<&'static mut T> {
    if cap.cap.cfg_type != CapCfgType::Device {
        error!("Capability is not a device config capability. Mapping not possible...");
        return None;
    }

    if cap.bar_len() < cap.len() + cap.offset() {
        error!(
            "Device config of device {:x} does not fit into memory specified by bar!",
            cap.dev_id(),
        );
        return None;
    }

    // The capability must be large enough to hold the structure defined by the standard.
    if cap.len() < u64::try_from(mem::size_of::<T>()).unwrap() {
        error!(
            "Device specific config from device {:x} does not represent actual structure specified by the standard!",
            cap.dev_id()
        );
        return None;
    }

    let virt_addr_raw = cap.bar_addr() + cap.offset();

    // The address was validated against the capability's BAR above.
    let dev_cfg: &'static mut T =
        unsafe { &mut *(ptr::with_exposed_provenance_mut(virt_addr_raw.try_into().unwrap())) };

    Some(dev_cfg)
}

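/// A virtio PCI capability as read from the device's PCI configuration space,
/// together with the BAR it refers to and the id of the owning device.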
#[derive(Clone)]
pub struct PciCap {
    bar: PciBar,
    dev_id: u16,
    cap: CapData,
}

impl PciCap {
    pub fn offset(&self) -> u64 {
        self.cap.offset.to_ne()
    }

    pub fn len(&self) -> u64 {
        self.cap.length.to_ne()
    }

    pub fn bar_len(&self) -> u64 {
        self.bar.length
    }

    pub fn bar_addr(&self) -> u64 {
        self.bar.mem_addr
    }

    pub fn dev_id(&self) -> u16 {
        self.dev_id
    }

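    /// Maps the common configuration structure referenced by this capability,
    /// returning `None` if it does not fit into the memory of its BAR.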
    fn map_common_cfg(&self) -> Option<VolatileRef<'static, CommonCfg>> {
        if self.bar.length < self.len() + self.offset() {
            error!(
                "Common config of the capability with id {} of device {:x} does not fit into memory specified by bar {:x}!",
                self.cap.id, self.dev_id, self.bar.index
            );
            return None;
        }

        // The capability must be large enough to hold the common configuration structure.
        if self.len() < u64::try_from(mem::size_of::<CommonCfg>()).unwrap() {
            error!(
                "Common config with id {} does not represent actual structure specified by the standard!",
                self.cap.id
            );
            return None;
        }

        let virt_addr_raw = self.bar.mem_addr + self.offset();
        let ptr = NonNull::new(ptr::with_exposed_provenance_mut::<CommonCfg>(
            virt_addr_raw.try_into().unwrap(),
        ))
        .unwrap();

        // The address was validated against the capability's BAR above.
        let com_cfg_raw = unsafe { VolatileRef::new(ptr) };

        Some(com_cfg_raw)
    }

    /// Maps the ISR status structure referenced by this capability, returning
    /// `None` if it does not fit into the memory of its BAR.
    fn map_isr_status(&self) -> Option<VolatileRef<'static, IsrStatusRaw>> {
        if self.bar.length < self.len() + self.offset() {
            error!(
                "ISR status config with id {} of device {:x} does not fit into memory specified by bar {:x}!",
                self.cap.id, self.dev_id, self.bar.index
            );
            return None;
        }

        let virt_addr_raw = self.bar.mem_addr + self.offset();
        let ptr = NonNull::new(ptr::with_exposed_provenance_mut::<IsrStatusRaw>(
            virt_addr_raw.try_into().unwrap(),
        ))
        .unwrap();

        // The address was validated against the capability's BAR above.
        let isr_stat_raw = unsafe { VolatileRef::new(ptr) };

        Some(isr_stat_raw)
    }
}

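/// Collection of all virtio configuration structures of a device that were
/// found and mapped while parsing its PCI capability list.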
pub struct UniCapsColl {
    pub(crate) com_cfg: ComCfg,
    pub(crate) notif_cfg: NotifCfg,
    pub(crate) isr_cfg: IsrStatus,
    pub(crate) sh_mem_cfg_list: Vec<ShMemCfg>,
    pub(crate) dev_cfg_list: Vec<PciCap>,
}

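/// Wrapper around the common configuration structure of a virtio PCI device.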
pub struct ComCfg {
    /// References the raw common configuration structure of the device.
    com_cfg: VolatileRef<'static, CommonCfg>,
    /// Preference rank taken from the capability id.
    rank: u8,
}

impl ComCfg {
    fn new(raw: VolatileRef<'static, CommonCfg>, rank: u8) -> Self {
        ComCfg { com_cfg: raw, rank }
    }
}

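/// Handle for configuring a single virtqueue through the common configuration
/// structure. Every access first selects the queue the handler belongs to.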
pub struct VqCfgHandler<'a> {
    vq_index: u16,
    raw: VolatileRef<'a, CommonCfg>,
}

impl VqCfgHandler<'_> {
    /// Selects the virtqueue belonging to this handler in the common configuration.
    fn select_queue(&mut self) {
        self.raw
            .as_mut_ptr()
            .queue_select()
            .write(self.vq_index.into());
    }

    /// Requests `size` entries for the virtqueue. If the device reports a
    /// smaller maximum, that maximum is kept. Returns the size that is
    /// actually in effect.
    pub fn set_vq_size(&mut self, size: u16) -> u16 {
        self.select_queue();
        let queue_size = self.raw.as_mut_ptr().queue_size();

        if queue_size.read().to_ne() >= size {
            queue_size.write(size.into());
        }

        queue_size.read().to_ne()
    }

    /// Sets the physical address of the descriptor area of the selected queue.
    pub fn set_ring_addr(&mut self, addr: PhysAddr) {
        self.select_queue();
        self.raw
            .as_mut_ptr()
            .queue_desc()
            .write(addr.as_u64().into());
    }

    /// Sets the physical address of the driver area of the selected queue.
    pub fn set_drv_ctrl_addr(&mut self, addr: PhysAddr) {
        self.select_queue();
        self.raw
            .as_mut_ptr()
            .queue_driver()
            .write(addr.as_u64().into());
    }

    /// Sets the physical address of the device area of the selected queue.
    pub fn set_dev_ctrl_addr(&mut self, addr: PhysAddr) {
        self.select_queue();
        self.raw
            .as_mut_ptr()
            .queue_device()
            .write(addr.as_u64().into());
    }

    /// Returns the notify offset of the selected queue.
    pub fn notif_off(&mut self) -> u16 {
        self.select_queue();
        self.raw.as_mut_ptr().queue_notify_off().read().to_ne()
    }

    /// Marks the selected queue as ready for use.
    pub fn enable_queue(&mut self) {
        self.select_queue();
        self.raw.as_mut_ptr().queue_enable().write(1.into());
    }
}

impl ComCfg {
    /// Selects the virtqueue with the given index and returns a handler for
    /// configuring it, or `None` if the queue does not exist (size of zero).
    pub fn select_vq(&mut self, index: u16) -> Option<VqCfgHandler<'_>> {
        self.com_cfg.as_mut_ptr().queue_select().write(index.into());

        if self.com_cfg.as_mut_ptr().queue_size().read().to_ne() == 0 {
            None
        } else {
            Some(VqCfgHandler {
                vq_index: index,
                raw: self.com_cfg.borrow_mut(),
            })
        }
    }

    /// Returns read-only access to the underlying common configuration structure.
    pub fn device_config_space(&self) -> VolatilePtr<'_, CommonCfg, ReadOnly> {
        self.com_cfg.as_ptr()
    }

    /// Returns the device status field.
    pub fn dev_status(&self) -> u8 {
        self.com_cfg.as_ptr().device_status().read().bits()
    }

    /// Resets the device by writing zero to the status field.
    pub fn reset_dev(&mut self) {
        memory_barrier();
        self.com_cfg
            .as_mut_ptr()
            .device_status()
            .write(DeviceStatus::empty());
    }

    /// Sets the FAILED status bit, indicating that the driver has given up on the device.
    pub fn set_failed(&mut self) {
        memory_barrier();
        self.com_cfg
            .as_mut_ptr()
            .device_status()
            .write(DeviceStatus::FAILED);
    }

    /// Sets the ACKNOWLEDGE status bit, indicating that the OS has noticed the device.
    pub fn ack_dev(&mut self) {
        memory_barrier();
        self.com_cfg
            .as_mut_ptr()
            .device_status()
            .update(|s| s | DeviceStatus::ACKNOWLEDGE);
    }

    /// Sets the DRIVER status bit, indicating that the OS knows how to drive the device.
    pub fn set_drv(&mut self) {
        memory_barrier();
        self.com_cfg
            .as_mut_ptr()
            .device_status()
            .update(|s| s | DeviceStatus::DRIVER);
    }

    /// Sets the FEATURES_OK status bit, indicating that feature negotiation is complete.
    pub fn features_ok(&mut self) {
        memory_barrier();
        self.com_cfg
            .as_mut_ptr()
            .device_status()
            .update(|s| s | DeviceStatus::FEATURES_OK);
    }

    /// Returns `true` if the device still reports FEATURES_OK, i.e. it accepted
    /// the features written by the driver.
    pub fn check_features(&self) -> bool {
        memory_barrier();
        self.com_cfg
            .as_ptr()
            .device_status()
            .read()
            .contains(DeviceStatus::FEATURES_OK)
    }

    /// Sets the DRIVER_OK status bit, indicating that the driver is ready and the device can be used.
    pub fn drv_ok(&mut self) {
        memory_barrier();
        self.com_cfg
            .as_mut_ptr()
            .device_status()
            .update(|s| s | DeviceStatus::DRIVER_OK);
    }

    /// Returns the features offered by the device.
    pub fn dev_features(&mut self) -> virtio::F {
        let com_cfg = self.com_cfg.as_mut_ptr();
        let device_feature_select = com_cfg.device_feature_select();
        let device_feature = com_cfg.device_feature();

        // Select the high 32 bits of the device features.
        memory_barrier();
        device_feature_select.write(1.into());
        memory_barrier();

        let mut device_features = u64::from(device_feature.read().to_ne()) << 32;

        // Select the low 32 bits of the device features.
        device_feature_select.write(0.into());
        memory_barrier();

        device_features |= u64::from(device_feature.read().to_ne());

        virtio::F::from_bits_retain(u128::from(device_features).into())
    }

    /// Writes the features accepted by the driver to the device.
    pub fn set_drv_features(&mut self, features: virtio::F) {
        let features = features.bits().to_ne() as u64;
        let com_cfg = self.com_cfg.as_mut_ptr();
        let driver_feature_select = com_cfg.driver_feature_select();
        let driver_feature = com_cfg.driver_feature();

        let high: u32 = (features >> 32) as u32;
        let low: u32 = features as u32;

        // Write the low 32 bits of the driver features.
        memory_barrier();
        driver_feature_select.write(0.into());
        memory_barrier();

        driver_feature.write(low.into());

        // Write the high 32 bits of the driver features.
        driver_feature_select.write(1.into());
        memory_barrier();

        driver_feature.write(high.into());
    }
}

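/// Notification structure of a virtio PCI device, used to compute the
/// notification addresses of its virtqueues.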
pub struct NotifCfg {
    /// Start address of the notification area inside the BAR.
    base_addr: u64,
    /// Multiplier that, together with a queue's notify offset, locates the
    /// notification address of that queue.
    notify_off_multiplier: u32,
    /// Preference rank taken from the capability id.
    rank: u8,
    /// Length of the notification area.
    length: u64,
}

impl NotifCfg {
    fn new(cap: &PciCap) -> Option<Self> {
        if cap.bar.length < cap.len() + cap.offset() {
            error!(
                "Notification config with id {} of device {:x} does not fit into memory specified by bar {:x}!",
                cap.cap.id, cap.dev_id, cap.bar.index
            );
            return None;
        }

        // Only notification capabilities carry a notify_off_multiplier; bail out if it is missing.
        let notify_off_multiplier = cap.cap.notify_off_multiplier?.to_ne();

        let base_addr = cap.bar.mem_addr + cap.offset();

        Some(NotifCfg {
            base_addr,
            notify_off_multiplier,
            rank: cap.cap.id,
            length: cap.len(),
        })
    }

    /// Returns the address to which notifications for the given virtqueue must be written.
    pub fn notification_location(&self, vq_cfg_handler: &mut VqCfgHandler<'_>) -> *mut le32 {
        let addend = u32::from(vq_cfg_handler.notif_off()) * self.notify_off_multiplier;
        let addr = self.base_addr + u64::from(addend);
        ptr::with_exposed_provenance_mut(addr.try_into().unwrap())
    }
}

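/// Control structure for notifying a virtio device about available buffers in
/// a specific virtqueue.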
pub struct NotifCtrl {
    /// Indicates whether the notification data feature has been negotiated.
    f_notif_data: bool,
    /// Address the device expects notifications for the queue to be written to.
    notif_addr: *mut le32,
}

unsafe impl Send for NotifCtrl {}

impl NotifCtrl {
    /// Creates a notification controller for the given notification address.
    pub fn new(notif_addr: *mut le32) -> Self {
        NotifCtrl {
            f_notif_data: false,
            notif_addr,
        }
    }

    /// Enables writing the full notification data instead of only the virtqueue number.
    pub fn enable_notif_data(&mut self) {
        self.f_notif_data = true;
    }

    pub fn notify_dev(&self, data: NotificationData) {
        if self.f_notif_data {
            // The notification data feature is in use, so the complete
            // notification data is written to the notification address.
            unsafe {
                self.notif_addr.write_volatile(data.into_bits());
            }
        } else {
            // Otherwise only the virtqueue number is written.
            unsafe {
                self.notif_addr
                    .cast::<le16>()
                    .write_volatile(data.vqn().into());
            }
        }
    }
}

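/// Wrapper around the ISR status structure of a virtio PCI device.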
pub struct IsrStatus {
    /// References the raw ISR status structure of the device.
    isr_stat: VolatileRef<'static, IsrStatusRaw>,
    /// Preference rank taken from the capability id.
    rank: u8,
}

impl IsrStatus {
    fn new(raw: VolatileRef<'static, IsrStatusRaw>, rank: u8) -> Self {
        IsrStatus {
            isr_stat: raw,
            rank,
        }
    }

    pub fn is_queue_interrupt(&self) -> IsrStatusRaw {
        self.isr_stat.as_ptr().read()
    }

    pub fn acknowledge(&mut self) {
        // Reading the ISR status register already resets it to zero,
        // so there is nothing left to do here.
    }
}

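/// Shared memory configuration of a virtio PCI device, describing one shared
/// memory region announced by the device.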
pub struct ShMemCfg {
    /// Start address of the shared memory region.
    mem_addr: u64,
    /// Length of the shared memory region.
    length: u64,
    sh_mem: ShMem,
    /// Shared memory region id as defined by the device.
    id: u8,
}

impl ShMemCfg {
    fn new(cap: &PciCap) -> Option<Self> {
        if cap.bar.length < cap.len() + cap.offset() {
            error!(
                "Shared memory config with id {} of device {:x} does not fit into memory specified by bar {:x}!",
                cap.cap.id, cap.dev_id, cap.bar.index
            );
            return None;
        }

        let offset = cap.cap.offset.to_ne();
        let length = cap.cap.length.to_ne();

        let virt_addr_raw = cap.bar.mem_addr + offset;
        let raw_ptr = ptr::with_exposed_provenance_mut::<u8>(virt_addr_raw.try_into().unwrap());

        // Zero the shared memory region.
        unsafe {
            for i in 0..usize::try_from(length).unwrap() {
                *(raw_ptr.add(i)) = 0;
            }
        };

        // Virtio specifies 64 bit offsets and lengths; this code currently assumes a 64 bit usize.
        assert!(mem::size_of::<usize>() == 8);

        Some(ShMemCfg {
            mem_addr: virt_addr_raw,
            length: cap.len(),
            sh_mem: ShMem {
                ptr: raw_ptr,
                len: cap.bar.length as usize,
            },
            id: cap.cap.id,
        })
    }
}

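/// Shared memory region exposed as a byte slice; the memory is zeroed again
/// when the region is dropped.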
struct ShMem {
    ptr: *mut u8,
    len: usize,
}

impl core::ops::Deref for ShMem {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        // `ptr` and `len` refer to the memory region set up in `ShMemCfg::new`.
        unsafe { core::slice::from_raw_parts(self.ptr, self.len) }
    }
}

impl core::ops::DerefMut for ShMem {
    fn deref_mut(&mut self) -> &mut [u8] {
        // `ptr` and `len` refer to the memory region set up in `ShMemCfg::new`.
        unsafe { core::slice::from_raw_parts_mut(self.ptr, self.len) }
    }
}

impl Drop for ShMem {
    fn drop(&mut self) {
        // Zero the shared memory region again before it is released.
        for i in 0..self.len {
            unsafe {
                *(self.ptr.add(i)) = 0;
            }
        }
    }
}

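/// A PCI BAR that has been mapped into memory, identified by its index and
/// described by its mapped address and length.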
#[derive(Copy, Clone, Debug)]
pub struct PciBar {
    index: u8,
    mem_addr: u64,
    length: u64,
}

impl PciBar {
    pub fn new(index: u8, mem_addr: u64, length: u64) -> Self {
        PciBar {
            index,
            mem_addr,
            length,
        }
    }
}

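/// Reads the virtio vendor capabilities of the given PCI device and maps the
/// BARs they refer to.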
fn read_caps(device: &PciDevice<PciConfigRegion>) -> Result<Vec<PciCap>, PciError> {
    let device_id = device.device_id();

    let capabilities = device
        .capabilities()
        .unwrap()
        // Only vendor-specific capabilities describe virtio configuration structures.
        .filter_map(|capability| match capability {
            PciCapability::Vendor(capability) => Some(capability),
            _ => None,
        })
        .map(|addr| CapData::read(addr, device.access()).unwrap())
        // The PCI configuration access capability is not needed here.
        .filter(|cap| cap.cfg_type != CapCfgType::Pci)
        // Map the BAR every remaining capability refers to.
        .map(|cap| {
            let slot = cap.bar;
            let (addr, size) = device.memory_map_bar(slot, true).unwrap();
            PciCap {
                bar: VirtioPciBar::new(slot, addr.as_u64(), size.try_into().unwrap()),
                dev_id: device_id,
                cap,
            }
        })
        .collect::<Vec<_>>();

    if capabilities.is_empty() {
        error!("No virtio capability found for device {device_id:x}");
        Err(PciError::NoVirtioCaps(device_id))
    } else {
        Ok(capabilities)
    }
}

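/// Parses the PCI capability list of a virtio device and maps all required
/// configuration structures into a [`UniCapsColl`].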
pub(crate) fn map_caps(device: &PciDevice<PciConfigRegion>) -> Result<UniCapsColl, VirtioError> {
    let device_id = device.device_id();

    // The virtio configuration structures are announced via the PCI capability list.
    if !device.status().has_capability_list() {
        error!("Found virtio device without capability list. Aborting!");
        return Err(VirtioError::FromPci(PciError::NoCapPtr(device_id)));
    }

    let cap_list = match read_caps(device) {
        Ok(list) => list,
        Err(pci_error) => return Err(VirtioError::FromPci(pci_error)),
    };

    let mut com_cfg = None;
    let mut notif_cfg = None;
    let mut isr_cfg = None;
    let mut sh_mem_cfg_list = Vec::new();
    let mut dev_cfg_list = Vec::new();
    for pci_cap in cap_list {
        match pci_cap.cap.cfg_type {
            CapCfgType::Common => {
                if com_cfg.is_none() {
                    match pci_cap.map_common_cfg() {
                        Some(cap) => com_cfg = Some(ComCfg::new(cap, pci_cap.cap.id)),
                        None => error!(
                            "Common config capability with id {} of device {:x} could not be mapped!",
                            pci_cap.cap.id, device_id
                        ),
                    }
                }
            }
            CapCfgType::Notify => {
                if notif_cfg.is_none() {
                    match NotifCfg::new(&pci_cap) {
                        Some(notif) => notif_cfg = Some(notif),
                        None => error!(
                            "Notification config capability with id {} of device {device_id:x} could not be used!",
                            pci_cap.cap.id
                        ),
                    }
                }
            }
            CapCfgType::Isr => {
                if isr_cfg.is_none() {
                    match pci_cap.map_isr_status() {
                        Some(isr_stat) => isr_cfg = Some(IsrStatus::new(isr_stat, pci_cap.cap.id)),
                        None => error!(
                            "ISR status config capability with id {} of device {device_id:x} could not be used!",
                            pci_cap.cap.id
                        ),
                    }
                }
            }
            CapCfgType::SharedMemory => match ShMemCfg::new(&pci_cap) {
                Some(sh_mem) => sh_mem_cfg_list.push(sh_mem),
                None => error!(
                    "Shared memory config capability with id {} of device {device_id:x} could not be used!",
                    pci_cap.cap.id,
                ),
            },
            CapCfgType::Device => dev_cfg_list.push(pci_cap),

            // All other capability types are ignored.
            _ => continue,
        }
    }

    Ok(UniCapsColl {
        com_cfg: com_cfg.ok_or(VirtioError::NoComCfg(device_id))?,
        notif_cfg: notif_cfg.ok_or(VirtioError::NoNotifCfg(device_id))?,
        isr_cfg: isr_cfg.ok_or(VirtioError::NoIsrCfg(device_id))?,
        sh_mem_cfg_list,
        dev_cfg_list,
    })
}

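/// Initializes the driver for a modern virtio PCI device, dispatching on the
/// virtio device id derived from the PCI device id.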
pub(crate) fn init_device(
    device: &PciDevice<PciConfigRegion>,
) -> Result<VirtioDriver, DriverError> {
    let device_id = device.device_id();

    // Modern virtio devices use PCI device ids of 0x1040 plus the virtio device id;
    // everything below is a legacy or transitional device.
    if device_id < 0x1040 {
        warn!(
            "Legacy/transitional Virtio device with id {device_id:#x} is not supported, skipping!"
        );

        return Err(DriverError::InitVirtioDevFail(
            VirtioError::DevNotSupported(device_id),
        ));
    }

    let id = virtio::Id::from(u8::try_from(device_id - 0x1040).unwrap());

    match id {
        #[cfg(all(
            not(all(target_arch = "x86_64", feature = "rtl8139")),
            any(feature = "tcp", feature = "udp")
        ))]
        virtio::Id::Net => match VirtioNetDriver::init(device) {
            Ok(virt_net_drv) => {
                info!("Virtio network driver initialized.");

                let irq = device.get_irq().unwrap();
                crate::arch::interrupts::add_irq_name(irq, "virtio");
                info!("Virtio interrupt handler at line {irq}");

                Ok(VirtioDriver::Network(virt_net_drv))
            }
            Err(virtio_error) => {
                error!(
                    "Virtio network driver could not be initialized with device: {device_id:x}"
                );
                Err(DriverError::InitVirtioDevFail(virtio_error))
            }
        },
        #[cfg(feature = "vsock")]
        virtio::Id::Vsock => match VirtioVsockDriver::init(device) {
            Ok(virt_sock_drv) => {
                info!("Virtio vsock driver initialized.");

                let irq = device.get_irq().unwrap();
                crate::arch::interrupts::add_irq_name(irq, "virtio");
                info!("Virtio interrupt handler at line {irq}");

                Ok(VirtioDriver::Vsock(virt_sock_drv))
            }
            Err(virtio_error) => {
                error!("Virtio vsock driver could not be initialized with device: {device_id:x}");
                Err(DriverError::InitVirtioDevFail(virtio_error))
            }
        },
        #[cfg(feature = "fuse")]
        virtio::Id::Fs => {
            match VirtioFsDriver::init(device) {
                Ok(virt_fs_drv) => {
                    info!("Virtio filesystem driver initialized.");
                    Ok(VirtioDriver::FileSystem(virt_fs_drv))
                }
                Err(virtio_error) => {
                    error!(
                        "Virtio filesystem driver could not be initialized with device: {device_id:x}"
                    );
                    Err(DriverError::InitVirtioDevFail(virtio_error))
                }
            }
        }
        id => {
            warn!("Virtio device {id:?} is not supported, skipping!");

            Err(DriverError::InitVirtioDevFail(
                VirtioError::DevNotSupported(device_id),
            ))
        }
    }
}

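/// Driver instance created for a successfully initialized virtio device.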
pub(crate) enum VirtioDriver {
    #[cfg(all(
        not(all(target_arch = "x86_64", feature = "rtl8139")),
        any(feature = "tcp", feature = "udp")
    ))]
    Network(VirtioNetDriver),
    #[cfg(feature = "vsock")]
    Vsock(VirtioVsockDriver),
    #[cfg(feature = "fuse")]
    FileSystem(VirtioFsDriver),
}