From ff45de91805610bafb7d084345e7781731816d6e Mon Sep 17 00:00:00 2001 From: Eric Newberry Date: Wed, 25 Jun 2025 23:09:00 +0000 Subject: [PATCH 1/3] storage: Add storvsc_manager and disk_storvsc --- Cargo.lock | 23 + Cargo.toml | 1 + openhcl/openhcl_boot/src/main.rs | 4 +- openhcl/rootfs.config | 2 +- openhcl/underhill_core/Cargo.toml | 3 + openhcl/underhill_core/src/dispatch/mod.rs | 25 +- .../src/dispatch/vtl2_settings_worker.rs | 45 ++ openhcl/underhill_core/src/lib.rs | 2 + openhcl/underhill_core/src/options.rs | 6 + openhcl/underhill_core/src/servicing.rs | 13 + openhcl/underhill_core/src/storvsc_manager.rs | 421 ++++++++++++++ openhcl/underhill_core/src/worker.rs | 28 + openhcl/underhill_init/src/lib.rs | 5 + vm/devices/storage/disk_storvsc/Cargo.toml | 29 + vm/devices/storage/disk_storvsc/src/lib.rs | 532 ++++++++++++++++++ vm/devices/storage/scsi_defs/src/lib.rs | 54 ++ vm/devices/storage/storvsc_driver/Cargo.toml | 3 + vm/devices/storage/storvsc_driver/src/lib.rs | 210 ++++++- vm/devices/storage/storvsp/src/lib.rs | 1 + .../storage/storvsp_protocol/Cargo.toml | 1 + .../storage/storvsp_protocol/src/lib.rs | 3 +- vm/vmcore/guestmem/src/lib.rs | 7 +- 22 files changed, 1382 insertions(+), 36 deletions(-) create mode 100644 openhcl/underhill_core/src/storvsc_manager.rs create mode 100644 vm/devices/storage/disk_storvsc/Cargo.toml create mode 100644 vm/devices/storage/disk_storvsc/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 00595fe0b2..c057c487be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1373,6 +1373,22 @@ dependencies = [ "vm_resource", ] +[[package]] +name = "disk_storvsc" +version = "0.0.0" +dependencies = [ + "disk_backend", + "futures", + "inspect", + "scsi_buffers", + "scsi_defs", + "storvsc_driver", + "storvsp_protocol", + "tracing", + "vmbus_user_channel", + "zerocopy 0.8.24", +] + [[package]] name = "disk_striped" version = "0.0.0" @@ -6926,10 +6942,12 @@ dependencies = [ name = "storvsc_driver" version = "0.0.0" dependencies = [ + 
"anyhow", "futures", "futures-concurrency", "guestmem", "inspect", + "mesh", "mesh_channel", "pal_async", "scsi_buffers", @@ -6941,6 +6959,7 @@ dependencies = [ "thiserror 2.0.12", "tracing", "tracing_helpers", + "user_driver", "vmbus_async", "vmbus_channel", "vmbus_ring", @@ -6996,6 +7015,7 @@ version = "0.0.0" dependencies = [ "arbitrary", "guid", + "inspect", "open_enum", "scsi_defs", "zerocopy 0.8.24", @@ -7777,6 +7797,7 @@ dependencies = [ "disk_blockdevice", "disk_get_vmgs", "disk_nvme", + "disk_storvsc", "firmware_pcat", "firmware_uefi", "firmware_uefi_custom_vars", @@ -7843,7 +7864,9 @@ dependencies = [ "sparse_mmap", "state_unit", "storage_string", + "storvsc_driver", "storvsp", + "storvsp_protocol", "storvsp_resources", "tee_call", "test_with_tracing", diff --git a/Cargo.toml b/Cargo.toml index 2d3f4380b7..de85a77dde 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -268,6 +268,7 @@ disk_layered = { path = "vm/devices/storage/disk_layered" } disk_nvme = { path = "vm/devices/storage/disk_nvme" } disk_delay = { path = "vm/devices/storage/disk_delay" } disk_prwrap = { path = "vm/devices/storage/disk_prwrap" } +disk_storvsc = { path = "vm/devices/storage/disk_storvsc" } disk_striped = { path = "vm/devices/storage/disk_striped" } disk_vhd1 = { path = "vm/devices/storage/disk_vhd1" } disk_vhdmp = { path = "vm/devices/storage/disk_vhdmp" } diff --git a/openhcl/openhcl_boot/src/main.rs b/openhcl/openhcl_boot/src/main.rs index 2fb4f47071..bdbffeda19 100644 --- a/openhcl/openhcl_boot/src/main.rs +++ b/openhcl/openhcl_boot/src/main.rs @@ -158,7 +158,9 @@ fn build_kernel_command_line( "rdinit=/underhill-init", // Default to user-mode NVMe driver. "OPENHCL_NVME_VFIO=1", - // The next three items reduce the memory overhead of the storvsc driver. + // Default to user-mode storvsc driver. + "OPENHCL_STORVSC_USERMODE=1", + // The next three items reduce the memory overhead of the kernel storvsc driver. // Since it is only used for DVD, performance is not critical. 
"hv_storvsc.storvsc_vcpus_per_sub_channel=2048", // Fix number of hardware queues at 2. diff --git a/openhcl/rootfs.config b/openhcl/rootfs.config index ed620e589f..099a51f9ff 100644 --- a/openhcl/rootfs.config +++ b/openhcl/rootfs.config @@ -35,7 +35,7 @@ file /lib/modules/001/pci-hyperv.ko ${OPENHCL_KERNEL_PATH}/build/native/ # Storvsc is last because it sometimes takes a long time to load and should not # block other device startup. -file /lib/modules/999/hv_storvsc.ko ${OPENHCL_KERNEL_PATH}/build/native/bin/${OPENHCL_KERNEL_ARCH}/modules/kernel/drivers/scsi/hv_storvsc.ko 0644 0 0 +#file /lib/modules/999/hv_storvsc.ko ${OPENHCL_KERNEL_PATH}/build/native/bin/${OPENHCL_KERNEL_ARCH}/modules/kernel/drivers/scsi/hv_storvsc.ko 0644 0 0 # These nodes are needed for early logging before devfs is mounted. nod /dev/null 0666 0 0 c 1 3 diff --git a/openhcl/underhill_core/Cargo.toml b/openhcl/underhill_core/Cargo.toml index aa2b8a47cc..45fa2452ab 100644 --- a/openhcl/underhill_core/Cargo.toml +++ b/openhcl/underhill_core/Cargo.toml @@ -48,6 +48,7 @@ disk_backend_resources.workspace = true disk_blockdevice.workspace = true disk_get_vmgs.workspace = true disk_nvme.workspace = true +disk_storvsc.workspace = true firmware_uefi.workspace = true firmware_uefi_custom_vars.workspace = true hyperv_ic_guest.workspace = true @@ -79,7 +80,9 @@ scsidisk.workspace = true scsidisk_resources.workspace = true serial_16550_resources.workspace = true storage_string.workspace = true +storvsc_driver.workspace = true storvsp.workspace = true +storvsp_protocol.workspace = true storvsp_resources.workspace = true tpm_resources.workspace = true tpm = { workspace = true, features = ["tpm"] } diff --git a/openhcl/underhill_core/src/dispatch/mod.rs b/openhcl/underhill_core/src/dispatch/mod.rs index 968f358eb8..2fe13d9658 100644 --- a/openhcl/underhill_core/src/dispatch/mod.rs +++ b/openhcl/underhill_core/src/dispatch/mod.rs @@ -16,6 +16,8 @@ use crate::reference_time::ReferenceTime; use 
crate::servicing; use crate::servicing::NvmeSavedState; use crate::servicing::ServicingState; +use crate::servicing::StorvscSavedState; +use crate::storvsc_manager::StorvscManager; use crate::vmbus_relay_unit::VmbusRelayHandle; use crate::worker::FirmwareType; use crate::worker::NetworkSettingsError; @@ -144,6 +146,7 @@ pub(crate) struct LoadedVm { pub uevent_listener: Arc, pub resolver: ResourceResolver, pub nvme_manager: Option, + pub storvsc_manager: Option, pub emuplat_servicing: EmuplatServicing, pub device_interfaces: Option, pub vmbus_client: Option, @@ -323,6 +326,7 @@ impl LoadedVm { resp.field("vmgs", self.vmgs.as_ref().map(|x| &x.0)); resp.field("network", &self.network_settings); resp.field("nvme", &self.nvme_manager); + resp.field("storvsc", &self.storvsc_manager); resp.field("resolver", &self.resolver); resp.field( "vtl0_memory_map", @@ -575,6 +579,13 @@ impl LoadedVm { } }; + // Reset all user-mode storvsc devices. + let shutdown_storvsc = async { + if let Some(storvsc_manager) = self.storvsc_manager.take() { + storvsc_manager.shutdown().instrument(tracing::info_span!("shutdown_storvsc", CVM_ALLOWED, %correlation_id)).await; + } + }; + // Unbind drivers from the PCI devices to prepare for a kernel // restart. 
let shutdown_pci = async { @@ -583,7 +594,7 @@ impl LoadedVm { .await }; - let (r, (), ()) = (shutdown_pci, shutdown_mana, shutdown_nvme).join().await; + let (r, (), (), ()) = (shutdown_pci, shutdown_mana, shutdown_nvme, shutdown_storvsc).join().await; r?; Ok(state) @@ -731,6 +742,17 @@ impl LoadedVm { None }; + let storvsc_state = if let Some(s) = &self.storvsc_manager { + s.save() + .instrument(tracing::info_span!("storvsc_manager_save", CVM_ALLOWED)) + .await + .map(|state| StorvscSavedState { + storvsc_state: state, + }) + } else { + None + }; + let units = self.save_units().await.context("state unit save failed")?; let vmgs = if let Some((vmgs_thin_client, vmgs_disk_metadata, _)) = self.vmgs.as_ref() { Some(( @@ -770,6 +792,7 @@ impl LoadedVm { nvme_state, dma_manager_state, vmbus_client, + storvsc_state, }, units, }; diff --git a/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs b/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs index 694a4b3f52..733b49723e 100644 --- a/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs +++ b/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs @@ -5,6 +5,7 @@ use super::LoadedVm; use crate::nvme_manager::manager::NvmeDiskConfig; +use crate::storvsc_manager::StorvscDiskConfig; use crate::worker::NicConfig; use anyhow::Context; use cvm_tracing::CVM_ALLOWED; @@ -243,6 +244,7 @@ pub struct DeviceInterfaces { scsi_dvds: HashMap>, scsi_request: HashMap>, use_nvme_vfio: bool, + use_storvsc_usermode: bool, } impl Vtl2SettingsWorker { @@ -376,6 +378,7 @@ impl Vtl2SettingsWorker { &StorageContext { uevent_listener, use_nvme_vfio: self.interfaces.use_nvme_vfio, + use_storvsc_usermode: self.interfaces.use_storvsc_usermode, }, &disk, false, @@ -394,6 +397,7 @@ impl Vtl2SettingsWorker { &StorageContext { uevent_listener, use_nvme_vfio: self.interfaces.use_nvme_vfio, + use_storvsc_usermode: self.interfaces.use_storvsc_usermode, }, &disk, false, @@ -957,6 +961,7 @@ async fn 
make_disk_type_from_physical_devices( struct StorageContext<'a> { uevent_listener: &'a UeventListener, use_nvme_vfio: bool, + use_storvsc_usermode: bool, } #[instrument(skip_all, fields(CVM_ALLOWED))] @@ -1000,6 +1005,36 @@ async fn make_disk_type_from_physical_device( })); } + // Special case for VScsi when using usermode storvsc. + if storage_context.use_storvsc_usermode + && matches!( + single_device.device_type, + underhill_config::DeviceType::VScsi + ) + { + // Wait for the SCSI controller to arrive. + let devpath = uio_path(&controller_instance_id); + async { + ctx.until_cancelled(storage_context.uevent_listener.wait_for_devpath(&devpath)) + .await??; + Ok(()) + } + .await + .map_err(|err| Error::StorageCannotFindVtl2Device { + device_type: single_device.device_type, + instance_id: controller_instance_id, + sub_device_path, + source: err, + })?; + + // Cannot validate device actually exists yet. Check this later + // TODO: Is this true? + return Ok(Resource::new(StorvscDiskConfig { + instance_guid: controller_instance_id, + lun: sub_device_path as u8, + })); + } + // Wait for the device to arrive. let devname = async { let devname = ctx @@ -1428,6 +1463,7 @@ pub async fn create_storage_controllers_from_vtl2_settings( ctx: &mut CancelContext, uevent_listener: &UeventListener, use_nvme_vfio: bool, + use_storvsc_usermode: bool, settings: &Vtl2SettingsDynamic, sub_channels: u16, is_restoring: bool, @@ -1443,6 +1479,7 @@ pub async fn create_storage_controllers_from_vtl2_settings( let storage_context = StorageContext { uevent_listener, use_nvme_vfio, + use_storvsc_usermode, }; let ide_controller = make_ide_controller_config(ctx, &storage_context, settings, is_restoring).await?; @@ -1643,6 +1680,11 @@ fn vpci_path(instance_id: &Guid) -> (String, PathBuf) { (pci_id, devpath) } +fn uio_path(instance_id: &Guid) -> PathBuf { + // TODO: Is this enough? + PathBuf::from(format!("/sys/bus/vmbus/devices/{instance_id}/uio")) +} + /// Waits for the PCI path to get populated. 
The PCI path is just a symlink to the actual /// device path. This should be called once the device path is available. pub async fn wait_for_pci_path(pci_id: &String) { @@ -1782,6 +1824,7 @@ impl InitialControllers { uevent_listener: &UeventListener, dps: &DevicePlatformSettings, use_nvme_vfio: bool, + use_storvsc_usermode: bool, is_restoring: bool, default_io_queue_depth: u32, ) -> anyhow::Result { @@ -1801,6 +1844,7 @@ impl InitialControllers { &mut context, uevent_listener, use_nvme_vfio, + use_storvsc_usermode, dynamic, fixed.scsi_sub_channels, is_restoring, @@ -1851,6 +1895,7 @@ impl InitialControllers { scsi_dvds, scsi_request, use_nvme_vfio, + use_storvsc_usermode, }, }; diff --git a/openhcl/underhill_core/src/lib.rs b/openhcl/underhill_core/src/lib.rs index 652776296a..bee3f9af34 100644 --- a/openhcl/underhill_core/src/lib.rs +++ b/openhcl/underhill_core/src/lib.rs @@ -20,6 +20,7 @@ mod nvme_manager; mod options; mod reference_time; mod servicing; +mod storvsc_manager; mod threadpool_vm_task_backend; mod vmbus_relay_unit; mod vmgs_logger; @@ -330,6 +331,7 @@ async fn launch_workers( disable_uefi_frontpage: opt.disable_uefi_frontpage, guest_state_encryption_policy: opt.guest_state_encryption_policy, attempt_ak_cert_callback: opt.attempt_ak_cert_callback, + storvsc_usermode: opt.storvsc_usermode, }; let (mut remote_console_cfg, framebuffer_access) = diff --git a/openhcl/underhill_core/src/options.rs b/openhcl/underhill_core/src/options.rs index d12e339e2e..1f9e319abe 100644 --- a/openhcl/underhill_core/src/options.rs +++ b/openhcl/underhill_core/src/options.rs @@ -187,6 +187,10 @@ pub struct Options { /// (HCL_ATTEMPT_AK_CERT_CALLBACK=1) Attempt to renew the AK cert. /// If not specified, use the configuration in DPSv2 ManagementVtlFeatures. pub attempt_ak_cert_callback: Option, + + /// (OPENHCL_STORVSC_USERMODE=1) + /// Use the user-mode storvsc driver instead of the Linux driver. 
+ pub storvsc_usermode: bool, } impl Options { @@ -322,6 +326,7 @@ impl Options { .map_err(|e| tracing::warn!("failed to parse HCL_ATTEMPT_AK_CERT_CALLBACK: {:#}", e)) .ok() .flatten(); + let storvsc_usermode = parse_env_bool("OPENHCL_STORVSC_USERMODE"); let mut args = std::env::args().chain(extra_args); // Skip our own filename. @@ -381,6 +386,7 @@ impl Options { disable_uefi_frontpage, guest_state_encryption_policy, attempt_ak_cert_callback, + storvsc_usermode, }) } diff --git a/openhcl/underhill_core/src/servicing.rs b/openhcl/underhill_core/src/servicing.rs index 0606f51804..4a3567d30c 100644 --- a/openhcl/underhill_core/src/servicing.rs +++ b/openhcl/underhill_core/src/servicing.rs @@ -47,6 +47,14 @@ mod state { pub nvme_state: crate::nvme_manager::save_restore::NvmeManagerSavedState, } + #[derive(Protobuf)] + #[mesh(package = "underhill")] + pub struct StorvscSavedState { + /// Storvsc manager (worker) saved state. + #[mesh(1)] + pub storvsc_state: crate::storvsc_manager::save_restore::StorvscManagerSavedState, + } + /// Servicing state needed to create the LoadedVm object. 
#[derive(Protobuf)] #[mesh(package = "underhill")] @@ -84,6 +92,8 @@ mod state { pub dma_manager_state: Option, #[mesh(10002)] pub vmbus_client: Option, + #[mesh(10003)] + pub storvsc_state: Option, } #[derive(Protobuf)] @@ -202,6 +212,7 @@ pub mod transposed { pub nvme_state: Option>, pub dma_manager_state: Option>, pub vmbus_client: Option>, + pub storvsc_state: Option>, } /// A transposed `Option`, where each field of @@ -232,6 +243,7 @@ pub mod transposed { nvme_state, dma_manager_state, vmbus_client, + storvsc_state, } = state; OptionServicingInitState { @@ -248,6 +260,7 @@ pub mod transposed { nvme_state: Some(nvme_state), dma_manager_state: Some(dma_manager_state), vmbus_client: Some(vmbus_client), + storvsc_state: Some(storvsc_state), } } else { OptionServicingInitState::default() diff --git a/openhcl/underhill_core/src/storvsc_manager.rs b/openhcl/underhill_core/src/storvsc_manager.rs new file mode 100644 index 0000000000..5472c82e71 --- /dev/null +++ b/openhcl/underhill_core/src/storvsc_manager.rs @@ -0,0 +1,421 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Provides access to storvsc driver instances shared between disks on each vStor controller. 
+ +use crate::servicing::StorvscSavedState; +use crate::storvsc_manager::save_restore::StorvscManagerSavedState; +use crate::storvsc_manager::save_restore::StorvscSavedDriverConfig; +use anyhow::Context; +use async_trait::async_trait; +use disk_backend::resolve::ResolveDiskParameters; +use disk_backend::resolve::ResolvedDisk; +use futures::StreamExt; +use futures::TryFutureExt; +use futures::future::join_all; +use inspect::Inspect; +use mesh::MeshPayload; +use mesh::rpc::Rpc; +use mesh::rpc::RpcSend; +use openhcl_dma_manager::AllocationVisibility; +use openhcl_dma_manager::DmaClientParameters; +use openhcl_dma_manager::DmaClientSpawner; +use openhcl_dma_manager::LowerVtlPermissionPolicy; +use pal_async::task::Spawn; +use pal_async::task::Task; +use std::collections::HashMap; +use std::collections::hash_map; +use std::sync::Arc; +use storvsc_driver::StorvscDriver; +use thiserror::Error; +use tracing::Instrument; +use vm_resource::AsyncResolveResource; +use vm_resource::ResourceId; +use vm_resource::ResourceResolver; +use vm_resource::kind::DiskHandleKind; +use vmbus_user_channel::MappedRingMem; +use vmcore::vm_task::VmTaskDriverSource; + +#[derive(Debug, Error)] +#[error("storvsc driver {instance_guid} error")] +pub struct DriverError { + instance_guid: guid::Guid, + #[source] + source: InnerError, +} + +#[derive(Debug, Error)] +enum InnerError { + #[error("failed to initialize vmbus channel")] + Vmbus(#[source] vmbus_user_channel::Error), + #[error("failed to initialize storvsc driver")] + DriverInitFailed(#[source] storvsc_driver::StorvscError), + #[error("failed to create dma client for device")] + DmaClient(#[source] anyhow::Error), +} + +#[derive(Debug)] +pub struct StorvscManager { + task: Task<()>, + client: StorvscManagerClient, + /// Running environment (memory layout) supports save/restore. 
+ save_restore_supported: bool, +} + +impl Inspect for StorvscManager { + fn inspect(&self, req: inspect::Request<'_>) { + let mut resp = req.respond(); + resp.merge(inspect::adhoc(|req| { + self.client.sender.send(Request::Inspect(req.defer())) + })); + } +} + +impl StorvscManager { + pub fn new( + driver_source: &VmTaskDriverSource, + save_restore_supported: bool, + is_isolated: bool, + saved_state: Option, + dma_client_spawner: DmaClientSpawner, + ) -> Self { + let (send, recv) = mesh::channel(); + let driver = driver_source.simple(); + let mut worker = StorvscManagerWorker { + driver_source: driver_source.clone(), + drivers: HashMap::new(), + save_restore_supported, + is_isolated, + dma_client_spawner, + }; + let task = driver.spawn("storvsc-manager", async move { + // Restore saved data (if present) before async worker thread runs. + if let Some(s) = saved_state.as_ref() { + if let Err(e) = StorvscManager::restore(&mut worker, s) + .instrument(tracing::info_span!("storvsc_manager_restore")) + .await + { + tracing::error!( + error = e.as_ref() as &dyn std::error::Error, + "failed to restore storvsc manager" + ); + } + }; + worker.run(recv).await + }); + Self { + task, + client: StorvscManagerClient { sender: send }, + save_restore_supported, + } + } + + pub fn client(&self) -> &StorvscManagerClient { + &self.client + } + + pub async fn shutdown(self) { + self.client.sender.send(Request::Shutdown { + span: tracing::info_span!("shutdown_storvsc_manager"), + }); + self.task.await; + } + + /// Save storvsc manager's state during servicing. + pub async fn save(&self) -> Option { + // The manager has no data to save, so everything will be done in the Worker task which can + // be contacted through the Client. + if self.save_restore_supported { + Some(self.client().save().await?) + } else { + None + } + } + + /// Restore the storvsc manager's state after servicing. 
+ async fn restore( + worker: &mut StorvscManagerWorker, + saved_state: &StorvscSavedState, + ) -> anyhow::Result<()> { + worker + .restore(&saved_state.storvsc_state) + .instrument(tracing::info_span!("storvsc_worker_restore")) + .await?; + + Ok(()) + } +} + +enum Request { + Inspect(inspect::Deferred), + GetDriver(Rpc>, DriverError>>), + Save(Rpc<(), Result>), + Shutdown { span: tracing::Span }, +} + +#[derive(Debug, Clone)] +pub struct StorvscManagerClient { + sender: mesh::Sender, +} + +impl StorvscManagerClient { + pub async fn get_driver( + &self, + instance_guid: guid::Guid, + ) -> anyhow::Result>> { + Ok(self + .sender + .call(Request::GetDriver, instance_guid) + .instrument(tracing::info_span!( + "storvsc_get_driver", + instance_guid = instance_guid.to_string() + )) + .await + .context("storvsc manager is shutdown")??) + } + + pub async fn save(&self) -> Option { + match self.sender.call(Request::Save, ()).await { + Ok(s) => s.ok(), + Err(_) => None, + } + } +} + +#[derive(Inspect)] +struct StorvscManagerWorker { + #[inspect(skip)] + driver_source: VmTaskDriverSource, + #[inspect(iter_by_key)] + drivers: HashMap>>, + /// Running environment (memory layout) allows save/restore. + save_restore_supported: bool, + /// If this VM is isolated or not. This influences DMA client allocations. 
+ is_isolated: bool, + #[inspect(skip)] + dma_client_spawner: DmaClientSpawner, +} + +impl StorvscManagerWorker { + async fn run(&mut self, mut recv: mesh::Receiver) { + let join_span = loop { + let Some(req) = recv.next().await else { + break tracing::Span::none(); + }; + match req { + Request::Inspect(deferred) => deferred.inspect(&self), + Request::GetDriver(rpc) => { + rpc.handle(async |instance_guid| { + self.get_driver(instance_guid) + .map_err(|source| DriverError { + instance_guid, + source, + }) + .await + }) + .await + } + Request::Save(rpc) => { + rpc.handle(async |_| self.save().await) + .instrument(tracing::info_span!("storvsc_save_state")) + .await + } + Request::Shutdown { span } => { + break span; + } + } + }; + + if !self.save_restore_supported { + async { + join_all(self.drivers.drain().map(|(guid, driver)| { + let guid_str = guid.to_string(); + async move { + driver + .stop() + .instrument(tracing::info_span!( + "shutdown_storvsc_driver", + guid = guid_str + )) + .await + } + })) + .await + } + .instrument(join_span) + .await; + } + } + + async fn get_driver( + &mut self, + instance_guid: guid::Guid, + ) -> Result>, InnerError> { + let storvsc = match self.drivers.entry(instance_guid) { + hash_map::Entry::Occupied(entry) => entry.get().clone(), + hash_map::Entry::Vacant(entry) => { + let file = vmbus_user_channel::open_uio_device(&instance_guid) + .map_err(InnerError::Vmbus)?; + let channel = vmbus_user_channel::channel(&self.driver_source.simple(), file) + .map_err(InnerError::Vmbus)?; + + let dma_client = self + .dma_client_spawner + .new_client(DmaClientParameters { + device_name: format!("storvsc_{}", instance_guid), + lower_vtl_policy: LowerVtlPermissionPolicy::Any, + allocation_visibility: if self.is_isolated { + AllocationVisibility::Shared + } else { + AllocationVisibility::Private + }, + persistent_allocations: self.save_restore_supported, + }) + .map_err(InnerError::DmaClient)?; + + let mut driver = 
Arc::new(StorvscDriver::new(dma_client)); + + Arc::get_mut(&mut driver) + .unwrap() + .run( + &self.driver_source, + channel, + storvsp_protocol::ProtocolVersion { + major_minor: storvsp_protocol::VERSION_BLUE, + reserved: 0, + }, + 0, // TODO: Pick right VP + ) + .map_err(InnerError::DriverInitFailed) + .await?; + + entry.insert(driver).clone() + } + }; + Ok(storvsc) + } + + /// Saves storvsc driver states into buffer during servicing. + pub async fn save(&mut self) -> anyhow::Result { + let mut storvsc_drivers: Vec = Vec::new(); + for (guid, driver) in self.drivers.iter_mut() { + storvsc_drivers.push(StorvscSavedDriverConfig { + instance_guid: *guid, + driver_state: driver + .save() + .instrument(tracing::info_span!( + "storvsc_driver_save", + instance_guid = guid.to_string() + )) + .await?, + }); + } + + Ok(StorvscManagerSavedState { storvsc_drivers }) + } + + /// Restores storvsc manager and driver states from the buffer after servicing. + pub async fn restore(&mut self, saved_state: &StorvscManagerSavedState) -> anyhow::Result<()> { + self.drivers = HashMap::new(); + for driver_state in &saved_state.storvsc_drivers { + let file = vmbus_user_channel::open_uio_device(&driver_state.instance_guid) + .map_err(InnerError::Vmbus)?; + let channel = vmbus_user_channel::channel(&self.driver_source.simple(), file) + .map_err(InnerError::Vmbus)?; + + let dma_client = self + .dma_client_spawner + .new_client(DmaClientParameters { + device_name: format!("storvsc_{}", driver_state.instance_guid), + lower_vtl_policy: LowerVtlPermissionPolicy::Any, + allocation_visibility: if self.is_isolated { + AllocationVisibility::Shared + } else { + AllocationVisibility::Private + }, + persistent_allocations: self.save_restore_supported, + }) + .map_err(InnerError::DmaClient)?; + + self.drivers.insert( + driver_state.instance_guid, + Arc::new( + StorvscDriver::restore( + &driver_state.driver_state, + &self.driver_source, + channel, + 0, + dma_client, + ) + .await?, + ), // TODO: Pick 
right VP + ); + } + Ok(()) + } +} + +pub struct StorvscDiskResolver { + manager: StorvscManagerClient, +} + +impl StorvscDiskResolver { + pub fn new(manager: StorvscManagerClient) -> Self { + Self { manager } + } +} + +#[async_trait] +impl AsyncResolveResource for StorvscDiskResolver { + type Output = ResolvedDisk; + type Error = anyhow::Error; + + async fn resolve( + &self, + _resolver: &ResourceResolver, + rsrc: StorvscDiskConfig, + _input: ResolveDiskParameters<'_>, + ) -> Result { + let disk = self + .manager + .get_driver(rsrc.instance_guid) + .await + .context("could not open storvsc disk")?; + + Ok( + ResolvedDisk::new(disk_storvsc::StorvscDisk::new(disk, rsrc.lun)) + .context("invalid disk")?, + ) + } +} + +#[derive(MeshPayload, Default)] +pub struct StorvscDiskConfig { + pub instance_guid: guid::Guid, + pub lun: u8, +} + +impl ResourceId for StorvscDiskConfig { + const ID: &'static str = "storvsc"; +} + +pub mod save_restore { + use mesh::payload::Protobuf; + use vmcore::save_restore::SavedStateRoot; + + #[derive(Protobuf, SavedStateRoot)] + #[mesh(package = "underhill")] + pub struct StorvscManagerSavedState { + #[mesh(1)] + pub storvsc_drivers: Vec, + } + + #[derive(Protobuf, Clone)] + #[mesh(package = "underhill")] + pub struct StorvscSavedDriverConfig { + #[mesh(1)] + pub instance_guid: guid::Guid, + #[mesh(2)] + pub driver_state: storvsc_driver::save_restore::StorvscDriverSavedState, + } +} diff --git a/openhcl/underhill_core/src/worker.rs b/openhcl/underhill_core/src/worker.rs index 5da13a050b..fdbdedd04a 100644 --- a/openhcl/underhill_core/src/worker.rs +++ b/openhcl/underhill_core/src/worker.rs @@ -51,6 +51,9 @@ use crate::reference_time::ReferenceTime; use crate::servicing; use crate::servicing::ServicingState; use crate::servicing::transposed::OptionServicingInitState; +use crate::storvsc_manager::StorvscDiskConfig; +use crate::storvsc_manager::StorvscDiskResolver; +use crate::storvsc_manager::StorvscManager; use 
crate::threadpool_vm_task_backend::ThreadpoolBackend; use crate::vmbus_relay_unit::VmbusRelayHandle; use crate::vmgs_logger::GetVmgsLogger; @@ -297,6 +300,8 @@ pub struct UnderhillEnvCfg { pub guest_state_encryption_policy: Option, /// Attempt to renew the AK cert pub attempt_ak_cert_callback: Option, + /// Use the user-mode storvsc driver. + pub storvsc_usermode: bool, } /// Bundle of config + runtime objects for hooking into the underhill remote @@ -1870,6 +1875,7 @@ async fn new_underhill_vm( &uevent_listener, &dps, env_cfg.nvme_vfio, + env_cfg.storvsc_usermode, is_restoring, default_io_queue_depth, ) @@ -1963,6 +1969,27 @@ async fn new_underhill_vm( None }; + let storvsc_manager = if env_cfg.storvsc_usermode { + let private_pool_available = !runtime_params.private_pool_ranges().is_empty(); + let save_restore_supported = private_pool_available; + + let manager = StorvscManager::new( + &driver_source, + save_restore_supported, + isolation.is_isolated(), + servicing_state.storvsc_state.unwrap_or(None), + dma_manager.client_spawner(), + ); + + resolver.add_async_resolver::( + StorvscDiskResolver::new(manager.client().clone()), + ); + + Some(manager) + } else { + None + }; + let initial_generation_id = match dps.general.generation_id.map(u128::from_ne_bytes) { Some(0) | None => { let mut gen_id = [0; 16]; @@ -3136,6 +3163,7 @@ async fn new_underhill_vm( uevent_listener, resolver, nvme_manager, + storvsc_manager, emuplat_servicing: EmuplatServicing { get_backed_adjust_gpa_range: emuplat_adjust_gpa_range, rtc_local_clock: rtc_time_source.0, diff --git a/openhcl/underhill_init/src/lib.rs b/openhcl/underhill_init/src/lib.rs index 0530183f90..9d991863a7 100644 --- a/openhcl/underhill_init/src/lib.rs +++ b/openhcl/underhill_init/src/lib.rs @@ -484,6 +484,11 @@ fn do_main() -> anyhow::Result<()> { // Crashdump "427b03e7-4ceb-4286-b5fc-486f4a1dd439", ), + ( + "/sys/bus/vmbus/drivers/uio_hv_generic/new_id", + // SCSI + "ba6163d9-04a1-4d29-b605-72e2ffb1dc7f", + ), ( 
"/proc/sys/kernel/core_pattern", if underhill_confidentiality::confidential_filtering_enabled() { diff --git a/vm/devices/storage/disk_storvsc/Cargo.toml b/vm/devices/storage/disk_storvsc/Cargo.toml new file mode 100644 index 0000000000..152a7cf6ae --- /dev/null +++ b/vm/devices/storage/disk_storvsc/Cargo.toml @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +[package] +name = "disk_storvsc" +edition.workspace = true +rust-version.workspace = true + +[features] +test = [] + +[dependencies] +disk_backend.workspace = true +storvsc_driver.workspace = true + +scsi_buffers.workspace = true +scsi_defs.workspace = true +vmbus_user_channel.workspace = true + +storvsp_protocol.workspace = true +inspect.workspace = true +futures.workspace = true +tracing.workspace = true +zerocopy.workspace = true + +[dev-dependencies] + +[lints] +workspace = true diff --git a/vm/devices/storage/disk_storvsc/src/lib.rs b/vm/devices/storage/disk_storvsc/src/lib.rs new file mode 100644 index 0000000000..ba722161e4 --- /dev/null +++ b/vm/devices/storage/disk_storvsc/src/lib.rs @@ -0,0 +1,532 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Disk backend implementation that uses a user-mode storvsc driver. + +#![forbid(unsafe_code)] + +use disk_backend::DiskError; +use disk_backend::DiskIo; +use disk_backend::UnmapBehavior; +use inspect::Inspect; +use scsi_defs::ScsiOp; +use scsi_defs::ScsiStatus; +use std::sync::Arc; +use std::sync::atomic::AtomicU64; +use storvsc_driver::StorvscDriver; +use storvsc_driver::StorvscErrorKind; +use vmbus_user_channel::MappedRingMem; +use zerocopy::FromZeros; +use zerocopy::IntoBytes; + +/// Maximum number of retries when a retryable failure occurs, which should really only happen +/// during servicing. Servicing shouldn't happen frequently enough to make more than one retry +/// necessary, but provide some buffer room. 
+const MAX_RETRIES: usize = 5; + +/// Disk backend using a storvsc driver to the host. +#[derive(Inspect)] +pub struct StorvscDisk { + #[inspect(skip)] + driver: Arc>, + lun: u8, + num_sectors: u64, + sector_size: u32, + disk_id: Option<[u8; 16]>, + optimal_unmap_sectors: u32, + read_only: bool, +} + +impl StorvscDisk { + /// Creates a new storvsc-backed disk that uses the provided storvsc driver. + pub fn new(driver: Arc>, lun: u8) -> Self { + let mut disk = Self { + driver, + lun, + num_sectors: 0, + sector_size: 0, + disk_id: None, + optimal_unmap_sectors: 0, + read_only: false, + }; + disk.scan_metadata(); + disk + } +} + +impl StorvscDisk { + fn scan_metadata(&mut self) { + // Allocate region for data in for READ_CAPACITY(16), MODE_SENSE(10), and INQUIRY (Block Limits VPD) + let data_in_size = size_of::() + .max(size_of::()) + .max(size_of::()); + let data_in = match self.driver.allocate_dma_buffer(data_in_size) { + Ok(buf) => buf, + Err(err) => { + tracing::error!( + error = err.to_string(), + "Unable to allocate DMA buffer to read disk metadata" + ); + return; + } + }; + + // READ_CAPACITY(16) returns number of sectors and sector size in bytes. 
+ let read_capacity16_cdb = scsi_defs::Cdb16 { + operation_code: ScsiOp::READ_CAPACITY16, + ..FromZeros::new_zeroed() + }; + match futures::executor::block_on(self.send_scsi_request( + read_capacity16_cdb.as_bytes(), + ScsiOp::READ_CAPACITY16, + data_in.base() as u64, + data_in_size, + true, + )) { + Ok(resp) => { + match resp.scsi_status { + ScsiStatus::GOOD => { + let capacity = data_in.read_obj::(0); + self.num_sectors = capacity.ex.logical_block_address.into(); + self.num_sectors += 1; // Add one to include the last sector + self.sector_size = capacity.ex.bytes_per_block.into(); + } + _ => { + tracing::error!( + scsi_status = ?resp.scsi_status, + "READ_CAPACITY16 failed" + ); + return; + } + } + } + Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "READ_CAPACITY16 failed" + ); + } + } + + // MODE_SENSE10 to get whether read-only. + let mode_sense10_cdb = scsi_defs::ModeSense10 { + operation_code: ScsiOp::MODE_SENSE10, + flags2: scsi_defs::ModeSenseFlags::new().with_page_code(scsi_defs::MODE_PAGE_CONTROL), + sub_page_code: 0, + allocation_length: (data_in_size as u16).into(), + ..FromZeros::new_zeroed() + }; + + match futures::executor::block_on(self.send_scsi_request( + mode_sense10_cdb.as_bytes(), + mode_sense10_cdb.operation_code, + 0, + 0, + false, + )) { + Ok(resp) => match resp.scsi_status { + ScsiStatus::GOOD => { + let mode_page = data_in.read_obj::(0); + self.read_only = mode_page.flags3.swp(); + } + _ => { + tracing::error!( + scsi_status = ?resp.scsi_status, + "MODE_SENSE10 failed" + ); + } + }, + Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "MODE_SENSE10 failed" + ); + } + } + + // INQUIRY for the Block Limits VPD page returns the optimal unmap sectors. 
+ let inquiry_block_limits_cdb = scsi_defs::CdbInquiry { + operation_code: ScsiOp::INQUIRY.0, + flags: scsi_defs::InquiryFlags::new().with_vpd(true), + page_code: scsi_defs::VPD_BLOCK_LIMITS, + allocation_length: (data_in_size as u16).into(), + ..FromZeros::new_zeroed() + }; + + match futures::executor::block_on(self.send_scsi_request( + inquiry_block_limits_cdb.as_bytes(), + ScsiOp::INQUIRY, + data_in.base() as u64, + data_in_size, + true, + )) { + Ok(resp) => match resp.scsi_status { + ScsiStatus::GOOD => { + let block_limits_vpd = + data_in.read_obj::(0); + self.optimal_unmap_sectors = block_limits_vpd.optimal_unmap_granularity.into(); + } + _ => { + tracing::error!( + scsi_status = ?resp.scsi_status, + "INQUIRY for Block Limits VPD failed" + ); + } + }, + Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "INQUIRY for Block Limits VPD failed" + ); + } + } + + // INQUIRY for the Device Identification VPD page returns the designator (disk ID). + self.disk_id = None; + let inquiry_device_identification_cdb = scsi_defs::CdbInquiry { + operation_code: ScsiOp::INQUIRY.0, + flags: scsi_defs::InquiryFlags::new().with_vpd(true), + page_code: scsi_defs::VPD_DEVICE_IDENTIFIERS, + allocation_length: (data_in_size as u16).into(), + ..FromZeros::new_zeroed() + }; + + match futures::executor::block_on(self.send_scsi_request( + inquiry_device_identification_cdb.as_bytes(), + ScsiOp::INQUIRY, + data_in.base() as u64, + data_in_size, + true, + )) { + Ok(resp) => match resp.scsi_status { + ScsiStatus::GOOD => { + let mut buf_pos = 0; + let vpd_header = data_in.read_obj::(0); + buf_pos += size_of::(); + while buf_pos < vpd_header.page_length as usize + 4 { + let designator_header = + data_in.read_obj::(buf_pos); + buf_pos += size_of::(); + match designator_header.identifiertype { + scsi_defs::VPD_IDENTIFIER_TYPE_FCPH_NAME => { + // Reinterpret as NAA ID designator. 
+                                let designator_naa =
+                                    data_in.read_obj::(buf_pos);
+                                let mut disk_id = [0u8; 16];
+                                disk_id[0] = designator_naa.ouid_msb;
+                                disk_id[1..3]
+                                    .copy_from_slice(designator_naa.ouid_middle.as_slice());
+                                disk_id[3] = designator_naa.ouid_lsb;
+                                disk_id[4..]
+                                    .copy_from_slice(designator_naa.vendor_specific_id.as_slice());
+                                self.disk_id = Some(disk_id);
+                                break;
+                            }
+                            _ => {
+                                buf_pos +=
+                                    designator_header.identifier_length as usize;
+                            }
+                        }
+                    }
+                }
+                _ => {
+                    tracing::error!(
+                        scsi_status = ?resp.scsi_status,
+                        "INQUIRY for Device Identification VPD failed"
+                    );
+                }
+            },
+            Err(err) => {
+                tracing::error!(
+                    error = &err as &dyn std::error::Error,
+                    "INQUIRY for Device Identification VPD failed"
+                );
+            }
+        }
+    }
+
+    fn generate_scsi_request(
+        &self,
+        data_transfer_length: u32,
+        payload: &[u8],
+        is_read: bool,
+    ) -> storvsp_protocol::ScsiRequest {
+        assert!(payload.len() <= storvsp_protocol::MAX_DATA_BUFFER_LENGTH_WITH_PADDING);
+        let data_in: u8 = if is_read { 1 } else { 0 };
+        let mut request = storvsp_protocol::ScsiRequest {
+            target_id: 0,
+            path_id: 0,
+            lun: self.lun,
+            length: storvsp_protocol::SCSI_REQUEST_LEN_V2 as u16,
+            cdb_length: payload.len() as u8,
+            data_transfer_length,
+            data_in,
+            ..FromZeros::new_zeroed()
+        };
+        request.payload[0..payload.len()].copy_from_slice(payload);
+        request
+    }
+
+    async fn send_scsi_request(
+        &self,
+        cdb: &[u8],
+        op: ScsiOp,
+        buf_gpa: u64,
+        byte_len: usize,
+        is_read: bool,
+    ) -> Result {
+        let request = self.generate_scsi_request(byte_len as u32, cdb, is_read);
+
+        let mut num_tries = 0;
+        loop {
+            match self.driver.send_request(&request, buf_gpa, byte_len).await {
+                Ok(resp) => match resp.scsi_status {
+                    ScsiStatus::GOOD => break Ok(resp), // Request succeeded, break out of loop
+                    _ => {
+                        tracing::error!(?op, scsi_status = ?resp.scsi_status, "SCSI request failed");
+                        Err(DiskError::Io(std::io::Error::other(format!(
+                            "SCSI request failed, op={:?}, scsi_status={:?}",
+                            op, resp.scsi_status
+                        ))))
+                    }
+                },
+ Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "SCSI request failed" + ); + match err.kind() { + StorvscErrorKind::CompletionError => Err(DiskError::Io( + std::io::Error::new(std::io::ErrorKind::Interrupted, err), + )), + StorvscErrorKind::Cancelled => Err(DiskError::Io(std::io::Error::new( + std::io::ErrorKind::Interrupted, + err, + ))), + StorvscErrorKind::CancelledRetry => { + if num_tries < MAX_RETRIES { + Ok(()) + } else { + break Err(DiskError::Io(std::io::Error::new( + std::io::ErrorKind::Interrupted, + err, + ))); + } + } + _ => Err(DiskError::Io(std::io::Error::other(err))), + } + } + }?; + num_tries += 1; + } + } +} + +impl DiskIo for StorvscDisk { + fn disk_type(&self) -> &str { + "storvsc" + } + + fn sector_count(&self) -> u64 { + self.num_sectors + } + + fn sector_size(&self) -> u32 { + self.sector_size + } + + fn disk_id(&self) -> Option<[u8; 16]> { + self.disk_id + } + + fn physical_sector_size(&self) -> u32 { + self.sector_size + } + + fn is_fua_respected(&self) -> bool { + true + } + + fn is_read_only(&self) -> bool { + self.read_only + } + + async fn read_vectored( + &self, + buffers: &scsi_buffers::RequestBuffers<'_>, + sector: u64, + ) -> Result<(), DiskError> { + if self.sector_size == 0 { + // Disk failed to initialize. + return Err(DiskError::IllegalBlock); + } + + if buffers.len() % self.sector_size as usize != 0 { + // Buffer length must be a multiple of sector size. + return Err(DiskError::InvalidInput); + } + + // Get LockedPages for the buffers to pass to the storvsc client. 
+        let locked_buffers = buffers
+            .guest_memory()
+            .lock_gpns(false, buffers.range().gpns())
+            .map_err(|_| DiskError::ReservationConflict)?;
+
+        let cdb = scsi_defs::Cdb16 {
+            operation_code: ScsiOp::READ16,
+            logical_block: sector.into(),
+            transfer_blocks: (buffers.len() as u32 / self.sector_size).into(),
+            ..FromZeros::new_zeroed()
+        };
+
+        self.send_scsi_request(
+            cdb.as_bytes(),
+            cdb.operation_code,
+            locked_buffers.va(),
+            buffers.len(),
+            true,
+        )
+        .await
+        .map(|_| ())
+    }
+
+    async fn write_vectored(
+        &self,
+        buffers: &scsi_buffers::RequestBuffers<'_>,
+        sector: u64,
+        fua: bool,
+    ) -> Result<(), DiskError> {
+        if self.sector_size == 0 {
+            // Disk failed to initialize.
+            return Err(DiskError::IllegalBlock);
+        }
+
+        if buffers.len() % self.sector_size as usize != 0 {
+            // Buffer length must be a multiple of sector size.
+            return Err(DiskError::InvalidInput);
+        }
+
+        // Get LockedPages for the buffers to pass to the storvsc client.
+        let locked_buffers = buffers
+            .guest_memory()
+            .lock_gpns(false, buffers.range().gpns())
+            .map_err(|_| DiskError::ReservationConflict)?;
+
+        let cdb = scsi_defs::Cdb16 {
+            operation_code: ScsiOp::WRITE16,
+            flags: scsi_defs::Cdb16Flags::new().with_fua(fua),
+            logical_block: sector.into(),
+            transfer_blocks: (buffers.len() as u32 / self.sector_size).into(),
+            ..FromZeros::new_zeroed()
+        };
+
+        self.send_scsi_request(
+            cdb.as_bytes(),
+            cdb.operation_code,
+            locked_buffers.va(),
+            buffers.len(),
+            false,
+        )
+        .await
+        .map(|_| ())
+    }
+
+    async fn sync_cache(&self) -> Result<(), DiskError> {
+        if self.sector_size == 0 {
+            // Disk failed to initialize.
+ return Err(DiskError::IllegalBlock); + } + + let cdb = scsi_defs::Cdb16 { + operation_code: ScsiOp::SYNCHRONIZE_CACHE16, + logical_block: 0.into(), + transfer_blocks: 0.into(), // 0 indicates to sync all sectors + ..FromZeros::new_zeroed() + }; + + self.send_scsi_request(cdb.as_bytes(), cdb.operation_code, 0, 0, false) + .await + .map(|_| ()) + } + + async fn eject(&self) -> Result<(), DiskError> { + let cdb = scsi_defs::StartStop { + operation_code: ScsiOp::START_STOP_UNIT, + flag: scsi_defs::StartStopFlags::new().with_load_eject(true), + ..FromZeros::new_zeroed() + }; + + self.send_scsi_request(cdb.as_bytes(), cdb.operation_code, 0, 0, false) + .await + .map(|_| ()) + } + + async fn unmap( + &self, + sector: u64, + count: u64, + _block_level_only: bool, + ) -> Result<(), DiskError> { + let cdb = scsi_defs::Unmap { + operation_code: ScsiOp::UNMAP, + allocation_length: (size_of::() as u16).into(), + ..FromZeros::new_zeroed() + }; + + let unmap_param_list = scsi_defs::UnmapListHeader { + data_length: ((size_of::() - 2 + + size_of::()) as u16) + .into(), + block_descriptor_data_length: (size_of::() as u16) + .into(), + ..FromZeros::new_zeroed() + }; + + let unmap_descriptor = scsi_defs::UnmapBlockDescriptor { + start_lba: sector.into(), + lba_count: (count as u32).into(), // TODO: what if more than 2^32? 
+ ..FromZeros::new_zeroed() + }; + + let data_out_size = + size_of::() + size_of::(); + let data_out = match self.driver.allocate_dma_buffer(data_out_size) { + Ok(buf) => buf, + Err(err) => { + tracing::error!( + error = err.to_string(), + "Unable to allocate DMA buffer for UNMAP" + ); + return Err(DiskError::Io(std::io::Error::other(err))); + } + }; + data_out.write_at(0, unmap_param_list.as_bytes()); + data_out.write_at( + size_of::(), + unmap_descriptor.as_bytes(), + ); + + self.send_scsi_request( + cdb.as_bytes(), + cdb.operation_code, + data_out.base() as u64, + data_out_size, + false, + ) + .await + .map(|_| ()) + } + + fn unmap_behavior(&self) -> UnmapBehavior { + UnmapBehavior::Unspecified + } + + fn optimal_unmap_sectors(&self) -> u32 { + self.optimal_unmap_sectors + } + + async fn wait_resize(&self, sector_count: u64) -> u64 { + // This is difficult because it cannot update the stored sector count + todo!() + } +} diff --git a/vm/devices/storage/scsi_defs/src/lib.rs b/vm/devices/storage/scsi_defs/src/lib.rs index 704942d4c4..1b805d3800 100644 --- a/vm/devices/storage/scsi_defs/src/lib.rs +++ b/vm/devices/storage/scsi_defs/src/lib.rs @@ -769,6 +769,60 @@ pub struct ModeSenseFlags { pub pc: u8, } +#[repr(C)] +#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] +pub struct ModeControlPage { + /* + UCHAR PageCode : 6; + UCHAR SPFBit : 1; + UCHAR PSBit : 1; + */ + pub page_code: u8, + pub page_length: u8, + /* + UCHAR RLECBit : 1; + UCHAR GLTSDBit : 1; + UCHAR D_SENSEBit : 1; + UCHAR Reserved1 : 1; + UCHAR TMF_ONLYBit : 1; + UCHAR TST : 3; + */ + pub flags1: u8, + /* + UCHAR Reserved2 : 1; + UCHAR QERR : 2; + UCHAR Reserved3 : 1; + UCHAR QueueAlgorithmModifier : 4; + */ + pub flags2: u8, + pub flags3: ModeControlPageFlags3, + /* + UCHAR AutoloadMode : 3; + UCHAR Reserved5 : 3; + UCHAR TASBit : 1; + UCHAR ATOBit : 1; + */ + pub flags4: u8, + pub reserved6: [u8; 2], + pub busy_timeout_period: [u8; 2], + pub extended_self_test_completion_time: [u8; 2], +} + 
+#[bitfield(u8)] +#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] +pub struct ModeControlPageFlags3 { + #[bits(3)] + pub reserved: u8, + #[bits(1)] + pub swp: bool, + #[bits(2)] + pub ua_intlck_ctrl: u8, + #[bits(1)] + pub rac: bool, + #[bits(1)] + pub vs: bool, +} + #[repr(C)] #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] pub struct ModeReadWriteRecoveryPage { diff --git a/vm/devices/storage/storvsc_driver/Cargo.toml b/vm/devices/storage/storvsc_driver/Cargo.toml index 352ff3efd4..239541ff80 100644 --- a/vm/devices/storage/storvsc_driver/Cargo.toml +++ b/vm/devices/storage/storvsc_driver/Cargo.toml @@ -21,11 +21,14 @@ storvsp_protocol.workspace = true guestmem.workspace = true inspect.workspace = true +mesh.workspace = true mesh_channel.workspace = true pal_async.workspace = true task_control.workspace = true +user_driver.workspace = true vmcore.workspace = true +anyhow.workspace = true futures.workspace = true futures-concurrency.workspace = true slab.workspace = true diff --git a/vm/devices/storage/storvsc_driver/src/lib.rs b/vm/devices/storage/storvsc_driver/src/lib.rs index e890845614..f0216f5876 100644 --- a/vm/devices/storage/storvsc_driver/src/lib.rs +++ b/vm/devices/storage/storvsc_driver/src/lib.rs @@ -9,21 +9,27 @@ pub mod test_helpers; #[cfg(not(feature = "test"))] mod test_helpers; +use crate::save_restore::StorvscDriverSavedState; use futures::FutureExt; +use futures::lock::Mutex; use futures_concurrency::future::Race; use guestmem::AccessError; use guestmem::MemoryRead; use guestmem::ranges::PagedRange; +use inspect::Inspect; use mesh_channel::Receiver; use mesh_channel::RecvError; use mesh_channel::Sender; use slab::Slab; +use std::sync::Arc; use task_control::AsyncRun; use task_control::InspectTask; use task_control::StopTask; use task_control::TaskControl; use thiserror::Error; use tracing_helpers::ErrorValueExt; +use user_driver::DmaClient; +use user_driver::memory::MemoryBlock; use vmbus_async::queue; use 
vmbus_async::queue::CompletionPacket; use vmbus_async::queue::DataPacket; @@ -43,19 +49,22 @@ use zerocopy::IntoBytes; use zerocopy::KnownLayout; /// Storvsc to provide a backend for SCSI devices over VMBus. +#[derive(Inspect)] pub struct StorvscDriver { - storvsc: TaskControl>, - version: storvsp_protocol::ProtocolVersion, - driver_source: VmTaskDriverSource, + #[inspect(skip)] // TODO: See how to inspect this + storvsc: Mutex>>, + #[inspect(skip)] new_request_sender: Option>, + #[inspect(skip)] + dma_client: Arc, } /// Storvsc backend for SCSI devices. struct Storvsc { - inner: StorvscInner, + pub(crate) inner: StorvscInner, version: storvsp_protocol::ProtocolVersion, queue: Queue, - num_sub_channels: Option, + pub(crate) num_sub_channels: Option, has_negotiated: bool, } @@ -71,8 +80,20 @@ struct StorvscRequest { completion_sender: Sender, } +/// Indicates the reason a storvsc operation was completed. +#[derive(Clone)] +pub enum StorvscCompleteReason { + /// Completion received. + CompletionReceived, + /// Cancelled due to shutdown. + Shutdown, + /// Cancelled due to save/restore. + SaveRestore, +} + /// Result of a Storvsc operation. If None, then operation was cancelled. pub struct StorvscCompletion { + reason: StorvscCompleteReason, completion: Option, } @@ -87,13 +108,17 @@ impl PendingOperation { fn complete(&mut self, result: storvsp_protocol::ScsiRequest) { self.sender.send(StorvscCompletion { + reason: StorvscCompleteReason::CompletionReceived, completion: Some(result), }) } - fn cancel(&mut self) { + fn cancel(&mut self, reason: StorvscCompleteReason) { // Sending completion with an empty result indicates cancellation or other error. - self.sender.send(StorvscCompletion { completion: None }); + self.sender.send(StorvscCompletion { + reason, + completion: None, + }); } } @@ -102,6 +127,32 @@ impl PendingOperation { #[error(transparent)] pub struct StorvscError(StorvscErrorInner); +/// The kind of storvsc error as visible from components sending requests. 
+#[derive(Debug)] +#[non_exhaustive] +pub enum StorvscErrorKind { + /// Error waiting for completion of operation. + CompletionError, + /// Pending operation cancelled. + Cancelled, + /// Pending operation cancelled, but can be retried. + CancelledRetry, + /// Another error kind not covered by the above. + Other, +} + +impl StorvscError { + /// Returns the kind of storvsc error that occurred. + pub fn kind(&self) -> StorvscErrorKind { + match self.0 { + StorvscErrorInner::CompletionError(_) => StorvscErrorKind::CompletionError, + StorvscErrorInner::Cancelled => StorvscErrorKind::Cancelled, + StorvscErrorInner::CancelledRetry => StorvscErrorKind::CancelledRetry, + _ => StorvscErrorKind::Other, + } + } +} + /// Inner errors from storvsc. #[derive(Debug, Error)] pub(crate) enum StorvscErrorInner { @@ -132,6 +183,9 @@ pub(crate) enum StorvscErrorInner { /// Operation cancelled. #[error("pending operation cancelled")] Cancelled, + /// Operation cancelled, but can be retried. + #[error("pending operation cancelled, but can be retried")] + CancelledRetry, /// Storvsc driver not fully initialized. #[error("driver not initialized")] Uninitialized, @@ -168,49 +222,113 @@ pub(crate) enum PacketError { impl StorvscDriver { /// Create a new driver instance connected to storvsp over VMBus. - pub fn new( - driver_source: &VmTaskDriverSource, - version: storvsp_protocol::ProtocolVersion, - ) -> Self { + pub fn new(dma_client: Arc) -> Self { Self { - storvsc: TaskControl::new(StorvscState), - version, - driver_source: driver_source.clone(), + storvsc: Mutex::new(TaskControl::new(StorvscState)), new_request_sender: None, + dma_client, } } /// Start Storvsc. 
pub async fn run( &mut self, + driver_source: &VmTaskDriverSource, channel: RawAsyncChannel, + version: storvsp_protocol::ProtocolVersion, target_vp: u32, ) -> Result<(), StorvscError> { - let driver = self - .driver_source + let driver = driver_source .builder() .target_vp(target_vp) .run_on_target(true) .build("storvsc"); let (new_request_sender, new_request_receiver) = mesh_channel::channel::(); - let mut storvsc = Storvsc::new(channel, self.version, new_request_receiver)?; + let mut storvsc = Storvsc::new(channel, version, new_request_receiver)?; storvsc.negotiate().await.unwrap(); self.new_request_sender = Some(new_request_sender); - self.storvsc.insert(&driver, "storvsc", storvsc); - self.storvsc.start(); + { + let mut s = self.storvsc.lock().await; + s.insert(&driver, "storvsc", storvsc); + s.start(); + } Ok(()) } /// Stop Storvsc. - pub async fn stop(&mut self) { - self.storvsc.stop().await; - self.storvsc.remove(); + pub async fn stop(&self) { + let mut s = self.storvsc.lock().await; + s.stop().await; + s.remove(); + } + + /// Saves the current state during servicing. + pub async fn save(&self) -> Result { + let mut s = self.storvsc.lock().await; + if s.stop().await { + let state = s.state_mut().unwrap(); + + // Cancel pending operations with save/restore reason. + for mut transaction in state.inner.transactions.drain() { + transaction.cancel(StorvscCompleteReason::SaveRestore); + } + + Ok(StorvscDriverSavedState { + version: state.version.major_minor, + num_sub_channels: state.num_sub_channels, + has_negotiated: state.has_negotiated, + }) + } else { + // Task was not running, so not state to save + Ok(StorvscDriverSavedState { + version: 0, + num_sub_channels: None, + has_negotiated: false, + }) + } + } + + /// Restore the state during servicing. 
+ pub async fn restore( + state: &StorvscDriverSavedState, + driver_source: &VmTaskDriverSource, + channel: RawAsyncChannel, + target_vp: u32, + dma_client: Arc, + ) -> Result { + let driver = driver_source + .builder() + .target_vp(target_vp) + .run_on_target(true) + .build("storvsc"); + let (new_request_sender, new_request_receiver) = mesh_channel::channel::(); + let storvsc = Storvsc::new( + channel, + storvsp_protocol::ProtocolVersion { + major_minor: state.version, + reserved: 0, + }, + new_request_receiver, + )?; + let storvsc_driver = Self { + storvsc: Mutex::new(TaskControl::new(StorvscState)), + new_request_sender: Some(new_request_sender), + dma_client, + }; + + { + let mut s = storvsc_driver.storvsc.lock().await; + s.insert(&driver, "storvsc", storvsc); + s.start(); + } + + Ok(storvsc_driver) } /// Send a SCSI request to storvsp over VMBus. pub async fn send_request( - &mut self, + &self, request: &storvsp_protocol::ScsiRequest, buf_gpa: u64, byte_len: usize, @@ -235,12 +353,22 @@ impl StorvscDriver { .await .map_err(|err| StorvscError(StorvscErrorInner::CompletionError(err)))?; - if resp.completion.is_some() { - Ok(resp.completion.unwrap()) - } else { - Err(StorvscError(StorvscErrorInner::Cancelled)) + match resp.reason { + StorvscCompleteReason::CompletionReceived => match resp.completion { + Some(completion) => Ok(completion), + None => Err(StorvscError(StorvscErrorInner::Cancelled)), + }, + StorvscCompleteReason::Shutdown => Err(StorvscError(StorvscErrorInner::Cancelled)), + StorvscCompleteReason::SaveRestore => { + Err(StorvscError(StorvscErrorInner::CancelledRetry)) + } } } + + /// Allocates a DMA buffer for use by clients to this driver. 
+ pub fn allocate_dma_buffer(&self, size: usize) -> Result { + self.dma_client.allocate_dma_buffer(size) + } } struct StorvscState; @@ -373,7 +501,9 @@ impl Storvsc { Err(StorvscError(StorvscErrorInner::Queue(err2))) => { if err2.is_closed_error() { // This is expected, cancel any pending completions - self.inner.cancel_pending_completions().await; + self.inner + .cancel_pending_completions(StorvscCompleteReason::Shutdown) + .await; Ok(()) } else { Err(StorvscError(StorvscErrorInner::Queue(err2))) @@ -460,9 +590,9 @@ impl StorvscInner { ) } - async fn cancel_pending_completions(&mut self) { + async fn cancel_pending_completions(&mut self, reason: StorvscCompleteReason) { for transaction in self.transactions.iter_mut() { - transaction.1.cancel(); + transaction.1.cancel(reason.clone()); } self.transactions.clear(); } @@ -730,6 +860,26 @@ fn parse_data(packet: &DataPacket<'_, T>) -> Result, + /// Whether negotiation has completed. + #[mesh(3)] + pub has_negotiated: bool, + } +} + #[cfg(test)] mod tests { use crate::test_helpers::TestStorvscWorker; diff --git a/vm/devices/storage/storvsp/src/lib.rs b/vm/devices/storage/storvsp/src/lib.rs index b66dd4a93d..3ac2ff8a2b 100644 --- a/vm/devices/storage/storvsp/src/lib.rs +++ b/vm/devices/storage/storvsp/src/lib.rs @@ -1660,6 +1660,7 @@ impl VmbusDevice for StorageDevice { interface_name: "scsi".to_owned(), instance_id: self.instance_id, interface_id: storvsp_protocol::SCSI_INTERFACE_ID, + channel_type: ChannelType::Pipe { message_mode: true }, ..Default::default() } } diff --git a/vm/devices/storage/storvsp_protocol/Cargo.toml b/vm/devices/storage/storvsp_protocol/Cargo.toml index 243afd9cba..15463ca2dd 100644 --- a/vm/devices/storage/storvsp_protocol/Cargo.toml +++ b/vm/devices/storage/storvsp_protocol/Cargo.toml @@ -15,6 +15,7 @@ arbitrary = { workspace = true, optional = true, features = ["derive"] } scsi_defs.workspace = true guid.workspace = true +inspect.workspace = true open_enum.workspace = true zerocopy.workspace = 
true [dev-dependencies] diff --git a/vm/devices/storage/storvsp_protocol/src/lib.rs b/vm/devices/storage/storvsp_protocol/src/lib.rs index 9f0cf7d0d6..2cecf67a52 100644 --- a/vm/devices/storage/storvsp_protocol/src/lib.rs +++ b/vm/devices/storage/storvsp_protocol/src/lib.rs @@ -5,6 +5,7 @@ #![forbid(unsafe_code)] use guid::Guid; +use inspect::Inspect; use open_enum::open_enum; use scsi_defs::ScsiStatus; use scsi_defs::srb::SrbStatusAndFlags; @@ -155,7 +156,7 @@ pub struct ChannelProperties { pub const STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL: u32 = 0x1; #[repr(C)] -#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)] +#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Inspect)] pub struct ProtocolVersion { // Major (MSB) and minor (LSB) version numbers. pub major_minor: u16, diff --git a/vm/vmcore/guestmem/src/lib.rs b/vm/vmcore/guestmem/src/lib.rs index 3a23e4f34b..f13959fc4e 100644 --- a/vm/vmcore/guestmem/src/lib.rs +++ b/vm/vmcore/guestmem/src/lib.rs @@ -2202,8 +2202,7 @@ impl Debug for LockedPages { } #[derive(Copy, Clone, Debug)] -// Field is read via slice transmute and pointer casts, not actually dead. -struct PagePtr(#[expect(dead_code)] *const AtomicU8); +struct PagePtr(*const AtomicU8); // SAFETY: PagePtr is just a pointer with no methods and has no inherent safety // constraints. @@ -2221,6 +2220,10 @@ impl LockedPages { // the slice. 
unsafe { std::slice::from_raw_parts(self.pages.as_ptr().cast::<&Page>(), self.pages.len()) } } + + pub fn va(&self) -> u64 { + self.pages.first().unwrap().0 as u64 + } } impl<'a> AsRef<[&'a Page]> for &'a LockedPages { From 9ea761334b1284c2437b6186c436e0b55c633c7e Mon Sep 17 00:00:00 2001 From: Eric Newberry Date: Fri, 29 Aug 2025 16:28:02 +0000 Subject: [PATCH 2/3] Protocol initialization and disk metadata works --- Cargo.lock | 2 + openhcl/underhill_core/src/get_tracing.rs | 2 +- openhcl/underhill_core/src/storvsc_manager.rs | 21 ++- openhcl/underhill_crash/src/lib.rs | 2 + .../guest_emulation_transport/src/worker.rs | 2 + .../serial/vmbus_serial_guest/src/lib.rs | 9 +- vm/devices/storage/disk_storvsc/Cargo.toml | 1 + vm/devices/storage/disk_storvsc/src/lib.rs | 148 +++++++++++++----- vm/devices/storage/scsi_defs/src/lib.rs | 6 +- vm/devices/storage/storvsc_driver/Cargo.toml | 1 + vm/devices/storage/storvsc_driver/src/lib.rs | 26 ++- .../vmbus/vmbus_user_channel/src/lib.rs | 22 ++- 12 files changed, 187 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c057c487be..bd7205363a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1379,6 +1379,7 @@ version = "0.0.0" dependencies = [ "disk_backend", "futures", + "igvm_defs", "inspect", "scsi_buffers", "scsi_defs", @@ -6943,6 +6944,7 @@ name = "storvsc_driver" version = "0.0.0" dependencies = [ "anyhow", + "cvm_tracing", "futures", "futures-concurrency", "guestmem", diff --git a/openhcl/underhill_core/src/get_tracing.rs b/openhcl/underhill_core/src/get_tracing.rs index 87d5982043..b4131bb6ab 100644 --- a/openhcl/underhill_core/src/get_tracing.rs +++ b/openhcl/underhill_core/src/get_tracing.rs @@ -78,7 +78,7 @@ pub fn init_tracing_backend(driver: impl 'static + SpawnDriver) -> anyhow::Resul legacy_openvmm_env("OPENVMM_PERF_TRACE").unwrap_or_else(|_| "off".to_owned()); let pipe = vmbus_user_channel::open_uio_device(&GET_LOG_INTERFACE_GUID) - .and_then(|dev| vmbus_user_channel::message_pipe(&driver, dev)) 
+ .and_then(|dev| vmbus_user_channel::message_pipe(&driver, dev, None, None)) .map_err(|err| { tracing::error!( CVM_ALLOWED, diff --git a/openhcl/underhill_core/src/storvsc_manager.rs b/openhcl/underhill_core/src/storvsc_manager.rs index 5472c82e71..eb78c7692d 100644 --- a/openhcl/underhill_core/src/storvsc_manager.rs +++ b/openhcl/underhill_core/src/storvsc_manager.rs @@ -36,6 +36,9 @@ use vm_resource::kind::DiskHandleKind; use vmbus_user_channel::MappedRingMem; use vmcore::vm_task::VmTaskDriverSource; +const STORVSC_IN_RING_SIZE: usize = 0x1ff000; +const STORVSC_OUT_RING_SIZE: usize = 0x1ff000; + #[derive(Debug, Error)] #[error("storvsc driver {instance_guid} error")] pub struct DriverError { @@ -256,8 +259,13 @@ impl StorvscManagerWorker { hash_map::Entry::Vacant(entry) => { let file = vmbus_user_channel::open_uio_device(&instance_guid) .map_err(InnerError::Vmbus)?; - let channel = vmbus_user_channel::channel(&self.driver_source.simple(), file) - .map_err(InnerError::Vmbus)?; + let channel = vmbus_user_channel::channel( + &self.driver_source.simple(), + file, + Some(STORVSC_IN_RING_SIZE), + Some(STORVSC_OUT_RING_SIZE), + ) + .map_err(InnerError::Vmbus)?; let dma_client = self .dma_client_spawner @@ -320,8 +328,13 @@ impl StorvscManagerWorker { for driver_state in &saved_state.storvsc_drivers { let file = vmbus_user_channel::open_uio_device(&driver_state.instance_guid) .map_err(InnerError::Vmbus)?; - let channel = vmbus_user_channel::channel(&self.driver_source.simple(), file) - .map_err(InnerError::Vmbus)?; + let channel = vmbus_user_channel::channel( + &self.driver_source.simple(), + file, + Some(STORVSC_IN_RING_SIZE), + Some(STORVSC_OUT_RING_SIZE), + ) + .map_err(InnerError::Vmbus)?; let dma_client = self .dma_client_spawner diff --git a/openhcl/underhill_crash/src/lib.rs b/openhcl/underhill_crash/src/lib.rs index 8a0e46b42a..5acc4163dd 100644 --- a/openhcl/underhill_crash/src/lib.rs +++ b/openhcl/underhill_crash/src/lib.rs @@ -316,6 +316,8 @@ pub fn main() -> 
! { let pipe = vmbus_user_channel::message_pipe( &driver, vmbus_user_channel::open_uio_device(&crash::CRASHDUMP_GUID)?, + None, + None, )?; send_dump(pipe, &mut dump_stream, &os_version, !options.no_kmsg).await?; diff --git a/vm/devices/get/guest_emulation_transport/src/worker.rs b/vm/devices/get/guest_emulation_transport/src/worker.rs index f30bdf02ca..fdf0cc191f 100644 --- a/vm/devices/get/guest_emulation_transport/src/worker.rs +++ b/vm/devices/get/guest_emulation_transport/src/worker.rs @@ -25,6 +25,8 @@ impl GuestEmulationTransportWorker { &driver, vmbus_user_channel::open_uio_device(&get_protocol::GUEST_EMULATION_INTERFACE_INSTANCE) .map_err(FatalError::OpenPipe)?, + None, + None, ) .map_err(FatalError::OpenPipe)?; GuestEmulationTransportWorker::with_pipe(driver, pipe).await diff --git a/vm/devices/serial/vmbus_serial_guest/src/lib.rs b/vm/devices/serial/vmbus_serial_guest/src/lib.rs index 7bbb17c0f6..deb4845f4d 100644 --- a/vm/devices/serial/vmbus_serial_guest/src/lib.rs +++ b/vm/devices/serial/vmbus_serial_guest/src/lib.rs @@ -103,8 +103,13 @@ mod user_pipe { rsrc: OpenVmbusSerialGuestConfig, input: ResolveSerialBackendParams<'_>, ) -> Result { - let pipe = vmbus_user_channel::message_pipe(input.driver.as_ref(), rsrc.uio_device) - .context("failed to open vmbus serial")?; + let pipe = vmbus_user_channel::message_pipe( + input.driver.as_ref(), + rsrc.uio_device, + None, + None, + ) + .context("failed to open vmbus serial")?; let driver = VmbusSerialDriver::new(pipe) .await diff --git a/vm/devices/storage/disk_storvsc/Cargo.toml b/vm/devices/storage/disk_storvsc/Cargo.toml index 152a7cf6ae..7e24181e5e 100644 --- a/vm/devices/storage/disk_storvsc/Cargo.toml +++ b/vm/devices/storage/disk_storvsc/Cargo.toml @@ -13,6 +13,7 @@ test = [] disk_backend.workspace = true storvsc_driver.workspace = true +igvm_defs.workspace = true scsi_buffers.workspace = true scsi_defs.workspace = true vmbus_user_channel.workspace = true diff --git 
a/vm/devices/storage/disk_storvsc/src/lib.rs b/vm/devices/storage/disk_storvsc/src/lib.rs index ba722161e4..6c7716fa88 100644 --- a/vm/devices/storage/disk_storvsc/src/lib.rs +++ b/vm/devices/storage/disk_storvsc/src/lib.rs @@ -8,11 +8,11 @@ use disk_backend::DiskError; use disk_backend::DiskIo; use disk_backend::UnmapBehavior; +use igvm_defs::PAGE_SIZE_4K; use inspect::Inspect; use scsi_defs::ScsiOp; use scsi_defs::ScsiStatus; use std::sync::Arc; -use std::sync::atomic::AtomicU64; use storvsc_driver::StorvscDriver; use storvsc_driver::StorvscErrorKind; use vmbus_user_channel::MappedRingMem; @@ -57,9 +57,14 @@ impl StorvscDisk { impl StorvscDisk { fn scan_metadata(&mut self) { // Allocate region for data in for READ_CAPACITY(16), MODE_SENSE(10), and INQUIRY (Block Limits VPD) - let data_in_size = size_of::() - .max(size_of::()) - .max(size_of::()); + // TODO: When we can allocate continguous pages, switch to that instead of using a single page and assert. + assert!( + size_of::() + .max(size_of::()) + .max(size_of::()) as u64 + <= PAGE_SIZE_4K + ); + let data_in_size = PAGE_SIZE_4K as usize; let data_in = match self.driver.allocate_dma_buffer(data_in_size) { Ok(buf) => buf, Err(err) => { @@ -72,14 +77,16 @@ impl StorvscDisk { }; // READ_CAPACITY(16) returns number of sectors and sector size in bytes. 
- let read_capacity16_cdb = scsi_defs::Cdb16 { + let read_capacity16_cdb = scsi_defs::ServiceActionIn16 { operation_code: ScsiOp::READ_CAPACITY16, + service_action: scsi_defs::SERVICE_ACTION_READ_CAPACITY16, + allocation_length: (data_in_size as u32).into(), ..FromZeros::new_zeroed() }; match futures::executor::block_on(self.send_scsi_request( read_capacity16_cdb.as_bytes(), - ScsiOp::READ_CAPACITY16, - data_in.base() as u64, + read_capacity16_cdb.operation_code, + data_in.pfns()[0] * PAGE_SIZE_4K, data_in_size, true, )) { @@ -94,6 +101,7 @@ impl StorvscDisk { _ => { tracing::error!( scsi_status = ?resp.scsi_status, + srb_status = ?resp.srb_status, "READ_CAPACITY16 failed" ); return; @@ -108,10 +116,10 @@ impl StorvscDisk { } } - // MODE_SENSE10 to get whether read-only. + // MODE_SENSE10 to get whether read-only. This is in the header, so it doesn't matter which page we request. let mode_sense10_cdb = scsi_defs::ModeSense10 { operation_code: ScsiOp::MODE_SENSE10, - flags2: scsi_defs::ModeSenseFlags::new().with_page_code(scsi_defs::MODE_PAGE_CONTROL), + flags2: scsi_defs::ModeSenseFlags::new().with_page_code(scsi_defs::MODE_PAGE_ALL), sub_page_code: 0, allocation_length: (data_in_size as u16).into(), ..FromZeros::new_zeroed() @@ -120,18 +128,21 @@ impl StorvscDisk { match futures::executor::block_on(self.send_scsi_request( mode_sense10_cdb.as_bytes(), mode_sense10_cdb.operation_code, - 0, - 0, - false, + data_in.pfns()[0] * PAGE_SIZE_4K, + data_in_size, + true, )) { Ok(resp) => match resp.scsi_status { ScsiStatus::GOOD => { - let mode_page = data_in.read_obj::(0); - self.read_only = mode_page.flags3.swp(); + let mode_header = data_in.read_obj::(0); + self.read_only = mode_header.device_specific_parameter + & scsi_defs::MODE_DSP_WRITE_PROTECT + != 0; } _ => { tracing::error!( scsi_status = ?resp.scsi_status, + srb_status = ?resp.srb_status, "MODE_SENSE10 failed" ); } @@ -144,39 +155,87 @@ impl StorvscDisk { } } - // INQUIRY for the Block Limits VPD page returns the 
optimal unmap sectors. - let inquiry_block_limits_cdb = scsi_defs::CdbInquiry { - operation_code: ScsiOp::INQUIRY.0, + // INQUIRY for the Supported Pages VPD page to see if Block Limits VPD is supported. + self.optimal_unmap_sectors = 0; + let inquiry_supported_pages_cdb = scsi_defs::CdbInquiry { + operation_code: ScsiOp::INQUIRY, flags: scsi_defs::InquiryFlags::new().with_vpd(true), - page_code: scsi_defs::VPD_BLOCK_LIMITS, + page_code: scsi_defs::VPD_SUPPORTED_PAGES, allocation_length: (data_in_size as u16).into(), ..FromZeros::new_zeroed() }; match futures::executor::block_on(self.send_scsi_request( - inquiry_block_limits_cdb.as_bytes(), - ScsiOp::INQUIRY, - data_in.base() as u64, + inquiry_supported_pages_cdb.as_bytes(), + inquiry_supported_pages_cdb.operation_code, + data_in.pfns()[0] * PAGE_SIZE_4K, data_in_size, true, )) { Ok(resp) => match resp.scsi_status { ScsiStatus::GOOD => { - let block_limits_vpd = - data_in.read_obj::(0); - self.optimal_unmap_sectors = block_limits_vpd.optimal_unmap_granularity.into(); + let mut buf_pos = 0; + let vpd_header = data_in.read_obj::(0); + buf_pos += size_of::(); + while buf_pos + < vpd_header.page_length as usize + size_of::() + { + if data_in.read_obj::(buf_pos) == scsi_defs::VPD_BLOCK_LIMITS { + // INQUIRY for the Block Limits VPD page returns the optimal unmap sectors. 
+ let inquiry_block_limits_cdb = scsi_defs::CdbInquiry { + operation_code: ScsiOp::INQUIRY, + flags: scsi_defs::InquiryFlags::new().with_vpd(true), + page_code: scsi_defs::VPD_BLOCK_LIMITS, + allocation_length: (data_in_size as u16).into(), + ..FromZeros::new_zeroed() + }; + + match futures::executor::block_on(self.send_scsi_request( + inquiry_block_limits_cdb.as_bytes(), + inquiry_block_limits_cdb.operation_code, + data_in.pfns()[0] * PAGE_SIZE_4K, + data_in_size, + true, + )) { + Ok(resp) => match resp.scsi_status { + ScsiStatus::GOOD => { + let block_limits_vpd = + data_in + .read_obj::(0); + self.optimal_unmap_sectors = + block_limits_vpd.optimal_unmap_granularity.into(); + } + _ => { + tracing::error!( + scsi_status = ?resp.scsi_status, + srb_status = ?resp.srb_status, + "INQUIRY for Block Limits VPD failed" + ); + } + }, + Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "INQUIRY for Block Limits VPD failed" + ); + } + } + } + buf_pos += 1; + } } _ => { tracing::error!( scsi_status = ?resp.scsi_status, - "INQUIRY for Block Limits VPD failed" + srb_status = ?resp.srb_status, + "INQUIRY for Supported Pages VPD failed" ); } }, Err(err) => { tracing::error!( error = &err as &dyn std::error::Error, - "INQUIRY for Block Limits VPD failed" + "INQUIRY for Supported Pages VPD failed" ); } } @@ -184,7 +243,7 @@ impl StorvscDisk { // INQUIRY for the Device Identification VPD page returns the designator (disk ID). 
self.disk_id = None; let inquiry_device_identification_cdb = scsi_defs::CdbInquiry { - operation_code: ScsiOp::INQUIRY.0, + operation_code: ScsiOp::INQUIRY, flags: scsi_defs::InquiryFlags::new().with_vpd(true), page_code: scsi_defs::VPD_DEVICE_IDENTIFIERS, allocation_length: (data_in_size as u16).into(), @@ -193,8 +252,8 @@ impl StorvscDisk { match futures::executor::block_on(self.send_scsi_request( inquiry_device_identification_cdb.as_bytes(), - ScsiOp::INQUIRY, - data_in.base() as u64, + inquiry_device_identification_cdb.operation_code, + data_in.pfns()[0] * PAGE_SIZE_4K, data_in_size, true, )) { @@ -232,6 +291,7 @@ impl StorvscDisk { _ => { tracing::error!( scsi_status = ?resp.scsi_status, + srb_status = ?resp.srb_status, "INQUIRY for Device Identification VPD failed" ); } @@ -243,6 +303,15 @@ impl StorvscDisk { ); } } + + tracing::info!( + num_sectors = self.num_sectors, + sector_size = self.sector_size, + read_only = self.read_only, + optimal_unmap_sectors = self.optimal_unmap_sectors, + disk_id = ?self.disk_id, + "Read storvsc disk metadata" + ); } fn generate_scsi_request( @@ -285,8 +354,8 @@ impl StorvscDisk { _ => { tracing::error!(?op, scsi_status = ?resp.scsi_status, "SCSI request failed"); Err(DiskError::Io(std::io::Error::other(format!( - "SCSI request failed, op={:?}, scsi_status={:?}", - op, resp.scsi_status + "SCSI request failed, op={:?}, scsi_status={:?}, srb_status={:?}", + op, resp.scsi_status, resp.srb_status )))) } }, @@ -384,7 +453,7 @@ impl DiskIo for StorvscDisk { cdb.operation_code, locked_buffers.va(), buffers.len(), - false, + true, ) .await .map(|_| ()) @@ -425,7 +494,7 @@ impl DiskIo for StorvscDisk { cdb.operation_code, locked_buffers.va(), buffers.len(), - true, + false, ) .await .map(|_| ()) @@ -488,8 +557,13 @@ impl DiskIo for StorvscDisk { ..FromZeros::new_zeroed() }; - let data_out_size = - size_of::() + size_of::(); + // TODO: When we can allocate continguous pages, switch to that instead of using a single page and assert. 
+ assert!( + (size_of::() + size_of::()) + as u64 + <= PAGE_SIZE_4K + ); + let data_out_size = PAGE_SIZE_4K as usize; let data_out = match self.driver.allocate_dma_buffer(data_out_size) { Ok(buf) => buf, Err(err) => { @@ -509,7 +583,7 @@ impl DiskIo for StorvscDisk { self.send_scsi_request( cdb.as_bytes(), cdb.operation_code, - data_out.base() as u64, + data_out.pfns()[0] * PAGE_SIZE_4K, data_out_size, false, ) @@ -525,7 +599,7 @@ impl DiskIo for StorvscDisk { self.optimal_unmap_sectors } - async fn wait_resize(&self, sector_count: u64) -> u64 { + async fn wait_resize(&self, _sector_count: u64) -> u64 { // This is difficult because it cannot update the stored sector count todo!() } diff --git a/vm/devices/storage/scsi_defs/src/lib.rs b/vm/devices/storage/scsi_defs/src/lib.rs index 1b805d3800..e664836127 100644 --- a/vm/devices/storage/scsi_defs/src/lib.rs +++ b/vm/devices/storage/scsi_defs/src/lib.rs @@ -235,7 +235,7 @@ pub const MEDIUM_NOT_PRESENT_TRAY_OPEN: u8 = 0x02; #[repr(C)] #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] pub struct CdbInquiry { - pub operation_code: u8, // 0x12 - SCSIOP_INQUIRY + pub operation_code: ScsiOp, // 0x12 - SCSIOP_INQUIRY pub flags: InquiryFlags, pub page_code: u8, pub allocation_length: U16BE, @@ -1107,8 +1107,8 @@ pub struct Cdb16Flags { pub struct ServiceActionIn16 { pub operation_code: ScsiOp, pub service_action: u8, - pub logical_block: [u8; 8], - pub allocation_length: [u8; 4], + pub logical_block: U64BE, + pub allocation_length: U32BE, pub flags: u8, pub control: u8, } diff --git a/vm/devices/storage/storvsc_driver/Cargo.toml b/vm/devices/storage/storvsc_driver/Cargo.toml index 239541ff80..85c351f51c 100644 --- a/vm/devices/storage/storvsc_driver/Cargo.toml +++ b/vm/devices/storage/storvsc_driver/Cargo.toml @@ -29,6 +29,7 @@ user_driver.workspace = true vmcore.workspace = true anyhow.workspace = true +cvm_tracing.workspace = true futures.workspace = true futures-concurrency.workspace = true slab.workspace = true diff 
--git a/vm/devices/storage/storvsc_driver/src/lib.rs b/vm/devices/storage/storvsc_driver/src/lib.rs index f0216f5876..3af24aaf9c 100644 --- a/vm/devices/storage/storvsc_driver/src/lib.rs +++ b/vm/devices/storage/storvsc_driver/src/lib.rs @@ -10,6 +10,7 @@ pub mod test_helpers; mod test_helpers; use crate::save_restore::StorvscDriverSavedState; +use cvm_tracing::CVM_CONFIDENTIAL; use futures::FutureExt; use futures::lock::Mutex; use futures_concurrency::future::Race; @@ -203,7 +204,7 @@ pub(crate) enum PacketError { /// Unexpected status. #[error("Unexpected status {0:?}")] UnexpectedStatus(storvsp_protocol::NtStatus), - /// Unrecognzied operation. + /// Unrecognized operation. #[error("Unrecognized operation {0:?}")] UnrecognizedOperation(storvsp_protocol::Operation), /// Invalid packet type. @@ -333,6 +334,13 @@ impl StorvscDriver { buf_gpa: u64, byte_len: usize, ) -> Result { + tracing::trace!( + CVM_CONFIDENTIAL, + buf_gpa, + byte_len, + length = request.length, + "Sending SCSI request" + ); let (sender, mut receiver) = mesh_channel::channel::(); let storvsc_request = StorvscRequest { request: *request, @@ -658,6 +666,13 @@ impl StorvscInner { transaction_id: u64, payload: &P, ) -> Result<(), StorvscError> { + tracing::trace!( + CVM_CONFIDENTIAL, + transaction_id, + ?operation, + ?status, + "Sending non-GPA Direct packet" + ); let payload_bytes = payload.as_bytes(); self.send_vmbus_packet( &mut writer.batched(), @@ -681,6 +696,15 @@ impl StorvscInner { gpa_start: u64, byte_len: usize, ) -> Result<(), StorvscError> { + tracing::trace!( + CVM_CONFIDENTIAL, + transaction_id, + ?operation, + ?status, + gpa_start, + byte_len, + "Sending GPA Direct packet" + ); let payload_bytes = payload.as_bytes(); let start_page: u64 = gpa_start / PAGE_SIZE as u64; let end_page: u64 = (gpa_start + (byte_len + PAGE_SIZE - 1) as u64) / PAGE_SIZE as u64; diff --git a/vm/devices/vmbus/vmbus_user_channel/src/lib.rs b/vm/devices/vmbus/vmbus_user_channel/src/lib.rs index 
c1a1ce1dd9..c347bac7f5 100644 --- a/vm/devices/vmbus/vmbus_user_channel/src/lib.rs +++ b/vm/devices/vmbus/vmbus_user_channel/src/lib.rs @@ -181,15 +181,19 @@ pub fn open_uio_device(instance_id: &Guid) -> Result { pub fn channel( driver: &(impl Driver + ?Sized), file: File, + in_ring_size: Option, + out_ring_size: Option, ) -> Result, Error> { - let total_mapping_size = CONTROL_SIZE + IN_RING_SIZE + CONTROL_SIZE + OUT_RING_SIZE; + let in_ring_size_len = in_ring_size.unwrap_or(IN_RING_SIZE); + let out_ring_size_len = out_ring_size.unwrap_or(OUT_RING_SIZE); + let total_mapping_size = CONTROL_SIZE + in_ring_size_len + CONTROL_SIZE + out_ring_size_len; let mapping = Arc::new(SparseMapping::new(total_mapping_size).map_err(ErrorInner::Mmap)?); // Double map the data portion of the ring buffers so that a packet spanning // the end of the ring buffer can be read linearly in VA space. let mapping_offset = 0; - let len = CONTROL_SIZE + OUT_RING_SIZE + CONTROL_SIZE + IN_RING_SIZE; + let len = CONTROL_SIZE + out_ring_size_len + CONTROL_SIZE + in_ring_size_len; mapping .map_file(mapping_offset, len, &file, 0_u64, true) @@ -206,13 +210,13 @@ pub fn channel( let out_mem = MappedRingMem { mapping: mapping.clone(), offset: 0, - len: OUT_RING_SIZE, + len: out_ring_size_len, }; let out_ring = OutgoingRing::new(out_mem).map_err(ErrorInner::Ring)?; let in_mem = MappedRingMem { mapping, - offset: CONTROL_SIZE + OUT_RING_SIZE, - len: IN_RING_SIZE, + offset: CONTROL_SIZE + out_ring_size_len, + len: in_ring_size_len, }; let in_ring = IncomingRing::new(in_mem).map_err(ErrorInner::Ring)?; @@ -281,8 +285,10 @@ impl SignalVmbusChannel for UioSignal { pub fn byte_pipe( driver: &(impl Driver + ?Sized), file: File, + in_ring_size: Option, + out_ring_size: Option, ) -> Result, Error> { - let channel = channel(driver, file)?; + let channel = channel(driver, file, in_ring_size, out_ring_size)?; let pipe = BytePipe::new(channel).map_err(ErrorInner::Pipe)?; Ok(pipe) } @@ -291,8 +297,10 @@ pub fn 
byte_pipe( pub fn message_pipe( driver: &(impl Driver + ?Sized), file: File, + in_ring_size: Option, + out_ring_size: Option, ) -> Result, Error> { - let channel = channel(driver, file)?; + let channel = channel(driver, file, in_ring_size, out_ring_size)?; let pipe = MessagePipe::new(channel).map_err(ErrorInner::Pipe)?; Ok(pipe) } From e29465a1bbf5770dd8273f8e7afb2ca84640327b Mon Sep 17 00:00:00 2001 From: Eric Newberry Date: Sat, 30 Aug 2025 00:33:09 +0000 Subject: [PATCH 3/3] Perform metadata operations on-demand and add resize notification --- Cargo.lock | 3 + vm/devices/storage/disk_storvsc/Cargo.toml | 4 +- vm/devices/storage/disk_storvsc/src/lib.rs | 551 ++++++++++-------- vm/devices/storage/storvsc_driver/Cargo.toml | 1 + vm/devices/storage/storvsc_driver/src/lib.rs | 126 +++- .../storvsc_driver/src/test_helpers.rs | 7 + 6 files changed, 423 insertions(+), 269 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bd7205363a..b5883c91f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1378,11 +1378,13 @@ name = "disk_storvsc" version = "0.0.0" dependencies = [ "disk_backend", + "event-listener", "futures", "igvm_defs", "inspect", "scsi_buffers", "scsi_defs", + "static_assertions", "storvsc_driver", "storvsp_protocol", "tracing", @@ -6945,6 +6947,7 @@ version = "0.0.0" dependencies = [ "anyhow", "cvm_tracing", + "event-listener", "futures", "futures-concurrency", "guestmem", diff --git a/vm/devices/storage/disk_storvsc/Cargo.toml b/vm/devices/storage/disk_storvsc/Cargo.toml index 7e24181e5e..c322fde585 100644 --- a/vm/devices/storage/disk_storvsc/Cargo.toml +++ b/vm/devices/storage/disk_storvsc/Cargo.toml @@ -12,15 +12,17 @@ test = [] [dependencies] disk_backend.workspace = true storvsc_driver.workspace = true +storvsp_protocol.workspace = true igvm_defs.workspace = true scsi_buffers.workspace = true scsi_defs.workspace = true vmbus_user_channel.workspace = true -storvsp_protocol.workspace = true +event-listener.workspace = true inspect.workspace = true 
futures.workspace = true +static_assertions.workspace = true tracing.workspace = true zerocopy.workspace = true diff --git a/vm/devices/storage/disk_storvsc/src/lib.rs b/vm/devices/storage/disk_storvsc/src/lib.rs index 6c7716fa88..d8c7d05672 100644 --- a/vm/devices/storage/disk_storvsc/src/lib.rs +++ b/vm/devices/storage/disk_storvsc/src/lib.rs @@ -12,6 +12,7 @@ use igvm_defs::PAGE_SIZE_4K; use inspect::Inspect; use scsi_defs::ScsiOp; use scsi_defs::ScsiStatus; +use static_assertions::const_assert; use std::sync::Arc; use storvsc_driver::StorvscDriver; use storvsc_driver::StorvscErrorKind; @@ -30,53 +31,56 @@ pub struct StorvscDisk { #[inspect(skip)] driver: Arc>, lun: u8, + #[inspect(skip)] + resize_event: Arc, +} + +#[derive(Default)] +struct DiskCapacity { num_sectors: u64, sector_size: u32, - disk_id: Option<[u8; 16]>, - optimal_unmap_sectors: u32, - read_only: bool, } impl StorvscDisk { /// Creates a new storvsc-backed disk that uses the provided storvsc driver. pub fn new(driver: Arc>, lun: u8) -> Self { - let mut disk = Self { - driver, + let disk = Self { + driver: driver.clone(), lun, - num_sectors: 0, - sector_size: 0, - disk_id: None, - optimal_unmap_sectors: 0, - read_only: false, + resize_event: Arc::new(event_listener::Event::new()), }; - disk.scan_metadata(); + match driver.add_resize_listener(lun, disk.resize_event.clone()) { + Ok(()) => {} + Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "Failed to add resize listener to storvsc driver" + ); + } + } disk } } impl StorvscDisk { - fn scan_metadata(&mut self) { - // Allocate region for data in for READ_CAPACITY(16), MODE_SENSE(10), and INQUIRY (Block Limits VPD) - // TODO: When we can allocate continguous pages, switch to that instead of using a single page and assert. 
- assert!( - size_of::() - .max(size_of::()) - .max(size_of::()) as u64 - <= PAGE_SIZE_4K - ); + fn disk_capacity(&self) -> DiskCapacity { + // Allocate region for data in for READ CAPACITY(16) + // At this time we cannot allocate contiguous pages, but this could be done without an + // assert if we could guarantee that the allocation is contiguous. + const_assert!(size_of::() as u64 <= PAGE_SIZE_4K); let data_in_size = PAGE_SIZE_4K as usize; let data_in = match self.driver.allocate_dma_buffer(data_in_size) { Ok(buf) => buf, Err(err) => { tracing::error!( error = err.to_string(), - "Unable to allocate DMA buffer to read disk metadata" + "Unable to allocate DMA buffer for READ CAPACITY(16)" ); - return; + return DiskCapacity::default(); } }; - // READ_CAPACITY(16) returns number of sectors and sector size in bytes. + // READ CAPACITY(16) returns number of sectors and sector size in bytes. let read_capacity16_cdb = scsi_defs::ServiceActionIn16 { operation_code: ScsiOp::READ_CAPACITY16, service_action: scsi_defs::SERVICE_ACTION_READ_CAPACITY16, @@ -94,224 +98,30 @@ impl StorvscDisk { match resp.scsi_status { ScsiStatus::GOOD => { let capacity = data_in.read_obj::(0); - self.num_sectors = capacity.ex.logical_block_address.into(); - self.num_sectors += 1; // Add one to include the last sector - self.sector_size = capacity.ex.bytes_per_block.into(); + let num_sectors: u64 = capacity.ex.logical_block_address.into(); + DiskCapacity { + num_sectors: num_sectors + 1, // Add one to include the last sector + sector_size: capacity.ex.bytes_per_block.into(), + } } _ => { tracing::error!( scsi_status = ?resp.scsi_status, srb_status = ?resp.srb_status, - "READ_CAPACITY16 failed" + "READ CAPACITY(16) failed" ); - return; - } - } - } - Err(err) => { - tracing::error!( - error = &err as &dyn std::error::Error, - "READ_CAPACITY16 failed" - ); - } - } - - // MODE_SENSE10 to get whether read-only. This is in the header, so it doesn't matter which page we request. 
- let mode_sense10_cdb = scsi_defs::ModeSense10 { - operation_code: ScsiOp::MODE_SENSE10, - flags2: scsi_defs::ModeSenseFlags::new().with_page_code(scsi_defs::MODE_PAGE_ALL), - sub_page_code: 0, - allocation_length: (data_in_size as u16).into(), - ..FromZeros::new_zeroed() - }; - - match futures::executor::block_on(self.send_scsi_request( - mode_sense10_cdb.as_bytes(), - mode_sense10_cdb.operation_code, - data_in.pfns()[0] * PAGE_SIZE_4K, - data_in_size, - true, - )) { - Ok(resp) => match resp.scsi_status { - ScsiStatus::GOOD => { - let mode_header = data_in.read_obj::(0); - self.read_only = mode_header.device_specific_parameter - & scsi_defs::MODE_DSP_WRITE_PROTECT - != 0; - } - _ => { - tracing::error!( - scsi_status = ?resp.scsi_status, - srb_status = ?resp.srb_status, - "MODE_SENSE10 failed" - ); - } - }, - Err(err) => { - tracing::error!( - error = &err as &dyn std::error::Error, - "MODE_SENSE10 failed" - ); - } - } - - // INQUIRY for the Supported Pages VPD page to see if Block Limits VPD is supported. - self.optimal_unmap_sectors = 0; - let inquiry_supported_pages_cdb = scsi_defs::CdbInquiry { - operation_code: ScsiOp::INQUIRY, - flags: scsi_defs::InquiryFlags::new().with_vpd(true), - page_code: scsi_defs::VPD_SUPPORTED_PAGES, - allocation_length: (data_in_size as u16).into(), - ..FromZeros::new_zeroed() - }; - - match futures::executor::block_on(self.send_scsi_request( - inquiry_supported_pages_cdb.as_bytes(), - inquiry_supported_pages_cdb.operation_code, - data_in.pfns()[0] * PAGE_SIZE_4K, - data_in_size, - true, - )) { - Ok(resp) => match resp.scsi_status { - ScsiStatus::GOOD => { - let mut buf_pos = 0; - let vpd_header = data_in.read_obj::(0); - buf_pos += size_of::(); - while buf_pos - < vpd_header.page_length as usize + size_of::() - { - if data_in.read_obj::(buf_pos) == scsi_defs::VPD_BLOCK_LIMITS { - // INQUIRY for the Block Limits VPD page returns the optimal unmap sectors. 
- let inquiry_block_limits_cdb = scsi_defs::CdbInquiry { - operation_code: ScsiOp::INQUIRY, - flags: scsi_defs::InquiryFlags::new().with_vpd(true), - page_code: scsi_defs::VPD_BLOCK_LIMITS, - allocation_length: (data_in_size as u16).into(), - ..FromZeros::new_zeroed() - }; - - match futures::executor::block_on(self.send_scsi_request( - inquiry_block_limits_cdb.as_bytes(), - inquiry_block_limits_cdb.operation_code, - data_in.pfns()[0] * PAGE_SIZE_4K, - data_in_size, - true, - )) { - Ok(resp) => match resp.scsi_status { - ScsiStatus::GOOD => { - let block_limits_vpd = - data_in - .read_obj::(0); - self.optimal_unmap_sectors = - block_limits_vpd.optimal_unmap_granularity.into(); - } - _ => { - tracing::error!( - scsi_status = ?resp.scsi_status, - srb_status = ?resp.srb_status, - "INQUIRY for Block Limits VPD failed" - ); - } - }, - Err(err) => { - tracing::error!( - error = &err as &dyn std::error::Error, - "INQUIRY for Block Limits VPD failed" - ); - } - } - } - buf_pos += 1; + DiskCapacity::default() } } - _ => { - tracing::error!( - scsi_status = ?resp.scsi_status, - srb_status = ?resp.srb_status, - "INQUIRY for Supported Pages VPD failed" - ); - } - }, - Err(err) => { - tracing::error!( - error = &err as &dyn std::error::Error, - "INQUIRY for Supported Pages VPD failed" - ); } - } - - // INQUIRY for the Device Identification VPD page returns the designator (disk ID). 
- self.disk_id = None; - let inquiry_device_identification_cdb = scsi_defs::CdbInquiry { - operation_code: ScsiOp::INQUIRY, - flags: scsi_defs::InquiryFlags::new().with_vpd(true), - page_code: scsi_defs::VPD_DEVICE_IDENTIFIERS, - allocation_length: (data_in_size as u16).into(), - ..FromZeros::new_zeroed() - }; - - match futures::executor::block_on(self.send_scsi_request( - inquiry_device_identification_cdb.as_bytes(), - inquiry_device_identification_cdb.operation_code, - data_in.pfns()[0] * PAGE_SIZE_4K, - data_in_size, - true, - )) { - Ok(resp) => match resp.scsi_status { - ScsiStatus::GOOD => { - let mut buf_pos = 0; - let vpd_header = data_in.read_obj::(0); - buf_pos += size_of::(); - while buf_pos < vpd_header.page_length as usize + 4 { - let designator_header = - data_in.read_obj::(buf_pos); - buf_pos += size_of::(); - match designator_header.identifiertype { - scsi_defs::VPD_IDENTIFIER_TYPE_FCPH_NAME => { - // Reinterpret as NAA ID designator. - let designator_naa = - data_in.read_obj::(buf_pos); - let mut disk_id = [0u8; 16]; - disk_id[0] = designator_naa.ouid_msb; - disk_id[1..3] - .copy_from_slice(designator_naa.ouid_middle.as_slice()); - disk_id[3] = designator_naa.ouid_lsb; - disk_id[4..] 
- .copy_from_slice(designator_naa.vendor_specific_id.as_slice()); - self.disk_id = Some(disk_id); - break; - } - _ => { - buf_pos += size_of::() - + designator_header.identifier_length as usize; - } - } - } - } - _ => { - tracing::error!( - scsi_status = ?resp.scsi_status, - srb_status = ?resp.srb_status, - "INQUIRY for Device Identification VPD failed" - ); - } - }, Err(err) => { tracing::error!( error = &err as &dyn std::error::Error, - "INQUIRY for Block Limits VPD failed" + "READ CAPACITY(16) failed" ); + DiskCapacity::default() } } - - tracing::info!( - num_sectors = self.num_sectors, - sector_size = self.sector_size, - read_only = self.read_only, - optimal_unmap_sectors = self.optimal_unmap_sectors, - disk_id = ?self.disk_id, - "Read storvsc disk metadata" - ); } fn generate_scsi_request( @@ -397,19 +207,102 @@ impl DiskIo for StorvscDisk { } fn sector_count(&self) -> u64 { - self.num_sectors + self.disk_capacity().num_sectors } fn sector_size(&self) -> u32 { - self.sector_size + self.disk_capacity().sector_size } fn disk_id(&self) -> Option<[u8; 16]> { - self.disk_id + // Allocate region for data in for INQUIRY (Device Identification VPD) + // At this time we cannot allocate contiguous pages, but this could be done without an + // assert if we could guarantee that the allocation is contiguous. + const_assert!( + (size_of::() + + size_of::()) as u64 + <= PAGE_SIZE_4K + ); + let data_in_size = PAGE_SIZE_4K as usize; + let data_in = match self.driver.allocate_dma_buffer(data_in_size) { + Ok(buf) => buf, + Err(err) => { + tracing::error!( + error = err.to_string(), + "Unable to allocate DMA buffer for INQUIRY" + ); + return None; + } + }; + + // INQUIRY for the Device Identification VPD page returns the designator (disk ID). 
+ let inquiry_device_identification_cdb = scsi_defs::CdbInquiry { + operation_code: ScsiOp::INQUIRY, + flags: scsi_defs::InquiryFlags::new().with_vpd(true), + page_code: scsi_defs::VPD_DEVICE_IDENTIFIERS, + allocation_length: (data_in_size as u16).into(), + ..FromZeros::new_zeroed() + }; + + let mut disk_id: Option<[u8; 16]> = None; + match futures::executor::block_on(self.send_scsi_request( + inquiry_device_identification_cdb.as_bytes(), + inquiry_device_identification_cdb.operation_code, + data_in.pfns()[0] * PAGE_SIZE_4K, + data_in_size, + true, + )) { + Ok(resp) => match resp.scsi_status { + ScsiStatus::GOOD => { + let mut buf_pos = 0; + let vpd_header = data_in.read_obj::(0); + buf_pos += size_of::(); + while buf_pos < vpd_header.page_length as usize + 4 { + let designator_header = + data_in.read_obj::(buf_pos); + buf_pos += size_of::(); + match designator_header.identifiertype { + scsi_defs::VPD_IDENTIFIER_TYPE_FCPH_NAME => { + // Reinterpret as NAA ID designator. + let designator_naa = + data_in.read_obj::(buf_pos); + let mut created_disk_id = [0u8; 16]; + created_disk_id[0] = designator_naa.ouid_msb; + created_disk_id[1..3] + .copy_from_slice(designator_naa.ouid_middle.as_slice()); + created_disk_id[3] = designator_naa.ouid_lsb; + created_disk_id[4..] 
+ .copy_from_slice(designator_naa.vendor_specific_id.as_slice()); + disk_id = Some(created_disk_id); + break; + } + _ => { + buf_pos += size_of::() + + designator_header.identifier_length as usize; + } + } + } + } + _ => { + tracing::error!( + scsi_status = ?resp.scsi_status, + srb_status = ?resp.srb_status, + "INQUIRY for Device Identification VPD failed" + ); + } + }, + Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "INQUIRY for Device Identification VPD failed" + ); + } + } + disk_id + } fn physical_sector_size(&self) -> u32 { - self.sector_size + self.disk_capacity().sector_size } fn is_fua_respected(&self) -> bool { @@ -417,7 +310,60 @@ impl DiskIo for StorvscDisk { } fn is_read_only(&self) -> bool { - self.read_only + // Allocate region for data in for MODE SENSE(10) + // At this time we cannot allocate contiguous pages, but this could be done without an + // assert if we could guarantee that the allocation is contiguous. + const_assert!(size_of::() as u64 <= PAGE_SIZE_4K); + let data_in_size = PAGE_SIZE_4K as usize; + let data_in = match self.driver.allocate_dma_buffer(data_in_size) { + Ok(buf) => buf, + Err(err) => { + tracing::error!( + error = err.to_string(), + "Unable to allocate DMA buffer for MODE SENSE(10)" + ); + return false; + } + }; + + // MODE SENSE(10) to get whether read-only. This is in the header, so it doesn't matter which page we request. 
+ let mode_sense10_cdb = scsi_defs::ModeSense10 { + operation_code: ScsiOp::MODE_SENSE10, + flags2: scsi_defs::ModeSenseFlags::new().with_page_code(scsi_defs::MODE_PAGE_ALL), + sub_page_code: 0, + allocation_length: (data_in_size as u16).into(), + ..FromZeros::new_zeroed() + }; + + match futures::executor::block_on(self.send_scsi_request( + mode_sense10_cdb.as_bytes(), + mode_sense10_cdb.operation_code, + data_in.pfns()[0] * PAGE_SIZE_4K, + data_in_size, + true, + )) { + Ok(resp) => match resp.scsi_status { + ScsiStatus::GOOD => { + let mode_header = data_in.read_obj::(0); + mode_header.device_specific_parameter & scsi_defs::MODE_DSP_WRITE_PROTECT != 0 + } + _ => { + tracing::error!( + scsi_status = ?resp.scsi_status, + srb_status = ?resp.srb_status, + "MODE SENSE(10) failed" + ); + false + } + }, + Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "MODE SENSE(10) failed" + ); + false + } + } } async fn read_vectored( @@ -425,12 +371,13 @@ impl DiskIo for StorvscDisk { buffers: &scsi_buffers::RequestBuffers<'_>, sector: u64, ) -> Result<(), DiskError> { - if self.sector_size == 0 { - // Disk failed to initialize. + let sector_size = self.disk_capacity().sector_size; + if sector_size == 0 { + // Failed to get sector size. return Err(DiskError::IllegalBlock); } - if buffers.len() % self.sector_size as usize != 0 { + if buffers.len() % sector_size as usize != 0 { // Buffer length must be a multiple of sector size. return Err(DiskError::InvalidInput); } @@ -444,7 +391,7 @@ impl DiskIo for StorvscDisk { let cdb = scsi_defs::Cdb16 { operation_code: ScsiOp::READ16, logical_block: sector.into(), - transfer_blocks: (buffers.len() as u32 / self.sector_size).into(), + transfer_blocks: (buffers.len() as u32 / sector_size).into(), ..FromZeros::new_zeroed() }; @@ -465,12 +412,13 @@ impl DiskIo for StorvscDisk { sector: u64, fua: bool, ) -> Result<(), DiskError> { - if self.sector_size == 0 { - // Disk failed to initialize. 
+ let sector_size = self.disk_capacity().sector_size; + if sector_size == 0 { + // Failed to get sector size. return Err(DiskError::IllegalBlock); } - if buffers.len() % self.sector_size as usize != 0 { + if buffers.len() % sector_size as usize != 0 { // Buffer length must be a multiple of sector size. return Err(DiskError::InvalidInput); } @@ -485,7 +433,7 @@ impl DiskIo for StorvscDisk { operation_code: ScsiOp::WRITE16, flags: scsi_defs::Cdb16Flags::new().with_fua(fua), logical_block: sector.into(), - transfer_blocks: (buffers.len() as u32 / self.sector_size).into(), + transfer_blocks: (buffers.len() as u32 / sector_size).into(), ..FromZeros::new_zeroed() }; @@ -501,11 +449,6 @@ impl DiskIo for StorvscDisk { } async fn sync_cache(&self) -> Result<(), DiskError> { - if self.sector_size == 0 { - // Disk failed to initialize. - return Err(DiskError::IllegalBlock); - } - let cdb = scsi_defs::Cdb16 { operation_code: ScsiOp::SYNCHRONIZE_CACHE16, logical_block: 0.into(), @@ -557,8 +500,9 @@ impl DiskIo for StorvscDisk { ..FromZeros::new_zeroed() }; - // TODO: When we can allocate continguous pages, switch to that instead of using a single page and assert. - assert!( + // At this time we cannot allocate contiguous pages, but this could be done without an + // assert if we could guarantee that the allocation is contiguous. + const_assert!( (size_of::() + size_of::()) as u64 <= PAGE_SIZE_4K @@ -596,11 +540,118 @@ impl DiskIo for StorvscDisk { } fn optimal_unmap_sectors(&self) -> u32 { - self.optimal_unmap_sectors + // Allocate region for data in for INQUIRY (Block Limits VPD) + // At this time we cannot allocate contiguous pages, but this could be done without an + // assert if we could guarantee that the allocation is contiguous. 
+ const_assert!(size_of::() as u64 <= PAGE_SIZE_4K); + let data_in_size = PAGE_SIZE_4K as usize; + let data_in = match self.driver.allocate_dma_buffer(data_in_size) { + Ok(buf) => buf, + Err(err) => { + tracing::error!( + error = err.to_string(), + "Unable to allocate DMA buffer for INQUIRY" + ); + return 0; + } + }; + + // INQUIRY for the Supported Pages VPD page to see if Block Limits VPD is supported. + let inquiry_supported_pages_cdb = scsi_defs::CdbInquiry { + operation_code: ScsiOp::INQUIRY, + flags: scsi_defs::InquiryFlags::new().with_vpd(true), + page_code: scsi_defs::VPD_SUPPORTED_PAGES, + allocation_length: (data_in_size as u16).into(), + ..FromZeros::new_zeroed() + }; + + let mut optimal_unmap_size: u32 = 0; + match futures::executor::block_on(self.send_scsi_request( + inquiry_supported_pages_cdb.as_bytes(), + inquiry_supported_pages_cdb.operation_code, + data_in.pfns()[0] * PAGE_SIZE_4K, + data_in_size, + true, + )) { + Ok(resp) => match resp.scsi_status { + ScsiStatus::GOOD => { + let mut buf_pos = 0; + let vpd_header = data_in.read_obj::(0); + buf_pos += size_of::(); + while buf_pos + < vpd_header.page_length as usize + size_of::() + { + if data_in.read_obj::(buf_pos) == scsi_defs::VPD_BLOCK_LIMITS { + // INQUIRY for the Block Limits VPD page returns the optimal unmap sectors. 
+ let inquiry_block_limits_cdb = scsi_defs::CdbInquiry { + operation_code: ScsiOp::INQUIRY, + flags: scsi_defs::InquiryFlags::new().with_vpd(true), + page_code: scsi_defs::VPD_BLOCK_LIMITS, + allocation_length: (data_in_size as u16).into(), + ..FromZeros::new_zeroed() + }; + + match futures::executor::block_on(self.send_scsi_request( + inquiry_block_limits_cdb.as_bytes(), + inquiry_block_limits_cdb.operation_code, + data_in.pfns()[0] * PAGE_SIZE_4K, + data_in_size, + true, + )) { + Ok(resp) => match resp.scsi_status { + ScsiStatus::GOOD => { + let block_limits_vpd = + data_in + .read_obj::(0); + optimal_unmap_size = + block_limits_vpd.optimal_unmap_granularity.into(); + } + _ => { + tracing::error!( + scsi_status = ?resp.scsi_status, + srb_status = ?resp.srb_status, + "INQUIRY for Block Limits VPD failed" + ); + } + }, + Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "INQUIRY for Block Limits VPD failed" + ); + } + } + } + buf_pos += 1; + } + } + _ => { + tracing::error!( + scsi_status = ?resp.scsi_status, + srb_status = ?resp.srb_status, + "INQUIRY for Supported Pages VPD failed" + ); + } + }, + Err(err) => { + tracing::error!( + error = &err as &dyn std::error::Error, + "INQUIRY for Supported Pages VPD failed" + ); + } + } + + optimal_unmap_size } - async fn wait_resize(&self, _sector_count: u64) -> u64 { - // This is difficult because it cannot update the stored sector count - todo!() + async fn wait_resize(&self, sector_count: u64) -> u64 { + loop { + let listen = self.resize_event.listen(); + let current = self.sector_count(); + if current != sector_count { + break current; + } + listen.await; + } } } diff --git a/vm/devices/storage/storvsc_driver/Cargo.toml b/vm/devices/storage/storvsc_driver/Cargo.toml index 85c351f51c..f22c00f40f 100644 --- a/vm/devices/storage/storvsc_driver/Cargo.toml +++ b/vm/devices/storage/storvsc_driver/Cargo.toml @@ -30,6 +30,7 @@ vmcore.workspace = true anyhow.workspace = true cvm_tracing.workspace = 
true +event-listener.workspace = true futures.workspace = true futures-concurrency.workspace = true slab.workspace = true diff --git a/vm/devices/storage/storvsc_driver/src/lib.rs b/vm/devices/storage/storvsc_driver/src/lib.rs index 3af24aaf9c..67a1eb349c 100644 --- a/vm/devices/storage/storvsc_driver/src/lib.rs +++ b/vm/devices/storage/storvsc_driver/src/lib.rs @@ -22,6 +22,7 @@ use mesh_channel::Receiver; use mesh_channel::RecvError; use mesh_channel::Sender; use slab::Slab; +use std::collections::HashMap; use std::sync::Arc; use task_control::AsyncRun; use task_control::InspectTask; @@ -57,6 +58,8 @@ pub struct StorvscDriver { #[inspect(skip)] new_request_sender: Option>, #[inspect(skip)] + add_resize_listener_sender: Option>, + #[inspect(skip)] dma_client: Arc, } @@ -71,7 +74,9 @@ struct Storvsc { struct StorvscInner { new_request_receiver: Receiver, + add_resize_listener_receiver: Receiver, transactions: Slab, + resize_listeners: HashMap>, } struct StorvscRequest { @@ -81,6 +86,11 @@ struct StorvscRequest { completion_sender: Sender, } +struct StorvscAddResizeListenerRequest { + lun: u8, + event: Arc, +} + /// Indicates the reason a storvsc operation was completed. #[derive(Clone)] pub enum StorvscCompleteReason { @@ -90,6 +100,8 @@ pub enum StorvscCompleteReason { Shutdown, /// Cancelled due to save/restore. SaveRestore, + /// Cancelled due to UNIT ATTENTION sense key. + UnitAttention, } /// Result of a Storvsc operation. If None, then operation was cancelled. 
@@ -227,6 +239,7 @@ impl StorvscDriver { Self { storvsc: Mutex::new(TaskControl::new(StorvscState)), new_request_sender: None, + add_resize_listener_sender: None, dma_client, } } @@ -245,9 +258,17 @@ impl StorvscDriver { .run_on_target(true) .build("storvsc"); let (new_request_sender, new_request_receiver) = mesh_channel::channel::(); - let mut storvsc = Storvsc::new(channel, version, new_request_receiver)?; + let (add_resize_listener_sender, add_resize_listener_receiver) = + mesh_channel::channel::(); + let mut storvsc = Storvsc::new( + channel, + version, + new_request_receiver, + add_resize_listener_receiver, + )?; storvsc.negotiate().await.unwrap(); self.new_request_sender = Some(new_request_sender); + self.add_resize_listener_sender = Some(add_resize_listener_sender); { let mut s = self.storvsc.lock().await; @@ -304,6 +325,8 @@ impl StorvscDriver { .run_on_target(true) .build("storvsc"); let (new_request_sender, new_request_receiver) = mesh_channel::channel::(); + let (add_resize_listener_sender, add_resize_listener_receiver) = + mesh_channel::channel::(); let storvsc = Storvsc::new( channel, storvsp_protocol::ProtocolVersion { @@ -311,10 +334,12 @@ impl StorvscDriver { reserved: 0, }, new_request_receiver, + add_resize_listener_receiver, )?; let storvsc_driver = Self { storvsc: Mutex::new(TaskControl::new(StorvscState)), new_request_sender: Some(new_request_sender), + add_resize_listener_sender: Some(add_resize_listener_sender), dma_client, }; @@ -370,6 +395,9 @@ impl StorvscDriver { StorvscCompleteReason::SaveRestore => { Err(StorvscError(StorvscErrorInner::CancelledRetry)) } + StorvscCompleteReason::UnitAttention => { + Err(StorvscError(StorvscErrorInner::CancelledRetry)) + } } } @@ -377,6 +405,22 @@ impl StorvscDriver { pub fn allocate_dma_buffer(&self, size: usize) -> Result { self.dma_client.allocate_dma_buffer(size) } + + /// Registers a resize listener for a disk. 
+ pub fn add_resize_listener( + &self, + lun: u8, + event: Arc, + ) -> Result<(), StorvscError> { + tracing::info!(lun, "Adding resize listener"); + match &self.add_resize_listener_sender { + Some(request_sender) => { + request_sender.send(StorvscAddResizeListenerRequest { lun, event }); + Ok(()) + } + None => Err(StorvscError(StorvscErrorInner::Uninitialized)), + } + } } struct StorvscState; @@ -416,6 +460,7 @@ impl Storvsc { channel: RawAsyncChannel, version: storvsp_protocol::ProtocolVersion, new_request_receiver: Receiver, + add_resize_listener_receiver: Receiver, ) -> Result { let queue = Queue::new(channel).map_err(|err| StorvscError(StorvscErrorInner::Queue(err)))?; @@ -423,7 +468,9 @@ impl Storvsc { Ok(Self { inner: StorvscInner { new_request_receiver, + add_resize_listener_receiver, transactions: Slab::new(), + resize_listeners: HashMap::new(), }, version, queue, @@ -526,20 +573,22 @@ impl StorvscInner { async fn process_main(&mut self, queue: &mut Queue) -> Result<(), StorvscError> { loop { enum Event<'a, M: RingMem> { - NewRequestReceived(Result), - VmbusPacketReceived(Result, queue::Error>), + NewRequest(Result), + ReceivedVmbusPacket(Result, queue::Error>), + AddResizeListener(Result), } let (mut reader, mut writer) = queue.split(); match ( - self.new_request_receiver + self.new_request_receiver.recv().map(Event::NewRequest), + reader.read().map(Event::ReceivedVmbusPacket), + self.add_resize_listener_receiver .recv() - .map(Event::NewRequestReceived), - reader.read().map(Event::VmbusPacketReceived), + .map(Event::AddResizeListener), ) .race() .await { - Event::NewRequestReceived(result) => match result { + Event::NewRequest(result) => match result { Ok(request) => { match self.send_request( &request.request, @@ -563,13 +612,27 @@ impl StorvscInner { Err(StorvscError(StorvscErrorInner::RequestError)) } }, - Event::VmbusPacketReceived(result) => match result { + Event::ReceivedVmbusPacket(result) => match result { Ok(packet_ref) => 
self.handle_packet(packet_ref.as_ref()), Err(err) => { tracing::error!("Error receiving VMBus packet, err={:?}", err); Err(StorvscError(StorvscErrorInner::Queue(err))) } }, + Event::AddResizeListener(result) => match result { + Ok(request) => { + // Replace listener if one already present for lun + self.resize_listeners.insert(request.lun, request.event); + Ok(()) + } + Err(err) => { + tracing::error!( + "Unable to receive add resize listener request, err={:?}", + err + ); + Err(StorvscError(StorvscErrorInner::RequestError)) + } + }, }?; } } @@ -628,17 +691,44 @@ impl StorvscInner { .map_err(|_err| StorvscError(StorvscErrorInner::DecodeError))? .to_owned(); - // Match completion against pending transactions - match self - .transactions - .get_mut(completion.transaction_id as usize) + // If CHECK CONDITION with sense UNIT ATTENTION, then notify any resize listeners and + // resend this request + if result.scsi_status == scsi_defs::ScsiStatus::CHECK_CONDITION + && result.srb_status.autosense_valid() + && scsi_defs::SenseData::ref_from_bytes(result.payload.as_slice()) + .map_err(|_err| StorvscError(StorvscErrorInner::DecodeError))? + .header + .sense_key + == scsi_defs::SenseKey::UNIT_ATTENTION { - Some(t) => Ok(t), - None => Err(StorvscError(StorvscErrorInner::PacketError( - PacketError::UnexpectedTransaction(completion.transaction_id), - ))), - }? - .complete(result); + if let Some(listener) = self.resize_listeners.get(&result.lun) { + listener.notify(usize::MAX); + } + + // Match completion against pending transactions + match self + .transactions + .get_mut(completion.transaction_id as usize) + { + Some(t) => Ok(t), + None => Err(StorvscError(StorvscErrorInner::PacketError( + PacketError::UnexpectedTransaction(completion.transaction_id), + ))), + }? 
+ .cancel(StorvscCompleteReason::UnitAttention); + } else { + // Match completion against pending transactions + match self + .transactions + .get_mut(completion.transaction_id as usize) + { + Some(t) => Ok(t), + None => Err(StorvscError(StorvscErrorInner::PacketError( + PacketError::UnexpectedTransaction(completion.transaction_id), + ))), + }? + .complete(result); + } Ok(()) } diff --git a/vm/devices/storage/storvsc_driver/src/test_helpers.rs b/vm/devices/storage/storvsc_driver/src/test_helpers.rs index dc59d507c5..0a04dbc8f2 100644 --- a/vm/devices/storage/storvsc_driver/src/test_helpers.rs +++ b/vm/devices/storage/storvsc_driver/src/test_helpers.rs @@ -8,6 +8,7 @@ use crate::PacketError; use crate::Storvsc; +use crate::StorvscAddResizeListenerRequest; use crate::StorvscCompletion; use crate::StorvscError; use crate::StorvscErrorInner; @@ -219,6 +220,7 @@ fn parse_storvsp_packet( pub struct TestStorvscWorker { task: TaskControl>, new_request_sender: Option>, + add_resize_listener_sender: Option>, } impl TestStorvscWorker { @@ -226,11 +228,14 @@ impl TestStorvscWorker { Self { task: TaskControl::new(StorvscState), new_request_sender: None, + add_resize_listener_sender: None, } } pub fn start(&mut self, spawner: impl Spawn, channel: RawAsyncChannel) { let (new_request_sender, new_request_receiver) = mesh_channel::channel::(); + let (add_resize_listener_sender, add_resize_listener_receiver) = + mesh_channel::channel::(); let storvsc = Storvsc::new( channel, storvsp_protocol::ProtocolVersion { @@ -238,9 +243,11 @@ impl TestStorvscWorker { reserved: 0, }, new_request_receiver, + add_resize_listener_receiver, ) .unwrap(); self.new_request_sender = Some(new_request_sender); + self.add_resize_listener_sender = Some(add_resize_listener_sender); self.task.insert(spawner, "storvsc", storvsc); self.task.start();