diff --git a/Cargo.lock b/Cargo.lock
index 5c7a583828..5b317cac72 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6570,6 +6570,7 @@ dependencies = [
 name = "nexus-sled-agent-shared"
 version = "0.1.0"
 dependencies = [
+ "chrono",
  "daft",
  "id-map",
  "illumos-utils",
diff --git a/nexus-sled-agent-shared/Cargo.toml b/nexus-sled-agent-shared/Cargo.toml
index d68dbe33e9..07d4f28861 100644
--- a/nexus-sled-agent-shared/Cargo.toml
+++ b/nexus-sled-agent-shared/Cargo.toml
@@ -7,6 +7,7 @@ edition = "2021"
 workspace = true
 
 [dependencies]
+chrono.workspace = true
 daft.workspace = true
 id-map.workspace = true
 illumos-utils.workspace = true
diff --git a/nexus-sled-agent-shared/src/inventory.rs b/nexus-sled-agent-shared/src/inventory.rs
index d3bd43f42b..4e6653ea80 100644
--- a/nexus-sled-agent-shared/src/inventory.rs
+++ b/nexus-sled-agent-shared/src/inventory.rs
@@ -4,8 +4,11 @@
 
 //! Inventory types shared between Nexus and sled-agent.
 
+use std::collections::BTreeMap;
 use std::net::{IpAddr, Ipv6Addr, SocketAddr, SocketAddrV6};
+use std::time::Duration;
 
+use chrono::{DateTime, Utc};
 use daft::Diffable;
 use id_map::IdMap;
 use id_map::IdMappable;
@@ -21,8 +24,8 @@ use omicron_common::{
     },
     zpool_name::ZpoolName,
 };
-use omicron_uuid_kinds::MupdateOverrideUuid;
 use omicron_uuid_kinds::{DatasetUuid, OmicronZoneUuid};
+use omicron_uuid_kinds::{MupdateOverrideUuid, PhysicalDiskUuid};
 use omicron_uuid_kinds::{SledUuid, ZpoolUuid};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
@@ -115,6 +118,56 @@ pub struct Inventory {
     pub omicron_physical_disks_generation: Generation,
 }
 
+/// Describes the last attempt made by the sled-agent-config-reconciler to
+/// reconcile the current sled config against the actual state of the sled.
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, JsonSchema, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub struct ConfigReconcilerInventory {
+    pub last_reconciled_config: OmicronSledConfig,
+    pub external_disks:
+        BTreeMap<PhysicalDiskUuid, ConfigReconcilerInventoryResult>,
+    pub datasets: BTreeMap<DatasetUuid, ConfigReconcilerInventoryResult>,
+    pub zones: BTreeMap<OmicronZoneUuid, ConfigReconcilerInventoryResult>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, JsonSchema, Serialize)]
+#[serde(tag = "result", rename_all = "snake_case")]
+pub enum ConfigReconcilerInventoryResult {
+    Ok,
+    Err { message: String },
+}
+
+impl From<Result<(), String>> for ConfigReconcilerInventoryResult {
+    fn from(result: Result<(), String>) -> Self {
+        match result {
+            Ok(()) => Self::Ok,
+            Err(message) => Self::Err { message },
+        }
+    }
+}
+
+/// Status of the sled-agent-config-reconciler task.
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, JsonSchema, Serialize)]
+#[serde(tag = "status", rename_all = "snake_case")]
+pub enum ConfigReconcilerInventoryStatus {
+    /// The reconciler task has not yet run for the first time since sled-agent
+    /// started.
+    NotYetRun,
+    /// The reconciler task is actively running.
+    Running {
+        config: OmicronSledConfig,
+        started_at: DateTime<Utc>,
+        running_for: Duration,
+    },
+    /// The reconciler task is currently idle, but previously did complete a
+    /// reconciliation attempt.
+    ///
+    /// This variant does not include the `OmicronSledConfig` used in the last
+    /// attempt, because that's always available via
+    /// [`ConfigReconcilerInventory::last_reconciled_config`].
+    Idle { completed_at: DateTime<Utc>, ran_for: Duration },
+}
+
 /// Describes the role of the sled within the rack.
 ///
 /// Note that this may change if the sled is physically moved
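Reviewer note on the wire format: because the new enums are internally tagged (`tag = "result"` / `tag = "status"`), both variants serialize as a single flat JSON object. A minimal, self-contained sketch, not part of the diff, with the type re-declared locally so it compiles standalone (assumes `serde` with the derive feature and `serde_json`):

```rust
use serde::Serialize;

// Stand-in copy of the enum added to inventory.rs, with the same serde
// attributes, so the snippet runs without the omicron workspace.
#[derive(Serialize)]
#[serde(tag = "result", rename_all = "snake_case")]
enum ConfigReconcilerInventoryResult {
    Ok,
    Err { message: String },
}

fn main() {
    let ok = ConfigReconcilerInventoryResult::Ok;
    let err = ConfigReconcilerInventoryResult::Err {
        message: "disk not found".to_string(),
    };
    // Prints: {"result":"ok"}
    println!("{}", serde_json::to_string(&ok).unwrap());
    // Prints: {"result":"err","message":"disk not found"}
    println!("{}", serde_json::to_string(&err).unwrap());
}
```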
diff --git a/sled-agent/config-reconciler/src/dataset_serialization_task.rs b/sled-agent/config-reconciler/src/dataset_serialization_task.rs
index ea5ecd52d8..9cc3528554 100644
--- a/sled-agent/config-reconciler/src/dataset_serialization_task.rs
+++ b/sled-agent/config-reconciler/src/dataset_serialization_task.rs
@@ -11,6 +11,7 @@
 //! using oneshot channels to send responses".
 
 use crate::CurrentlyManagedZpoolsReceiver;
+use crate::InventoryError;
 use camino::Utf8PathBuf;
 use debug_ignore::DebugIgnore;
 use futures::StreamExt;
@@ -26,6 +27,7 @@ use illumos_utils::zfs::WhichDatasets;
 use illumos_utils::zfs::Zfs;
 use illumos_utils::zpool::PathInPool;
 use illumos_utils::zpool::ZpoolOrRamdisk;
+use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryResult;
 use nexus_sled_agent_shared::inventory::InventoryDataset;
 use omicron_common::disk::DatasetConfig;
 use omicron_common::disk::DatasetKind;
@@ -34,6 +36,7 @@ use omicron_common::disk::SharedDatasetConfig;
 use omicron_common::zpool_name::ZpoolName;
 use omicron_uuid_kinds::DatasetUuid;
 use sled_storage::config::MountConfig;
+use sled_storage::dataset::CRYPT_DATASET;
 use sled_storage::dataset::U2_DEBUG_DATASET;
 use sled_storage::dataset::ZONE_DATASET;
 use sled_storage::manager::NestedDatasetConfig;
@@ -169,6 +172,25 @@ impl DatasetEnsureResult {
         })
     }
 
+    pub(crate) fn to_inventory(
+        &self,
+    ) -> BTreeMap<DatasetUuid, ConfigReconcilerInventoryResult> {
+        self.0
+            .iter()
+            .map(|dataset| match &dataset.state {
+                DatasetState::Ensured => {
+                    (dataset.config.id, ConfigReconcilerInventoryResult::Ok)
+                }
+                DatasetState::FailedToEnsure(err) => (
+                    dataset.config.id,
+                    ConfigReconcilerInventoryResult::Err {
+                        message: InlineErrorChain::new(err).to_string(),
+                    },
+                ),
+            })
+            .collect()
+    }
+
     pub(crate) fn all_mounted_debug_datasets<'a>(
         &'a self,
         mount_config: &'a MountConfig,
@@ -285,9 +307,11 @@ impl DatasetTaskHandle {
 
     pub async fn inventory(
         &self,
-        _zpools: BTreeSet<ZpoolName>,
-    ) -> Result<Vec<InventoryDataset>, DatasetTaskError> {
-        unimplemented!()
+        zpools: BTreeSet<ZpoolName>,
+    ) -> Result<Result<Vec<InventoryDataset>, InventoryError>, DatasetTaskError>
+    {
+        self.try_send_request(|tx| DatasetTaskRequest::Inventory { zpools, tx })
+            .await
     }
 
     pub async fn datasets_ensure(
@@ -398,6 +422,9 @@ impl DatasetTask {
     ) {
         // In all cases, we don't care if the receiver is gone.
         match request {
+            DatasetTaskRequest::Inventory { zpools, tx } => {
+                _ = tx.0.send(self.inventory(zpools, zfs).await);
+            }
             DatasetTaskRequest::DatasetsEnsure { datasets, tx } => {
                 self.datasets_ensure(datasets, zfs).await;
                 _ = tx.0.send(self.datasets.clone());
@@ -419,6 +446,38 @@ impl DatasetTask {
         }
     }
 
+    async fn inventory(
+        &mut self,
+        zpools: BTreeSet<ZpoolName>,
+        zfs: &T,
+    ) -> Result<Vec<InventoryDataset>, InventoryError> {
+        let datasets_of_interest = zpools
+            .iter()
+            .flat_map(|zpool| {
+                [
+                    // We care about the zpool itself, and all direct children.
+                    zpool.to_string(),
+                    // Likewise, we care about the encrypted dataset, and all
+                    // direct children.
+                    format!("{zpool}/{CRYPT_DATASET}"),
+                    // The zone dataset gives us additional context on "what
+                    // zones have datasets provisioned".
+                    format!("{zpool}/{ZONE_DATASET}"),
+                ]
+            })
+            .collect::<Vec<_>>();
+
+        let props = zfs
+            .get_dataset_properties(
+                &datasets_of_interest,
+                WhichDatasets::SelfAndChildren,
+            )
+            .await
+            .map_err(InventoryError::ListDatasetProperties)?;
+
+        Ok(props.into_iter().map(From::from).collect())
+    }
+
     async fn datasets_ensure(
         &mut self,
         config: IdMap<DatasetConfig>,
@@ -947,6 +1006,12 @@
 
 #[derive(Debug)]
 enum DatasetTaskRequest {
+    Inventory {
+        zpools: BTreeSet<ZpoolName>,
+        tx: DebugIgnore<
+            oneshot::Sender<Result<Vec<InventoryDataset>, InventoryError>>,
+        >,
+    },
     DatasetsEnsure {
         datasets: IdMap<DatasetConfig>,
         tx: DebugIgnore<oneshot::Sender<DatasetEnsureResult>>,
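Reviewer note: the new `Inventory` request follows this file's existing actor pattern, which its module comment describes as sending requests over a channel and "using oneshot channels to send responses". A simplified, self-contained sketch of that pattern with hypothetical stand-in types (`Request`, `run_task`), not the real `DatasetTaskHandle` API:

```rust
use tokio::sync::{mpsc, oneshot};

// Hypothetical request type: each request carries a oneshot sender on which
// the task replies.
enum Request {
    Inventory { reply: oneshot::Sender<Vec<String>> },
}

async fn run_task(mut rx: mpsc::Receiver<Request>) {
    while let Some(req) = rx.recv().await {
        match req {
            Request::Inventory { reply } => {
                // As in the real task, ignore a dropped receiver.
                _ = reply.send(vec!["oxp_example/crypt".to_string()]);
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(8);
    tokio::spawn(run_task(rx));

    // The handle side: send a request, then await the oneshot reply.
    let (reply_tx, reply_rx) = oneshot::channel();
    tx.send(Request::Inventory { reply: reply_tx }).await.unwrap();
    println!("{:?}", reply_rx.await.unwrap());
}
```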
+ format!("{zpool}/{ZONE_DATASET}"), + ] + }) + .collect::>(); + + let props = zfs + .get_dataset_properties( + &datasets_of_interest, + WhichDatasets::SelfAndChildren, + ) + .await + .map_err(InventoryError::ListDatasetProperties)?; + + Ok(props.into_iter().map(From::from).collect()) + } + async fn datasets_ensure( &mut self, config: IdMap, @@ -947,6 +1006,12 @@ impl DatasetTask { #[derive(Debug)] enum DatasetTaskRequest { + Inventory { + zpools: BTreeSet, + tx: DebugIgnore< + oneshot::Sender, InventoryError>>, + >, + }, DatasetsEnsure { datasets: IdMap, tx: DebugIgnore>, diff --git a/sled-agent/config-reconciler/src/handle.rs b/sled-agent/config-reconciler/src/handle.rs index 800d4ab9b9..7a3ae1ac0e 100644 --- a/sled-agent/config-reconciler/src/handle.rs +++ b/sled-agent/config-reconciler/src/handle.rs @@ -5,6 +5,8 @@ use camino::Utf8PathBuf; use illumos_utils::zpool::PathInPool; use key_manager::StorageKeyRequester; +use nexus_sled_agent_shared::inventory::ConfigReconcilerInventory; +use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryStatus; use nexus_sled_agent_shared::inventory::InventoryDataset; use nexus_sled_agent_shared::inventory::InventoryDisk; use nexus_sled_agent_shared::inventory::InventoryZpool; @@ -48,6 +50,7 @@ use crate::dataset_serialization_task::DatasetTaskHandle; use crate::dataset_serialization_task::NestedDatasetMountError; use crate::dump_setup_task; use crate::internal_disks::InternalDisksReceiver; +use crate::ledger::CurrentSledConfig; use crate::ledger::LedgerTaskHandle; use crate::raw_disks; use crate::raw_disks::RawDisksReceiver; @@ -57,6 +60,16 @@ use crate::reconciler_task::CurrentlyManagedZpools; use crate::reconciler_task::CurrentlyManagedZpoolsReceiver; use crate::reconciler_task::ReconcilerResult; +#[derive(Debug, thiserror::Error)] +pub enum InventoryError { + #[error("ledger contents not yet available")] + LedgerContentsNotAvailable, + #[error("could not contact dataset task")] + DatasetTaskError(#[from] DatasetTaskError), + #[error("could not list dataset properties")] + ListDatasetProperties(#[source] anyhow::Error), +} + #[derive(Debug, Clone, Copy)] pub enum TimeSyncConfig { // Waits for NTP to confirm that time has been synchronized. @@ -331,8 +344,55 @@ impl ConfigReconcilerHandle { } /// Collect inventory fields relevant to config reconciliation. - pub fn inventory(&self) -> ReconcilerInventory { - unimplemented!() + pub async fn inventory( + &self, + log: &Logger, + ) -> Result { + let ledgered_sled_config = match self + .ledger_task + .get() + .map(LedgerTaskHandle::current_config) + { + // If we haven't yet spawned the ledger task, or we have but + // it's still waiting on disks, we don't know whether we have a + // ledgered sled config. It's not reasonable to report `None` in + // this case (since `None` means "we don't have a config"), so + // bail out. + // + // This shouldn't happen in practice: sled-agent should both wait + // for the boot disk and spawn the reconciler task before starting + // the dropshot server that allows Nexus to collect inventory. 
diff --git a/sled-agent/config-reconciler/src/ledger.rs b/sled-agent/config-reconciler/src/ledger.rs
index 65450a09de..4c6eabd1a3 100644
--- a/sled-agent/config-reconciler/src/ledger.rs
+++ b/sled-agent/config-reconciler/src/ledger.rs
@@ -117,6 +117,7 @@ pub(crate) enum CurrentSledConfig {
 #[derive(Debug)]
 pub(crate) struct LedgerTaskHandle {
     request_tx: mpsc::Sender<LedgerTaskRequest>,
+    current_config_rx: watch::Receiver<CurrentSledConfig>,
 }
 
 impl LedgerTaskHandle {
@@ -160,7 +161,14 @@
             .run(),
         );
 
-        (Self { request_tx }, current_config_rx)
+        (
+            Self { request_tx, current_config_rx: current_config_rx.clone() },
+            current_config_rx,
+        )
+    }
+
+    pub(crate) fn current_config(&self) -> CurrentSledConfig {
+        self.current_config_rx.borrow().clone()
     }
 
     pub async fn set_new_config(
diff --git a/sled-agent/config-reconciler/src/lib.rs b/sled-agent/config-reconciler/src/lib.rs
index d5f6a67d1a..eaa1c6f5f1 100644
--- a/sled-agent/config-reconciler/src/lib.rs
+++ b/sled-agent/config-reconciler/src/lib.rs
@@ -71,6 +71,7 @@ pub use dataset_serialization_task::NestedDatasetMountError;
 pub use handle::AvailableDatasetsReceiver;
 pub use handle::ConfigReconcilerHandle;
 pub use handle::ConfigReconcilerSpawnToken;
+pub use handle::InventoryError;
 pub use handle::ReconcilerInventory;
 pub use handle::TimeSyncConfig;
 pub use internal_disks::InternalDisks;
diff --git a/sled-agent/config-reconciler/src/raw_disks.rs b/sled-agent/config-reconciler/src/raw_disks.rs
index 70470416fd..17479da0f0 100644
--- a/sled-agent/config-reconciler/src/raw_disks.rs
+++ b/sled-agent/config-reconciler/src/raw_disks.rs
@@ -7,6 +7,7 @@
 
 use id_map::IdMap;
 use id_map::IdMappable;
+use nexus_sled_agent_shared::inventory::InventoryDisk;
 use omicron_common::disk::DiskIdentity;
 use sled_storage::disk::RawDisk;
 use slog::Logger;
@@ -124,6 +125,26 @@ impl RawDisksSender {
             true
         })
     }
+
+    pub(crate) fn to_inventory(&self) -> Vec<InventoryDisk> {
+        self.0
+            .borrow()
+            .iter()
+            .map(|disk| {
+                let firmware = disk.firmware();
+                InventoryDisk {
+                    identity: disk.identity().clone(),
+                    variant: disk.variant(),
+                    slot: disk.slot(),
+                    active_firmware_slot: firmware.active_slot(),
+                    next_active_firmware_slot: firmware.next_active_slot(),
+                    number_of_firmware_slots: firmware.number_of_slots(),
+                    slot1_is_read_only: firmware.slot1_read_only(),
+                    slot_firmware_versions: firmware.slots().to_vec(),
+                }
+            })
+            .collect()
+    }
 }
 
 // Synthetic disks added by sled-agent on startup in test/dev environments are
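Reviewer note: `LedgerTaskHandle` now keeps its own clone of the watch receiver so `current_config()` can take a synchronous snapshot without awaiting, which is what lets `ConfigReconcilerHandle::inventory` read the ledgered config inline. A self-contained sketch of that pattern with a simplified stand-in config type:

```rust
use tokio::sync::watch;

// Simplified stand-in for the real CurrentSledConfig enum.
#[derive(Clone, Debug, PartialEq)]
enum CurrentSledConfig {
    WaitingForInitialConfig,
    Ledgered(String),
}

fn main() {
    let (tx, rx) = watch::channel(CurrentSledConfig::WaitingForInitialConfig);

    // The handle keeps its own clone of the receiver; `rx` itself would be
    // returned to the caller, as in `LedgerTaskHandle::spawn`.
    let handle_rx = rx.clone();
    drop(rx); // stand-in for handing the original receiver off elsewhere

    tx.send(CurrentSledConfig::Ledgered("config-v1".to_string())).unwrap();

    // `borrow()` holds a read lock, so the value is cloned out immediately,
    // exactly like `LedgerTaskHandle::current_config` does.
    let snapshot = handle_rx.borrow().clone();
    assert_eq!(snapshot, CurrentSledConfig::Ledgered("config-v1".to_string()));
}
```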
diff --git a/sled-agent/config-reconciler/src/reconciler_task.rs b/sled-agent/config-reconciler/src/reconciler_task.rs
index 51c0a2ecc3..e09c586e14 100644
--- a/sled-agent/config-reconciler/src/reconciler_task.rs
+++ b/sled-agent/config-reconciler/src/reconciler_task.rs
@@ -10,13 +10,19 @@ use either::Either;
 use futures::future;
 use illumos_utils::zpool::PathInPool;
 use key_manager::StorageKeyRequester;
+use nexus_sled_agent_shared::inventory::ConfigReconcilerInventory;
+use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryResult;
+use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryStatus;
 use nexus_sled_agent_shared::inventory::OmicronSledConfig;
+use omicron_uuid_kinds::OmicronZoneUuid;
+use omicron_uuid_kinds::PhysicalDiskUuid;
 use sled_storage::config::MountConfig;
 use sled_storage::disk::Disk;
 use slog::Logger;
 use slog::info;
 use slog::warn;
 use slog_error_chain::InlineErrorChain;
+use std::collections::BTreeMap;
 use std::collections::HashSet;
 use std::sync::Arc;
 use std::time::Duration;
@@ -78,11 +84,11 @@ pub(crate) fn spawn(
     );
 }
 
-#[derive(Debug, Clone)]
+#[derive(Debug)]
 pub(crate) struct ReconcilerResult {
     mount_config: Arc<MountConfig>,
     status: ReconcilerTaskStatus,
-    latest_result: Option<Arc<LatestReconcilerTaskResultInner>>,
+    latest_result: Option<LatestReconciliationResult>,
 }
 
 impl ReconcilerResult {
@@ -94,14 +100,14 @@ impl ReconcilerResult {
         }
     }
 
-    pub fn timesync_status(&self) -> TimeSyncStatus {
+    pub(crate) fn timesync_status(&self) -> TimeSyncStatus {
         self.latest_result
-            .as_deref()
+            .as_ref()
            .map(|inner| inner.timesync_status.clone())
             .unwrap_or(TimeSyncStatus::NotYetChecked)
     }
 
-    pub fn all_mounted_debug_datasets(
+    pub(crate) fn all_mounted_debug_datasets(
         &self,
     ) -> impl Iterator<Item = PathInPool> + '_ {
         let Some(latest_result) = &self.latest_result else {
@@ -114,7 +120,7 @@ impl ReconcilerResult {
         )
     }
 
-    pub fn all_mounted_zone_root_datasets(
+    pub(crate) fn all_mounted_zone_root_datasets(
         &self,
     ) -> impl Iterator<Item = PathInPool> + '_ {
         let Some(latest_result) = &self.latest_result else {
@@ -126,6 +132,16 @@ impl ReconcilerResult {
             .all_mounted_zone_root_datasets(&self.mount_config),
         )
     }
+
+    pub(crate) fn to_inventory(
+        &self,
+    ) -> (ConfigReconcilerInventoryStatus, Option<ConfigReconcilerInventory>)
+    {
+        let status = self.status.to_inventory();
+        let latest_result =
+            self.latest_result.as_ref().map(|r| r.to_inventory());
+        (status, latest_result)
+    }
 }
 
 #[derive(Debug, Clone)]
@@ -144,13 +160,54 @@ pub enum ReconcilerTaskStatus {
     },
 }
 
+impl ReconcilerTaskStatus {
+    fn to_inventory(&self) -> ConfigReconcilerInventoryStatus {
+        match self {
+            Self::NotYetRunning
+            | Self::WaitingForInternalDisks
+            | Self::WaitingForInitialConfig => {
+                ConfigReconcilerInventoryStatus::NotYetRun
+            }
+            Self::PerformingReconciliation {
+                config,
+                started_at_time,
+                started_at_instant,
+            } => ConfigReconcilerInventoryStatus::Running {
+                config: config.clone(),
+                started_at: *started_at_time,
+                running_for: started_at_instant.elapsed(),
+            },
+            Self::Idle { completed_at_time, ran_for } => {
+                ConfigReconcilerInventoryStatus::Idle {
+                    completed_at: *completed_at_time,
+                    ran_for: *ran_for,
+                }
+            }
+        }
+    }
+}
+
 #[derive(Debug)]
-struct LatestReconcilerTaskResultInner {
+struct LatestReconciliationResult {
     sled_config: OmicronSledConfig,
+    external_disks_inventory:
+        BTreeMap<PhysicalDiskUuid, ConfigReconcilerInventoryResult>,
     datasets: DatasetEnsureResult,
+    zones_inventory: BTreeMap<OmicronZoneUuid, ConfigReconcilerInventoryResult>,
     timesync_status: TimeSyncStatus,
 }
 
+impl LatestReconciliationResult {
+    fn to_inventory(&self) -> ConfigReconcilerInventory {
+        ConfigReconcilerInventory {
+            last_reconciled_config: self.sled_config.clone(),
+            external_disks: self.external_disks_inventory.clone(),
+            datasets: self.datasets.to_inventory(),
+            zones: self.zones_inventory.clone(),
+        }
+    }
+}
+
 struct ReconcilerTask {
     key_requester: StorageKeyRequester,
     dataset_task: DatasetTaskHandle,
@@ -415,9 +472,11 @@ impl ReconcilerTask {
             ReconciliationResult::NoRetryNeeded
         };
 
-        let inner = LatestReconcilerTaskResultInner {
+        let inner = LatestReconciliationResult {
             sled_config,
+            external_disks_inventory: self.external_disks.to_inventory(),
             datasets,
+            zones_inventory: self.zones.to_inventory(),
             timesync_status,
         };
         self.reconciler_result_tx.send_modify(|r| {
@@ -425,7 +484,7 @@ impl ReconcilerTask {
                 completed_at_time: Utc::now(),
                 ran_for: started_at_instant.elapsed(),
             };
-            r.latest_result = Some(Arc::new(inner));
+            r.latest_result = Some(inner);
         });
 
         result
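Reviewer note: `PerformingReconciliation` carries both a wall-clock `started_at_time` and a monotonic `started_at_instant`; presumably the wall-clock value is what Nexus should display as `started_at`, while the `Instant` keeps `running_for` correct even if the clock steps (for example when NTP syncs). A small sketch of that bookkeeping, with a hypothetical `RunningStatus` stand-in type:

```rust
use chrono::{DateTime, Utc};
use std::time::{Duration, Instant};

// Stand-in for the Running variant of ConfigReconcilerInventoryStatus.
struct RunningStatus {
    started_at: DateTime<Utc>,
    running_for: Duration,
}

fn main() {
    // Record both clocks at the moment reconciliation starts.
    let started_at_time: DateTime<Utc> = Utc::now();
    let started_at_instant = Instant::now();

    std::thread::sleep(Duration::from_millis(10));

    // Wall-clock start is reported as-is; elapsed time comes from the
    // monotonic clock, as in ReconcilerTaskStatus::to_inventory.
    let status = RunningStatus {
        started_at: started_at_time,
        running_for: started_at_instant.elapsed(),
    };
    println!("started {}, running for {:?}", status.started_at, status.running_for);
}
```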
diff --git a/sled-agent/config-reconciler/src/reconciler_task/external_disks.rs b/sled-agent/config-reconciler/src/reconciler_task/external_disks.rs
index 40a815a0f0..5a1be2c666 100644
--- a/sled-agent/config-reconciler/src/reconciler_task/external_disks.rs
+++ b/sled-agent/config-reconciler/src/reconciler_task/external_disks.rs
@@ -10,8 +10,11 @@
 use futures::future;
 use id_map::IdMap;
 use id_map::IdMappable;
+use illumos_utils::zpool::Zpool;
 use illumos_utils::zpool::ZpoolName;
 use key_manager::StorageKeyRequester;
+use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryResult;
+use omicron_common::api::external::ByteCount;
 use omicron_common::disk::DiskManagementError;
 use omicron_common::disk::DiskVariant;
 use omicron_common::disk::OmicronPhysicalDiskConfig;
@@ -26,6 +29,7 @@ use slog::Logger;
 use slog::info;
 use slog::warn;
 use slog_error_chain::InlineErrorChain;
+use std::collections::BTreeMap;
 use std::collections::BTreeSet;
 use std::collections::HashSet;
 use std::future::Future;
@@ -158,6 +162,56 @@ impl CurrentlyManagedZpoolsReceiver {
             }
         }
     }
+
+    // This returns a tuple that can be converted into an `InventoryZpool`. It
+    // doesn't return an `InventoryZpool` directly because the latter only
+    // contains the zpool's ID, not the full name, and our caller wants the
+    // names too.
+    pub(crate) async fn to_inventory(
+        &self,
+        log: &Logger,
+    ) -> Vec<(ZpoolName, ByteCount)> {
+        let current_zpools = self.current();
+
+        let zpool_futs =
+            current_zpools.0.iter().map(|&zpool_name| async move {
+                let info_result =
+                    Zpool::get_info(&zpool_name.to_string()).await;
+
+                (zpool_name, info_result)
+            });
+
+        future::join_all(zpool_futs)
+            .await
+            .into_iter()
+            .filter_map(|(zpool_name, info_result)| {
+                let info = match info_result {
+                    Ok(info) => info,
+                    Err(err) => {
+                        warn!(
+                            log, "Failed to access zpool info";
+                            "zpool" => %zpool_name,
+                            InlineErrorChain::new(&err),
+                        );
+                        return None;
+                    }
+                };
+                let total_size = match ByteCount::try_from(info.size()) {
+                    Ok(n) => n,
+                    Err(err) => {
+                        warn!(
+                            log, "Failed to parse zpool size";
+                            "zpool" => %zpool_name,
+                            "raw_size" => info.size(),
+                            InlineErrorChain::new(&err),
+                        );
+                        return None;
+                    }
+                };
+                Some((zpool_name, total_size))
+            })
+            .collect()
+    }
 }
 
 #[derive(Debug)]
@@ -196,6 +250,25 @@ impl ExternalDisks {
         })
     }
 
+    pub(crate) fn to_inventory(
+        &self,
+    ) -> BTreeMap<PhysicalDiskUuid, ConfigReconcilerInventoryResult> {
+        self.disks
+            .iter()
+            .map(|disk| match &disk.state {
+                DiskState::Managed(_) => {
+                    (disk.config.id, ConfigReconcilerInventoryResult::Ok)
+                }
+                DiskState::FailedToManage(err) => (
+                    disk.config.id,
+                    ConfigReconcilerInventoryResult::Err {
+                        message: InlineErrorChain::new(err).to_string(),
+                    },
+                ),
+            })
+            .collect()
+    }
+
     pub(super) fn currently_managed_zpools(
         &self,
     ) -> Arc<CurrentlyManagedZpools> {
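Reviewer note: `CurrentlyManagedZpoolsReceiver::to_inventory` fans out one query per zpool, joins them all, and drops individual failures with a warning instead of failing the whole inventory collection. A self-contained sketch of that shape with a stubbed-out query (`get_size` is hypothetical):

```rust
use futures::future;

// Hypothetical stand-in for Zpool::get_info + size extraction.
async fn get_size(zpool: &str) -> Result<u64, String> {
    if zpool.starts_with("oxp_") { Ok(1 << 40) } else { Err(format!("no such pool: {zpool}")) }
}

#[tokio::main]
async fn main() {
    let zpools = ["oxp_a", "oxp_b", "bogus"];

    // One future per zpool, each yielding (name, result).
    let futs = zpools
        .iter()
        .map(|name| async move { (name, get_size(name).await) });

    // Join them all, then keep only the successes, logging the failures.
    let sizes: Vec<_> = future::join_all(futs)
        .await
        .into_iter()
        .filter_map(|(name, res)| match res {
            Ok(size) => Some((name, size)),
            Err(err) => {
                eprintln!("skipping {name}: {err}");
                None
            }
        })
        .collect();

    assert_eq!(sizes.len(), 2);
    println!("{sizes:?}");
}
```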
diff --git a/sled-agent/config-reconciler/src/reconciler_task/zones.rs b/sled-agent/config-reconciler/src/reconciler_task/zones.rs
index ca265de244..0bf14582d7 100644
--- a/sled-agent/config-reconciler/src/reconciler_task/zones.rs
+++ b/sled-agent/config-reconciler/src/reconciler_task/zones.rs
@@ -20,6 +20,7 @@ use illumos_utils::zone::AdmError;
 use illumos_utils::zone::Api as _;
 use illumos_utils::zone::DeleteAddressError;
 use illumos_utils::zone::Zones;
+use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryResult;
 use nexus_sled_agent_shared::inventory::OmicronZoneConfig;
 use nexus_sled_agent_shared::inventory::OmicronZoneType;
 use omicron_common::address::Ipv6Subnet;
@@ -31,6 +32,7 @@ use slog::Logger;
 use slog::info;
 use slog::warn;
 use slog_error_chain::InlineErrorChain;
+use std::collections::BTreeMap;
 use std::net::IpAddr;
 use std::net::Ipv6Addr;
 use std::num::NonZeroUsize;
@@ -97,6 +99,31 @@ impl OmicronZones {
         })
     }
 
+    pub(crate) fn to_inventory(
+        &self,
+    ) -> BTreeMap<OmicronZoneUuid, ConfigReconcilerInventoryResult> {
+        self.zones
+            .iter()
+            .map(|zone| match &zone.state {
+                ZoneState::Running(_) => {
+                    (zone.config.id, ConfigReconcilerInventoryResult::Ok)
+                }
+                ZoneState::PartiallyShutDown { err, .. } => (
+                    zone.config.id,
+                    ConfigReconcilerInventoryResult::Err {
+                        message: InlineErrorChain::new(err).to_string(),
+                    },
+                ),
+                ZoneState::FailedToStart(err) => (
+                    zone.config.id,
+                    ConfigReconcilerInventoryResult::Err {
+                        message: InlineErrorChain::new(err).to_string(),
+                    },
+                ),
+            })
+            .collect()
+    }
+
     /// Attempt to shut down any zones that aren't present in `desired_zones`,
     /// or that weren't present in some prior call but which didn't succeed in
     /// shutting down and are in a partially-shut-down state.
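Reviewer note: each of the `to_inventory` helpers above (disks, datasets, zones) collapses a per-resource state machine into the shared `ConfigReconcilerInventoryResult`. For callers that already hold a plain `Result<(), String>`, the `From` impl added in inventory.rs performs the same mapping; a minimal sketch with simplified stand-in types (a `u32` in place of the real UUID key):

```rust
use std::collections::BTreeMap;

// Stand-in copy of the shared result type and its From impl from
// inventory.rs, so the snippet runs standalone.
#[derive(Debug)]
enum ConfigReconcilerInventoryResult {
    Ok,
    Err { message: String },
}

impl From<Result<(), String>> for ConfigReconcilerInventoryResult {
    fn from(result: Result<(), String>) -> Self {
        match result {
            Ok(()) => Self::Ok,
            Err(message) => Self::Err { message },
        }
    }
}

fn main() {
    // Hypothetical per-zone outcomes, keyed by a simplified zone ID.
    let zone_states: Vec<(u32, Result<(), String>)> = vec![
        (1, Ok(())),
        (2, Err("zone failed to start".to_string())),
    ];

    // The same map-and-collect shape as the real to_inventory helpers,
    // but leaning on the From impl instead of an explicit match.
    let inventory: BTreeMap<u32, ConfigReconcilerInventoryResult> = zone_states
        .into_iter()
        .map(|(id, state)| (id, state.into()))
        .collect();

    println!("{inventory:?}");
}
```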