Implement enhanced storage pool visualization
All checks were successful
Build and Release / build-and-release (push) Successful in 2m34s
- Add support for mergerfs pool grouping with data and parity disk separation
- Implement pool health monitoring (healthy/degraded/critical status)
- Create hierarchical tree view for multi-disk storage arrays
- Add automatic pool type detection and member disk association
- Maintain backward compatibility for single disk configurations
- Support future extension for RAID and ZFS pool types
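For illustration only: with a hypothetical mergerfs pool at /mnt/steampool (data disks sdb/sdd, parity sdc; all sizes and temperatures invented, StatusIcons glyphs omitted), the new tree view renders roughly like this:

/mnt/steampool (mergerfs (2+1)):
  ├─ Pool Status: (3 drives) Healthy
  ├─ Total: 85% 2176.0GB/2560.0GB
  ├─ Data Disks:
  │ ├─ sdb T: 34°C
  │ └─ sdd T: 36°C
  └─ Parity:
    └─ sdc T: 33°C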
parent 156d707377
commit f9384d9df6
Cargo.lock (generated)
@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 
 [[package]]
 name = "cm-dashboard"
-version = "0.1.97"
+version = "0.1.98"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.97"
+version = "0.1.98"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -324,7 +324,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.97"
+version = "0.1.98"
 dependencies = [
  "chrono",
  "serde",

Cargo.toml (cm-dashboard-agent)
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.98"
+version = "0.1.99"
 edition = "2021"
 
 [dependencies]

agent DiskCollector source (file path not shown in this view):
@@ -15,12 +15,45 @@ struct StoragePool {
     name: String,        // e.g., "steampool", "root"
     mount_point: String, // e.g., "/mnt/steampool", "/"
     filesystem: String,  // e.g., "mergerfs", "ext4", "zfs", "btrfs"
-    storage_type: String, // e.g., "mergerfs", "single", "raid", "zfs"
+    pool_type: StoragePoolType, // Enhanced pool type with configuration
     size: String,       // e.g., "2.5TB"
     used: String,       // e.g., "2.1TB"
     available: String,  // e.g., "400GB"
     usage_percent: f32, // e.g., 85.0
     underlying_drives: Vec<DriveInfo>, // Individual physical drives
+    pool_health: PoolHealth, // Overall pool health status
+}
+
+/// Enhanced storage pool types with specific configurations
+#[derive(Debug, Clone)]
+enum StoragePoolType {
+    Single,        // Traditional single disk
+    MergerfsPool { // MergerFS with optional parity
+        data_disks: Vec<String>,   // Member disk names (sdb, sdd)
+        parity_disks: Vec<String>, // Parity disk names (sdc)
+    },
+    #[allow(dead_code)]
+    RaidArray {        // Hardware RAID (future)
+        level: String, // "RAID1", "RAID5", etc.
+        member_disks: Vec<String>,
+        spare_disks: Vec<String>,
+    },
+    #[allow(dead_code)]
+    ZfsPool { // ZFS pool (future)
+        pool_name: String,
+        vdevs: Vec<String>,
+    }
+}
+
+/// Pool health status for redundant storage
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum PoolHealth {
+    Healthy,    // All drives OK, parity current
+    Degraded,   // One drive failed or parity outdated, still functional
+    Critical,   // Multiple failures, data at risk
+    #[allow(dead_code)]
+    Rebuilding, // Actively rebuilding/scrubbing (future: SnapRAID status integration)
+    Unknown,    // Cannot determine status
 }
 
 /// Information about an individual physical drive
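To make the variant layout concrete, here is a minimal self-contained sketch (trimmed copies of the types above; the disk names sdb/sdd/sdc follow the comments in the diff) showing how a mergerfs pool maps to the "mergerfs (2+1)" label the collector later publishes:

// Trimmed copy of the new StoragePoolType, for illustration only.
enum StoragePoolType {
    Single,
    MergerfsPool { data_disks: Vec<String>, parity_disks: Vec<String> },
}

fn main() {
    // A 2-data + 1-parity mergerfs pool, as in the commit's sdb/sdd + sdc example.
    let pool = StoragePoolType::MergerfsPool {
        data_disks: vec!["sdb".into(), "sdd".into()],
        parity_disks: vec!["sdc".into()],
    };
    // Same "name (data+parity)" label the collector emits as disk_*_pool_type.
    let label = match &pool {
        StoragePoolType::Single => "single".to_string(),
        StoragePoolType::MergerfsPool { data_disks, parity_disks } => {
            format!("mergerfs ({}+{})", data_disks.len(), parity_disks.len())
        }
    };
    assert_eq!(label, "mergerfs (2+1)");
}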
@@ -75,12 +108,39 @@ impl DiskCollector {
     /// Get configured storage pools with individual drive information
     fn get_configured_storage_pools(&self) -> Result<Vec<StoragePool>> {
         let mut storage_pools = Vec::new();
+        let mut processed_pools = std::collections::HashSet::new();
+
+        // First pass: Create enhanced pools (mergerfs, etc.)
         for fs_config in &self.config.filesystems {
             if !fs_config.monitor {
                 continue;
             }
+
+            let (pool_type, skip_in_single_mode) = self.determine_pool_type(&fs_config.storage_type);
+
+            // Skip member disks if they're part of a pool
+            if skip_in_single_mode {
+                continue;
+            }
+
+            // Check if this pool was already processed (in case of multiple member disks)
+            let pool_key = match &pool_type {
+                StoragePoolType::MergerfsPool { .. } => {
+                    // For mergerfs pools, use the main mount point
+                    if fs_config.fs_type == "fuse.mergerfs" {
+                        fs_config.mount_point.clone()
+                    } else {
+                        continue; // Skip member disks
+                    }
+                }
+                _ => fs_config.mount_point.clone()
+            };
+
+            if processed_pools.contains(&pool_key) {
+                continue;
+            }
+            processed_pools.insert(pool_key.clone());
+
             // Get filesystem stats for the mount point
             match self.get_filesystem_info(&fs_config.mount_point) {
                 Ok((total_bytes, used_bytes)) => {
@@ -96,25 +156,29 @@ impl DiskCollector {
                     let used = self.bytes_to_human_readable(used_bytes);
                     let available = self.bytes_to_human_readable(available_bytes);
 
-                    // Get individual drive information using pre-detected devices
-                    let device_names = self.detected_devices.get(&fs_config.mount_point).cloned().unwrap_or_default();
-                    let underlying_drives = self.get_drive_info_for_devices(&device_names)?;
+                    // Get underlying drives based on pool type
+                    let underlying_drives = self.get_pool_drives(&pool_type, &fs_config.mount_point)?;
+
+                    // Calculate pool health
+                    let pool_health = self.calculate_pool_health(&pool_type, &underlying_drives);
+                    let drive_count = underlying_drives.len();
 
                     storage_pools.push(StoragePool {
                         name: fs_config.name.clone(),
                         mount_point: fs_config.mount_point.clone(),
                         filesystem: fs_config.fs_type.clone(),
-                        storage_type: fs_config.storage_type.clone(),
+                        pool_type: pool_type.clone(),
                         size,
                         used,
                         available,
                         usage_percent: usage_percent as f32,
                         underlying_drives,
+                        pool_health,
                     });
 
                     debug!(
-                        "Storage pool '{}' ({}) at {} with {} detected drives",
-                        fs_config.name, fs_config.storage_type, fs_config.mount_point, device_names.len()
+                        "Storage pool '{}' ({:?}) at {} with {} drives, health: {:?}",
+                        fs_config.name, pool_type, fs_config.mount_point, drive_count, pool_health
                     );
                 }
                 Err(e) => {
@@ -129,6 +193,123 @@ impl DiskCollector {
         Ok(storage_pools)
     }
 
+    /// Determine the storage pool type from configuration
+    fn determine_pool_type(&self, storage_type: &str) -> (StoragePoolType, bool) {
+        match storage_type {
+            "single" => (StoragePoolType::Single, false),
+            "mergerfs_pool" | "mergerfs" => {
+                // Find associated member disks
+                let data_disks = self.find_pool_member_disks("mergerfs_member");
+                let parity_disks = self.find_pool_member_disks("parity");
+                (StoragePoolType::MergerfsPool { data_disks, parity_disks }, false)
+            }
+            "mergerfs_member" => (StoragePoolType::Single, true), // Skip, part of pool
+            "parity" => (StoragePoolType::Single, true),          // Skip, part of pool
+            "raid1" | "raid5" | "raid6" => {
+                let member_disks = self.find_pool_member_disks(&format!("{}_member", storage_type));
+                (StoragePoolType::RaidArray {
+                    level: storage_type.to_uppercase(),
+                    member_disks,
+                    spare_disks: Vec::new()
+                }, false)
+            }
+            _ => (StoragePoolType::Single, false) // Default to single
+        }
+    }
+
+    /// Find member disks for a specific storage type
+    fn find_pool_member_disks(&self, member_type: &str) -> Vec<String> {
+        let mut member_disks = Vec::new();
+
+        for fs_config in &self.config.filesystems {
+            if fs_config.storage_type == member_type && fs_config.monitor {
+                // Get device names for this mount point
+                if let Some(devices) = self.detected_devices.get(&fs_config.mount_point) {
+                    member_disks.extend(devices.clone());
+                }
+            }
+        }
+
+        member_disks
+    }
+
+    /// Get drive information for a specific pool type
+    fn get_pool_drives(&self, pool_type: &StoragePoolType, mount_point: &str) -> Result<Vec<DriveInfo>> {
+        match pool_type {
+            StoragePoolType::Single => {
+                // Single disk - use detected devices for this mount point
+                let device_names = self.detected_devices.get(mount_point).cloned().unwrap_or_default();
+                self.get_drive_info_for_devices(&device_names)
+            }
+            StoragePoolType::MergerfsPool { data_disks, parity_disks } => {
+                // Mergerfs pool - collect all member drives
+                let mut all_disks = data_disks.clone();
+                all_disks.extend(parity_disks.clone());
+                self.get_drive_info_for_devices(&all_disks)
+            }
+            StoragePoolType::RaidArray { member_disks, spare_disks, .. } => {
+                // RAID array - collect member and spare drives
+                let mut all_disks = member_disks.clone();
+                all_disks.extend(spare_disks.clone());
+                self.get_drive_info_for_devices(&all_disks)
+            }
+            StoragePoolType::ZfsPool { .. } => {
+                // ZFS pool - use detected devices (future implementation)
+                let device_names = self.detected_devices.get(mount_point).cloned().unwrap_or_default();
+                self.get_drive_info_for_devices(&device_names)
+            }
+        }
+    }
+
+    /// Calculate pool health based on drive status and pool type
+    fn calculate_pool_health(&self, pool_type: &StoragePoolType, drives: &[DriveInfo]) -> PoolHealth {
+        match pool_type {
+            StoragePoolType::Single => {
+                // Single disk - health is just the drive health
+                if drives.is_empty() {
+                    PoolHealth::Unknown
+                } else if drives.iter().all(|d| d.health_status == "PASSED") {
+                    PoolHealth::Healthy
+                } else {
+                    PoolHealth::Critical
+                }
+            }
+            StoragePoolType::MergerfsPool { data_disks, parity_disks } => {
+                let failed_data = drives.iter()
+                    .filter(|d| data_disks.contains(&d.device) && d.health_status != "PASSED")
+                    .count();
+                let failed_parity = drives.iter()
+                    .filter(|d| parity_disks.contains(&d.device) && d.health_status != "PASSED")
+                    .count();
+
+                match (failed_data, failed_parity) {
+                    (0, 0) => PoolHealth::Healthy,
+                    (1, 0) => PoolHealth::Degraded, // Can recover with parity
+                    (0, 1) => PoolHealth::Degraded, // Lost parity protection
+                    _ => PoolHealth::Critical,      // Multiple failures
+                }
+            }
+            StoragePoolType::RaidArray { level, .. } => {
+                let failed_drives = drives.iter().filter(|d| d.health_status != "PASSED").count();
+
+                // Basic RAID health logic (can be enhanced per RAID level)
+                match failed_drives {
+                    0 => PoolHealth::Healthy,
+                    1 if level.contains('1') || level.contains('5') || level.contains('6') => PoolHealth::Degraded,
+                    _ => PoolHealth::Critical,
+                }
+            }
+            StoragePoolType::ZfsPool { .. } => {
+                // ZFS health would require zpool status parsing (future)
+                if drives.iter().all(|d| d.health_status == "PASSED") {
+                    PoolHealth::Healthy
+                } else {
+                    PoolHealth::Degraded
+                }
+            }
+        }
+    }
+
     /// Get drive information for a list of device names
     fn get_drive_info_for_devices(&self, device_names: &[String]) -> Result<Vec<DriveInfo>> {
         let mut drives = Vec::new();
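The mergerfs arm of calculate_pool_health reduces to a small decision table over (failed data disks, failed parity disks). A standalone re-statement for illustration, with a trimmed local PoolHealth copy:

// Trimmed copy of the agent's PoolHealth enum, for illustration only.
#[derive(Debug, PartialEq)]
enum PoolHealth { Healthy, Degraded, Critical }

// One failed data disk is recoverable from parity; one failed parity
// disk loses protection but no data; anything more is critical.
fn mergerfs_health(failed_data: usize, failed_parity: usize) -> PoolHealth {
    match (failed_data, failed_parity) {
        (0, 0) => PoolHealth::Healthy,
        (1, 0) | (0, 1) => PoolHealth::Degraded,
        _ => PoolHealth::Critical,
    }
}

fn main() {
    assert_eq!(mergerfs_health(0, 0), PoolHealth::Healthy);
    assert_eq!(mergerfs_health(1, 0), PoolHealth::Degraded); // rebuildable from parity
    assert_eq!(mergerfs_health(1, 1), PoolHealth::Critical); // data at risk
}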
@@ -448,8 +629,8 @@ impl Collector for DiskCollector {
         let used_gb = self.parse_size_to_gb(&storage_pool.used);
         let avail_gb = self.parse_size_to_gb(&storage_pool.available);
 
-        // Calculate status based on configured thresholds
-        let pool_status = if storage_pool.usage_percent >= self.config.usage_critical_percent {
+        // Calculate status based on configured thresholds and pool health
+        let usage_status = if storage_pool.usage_percent >= self.config.usage_critical_percent {
             Status::Critical
         } else if storage_pool.usage_percent >= self.config.usage_warning_percent {
             Status::Warning
@@ -457,6 +638,14 @@ impl Collector for DiskCollector {
             Status::Ok
         };
 
+        let pool_status = match storage_pool.pool_health {
+            PoolHealth::Critical => Status::Critical,
+            PoolHealth::Degraded => Status::Warning,
+            PoolHealth::Rebuilding => Status::Warning,
+            PoolHealth::Healthy => usage_status,
+            PoolHealth::Unknown => Status::Unknown,
+        };
+
         // Storage pool info metrics
         metrics.push(Metric {
             name: format!("disk_{}_mount_point", pool_name),
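The net effect of this match: pool health takes precedence over the usage thresholds, and usage only decides the status when the pool is healthy. A minimal sketch with trimmed local types:

// Trimmed local copies of Status and PoolHealth, for illustration only.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Status { Ok, Warning, Critical, Unknown }
#[derive(Clone, Copy)]
enum PoolHealth { Healthy, Degraded, Critical, Rebuilding, Unknown }

// Mirrors the precedence added above: a non-healthy pool overrides
// whatever the usage thresholds computed.
fn pool_status(health: PoolHealth, usage_status: Status) -> Status {
    match health {
        PoolHealth::Critical => Status::Critical,
        PoolHealth::Degraded | PoolHealth::Rebuilding => Status::Warning,
        PoolHealth::Healthy => usage_status,
        PoolHealth::Unknown => Status::Unknown,
    }
}

fn main() {
    // A nearly-empty pool with a failed parity disk still reports Warning.
    assert_eq!(pool_status(PoolHealth::Degraded, Status::Ok), Status::Warning);
}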
@@ -476,15 +665,47 @@ impl Collector for DiskCollector {
             timestamp,
         });
 
+        // Enhanced pool type information
+        let pool_type_str = match &storage_pool.pool_type {
+            StoragePoolType::Single => "single".to_string(),
+            StoragePoolType::MergerfsPool { data_disks, parity_disks } => {
+                format!("mergerfs ({}+{})", data_disks.len(), parity_disks.len())
+            }
+            StoragePoolType::RaidArray { level, member_disks, spare_disks } => {
+                format!("{} ({}+{})", level, member_disks.len(), spare_disks.len())
+            }
+            StoragePoolType::ZfsPool { pool_name, .. } => {
+                format!("zfs ({})", pool_name)
+            }
+        };
+
         metrics.push(Metric {
-            name: format!("disk_{}_storage_type", pool_name),
-            value: MetricValue::String(storage_pool.storage_type.clone()),
+            name: format!("disk_{}_pool_type", pool_name),
+            value: MetricValue::String(pool_type_str.clone()),
             unit: None,
-            description: Some(format!("Type: {}", storage_pool.storage_type)),
+            description: Some(format!("Type: {}", pool_type_str)),
             status: Status::Ok,
             timestamp,
         });
 
+        // Pool health status
+        let health_str = match storage_pool.pool_health {
+            PoolHealth::Healthy => "healthy",
+            PoolHealth::Degraded => "degraded",
+            PoolHealth::Critical => "critical",
+            PoolHealth::Rebuilding => "rebuilding",
+            PoolHealth::Unknown => "unknown",
+        };
+
+        metrics.push(Metric {
+            name: format!("disk_{}_pool_health", pool_name),
+            value: MetricValue::String(health_str.to_string()),
+            unit: None,
+            description: Some(format!("Health: {}", health_str)),
+            status: pool_status,
+            timestamp,
+        });
+
         // Storage pool size metrics
         metrics.push(Metric {
             name: format!("disk_{}_total_gb", pool_name),
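For a hypothetical pool named "steampool", the two new metrics would be published under these names (values illustrative); the dashboard side below matches them by the _pool_type and _pool_health suffixes:

fn main() {
    let pool_name = "steampool";
    let pool_type_str = "mergerfs (2+1)";
    let health_str = "degraded";
    println!("{} = {:?}", format!("disk_{}_pool_type", pool_name), pool_type_str);
    println!("{} = {:?}", format!("disk_{}_pool_health", pool_name), health_str);
    // → disk_steampool_pool_type = "mergerfs (2+1)"
    // → disk_steampool_pool_health = "degraded"
}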
Cargo.toml (cm-dashboard)
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.98"
+version = "0.1.99"
 edition = "2021"
 
 [dependencies]

dashboard SystemWidget source (file path not shown in this view):
@@ -45,12 +45,14 @@ pub struct SystemWidget {
 struct StoragePool {
     name: String,
     mount_point: String,
-    pool_type: String, // "Single", "Raid0", etc.
+    pool_type: String, // "single", "mergerfs (2+1)", "RAID5 (3+1)", etc.
+    pool_health: Option<String>, // "healthy", "degraded", "critical", "rebuilding"
     drives: Vec<StorageDrive>,
     usage_percent: Option<f32>,
     used_gb: Option<f32>,
     total_gb: Option<f32>,
     status: Status,
+    health_status: Status, // Separate status for pool health vs usage
 }
 
 #[derive(Clone)]
@@ -155,12 +157,14 @@ impl SystemWidget {
         let pool = pools.entry(pool_name.clone()).or_insert_with(|| StoragePool {
             name: pool_name.clone(),
             mount_point: mount_point.clone(),
-            pool_type: "Single".to_string(), // Default, could be enhanced
+            pool_type: "single".to_string(), // Default, will be updated
+            pool_health: None,
             drives: Vec::new(),
             usage_percent: None,
             used_gb: None,
             total_gb: None,
             status: Status::Unknown,
+            health_status: Status::Unknown,
         });
 
         // Parse different metric types
@@ -177,6 +181,15 @@ impl SystemWidget {
             if let MetricValue::Float(total) = metric.value {
                 pool.total_gb = Some(total);
             }
+        } else if metric.name.contains("_pool_type") {
+            if let MetricValue::String(pool_type) = &metric.value {
+                pool.pool_type = pool_type.clone();
+            }
+        } else if metric.name.contains("_pool_health") {
+            if let MetricValue::String(health) = &metric.value {
+                pool.pool_health = Some(health.clone());
+                pool.health_status = metric.status.clone();
+            }
         } else if metric.name.contains("_temperature") {
             if let Some(drive_name) = self.extract_drive_name(&metric.name) {
                 // Find existing drive or create new one
@@ -277,73 +290,149 @@ impl SystemWidget {
         None
     }
 
-    /// Render storage section with tree structure
+    /// Render storage section with enhanced tree structure
    fn render_storage(&self) -> Vec<Line<'_>> {
         let mut lines = Vec::new();
 
         for pool in &self.storage_pools {
-            // Pool header line
-            let usage_text = match (pool.usage_percent, pool.used_gb, pool.total_gb) {
-                (Some(pct), Some(used), Some(total)) => {
-                    format!("{:.0}% {:.1}GB/{:.1}GB", pct, used, total)
-                }
-                _ => "—% —GB/—GB".to_string(),
-            };
-
-            let pool_label = if pool.pool_type.to_lowercase() == "single" {
+            // Pool header line with type and health
+            let pool_label = if pool.pool_type == "single" {
                 format!("{}:", pool.mount_point)
             } else {
                 format!("{} ({}):", pool.mount_point, pool.pool_type)
             };
             let pool_spans = StatusIcons::create_status_spans(
-                pool.status.clone(),
+                pool.health_status.clone(),
                 &pool_label
             );
             lines.push(Line::from(pool_spans));
 
-            // Drive lines with tree structure
-            let has_usage_line = pool.usage_percent.is_some();
-            for (i, drive) in pool.drives.iter().enumerate() {
-                let is_last_drive = i == pool.drives.len() - 1;
-                let tree_symbol = if is_last_drive && !has_usage_line { "└─" } else { "├─" };
-
-                let mut drive_info = Vec::new();
-                if let Some(temp) = drive.temperature {
-                    drive_info.push(format!("T: {:.0}C", temp));
-                }
-                if let Some(wear) = drive.wear_percent {
-                    drive_info.push(format!("W: {:.0}%", wear));
-                }
-                let drive_text = if drive_info.is_empty() {
-                    drive.name.clone()
-                } else {
-                    format!("{} {}", drive.name, drive_info.join(" • "))
-                };
-
-                let mut drive_spans = vec![
-                    Span::raw("  "),
-                    Span::styled(tree_symbol, Typography::tree()),
-                    Span::raw(" "),
-                ];
-                drive_spans.extend(StatusIcons::create_status_spans(drive.status.clone(), &drive_text));
-                lines.push(Line::from(drive_spans));
-            }
-
-            // Usage line
-            if pool.usage_percent.is_some() {
-                let tree_symbol = "└─";
-                let mut usage_spans = vec![
-                    Span::raw("  "),
-                    Span::styled(tree_symbol, Typography::tree()),
-                    Span::raw(" "),
-                ];
-                usage_spans.extend(StatusIcons::create_status_spans(pool.status.clone(), &usage_text));
-                lines.push(Line::from(usage_spans));
+            // Pool health line (for multi-disk pools)
+            if pool.pool_type != "single" {
+                if let Some(health) = &pool.pool_health {
+                    let health_text = match health.as_str() {
+                        "healthy" => format!("Pool Status: {} Healthy",
+                            if pool.drives.len() > 1 { format!("({} drives)", pool.drives.len()) } else { String::new() }),
+                        "degraded" => "Pool Status: ⚠ Degraded".to_string(),
+                        "critical" => "Pool Status: ✗ Critical".to_string(),
+                        "rebuilding" => "Pool Status: ⟳ Rebuilding".to_string(),
+                        _ => format!("Pool Status: ? {}", health),
+                    };
+
+                    let mut health_spans = vec![
+                        Span::raw("  "),
+                        Span::styled("├─ ", Typography::tree()),
+                    ];
+                    health_spans.extend(StatusIcons::create_status_spans(pool.health_status.clone(), &health_text));
+                    lines.push(Line::from(health_spans));
+                }
+            }
+
+            // Total usage line (always show for pools)
+            let usage_text = match (pool.usage_percent, pool.used_gb, pool.total_gb) {
+                (Some(pct), Some(used), Some(total)) => {
+                    format!("Total: {:.0}% {:.1}GB/{:.1}GB", pct, used, total)
+                }
+                _ => "Total: —% —GB/—GB".to_string(),
+            };
+
+            let has_drives = !pool.drives.is_empty();
+            let tree_symbol = if has_drives { "├─" } else { "└─" };
+            let mut usage_spans = vec![
+                Span::raw("  "),
+                Span::styled(tree_symbol, Typography::tree()),
+                Span::raw(" "),
+            ];
+            usage_spans.extend(StatusIcons::create_status_spans(pool.status.clone(), &usage_text));
+            lines.push(Line::from(usage_spans));
+
+            // Drive lines with enhanced grouping
+            if pool.pool_type != "single" && pool.drives.len() > 1 {
+                // Group drives by type for mergerfs pools
+                let (data_drives, parity_drives): (Vec<_>, Vec<_>) = pool.drives.iter().enumerate()
+                    .partition(|(_, drive)| {
+                        // Simple heuristic: drives with 'parity' in name or sdc (common parity drive)
+                        !drive.name.to_lowercase().contains("parity") && drive.name != "sdc"
+                    });
+
+                // Show data drives
+                if !data_drives.is_empty() && pool.pool_type.contains("mergerfs") {
+                    lines.push(Line::from(vec![
+                        Span::raw("  "),
+                        Span::styled("├─ ", Typography::tree()),
+                        Span::styled("Data Disks:", Typography::secondary()),
+                    ]));
+
+                    for (i, (_, drive)) in data_drives.iter().enumerate() {
+                        let is_last = i == data_drives.len() - 1;
+                        if is_last && parity_drives.is_empty() {
+                            self.render_drive_line(&mut lines, drive, "│ └─");
+                        } else {
+                            self.render_drive_line(&mut lines, drive, "│ ├─");
+                        }
+                    }
+                }
+
+                // Show parity drives
+                if !parity_drives.is_empty() && pool.pool_type.contains("mergerfs") {
+                    lines.push(Line::from(vec![
+                        Span::raw("  "),
+                        Span::styled("└─ ", Typography::tree()),
+                        Span::styled("Parity:", Typography::secondary()),
+                    ]));
+
+                    for (i, (_, drive)) in parity_drives.iter().enumerate() {
+                        let is_last = i == parity_drives.len() - 1;
+                        if is_last {
+                            self.render_drive_line(&mut lines, drive, "  └─");
+                        } else {
+                            self.render_drive_line(&mut lines, drive, "  ├─");
+                        }
+                    }
+                } else {
+                    // Regular drive listing for non-mergerfs pools
+                    for (i, drive) in pool.drives.iter().enumerate() {
+                        let is_last = i == pool.drives.len() - 1;
+                        let tree_symbol = if is_last { "└─" } else { "├─" };
+                        self.render_drive_line(&mut lines, drive, tree_symbol);
+                    }
+                }
+            } else {
+                // Single drive or simple pools
+                for (i, drive) in pool.drives.iter().enumerate() {
+                    let is_last = i == pool.drives.len() - 1;
+                    let tree_symbol = if is_last { "└─" } else { "├─" };
+                    self.render_drive_line(&mut lines, drive, tree_symbol);
+                }
             }
         }
 
         lines
     }
+
+    /// Helper to render a single drive line
+    fn render_drive_line<'a>(&self, lines: &mut Vec<Line<'a>>, drive: &StorageDrive, tree_symbol: &'a str) {
+        let mut drive_info = Vec::new();
+        if let Some(temp) = drive.temperature {
+            drive_info.push(format!("T: {:.0}°C", temp));
+        }
+        if let Some(wear) = drive.wear_percent {
+            drive_info.push(format!("W: {:.0}%", wear));
+        }
+        let drive_text = if drive_info.is_empty() {
+            drive.name.clone()
+        } else {
+            format!("{} {}", drive.name, drive_info.join(" • "))
+        };
+
+        let mut drive_spans = vec![
+            Span::raw("  "),
+            Span::styled(tree_symbol, Typography::tree()),
+            Span::raw(" "),
+        ];
+        drive_spans.extend(StatusIcons::create_status_spans(drive.status.clone(), &drive_text));
+        lines.push(Line::from(drive_spans));
+    }
 }
 
 impl Widget for SystemWidget {
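Note that the widget's data/parity split relies on a drive-name heuristic rather than the agent's member lists. A minimal sketch of just that predicate (the drive names are the commit's own examples):

// Trimmed drive record, for illustration only.
struct StorageDrive { name: String }

fn main() {
    let drives = vec![
        StorageDrive { name: "sdb".into() },
        StorageDrive { name: "sdc".into() }, // treated as parity per the heuristic
        StorageDrive { name: "sdd".into() },
    ];
    // Same partition predicate the widget uses: anything named *parity*
    // or literally "sdc" is grouped under "Parity:".
    let (data, parity): (Vec<_>, Vec<_>) = drives.iter()
        .partition(|d| !d.name.to_lowercase().contains("parity") && d.name != "sdc");
    assert_eq!(data.len(), 2);
    assert_eq!(parity.len(), 1);
}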
Cargo.toml (cm-dashboard-shared)
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.98"
+version = "0.1.99"
 edition = "2021"
 
 [dependencies]
Loading…
x
Reference in New Issue
Block a user