Implement unified pool visualization for single drives

- Group single disk filesystems by physical drive during auto-discovery
- Create physical drive pools with filesystem children
- Display temperature, wear, and health at drive level
- Provide consistent hierarchical storage visualization
- Fix borrow checker issues in create_physical_drive_pool method
- Add PhysicalDrive case to all StoragePoolType match statements
Christoffer Martinsson 2025-11-23 12:10:42 +01:00
parent 33b3beb342
commit d1272a6c13
6 changed files with 144 additions and 44 deletions


@@ -219,6 +219,45 @@ Storage:
- **Backwards Compatible**: Single disks continue working unchanged
- **Future Ready**: Easy extension for additional storage technologies
### Current Status (v0.1.100)
**✅ Completed:**
- Auto-discovery system implemented and deployed
- `/proc/mounts` parsing with smart heuristics for parity detection (see the sketch after this list)
- Storage topology stored at agent startup for efficient monitoring
- Universal zero-configuration for all hosts (cmbox, steambox, simonbox, srv01, srv02, srv03)
- Enhanced pool health calculation (healthy/degraded/critical)
- Hierarchical tree visualization with data/parity disk separation
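A minimal sketch of that `/proc/mounts` parsing step, assuming a simplified `MountEntry` shape and illustrative filtering heuristics (the shipped collector's types and rules may differ):
```rust
use std::fs;

/// One entry from /proc/mounts: "device mountpoint fstype options dump pass".
/// Illustrative stand-in for the agent's own mount type.
struct MountEntry {
    device: String,
    mount_point: String,
    fs_type: String,
}

/// Parse /proc/mounts, keeping block-backed filesystems and mergerfs pools.
fn parse_proc_mounts() -> std::io::Result<Vec<MountEntry>> {
    let contents = fs::read_to_string("/proc/mounts")?;
    let entries = contents
        .lines()
        .filter_map(|line| {
            let mut fields = line.split_whitespace();
            let device = fields.next()?.to_string();
            let mount_point = fields.next()?.to_string();
            let fs_type = fields.next()?.to_string();
            // Heuristic: skip proc, sysfs, tmpfs, overlay and other virtual
            // filesystems; keep /dev-backed mounts and mergerfs pools.
            if device.starts_with("/dev/") || fs_type == "fuse.mergerfs" {
                Some(MountEntry { device, mount_point, fs_type })
            } else {
                None
            }
        })
        .collect();
    Ok(entries)
}
```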
**🔄 In Progress - Unified Pool Visualization:**
Auto-discovery currently works, but it displays each filesystem as a separate entry instead of grouping filesystems by their physical drive. The unified pool concept needs to be implemented so that each single drive is treated as a pool with its filesystems as children.
**Current Display (needs improvement):**
```
● /boot: (separate entry)
● /nix_store: (separate entry)
● /: (separate entry)
```
**Target Display (unified pools):**
```
● nvme0n1:
├─ Drive: T: 35°C W: 1%
├─ /boot: 11% 0.1GB/1.0GB
├─ /nix_store: 23% 214.9GB/928.2GB
└─ /: 23% 214.9GB/928.2GB
```
**Required Changes:**
1. **Enhanced Auto-Discovery**: Group filesystems by backing physical drive during discovery
2. **UI Pool Logic**: Treat single drives as "pools" with drive name as header
3. **Drive Info Display**: Show temperature, wear, health at pool level for single drives
4. **Filesystem Children**: Display mount points as children under their physical drives
5. **Hybrid Rendering**: Physical grouping for single drives, logical grouping for mergerfs pools
**Expected Result**: Consistent hierarchical storage visualization where everything follows pool->children pattern, regardless of underlying storage technology.
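As a rough sketch of that pool->children rendering (the `PoolView` type here is illustrative, not the dashboard's actual UI struct):
```rust
/// Every pool renders as a header followed by child rows, whether it is a
/// single physical drive or a mergerfs group.
struct PoolView {
    header: String,             // e.g. "nvme0n1"
    drive_line: Option<String>, // e.g. "Drive: T: 35°C W: 1%" for single drives
    children: Vec<String>,      // one row per mount point or member disk
}

fn render(pool: &PoolView) {
    println!("● {}:", pool.header);
    if let Some(line) = &pool.drive_line {
        println!("├─ {}", line);
    }
    for (i, child) in pool.children.iter().enumerate() {
        // The last child gets the closing branch character.
        let branch = if i + 1 == pool.children.len() { "└─" } else { "├─" };
        println!("{} {}", branch, child);
    }
}

fn main() {
    render(&PoolView {
        header: "nvme0n1".into(),
        drive_line: Some("Drive: T: 35°C W: 1%".into()),
        children: vec![
            "/boot: 11% 0.1GB/1.0GB".into(),
            "/nix_store: 23% 214.9GB/928.2GB".into(),
            "/: 23% 214.9GB/928.2GB".into(),
        ],
    });
}
```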
## Important Communication Guidelines
Keep responses concise and focused. Avoid extensive implementation summaries unless requested.

Cargo.lock (generated, 6 lines changed)

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 [[package]]
 name = "cm-dashboard"
-version = "0.1.99"
+version = "0.1.100"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.99"
+version = "0.1.100"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -324,7 +324,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.99"
+version = "0.1.100"
 dependencies = [
  "chrono",
  "serde",


@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.100"
+version = "0.1.101"
 edition = "2021"
 
 [dependencies]


@@ -51,7 +51,10 @@ struct StoragePool {
 /// Enhanced storage pool types with specific configurations
 #[derive(Debug, Clone)]
 enum StoragePoolType {
-    Single,           // Traditional single disk
+    Single,           // Traditional single disk (legacy)
+    PhysicalDrive {   // Physical drive with multiple filesystems
+        filesystems: Vec<String>, // Mount points on this drive
+    },
     MergerfsPool {    // MergerFS with optional parity
         data_disks: Vec<String>,   // Member disk names (sdb, sdd)
         parity_disks: Vec<String>, // Parity disk names (sdc)
@@ -330,46 +333,17 @@ impl DiskCollector {
     fn get_auto_discovered_storage_pools(&self, topology: &StorageTopology) -> Result<Vec<StoragePool>> {
         let mut storage_pools = Vec::new();
-        // Process single disks
-        for disk_info in &topology.single_disks {
-            if let Ok((total_bytes, used_bytes)) = self.get_filesystem_info(&disk_info.mount_point) {
-                let available_bytes = total_bytes - used_bytes;
-                let usage_percent = if total_bytes > 0 {
-                    (used_bytes as f64 / total_bytes as f64) * 100.0
-                } else { 0.0 };
-                let size = self.bytes_to_human_readable(total_bytes);
-                let used = self.bytes_to_human_readable(used_bytes);
-                let available = self.bytes_to_human_readable(available_bytes);
-                let device_names = self.detected_devices.get(&disk_info.mount_point).cloned().unwrap_or_default();
-                let underlying_drives = self.get_drive_info_for_devices(&device_names)?;
-                // Generate simple name from mount point
-                let name = if disk_info.mount_point == "/" {
-                    "root".to_string()
-                } else {
-                    disk_info.mount_point.trim_start_matches('/').replace('/', "_")
-                };
-                storage_pools.push(StoragePool {
-                    name,
-                    mount_point: disk_info.mount_point.clone(),
-                    filesystem: disk_info.fs_type.clone(),
-                    pool_type: StoragePoolType::Single,
-                    size,
-                    used,
-                    available,
-                    usage_percent: usage_percent as f32,
-                    underlying_drives,
-                    pool_health: PoolHealth::Healthy,
-                });
-                debug!("Auto-discovered single disk: {} at {}", disk_info.fs_type, disk_info.mount_point);
-            }
-        }
+        // Group single disks by physical drive for unified pool display
+        let grouped_disks = self.group_filesystems_by_physical_drive(&topology.single_disks)?;
+        // Process grouped single disks (each physical drive becomes a pool)
+        for (drive_name, filesystems) in grouped_disks {
+            // Create a unified pool for this physical drive
+            let pool = self.create_physical_drive_pool(&drive_name, &filesystems)?;
+            storage_pools.push(pool);
+        }
-        // Process mergerfs pools
+        // Process mergerfs pools (these remain as logical pools)
         for pool_info in &topology.mergerfs_pools {
             if let Ok((total_bytes, used_bytes)) = self.get_filesystem_info(&pool_info.mount_point) {
                 let available_bytes = total_bytes - used_bytes;
@@ -434,6 +408,75 @@ impl DiskCollector {
         Ok(storage_pools)
     }
+
+    /// Group filesystems by their backing physical drive
+    fn group_filesystems_by_physical_drive(&self, filesystems: &[MountInfo]) -> Result<std::collections::HashMap<String, Vec<MountInfo>>> {
+        let mut grouped = std::collections::HashMap::new();
+        for fs in filesystems {
+            // Get the physical drive name for this mount point
+            if let Some(devices) = self.detected_devices.get(&fs.mount_point) {
+                if let Some(device_name) = devices.first() {
+                    // detected_devices already stores the backing drive name (e.g. "nvme0n1")
+                    let drive_name = device_name.clone();
+                    grouped.entry(drive_name).or_insert_with(Vec::new).push(fs.clone());
+                }
+            }
+        }
+        Ok(grouped)
+    }
+
+    /// Create a physical drive pool containing multiple filesystems
+    fn create_physical_drive_pool(&self, drive_name: &str, filesystems: &[MountInfo]) -> Result<StoragePool> {
+        if filesystems.is_empty() {
+            return Err(anyhow::anyhow!("No filesystems for drive {}", drive_name));
+        }
+        // Calculate total usage across all filesystems on this drive
+        let mut total_capacity = 0u64;
+        let mut total_used = 0u64;
+        for fs in filesystems {
+            if let Ok((capacity, used)) = self.get_filesystem_info(&fs.mount_point) {
+                total_capacity += capacity;
+                total_used += used;
+            }
+        }
+        let total_available = total_capacity.saturating_sub(total_used);
+        let usage_percent = if total_capacity > 0 {
+            (total_used as f64 / total_capacity as f64) * 100.0
+        } else { 0.0 };
+        // Get drive information for SMART data
+        let device_names = vec![drive_name.to_string()];
+        let underlying_drives = self.get_drive_info_for_devices(&device_names)?;
+        // Collect filesystem mount points for this drive
+        let filesystem_mount_points: Vec<String> = filesystems.iter()
+            .map(|fs| fs.mount_point.clone())
+            .collect();
+        Ok(StoragePool {
+            name: drive_name.to_string(),
+            mount_point: "(physical drive)".to_string(), // Special marker for physical drives
+            filesystem: "physical".to_string(),
+            pool_type: StoragePoolType::PhysicalDrive {
+                filesystems: filesystem_mount_points,
+            },
+            size: self.bytes_to_human_readable(total_capacity),
+            used: self.bytes_to_human_readable(total_used),
+            available: self.bytes_to_human_readable(total_available),
+            usage_percent: usage_percent as f32,
+            pool_health: if underlying_drives.iter().all(|d| d.health_status == "PASSED") {
+                PoolHealth::Healthy
+            } else {
+                PoolHealth::Critical
+            },
+            underlying_drives,
+        })
+    }
     /// Calculate pool health specifically for mergerfs pools
     fn calculate_mergerfs_pool_health(&self, data_members: &[String], parity_disks: &[String], drives: &[DriveInfo]) -> PoolHealth {
         // Get device names for data and parity drives
@@ -601,6 +644,11 @@ impl DiskCollector {
                 let device_names = self.detected_devices.get(mount_point).cloned().unwrap_or_default();
                 self.get_drive_info_for_devices(&device_names)
             }
+            StoragePoolType::PhysicalDrive { .. } => {
+                // Physical drive - the drive name itself is passed in via mount_point, so query it directly
+                let device_names = vec![mount_point.to_string()];
+                self.get_drive_info_for_devices(&device_names)
+            }
             StoragePoolType::MergerfsPool { data_disks, parity_disks } => {
                 // Mergerfs pool - collect all member drives
                 let mut all_disks = data_disks.clone();
@@ -634,6 +682,16 @@ impl DiskCollector {
                     PoolHealth::Critical
                 }
             }
+            StoragePoolType::PhysicalDrive { .. } => {
+                // Physical drive - health is just the drive health (similar to Single)
+                if drives.is_empty() {
+                    PoolHealth::Unknown
+                } else if drives.iter().all(|d| d.health_status == "PASSED") {
+                    PoolHealth::Healthy
+                } else {
+                    PoolHealth::Critical
+                }
+            }
             StoragePoolType::MergerfsPool { data_disks, parity_disks } => {
                 let failed_data = drives.iter()
                     .filter(|d| data_disks.contains(&d.device) && d.health_status != "PASSED")
@@ -1028,6 +1086,9 @@ impl Collector for DiskCollector {
         // Enhanced pool type information
         let pool_type_str = match &storage_pool.pool_type {
             StoragePoolType::Single => "single".to_string(),
+            StoragePoolType::PhysicalDrive { filesystems } => {
+                format!("drive ({})", filesystems.len())
+            }
             StoragePoolType::MergerfsPool { data_disks, parity_disks } => {
                 format!("mergerfs ({}+{})", data_disks.len(), parity_disks.len())
             }
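For reference, a self-contained sketch of how the new variant feeds the pool-type label (the enum below is a trimmed stand-in for the one in this diff, with the mergerfs arm omitted):
```rust
// Trimmed stand-in for the StoragePoolType enum in this commit.
enum StoragePoolType {
    Single,
    PhysicalDrive { filesystems: Vec<String> },
}

fn pool_type_label(pool_type: &StoragePoolType) -> String {
    match pool_type {
        StoragePoolType::Single => "single".to_string(),
        StoragePoolType::PhysicalDrive { filesystems } => {
            format!("drive ({})", filesystems.len())
        }
    }
}

fn main() {
    // A drive hosting /boot, /nix_store and / labels as "drive (3)".
    let nvme = StoragePoolType::PhysicalDrive {
        filesystems: vec!["/boot".into(), "/nix_store".into(), "/".into()],
    };
    assert_eq!(pool_type_label(&nvme), "drive (3)");
}
```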


@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.100"
+version = "0.1.101"
 edition = "2021"
 
 [dependencies]


@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.100"
+version = "0.1.101"
 edition = "2021"
 
 [dependencies]