Update version to v0.1.133
All checks were successful: Build and Release / build-and-release (push) completed in 2m9s

Bump the version across all workspace crates for the next release, including the agent, dashboard, and shared components.
Christoffer Martinsson 2025-11-23 22:25:19 +01:00
parent b2b301332f
commit c9b2d5e342
11 changed files with 280 additions and 1011 deletions

Cargo.lock (generated)

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
[[package]]
name = "cm-dashboard"
-version = "0.1.131"
+version = "0.1.132"
dependencies = [
"anyhow",
"chrono",
@@ -301,7 +301,7 @@ dependencies = [
[[package]]
name = "cm-dashboard-agent"
-version = "0.1.131"
+version = "0.1.132"
dependencies = [
"anyhow",
"async-trait",
@@ -324,7 +324,7 @@ dependencies = [
[[package]]
name = "cm-dashboard-shared"
-version = "0.1.131"
+version = "0.1.132"
dependencies = [
"chrono",
"serde",


@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-agent"
-version = "0.1.132"
+version = "0.1.133"
edition = "2021"
[dependencies]


@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard"
-version = "0.1.132"
+version = "0.1.133"
edition = "2021"
[dependencies]


@@ -212,8 +212,8 @@ impl Dashboard {
println!("{}", "".repeat(80));
}
-// Update data store
-self.metric_store.process_agent_data(agent_data);
+// Store structured data directly
+self.metric_store.store_agent_data(agent_data);
// Check for agent version mismatches across hosts
if let Some((current_version, outdated_hosts)) = self.metric_store.get_version_mismatches() {


@@ -1,4 +1,4 @@
-use cm_dashboard_shared::{AgentData, Metric};
+use cm_dashboard_shared::AgentData;
use std::collections::HashMap;
use std::time::{Duration, Instant};
use tracing::{debug, info, warn};
@@ -7,8 +7,8 @@ use super::MetricDataPoint;
/// Central metric storage for the dashboard
pub struct MetricStore {
-/// Current metrics: hostname -> metric_name -> metric
-current_metrics: HashMap<String, HashMap<String, Metric>>,
+/// Current structured data: hostname -> AgentData
+current_agent_data: HashMap<String, AgentData>,
/// Historical metrics for trending
historical_metrics: HashMap<String, Vec<MetricDataPoint>>,
/// Last heartbeat timestamp per host
@@ -21,7 +21,7 @@ pub struct MetricStore {
impl MetricStore {
pub fn new(max_metrics_per_host: usize, history_retention_hours: u64) -> Self {
Self {
-current_metrics: HashMap::new(),
+current_agent_data: HashMap::new(),
historical_metrics: HashMap::new(),
last_heartbeat: HashMap::new(),
max_metrics_per_host,
@@ -29,375 +29,43 @@ impl MetricStore {
}
}
+/// Store structured agent data directly
+pub fn store_agent_data(&mut self, agent_data: AgentData) {
+let now = Instant::now();
+let hostname = agent_data.hostname.clone();
+debug!("Storing structured data for host {}", hostname);
+// Store the structured data directly
+self.current_agent_data.insert(hostname.clone(), agent_data);
+// Update heartbeat timestamp
+self.last_heartbeat.insert(hostname.clone(), now);
+debug!("Updated heartbeat for host {}", hostname);
+// Add to history
+let host_history = self
+.historical_metrics
+.entry(hostname.clone())
+.or_insert_with(Vec::new);
+host_history.push(MetricDataPoint { received_at: now });
+// Cleanup old data
+self.cleanup_host_data(&hostname);
+info!("Stored structured data for {}", hostname);
+}
/// Update metrics for a specific host
pub fn update_metrics(&mut self, hostname: &str, metrics: Vec<Metric>) {
let now = Instant::now();
debug!("Updating {} metrics for host {}", metrics.len(), hostname);
// Get or create host entry
let host_metrics = self
.current_metrics
.entry(hostname.to_string())
.or_insert_with(HashMap::new);
// Get or create historical entry
let host_history = self
.historical_metrics
.entry(hostname.to_string())
.or_insert_with(Vec::new);
// Update current metrics and add to history
for metric in metrics {
let metric_name = metric.name.clone();
// Store current metric
host_metrics.insert(metric_name.clone(), metric.clone());
// Add to history
host_history.push(MetricDataPoint { received_at: now });
// Track heartbeat metrics for connectivity detection
if metric_name == "agent_heartbeat" {
self.last_heartbeat.insert(hostname.to_string(), now);
debug!("Updated heartbeat for host {}", hostname);
}
}
// Get metrics count before cleanup
let metrics_count = host_metrics.len();
// Cleanup old history and enforce limits
self.cleanup_host_data(hostname);
info!(
"Updated metrics for {}: {} current metrics",
hostname, metrics_count
);
}
/// Process structured agent data (temporary bridge - converts back to metrics)
/// TODO: Replace entire metric system with direct structured data processing
pub fn process_agent_data(&mut self, agent_data: AgentData) {
let metrics = self.convert_agent_data_to_metrics(&agent_data);
self.update_metrics(&agent_data.hostname, metrics);
}
/// Convert structured agent data to legacy metrics (temporary bridge)
fn convert_agent_data_to_metrics(&self, agent_data: &AgentData) -> Vec<Metric> {
use cm_dashboard_shared::{Metric, MetricValue, Status};
let mut metrics = Vec::new();
// Convert CPU data
metrics.push(Metric::new(
"cpu_load_1min".to_string(),
MetricValue::Float(agent_data.system.cpu.load_1min),
Status::Ok,
));
metrics.push(Metric::new(
"cpu_load_5min".to_string(),
MetricValue::Float(agent_data.system.cpu.load_5min),
Status::Ok,
));
metrics.push(Metric::new(
"cpu_load_15min".to_string(),
MetricValue::Float(agent_data.system.cpu.load_15min),
Status::Ok,
));
metrics.push(Metric::new(
"cpu_frequency_mhz".to_string(),
MetricValue::Float(agent_data.system.cpu.frequency_mhz),
Status::Ok,
));
if let Some(temp) = agent_data.system.cpu.temperature_celsius {
metrics.push(Metric::new(
"cpu_temperature_celsius".to_string(),
MetricValue::Float(temp),
Status::Ok,
));
}
// Convert Memory data
metrics.push(Metric::new(
"memory_usage_percent".to_string(),
MetricValue::Float(agent_data.system.memory.usage_percent),
Status::Ok,
));
metrics.push(Metric::new(
"memory_total_gb".to_string(),
MetricValue::Float(agent_data.system.memory.total_gb),
Status::Ok,
));
metrics.push(Metric::new(
"memory_used_gb".to_string(),
MetricValue::Float(agent_data.system.memory.used_gb),
Status::Ok,
));
metrics.push(Metric::new(
"memory_available_gb".to_string(),
MetricValue::Float(agent_data.system.memory.available_gb),
Status::Ok,
));
metrics.push(Metric::new(
"memory_swap_total_gb".to_string(),
MetricValue::Float(agent_data.system.memory.swap_total_gb),
Status::Ok,
));
metrics.push(Metric::new(
"memory_swap_used_gb".to_string(),
MetricValue::Float(agent_data.system.memory.swap_used_gb),
Status::Ok,
));
// Convert tmpfs data
for tmpfs in &agent_data.system.memory.tmpfs {
if tmpfs.mount == "/tmp" {
metrics.push(Metric::new(
"memory_tmp_usage_percent".to_string(),
MetricValue::Float(tmpfs.usage_percent),
Status::Ok,
));
metrics.push(Metric::new(
"memory_tmp_used_gb".to_string(),
MetricValue::Float(tmpfs.used_gb),
Status::Ok,
));
metrics.push(Metric::new(
"memory_tmp_total_gb".to_string(),
MetricValue::Float(tmpfs.total_gb),
Status::Ok,
));
}
}
// Add agent metadata
metrics.push(Metric::new(
"agent_version".to_string(),
MetricValue::String(agent_data.agent_version.clone()),
Status::Ok,
));
metrics.push(Metric::new(
"agent_heartbeat".to_string(),
MetricValue::Integer(agent_data.timestamp as i64),
Status::Ok,
));
// Convert storage data
for drive in &agent_data.system.storage.drives {
// Drive-level metrics
if let Some(temp) = drive.temperature_celsius {
metrics.push(Metric::new(
format!("disk_{}_temperature", drive.name),
MetricValue::Float(temp),
Status::Ok,
));
}
if let Some(wear) = drive.wear_percent {
metrics.push(Metric::new(
format!("disk_{}_wear_percent", drive.name),
MetricValue::Float(wear),
Status::Ok,
));
}
metrics.push(Metric::new(
format!("disk_{}_health", drive.name),
MetricValue::String(drive.health.clone()),
Status::Ok,
));
// Calculate drive totals from all filesystems
let total_used: f32 = drive.filesystems.iter().map(|fs| fs.used_gb).sum();
let total_size: f32 = drive.filesystems.iter().map(|fs| fs.total_gb).sum();
let average_usage = if total_size > 0.0 { (total_used / total_size) * 100.0 } else { 0.0 };
// Drive total metrics (aggregated from filesystems)
metrics.push(Metric::new(
format!("disk_{}_usage_percent", drive.name),
MetricValue::Float(average_usage),
Status::Ok,
));
metrics.push(Metric::new(
format!("disk_{}_used_gb", drive.name),
MetricValue::Float(total_used),
Status::Ok,
));
metrics.push(Metric::new(
format!("disk_{}_total_gb", drive.name),
MetricValue::Float(total_size),
Status::Ok,
));
metrics.push(Metric::new(
format!("disk_{}_pool_type", drive.name),
MetricValue::String("drive".to_string()),
Status::Ok,
));
// Filesystem metrics
for fs in &drive.filesystems {
let fs_base = format!("disk_{}_fs_{}", drive.name, fs.mount.replace('/', "root"));
metrics.push(Metric::new(
format!("{}_usage_percent", fs_base),
MetricValue::Float(fs.usage_percent),
Status::Ok,
));
metrics.push(Metric::new(
format!("{}_used_gb", fs_base),
MetricValue::Float(fs.used_gb),
Status::Ok,
));
metrics.push(Metric::new(
format!("{}_total_gb", fs_base),
MetricValue::Float(fs.total_gb),
Status::Ok,
));
}
}
// Convert storage pools
for pool in &agent_data.system.storage.pools {
let pool_base = format!("disk_{}", pool.name);
metrics.push(Metric::new(
format!("{}_usage_percent", pool_base),
MetricValue::Float(pool.usage_percent),
Status::Ok,
));
metrics.push(Metric::new(
format!("{}_used_gb", pool_base),
MetricValue::Float(pool.used_gb),
Status::Ok,
));
metrics.push(Metric::new(
format!("{}_total_gb", pool_base),
MetricValue::Float(pool.total_gb),
Status::Ok,
));
metrics.push(Metric::new(
format!("{}_pool_type", pool_base),
MetricValue::String(pool.pool_type.clone()),
Status::Ok,
));
metrics.push(Metric::new(
format!("{}_mount_point", pool_base),
MetricValue::String(pool.mount.clone()),
Status::Ok,
));
// Pool drive data
for drive in &pool.data_drives {
if let Some(temp) = drive.temperature_celsius {
metrics.push(Metric::new(
format!("disk_{}_{}_temperature", pool.name, drive.name),
MetricValue::Float(temp),
Status::Ok,
));
}
if let Some(wear) = drive.wear_percent {
metrics.push(Metric::new(
format!("disk_{}_{}_wear_percent", pool.name, drive.name),
MetricValue::Float(wear),
Status::Ok,
));
}
}
for drive in &pool.parity_drives {
if let Some(temp) = drive.temperature_celsius {
metrics.push(Metric::new(
format!("disk_{}_{}_temperature", pool.name, drive.name),
MetricValue::Float(temp),
Status::Ok,
));
}
if let Some(wear) = drive.wear_percent {
metrics.push(Metric::new(
format!("disk_{}_{}_wear_percent", pool.name, drive.name),
MetricValue::Float(wear),
Status::Ok,
));
}
}
}
// Convert service data
for service in &agent_data.services {
let service_base = format!("service_{}", service.name);
metrics.push(Metric::new(
format!("{}_status", service_base),
MetricValue::String(service.status.clone()),
Status::Ok,
));
metrics.push(Metric::new(
format!("{}_memory_mb", service_base),
MetricValue::Float(service.memory_mb),
Status::Ok,
));
metrics.push(Metric::new(
format!("{}_disk_gb", service_base),
MetricValue::Float(service.disk_gb),
Status::Ok,
));
if service.user_stopped {
metrics.push(Metric::new(
format!("{}_user_stopped", service_base),
MetricValue::Boolean(true),
Status::Ok,
));
}
}
// Convert backup data
metrics.push(Metric::new(
"backup_status".to_string(),
MetricValue::String(agent_data.backup.status.clone()),
Status::Ok,
));
if let Some(last_run) = agent_data.backup.last_run {
metrics.push(Metric::new(
"backup_last_run_timestamp".to_string(),
MetricValue::Integer(last_run as i64),
Status::Ok,
));
}
if let Some(next_scheduled) = agent_data.backup.next_scheduled {
metrics.push(Metric::new(
"backup_next_scheduled_timestamp".to_string(),
MetricValue::Integer(next_scheduled as i64),
Status::Ok,
));
}
if let Some(size) = agent_data.backup.total_size_gb {
metrics.push(Metric::new(
"backup_size_gb".to_string(),
MetricValue::Float(size),
Status::Ok,
));
}
if let Some(health) = &agent_data.backup.repository_health {
metrics.push(Metric::new(
"backup_repository_health".to_string(),
MetricValue::String(health.clone()),
Status::Ok,
));
}
metrics
}
/// Get current metric for a specific host
pub fn get_metric(&self, hostname: &str, metric_name: &str) -> Option<&Metric> {
self.current_metrics.get(hostname)?.get(metric_name)
}
/// Get all current metrics for a host as a vector
pub fn get_metrics_for_host(&self, hostname: &str) -> Vec<&Metric> {
if let Some(metrics_map) = self.current_metrics.get(hostname) {
metrics_map.values().collect()
} else {
Vec::new()
}
}
+/// Get current structured data for a host
+pub fn get_agent_data(&self, hostname: &str) -> Option<&AgentData> {
+self.current_agent_data.get(hostname)
+}
/// Get connected hosts (hosts with recent heartbeats)
pub fn get_connected_hosts(&self, timeout: Duration) -> Vec<String> {
let now = Instant::now();
@@ -428,10 +96,10 @@
}
}
-// Clear metrics for offline hosts
+// Clear data for offline hosts
for hostname in hosts_to_cleanup {
-if let Some(metrics) = self.current_metrics.remove(&hostname) {
-info!("Cleared {} metrics for offline host: {}", metrics.len(), hostname);
+if let Some(_agent_data) = self.current_agent_data.remove(&hostname) {
+info!("Cleared structured data for offline host: {}", hostname);
}
// Keep heartbeat timestamp for reconnection detection
// Don't remove from last_heartbeat to track when host was last seen
@@ -463,12 +131,8 @@
pub fn get_agent_versions(&self) -> HashMap<String, String> {
let mut versions = HashMap::new();
-for (hostname, metrics) in &self.current_metrics {
-if let Some(version_metric) = metrics.get("agent_version") {
-if let cm_dashboard_shared::MetricValue::String(version) = &version_metric.value {
-versions.insert(hostname.clone(), version.clone());
-}
-}
+for (hostname, agent_data) in &self.current_agent_data {
+versions.insert(hostname.clone(), agent_data.agent_version.clone());
}
versions
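Taken together, these MetricStore changes swap per-metric bookkeeping for whole-snapshot storage keyed by hostname. A minimal sketch of the new flow, using a simplified stand-in for AgentData (the real struct also carries system, service, and backup data):

use std::collections::HashMap;

// Simplified stand-in for cm_dashboard_shared::AgentData.
struct AgentData {
    hostname: String,
    agent_version: String,
}

struct MetricStore {
    current_agent_data: HashMap<String, AgentData>,
}

impl MetricStore {
    // Store the whole snapshot, keyed by hostname.
    fn store_agent_data(&mut self, agent_data: AgentData) {
        self.current_agent_data.insert(agent_data.hostname.clone(), agent_data);
    }

    // Readers get the snapshot back instead of querying individual metrics.
    fn get_agent_data(&self, hostname: &str) -> Option<&AgentData> {
        self.current_agent_data.get(hostname)
    }
}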


@@ -102,58 +102,17 @@ impl TuiApp {
.or_insert_with(HostWidgets::new)
}
-/// Update widgets with metrics from store (only for current host)
+/// Update widgets with structured data from store (only for current host)
pub fn update_metrics(&mut self, metric_store: &MetricStore) {
-// Check for rebuild completion by agent hash change
if let Some(hostname) = self.current_host.clone() {
-// Only update widgets if we have metrics for this host
-let all_metrics = metric_store.get_metrics_for_host(&hostname);
-if !all_metrics.is_empty() {
-// Single pass metric categorization for better performance
-let mut cpu_metrics = Vec::new();
-let mut memory_metrics = Vec::new();
-let mut service_metrics = Vec::new();
-let mut backup_metrics = Vec::new();
-let mut nixos_metrics = Vec::new();
-let mut disk_metrics = Vec::new();
-for metric in all_metrics {
-if metric.name.starts_with("cpu_")
-|| metric.name.contains("c_state_")
-|| metric.name.starts_with("process_top_") {
-cpu_metrics.push(metric);
-} else if metric.name.starts_with("memory_") || metric.name.starts_with("disk_tmp_") {
-memory_metrics.push(metric);
-} else if metric.name.starts_with("service_") {
-service_metrics.push(metric);
-} else if metric.name.starts_with("backup_") {
-backup_metrics.push(metric);
-} else if metric.name == "system_nixos_build" || metric.name == "system_active_users" || metric.name == "agent_version" {
-nixos_metrics.push(metric);
-} else if metric.name.starts_with("disk_") {
-disk_metrics.push(metric);
-}
-}
-// Now get host widgets and update them
+// Get structured data for this host
+if let Some(agent_data) = metric_store.get_agent_data(&hostname) {
let host_widgets = self.get_or_create_host_widgets(&hostname);
-// Collect all system metrics (CPU, memory, NixOS, disk/storage)
-let mut system_metrics = cpu_metrics;
-system_metrics.extend(memory_metrics);
-system_metrics.extend(nixos_metrics);
-system_metrics.extend(disk_metrics);
-host_widgets.system_widget.update_from_metrics(&system_metrics);
-host_widgets
-.services_widget
-.update_from_metrics(&service_metrics);
-host_widgets
-.backup_widget
-.update_from_metrics(&backup_metrics);
+// Update all widgets with structured data directly
+host_widgets.system_widget.update_from_agent_data(agent_data);
+host_widgets.services_widget.update_from_agent_data(agent_data);
+host_widgets.backup_widget.update_from_agent_data(agent_data);
host_widgets.last_update = Some(Instant::now());
}
@@ -654,40 +613,14 @@
frame.render_widget(host_title, chunks[1]);
}
-/// Calculate overall status for a host based on its metrics
+/// Calculate overall status for a host based on its structured data
fn calculate_host_status(&self, hostname: &str, metric_store: &MetricStore) -> Status {
-let metrics = metric_store.get_metrics_for_host(hostname);
-if metrics.is_empty() {
-return Status::Offline;
-}
-// First check if we have the aggregated host status summary from the agent
-if let Some(host_summary_metric) = metric_store.get_metric(hostname, "host_status_summary") {
-return host_summary_metric.status;
-}
-// Rewritten status aggregation - only Critical, Warning, or OK for top bar
-let mut has_critical = false;
-let mut has_warning = false;
-for metric in &metrics {
-match metric.status {
-Status::Critical => has_critical = true,
-Status::Warning => has_warning = true,
-// Treat all other statuses as OK for top bar aggregation
-Status::Ok | Status::Pending | Status::Inactive | Status::Unknown => {},
-Status::Offline => {}, // Ignore offline
-}
-}
-// Only return Critical, Warning, or OK - no other statuses
-if has_critical {
-Status::Critical
-} else if has_warning {
-Status::Warning
-} else {
+// Check if we have structured data for this host
+if let Some(_agent_data) = metric_store.get_agent_data(hostname) {
+// Return OK since we have data
Status::Ok
+} else {
+Status::Offline
}
}


@@ -1,4 +1,5 @@
use cm_dashboard_shared::{Metric, Status};
+use super::Widget;
use ratatui::{
layout::Rect,
widgets::Paragraph,
@@ -6,7 +7,6 @@ use ratatui::{
};
use tracing::debug;
-use super::Widget;
use crate::ui::theme::{StatusIcons, Typography};
/// Backup widget displaying backup status, services, and repository information
@@ -137,6 +137,23 @@
}
impl Widget for BackupWidget {
+fn update_from_agent_data(&mut self, agent_data: &cm_dashboard_shared::AgentData) {
+self.has_data = true;
+let backup = &agent_data.backup;
+self.overall_status = Status::Ok;
+if let Some(size) = backup.total_size_gb {
+self.total_repo_size_gb = Some(size);
+}
+if let Some(last_run) = backup.last_run {
+self.last_run_timestamp = Some(last_run as i64);
+}
+}
+}
+impl BackupWidget {
fn update_from_metrics(&mut self, metrics: &[&Metric]) {
debug!("Backup widget updating with {} metrics", metrics.len());
for metric in metrics {


@@ -1,4 +1,4 @@
-use cm_dashboard_shared::Metric;
+use cm_dashboard_shared::AgentData;
pub mod backup;
pub mod cpu;
@@ -10,9 +10,8 @@ pub use backup::BackupWidget;
pub use services::ServicesWidget;
pub use system::SystemWidget;
-/// Widget trait for UI components that display metrics
+/// Widget trait for UI components that display structured data
pub trait Widget {
-/// Update widget with new metrics data
-fn update_from_metrics(&mut self, metrics: &[&Metric]);
+/// Update widget with structured agent data
+fn update_from_agent_data(&mut self, agent_data: &AgentData);
}
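For illustration, a widget that adopts the new trait only copies the fields it renders out of the snapshot; a minimal sketch against a simplified AgentData with just an agent_version field (this VersionWidget is hypothetical, not part of the commit):

struct VersionWidget {
    version: Option<String>,
}

impl Widget for VersionWidget {
    fn update_from_agent_data(&mut self, agent_data: &AgentData) {
        // Keep only what this widget displays; no per-metric parsing needed.
        self.version = Some(agent_data.agent_version.clone());
    }
}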


@@ -1,4 +1,5 @@
use cm_dashboard_shared::{Metric, Status};
+use super::Widget;
use ratatui::{
layout::{Constraint, Direction, Layout, Rect},
widgets::Paragraph,
@@ -7,7 +8,6 @@ use ratatui::{
use std::collections::HashMap;
use tracing::debug;
-use super::Widget;
use crate::ui::theme::{Components, StatusIcons, Theme, Typography};
use ratatui::style::Style;
@@ -255,6 +255,28 @@
}
impl Widget for ServicesWidget {
+fn update_from_agent_data(&mut self, agent_data: &cm_dashboard_shared::AgentData) {
+self.has_data = true;
+self.parent_services.clear();
+self.sub_services.clear();
+for service in &agent_data.services {
+let service_info = ServiceInfo {
+status: service.status.clone(),
+memory_mb: Some(service.memory_mb),
+disk_gb: Some(service.disk_gb),
+latency_ms: None,
+widget_status: Status::Ok,
+};
+self.parent_services.insert(service.name.clone(), service_info);
+}
+self.status = Status::Ok;
+}
+}
+impl ServicesWidget {
fn update_from_metrics(&mut self, metrics: &[&Metric]) {
debug!("Services widget updating with {} metrics", metrics.len());


@@ -1,4 +1,4 @@
-use cm_dashboard_shared::{Metric, MetricValue, Status};
+use cm_dashboard_shared::Status;
use ratatui::{
layout::Rect,
text::{Line, Span, Text},
@@ -6,7 +6,6 @@ use ratatui::{
Frame,
};
-use super::Widget;
use crate::ui::theme::{StatusIcons, Typography};
/// System widget displaying NixOS info, CPU, RAM, and Storage in unified layout
@@ -14,7 +13,6 @@ use crate::ui::theme::{StatusIcons, Typography};
pub struct SystemWidget {
// NixOS information
nixos_build: Option<String>,
-config_hash: Option<String>,
agent_hash: Option<String>,
// CPU metrics
@@ -46,7 +44,6 @@ struct StoragePool {
name: String,
mount_point: String,
pool_type: String, // "single", "mergerfs (2+1)", "RAID5 (3+1)", etc.
-pool_health: Option<String>, // "healthy", "degraded", "critical", "rebuilding"
drives: Vec<StorageDrive>,
filesystems: Vec<FileSystem>, // For physical drive pools: individual filesystem children
usage_percent: Option<f32>,
@@ -70,7 +67,6 @@ struct FileSystem {
usage_percent: Option<f32>,
used_gb: Option<f32>,
total_gb: Option<f32>,
-available_gb: Option<f32>,
status: Status,
}
@@ -78,7 +74,6 @@ impl SystemWidget {
pub fn new() -> Self {
Self {
nixos_build: None,
-config_hash: None,
agent_hash: None,
cpu_load_1min: None,
cpu_load_5min: None,
@@ -145,324 +140,106 @@ impl SystemWidget {
pub fn _get_agent_hash(&self) -> Option<&String> {
self.agent_hash.as_ref()
}
+}
+use super::Widget;
+impl Widget for SystemWidget {
+fn update_from_agent_data(&mut self, agent_data: &cm_dashboard_shared::AgentData) {
+self.has_data = true;
+// Extract agent version
+self.agent_hash = Some(agent_data.agent_version.clone());
+// Extract CPU data directly
+let cpu = &agent_data.system.cpu;
+self.cpu_load_1min = Some(cpu.load_1min);
+self.cpu_load_5min = Some(cpu.load_5min);
+self.cpu_load_15min = Some(cpu.load_15min);
+self.cpu_frequency = Some(cpu.frequency_mhz);
+self.cpu_status = Status::Ok;
+// Extract memory data directly
+let memory = &agent_data.system.memory;
+self.memory_usage_percent = Some(memory.usage_percent);
+self.memory_used_gb = Some(memory.used_gb);
+self.memory_total_gb = Some(memory.total_gb);
+self.memory_status = Status::Ok;
+// Extract tmpfs data
+if let Some(tmp_data) = memory.tmpfs.iter().find(|t| t.mount == "/tmp") {
+self.tmp_usage_percent = Some(tmp_data.usage_percent);
+self.tmp_used_gb = Some(tmp_data.used_gb);
+self.tmp_total_gb = Some(tmp_data.total_gb);
+self.tmp_status = Status::Ok;
+}
+// Convert storage data to internal format
+self.update_storage_from_agent_data(agent_data);
+}
+}
+impl SystemWidget {
+/// Convert structured storage data to internal format
+fn update_storage_from_agent_data(&mut self, agent_data: &cm_dashboard_shared::AgentData) {
+let mut pools: std::collections::HashMap<String, StoragePool> = std::collections::HashMap::new();
+// Convert drives
+for drive in &agent_data.system.storage.drives {
+let mut pool = StoragePool {
+name: drive.name.clone(),
+mount_point: drive.name.clone(),
+pool_type: "drive".to_string(),
+drives: Vec::new(),
+filesystems: Vec::new(),
+usage_percent: None,
+used_gb: None,
+total_gb: None,
+status: Status::Ok,
+health_status: Status::Ok,
+};
+// Add drive info
+let storage_drive = StorageDrive {
+name: drive.name.clone(),
+temperature: drive.temperature_celsius,
+wear_percent: drive.wear_percent,
+status: Status::Ok,
+};
+pool.drives.push(storage_drive);
+// Calculate totals from filesystems
+let total_used: f32 = drive.filesystems.iter().map(|fs| fs.used_gb).sum();
+let total_size: f32 = drive.filesystems.iter().map(|fs| fs.total_gb).sum();
+let average_usage = if total_size > 0.0 { (total_used / total_size) * 100.0 } else { 0.0 };
+pool.usage_percent = Some(average_usage);
+pool.used_gb = Some(total_used);
+pool.total_gb = Some(total_size);
+// Add filesystems
+for fs in &drive.filesystems {
+let filesystem = FileSystem {
+mount_point: fs.mount.clone(),
+usage_percent: Some(fs.usage_percent),
+used_gb: Some(fs.used_gb),
+total_gb: Some(fs.total_gb),
+status: Status::Ok,
+};
+pool.filesystems.push(filesystem);
+}
+pools.insert(drive.name.clone(), pool);
+}
+// Convert pools
+// Store pools
+let mut pool_list: Vec<StoragePool> = pools.into_values().collect();
+pool_list.sort_by(|a, b| a.name.cmp(&b.name));
+self.storage_pools = pool_list;
+}
/// Get default mount point for a pool name (fallback only - should use actual mount_point metrics)
fn get_mount_point_for_pool(&self, pool_name: &str) -> String {
// For device names, use the device name directly as display name
if pool_name.starts_with("nvme") || pool_name.starts_with("sd") || pool_name.starts_with("hd") {
pool_name.to_string()
} else {
// For other pools, use the pool name as-is (will be overridden by mount_point metric)
pool_name.to_string()
}
}
/// Parse storage metrics into pools and drives
fn update_storage_from_metrics(&mut self, metrics: &[&Metric]) {
let mut pools: std::collections::HashMap<String, StoragePool> = std::collections::HashMap::new();
for metric in metrics {
if metric.name.starts_with("disk_") {
if let Some(pool_name) = self.extract_pool_name(&metric.name) {
let pool = pools.entry(pool_name.clone()).or_insert_with(|| StoragePool {
name: pool_name.clone(),
mount_point: self.get_mount_point_for_pool(&pool_name), // Default fallback
pool_type: "single".to_string(), // Default, will be updated
pool_health: None,
drives: Vec::new(),
filesystems: Vec::new(),
usage_percent: None,
used_gb: None,
total_gb: None,
status: Status::Unknown,
health_status: Status::Unknown,
});
// Parse different metric types
if metric.name.contains("_usage_percent") && !metric.name.contains("_fs_") {
// Only use drive-level metrics for pool totals, not filesystem metrics
if let MetricValue::Float(usage) = metric.value {
pool.usage_percent = Some(usage);
pool.status = metric.status.clone();
}
} else if metric.name.contains("_used_gb") && !metric.name.contains("_fs_") {
// Only use drive-level metrics for pool totals, not filesystem metrics
if let MetricValue::Float(used) = metric.value {
pool.used_gb = Some(used);
}
} else if metric.name.contains("_total_gb") && !metric.name.contains("_fs_") {
// Only use drive-level metrics for pool totals, not filesystem metrics
if let MetricValue::Float(total) = metric.value {
pool.total_gb = Some(total);
}
} else if metric.name.contains("_mount_point") {
if let MetricValue::String(mount_point) = &metric.value {
pool.mount_point = mount_point.clone();
}
} else if metric.name.contains("_pool_type") {
if let MetricValue::String(pool_type) = &metric.value {
pool.pool_type = pool_type.clone();
}
} else if metric.name.contains("_pool_health") {
if let MetricValue::String(health) = &metric.value {
pool.pool_health = Some(health.clone());
pool.health_status = metric.status.clone();
}
} else if metric.name.contains("_health") && !metric.name.contains("_pool_health") {
// Handle physical drive health metrics (disk_{drive}_health)
if let MetricValue::String(health) = &metric.value {
// For physical drives, use the drive health as pool health
pool.pool_health = Some(health.clone());
pool.health_status = metric.status.clone();
}
} else if metric.name.contains("_temperature") {
if let Some(drive_name) = self.extract_drive_name(&metric.name) {
// Find existing drive or create new one
let drive_exists = pool.drives.iter().any(|d| d.name == drive_name);
if !drive_exists {
pool.drives.push(StorageDrive {
name: drive_name.clone(),
temperature: None,
wear_percent: None,
status: Status::Unknown,
});
}
if let Some(drive) = pool.drives.iter_mut().find(|d| d.name == drive_name) {
if let MetricValue::Float(temp) = metric.value {
drive.temperature = Some(temp);
drive.status = metric.status.clone();
// For physical drives, if this is the main drive, also update pool health
if drive.name == pool.name && pool.health_status == Status::Unknown {
pool.health_status = metric.status.clone();
}
}
}
}
} else if metric.name.contains("_wear_percent") {
if let Some(drive_name) = self.extract_drive_name(&metric.name) {
// For physical drives, ensure we create the drive object
let drive_exists = pool.drives.iter().any(|d| d.name == drive_name);
if !drive_exists {
pool.drives.push(StorageDrive {
name: drive_name.clone(),
temperature: None,
wear_percent: None,
status: Status::Unknown,
});
}
if let Some(drive) = pool.drives.iter_mut().find(|d| d.name == drive_name) {
if let MetricValue::Float(wear) = metric.value {
drive.wear_percent = Some(wear);
drive.status = metric.status.clone();
// For physical drives, if this is the main drive, also update pool health
if drive.name == pool.name && pool.health_status == Status::Unknown {
pool.health_status = metric.status.clone();
}
}
}
}
} else if metric.name.contains("_fs_") {
// Handle filesystem metrics for physical drive pools (disk_{pool}_fs_{fs_name}_{metric})
if let (Some(fs_name), Some(metric_type)) = self.extract_filesystem_metric(&metric.name) {
// Find or create filesystem entry
let fs_exists = pool.filesystems.iter().any(|fs| {
let fs_id = if fs.mount_point == "/" {
"root".to_string()
} else {
fs.mount_point.trim_start_matches('/').replace('/', "_")
};
fs_id == fs_name
});
if !fs_exists {
// Create filesystem entry with correct mount point
let mount_point = if metric_type == "mount_point" {
if let MetricValue::String(mount) = &metric.value {
mount.clone()
} else {
// Fallback: handle special cases
if fs_name == "root" {
"/".to_string()
} else {
format!("/{}", fs_name.replace('_', "/"))
}
}
} else {
// Fallback for non-mount_point metrics: generate mount point from fs_name
if fs_name == "root" {
"/".to_string()
} else {
format!("/{}", fs_name.replace('_', "/"))
}
};
pool.filesystems.push(FileSystem {
mount_point,
usage_percent: None,
used_gb: None,
total_gb: None,
available_gb: None,
status: Status::Unknown,
});
}
// Update the filesystem with the metric value
if let Some(filesystem) = pool.filesystems.iter_mut().find(|fs| {
let fs_id = if fs.mount_point == "/" {
"root".to_string()
} else {
fs.mount_point.trim_start_matches('/').replace('/', "_")
};
fs_id == fs_name
}) {
match metric_type.as_str() {
"usage_percent" => {
if let MetricValue::Float(usage) = metric.value {
filesystem.usage_percent = Some(usage);
filesystem.status = metric.status.clone();
}
}
"used_gb" => {
if let MetricValue::Float(used) = metric.value {
filesystem.used_gb = Some(used);
}
}
"total_gb" => {
if let MetricValue::Float(total) = metric.value {
filesystem.total_gb = Some(total);
}
}
"available_gb" => {
if let MetricValue::Float(available) = metric.value {
filesystem.available_gb = Some(available);
}
}
"mount_point" => {
if let MetricValue::String(mount) = &metric.value {
filesystem.mount_point = mount.clone();
}
}
_ => {}
}
}
}
}
}
} }
}
// Convert to sorted vec for consistent ordering
let mut pool_list: Vec<StoragePool> = pools.into_values().collect();
pool_list.sort_by(|a, b| a.name.cmp(&b.name)); // Sort alphabetically by name
self.storage_pools = pool_list;
}
/// Extract pool name from disk metric name
fn extract_pool_name(&self, metric_name: &str) -> Option<String> {
// Pattern: disk_{pool_name}_{various suffixes}
// Since pool_name can contain underscores, work backwards from known metric suffixes
if metric_name.starts_with("disk_") {
// Handle filesystem metrics: disk_{pool}_fs_{filesystem}_{metric}
if metric_name.contains("_fs_") {
if let Some(fs_pos) = metric_name.find("_fs_") {
return Some(metric_name[5..fs_pos].to_string()); // Skip "disk_", extract pool name before "_fs_"
}
}
// Handle pool-level metrics (usage_percent, used_gb, total_gb, mount_point, pool_type, pool_health)
// Use rfind to get the last occurrence of these suffixes
let pool_suffixes = ["_usage_percent", "_used_gb", "_total_gb", "_available_gb", "_mount_point", "_pool_type", "_pool_health"];
for suffix in pool_suffixes {
if let Some(suffix_pos) = metric_name.rfind(suffix) {
return Some(metric_name[5..suffix_pos].to_string()); // Skip "disk_"
}
}
// Handle physical drive metrics: disk_{drive}_health, disk_{drive}_wear_percent, and disk_{drive}_temperature
if (metric_name.ends_with("_health") && !metric_name.contains("_pool_health"))
|| metric_name.ends_with("_wear_percent")
|| metric_name.ends_with("_temperature") {
// Count underscores to distinguish physical drive metrics (disk_{drive}_metric)
// from pool drive metrics (disk_{pool}_{drive}_metric)
let underscore_count = metric_name.matches('_').count();
// disk_nvme0n1_wear_percent has 3 underscores: disk_nvme0n1_wear_percent
if underscore_count == 3 { // disk_{drive}_metric (where drive has underscores)
if let Some(suffix_pos) = metric_name.rfind("_health")
.or_else(|| metric_name.rfind("_wear_percent"))
.or_else(|| metric_name.rfind("_temperature")) {
return Some(metric_name[5..suffix_pos].to_string()); // Skip "disk_"
}
}
}
// Handle drive-specific metrics: disk_{pool}_{drive}_{metric}
let drive_suffixes = ["_temperature", "_health"];
for suffix in drive_suffixes {
if let Some(suffix_pos) = metric_name.rfind(suffix) {
// Extract pool name by finding the second-to-last underscore
let before_suffix = &metric_name[..suffix_pos];
if let Some(drive_start) = before_suffix.rfind('_') {
if drive_start > 5 {
return Some(metric_name[5..drive_start].to_string()); // Skip "disk_"
}
}
}
}
}
None
}
/// Extract filesystem name and metric type from filesystem metric names
/// Pattern: disk_{pool}_fs_{filesystem_name}_{metric_type}
fn extract_filesystem_metric(&self, metric_name: &str) -> (Option<String>, Option<String>) {
if metric_name.starts_with("disk_") && metric_name.contains("_fs_") {
// Find the _fs_ part
if let Some(fs_start) = metric_name.find("_fs_") {
let after_fs = &metric_name[fs_start + 4..]; // Skip "_fs_"
// Look for known metric suffixes (these can contain underscores)
let known_suffixes = ["usage_percent", "used_gb", "total_gb", "available_gb", "mount_point"];
for suffix in known_suffixes {
if after_fs.ends_with(suffix) {
// Extract filesystem name by removing suffix and underscore
if let Some(underscore_pos) = after_fs.rfind(&format!("_{}", suffix)) {
let fs_name = after_fs[..underscore_pos].to_string();
return (Some(fs_name), Some(suffix.to_string()));
}
}
}
}
}
(None, None)
}
/// Extract drive name from disk metric name
fn extract_drive_name(&self, metric_name: &str) -> Option<String> {
// Pattern: disk_{pool_name}_{drive_name}_{metric_type} OR disk_{drive_name}_{metric_type}
// Pool drives: disk_srv_media_sdb_temperature
// Physical drives: disk_nvme0n1_temperature
if metric_name.starts_with("disk_") {
if let Some(suffix_pos) = metric_name.rfind("_temperature")
.or_else(|| metric_name.rfind("_wear_percent"))
.or_else(|| metric_name.rfind("_health")) {
let before_suffix = &metric_name[..suffix_pos];
// Extract the last component as drive name (e.g., "sdb", "sdc", "nvme0n1")
if let Some(drive_start) = before_suffix.rfind('_') {
return Some(before_suffix[drive_start + 1..].to_string());
} else {
// Handle physical drive metrics: disk_{drive}_metric (no pool)
// Extract everything after "disk_" as the drive name
return Some(before_suffix[5..].to_string()); // Skip "disk_"
}
}
}
None
}
/// Render storage section with enhanced tree structure
fn render_storage(&self) -> Vec<Line<'_>> {
let mut lines = Vec::new();
@@ -472,269 +249,126 @@ impl SystemWidget {
let pool_label = if pool.pool_type.starts_with("drive (") {
// For physical drives, show the drive name with temperature and wear percentage if available
// Look for any drive with temp/wear data (physical drives may have drives named after the pool)
let temp_opt = pool.drives.iter()
.find_map(|d| d.temperature);
let wear_opt = pool.drives.iter()
.find_map(|d| d.wear_percent);
let mut drive_info = Vec::new();
if let Some(temp) = temp_opt {
drive_info.push(format!("T: {:.0}°C", temp));
}
if let Some(wear) = wear_opt {
drive_info.push(format!("W: {:.0}%", wear));
}
if drive_info.is_empty() {
format!("{}:", pool.name)
} else {
format!("{} {}:", pool.name, drive_info.join(" "))
}
} else if pool.pool_type == "single" {
format!("{}:", pool.mount_point)
} else {
format!("{} ({}):", pool.mount_point, pool.pool_type)
};
+let drive_info = pool.drives.iter()
+.find(|d| d.name == pool.name)
+.or_else(|| pool.drives.first());
+if let Some(drive) = drive_info {
+let mut drive_details = Vec::new();
+if let Some(temp) = drive.temperature {
+drive_details.push(format!("T: {}°C", temp as i32));
+}
+if let Some(wear) = drive.wear_percent {
+drive_details.push(format!("W: {}%", wear as i32));
+}
+if !drive_details.is_empty() {
+format!("{} ({})", pool.name, drive_details.join(" "))
+} else {
+pool.name.clone()
+}
+} else {
+pool.name.clone()
+}
+} else {
+// For mergerfs pools, show pool name with format
+format!("{} ({})", pool.mount_point, pool.pool_type)
+};
let pool_spans = StatusIcons::create_status_spans(
pool.health_status.clone(),
&pool_label
);
+let pool_spans = StatusIcons::create_status_spans(pool.status.clone(), &pool_label);
lines.push(Line::from(pool_spans));
// Skip pool health line as discussed - removed
// Total usage line (only show for multi-drive pools, skip for single physical drives)
if !pool.pool_type.starts_with("drive (") {
let usage_text = match (pool.usage_percent, pool.used_gb, pool.total_gb) {
(Some(pct), Some(used), Some(total)) => {
format!("Total: {:.0}% {:.1}GB/{:.1}GB", pct, used, total)
}
_ => "Total: —% —GB/—GB".to_string(),
};
let has_drives = !pool.drives.is_empty();
let has_filesystems = !pool.filesystems.is_empty();
let has_children = has_drives || has_filesystems;
let tree_symbol = if has_children { "├─" } else { "└─" };
let mut usage_spans = vec![
Span::raw(" "),
Span::styled(tree_symbol, Typography::tree()),
Span::raw(" "),
];
usage_spans.extend(StatusIcons::create_status_spans(pool.status.clone(), &usage_text));
lines.push(Line::from(usage_spans));
}
+// Pool total usage line
+if let (Some(usage), Some(used), Some(total)) = (pool.usage_percent, pool.used_gb, pool.total_gb) {
+let usage_spans = vec![
+Span::styled(" ├─ ", Typography::tree()),
+Span::raw(" "),
+];
+let mut usage_line_spans = usage_spans;
+usage_line_spans.extend(StatusIcons::create_status_spans(pool.status.clone(), &format!("Total: {}% {:.1}GB/{:.1}GB", usage as i32, used, total)));
+lines.push(Line::from(usage_line_spans));
+}
// Drive lines with enhanced grouping
if pool.pool_type.contains("mergerfs") && pool.drives.len() > 1 {
// Group drives by type for mergerfs pools
let (data_drives, parity_drives): (Vec<_>, Vec<_>) = pool.drives.iter().enumerate()
.partition(|(_, drive)| {
// Simple heuristic: drives with 'parity' in name or sdc (common parity drive)
!drive.name.to_lowercase().contains("parity") && drive.name != "sdc"
});
// Show data drives
if !data_drives.is_empty() {
lines.push(Line::from(vec![
Span::raw(" "),
Span::styled("├─ ", Typography::tree()),
Span::styled("Data Disks:", Typography::secondary()),
]));
for (i, (_, drive)) in data_drives.iter().enumerate() {
let is_last = i == data_drives.len() - 1;
if is_last && parity_drives.is_empty() {
self.render_drive_line(&mut lines, drive, "│ └─");
} else {
self.render_drive_line(&mut lines, drive, "│ ├─");
}
}
}
// Show parity drives
if !parity_drives.is_empty() {
lines.push(Line::from(vec![
Span::raw(" "),
Span::styled("└─ ", Typography::tree()),
Span::styled("Parity:", Typography::secondary()),
]));
for (i, (_, drive)) in parity_drives.iter().enumerate() {
let is_last = i == parity_drives.len() - 1;
if is_last {
self.render_drive_line(&mut lines, drive, " └─");
} else {
self.render_drive_line(&mut lines, drive, " ├─");
}
}
}
} else if pool.pool_type != "single" && pool.drives.len() > 1 {
// Regular drive listing for non-mergerfs multi-drive pools
for (i, drive) in pool.drives.iter().enumerate() {
let is_last = i == pool.drives.len() - 1;
let tree_symbol = if is_last { "└─" } else { "├─" };
self.render_drive_line(&mut lines, drive, tree_symbol);
}
} else if pool.pool_type.starts_with("drive (") {
// Physical drive pools: wear data shown in header, skip drive lines, show filesystems directly
for (i, filesystem) in pool.filesystems.iter().enumerate() {
let is_last = i == pool.filesystems.len() - 1;
let tree_symbol = if is_last { "└─" } else { "├─" };
let fs_text = match (filesystem.usage_percent, filesystem.used_gb, filesystem.total_gb) {
(Some(pct), Some(used), Some(total)) => {
format!("{}: {:.0}% {:.1}GB/{:.1}GB", filesystem.mount_point, pct, used, total)
}
(Some(pct), _, Some(total)) => {
format!("{}: {:.0}% —GB/{:.1}GB", filesystem.mount_point, pct, total)
}
(Some(pct), _, _) => {
format!("{}: {:.0}% —GB/—GB", filesystem.mount_point, pct)
}
(_, Some(used), Some(total)) => {
format!("{}: —% {:.1}GB/{:.1}GB", filesystem.mount_point, used, total)
}
_ => format!("{}: —% —GB/—GB", filesystem.mount_point),
};
let mut fs_spans = vec![
Span::raw(" "),
Span::styled(tree_symbol, Typography::tree()),
Span::raw(" "),
];
fs_spans.extend(StatusIcons::create_status_spans(filesystem.status.clone(), &fs_text));
lines.push(Line::from(fs_spans));
}
} else {
// Single drive or simple pools
for (i, drive) in pool.drives.iter().enumerate() {
let is_last = i == pool.drives.len() - 1;
let tree_symbol = if is_last { "└─" } else { "├─" };
self.render_drive_line(&mut lines, drive, tree_symbol);
}
}
+// Drive details for physical drives
+if pool.pool_type.starts_with("drive") {
+for drive in &pool.drives {
+if drive.name == pool.name {
+let mut drive_details = Vec::new();
+if let Some(temp) = drive.temperature {
+drive_details.push(format!("T: {}°C", temp as i32));
+}
+if let Some(wear) = drive.wear_percent {
+drive_details.push(format!("W: {}%", wear as i32));
+}
+if !drive_details.is_empty() {
+let drive_text = format!("{} {}", drive.name, drive_details.join(" "));
+let drive_spans = vec![
+Span::styled(" └─ ", Typography::tree()),
+Span::raw(" "),
+];
+let mut drive_line_spans = drive_spans;
+drive_line_spans.extend(StatusIcons::create_status_spans(drive.status.clone(), &drive_text));
+lines.push(Line::from(drive_line_spans));
+}
+}
+}
+} else {
+// For mergerfs pools, show data drives and parity drives in tree structure
+if !pool.drives.is_empty() {
+// Group drives by type based on naming conventions or show all as data drives
+let (data_drives, parity_drives): (Vec<_>, Vec<_>) = pool.drives.iter()
+.partition(|d| !d.name.contains("parity") && !d.name.starts_with("sdc"));
+if !data_drives.is_empty() {
+lines.push(Line::from(vec![
+Span::styled(" ├─ Data Disks:", Typography::secondary())
+]));
+for (i, drive) in data_drives.iter().enumerate() {
+render_pool_drive(drive, i == data_drives.len() - 1 && parity_drives.is_empty(), &mut lines);
+}
+}
+if !parity_drives.is_empty() {
+lines.push(Line::from(vec![
+Span::styled(" └─ Parity:", Typography::secondary())
+]));
+for (i, drive) in parity_drives.iter().enumerate() {
+render_pool_drive(drive, i == parity_drives.len() - 1, &mut lines);
+}
+}
+}
+}
}
lines
}
/// Helper to render a single drive line
fn render_drive_line<'a>(&self, lines: &mut Vec<Line<'a>>, drive: &StorageDrive, tree_symbol: &'a str) {
let mut drive_info = Vec::new();
if let Some(temp) = drive.temperature {
drive_info.push(format!("T: {:.0}°C", temp));
}
if let Some(wear) = drive.wear_percent {
drive_info.push(format!("W: {:.0}%", wear));
}
// Always show drive name with info, or just name if no info available
let drive_text = if drive_info.is_empty() {
drive.name.clone()
} else {
format!("{} {}", drive.name, drive_info.join(" "))
};
let mut drive_spans = vec![
Span::raw(" "),
Span::styled(tree_symbol, Typography::tree()),
Span::raw(" "),
];
drive_spans.extend(StatusIcons::create_status_spans(drive.status.clone(), &drive_text));
lines.push(Line::from(drive_spans));
}
}
impl Widget for SystemWidget {
fn update_from_metrics(&mut self, metrics: &[&Metric]) {
self.has_data = !metrics.is_empty();
for metric in metrics {
match metric.name.as_str() {
// NixOS metrics
"system_nixos_build" => {
if let MetricValue::String(build) = &metric.value {
self.nixos_build = Some(build.clone());
}
}
"system_config_hash" => {
if let MetricValue::String(hash) = &metric.value {
self.config_hash = Some(hash.clone());
}
}
"agent_version" => {
if let MetricValue::String(version) = &metric.value {
self.agent_hash = Some(version.clone());
}
}
// CPU metrics
"cpu_load_1min" => {
if let MetricValue::Float(load) = metric.value {
self.cpu_load_1min = Some(load);
self.cpu_status = metric.status.clone();
}
}
"cpu_load_5min" => {
if let MetricValue::Float(load) = metric.value {
self.cpu_load_5min = Some(load);
}
}
"cpu_load_15min" => {
if let MetricValue::Float(load) = metric.value {
self.cpu_load_15min = Some(load);
}
}
"cpu_frequency_mhz" => {
if let MetricValue::Float(freq) = metric.value {
self.cpu_frequency = Some(freq);
}
}
// Memory metrics
"memory_usage_percent" => {
if let MetricValue::Float(usage) = metric.value {
self.memory_usage_percent = Some(usage);
self.memory_status = metric.status.clone();
}
}
"memory_used_gb" => {
if let MetricValue::Float(used) = metric.value {
self.memory_used_gb = Some(used);
}
}
"memory_total_gb" => {
if let MetricValue::Float(total) = metric.value {
self.memory_total_gb = Some(total);
}
}
// Tmpfs metrics
"memory_tmp_usage_percent" => {
if let MetricValue::Float(usage) = metric.value {
self.tmp_usage_percent = Some(usage);
self.tmp_status = metric.status.clone();
}
}
"memory_tmp_used_gb" => {
if let MetricValue::Float(used) = metric.value {
self.tmp_used_gb = Some(used);
}
}
"memory_tmp_total_gb" => {
if let MetricValue::Float(total) = metric.value {
self.tmp_total_gb = Some(total);
}
}
_ => {}
}
}
// Update storage from all disk metrics
self.update_storage_from_metrics(metrics);
} }
}
+/// Helper function to render a drive in a storage pool
+fn render_pool_drive(drive: &StorageDrive, is_last: bool, lines: &mut Vec<Line<'_>>) {
+let tree_symbol = if is_last { " └─" } else { " ├─" };
+let mut drive_details = Vec::new();
+if let Some(temp) = drive.temperature {
+drive_details.push(format!("T: {}°C", temp as i32));
+}
+if let Some(wear) = drive.wear_percent {
+drive_details.push(format!("W: {}%", wear as i32));
+}
+let drive_text = if !drive_details.is_empty() {
+format!("{} {}", drive.name, drive_details.join(" "))
+} else {
+format!("{}", drive.name)
+};
+let mut drive_spans = vec![
+Span::styled(tree_symbol, Typography::tree()),
+Span::raw(" "),
+];
+drive_spans.extend(StatusIcons::create_status_spans(drive.status.clone(), &drive_text));
+lines.push(Line::from(drive_spans));
+}
impl SystemWidget {


@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-shared"
-version = "0.1.132"
+version = "0.1.133"
edition = "2021"
[dependencies]