diff --git a/Cargo.lock b/Cargo.lock index 11a2738..5234947 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "cm-dashboard" -version = "0.1.144" +version = "0.1.146" dependencies = [ "anyhow", "chrono", @@ -301,7 +301,7 @@ dependencies = [ [[package]] name = "cm-dashboard-agent" -version = "0.1.144" +version = "0.1.146" dependencies = [ "anyhow", "async-trait", @@ -324,7 +324,7 @@ dependencies = [ [[package]] name = "cm-dashboard-shared" -version = "0.1.144" +version = "0.1.146" dependencies = [ "chrono", "serde", diff --git a/agent/Cargo.toml b/agent/Cargo.toml index 836e962..7213218 100644 --- a/agent/Cargo.toml +++ b/agent/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cm-dashboard-agent" -version = "0.1.145" +version = "0.1.146" edition = "2021" [dependencies] diff --git a/agent/src/collectors/systemd.rs b/agent/src/collectors/systemd.rs index c2a6ef7..b30accd 100644 --- a/agent/src/collectors/systemd.rs +++ b/agent/src/collectors/systemd.rs @@ -1,6 +1,6 @@ use anyhow::Result; use async_trait::async_trait; -use cm_dashboard_shared::{AgentData, ServiceData, Status}; +use cm_dashboard_shared::{AgentData, ServiceData, SubServiceData, SubServiceMetric, Status}; use std::process::Command; use std::sync::RwLock; use std::time::Instant; @@ -99,15 +99,9 @@ impl SystemdCollector { let memory_mb = self.get_service_memory_usage(service_name).await.unwrap_or(0.0); let disk_gb = self.get_service_disk_usage(service_name).await.unwrap_or(0.0); - let service_info = ServiceInfo { - name: service_name.clone(), - status: active_status.clone(), - memory_mb, - disk_gb, - }; - services.push(service_info); + let mut sub_services = Vec::new(); - // Sub-service metrics for specific services + // Collect sub-services for specific services if service_name.contains("nginx") && active_status == "active" { let nginx_sites = self.get_nginx_site_metrics(); for (site_name, latency_ms) in
nginx_sites { @@ -117,11 +111,17 @@ impl SystemdCollector { "failed" }; - services.push(ServiceInfo { - name: site_name, - status: site_status.to_string(), - memory_mb: 0.0, - disk_gb: latency_ms / 1000.0, // Store latency in disk_gb field as workaround + let mut metrics = Vec::new(); + metrics.push(SubServiceMetric { + label: "latency_ms".to_string(), + value: latency_ms, + unit: Some("ms".to_string()), + }); + + sub_services.push(SubServiceData { + name: site_name.clone(), + service_status: self.calculate_service_status(&site_name, &site_status), + metrics, }); } } @@ -129,14 +129,35 @@ impl SystemdCollector { if service_name.contains("docker") && active_status == "active" { let docker_containers = self.get_docker_containers(); for (container_name, container_status) in docker_containers { - services.push(ServiceInfo { - name: container_name, - status: container_status, - memory_mb: 0.0, - disk_gb: 0.0, + // For now, docker containers have no additional metrics + // Future: could add memory_mb, cpu_percent, restart_count, etc. 
+ let metrics = Vec::new(); + + sub_services.push(SubServiceData { + name: container_name.clone(), + service_status: self.calculate_service_status(&container_name, &container_status), + metrics, }); } } + + let service_info = ServiceInfo { + name: service_name.clone(), + status: active_status.clone(), + memory_mb, + disk_gb, + }; + services.push(service_info); + + // Add to AgentData with hierarchical structure + agent_data.services.push(ServiceData { + name: service_name.clone(), + memory_mb, + disk_gb, + user_stopped: false, // TODO: Integrate with service tracker + service_status: self.calculate_service_status(service_name, &active_status), + sub_services, + }); } Err(e) => { debug!("Failed to get status for service {}: {}", service_name, e); @@ -148,19 +169,7 @@ impl SystemdCollector { { let mut state = self.state.write().unwrap(); state.last_collection = Some(start_time); - state.services = services.clone(); - } - - // Populate AgentData with service information - for service in services { - agent_data.services.push(ServiceData { - name: service.name.clone(), - status: service.status.clone(), - memory_mb: service.memory_mb, - disk_gb: service.disk_gb, - user_stopped: false, // TODO: Integrate with service tracker - service_status: self.calculate_service_status(&service.name, &service.status), - }); + state.services = services; } let elapsed = start_time.elapsed(); @@ -832,11 +841,11 @@ impl Collector for SystemdCollector { for service in cached_services { agent_data.services.push(ServiceData { name: service.name.clone(), - status: service.status.clone(), memory_mb: service.memory_mb, disk_gb: service.disk_gb, user_stopped: false, // TODO: Integrate with service tracker service_status: self.calculate_service_status(&service.name, &service.status), + sub_services: Vec::new(), // Cached services don't have sub-services }); } Ok(()) diff --git a/dashboard/Cargo.toml b/dashboard/Cargo.toml index feb50c5..a56e07e 100644 --- a/dashboard/Cargo.toml +++ 
b/dashboard/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cm-dashboard" -version = "0.1.145" +version = "0.1.146" edition = "2021" [dependencies] diff --git a/dashboard/src/ui/widgets/services.rs b/dashboard/src/ui/widgets/services.rs index 95af6ad..7f77ccd 100644 --- a/dashboard/src/ui/widgets/services.rs +++ b/dashboard/src/ui/widgets/services.rs @@ -28,10 +28,9 @@ pub struct ServicesWidget { #[derive(Clone)] struct ServiceInfo { - status: String, memory_mb: Option<f32>, disk_gb: Option<f32>, - latency_ms: Option<f32>, + metrics: Vec<(String, f32, Option<String>)>, // (label, value, unit) widget_status: Status, } @@ -113,10 +112,15 @@ impl ServicesWidget { name.to_string() }; - // Parent services always show actual systemctl status + // Convert Status enum to display text let status_str = match info.widget_status { - Status::Pending => "pending".to_string(), - _ => info.status.clone(), // Use actual status from agent (active/inactive/failed) + Status::Ok => "active", + Status::Inactive => "inactive", + Status::Critical => "failed", + Status::Pending => "pending", + Status::Warning => "warning", + Status::Unknown => "unknown", + Status::Offline => "offline", }; format!( @@ -153,15 +157,25 @@ impl ServicesWidget { Status::Offline => Theme::muted_text(), }; - // For sub-services, prefer latency if available - let status_str = if let Some(latency) = info.latency_ms { - if latency < 0.0 { - "timeout".to_string() - } else { - format!("{:.0}ms", latency) + // Display metrics or status for sub-services + let status_str = if !info.metrics.is_empty() { + // Show first metric with label and unit + let (label, value, unit) = &info.metrics[0]; + match unit { + Some(u) => format!("{}: {:.1} {}", label, value, u), + None => format!("{}: {:.1}", label, value), } } else { - info.status.clone() + // Convert Status enum to display text for sub-services + match info.widget_status { + Status::Ok => "active", + Status::Inactive => "inactive", + Status::Critical => "failed", + Status::Pending => "pending", +
Status::Warning => "warning", + Status::Unknown => "unknown", + Status::Offline => "offline", + }.to_string() }; let tree_symbol = if is_last { "└─" } else { "├─" }; @@ -262,18 +276,48 @@ impl Widget for ServicesWidget { self.sub_services.clear(); for service in &agent_data.services { - let service_info = ServiceInfo { - status: service.status.clone(), + // Store parent service + let parent_info = ServiceInfo { memory_mb: Some(service.memory_mb), disk_gb: Some(service.disk_gb), - latency_ms: None, - widget_status: Status::Ok, + metrics: Vec::new(), // Parent services don't have custom metrics + widget_status: service.service_status, }; + self.parent_services.insert(service.name.clone(), parent_info); - self.parent_services.insert(service.name.clone(), service_info); + // Process sub-services if any + if !service.sub_services.is_empty() { + let mut sub_list = Vec::new(); + for sub_service in &service.sub_services { + // Convert metrics to display format + let metrics: Vec<(String, f32, Option<String>)> = sub_service.metrics.iter() + .map(|m| (m.label.clone(), m.value, m.unit.clone())) + .collect(); + + let sub_info = ServiceInfo { + memory_mb: None, // Not used for sub-services + disk_gb: None, // Not used for sub-services + metrics, + widget_status: sub_service.service_status, + }; + sub_list.push((sub_service.name.clone(), sub_info)); + } + self.sub_services.insert(service.name.clone(), sub_list); + } } - self.status = Status::Ok; + // Aggregate status from all services + let mut all_statuses = Vec::new(); + all_statuses.extend(self.parent_services.values().map(|info| info.widget_status)); + for sub_list in self.sub_services.values() { + all_statuses.extend(sub_list.iter().map(|(_, info)| info.widget_status)); + } + + self.status = if all_statuses.is_empty() { + Status::Unknown + } else { + Status::aggregate(&all_statuses) + }; } } @@ -294,15 +338,13 @@ impl ServicesWidget { self.parent_services .entry(parent_service) .or_insert(ServiceInfo { - status:
"unknown".to_string(), memory_mb: None, disk_gb: None, - latency_ms: None, + metrics: Vec::new(), widget_status: Status::Unknown, }); if metric.name.ends_with("_status") { - service_info.status = metric.value.as_string(); service_info.widget_status = metric.status; } else if metric.name.ends_with("_memory_mb") { if let Some(memory) = metric.value.as_f32() { @@ -331,10 +373,9 @@ impl ServicesWidget { sub_service_list.push(( sub_name.clone(), ServiceInfo { - status: "unknown".to_string(), memory_mb: None, disk_gb: None, - latency_ms: None, + metrics: Vec::new(), widget_status: Status::Unknown, }, )); @@ -342,7 +383,6 @@ }; if metric.name.ends_with("_status") { - sub_service_info.status = metric.value.as_string(); sub_service_info.widget_status = metric.status; } else if metric.name.ends_with("_memory_mb") { if let Some(memory) = metric.value.as_f32() { @@ -352,11 +392,6 @@ if let Some(disk) = metric.value.as_f32() { sub_service_info.disk_gb = Some(disk); } - } else if metric.name.ends_with("_latency_ms") { - if let Some(latency) = metric.value.as_f32() { - sub_service_info.latency_ms = Some(latency); - sub_service_info.widget_status = metric.status; - } } } } diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 5f69677..bfa01b0 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cm-dashboard-shared" -version = "0.1.145" +version = "0.1.146" edition = "2021" [dependencies] diff --git a/shared/src/agent_data.rs b/shared/src/agent_data.rs index 5dc8b0c..181c2ea 100644 --- a/shared/src/agent_data.rs +++ b/shared/src/agent_data.rs @@ -111,11 +111,27 @@ pub struct PoolDriveData { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ServiceData { pub name: String, - pub status: String, // "active", "inactive", "failed" pub memory_mb: f32, pub disk_gb: f32, pub user_stopped: bool, pub service_status: Status, + pub sub_services: Vec<SubServiceData>, +} + +/// Sub-service data (nginx sites, docker
containers, etc.) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SubServiceData { + pub name: String, + pub service_status: Status, + pub metrics: Vec<SubServiceMetric>, +} + +/// Individual metric for a sub-service +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SubServiceMetric { + pub label: String, + pub value: f32, + pub unit: Option<String>, } /// Backup system data