Fix service status icon mismatch with single-source-of-truth architecture
All checks were successful
Build and Release / build-and-release (push) Successful in 1m8s

- Remove the duplicate status string field from ServiceData and the dashboard's internal ServiceInfo
- Use the Status enum as the single source of truth for service status
- Agent calculates the Status enum using calculate_service_status() (sketched below)
- Dashboard converts the Status enum to display text for the UI
- Implement a flexible metrics system for sub-services (label/value/unit)
- Fix status icon/text mismatches (inactive services now show gray circles)
- Ensure service icons and status text always agree
Christoffer Martinsson 2025-11-24 22:43:22 +01:00
parent eb892096d9
commit 75ec190b93
7 changed files with 129 additions and 69 deletions
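
The commit message references calculate_service_status(), which lives outside this diff. As a rough sketch of the kind of mapping it performs -- the match arms below are illustrative assumptions, not the shipped code -- using the Status variants visible in the dashboard diff further down:

use cm_dashboard_shared::Status;

// Illustrative sketch only: the real calculate_service_status() is not part
// of this commit. It maps a raw systemctl state string to the Status enum
// that now serves as the single source of truth. The name parameter would
// allow per-service overrides (e.g. user-stopped services).
fn calculate_service_status(_service_name: &str, active_status: &str) -> Status {
    match active_status {
        "active" => Status::Ok,
        "inactive" => Status::Inactive,
        "failed" => Status::Critical,
        _ => Status::Unknown,
    }
}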

Cargo.lock (generated)
View File

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 [[package]]
 name = "cm-dashboard"
-version = "0.1.144"
+version = "0.1.145"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.144"
+version = "0.1.145"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -324,7 +324,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.144"
+version = "0.1.145"
 dependencies = [
  "chrono",
  "serde",

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.145"
+version = "0.1.146"
 edition = "2021"

 [dependencies]

View File

@@ -1,6 +1,6 @@
 use anyhow::Result;
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, ServiceData, Status};
+use cm_dashboard_shared::{AgentData, ServiceData, SubServiceData, SubServiceMetric, Status};
 use std::process::Command;
 use std::sync::RwLock;
 use std::time::Instant;
@@ -99,15 +99,9 @@ impl SystemdCollector {
                 let memory_mb = self.get_service_memory_usage(service_name).await.unwrap_or(0.0);
                 let disk_gb = self.get_service_disk_usage(service_name).await.unwrap_or(0.0);

-                let service_info = ServiceInfo {
-                    name: service_name.clone(),
-                    status: active_status.clone(),
-                    memory_mb,
-                    disk_gb,
-                };
-                services.push(service_info);
+                let mut sub_services = Vec::new();

-                // Sub-service metrics for specific services
+                // Collect sub-services for specific services
                 if service_name.contains("nginx") && active_status == "active" {
                     let nginx_sites = self.get_nginx_site_metrics();
                     for (site_name, latency_ms) in nginx_sites {
@@ -117,11 +111,17 @@ impl SystemdCollector {
                             "failed"
                         };

-                        services.push(ServiceInfo {
-                            name: site_name,
-                            status: site_status.to_string(),
-                            memory_mb: 0.0,
-                            disk_gb: latency_ms / 1000.0, // Store latency in disk_gb field as workaround
-                        });
+                        let mut metrics = Vec::new();
+                        metrics.push(SubServiceMetric {
+                            label: "latency_ms".to_string(),
+                            value: latency_ms,
+                            unit: Some("ms".to_string()),
+                        });
+                        sub_services.push(SubServiceData {
+                            name: site_name.clone(),
+                            service_status: self.calculate_service_status(&site_name, &site_status),
+                            metrics,
+                        });
                     }
                 }
@@ -129,14 +129,35 @@ impl SystemdCollector {
                 if service_name.contains("docker") && active_status == "active" {
                     let docker_containers = self.get_docker_containers();
                     for (container_name, container_status) in docker_containers {
-                        services.push(ServiceInfo {
-                            name: container_name,
-                            status: container_status,
-                            memory_mb: 0.0,
-                            disk_gb: 0.0,
-                        });
+                        // For now, docker containers have no additional metrics
+                        // Future: could add memory_mb, cpu_percent, restart_count, etc.
+                        let metrics = Vec::new();
+                        sub_services.push(SubServiceData {
+                            name: container_name.clone(),
+                            service_status: self.calculate_service_status(&container_name, &container_status),
+                            metrics,
+                        });
                     }
                 }
+                let service_info = ServiceInfo {
+                    name: service_name.clone(),
+                    status: active_status.clone(),
+                    memory_mb,
+                    disk_gb,
+                };
+                services.push(service_info);
+                // Add to AgentData with hierarchical structure
+                agent_data.services.push(ServiceData {
+                    name: service_name.clone(),
+                    memory_mb,
+                    disk_gb,
+                    user_stopped: false, // TODO: Integrate with service tracker
+                    service_status: self.calculate_service_status(service_name, &active_status),
+                    sub_services,
+                });
             }
             Err(e) => {
                 debug!("Failed to get status for service {}: {}", service_name, e);
@@ -148,19 +169,7 @@ impl SystemdCollector {
         {
             let mut state = self.state.write().unwrap();
             state.last_collection = Some(start_time);
-            state.services = services.clone();
-        }
-        // Populate AgentData with service information
-        for service in services {
-            agent_data.services.push(ServiceData {
-                name: service.name.clone(),
-                status: service.status.clone(),
-                memory_mb: service.memory_mb,
-                disk_gb: service.disk_gb,
-                user_stopped: false, // TODO: Integrate with service tracker
-                service_status: self.calculate_service_status(&service.name, &service.status),
-            });
+            state.services = services;
         }
         let elapsed = start_time.elapsed();
@@ -832,11 +841,11 @@ impl Collector for SystemdCollector {
         for service in cached_services {
             agent_data.services.push(ServiceData {
                 name: service.name.clone(),
-                status: service.status.clone(),
                 memory_mb: service.memory_mb,
                 disk_gb: service.disk_gb,
                 user_stopped: false, // TODO: Integrate with service tracker
                 service_status: self.calculate_service_status(&service.name, &service.status),
+                sub_services: Vec::new(), // Cached services don't have sub-services
             });
         }
         Ok(())
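
The net effect of this collector change: each systemd unit now yields exactly one ServiceData entry, with nginx sites and docker containers nested under it rather than flattened into the same list (and latency no longer smuggled through the disk_gb field). A sketch of what the collector now builds for an nginx unit with one monitored site -- names and numbers below are made up for illustration:

use cm_dashboard_shared::{ServiceData, Status, SubServiceData, SubServiceMetric};

fn main() {
    // Hypothetical example of the hierarchy the collector now emits:
    // one parent ServiceData per systemd unit, sub-services nested inside.
    let nginx = ServiceData {
        name: "nginx".to_string(), // hypothetical unit name
        memory_mb: 42.0,           // made-up values
        disk_gb: 0.1,
        user_stopped: false,
        service_status: Status::Ok,
        sub_services: vec![SubServiceData {
            name: "example.com".to_string(), // hypothetical site
            service_status: Status::Ok,
            metrics: vec![SubServiceMetric {
                label: "latency_ms".to_string(),
                value: 12.0,
                unit: Some("ms".to_string()),
            }],
        }],
    };
    println!("{} has {} sub-service(s)", nginx.name, nginx.sub_services.len());
}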

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.145"
+version = "0.1.146"
 edition = "2021"

 [dependencies]

View File

@@ -28,10 +28,9 @@ pub struct ServicesWidget {
 #[derive(Clone)]
 struct ServiceInfo {
-    status: String,
     memory_mb: Option<f32>,
     disk_gb: Option<f32>,
-    latency_ms: Option<f32>,
+    metrics: Vec<(String, f32, Option<String>)>, // (label, value, unit)
     widget_status: Status,
 }
@@ -113,10 +112,15 @@ impl ServicesWidget {
             name.to_string()
         };

-        // Parent services always show actual systemctl status
+        // Convert Status enum to display text
         let status_str = match info.widget_status {
-            Status::Pending => "pending".to_string(),
-            _ => info.status.clone(), // Use actual status from agent (active/inactive/failed)
+            Status::Ok => "active",
+            Status::Inactive => "inactive",
+            Status::Critical => "failed",
+            Status::Pending => "pending",
+            Status::Warning => "warning",
+            Status::Unknown => "unknown",
+            Status::Offline => "offline",
         };

         format!(
@@ -153,15 +157,25 @@ impl ServicesWidget {
             Status::Offline => Theme::muted_text(),
         };

-        // For sub-services, prefer latency if available
-        let status_str = if let Some(latency) = info.latency_ms {
-            if latency < 0.0 {
-                "timeout".to_string()
-            } else {
-                format!("{:.0}ms", latency)
+        // Display metrics or status for sub-services
+        let status_str = if !info.metrics.is_empty() {
+            // Show first metric with label and unit
+            let (label, value, unit) = &info.metrics[0];
+            match unit {
+                Some(u) => format!("{}: {:.1} {}", label, value, u),
+                None => format!("{}: {:.1}", label, value),
             }
         } else {
-            info.status.clone()
+            // Convert Status enum to display text for sub-services
+            match info.widget_status {
+                Status::Ok => "active",
+                Status::Inactive => "inactive",
+                Status::Critical => "failed",
+                Status::Pending => "pending",
+                Status::Warning => "warning",
+                Status::Unknown => "unknown",
+                Status::Offline => "offline",
+            }.to_string()
         };

         let tree_symbol = if is_last { "└─" } else { "├─" };
@@ -262,18 +276,48 @@ impl Widget for ServicesWidget {
         self.sub_services.clear();
         for service in &agent_data.services {
-            let service_info = ServiceInfo {
-                status: service.status.clone(),
+            // Store parent service
+            let parent_info = ServiceInfo {
                 memory_mb: Some(service.memory_mb),
                 disk_gb: Some(service.disk_gb),
-                latency_ms: None,
-                widget_status: Status::Ok,
+                metrics: Vec::new(), // Parent services don't have custom metrics
+                widget_status: service.service_status,
             };
-            self.parent_services.insert(service.name.clone(), service_info);
+            self.parent_services.insert(service.name.clone(), parent_info);

+            // Process sub-services if any
+            if !service.sub_services.is_empty() {
+                let mut sub_list = Vec::new();
+                for sub_service in &service.sub_services {
+                    // Convert metrics to display format
+                    let metrics: Vec<(String, f32, Option<String>)> = sub_service.metrics.iter()
+                        .map(|m| (m.label.clone(), m.value, m.unit.clone()))
+                        .collect();
+                    let sub_info = ServiceInfo {
+                        memory_mb: None, // Not used for sub-services
+                        disk_gb: None,   // Not used for sub-services
+                        metrics,
+                        widget_status: sub_service.service_status,
+                    };
+                    sub_list.push((sub_service.name.clone(), sub_info));
+                }
+                self.sub_services.insert(service.name.clone(), sub_list);
+            }
         }

-        self.status = Status::Ok;
+        // Aggregate status from all services
+        let mut all_statuses = Vec::new();
+        all_statuses.extend(self.parent_services.values().map(|info| info.widget_status));
+        for sub_list in self.sub_services.values() {
+            all_statuses.extend(sub_list.iter().map(|(_, info)| info.widget_status));
+        }
+        self.status = if all_statuses.is_empty() {
+            Status::Unknown
+        } else {
+            Status::aggregate(&all_statuses)
+        };
     }
 }
@@ -294,15 +338,13 @@ impl ServicesWidget {
         self.parent_services
             .entry(parent_service)
             .or_insert(ServiceInfo {
-                status: "unknown".to_string(),
                 memory_mb: None,
                 disk_gb: None,
-                latency_ms: None,
+                metrics: Vec::new(),
                 widget_status: Status::Unknown,
             });
         if metric.name.ends_with("_status") {
-            service_info.status = metric.value.as_string();
             service_info.widget_status = metric.status;
         } else if metric.name.ends_with("_memory_mb") {
             if let Some(memory) = metric.value.as_f32() {
@@ -331,10 +373,9 @@ impl ServicesWidget {
                 sub_service_list.push((
                     sub_name.clone(),
                     ServiceInfo {
-                        status: "unknown".to_string(),
                         memory_mb: None,
                         disk_gb: None,
-                        latency_ms: None,
+                        metrics: Vec::new(),
                         widget_status: Status::Unknown,
                     },
                 ));
@@ -342,7 +383,6 @@ impl ServicesWidget {
         };
         if metric.name.ends_with("_status") {
-            sub_service_info.status = metric.value.as_string();
             sub_service_info.widget_status = metric.status;
         } else if metric.name.ends_with("_memory_mb") {
             if let Some(memory) = metric.value.as_f32() {
@@ -352,11 +392,6 @@ impl ServicesWidget {
             if let Some(disk) = metric.value.as_f32() {
                 sub_service_info.disk_gb = Some(disk);
             }
-        } else if metric.name.ends_with("_latency_ms") {
-            if let Some(latency) = metric.value.as_f32() {
-                sub_service_info.latency_ms = Some(latency);
-                sub_service_info.widget_status = metric.status;
-            }
         }
     }
 }
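
Status::aggregate() is called in the update path above but defined elsewhere in the shared crate. A plausible worst-status-wins reduction, purely to sketch the semantics the widget relies on -- the severity ordering here is an assumption, not the actual implementation (Status derives Clone, and the by-value copies in the diff suggest it is also Copy):

use cm_dashboard_shared::Status;

// Assumed severity ranking -- the real ordering lives in the shared crate.
fn severity(status: Status) -> u8 {
    match status {
        Status::Ok => 0,
        Status::Inactive => 1,
        Status::Pending => 2,
        Status::Unknown => 3,
        Status::Warning => 4,
        Status::Offline => 5,
        Status::Critical => 6,
    }
}

// Sketch of the aggregation the widget relies on: the worst status wins.
fn aggregate(statuses: &[Status]) -> Status {
    statuses
        .iter()
        .copied()
        .max_by_key(|s| severity(*s))
        .unwrap_or(Status::Unknown)
}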

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.145"
+version = "0.1.146"
 edition = "2021"

 [dependencies]

View File

@@ -111,11 +111,27 @@ pub struct PoolDriveData {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ServiceData {
     pub name: String,
-    pub status: String, // "active", "inactive", "failed"
     pub memory_mb: f32,
     pub disk_gb: f32,
     pub user_stopped: bool,
     pub service_status: Status,
+    pub sub_services: Vec<SubServiceData>,
+}
+
+/// Sub-service data (nginx sites, docker containers, etc.)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SubServiceData {
+    pub name: String,
+    pub service_status: Status,
+    pub metrics: Vec<SubServiceMetric>,
+}
+
+/// Individual metric for a sub-service
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SubServiceMetric {
+    pub label: String,
+    pub value: f32,
+    pub unit: Option<String>,
 }

 /// Backup system data
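
Because all three types derive Serialize and Deserialize, the whole hierarchy travels agent-to-dashboard as a single payload. A quick round-trip sketch, assuming serde_json is available in the workspace (only serde is visible in the Cargo.lock excerpt above):

use cm_dashboard_shared::SubServiceMetric;

fn main() -> Result<(), serde_json::Error> {
    let metric = SubServiceMetric {
        label: "latency_ms".to_string(),
        value: 12.0,
        unit: Some("ms".to_string()),
    };
    let json = serde_json::to_string(&metric)?;
    // Typically: {"label":"latency_ms","value":12.0,"unit":"ms"}
    let round_trip: SubServiceMetric = serde_json::from_str(&json)?;
    assert_eq!(round_trip.label, metric.label);
    Ok(())
}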