Fix service status icon mismatch with single-source-of-truth architecture
All checks were successful
Build and Release / build-and-release (push) Successful in 1m8s
- Remove duplicate status string fields from ServiceData and SubServiceData
- Use only the Status enum as the single source of truth for service status
- Agent calculates the Status enum using calculate_service_status()
- Dashboard converts the Status enum to display text for the UI
- Implement flexible metrics system for sub-services with label/value/unit
- Fix status icon/text mismatches (inactive services now show gray circles)
- Ensure service icons and status text always agree
parent: eb892096d9
commit: 75ec190b93
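The core idea, sketched below before the diff itself: the agent ships only a `Status` enum and the dashboard derives both icon color and label from it, so the two can no longer drift apart. The variant list matches the match arms added in the widget; the enum definition and its derives are assumptions, since the enum itself is not part of this change.

// Sketch of the shared enum (definition assumed; only the variants are confirmed by the diff).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Status {
    Ok,
    Warning,
    Critical,
    Pending,
    Inactive,
    Unknown,
    Offline,
}

/// Dashboard-side conversion: a single mapping turns the enum into UI text,
/// mirroring the match blocks added in the widget code below.
fn status_text(status: Status) -> &'static str {
    match status {
        Status::Ok => "active",
        Status::Inactive => "inactive",
        Status::Critical => "failed",
        Status::Pending => "pending",
        Status::Warning => "warning",
        Status::Unknown => "unknown",
        Status::Offline => "offline",
    }
}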
Cargo.lock (generated, 6 changed lines)
@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 
 [[package]]
 name = "cm-dashboard"
-version = "0.1.144"
+version = "0.1.145"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.144"
+version = "0.1.145"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -324,7 +324,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.144"
+version = "0.1.145"
 dependencies = [
  "chrono",
  "serde",
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.145"
+version = "0.1.146"
 edition = "2021"
 
 [dependencies]
@@ -1,6 +1,6 @@
 use anyhow::Result;
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, ServiceData, Status};
+use cm_dashboard_shared::{AgentData, ServiceData, SubServiceData, SubServiceMetric, Status};
 use std::process::Command;
 use std::sync::RwLock;
 use std::time::Instant;
@@ -99,15 +99,9 @@ impl SystemdCollector {
         let memory_mb = self.get_service_memory_usage(service_name).await.unwrap_or(0.0);
         let disk_gb = self.get_service_disk_usage(service_name).await.unwrap_or(0.0);
 
-        let service_info = ServiceInfo {
-            name: service_name.clone(),
-            status: active_status.clone(),
-            memory_mb,
-            disk_gb,
-        };
-        services.push(service_info);
+        let mut sub_services = Vec::new();
 
-        // Sub-service metrics for specific services
+        // Collect sub-services for specific services
         if service_name.contains("nginx") && active_status == "active" {
             let nginx_sites = self.get_nginx_site_metrics();
             for (site_name, latency_ms) in nginx_sites {
@@ -117,11 +111,17 @@ impl SystemdCollector {
                     "failed"
                 };
 
-                services.push(ServiceInfo {
-                    name: site_name,
-                    status: site_status.to_string(),
-                    memory_mb: 0.0,
-                    disk_gb: latency_ms / 1000.0, // Store latency in disk_gb field as workaround
+                let mut metrics = Vec::new();
+                metrics.push(SubServiceMetric {
+                    label: "latency_ms".to_string(),
+                    value: latency_ms,
+                    unit: Some("ms".to_string()),
                 });
+
+                sub_services.push(SubServiceData {
+                    name: site_name.clone(),
+                    service_status: self.calculate_service_status(&site_name, &site_status),
+                    metrics,
+                });
             }
         }
@@ -129,14 +129,35 @@ impl SystemdCollector {
         if service_name.contains("docker") && active_status == "active" {
             let docker_containers = self.get_docker_containers();
             for (container_name, container_status) in docker_containers {
-                services.push(ServiceInfo {
-                    name: container_name,
-                    status: container_status,
-                    memory_mb: 0.0,
-                    disk_gb: 0.0,
+                // For now, docker containers have no additional metrics
+                // Future: could add memory_mb, cpu_percent, restart_count, etc.
+                let metrics = Vec::new();
+
+                sub_services.push(SubServiceData {
+                    name: container_name.clone(),
+                    service_status: self.calculate_service_status(&container_name, &container_status),
+                    metrics,
                 });
             }
         }
+
+        let service_info = ServiceInfo {
+            name: service_name.clone(),
+            status: active_status.clone(),
+            memory_mb,
+            disk_gb,
+        };
+        services.push(service_info);
+
+        // Add to AgentData with hierarchical structure
+        agent_data.services.push(ServiceData {
+            name: service_name.clone(),
+            memory_mb,
+            disk_gb,
+            user_stopped: false, // TODO: Integrate with service tracker
+            service_status: self.calculate_service_status(service_name, &active_status),
+            sub_services,
+        });
     }
     Err(e) => {
         debug!("Failed to get status for service {}: {}", service_name, e);
@@ -148,19 +169,7 @@ impl SystemdCollector {
         {
             let mut state = self.state.write().unwrap();
             state.last_collection = Some(start_time);
-            state.services = services.clone();
-        }
-
-        // Populate AgentData with service information
-        for service in services {
-            agent_data.services.push(ServiceData {
-                name: service.name.clone(),
-                status: service.status.clone(),
-                memory_mb: service.memory_mb,
-                disk_gb: service.disk_gb,
-                user_stopped: false, // TODO: Integrate with service tracker
-                service_status: self.calculate_service_status(&service.name, &service.status),
-            });
+            state.services = services;
         }
 
         let elapsed = start_time.elapsed();
@@ -832,11 +841,11 @@ impl Collector for SystemdCollector {
         for service in cached_services {
             agent_data.services.push(ServiceData {
                 name: service.name.clone(),
-                status: service.status.clone(),
                 memory_mb: service.memory_mb,
                 disk_gb: service.disk_gb,
                 user_stopped: false, // TODO: Integrate with service tracker
                 service_status: self.calculate_service_status(&service.name, &service.status),
+                sub_services: Vec::new(), // Cached services don't have sub-services
             });
         }
         Ok(())
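Both the live and cached paths above route every status through `calculate_service_status`, which this diff calls but never shows. A minimal sketch of what such a mapping plausibly does, translating systemctl's textual state into the shared enum; the method body, the handling of transitional states, and any `user_stopped` logic are assumptions, not part of this change:

impl SystemdCollector {
    /// Hypothetical sketch: map a systemctl state string to the shared Status enum.
    fn calculate_service_status(&self, _service_name: &str, active_status: &str) -> Status {
        match active_status {
            "active" => Status::Ok,
            "inactive" => Status::Inactive, // renders as a gray circle in the dashboard
            "failed" => Status::Critical,
            "activating" | "deactivating" => Status::Pending,
            _ => Status::Unknown,
        }
    }
}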
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.145"
+version = "0.1.146"
 edition = "2021"
 
 [dependencies]
@@ -28,10 +28,9 @@ pub struct ServicesWidget {
 
 #[derive(Clone)]
 struct ServiceInfo {
-    status: String,
     memory_mb: Option<f32>,
     disk_gb: Option<f32>,
-    latency_ms: Option<f32>,
+    metrics: Vec<(String, f32, Option<String>)>, // (label, value, unit)
     widget_status: Status,
 }
 
@@ -113,10 +112,15 @@ impl ServicesWidget {
             name.to_string()
         };
 
-        // Parent services always show actual systemctl status
+        // Convert Status enum to display text
         let status_str = match info.widget_status {
-            Status::Pending => "pending".to_string(),
-            _ => info.status.clone(), // Use actual status from agent (active/inactive/failed)
+            Status::Ok => "active",
+            Status::Inactive => "inactive",
+            Status::Critical => "failed",
+            Status::Pending => "pending",
+            Status::Warning => "warning",
+            Status::Unknown => "unknown",
+            Status::Offline => "offline",
         };
 
         format!(
@@ -153,15 +157,25 @@ impl ServicesWidget {
             Status::Offline => Theme::muted_text(),
         };
 
-        // For sub-services, prefer latency if available
-        let status_str = if let Some(latency) = info.latency_ms {
-            if latency < 0.0 {
-                "timeout".to_string()
-            } else {
-                format!("{:.0}ms", latency)
+        // Display metrics or status for sub-services
+        let status_str = if !info.metrics.is_empty() {
+            // Show first metric with label and unit
+            let (label, value, unit) = &info.metrics[0];
+            match unit {
+                Some(u) => format!("{}: {:.1} {}", label, value, u),
+                None => format!("{}: {:.1}", label, value),
             }
         } else {
-            info.status.clone()
+            // Convert Status enum to display text for sub-services
+            match info.widget_status {
+                Status::Ok => "active",
+                Status::Inactive => "inactive",
+                Status::Critical => "failed",
+                Status::Pending => "pending",
+                Status::Warning => "warning",
+                Status::Unknown => "unknown",
+                Status::Offline => "offline",
+            }.to_string()
         };
         let tree_symbol = if is_last { "└─" } else { "├─" };
 
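Concretely, a sub-service whose first metric is, say, ("latency_ms", 42.0, Some("ms")) now renders as "latency_ms: 42.0 ms" under this format string, while a metric-less sub-service such as a docker container falls back to the enum-derived status text, so its label can no longer contradict its icon color.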
@@ -262,18 +276,48 @@ impl Widget for ServicesWidget {
         self.sub_services.clear();
 
         for service in &agent_data.services {
-            let service_info = ServiceInfo {
-                status: service.status.clone(),
+            // Store parent service
+            let parent_info = ServiceInfo {
                 memory_mb: Some(service.memory_mb),
                 disk_gb: Some(service.disk_gb),
-                latency_ms: None,
-                widget_status: Status::Ok,
+                metrics: Vec::new(), // Parent services don't have custom metrics
+                widget_status: service.service_status,
             };
+            self.parent_services.insert(service.name.clone(), parent_info);
 
-            self.parent_services.insert(service.name.clone(), service_info);
+            // Process sub-services if any
+            if !service.sub_services.is_empty() {
+                let mut sub_list = Vec::new();
+                for sub_service in &service.sub_services {
+                    // Convert metrics to display format
+                    let metrics: Vec<(String, f32, Option<String>)> = sub_service.metrics.iter()
+                        .map(|m| (m.label.clone(), m.value, m.unit.clone()))
+                        .collect();
+
+                    let sub_info = ServiceInfo {
+                        memory_mb: None, // Not used for sub-services
+                        disk_gb: None, // Not used for sub-services
+                        metrics,
+                        widget_status: sub_service.service_status,
+                    };
+                    sub_list.push((sub_service.name.clone(), sub_info));
+                }
+                self.sub_services.insert(service.name.clone(), sub_list);
+            }
         }
 
-        self.status = Status::Ok;
+        // Aggregate status from all services
+        let mut all_statuses = Vec::new();
+        all_statuses.extend(self.parent_services.values().map(|info| info.widget_status));
+        for sub_list in self.sub_services.values() {
+            all_statuses.extend(sub_list.iter().map(|(_, info)| info.widget_status));
+        }
+
+        self.status = if all_statuses.is_empty() {
+            Status::Unknown
+        } else {
+            Status::aggregate(&all_statuses)
+        };
     }
 }
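`Status::aggregate` is called above but not defined anywhere in this diff. The natural reading is a worst-status-wins reduction over the collected statuses; a minimal sketch under that assumption (the severity ordering and the `Copy` bound on `Status` are guesses):

impl Status {
    /// Hypothetical worst-status-wins aggregation over a set of statuses.
    pub fn aggregate(statuses: &[Status]) -> Status {
        // Assumed severity ordering, least severe first.
        fn severity(s: Status) -> u8 {
            match s {
                Status::Ok => 0,
                Status::Inactive => 1,
                Status::Pending => 2,
                Status::Unknown => 3,
                Status::Warning => 4,
                Status::Offline => 5,
                Status::Critical => 6,
            }
        }
        statuses
            .iter()
            .copied()
            .max_by_key(|s| severity(*s))
            .unwrap_or(Status::Unknown)
    }
}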
@@ -294,15 +338,13 @@ impl ServicesWidget {
         self.parent_services
             .entry(parent_service)
             .or_insert(ServiceInfo {
-                status: "unknown".to_string(),
                 memory_mb: None,
                 disk_gb: None,
-                latency_ms: None,
+                metrics: Vec::new(),
                 widget_status: Status::Unknown,
             });
 
         if metric.name.ends_with("_status") {
-            service_info.status = metric.value.as_string();
             service_info.widget_status = metric.status;
         } else if metric.name.ends_with("_memory_mb") {
             if let Some(memory) = metric.value.as_f32() {
@@ -331,10 +373,9 @@ impl ServicesWidget {
         sub_service_list.push((
             sub_name.clone(),
             ServiceInfo {
-                status: "unknown".to_string(),
                 memory_mb: None,
                 disk_gb: None,
-                latency_ms: None,
+                metrics: Vec::new(),
                 widget_status: Status::Unknown,
             },
         ));
@@ -342,7 +383,6 @@ impl ServicesWidget {
         };
 
         if metric.name.ends_with("_status") {
-            sub_service_info.status = metric.value.as_string();
             sub_service_info.widget_status = metric.status;
         } else if metric.name.ends_with("_memory_mb") {
             if let Some(memory) = metric.value.as_f32() {
@@ -352,11 +392,6 @@ impl ServicesWidget {
             if let Some(disk) = metric.value.as_f32() {
                 sub_service_info.disk_gb = Some(disk);
             }
-        } else if metric.name.ends_with("_latency_ms") {
-            if let Some(latency) = metric.value.as_f32() {
-                sub_service_info.latency_ms = Some(latency);
-                sub_service_info.widget_status = metric.status;
-            }
         }
     }
 }
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.145"
+version = "0.1.146"
 edition = "2021"
 
 [dependencies]
@@ -111,11 +111,27 @@ pub struct PoolDriveData {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ServiceData {
     pub name: String,
-    pub status: String, // "active", "inactive", "failed"
     pub memory_mb: f32,
     pub disk_gb: f32,
     pub user_stopped: bool,
     pub service_status: Status,
+    pub sub_services: Vec<SubServiceData>,
 }
 
+/// Sub-service data (nginx sites, docker containers, etc.)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SubServiceData {
+    pub name: String,
+    pub service_status: Status,
+    pub metrics: Vec<SubServiceMetric>,
+}
+
+/// Individual metric for a sub-service
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SubServiceMetric {
+    pub label: String,
+    pub value: f32,
+    pub unit: Option<String>,
+}
+
 /// Backup system data
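For reference, here is how an agent-side payload built from the new hierarchy fits together. This is a sketch: the service name, figures, and the serde_json round-trip are illustrative rather than taken from this change, though the structs do derive Serialize/Deserialize above.

use cm_dashboard_shared::{ServiceData, Status, SubServiceData, SubServiceMetric};

fn main() -> Result<(), serde_json::Error> {
    // Hypothetical nginx parent with one site as a sub-service.
    let service = ServiceData {
        name: "nginx.service".to_string(),
        memory_mb: 12.5,
        disk_gb: 0.3,
        user_stopped: false,
        service_status: Status::Ok,
        sub_services: vec![SubServiceData {
            name: "example.com".to_string(),
            service_status: Status::Ok,
            metrics: vec![SubServiceMetric {
                label: "latency_ms".to_string(),
                value: 42.0,
                unit: Some("ms".to_string()),
            }],
        }],
    };

    // Only the Status enum travels over the wire; display text is derived later.
    let json = serde_json::to_string_pretty(&service)?;
    println!("{json}");
    Ok(())
}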