Compare commits
14 Commits
- 41a7ee660a
- 76931f0457
- 516d159d2f
- 1656f20e96
- dcd350ec2c
- a34b095857
- 7362464b46
- c8b79576fa
- f53df5440b
- d1b0e2c431
- b1719a60fc
- d922e8d6f3
- 407bc9dbc2
- 3c278351c9
Cargo.lock (generated, 6 changed lines)
```diff
@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 
 [[package]]
 name = "cm-dashboard"
-version = "0.1.263"
+version = "0.1.278"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.263"
+version = "0.1.278"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -325,7 +325,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.263"
+version = "0.1.278"
 dependencies = [
  "chrono",
  "serde",
```
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.264"
+version = "0.1.278"
 edition = "2021"
 
 [dependencies]
```
```diff
@@ -1,5 +1,6 @@
 use anyhow::Result;
 use gethostname::gethostname;
+use std::collections::HashMap;
 use std::time::{Duration, Instant};
 use tokio::time::interval;
 use tracing::{debug, error, info};
@@ -28,7 +29,6 @@ struct TimedCollector {
 }
 
 pub struct Agent {
     hostname: String,
     config: AgentConfig,
     zmq_handler: ZmqHandler,
     collectors: Vec<TimedCollector>,
```
```diff
@@ -38,12 +38,40 @@ pub struct Agent {
 }
 
 /// Track system component status for change detection
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 struct SystemStatus {
     // CPU
     cpu_load_status: cm_dashboard_shared::Status,
     cpu_temperature_status: cm_dashboard_shared::Status,
     // Memory
     memory_usage_status: cm_dashboard_shared::Status,
-    // Add more as needed
+    // Storage - keyed by drive name or pool name
+    drive_statuses: HashMap<String, DriveStatus>,
+    pool_statuses: HashMap<String, PoolStatus>,
+    // Services - keyed by service name
+    service_statuses: HashMap<String, cm_dashboard_shared::Status>,
+    // Backup
+    backup_status: cm_dashboard_shared::Status,
 }
 
+#[derive(Debug, Clone, Default)]
+struct DriveStatus {
+    temperature_status: cm_dashboard_shared::Status,
+    health_status: cm_dashboard_shared::Status,
+    filesystem_statuses: HashMap<String, cm_dashboard_shared::Status>,
+}
+
+#[derive(Debug, Clone, Default)]
+struct PoolStatus {
+    health_status: cm_dashboard_shared::Status,
+    usage_status: cm_dashboard_shared::Status,
+    drive_statuses: HashMap<String, PoolDriveStatus>,
+}
+
+#[derive(Debug, Clone, Default)]
+struct PoolDriveStatus {
+    health_status: cm_dashboard_shared::Status,
+    temperature_status: cm_dashboard_shared::Status,
+}
+
 impl Agent {
@@ -148,7 +176,6 @@ impl Agent {
         let cached_agent_data = AgentData::new(hostname.clone(), env!("CARGO_PKG_VERSION").to_string());
 
         Ok(Self {
             hostname,
             config,
             zmq_handler,
             collectors,
```
```diff
@@ -171,7 +198,9 @@ impl Agent {
         let mut transmission_interval = interval(Duration::from_secs(
             self.config.zmq.transmission_interval_seconds,
         ));
-        let mut notification_interval = interval(Duration::from_secs(30)); // Check notifications every 30s
+        let mut notification_interval = interval(Duration::from_secs(
+            self.config.notifications.check_interval_seconds,
+        ));
 
         // Skip initial ticks to avoid immediate execution
         transmission_interval.tick().await;
@@ -185,9 +214,21 @@ impl Agent {
                     }
                 }
                 _ = notification_interval.tick() => {
-                    // Process any pending notifications
-                    // NOTE: With structured data, we might need to implement status tracking differently
-                    // For now, we skip this until status evaluation is migrated
+                    // Check for status changes and queue notifications
+                    let agent_data_snapshot = self.cached_agent_data.clone();
+                    if let Err(e) = self.check_status_changes_and_notify(&agent_data_snapshot).await {
+                        error!("Failed to check status changes: {}", e);
+                    }
+
+                    // Check if all components recovered and flush pending recoveries
+                    self.notification_manager.flush_recoveries_if_all_ok();
+
+                    // Flush any pending aggregated notifications
+                    if self.notification_manager.should_flush() {
+                        if let Err(e) = self.notification_manager.flush_notifications().await {
+                            error!("Failed to flush notifications: {}", e);
+                        }
+                    }
                 }
                 _ = &mut shutdown_rx => {
                     info!("Shutdown signal received, stopping agent loop");
@@ -235,16 +276,8 @@ impl Agent {
             .unwrap()
             .as_secs();
 
-        // Clone for notification check (to avoid borrow issues)
-        let agent_data_snapshot = self.cached_agent_data.clone();
-
-        // Check for status changes and send notifications
-        if let Err(e) = self.check_status_changes_and_notify(&agent_data_snapshot).await {
-            error!("Failed to check status changes: {}", e);
-        }
-
         // Broadcast the cached structured data via ZMQ
-        if let Err(e) = self.zmq_handler.publish_agent_data(&agent_data_snapshot).await {
+        if let Err(e) = self.zmq_handler.publish_agent_data(&self.cached_agent_data).await {
             error!("Failed to broadcast agent data: {}", e);
         } else {
             debug!("Successfully broadcast structured agent data");
```
```diff
@@ -253,38 +286,182 @@ impl Agent {
         Ok(())
     }
 
-    /// Check for status changes and send notifications
+    /// Check for status changes and queue notifications
     async fn check_status_changes_and_notify(&mut self, agent_data: &AgentData) -> Result<()> {
-        // Extract current status
-        let current_status = SystemStatus {
-            cpu_load_status: agent_data.system.cpu.load_status.clone(),
-            cpu_temperature_status: agent_data.system.cpu.temperature_status.clone(),
-            memory_usage_status: agent_data.system.memory.usage_status.clone(),
+        // Build current status from agent data
+        let mut current_status = SystemStatus {
+            cpu_load_status: agent_data.system.cpu.load_status,
+            cpu_temperature_status: agent_data.system.cpu.temperature_status,
+            memory_usage_status: agent_data.system.memory.usage_status,
+            backup_status: agent_data.backup.backup_status,
+            ..Default::default()
         };
 
-        // Check for status changes
-        if let Some(previous) = self.previous_status.clone() {
-            self.check_and_notify_status_change(
+        // Collect drive statuses
+        for drive in &agent_data.system.storage.drives {
+            let mut fs_statuses = HashMap::new();
+            for fs in &drive.filesystems {
+                fs_statuses.insert(fs.mount.clone(), fs.usage_status);
+            }
+            current_status.drive_statuses.insert(
+                drive.name.clone(),
+                DriveStatus {
+                    temperature_status: drive.temperature_status,
+                    health_status: drive.health_status,
+                    filesystem_statuses: fs_statuses,
+                },
+            );
+        }
+
+        // Collect pool statuses
+        for pool in &agent_data.system.storage.pools {
+            let mut pool_drive_statuses = HashMap::new();
+            for drive in pool.data_drives.iter().chain(pool.parity_drives.iter()) {
+                pool_drive_statuses.insert(
+                    drive.name.clone(),
+                    PoolDriveStatus {
+                        health_status: drive.health_status,
+                        temperature_status: drive.temperature_status,
+                    },
+                );
+            }
+            current_status.pool_statuses.insert(
+                pool.name.clone(),
+                PoolStatus {
+                    health_status: pool.health_status,
+                    usage_status: pool.usage_status,
+                    drive_statuses: pool_drive_statuses,
+                },
+            );
+        }
+
+        // Collect service statuses (only for non-user-stopped services)
+        for service in &agent_data.services {
+            if !service.user_stopped {
+                current_status
+                    .service_statuses
+                    .insert(service.name.clone(), service.service_status);
+            }
+        }
+
+        // Clone previous status to avoid borrow issues
+        let previous = self.previous_status.clone();
+
+        // Compare with previous status and queue notifications
+        if let Some(previous) = previous {
+            // CPU
+            self.queue_status_notification(
                 "CPU Load",
                 &previous.cpu_load_status,
                 &current_status.cpu_load_status,
-                format!("CPU load: {:.1}", agent_data.system.cpu.load_1min)
-            ).await?;
-
-            self.check_and_notify_status_change(
+                &format!("Load: {:.2}", agent_data.system.cpu.load_1min),
+            );
+            self.queue_status_notification(
                 "CPU Temperature",
                 &previous.cpu_temperature_status,
                 &current_status.cpu_temperature_status,
-                format!("CPU temperature: {}°C",
-                    agent_data.system.cpu.temperature_celsius.unwrap_or(0.0) as i32)
-            ).await?;
-
-            self.check_and_notify_status_change(
-                "Memory Usage",
+                &format!(
+                    "Temperature: {}°C",
+                    agent_data.system.cpu.temperature_celsius.unwrap_or(0.0) as i32
+                ),
+            );
+
+            // Memory
+            self.queue_status_notification(
+                "Memory",
                 &previous.memory_usage_status,
                 &current_status.memory_usage_status,
-                format!("Memory usage: {:.1}%", agent_data.system.memory.usage_percent)
-            ).await?;
+                &format!("Usage: {:.1}%", agent_data.system.memory.usage_percent),
+            );
+
+            // Backup
+            self.queue_status_notification(
+                "Backup",
+                &previous.backup_status,
+                &current_status.backup_status,
+                &format!(
+                    "Last backup: {}",
+                    agent_data.backup.last_backup_time.as_deref().unwrap_or("unknown")
+                ),
+            );
+
+            // Drives
+            for (name, current_drive) in &current_status.drive_statuses {
+                if let Some(prev_drive) = previous.drive_statuses.get(name) {
+                    self.queue_status_notification(
+                        &format!("Drive {} Health", name),
+                        &prev_drive.health_status,
+                        &current_drive.health_status,
+                        "Health check failed",
+                    );
+                    self.queue_status_notification(
+                        &format!("Drive {} Temperature", name),
+                        &prev_drive.temperature_status,
+                        &current_drive.temperature_status,
+                        "Temperature threshold exceeded",
+                    );
+
+                    // Filesystem usage
+                    for (mount, current_fs_status) in &current_drive.filesystem_statuses {
+                        if let Some(prev_fs_status) = prev_drive.filesystem_statuses.get(mount) {
+                            self.queue_status_notification(
+                                &format!("Filesystem {}", mount),
+                                prev_fs_status,
+                                current_fs_status,
+                                "Disk usage threshold exceeded",
+                            );
+                        }
+                    }
+                }
+            }
+
+            // Pools
+            for (name, current_pool) in &current_status.pool_statuses {
+                if let Some(prev_pool) = previous.pool_statuses.get(name) {
+                    self.queue_status_notification(
+                        &format!("Pool {} Health", name),
+                        &prev_pool.health_status,
+                        &current_pool.health_status,
+                        "Pool health degraded",
+                    );
+                    self.queue_status_notification(
+                        &format!("Pool {} Usage", name),
+                        &prev_pool.usage_status,
+                        &current_pool.usage_status,
+                        "Pool usage threshold exceeded",
+                    );
+
+                    // Pool drives
+                    for (drive_name, current_pd) in &current_pool.drive_statuses {
+                        if let Some(prev_pd) = prev_pool.drive_statuses.get(drive_name) {
+                            self.queue_status_notification(
+                                &format!("Pool {} Drive {} Health", name, drive_name),
+                                &prev_pd.health_status,
+                                &current_pd.health_status,
+                                "Pool drive health degraded",
+                            );
+                            self.queue_status_notification(
+                                &format!("Pool {} Drive {} Temperature", name, drive_name),
+                                &prev_pd.temperature_status,
+                                &current_pd.temperature_status,
+                                "Pool drive temperature exceeded",
+                            );
+                        }
+                    }
+                }
+            }
+
+            // Services
+            for (name, current_svc_status) in &current_status.service_statuses {
+                if let Some(prev_svc_status) = previous.service_statuses.get(name) {
+                    self.queue_status_notification(
+                        &format!("Service {}", name),
+                        prev_svc_status,
+                        current_svc_status,
+                        "Service status changed",
+                    );
+                }
+            }
+        }
 
         // Store current status for next comparison
```
```diff
@@ -292,43 +469,44 @@ impl Agent {
         Ok(())
     }
 
-    /// Check individual status change and send notification if degraded
-    async fn check_and_notify_status_change(
+    /// Queue a notification based on status change
+    fn queue_status_notification(
         &mut self,
         component: &str,
         previous: &cm_dashboard_shared::Status,
         current: &cm_dashboard_shared::Status,
-        details: String
-    ) -> Result<()> {
+        details: &str,
+    ) {
         use cm_dashboard_shared::Status;
 
-        // Only notify on status degradation (OK → Warning/Critical, Warning → Critical)
-        let should_notify = match (previous, current) {
-            (Status::Ok, Status::Warning) => true,
-            (Status::Ok, Status::Critical) => true,
-            (Status::Warning, Status::Critical) => true,
-            _ => false,
-        };
+        // Check for degradation (alert)
+        let is_alert = matches!(
+            (previous, current),
+            (Status::Ok, Status::Warning)
+                | (Status::Ok, Status::Critical)
+                | (Status::Warning, Status::Critical)
+        );
 
-        if should_notify {
-            let subject = format!("{} {} Alert", self.hostname, component);
-            let body = format!(
-                "Alert: {} status changed from {:?} to {:?}\n\nDetails: {}\n\nTime: {}",
-                component,
-                previous,
-                current,
-                details,
-                chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC")
-            );
-
-            info!("Sending notification: {} - {:?} → {:?}", component, previous, current);
-
-            if let Err(e) = self.notification_manager.send_direct_email(&subject, &body).await {
-                error!("Failed to send notification for {}: {}", component, e);
-            }
+        // Check for recovery
+        let is_recovery = matches!(
+            (previous, current),
+            (Status::Warning, Status::Ok)
+                | (Status::Critical, Status::Ok)
+                | (Status::Critical, Status::Warning)
+        );
+
+        if is_alert {
+            info!(
+                "Alert: {} - {:?} → {:?}",
+                component, previous, current
+            );
+            self.notification_manager.queue_alert(component, previous, current, details);
+        } else if is_recovery {
+            info!(
+                "Recovery: {} - {:?} → {:?}",
+                component, previous, current
+            );
+            self.notification_manager.queue_recovery(component, previous, current, details);
         }
-
-        Ok(())
     }
-
 }
```
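The alert/recovery split above reduces to a small transition table over the shared status enum. A minimal standalone sketch of that rule, with a simplified local `Status` standing in for `cm_dashboard_shared::Status`:

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum Status {
    Ok,
    Warning,
    Critical,
}

#[derive(Debug, PartialEq)]
enum Transition {
    Alert,    // things got worse: queue_alert
    Recovery, // things got better: queue_recovery
    None,     // unchanged or not notifiable
}

// Mirrors the two matches! arms in queue_status_notification.
fn classify(previous: Status, current: Status) -> Transition {
    match (previous, current) {
        (Status::Ok, Status::Warning)
        | (Status::Ok, Status::Critical)
        | (Status::Warning, Status::Critical) => Transition::Alert,
        (Status::Warning, Status::Ok)
        | (Status::Critical, Status::Ok)
        | (Status::Critical, Status::Warning) => Transition::Recovery,
        _ => Transition::None,
    }
}

fn main() {
    assert_eq!(classify(Status::Ok, Status::Critical), Transition::Alert);
    assert_eq!(classify(Status::Critical, Status::Warning), Transition::Recovery);
    assert_eq!(classify(Status::Ok, Status::Ok), Transition::None);
    println!("transition table behaves as expected");
}
```

Note that a partial recovery (Critical → Warning) counts as a recovery here, not a fresh alert.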
```diff
@@ -1,7 +1,7 @@
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData, Status};
+use cm_dashboard_shared::{AgentData, BackupData, BackupRepositoryData, Status};
 use serde::{Deserialize, Serialize};
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
 use std::fs;
 use std::path::{Path, PathBuf};
 use tracing::{debug, warn};
@@ -21,7 +21,7 @@ impl BackupCollector {
         }
     }
 
-    /// Scan directory for all backup status files
+    /// Scan directory for backup status file (nfs-backup.toml)
     async fn scan_status_files(&self) -> Result<Vec<PathBuf>, CollectorError> {
         let status_path = Path::new(&self.status_dir);
 
@@ -30,30 +30,15 @@ impl BackupCollector {
             return Ok(Vec::new());
         }
 
-        let mut status_files = Vec::new();
-
-        match fs::read_dir(status_path) {
-            Ok(entries) => {
-                for entry in entries {
-                    if let Ok(entry) = entry {
-                        let path = entry.path();
-                        if path.is_file() {
-                            if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
-                                if filename.starts_with("backup-status-") && filename.ends_with(".toml") {
-                                    status_files.push(path);
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-            Err(e) => {
-                warn!("Failed to read backup status directory: {}", e);
-                return Ok(Vec::new());
-            }
+        // Look for nfs-backup.toml (new NFS-based backup)
+        let nfs_backup_file = status_path.join("nfs-backup.toml");
+        if nfs_backup_file.exists() {
+            return Ok(vec![nfs_backup_file]);
         }
 
-        Ok(status_files)
+        // No backup status file found
+        debug!("No nfs-backup.toml found in {}", self.status_dir);
+        Ok(Vec::new())
     }
 
     /// Read a single backup status file
```
```diff
@@ -76,24 +61,13 @@ impl BackupCollector {
     /// Calculate backup status from TOML status field
     fn calculate_backup_status(status_str: &str) -> Status {
         match status_str.to_lowercase().as_str() {
-            "success" => Status::Ok,
+            "success" | "completed" => Status::Ok,
             "warning" => Status::Warning,
             "failed" | "error" => Status::Critical,
             _ => Status::Unknown,
         }
     }
 
-    /// Calculate usage status from disk usage percentage
-    fn calculate_usage_status(usage_percent: f32) -> Status {
-        if usage_percent < 80.0 {
-            Status::Ok
-        } else if usage_percent < 90.0 {
-            Status::Warning
-        } else {
-            Status::Critical
-        }
-    }
-
    /// Convert BackupStatusToml to BackupData and populate AgentData
     async fn populate_backup_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
         let status_files = self.scan_status_files().await?;
```
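The collector folds individual statuses into one with `worst_status.max(...)`, which requires the shared `Status` to be ordered. A minimal sketch of that "worst status wins" aggregation; the Ok < Warning < Critical ordering is an assumption consistent with the `max` calls, and the real enum lives in cm-dashboard-shared:

```rust
// Simplified stand-in for cm_dashboard_shared::Status; derives Ord so
// worst_status.max(...) picks the more severe value.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Status {
    Ok,
    Warning,
    Critical,
}

// Mirrors calculate_backup_status; "completed" is newly accepted as Ok.
fn backup_status(s: &str) -> Status {
    match s.to_lowercase().as_str() {
        "success" | "completed" => Status::Ok,
        "warning" => Status::Warning,
        _ => Status::Critical, // real code distinguishes failed/error/Unknown
    }
}

fn main() {
    let mut worst = Status::Ok;
    for s in ["completed", "warning", "success"] {
        worst = worst.max(backup_status(s));
    }
    assert_eq!(worst, Status::Warning);
    println!("worst = {:?}", worst);
}
```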
```diff
@@ -101,76 +75,47 @@ impl BackupCollector {
         if status_files.is_empty() {
             debug!("No backup status files found");
             agent_data.backup = BackupData {
                 last_backup_time: None,
                 backup_status: Status::Unknown,
                 repositories: Vec::new(),
                 repository_status: Status::Unknown,
                 disks: Vec::new(),
             };
             return Ok(());
         }
 
-        let mut all_repositories = HashSet::new();
-        let mut disks = Vec::new();
+        // Aggregate repository data across all backup status files
+        let mut repo_map: HashMap<String, BackupRepositoryData> = HashMap::new();
         let mut worst_status = Status::Ok;
+        let mut latest_backup_time: Option<String> = None;
 
         for status_file in status_files {
             match self.read_status_file(&status_file).await {
                 Ok(backup_status) => {
-                    // Collect all service names
-                    for service_name in backup_status.services.keys() {
-                        all_repositories.insert(service_name.clone());
-                    }
-
                     // Calculate backup status
                     let backup_status_enum = Self::calculate_backup_status(&backup_status.status);
                     worst_status = worst_status.max(backup_status_enum);
 
-                    // Calculate usage status from disk space
-                    let (usage_percent, used_gb, total_gb, usage_status) = if let Some(disk_space) = &backup_status.disk_space {
-                        let usage_pct = disk_space.usage_percent as f32;
-                        (
-                            usage_pct,
-                            disk_space.used_gb as f32,
-                            disk_space.total_gb as f32,
-                            Self::calculate_usage_status(usage_pct),
-                        )
-                    } else {
-                        (0.0, 0.0, 0.0, Status::Unknown)
-                    };
+                    // Track latest backup time
+                    if latest_backup_time.is_none() || Some(&backup_status.start_time) > latest_backup_time.as_ref() {
+                        latest_backup_time = Some(backup_status.start_time.clone());
+                    }
 
-                    // Update worst status
-                    worst_status = worst_status.max(backup_status_enum).max(usage_status);
+                    // Process each service in this backup
+                    for (service_name, service_status) in backup_status.services {
+                        // Convert bytes to GB
+                        let repo_size_gb = service_status.repo_size_bytes as f32 / 1_073_741_824.0;
 
-                    // Build service list for this disk
-                    let services: Vec<String> = backup_status.services.keys().cloned().collect();
+                        // Calculate service status
+                        let service_status_enum = Self::calculate_backup_status(&service_status.status);
+                        worst_status = worst_status.max(service_status_enum);
 
-                    // Get min and max archive counts to detect inconsistencies
-                    let archives_min: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .min()
-                        .unwrap_or(0);
-
-                    let archives_max: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .max()
-                        .unwrap_or(0);
-
-                    // Create disk data
-                    let disk_data = BackupDiskData {
-                        serial: backup_status.disk_serial_number.unwrap_or_else(|| "Unknown".to_string()),
-                        product_name: backup_status.disk_product_name,
-                        wear_percent: backup_status.disk_wear_percent,
-                        temperature_celsius: None, // Not available in current TOML
-                        last_backup_time: Some(backup_status.start_time),
-                        backup_status: backup_status_enum,
-                        disk_usage_percent: usage_percent,
-                        disk_used_gb: used_gb,
-                        disk_total_gb: total_gb,
-                        usage_status,
-                        services,
-                        archives_min,
-                        archives_max,
-                    };
-
-                    disks.push(disk_data);
+                        // Update or insert repository data
+                        repo_map.insert(service_name.clone(), BackupRepositoryData {
+                            name: service_name,
+                            archive_count: service_status.archive_count,
+                            repo_size_gb,
+                            status: service_status_enum,
+                        });
+                    }
                 }
                 Err(e) => {
                     warn!("Failed to read backup status file {:?}: {}", status_file, e);
@@ -178,12 +123,14 @@ impl BackupCollector {
                 }
             }
         }
 
-        let repositories: Vec<String> = all_repositories.into_iter().collect();
+        // Convert HashMap to sorted Vec
+        let mut repositories: Vec<BackupRepositoryData> = repo_map.into_values().collect();
+        repositories.sort_by(|a, b| a.name.cmp(&b.name));
 
         agent_data.backup = BackupData {
             last_backup_time: latest_backup_time,
             backup_status: worst_status,
             repositories,
             repository_status: worst_status,
             disks,
         };
 
         Ok(())
```
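For orientation, a status file of roughly the shape this collector reads. The field names follow the accesses above (`status`, `start_time`, `services.*.status`, `archive_count`, `repo_size_bytes`); the exact `BackupStatusToml` schema is not shown in this diff, so treat this sample as illustrative:

```toml
# Hypothetical nfs-backup.toml, shape inferred from the collector's reads
status = "completed"              # mapped by calculate_backup_status
start_time = "2024-11-02T03:00:00Z"

[services.gitea]
status = "success"
archive_count = 30
repo_size_bytes = 5368709120      # ~5 GiB -> repo_size_gb ≈ 5.0

[services.vaultwarden]
status = "warning"                # raises worst_status to Warning
archive_count = 29
repo_size_bytes = 104857600
```

Note that `start_time` is compared as a string to pick the latest backup, which only works if timestamps sort lexicographically (ISO 8601 does).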
```diff
@@ -114,7 +114,7 @@ impl DiskCollector {
         let mut cmd = TokioCommand::new("lsblk");
         cmd.args(&["-rn", "-o", "NAME,MOUNTPOINT"]);
 
-        let output = run_command_with_timeout(cmd, 2).await
+        let output = run_command_with_timeout(cmd, 10).await
             .map_err(|e| CollectorError::SystemRead {
                 path: "block devices".to_string(),
                 error: e.to_string(),
@@ -184,7 +184,7 @@ impl DiskCollector {
     /// Get filesystem info for a single mount point
     fn get_filesystem_info(&self, mount_point: &str) -> Result<(u64, u64), CollectorError> {
         let output = StdCommand::new("timeout")
-            .args(&["2", "df", "--block-size=1", mount_point])
+            .args(&["10", "df", "--block-size=1", mount_point])
             .output()
             .map_err(|e| CollectorError::SystemRead {
                 path: format!("df {}", mount_point),
@@ -433,7 +433,7 @@ impl DiskCollector {
             cmd.args(&["-a", &format!("/dev/{}", drive_name)]);
         }
 
-        let output = run_command_with_timeout(cmd, 3).await
+        let output = run_command_with_timeout(cmd, 15).await
             .map_err(|e| CollectorError::SystemRead {
                 path: format!("SMART data for {}", drive_name),
                 error: e.to_string(),
@@ -772,7 +772,7 @@ impl DiskCollector {
     fn get_drive_info_for_path(&self, path: &str) -> anyhow::Result<PoolDrive> {
         // Use lsblk to find the backing device with timeout
         let output = StdCommand::new("timeout")
-            .args(&["2", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
+            .args(&["10", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
             .output()
             .map_err(|e| anyhow::anyhow!("Failed to run lsblk: {}", e))?;
```
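These hunks only loosen command timeouts (2s → 10s, 3s → 15s for SMART reads). The `run_command_with_timeout` helper itself is not shown in this diff; a minimal sketch consistent with its call sites, assuming tokio with the process and time features:

```rust
use std::time::Duration;
use tokio::process::Command;
use tokio::time::timeout;

/// Sketch only: run a command, failing if it exceeds `secs`.
/// The repo's real helper may differ in signature and error type.
async fn run_command_with_timeout(
    mut cmd: Command,
    secs: u64,
) -> anyhow::Result<std::process::Output> {
    match timeout(Duration::from_secs(secs), cmd.output()).await {
        Ok(result) => Ok(result?),                              // command finished
        Err(_) => anyhow::bail!("command timed out after {}s", secs), // timer fired first
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let mut cmd = Command::new("lsblk");
    cmd.args(["-rn", "-o", "NAME,MOUNTPOINT"]);
    let output = run_command_with_timeout(cmd, 10).await?;
    println!("{}", String::from_utf8_lossy(&output.stdout));
    Ok(())
}
```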
```diff
@@ -32,6 +32,9 @@ impl NixOSCollector {
         // Set NixOS build/generation information
         agent_data.build_version = self.get_nixos_generation().await;
 
+        // Set kernel version
+        agent_data.kernel_version = self.get_kernel_version().await;
+
         // Set current timestamp
         agent_data.timestamp = chrono::Utc::now().timestamp() as u64;
 
@@ -80,6 +83,14 @@ impl NixOSCollector {
         std::env::var("CM_DASHBOARD_VERSION").unwrap_or_else(|_| "unknown".to_string())
     }
 
+    /// Get kernel version from /proc/sys/kernel/osrelease
+    async fn get_kernel_version(&self) -> Option<String> {
+        match fs::read_to_string("/proc/sys/kernel/osrelease") {
+            Ok(version) => Some(version.trim().to_string()),
+            Err(_) => None,
+        }
+    }
+
     /// Get NixOS system generation (build) information from git commit
     async fn get_nixos_generation(&self) -> Option<String> {
         // Try to read git commit hash from file written during rebuild
```
```diff
@@ -230,6 +230,37 @@ impl SystemdCollector {
             }
         }
 
+        if service_name == "nfs-server" && status_info.active_state == "active" {
+            // Add NFS exports as sub-services
+            let exports = self.get_nfs_exports();
+            for (export_path, info) in exports {
+                let display = if !info.is_empty() {
+                    format!("{} {}", export_path, info)
+                } else {
+                    export_path
+                };
+                sub_services.push(SubServiceData {
+                    name: display,
+                    service_status: Status::Info,
+                    metrics: Vec::new(),
+                    service_type: "nfs_export".to_string(),
+                });
+            }
+        }
+
+        if (service_name == "smbd" || service_name == "samba-smbd") && status_info.active_state == "active" {
+            // Add SMB shares as sub-services
+            let shares = self.get_smb_shares();
+            for (share_name, share_path, mode) in shares {
+                sub_services.push(SubServiceData {
+                    name: format!("{}: {} {}", share_name, share_path, mode),
+                    service_status: Status::Info,
+                    metrics: Vec::new(),
+                    service_type: "smb_share".to_string(),
+                });
+            }
+        }
+
         // Create complete service data
         let service_data = ServiceData {
             name: service_name.clone(),
@@ -922,15 +953,21 @@ impl SystemdCollector {
                 "-s",
                 "--max-time",
                 "4",
-                "https://ifconfig.me"
+                "https://1.1.1.1/cdn-cgi/trace"
             ])
             .output()
             .ok()?;
 
         if output.status.success() {
-            let ip = String::from_utf8_lossy(&output.stdout).trim().to_string();
-            if !ip.is_empty() && ip.contains('.') {
-                return Some(ip);
+            let response = String::from_utf8_lossy(&output.stdout);
+            // Parse "ip=x.x.x.x" from the response
+            for line in response.lines() {
+                if let Some(ip) = line.strip_prefix("ip=") {
+                    let ip = ip.trim().to_string();
+                    if !ip.is_empty() {
+                        return Some(ip);
+                    }
+                }
+            }
         }
 
```
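For context, Cloudflare's trace endpoint answers with plain key=value lines, one per attribute, which is why the new code scans for an `ip=` prefix instead of treating the whole body as the address. An abridged response (values illustrative):

```text
h=1.1.1.1
ip=203.0.113.7
ts=1700000000.000
visit_scheme=https
uag=curl/8.5.0
colo=AMS
```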
```diff
@@ -948,10 +985,20 @@ impl SystemdCollector {
         let status_output = String::from_utf8_lossy(&output.stdout);
         let mut peers = Vec::new();
 
+        // Get current hostname to filter it out
+        let current_hostname = gethostname::gethostname()
+            .to_string_lossy()
+            .to_string();
+
         // Parse tailscale status output
         // Format: IP hostname user os status
         // Example: 100.110.98.3 wslbox cm@ linux active; direct 192.168.30.227:53757
-        for line in status_output.lines() {
+        // Note: First line is always the current host, skip it
+        for (idx, line) in status_output.lines().enumerate() {
+            if idx == 0 {
+                continue; // Skip first line (current host)
+            }
+
             let parts: Vec<&str> = line.split_whitespace().collect();
             if parts.len() < 5 {
                 continue; // Skip invalid lines
@@ -964,6 +1011,12 @@ impl SystemdCollector {
             // parts[4+] = status (e.g., "active;", "direct", "192.168.30.227:53757" or "idle;" or "offline")
 
             let hostname = parts[1];
+
+            // Skip if this is the current host (double-check in case format changes)
+            if hostname == current_hostname {
+                continue;
+            }
+
             let status_parts = &parts[4..];
 
             // Determine connection method from status
```
```diff
@@ -995,6 +1048,164 @@ impl SystemdCollector {
         }
     }
 
+    /// Get NFS exports from exportfs
+    /// Returns a list of (export_path, info_string) tuples
+    fn get_nfs_exports(&self) -> Vec<(String, String)> {
+        let output = match Command::new("timeout")
+            .args(["2", "exportfs", "-v"])
+            .output()
+        {
+            Ok(output) if output.status.success() => output,
+            _ => return Vec::new(),
+        };
+
+        let exports_output = String::from_utf8_lossy(&output.stdout);
+        let mut exports_map: std::collections::HashMap<String, Vec<(String, String)>> =
+            std::collections::HashMap::new();
+        let mut current_path: Option<String> = None;
+
+        for line in exports_output.lines() {
+            let trimmed = line.trim();
+
+            if trimmed.is_empty() || trimmed.starts_with('#') {
+                continue;
+            }
+
+            if trimmed.starts_with('/') {
+                // Export path line - may have network on same line or continuation
+                let parts: Vec<&str> = trimmed.splitn(2, char::is_whitespace).collect();
+                let path = parts[0].to_string();
+                current_path = Some(path.clone());
+
+                // Check if network info is on the same line
+                if parts.len() > 1 {
+                    let rest = parts[1].trim();
+                    if let Some(paren_pos) = rest.find('(') {
+                        let network = rest[..paren_pos].trim();
+
+                        if let Some(end_paren) = rest.find(')') {
+                            let options = &rest[paren_pos+1..end_paren];
+                            let mode = if options.contains(",rw,") || options.ends_with(",rw") {
+                                "rw"
+                            } else {
+                                "ro"
+                            };
+
+                            exports_map.entry(path)
+                                .or_insert_with(Vec::new)
+                                .push((network.to_string(), mode.to_string()));
+                        }
+                    }
+                }
+            } else if let Some(ref path) = current_path {
+                // Continuation line with network and options
+                if let Some(paren_pos) = trimmed.find('(') {
+                    let network = trimmed[..paren_pos].trim();
+
+                    if let Some(end_paren) = trimmed.find(')') {
+                        let options = &trimmed[paren_pos+1..end_paren];
+                        let mode = if options.contains(",rw,") || options.ends_with(",rw") {
+                            "rw"
+                        } else {
+                            "ro"
+                        };
+
+                        exports_map.entry(path.clone())
+                            .or_insert_with(Vec::new)
+                            .push((network.to_string(), mode.to_string()));
+                    }
+                }
+            }
+        }
+
+        // Build display strings: "path: mode [networks]"
+        let mut exports: Vec<(String, String)> = exports_map
+            .into_iter()
+            .map(|(path, mut entries)| {
+                if entries.is_empty() {
+                    return (path, String::new());
+                }
+
+                let mode = entries[0].1.clone();
+                let networks: Vec<String> = entries.drain(..).map(|(n, _)| n).collect();
+                let info = format!("{} [{}]", mode, networks.join(", "));
+                (path, info)
+            })
+            .collect();
+
+        exports.sort_by(|a, b| a.0.cmp(&b.0));
+        exports
+    }
+
+    /// Get SMB shares from smb.conf
+    /// Returns a list of (share_name, share_path, mode) tuples
+    fn get_smb_shares(&self) -> Vec<(String, String, String)> {
+        match std::fs::read_to_string("/etc/samba/smb.conf") {
+            Ok(config) => {
+                let mut shares = Vec::new();
+                let mut current_share: Option<String> = None;
+                let mut current_path: Option<String> = None;
+                let mut current_mode: String = "ro".to_string(); // Default to read-only
+
+                for line in config.lines() {
+                    let line = line.trim();
+
+                    // Skip comments and empty lines
+                    if line.is_empty() || line.starts_with('#') || line.starts_with(';') {
+                        continue;
+                    }
+
+                    // Detect share section [sharename]
+                    if line.starts_with('[') && line.ends_with(']') {
+                        // Save previous share if we have both name and path
+                        if let (Some(name), Some(path)) = (current_share.take(), current_path.take()) {
+                            // Skip special sections
+                            if name != "global" && name != "homes" && name != "printers" {
+                                shares.push((name, path, current_mode.clone()));
+                            }
+                        }
+
+                        // Start new share
+                        let share_name = line[1..line.len()-1].trim().to_string();
+                        current_share = Some(share_name);
+                        current_path = None;
+                        current_mode = "ro".to_string(); // Reset to default
+                    }
+                    // Look for path = /some/path
+                    else if line.starts_with("path") && line.contains('=') {
+                        if let Some(path_value) = line.split('=').nth(1) {
+                            current_path = Some(path_value.trim().to_string());
+                        }
+                    }
+                    // Look for read only = yes/no
+                    else if line.to_lowercase().starts_with("read only") && line.contains('=') {
+                        if let Some(value) = line.split('=').nth(1) {
+                            let val = value.trim().to_lowercase();
+                            current_mode = if val == "no" || val == "false" { "rw" } else { "ro" }.to_string();
+                        }
+                    }
+                    // Look for writable = yes/no (opposite of read only)
+                    else if line.to_lowercase().starts_with("writable") && line.contains('=') {
+                        if let Some(value) = line.split('=').nth(1) {
+                            let val = value.trim().to_lowercase();
+                            current_mode = if val == "yes" || val == "true" { "rw" } else { "ro" }.to_string();
+                        }
+                    }
+                }
+
+                // Don't forget the last share
+                if let (Some(name), Some(path)) = (current_share, current_path) {
+                    if name != "global" && name != "homes" && name != "printers" {
+                        shares.push((name, path, current_mode));
+                    }
+                }
+
+                shares
+            }
+            _ => Vec::new(),
+        }
+    }
+
     /// Get nftables open ports grouped by protocol
     /// Returns: (tcp_ports_string, udp_ports_string)
     fn get_nftables_open_ports(&self) -> (String, String) {
```
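A sample `exportfs -v` layout the parser above handles, including the continuation-line case where a long path pushes the client spec to the next line (values illustrative; note that `exportfs -v` prints options in canonical order, so the rw check keys on a `,rw,` substring inside the parentheses):

```text
/export/media   192.168.30.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,root_squash,no_all_squash)
/export/backup
                10.0.0.0/8(sync,wdelay,hide,no_subtree_check,sec=sys,ro,secure,root_squash,no_all_squash)
```

This would surface as sub-services named `/export/media rw [192.168.30.0/24]` and `/export/backup ro [10.0.0.0/8]`.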
```diff
@@ -141,8 +141,23 @@ pub struct NotificationConfig {
     pub from_email: String,
     pub to_email: String,
     pub rate_limit_minutes: u64,
+    /// Whether to send notifications on warning status
+    #[serde(default = "default_true")]
+    pub trigger_on_warnings: bool,
+    /// Whether to send notifications on failure/critical status
+    #[serde(default = "default_true")]
+    pub trigger_on_failures: bool,
+    /// Only send recovery notification when all components are OK
+    #[serde(default)]
+    pub recovery_requires_all_ok: bool,
+    /// Suppress individual recovery notifications (only notify on full recovery)
+    #[serde(default)]
+    pub suppress_individual_recoveries: bool,
     /// Email notification batching interval in seconds (default: 60)
     pub aggregation_interval_seconds: u64,
+    /// How often to check for status changes in seconds (default: 30)
+    #[serde(default = "default_check_interval_seconds")]
+    pub check_interval_seconds: u64,
     /// List of metric names to exclude from email notifications
     #[serde(default)]
     pub exclude_email_metrics: Vec<String>,
```
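A sketch of how the new knobs might look in an agent config file. The section name and values here are assumptions for illustration; the field names and serde defaults come from the struct above, and `smtp_host`/`smtp_port`/`enabled`/`maintenance_mode_file` are fields the notification code references elsewhere in this diff:

```toml
[notifications]
enabled = true
smtp_host = "127.0.0.1"
smtp_port = 25
from_email = "agent@{hostname}.example.org"   # {hostname} is substituted at send time
to_email = "admin@example.org"
rate_limit_minutes = 60
aggregation_interval_seconds = 60
# New in this release; default to true when omitted
trigger_on_warnings = true
trigger_on_failures = true
# New in this release; default to false when omitted
recovery_requires_all_ok = true
suppress_individual_recoveries = true
# New in this release; defaults to 30 when omitted
check_interval_seconds = 30
exclude_email_metrics = []
maintenance_mode_file = "/var/run/cm-dashboard/maintenance"
```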
```diff
@@ -151,6 +166,14 @@ pub struct NotificationConfig {
     pub maintenance_mode_file: String,
 }
 
+fn default_true() -> bool {
+    true
+}
+
+fn default_check_interval_seconds() -> u64 {
+    30
+}
+
 fn default_heartbeat_interval_seconds() -> u64 {
     5
```
```diff
@@ -1,60 +1,314 @@
 use crate::config::NotificationConfig;
 use anyhow::Result;
 use chrono::Utc;
 use cm_dashboard_shared::Status;
 use lettre::transport::smtp::SmtpTransport;
 use lettre::{Message, Transport};
 use std::collections::HashMap;
 use std::time::{Duration, Instant};
 use tracing::{debug, error, info};
 
-/// Manages notifications
+/// Manages notifications with rate limiting and aggregation
 pub struct NotificationManager {
     config: NotificationConfig,
+    /// Last notification time per component for rate limiting
+    last_notification: HashMap<String, Instant>,
+    /// Pending notifications for aggregation
+    pending_notifications: Vec<PendingNotification>,
+    /// Pending recovery notifications (held until all OK if configured)
+    pending_recoveries: Vec<PendingNotification>,
+    /// Last aggregation flush time
+    last_aggregation_flush: Option<Instant>,
+    /// Track components currently in alert state
+    components_in_alert: HashMap<String, Status>,
 }
 
+/// A pending notification waiting to be aggregated
+#[derive(Debug, Clone)]
+struct PendingNotification {
+    component: String,
+    previous_status: String,
+    current_status: String,
+    details: String,
+    timestamp: chrono::DateTime<Utc>,
+    is_recovery: bool,
+}
+
 impl NotificationManager {
     pub fn new(config: &NotificationConfig, _hostname: &str) -> Result<Self> {
         Ok(Self {
             config: config.clone(),
+            last_notification: HashMap::new(),
+            pending_notifications: Vec::new(),
+            pending_recoveries: Vec::new(),
+            last_aggregation_flush: None,
+            components_in_alert: HashMap::new(),
         })
     }
 
-    pub async fn send_direct_email(&mut self, subject: &str, body: &str) -> Result<()> {
+    /// Check if a component is rate limited
+    fn is_rate_limited(&self, component: &str) -> bool {
+        if self.config.rate_limit_minutes == 0 {
+            return false;
+        }
+        if let Some(last_time) = self.last_notification.get(component) {
+            let rate_limit = Duration::from_secs(self.config.rate_limit_minutes * 60);
+            last_time.elapsed() < rate_limit
+        } else {
+            false
+        }
+    }
+
+    /// Queue a degradation notification (Ok→Warning, Ok→Critical, Warning→Critical)
+    pub fn queue_alert(
+        &mut self,
+        component: &str,
+        previous: &Status,
+        current: &Status,
+        details: &str,
+    ) {
+        // Check if this status type should trigger notifications
+        // Only Warning and Critical trigger notifications (not Inactive)
+        let should_notify = match current {
+            Status::Warning => self.config.trigger_on_warnings,
+            Status::Critical => self.config.trigger_on_failures,
+            _ => false,
+        };
+
+        if !should_notify {
+            debug!(
+                "Notification for {} suppressed (trigger_on_warnings={}, trigger_on_failures={})",
+                component, self.config.trigger_on_warnings, self.config.trigger_on_failures
+            );
+            return;
+        }
+
+        // Check rate limit
+        if self.is_rate_limited(component) {
+            debug!(
+                "Notification for {} rate limited (limit: {} min)",
+                component, self.config.rate_limit_minutes
+            );
+            return;
+        }
+
+        // Check exclusions
+        if self.config.exclude_email_metrics.iter().any(|e| component.contains(e)) {
+            debug!("Notification for {} excluded by config", component);
+            return;
+        }
+
+        // Track this component as in alert state
+        self.components_in_alert.insert(component.to_string(), *current);
+
+        self.pending_notifications.push(PendingNotification {
+            component: component.to_string(),
+            previous_status: format!("{:?}", previous),
+            current_status: format!("{:?}", current),
+            details: details.to_string(),
+            timestamp: Utc::now(),
+            is_recovery: false,
+        });
+
+        // Update rate limit tracker
+        self.last_notification.insert(component.to_string(), Instant::now());
+
+        debug!(
+            "Queued alert for {}: {:?} -> {:?}",
+            component, previous, current
+        );
+    }
+
+    /// Queue a recovery notification (Warning→Ok, Critical→Ok, Critical→Warning)
+    pub fn queue_recovery(
+        &mut self,
+        component: &str,
+        previous: &Status,
+        current: &Status,
+        details: &str,
+    ) {
+        // Remove from alert tracking
+        self.components_in_alert.remove(component);
+
+        // Check if individual recoveries are suppressed
+        if self.config.suppress_individual_recoveries {
+            debug!(
+                "Individual recovery for {} suppressed by config",
+                component
+            );
+
+            // Store recovery for potential batch notification
+            self.pending_recoveries.push(PendingNotification {
+                component: component.to_string(),
+                previous_status: format!("{:?}", previous),
+                current_status: format!("{:?}", current),
+                details: details.to_string(),
+                timestamp: Utc::now(),
+                is_recovery: true,
+            });
+            return;
+        }
+
+        // Check exclusions
+        if self.config.exclude_email_metrics.iter().any(|e| component.contains(e)) {
+            debug!("Recovery notification for {} excluded by config", component);
+            return;
+        }
+
+        self.pending_notifications.push(PendingNotification {
+            component: component.to_string(),
+            previous_status: format!("{:?}", previous),
+            current_status: format!("{:?}", current),
+            details: details.to_string(),
+            timestamp: Utc::now(),
+            is_recovery: true,
+        });
+
+        debug!(
+            "Queued recovery for {}: {:?} -> {:?}",
+            component, previous, current
+        );
+    }
+
+    /// Check if all components have recovered (no components in alert state)
+    pub fn all_components_ok(&self) -> bool {
+        self.components_in_alert.is_empty()
+    }
+
+    /// Flush suppressed recovery notifications when all components are OK
+    pub fn flush_recoveries_if_all_ok(&mut self) {
+        if !self.config.recovery_requires_all_ok || self.all_components_ok() {
+            if !self.pending_recoveries.is_empty() {
+                info!("All components recovered, sending batch recovery notification");
+                self.pending_notifications.append(&mut self.pending_recoveries);
+            }
+        }
+    }
+
+    /// Check if it's time to flush aggregated notifications
+    pub fn should_flush(&self) -> bool {
+        if self.pending_notifications.is_empty() {
+            return false;
+        }
+
+        match self.last_aggregation_flush {
+            None => true, // First flush
+            Some(last_flush) => {
+                let aggregation_interval =
+                    Duration::from_secs(self.config.aggregation_interval_seconds);
+                last_flush.elapsed() >= aggregation_interval
+            }
+        }
+    }
+
+    /// Flush pending notifications as a single aggregated email
+    pub async fn flush_notifications(&mut self) -> Result<()> {
+        if self.pending_notifications.is_empty() {
+            return Ok(());
+        }
+
         if !self.config.enabled {
+            self.pending_notifications.clear();
+            self.last_aggregation_flush = Some(Instant::now());
             return Ok(());
         }
 
         if self.is_maintenance_mode() {
-            debug!("Maintenance mode active, suppressing email notification");
+            debug!("Maintenance mode active, suppressing aggregated notifications");
+            self.pending_notifications.clear();
+            self.last_aggregation_flush = Some(Instant::now());
             return Ok(());
         }
 
-        let hostname = gethostname::gethostname()
-            .to_string_lossy()
-            .to_string();
+        let hostname = gethostname::gethostname().to_string_lossy().to_string();
 
+        // Build aggregated email
+        let notification_count = self.pending_notifications.len();
+        let alert_count = self.pending_notifications.iter().filter(|n| !n.is_recovery).count();
+        let recovery_count = self.pending_notifications.iter().filter(|n| n.is_recovery).count();
+
+        let subject = if notification_count == 1 {
+            let n = &self.pending_notifications[0];
+            if n.is_recovery {
+                format!("[{}] {} Recovered: {}", hostname, n.component, n.current_status)
+            } else {
+                format!("[{}] {} Alert: {}", hostname, n.component, n.current_status)
+            }
+        } else if recovery_count > 0 && alert_count == 0 {
+            format!("[{}] {} Components Recovered", hostname, recovery_count)
+        } else if alert_count > 0 && recovery_count == 0 {
+            format!("[{}] {} Status Alerts", hostname, alert_count)
+        } else {
+            format!("[{}] {} Alerts, {} Recoveries", hostname, alert_count, recovery_count)
+        };
+
+        let mut body = String::new();
+        body.push_str(&format!("Status notifications for host: {}\n", hostname));
+        body.push_str(&format!("Time: {}\n\n", Utc::now().format("%Y-%m-%d %H:%M:%S UTC")));
+
+        // Group alerts and recoveries
+        let alerts: Vec<_> = self.pending_notifications.iter().filter(|n| !n.is_recovery).collect();
+        let recoveries: Vec<_> = self.pending_notifications.iter().filter(|n| n.is_recovery).collect();
+
+        if !alerts.is_empty() {
+            body.push_str("=== ALERTS ===\n\n");
+            for notification in &alerts {
+                body.push_str(&format!(
+                    "• {} : {} → {}\n  {}\n  ({})\n\n",
+                    notification.component,
+                    notification.previous_status,
+                    notification.current_status,
+                    notification.details,
+                    notification.timestamp.format("%H:%M:%S UTC")
+                ));
+            }
+        }
+
+        if !recoveries.is_empty() {
+            body.push_str("=== RECOVERIES ===\n\n");
+            for notification in &recoveries {
+                body.push_str(&format!(
+                    "• {} : {} → {}\n  {}\n  ({})\n\n",
+                    notification.component,
+                    notification.previous_status,
+                    notification.current_status,
+                    notification.details,
+                    notification.timestamp.format("%H:%M:%S UTC")
+                ));
+            }
+        }
+
+        body.push_str("--\nCM Dashboard Agent");
+
+        // Send the aggregated email
         let from_email = self.config.from_email.replace("{hostname}", &hostname);
 
-        let email_body = format!(
-            "{}\n\n--\nCM Dashboard Agent\nGenerated at {}",
-            body,
-            Utc::now().format("%Y-%m-%d %H:%M:%S %Z")
-        );
-
         let email = Message::builder()
             .from(from_email.parse()?)
             .to(self.config.to_email.parse()?)
-            .subject(subject)
-            .body(email_body)?;
+            .subject(&subject)
+            .body(body)?;
 
-        let mailer = SmtpTransport::unencrypted_localhost();
+        let mailer = SmtpTransport::builder_dangerous(&self.config.smtp_host)
+            .port(self.config.smtp_port)
+            .build();
 
         match mailer.send(&email) {
-            Ok(_) => info!("Direct email sent successfully: {}", subject),
+            Ok(_) => {
+                info!(
+                    "Sent aggregated notification email with {} alerts",
+                    notification_count
+                );
+            }
             Err(e) => {
-                error!("Failed to send email: {}", e);
+                error!("Failed to send aggregated email: {}", e);
                 return Err(e.into());
             }
         }
 
+        self.pending_notifications.clear();
+        self.last_aggregation_flush = Some(Instant::now());
+
         Ok(())
     }
```
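The flush-timing rule above is small enough to isolate. A self-contained sketch of the same decision, independent of the manager's other state:

```rust
use std::time::{Duration, Instant};

/// Sketch of NotificationManager's flush rule: flush when something is
/// pending and either nothing has been flushed yet or the aggregation
/// interval has elapsed since the last flush.
struct FlushState {
    pending: usize,
    last_flush: Option<Instant>,
    interval: Duration,
}

impl FlushState {
    fn should_flush(&self) -> bool {
        if self.pending == 0 {
            return false; // nothing to send
        }
        match self.last_flush {
            None => true, // first batch goes out immediately
            Some(t) => t.elapsed() >= self.interval,
        }
    }
}

fn main() {
    let first = FlushState { pending: 2, last_flush: None, interval: Duration::from_secs(60) };
    assert!(first.should_flush());

    let recent = FlushState { pending: 2, last_flush: Some(Instant::now()), interval: Duration::from_secs(60) };
    assert!(!recent.should_flush()); // within the window, keep batching

    println!("flush rule behaves as expected");
}
```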
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.264"
+version = "0.1.278"
 edition = "2021"
 
 [dependencies]
```
```diff
@@ -22,7 +22,6 @@ pub struct Dashboard {
     headless: bool,
     initial_commands_sent: std::collections::HashSet<String>,
     config: DashboardConfig,
-    title_area: Rect, // Store title area for mouse event handling
     system_area: Rect, // Store system area for mouse event handling
     services_area: Rect, // Store services area for mouse event handling
 }
@@ -124,7 +123,6 @@ impl Dashboard {
             headless,
             initial_commands_sent: std::collections::HashSet::new(),
             config,
-            title_area: Rect::default(),
             system_area: Rect::default(),
             services_area: Rect::default(),
         })
```
```diff
@@ -138,11 +136,12 @@ impl Dashboard {
         let metrics_check_interval = Duration::from_millis(100); // Check for metrics every 100ms
         let mut last_heartbeat_check = Instant::now();
         let heartbeat_check_interval = Duration::from_secs(1); // Check for host connectivity every 1 second
+        let mut needs_render = true; // Track if we need to render
 
         loop {
             // Handle terminal events (keyboard and mouse input) only if not headless
             if !self.headless {
-                match event::poll(Duration::from_millis(50)) {
+                match event::poll(Duration::from_millis(200)) {
                     Ok(true) => {
                         match event::read() {
                             Ok(event) => {
@@ -152,6 +151,7 @@ impl Dashboard {
                                 // Handle keyboard input
                                 match tui_app.handle_input(event) {
                                     Ok(_) => {
+                                        needs_render = true;
                                         // Check if we should quit
                                         if tui_app.should_quit() {
                                             info!("Quit requested, exiting dashboard");
@@ -168,10 +168,11 @@ impl Dashboard {
                                     if let Err(e) = self.handle_mouse_event(mouse_event) {
                                         error!("Error handling mouse event: {}", e);
                                     }
+                                    needs_render = true;
                                 }
                                 Event::Resize(_width, _height) => {
-                                    // Terminal was resized - just continue and re-render
-                                    // The next render will automatically use the new size
+                                    // Terminal was resized - mark for re-render
+                                    needs_render = true;
                                 }
                                 _ => {}
                             }
```
```diff
@@ -189,38 +190,6 @@ impl Dashboard {
                     break;
                 }
             }
-
-            // Render UI immediately after handling input for responsive feedback
-            if let Some(ref mut terminal) = self.terminal {
-                if let Some(ref mut tui_app) = self.tui_app {
-                    // Clear and autoresize terminal to handle any resize events
-                    if let Err(e) = terminal.autoresize() {
-                        warn!("Error autoresizing terminal: {}", e);
-                    }
-
-                    // Check minimum terminal size to prevent panics
-                    let size = terminal.size().unwrap_or_default();
-                    if size.width < 90 || size.height < 15 {
-                        // Terminal too small, show error message
-                        let msg_text = format!("Terminal too small\n\nMinimum: 90x15\nCurrent: {}x{}", size.width, size.height);
-                        let _ = terminal.draw(|frame| {
-                            use ratatui::widgets::{Paragraph, Block, Borders};
-                            use ratatui::layout::Alignment;
-                            let msg = Paragraph::new(msg_text.clone())
-                                .alignment(Alignment::Center)
-                                .block(Block::default().borders(Borders::ALL));
-                            frame.render_widget(msg, frame.size());
-                        });
-                    } else if let Err(e) = terminal.draw(|frame| {
-                        let (title_area, system_area, services_area) = tui_app.render(frame, &self.metric_store);
-                        self.title_area = title_area;
-                        self.system_area = system_area;
-                        self.services_area = services_area;
-                    }) {
-                        error!("Error rendering TUI after input: {}", e);
-                    }
-                }
-            }
 
             // Check for new metrics
```
```diff
@@ -259,6 +228,8 @@ impl Dashboard {
                 if let Some(ref mut tui_app) = self.tui_app {
                     tui_app.update_metrics(&mut self.metric_store);
                 }
+
+                needs_render = true; // New metrics received, need to render
             }
 
             // Also check for command output messages
@@ -287,10 +258,11 @@ impl Dashboard {
                     tui_app.update_hosts(connected_hosts);
                 }
                 last_heartbeat_check = Instant::now();
+                needs_render = true; // Heartbeat check happened, may have changed hosts
             }
 
-            // Render TUI (only if not headless)
-            if !self.headless {
+            // Render TUI only when needed (not headless and something changed)
+            if !self.headless && needs_render {
                 if let Some(ref mut terminal) = self.terminal {
                     if let Some(ref mut tui_app) = self.tui_app {
                         // Clear and autoresize terminal to handle any resize events
```
```diff
@@ -298,22 +270,9 @@ impl Dashboard {
                             warn!("Error autoresizing terminal: {}", e);
                         }
 
-                        // Check minimum terminal size to prevent panics
-                        let size = terminal.size().unwrap_or_default();
-                        if size.width < 90 || size.height < 15 {
-                            // Terminal too small, show error message
-                            let msg_text = format!("Terminal too small\n\nMinimum: 90x15\nCurrent: {}x{}", size.width, size.height);
-                            let _ = terminal.draw(|frame| {
-                                use ratatui::widgets::{Paragraph, Block, Borders};
-                                use ratatui::layout::Alignment;
-                                let msg = Paragraph::new(msg_text.clone())
-                                    .alignment(Alignment::Center)
-                                    .block(Block::default().borders(Borders::ALL));
-                                frame.render_widget(msg, frame.size());
-                            });
-                        } else if let Err(e) = terminal.draw(|frame| {
-                            let (title_area, system_area, services_area) = tui_app.render(frame, &self.metric_store);
-                            self.title_area = title_area;
+                        // Render TUI regardless of terminal size
+                        if let Err(e) = terminal.draw(|frame| {
+                            let (_title_area, system_area, services_area) = tui_app.render(frame, &self.metric_store);
                             self.system_area = system_area;
                             self.services_area = services_area;
                         }) {
@@ -322,10 +281,8 @@ impl Dashboard {
                     }
                 }
             }
+            needs_render = false; // Reset flag after rendering
 
-            // Small sleep to prevent excessive CPU usage
-            tokio::time::sleep(Duration::from_millis(10)).await;
         }
 
         info!("Dashboard main loop ended");
```
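Taken together, these hunks convert the main loop from render-every-iteration (plus a fixed 10ms sleep) to dirty-flag rendering paced by the 200ms input poll. A minimal sketch of the pattern, independent of ratatui:

```rust
use std::time::Duration;

// Dirty-flag render loop: block briefly for input, mark needs_render on
// any state change, and draw at most once per iteration.
fn main() {
    let mut needs_render = true; // first frame always draws
    for tick in 0..3 {
        // stand-in for event::poll(Duration::from_millis(200))
        if poll_input(Duration::from_millis(200), tick) {
            needs_render = true;
        }
        if needs_render {
            render();
            needs_render = false; // reset after drawing
        }
    }
}

fn poll_input(_timeout: Duration, tick: u32) -> bool {
    tick == 1 // pretend input arrives on the second iteration
}

fn render() {
    println!("frame drawn");
}
```

The poll timeout doubles as the idle sleep, so the explicit `tokio::time::sleep` is no longer needed.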
```diff
@@ -420,19 +377,13 @@ impl Dashboard {
             return Ok(());
         }
 
-        // Check for title bar clicks (host selection)
+        // Check for tab clicks in right panel (hosts | services)
         if matches!(mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
-            if is_in_area(x, y, &self.title_area) {
-                // Click in title bar - check if it's on a hostname
-                // The title bar has "cm-dashboard vX.X.X" on the left (22 chars)
-                // Then hostnames start at position 22
-                if x >= 22 {
-                    let hostname = self.find_hostname_at_position(x);
-                    if let Some(host) = hostname {
-                        if let Some(ref mut tui_app) = self.tui_app {
-                            tui_app.switch_to_host(&host);
-                        }
-                    }
+            let services_end = self.services_area.x.saturating_add(self.services_area.width);
+            if y == self.services_area.y && x >= self.services_area.x && x < services_end {
+                // Click on top border of services area (where tabs are)
+                if let Some(ref mut tui_app) = self.tui_app {
+                    tui_app.handle_tab_click(x, &self.services_area);
                 }
                 return Ok(());
             }
```
@@ -496,44 +447,66 @@ impl Dashboard {
            return Ok(());
        }

        // Calculate which service was clicked
        // The services area includes a border, so we need to account for that
        let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header

        if let Some(ref mut tui_app) = self.tui_app {
            if let Some(hostname) = tui_app.current_host.clone() {
                let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
            if tui_app.focus_hosts {
                // Hosts tab is active - handle host click
                // The services area includes a border and header, so account for that
                let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header

                // Account for scroll offset - the clicked line is relative to viewport
                let display_line_index = host_widgets.services_widget.scroll_offset + relative_y;

                // Map display line to parent service index
                if let Some(parent_index) = host_widgets.services_widget.display_line_to_parent_index(display_line_index) {
                    // Set the selected index to the clicked parent service
                    host_widgets.services_widget.selected_index = parent_index;
                let total_hosts = tui_app.get_available_hosts().len();
                let clicked_index = tui_app.hosts_widget.y_to_host_index(relative_y);

                if clicked_index < total_hosts {
                    match button {
                        MouseButton::Left => {
                            // Left click just selects the service
                            debug!("Left-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
                        }
                        MouseButton::Right => {
                            // Right click opens context menu
                            debug!("Right-clicked service at display line {} (parent index: {})", display_line_index, parent_index);

                            // Get the service name for the popup
                            if let Some(service_name) = host_widgets.services_widget.get_selected_service() {
                                tui_app.popup_menu = Some(crate::ui::PopupMenu {
                                    service_name,
                                    x,
                                    y,
                                    selected_index: 0,
                                });
                            }
                            // Left click: set selector and switch to host immediately
                            tui_app.hosts_widget.set_selected_index(clicked_index, total_hosts);
                            let selected_host = tui_app.get_available_hosts()[clicked_index].clone();
                            tui_app.switch_to_host(&selected_host);
                            debug!("Clicked host at index {}: {}", clicked_index, selected_host);
                        }
                        _ => {}
                    }
                }
            } else {
                // Services tab is active - handle service click
                // The services area includes a border, so we need to account for that
                let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header

                if let Some(hostname) = tui_app.current_host.clone() {
                    let host_widgets = tui_app.get_or_create_host_widgets(&hostname);

                    // Account for scroll offset - the clicked line is relative to viewport
                    let display_line_index = host_widgets.services_widget.scroll_offset + relative_y;

                    // Map display line to parent service index
                    if let Some(parent_index) = host_widgets.services_widget.display_line_to_parent_index(display_line_index) {
                        // Set the selected index to the clicked parent service
                        host_widgets.services_widget.selected_index = parent_index;

                        match button {
                            MouseButton::Left => {
                                // Left click just selects the service
                                debug!("Left-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
                            }
                            MouseButton::Right => {
                                // Right click opens context menu
                                debug!("Right-clicked service at display line {} (parent index: {})", display_line_index, parent_index);

                                // Get the service name for the popup
                                if let Some(service_name) = host_widgets.services_widget.get_selected_service() {
                                    tui_app.popup_menu = Some(crate::ui::PopupMenu {
                                        service_name,
                                        x,
                                        y,
                                        selected_index: 0,
                                    });
                                }
                            }
                            _ => {}
                        }
                    }
                }
            }
        }
    }
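Both branches above run the same row arithmetic before dispatching. A minimal standalone sketch of that mapping (plain Rust; the function name and values are illustrative, not the project's API):

// Standalone sketch of the click math above: a terminal row y is first made
// relative to the widget (border + header occupy 2 rows), then offset by the
// scroll position to get the absolute display line.
fn click_to_display_line(y: u16, area_y: u16, scroll_offset: usize) -> usize {
    let relative_y = y.saturating_sub(area_y + 2) as usize; // +2 for border and header
    scroll_offset + relative_y
}

fn main() {
    // Widget starts at row 3, list scrolled down by 4: a click on terminal
    // row 9 lands on relative row 4, i.e. display line 8.
    assert_eq!(click_to_display_line(9, 3, 4), 8);
    // Clicks above the list content clamp to the first visible line.
    assert_eq!(click_to_display_line(3, 3, 4), 4);
}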
@@ -628,76 +601,12 @@ impl Dashboard {
            .unwrap_or_else(|| hostname.to_string())
    }

    /// Find which hostname is at a given x position in the title bar
    fn find_hostname_at_position(&self, x: u16) -> Option<String> {
        if let Some(ref tui_app) = self.tui_app {
            // The hosts are RIGHT-ALIGNED in chunks[1]!
            // Need to calculate total width first, then right-align

            // Get terminal width
            let terminal_width = if let Some(ref terminal) = self.terminal {
                terminal.size().unwrap_or_default().width
            } else {
                80
            };

            // Calculate total width of all host text
            let mut total_width = 0_u16;
            for (i, host) in tui_app.get_available_hosts().iter().enumerate() {
                if i > 0 {
                    total_width += 1; // space between hosts
                }
                total_width += 2; // icon + space
                let is_selected = Some(host) == tui_app.current_host.as_ref();
                if is_selected {
                    total_width += 1 + host.len() as u16 + 1; // [hostname]
                } else {
                    total_width += host.len() as u16;
                }
            }
            total_width += 1; // right padding

            // chunks[1] starts at 22, has width of (terminal_width - 22)
            let chunk_width = terminal_width - 22;

            // Right-aligned position
            let hosts_start_x = if total_width < chunk_width {
                22 + (chunk_width - total_width)
            } else {
                22
            };

            // Now calculate positions starting from hosts_start_x
            let mut pos = hosts_start_x;

            for (i, host) in tui_app.get_available_hosts().iter().enumerate() {
                if i > 0 {
                    pos += 1; // " "
                }

                let host_start = pos;
                pos += 2; // "● "

                let is_selected = Some(host) == tui_app.current_host.as_ref();
                if is_selected {
                    pos += 1 + host.len() as u16 + 1; // [hostname]
                } else {
                    pos += host.len() as u16;
                }

                if x >= host_start && x < pos {
                    return Some(host.clone());
                }
            }
        }
        None
    }
}

/// Check if a point is within a rectangular area
fn is_in_area(x: u16, y: u16, area: &Rect) -> bool {
    x >= area.x && x < area.x + area.width
        && y >= area.y && y < area.y + area.height
    x >= area.x && x < area.x.saturating_add(area.width)
        && y >= area.y && y < area.y.saturating_add(area.height)
}

impl Drop for Dashboard {
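The is_in_area rewrite above swaps unchecked u16 addition for saturating_add. A minimal sketch of the edge case it guards against, with a local Area stand-in for ratatui's Rect:

// Near u16::MAX, `x + width` would overflow (a panic in debug builds);
// saturating_add clamps to u16::MAX and the hit test stays sound.
#[derive(Clone, Copy)]
struct Area {
    x: u16,
    y: u16,
    width: u16,
    height: u16,
}

fn is_in_area(x: u16, y: u16, area: Area) -> bool {
    x >= area.x
        && x < area.x.saturating_add(area.width)
        && y >= area.y
        && y < area.y.saturating_add(area.height)
}

fn main() {
    // An area pushed against the edge of the u16 coordinate space:
    let a = Area { x: u16::MAX - 1, y: 0, width: 10, height: 1 };
    // 65534 + 10 would overflow u16; the saturating version just clamps.
    assert!(is_in_area(u16::MAX - 1, 0, a));
    assert!(!is_in_area(100, 0, a));
}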
@@ -18,7 +18,7 @@ use crate::config::DashboardConfig;
use crate::metrics::MetricStore;
use cm_dashboard_shared::Status;
use theme::{Components, Layout as ThemeLayout, Theme};
use widgets::{ServicesWidget, SystemWidget, Widget};
use widgets::{HostsWidget, ServicesWidget, SystemWidget, Widget};

@@ -64,8 +64,6 @@ pub struct TuiApp {
    pub current_host: Option<String>,
    /// Available hosts
    available_hosts: Vec<String>,
    /// Host index for navigation
    host_index: usize,
    /// Should quit application
    should_quit: bool,
    /// Track if user manually navigated away from localhost
@@ -76,6 +74,10 @@ pub struct TuiApp {
    localhost: String,
    /// Active popup menu (if any)
    pub popup_menu: Option<PopupMenu>,
    /// Focus on hosts tab (false = Services, true = Hosts)
    pub focus_hosts: bool,
    /// Hosts widget for navigation and rendering
    pub hosts_widget: HostsWidget,
}

impl TuiApp {
@@ -85,12 +87,13 @@ impl TuiApp {
            host_widgets: HashMap::new(),
            current_host: None,
            available_hosts: config.hosts.keys().cloned().collect(),
            host_index: 0,
            should_quit: false,
            user_navigated_away: false,
            config,
            localhost,
            popup_menu: None,
            focus_hosts: true, // Start with Hosts tab focused by default
            hosts_widget: HostsWidget::new(),
        };

        // Sort predefined hosts
@@ -143,26 +146,31 @@ impl TuiApp {
        all_hosts.sort();
        self.available_hosts = all_hosts;

        // Track if we had a host before this update
        let had_host = self.current_host.is_some();

        // Get the current hostname (localhost) for auto-selection
        if !self.available_hosts.is_empty() {
            if self.available_hosts.contains(&self.localhost) && !self.user_navigated_away {
                // Localhost is available and user hasn't navigated away - switch to it
                self.current_host = Some(self.localhost.clone());
                // Find the actual index of localhost in the sorted list
                self.host_index = self.available_hosts.iter().position(|h| h == &self.localhost).unwrap_or(0);
                // Initialize selector bar on first host selection
                if !had_host {
                    let index = self.available_hosts.iter().position(|h| h == &self.localhost).unwrap_or(0);
                    self.hosts_widget.set_selected_index(index, self.available_hosts.len());
                }
            } else if self.current_host.is_none() {
                // No current host - select first available (which is localhost if available)
                self.current_host = Some(self.available_hosts[0].clone());
                self.host_index = 0;
                // Initialize selector bar
                self.hosts_widget.set_selected_index(0, self.available_hosts.len());
            } else if let Some(ref current) = self.current_host {
                if !self.available_hosts.contains(current) {
                    // Current host disconnected - select first available and reset navigation flag
                    // Current host disconnected - FORCE switch to first available
                    self.current_host = Some(self.available_hosts[0].clone());
                    self.host_index = 0;
                    // Reset selector bar since we're forcing a host change
                    self.hosts_widget.set_selected_index(0, self.available_hosts.len());
                    self.user_navigated_away = false; // Reset since we're forced to switch
                } else if let Some(index) = self.available_hosts.iter().position(|h| h == current) {
                    // Update index for current host
                    self.host_index = index;
                }
            }
        }
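The selection policy above reduces to a small pure function. A sketch under that reading (pick_host_index is illustrative, not the real API):

/// Illustrative sketch of the auto-selection rule used above.
fn pick_host_index(
    available: &[String],
    localhost: &str,
    current: Option<&str>,
    user_navigated_away: bool,
) -> Option<usize> {
    if available.is_empty() {
        return None;
    }
    // Prefer localhost unless the user deliberately navigated away from it.
    if !user_navigated_away {
        if let Some(idx) = available.iter().position(|h| h.as_str() == localhost) {
            return Some(idx);
        }
    }
    // Keep the current host while it is still connected; otherwise force a
    // switch to the first available host (and the caller resets the flag).
    match current.and_then(|c| available.iter().position(|h| h.as_str() == c)) {
        Some(idx) => Some(idx),
        None => Some(0),
    }
}

fn main() {
    let hosts = vec!["alpha".to_string(), "beta".to_string()];
    // Localhost present and the user has not navigated away: localhost wins.
    assert_eq!(pick_host_index(&hosts, "beta", Some("alpha"), false), Some(1));
    // Current host disconnected: fall back to the first available host.
    assert_eq!(pick_host_index(&hosts, "gamma", Some("gamma"), true), Some(0));
}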
@@ -183,12 +191,6 @@ impl TuiApp {
            KeyCode::Char('q') => {
                self.should_quit = true;
            }
            KeyCode::Left => {
                self.navigate_host(-1);
            }
            KeyCode::Right => {
                self.navigate_host(1);
            }
            KeyCode::Char('r') => {
                // System rebuild command - works on any panel for current host
                if let Some(hostname) = self.current_host.clone() {
@@ -356,25 +358,46 @@ impl TuiApp {
                }
            }
            KeyCode::Tab => {
                // Tab cycles to next host
                self.navigate_host(1);
                // Tab toggles between Services and Hosts tabs
                self.focus_hosts = !self.focus_hosts;
            }
            KeyCode::Up | KeyCode::Char('k') => {
                // Move service selection up
                if let Some(hostname) = self.current_host.clone() {
                    let host_widgets = self.get_or_create_host_widgets(&hostname);
                    host_widgets.services_widget.select_previous();
                if self.focus_hosts {
                    // Move blue selector bar up when in Hosts tab
                    self.hosts_widget.select_previous();
                } else {
                    // Move service selection up when in Services tab
                    if let Some(hostname) = self.current_host.clone() {
                        let host_widgets = self.get_or_create_host_widgets(&hostname);
                        host_widgets.services_widget.select_previous();
                    }
                }
            }
            KeyCode::Down | KeyCode::Char('j') => {
                // Move service selection down
                if let Some(hostname) = self.current_host.clone() {
                    let total_services = {
                if self.focus_hosts {
                    // Move blue selector bar down when in Hosts tab
                    let total_hosts = self.available_hosts.len();
                    self.hosts_widget.select_next(total_hosts);
                } else {
                    // Move service selection down when in Services tab
                    if let Some(hostname) = self.current_host.clone() {
                        let total_services = {
                            let host_widgets = self.get_or_create_host_widgets(&hostname);
                            host_widgets.services_widget.get_total_services_count()
                        };
                        let host_widgets = self.get_or_create_host_widgets(&hostname);
                        host_widgets.services_widget.get_total_services_count()
                        };
                        let host_widgets = self.get_or_create_host_widgets(&hostname);
                        host_widgets.services_widget.select_next(total_services);
                        host_widgets.services_widget.select_next(total_services);
                    }
                }
            }
            KeyCode::Enter => {
                if self.focus_hosts {
                    // Enter key switches to the selected host
                    let selected_idx = self.hosts_widget.get_selected_index();
                    if selected_idx < self.available_hosts.len() {
                        let selected_host = self.available_hosts[selected_idx].clone();
                        self.switch_to_host(&selected_host);
                    }
                }
            }
            _ => {}
@@ -386,7 +409,8 @@ impl TuiApp {
    /// Switch to a specific host by name
    pub fn switch_to_host(&mut self, hostname: &str) {
        if let Some(index) = self.available_hosts.iter().position(|h| h == hostname) {
            self.host_index = index;
            // Update selector bar position
            self.hosts_widget.set_selected_index(index, self.available_hosts.len());
            self.current_host = Some(hostname.to_string());

            // Check if user navigated away from localhost
@@ -400,41 +424,33 @@ impl TuiApp {
        }
    }

    /// Navigate between hosts
    fn navigate_host(&mut self, direction: i32) {
        if self.available_hosts.is_empty() {
            return;
    /// Handle mouse click on tab title area
    pub fn handle_tab_click(&mut self, x: u16, area: &Rect) {
        // Tab title format: "hosts | services"
        // Calculate positions relative to area start
        let title_start_x = area.x + 1; // +1 for left border

        // "hosts | services"
        //  0123456789...
        let hosts_start = title_start_x;
        let hosts_end = hosts_start + 5; // "hosts" is 5 chars
        let services_start = hosts_end + 3; // After " | "
        let services_end = services_start + 8; // "services" is 8 chars

        if x >= hosts_start && x < hosts_end {
            // Clicked on "hosts"
            self.focus_hosts = true;
        } else if x >= services_start && x < services_end {
            // Clicked on "services"
            self.focus_hosts = false;
        }

        let len = self.available_hosts.len();
        if direction > 0 {
            self.host_index = (self.host_index + 1) % len;
        } else {
            self.host_index = if self.host_index == 0 {
                len - 1
            } else {
                self.host_index - 1
            };
        }

        self.current_host = Some(self.available_hosts[self.host_index].clone());

        // Check if user navigated away from localhost
        if let Some(ref current) = self.current_host {
            if current != &self.localhost {
                self.user_navigated_away = true;
            } else {
                self.user_navigated_away = false; // User navigated back to localhost
            }
        }

        info!("Switched to host: {}", self.current_host.as_ref().unwrap());
    }

    /// Get the currently selected service name from the services widget
    fn get_selected_service(&self) -> Option<String> {
        if let Some(hostname) = &self.current_host {
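The hard-coded offsets in handle_tab_click are easy to verify by hand. A minimal sketch of the same hit-test math (tab_hit is illustrative, not the project's API):

// "hosts | services" drawn one cell after the left border: "hosts" spans 5
// columns, " | " the next 3, "services" the 8 after that.
fn tab_hit(x: u16, area_x: u16) -> Option<&'static str> {
    let title_start = area_x + 1; // skip the border cell
    let hosts = title_start..title_start + 5;        // "hosts"
    let services = hosts.end + 3..hosts.end + 3 + 8; // skip " | ", then "services"
    if hosts.contains(&x) {
        Some("hosts")
    } else if services.contains(&x) {
        Some("services")
    } else {
        None
    }
}

fn main() {
    // With the panel at x = 0: columns 1..6 hit "hosts", 9..17 hit "services".
    assert_eq!(tab_hit(1, 0), Some("hosts"));
    assert_eq!(tab_hit(6, 0), None); // the " | " separator is dead space
    assert_eq!(tab_hit(9, 0), Some("services"));
}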
@@ -498,14 +514,6 @@ impl TuiApp {
            true // No host selected is considered offline
        };

        // If host is offline, render wake-up message instead of panels
        if current_host_offline {
            self.render_offline_host_message(frame, main_chunks[1]);
            self.render_btop_title(frame, main_chunks[0], metric_store);
            self.render_statusbar(frame, main_chunks[2], metric_store);
            return (main_chunks[0], Rect::default(), Rect::default()); // Return title area and empty areas when offline
        }

        // Left side: system panel only (full height)
        let left_chunks = ratatui::layout::Layout::default()
            .direction(Direction::Vertical)
@@ -515,20 +523,18 @@ impl TuiApp {
        // Render title bar
        self.render_btop_title(frame, main_chunks[0], metric_store);

        // Render system panel
        // Render system panel or offline message in system panel area
        let system_area = left_chunks[0];
        self.render_system_panel(frame, system_area, metric_store);

        // Render services widget for current host
        let services_area = content_chunks[1];
        if let Some(hostname) = self.current_host.clone() {
            let is_focused = true; // Always show service selection
            let host_widgets = self.get_or_create_host_widgets(&hostname);
            host_widgets
                .services_widget
                .render(frame, services_area, is_focused); // Services takes full right side
        if current_host_offline {
            self.render_offline_host_message(frame, system_area);
        } else {
            self.render_system_panel(frame, system_area, metric_store);
        }

        // Render right panel with tabs (Services | Hosts)
        let services_area = content_chunks[1];
        self.render_right_panel_with_tabs(frame, services_area, metric_store);

        // Render statusbar at the bottom
        self.render_statusbar(frame, main_chunks[2], metric_store);
@@ -545,7 +551,6 @@ impl TuiApp {
    fn render_btop_title(&self, frame: &mut Frame, area: Rect, metric_store: &MetricStore) {
        use ratatui::style::Modifier;
        use ratatui::text::{Line, Span};
        use theme::StatusIcons;

        if self.available_hosts.is_empty() {
            let title_text = "cm-dashboard • no hosts discovered";
@@ -568,86 +573,34 @@ impl TuiApp {
        // Use the worst status color as background
        let background_color = Theme::status_color(worst_status);

        // Split the title bar into left and right sections
        let chunks = Layout::default()
            .direction(Direction::Horizontal)
            .constraints([Constraint::Length(22), Constraint::Min(0)])
            .split(area);
        // Single line title bar showing dashboard name (left) and dashboard IP (right)
        let left_text = format!(" cm-dashboard v{}", env!("CARGO_PKG_VERSION"));

        // Left side: "cm-dashboard" text with version
        let title_text = format!(" cm-dashboard v{}", env!("CARGO_PKG_VERSION"));
        let left_span = Span::styled(
            &title_text,
            Style::default().fg(Theme::background()).bg(background_color).add_modifier(Modifier::BOLD)
        );
        let left_title = Paragraph::new(Line::from(vec![left_span]))
            .style(Style::default().bg(background_color));
        frame.render_widget(left_title, chunks[0]);
        // Get dashboard local IP for right side
        let dashboard_ip = Self::get_local_ip();
        let right_text = format!("{} ", dashboard_ip);

        // Right side: hosts with status indicators
        let mut host_spans = Vec::new();
        // Calculate spacing to push right text to the right
        let total_text_len = left_text.len() + right_text.len();
        let spacing = (area.width as usize).saturating_sub(total_text_len).max(1);
        let spacing_str = " ".repeat(spacing);

        for (i, host) in self.available_hosts.iter().enumerate() {
            if i > 0 {
                host_spans.push(Span::styled(
                    " ",
                    Style::default().fg(Theme::background()).bg(background_color)
                ));
            }

            // Always show normal status icon based on metrics (no command status at host level)
            let host_status = self.calculate_host_status(host, metric_store);
            let status_icon = StatusIcons::get_icon(host_status);

            // Add status icon with background color as foreground against status background
            host_spans.push(Span::styled(
                format!("{} ", status_icon),
                Style::default().fg(Theme::background()).bg(background_color),
            ));

            if Some(host) == self.current_host.as_ref() {
                // Selected host with brackets in bold background color against status background
                host_spans.push(Span::styled(
                    "[",
                    Style::default()
                        .fg(Theme::background())
                        .bg(background_color)
                        .add_modifier(Modifier::BOLD),
                ));
                host_spans.push(Span::styled(
                    host.clone(),
                    Style::default()
                        .fg(Theme::background())
                        .bg(background_color)
                        .add_modifier(Modifier::BOLD),
                ));
                host_spans.push(Span::styled(
                    "]",
                    Style::default()
                        .fg(Theme::background())
                        .bg(background_color)
                        .add_modifier(Modifier::BOLD),
                ));
            } else {
                // Other hosts in normal background color against status background
                host_spans.push(Span::styled(
                    host.clone(),
                    Style::default().fg(Theme::background()).bg(background_color),
                ));
            }
        }

        // Add right padding
        host_spans.push(Span::styled(
            " ",
            Style::default().fg(Theme::background()).bg(background_color)
        ));

        let host_line = Line::from(host_spans);
        let host_title = Paragraph::new(vec![host_line])
            .style(Style::default().bg(background_color))
            .alignment(ratatui::layout::Alignment::Right);
        frame.render_widget(host_title, chunks[1]);
        let title = Paragraph::new(Line::from(vec![
            Span::styled(
                left_text,
                Style::default().fg(Theme::background()).bg(background_color).add_modifier(Modifier::BOLD)
            ),
            Span::styled(
                spacing_str,
                Style::default().bg(background_color)
            ),
            Span::styled(
                right_text,
                Style::default().fg(Theme::background()).bg(background_color)
            ),
        ]))
        .style(Style::default().bg(background_color));
        frame.render_widget(title, area);
    }

    /// Calculate overall status for a host based on its structured data
@@ -735,7 +688,7 @@ impl TuiApp {
        use ratatui::widgets::Paragraph;

        // Get current host info
        let (hostname_str, host_ip, build_version, agent_version) = if let Some(hostname) = &self.current_host {
        let (hostname_str, host_ip, kernel_version, build_version, agent_version) = if let Some(hostname) = &self.current_host {
            // Get the connection IP (the IP dashboard uses to connect to the agent)
            let ip = if let Some(host_details) = self.config.hosts.get(hostname) {
                host_details.get_connection_ip(hostname)
@@ -743,32 +696,30 @@ impl TuiApp {
                hostname.clone()
            };

            // Get build and agent versions from system widget
            let (build, agent) = if let Some(host_widgets) = self.host_widgets.get(hostname) {
            // Get kernel, build and agent versions from system widget
            let (kernel, build, agent) = if let Some(host_widgets) = self.host_widgets.get(hostname) {
                let kernel = host_widgets.system_widget.get_kernel_version().unwrap_or("N/A".to_string());
                let build = host_widgets.system_widget.get_build_version().unwrap_or("N/A".to_string());
                let agent = host_widgets.system_widget.get_agent_version().unwrap_or("N/A".to_string());
                (build, agent)
                (kernel, build, agent)
            } else {
                ("N/A".to_string(), "N/A".to_string())
                ("N/A".to_string(), "N/A".to_string(), "N/A".to_string())
            };

            (hostname.clone(), ip, build, agent)
            (hostname.clone(), ip, kernel, build, agent)
        } else {
            ("None".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string())
            ("None".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string())
        };

        let left_text = format!("Host: {} | {} | Build:{} | Agent:{}", hostname_str, host_ip, build_version, agent_version);
        let left_text = format!(" Host: {} | {} | {}", hostname_str, host_ip, kernel_version);
        let right_text = format!("Build:{} | Agent:{} ", build_version, agent_version);

        // Get dashboard local IP
        let dashboard_ip = Self::get_local_ip();
        let right_text = format!("Dashboard: {}", dashboard_ip);

        // Calculate spacing to push right text to the right (accounting for 1 char left padding)
        let spacing = area.width as usize - left_text.len() - right_text.len() - 2; // -2 for left padding
        let spacing_str = " ".repeat(spacing.max(1));
        // Calculate spacing to push right text to the right
        let total_text_len = left_text.len() + right_text.len();
        let spacing = (area.width as usize).saturating_sub(total_text_len).max(1);
        let spacing_str = " ".repeat(spacing);

        let line = Line::from(vec![
            Span::raw(" "), // 1 char left padding
            Span::styled(left_text, Style::default().fg(Theme::border())),
            Span::raw(spacing_str),
            Span::styled(right_text, Style::default().fg(Theme::border())),
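The padding change above matters on narrow terminals. A minimal sketch of the saturating math (spacing is illustrative, not the project's API):

// saturating_sub never underflows, so a terminal narrower than the two texts
// still yields one space instead of a usize-overflow panic in debug builds.
fn spacing(width: usize, left: &str, right: &str) -> usize {
    width.saturating_sub(left.len() + right.len()).max(1)
}

fn main() {
    assert_eq!(spacing(80, " Host: a | 10.0.0.1", "Build:42 "), 80 - 19 - 9);
    assert_eq!(spacing(10, "a very long left text", "right"), 1); // clamped, no panic
}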
@@ -808,9 +759,75 @@ impl TuiApp {
    }

    /// Render offline host message with wake-up option
    /// Render right panel with tabs (hosts | services)
    fn render_right_panel_with_tabs(&mut self, frame: &mut Frame, area: Rect, metric_store: &MetricStore) {
        use ratatui::style::Modifier;
        use ratatui::text::{Line, Span};
        use ratatui::widgets::{Block, Borders};

        // Build tab title with bold styling for active tab (like cm-player)
        let hosts_style = if self.focus_hosts {
            Style::default().fg(Theme::border_title()).add_modifier(Modifier::BOLD)
        } else {
            Style::default().fg(Theme::border_title())
        };

        let services_style = if !self.focus_hosts {
            Style::default().fg(Theme::border_title()).add_modifier(Modifier::BOLD)
        } else {
            Style::default().fg(Theme::border_title())
        };

        let title = Line::from(vec![
            Span::styled("hosts", hosts_style),
            Span::raw(" | "),
            Span::styled("services", services_style),
        ]);

        // Create ONE block with tab title (like cm-player)
        let main_block = Block::default()
            .borders(Borders::ALL)
            .title(title.clone())
            .style(Style::default().fg(Theme::border()).bg(Theme::background()));

        let inner_area = main_block.inner(area);
        frame.render_widget(main_block, area);

        // Render appropriate content based on active tab
        if self.focus_hosts {
            // Render hosts list (no additional borders)
            let localhost = self.localhost.clone();
            let current_host = self.current_host.as_deref();
            self.hosts_widget.render(
                frame,
                inner_area,
                &self.available_hosts,
                &localhost,
                current_host,
                metric_store,
                |hostname, store| {
                    // Inline calculate_host_status logic
                    if store.get_agent_data(hostname).is_some() {
                        Status::Ok
                    } else {
                        Status::Offline
                    }
                },
                true, // Always focused when visible
            );
        } else {
            // Render services for current host (no additional borders - just content!)
            if let Some(hostname) = self.current_host.clone() {
                let is_focused = true;
                let host_widgets = self.get_or_create_host_widgets(&hostname);
                host_widgets.services_widget.render_content(frame, inner_area, is_focused);
            }
        }
    }

    /// Render offline host message in system panel area
    fn render_offline_host_message(&self, frame: &mut Frame, area: Rect) {
        use ratatui::layout::Alignment;
        use ratatui::style::Modifier;
        use ratatui::text::{Line, Span};
        use ratatui::widgets::{Block, Borders, Paragraph};
@@ -828,8 +845,9 @@ impl TuiApp {

        // Create message content
        let mut lines = vec![
            Line::from(""),
            Line::from(Span::styled(
                format!("Host '{}' is offline", hostname),
                format!(" Host '{}' is offline", hostname),
                Style::default().fg(Theme::muted_text()).add_modifier(Modifier::BOLD),
            )),
            Line::from(""),
@@ -837,46 +855,26 @@ impl TuiApp {

        if has_mac {
            lines.push(Line::from(Span::styled(
                "Press 'w' to wake up host",
                Style::default().fg(Theme::primary_text()).add_modifier(Modifier::BOLD),
                " Press 'w' to wake up host",
                Style::default().fg(Theme::primary_text()),
            )));
        } else {
            lines.push(Line::from(Span::styled(
                "No MAC address configured - cannot wake up",
                " No MAC address configured",
                Style::default().fg(Theme::muted_text()),
            )));
        }

        // Create centered message
        // Render message in system panel with border
        let message = Paragraph::new(lines)
            .block(Block::default()
                .borders(Borders::ALL)
                .border_style(Style::default().fg(Theme::muted_text()))
                .title(" Offline Host ")
                .title(" Offline ")
                .title_style(Style::default().fg(Theme::muted_text()).add_modifier(Modifier::BOLD)))
            .style(Style::default().bg(Theme::background()).fg(Theme::primary_text()))
            .alignment(Alignment::Center);
            .style(Style::default().bg(Theme::background()).fg(Theme::primary_text()));

        // Center the message in the available area
        let popup_area = ratatui::layout::Layout::default()
            .direction(Direction::Vertical)
            .constraints([
                Constraint::Percentage(40),
                Constraint::Length(6),
                Constraint::Percentage(40),
            ])
            .split(area)[1];

        let popup_area = ratatui::layout::Layout::default()
            .direction(Direction::Horizontal)
            .constraints([
                Constraint::Percentage(25),
                Constraint::Percentage(50),
                Constraint::Percentage(25),
            ])
            .split(popup_area)[1];

        frame.render_widget(message, popup_area);
        frame.render_widget(message, area);
    }

    /// Parse MAC address string (e.g., "AA:BB:CC:DD:EE:FF") to [u8; 6]
229 dashboard/src/ui/widgets/hosts.rs Normal file
@@ -0,0 +1,229 @@
use ratatui::{
    layout::Rect,
    style::{Modifier, Style},
    text::{Line, Span},
    widgets::{List, ListItem},
    Frame,
};

use crate::metrics::MetricStore;
use crate::ui::theme::Theme;
use cm_dashboard_shared::Status;

/// Hosts widget displaying all available hosts with selector bar navigation
#[derive(Clone)]
pub struct HostsWidget {
    /// Currently selected host index (for blue selector bar)
    pub selected_index: usize,
    /// Scroll offset for viewport
    pub scroll_offset: usize,
    /// Last rendered viewport height for scroll calculations
    last_viewport_height: usize,
}

impl HostsWidget {
    pub fn new() -> Self {
        Self {
            selected_index: 0,
            scroll_offset: 0,
            last_viewport_height: 0,
        }
    }

    /// Move selection up
    pub fn select_previous(&mut self) {
        if self.selected_index > 0 {
            self.selected_index -= 1;
            self.ensure_selected_visible();
        }
    }

    /// Move selection down
    pub fn select_next(&mut self, total_hosts: usize) {
        if total_hosts > 0 && self.selected_index < total_hosts.saturating_sub(1) {
            self.selected_index += 1;
            self.ensure_selected_visible();
        }
    }

    /// Ensure selected item is visible in viewport (auto-scroll)
    fn ensure_selected_visible(&mut self) {
        if self.last_viewport_height == 0 {
            return; // Can't calculate without viewport height
        }

        let viewport_height = self.last_viewport_height;

        // If selection is above viewport, scroll up to show it
        if self.selected_index < self.scroll_offset {
            self.scroll_offset = self.selected_index;
        }

        // If selection is below viewport, scroll down to show it
        if self.selected_index >= self.scroll_offset + viewport_height {
            self.scroll_offset = self.selected_index.saturating_sub(viewport_height.saturating_sub(1));
        }
    }

    /// Scroll down manually
    pub fn scroll_down(&mut self, total_hosts: usize) {
        if self.last_viewport_height == 0 {
            return;
        }

        let viewport_height = self.last_viewport_height;
        let max_scroll = total_hosts.saturating_sub(viewport_height);

        if self.scroll_offset < max_scroll {
            self.scroll_offset += 1;
        }
    }

    /// Scroll up manually
    pub fn scroll_up(&mut self) {
        if self.scroll_offset > 0 {
            self.scroll_offset -= 1;
        }
    }

    /// Get the currently selected host index
    pub fn get_selected_index(&self) -> usize {
        self.selected_index
    }

    /// Set selected index (used when switching to host via mouse)
    pub fn set_selected_index(&mut self, index: usize, total_hosts: usize) {
        if index < total_hosts {
            self.selected_index = index;
            self.ensure_selected_visible();
        }
    }

    /// Convert y coordinate to host index (accounting for scroll)
    pub fn y_to_host_index(&self, relative_y: usize) -> usize {
        self.scroll_offset + relative_y
    }

    /// Render hosts list with selector bar
    pub fn render<F>(
        &mut self,
        frame: &mut Frame,
        area: Rect,
        available_hosts: &[String],
        localhost: &str,
        current_host: Option<&str>,
        metric_store: &MetricStore,
        mut calculate_host_status: F,
        is_focused: bool,
    ) where F: FnMut(&str, &MetricStore) -> Status {
        use crate::ui::theme::{StatusIcons, Typography};
        use ratatui::widgets::Paragraph;

        // Split area for header and list
        let chunks = ratatui::layout::Layout::default()
            .direction(ratatui::layout::Direction::Vertical)
            .constraints([
                ratatui::layout::Constraint::Length(1), // Header
                ratatui::layout::Constraint::Min(0),    // List
            ])
            .split(area);

        // Render header
        let header = Paragraph::new("Hosts:").style(Typography::muted());
        frame.render_widget(header, chunks[0]);

        // Store viewport height for scroll calculations (minus header)
        self.last_viewport_height = chunks[1].height as usize;

        // Validate scroll offset
        if self.scroll_offset >= available_hosts.len() && !available_hosts.is_empty() {
            self.scroll_offset = available_hosts.len().saturating_sub(1);
        }

        // Create list items for visible hosts
        let items: Vec<ListItem> = available_hosts
            .iter()
            .enumerate()
            .skip(self.scroll_offset)
            .take(chunks[1].height as usize)
            .map(|(idx, hostname)| {
                let host_status = calculate_host_status(hostname, metric_store);
                let status_icon = StatusIcons::get_icon(host_status);
                let status_color = Theme::status_color(host_status);

                // Check if this is the selected host (for blue selector bar)
                let is_selected = is_focused && idx == self.selected_index;

                // Check if this is the current (active) host
                let is_current = current_host == Some(hostname.as_str());

                // Check if this is localhost
                let is_localhost = hostname == localhost;

                // Build the line with icon and hostname
                let mut spans = vec![Span::styled(
                    format!("{} ", status_icon),
                    if is_selected {
                        Style::default()
                            .fg(Theme::background())
                            .add_modifier(Modifier::BOLD)
                    } else {
                        Style::default().fg(status_color)
                    },
                )];

                // Add arrow indicator if this is the current host (like cm-player)
                if is_current {
                    spans.push(Span::styled(
                        "▸ ",
                        if is_selected {
                            Style::default()
                                .fg(Theme::background())
                                .add_modifier(Modifier::BOLD)
                        } else {
                            Style::default()
                                .fg(Theme::primary_text())
                                .add_modifier(Modifier::BOLD)
                        },
                    ));
                }

                // Add hostname with appropriate styling
                let hostname_text = if is_localhost {
                    format!("{} (localhost)", hostname)
                } else {
                    hostname.clone()
                };

                spans.push(Span::styled(
                    hostname_text,
                    if is_selected {
                        Style::default()
                            .fg(Theme::background())
                            .add_modifier(Modifier::BOLD)
                    } else if is_current {
                        Style::default()
                            .fg(Theme::primary_text())
                            .add_modifier(Modifier::BOLD)
                    } else {
                        Style::default().fg(Theme::primary_text())
                    },
                ));

                let line = Line::from(spans);

                // Apply blue background to selected row
                let base_style = if is_selected {
                    Style::default().bg(Theme::highlight()) // Blue background
                } else {
                    Style::default().bg(Theme::background())
                };

                ListItem::new(line).style(base_style)
            })
            .collect();

        let hosts_list = List::new(items);
        frame.render_widget(hosts_list, chunks[1]);
    }
}
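The auto-scroll in ensure_selected_visible is the usual viewport clamp. A standalone sketch of just that math (clamp_scroll is illustrative, not the project's API):

// Keep the selected row inside [scroll_offset, scroll_offset + viewport_height).
fn clamp_scroll(selected: usize, scroll_offset: usize, viewport_height: usize) -> usize {
    if selected < scroll_offset {
        selected // scroll up until the selection is the first visible row
    } else if selected >= scroll_offset + viewport_height {
        selected.saturating_sub(viewport_height.saturating_sub(1)) // selection becomes the last row
    } else {
        scroll_offset // already visible, leave the viewport alone
    }
}

fn main() {
    // 5 visible rows, viewport showing rows 10..15:
    assert_eq!(clamp_scroll(12, 10, 5), 10); // visible, unchanged
    assert_eq!(clamp_scroll(3, 10, 5), 3);   // above: jump up
    assert_eq!(clamp_scroll(15, 10, 5), 11); // below: 15 becomes the bottom row
}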
@@ -1,8 +1,10 @@
use cm_dashboard_shared::AgentData;

pub mod hosts;
pub mod services;
pub mod system;

pub use hosts::HostsWidget;
pub use services::ServicesWidget;
pub use system::SystemWidget;
@@ -713,7 +713,11 @@ impl ServicesWidget {

    /// Render with focus
    pub fn render(&mut self, frame: &mut Frame, area: Rect, is_focused: bool) {
        let services_block = Components::widget_block("services");
        self.render_with_title(frame, area, is_focused, "services");
    }

    pub fn render_with_title(&mut self, frame: &mut Frame, area: Rect, is_focused: bool, title: &str) {
        let services_block = Components::widget_block(title);
        let inner_area = services_block.inner(area);
        frame.render_widget(services_block, area);

@@ -758,6 +762,49 @@ impl ServicesWidget {
        self.render_services(frame, content_chunks[1], is_focused, columns);
    }

    /// Render services content WITHOUT block (for tab mode like cm-player)
    pub fn render_content(&mut self, frame: &mut Frame, area: Rect, is_focused: bool) {
        let content_chunks = Layout::default()
            .direction(Direction::Vertical)
            .constraints([Constraint::Length(1), Constraint::Min(0)])
            .split(area);

        // Determine which columns to show based on available width
        let columns = ColumnVisibility::from_width(area.width);

        // Build header based on visible columns
        let mut header_parts = Vec::new();
        if columns.show_name {
            header_parts.push(format!("{:<width$}", "Service:", width = ColumnVisibility::NAME_WIDTH as usize));
        }
        if columns.show_status {
            header_parts.push(format!("{:<width$}", "Status:", width = ColumnVisibility::STATUS_WIDTH as usize));
        }
        if columns.show_ram {
            header_parts.push(format!("{:<width$}", "RAM:", width = ColumnVisibility::RAM_WIDTH as usize));
        }
        if columns.show_uptime {
            header_parts.push(format!("{:<width$}", "Uptime:", width = ColumnVisibility::UPTIME_WIDTH as usize));
        }
        if columns.show_restarts {
            header_parts.push(format!("{:<width$}", "↻:", width = ColumnVisibility::RESTARTS_WIDTH as usize));
        }
        let header = header_parts.join(" ");

        let header_para = Paragraph::new(header).style(Typography::muted());
        frame.render_widget(header_para, content_chunks[0]);

        // Check if we have any services to display
        if self.parent_services.is_empty() && self.sub_services.is_empty() {
            let empty_text = Paragraph::new("No process data").style(Typography::muted());
            frame.render_widget(empty_text, content_chunks[1]);
            return;
        }

        // Render the services list
        self.render_services(frame, content_chunks[1], is_focused, columns);
    }

    /// Render services list
    fn render_services(&mut self, frame: &mut Frame, area: Rect, is_focused: bool, columns: ColumnVisibility) {
        // Build hierarchical service list for display
@@ -15,6 +15,7 @@ pub struct SystemWidget {
    // NixOS information
    nixos_build: Option<String>,
    agent_hash: Option<String>,
    kernel_version: Option<String>,

    // Network interfaces
    network_interfaces: Vec<cm_dashboard_shared::NetworkInterfaceData>,
@@ -44,9 +45,9 @@ pub struct SystemWidget {
    storage_pools: Vec<StoragePool>,

    // Backup metrics
    backup_repositories: Vec<String>,
    backup_repository_status: Status,
    backup_disks: Vec<cm_dashboard_shared::BackupDiskData>,
    backup_last_time: Option<String>,
    backup_status: Status,
    backup_repositories: Vec<cm_dashboard_shared::BackupRepositoryData>,

    // Overall status
    has_data: bool,
@@ -94,6 +95,7 @@ impl SystemWidget {
        Self {
            nixos_build: None,
            agent_hash: None,
            kernel_version: None,
            network_interfaces: Vec::new(),
            cpu_load_1min: None,
            cpu_load_5min: None,
@@ -112,9 +114,9 @@ impl SystemWidget {
            tmp_status: Status::Unknown,
            tmpfs_mounts: Vec::new(),
            storage_pools: Vec::new(),
            backup_last_time: None,
            backup_status: Status::Unknown,
            backup_repositories: Vec::new(),
            backup_repository_status: Status::Unknown,
            backup_disks: Vec::new(),
            has_data: false,
            scroll_offset: 0,
            last_viewport_height: 0,
@@ -171,6 +173,11 @@ impl SystemWidget {
    pub fn get_agent_version(&self) -> Option<String> {
        self.agent_hash.clone()
    }

    /// Get the kernel version
    pub fn get_kernel_version(&self) -> Option<String> {
        self.kernel_version.clone()
    }
}

use super::Widget;
@@ -185,6 +192,9 @@ impl Widget for SystemWidget {
        // Extract build version
        self.nixos_build = agent_data.build_version.clone();

        // Extract kernel version
        self.kernel_version = agent_data.kernel_version.clone();

        // Extract network interfaces
        self.network_interfaces = agent_data.system.network.interfaces.clone();

@@ -221,9 +231,9 @@ impl Widget for SystemWidget {

        // Extract backup data
        let backup = &agent_data.backup;
        self.backup_last_time = backup.last_backup_time.clone();
        self.backup_status = backup.backup_status;
        self.backup_repositories = backup.repositories.clone();
        self.backup_repository_status = backup.repository_status;
        self.backup_disks = backup.disks.clone();

        // Clamp scroll offset to valid range after update
        // This prevents scroll issues when switching between hosts
@@ -533,79 +543,42 @@ impl SystemWidget {
    fn render_backup(&self) -> Vec<Line<'_>> {
        let mut lines = Vec::new();

        // First section: Repository status and list
        if !self.backup_repositories.is_empty() {
            let repo_text = format!("Repo: {}", self.backup_repositories.len());
            let repo_spans = StatusIcons::create_status_spans(self.backup_repository_status, &repo_text);
            lines.push(Line::from(repo_spans));

            // List all repositories (sorted for consistent display)
            let mut sorted_repos = self.backup_repositories.clone();
            sorted_repos.sort();
            let repo_count = sorted_repos.len();
            for (idx, repo) in sorted_repos.iter().enumerate() {
                let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
                lines.push(Line::from(vec![
                    Span::styled(format!("  {} ", tree_char), Typography::tree()),
                    Span::styled(repo.clone(), Typography::secondary()),
                ]));
            }
        if self.backup_repositories.is_empty() {
            return lines;
        }

        // Second section: Per-disk backup information (sorted by serial for consistent display)
        let mut sorted_disks = self.backup_disks.clone();
        sorted_disks.sort_by(|a, b| a.serial.cmp(&b.serial));
        for disk in &sorted_disks {
            let truncated_serial = truncate_serial(&disk.serial);
            let mut details = Vec::new();
        // Format backup time (use complete timestamp)
        let time_display = if let Some(ref time_str) = self.backup_last_time {
            time_str.clone()
        } else {
            "unknown".to_string()
        };

            if let Some(temp) = disk.temperature_celsius {
                details.push(format!("T: {}°C", temp as i32));
            }
            if let Some(wear) = disk.wear_percent {
                details.push(format!("W: {}%", wear as i32));
            }
        // Header: just the timestamp
        let repo_spans = StatusIcons::create_status_spans(self.backup_status, &time_display);
        lines.push(Line::from(repo_spans));

            let disk_text = if !details.is_empty() {
                format!("{} {}", truncated_serial, details.join(" "))
        // List all repositories with archive count and size
        let repo_count = self.backup_repositories.len();
        for (idx, repo) in self.backup_repositories.iter().enumerate() {
            let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };

            // Format size: use kB for < 1MB, MB for < 1GB, otherwise GB
            let size_display = if repo.repo_size_gb < 0.001 {
                format!("{:.0}kB", repo.repo_size_gb * 1024.0 * 1024.0)
            } else if repo.repo_size_gb < 1.0 {
                format!("{:.0}MB", repo.repo_size_gb * 1024.0)
            } else {
                truncated_serial
                format!("{:.1}GB", repo.repo_size_gb)
            };

            // Overall disk status (worst of backup and usage)
            let disk_status = disk.backup_status.max(disk.usage_status);
            let disk_spans = StatusIcons::create_status_spans(disk_status, &disk_text);
            lines.push(Line::from(disk_spans));
            let repo_text = format!("{} ({}) {}", repo.name, repo.archive_count, size_display);

            // Show backup time with status
            if let Some(backup_time) = &disk.last_backup_time {
                let time_text = format!("Backup: {}", backup_time);
                let mut time_spans = vec![
                    Span::styled("  ├─ ", Typography::tree()),
                ];
                time_spans.extend(StatusIcons::create_status_spans(disk.backup_status, &time_text));
                lines.push(Line::from(time_spans));
            }

            // Show usage with status and archive count
            let archive_display = if disk.archives_min == disk.archives_max {
                format!("{}", disk.archives_min)
            } else {
                format!("{}-{}", disk.archives_min, disk.archives_max)
            };

            let usage_text = format!(
                "Usage: ({}) {:.0}% {:.0}GB/{:.0}GB",
                archive_display,
                disk.disk_usage_percent,
                disk.disk_used_gb,
                disk.disk_total_gb
            );
            let mut usage_spans = vec![
                Span::styled("  └─ ", Typography::tree()),
            let mut repo_spans = vec![
                Span::styled(format!("  {} ", tree_char), Typography::tree()),
            ];
            usage_spans.extend(StatusIcons::create_status_spans(disk.usage_status, &usage_text));
            lines.push(Line::from(usage_spans));
            repo_spans.extend(StatusIcons::create_status_spans(repo.status, &repo_text));
            lines.push(Line::from(repo_spans));
        }

        lines
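The size thresholds above convert a GB figure to the most readable unit. A minimal sketch with worked values (format_repo_size is illustrative, not the project's API):

// Sizes arrive in GB, so < 0.001 GB (~1 MB) renders as kB, < 1 GB as MB,
// and everything else as GB with one decimal.
fn format_repo_size(gb: f32) -> String {
    if gb < 0.001 {
        format!("{:.0}kB", gb * 1024.0 * 1024.0)
    } else if gb < 1.0 {
        format!("{:.0}MB", gb * 1024.0)
    } else {
        format!("{:.1}GB", gb)
    }
}

fn main() {
    assert_eq!(format_repo_size(0.0005), "524kB"); // 0.0005 * 1024 * 1024 = 524.288
    assert_eq!(format_repo_size(0.25), "256MB");
    assert_eq!(format_repo_size(12.34), "12.3GB");
}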
@@ -876,13 +849,10 @@ impl SystemWidget {
        }

        // Backup section
        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
            count += 1; // Header
            if !self.backup_repositories.is_empty() {
                count += 1; // Repo header
                count += self.backup_repositories.len();
            }
            count += self.backup_disks.len() * 3; // Each disk has 3 lines
        if !self.backup_repositories.is_empty() {
            count += 1; // Header: "Backup:"
            count += 1; // Repo count and timestamp header
            count += self.backup_repositories.len(); // Individual repos
        }

        count
@@ -988,7 +958,7 @@ impl SystemWidget {
        lines.extend(storage_lines);

        // Backup section (if available)
        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
        if !self.backup_repositories.is_empty() {
            lines.push(Line::from(vec![
                Span::styled("Backup:", Typography::widget_title())
            ]));
@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-shared"
version = "0.1.264"
version = "0.1.278"
edition = "2021"

[dependencies]
@@ -7,6 +7,8 @@ pub struct AgentData {
    pub hostname: String,
    pub agent_version: String,
    pub build_version: Option<String>,
    #[serde(default)]
    pub kernel_version: Option<String>,
    pub timestamp: u64,
    pub system: SystemData,
    pub services: Vec<ServiceData>,
@@ -182,27 +184,18 @@ pub struct SubServiceMetric {
/// Backup system data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupData {
    pub repositories: Vec<String>,
    pub repository_status: Status,
    pub disks: Vec<BackupDiskData>,
}

/// Backup repository disk information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupDiskData {
    pub serial: String,
    pub product_name: Option<String>,
    pub wear_percent: Option<f32>,
    pub temperature_celsius: Option<f32>,
    pub last_backup_time: Option<String>,
    pub backup_status: Status,
    pub disk_usage_percent: f32,
    pub disk_used_gb: f32,
    pub disk_total_gb: f32,
    pub usage_status: Status,
    pub services: Vec<String>,
    pub archives_min: i64,
    pub archives_max: i64,
    pub repositories: Vec<BackupRepositoryData>,
}

/// Individual backup repository information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupRepositoryData {
    pub name: String,
    pub archive_count: i64,
    pub repo_size_gb: f32,
    pub status: Status,
}

impl AgentData {
@@ -212,6 +205,7 @@ impl AgentData {
            hostname,
            agent_version,
            build_version: None,
            kernel_version: None,
            timestamp: chrono::Utc::now().timestamp() as u64,
            system: SystemData {
                network: NetworkData {
@@ -245,9 +239,9 @@ impl AgentData {
            },
            services: Vec::new(),
            backup: BackupData {
                last_backup_time: None,
                backup_status: Status::Unknown,
                repositories: Vec::new(),
                repository_status: Status::Unknown,
                disks: Vec::new(),
            },
        }
    }
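The #[serde(default)] on kernel_version makes the compatibility story explicit: payloads from agents built before the field existed still deserialize, with the field falling back to None. A minimal round-trip sketch, assuming serde and serde_json with the derive feature (Payload is a stand-in, not the real AgentData):

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Payload {
    hostname: String,
    #[serde(default)]
    kernel_version: Option<String>,
}

fn main() {
    // Old agent: no kernel_version key in the JSON at all.
    let old = r#"{ "hostname": "alpha" }"#;
    let p: Payload = serde_json::from_str(old).unwrap();
    assert_eq!(p.kernel_version, None);

    // New agent: the key is present and parsed as usual.
    let new = r#"{ "hostname": "alpha", "kernel_version": "6.6.1" }"#;
    let p: Payload = serde_json::from_str(new).unwrap();
    assert_eq!(p.kernel_version.as_deref(), Some("6.6.1"));
}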