Compare commits
34 Commits
| SHA1 |
|---|
| 535784e849 |
| 41a7ee660a |
| 76931f0457 |
| 516d159d2f |
| 1656f20e96 |
| dcd350ec2c |
| a34b095857 |
| 7362464b46 |
| c8b79576fa |
| f53df5440b |
| d1b0e2c431 |
| b1719a60fc |
| d922e8d6f3 |
| 407bc9dbc2 |
| 3c278351c9 |
| 8da4522d85 |
| 5b1e39cfca |
| ffecbc3166 |
| 49f9504429 |
| bc9015e96b |
| aaec8e691c |
| 4a8cfbbde4 |
| d93260529b |
| 41e1be451e |
| 2863526ec8 |
| 5da9213da6 |
| a7755f02ae |
| b886fb2045 |
| cfb02e1763 |
| 5b53ca3d52 |
| 92a30913b4 |
| a288a8ef9a |
| c65d596099 |
| 98ed17947d |
Cargo.lock (generated): 6 changed lines
@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 
 [[package]]
 name = "cm-dashboard"
-version = "0.1.245"
+version = "0.1.280"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.245"
+version = "0.1.280"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -325,7 +325,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.245"
+version = "0.1.280"
 dependencies = [
  "chrono",
  "serde",
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.246"
+version = "0.1.280"
 edition = "2021"
 
 [dependencies]
@@ -1,5 +1,6 @@
 use anyhow::Result;
 use gethostname::gethostname;
+use std::collections::HashMap;
 use std::time::{Duration, Instant};
 use tokio::time::interval;
 use tracing::{debug, error, info};
@@ -28,7 +29,6 @@ struct TimedCollector {
 }
 
 pub struct Agent {
-    hostname: String,
     config: AgentConfig,
     zmq_handler: ZmqHandler,
     collectors: Vec<TimedCollector>,
@@ -38,12 +38,40 @@ pub struct Agent {
 }
 
 /// Track system component status for change detection
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 struct SystemStatus {
+    // CPU
     cpu_load_status: cm_dashboard_shared::Status,
     cpu_temperature_status: cm_dashboard_shared::Status,
+    // Memory
     memory_usage_status: cm_dashboard_shared::Status,
-    // Add more as needed
+    // Storage - keyed by drive name or pool name
+    drive_statuses: HashMap<String, DriveStatus>,
+    pool_statuses: HashMap<String, PoolStatus>,
+    // Services - keyed by service name
+    service_statuses: HashMap<String, cm_dashboard_shared::Status>,
+    // Backup
+    backup_status: cm_dashboard_shared::Status,
+}
+
+#[derive(Debug, Clone, Default)]
+struct DriveStatus {
+    temperature_status: cm_dashboard_shared::Status,
+    health_status: cm_dashboard_shared::Status,
+    filesystem_statuses: HashMap<String, cm_dashboard_shared::Status>,
+}
+
+#[derive(Debug, Clone, Default)]
+struct PoolStatus {
+    health_status: cm_dashboard_shared::Status,
+    usage_status: cm_dashboard_shared::Status,
+    drive_statuses: HashMap<String, PoolDriveStatus>,
+}
+
+#[derive(Debug, Clone, Default)]
+struct PoolDriveStatus {
+    health_status: cm_dashboard_shared::Status,
+    temperature_status: cm_dashboard_shared::Status,
 }
 
 impl Agent {
@@ -148,7 +176,6 @@ impl Agent {
         let cached_agent_data = AgentData::new(hostname.clone(), env!("CARGO_PKG_VERSION").to_string());
 
         Ok(Self {
-            hostname,
             config,
             zmq_handler,
             collectors,
@@ -171,7 +198,9 @@ impl Agent {
         let mut transmission_interval = interval(Duration::from_secs(
             self.config.zmq.transmission_interval_seconds,
         ));
-        let mut notification_interval = interval(Duration::from_secs(30)); // Check notifications every 30s
+        let mut notification_interval = interval(Duration::from_secs(
+            self.config.notifications.check_interval_seconds,
+        ));
 
         // Skip initial ticks to avoid immediate execution
         transmission_interval.tick().await;
@@ -185,9 +214,21 @@ impl Agent {
                     }
                 }
                 _ = notification_interval.tick() => {
-                    // Process any pending notifications
-                    // NOTE: With structured data, we might need to implement status tracking differently
-                    // For now, we skip this until status evaluation is migrated
+                    // Check for status changes and queue notifications
+                    let agent_data_snapshot = self.cached_agent_data.clone();
+                    if let Err(e) = self.check_status_changes_and_notify(&agent_data_snapshot).await {
+                        error!("Failed to check status changes: {}", e);
+                    }
+
+                    // Check if all components recovered and flush pending recoveries
+                    self.notification_manager.flush_recoveries_if_all_ok();
+
+                    // Flush any pending aggregated notifications
+                    if self.notification_manager.should_flush() {
+                        if let Err(e) = self.notification_manager.flush_notifications().await {
+                            error!("Failed to flush notifications: {}", e);
+                        }
+                    }
                 }
                 _ = &mut shutdown_rx => {
                     info!("Shutdown signal received, stopping agent loop");
@@ -235,16 +276,8 @@ impl Agent {
             .unwrap()
             .as_secs();
 
-        // Clone for notification check (to avoid borrow issues)
-        let agent_data_snapshot = self.cached_agent_data.clone();
-
-        // Check for status changes and send notifications
-        if let Err(e) = self.check_status_changes_and_notify(&agent_data_snapshot).await {
-            error!("Failed to check status changes: {}", e);
-        }
-
         // Broadcast the cached structured data via ZMQ
-        if let Err(e) = self.zmq_handler.publish_agent_data(&agent_data_snapshot).await {
+        if let Err(e) = self.zmq_handler.publish_agent_data(&self.cached_agent_data).await {
             error!("Failed to broadcast agent data: {}", e);
         } else {
             debug!("Successfully broadcast structured agent data");
@@ -253,38 +286,182 @@ impl Agent {
         Ok(())
     }
 
-    /// Check for status changes and send notifications
+    /// Check for status changes and queue notifications
     async fn check_status_changes_and_notify(&mut self, agent_data: &AgentData) -> Result<()> {
-        // Extract current status
-        let current_status = SystemStatus {
-            cpu_load_status: agent_data.system.cpu.load_status.clone(),
-            cpu_temperature_status: agent_data.system.cpu.temperature_status.clone(),
-            memory_usage_status: agent_data.system.memory.usage_status.clone(),
+        // Build current status from agent data
+        let mut current_status = SystemStatus {
+            cpu_load_status: agent_data.system.cpu.load_status,
+            cpu_temperature_status: agent_data.system.cpu.temperature_status,
+            memory_usage_status: agent_data.system.memory.usage_status,
+            backup_status: agent_data.backup.backup_status,
+            ..Default::default()
         };
 
-        // Check for status changes
-        if let Some(previous) = self.previous_status.clone() {
-            self.check_and_notify_status_change(
+        // Collect drive statuses
+        for drive in &agent_data.system.storage.drives {
+            let mut fs_statuses = HashMap::new();
+            for fs in &drive.filesystems {
+                fs_statuses.insert(fs.mount.clone(), fs.usage_status);
+            }
+            current_status.drive_statuses.insert(
+                drive.name.clone(),
+                DriveStatus {
+                    temperature_status: drive.temperature_status,
+                    health_status: drive.health_status,
+                    filesystem_statuses: fs_statuses,
+                },
+            );
+        }
+
+        // Collect pool statuses
+        for pool in &agent_data.system.storage.pools {
+            let mut pool_drive_statuses = HashMap::new();
+            for drive in pool.data_drives.iter().chain(pool.parity_drives.iter()) {
+                pool_drive_statuses.insert(
+                    drive.name.clone(),
+                    PoolDriveStatus {
+                        health_status: drive.health_status,
+                        temperature_status: drive.temperature_status,
+                    },
+                );
+            }
+            current_status.pool_statuses.insert(
+                pool.name.clone(),
+                PoolStatus {
+                    health_status: pool.health_status,
+                    usage_status: pool.usage_status,
+                    drive_statuses: pool_drive_statuses,
+                },
+            );
+        }
+
+        // Collect service statuses (only for non-user-stopped services)
+        for service in &agent_data.services {
+            if !service.user_stopped {
+                current_status
+                    .service_statuses
+                    .insert(service.name.clone(), service.service_status);
+            }
+        }
+
+        // Clone previous status to avoid borrow issues
+        let previous = self.previous_status.clone();
+
+        // Compare with previous status and queue notifications
+        if let Some(previous) = previous {
+            // CPU
+            self.queue_status_notification(
                 "CPU Load",
                 &previous.cpu_load_status,
                 &current_status.cpu_load_status,
-                format!("CPU load: {:.1}", agent_data.system.cpu.load_1min)
-            ).await?;
-            self.check_and_notify_status_change(
+                &format!("Load: {:.2}", agent_data.system.cpu.load_1min),
+            );
+            self.queue_status_notification(
                 "CPU Temperature",
                 &previous.cpu_temperature_status,
                 &current_status.cpu_temperature_status,
-                format!("CPU temperature: {}°C",
-                    agent_data.system.cpu.temperature_celsius.unwrap_or(0.0) as i32)
-            ).await?;
+                &format!(
+                    "Temperature: {}°C",
+                    agent_data.system.cpu.temperature_celsius.unwrap_or(0.0) as i32
+                ),
+            );
 
-            self.check_and_notify_status_change(
-                "Memory Usage",
+            // Memory
+            self.queue_status_notification(
+                "Memory",
                 &previous.memory_usage_status,
                 &current_status.memory_usage_status,
-                format!("Memory usage: {:.1}%", agent_data.system.memory.usage_percent)
-            ).await?;
+                &format!("Usage: {:.1}%", agent_data.system.memory.usage_percent),
+            );
+
+            // Backup
+            self.queue_status_notification(
+                "Backup",
+                &previous.backup_status,
+                &current_status.backup_status,
+                &format!(
+                    "Last backup: {}",
+                    agent_data.backup.last_backup_time.as_deref().unwrap_or("unknown")
+                ),
+            );
+
+            // Drives
+            for (name, current_drive) in &current_status.drive_statuses {
+                if let Some(prev_drive) = previous.drive_statuses.get(name) {
+                    self.queue_status_notification(
+                        &format!("Drive {} Health", name),
+                        &prev_drive.health_status,
+                        &current_drive.health_status,
+                        "Health check failed",
+                    );
+                    self.queue_status_notification(
+                        &format!("Drive {} Temperature", name),
+                        &prev_drive.temperature_status,
+                        &current_drive.temperature_status,
+                        "Temperature threshold exceeded",
+                    );
+
+                    // Filesystem usage
+                    for (mount, current_fs_status) in &current_drive.filesystem_statuses {
+                        if let Some(prev_fs_status) = prev_drive.filesystem_statuses.get(mount) {
+                            self.queue_status_notification(
+                                &format!("Filesystem {}", mount),
+                                prev_fs_status,
+                                current_fs_status,
+                                "Disk usage threshold exceeded",
+                            );
+                        }
+                    }
+                }
+            }
+
+            // Pools
+            for (name, current_pool) in &current_status.pool_statuses {
+                if let Some(prev_pool) = previous.pool_statuses.get(name) {
+                    self.queue_status_notification(
+                        &format!("Pool {} Health", name),
+                        &prev_pool.health_status,
+                        &current_pool.health_status,
+                        "Pool health degraded",
+                    );
+                    self.queue_status_notification(
+                        &format!("Pool {} Usage", name),
+                        &prev_pool.usage_status,
+                        &current_pool.usage_status,
+                        "Pool usage threshold exceeded",
+                    );
+
+                    // Pool drives
+                    for (drive_name, current_pd) in &current_pool.drive_statuses {
+                        if let Some(prev_pd) = prev_pool.drive_statuses.get(drive_name) {
+                            self.queue_status_notification(
+                                &format!("Pool {} Drive {} Health", name, drive_name),
+                                &prev_pd.health_status,
+                                &current_pd.health_status,
+                                "Pool drive health degraded",
+                            );
+                            self.queue_status_notification(
+                                &format!("Pool {} Drive {} Temperature", name, drive_name),
+                                &prev_pd.temperature_status,
+                                &current_pd.temperature_status,
+                                "Pool drive temperature exceeded",
+                            );
+                        }
+                    }
+                }
+            }
+
+            // Services
+            for (name, current_svc_status) in &current_status.service_statuses {
+                if let Some(prev_svc_status) = previous.service_statuses.get(name) {
+                    self.queue_status_notification(
+                        &format!("Service {}", name),
+                        prev_svc_status,
+                        current_svc_status,
+                        "Service status changed",
+                    );
+                }
+            }
         }
 
         // Store current status for next comparison
@@ -292,43 +469,44 @@ impl Agent {
         Ok(())
     }
 
-    /// Check individual status change and send notification if degraded
-    async fn check_and_notify_status_change(
+    /// Queue a notification based on status change
+    fn queue_status_notification(
         &mut self,
         component: &str,
         previous: &cm_dashboard_shared::Status,
         current: &cm_dashboard_shared::Status,
-        details: String
-    ) -> Result<()> {
+        details: &str,
+    ) {
         use cm_dashboard_shared::Status;
 
-        // Only notify on status degradation (OK → Warning/Critical, Warning → Critical)
-        let should_notify = match (previous, current) {
-            (Status::Ok, Status::Warning) => true,
-            (Status::Ok, Status::Critical) => true,
-            (Status::Warning, Status::Critical) => true,
-            _ => false,
-        };
+        // Check for degradation (alert)
+        let is_alert = matches!(
+            (previous, current),
+            (Status::Ok, Status::Warning)
+                | (Status::Ok, Status::Critical)
+                | (Status::Warning, Status::Critical)
+        );
 
-        if should_notify {
-            let subject = format!("{} {} Alert", self.hostname, component);
-            let body = format!(
-                "Alert: {} status changed from {:?} to {:?}\n\nDetails: {}\n\nTime: {}",
-                component,
-                previous,
-                current,
-                details,
-                chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC")
+        // Check for recovery
+        let is_recovery = matches!(
+            (previous, current),
+            (Status::Warning, Status::Ok)
+                | (Status::Critical, Status::Ok)
+                | (Status::Critical, Status::Warning)
+        );
+
+        if is_alert {
+            info!(
+                "Alert: {} - {:?} → {:?}",
+                component, previous, current
             );
-            info!("Sending notification: {} - {:?} → {:?}", component, previous, current);
-            if let Err(e) = self.notification_manager.send_direct_email(&subject, &body).await {
-                error!("Failed to send notification for {}: {}", component, e);
-            }
+            self.notification_manager.queue_alert(component, previous, current, details);
+        } else if is_recovery {
+            info!(
+                "Recovery: {} - {:?} → {:?}",
+                component, previous, current
            );
+            self.notification_manager.queue_recovery(component, previous, current, details);
         }
 
-        Ok(())
     }
 
 }
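The transition rules above (alert on degradation, recovery on improvement) are the core of the new notification flow. The following standalone sketch is not part of the change set: it mirrors the two `matches!` patterns with a simplified stand-in for `cm_dashboard_shared::Status`, so the accepted transitions can be checked in isolation.

```rust
// Illustrative sketch only: mirrors the matches! patterns from queue_status_notification.
// This `Status` enum is a simplified stand-in for cm_dashboard_shared::Status.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Status {
    Ok,
    Warning,
    Critical,
}

/// Degradation: Ok→Warning, Ok→Critical, Warning→Critical.
fn is_alert(previous: Status, current: Status) -> bool {
    matches!(
        (previous, current),
        (Status::Ok, Status::Warning)
            | (Status::Ok, Status::Critical)
            | (Status::Warning, Status::Critical)
    )
}

/// Recovery: Warning→Ok, Critical→Ok, Critical→Warning.
fn is_recovery(previous: Status, current: Status) -> bool {
    matches!(
        (previous, current),
        (Status::Warning, Status::Ok)
            | (Status::Critical, Status::Ok)
            | (Status::Critical, Status::Warning)
    )
}

fn main() {
    assert!(is_alert(Status::Ok, Status::Critical));
    assert!(!is_alert(Status::Critical, Status::Warning)); // partial recovery is not an alert
    assert!(is_recovery(Status::Critical, Status::Warning));
    assert!(!is_recovery(Status::Ok, Status::Ok)); // no change: neither alert nor recovery
    println!("transition rules behave as in the diff");
}
```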
@@ -1,7 +1,7 @@
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData, Status};
+use cm_dashboard_shared::{AgentData, BackupData, BackupRepositoryData, Status};
 use serde::{Deserialize, Serialize};
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
 use std::fs;
 use std::path::{Path, PathBuf};
 use tracing::{debug, warn};
@@ -21,7 +21,7 @@ impl BackupCollector {
         }
     }
 
-    /// Scan directory for all backup status files
+    /// Scan directory for backup status file (nfs-backup.toml)
     async fn scan_status_files(&self) -> Result<Vec<PathBuf>, CollectorError> {
         let status_path = Path::new(&self.status_dir);
 
@@ -30,30 +30,15 @@ impl BackupCollector {
             return Ok(Vec::new());
         }
 
-        let mut status_files = Vec::new();
-        match fs::read_dir(status_path) {
-            Ok(entries) => {
-                for entry in entries {
-                    if let Ok(entry) = entry {
-                        let path = entry.path();
-                        if path.is_file() {
-                            if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
-                                if filename.starts_with("backup-status-") && filename.ends_with(".toml") {
-                                    status_files.push(path);
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-            Err(e) => {
-                warn!("Failed to read backup status directory: {}", e);
-                return Ok(Vec::new());
-            }
+        // Look for nfs-backup.toml (new NFS-based backup)
+        let nfs_backup_file = status_path.join("nfs-backup.toml");
+        if nfs_backup_file.exists() {
+            return Ok(vec![nfs_backup_file]);
         }
 
-        Ok(status_files)
+        // No backup status file found
+        debug!("No nfs-backup.toml found in {}", self.status_dir);
+        Ok(Vec::new())
     }
 
     /// Read a single backup status file
@@ -76,24 +61,13 @@ impl BackupCollector {
     /// Calculate backup status from TOML status field
     fn calculate_backup_status(status_str: &str) -> Status {
         match status_str.to_lowercase().as_str() {
-            "success" => Status::Ok,
+            "success" | "completed" => Status::Ok,
             "warning" => Status::Warning,
             "failed" | "error" => Status::Critical,
             _ => Status::Unknown,
         }
     }
 
-    /// Calculate usage status from disk usage percentage
-    fn calculate_usage_status(usage_percent: f32) -> Status {
-        if usage_percent < 80.0 {
-            Status::Ok
-        } else if usage_percent < 90.0 {
-            Status::Warning
-        } else {
-            Status::Critical
-        }
-    }
-
     /// Convert BackupStatusToml to BackupData and populate AgentData
     async fn populate_backup_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
         let status_files = self.scan_status_files().await?;
@@ -101,76 +75,47 @@ impl BackupCollector {
         if status_files.is_empty() {
             debug!("No backup status files found");
             agent_data.backup = BackupData {
+                last_backup_time: None,
+                backup_status: Status::Unknown,
                 repositories: Vec::new(),
-                repository_status: Status::Unknown,
-                disks: Vec::new(),
             };
             return Ok(());
         }
 
-        let mut all_repositories = HashSet::new();
-        let mut disks = Vec::new();
+        // Aggregate repository data across all backup status files
+        let mut repo_map: HashMap<String, BackupRepositoryData> = HashMap::new();
         let mut worst_status = Status::Ok;
+        let mut latest_backup_time: Option<String> = None;
 
         for status_file in status_files {
             match self.read_status_file(&status_file).await {
                 Ok(backup_status) => {
-                    // Collect all service names
-                    for service_name in backup_status.services.keys() {
-                        all_repositories.insert(service_name.clone());
-                    }
-
                     // Calculate backup status
                     let backup_status_enum = Self::calculate_backup_status(&backup_status.status);
+                    worst_status = worst_status.max(backup_status_enum);
 
-                    // Calculate usage status from disk space
-                    let (usage_percent, used_gb, total_gb, usage_status) = if let Some(disk_space) = &backup_status.disk_space {
-                        let usage_pct = disk_space.usage_percent as f32;
-                        (
-                            usage_pct,
-                            disk_space.used_gb as f32,
-                            disk_space.total_gb as f32,
-                            Self::calculate_usage_status(usage_pct),
-                        )
-                    } else {
-                        (0.0, 0.0, 0.0, Status::Unknown)
-                    };
+                    // Track latest backup time
+                    if latest_backup_time.is_none() || Some(&backup_status.start_time) > latest_backup_time.as_ref() {
+                        latest_backup_time = Some(backup_status.start_time.clone());
+                    }
 
-                    // Update worst status
-                    worst_status = worst_status.max(backup_status_enum).max(usage_status);
+                    // Process each service in this backup
+                    for (service_name, service_status) in backup_status.services {
+                        // Convert bytes to GB
+                        let repo_size_gb = service_status.repo_size_bytes as f32 / 1_073_741_824.0;
 
-                    // Build service list for this disk
-                    let services: Vec<String> = backup_status.services.keys().cloned().collect();
+                        // Calculate service status
+                        let service_status_enum = Self::calculate_backup_status(&service_status.status);
+                        worst_status = worst_status.max(service_status_enum);
 
-                    // Get min and max archive counts to detect inconsistencies
-                    let archives_min: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .min()
-                        .unwrap_or(0);
-                    let archives_max: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .max()
-                        .unwrap_or(0);
-
-                    // Create disk data
-                    let disk_data = BackupDiskData {
-                        serial: backup_status.disk_serial_number.unwrap_or_else(|| "Unknown".to_string()),
-                        product_name: backup_status.disk_product_name,
-                        wear_percent: backup_status.disk_wear_percent,
-                        temperature_celsius: None, // Not available in current TOML
-                        last_backup_time: Some(backup_status.start_time),
-                        backup_status: backup_status_enum,
-                        disk_usage_percent: usage_percent,
-                        disk_used_gb: used_gb,
-                        disk_total_gb: total_gb,
-                        usage_status,
-                        services,
-                        archives_min,
-                        archives_max,
-                    };
-
-                    disks.push(disk_data);
+                        // Update or insert repository data
+                        repo_map.insert(service_name.clone(), BackupRepositoryData {
+                            name: service_name,
+                            archive_count: service_status.archive_count,
+                            repo_size_gb,
+                            status: service_status_enum,
+                        });
+                    }
                 }
                 Err(e) => {
                     warn!("Failed to read backup status file {:?}: {}", status_file, e);
@@ -178,12 +123,14 @@ impl BackupCollector {
             }
         }
 
-        let repositories: Vec<String> = all_repositories.into_iter().collect();
+        // Convert HashMap to sorted Vec
+        let mut repositories: Vec<BackupRepositoryData> = repo_map.into_values().collect();
+        repositories.sort_by(|a, b| a.name.cmp(&b.name));
 
         agent_data.backup = BackupData {
+            last_backup_time: latest_backup_time,
+            backup_status: worst_status,
             repositories,
-            repository_status: worst_status,
-            disks,
         };
 
         Ok(())
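The collector now reads a single nfs-backup.toml and only consumes a handful of fields: the top-level `status` and `start_time`, and per-service `status`, `archive_count`, and `repo_size_bytes`. The exact file layout is not part of this diff; the fragment below is a hypothetical illustration built only from those field names, with made-up section layout, service names, and values.

```toml
# Hypothetical nfs-backup.toml, inferred from the fields the collector reads above.
# Real layout, service names, and values may differ.
status = "success"
start_time = "2024-01-01T02:00:00Z"

[services.service-a]
status = "success"
archive_count = 30
repo_size_bytes = 1073741824   # 1 GiB

[services.service-b]
status = "warning"
archive_count = 28
repo_size_bytes = 536870912    # 0.5 GiB
```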
@@ -282,8 +282,8 @@ impl Collector for CpuCollector {
            );
        }
 
-        // Calculate status using thresholds
-        agent_data.system.cpu.load_status = self.calculate_load_status(agent_data.system.cpu.load_1min);
+        // Calculate status using thresholds (use 5-minute average for stability)
+        agent_data.system.cpu.load_status = self.calculate_load_status(agent_data.system.cpu.load_5min);
         agent_data.system.cpu.temperature_status = if let Some(temp) = agent_data.system.cpu.temperature_celsius {
             self.calculate_temperature_status(temp)
         } else {
@@ -114,7 +114,7 @@ impl DiskCollector {
         let mut cmd = TokioCommand::new("lsblk");
         cmd.args(&["-rn", "-o", "NAME,MOUNTPOINT"]);
 
-        let output = run_command_with_timeout(cmd, 2).await
+        let output = run_command_with_timeout(cmd, 10).await
             .map_err(|e| CollectorError::SystemRead {
                 path: "block devices".to_string(),
                 error: e.to_string(),
@@ -184,7 +184,7 @@ impl DiskCollector {
     /// Get filesystem info for a single mount point
     fn get_filesystem_info(&self, mount_point: &str) -> Result<(u64, u64), CollectorError> {
         let output = StdCommand::new("timeout")
-            .args(&["2", "df", "--block-size=1", mount_point])
+            .args(&["10", "df", "--block-size=1", mount_point])
             .output()
             .map_err(|e| CollectorError::SystemRead {
                 path: format!("df {}", mount_point),
@@ -433,7 +433,7 @@ impl DiskCollector {
             cmd.args(&["-a", &format!("/dev/{}", drive_name)]);
         }
 
-        let output = run_command_with_timeout(cmd, 3).await
+        let output = run_command_with_timeout(cmd, 15).await
             .map_err(|e| CollectorError::SystemRead {
                 path: format!("SMART data for {}", drive_name),
                 error: e.to_string(),
@@ -772,7 +772,7 @@ impl DiskCollector {
     fn get_drive_info_for_path(&self, path: &str) -> anyhow::Result<PoolDrive> {
         // Use lsblk to find the backing device with timeout
         let output = StdCommand::new("timeout")
-            .args(&["2", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
+            .args(&["10", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
             .output()
             .map_err(|e| anyhow::anyhow!("Failed to run lsblk: {}", e))?;
 
@@ -181,6 +181,7 @@ impl NetworkCollector {
                 link_status,
                 parent_interface,
                 vlan_id,
+                connection_method: None,
             });
         }
     }
@@ -32,6 +32,9 @@ impl NixOSCollector {
         // Set NixOS build/generation information
         agent_data.build_version = self.get_nixos_generation().await;
 
+        // Set kernel version
+        agent_data.kernel_version = self.get_kernel_version().await;
+
         // Set current timestamp
         agent_data.timestamp = chrono::Utc::now().timestamp() as u64;
 
@@ -80,6 +83,14 @@ impl NixOSCollector {
         std::env::var("CM_DASHBOARD_VERSION").unwrap_or_else(|_| "unknown".to_string())
     }
 
+    /// Get kernel version from /proc/sys/kernel/osrelease
+    async fn get_kernel_version(&self) -> Option<String> {
+        match fs::read_to_string("/proc/sys/kernel/osrelease") {
+            Ok(version) => Some(version.trim().to_string()),
+            Err(_) => None,
+        }
+    }
+
     /// Get NixOS system generation (build) information from git commit
     async fn get_nixos_generation(&self) -> Option<String> {
         // Try to read git commit hash from file written during rebuild
@@ -4,7 +4,7 @@ use cm_dashboard_shared::{AgentData, ServiceData, SubServiceData, SubServiceMetr
 use std::process::Command;
 use std::sync::RwLock;
 use std::time::Instant;
-use tracing::debug;
+use tracing::{debug, info};
 
 use super::{Collector, CollectorError};
 use crate::config::SystemdConfig;
@@ -154,7 +154,8 @@ impl SystemdCollector {
             }
         }
 
-        if service_name == "openvpn-vpn-connection" && status_info.active_state == "active" {
+        if service_name == "openvpn-vpn-download" && status_info.active_state == "active" {
+            // Add VPN route
             if let Some(external_ip) = self.get_vpn_external_ip() {
                 let metrics = Vec::new();
 
@@ -165,9 +166,8 @@ impl SystemdCollector {
                     service_type: "vpn_route".to_string(),
                 });
             }
-        }
 
-        if service_name == "openvpn-vpn-download" && status_info.active_state == "active" {
+            // Add torrent stats
             if let Some((active_count, download_mbps, upload_mbps)) = self.get_qbittorrent_stats() {
                 let metrics = Vec::new();
 
@@ -178,6 +178,87 @@ impl SystemdCollector {
                     service_type: "torrent_stats".to_string(),
                 });
             }
 
+            // Add active torrent copy status for each copy operation
+            for torrent_name in self.get_active_torrent_copies() {
+                let metrics = Vec::new();
+
+                sub_services.push(SubServiceData {
+                    name: format!("Copy: {}", torrent_name),
+                    service_status: Status::Info,
+                    metrics,
+                    service_type: "torrent_copy".to_string(),
+                });
+            }
+        }
+
+        if service_name == "nftables" && status_info.active_state == "active" {
+            let (tcp_ports, udp_ports) = self.get_nftables_open_ports();
+
+            if !tcp_ports.is_empty() {
+                let metrics = Vec::new();
+                sub_services.push(SubServiceData {
+                    name: format!("wan tcp: {}", tcp_ports),
+                    service_status: Status::Info,
+                    metrics,
+                    service_type: "firewall_port".to_string(),
+                });
+            }
+
+            if !udp_ports.is_empty() {
+                let metrics = Vec::new();
+                sub_services.push(SubServiceData {
+                    name: format!("wan udp: {}", udp_ports),
+                    service_status: Status::Info,
+                    metrics,
+                    service_type: "firewall_port".to_string(),
+                });
+            }
+        }
+
+        if service_name == "tailscaled" && status_info.active_state == "active" {
+            // Add Tailscale peers with their connection methods as sub-services
+            let peers = self.get_tailscale_peers();
+            for (peer_name, conn_method) in peers {
+                let metrics = Vec::new();
+                sub_services.push(SubServiceData {
+                    name: format!("{}: {}", peer_name, conn_method),
+                    service_status: Status::Info,
+                    metrics,
+                    service_type: "tailscale_peer".to_string(),
+                });
+            }
+        }
+
+        if service_name == "nfs-server" && status_info.active_state == "active" {
+            // Add NFS exports as sub-services
+            let exports = self.get_nfs_exports();
+            for (export_path, info) in exports {
+                let display = if !info.is_empty() {
+                    format!("{} {}", export_path, info)
+                } else {
+                    export_path
+                };
+                sub_services.push(SubServiceData {
+                    name: display,
+                    service_status: Status::Info,
+                    metrics: Vec::new(),
+                    service_type: "nfs_export".to_string(),
+                });
+            }
+        }
+
+        if (service_name == "smbd" || service_name == "samba-smbd") && status_info.active_state == "active" {
+            // Add SMB shares as sub-services
+            let shares = self.get_smb_shares();
+            for (share_name, share_path, mode) in shares {
+                sub_services.push(SubServiceData {
+                    name: format!("{}: {} {}", share_name, share_path, mode),
+                    service_status: Status::Info,
+                    metrics: Vec::new(),
+                    service_type: "smb_share".to_string(),
+                });
+            }
         }
 
         // Create complete service data
@@ -872,21 +953,382 @@ impl SystemdCollector {
                 "-s",
                 "--max-time",
                 "4",
-                "https://ifconfig.me"
+                "https://1.1.1.1/cdn-cgi/trace"
             ])
             .output()
             .ok()?;
 
         if output.status.success() {
-            let ip = String::from_utf8_lossy(&output.stdout).trim().to_string();
-            if !ip.is_empty() && ip.contains('.') {
-                return Some(ip);
+            let response = String::from_utf8_lossy(&output.stdout);
+            // Parse "ip=x.x.x.x" from the response
+            for line in response.lines() {
+                if let Some(ip) = line.strip_prefix("ip=") {
+                    let ip = ip.trim().to_string();
+                    if !ip.is_empty() {
+                        return Some(ip);
+                    }
+                }
             }
         }
 
         None
     }
 
+    /// Get Tailscale connected peers with their connection methods
+    /// Returns a list of (device_name, connection_method) tuples
+    fn get_tailscale_peers(&self) -> Vec<(String, String)> {
+        match Command::new("timeout")
+            .args(["2", "tailscale", "status"])
+            .output()
+        {
+            Ok(output) if output.status.success() => {
+                let status_output = String::from_utf8_lossy(&output.stdout);
+                let mut peers = Vec::new();
+
+                // Get current hostname to filter it out
+                let current_hostname = gethostname::gethostname()
+                    .to_string_lossy()
+                    .to_string();
+
+                // Parse tailscale status output
+                // Format: IP hostname user os status
+                // Example: 100.110.98.3 wslbox cm@ linux active; direct 192.168.30.227:53757
+                // Note: First line is always the current host, skip it
+                for (idx, line) in status_output.lines().enumerate() {
+                    if idx == 0 {
+                        continue; // Skip first line (current host)
+                    }
+
+                    let parts: Vec<&str> = line.split_whitespace().collect();
+                    if parts.len() < 5 {
+                        continue; // Skip invalid lines
+                    }
+
+                    // parts[0] = IP
+                    // parts[1] = hostname
+                    // parts[2] = user
+                    // parts[3] = OS
+                    // parts[4+] = status (e.g., "active;", "direct", "192.168.30.227:53757" or "idle;" or "offline")
+
+                    let hostname = parts[1];
+
+                    // Skip if this is the current host (double-check in case format changes)
+                    if hostname == current_hostname {
+                        continue;
+                    }
+
+                    let status_parts = &parts[4..];
+
+                    // Determine connection method from status
+                    let connection_method = if status_parts.is_empty() {
+                        continue; // Skip if no status
+                    } else {
+                        let status_str = status_parts.join(" ");
+                        if status_str.contains("offline") {
+                            continue; // Skip offline peers
+                        } else if status_str.contains("direct") {
+                            "direct"
+                        } else if status_str.contains("relay") {
+                            "relay"
+                        } else if status_str.contains("idle") {
+                            "idle"
+                        } else if status_str.contains("active") {
+                            "active"
+                        } else {
+                            continue; // Skip unknown status
+                        }
+                    };
+
+                    peers.push((hostname.to_string(), connection_method.to_string()));
+                }
+
+                peers
+            }
+            _ => Vec::new(),
+        }
+    }
+
+    /// Get NFS exports from exportfs
+    /// Returns a list of (export_path, info_string) tuples
+    fn get_nfs_exports(&self) -> Vec<(String, String)> {
+        let output = match Command::new("timeout")
+            .args(["2", "exportfs", "-v"])
+            .output()
+        {
+            Ok(output) if output.status.success() => output,
+            _ => return Vec::new(),
+        };
+
+        let exports_output = String::from_utf8_lossy(&output.stdout);
+        let mut exports_map: std::collections::HashMap<String, Vec<(String, String)>> =
+            std::collections::HashMap::new();
+        let mut current_path: Option<String> = None;
+
+        for line in exports_output.lines() {
+            let trimmed = line.trim();
+
+            if trimmed.is_empty() || trimmed.starts_with('#') {
+                continue;
+            }
+
+            if trimmed.starts_with('/') {
+                // Export path line - may have network on same line or continuation
+                let parts: Vec<&str> = trimmed.splitn(2, char::is_whitespace).collect();
+                let path = parts[0].to_string();
+                current_path = Some(path.clone());
+
+                // Check if network info is on the same line
+                if parts.len() > 1 {
+                    let rest = parts[1].trim();
+                    if let Some(paren_pos) = rest.find('(') {
+                        let network = rest[..paren_pos].trim();
+
+                        if let Some(end_paren) = rest.find(')') {
+                            let options = &rest[paren_pos+1..end_paren];
+                            let mode = if options.contains(",rw,") || options.ends_with(",rw") {
+                                "rw"
+                            } else {
+                                "ro"
+                            };
+
+                            exports_map.entry(path)
+                                .or_insert_with(Vec::new)
+                                .push((network.to_string(), mode.to_string()));
+                        }
+                    }
+                }
+            } else if let Some(ref path) = current_path {
+                // Continuation line with network and options
+                if let Some(paren_pos) = trimmed.find('(') {
+                    let network = trimmed[..paren_pos].trim();
+
+                    if let Some(end_paren) = trimmed.find(')') {
+                        let options = &trimmed[paren_pos+1..end_paren];
+                        let mode = if options.contains(",rw,") || options.ends_with(",rw") {
+                            "rw"
+                        } else {
+                            "ro"
+                        };
+
+                        exports_map.entry(path.clone())
+                            .or_insert_with(Vec::new)
+                            .push((network.to_string(), mode.to_string()));
+                    }
+                }
+            }
+        }
+
+        // Build display strings: "path: mode [networks]"
+        let mut exports: Vec<(String, String)> = exports_map
+            .into_iter()
+            .map(|(path, mut entries)| {
+                if entries.is_empty() {
+                    return (path, String::new());
+                }
+
+                let mode = entries[0].1.clone();
+                let networks: Vec<String> = entries.drain(..).map(|(n, _)| n).collect();
+                let info = format!("{} [{}]", mode, networks.join(", "));
+                (path, info)
+            })
+            .collect();
+
+        exports.sort_by(|a, b| a.0.cmp(&b.0));
+        exports
+    }
+
+    /// Get SMB shares from smb.conf
+    /// Returns a list of (share_name, share_path, mode) tuples
+    fn get_smb_shares(&self) -> Vec<(String, String, String)> {
+        match std::fs::read_to_string("/etc/samba/smb.conf") {
+            Ok(config) => {
+                let mut shares = Vec::new();
+                let mut current_share: Option<String> = None;
+                let mut current_path: Option<String> = None;
+                let mut current_mode: String = "ro".to_string(); // Default to read-only
+
+                for line in config.lines() {
+                    let line = line.trim();
+
+                    // Skip comments and empty lines
+                    if line.is_empty() || line.starts_with('#') || line.starts_with(';') {
+                        continue;
+                    }
+
+                    // Detect share section [sharename]
+                    if line.starts_with('[') && line.ends_with(']') {
+                        // Save previous share if we have both name and path
+                        if let (Some(name), Some(path)) = (current_share.take(), current_path.take()) {
+                            // Skip special sections
+                            if name != "global" && name != "homes" && name != "printers" {
+                                shares.push((name, path, current_mode.clone()));
+                            }
+                        }
+
+                        // Start new share
+                        let share_name = line[1..line.len()-1].trim().to_string();
+                        current_share = Some(share_name);
+                        current_path = None;
+                        current_mode = "ro".to_string(); // Reset to default
+                    }
+                    // Look for path = /some/path
+                    else if line.starts_with("path") && line.contains('=') {
+                        if let Some(path_value) = line.split('=').nth(1) {
+                            current_path = Some(path_value.trim().to_string());
+                        }
+                    }
+                    // Look for read only = yes/no
+                    else if line.to_lowercase().starts_with("read only") && line.contains('=') {
+                        if let Some(value) = line.split('=').nth(1) {
+                            let val = value.trim().to_lowercase();
+                            current_mode = if val == "no" || val == "false" { "rw" } else { "ro" }.to_string();
+                        }
+                    }
+                    // Look for writable = yes/no (opposite of read only)
+                    else if line.to_lowercase().starts_with("writable") && line.contains('=') {
+                        if let Some(value) = line.split('=').nth(1) {
+                            let val = value.trim().to_lowercase();
+                            current_mode = if val == "yes" || val == "true" { "rw" } else { "ro" }.to_string();
+                        }
+                    }
+                }
+
+                // Don't forget the last share
+                if let (Some(name), Some(path)) = (current_share, current_path) {
+                    if name != "global" && name != "homes" && name != "printers" {
+                        shares.push((name, path, current_mode));
+                    }
+                }
+
+                shares
+            }
+            _ => Vec::new(),
+        }
+    }
+
+    /// Get nftables open ports grouped by protocol
+    /// Returns: (tcp_ports_string, udp_ports_string)
+    fn get_nftables_open_ports(&self) -> (String, String) {
+        let output = Command::new("sudo")
+            .args(&["/run/current-system/sw/bin/nft", "list", "ruleset"])
+            .output();
+
+        let output = match output {
+            Ok(out) if out.status.success() => out,
+            Ok(out) => {
+                info!("nft command failed with status: {:?}, stderr: {}",
+                    out.status, String::from_utf8_lossy(&out.stderr));
+                return (String::new(), String::new());
+            }
+            Err(e) => {
+                info!("Failed to execute nft command: {}", e);
+                return (String::new(), String::new());
+            }
+        };
+
+        let output_str = match String::from_utf8(output.stdout) {
+            Ok(s) => s,
+            Err(_) => {
+                info!("Failed to parse nft output as UTF-8");
+                return (String::new(), String::new());
+            }
+        };
+
+        let mut tcp_ports = std::collections::HashSet::new();
+        let mut udp_ports = std::collections::HashSet::new();
+
+        // Parse nftables output for WAN incoming accept rules with dport
+        // Looking for patterns like: tcp dport 22 accept or tcp dport { 22, 80, 443 } accept
+        // Only include rules in input_wan chain
+        let mut in_wan_chain = false;
+
+        for line in output_str.lines() {
+            let line = line.trim();
+
+            // Track if we're in the input_wan chain
+            if line.contains("chain input_wan") {
+                in_wan_chain = true;
+                continue;
+            }
+
+            // Reset when exiting chain (closing brace) or entering other chains
+            if line == "}" || (line.starts_with("chain ") && !line.contains("input_wan")) {
+                in_wan_chain = false;
+                continue;
+            }
+
+            // Only process rules in input_wan chain
+            if !in_wan_chain {
+                continue;
+            }
+
+            // Skip if not an accept rule
+            if !line.contains("accept") {
+                continue;
+            }
+
+            // Parse TCP ports
+            if line.contains("tcp dport") {
+                for port in self.extract_ports_from_nft_rule(line) {
+                    tcp_ports.insert(port);
+                }
+            }
+
+            // Parse UDP ports
+            if line.contains("udp dport") {
+                for port in self.extract_ports_from_nft_rule(line) {
+                    udp_ports.insert(port);
+                }
+            }
+        }
+
+        // Sort and format
+        let mut tcp_vec: Vec<u16> = tcp_ports.into_iter().collect();
+        let mut udp_vec: Vec<u16> = udp_ports.into_iter().collect();
+        tcp_vec.sort();
+        udp_vec.sort();
+
+        let tcp_str = tcp_vec.iter().map(|p| p.to_string()).collect::<Vec<_>>().join(", ");
+        let udp_str = udp_vec.iter().map(|p| p.to_string()).collect::<Vec<_>>().join(", ");
+
+        info!("nftables WAN ports - TCP: '{}', UDP: '{}'", tcp_str, udp_str);
+
+        (tcp_str, udp_str)
+    }
+
+    /// Extract port numbers from nftables rule line
+    /// Returns vector of ports (handles both single ports and sets)
+    fn extract_ports_from_nft_rule(&self, line: &str) -> Vec<u16> {
+        let mut ports = Vec::new();
+
+        // Pattern: "tcp dport 22 accept" or "tcp dport { 22, 80, 443 } accept"
+        if let Some(dport_pos) = line.find("dport") {
+            let after_dport = &line[dport_pos + 5..].trim();
+
+            // Handle port sets like { 22, 80, 443 }
+            if after_dport.starts_with('{') {
+                if let Some(end_brace) = after_dport.find('}') {
+                    let ports_str = &after_dport[1..end_brace];
+                    // Parse each port in the set
+                    for port_str in ports_str.split(',') {
+                        if let Ok(port) = port_str.trim().parse::<u16>() {
+                            ports.push(port);
+                        }
+                    }
+                }
+            } else {
+                // Single port
+                if let Some(port_str) = after_dport.split_whitespace().next() {
+                    if let Ok(port) = port_str.parse::<u16>() {
+                        ports.push(port);
+                    }
+                }
+            }
+        }
+
+        ports
+    }
+
     /// Get aggregate qBittorrent torrent statistics
     /// Returns: (active_count, download_mbps, upload_mbps)
     fn get_qbittorrent_stats(&self) -> Option<(u32, f32, f32)> {
@@ -941,6 +1383,31 @@ impl SystemdCollector {
 
         Some((active_count, download_mbps, upload_mbps))
     }
+
+    /// Check for active torrent copy operations
+    /// Returns: Vec of filenames currently being copied
+    fn get_active_torrent_copies(&self) -> Vec<String> {
+        let marker_dir = "/tmp/torrent-copy";
+        let mut active_copies = Vec::new();
+
+        // Read all marker files from directory
+        if let Ok(entries) = std::fs::read_dir(marker_dir) {
+            for entry in entries.flatten() {
+                if let Ok(file_type) = entry.file_type() {
+                    if file_type.is_file() {
+                        // Filename is the marker (sanitized torrent name)
+                        if let Some(filename) = entry.file_name().to_str() {
+                            // Convert sanitized name back (replace _ with /)
+                            let display_name = filename.replace('_', "/");
+                            active_copies.push(display_name);
+                        }
+                    }
+                }
+            }
+        }
+
+        active_copies
+    }
 }
 
 #[async_trait]
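The nftables helper added above is plain string parsing, so the two rule shapes it accepts (a single port and a `{ ... }` port set) are easy to demonstrate in isolation. The sketch below is a standalone copy of that parsing logic with sample rule lines, not the collector code itself; the port numbers are arbitrary examples.

```rust
// Standalone copy of the dport-parsing logic from extract_ports_from_nft_rule,
// shown only to illustrate the two rule shapes it handles.
fn extract_ports(line: &str) -> Vec<u16> {
    let mut ports = Vec::new();
    if let Some(dport_pos) = line.find("dport") {
        let after_dport = line[dport_pos + 5..].trim();
        if after_dport.starts_with('{') {
            // Port set: "tcp dport { 22, 80, 443 } accept"
            if let Some(end_brace) = after_dport.find('}') {
                for port_str in after_dport[1..end_brace].split(',') {
                    if let Ok(port) = port_str.trim().parse::<u16>() {
                        ports.push(port);
                    }
                }
            }
        } else if let Some(port_str) = after_dport.split_whitespace().next() {
            // Single port: "udp dport 51820 accept"
            if let Ok(port) = port_str.parse::<u16>() {
                ports.push(port);
            }
        }
    }
    ports
}

fn main() {
    assert_eq!(extract_ports("tcp dport { 22, 80, 443 } accept"), vec![22, 80, 443]);
    assert_eq!(extract_ports("udp dport 51820 accept"), vec![51820]);
    println!("both rule shapes parse as expected");
}
```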
@@ -141,8 +141,23 @@ pub struct NotificationConfig {
     pub from_email: String,
     pub to_email: String,
     pub rate_limit_minutes: u64,
+    /// Whether to send notifications on warning status
+    #[serde(default = "default_true")]
+    pub trigger_on_warnings: bool,
+    /// Whether to send notifications on failure/critical status
+    #[serde(default = "default_true")]
+    pub trigger_on_failures: bool,
+    /// Only send recovery notification when all components are OK
+    #[serde(default)]
+    pub recovery_requires_all_ok: bool,
+    /// Suppress individual recovery notifications (only notify on full recovery)
+    #[serde(default)]
+    pub suppress_individual_recoveries: bool,
     /// Email notification batching interval in seconds (default: 60)
     pub aggregation_interval_seconds: u64,
+    /// How often to check for status changes in seconds (default: 30)
+    #[serde(default = "default_check_interval_seconds")]
+    pub check_interval_seconds: u64,
     /// List of metric names to exclude from email notifications
     #[serde(default)]
     pub exclude_email_metrics: Vec<String>,
@@ -151,6 +166,14 @@ pub struct NotificationConfig {
     pub maintenance_mode_file: String,
 }
 
+fn default_true() -> bool {
+    true
+}
+
+fn default_check_interval_seconds() -> u64 {
+    30
+}
+
 fn default_heartbeat_interval_seconds() -> u64 {
     5
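A configuration fragment exercising the new NotificationConfig fields might look like the sketch below. The field names come from the struct above; the `[notifications]` section name, the values, and the maintenance file path are illustrative assumptions only.

```toml
# Illustrative agent config fragment; values and section name are assumptions.
[notifications]
from_email = "agent@example.com"
to_email = "admin@example.com"
rate_limit_minutes = 60
trigger_on_warnings = true
trigger_on_failures = true
recovery_requires_all_ok = false
suppress_individual_recoveries = false
aggregation_interval_seconds = 60
check_interval_seconds = 30   # new: how often the agent compares statuses
exclude_email_metrics = []
maintenance_mode_file = "/var/run/cm-dashboard-maintenance"
```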
|
@@ -1,60 +1,314 @@
 use crate::config::NotificationConfig;
 use anyhow::Result;
 use chrono::Utc;
+use cm_dashboard_shared::Status;
 use lettre::transport::smtp::SmtpTransport;
 use lettre::{Message, Transport};
+use std::collections::HashMap;
+use std::time::{Duration, Instant};
 use tracing::{debug, error, info};
 
-/// Manages notifications
+/// Manages notifications with rate limiting and aggregation
 pub struct NotificationManager {
     config: NotificationConfig,
+    /// Last notification time per component for rate limiting
+    last_notification: HashMap<String, Instant>,
+    /// Pending notifications for aggregation
+    pending_notifications: Vec<PendingNotification>,
+    /// Pending recovery notifications (held until all OK if configured)
+    pending_recoveries: Vec<PendingNotification>,
+    /// Last aggregation flush time
+    last_aggregation_flush: Option<Instant>,
+    /// Track components currently in alert state
+    components_in_alert: HashMap<String, Status>,
+}
+
+/// A pending notification waiting to be aggregated
+#[derive(Debug, Clone)]
+struct PendingNotification {
+    component: String,
+    previous_status: String,
+    current_status: String,
+    details: String,
+    timestamp: chrono::DateTime<Utc>,
+    is_recovery: bool,
 }
 
 impl NotificationManager {
     pub fn new(config: &NotificationConfig, _hostname: &str) -> Result<Self> {
         Ok(Self {
             config: config.clone(),
+            last_notification: HashMap::new(),
+            pending_notifications: Vec::new(),
+            pending_recoveries: Vec::new(),
+            last_aggregation_flush: None,
+            components_in_alert: HashMap::new(),
         })
     }
 
-    pub async fn send_direct_email(&mut self, subject: &str, body: &str) -> Result<()> {
+    /// Check if a component is rate limited
+    fn is_rate_limited(&self, component: &str) -> bool {
+        if self.config.rate_limit_minutes == 0 {
+            return false;
+        }
+        if let Some(last_time) = self.last_notification.get(component) {
+            let rate_limit = Duration::from_secs(self.config.rate_limit_minutes * 60);
+            last_time.elapsed() < rate_limit
+        } else {
+            false
+        }
+    }
+
+    /// Queue a degradation notification (Ok→Warning, Ok→Critical, Warning→Critical)
+    pub fn queue_alert(
+        &mut self,
+        component: &str,
+        previous: &Status,
+        current: &Status,
+        details: &str,
+    ) {
+        // Check if this status type should trigger notifications
+        // Only Warning and Critical trigger notifications (not Inactive)
+        let should_notify = match current {
+            Status::Warning => self.config.trigger_on_warnings,
+            Status::Critical => self.config.trigger_on_failures,
+            _ => false,
+        };
+
+        if !should_notify {
+            debug!(
+                "Notification for {} suppressed (trigger_on_warnings={}, trigger_on_failures={})",
+                component, self.config.trigger_on_warnings, self.config.trigger_on_failures
+            );
+            return;
+        }
+
+        // Check rate limit
+        if self.is_rate_limited(component) {
+            debug!(
+                "Notification for {} rate limited (limit: {} min)",
+                component, self.config.rate_limit_minutes
+            );
+            return;
+        }
+
+        // Check exclusions
+        if self.config.exclude_email_metrics.iter().any(|e| component.contains(e)) {
+            debug!("Notification for {} excluded by config", component);
+            return;
+        }
+
+        // Track this component as in alert state
+        self.components_in_alert.insert(component.to_string(), *current);
+
+        self.pending_notifications.push(PendingNotification {
+            component: component.to_string(),
+            previous_status: format!("{:?}", previous),
+            current_status: format!("{:?}", current),
+            details: details.to_string(),
+            timestamp: Utc::now(),
+            is_recovery: false,
+        });
+
+        // Update rate limit tracker
+        self.last_notification.insert(component.to_string(), Instant::now());
+
+        debug!(
+            "Queued alert for {}: {:?} -> {:?}",
+            component, previous, current
+        );
+    }
+
+    /// Queue a recovery notification (Warning→Ok, Critical→Ok, Critical→Warning)
+    pub fn queue_recovery(
+        &mut self,
+        component: &str,
+        previous: &Status,
+        current: &Status,
+        details: &str,
+    ) {
+        // Remove from alert tracking
+        self.components_in_alert.remove(component);
+
+        // Check if individual recoveries are suppressed
+        if self.config.suppress_individual_recoveries {
+            debug!(
+                "Individual recovery for {} suppressed by config",
+                component
+            );
+
+            // Store recovery for potential batch notification
+            self.pending_recoveries.push(PendingNotification {
+                component: component.to_string(),
+                previous_status: format!("{:?}", previous),
+                current_status: format!("{:?}", current),
+                details: details.to_string(),
+                timestamp: Utc::now(),
+                is_recovery: true,
+            });
+            return;
+        }
+
+        // Check exclusions
+        if self.config.exclude_email_metrics.iter().any(|e| component.contains(e)) {
+            debug!("Recovery notification for {} excluded by config", component);
+            return;
+        }
+
+        self.pending_notifications.push(PendingNotification {
+            component: component.to_string(),
+            previous_status: format!("{:?}", previous),
+            current_status: format!("{:?}", current),
+            details: details.to_string(),
+            timestamp: Utc::now(),
+            is_recovery: true,
+        });
+
+        debug!(
+            "Queued recovery for {}: {:?} -> {:?}",
+            component, previous, current
+        );
+    }
+
+    /// Check if all components have recovered (no components in alert state)
+    pub fn all_components_ok(&self) -> bool {
+        self.components_in_alert.is_empty()
+    }
+
+    /// Flush suppressed recovery notifications when all components are OK
+    pub fn flush_recoveries_if_all_ok(&mut self) {
+        if !self.config.recovery_requires_all_ok || self.all_components_ok() {
+            if !self.pending_recoveries.is_empty() {
+                info!("All components recovered, sending batch recovery notification");
+                self.pending_notifications.append(&mut self.pending_recoveries);
+            }
+        }
+    }
+
+    /// Check if it's time to flush aggregated notifications
+    pub fn should_flush(&self) -> bool {
+        if self.pending_notifications.is_empty() {
+            return false;
+        }
+
+        match self.last_aggregation_flush {
+            None => true, // First flush
+            Some(last_flush) => {
+                let aggregation_interval =
+                    Duration::from_secs(self.config.aggregation_interval_seconds);
+                last_flush.elapsed() >= aggregation_interval
+            }
+        }
+    }
+
+    /// Flush pending notifications as a single aggregated email
+    pub async fn flush_notifications(&mut self) -> Result<()> {
+        if self.pending_notifications.is_empty() {
+            return Ok(());
+        }
+
         if !self.config.enabled {
+            self.pending_notifications.clear();
+            self.last_aggregation_flush = Some(Instant::now());
             return Ok(());
         }
 
         if self.is_maintenance_mode() {
-            debug!("Maintenance mode active, suppressing email notification");
+            debug!("Maintenance mode active, suppressing aggregated notifications");
+            self.pending_notifications.clear();
+            self.last_aggregation_flush = Some(Instant::now());
             return Ok(());
         }
 
-        let hostname = gethostname::gethostname()
-            .to_string_lossy()
-            .to_string();
+        let hostname = gethostname::gethostname().to_string_lossy().to_string();
 
+        // Build aggregated email
+        let notification_count = self.pending_notifications.len();
+        let alert_count = self.pending_notifications.iter().filter(|n| !n.is_recovery).count();
+        let recovery_count = self.pending_notifications.iter().filter(|n| n.is_recovery).count();
+
+        let subject = if notification_count == 1 {
+            let n = &self.pending_notifications[0];
+            if n.is_recovery {
+                format!("[{}] {} Recovered: {}", hostname, n.component, n.current_status)
+            } else {
+                format!("[{}] {} Alert: {}", hostname, n.component, n.current_status)
+            }
+        } else if recovery_count > 0 && alert_count == 0 {
+            format!("[{}] {} Components Recovered", hostname, recovery_count)
+        } else if alert_count > 0 && recovery_count == 0 {
+            format!("[{}] {} Status Alerts", hostname, alert_count)
+        } else {
+            format!("[{}] {} Alerts, {} Recoveries", hostname, alert_count, recovery_count)
+        };
+
+        let mut body = String::new();
+        body.push_str(&format!("Status notifications for host: {}\n", hostname));
+        body.push_str(&format!("Time: {}\n\n", Utc::now().format("%Y-%m-%d %H:%M:%S UTC")));
+
+        // Group alerts and recoveries
+        let alerts: Vec<_> = self.pending_notifications.iter().filter(|n| !n.is_recovery).collect();
+        let recoveries: Vec<_> = self.pending_notifications.iter().filter(|n| n.is_recovery).collect();
+
+        if !alerts.is_empty() {
+            body.push_str("=== ALERTS ===\n\n");
+            for notification in &alerts {
+                body.push_str(&format!(
+                    "• {} : {} → {}\n {}\n ({})\n\n",
+                    notification.component,
+                    notification.previous_status,
+                    notification.current_status,
+                    notification.details,
+                    notification.timestamp.format("%H:%M:%S UTC")
+                ));
+            }
+        }
+
+        if !recoveries.is_empty() {
+            body.push_str("=== RECOVERIES ===\n\n");
+            for notification in &recoveries {
+                body.push_str(&format!(
+                    "• {} : {} → {}\n {}\n ({})\n\n",
+                    notification.component,
+                    notification.previous_status,
+                    notification.current_status,
+                    notification.details,
+                    notification.timestamp.format("%H:%M:%S UTC")
+                ));
+            }
+        }
+
+        body.push_str("--\nCM Dashboard Agent");
+
+        // Send the aggregated email
         let from_email = self.config.from_email.replace("{hostname}", &hostname);
 
-        let email_body = format!(
-            "{}\n\n--\nCM Dashboard Agent\nGenerated at {}",
-            body,
-            Utc::now().format("%Y-%m-%d %H:%M:%S %Z")
-        );
-
         let email = Message::builder()
             .from(from_email.parse()?)
             .to(self.config.to_email.parse()?)
-            .subject(subject)
-            .body(email_body)?;
+            .subject(&subject)
+            .body(body)?;
 
-        let mailer = SmtpTransport::unencrypted_localhost();
+        let mailer = SmtpTransport::builder_dangerous(&self.config.smtp_host)
+            .port(self.config.smtp_port)
+            .build();
 
         match mailer.send(&email) {
-            Ok(_) => info!("Direct email sent successfully: {}", subject),
+            Ok(_) => {
+                info!(
+                    "Sent aggregated notification email with {} alerts",
+                    notification_count
+                );
+            }
             Err(e) => {
-                error!("Failed to send email: {}", e);
+                error!("Failed to send aggregated email: {}", e);
                 return Err(e.into());
            }
         }
 
+        self.pending_notifications.clear();
+        self.last_aggregation_flush = Some(Instant::now());
+
         Ok(())
     }
 
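The hunk above only defines the manager; how it is driven is not shown. A hypothetical wiring on the agent side, using just the methods introduced here (the helper function names are illustrative only):

// Hypothetical call sites; nothing below is part of this diff.
fn on_component_status_change(
    mgr: &mut NotificationManager,
    component: &str,
    previous: Status,
    current: Status,
    details: &str,
) {
    match current {
        // Degradations are queued, subject to trigger_* flags, rate limit and exclusions.
        Status::Warning | Status::Critical => mgr.queue_alert(component, &previous, &current, details),
        // Transitions back to Ok are queued as recoveries.
        Status::Ok if matches!(previous, Status::Warning | Status::Critical) => {
            mgr.queue_recovery(component, &previous, &current, details)
        }
        _ => {}
    }
}

async fn notification_tick(mgr: &mut NotificationManager) -> anyhow::Result<()> {
    // Promote held recoveries once everything is OK again...
    mgr.flush_recoveries_if_all_ok();
    // ...and send at most one aggregated email per aggregation interval.
    if mgr.should_flush() {
        mgr.flush_notifications().await?;
    }
    Ok(())
}
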
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.246"
+version = "0.1.280"
 edition = "2021"
 
 [dependencies]
@@ -1,10 +1,10 @@
 use anyhow::Result;
 use crossterm::{
-    event::{self},
+    event::{self, EnableMouseCapture, DisableMouseCapture, Event, MouseEvent, MouseEventKind, MouseButton},
     execute,
     terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
 };
-use ratatui::{backend::CrosstermBackend, Terminal};
+use ratatui::{backend::CrosstermBackend, Terminal, layout::Rect};
 use std::io;
 use std::time::{Duration, Instant};
 use tracing::{debug, error, info, warn};
@@ -22,6 +22,8 @@ pub struct Dashboard {
     headless: bool,
     initial_commands_sent: std::collections::HashSet<String>,
     config: DashboardConfig,
+    system_area: Rect,   // Store system area for mouse event handling
+    services_area: Rect, // Store services area for mouse event handling
 }
 
 impl Dashboard {
@@ -92,7 +94,7 @@ impl Dashboard {
         }
 
         let mut stdout = io::stdout();
-        if let Err(e) = execute!(stdout, EnterAlternateScreen) {
+        if let Err(e) = execute!(stdout, EnterAlternateScreen, EnableMouseCapture) {
             error!("Failed to enter alternate screen: {}", e);
             let _ = disable_raw_mode();
             return Err(e.into());
@@ -121,6 +123,8 @@ impl Dashboard {
             headless,
             initial_commands_sent: std::collections::HashSet::new(),
             config,
+            system_area: Rect::default(),
+            services_area: Rect::default(),
         })
     }
 
@@ -132,27 +136,45 @@ impl Dashboard {
         let metrics_check_interval = Duration::from_millis(100); // Check for metrics every 100ms
         let mut last_heartbeat_check = Instant::now();
         let heartbeat_check_interval = Duration::from_secs(1); // Check for host connectivity every 1 second
+        let mut needs_render = true; // Track if we need to render
 
         loop {
-            // Handle terminal events (keyboard input) only if not headless
+            // Handle terminal events (keyboard and mouse input) only if not headless
            if !self.headless {
-                match event::poll(Duration::from_millis(50)) {
+                match event::poll(Duration::from_millis(200)) {
                    Ok(true) => {
                        match event::read() {
                            Ok(event) => {
                                if let Some(ref mut tui_app) = self.tui_app {
-                                    // Handle input
-                                    match tui_app.handle_input(event) {
-                                        Ok(_) => {
-                                            // Check if we should quit
-                                            if tui_app.should_quit() {
-                                                info!("Quit requested, exiting dashboard");
-                                                break;
+                                    match event {
+                                        Event::Key(_) => {
+                                            // Handle keyboard input
+                                            match tui_app.handle_input(event) {
+                                                Ok(_) => {
+                                                    needs_render = true;
+                                                    // Check if we should quit
+                                                    if tui_app.should_quit() {
+                                                        info!("Quit requested, exiting dashboard");
+                                                        break;
+                                                    }
+                                                }
+                                                Err(e) => {
+                                                    error!("Error handling input: {}", e);
+                                                }
                                            }
                                        }
-                                        Err(e) => {
-                                            error!("Error handling input: {}", e);
+                                        Event::Mouse(mouse_event) => {
+                                            // Handle mouse events
+                                            if let Err(e) = self.handle_mouse_event(mouse_event) {
+                                                error!("Error handling mouse event: {}", e);
+                                            }
+                                            needs_render = true;
                                        }
+                                        Event::Resize(_width, _height) => {
+                                            // Terminal was resized - mark for re-render
+                                            needs_render = true;
+                                        }
+                                        _ => {}
                                    }
                                }
                            }
@@ -168,17 +190,6 @@ impl Dashboard {
                            break;
                        }
                    }
-
-                    // Render UI immediately after handling input for responsive feedback
-                    if let Some(ref mut terminal) = self.terminal {
-                        if let Some(ref mut tui_app) = self.tui_app {
-                            if let Err(e) = terminal.draw(|frame| {
-                                tui_app.render(frame, &self.metric_store);
-                            }) {
-                                error!("Error rendering TUI after input: {}", e);
-                            }
-                        }
-                    }
                }
 
                // Check for new metrics
@@ -217,8 +228,10 @@ impl Dashboard {
                if let Some(ref mut tui_app) = self.tui_app {
                    tui_app.update_metrics(&mut self.metric_store);
                }
+
+                needs_render = true; // New metrics received, need to render
            }
 
            // Also check for command output messages
            if let Ok(Some(cmd_output)) = self.zmq_consumer.receive_command_output().await {
                debug!(
@@ -229,47 +242,338 @@ impl Dashboard {
 
                // Command output (terminal popup removed - output not displayed)
            }
 
            last_metrics_check = Instant::now();
        }
 
        // Check for host connectivity changes (heartbeat timeouts) periodically
        if last_heartbeat_check.elapsed() >= heartbeat_check_interval {
            let timeout = Duration::from_secs(self.config.zmq.heartbeat_timeout_seconds);
 
            // Clean up metrics for offline hosts
            self.metric_store.cleanup_offline_hosts(timeout);
 
            if let Some(ref mut tui_app) = self.tui_app {
                let connected_hosts = self.metric_store.get_connected_hosts(timeout);
                tui_app.update_hosts(connected_hosts);
            }
            last_heartbeat_check = Instant::now();
+            needs_render = true; // Heartbeat check happened, may have changed hosts
        }
 
-        // Render TUI (only if not headless)
-        if !self.headless {
+        // Render TUI only when needed (not headless and something changed)
+        if !self.headless && needs_render {
            if let Some(ref mut terminal) = self.terminal {
                if let Some(ref mut tui_app) = self.tui_app {
+                    // Clear and autoresize terminal to handle any resize events
+                    if let Err(e) = terminal.autoresize() {
+                        warn!("Error autoresizing terminal: {}", e);
+                    }
+
+                    // Render TUI regardless of terminal size
                    if let Err(e) = terminal.draw(|frame| {
-                        tui_app.render(frame, &self.metric_store);
+                        let (_title_area, system_area, services_area) = tui_app.render(frame, &self.metric_store);
+                        self.system_area = system_area;
+                        self.services_area = services_area;
                    }) {
                        error!("Error rendering TUI: {}", e);
                        break;
                    }
                }
            }
+            needs_render = false; // Reset flag after rendering
        }
 
-        // Small sleep to prevent excessive CPU usage
-        tokio::time::sleep(Duration::from_millis(10)).await;
    }
 
    info!("Dashboard main loop ended");
    Ok(())
 }
 
+    /// Handle mouse events
+    fn handle_mouse_event(&mut self, mouse: MouseEvent) -> Result<()> {
+        let x = mouse.column;
+        let y = mouse.row;
+
+        // Handle popup menu if open
+        let popup_info = if let Some(ref tui_app) = self.tui_app {
+            tui_app.popup_menu.clone().map(|popup| {
+                let hostname = tui_app.current_host.clone();
+                (popup, hostname)
+            })
+        } else {
+            None
+        };
+
+        if let Some((popup, hostname)) = popup_info {
+            // Calculate popup bounds using screen coordinates
+            let popup_width = 20;
+            let popup_height = 5; // 3 items + 2 borders
+
+            // Get terminal size
+            let (screen_width, screen_height) = if let Some(ref terminal) = self.terminal {
+                let size = terminal.size().unwrap_or_default();
+                (size.width, size.height)
+            } else {
+                (80, 24) // fallback
+            };
+
+            let popup_x = if popup.x + popup_width < screen_width {
+                popup.x
+            } else {
+                screen_width.saturating_sub(popup_width)
+            };
+
+            let popup_y = if popup.y + popup_height < screen_height {
+                popup.y
+            } else {
+                screen_height.saturating_sub(popup_height)
+            };
+
+            let popup_area = Rect {
+                x: popup_x,
+                y: popup_y,
+                width: popup_width,
+                height: popup_height,
+            };
+
+            // Update selected index on mouse move
+            if matches!(mouse.kind, MouseEventKind::Moved) {
+                if is_in_area(x, y, &popup_area) {
+                    let relative_y = y.saturating_sub(popup_y + 1) as usize; // +1 for top border
+                    if relative_y < 3 {
+                        if let Some(ref mut tui_app) = self.tui_app {
+                            if let Some(ref mut popup) = tui_app.popup_menu {
+                                popup.selected_index = relative_y;
+                            }
+                        }
+                    }
+                }
+                return Ok(());
+            }
+
+            if matches!(mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
+                if is_in_area(x, y, &popup_area) {
+                    // Click inside popup - execute action
+                    let relative_y = y.saturating_sub(popup_y + 1) as usize; // +1 for top border
+                    if relative_y < 3 {
+                        // Execute the selected action
+                        self.execute_service_action(relative_y, &popup.service_name, hostname.as_deref())?;
+                    }
+                    // Close popup after action
+                    if let Some(ref mut tui_app) = self.tui_app {
+                        tui_app.popup_menu = None;
+                    }
+                    return Ok(());
+                } else {
+                    // Click outside popup - close it
+                    if let Some(ref mut tui_app) = self.tui_app {
+                        tui_app.popup_menu = None;
+                    }
+                    return Ok(());
+                }
+            }
+
+            // Any other event while popup is open - don't process panels
+            return Ok(());
+        }
+
+        // Determine which panel the mouse is over
+        let in_system_area = is_in_area(x, y, &self.system_area);
+        let in_services_area = is_in_area(x, y, &self.services_area);
+
+        if !in_system_area && !in_services_area {
+            return Ok(());
+        }
+
+        // Handle mouse events
+        match mouse.kind {
+            MouseEventKind::ScrollDown => {
+                if in_system_area {
+                    // Scroll down in system panel
+                    if let Some(ref mut tui_app) = self.tui_app {
+                        if let Some(hostname) = tui_app.current_host.clone() {
+                            let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
+                            let visible_height = self.system_area.height as usize;
+                            let total_lines = host_widgets.system_widget.get_total_lines();
+                            host_widgets.system_widget.scroll_down(visible_height, total_lines);
+                        }
+                    }
+                } else if in_services_area {
+                    // Scroll down in services panel
+                    if let Some(ref mut tui_app) = self.tui_app {
+                        if let Some(hostname) = tui_app.current_host.clone() {
+                            let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
+                            // Calculate visible height (panel height - borders and header)
+                            let visible_height = self.services_area.height.saturating_sub(3) as usize;
+                            host_widgets.services_widget.scroll_down(visible_height);
+                        }
+                    }
+                }
+            }
+            MouseEventKind::ScrollUp => {
+                if in_system_area {
+                    // Scroll up in system panel
+                    if let Some(ref mut tui_app) = self.tui_app {
+                        if let Some(hostname) = tui_app.current_host.clone() {
+                            let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
+                            host_widgets.system_widget.scroll_up();
+                        }
+                    }
+                } else if in_services_area {
+                    // Scroll up in services panel
+                    if let Some(ref mut tui_app) = self.tui_app {
+                        if let Some(hostname) = tui_app.current_host.clone() {
+                            let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
+                            host_widgets.services_widget.scroll_up();
+                        }
+                    }
+                }
+            }
+            MouseEventKind::Down(button) => {
+                // Only handle clicks in services area (not system area)
+                if !in_services_area {
+                    return Ok(());
+                }
+
+                if let Some(ref mut tui_app) = self.tui_app {
+                    // Handle service click
+                    // The services area includes a border, so we need to account for that
+                    let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header
+
+                    if let Some(hostname) = tui_app.current_host.clone() {
+                        let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
+
+                        // Account for scroll offset - the clicked line is relative to viewport
+                        let display_line_index = host_widgets.services_widget.scroll_offset + relative_y;
+
+                        // Map display line to parent service index
+                        if let Some(parent_index) = host_widgets.services_widget.display_line_to_parent_index(display_line_index) {
+                            // Set the selected index to the clicked parent service
+                            host_widgets.services_widget.selected_index = parent_index;
+
+                            match button {
+                                MouseButton::Left => {
+                                    // Left click just selects the service
+                                    debug!("Left-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
+                                }
+                                MouseButton::Right => {
+                                    // Right click opens context menu
+                                    debug!("Right-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
+
+                                    // Get the service name for the popup
+                                    if let Some(service_name) = host_widgets.services_widget.get_selected_service() {
+                                        tui_app.popup_menu = Some(crate::ui::PopupMenu {
+                                            service_name,
+                                            x,
+                                            y,
+                                            selected_index: 0,
+                                        });
+                                    }
+                                }
+                                _ => {}
+                            }
+                        }
+                    }
+                }
+            }
+            _ => {}
+        }
+
+        Ok(())
+    }
+
+    /// Execute service action from popup menu
+    fn execute_service_action(&self, action_index: usize, service_name: &str, hostname: Option<&str>) -> Result<()> {
+        let Some(hostname) = hostname else {
+            return Ok(());
+        };
+
+        let connection_ip = self.get_connection_ip(hostname);
+
+        match action_index {
+            0 => {
+                // Start Service
+                let service_start_command = format!(
+                    "echo 'Starting service: {} on {}' && ssh -tt {}@{} \"bash -ic '{} start {}'\"",
+                    service_name,
+                    hostname,
+                    self.config.ssh.rebuild_user,
+                    connection_ip,
+                    self.config.ssh.service_manage_cmd,
+                    service_name
+                );
+
+                std::process::Command::new("tmux")
+                    .arg("split-window")
+                    .arg("-v")
+                    .arg("-p")
+                    .arg("30")
+                    .arg(&service_start_command)
+                    .spawn()
+                    .ok();
+            }
+            1 => {
+                // Stop Service
+                let service_stop_command = format!(
+                    "echo 'Stopping service: {} on {}' && ssh -tt {}@{} \"bash -ic '{} stop {}'\"",
+                    service_name,
+                    hostname,
+                    self.config.ssh.rebuild_user,
+                    connection_ip,
+                    self.config.ssh.service_manage_cmd,
+                    service_name
+                );
+
+                std::process::Command::new("tmux")
+                    .arg("split-window")
+                    .arg("-v")
+                    .arg("-p")
+                    .arg("30")
+                    .arg(&service_stop_command)
+                    .spawn()
+                    .ok();
+            }
+            2 => {
+                // View Logs
+                let logs_command = format!(
+                    "ssh -tt {}@{} '{} logs {}'",
+                    self.config.ssh.rebuild_user,
+                    connection_ip,
+                    self.config.ssh.service_manage_cmd,
+                    service_name
+                );
+
+                std::process::Command::new("tmux")
+                    .arg("split-window")
+                    .arg("-v")
+                    .arg("-p")
+                    .arg("30")
+                    .arg(&logs_command)
+                    .spawn()
+                    .ok();
+            }
+            _ => {}
+        }
+
+        Ok(())
+    }
+
+    /// Get connection IP for a host
+    fn get_connection_ip(&self, hostname: &str) -> String {
+        self.config
+            .hosts
+            .get(hostname)
+            .and_then(|h| h.ip.clone())
+            .unwrap_or_else(|| hostname.to_string())
+    }
+}
+
+/// Check if a point is within a rectangular area
+fn is_in_area(x: u16, y: u16, area: &Rect) -> bool {
+    x >= area.x && x < area.x.saturating_add(area.width)
+        && y >= area.y && y < area.y.saturating_add(area.height)
 }
 
 impl Drop for Dashboard {
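
The `is_in_area` helper added above treats a `Rect` as half-open on both axes, so a click at `x + width` or `y + height` is already outside. A quick standalone check (the helper is copied so the snippet compiles on its own against ratatui):

use ratatui::layout::Rect;

fn is_in_area(x: u16, y: u16, area: &Rect) -> bool {
    x >= area.x && x < area.x.saturating_add(area.width)
        && y >= area.y && y < area.y.saturating_add(area.height)
}

fn main() {
    let services = Rect { x: 40, y: 1, width: 40, height: 20 };
    assert!(is_in_area(40, 1, &services));   // top-left corner is inside
    assert!(is_in_area(79, 20, &services));  // last column/row is still inside
    assert!(!is_in_area(80, 1, &services));  // x == x + width is already outside
    assert!(!is_in_area(39, 5, &services));  // left of the panel
}
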
@@ -278,7 +582,7 @@ impl Drop for Dashboard {
         if !self.headless {
             let _ = disable_raw_mode();
             if let Some(ref mut terminal) = self.terminal {
-                let _ = execute!(terminal.backend_mut(), LeaveAlternateScreen);
+                let _ = execute!(terminal.backend_mut(), LeaveAlternateScreen, DisableMouseCapture);
                 let _ = terminal.show_cursor();
             }
         }
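
For reference, the popup's Start action hands tmux one shell string assembled from the dashboard's SSH settings. A sketch of the expansion with made-up values for the user, connection IP and manage command:

fn main() {
    // Hypothetical values standing in for config.ssh.rebuild_user,
    // the host's connection IP and config.ssh.service_manage_cmd.
    let (service_name, hostname, user, ip, manage_cmd) = ("gitea", "cmbox", "ops", "10.0.0.12", "svc");
    let service_start_command = format!(
        "echo 'Starting service: {} on {}' && ssh -tt {}@{} \"bash -ic '{} start {}'\"",
        service_name, hostname, user, ip, manage_cmd, service_name
    );
    // tmux split-window -v -p 30 receives exactly this string:
    // echo 'Starting service: gitea on cmbox' && ssh -tt ops@10.0.0.12 "bash -ic 'svc start gitea'"
    println!("{service_start_command}");
}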
@@ -86,16 +86,6 @@ impl MetricStore {
         self.current_agent_data.get(hostname)
     }
-
-    /// Get ZMQ communication statistics for a host
-    pub fn get_zmq_stats(&mut self, hostname: &str) -> Option<ZmqStats> {
-        let now = Instant::now();
-        self.zmq_stats.get_mut(hostname).map(|stats| {
-            // Update packet age
-            stats.last_packet_age_secs = now.duration_since(stats.last_packet_time).as_secs_f64();
-            stats.clone()
-        })
-    }
 
     /// Get connected hosts (hosts with recent heartbeats)
     pub fn get_connected_hosts(&self, timeout: Duration) -> Vec<String> {
         let now = Instant::now();
@@ -17,8 +17,8 @@ pub mod widgets;
 use crate::config::DashboardConfig;
 use crate::metrics::MetricStore;
 use cm_dashboard_shared::Status;
-use theme::{Components, Layout as ThemeLayout, Theme, Typography};
-use widgets::{ServicesWidget, SystemWidget, Widget};
+use theme::{Components, Layout as ThemeLayout, Theme};
+use widgets::{HostsWidget, ServicesWidget, SystemWidget, Widget};
 
 
@@ -47,16 +47,23 @@ impl HostWidgets {
 }
 
+/// Popup menu state
+#[derive(Clone)]
+pub struct PopupMenu {
+    pub service_name: String,
+    pub x: u16,
+    pub y: u16,
+    pub selected_index: usize,
+}
+
 /// Main TUI application
 pub struct TuiApp {
     /// Widget states per host (hostname -> HostWidgets)
     host_widgets: HashMap<String, HostWidgets>,
     /// Current active host
-    current_host: Option<String>,
+    pub current_host: Option<String>,
     /// Available hosts
     available_hosts: Vec<String>,
-    /// Host index for navigation
-    host_index: usize,
     /// Should quit application
     should_quit: bool,
     /// Track if user manually navigated away from localhost
@@ -65,6 +72,10 @@ pub struct TuiApp {
     config: DashboardConfig,
     /// Cached localhost hostname to avoid repeated system calls
     localhost: String,
+    /// Active popup menu (if any)
+    pub popup_menu: Option<PopupMenu>,
+    /// Hosts widget for navigation and rendering
+    pub hosts_widget: HostsWidget,
 }
 
 impl TuiApp {
@@ -74,11 +85,12 @@ impl TuiApp {
             host_widgets: HashMap::new(),
             current_host: None,
             available_hosts: config.hosts.keys().cloned().collect(),
-            host_index: 0,
             should_quit: false,
             user_navigated_away: false,
             config,
             localhost,
+            popup_menu: None,
+            hosts_widget: HostsWidget::new(),
         };
 
         // Sort predefined hosts
@@ -93,7 +105,7 @@ impl TuiApp {
     }
 
     /// Get or create host widgets for the given hostname
-    fn get_or_create_host_widgets(&mut self, hostname: &str) -> &mut HostWidgets {
+    pub fn get_or_create_host_widgets(&mut self, hostname: &str) -> &mut HostWidgets {
         self.host_widgets
             .entry(hostname.to_string())
             .or_insert_with(HostWidgets::new)
@@ -130,27 +142,32 @@ impl TuiApp {
 
         all_hosts.sort();
         self.available_hosts = all_hosts;
 
+        // Track if we had a host before this update
+        let had_host = self.current_host.is_some();
+
         // Get the current hostname (localhost) for auto-selection
         if !self.available_hosts.is_empty() {
             if self.available_hosts.contains(&self.localhost) && !self.user_navigated_away {
                 // Localhost is available and user hasn't navigated away - switch to it
                 self.current_host = Some(self.localhost.clone());
-                // Find the actual index of localhost in the sorted list
-                self.host_index = self.available_hosts.iter().position(|h| h == &self.localhost).unwrap_or(0);
+                // Initialize selector bar on first host selection
+                if !had_host {
+                    let index = self.available_hosts.iter().position(|h| h == &self.localhost).unwrap_or(0);
+                    self.hosts_widget.set_selected_index(index, self.available_hosts.len());
+                }
             } else if self.current_host.is_none() {
                 // No current host - select first available (which is localhost if available)
                 self.current_host = Some(self.available_hosts[0].clone());
-                self.host_index = 0;
+                // Initialize selector bar
+                self.hosts_widget.set_selected_index(0, self.available_hosts.len());
             } else if let Some(ref current) = self.current_host {
                 if !self.available_hosts.contains(current) {
-                    // Current host disconnected - select first available and reset navigation flag
+                    // Current host disconnected - FORCE switch to first available
                     self.current_host = Some(self.available_hosts[0].clone());
-                    self.host_index = 0;
+                    // Reset selector bar since we're forcing a host change
+                    self.hosts_widget.set_selected_index(0, self.available_hosts.len());
                     self.user_navigated_away = false; // Reset since we're forced to switch
-                } else if let Some(index) = self.available_hosts.iter().position(|h| h == current) {
-                    // Update index for current host
-                    self.host_index = index;
                 }
             }
         }
@@ -159,16 +176,18 @@ impl TuiApp {
     /// Handle keyboard input
     pub fn handle_input(&mut self, event: Event) -> Result<()> {
         if let Event::Key(key) = event {
+            // Close popup on Escape
+            if matches!(key.code, KeyCode::Esc) {
+                if self.popup_menu.is_some() {
+                    self.popup_menu = None;
+                    return Ok(());
+                }
+            }
+
             match key.code {
                 KeyCode::Char('q') => {
                     self.should_quit = true;
                 }
-                KeyCode::Left => {
-                    self.navigate_host(-1);
-                }
-                KeyCode::Right => {
-                    self.navigate_host(1);
-                }
                 KeyCode::Char('r') => {
                     // System rebuild command - works on any panel for current host
                     if let Some(hostname) = self.current_host.clone() {
@@ -337,7 +356,11 @@ impl TuiApp {
                 }
                 KeyCode::Tab => {
                     // Tab cycles to next host
-                    self.navigate_host(1);
+                    self.cycle_next_host();
+                }
+                KeyCode::BackTab => {
+                    // Shift+Tab cycles to previous host
+                    self.cycle_previous_host();
                 }
                 KeyCode::Up | KeyCode::Char('k') => {
                     // Move service selection up
@@ -363,37 +386,59 @@ impl TuiApp {
         Ok(())
     }
 
-    /// Navigate between hosts
-    fn navigate_host(&mut self, direction: i32) {
-        if self.available_hosts.is_empty() {
-            return;
-        }
-
-        let len = self.available_hosts.len();
-        if direction > 0 {
-            self.host_index = (self.host_index + 1) % len;
-        } else {
-            self.host_index = if self.host_index == 0 {
-                len - 1
-            } else {
-                self.host_index - 1
-            };
-        }
-
-        self.current_host = Some(self.available_hosts[self.host_index].clone());
-
-        // Check if user navigated away from localhost
-        if let Some(ref current) = self.current_host {
-            if current != &self.localhost {
-                self.user_navigated_away = true;
-            } else {
-                self.user_navigated_away = false; // User navigated back to localhost
-            }
-        }
-
-        info!("Switched to host: {}", self.current_host.as_ref().unwrap());
-    }
+    /// Switch to a specific host by name
+    pub fn switch_to_host(&mut self, hostname: &str) {
+        if let Some(index) = self.available_hosts.iter().position(|h| h == hostname) {
+            // Update selector bar position
+            self.hosts_widget.set_selected_index(index, self.available_hosts.len());
+            self.current_host = Some(hostname.to_string());
+
+            // Check if user navigated away from localhost
+            if hostname != &self.localhost {
+                self.user_navigated_away = true;
+            } else {
+                self.user_navigated_away = false; // User navigated back to localhost
+            }
+
+            info!("Switched to host: {}", hostname);
+        }
+    }
+
+    /// Cycle to next host (TAB)
+    fn cycle_next_host(&mut self) {
+        if self.available_hosts.is_empty() {
+            return;
+        }
+
+        let current_idx = self.current_host
+            .as_ref()
+            .and_then(|h| self.available_hosts.iter().position(|x| x == h))
+            .unwrap_or(0);
+
+        let next_idx = (current_idx + 1) % self.available_hosts.len();
+        let next_host = self.available_hosts[next_idx].clone();
+        self.switch_to_host(&next_host);
+    }
+
+    /// Cycle to previous host (Shift+TAB)
+    fn cycle_previous_host(&mut self) {
+        if self.available_hosts.is_empty() {
+            return;
+        }
+
+        let current_idx = self.current_host
+            .as_ref()
+            .and_then(|h| self.available_hosts.iter().position(|x| x == h))
+            .unwrap_or(0);
+
+        let prev_idx = if current_idx == 0 {
+            self.available_hosts.len() - 1
+        } else {
+            current_idx - 1
+        };
+        let prev_host = self.available_hosts[prev_idx].clone();
+        self.switch_to_host(&prev_host);
+    }
 
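Tab and Shift+Tab now recompute the index from `current_host` on every press instead of tracking a separate `host_index`; the wrap-around itself is plain modular stepping, sketched standalone below:

// Standalone sketch of the wrap-around used by cycle_next_host / cycle_previous_host.
fn next_idx(current: usize, len: usize) -> usize {
    (current + 1) % len
}

fn prev_idx(current: usize, len: usize) -> usize {
    if current == 0 { len - 1 } else { current - 1 }
}

fn main() {
    let hosts = ["alpha", "beta", "gamma"];
    assert_eq!(next_idx(2, hosts.len()), 0); // Tab on the last host wraps to the first
    assert_eq!(prev_idx(0, hosts.len()), 2); // Shift+Tab on the first wraps to the last
}
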
@@ -408,7 +453,6 @@ impl TuiApp {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/// Should quit application
|
/// Should quit application
|
||||||
pub fn should_quit(&self) -> bool {
|
pub fn should_quit(&self) -> bool {
|
||||||
self.should_quit
|
self.should_quit
|
||||||
@@ -421,7 +465,7 @@ impl TuiApp {
|
|||||||
|
|
||||||
|
|
||||||
/// Render the dashboard (real btop-style multi-panel layout)
|
/// Render the dashboard (real btop-style multi-panel layout)
|
||||||
pub fn render(&mut self, frame: &mut Frame, metric_store: &MetricStore) {
|
pub fn render(&mut self, frame: &mut Frame, metric_store: &MetricStore) -> (Rect, Rect, Rect) {
|
||||||
let size = frame.size();
|
let size = frame.size();
|
||||||
|
|
||||||
// Clear background to true black like btop
|
// Clear background to true black like btop
|
||||||
@@ -441,11 +485,11 @@ impl TuiApp {
|
|||||||
])
|
])
|
||||||
.split(size);
|
.split(size);
|
||||||
|
|
||||||
// New layout: left panels | right services (100% height)
|
// New layout: left panels (hosts + system) | right services (100% height)
|
||||||
let content_chunks = ratatui::layout::Layout::default()
|
let content_chunks = ratatui::layout::Layout::default()
|
||||||
.direction(Direction::Horizontal)
|
.direction(Direction::Horizontal)
|
||||||
.constraints([
|
.constraints([
|
||||||
Constraint::Percentage(ThemeLayout::LEFT_PANEL_WIDTH), // Left side: system, backup
|
Constraint::Percentage(ThemeLayout::LEFT_PANEL_WIDTH), // Left side: hosts, system
|
||||||
Constraint::Percentage(ThemeLayout::RIGHT_PANEL_WIDTH), // Right side: services (100% height)
|
Constraint::Percentage(ThemeLayout::RIGHT_PANEL_WIDTH), // Right side: services (100% height)
|
||||||
])
|
])
|
||||||
.split(main_chunks[1]); // main_chunks[1] is now the content area (between title and statusbar)
|
.split(main_chunks[1]); // main_chunks[1] is now the content area (between title and statusbar)
|
||||||
@@ -457,45 +501,55 @@ impl TuiApp {
|
|||||||
true // No host selected is considered offline
|
true // No host selected is considered offline
|
||||||
};
|
};
|
||||||
|
|
||||||
// If host is offline, render wake-up message instead of panels
|
// Calculate hosts panel height dynamically based on available width
|
||||||
if current_host_offline {
|
let hosts_inner_width = content_chunks[0].width.saturating_sub(2);
|
||||||
self.render_offline_host_message(frame, main_chunks[1]);
|
let hosts_content_height = HostsWidget::required_height(self.available_hosts.len(), hosts_inner_width);
|
||||||
self.render_btop_title(frame, main_chunks[0], metric_store);
|
let hosts_height = hosts_content_height + 2; // Add borders
|
||||||
self.render_statusbar(frame, main_chunks[2]);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Left side: system panel only (full height)
|
// Left side: hosts panel on top, system panel below
|
||||||
let left_chunks = ratatui::layout::Layout::default()
|
let left_chunks = ratatui::layout::Layout::default()
|
||||||
.direction(Direction::Vertical)
|
.direction(Direction::Vertical)
|
||||||
.constraints([Constraint::Percentage(100)]) // System section takes full height
|
.constraints([
|
||||||
|
Constraint::Length(hosts_height), // Hosts panel (compact, dynamic)
|
||||||
|
Constraint::Min(0), // System panel (rest)
|
||||||
|
])
|
||||||
.split(content_chunks[0]);
|
.split(content_chunks[0]);
|
||||||
|
|
||||||
// Render title bar
|
// Render title bar
|
||||||
self.render_btop_title(frame, main_chunks[0], metric_store);
|
self.render_btop_title(frame, main_chunks[0], metric_store);
|
||||||
|
|
||||||
// Render system panel
|
// Render hosts panel on left
|
||||||
self.render_system_panel(frame, left_chunks[0], metric_store);
|
self.render_hosts_panel(frame, left_chunks[0], metric_store);
|
||||||
|
|
||||||
// Render services widget for current host
|
// Render system panel below hosts
|
||||||
if let Some(hostname) = self.current_host.clone() {
|
let system_area = left_chunks[1];
|
||||||
let is_focused = true; // Always show service selection
|
self.render_system_panel(frame, system_area, metric_store);
|
||||||
let host_widgets = self.get_or_create_host_widgets(&hostname);
|
|
||||||
host_widgets
|
// Render services panel on right
|
||||||
.services_widget
|
let services_area = content_chunks[1];
|
||||||
.render(frame, content_chunks[1], is_focused); // Services takes full right side
|
self.render_services_panel(frame, services_area);
|
||||||
}
|
|
||||||
|
|
||||||
// Render statusbar at the bottom
|
// Render statusbar at the bottom
|
||||||
self.render_statusbar(frame, main_chunks[2]); // main_chunks[2] is the statusbar area
|
self.render_statusbar(frame, main_chunks[2], metric_store);
|
||||||
|
|
||||||
|
// Render popup menu on top of everything if active
|
||||||
|
if let Some(ref popup) = self.popup_menu {
|
||||||
|
self.render_popup_menu(frame, popup);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Render offline host popup on top of everything
|
||||||
|
if current_host_offline {
|
||||||
|
self.render_offline_popup(frame, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return all areas for mouse event handling
|
||||||
|
(main_chunks[0], system_area, services_area)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Render btop-style minimal title with host status colors
|
/// Render btop-style minimal title with host status colors
|
||||||
fn render_btop_title(&self, frame: &mut Frame, area: Rect, metric_store: &MetricStore) {
|
fn render_btop_title(&self, frame: &mut Frame, area: Rect, metric_store: &MetricStore) {
|
||||||
use ratatui::style::Modifier;
|
use ratatui::style::Modifier;
|
||||||
use ratatui::text::{Line, Span};
|
use ratatui::text::{Line, Span};
|
||||||
use theme::StatusIcons;
|
|
||||||
|
|
||||||
if self.available_hosts.is_empty() {
|
if self.available_hosts.is_empty() {
|
||||||
let title_text = "cm-dashboard • no hosts discovered";
|
let title_text = "cm-dashboard • no hosts discovered";
|
||||||
@@ -518,115 +572,182 @@ impl TuiApp {
|
|||||||
// Use the worst status color as background
|
// Use the worst status color as background
|
||||||
let background_color = Theme::status_color(worst_status);
|
let background_color = Theme::status_color(worst_status);
|
||||||
|
|
||||||
// Split the title bar into left and right sections
|
// Single line title bar showing dashboard name (left) and dashboard IP (right)
|
||||||
let chunks = Layout::default()
|
let left_text = format!(" cm-dashboard v{}", env!("CARGO_PKG_VERSION"));
|
||||||
.direction(Direction::Horizontal)
|
|
||||||
.constraints([Constraint::Length(22), Constraint::Min(0)])
|
|
||||||
.split(area);
|
|
||||||
|
|
||||||
// Left side: "cm-dashboard" text with version
|
// Get dashboard local IP for right side
|
||||||
let title_text = format!(" cm-dashboard v{}", env!("CARGO_PKG_VERSION"));
|
let dashboard_ip = Self::get_local_ip();
|
||||||
let left_span = Span::styled(
|
let right_text = format!("{} ", dashboard_ip);
|
||||||
&title_text,
|
|
||||||
Style::default().fg(Theme::background()).bg(background_color).add_modifier(Modifier::BOLD)
|
|
||||||
);
|
|
||||||
let left_title = Paragraph::new(Line::from(vec![left_span]))
|
|
||||||
.style(Style::default().bg(background_color));
|
|
||||||
frame.render_widget(left_title, chunks[0]);
|
|
||||||
|
|
||||||
// Right side: hosts with status indicators
|
// Calculate spacing to push right text to the right
|
||||||
let mut host_spans = Vec::new();
|
let total_text_len = left_text.len() + right_text.len();
|
||||||
|
let spacing = (area.width as usize).saturating_sub(total_text_len).max(1);
|
||||||
for (i, host) in self.available_hosts.iter().enumerate() {
|
let spacing_str = " ".repeat(spacing);
|
||||||
if i > 0 {
|
|
||||||
host_spans.push(Span::styled(
|
|
||||||
" ",
|
|
||||||
Style::default().fg(Theme::background()).bg(background_color)
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Always show normal status icon based on metrics (no command status at host level)
|
let title = Paragraph::new(Line::from(vec![
|
||||||
let host_status = self.calculate_host_status(host, metric_store);
|
Span::styled(
|
||||||
let status_icon = StatusIcons::get_icon(host_status);
|
left_text,
|
||||||
|
Style::default().fg(Theme::background()).bg(background_color).add_modifier(Modifier::BOLD)
|
||||||
// Add status icon with background color as foreground against status background
|
),
|
||||||
host_spans.push(Span::styled(
|
Span::styled(
|
||||||
format!("{} ", status_icon),
|
spacing_str,
|
||||||
Style::default().fg(Theme::background()).bg(background_color),
|
Style::default().bg(background_color)
|
||||||
));
|
),
|
||||||
|
Span::styled(
|
||||||
if Some(host) == self.current_host.as_ref() {
|
right_text,
|
||||||
// Selected host in bold background color against status background
|
Style::default().fg(Theme::background()).bg(background_color)
|
||||||
host_spans.push(Span::styled(
|
),
|
||||||
host.clone(),
|
]))
|
||||||
Style::default()
|
.style(Style::default().bg(background_color));
|
||||||
.fg(Theme::background())
|
frame.render_widget(title, area);
|
||||||
.bg(background_color)
|
|
||||||
.add_modifier(Modifier::BOLD),
|
|
||||||
));
|
|
||||||
} else {
|
|
||||||
// Other hosts in normal background color against status background
|
|
||||||
host_spans.push(Span::styled(
|
|
||||||
host.clone(),
|
|
||||||
Style::default().fg(Theme::background()).bg(background_color),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add right padding
|
|
||||||
host_spans.push(Span::styled(
|
|
||||||
" ",
|
|
||||||
Style::default().fg(Theme::background()).bg(background_color)
|
|
||||||
));
|
|
||||||
|
|
||||||
let host_line = Line::from(host_spans);
|
|
||||||
let host_title = Paragraph::new(vec![host_line])
|
|
||||||
.style(Style::default().bg(background_color))
|
|
||||||
.alignment(ratatui::layout::Alignment::Right);
|
|
||||||
frame.render_widget(host_title, chunks[1]);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Calculate overall status for a host based on its structured data
|
/// Calculate overall status for a host based on its widget statuses
|
||||||
fn calculate_host_status(&self, hostname: &str, metric_store: &MetricStore) -> Status {
|
fn calculate_host_status(&self, hostname: &str, metric_store: &MetricStore) -> Status {
|
||||||
// Check if we have structured data for this host
|
// Check if we have data for this host
|
||||||
if let Some(_agent_data) = metric_store.get_agent_data(hostname) {
|
if metric_store.get_agent_data(hostname).is_none() {
|
||||||
// Return OK since we have data
|
return Status::Offline;
|
||||||
Status::Ok
|
}
|
||||||
|
|
||||||
|
// Get actual statuses from host widgets
|
||||||
|
if let Some(host_widgets) = self.host_widgets.get(hostname) {
|
||||||
|
let system_status = host_widgets.system_widget.get_overall_status();
|
||||||
|
let services_status = host_widgets.services_widget.get_overall_status();
|
||||||
|
Status::aggregate(&[system_status, services_status])
|
||||||
} else {
|
} else {
|
||||||
Status::Offline
|
Status::Ok // No widgets yet, but data exists
|
||||||
}
|
}
|
||||||
}
|
}
|

-    /// Render dynamic statusbar with context-aware shortcuts
-    fn render_statusbar(&self, frame: &mut Frame, area: Rect) {
-        let shortcuts = self.get_context_shortcuts();
-        let statusbar_text = shortcuts.join(" • ");
-
-        let statusbar = Paragraph::new(statusbar_text)
-            .style(Typography::secondary())
-            .alignment(ratatui::layout::Alignment::Center);
+    /// Render popup menu for service actions
+    fn render_popup_menu(&self, frame: &mut Frame, popup: &PopupMenu) {
+        use ratatui::widgets::{Block, Borders, Clear, List, ListItem};
+        use ratatui::style::{Color, Modifier};
+
+        // Menu items
+        let items = vec![
+            "Start Service",
+            "Stop Service",
+            "View Logs",
+        ];
+
+        // Calculate popup size
+        let width = 20;
+        let height = items.len() as u16 + 2; // +2 for borders
+
+        // Position popup near click location, but keep it on screen
+        let screen_width = frame.size().width;
+        let screen_height = frame.size().height;
+
+        let x = if popup.x + width < screen_width {
+            popup.x
+        } else {
+            screen_width.saturating_sub(width)
+        };
+
+        let y = if popup.y + height < screen_height {
+            popup.y
+        } else {
+            screen_height.saturating_sub(height)
+        };
+
+        let popup_area = Rect {
+            x,
+            y,
+            width,
+            height,
+        };
+
+        // Create menu items with selection highlight
+        let menu_items: Vec<ListItem> = items
+            .iter()
+            .enumerate()
+            .map(|(i, item)| {
+                let style = if i == popup.selected_index {
+                    Style::default()
+                        .fg(Color::Black)
+                        .bg(Color::White)
+                        .add_modifier(Modifier::BOLD)
+                } else {
+                    Style::default().fg(Theme::primary_text())
+                };
+                ListItem::new(*item).style(style)
+            })
+            .collect();
+
+        let menu_list = List::new(menu_items)
+            .block(
+                Block::default()
+                    .borders(Borders::ALL)
+                    .style(Style::default().bg(Theme::background()).fg(Theme::primary_text()))
+            );
+
+        // Clear the area and render menu
+        frame.render_widget(Clear, popup_area);
+        frame.render_widget(menu_list, popup_area);
+    }
+
+    /// Render statusbar with host and client IPs
+    fn render_statusbar(&self, frame: &mut Frame, area: Rect, _metric_store: &MetricStore) {
+        use ratatui::text::{Line, Span};
+        use ratatui::widgets::Paragraph;
+
+        // Get current host info
+        let (hostname_str, host_ip, kernel_version, build_version, agent_version) = if let Some(hostname) = &self.current_host {
+            // Get the connection IP (the IP dashboard uses to connect to the agent)
+            let ip = if let Some(host_details) = self.config.hosts.get(hostname) {
+                host_details.get_connection_ip(hostname)
+            } else {
+                hostname.clone()
+            };
+
+            // Get kernel, build and agent versions from system widget
+            let (kernel, build, agent) = if let Some(host_widgets) = self.host_widgets.get(hostname) {
+                let kernel = host_widgets.system_widget.get_kernel_version().unwrap_or("N/A".to_string());
+                let build = host_widgets.system_widget.get_build_version().unwrap_or("N/A".to_string());
+                let agent = host_widgets.system_widget.get_agent_version().unwrap_or("N/A".to_string());
+                (kernel, build, agent)
+            } else {
+                ("N/A".to_string(), "N/A".to_string(), "N/A".to_string())
+            };
+
+            (hostname.clone(), ip, kernel, build, agent)
+        } else {
+            ("None".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string())
+        };
+
+        let left_text = format!(" Host: {} | {} | {}", hostname_str, host_ip, kernel_version);
+        let right_text = format!("Build:{} | Agent:{} ", build_version, agent_version);
+
+        // Calculate spacing to push right text to the right
+        let total_text_len = left_text.len() + right_text.len();
+        let spacing = (area.width as usize).saturating_sub(total_text_len).max(1);
+        let spacing_str = " ".repeat(spacing);
+
+        let line = Line::from(vec![
+            Span::styled(left_text, Style::default().fg(Theme::border())),
+            Span::raw(spacing_str),
+            Span::styled(right_text, Style::default().fg(Theme::border())),
+        ]);
+
+        let statusbar = Paragraph::new(line);
         frame.render_widget(statusbar, area);
     }

-    /// Get context-aware shortcuts based on focused panel
-    fn get_context_shortcuts(&self) -> Vec<String> {
-        let mut shortcuts = Vec::new();
-
-        // Global shortcuts
-        shortcuts.push("Tab: Host".to_string());
-        shortcuts.push("↑↓/jk: Select".to_string());
-        shortcuts.push("r: Rebuild".to_string());
-        shortcuts.push("B: Backup".to_string());
-        shortcuts.push("s/S: Start/Stop".to_string());
-        shortcuts.push("L: Logs".to_string());
-        shortcuts.push("t: Terminal".to_string());
-        shortcuts.push("w: Wake".to_string());
-
-        // Always show quit
-        shortcuts.push("q: Quit".to_string());
-
-        shortcuts
+    /// Get local IP address of the dashboard
+    fn get_local_ip() -> String {
+        use std::net::UdpSocket;
+
+        // Try to get local IP by creating a UDP socket
+        // This doesn't actually send data, just determines routing
+        if let Ok(socket) = UdpSocket::bind("0.0.0.0:0") {
+            if socket.connect("8.8.8.8:80").is_ok() {
+                if let Ok(addr) = socket.local_addr() {
+                    return addr.ip().to_string();
+                }
+            }
+        }
+        "N/A".to_string()
     }

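render_statusbar right-aligns the build/agent text by padding the gap between the two halves with spaces. A minimal sketch of that spacing arithmetic on its own, using a hypothetical spread() helper and plain character counts (byte length, which matches the ASCII text used here):

    // Hypothetical helper mirroring the statusbar spacing: pad the middle so the
    // right-hand text ends at the last column, keeping at least one space.
    fn spread(left: &str, right: &str, width: usize) -> String {
        let spacing = width.saturating_sub(left.len() + right.len()).max(1);
        format!("{}{}{}", left, " ".repeat(spacing), right)
    }

    fn main() {
        let line = spread(" Host: nas | 10.0.0.2", "Build:280 ", 40);
        assert_eq!(line.len(), 40); // left text, 9 spaces, right text
    }

If the terminal is narrower than the two texts combined, the .max(1) keeps a single space and the line simply overflows, which is the same behaviour as the widget code above.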
||||||
fn render_system_panel(&mut self, frame: &mut Frame, area: Rect, _metric_store: &MetricStore) {
|
fn render_system_panel(&mut self, frame: &mut Frame, area: Rect, _metric_store: &MetricStore) {
|
||||||
@@ -643,12 +764,64 @@ impl TuiApp {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/// Render offline host message with wake-up option
|
/// Render hosts panel
|
||||||
fn render_offline_host_message(&self, frame: &mut Frame, area: Rect) {
|
fn render_hosts_panel(&mut self, frame: &mut Frame, area: Rect, metric_store: &MetricStore) {
|
||||||
use ratatui::layout::Alignment;
|
use ratatui::widgets::{Block, Borders};
|
||||||
|
|
||||||
|
let hosts_block = Block::default()
|
||||||
|
.borders(Borders::ALL)
|
||||||
|
.title("hosts")
|
||||||
|
.style(Style::default().fg(Theme::border()).bg(Theme::background()))
|
||||||
|
.title_style(Style::default().fg(Theme::primary_text()));
|
||||||
|
|
||||||
|
let hosts_inner = hosts_block.inner(area);
|
||||||
|
frame.render_widget(hosts_block, area);
|
||||||
|
|
||||||
|
let localhost = self.localhost.clone();
|
||||||
|
let current_host = self.current_host.as_deref();
|
||||||
|
self.hosts_widget.render(
|
||||||
|
frame,
|
||||||
|
hosts_inner,
|
||||||
|
&self.available_hosts,
|
||||||
|
&localhost,
|
||||||
|
current_host,
|
||||||
|
metric_store,
|
||||||
|
|hostname, store| {
|
||||||
|
if store.get_agent_data(hostname).is_some() {
|
||||||
|
Status::Ok
|
||||||
|
} else {
|
||||||
|
Status::Offline
|
||||||
|
}
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Render services panel
|
||||||
|
fn render_services_panel(&mut self, frame: &mut Frame, area: Rect) {
|
||||||
|
use ratatui::widgets::{Block, Borders};
|
||||||
|
|
||||||
|
let services_block = Block::default()
|
||||||
|
.borders(Borders::ALL)
|
||||||
|
.title("services")
|
||||||
|
.style(Style::default().fg(Theme::border()).bg(Theme::background()))
|
||||||
|
.title_style(Style::default().fg(Theme::primary_text()));
|
||||||
|
|
||||||
|
let services_inner = services_block.inner(area);
|
||||||
|
frame.render_widget(services_block, area);
|
||||||
|
|
||||||
|
if let Some(hostname) = self.current_host.clone() {
|
||||||
|
let host_widgets = self.get_or_create_host_widgets(&hostname);
|
||||||
|
host_widgets.services_widget.render_content(frame, services_inner, true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Render offline host popup centered on screen
|
||||||
|
fn render_offline_popup(&self, frame: &mut Frame, screen: Rect) {
|
||||||
use ratatui::style::Modifier;
|
use ratatui::style::Modifier;
|
||||||
use ratatui::text::{Line, Span};
|
use ratatui::text::{Line, Span};
|
||||||
use ratatui::widgets::{Block, Borders, Paragraph};
|
use ratatui::widgets::{Block, Borders, Clear, Paragraph};
|
||||||
|
|
||||||
// Get hostname for message
|
// Get hostname for message
|
||||||
let hostname = self.current_host.as_ref()
|
let hostname = self.current_host.as_ref()
|
||||||
@@ -665,7 +838,7 @@ impl TuiApp {
|
|||||||
let mut lines = vec![
|
let mut lines = vec![
|
||||||
Line::from(Span::styled(
|
Line::from(Span::styled(
|
||||||
format!("Host '{}' is offline", hostname),
|
format!("Host '{}' is offline", hostname),
|
||||||
Style::default().fg(Theme::muted_text()).add_modifier(Modifier::BOLD),
|
Style::default().fg(Theme::status_color(Status::Offline)).add_modifier(Modifier::BOLD),
|
||||||
)),
|
)),
|
||||||
Line::from(""),
|
Line::from(""),
|
||||||
];
|
];
|
||||||
@@ -673,44 +846,38 @@ impl TuiApp {
|
|||||||
if has_mac {
|
if has_mac {
|
||||||
lines.push(Line::from(Span::styled(
|
lines.push(Line::from(Span::styled(
|
||||||
"Press 'w' to wake up host",
|
"Press 'w' to wake up host",
|
||||||
Style::default().fg(Theme::primary_text()).add_modifier(Modifier::BOLD),
|
Style::default().fg(Theme::primary_text()),
|
||||||
)));
|
)));
|
||||||
} else {
|
} else {
|
||||||
lines.push(Line::from(Span::styled(
|
lines.push(Line::from(Span::styled(
|
||||||
"No MAC address configured - cannot wake up",
|
"No MAC address configured",
|
||||||
Style::default().fg(Theme::muted_text()),
|
Style::default().fg(Theme::muted_text()),
|
||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create centered message
|
// Calculate popup size and center it
|
||||||
|
let popup_width = 32u16;
|
||||||
|
let popup_height = 5u16;
|
||||||
|
let x = screen.width.saturating_sub(popup_width) / 2;
|
||||||
|
let y = screen.height.saturating_sub(popup_height) / 2;
|
||||||
|
|
||||||
|
let popup_area = Rect {
|
||||||
|
x,
|
||||||
|
y,
|
||||||
|
width: popup_width,
|
||||||
|
height: popup_height,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Render popup with border
|
||||||
let message = Paragraph::new(lines)
|
let message = Paragraph::new(lines)
|
||||||
.block(Block::default()
|
.block(Block::default()
|
||||||
.borders(Borders::ALL)
|
.borders(Borders::ALL)
|
||||||
.border_style(Style::default().fg(Theme::muted_text()))
|
.border_style(Style::default().fg(Theme::status_color(Status::Offline)))
|
||||||
.title(" Offline Host ")
|
.title(" Offline ")
|
||||||
.title_style(Style::default().fg(Theme::muted_text()).add_modifier(Modifier::BOLD)))
|
.title_style(Style::default().fg(Theme::status_color(Status::Offline)).add_modifier(Modifier::BOLD)))
|
||||||
.style(Style::default().bg(Theme::background()).fg(Theme::primary_text()))
|
.style(Style::default().bg(Theme::background()).fg(Theme::primary_text()));
|
||||||
.alignment(Alignment::Center);
|
|
||||||
|
|
||||||
// Center the message in the available area
|
|
||||||
let popup_area = ratatui::layout::Layout::default()
|
|
||||||
.direction(Direction::Vertical)
|
|
||||||
.constraints([
|
|
||||||
Constraint::Percentage(40),
|
|
||||||
Constraint::Length(6),
|
|
||||||
Constraint::Percentage(40),
|
|
||||||
])
|
|
||||||
.split(area)[1];
|
|
||||||
|
|
||||||
let popup_area = ratatui::layout::Layout::default()
|
|
||||||
.direction(Direction::Horizontal)
|
|
||||||
.constraints([
|
|
||||||
Constraint::Percentage(25),
|
|
||||||
Constraint::Percentage(50),
|
|
||||||
Constraint::Percentage(25),
|
|
||||||
])
|
|
||||||
.split(popup_area)[1];
|
|
||||||
|
|
||||||
|
frame.render_widget(Clear, popup_area);
|
||||||
frame.render_widget(message, popup_area);
|
frame.render_widget(message, popup_area);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -282,19 +282,14 @@ impl StatusIcons {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Components {
|
impl Components {
|
||||||
/// Standard widget block with title using bright foreground for title
|
/// Standard widget block with title using primary text color for title
|
||||||
pub fn widget_block(title: &str) -> Block<'_> {
|
pub fn widget_block(title: &str) -> Block<'_> {
|
||||||
Block::default()
|
Block::default()
|
||||||
.title(title)
|
.title(title)
|
||||||
.borders(Borders::ALL)
|
.borders(Borders::ALL)
|
||||||
.style(Style::default().fg(Theme::border()).bg(Theme::background()))
|
.style(Style::default().fg(Theme::border()).bg(Theme::background()))
|
||||||
.title_style(
|
.title_style(Style::default().fg(Theme::primary_text()))
|
||||||
Style::default()
|
|
||||||
.fg(Theme::border_title())
|
|
||||||
.bg(Theme::background()),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Typography {
|
impl Typography {
|
||||||
@@ -307,10 +302,10 @@ impl Typography {
|
|||||||
.add_modifier(Modifier::BOLD)
|
.add_modifier(Modifier::BOLD)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Secondary content text
|
/// Secondary content text (metrics without status)
|
||||||
pub fn secondary() -> Style {
|
pub fn secondary() -> Style {
|
||||||
Style::default()
|
Style::default()
|
||||||
.fg(Theme::secondary_text())
|
.fg(Theme::highlight())
|
||||||
.bg(Theme::background())
|
.bg(Theme::background())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
173
dashboard/src/ui/widgets/hosts.rs
Normal file
173
dashboard/src/ui/widgets/hosts.rs
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
use ratatui::{
|
||||||
|
layout::Rect,
|
||||||
|
style::{Modifier, Style},
|
||||||
|
text::{Line, Span},
|
||||||
|
Frame,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::metrics::MetricStore;
|
||||||
|
use crate::ui::theme::Theme;
|
||||||
|
use cm_dashboard_shared::Status;
|
||||||
|
|
||||||
|
/// Hosts widget displaying all available hosts with selector bar navigation
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct HostsWidget {
|
||||||
|
/// Currently selected host index (for blue selector bar)
|
||||||
|
pub selected_index: usize,
|
||||||
|
/// Scroll offset for viewport
|
||||||
|
pub scroll_offset: usize,
|
||||||
|
/// Last rendered viewport height for scroll calculations
|
||||||
|
last_viewport_height: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HostsWidget {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
selected_index: 0,
|
||||||
|
scroll_offset: 0,
|
||||||
|
last_viewport_height: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Ensure selected item is visible in viewport (auto-scroll)
|
||||||
|
fn ensure_selected_visible(&mut self) {
|
||||||
|
if self.last_viewport_height == 0 {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let viewport_height = self.last_viewport_height;
|
||||||
|
|
||||||
|
if self.selected_index < self.scroll_offset {
|
||||||
|
self.scroll_offset = self.selected_index;
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.selected_index >= self.scroll_offset + viewport_height {
|
||||||
|
self.scroll_offset = self.selected_index.saturating_sub(viewport_height.saturating_sub(1));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set selected index (used when switching hosts via TAB)
|
||||||
|
pub fn set_selected_index(&mut self, index: usize, total_hosts: usize) {
|
||||||
|
if index < total_hosts {
|
||||||
|
self.selected_index = index;
|
||||||
|
self.ensure_selected_visible();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Calculate the required height for hosts panel based on host count and available width
|
||||||
|
pub fn required_height(num_hosts: usize, available_width: u16) -> u16 {
|
||||||
|
if num_hosts == 0 {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
// Estimate column width: icon(2) + arrow(2) + max_hostname(~12) + padding(2) = ~18
|
||||||
|
let col_width = 18u16;
|
||||||
|
let num_columns = (available_width / col_width).max(1) as usize;
|
||||||
|
let rows_needed = (num_hosts + num_columns - 1) / num_columns;
|
||||||
|
rows_needed.max(1) as u16
|
||||||
|
}
|
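required_height rounds up: with the assumed 18-cell column width, a 60-cell-wide panel yields 3 columns, so 7 hosts need ceil(7/3) = 3 rows. The same arithmetic as a standalone sketch:

    // Mirrors the estimate above: fixed 18-cell columns, rows = ceil(hosts / columns).
    fn required_height(num_hosts: usize, available_width: u16) -> u16 {
        if num_hosts == 0 {
            return 1;
        }
        let col_width = 18u16;
        let num_columns = (available_width / col_width).max(1) as usize;
        let rows_needed = (num_hosts + num_columns - 1) / num_columns;
        rows_needed.max(1) as u16
    }

    fn main() {
        assert_eq!(required_height(7, 60), 3); // three 18-cell columns fit in 60 cells
        assert_eq!(required_height(2, 10), 2); // too narrow: falls back to a single column
    }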
||||||
|
|
||||||
|
/// Render hosts list in dynamic columns based on available width
|
||||||
|
pub fn render<F>(
|
||||||
|
&mut self,
|
||||||
|
frame: &mut Frame,
|
||||||
|
area: Rect,
|
||||||
|
available_hosts: &[String],
|
||||||
|
localhost: &str,
|
||||||
|
current_host: Option<&str>,
|
||||||
|
metric_store: &MetricStore,
|
||||||
|
mut calculate_host_status: F,
|
||||||
|
_is_focused: bool,
|
||||||
|
) where F: FnMut(&str, &MetricStore) -> Status {
|
||||||
|
use crate::ui::theme::StatusIcons;
|
||||||
|
use ratatui::layout::{Constraint, Direction, Layout};
|
||||||
|
|
||||||
|
if available_hosts.is_empty() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store viewport height for scroll calculations
|
||||||
|
self.last_viewport_height = area.height as usize;
|
||||||
|
|
||||||
|
// Calculate column width and number of columns that fit
|
||||||
|
let col_width = 18u16;
|
||||||
|
let num_columns = (area.width / col_width).max(1) as usize;
|
||||||
|
let rows_per_column = (available_hosts.len() + num_columns - 1) / num_columns;
|
||||||
|
|
||||||
|
// Create column constraints
|
||||||
|
let constraints: Vec<Constraint> = (0..num_columns)
|
||||||
|
.map(|_| Constraint::Ratio(1, num_columns as u32))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let columns = Layout::default()
|
||||||
|
.direction(Direction::Horizontal)
|
||||||
|
.constraints(constraints)
|
||||||
|
.split(area);
|
||||||
|
|
||||||
|
// Build host line helper
|
||||||
|
let mut build_host_line = |hostname: &str| -> Line {
|
||||||
|
let host_status = calculate_host_status(hostname, metric_store);
|
||||||
|
let status_icon = StatusIcons::get_icon(host_status);
|
||||||
|
let status_color = Theme::status_color(host_status);
|
||||||
|
|
||||||
|
let is_current = current_host == Some(hostname);
|
||||||
|
let is_localhost = hostname == localhost;
|
||||||
|
|
||||||
|
let mut spans = vec![Span::styled(
|
||||||
|
format!("{} ", status_icon),
|
||||||
|
Style::default().fg(status_color),
|
||||||
|
)];
|
||||||
|
|
||||||
|
if is_current {
|
||||||
|
spans.push(Span::styled(
|
||||||
|
"► ",
|
||||||
|
Style::default()
|
||||||
|
.fg(Theme::primary_text())
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let hostname_display = if is_localhost {
|
||||||
|
format!("{}*", hostname)
|
||||||
|
} else {
|
||||||
|
hostname.to_string()
|
||||||
|
};
|
||||||
|
|
||||||
|
spans.push(Span::styled(
|
||||||
|
hostname_display,
|
||||||
|
if is_current {
|
||||||
|
Style::default()
|
||||||
|
.fg(Theme::primary_text())
|
||||||
|
.add_modifier(Modifier::BOLD)
|
||||||
|
} else {
|
||||||
|
Style::default().fg(Theme::primary_text())
|
||||||
|
},
|
||||||
|
));
|
||||||
|
|
||||||
|
Line::from(spans)
|
||||||
|
};
|
||||||
|
|
||||||
|
// Render each column
|
||||||
|
for col_idx in 0..num_columns {
|
||||||
|
let start = col_idx * rows_per_column;
|
||||||
|
let hosts_in_col: Vec<Line> = available_hosts
|
||||||
|
.iter()
|
||||||
|
.skip(start)
|
||||||
|
.take(rows_per_column)
|
||||||
|
.map(|hostname| build_host_line(hostname))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
if !hosts_in_col.is_empty() {
|
||||||
|
let text = ratatui::text::Text::from(hosts_in_col);
|
||||||
|
let para = ratatui::widgets::Paragraph::new(text);
|
||||||
|
frame.render_widget(para, columns[col_idx]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update selected index to match current host
|
||||||
|
if let Some(current) = current_host {
|
||||||
|
if let Some(idx) = available_hosts.iter().position(|h| h == current) {
|
||||||
|
self.selected_index = idx;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,8 +1,10 @@
|
|||||||
use cm_dashboard_shared::AgentData;
|
use cm_dashboard_shared::AgentData;
|
||||||
|
|
||||||
|
pub mod hosts;
|
||||||
pub mod services;
|
pub mod services;
|
||||||
pub mod system;
|
pub mod system;
|
||||||
|
|
||||||
|
pub use hosts::HostsWidget;
|
||||||
pub use services::ServicesWidget;
|
pub use services::ServicesWidget;
|
||||||
pub use system::SystemWidget;
|
pub use system::SystemWidget;
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ use ratatui::{
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use tracing::debug;
|
use tracing::debug;
|
||||||
|
|
||||||
use crate::ui::theme::{Components, StatusIcons, Theme, Typography};
|
use crate::ui::theme::{StatusIcons, Theme, Typography};
|
||||||
use ratatui::style::Style;
|
use ratatui::style::Style;
|
||||||
|
|
||||||
/// Column visibility configuration based on terminal width
|
/// Column visibility configuration based on terminal width
|
||||||
@@ -91,14 +91,17 @@ pub struct ServicesWidget {
|
|||||||
/// Last update indicator
|
/// Last update indicator
|
||||||
has_data: bool,
|
has_data: bool,
|
||||||
/// Currently selected service index (for navigation cursor)
|
/// Currently selected service index (for navigation cursor)
|
||||||
selected_index: usize,
|
pub selected_index: usize,
|
||||||
|
/// Scroll offset for viewport (which display line is at the top)
|
||||||
|
pub scroll_offset: usize,
|
||||||
|
/// Last rendered viewport height (for accurate scroll bounds)
|
||||||
|
last_viewport_height: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct ServiceInfo {
|
struct ServiceInfo {
|
||||||
metrics: Vec<(String, f32, Option<String>)>, // (label, value, unit)
|
metrics: Vec<(String, f32, Option<String>)>, // (label, value, unit)
|
||||||
widget_status: Status,
|
widget_status: Status,
|
||||||
service_type: String, // "nginx_site", "container", "image", or empty for parent services
|
|
||||||
memory_bytes: Option<u64>,
|
memory_bytes: Option<u64>,
|
||||||
restart_count: Option<u32>,
|
restart_count: Option<u32>,
|
||||||
uptime_seconds: Option<u64>,
|
uptime_seconds: Option<u64>,
|
||||||
@@ -112,9 +115,16 @@ impl ServicesWidget {
|
|||||||
status: Status::Unknown,
|
status: Status::Unknown,
|
||||||
has_data: false,
|
has_data: false,
|
||||||
selected_index: 0,
|
selected_index: 0,
|
||||||
|
scroll_offset: 0,
|
||||||
|
last_viewport_height: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get overall services status
|
||||||
|
pub fn get_overall_status(&self) -> Status {
|
||||||
|
self.status
|
||||||
|
}
|
||||||
|
|
||||||
/// Extract service name and determine if it's a parent or sub-service
|
/// Extract service name and determine if it's a parent or sub-service
|
||||||
#[allow(dead_code)]
|
#[allow(dead_code)]
|
||||||
fn extract_service_info(metric_name: &str) -> Option<(String, Option<String>)> {
|
fn extract_service_info(metric_name: &str) -> Option<(String, Option<String>)> {
|
||||||
@@ -145,9 +155,10 @@ impl ServicesWidget {
|
|||||||
|
|
||||||
/// Format parent service line - returns text without icon for span formatting
|
/// Format parent service line - returns text without icon for span formatting
|
||||||
fn format_parent_service_line(&self, name: &str, info: &ServiceInfo, columns: ColumnVisibility) -> String {
|
fn format_parent_service_line(&self, name: &str, info: &ServiceInfo, columns: ColumnVisibility) -> String {
|
||||||
|
// Account for icon prefix "● " (2 chars) in name column width
|
||||||
|
let name_width = ColumnVisibility::NAME_WIDTH.saturating_sub(2) as usize;
|
||||||
// Truncate long service names to fit layout
|
// Truncate long service names to fit layout
|
||||||
// NAME_WIDTH - 3 chars for "..." = max displayable chars
|
let max_name_len = name_width.saturating_sub(3); // -3 for "..."
|
||||||
let max_name_len = (ColumnVisibility::NAME_WIDTH - 3) as usize;
|
|
||||||
let short_name = if name.len() > max_name_len {
|
let short_name = if name.len() > max_name_len {
|
||||||
format!("{}...", &name[..max_name_len.saturating_sub(3)])
|
format!("{}...", &name[..max_name_len.saturating_sub(3)])
|
||||||
} else {
|
} else {
|
||||||
@@ -203,7 +214,7 @@ impl ServicesWidget {
|
|||||||
// Build format string based on column visibility
|
// Build format string based on column visibility
|
||||||
let mut parts = Vec::new();
|
let mut parts = Vec::new();
|
||||||
if columns.show_name {
|
if columns.show_name {
|
||||||
parts.push(format!("{:<width$}", short_name, width = ColumnVisibility::NAME_WIDTH as usize));
|
parts.push(format!("{:<width$}", short_name, width = name_width));
|
||||||
}
|
}
|
||||||
if columns.show_status {
|
if columns.show_status {
|
||||||
parts.push(format!("{:<width$}", status_str, width = ColumnVisibility::STATUS_WIDTH as usize));
|
parts.push(format!("{:<width$}", status_str, width = ColumnVisibility::STATUS_WIDTH as usize));
|
||||||
@@ -277,7 +288,7 @@ impl ServicesWidget {
|
|||||||
let tree_symbol = if is_last { "└─" } else { "├─" };
|
let tree_symbol = if is_last { "└─" } else { "├─" };
|
||||||
|
|
||||||
if info.widget_status == Status::Info {
|
if info.widget_status == Status::Info {
|
||||||
// Informational data - no status icon, show metrics if available
|
// Informational data - no status icon, use blue color
|
||||||
let mut spans = vec![
|
let mut spans = vec![
|
||||||
// Indentation and tree prefix
|
// Indentation and tree prefix
|
||||||
ratatui::text::Span::styled(
|
ratatui::text::Span::styled(
|
||||||
@@ -288,7 +299,7 @@ impl ServicesWidget {
|
|||||||
ratatui::text::Span::styled(
|
ratatui::text::Span::styled(
|
||||||
short_name,
|
short_name,
|
||||||
Style::default()
|
Style::default()
|
||||||
.fg(Theme::secondary_text())
|
.fg(Theme::highlight())
|
||||||
.bg(Theme::background()),
|
.bg(Theme::background()),
|
||||||
),
|
),
|
||||||
];
|
];
|
||||||
@@ -298,13 +309,14 @@ impl ServicesWidget {
|
|||||||
spans.push(ratatui::text::Span::styled(
|
spans.push(ratatui::text::Span::styled(
|
||||||
status_str,
|
status_str,
|
||||||
Style::default()
|
Style::default()
|
||||||
.fg(Theme::secondary_text())
|
.fg(Theme::highlight())
|
||||||
.bg(Theme::background()),
|
.bg(Theme::background()),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
spans
|
spans
|
||||||
} else {
|
} else {
|
||||||
|
// Sub-services with status - use secondary_text
|
||||||
vec![
|
vec![
|
||||||
// Indentation and tree prefix
|
// Indentation and tree prefix
|
||||||
ratatui::text::Span::styled(
|
ratatui::text::Span::styled(
|
||||||
@@ -338,18 +350,86 @@ impl ServicesWidget {
|
|||||||
pub fn select_previous(&mut self) {
|
pub fn select_previous(&mut self) {
|
||||||
if self.selected_index > 0 {
|
if self.selected_index > 0 {
|
||||||
self.selected_index -= 1;
|
self.selected_index -= 1;
|
||||||
|
self.ensure_selected_visible();
|
||||||
}
|
}
|
||||||
debug!("Service selection moved up to: {}", self.selected_index);
|
debug!("Service selection moved up to: {}", self.selected_index);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Move selection down
|
/// Move selection down
|
||||||
pub fn select_next(&mut self, total_services: usize) {
|
pub fn select_next(&mut self, total_services: usize) {
|
||||||
if total_services > 0 && self.selected_index < total_services.saturating_sub(1) {
|
if total_services > 0 && self.selected_index < total_services.saturating_sub(1) {
|
||||||
self.selected_index += 1;
|
self.selected_index += 1;
|
||||||
|
self.ensure_selected_visible();
|
||||||
}
|
}
|
||||||
debug!("Service selection: {}/{}", self.selected_index, total_services);
|
debug!("Service selection: {}/{}", self.selected_index, total_services);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Convert parent service index to display line index
|
||||||
|
fn parent_index_to_display_line(&self, parent_index: usize) -> usize {
|
||||||
|
let mut parent_services: Vec<_> = self.parent_services.iter().collect();
|
||||||
|
parent_services.sort_by(|(a, _), (b, _)| a.cmp(b));
|
||||||
|
|
||||||
|
let mut display_line = 0;
|
||||||
|
for (idx, (parent_name, _)) in parent_services.iter().enumerate() {
|
||||||
|
if idx == parent_index {
|
||||||
|
return display_line;
|
||||||
|
}
|
||||||
|
display_line += 1; // Parent service line
|
||||||
|
|
||||||
|
// Add sub-service lines
|
||||||
|
if let Some(sub_list) = self.sub_services.get(*parent_name) {
|
||||||
|
display_line += sub_list.len();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
display_line
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Ensure the currently selected service is visible in the viewport
|
||||||
|
fn ensure_selected_visible(&mut self) {
|
||||||
|
if self.last_viewport_height == 0 {
|
||||||
|
return; // Can't adjust without knowing viewport size
|
||||||
|
}
|
||||||
|
|
||||||
|
let display_line = self.parent_index_to_display_line(self.selected_index);
|
||||||
|
let total_display_lines = self.get_total_display_lines();
|
||||||
|
let viewport_height = self.last_viewport_height;
|
||||||
|
|
||||||
|
// Check if selected line is above visible area
|
||||||
|
if display_line < self.scroll_offset {
|
||||||
|
self.scroll_offset = display_line;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate current effective viewport (accounting for "more below" if present)
|
||||||
|
let current_remaining = total_display_lines.saturating_sub(self.scroll_offset);
|
||||||
|
let current_has_more = current_remaining > viewport_height;
|
||||||
|
let current_effective = if current_has_more {
|
||||||
|
viewport_height.saturating_sub(1)
|
||||||
|
} else {
|
||||||
|
viewport_height
|
||||||
|
};
|
||||||
|
|
||||||
|
// Check if selected line is below current visible area
|
||||||
|
if display_line >= self.scroll_offset + current_effective {
|
||||||
|
// Need to scroll down. Position selected line so there's room for "more below" if needed
|
||||||
|
// Strategy: if there are lines below the selected line, don't put it at the very bottom
|
||||||
|
let has_content_below = display_line < total_display_lines - 1;
|
||||||
|
|
||||||
|
if has_content_below {
|
||||||
|
// Leave room for "... X more below" message by positioning selected line
|
||||||
|
// one position higher than the last line
|
||||||
|
let target_position = viewport_height.saturating_sub(2);
|
||||||
|
self.scroll_offset = display_line.saturating_sub(target_position);
|
||||||
|
} else {
|
||||||
|
// This is the last line, can put it at the bottom
|
||||||
|
self.scroll_offset = display_line.saturating_sub(viewport_height - 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
debug!("Auto-scroll: selected={}, display_line={}, scroll_offset={}, viewport={}, total={}",
|
||||||
|
self.selected_index, display_line, self.scroll_offset, viewport_height, total_display_lines);
|
||||||
|
}
|
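The auto-scroll above keeps the selection on screen while leaving the last row free for the "... X more below" marker whenever content remains. A simplified sketch of that placement rule, as a hypothetical free function over plain line indices:

    // Hypothetical mirror of the auto-scroll rule: keep `selected` visible in a
    // `viewport`-line window, reserving the bottom row when more content follows.
    fn scroll_for_selection(selected: usize, total: usize, viewport: usize, mut offset: usize) -> usize {
        if selected < offset {
            return selected; // selection scrolled above the window: snap to it
        }
        if selected >= offset + viewport {
            let has_content_below = selected < total.saturating_sub(1);
            let target_row = if has_content_below {
                viewport.saturating_sub(2) // leave the last row for the "more below" marker
            } else {
                viewport.saturating_sub(1)
            };
            offset = selected.saturating_sub(target_row);
        }
        offset
    }

    fn main() {
        // 30 lines, 10-line window, selection jumps to line 12:
        // line 12 lands on row 8, leaving row 9 for the marker.
        assert_eq!(scroll_for_selection(12, 30, 10, 0), 4);
        // Selecting the very last line may sit on the bottom row.
        assert_eq!(scroll_for_selection(29, 30, 10, 15), 20);
    }

The widget's real method also folds the current "more below" state into the visibility check; the sketch keeps only the placement rule.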
||||||
|
|
||||||
/// Get currently selected service name (for actions)
|
/// Get currently selected service name (for actions)
|
||||||
/// Only returns parent service names since only parent services can be selected
|
/// Only returns parent service names since only parent services can be selected
|
||||||
pub fn get_selected_service(&self) -> Option<String> {
|
pub fn get_selected_service(&self) -> Option<String> {
|
||||||
@@ -366,6 +446,81 @@ impl ServicesWidget {
|
|||||||
self.parent_services.len()
|
self.parent_services.len()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get total display lines (parent services + sub-services)
|
||||||
|
pub fn get_total_display_lines(&self) -> usize {
|
||||||
|
let mut total = self.parent_services.len();
|
||||||
|
for sub_list in self.sub_services.values() {
|
||||||
|
total += sub_list.len();
|
||||||
|
}
|
||||||
|
total
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Scroll down by one line
|
||||||
|
pub fn scroll_down(&mut self, _visible_height: usize) {
|
||||||
|
let total_lines = self.get_total_display_lines();
|
||||||
|
|
||||||
|
// Use last_viewport_height if available (more accurate), otherwise can't scroll
|
||||||
|
let viewport_height = if self.last_viewport_height > 0 {
|
||||||
|
self.last_viewport_height
|
||||||
|
} else {
|
||||||
|
return; // Can't scroll without knowing viewport size
|
||||||
|
};
|
||||||
|
|
||||||
|
// Calculate exact max scroll to match render logic
|
||||||
|
// Stop scrolling when all remaining content fits in viewport
|
||||||
|
// At scroll_offset N: remaining = total_lines - N
|
||||||
|
// We can show all when: remaining <= viewport_height
|
||||||
|
// So max_scroll is when: total_lines - max_scroll = viewport_height
|
||||||
|
// Therefore: max_scroll = total_lines - viewport_height (but at least 0)
|
||||||
|
let max_scroll = total_lines.saturating_sub(viewport_height);
|
||||||
|
|
||||||
|
debug!("Scroll down: total={}, viewport={}, offset={}, max={}", total_lines, viewport_height, self.scroll_offset, max_scroll);
|
||||||
|
|
||||||
|
if self.scroll_offset < max_scroll {
|
||||||
|
self.scroll_offset += 1;
|
||||||
|
}
|
||||||
|
}
|
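The bound computed above is the first offset at which everything left fits the viewport: with 25 display lines and a 10-line viewport, scrolling stops at offset 15 so lines 15..=24 fill the last page. A small sketch of that bound and the matching clamp, as hypothetical free functions rather than widget methods:

    // Hypothetical helpers mirroring the scroll bound used above.
    fn max_scroll(total_lines: usize, viewport_height: usize) -> usize {
        total_lines.saturating_sub(viewport_height)
    }

    // Clamp an existing offset after the content or viewport changes.
    fn clamp_offset(offset: usize, total_lines: usize, viewport_height: usize) -> usize {
        offset.min(max_scroll(total_lines, viewport_height))
    }

    fn main() {
        assert_eq!(max_scroll(25, 10), 15); // last page shows lines 15..=24
        assert_eq!(max_scroll(8, 10), 0);   // everything fits, no scrolling
        assert_eq!(clamp_offset(20, 25, 10), 15);
    }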
||||||
|
|
||||||
|
/// Scroll up by one line
|
||||||
|
pub fn scroll_up(&mut self) {
|
||||||
|
if self.scroll_offset > 0 {
|
||||||
|
self.scroll_offset -= 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Map a display line index to a parent service index (returns None if clicked on sub-service)
|
||||||
|
pub fn display_line_to_parent_index(&self, display_line_index: usize) -> Option<usize> {
|
||||||
|
// Build the same display list to map line index to parent service index
|
||||||
|
let mut parent_index = 0;
|
||||||
|
let mut line_index = 0;
|
||||||
|
|
||||||
|
let mut parent_services: Vec<_> = self.parent_services.iter().collect();
|
||||||
|
parent_services.sort_by(|(a, _), (b, _)| a.cmp(b));
|
||||||
|
|
||||||
|
for (parent_name, _) in parent_services {
|
||||||
|
// Check if this line index matches a parent service
|
||||||
|
if line_index == display_line_index {
|
||||||
|
return Some(parent_index);
|
||||||
|
}
|
||||||
|
line_index += 1;
|
||||||
|
|
||||||
|
// Add sub-services for this parent (if any)
|
||||||
|
if let Some(sub_list) = self.sub_services.get(parent_name) {
|
||||||
|
for _ in sub_list {
|
||||||
|
if line_index == display_line_index {
|
||||||
|
// Clicked on a sub-service - return None (can't select sub-services)
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
line_index += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
parent_index += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
/// Calculate which parent service index corresponds to a display line index
|
/// Calculate which parent service index corresponds to a display line index
|
||||||
fn calculate_parent_service_index(&self, display_line_index: &usize) -> usize {
|
fn calculate_parent_service_index(&self, display_line_index: &usize) -> usize {
|
||||||
@@ -407,7 +562,6 @@ impl Widget for ServicesWidget {
|
|||||||
let parent_info = ServiceInfo {
|
let parent_info = ServiceInfo {
|
||||||
metrics: Vec::new(), // Parent services don't have custom metrics
|
metrics: Vec::new(), // Parent services don't have custom metrics
|
||||||
widget_status: service.service_status,
|
widget_status: service.service_status,
|
||||||
service_type: String::new(), // Parent services have no type
|
|
||||||
memory_bytes: service.memory_bytes,
|
memory_bytes: service.memory_bytes,
|
||||||
restart_count: service.restart_count,
|
restart_count: service.restart_count,
|
||||||
uptime_seconds: service.uptime_seconds,
|
uptime_seconds: service.uptime_seconds,
|
||||||
@@ -426,7 +580,6 @@ impl Widget for ServicesWidget {
|
|||||||
let sub_info = ServiceInfo {
|
let sub_info = ServiceInfo {
|
||||||
metrics,
|
metrics,
|
||||||
widget_status: sub_service.service_status,
|
widget_status: sub_service.service_status,
|
||||||
service_type: sub_service.service_type.clone(),
|
|
||||||
memory_bytes: None, // Sub-services don't have individual metrics yet
|
memory_bytes: None, // Sub-services don't have individual metrics yet
|
||||||
restart_count: None,
|
restart_count: None,
|
||||||
uptime_seconds: None,
|
uptime_seconds: None,
|
||||||
@@ -471,7 +624,6 @@ impl ServicesWidget {
|
|||||||
.or_insert(ServiceInfo {
|
.or_insert(ServiceInfo {
|
||||||
metrics: Vec::new(),
|
metrics: Vec::new(),
|
||||||
widget_status: Status::Unknown,
|
widget_status: Status::Unknown,
|
||||||
service_type: String::new(),
|
|
||||||
memory_bytes: None,
|
memory_bytes: None,
|
||||||
restart_count: None,
|
restart_count: None,
|
||||||
uptime_seconds: None,
|
uptime_seconds: None,
|
||||||
@@ -500,7 +652,6 @@ impl ServicesWidget {
|
|||||||
ServiceInfo {
|
ServiceInfo {
|
||||||
metrics: Vec::new(),
|
metrics: Vec::new(),
|
||||||
widget_status: Status::Unknown,
|
widget_status: Status::Unknown,
|
||||||
service_type: String::new(), // Unknown type in legacy path
|
|
||||||
memory_bytes: None,
|
memory_bytes: None,
|
||||||
restart_count: None,
|
restart_count: None,
|
||||||
uptime_seconds: None,
|
uptime_seconds: None,
|
||||||
@@ -542,12 +693,23 @@ impl ServicesWidget {
|
|||||||
self.selected_index = total_count - 1;
|
self.selected_index = total_count - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Clamp scroll offset to valid range after update
|
||||||
|
// This prevents scroll issues when switching between hosts or when service count changes
|
||||||
|
let total_display_lines = self.get_total_display_lines();
|
||||||
|
if total_display_lines == 0 {
|
||||||
|
self.scroll_offset = 0;
|
||||||
|
} else if self.scroll_offset >= total_display_lines {
|
||||||
|
// Clamp to max valid value, not reset to 0
|
||||||
|
self.scroll_offset = total_display_lines.saturating_sub(1);
|
||||||
|
}
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
"Services widget updated: {} parent services, {} sub-service groups, total={}, selected={}, status={:?}",
|
"Services widget updated: {} parent services, {} sub-service groups, total={}, selected={}, scroll={}, status={:?}",
|
||||||
self.parent_services.len(),
|
self.parent_services.len(),
|
||||||
self.sub_services.len(),
|
self.sub_services.len(),
|
||||||
total_count,
|
total_count,
|
||||||
self.selected_index,
|
self.selected_index,
|
||||||
|
self.scroll_offset,
|
||||||
self.status
|
self.status
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -555,22 +717,17 @@ impl ServicesWidget {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl ServicesWidget {
|
impl ServicesWidget {
|
||||||
|
/// Render services content WITHOUT block (for use inside panel)
|
||||||
/// Render with focus
|
pub fn render_content(&mut self, frame: &mut Frame, area: Rect, is_focused: bool) {
|
||||||
pub fn render(&mut self, frame: &mut Frame, area: Rect, is_focused: bool) {
|
|
||||||
let services_block = Components::widget_block("services");
|
|
||||||
let inner_area = services_block.inner(area);
|
|
||||||
frame.render_widget(services_block, area);
|
|
||||||
|
|
||||||
let content_chunks = Layout::default()
|
let content_chunks = Layout::default()
|
||||||
.direction(Direction::Vertical)
|
.direction(Direction::Vertical)
|
||||||
.constraints([Constraint::Length(1), Constraint::Min(0)])
|
.constraints([Constraint::Length(1), Constraint::Min(0)])
|
||||||
.split(inner_area);
|
.split(area);
|
||||||
|
|
||||||
// Determine which columns to show based on available width
|
// Determine which columns to show based on available width
|
||||||
let columns = ColumnVisibility::from_width(inner_area.width);
|
let columns = ColumnVisibility::from_width(area.width);
|
||||||
|
|
||||||
// Build header based on visible columns
|
// Build header - columns must align with service row format
|
||||||
let mut header_parts = Vec::new();
|
let mut header_parts = Vec::new();
|
||||||
if columns.show_name {
|
if columns.show_name {
|
||||||
header_parts.push(format!("{:<width$}", "Service:", width = ColumnVisibility::NAME_WIDTH as usize));
|
header_parts.push(format!("{:<width$}", "Service:", width = ColumnVisibility::NAME_WIDTH as usize));
|
||||||
@@ -639,20 +796,46 @@ impl ServicesWidget {
|
|||||||
// Show only what fits, with "X more below" if needed
|
// Show only what fits, with "X more below" if needed
|
||||||
let available_lines = area.height as usize;
|
let available_lines = area.height as usize;
|
||||||
let total_lines = display_lines.len();
|
let total_lines = display_lines.len();
|
||||||
|
|
||||||
// Reserve one line for "X more below" if needed
|
// Store viewport height for accurate scroll calculations
|
||||||
let lines_for_content = if total_lines > available_lines {
|
self.last_viewport_height = available_lines;
|
||||||
|
|
||||||
|
// Clamp scroll_offset to valid range based on current viewport and content
|
||||||
|
// This handles dynamic viewport size changes
|
||||||
|
let max_valid_scroll = total_lines.saturating_sub(available_lines);
|
||||||
|
if self.scroll_offset > max_valid_scroll {
|
||||||
|
self.scroll_offset = max_valid_scroll;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate how many lines remain after scroll offset
|
||||||
|
let remaining_lines = total_lines.saturating_sub(self.scroll_offset);
|
||||||
|
|
||||||
|
debug!("Render: total={}, viewport={}, offset={}, max={}, remaining={}",
|
||||||
|
total_lines, available_lines, self.scroll_offset, max_valid_scroll, remaining_lines);
|
||||||
|
|
||||||
|
// Check if all remaining content fits in viewport
|
||||||
|
let will_show_more_below = remaining_lines > available_lines;
|
||||||
|
|
||||||
|
// Reserve one line for "X more below" only if we can't fit everything
|
||||||
|
let lines_for_content = if will_show_more_below {
|
||||||
available_lines.saturating_sub(1)
|
available_lines.saturating_sub(1)
|
||||||
} else {
|
} else {
|
||||||
available_lines
|
available_lines.min(remaining_lines)
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Apply scroll offset
|
||||||
let visible_lines: Vec<_> = display_lines
|
let visible_lines: Vec<_> = display_lines
|
||||||
.iter()
|
.iter()
|
||||||
|
.skip(self.scroll_offset)
|
||||||
.take(lines_for_content)
|
.take(lines_for_content)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let hidden_below = total_lines.saturating_sub(lines_for_content);
|
// Only calculate hidden_below if we actually reserved space for the message
|
||||||
|
let hidden_below = if will_show_more_below {
|
||||||
|
remaining_lines.saturating_sub(lines_for_content)
|
||||||
|
} else {
|
||||||
|
0
|
||||||
|
};
|
||||||
|
|
||||||
let lines_to_show = visible_lines.len();
|
let lines_to_show = visible_lines.len();
|
||||||
|
|
||||||
@@ -666,8 +849,8 @@ impl ServicesWidget {
|
|||||||
|
|
||||||
for (i, (line_text, line_status, is_sub, sub_info)) in visible_lines.iter().enumerate()
|
for (i, (line_text, line_status, is_sub, sub_info)) in visible_lines.iter().enumerate()
|
||||||
{
|
{
|
||||||
let actual_index = i; // Simple index since we're not scrolling
|
let actual_index = self.scroll_offset + i; // Account for scroll offset
|
||||||
|
|
||||||
// Only parent services can be selected - calculate parent service index
|
// Only parent services can be selected - calculate parent service index
|
||||||
let is_selected = if !*is_sub {
|
let is_selected = if !*is_sub {
|
||||||
// This is a parent service - count how many parent services came before this one
|
// This is a parent service - count how many parent services came before this one
|
||||||
@@ -712,7 +895,7 @@ impl ServicesWidget {
|
|||||||
// Show "X more below" message if content was truncated
|
// Show "X more below" message if content was truncated
|
||||||
if hidden_below > 0 {
|
if hidden_below > 0 {
|
||||||
let more_text = format!("... {} more below", hidden_below);
|
let more_text = format!("... {} more below", hidden_below);
|
||||||
let more_para = Paragraph::new(more_text).style(Typography::muted());
|
let more_para = Paragraph::new(more_text).style(Style::default().fg(Theme::border()));
|
||||||
frame.render_widget(more_para, service_chunks[lines_to_show]);
|
frame.render_widget(more_para, service_chunks[lines_to_show]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,12 +1,13 @@
|
|||||||
use cm_dashboard_shared::Status;
|
use cm_dashboard_shared::Status;
|
||||||
use ratatui::{
|
use ratatui::{
|
||||||
layout::Rect,
|
layout::Rect,
|
||||||
|
style::Style,
|
||||||
text::{Line, Span, Text},
|
text::{Line, Span, Text},
|
||||||
widgets::Paragraph,
|
widgets::Paragraph,
|
||||||
Frame,
|
Frame,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::ui::theme::{StatusIcons, Typography};
|
use crate::ui::theme::{StatusIcons, Theme, Typography};
|
||||||
|
|
||||||
/// System widget displaying NixOS info, Network, CPU, RAM, and Storage in unified layout
|
/// System widget displaying NixOS info, Network, CPU, RAM, and Storage in unified layout
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
@@ -14,6 +15,7 @@ pub struct SystemWidget {
|
|||||||
// NixOS information
|
// NixOS information
|
||||||
nixos_build: Option<String>,
|
nixos_build: Option<String>,
|
||||||
agent_hash: Option<String>,
|
agent_hash: Option<String>,
|
||||||
|
kernel_version: Option<String>,
|
||||||
|
|
||||||
// Network interfaces
|
// Network interfaces
|
||||||
network_interfaces: Vec<cm_dashboard_shared::NetworkInterfaceData>,
|
network_interfaces: Vec<cm_dashboard_shared::NetworkInterfaceData>,
|
||||||
@@ -43,12 +45,17 @@ pub struct SystemWidget {
|
|||||||
storage_pools: Vec<StoragePool>,
|
storage_pools: Vec<StoragePool>,
|
||||||
|
|
||||||
// Backup metrics
|
// Backup metrics
|
||||||
backup_repositories: Vec<String>,
|
backup_last_time: Option<String>,
|
||||||
backup_repository_status: Status,
|
backup_status: Status,
|
||||||
backup_disks: Vec<cm_dashboard_shared::BackupDiskData>,
|
backup_repositories: Vec<cm_dashboard_shared::BackupRepositoryData>,
|
||||||
|
|
||||||
// Overall status
|
// Overall status
|
||||||
has_data: bool,
|
has_data: bool,
|
||||||
|
|
||||||
|
// Scroll offset for viewport
|
||||||
|
pub scroll_offset: usize,
|
||||||
|
/// Last rendered viewport height (for accurate scroll bounds)
|
||||||
|
last_viewport_height: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
@@ -88,6 +95,7 @@ impl SystemWidget {
|
|||||||
Self {
|
Self {
|
||||||
nixos_build: None,
|
nixos_build: None,
|
||||||
agent_hash: None,
|
agent_hash: None,
|
||||||
|
kernel_version: None,
|
||||||
network_interfaces: Vec::new(),
|
network_interfaces: Vec::new(),
|
||||||
cpu_load_1min: None,
|
cpu_load_1min: None,
|
||||||
cpu_load_5min: None,
|
cpu_load_5min: None,
|
||||||
@@ -106,10 +114,12 @@ impl SystemWidget {
|
|||||||
tmp_status: Status::Unknown,
|
tmp_status: Status::Unknown,
|
||||||
tmpfs_mounts: Vec::new(),
|
tmpfs_mounts: Vec::new(),
|
||||||
storage_pools: Vec::new(),
|
storage_pools: Vec::new(),
|
||||||
|
backup_last_time: None,
|
||||||
|
backup_status: Status::Unknown,
|
||||||
backup_repositories: Vec::new(),
|
backup_repositories: Vec::new(),
|
||||||
backup_repository_status: Status::Unknown,
|
|
||||||
backup_disks: Vec::new(),
|
|
||||||
has_data: false,
|
has_data: false,
|
||||||
|
scroll_offset: 0,
|
||||||
|
last_viewport_height: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -153,6 +163,51 @@ impl SystemWidget {
|
|||||||
pub fn _get_agent_hash(&self) -> Option<&String> {
|
pub fn _get_agent_hash(&self) -> Option<&String> {
|
||||||
self.agent_hash.as_ref()
|
self.agent_hash.as_ref()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get the build version
|
||||||
|
pub fn get_build_version(&self) -> Option<String> {
|
||||||
|
self.nixos_build.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the agent version
|
||||||
|
pub fn get_agent_version(&self) -> Option<String> {
|
||||||
|
self.agent_hash.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the kernel version
|
||||||
|
pub fn get_kernel_version(&self) -> Option<String> {
|
||||||
|
self.kernel_version.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get overall status by aggregating all component statuses
|
||||||
|
pub fn get_overall_status(&self) -> Status {
|
||||||
|
if !self.has_data {
|
||||||
|
return Status::Offline;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut statuses = vec![self.cpu_status, self.memory_status, self.backup_status];
|
||||||
|
|
||||||
|
// Add storage pool and drive statuses
|
||||||
|
for pool in &self.storage_pools {
|
||||||
|
statuses.push(pool.status);
|
||||||
|
for drive in &pool.drives {
|
||||||
|
statuses.push(drive.status);
|
||||||
|
}
|
||||||
|
for drive in &pool.data_drives {
|
||||||
|
statuses.push(drive.status);
|
||||||
|
}
|
||||||
|
for drive in &pool.parity_drives {
|
||||||
|
statuses.push(drive.status);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add backup repository statuses
|
||||||
|
for repo in &self.backup_repositories {
|
||||||
|
statuses.push(repo.status);
|
||||||
|
}
|
||||||
|
|
||||||
|
Status::aggregate(&statuses)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
use super::Widget;
|
use super::Widget;
|
||||||
@@ -163,10 +218,13 @@ impl Widget for SystemWidget {
|
|||||||
|
|
||||||
// Extract agent version
|
// Extract agent version
|
||||||
self.agent_hash = Some(agent_data.agent_version.clone());
|
self.agent_hash = Some(agent_data.agent_version.clone());
|
||||||
|
|
||||||
// Extract build version
|
// Extract build version
|
||||||
self.nixos_build = agent_data.build_version.clone();
|
self.nixos_build = agent_data.build_version.clone();
|
||||||
|
|
||||||
|
// Extract kernel version
|
||||||
|
self.kernel_version = agent_data.kernel_version.clone();
|
||||||
|
|
||||||
// Extract network interfaces
|
// Extract network interfaces
|
||||||
self.network_interfaces = agent_data.system.network.interfaces.clone();
|
self.network_interfaces = agent_data.system.network.interfaces.clone();
|
||||||
|
|
||||||
@@ -203,9 +261,19 @@ impl Widget for SystemWidget {
|
|||||||
|
|
||||||
// Extract backup data
|
// Extract backup data
|
||||||
let backup = &agent_data.backup;
|
let backup = &agent_data.backup;
|
||||||
|
self.backup_last_time = backup.last_backup_time.clone();
|
||||||
|
self.backup_status = backup.backup_status;
|
||||||
self.backup_repositories = backup.repositories.clone();
|
self.backup_repositories = backup.repositories.clone();
|
||||||
self.backup_repository_status = backup.repository_status;
|
|
||||||
self.backup_disks = backup.disks.clone();
|
// Clamp scroll offset to valid range after update
|
||||||
|
// This prevents scroll issues when switching between hosts
|
||||||
|
let total_lines = self.get_total_lines();
|
||||||
|
if total_lines == 0 {
|
||||||
|
self.scroll_offset = 0;
|
||||||
|
} else if self.scroll_offset >= total_lines {
|
||||||
|
// Clamp to max valid value, not reset to 0
|
||||||
|
self.scroll_offset = total_lines.saturating_sub(1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -505,79 +573,42 @@ impl SystemWidget {
|
|||||||
fn render_backup(&self) -> Vec<Line<'_>> {
|
fn render_backup(&self) -> Vec<Line<'_>> {
|
||||||
let mut lines = Vec::new();
|
let mut lines = Vec::new();
|
||||||
|
|
||||||
// First section: Repository status and list
|
if self.backup_repositories.is_empty() {
|
||||||
if !self.backup_repositories.is_empty() {
|
return lines;
|
||||||
let repo_text = format!("Repo: {}", self.backup_repositories.len());
|
|
||||||
let repo_spans = StatusIcons::create_status_spans(self.backup_repository_status, &repo_text);
|
|
||||||
lines.push(Line::from(repo_spans));
|
|
||||||
|
|
||||||
// List all repositories (sorted for consistent display)
|
|
||||||
let mut sorted_repos = self.backup_repositories.clone();
|
|
||||||
sorted_repos.sort();
|
|
||||||
let repo_count = sorted_repos.len();
|
|
||||||
for (idx, repo) in sorted_repos.iter().enumerate() {
|
|
||||||
let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
|
|
||||||
lines.push(Line::from(vec![
|
|
||||||
Span::styled(format!(" {} ", tree_char), Typography::tree()),
|
|
||||||
Span::styled(repo.clone(), Typography::secondary()),
|
|
||||||
]));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Second section: Per-disk backup information (sorted by serial for consistent display)
|
// Format backup time (use complete timestamp)
|
||||||
let mut sorted_disks = self.backup_disks.clone();
|
let time_display = if let Some(ref time_str) = self.backup_last_time {
|
||||||
sorted_disks.sort_by(|a, b| a.serial.cmp(&b.serial));
|
time_str.clone()
|
||||||
for disk in &sorted_disks {
|
} else {
|
||||||
let truncated_serial = truncate_serial(&disk.serial);
|
"unknown".to_string()
|
||||||
let mut details = Vec::new();
|
};
|
||||||
|
|
||||||
if let Some(temp) = disk.temperature_celsius {
|
// Header: just the timestamp
|
||||||
details.push(format!("T: {}°C", temp as i32));
|
let repo_spans = StatusIcons::create_status_spans(self.backup_status, &time_display);
|
||||||
}
|
lines.push(Line::from(repo_spans));
|
||||||
if let Some(wear) = disk.wear_percent {
|
|
||||||
details.push(format!("W: {}%", wear as i32));
|
|
||||||
}
|
|
||||||
|
|
||||||
let disk_text = if !details.is_empty() {
|
// List all repositories with archive count and size
|
||||||
format!("{} {}", truncated_serial, details.join(" "))
|
let repo_count = self.backup_repositories.len();
|
||||||
|
for (idx, repo) in self.backup_repositories.iter().enumerate() {
|
||||||
|
let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
|
||||||
|
|
||||||
|
// Format size: use kB for < 1MB, MB for < 1GB, otherwise GB
|
||||||
|
let size_display = if repo.repo_size_gb < 0.001 {
|
||||||
|
format!("{:.0}kB", repo.repo_size_gb * 1024.0 * 1024.0)
|
||||||
|
} else if repo.repo_size_gb < 1.0 {
|
||||||
|
format!("{:.0}MB", repo.repo_size_gb * 1024.0)
|
||||||
} else {
|
} else {
|
||||||
truncated_serial
|
format!("{:.1}GB", repo.repo_size_gb)
|
||||||
};
|
};
|
||||||
|
|
||||||
// Overall disk status (worst of backup and usage)
|
let repo_text = format!("{} ({}) {}", repo.name, repo.archive_count, size_display);
|
||||||
let disk_status = disk.backup_status.max(disk.usage_status);
|
|
||||||
let disk_spans = StatusIcons::create_status_spans(disk_status, &disk_text);
|
|
||||||
lines.push(Line::from(disk_spans));
|
|
||||||
|
|
||||||
// Show backup time with status
|
let mut repo_spans = vec![
|
||||||
if let Some(backup_time) = &disk.last_backup_time {
|
Span::styled(format!(" {} ", tree_char), Typography::tree()),
|
||||||
let time_text = format!("Backup: {}", backup_time);
|
|
||||||
let mut time_spans = vec![
|
|
||||||
Span::styled(" ├─ ", Typography::tree()),
|
|
||||||
];
|
|
||||||
time_spans.extend(StatusIcons::create_status_spans(disk.backup_status, &time_text));
|
|
||||||
lines.push(Line::from(time_spans));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Show usage with status and archive count
|
|
||||||
let archive_display = if disk.archives_min == disk.archives_max {
|
|
||||||
format!("{}", disk.archives_min)
|
|
||||||
} else {
|
|
||||||
format!("{}-{}", disk.archives_min, disk.archives_max)
|
|
||||||
};
|
|
||||||
|
|
||||||
let usage_text = format!(
|
|
||||||
"Usage: ({}) {:.0}% {:.0}GB/{:.0}GB",
|
|
||||||
archive_display,
|
|
||||||
disk.disk_usage_percent,
|
|
||||||
disk.disk_used_gb,
|
|
||||||
disk.disk_total_gb
|
|
||||||
);
|
|
||||||
let mut usage_spans = vec![
|
|
||||||
Span::styled(" └─ ", Typography::tree()),
|
|
||||||
];
|
];
|
||||||
usage_spans.extend(StatusIcons::create_status_spans(disk.usage_status, &usage_text));
|
repo_spans.extend(StatusIcons::create_status_spans(repo.status, &repo_text));
|
||||||
lines.push(Line::from(usage_spans));
|
lines.push(Line::from(repo_spans));
|
||||||
}
|
}
|
||||||
|
|
||||||
lines
|
lines
|
||||||
@@ -781,23 +812,87 @@ impl SystemWidget {
     }

     /// Render system widget
-    pub fn render(&mut self, frame: &mut Frame, area: Rect, hostname: &str, _config: Option<&crate::config::DashboardConfig>) {
-        let mut lines = Vec::new();
-
-        // NixOS section
-        lines.push(Line::from(vec![
-            Span::styled(format!("NixOS {}:", hostname), Typography::widget_title())
-        ]));
-
-        let build_text = self.nixos_build.as_deref().unwrap_or("unknown");
-        lines.push(Line::from(vec![
-            Span::styled(format!("Build: {}", build_text), Typography::secondary())
-        ]));
-
-        let agent_version_text = self.agent_hash.as_deref().unwrap_or("unknown");
-        lines.push(Line::from(vec![
-            Span::styled(format!("Agent: {}", agent_version_text), Typography::secondary())
-        ]));
+    /// Scroll down by one line
+    pub fn scroll_down(&mut self, _visible_height: usize, _total_lines: usize) {
+        let total_lines = self.get_total_lines();
+
+        // Use last_viewport_height if available (more accurate), otherwise can't scroll
+        let viewport_height = if self.last_viewport_height > 0 {
+            self.last_viewport_height
+        } else {
+            return; // Can't scroll without knowing viewport size
+        };
+
+        // Max scroll should allow us to see all remaining content
+        // When scroll_offset + viewport_height >= total_lines, we can see everything
+        let max_scroll = if total_lines > viewport_height {
+            total_lines - viewport_height
+        } else {
+            0
+        };
+
+        if self.scroll_offset < max_scroll {
+            self.scroll_offset += 1;
+        }
+    }
+
+    /// Scroll up by one line
+    pub fn scroll_up(&mut self) {
+        if self.scroll_offset > 0 {
+            self.scroll_offset -= 1;
+        }
+    }
+
+    /// Get total line count (needs to be calculated before rendering)
+    pub fn get_total_lines(&self) -> usize {
+        let mut count = 0;
+
+        // CPU section (2+ lines for load/cstate, +1 if has model/cores)
+        count += 2;
+        if self.cpu_model_name.is_some() || self.cpu_core_count.is_some() {
+            count += 1;
+        }
+
+        // RAM section (1 + tmpfs mounts)
+        count += 2;
+        count += self.tmpfs_mounts.len();
+
+        // Network section
+        if !self.network_interfaces.is_empty() {
+            count += 1; // Header
+            // Count network lines (would need to mirror render_network logic)
+            for iface in &self.network_interfaces {
+                count += 1; // Interface name
+                count += iface.ipv4_addresses.len();
+                count += iface.ipv6_addresses.len();
+            }
+        }
+
+        // Storage section
+        count += 1; // Header
+        for pool in &self.storage_pools {
+            count += 1; // Pool header
+            count += pool.drives.len();
+            count += pool.data_drives.len();
+            count += pool.parity_drives.len();
+            count += pool.filesystems.len();
+        }
+
+        // Backup section
+        if !self.backup_repositories.is_empty() {
+            count += 1; // Header: "Backup:"
+            count += 1; // Repo count and timestamp header
+            count += self.backup_repositories.len(); // Individual repos
+        }
+
+        count
+    }
+
+    pub fn render(&mut self, frame: &mut Frame, area: Rect, _hostname: &str, _config: Option<&crate::config::DashboardConfig>) {
+        // Store viewport height for accurate scroll calculations
+        self.last_viewport_height = area.height as usize;
+
+        let mut lines = Vec::new();

         // CPU section
         lines.push(Line::from(vec![
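The new scroll_down keeps the offset within total_lines minus viewport_height, so scrolling can never push the last page of content out of view, and get_total_lines mirrors the render logic to estimate how many lines the widget will produce. A small self-contained sketch of the clamping arithmetic (struct and field names simplified from the widget):

// Sketch of the scroll-offset clamping used by the widget's scroll methods.
struct ScrollState {
    offset: usize,
    viewport_height: usize, // last known render height
}

impl ScrollState {
    fn scroll_down(&mut self, total_lines: usize) {
        if self.viewport_height == 0 {
            return; // can't scroll without knowing the viewport size
        }
        // Maximum offset that still leaves a full viewport of content visible.
        let max_scroll = total_lines.saturating_sub(self.viewport_height);
        if self.offset < max_scroll {
            self.offset += 1;
        }
    }

    fn scroll_up(&mut self) {
        self.offset = self.offset.saturating_sub(1);
    }
}

fn main() {
    let mut s = ScrollState { offset: 0, viewport_height: 10 };
    for _ in 0..100 {
        s.scroll_down(25); // 25 content lines, 10 visible
    }
    assert_eq!(s.offset, 15); // clamped at total_lines - viewport_height
    s.scroll_up();
    assert_eq!(s.offset, 14);
    println!("offset = {}", s.offset);
}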
@@ -893,7 +988,7 @@ impl SystemWidget {
         lines.extend(storage_lines);

         // Backup section (if available)
-        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
+        if !self.backup_repositories.is_empty() {
             lines.push(Line::from(vec![
                 Span::styled("Backup:", Typography::widget_title())
             ]));
@@ -905,29 +1000,51 @@ impl SystemWidget {
         // Apply scroll offset
         let total_lines = lines.len();
         let available_height = area.height as usize;

-        // Show only what fits, with "X more below" if needed
-        if total_lines > available_height {
-            let lines_for_content = available_height.saturating_sub(1); // Reserve one line for "more below"
-            let mut visible_lines: Vec<Line> = lines
-                .into_iter()
-                .take(lines_for_content)
-                .collect();
-
-            let hidden_below = total_lines.saturating_sub(lines_for_content);
-            if hidden_below > 0 {
-                let more_line = Line::from(vec![
-                    Span::styled(format!("... {} more below", hidden_below), Typography::muted())
-                ]);
-                visible_lines.push(more_line);
-            }
-
-            let paragraph = Paragraph::new(Text::from(visible_lines));
-            frame.render_widget(paragraph, area);
-        } else {
-            // All content fits and no scroll offset, render normally
-            let paragraph = Paragraph::new(Text::from(lines));
-            frame.render_widget(paragraph, area);
-        }
+        // Clamp scroll_offset to valid range based on current viewport and content
+        // This handles dynamic viewport size changes
+        let max_valid_scroll = total_lines.saturating_sub(available_height);
+        let clamped_scroll = self.scroll_offset.min(max_valid_scroll);
+
+        // Calculate how many lines remain after scroll offset
+        let remaining_lines = total_lines.saturating_sub(clamped_scroll);
+
+        // Check if all remaining content fits in viewport
+        let will_show_more_below = remaining_lines > available_height;
+
+        // Reserve one line for "X more below" only if we can't fit everything
+        let lines_for_content = if will_show_more_below {
+            available_height.saturating_sub(1)
+        } else {
+            available_height.min(remaining_lines)
+        };
+
+        // Apply clamped scroll offset and take only what fits
+        let mut visible_lines: Vec<Line> = lines
+            .into_iter()
+            .skip(clamped_scroll)
+            .take(lines_for_content)
+            .collect();
+
+        // Note: we don't update self.scroll_offset here due to borrow checker constraints
+        // It will be clamped on next render if still out of bounds
+
+        // Only calculate hidden_below if we actually reserved space for the message
+        let hidden_below = if will_show_more_below {
+            remaining_lines.saturating_sub(lines_for_content)
+        } else {
+            0
+        };
+
+        // Add "more below" message if needed
+        if hidden_below > 0 {
+            let more_line = Line::from(vec![
+                Span::styled(format!("... {} more below", hidden_below), Style::default().fg(Theme::border()))
+            ]);
+            visible_lines.push(more_line);
+        }
+
+        let paragraph = Paragraph::new(Text::from(visible_lines));
+        frame.render_widget(paragraph, area);
     }
 }
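The reworked tail of render() first clamps the stored scroll offset against the current viewport (so a resized terminal cannot leave the widget scrolled past the end), then skips that many lines and reserves a "... N more below" row only when the remaining content actually overflows. A simplified sketch of the same windowing math over plain strings (no ratatui types):

// Sketch of the viewport windowing: clamp the scroll offset, then pick the
// visible slice and an optional "... N more below" indicator.
fn visible_window(lines: &[String], scroll_offset: usize, viewport: usize) -> Vec<String> {
    let total = lines.len();

    // Clamp the offset so a shrinking viewport can't leave us scrolled past the end.
    let max_scroll = total.saturating_sub(viewport);
    let offset = scroll_offset.min(max_scroll);

    let remaining = total.saturating_sub(offset);
    let overflows = remaining > viewport;

    // Reserve one row for the indicator only when content doesn't fit.
    let content_rows = if overflows {
        viewport.saturating_sub(1)
    } else {
        viewport.min(remaining)
    };

    let mut out: Vec<String> = lines
        .iter()
        .skip(offset)
        .take(content_rows)
        .cloned()
        .collect();

    let hidden_below = if overflows {
        remaining.saturating_sub(content_rows)
    } else {
        0
    };
    if hidden_below > 0 {
        out.push(format!("... {} more below", hidden_below));
    }
    out
}

fn main() {
    let lines: Vec<String> = (1..=20).map(|i| format!("line {}", i)).collect();
    let view = visible_window(&lines, 5, 8);
    assert_eq!(view.len(), 8);
    assert_eq!(view.last().unwrap(), "... 8 more below");
}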
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.246"
+version = "0.1.280"
 edition = "2021"

 [dependencies]
@@ -7,6 +7,8 @@ pub struct AgentData {
     pub hostname: String,
     pub agent_version: String,
     pub build_version: Option<String>,
+    #[serde(default)]
+    pub kernel_version: Option<String>,
     pub timestamp: u64,
     pub system: SystemData,
     pub services: Vec<ServiceData>,
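kernel_version is added with #[serde(default)], so payloads from older agents that never send the field still deserialize to None instead of failing with a missing-field error. A minimal sketch of that behaviour with a trimmed-down struct (assuming serde_json is available; the hostname and kernel string below are invented):

use serde::Deserialize;

// Trimmed-down stand-in for AgentData: only the fields relevant to the change.
#[derive(Debug, Deserialize)]
struct AgentDataLite {
    hostname: String,
    agent_version: String,
    // Without #[serde(default)], a payload missing this key would be rejected.
    #[serde(default)]
    kernel_version: Option<String>,
}

fn main() -> Result<(), serde_json::Error> {
    // Payload shaped like one from an older agent that predates kernel_version.
    let old_payload = r#"{ "hostname": "host1", "agent_version": "0.1.246" }"#;
    let data: AgentDataLite = serde_json::from_str(old_payload)?;
    assert_eq!(data.kernel_version, None);

    // Newer agents include the field.
    let new_payload =
        r#"{ "hostname": "host1", "agent_version": "0.1.280", "kernel_version": "6.6.1" }"#;
    let data: AgentDataLite = serde_json::from_str(new_payload)?;
    assert_eq!(data.kernel_version.as_deref(), Some("6.6.1"));
    println!("hostname: {}, agent: {}", data.hostname, data.agent_version);
    Ok(())
}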
@@ -38,6 +40,7 @@ pub struct NetworkInterfaceData {
     pub link_status: Status,
     pub parent_interface: Option<String>,
     pub vlan_id: Option<u16>,
+    pub connection_method: Option<String>, // For Tailscale: "direct", "relay", or "proxy"
 }

 /// CPU C-state usage information
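connection_method is an optional free-form string that, per the comment, carries the Tailscale values "direct", "relay", or "proxy". Purely as an illustration of how a consumer might fold it into a display label (the label text is invented, not taken from the dashboard):

// Illustrative only: map the optional connection method onto a short UI label.
fn connection_label(connection_method: Option<&str>) -> &'static str {
    match connection_method {
        Some("direct") => "direct",
        Some("relay") => "relayed",
        Some("proxy") => "via proxy",
        Some(_) | None => "unknown",
    }
}

fn main() {
    assert_eq!(connection_label(Some("relay")), "relayed");
    assert_eq!(connection_label(None), "unknown");
}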
@@ -181,27 +184,18 @@ pub struct SubServiceMetric {
 /// Backup system data
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct BackupData {
-    pub repositories: Vec<String>,
-    pub repository_status: Status,
-    pub disks: Vec<BackupDiskData>,
-}
-
-/// Backup repository disk information
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct BackupDiskData {
-    pub serial: String,
-    pub product_name: Option<String>,
-    pub wear_percent: Option<f32>,
-    pub temperature_celsius: Option<f32>,
     pub last_backup_time: Option<String>,
     pub backup_status: Status,
-    pub disk_usage_percent: f32,
-    pub disk_used_gb: f32,
-    pub disk_total_gb: f32,
-    pub usage_status: Status,
-    pub services: Vec<String>,
-    pub archives_min: i64,
-    pub archives_max: i64,
+    pub repositories: Vec<BackupRepositoryData>,
+}
+
+/// Individual backup repository information
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackupRepositoryData {
+    pub name: String,
+    pub archive_count: i64,
+    pub repo_size_gb: f32,
+    pub status: Status,
 }

 impl AgentData {
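The backup payload loses its per-disk SMART and usage fields and instead nests structured repository entries under a single last_backup_time/backup_status pair. A sketch of the resulting JSON shape using simplified mirror types (only Status::Unknown appears in this diff; the other variant and all example values below are invented, and serde_json is assumed to be available):

use serde::Serialize;

// Simplified mirrors of the new shared types, just to show the payload shape.
#[allow(dead_code)]
#[derive(Debug, Serialize)]
enum Status {
    Ok,
    Unknown,
}

#[derive(Debug, Serialize)]
struct BackupRepositoryData {
    name: String,
    archive_count: i64,
    repo_size_gb: f32,
    status: Status,
}

#[derive(Debug, Serialize)]
struct BackupData {
    last_backup_time: Option<String>,
    backup_status: Status,
    repositories: Vec<BackupRepositoryData>,
}

fn main() -> Result<(), serde_json::Error> {
    let backup = BackupData {
        last_backup_time: Some("2024-01-01 03:00".to_string()), // invented timestamp
        backup_status: Status::Ok,
        repositories: vec![BackupRepositoryData {
            name: "example-repo".to_string(), // invented name
            archive_count: 30,
            repo_size_gb: 98.4,
            status: Status::Ok,
        }],
    };
    // Prints the nested shape consumers of the shared crate would receive.
    println!("{}", serde_json::to_string_pretty(&backup)?);
    Ok(())
}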
@@ -211,6 +205,7 @@ impl AgentData {
             hostname,
             agent_version,
             build_version: None,
+            kernel_version: None,
             timestamp: chrono::Utc::now().timestamp() as u64,
             system: SystemData {
                 network: NetworkData {
@@ -244,9 +239,9 @@ impl AgentData {
             },
             services: Vec::new(),
             backup: BackupData {
+                last_backup_time: None,
+                backup_status: Status::Unknown,
                 repositories: Vec::new(),
-                repository_status: Status::Unknown,
-                disks: Vec::new(),
             },
         }
     }