Compare commits

...

3 Commits

Author SHA1 Message Date
535784e849 Improve dashboard UI layout and status aggregation
All checks were successful
Build and Release / build-and-release (push) Successful in 1m36s
- Move hosts panel to left side above system panel
- Add dynamic column layout for hosts based on available width
- Fix status aggregation to properly calculate host status from widgets
- Align service panel columns with header
- Use blue color for metrics without status indicators
- Add offline host popup overlay
- Use foreground color for panel titles
2025-12-16 13:15:24 +01:00
41a7ee660a Add kernel version to statusbar
All checks were successful
Build and Release / build-and-release (push) Successful in 1m37s
2025-12-15 16:26:54 +01:00
76931f0457 Fix notification system with proper rate limiting and aggregation
All checks were successful
Build and Release / build-and-release (push) Successful in 1m51s
- Add rate limiting using rate_limit_minutes config (was ignored)
- Add aggregation using aggregation_interval_seconds config (was ignored)
- Use smtp_host and smtp_port from config (was hardcoded localhost:25)
- Add trigger_on_warnings and trigger_on_failures config options
- Add recovery_requires_all_ok and suppress_individual_recoveries
- Use check_interval_seconds from config (was hardcoded 30s)
- Expand status tracking to all components (drives, pools, services, backup)
- Move notification checks from every collection to dedicated interval
- Separate alert and recovery notifications with proper email formatting
- Only notify on failed services (Critical), not inactive
2025-12-15 13:44:06 +01:00
16 changed files with 924 additions and 567 deletions

6
Cargo.lock generated
View File

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
[[package]]
name = "cm-dashboard"
version = "0.1.276"
version = "0.1.280"
dependencies = [
"anyhow",
"chrono",
@@ -301,7 +301,7 @@ dependencies = [
[[package]]
name = "cm-dashboard-agent"
version = "0.1.275"
version = "0.1.280"
dependencies = [
"anyhow",
"async-trait",
@@ -325,7 +325,7 @@ dependencies = [
[[package]]
name = "cm-dashboard-shared"
version = "0.1.275"
version = "0.1.280"
dependencies = [
"chrono",
"serde",

View File

@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-agent"
version = "0.1.275"
version = "0.1.280"
edition = "2021"
[dependencies]

View File

@@ -1,5 +1,6 @@
use anyhow::Result;
use gethostname::gethostname;
use std::collections::HashMap;
use std::time::{Duration, Instant};
use tokio::time::interval;
use tracing::{debug, error, info};
@@ -28,7 +29,6 @@ struct TimedCollector {
}
pub struct Agent {
hostname: String,
config: AgentConfig,
zmq_handler: ZmqHandler,
collectors: Vec<TimedCollector>,
@@ -38,12 +38,40 @@ pub struct Agent {
}
/// Track system component status for change detection
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Default)]
struct SystemStatus {
// CPU
cpu_load_status: cm_dashboard_shared::Status,
cpu_temperature_status: cm_dashboard_shared::Status,
// Memory
memory_usage_status: cm_dashboard_shared::Status,
// Add more as needed
// Storage - keyed by drive name or pool name
drive_statuses: HashMap<String, DriveStatus>,
pool_statuses: HashMap<String, PoolStatus>,
// Services - keyed by service name
service_statuses: HashMap<String, cm_dashboard_shared::Status>,
// Backup
backup_status: cm_dashboard_shared::Status,
}
/// Per-drive status snapshot used for change detection between
/// notification check intervals.
#[derive(Debug, Clone, Default)]
struct DriveStatus {
    // Temperature threshold status reported for the physical drive
    temperature_status: cm_dashboard_shared::Status,
    // Health status reported for the physical drive
    health_status: cm_dashboard_shared::Status,
    // Usage status per filesystem on this drive, keyed by mount point
    filesystem_statuses: HashMap<String, cm_dashboard_shared::Status>,
}
/// Per-pool status snapshot used for change detection between
/// notification check intervals.
#[derive(Debug, Clone, Default)]
struct PoolStatus {
    // Overall pool health status
    health_status: cm_dashboard_shared::Status,
    // Pool capacity/usage threshold status
    usage_status: cm_dashboard_shared::Status,
    // Status of each member drive (data and parity), keyed by drive name
    drive_statuses: HashMap<String, PoolDriveStatus>,
}
/// Status snapshot for a single drive that is a member of a storage pool.
#[derive(Debug, Clone, Default)]
struct PoolDriveStatus {
    // Health status reported for the pool member drive
    health_status: cm_dashboard_shared::Status,
    // Temperature threshold status for the pool member drive
    temperature_status: cm_dashboard_shared::Status,
}
impl Agent {
@@ -148,7 +176,6 @@ impl Agent {
let cached_agent_data = AgentData::new(hostname.clone(), env!("CARGO_PKG_VERSION").to_string());
Ok(Self {
hostname,
config,
zmq_handler,
collectors,
@@ -171,7 +198,9 @@ impl Agent {
let mut transmission_interval = interval(Duration::from_secs(
self.config.zmq.transmission_interval_seconds,
));
let mut notification_interval = interval(Duration::from_secs(30)); // Check notifications every 30s
let mut notification_interval = interval(Duration::from_secs(
self.config.notifications.check_interval_seconds,
));
// Skip initial ticks to avoid immediate execution
transmission_interval.tick().await;
@@ -185,9 +214,21 @@ impl Agent {
}
}
_ = notification_interval.tick() => {
// Process any pending notifications
// NOTE: With structured data, we might need to implement status tracking differently
// For now, we skip this until status evaluation is migrated
// Check for status changes and queue notifications
let agent_data_snapshot = self.cached_agent_data.clone();
if let Err(e) = self.check_status_changes_and_notify(&agent_data_snapshot).await {
error!("Failed to check status changes: {}", e);
}
// Check if all components recovered and flush pending recoveries
self.notification_manager.flush_recoveries_if_all_ok();
// Flush any pending aggregated notifications
if self.notification_manager.should_flush() {
if let Err(e) = self.notification_manager.flush_notifications().await {
error!("Failed to flush notifications: {}", e);
}
}
}
_ = &mut shutdown_rx => {
info!("Shutdown signal received, stopping agent loop");
@@ -235,16 +276,8 @@ impl Agent {
.unwrap()
.as_secs();
// Clone for notification check (to avoid borrow issues)
let agent_data_snapshot = self.cached_agent_data.clone();
// Check for status changes and send notifications
if let Err(e) = self.check_status_changes_and_notify(&agent_data_snapshot).await {
error!("Failed to check status changes: {}", e);
}
// Broadcast the cached structured data via ZMQ
if let Err(e) = self.zmq_handler.publish_agent_data(&agent_data_snapshot).await {
if let Err(e) = self.zmq_handler.publish_agent_data(&self.cached_agent_data).await {
error!("Failed to broadcast agent data: {}", e);
} else {
debug!("Successfully broadcast structured agent data");
@@ -253,38 +286,182 @@ impl Agent {
Ok(())
}
/// Check for status changes and send notifications
/// Check for status changes and queue notifications
async fn check_status_changes_and_notify(&mut self, agent_data: &AgentData) -> Result<()> {
// Extract current status
let current_status = SystemStatus {
cpu_load_status: agent_data.system.cpu.load_status.clone(),
cpu_temperature_status: agent_data.system.cpu.temperature_status.clone(),
memory_usage_status: agent_data.system.memory.usage_status.clone(),
// Build current status from agent data
let mut current_status = SystemStatus {
cpu_load_status: agent_data.system.cpu.load_status,
cpu_temperature_status: agent_data.system.cpu.temperature_status,
memory_usage_status: agent_data.system.memory.usage_status,
backup_status: agent_data.backup.backup_status,
..Default::default()
};
// Check for status changes
if let Some(previous) = self.previous_status.clone() {
self.check_and_notify_status_change(
// Collect drive statuses
for drive in &agent_data.system.storage.drives {
let mut fs_statuses = HashMap::new();
for fs in &drive.filesystems {
fs_statuses.insert(fs.mount.clone(), fs.usage_status);
}
current_status.drive_statuses.insert(
drive.name.clone(),
DriveStatus {
temperature_status: drive.temperature_status,
health_status: drive.health_status,
filesystem_statuses: fs_statuses,
},
);
}
// Collect pool statuses
for pool in &agent_data.system.storage.pools {
let mut pool_drive_statuses = HashMap::new();
for drive in pool.data_drives.iter().chain(pool.parity_drives.iter()) {
pool_drive_statuses.insert(
drive.name.clone(),
PoolDriveStatus {
health_status: drive.health_status,
temperature_status: drive.temperature_status,
},
);
}
current_status.pool_statuses.insert(
pool.name.clone(),
PoolStatus {
health_status: pool.health_status,
usage_status: pool.usage_status,
drive_statuses: pool_drive_statuses,
},
);
}
// Collect service statuses (only for non-user-stopped services)
for service in &agent_data.services {
if !service.user_stopped {
current_status
.service_statuses
.insert(service.name.clone(), service.service_status);
}
}
// Clone previous status to avoid borrow issues
let previous = self.previous_status.clone();
// Compare with previous status and queue notifications
if let Some(previous) = previous {
// CPU
self.queue_status_notification(
"CPU Load",
&previous.cpu_load_status,
&current_status.cpu_load_status,
format!("CPU load: {:.1}", agent_data.system.cpu.load_1min)
).await?;
self.check_and_notify_status_change(
"CPU Temperature",
&format!("Load: {:.2}", agent_data.system.cpu.load_1min),
);
self.queue_status_notification(
"CPU Temperature",
&previous.cpu_temperature_status,
&current_status.cpu_temperature_status,
format!("CPU temperature: {}°C",
agent_data.system.cpu.temperature_celsius.unwrap_or(0.0) as i32)
).await?;
&format!(
"Temperature: {}°C",
agent_data.system.cpu.temperature_celsius.unwrap_or(0.0) as i32
),
);
self.check_and_notify_status_change(
"Memory Usage",
&previous.memory_usage_status,
// Memory
self.queue_status_notification(
"Memory",
&previous.memory_usage_status,
&current_status.memory_usage_status,
format!("Memory usage: {:.1}%", agent_data.system.memory.usage_percent)
).await?;
&format!("Usage: {:.1}%", agent_data.system.memory.usage_percent),
);
// Backup
self.queue_status_notification(
"Backup",
&previous.backup_status,
&current_status.backup_status,
&format!(
"Last backup: {}",
agent_data.backup.last_backup_time.as_deref().unwrap_or("unknown")
),
);
// Drives
for (name, current_drive) in &current_status.drive_statuses {
if let Some(prev_drive) = previous.drive_statuses.get(name) {
self.queue_status_notification(
&format!("Drive {} Health", name),
&prev_drive.health_status,
&current_drive.health_status,
"Health check failed",
);
self.queue_status_notification(
&format!("Drive {} Temperature", name),
&prev_drive.temperature_status,
&current_drive.temperature_status,
"Temperature threshold exceeded",
);
// Filesystem usage
for (mount, current_fs_status) in &current_drive.filesystem_statuses {
if let Some(prev_fs_status) = prev_drive.filesystem_statuses.get(mount) {
self.queue_status_notification(
&format!("Filesystem {}", mount),
prev_fs_status,
current_fs_status,
"Disk usage threshold exceeded",
);
}
}
}
}
// Pools
for (name, current_pool) in &current_status.pool_statuses {
if let Some(prev_pool) = previous.pool_statuses.get(name) {
self.queue_status_notification(
&format!("Pool {} Health", name),
&prev_pool.health_status,
&current_pool.health_status,
"Pool health degraded",
);
self.queue_status_notification(
&format!("Pool {} Usage", name),
&prev_pool.usage_status,
&current_pool.usage_status,
"Pool usage threshold exceeded",
);
// Pool drives
for (drive_name, current_pd) in &current_pool.drive_statuses {
if let Some(prev_pd) = prev_pool.drive_statuses.get(drive_name) {
self.queue_status_notification(
&format!("Pool {} Drive {} Health", name, drive_name),
&prev_pd.health_status,
&current_pd.health_status,
"Pool drive health degraded",
);
self.queue_status_notification(
&format!("Pool {} Drive {} Temperature", name, drive_name),
&prev_pd.temperature_status,
&current_pd.temperature_status,
"Pool drive temperature exceeded",
);
}
}
}
}
// Services
for (name, current_svc_status) in &current_status.service_statuses {
if let Some(prev_svc_status) = previous.service_statuses.get(name) {
self.queue_status_notification(
&format!("Service {}", name),
prev_svc_status,
current_svc_status,
"Service status changed",
);
}
}
}
// Store current status for next comparison
@@ -292,43 +469,44 @@ impl Agent {
Ok(())
}
/// Check individual status change and send notification if degraded
async fn check_and_notify_status_change(
/// Queue a notification based on status change
fn queue_status_notification(
&mut self,
component: &str,
previous: &cm_dashboard_shared::Status,
current: &cm_dashboard_shared::Status,
details: String
) -> Result<()> {
details: &str,
) {
use cm_dashboard_shared::Status;
// Only notify on status degradation (OK → Warning/Critical, Warning → Critical)
let should_notify = match (previous, current) {
(Status::Ok, Status::Warning) => true,
(Status::Ok, Status::Critical) => true,
(Status::Warning, Status::Critical) => true,
_ => false,
};
// Check for degradation (alert)
let is_alert = matches!(
(previous, current),
(Status::Ok, Status::Warning)
| (Status::Ok, Status::Critical)
| (Status::Warning, Status::Critical)
);
if should_notify {
let subject = format!("{} {} Alert", self.hostname, component);
let body = format!(
"Alert: {} status changed from {:?} to {:?}\n\nDetails: {}\n\nTime: {}",
component,
previous,
current,
details,
chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC")
// Check for recovery
let is_recovery = matches!(
(previous, current),
(Status::Warning, Status::Ok)
| (Status::Critical, Status::Ok)
| (Status::Critical, Status::Warning)
);
if is_alert {
info!(
"Alert: {} - {:?} → {:?}",
component, previous, current
);
info!("Sending notification: {} - {:?} → {:?}", component, previous, current);
if let Err(e) = self.notification_manager.send_direct_email(&subject, &body).await {
error!("Failed to send notification for {}: {}", component, e);
}
self.notification_manager.queue_alert(component, previous, current, details);
} else if is_recovery {
info!(
"Recovery: {} - {:?} → {:?}",
component, previous, current
);
self.notification_manager.queue_recovery(component, previous, current, details);
}
Ok(())
}
}

View File

@@ -32,6 +32,9 @@ impl NixOSCollector {
// Set NixOS build/generation information
agent_data.build_version = self.get_nixos_generation().await;
// Set kernel version
agent_data.kernel_version = self.get_kernel_version().await;
// Set current timestamp
agent_data.timestamp = chrono::Utc::now().timestamp() as u64;
@@ -80,6 +83,14 @@ impl NixOSCollector {
std::env::var("CM_DASHBOARD_VERSION").unwrap_or_else(|_| "unknown".to_string())
}
/// Read the running kernel version from `/proc/sys/kernel/osrelease`.
///
/// Returns `None` when the file cannot be read (e.g. non-Linux host).
async fn get_kernel_version(&self) -> Option<String> {
    fs::read_to_string("/proc/sys/kernel/osrelease")
        .ok()
        .map(|contents| contents.trim().to_string())
}
/// Get NixOS system generation (build) information from git commit
async fn get_nixos_generation(&self) -> Option<String> {
// Try to read git commit hash from file written during rebuild

View File

@@ -953,15 +953,21 @@ impl SystemdCollector {
"-s",
"--max-time",
"4",
"https://ifconfig.me"
"https://1.1.1.1/cdn-cgi/trace"
])
.output()
.ok()?;
if output.status.success() {
let ip = String::from_utf8_lossy(&output.stdout).trim().to_string();
if !ip.is_empty() && ip.contains('.') {
return Some(ip);
let response = String::from_utf8_lossy(&output.stdout);
// Parse "ip=x.x.x.x" from the response
for line in response.lines() {
if let Some(ip) = line.strip_prefix("ip=") {
let ip = ip.trim().to_string();
if !ip.is_empty() {
return Some(ip);
}
}
}
}

View File

@@ -141,8 +141,23 @@ pub struct NotificationConfig {
pub from_email: String,
pub to_email: String,
pub rate_limit_minutes: u64,
/// Whether to send notifications on warning status
#[serde(default = "default_true")]
pub trigger_on_warnings: bool,
/// Whether to send notifications on failure/critical status
#[serde(default = "default_true")]
pub trigger_on_failures: bool,
/// Only send recovery notification when all components are OK
#[serde(default)]
pub recovery_requires_all_ok: bool,
/// Suppress individual recovery notifications (only notify on full recovery)
#[serde(default)]
pub suppress_individual_recoveries: bool,
/// Email notification batching interval in seconds (default: 60)
pub aggregation_interval_seconds: u64,
/// How often to check for status changes in seconds (default: 30)
#[serde(default = "default_check_interval_seconds")]
pub check_interval_seconds: u64,
/// List of metric names to exclude from email notifications
#[serde(default)]
pub exclude_email_metrics: Vec<String>,
@@ -151,6 +166,14 @@ pub struct NotificationConfig {
pub maintenance_mode_file: String,
}
/// Serde default helper: boolean notification flags default to enabled.
fn default_true() -> bool {
    true
}
/// Serde default for `check_interval_seconds`: status changes are
/// checked every 30 seconds unless configured otherwise.
fn default_check_interval_seconds() -> u64 {
    30
}
fn default_heartbeat_interval_seconds() -> u64 {
5

View File

@@ -1,60 +1,314 @@
use crate::config::NotificationConfig;
use anyhow::Result;
use chrono::Utc;
use cm_dashboard_shared::Status;
use lettre::transport::smtp::SmtpTransport;
use lettre::{Message, Transport};
use std::collections::HashMap;
use std::time::{Duration, Instant};
use tracing::{debug, error, info};
/// Manages notifications
/// Manages notifications with rate limiting and aggregation
pub struct NotificationManager {
config: NotificationConfig,
/// Last notification time per component for rate limiting
last_notification: HashMap<String, Instant>,
/// Pending notifications for aggregation
pending_notifications: Vec<PendingNotification>,
/// Pending recovery notifications (held until all OK if configured)
pending_recoveries: Vec<PendingNotification>,
/// Last aggregation flush time
last_aggregation_flush: Option<Instant>,
/// Track components currently in alert state
components_in_alert: HashMap<String, Status>,
}
/// A pending notification waiting to be aggregated into a single email.
#[derive(Debug, Clone)]
struct PendingNotification {
    // Human-readable component identifier (e.g. "CPU Load")
    component: String,
    // Previous status, rendered via Debug formatting
    previous_status: String,
    // Current status, rendered via Debug formatting
    current_status: String,
    // Free-form detail text supplied by the caller
    details: String,
    // When the status change was observed (UTC)
    timestamp: chrono::DateTime<Utc>,
    // True for recovery transitions, false for degradations
    is_recovery: bool,
}
impl NotificationManager {
/// Build a notification manager from the given config.
///
/// The hostname parameter is accepted for interface stability but is
/// currently unused (the hostname is resolved at send time instead).
pub fn new(config: &NotificationConfig, _hostname: &str) -> Result<Self> {
    let manager = Self {
        config: config.clone(),
        pending_notifications: Vec::new(),
        pending_recoveries: Vec::new(),
        components_in_alert: HashMap::new(),
        last_notification: HashMap::new(),
        last_aggregation_flush: None,
    };
    Ok(manager)
}
pub async fn send_direct_email(&mut self, subject: &str, body: &str) -> Result<()> {
/// Whether `component` was notified within the configured rate-limit
/// window. A `rate_limit_minutes` of 0 disables rate limiting entirely.
fn is_rate_limited(&self, component: &str) -> bool {
    if self.config.rate_limit_minutes == 0 {
        return false;
    }
    let window = Duration::from_secs(self.config.rate_limit_minutes * 60);
    self.last_notification
        .get(component)
        .map_or(false, |last| last.elapsed() < window)
}
/// Queue a degradation notification (Ok→Warning, Ok→Critical, Warning→Critical).
///
/// The alert is dropped (not queued) when: the target severity is
/// disabled in config, the component is within its rate-limit window,
/// or the component matches an `exclude_email_metrics` entry.
/// Otherwise the component is marked as "in alert" and the notification
/// is appended for the next aggregated flush.
pub fn queue_alert(
    &mut self,
    component: &str,
    previous: &Status,
    current: &Status,
    details: &str,
) {
    // Check if this status type should trigger notifications.
    // Only Warning and Critical trigger notifications (not Inactive).
    let should_notify = match current {
        Status::Warning => self.config.trigger_on_warnings,
        Status::Critical => self.config.trigger_on_failures,
        _ => false,
    };
    if !should_notify {
        debug!(
            "Notification for {} suppressed (trigger_on_warnings={}, trigger_on_failures={})",
            component, self.config.trigger_on_warnings, self.config.trigger_on_failures
        );
        return;
    }
    // Check rate limit (per-component; window set by rate_limit_minutes).
    if self.is_rate_limited(component) {
        debug!(
            "Notification for {} rate limited (limit: {} min)",
            component, self.config.rate_limit_minutes
        );
        return;
    }
    // Check exclusions (substring match against configured metric names).
    if self.config.exclude_email_metrics.iter().any(|e| component.contains(e)) {
        debug!("Notification for {} excluded by config", component);
        return;
    }
    // Track this component as in alert state so recovery batching can
    // later tell when everything is back to OK.
    self.components_in_alert.insert(component.to_string(), *current);
    self.pending_notifications.push(PendingNotification {
        component: component.to_string(),
        previous_status: format!("{:?}", previous),
        current_status: format!("{:?}", current),
        details: details.to_string(),
        timestamp: Utc::now(),
        is_recovery: false,
    });
    // Update rate limit tracker only after the alert is actually queued,
    // so suppressed/excluded alerts do not consume the rate-limit window.
    self.last_notification.insert(component.to_string(), Instant::now());
    debug!(
        "Queued alert for {}: {:?} -> {:?}",
        component, previous, current
    );
}
/// Queue a recovery notification (Warning→Ok, Critical→Ok, Critical→Warning).
///
/// The component is always removed from alert tracking. Components
/// matching `exclude_email_metrics` never produce a recovery email —
/// including via the suppressed/batched path (previously, suppressed
/// recoveries skipped the exclusion check and could still be flushed
/// later by `flush_recoveries_if_all_ok`). With
/// `suppress_individual_recoveries` set, the recovery is held in
/// `pending_recoveries` for batch delivery instead of being queued
/// immediately.
pub fn queue_recovery(
    &mut self,
    component: &str,
    previous: &Status,
    current: &Status,
    details: &str,
) {
    // Remove from alert tracking regardless of whether we notify.
    self.components_in_alert.remove(component);
    // Check exclusions first so excluded components never generate
    // recovery emails, matching queue_alert's behavior.
    if self.config.exclude_email_metrics.iter().any(|e| component.contains(e)) {
        debug!("Recovery notification for {} excluded by config", component);
        return;
    }
    let notification = PendingNotification {
        component: component.to_string(),
        previous_status: format!("{:?}", previous),
        current_status: format!("{:?}", current),
        details: details.to_string(),
        timestamp: Utc::now(),
        is_recovery: true,
    };
    // Check if individual recoveries are suppressed; if so, hold the
    // recovery for a potential batch notification.
    if self.config.suppress_individual_recoveries {
        debug!(
            "Individual recovery for {} suppressed by config",
            component
        );
        self.pending_recoveries.push(notification);
        return;
    }
    self.pending_notifications.push(notification);
    debug!(
        "Queued recovery for {}: {:?} -> {:?}",
        component, previous, current
    );
}
/// Check if all components have recovered (no components in alert state).
///
/// "In alert" means the component previously degraded to Warning/Critical
/// via `queue_alert` and has not yet been cleared by `queue_recovery`.
pub fn all_components_ok(&self) -> bool {
    self.components_in_alert.is_empty()
}
/// Move held-back recovery notifications into the pending queue when
/// either all-OK batching is disabled or every component has recovered.
pub fn flush_recoveries_if_all_ok(&mut self) {
    let ready = !self.config.recovery_requires_all_ok || self.all_components_ok();
    if ready && !self.pending_recoveries.is_empty() {
        info!("All components recovered, sending batch recovery notification");
        self.pending_notifications.append(&mut self.pending_recoveries);
    }
}
/// True when there are pending notifications and the aggregation
/// interval has elapsed since the last flush (or no flush has
/// happened yet).
pub fn should_flush(&self) -> bool {
    if self.pending_notifications.is_empty() {
        return false;
    }
    let interval = Duration::from_secs(self.config.aggregation_interval_seconds);
    self.last_aggregation_flush
        .map_or(true, |last| last.elapsed() >= interval)
}
/// Flush pending notifications as a single aggregated email
pub async fn flush_notifications(&mut self) -> Result<()> {
if self.pending_notifications.is_empty() {
return Ok(());
}
if !self.config.enabled {
self.pending_notifications.clear();
self.last_aggregation_flush = Some(Instant::now());
return Ok(());
}
if self.is_maintenance_mode() {
debug!("Maintenance mode active, suppressing email notification");
debug!("Maintenance mode active, suppressing aggregated notifications");
self.pending_notifications.clear();
self.last_aggregation_flush = Some(Instant::now());
return Ok(());
}
let hostname = gethostname::gethostname()
.to_string_lossy()
.to_string();
let hostname = gethostname::gethostname().to_string_lossy().to_string();
// Build aggregated email
let notification_count = self.pending_notifications.len();
let alert_count = self.pending_notifications.iter().filter(|n| !n.is_recovery).count();
let recovery_count = self.pending_notifications.iter().filter(|n| n.is_recovery).count();
let subject = if notification_count == 1 {
let n = &self.pending_notifications[0];
if n.is_recovery {
format!("[{}] {} Recovered: {}", hostname, n.component, n.current_status)
} else {
format!("[{}] {} Alert: {}", hostname, n.component, n.current_status)
}
} else if recovery_count > 0 && alert_count == 0 {
format!("[{}] {} Components Recovered", hostname, recovery_count)
} else if alert_count > 0 && recovery_count == 0 {
format!("[{}] {} Status Alerts", hostname, alert_count)
} else {
format!("[{}] {} Alerts, {} Recoveries", hostname, alert_count, recovery_count)
};
let mut body = String::new();
body.push_str(&format!("Status notifications for host: {}\n", hostname));
body.push_str(&format!("Time: {}\n\n", Utc::now().format("%Y-%m-%d %H:%M:%S UTC")));
// Group alerts and recoveries
let alerts: Vec<_> = self.pending_notifications.iter().filter(|n| !n.is_recovery).collect();
let recoveries: Vec<_> = self.pending_notifications.iter().filter(|n| n.is_recovery).collect();
if !alerts.is_empty() {
body.push_str("=== ALERTS ===\n\n");
for notification in &alerts {
body.push_str(&format!(
"{} : {}{}\n {}\n ({})\n\n",
notification.component,
notification.previous_status,
notification.current_status,
notification.details,
notification.timestamp.format("%H:%M:%S UTC")
));
}
}
if !recoveries.is_empty() {
body.push_str("=== RECOVERIES ===\n\n");
for notification in &recoveries {
body.push_str(&format!(
"{} : {}{}\n {}\n ({})\n\n",
notification.component,
notification.previous_status,
notification.current_status,
notification.details,
notification.timestamp.format("%H:%M:%S UTC")
));
}
}
body.push_str("--\nCM Dashboard Agent");
// Send the aggregated email
let from_email = self.config.from_email.replace("{hostname}", &hostname);
let email_body = format!(
"{}\n\n--\nCM Dashboard Agent\nGenerated at {}",
body,
Utc::now().format("%Y-%m-%d %H:%M:%S %Z")
);
let email = Message::builder()
.from(from_email.parse()?)
.to(self.config.to_email.parse()?)
.subject(subject)
.body(email_body)?;
.subject(&subject)
.body(body)?;
let mailer = SmtpTransport::unencrypted_localhost();
let mailer = SmtpTransport::builder_dangerous(&self.config.smtp_host)
.port(self.config.smtp_port)
.build();
match mailer.send(&email) {
Ok(_) => info!("Direct email sent successfully: {}", subject),
Ok(_) => {
info!(
"Sent aggregated notification email with {} alerts",
notification_count
);
}
Err(e) => {
error!("Failed to send email: {}", e);
error!("Failed to send aggregated email: {}", e);
return Err(e.into());
}
}
self.pending_notifications.clear();
self.last_aggregation_flush = Some(Instant::now());
Ok(())
}

View File

@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard"
version = "0.1.276"
version = "0.1.280"
edition = "2021"
[dependencies]

View File

@@ -377,17 +377,6 @@ impl Dashboard {
return Ok(());
}
// Check for tab clicks in right panel (hosts | services)
if matches!(mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
let services_end = self.services_area.x.saturating_add(self.services_area.width);
if y == self.services_area.y && x >= self.services_area.x && x < services_end {
// Click on top border of services area (where tabs are)
if let Some(ref mut tui_app) = self.tui_app {
tui_app.handle_tab_click(x, &self.services_area);
}
return Ok(());
}
}
// Determine which panel the mouse is over
let in_system_area = is_in_area(x, y, &self.system_area);
@@ -448,65 +437,43 @@ impl Dashboard {
}
if let Some(ref mut tui_app) = self.tui_app {
if tui_app.focus_hosts {
// Hosts tab is active - handle host click
// The services area includes a border and header, so account for that
let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header
// Handle service click
// The services area includes a border, so we need to account for that
let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header
let total_hosts = tui_app.get_available_hosts().len();
let clicked_index = tui_app.hosts_widget.y_to_host_index(relative_y);
if let Some(hostname) = tui_app.current_host.clone() {
let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
// Account for scroll offset - the clicked line is relative to viewport
let display_line_index = host_widgets.services_widget.scroll_offset + relative_y;
// Map display line to parent service index
if let Some(parent_index) = host_widgets.services_widget.display_line_to_parent_index(display_line_index) {
// Set the selected index to the clicked parent service
host_widgets.services_widget.selected_index = parent_index;
if clicked_index < total_hosts {
match button {
MouseButton::Left => {
// Left click: set selector and switch to host immediately
tui_app.hosts_widget.set_selected_index(clicked_index, total_hosts);
let selected_host = tui_app.get_available_hosts()[clicked_index].clone();
tui_app.switch_to_host(&selected_host);
debug!("Clicked host at index {}: {}", clicked_index, selected_host);
// Left click just selects the service
debug!("Left-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
}
MouseButton::Right => {
// Right click opens context menu
debug!("Right-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
// Get the service name for the popup
if let Some(service_name) = host_widgets.services_widget.get_selected_service() {
tui_app.popup_menu = Some(crate::ui::PopupMenu {
service_name,
x,
y,
selected_index: 0,
});
}
}
_ => {}
}
}
} else {
// Services tab is active - handle service click
// The services area includes a border, so we need to account for that
let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header
if let Some(hostname) = tui_app.current_host.clone() {
let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
// Account for scroll offset - the clicked line is relative to viewport
let display_line_index = host_widgets.services_widget.scroll_offset + relative_y;
// Map display line to parent service index
if let Some(parent_index) = host_widgets.services_widget.display_line_to_parent_index(display_line_index) {
// Set the selected index to the clicked parent service
host_widgets.services_widget.selected_index = parent_index;
match button {
MouseButton::Left => {
// Left click just selects the service
debug!("Left-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
}
MouseButton::Right => {
// Right click opens context menu
debug!("Right-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
// Get the service name for the popup
if let Some(service_name) = host_widgets.services_widget.get_selected_service() {
tui_app.popup_menu = Some(crate::ui::PopupMenu {
service_name,
x,
y,
selected_index: 0,
});
}
}
_ => {}
}
}
}
}
}
}

View File

@@ -74,8 +74,6 @@ pub struct TuiApp {
localhost: String,
/// Active popup menu (if any)
pub popup_menu: Option<PopupMenu>,
/// Focus on hosts tab (false = Services, true = Hosts)
pub focus_hosts: bool,
/// Hosts widget for navigation and rendering
pub hosts_widget: HostsWidget,
}
@@ -92,7 +90,6 @@ impl TuiApp {
config,
localhost,
popup_menu: None,
focus_hosts: true, // Start with Hosts tab focused by default
hosts_widget: HostsWidget::new(),
};
@@ -358,46 +355,29 @@ impl TuiApp {
}
}
KeyCode::Tab => {
// Tab toggles between Services and Hosts tabs
self.focus_hosts = !self.focus_hosts;
// Tab cycles to next host
self.cycle_next_host();
}
KeyCode::BackTab => {
// Shift+Tab cycles to previous host
self.cycle_previous_host();
}
KeyCode::Up | KeyCode::Char('k') => {
if self.focus_hosts {
// Move blue selector bar up when in Hosts tab
self.hosts_widget.select_previous();
} else {
// Move service selection up when in Services tab
if let Some(hostname) = self.current_host.clone() {
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.select_previous();
}
// Move service selection up
if let Some(hostname) = self.current_host.clone() {
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.select_previous();
}
}
KeyCode::Down | KeyCode::Char('j') => {
if self.focus_hosts {
// Move blue selector bar down when in Hosts tab
let total_hosts = self.available_hosts.len();
self.hosts_widget.select_next(total_hosts);
} else {
// Move service selection down when in Services tab
if let Some(hostname) = self.current_host.clone() {
let total_services = {
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.get_total_services_count()
};
// Move service selection down
if let Some(hostname) = self.current_host.clone() {
let total_services = {
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.select_next(total_services);
}
}
}
KeyCode::Enter => {
if self.focus_hosts {
// Enter key switches to the selected host
let selected_idx = self.hosts_widget.get_selected_index();
if selected_idx < self.available_hosts.len() {
let selected_host = self.available_hosts[selected_idx].clone();
self.switch_to_host(&selected_host);
}
host_widgets.services_widget.get_total_services_count()
};
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.select_next(total_services);
}
}
_ => {}
@@ -424,29 +404,41 @@ impl TuiApp {
}
}
/// Handle mouse click on tab title area
pub fn handle_tab_click(&mut self, x: u16, area: &Rect) {
// Tab title format: "hosts | services"
// Calculate positions relative to area start
let title_start_x = area.x + 1; // +1 for left border
// "hosts | services"
// 0123456789...
let hosts_start = title_start_x;
let hosts_end = hosts_start + 5; // "hosts" is 5 chars
let services_start = hosts_end + 3; // After " | "
let services_end = services_start + 8; // "services" is 8 chars
if x >= hosts_start && x < hosts_end {
// Clicked on "hosts"
self.focus_hosts = true;
} else if x >= services_start && x < services_end {
// Clicked on "services"
self.focus_hosts = false;
/// Cycle to next host (TAB)
///
/// Wraps around to the first host after the last one. Does nothing when
/// no hosts are available. When no current host is set (or it is not in
/// the list), cycling starts from index 0.
fn cycle_next_host(&mut self) {
    let host_count = self.available_hosts.len();
    if host_count == 0 {
        return;
    }
    let current = self
        .current_host
        .as_ref()
        .and_then(|name| self.available_hosts.iter().position(|h| h == name))
        .unwrap_or(0);
    // Modulo wrap: after the last host, jump back to the first.
    let target = self.available_hosts[(current + 1) % host_count].clone();
    self.switch_to_host(&target);
}
/// Cycle to previous host (Shift+TAB)
///
/// Wraps around to the last host when stepping back from the first.
/// Does nothing when no hosts are available. When no current host is set
/// (or it is not in the list), cycling starts from index 0.
fn cycle_previous_host(&mut self) {
    let host_count = self.available_hosts.len();
    if host_count == 0 {
        return;
    }
    let current = self
        .current_host
        .as_ref()
        .and_then(|name| self.available_hosts.iter().position(|h| h == name))
        .unwrap_or(0);
    // checked_sub is None at index 0, which wraps to the last host.
    let target_idx = current.checked_sub(1).unwrap_or(host_count - 1);
    let target = self.available_hosts[target_idx].clone();
    self.switch_to_host(&target);
}
@@ -461,11 +453,6 @@ impl TuiApp {
None
}
/// Get the list of available hosts
pub fn get_available_hosts(&self) -> &Vec<String> {
&self.available_hosts
}
/// Should quit application
pub fn should_quit(&self) -> bool {
self.should_quit
@@ -498,11 +485,11 @@ impl TuiApp {
])
.split(size);
// New layout: left panels | right services (100% height)
// New layout: left panels (hosts + system) | right services (100% height)
let content_chunks = ratatui::layout::Layout::default()
.direction(Direction::Horizontal)
.constraints([
Constraint::Percentage(ThemeLayout::LEFT_PANEL_WIDTH), // Left side: system, backup
Constraint::Percentage(ThemeLayout::LEFT_PANEL_WIDTH), // Left side: hosts, system
Constraint::Percentage(ThemeLayout::RIGHT_PANEL_WIDTH), // Right side: services (100% height)
])
.split(main_chunks[1]); // main_chunks[1] is now the content area (between title and statusbar)
@@ -514,30 +501,33 @@ impl TuiApp {
true // No host selected is considered offline
};
// If host is offline, render wake-up message instead of panels
if current_host_offline {
self.render_offline_host_message(frame, main_chunks[1]);
self.render_btop_title(frame, main_chunks[0], metric_store);
self.render_statusbar(frame, main_chunks[2], metric_store);
return (main_chunks[0], Rect::default(), Rect::default()); // Return title area and empty areas when offline
}
// Calculate hosts panel height dynamically based on available width
let hosts_inner_width = content_chunks[0].width.saturating_sub(2);
let hosts_content_height = HostsWidget::required_height(self.available_hosts.len(), hosts_inner_width);
let hosts_height = hosts_content_height + 2; // Add borders
// Left side: system panel only (full height)
// Left side: hosts panel on top, system panel below
let left_chunks = ratatui::layout::Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Percentage(100)]) // System section takes full height
.constraints([
Constraint::Length(hosts_height), // Hosts panel (compact, dynamic)
Constraint::Min(0), // System panel (rest)
])
.split(content_chunks[0]);
// Render title bar
self.render_btop_title(frame, main_chunks[0], metric_store);
// Render system panel
let system_area = left_chunks[0];
// Render hosts panel on left
self.render_hosts_panel(frame, left_chunks[0], metric_store);
// Render system panel below hosts
let system_area = left_chunks[1];
self.render_system_panel(frame, system_area, metric_store);
// Render right panel with tabs (Services | Hosts)
// Render services panel on right
let services_area = content_chunks[1];
self.render_right_panel_with_tabs(frame, services_area, metric_store);
self.render_services_panel(frame, services_area);
// Render statusbar at the bottom
self.render_statusbar(frame, main_chunks[2], metric_store);
@@ -547,6 +537,11 @@ impl TuiApp {
self.render_popup_menu(frame, popup);
}
// Render offline host popup on top of everything
if current_host_offline {
self.render_offline_popup(frame, size);
}
// Return all areas for mouse event handling
(main_chunks[0], system_area, services_area)
}
@@ -607,14 +602,20 @@ impl TuiApp {
frame.render_widget(title, area);
}
/// Calculate overall status for a host based on its structured data
/// Calculate overall status for a host based on its widget statuses
fn calculate_host_status(&self, hostname: &str, metric_store: &MetricStore) -> Status {
// Check if we have structured data for this host
if let Some(_agent_data) = metric_store.get_agent_data(hostname) {
// Return OK since we have data
Status::Ok
// Check if we have data for this host
if metric_store.get_agent_data(hostname).is_none() {
return Status::Offline;
}
// Get actual statuses from host widgets
if let Some(host_widgets) = self.host_widgets.get(hostname) {
let system_status = host_widgets.system_widget.get_overall_status();
let services_status = host_widgets.services_widget.get_overall_status();
Status::aggregate(&[system_status, services_status])
} else {
Status::Offline
Status::Ok // No widgets yet, but data exists
}
}
@@ -692,7 +693,7 @@ impl TuiApp {
use ratatui::widgets::Paragraph;
// Get current host info
let (hostname_str, host_ip, build_version, agent_version) = if let Some(hostname) = &self.current_host {
let (hostname_str, host_ip, kernel_version, build_version, agent_version) = if let Some(hostname) = &self.current_host {
// Get the connection IP (the IP dashboard uses to connect to the agent)
let ip = if let Some(host_details) = self.config.hosts.get(hostname) {
host_details.get_connection_ip(hostname)
@@ -700,21 +701,22 @@ impl TuiApp {
hostname.clone()
};
// Get build and agent versions from system widget
let (build, agent) = if let Some(host_widgets) = self.host_widgets.get(hostname) {
// Get kernel, build and agent versions from system widget
let (kernel, build, agent) = if let Some(host_widgets) = self.host_widgets.get(hostname) {
let kernel = host_widgets.system_widget.get_kernel_version().unwrap_or("N/A".to_string());
let build = host_widgets.system_widget.get_build_version().unwrap_or("N/A".to_string());
let agent = host_widgets.system_widget.get_agent_version().unwrap_or("N/A".to_string());
(build, agent)
(kernel, build, agent)
} else {
("N/A".to_string(), "N/A".to_string())
("N/A".to_string(), "N/A".to_string(), "N/A".to_string())
};
(hostname.clone(), ip, build, agent)
(hostname.clone(), ip, kernel, build, agent)
} else {
("None".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string())
("None".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string())
};
let left_text = format!(" Host: {} | {}", hostname_str, host_ip);
let left_text = format!(" Host: {} | {} | {}", hostname_str, host_ip, kernel_version);
let right_text = format!("Build:{} | Agent:{} ", build_version, agent_version);
// Calculate spacing to push right text to the right
@@ -762,79 +764,64 @@ impl TuiApp {
}
/// Render right panel with tabs (hosts | services)
fn render_right_panel_with_tabs(&mut self, frame: &mut Frame, area: Rect, metric_store: &MetricStore) {
use ratatui::style::Modifier;
use ratatui::text::{Line, Span};
/// Render hosts panel
fn render_hosts_panel(&mut self, frame: &mut Frame, area: Rect, metric_store: &MetricStore) {
use ratatui::widgets::{Block, Borders};
// Build tab title with bold styling for active tab (like cm-player)
let hosts_style = if self.focus_hosts {
Style::default().fg(Theme::border_title()).add_modifier(Modifier::BOLD)
} else {
Style::default().fg(Theme::border_title())
};
let services_style = if !self.focus_hosts {
Style::default().fg(Theme::border_title()).add_modifier(Modifier::BOLD)
} else {
Style::default().fg(Theme::border_title())
};
let title = Line::from(vec![
Span::styled("hosts", hosts_style),
Span::raw(" | "),
Span::styled("services", services_style),
]);
// Create ONE block with tab title (like cm-player)
let main_block = Block::default()
let hosts_block = Block::default()
.borders(Borders::ALL)
.title(title.clone())
.style(Style::default().fg(Theme::border()).bg(Theme::background()));
.title("hosts")
.style(Style::default().fg(Theme::border()).bg(Theme::background()))
.title_style(Style::default().fg(Theme::primary_text()));
let inner_area = main_block.inner(area);
frame.render_widget(main_block, area);
let hosts_inner = hosts_block.inner(area);
frame.render_widget(hosts_block, area);
// Render appropriate content based on active tab
if self.focus_hosts {
// Render hosts list (no additional borders)
let localhost = self.localhost.clone();
let current_host = self.current_host.as_deref();
self.hosts_widget.render(
frame,
inner_area,
&self.available_hosts,
&localhost,
current_host,
metric_store,
|hostname, store| {
// Inline calculate_host_status logic
if store.get_agent_data(hostname).is_some() {
Status::Ok
} else {
Status::Offline
}
},
true, // Always focused when visible
);
} else {
// Render services for current host (no additional borders - just content!)
if let Some(hostname) = self.current_host.clone() {
let is_focused = true;
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.render_content(frame, inner_area, is_focused);
}
let localhost = self.localhost.clone();
let current_host = self.current_host.as_deref();
self.hosts_widget.render(
frame,
hosts_inner,
&self.available_hosts,
&localhost,
current_host,
metric_store,
|hostname, store| {
if store.get_agent_data(hostname).is_some() {
Status::Ok
} else {
Status::Offline
}
},
false,
);
}
/// Render services panel
///
/// Draws the bordered "services" block on the right-hand side of the
/// dashboard and, when a host is currently selected, renders that host's
/// services list inside the block's inner area.
fn render_services_panel(&mut self, frame: &mut Frame, area: Rect) {
    use ratatui::widgets::{Block, Borders};
    // Bordered container; title uses the primary text color so it matches
    // the other panels (hosts, system).
    let services_block = Block::default()
        .borders(Borders::ALL)
        .title("services")
        .style(Style::default().fg(Theme::border()).bg(Theme::background()))
        .title_style(Style::default().fg(Theme::primary_text()));
    // Capture the inner area before the block is consumed by render_widget.
    let services_inner = services_block.inner(area);
    frame.render_widget(services_block, area);
    // Clone the hostname so the immutable borrow of self.current_host ends
    // before the mutable widget lookup below.
    if let Some(hostname) = self.current_host.clone() {
        let host_widgets = self.get_or_create_host_widgets(&hostname);
        // `true` = render as focused (services panel always has focus now).
        host_widgets.services_widget.render_content(frame, services_inner, true);
    }
}
/// Render offline host message with wake-up option
fn render_offline_host_message(&self, frame: &mut Frame, area: Rect) {
use ratatui::layout::Alignment;
/// Render offline host popup centered on screen
fn render_offline_popup(&self, frame: &mut Frame, screen: Rect) {
use ratatui::style::Modifier;
use ratatui::text::{Line, Span};
use ratatui::widgets::{Block, Borders, Paragraph};
use ratatui::widgets::{Block, Borders, Clear, Paragraph};
// Get hostname for message
let hostname = self.current_host.as_ref()
@@ -851,7 +838,7 @@ impl TuiApp {
let mut lines = vec![
Line::from(Span::styled(
format!("Host '{}' is offline", hostname),
Style::default().fg(Theme::muted_text()).add_modifier(Modifier::BOLD),
Style::default().fg(Theme::status_color(Status::Offline)).add_modifier(Modifier::BOLD),
)),
Line::from(""),
];
@@ -859,44 +846,38 @@ impl TuiApp {
if has_mac {
lines.push(Line::from(Span::styled(
"Press 'w' to wake up host",
Style::default().fg(Theme::primary_text()).add_modifier(Modifier::BOLD),
Style::default().fg(Theme::primary_text()),
)));
} else {
lines.push(Line::from(Span::styled(
"No MAC address configured - cannot wake up",
"No MAC address configured",
Style::default().fg(Theme::muted_text()),
)));
}
// Create centered message
// Calculate popup size and center it
let popup_width = 32u16;
let popup_height = 5u16;
let x = screen.width.saturating_sub(popup_width) / 2;
let y = screen.height.saturating_sub(popup_height) / 2;
let popup_area = Rect {
x,
y,
width: popup_width,
height: popup_height,
};
// Render popup with border
let message = Paragraph::new(lines)
.block(Block::default()
.borders(Borders::ALL)
.border_style(Style::default().fg(Theme::muted_text()))
.title(" Offline Host ")
.title_style(Style::default().fg(Theme::muted_text()).add_modifier(Modifier::BOLD)))
.style(Style::default().bg(Theme::background()).fg(Theme::primary_text()))
.alignment(Alignment::Center);
// Center the message in the available area
let popup_area = ratatui::layout::Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Percentage(40),
Constraint::Length(6),
Constraint::Percentage(40),
])
.split(area)[1];
let popup_area = ratatui::layout::Layout::default()
.direction(Direction::Horizontal)
.constraints([
Constraint::Percentage(25),
Constraint::Percentage(50),
Constraint::Percentage(25),
])
.split(popup_area)[1];
.border_style(Style::default().fg(Theme::status_color(Status::Offline)))
.title(" Offline ")
.title_style(Style::default().fg(Theme::status_color(Status::Offline)).add_modifier(Modifier::BOLD)))
.style(Style::default().bg(Theme::background()).fg(Theme::primary_text()));
frame.render_widget(Clear, popup_area);
frame.render_widget(message, popup_area);
}

View File

@@ -282,19 +282,14 @@ impl StatusIcons {
}
impl Components {
/// Standard widget block with title using primary text color for title
///
/// Builds a bordered block with the theme's border/background colors and
/// the title rendered in the primary text color.
pub fn widget_block(title: &str) -> Block<'_> {
    // Only one `.title_style` call: a second call would silently override
    // the first (the earlier duplicated builder call was dead code).
    Block::default()
        .title(title)
        .borders(Borders::ALL)
        .style(Style::default().fg(Theme::border()).bg(Theme::background()))
        .title_style(Style::default().fg(Theme::primary_text()))
}
}
impl Typography {
@@ -307,10 +302,10 @@ impl Typography {
.add_modifier(Modifier::BOLD)
}
/// Secondary content text
/// Secondary content text (metrics without status)
pub fn secondary() -> Style {
Style::default()
.fg(Theme::secondary_text())
.fg(Theme::highlight())
.bg(Theme::background())
}

View File

@@ -2,7 +2,6 @@ use ratatui::{
layout::Rect,
style::{Modifier, Style},
text::{Line, Span},
widgets::{List, ListItem},
Frame,
};
@@ -30,68 +29,24 @@ impl HostsWidget {
}
}
/// Move selection up
///
/// Decrements the selected index (saturating at 0) and keeps the
/// selection visible by auto-scrolling the viewport.
pub fn select_previous(&mut self) {
    // checked_sub is None at index 0, so the guard and decrement collapse
    // into one step.
    if let Some(new_index) = self.selected_index.checked_sub(1) {
        self.selected_index = new_index;
        self.ensure_selected_visible();
    }
}
/// Move selection down
///
/// Increments the selected index up to the last host and keeps the
/// selection visible by auto-scrolling the viewport. No-op when the list
/// is empty or the selection is already on the last entry.
pub fn select_next(&mut self, total_hosts: usize) {
    let last_index = total_hosts.saturating_sub(1);
    if total_hosts > 0 && self.selected_index < last_index {
        self.selected_index += 1;
        self.ensure_selected_visible();
    }
}
/// Ensure selected item is visible in viewport (auto-scroll)
///
/// Adjusts `scroll_offset` so that `selected_index` falls within the
/// viewport. Requires `last_viewport_height` to have been set by a prior
/// render; otherwise the method is a no-op.
fn ensure_selected_visible(&mut self) {
    if self.last_viewport_height == 0 {
        return; // Can't calculate without viewport height
    }
    let viewport_height = self.last_viewport_height;
    // If selection is above viewport, scroll up to show it
    if self.selected_index < self.scroll_offset {
        self.scroll_offset = self.selected_index;
    }
    // If selection is below viewport, scroll down to show it
    // (offset is set so the selected row lands on the last visible line)
    if self.selected_index >= self.scroll_offset + viewport_height {
        self.scroll_offset = self.selected_index.saturating_sub(viewport_height.saturating_sub(1));
    }
}
/// Scroll down manually
pub fn scroll_down(&mut self, total_hosts: usize) {
if self.last_viewport_height == 0 {
return;
}
let viewport_height = self.last_viewport_height;
let max_scroll = total_hosts.saturating_sub(viewport_height);
if self.scroll_offset < max_scroll {
self.scroll_offset += 1;
if self.selected_index < self.scroll_offset {
self.scroll_offset = self.selected_index;
}
if self.selected_index >= self.scroll_offset + viewport_height {
self.scroll_offset = self.selected_index.saturating_sub(viewport_height.saturating_sub(1));
}
}
/// Scroll up manually
///
/// Moves the viewport up one row, saturating at the top of the list.
pub fn scroll_up(&mut self) {
    // saturating_sub replaces the explicit `> 0` guard: at offset 0 this
    // is a no-op, otherwise it decrements by one.
    self.scroll_offset = self.scroll_offset.saturating_sub(1);
}
/// Get the currently selected host index
///
/// Returns the zero-based index into the hosts list that the selection
/// bar currently points at.
pub fn get_selected_index(&self) -> usize {
    self.selected_index
}
/// Set selected index (used when switching to host via mouse)
/// Set selected index (used when switching hosts via TAB)
pub fn set_selected_index(&mut self, index: usize, total_hosts: usize) {
if index < total_hosts {
self.selected_index = index;
@@ -99,12 +54,19 @@ impl HostsWidget {
}
}
/// Convert y coordinate to host index (accounting for scroll)
pub fn y_to_host_index(&self, relative_y: usize) -> usize {
self.scroll_offset + relative_y
/// Calculate the required height for hosts panel based on host count and available width
///
/// Hosts are laid out in fixed-width columns; the panel needs one row per
/// host within each column. Always returns at least 1 row, including for
/// an empty host list.
pub fn required_height(num_hosts: usize, available_width: u16) -> u16 {
    if num_hosts == 0 {
        return 1;
    }
    // Estimated column width: icon(2) + arrow(2) + max_hostname(~12) + padding(2).
    const COL_WIDTH: u16 = 18;
    // At least one column, even when the panel is narrower than COL_WIDTH.
    let columns = usize::from((available_width / COL_WIDTH).max(1));
    // Ceiling division: rows needed to spread all hosts across the columns.
    let rows = (num_hosts + columns - 1) / columns;
    rows.max(1) as u16
}
/// Render hosts list with selector bar
/// Render hosts list in dynamic columns based on available width
pub fn render<F>(
&mut self,
frame: &mut Frame,
@@ -114,116 +76,98 @@ impl HostsWidget {
current_host: Option<&str>,
metric_store: &MetricStore,
mut calculate_host_status: F,
is_focused: bool,
_is_focused: bool,
) where F: FnMut(&str, &MetricStore) -> Status {
use crate::ui::theme::{StatusIcons, Typography};
use ratatui::widgets::Paragraph;
use crate::ui::theme::StatusIcons;
use ratatui::layout::{Constraint, Direction, Layout};
// Split area for header and list
let chunks = ratatui::layout::Layout::default()
.direction(ratatui::layout::Direction::Vertical)
.constraints([
ratatui::layout::Constraint::Length(1), // Header
ratatui::layout::Constraint::Min(0), // List
])
.split(area);
// Render header
let header = Paragraph::new("Hosts:").style(Typography::muted());
frame.render_widget(header, chunks[0]);
// Store viewport height for scroll calculations (minus header)
self.last_viewport_height = chunks[1].height as usize;
// Validate scroll offset
if self.scroll_offset >= available_hosts.len() && !available_hosts.is_empty() {
self.scroll_offset = available_hosts.len().saturating_sub(1);
if available_hosts.is_empty() {
return;
}
// Create list items for visible hosts
let items: Vec<ListItem> = available_hosts
.iter()
.enumerate()
.skip(self.scroll_offset)
.take(chunks[1].height as usize)
.map(|(idx, hostname)| {
let host_status = calculate_host_status(hostname, metric_store);
let status_icon = StatusIcons::get_icon(host_status);
let status_color = Theme::status_color(host_status);
// Store viewport height for scroll calculations
self.last_viewport_height = area.height as usize;
// Check if this is the selected host (for blue selector bar)
let is_selected = is_focused && idx == self.selected_index;
// Calculate column width and number of columns that fit
let col_width = 18u16;
let num_columns = (area.width / col_width).max(1) as usize;
let rows_per_column = (available_hosts.len() + num_columns - 1) / num_columns;
// Check if this is the current (active) host
let is_current = current_host == Some(hostname.as_str());
// Check if this is localhost
let is_localhost = hostname == localhost;
// Build the line with icon and hostname
let mut spans = vec![Span::styled(
format!("{} ", status_icon),
if is_selected {
Style::default()
.fg(Theme::background())
.add_modifier(Modifier::BOLD)
} else {
Style::default().fg(status_color)
},
)];
// Add arrow indicator if this is the current host (like cm-player)
if is_current {
spans.push(Span::styled(
"",
if is_selected {
Style::default()
.fg(Theme::background())
.add_modifier(Modifier::BOLD)
} else {
Style::default()
.fg(Theme::primary_text())
.add_modifier(Modifier::BOLD)
},
));
}
// Add hostname with appropriate styling
let hostname_text = if is_localhost {
format!("{} (localhost)", hostname)
} else {
hostname.clone()
};
spans.push(Span::styled(
hostname_text,
if is_selected {
Style::default()
.fg(Theme::background())
.add_modifier(Modifier::BOLD)
} else if is_current {
Style::default()
.fg(Theme::primary_text())
.add_modifier(Modifier::BOLD)
} else {
Style::default().fg(Theme::primary_text())
},
));
let line = Line::from(spans);
// Apply blue background to selected row
let base_style = if is_selected {
Style::default().bg(Theme::highlight()) // Blue background
} else {
Style::default().bg(Theme::background())
};
ListItem::new(line).style(base_style)
})
// Create column constraints
let constraints: Vec<Constraint> = (0..num_columns)
.map(|_| Constraint::Ratio(1, num_columns as u32))
.collect();
let hosts_list = List::new(items);
frame.render_widget(hosts_list, chunks[1]);
let columns = Layout::default()
.direction(Direction::Horizontal)
.constraints(constraints)
.split(area);
// Build host line helper
let mut build_host_line = |hostname: &str| -> Line {
let host_status = calculate_host_status(hostname, metric_store);
let status_icon = StatusIcons::get_icon(host_status);
let status_color = Theme::status_color(host_status);
let is_current = current_host == Some(hostname);
let is_localhost = hostname == localhost;
let mut spans = vec![Span::styled(
format!("{} ", status_icon),
Style::default().fg(status_color),
)];
if is_current {
spans.push(Span::styled(
"",
Style::default()
.fg(Theme::primary_text())
.add_modifier(Modifier::BOLD),
));
}
let hostname_display = if is_localhost {
format!("{}*", hostname)
} else {
hostname.to_string()
};
spans.push(Span::styled(
hostname_display,
if is_current {
Style::default()
.fg(Theme::primary_text())
.add_modifier(Modifier::BOLD)
} else {
Style::default().fg(Theme::primary_text())
},
));
Line::from(spans)
};
// Render each column
for col_idx in 0..num_columns {
let start = col_idx * rows_per_column;
let hosts_in_col: Vec<Line> = available_hosts
.iter()
.skip(start)
.take(rows_per_column)
.map(|hostname| build_host_line(hostname))
.collect();
if !hosts_in_col.is_empty() {
let text = ratatui::text::Text::from(hosts_in_col);
let para = ratatui::widgets::Paragraph::new(text);
frame.render_widget(para, columns[col_idx]);
}
}
// Update selected index to match current host
if let Some(current) = current_host {
if let Some(idx) = available_hosts.iter().position(|h| h == current) {
self.selected_index = idx;
}
}
}
}

View File

@@ -8,7 +8,7 @@ use ratatui::{
use std::collections::HashMap;
use tracing::debug;
use crate::ui::theme::{Components, StatusIcons, Theme, Typography};
use crate::ui::theme::{StatusIcons, Theme, Typography};
use ratatui::style::Style;
/// Column visibility configuration based on terminal width
@@ -120,6 +120,11 @@ impl ServicesWidget {
}
}
/// Get overall services status
///
/// Returns the widget's stored aggregate `status` field as-is.
pub fn get_overall_status(&self) -> Status {
    self.status
}
/// Extract service name and determine if it's a parent or sub-service
#[allow(dead_code)]
fn extract_service_info(metric_name: &str) -> Option<(String, Option<String>)> {
@@ -150,9 +155,10 @@ impl ServicesWidget {
/// Format parent service line - returns text without icon for span formatting
fn format_parent_service_line(&self, name: &str, info: &ServiceInfo, columns: ColumnVisibility) -> String {
// Account for icon prefix "● " (2 chars) in name column width
let name_width = ColumnVisibility::NAME_WIDTH.saturating_sub(2) as usize;
// Truncate long service names to fit layout
// NAME_WIDTH - 3 chars for "..." = max displayable chars
let max_name_len = (ColumnVisibility::NAME_WIDTH - 3) as usize;
let max_name_len = name_width.saturating_sub(3); // -3 for "..."
let short_name = if name.len() > max_name_len {
format!("{}...", &name[..max_name_len.saturating_sub(3)])
} else {
@@ -208,7 +214,7 @@ impl ServicesWidget {
// Build format string based on column visibility
let mut parts = Vec::new();
if columns.show_name {
parts.push(format!("{:<width$}", short_name, width = ColumnVisibility::NAME_WIDTH as usize));
parts.push(format!("{:<width$}", short_name, width = name_width));
}
if columns.show_status {
parts.push(format!("{:<width$}", status_str, width = ColumnVisibility::STATUS_WIDTH as usize));
@@ -282,7 +288,7 @@ impl ServicesWidget {
let tree_symbol = if is_last { "└─" } else { "├─" };
if info.widget_status == Status::Info {
// Informational data - no status icon, show metrics if available
// Informational data - no status icon, use blue color
let mut spans = vec![
// Indentation and tree prefix
ratatui::text::Span::styled(
@@ -293,7 +299,7 @@ impl ServicesWidget {
ratatui::text::Span::styled(
short_name,
Style::default()
.fg(Theme::secondary_text())
.fg(Theme::highlight())
.bg(Theme::background()),
),
];
@@ -303,13 +309,14 @@ impl ServicesWidget {
spans.push(ratatui::text::Span::styled(
status_str,
Style::default()
.fg(Theme::secondary_text())
.fg(Theme::highlight())
.bg(Theme::background()),
));
}
spans
} else {
// Sub-services with status - use secondary_text
vec![
// Indentation and tree prefix
ratatui::text::Span::styled(
@@ -710,59 +717,7 @@ impl ServicesWidget {
}
impl ServicesWidget {
/// Render with focus
pub fn render(&mut self, frame: &mut Frame, area: Rect, is_focused: bool) {
self.render_with_title(frame, area, is_focused, "services");
}
pub fn render_with_title(&mut self, frame: &mut Frame, area: Rect, is_focused: bool, title: &str) {
let services_block = Components::widget_block(title);
let inner_area = services_block.inner(area);
frame.render_widget(services_block, area);
let content_chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(1), Constraint::Min(0)])
.split(inner_area);
// Determine which columns to show based on available width
let columns = ColumnVisibility::from_width(inner_area.width);
// Build header based on visible columns
let mut header_parts = Vec::new();
if columns.show_name {
header_parts.push(format!("{:<width$}", "Service:", width = ColumnVisibility::NAME_WIDTH as usize));
}
if columns.show_status {
header_parts.push(format!("{:<width$}", "Status:", width = ColumnVisibility::STATUS_WIDTH as usize));
}
if columns.show_ram {
header_parts.push(format!("{:<width$}", "RAM:", width = ColumnVisibility::RAM_WIDTH as usize));
}
if columns.show_uptime {
header_parts.push(format!("{:<width$}", "Uptime:", width = ColumnVisibility::UPTIME_WIDTH as usize));
}
if columns.show_restarts {
header_parts.push(format!("{:<width$}", "↻:", width = ColumnVisibility::RESTARTS_WIDTH as usize));
}
let header = header_parts.join(" ");
let header_para = Paragraph::new(header).style(Typography::muted());
frame.render_widget(header_para, content_chunks[0]);
// Check if we have any services to display
if self.parent_services.is_empty() && self.sub_services.is_empty() {
let empty_text = Paragraph::new("No process data").style(Typography::muted());
frame.render_widget(empty_text, content_chunks[1]);
return;
}
// Render the services list
self.render_services(frame, content_chunks[1], is_focused, columns);
}
/// Render services content WITHOUT block (for tab mode like cm-player)
/// Render services content WITHOUT block (for use inside panel)
pub fn render_content(&mut self, frame: &mut Frame, area: Rect, is_focused: bool) {
let content_chunks = Layout::default()
.direction(Direction::Vertical)
@@ -772,7 +727,7 @@ impl ServicesWidget {
// Determine which columns to show based on available width
let columns = ColumnVisibility::from_width(area.width);
// Build header based on visible columns
// Build header - columns must align with service row format
let mut header_parts = Vec::new();
if columns.show_name {
header_parts.push(format!("{:<width$}", "Service:", width = ColumnVisibility::NAME_WIDTH as usize));

View File

@@ -15,6 +15,7 @@ pub struct SystemWidget {
// NixOS information
nixos_build: Option<String>,
agent_hash: Option<String>,
kernel_version: Option<String>,
// Network interfaces
network_interfaces: Vec<cm_dashboard_shared::NetworkInterfaceData>,
@@ -94,6 +95,7 @@ impl SystemWidget {
Self {
nixos_build: None,
agent_hash: None,
kernel_version: None,
network_interfaces: Vec::new(),
cpu_load_1min: None,
cpu_load_5min: None,
@@ -171,6 +173,41 @@ impl SystemWidget {
pub fn get_agent_version(&self) -> Option<String> {
self.agent_hash.clone()
}
/// Get the kernel version
///
/// Returns a clone of the kernel version reported by the agent, or `None`
/// when no kernel version has been received yet.
pub fn get_kernel_version(&self) -> Option<String> {
    self.kernel_version.clone()
}
/// Get overall status by aggregating all component statuses
///
/// Returns `Status::Offline` when no data has been received. Otherwise
/// aggregates CPU, memory, backup, every storage pool (including its
/// regular, data, and parity drives), and every backup repository.
pub fn get_overall_status(&self) -> Status {
    if !self.has_data {
        return Status::Offline;
    }
    // Seed with the top-level component statuses, then fold in per-pool
    // and per-repository statuses.
    let mut statuses = vec![self.cpu_status, self.memory_status, self.backup_status];
    for pool in &self.storage_pools {
        statuses.push(pool.status);
        statuses.extend(pool.drives.iter().map(|drive| drive.status));
        statuses.extend(pool.data_drives.iter().map(|drive| drive.status));
        statuses.extend(pool.parity_drives.iter().map(|drive| drive.status));
    }
    statuses.extend(self.backup_repositories.iter().map(|repo| repo.status));
    Status::aggregate(&statuses)
}
}
use super::Widget;
@@ -181,10 +218,13 @@ impl Widget for SystemWidget {
// Extract agent version
self.agent_hash = Some(agent_data.agent_version.clone());
// Extract build version
self.nixos_build = agent_data.build_version.clone();
// Extract kernel version
self.kernel_version = agent_data.kernel_version.clone();
// Extract network interfaces
self.network_interfaces = agent_data.system.network.interfaces.clone();

View File

@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-shared"
version = "0.1.275"
version = "0.1.280"
edition = "2021"
[dependencies]

View File

@@ -7,6 +7,8 @@ pub struct AgentData {
pub hostname: String,
pub agent_version: String,
pub build_version: Option<String>,
#[serde(default)]
pub kernel_version: Option<String>,
pub timestamp: u64,
pub system: SystemData,
pub services: Vec<ServiceData>,
@@ -203,6 +205,7 @@ impl AgentData {
hostname,
agent_version,
build_version: None,
kernel_version: None,
timestamp: chrono::Utc::now().timestamp() as u64,
system: SystemData {
network: NetworkData {