Compare commits

..

9 Commits

Author SHA1 Message Date
41a7ee660a Add kernel version to statusbar
All checks were successful
Build and Release / build-and-release (push) Successful in 1m37s
2025-12-15 16:26:54 +01:00
76931f0457 Fix notification system with proper rate limiting and aggregation
All checks were successful
Build and Release / build-and-release (push) Successful in 1m51s
- Add rate limiting using rate_limit_minutes config (was ignored)
- Add aggregation using aggregation_interval_seconds config (was ignored)
- Use smtp_host and smtp_port from config (was hardcoded localhost:25)
- Add trigger_on_warnings and trigger_on_failures config options
- Add recovery_requires_all_ok and suppress_individual_recoveries
- Use check_interval_seconds from config (was hardcoded 30s)
- Expand status tracking to all components (drives, pools, services, backup)
- Move notification checks from every collection to dedicated interval
- Separate alert and recovery notifications with proper email formatting
- Only notify on failed services (Critical), not inactive
2025-12-15 13:44:06 +01:00
516d159d2f Reorganize dashboard UI with tabbed layout and improved status bars
All checks were successful
Build and Release / build-and-release (push) Successful in 1m36s
Add tabbed navigation in right panel with "hosts | services" tabs to
better utilize vertical space. Hosts tab displays all available hosts
with blue selector bar (j/k navigation, Enter to switch). Services tab
shows services for currently selected host.

Status bar improvements:
- Move dashboard IP to top-right status bar (non-bold)
- Restructure bottom status bar with right-aligned build/agent versions
- Fix overflow crashes using saturating_sub for small terminal windows

Additional changes:
- Add HostsWidget with scroll handling and mouse click support
- Bold styling for currently active host
- Create render_content() methods to avoid nested blocks in tabs
- Display SMB share read/write mode in share listings
2025-12-14 10:03:33 +01:00
1656f20e96 Fix NFS export parsing to handle both inline and continuation formats
All checks were successful
Build and Release / build-and-release (push) Successful in 1m20s
Support exportfs output where network info appears on same line as path
(e.g. /srv/media/tv 192.168.0.0/16(...)) in addition to continuation
line format. Ensures all NFS exports are detected correctly.
2025-12-11 11:10:59 +01:00
dcd350ec2c Add NFS export permissions and network display, fix SMB service detection
All checks were successful
Build and Release / build-and-release (push) Successful in 1m13s
Display NFS exports with ro/rw permissions and network ranges for better
visibility into share configuration. Support both smbd and samba-smbd
service names for SMB share detection across different distributions.
2025-12-11 10:59:00 +01:00
a34b095857 Simplify NFS export options display
All checks were successful
Build and Release / build-and-release (push) Successful in 1m14s
Filter NFS export options to show only key settings (rw/ro, sync/async)
instead of verbose option strings. Improves readability while maintaining
essential information about export configuration.
2025-12-11 10:26:27 +01:00
7362464b46 Deduplicate NFS exports and remove client info
All checks were successful
Build and Release / build-and-release (push) Successful in 1m48s
Fix NFS export display to show each export path only once instead of
once per client. Use HashMap to deduplicate by path and sort results
alphabetically. Remove IP addresses and client specifications from
display, showing only export paths with their options.

Prevents duplicate entries when a single export is shared with multiple
clients or networks.
2025-12-11 10:06:59 +01:00
c8b79576fa Add NFS/SMB share monitoring and increase disk timeouts
All checks were successful
Build and Release / build-and-release (push) Successful in 1m36s
Add sub-service display for NFS exports and SMB shares under their
respective services. NFS shows active exports from exportfs with
options. SMB shows configured shares from smb.conf with paths.

Increase disk operation timeouts to handle multiple drives:
- lsblk: 2s → 10s
- smartctl: 3s → 15s (critical for multi-drive systems)
- df: 2s → 10s

Prevents timeouts when querying SMART data from systems with multiple
drives (3+ data drives plus parity).
2025-12-11 09:30:06 +01:00
f53df5440b Remove 'Repo' prefix from backup header display
All checks were successful
Build and Release / build-and-release (push) Successful in 1m10s
Simplify backup section header by removing the 'Repo' prefix and
displaying only the timestamp with status icon. Repository details
are still shown as sub-items below the timestamp.
2025-12-09 20:32:05 +01:00
17 changed files with 1319 additions and 433 deletions

6
Cargo.lock generated
View File

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
[[package]]
name = "cm-dashboard"
version = "0.1.268"
version = "0.1.278"
dependencies = [
"anyhow",
"chrono",
@@ -301,7 +301,7 @@ dependencies = [
[[package]]
name = "cm-dashboard-agent"
version = "0.1.268"
version = "0.1.278"
dependencies = [
"anyhow",
"async-trait",
@@ -325,7 +325,7 @@ dependencies = [
[[package]]
name = "cm-dashboard-shared"
version = "0.1.268"
version = "0.1.278"
dependencies = [
"chrono",
"serde",

View File

@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-agent"
version = "0.1.269"
version = "0.1.278"
edition = "2021"
[dependencies]

View File

@@ -1,5 +1,6 @@
use anyhow::Result;
use gethostname::gethostname;
use std::collections::HashMap;
use std::time::{Duration, Instant};
use tokio::time::interval;
use tracing::{debug, error, info};
@@ -28,7 +29,6 @@ struct TimedCollector {
}
pub struct Agent {
hostname: String,
config: AgentConfig,
zmq_handler: ZmqHandler,
collectors: Vec<TimedCollector>,
@@ -38,12 +38,40 @@ pub struct Agent {
}
/// Track system component status for change detection
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Default)]
struct SystemStatus {
// CPU
cpu_load_status: cm_dashboard_shared::Status,
cpu_temperature_status: cm_dashboard_shared::Status,
// Memory
memory_usage_status: cm_dashboard_shared::Status,
// Add more as needed
// Storage - keyed by drive name or pool name
drive_statuses: HashMap<String, DriveStatus>,
pool_statuses: HashMap<String, PoolStatus>,
// Services - keyed by service name
service_statuses: HashMap<String, cm_dashboard_shared::Status>,
// Backup
backup_status: cm_dashboard_shared::Status,
}
#[derive(Debug, Clone, Default)]
struct DriveStatus {
temperature_status: cm_dashboard_shared::Status,
health_status: cm_dashboard_shared::Status,
filesystem_statuses: HashMap<String, cm_dashboard_shared::Status>,
}
#[derive(Debug, Clone, Default)]
struct PoolStatus {
health_status: cm_dashboard_shared::Status,
usage_status: cm_dashboard_shared::Status,
drive_statuses: HashMap<String, PoolDriveStatus>,
}
#[derive(Debug, Clone, Default)]
struct PoolDriveStatus {
health_status: cm_dashboard_shared::Status,
temperature_status: cm_dashboard_shared::Status,
}
impl Agent {
@@ -148,7 +176,6 @@ impl Agent {
let cached_agent_data = AgentData::new(hostname.clone(), env!("CARGO_PKG_VERSION").to_string());
Ok(Self {
hostname,
config,
zmq_handler,
collectors,
@@ -171,7 +198,9 @@ impl Agent {
let mut transmission_interval = interval(Duration::from_secs(
self.config.zmq.transmission_interval_seconds,
));
let mut notification_interval = interval(Duration::from_secs(30)); // Check notifications every 30s
let mut notification_interval = interval(Duration::from_secs(
self.config.notifications.check_interval_seconds,
));
// Skip initial ticks to avoid immediate execution
transmission_interval.tick().await;
@@ -185,9 +214,21 @@ impl Agent {
}
}
_ = notification_interval.tick() => {
// Process any pending notifications
// NOTE: With structured data, we might need to implement status tracking differently
// For now, we skip this until status evaluation is migrated
// Check for status changes and queue notifications
let agent_data_snapshot = self.cached_agent_data.clone();
if let Err(e) = self.check_status_changes_and_notify(&agent_data_snapshot).await {
error!("Failed to check status changes: {}", e);
}
// Check if all components recovered and flush pending recoveries
self.notification_manager.flush_recoveries_if_all_ok();
// Flush any pending aggregated notifications
if self.notification_manager.should_flush() {
if let Err(e) = self.notification_manager.flush_notifications().await {
error!("Failed to flush notifications: {}", e);
}
}
}
_ = &mut shutdown_rx => {
info!("Shutdown signal received, stopping agent loop");
@@ -235,16 +276,8 @@ impl Agent {
.unwrap()
.as_secs();
// Clone for notification check (to avoid borrow issues)
let agent_data_snapshot = self.cached_agent_data.clone();
// Check for status changes and send notifications
if let Err(e) = self.check_status_changes_and_notify(&agent_data_snapshot).await {
error!("Failed to check status changes: {}", e);
}
// Broadcast the cached structured data via ZMQ
if let Err(e) = self.zmq_handler.publish_agent_data(&agent_data_snapshot).await {
if let Err(e) = self.zmq_handler.publish_agent_data(&self.cached_agent_data).await {
error!("Failed to broadcast agent data: {}", e);
} else {
debug!("Successfully broadcast structured agent data");
@@ -253,38 +286,182 @@ impl Agent {
Ok(())
}
/// Check for status changes and send notifications
/// Check for status changes and queue notifications
async fn check_status_changes_and_notify(&mut self, agent_data: &AgentData) -> Result<()> {
// Extract current status
let current_status = SystemStatus {
cpu_load_status: agent_data.system.cpu.load_status.clone(),
cpu_temperature_status: agent_data.system.cpu.temperature_status.clone(),
memory_usage_status: agent_data.system.memory.usage_status.clone(),
// Build current status from agent data
let mut current_status = SystemStatus {
cpu_load_status: agent_data.system.cpu.load_status,
cpu_temperature_status: agent_data.system.cpu.temperature_status,
memory_usage_status: agent_data.system.memory.usage_status,
backup_status: agent_data.backup.backup_status,
..Default::default()
};
// Check for status changes
if let Some(previous) = self.previous_status.clone() {
self.check_and_notify_status_change(
// Collect drive statuses
for drive in &agent_data.system.storage.drives {
let mut fs_statuses = HashMap::new();
for fs in &drive.filesystems {
fs_statuses.insert(fs.mount.clone(), fs.usage_status);
}
current_status.drive_statuses.insert(
drive.name.clone(),
DriveStatus {
temperature_status: drive.temperature_status,
health_status: drive.health_status,
filesystem_statuses: fs_statuses,
},
);
}
// Collect pool statuses
for pool in &agent_data.system.storage.pools {
let mut pool_drive_statuses = HashMap::new();
for drive in pool.data_drives.iter().chain(pool.parity_drives.iter()) {
pool_drive_statuses.insert(
drive.name.clone(),
PoolDriveStatus {
health_status: drive.health_status,
temperature_status: drive.temperature_status,
},
);
}
current_status.pool_statuses.insert(
pool.name.clone(),
PoolStatus {
health_status: pool.health_status,
usage_status: pool.usage_status,
drive_statuses: pool_drive_statuses,
},
);
}
// Collect service statuses (only for non-user-stopped services)
for service in &agent_data.services {
if !service.user_stopped {
current_status
.service_statuses
.insert(service.name.clone(), service.service_status);
}
}
// Clone previous status to avoid borrow issues
let previous = self.previous_status.clone();
// Compare with previous status and queue notifications
if let Some(previous) = previous {
// CPU
self.queue_status_notification(
"CPU Load",
&previous.cpu_load_status,
&current_status.cpu_load_status,
format!("CPU load: {:.1}", agent_data.system.cpu.load_1min)
).await?;
self.check_and_notify_status_change(
"CPU Temperature",
&format!("Load: {:.2}", agent_data.system.cpu.load_1min),
);
self.queue_status_notification(
"CPU Temperature",
&previous.cpu_temperature_status,
&current_status.cpu_temperature_status,
format!("CPU temperature: {}°C",
agent_data.system.cpu.temperature_celsius.unwrap_or(0.0) as i32)
).await?;
&format!(
"Temperature: {}°C",
agent_data.system.cpu.temperature_celsius.unwrap_or(0.0) as i32
),
);
self.check_and_notify_status_change(
"Memory Usage",
&previous.memory_usage_status,
// Memory
self.queue_status_notification(
"Memory",
&previous.memory_usage_status,
&current_status.memory_usage_status,
format!("Memory usage: {:.1}%", agent_data.system.memory.usage_percent)
).await?;
&format!("Usage: {:.1}%", agent_data.system.memory.usage_percent),
);
// Backup
self.queue_status_notification(
"Backup",
&previous.backup_status,
&current_status.backup_status,
&format!(
"Last backup: {}",
agent_data.backup.last_backup_time.as_deref().unwrap_or("unknown")
),
);
// Drives
for (name, current_drive) in &current_status.drive_statuses {
if let Some(prev_drive) = previous.drive_statuses.get(name) {
self.queue_status_notification(
&format!("Drive {} Health", name),
&prev_drive.health_status,
&current_drive.health_status,
"Health check failed",
);
self.queue_status_notification(
&format!("Drive {} Temperature", name),
&prev_drive.temperature_status,
&current_drive.temperature_status,
"Temperature threshold exceeded",
);
// Filesystem usage
for (mount, current_fs_status) in &current_drive.filesystem_statuses {
if let Some(prev_fs_status) = prev_drive.filesystem_statuses.get(mount) {
self.queue_status_notification(
&format!("Filesystem {}", mount),
prev_fs_status,
current_fs_status,
"Disk usage threshold exceeded",
);
}
}
}
}
// Pools
for (name, current_pool) in &current_status.pool_statuses {
if let Some(prev_pool) = previous.pool_statuses.get(name) {
self.queue_status_notification(
&format!("Pool {} Health", name),
&prev_pool.health_status,
&current_pool.health_status,
"Pool health degraded",
);
self.queue_status_notification(
&format!("Pool {} Usage", name),
&prev_pool.usage_status,
&current_pool.usage_status,
"Pool usage threshold exceeded",
);
// Pool drives
for (drive_name, current_pd) in &current_pool.drive_statuses {
if let Some(prev_pd) = prev_pool.drive_statuses.get(drive_name) {
self.queue_status_notification(
&format!("Pool {} Drive {} Health", name, drive_name),
&prev_pd.health_status,
&current_pd.health_status,
"Pool drive health degraded",
);
self.queue_status_notification(
&format!("Pool {} Drive {} Temperature", name, drive_name),
&prev_pd.temperature_status,
&current_pd.temperature_status,
"Pool drive temperature exceeded",
);
}
}
}
}
// Services
for (name, current_svc_status) in &current_status.service_statuses {
if let Some(prev_svc_status) = previous.service_statuses.get(name) {
self.queue_status_notification(
&format!("Service {}", name),
prev_svc_status,
current_svc_status,
"Service status changed",
);
}
}
}
// Store current status for next comparison
@@ -292,43 +469,44 @@ impl Agent {
Ok(())
}
/// Check individual status change and send notification if degraded
async fn check_and_notify_status_change(
/// Queue a notification based on status change
fn queue_status_notification(
&mut self,
component: &str,
previous: &cm_dashboard_shared::Status,
current: &cm_dashboard_shared::Status,
details: String
) -> Result<()> {
details: &str,
) {
use cm_dashboard_shared::Status;
// Only notify on status degradation (OK → Warning/Critical, Warning → Critical)
let should_notify = match (previous, current) {
(Status::Ok, Status::Warning) => true,
(Status::Ok, Status::Critical) => true,
(Status::Warning, Status::Critical) => true,
_ => false,
};
// Check for degradation (alert)
let is_alert = matches!(
(previous, current),
(Status::Ok, Status::Warning)
| (Status::Ok, Status::Critical)
| (Status::Warning, Status::Critical)
);
if should_notify {
let subject = format!("{} {} Alert", self.hostname, component);
let body = format!(
"Alert: {} status changed from {:?} to {:?}\n\nDetails: {}\n\nTime: {}",
component,
previous,
current,
details,
chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC")
// Check for recovery
let is_recovery = matches!(
(previous, current),
(Status::Warning, Status::Ok)
| (Status::Critical, Status::Ok)
| (Status::Critical, Status::Warning)
);
if is_alert {
info!(
"Alert: {} - {:?} → {:?}",
component, previous, current
);
info!("Sending notification: {} - {:?} → {:?}", component, previous, current);
if let Err(e) = self.notification_manager.send_direct_email(&subject, &body).await {
error!("Failed to send notification for {}: {}", component, e);
}
self.notification_manager.queue_alert(component, previous, current, details);
} else if is_recovery {
info!(
"Recovery: {} - {:?} → {:?}",
component, previous, current
);
self.notification_manager.queue_recovery(component, previous, current, details);
}
Ok(())
}
}

View File

@@ -114,7 +114,7 @@ impl DiskCollector {
let mut cmd = TokioCommand::new("lsblk");
cmd.args(&["-rn", "-o", "NAME,MOUNTPOINT"]);
let output = run_command_with_timeout(cmd, 2).await
let output = run_command_with_timeout(cmd, 10).await
.map_err(|e| CollectorError::SystemRead {
path: "block devices".to_string(),
error: e.to_string(),
@@ -184,7 +184,7 @@ impl DiskCollector {
/// Get filesystem info for a single mount point
fn get_filesystem_info(&self, mount_point: &str) -> Result<(u64, u64), CollectorError> {
let output = StdCommand::new("timeout")
.args(&["2", "df", "--block-size=1", mount_point])
.args(&["10", "df", "--block-size=1", mount_point])
.output()
.map_err(|e| CollectorError::SystemRead {
path: format!("df {}", mount_point),
@@ -433,7 +433,7 @@ impl DiskCollector {
cmd.args(&["-a", &format!("/dev/{}", drive_name)]);
}
let output = run_command_with_timeout(cmd, 3).await
let output = run_command_with_timeout(cmd, 15).await
.map_err(|e| CollectorError::SystemRead {
path: format!("SMART data for {}", drive_name),
error: e.to_string(),
@@ -772,7 +772,7 @@ impl DiskCollector {
fn get_drive_info_for_path(&self, path: &str) -> anyhow::Result<PoolDrive> {
// Use lsblk to find the backing device with timeout
let output = StdCommand::new("timeout")
.args(&["2", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
.args(&["10", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
.output()
.map_err(|e| anyhow::anyhow!("Failed to run lsblk: {}", e))?;

View File

@@ -32,6 +32,9 @@ impl NixOSCollector {
// Set NixOS build/generation information
agent_data.build_version = self.get_nixos_generation().await;
// Set kernel version
agent_data.kernel_version = self.get_kernel_version().await;
// Set current timestamp
agent_data.timestamp = chrono::Utc::now().timestamp() as u64;
@@ -80,6 +83,14 @@ impl NixOSCollector {
std::env::var("CM_DASHBOARD_VERSION").unwrap_or_else(|_| "unknown".to_string())
}
/// Get kernel version from /proc/sys/kernel/osrelease
async fn get_kernel_version(&self) -> Option<String> {
match fs::read_to_string("/proc/sys/kernel/osrelease") {
Ok(version) => Some(version.trim().to_string()),
Err(_) => None,
}
}
/// Get NixOS system generation (build) information from git commit
async fn get_nixos_generation(&self) -> Option<String> {
// Try to read git commit hash from file written during rebuild

View File

@@ -230,6 +230,37 @@ impl SystemdCollector {
}
}
if service_name == "nfs-server" && status_info.active_state == "active" {
// Add NFS exports as sub-services
let exports = self.get_nfs_exports();
for (export_path, info) in exports {
let display = if !info.is_empty() {
format!("{} {}", export_path, info)
} else {
export_path
};
sub_services.push(SubServiceData {
name: display,
service_status: Status::Info,
metrics: Vec::new(),
service_type: "nfs_export".to_string(),
});
}
}
if (service_name == "smbd" || service_name == "samba-smbd") && status_info.active_state == "active" {
// Add SMB shares as sub-services
let shares = self.get_smb_shares();
for (share_name, share_path, mode) in shares {
sub_services.push(SubServiceData {
name: format!("{}: {} {}", share_name, share_path, mode),
service_status: Status::Info,
metrics: Vec::new(),
service_type: "smb_share".to_string(),
});
}
}
// Create complete service data
let service_data = ServiceData {
name: service_name.clone(),
@@ -922,15 +953,21 @@ impl SystemdCollector {
"-s",
"--max-time",
"4",
"https://ifconfig.me"
"https://1.1.1.1/cdn-cgi/trace"
])
.output()
.ok()?;
if output.status.success() {
let ip = String::from_utf8_lossy(&output.stdout).trim().to_string();
if !ip.is_empty() && ip.contains('.') {
return Some(ip);
let response = String::from_utf8_lossy(&output.stdout);
// Parse "ip=x.x.x.x" from the response
for line in response.lines() {
if let Some(ip) = line.strip_prefix("ip=") {
let ip = ip.trim().to_string();
if !ip.is_empty() {
return Some(ip);
}
}
}
}
@@ -1011,6 +1048,164 @@ impl SystemdCollector {
}
}
/// Get NFS exports from exportfs
/// Returns a list of (export_path, info_string) tuples
fn get_nfs_exports(&self) -> Vec<(String, String)> {
let output = match Command::new("timeout")
.args(["2", "exportfs", "-v"])
.output()
{
Ok(output) if output.status.success() => output,
_ => return Vec::new(),
};
let exports_output = String::from_utf8_lossy(&output.stdout);
let mut exports_map: std::collections::HashMap<String, Vec<(String, String)>> =
std::collections::HashMap::new();
let mut current_path: Option<String> = None;
for line in exports_output.lines() {
let trimmed = line.trim();
if trimmed.is_empty() || trimmed.starts_with('#') {
continue;
}
if trimmed.starts_with('/') {
// Export path line - may have network on same line or continuation
let parts: Vec<&str> = trimmed.splitn(2, char::is_whitespace).collect();
let path = parts[0].to_string();
current_path = Some(path.clone());
// Check if network info is on the same line
if parts.len() > 1 {
let rest = parts[1].trim();
if let Some(paren_pos) = rest.find('(') {
let network = rest[..paren_pos].trim();
if let Some(end_paren) = rest.find(')') {
let options = &rest[paren_pos+1..end_paren];
let mode = if options.contains(",rw,") || options.ends_with(",rw") {
"rw"
} else {
"ro"
};
exports_map.entry(path)
.or_insert_with(Vec::new)
.push((network.to_string(), mode.to_string()));
}
}
}
} else if let Some(ref path) = current_path {
// Continuation line with network and options
if let Some(paren_pos) = trimmed.find('(') {
let network = trimmed[..paren_pos].trim();
if let Some(end_paren) = trimmed.find(')') {
let options = &trimmed[paren_pos+1..end_paren];
let mode = if options.contains(",rw,") || options.ends_with(",rw") {
"rw"
} else {
"ro"
};
exports_map.entry(path.clone())
.or_insert_with(Vec::new)
.push((network.to_string(), mode.to_string()));
}
}
}
}
// Build display strings: "path: mode [networks]"
let mut exports: Vec<(String, String)> = exports_map
.into_iter()
.map(|(path, mut entries)| {
if entries.is_empty() {
return (path, String::new());
}
let mode = entries[0].1.clone();
let networks: Vec<String> = entries.drain(..).map(|(n, _)| n).collect();
let info = format!("{} [{}]", mode, networks.join(", "));
(path, info)
})
.collect();
exports.sort_by(|a, b| a.0.cmp(&b.0));
exports
}
/// Get SMB shares from smb.conf
/// Returns a list of (share_name, share_path, mode) tuples
fn get_smb_shares(&self) -> Vec<(String, String, String)> {
match std::fs::read_to_string("/etc/samba/smb.conf") {
Ok(config) => {
let mut shares = Vec::new();
let mut current_share: Option<String> = None;
let mut current_path: Option<String> = None;
let mut current_mode: String = "ro".to_string(); // Default to read-only
for line in config.lines() {
let line = line.trim();
// Skip comments and empty lines
if line.is_empty() || line.starts_with('#') || line.starts_with(';') {
continue;
}
// Detect share section [sharename]
if line.starts_with('[') && line.ends_with(']') {
// Save previous share if we have both name and path
if let (Some(name), Some(path)) = (current_share.take(), current_path.take()) {
// Skip special sections
if name != "global" && name != "homes" && name != "printers" {
shares.push((name, path, current_mode.clone()));
}
}
// Start new share
let share_name = line[1..line.len()-1].trim().to_string();
current_share = Some(share_name);
current_path = None;
current_mode = "ro".to_string(); // Reset to default
}
// Look for path = /some/path
else if line.starts_with("path") && line.contains('=') {
if let Some(path_value) = line.split('=').nth(1) {
current_path = Some(path_value.trim().to_string());
}
}
// Look for read only = yes/no
else if line.to_lowercase().starts_with("read only") && line.contains('=') {
if let Some(value) = line.split('=').nth(1) {
let val = value.trim().to_lowercase();
current_mode = if val == "no" || val == "false" { "rw" } else { "ro" }.to_string();
}
}
// Look for writable = yes/no (opposite of read only)
else if line.to_lowercase().starts_with("writable") && line.contains('=') {
if let Some(value) = line.split('=').nth(1) {
let val = value.trim().to_lowercase();
current_mode = if val == "yes" || val == "true" { "rw" } else { "ro" }.to_string();
}
}
}
// Don't forget the last share
if let (Some(name), Some(path)) = (current_share, current_path) {
if name != "global" && name != "homes" && name != "printers" {
shares.push((name, path, current_mode));
}
}
shares
}
_ => Vec::new(),
}
}
/// Get nftables open ports grouped by protocol
/// Returns: (tcp_ports_string, udp_ports_string)
fn get_nftables_open_ports(&self) -> (String, String) {

View File

@@ -141,8 +141,23 @@ pub struct NotificationConfig {
pub from_email: String,
pub to_email: String,
pub rate_limit_minutes: u64,
/// Whether to send notifications on warning status
#[serde(default = "default_true")]
pub trigger_on_warnings: bool,
/// Whether to send notifications on failure/critical status
#[serde(default = "default_true")]
pub trigger_on_failures: bool,
/// Only send recovery notification when all components are OK
#[serde(default)]
pub recovery_requires_all_ok: bool,
/// Suppress individual recovery notifications (only notify on full recovery)
#[serde(default)]
pub suppress_individual_recoveries: bool,
/// Email notification batching interval in seconds (default: 60)
pub aggregation_interval_seconds: u64,
/// How often to check for status changes in seconds (default: 30)
#[serde(default = "default_check_interval_seconds")]
pub check_interval_seconds: u64,
/// List of metric names to exclude from email notifications
#[serde(default)]
pub exclude_email_metrics: Vec<String>,
@@ -151,6 +166,14 @@ pub struct NotificationConfig {
pub maintenance_mode_file: String,
}
fn default_true() -> bool {
true
}
fn default_check_interval_seconds() -> u64 {
30
}
fn default_heartbeat_interval_seconds() -> u64 {
5

View File

@@ -1,60 +1,314 @@
use crate::config::NotificationConfig;
use anyhow::Result;
use chrono::Utc;
use cm_dashboard_shared::Status;
use lettre::transport::smtp::SmtpTransport;
use lettre::{Message, Transport};
use std::collections::HashMap;
use std::time::{Duration, Instant};
use tracing::{debug, error, info};
/// Manages notifications
/// Manages notifications with rate limiting and aggregation
pub struct NotificationManager {
config: NotificationConfig,
/// Last notification time per component for rate limiting
last_notification: HashMap<String, Instant>,
/// Pending notifications for aggregation
pending_notifications: Vec<PendingNotification>,
/// Pending recovery notifications (held until all OK if configured)
pending_recoveries: Vec<PendingNotification>,
/// Last aggregation flush time
last_aggregation_flush: Option<Instant>,
/// Track components currently in alert state
components_in_alert: HashMap<String, Status>,
}
/// A pending notification waiting to be aggregated
#[derive(Debug, Clone)]
struct PendingNotification {
component: String,
previous_status: String,
current_status: String,
details: String,
timestamp: chrono::DateTime<Utc>,
is_recovery: bool,
}
impl NotificationManager {
pub fn new(config: &NotificationConfig, _hostname: &str) -> Result<Self> {
Ok(Self {
config: config.clone(),
last_notification: HashMap::new(),
pending_notifications: Vec::new(),
pending_recoveries: Vec::new(),
last_aggregation_flush: None,
components_in_alert: HashMap::new(),
})
}
pub async fn send_direct_email(&mut self, subject: &str, body: &str) -> Result<()> {
/// Check if a component is rate limited
fn is_rate_limited(&self, component: &str) -> bool {
if self.config.rate_limit_minutes == 0 {
return false;
}
if let Some(last_time) = self.last_notification.get(component) {
let rate_limit = Duration::from_secs(self.config.rate_limit_minutes * 60);
last_time.elapsed() < rate_limit
} else {
false
}
}
/// Queue a degradation notification (Ok→Warning, Ok→Critical, Warning→Critical)
pub fn queue_alert(
&mut self,
component: &str,
previous: &Status,
current: &Status,
details: &str,
) {
// Check if this status type should trigger notifications
// Only Warning and Critical trigger notifications (not Inactive)
let should_notify = match current {
Status::Warning => self.config.trigger_on_warnings,
Status::Critical => self.config.trigger_on_failures,
_ => false,
};
if !should_notify {
debug!(
"Notification for {} suppressed (trigger_on_warnings={}, trigger_on_failures={})",
component, self.config.trigger_on_warnings, self.config.trigger_on_failures
);
return;
}
// Check rate limit
if self.is_rate_limited(component) {
debug!(
"Notification for {} rate limited (limit: {} min)",
component, self.config.rate_limit_minutes
);
return;
}
// Check exclusions
if self.config.exclude_email_metrics.iter().any(|e| component.contains(e)) {
debug!("Notification for {} excluded by config", component);
return;
}
// Track this component as in alert state
self.components_in_alert.insert(component.to_string(), *current);
self.pending_notifications.push(PendingNotification {
component: component.to_string(),
previous_status: format!("{:?}", previous),
current_status: format!("{:?}", current),
details: details.to_string(),
timestamp: Utc::now(),
is_recovery: false,
});
// Update rate limit tracker
self.last_notification.insert(component.to_string(), Instant::now());
debug!(
"Queued alert for {}: {:?} -> {:?}",
component, previous, current
);
}
/// Queue a recovery notification (Warning→Ok, Critical→Ok, Critical→Warning)
pub fn queue_recovery(
&mut self,
component: &str,
previous: &Status,
current: &Status,
details: &str,
) {
// Remove from alert tracking
self.components_in_alert.remove(component);
// Check if individual recoveries are suppressed
if self.config.suppress_individual_recoveries {
debug!(
"Individual recovery for {} suppressed by config",
component
);
// Store recovery for potential batch notification
self.pending_recoveries.push(PendingNotification {
component: component.to_string(),
previous_status: format!("{:?}", previous),
current_status: format!("{:?}", current),
details: details.to_string(),
timestamp: Utc::now(),
is_recovery: true,
});
return;
}
// Check exclusions
if self.config.exclude_email_metrics.iter().any(|e| component.contains(e)) {
debug!("Recovery notification for {} excluded by config", component);
return;
}
self.pending_notifications.push(PendingNotification {
component: component.to_string(),
previous_status: format!("{:?}", previous),
current_status: format!("{:?}", current),
details: details.to_string(),
timestamp: Utc::now(),
is_recovery: true,
});
debug!(
"Queued recovery for {}: {:?} -> {:?}",
component, previous, current
);
}
/// Check if all components have recovered (no components in alert state)
pub fn all_components_ok(&self) -> bool {
self.components_in_alert.is_empty()
}
/// Flush suppressed recovery notifications when all components are OK
pub fn flush_recoveries_if_all_ok(&mut self) {
if !self.config.recovery_requires_all_ok || self.all_components_ok() {
if !self.pending_recoveries.is_empty() {
info!("All components recovered, sending batch recovery notification");
self.pending_notifications.append(&mut self.pending_recoveries);
}
}
}
/// Check if it's time to flush aggregated notifications
///
/// Returns true when notifications are queued and either no flush has
/// happened yet or the configured aggregation interval has elapsed.
pub fn should_flush(&self) -> bool {
    // With an empty queue there is never anything to flush.
    if self.pending_notifications.is_empty() {
        return false;
    }
    let interval = Duration::from_secs(self.config.aggregation_interval_seconds);
    // `None` means we have never flushed, so flush immediately.
    self.last_aggregation_flush
        .map_or(true, |last| last.elapsed() >= interval)
}
/// Flush pending notifications as a single aggregated email
pub async fn flush_notifications(&mut self) -> Result<()> {
if self.pending_notifications.is_empty() {
return Ok(());
}
if !self.config.enabled {
self.pending_notifications.clear();
self.last_aggregation_flush = Some(Instant::now());
return Ok(());
}
if self.is_maintenance_mode() {
debug!("Maintenance mode active, suppressing email notification");
debug!("Maintenance mode active, suppressing aggregated notifications");
self.pending_notifications.clear();
self.last_aggregation_flush = Some(Instant::now());
return Ok(());
}
let hostname = gethostname::gethostname()
.to_string_lossy()
.to_string();
let hostname = gethostname::gethostname().to_string_lossy().to_string();
// Build aggregated email
let notification_count = self.pending_notifications.len();
let alert_count = self.pending_notifications.iter().filter(|n| !n.is_recovery).count();
let recovery_count = self.pending_notifications.iter().filter(|n| n.is_recovery).count();
let subject = if notification_count == 1 {
let n = &self.pending_notifications[0];
if n.is_recovery {
format!("[{}] {} Recovered: {}", hostname, n.component, n.current_status)
} else {
format!("[{}] {} Alert: {}", hostname, n.component, n.current_status)
}
} else if recovery_count > 0 && alert_count == 0 {
format!("[{}] {} Components Recovered", hostname, recovery_count)
} else if alert_count > 0 && recovery_count == 0 {
format!("[{}] {} Status Alerts", hostname, alert_count)
} else {
format!("[{}] {} Alerts, {} Recoveries", hostname, alert_count, recovery_count)
};
let mut body = String::new();
body.push_str(&format!("Status notifications for host: {}\n", hostname));
body.push_str(&format!("Time: {}\n\n", Utc::now().format("%Y-%m-%d %H:%M:%S UTC")));
// Group alerts and recoveries
let alerts: Vec<_> = self.pending_notifications.iter().filter(|n| !n.is_recovery).collect();
let recoveries: Vec<_> = self.pending_notifications.iter().filter(|n| n.is_recovery).collect();
if !alerts.is_empty() {
body.push_str("=== ALERTS ===\n\n");
for notification in &alerts {
body.push_str(&format!(
"{} : {}{}\n {}\n ({})\n\n",
notification.component,
notification.previous_status,
notification.current_status,
notification.details,
notification.timestamp.format("%H:%M:%S UTC")
));
}
}
if !recoveries.is_empty() {
body.push_str("=== RECOVERIES ===\n\n");
for notification in &recoveries {
body.push_str(&format!(
"{} : {}{}\n {}\n ({})\n\n",
notification.component,
notification.previous_status,
notification.current_status,
notification.details,
notification.timestamp.format("%H:%M:%S UTC")
));
}
}
body.push_str("--\nCM Dashboard Agent");
// Send the aggregated email
let from_email = self.config.from_email.replace("{hostname}", &hostname);
let email_body = format!(
"{}\n\n--\nCM Dashboard Agent\nGenerated at {}",
body,
Utc::now().format("%Y-%m-%d %H:%M:%S %Z")
);
let email = Message::builder()
.from(from_email.parse()?)
.to(self.config.to_email.parse()?)
.subject(subject)
.body(email_body)?;
.subject(&subject)
.body(body)?;
let mailer = SmtpTransport::unencrypted_localhost();
let mailer = SmtpTransport::builder_dangerous(&self.config.smtp_host)
.port(self.config.smtp_port)
.build();
match mailer.send(&email) {
Ok(_) => info!("Direct email sent successfully: {}", subject),
Ok(_) => {
info!(
"Sent aggregated notification email with {} alerts",
notification_count
);
}
Err(e) => {
error!("Failed to send email: {}", e);
error!("Failed to send aggregated email: {}", e);
return Err(e.into());
}
}
self.pending_notifications.clear();
self.last_aggregation_flush = Some(Instant::now());
Ok(())
}

View File

@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard"
version = "0.1.269"
version = "0.1.278"
edition = "2021"
[dependencies]

View File

@@ -22,7 +22,6 @@ pub struct Dashboard {
headless: bool,
initial_commands_sent: std::collections::HashSet<String>,
config: DashboardConfig,
title_area: Rect, // Store title area for mouse event handling
system_area: Rect, // Store system area for mouse event handling
services_area: Rect, // Store services area for mouse event handling
}
@@ -124,7 +123,6 @@ impl Dashboard {
headless,
initial_commands_sent: std::collections::HashSet::new(),
config,
title_area: Rect::default(),
system_area: Rect::default(),
services_area: Rect::default(),
})
@@ -272,22 +270,9 @@ impl Dashboard {
warn!("Error autoresizing terminal: {}", e);
}
// Check minimum terminal size to prevent panics
let size = terminal.size().unwrap_or_default();
if size.width < 90 || size.height < 15 {
// Terminal too small, show error message
let msg_text = format!("Terminal too small\n\nMinimum: 90x15\nCurrent: {}x{}", size.width, size.height);
let _ = terminal.draw(|frame| {
use ratatui::widgets::{Paragraph, Block, Borders};
use ratatui::layout::Alignment;
let msg = Paragraph::new(msg_text.clone())
.alignment(Alignment::Center)
.block(Block::default().borders(Borders::ALL));
frame.render_widget(msg, frame.size());
});
} else if let Err(e) = terminal.draw(|frame| {
let (title_area, system_area, services_area) = tui_app.render(frame, &self.metric_store);
self.title_area = title_area;
// Render TUI regardless of terminal size
if let Err(e) = terminal.draw(|frame| {
let (_title_area, system_area, services_area) = tui_app.render(frame, &self.metric_store);
self.system_area = system_area;
self.services_area = services_area;
}) {
@@ -392,19 +377,13 @@ impl Dashboard {
return Ok(());
}
// Check for title bar clicks (host selection)
// Check for tab clicks in right panel (hosts | services)
if matches!(mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
if is_in_area(x, y, &self.title_area) {
// Click in title bar - check if it's on a hostname
// The title bar has "cm-dashboard vX.X.X" on the left (22 chars)
// Then hostnames start at position 22
if x >= 22 {
let hostname = self.find_hostname_at_position(x);
if let Some(host) = hostname {
if let Some(ref mut tui_app) = self.tui_app {
tui_app.switch_to_host(&host);
}
}
let services_end = self.services_area.x.saturating_add(self.services_area.width);
if y == self.services_area.y && x >= self.services_area.x && x < services_end {
// Click on top border of services area (where tabs are)
if let Some(ref mut tui_app) = self.tui_app {
tui_app.handle_tab_click(x, &self.services_area);
}
return Ok(());
}
@@ -468,44 +447,66 @@ impl Dashboard {
return Ok(());
}
// Calculate which service was clicked
// The services area includes a border, so we need to account for that
let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header
if let Some(ref mut tui_app) = self.tui_app {
if let Some(hostname) = tui_app.current_host.clone() {
let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
if tui_app.focus_hosts {
// Hosts tab is active - handle host click
// The services area includes a border and header, so account for that
let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header
// Account for scroll offset - the clicked line is relative to viewport
let display_line_index = host_widgets.services_widget.scroll_offset + relative_y;
// Map display line to parent service index
if let Some(parent_index) = host_widgets.services_widget.display_line_to_parent_index(display_line_index) {
// Set the selected index to the clicked parent service
host_widgets.services_widget.selected_index = parent_index;
let total_hosts = tui_app.get_available_hosts().len();
let clicked_index = tui_app.hosts_widget.y_to_host_index(relative_y);
if clicked_index < total_hosts {
match button {
MouseButton::Left => {
// Left click just selects the service
debug!("Left-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
}
MouseButton::Right => {
// Right click opens context menu
debug!("Right-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
// Get the service name for the popup
if let Some(service_name) = host_widgets.services_widget.get_selected_service() {
tui_app.popup_menu = Some(crate::ui::PopupMenu {
service_name,
x,
y,
selected_index: 0,
});
}
// Left click: set selector and switch to host immediately
tui_app.hosts_widget.set_selected_index(clicked_index, total_hosts);
let selected_host = tui_app.get_available_hosts()[clicked_index].clone();
tui_app.switch_to_host(&selected_host);
debug!("Clicked host at index {}: {}", clicked_index, selected_host);
}
_ => {}
}
}
} else {
// Services tab is active - handle service click
// The services area includes a border, so we need to account for that
let relative_y = y.saturating_sub(self.services_area.y + 2) as usize; // +2 for border and header
if let Some(hostname) = tui_app.current_host.clone() {
let host_widgets = tui_app.get_or_create_host_widgets(&hostname);
// Account for scroll offset - the clicked line is relative to viewport
let display_line_index = host_widgets.services_widget.scroll_offset + relative_y;
// Map display line to parent service index
if let Some(parent_index) = host_widgets.services_widget.display_line_to_parent_index(display_line_index) {
// Set the selected index to the clicked parent service
host_widgets.services_widget.selected_index = parent_index;
match button {
MouseButton::Left => {
// Left click just selects the service
debug!("Left-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
}
MouseButton::Right => {
// Right click opens context menu
debug!("Right-clicked service at display line {} (parent index: {})", display_line_index, parent_index);
// Get the service name for the popup
if let Some(service_name) = host_widgets.services_widget.get_selected_service() {
tui_app.popup_menu = Some(crate::ui::PopupMenu {
service_name,
x,
y,
selected_index: 0,
});
}
}
_ => {}
}
}
}
}
}
}
@@ -600,76 +601,12 @@ impl Dashboard {
.unwrap_or_else(|| hostname.to_string())
}
/// Find which hostname is at a given x position in the title bar
fn find_hostname_at_position(&self, x: u16) -> Option<String> {
if let Some(ref tui_app) = self.tui_app {
// The hosts are RIGHT-ALIGNED in chunks[1]!
// Need to calculate total width first, then right-align
// Get terminal width
let terminal_width = if let Some(ref terminal) = self.terminal {
terminal.size().unwrap_or_default().width
} else {
80
};
// Calculate total width of all host text
let mut total_width = 0_u16;
for (i, host) in tui_app.get_available_hosts().iter().enumerate() {
if i > 0 {
total_width += 1; // space between hosts
}
total_width += 2; // icon + space
let is_selected = Some(host) == tui_app.current_host.as_ref();
if is_selected {
total_width += 1 + host.len() as u16 + 1; // [hostname]
} else {
total_width += host.len() as u16;
}
}
total_width += 1; // right padding
// chunks[1] starts at 22, has width of (terminal_width - 22)
let chunk_width = terminal_width - 22;
// Right-aligned position
let hosts_start_x = if total_width < chunk_width {
22 + (chunk_width - total_width)
} else {
22
};
// Now calculate positions starting from hosts_start_x
let mut pos = hosts_start_x;
for (i, host) in tui_app.get_available_hosts().iter().enumerate() {
if i > 0 {
pos += 1; // " "
}
let host_start = pos;
pos += 2; // "● "
let is_selected = Some(host) == tui_app.current_host.as_ref();
if is_selected {
pos += 1 + host.len() as u16 + 1; // [hostname]
} else {
pos += host.len() as u16;
}
if x >= host_start && x < pos {
return Some(host.clone());
}
}
}
None
}
}
/// Check if a point is within a rectangular area
fn is_in_area(x: u16, y: u16, area: &Rect) -> bool {
x >= area.x && x < area.x + area.width
&& y >= area.y && y < area.y + area.height
x >= area.x && x < area.x.saturating_add(area.width)
&& y >= area.y && y < area.y.saturating_add(area.height)
}
impl Drop for Dashboard {

View File

@@ -18,7 +18,7 @@ use crate::config::DashboardConfig;
use crate::metrics::MetricStore;
use cm_dashboard_shared::Status;
use theme::{Components, Layout as ThemeLayout, Theme};
use widgets::{ServicesWidget, SystemWidget, Widget};
use widgets::{HostsWidget, ServicesWidget, SystemWidget, Widget};
@@ -64,8 +64,6 @@ pub struct TuiApp {
pub current_host: Option<String>,
/// Available hosts
available_hosts: Vec<String>,
/// Host index for navigation
host_index: usize,
/// Should quit application
should_quit: bool,
/// Track if user manually navigated away from localhost
@@ -76,6 +74,10 @@ pub struct TuiApp {
localhost: String,
/// Active popup menu (if any)
pub popup_menu: Option<PopupMenu>,
/// Focus on hosts tab (false = Services, true = Hosts)
pub focus_hosts: bool,
/// Hosts widget for navigation and rendering
pub hosts_widget: HostsWidget,
}
impl TuiApp {
@@ -85,12 +87,13 @@ impl TuiApp {
host_widgets: HashMap::new(),
current_host: None,
available_hosts: config.hosts.keys().cloned().collect(),
host_index: 0,
should_quit: false,
user_navigated_away: false,
config,
localhost,
popup_menu: None,
focus_hosts: true, // Start with Hosts tab focused by default
hosts_widget: HostsWidget::new(),
};
// Sort predefined hosts
@@ -142,27 +145,32 @@ impl TuiApp {
all_hosts.sort();
self.available_hosts = all_hosts;
// Track if we had a host before this update
let had_host = self.current_host.is_some();
// Get the current hostname (localhost) for auto-selection
if !self.available_hosts.is_empty() {
if self.available_hosts.contains(&self.localhost) && !self.user_navigated_away {
// Localhost is available and user hasn't navigated away - switch to it
self.current_host = Some(self.localhost.clone());
// Find the actual index of localhost in the sorted list
self.host_index = self.available_hosts.iter().position(|h| h == &self.localhost).unwrap_or(0);
// Initialize selector bar on first host selection
if !had_host {
let index = self.available_hosts.iter().position(|h| h == &self.localhost).unwrap_or(0);
self.hosts_widget.set_selected_index(index, self.available_hosts.len());
}
} else if self.current_host.is_none() {
// No current host - select first available (which is localhost if available)
self.current_host = Some(self.available_hosts[0].clone());
self.host_index = 0;
// Initialize selector bar
self.hosts_widget.set_selected_index(0, self.available_hosts.len());
} else if let Some(ref current) = self.current_host {
if !self.available_hosts.contains(current) {
// Current host disconnected - select first available and reset navigation flag
// Current host disconnected - FORCE switch to first available
self.current_host = Some(self.available_hosts[0].clone());
self.host_index = 0;
// Reset selector bar since we're forcing a host change
self.hosts_widget.set_selected_index(0, self.available_hosts.len());
self.user_navigated_away = false; // Reset since we're forced to switch
} else if let Some(index) = self.available_hosts.iter().position(|h| h == current) {
// Update index for current host
self.host_index = index;
}
}
}
@@ -183,12 +191,6 @@ impl TuiApp {
KeyCode::Char('q') => {
self.should_quit = true;
}
KeyCode::Left => {
self.navigate_host(-1);
}
KeyCode::Right => {
self.navigate_host(1);
}
KeyCode::Char('r') => {
// System rebuild command - works on any panel for current host
if let Some(hostname) = self.current_host.clone() {
@@ -356,25 +358,46 @@ impl TuiApp {
}
}
KeyCode::Tab => {
// Tab cycles to next host
self.navigate_host(1);
// Tab toggles between Services and Hosts tabs
self.focus_hosts = !self.focus_hosts;
}
KeyCode::Up | KeyCode::Char('k') => {
// Move service selection up
if let Some(hostname) = self.current_host.clone() {
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.select_previous();
if self.focus_hosts {
// Move blue selector bar up when in Hosts tab
self.hosts_widget.select_previous();
} else {
// Move service selection up when in Services tab
if let Some(hostname) = self.current_host.clone() {
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.select_previous();
}
}
}
KeyCode::Down | KeyCode::Char('j') => {
// Move service selection down
if let Some(hostname) = self.current_host.clone() {
let total_services = {
if self.focus_hosts {
// Move blue selector bar down when in Hosts tab
let total_hosts = self.available_hosts.len();
self.hosts_widget.select_next(total_hosts);
} else {
// Move service selection down when in Services tab
if let Some(hostname) = self.current_host.clone() {
let total_services = {
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.get_total_services_count()
};
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.get_total_services_count()
};
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.select_next(total_services);
host_widgets.services_widget.select_next(total_services);
}
}
}
KeyCode::Enter => {
if self.focus_hosts {
// Enter key switches to the selected host
let selected_idx = self.hosts_widget.get_selected_index();
if selected_idx < self.available_hosts.len() {
let selected_host = self.available_hosts[selected_idx].clone();
self.switch_to_host(&selected_host);
}
}
}
_ => {}
@@ -386,7 +409,8 @@ impl TuiApp {
/// Switch to a specific host by name
pub fn switch_to_host(&mut self, hostname: &str) {
if let Some(index) = self.available_hosts.iter().position(|h| h == hostname) {
self.host_index = index;
// Update selector bar position
self.hosts_widget.set_selected_index(index, self.available_hosts.len());
self.current_host = Some(hostname.to_string());
// Check if user navigated away from localhost
@@ -400,41 +424,33 @@ impl TuiApp {
}
}
/// Navigate between hosts
fn navigate_host(&mut self, direction: i32) {
if self.available_hosts.is_empty() {
return;
/// Handle mouse click on tab title area
pub fn handle_tab_click(&mut self, x: u16, area: &Rect) {
// Tab title format: "hosts | services"
// Calculate positions relative to area start
let title_start_x = area.x + 1; // +1 for left border
// "hosts | services"
// 0123456789...
let hosts_start = title_start_x;
let hosts_end = hosts_start + 5; // "hosts" is 5 chars
let services_start = hosts_end + 3; // After " | "
let services_end = services_start + 8; // "services" is 8 chars
if x >= hosts_start && x < hosts_end {
// Clicked on "hosts"
self.focus_hosts = true;
} else if x >= services_start && x < services_end {
// Clicked on "services"
self.focus_hosts = false;
}
let len = self.available_hosts.len();
if direction > 0 {
self.host_index = (self.host_index + 1) % len;
} else {
self.host_index = if self.host_index == 0 {
len - 1
} else {
self.host_index - 1
};
}
self.current_host = Some(self.available_hosts[self.host_index].clone());
// Check if user navigated away from localhost
if let Some(ref current) = self.current_host {
if current != &self.localhost {
self.user_navigated_away = true;
} else {
self.user_navigated_away = false; // User navigated back to localhost
}
}
info!("Switched to host: {}", self.current_host.as_ref().unwrap());
}
/// Get the currently selected service name from the services widget
fn get_selected_service(&self) -> Option<String> {
if let Some(hostname) = &self.current_host {
@@ -498,14 +514,6 @@ impl TuiApp {
true // No host selected is considered offline
};
// If host is offline, render wake-up message instead of panels
if current_host_offline {
self.render_offline_host_message(frame, main_chunks[1]);
self.render_btop_title(frame, main_chunks[0], metric_store);
self.render_statusbar(frame, main_chunks[2], metric_store);
return (main_chunks[0], Rect::default(), Rect::default()); // Return title area and empty areas when offline
}
// Left side: system panel only (full height)
let left_chunks = ratatui::layout::Layout::default()
.direction(Direction::Vertical)
@@ -515,20 +523,18 @@ impl TuiApp {
// Render title bar
self.render_btop_title(frame, main_chunks[0], metric_store);
// Render system panel
// Render system panel or offline message in system panel area
let system_area = left_chunks[0];
self.render_system_panel(frame, system_area, metric_store);
// Render services widget for current host
let services_area = content_chunks[1];
if let Some(hostname) = self.current_host.clone() {
let is_focused = true; // Always show service selection
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets
.services_widget
.render(frame, services_area, is_focused); // Services takes full right side
if current_host_offline {
self.render_offline_host_message(frame, system_area);
} else {
self.render_system_panel(frame, system_area, metric_store);
}
// Render right panel with tabs (Services | Hosts)
let services_area = content_chunks[1];
self.render_right_panel_with_tabs(frame, services_area, metric_store);
// Render statusbar at the bottom
self.render_statusbar(frame, main_chunks[2], metric_store);
@@ -545,7 +551,6 @@ impl TuiApp {
fn render_btop_title(&self, frame: &mut Frame, area: Rect, metric_store: &MetricStore) {
use ratatui::style::Modifier;
use ratatui::text::{Line, Span};
use theme::StatusIcons;
if self.available_hosts.is_empty() {
let title_text = "cm-dashboard • no hosts discovered";
@@ -568,86 +573,34 @@ impl TuiApp {
// Use the worst status color as background
let background_color = Theme::status_color(worst_status);
// Split the title bar into left and right sections
let chunks = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Length(22), Constraint::Min(0)])
.split(area);
// Single line title bar showing dashboard name (left) and dashboard IP (right)
let left_text = format!(" cm-dashboard v{}", env!("CARGO_PKG_VERSION"));
// Left side: "cm-dashboard" text with version
let title_text = format!(" cm-dashboard v{}", env!("CARGO_PKG_VERSION"));
let left_span = Span::styled(
&title_text,
Style::default().fg(Theme::background()).bg(background_color).add_modifier(Modifier::BOLD)
);
let left_title = Paragraph::new(Line::from(vec![left_span]))
.style(Style::default().bg(background_color));
frame.render_widget(left_title, chunks[0]);
// Get dashboard local IP for right side
let dashboard_ip = Self::get_local_ip();
let right_text = format!("{} ", dashboard_ip);
// Right side: hosts with status indicators
let mut host_spans = Vec::new();
for (i, host) in self.available_hosts.iter().enumerate() {
if i > 0 {
host_spans.push(Span::styled(
" ",
Style::default().fg(Theme::background()).bg(background_color)
));
}
// Calculate spacing to push right text to the right
let total_text_len = left_text.len() + right_text.len();
let spacing = (area.width as usize).saturating_sub(total_text_len).max(1);
let spacing_str = " ".repeat(spacing);
// Always show normal status icon based on metrics (no command status at host level)
let host_status = self.calculate_host_status(host, metric_store);
let status_icon = StatusIcons::get_icon(host_status);
// Add status icon with background color as foreground against status background
host_spans.push(Span::styled(
format!("{} ", status_icon),
Style::default().fg(Theme::background()).bg(background_color),
));
if Some(host) == self.current_host.as_ref() {
// Selected host with brackets in bold background color against status background
host_spans.push(Span::styled(
"[",
Style::default()
.fg(Theme::background())
.bg(background_color)
.add_modifier(Modifier::BOLD),
));
host_spans.push(Span::styled(
host.clone(),
Style::default()
.fg(Theme::background())
.bg(background_color)
.add_modifier(Modifier::BOLD),
));
host_spans.push(Span::styled(
"]",
Style::default()
.fg(Theme::background())
.bg(background_color)
.add_modifier(Modifier::BOLD),
));
} else {
// Other hosts in normal background color against status background
host_spans.push(Span::styled(
host.clone(),
Style::default().fg(Theme::background()).bg(background_color),
));
}
}
// Add right padding
host_spans.push(Span::styled(
" ",
Style::default().fg(Theme::background()).bg(background_color)
));
let host_line = Line::from(host_spans);
let host_title = Paragraph::new(vec![host_line])
.style(Style::default().bg(background_color))
.alignment(ratatui::layout::Alignment::Right);
frame.render_widget(host_title, chunks[1]);
let title = Paragraph::new(Line::from(vec![
Span::styled(
left_text,
Style::default().fg(Theme::background()).bg(background_color).add_modifier(Modifier::BOLD)
),
Span::styled(
spacing_str,
Style::default().bg(background_color)
),
Span::styled(
right_text,
Style::default().fg(Theme::background()).bg(background_color)
),
]))
.style(Style::default().bg(background_color));
frame.render_widget(title, area);
}
/// Calculate overall status for a host based on its structured data
@@ -735,7 +688,7 @@ impl TuiApp {
use ratatui::widgets::Paragraph;
// Get current host info
let (hostname_str, host_ip, build_version, agent_version) = if let Some(hostname) = &self.current_host {
let (hostname_str, host_ip, kernel_version, build_version, agent_version) = if let Some(hostname) = &self.current_host {
// Get the connection IP (the IP dashboard uses to connect to the agent)
let ip = if let Some(host_details) = self.config.hosts.get(hostname) {
host_details.get_connection_ip(hostname)
@@ -743,32 +696,30 @@ impl TuiApp {
hostname.clone()
};
// Get build and agent versions from system widget
let (build, agent) = if let Some(host_widgets) = self.host_widgets.get(hostname) {
// Get kernel, build and agent versions from system widget
let (kernel, build, agent) = if let Some(host_widgets) = self.host_widgets.get(hostname) {
let kernel = host_widgets.system_widget.get_kernel_version().unwrap_or("N/A".to_string());
let build = host_widgets.system_widget.get_build_version().unwrap_or("N/A".to_string());
let agent = host_widgets.system_widget.get_agent_version().unwrap_or("N/A".to_string());
(build, agent)
(kernel, build, agent)
} else {
("N/A".to_string(), "N/A".to_string())
("N/A".to_string(), "N/A".to_string(), "N/A".to_string())
};
(hostname.clone(), ip, build, agent)
(hostname.clone(), ip, kernel, build, agent)
} else {
("None".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string())
("None".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string(), "N/A".to_string())
};
let left_text = format!("Host: {} | {} | Build:{} | Agent:{}", hostname_str, host_ip, build_version, agent_version);
let left_text = format!(" Host: {} | {} | {}", hostname_str, host_ip, kernel_version);
let right_text = format!("Build:{} | Agent:{} ", build_version, agent_version);
// Get dashboard local IP
let dashboard_ip = Self::get_local_ip();
let right_text = format!("Dashboard: {}", dashboard_ip);
// Calculate spacing to push right text to the right (accounting for 1 char left padding)
let spacing = area.width as usize - left_text.len() - right_text.len() - 2; // -2 for left padding
let spacing_str = " ".repeat(spacing.max(1));
// Calculate spacing to push right text to the right
let total_text_len = left_text.len() + right_text.len();
let spacing = (area.width as usize).saturating_sub(total_text_len).max(1);
let spacing_str = " ".repeat(spacing);
let line = Line::from(vec![
Span::raw(" "), // 1 char left padding
Span::styled(left_text, Style::default().fg(Theme::border())),
Span::raw(spacing_str),
Span::styled(right_text, Style::default().fg(Theme::border())),
@@ -808,9 +759,75 @@ impl TuiApp {
}
/// Render offline host message with wake-up option
/// Render right panel with tabs (hosts | services)
fn render_right_panel_with_tabs(&mut self, frame: &mut Frame, area: Rect, metric_store: &MetricStore) {
use ratatui::style::Modifier;
use ratatui::text::{Line, Span};
use ratatui::widgets::{Block, Borders};
// Build tab title with bold styling for active tab (like cm-player)
let hosts_style = if self.focus_hosts {
Style::default().fg(Theme::border_title()).add_modifier(Modifier::BOLD)
} else {
Style::default().fg(Theme::border_title())
};
let services_style = if !self.focus_hosts {
Style::default().fg(Theme::border_title()).add_modifier(Modifier::BOLD)
} else {
Style::default().fg(Theme::border_title())
};
let title = Line::from(vec![
Span::styled("hosts", hosts_style),
Span::raw(" | "),
Span::styled("services", services_style),
]);
// Create ONE block with tab title (like cm-player)
let main_block = Block::default()
.borders(Borders::ALL)
.title(title.clone())
.style(Style::default().fg(Theme::border()).bg(Theme::background()));
let inner_area = main_block.inner(area);
frame.render_widget(main_block, area);
// Render appropriate content based on active tab
if self.focus_hosts {
// Render hosts list (no additional borders)
let localhost = self.localhost.clone();
let current_host = self.current_host.as_deref();
self.hosts_widget.render(
frame,
inner_area,
&self.available_hosts,
&localhost,
current_host,
metric_store,
|hostname, store| {
// Inline calculate_host_status logic
if store.get_agent_data(hostname).is_some() {
Status::Ok
} else {
Status::Offline
}
},
true, // Always focused when visible
);
} else {
// Render services for current host (no additional borders - just content!)
if let Some(hostname) = self.current_host.clone() {
let is_focused = true;
let host_widgets = self.get_or_create_host_widgets(&hostname);
host_widgets.services_widget.render_content(frame, inner_area, is_focused);
}
}
}
/// Render offline host message in system panel area
fn render_offline_host_message(&self, frame: &mut Frame, area: Rect) {
use ratatui::layout::Alignment;
use ratatui::style::Modifier;
use ratatui::text::{Line, Span};
use ratatui::widgets::{Block, Borders, Paragraph};
@@ -828,8 +845,9 @@ impl TuiApp {
// Create message content
let mut lines = vec![
Line::from(""),
Line::from(Span::styled(
format!("Host '{}' is offline", hostname),
format!(" Host '{}' is offline", hostname),
Style::default().fg(Theme::muted_text()).add_modifier(Modifier::BOLD),
)),
Line::from(""),
@@ -837,46 +855,26 @@ impl TuiApp {
if has_mac {
lines.push(Line::from(Span::styled(
"Press 'w' to wake up host",
Style::default().fg(Theme::primary_text()).add_modifier(Modifier::BOLD),
" Press 'w' to wake up host",
Style::default().fg(Theme::primary_text()),
)));
} else {
lines.push(Line::from(Span::styled(
"No MAC address configured - cannot wake up",
" No MAC address configured",
Style::default().fg(Theme::muted_text()),
)));
}
// Create centered message
// Render message in system panel with border
let message = Paragraph::new(lines)
.block(Block::default()
.borders(Borders::ALL)
.border_style(Style::default().fg(Theme::muted_text()))
.title(" Offline Host ")
.title(" Offline ")
.title_style(Style::default().fg(Theme::muted_text()).add_modifier(Modifier::BOLD)))
.style(Style::default().bg(Theme::background()).fg(Theme::primary_text()))
.alignment(Alignment::Center);
.style(Style::default().bg(Theme::background()).fg(Theme::primary_text()));
// Center the message in the available area
let popup_area = ratatui::layout::Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Percentage(40),
Constraint::Length(6),
Constraint::Percentage(40),
])
.split(area)[1];
let popup_area = ratatui::layout::Layout::default()
.direction(Direction::Horizontal)
.constraints([
Constraint::Percentage(25),
Constraint::Percentage(50),
Constraint::Percentage(25),
])
.split(popup_area)[1];
frame.render_widget(message, popup_area);
frame.render_widget(message, area);
}
/// Parse MAC address string (e.g., "AA:BB:CC:DD:EE:FF") to [u8; 6]

View File

@@ -0,0 +1,229 @@
use ratatui::{
layout::Rect,
style::{Modifier, Style},
text::{Line, Span},
widgets::{List, ListItem},
Frame,
};
use crate::metrics::MetricStore;
use crate::ui::theme::Theme;
use cm_dashboard_shared::Status;
/// Hosts widget displaying all available hosts with selector bar navigation
///
/// Holds only display state (selection and scroll position); the host list
/// itself is passed in at render time by the caller.
#[derive(Clone)]
pub struct HostsWidget {
/// Currently selected host index (for blue selector bar)
pub selected_index: usize,
/// Scroll offset for viewport (index of the first visible host row)
pub scroll_offset: usize,
/// Last rendered viewport height for scroll calculations; 0 before the
/// first render, in which case auto-scroll adjustments are skipped
last_viewport_height: usize,
}
impl HostsWidget {
    /// Create a widget with the first host selected and no scroll applied.
    pub fn new() -> Self {
        Self {
            selected_index: 0,
            scroll_offset: 0,
            last_viewport_height: 0,
        }
    }

    /// Move selection up one row, auto-scrolling if the new row is above the
    /// viewport. No-op when already at the top.
    pub fn select_previous(&mut self) {
        if self.selected_index > 0 {
            self.selected_index -= 1;
            self.ensure_selected_visible();
        }
    }

    /// Move selection down one row, clamped to the last host, auto-scrolling
    /// if the new row is below the viewport.
    pub fn select_next(&mut self, total_hosts: usize) {
        if total_hosts > 0 && self.selected_index < total_hosts.saturating_sub(1) {
            self.selected_index += 1;
            self.ensure_selected_visible();
        }
    }

    /// Adjust `scroll_offset` so that `selected_index` falls inside the
    /// viewport (auto-scroll after keyboard navigation).
    fn ensure_selected_visible(&mut self) {
        if self.last_viewport_height == 0 {
            // Not rendered yet — viewport size unknown, so scrolling is
            // deferred until the next render updates last_viewport_height.
            return;
        }
        let viewport_height = self.last_viewport_height;
        // Selection above the window: scroll up so it becomes the first row.
        if self.selected_index < self.scroll_offset {
            self.scroll_offset = self.selected_index;
        }
        // Selection below the window: scroll down so it becomes the last row.
        if self.selected_index >= self.scroll_offset + viewport_height {
            self.scroll_offset = self.selected_index.saturating_sub(viewport_height.saturating_sub(1));
        }
    }

    /// Scroll the viewport down one row (e.g. mouse wheel), without moving
    /// the selection. Clamped so the last page stays fully populated.
    pub fn scroll_down(&mut self, total_hosts: usize) {
        if self.last_viewport_height == 0 {
            return;
        }
        let viewport_height = self.last_viewport_height;
        let max_scroll = total_hosts.saturating_sub(viewport_height);
        if self.scroll_offset < max_scroll {
            self.scroll_offset += 1;
        }
    }

    /// Scroll the viewport up one row, without moving the selection.
    pub fn scroll_up(&mut self) {
        if self.scroll_offset > 0 {
            self.scroll_offset -= 1;
        }
    }

    /// Index of the currently selected host in the full host list.
    pub fn get_selected_index(&self) -> usize {
        self.selected_index
    }

    /// Set the selection directly (used when switching hosts via mouse
    /// click); out-of-range indices are ignored.
    pub fn set_selected_index(&mut self, index: usize, total_hosts: usize) {
        if index < total_hosts {
            self.selected_index = index;
            self.ensure_selected_visible();
        }
    }

    /// Map a y coordinate relative to the list area to a host index,
    /// accounting for the current scroll offset.
    /// NOTE(review): the result is not bounds-checked against the host list —
    /// callers must validate it (e.g. via `set_selected_index`).
    pub fn y_to_host_index(&self, relative_y: usize) -> usize {
        self.scroll_offset + relative_y
    }

    /// Render the hosts list with a "Hosts:" header row and the selector bar.
    ///
    /// * `available_hosts` — full host list; only the visible window is drawn.
    /// * `localhost` — hostname rendered with a " (localhost)" suffix.
    /// * `current_host` — the active host, drawn bold with an indicator span.
    /// * `calculate_host_status` — callback computing a per-host `Status`
    ///   from the metric store (drives the status icon and color).
    /// * `is_focused` — when false, no selector bar is painted.
    ///
    /// Side effect: records the list-area height into `last_viewport_height`
    /// for the scroll helpers.
    pub fn render<F>(
        &mut self,
        frame: &mut Frame,
        area: Rect,
        available_hosts: &[String],
        localhost: &str,
        current_host: Option<&str>,
        metric_store: &MetricStore,
        mut calculate_host_status: F,
        is_focused: bool,
    ) where F: FnMut(&str, &MetricStore) -> Status {
        use crate::ui::theme::{StatusIcons, Typography};
        use ratatui::widgets::Paragraph;
        // Split area into a one-line header and the remaining list viewport.
        let chunks = ratatui::layout::Layout::default()
            .direction(ratatui::layout::Direction::Vertical)
            .constraints([
                ratatui::layout::Constraint::Length(1), // Header
                ratatui::layout::Constraint::Min(0),    // List
            ])
            .split(area);
        // Render header
        let header = Paragraph::new("Hosts:").style(Typography::muted());
        frame.render_widget(header, chunks[0]);
        // Store viewport height for the scroll helpers (chunks[1] already
        // excludes the header row).
        self.last_viewport_height = chunks[1].height as usize;
        // Clamp a stale scroll offset after the host list shrank.
        if self.scroll_offset >= available_hosts.len() && !available_hosts.is_empty() {
            self.scroll_offset = available_hosts.len().saturating_sub(1);
        }
        // Build list items for the visible window only: enumerate first so
        // `idx` stays an absolute index despite skip/take.
        let items: Vec<ListItem> = available_hosts
            .iter()
            .enumerate()
            .skip(self.scroll_offset)
            .take(chunks[1].height as usize)
            .map(|(idx, hostname)| {
                let host_status = calculate_host_status(hostname, metric_store);
                let status_icon = StatusIcons::get_icon(host_status);
                let status_color = Theme::status_color(host_status);
                // Selector bar only applies while the widget has focus.
                let is_selected = is_focused && idx == self.selected_index;
                // Check if this is the current (active) host
                let is_current = current_host == Some(hostname.as_str());
                // Check if this is localhost
                let is_localhost = hostname == localhost;
                // Status icon span; on the selector bar the foreground flips
                // to the background color for contrast against the highlight.
                let mut spans = vec![Span::styled(
                    format!("{} ", status_icon),
                    if is_selected {
                        Style::default()
                            .fg(Theme::background())
                            .add_modifier(Modifier::BOLD)
                    } else {
                        Style::default().fg(status_color)
                    },
                )];
                // Indicator span for the current host (like cm-player).
                // NOTE(review): the literal below is an empty string — the
                // intended indicator glyph (an arrow?) may have been lost in
                // an encoding step; confirm against the original file.
                if is_current {
                    spans.push(Span::styled(
                        "",
                        if is_selected {
                            Style::default()
                                .fg(Theme::background())
                                .add_modifier(Modifier::BOLD)
                        } else {
                            Style::default()
                                .fg(Theme::primary_text())
                                .add_modifier(Modifier::BOLD)
                        },
                    ));
                }
                // Hostname text, tagging localhost explicitly.
                let hostname_text = if is_localhost {
                    format!("{} (localhost)", hostname)
                } else {
                    hostname.clone()
                };
                // Hostname styling: selected > current (bold) > normal.
                spans.push(Span::styled(
                    hostname_text,
                    if is_selected {
                        Style::default()
                            .fg(Theme::background())
                            .add_modifier(Modifier::BOLD)
                    } else if is_current {
                        Style::default()
                            .fg(Theme::primary_text())
                            .add_modifier(Modifier::BOLD)
                    } else {
                        Style::default().fg(Theme::primary_text())
                    },
                ));
                let line = Line::from(spans);
                // Row background: highlight for the selector bar, otherwise
                // the normal theme background.
                let base_style = if is_selected {
                    Style::default().bg(Theme::highlight()) // Blue background
                } else {
                    Style::default().bg(Theme::background())
                };
                ListItem::new(line).style(base_style)
            })
            .collect();
        let hosts_list = List::new(items);
        frame.render_widget(hosts_list, chunks[1]);
    }
}

View File

@@ -1,8 +1,10 @@
use cm_dashboard_shared::AgentData;
pub mod hosts;
pub mod services;
pub mod system;
pub use hosts::HostsWidget;
pub use services::ServicesWidget;
pub use system::SystemWidget;

View File

@@ -713,7 +713,11 @@ impl ServicesWidget {
/// Render with focus
pub fn render(&mut self, frame: &mut Frame, area: Rect, is_focused: bool) {
let services_block = Components::widget_block("services");
self.render_with_title(frame, area, is_focused, "services");
}
pub fn render_with_title(&mut self, frame: &mut Frame, area: Rect, is_focused: bool, title: &str) {
let services_block = Components::widget_block(title);
let inner_area = services_block.inner(area);
frame.render_widget(services_block, area);
@@ -758,6 +762,49 @@ impl ServicesWidget {
self.render_services(frame, content_chunks[1], is_focused, columns);
}
/// Render the services content without a surrounding block border
/// (used in tab mode, mirroring the cm-player layout).
///
/// Draws a one-line column header, then either a "No process data"
/// placeholder or the hierarchical services list.
pub fn render_content(&mut self, frame: &mut Frame, area: Rect, is_focused: bool) {
    // One fixed header row on top, the rest goes to the list.
    let rows = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Length(1), Constraint::Min(0)])
        .split(area);

    // Column set depends on how wide the tab currently is.
    let columns = ColumnVisibility::from_width(area.width);

    // Table-driven header: (visible?, label, padded width) per column,
    // joined with a single space — same output as the original
    // push-per-column construction.
    let segments = [
        (columns.show_name, "Service:", ColumnVisibility::NAME_WIDTH),
        (columns.show_status, "Status:", ColumnVisibility::STATUS_WIDTH),
        (columns.show_ram, "RAM:", ColumnVisibility::RAM_WIDTH),
        (columns.show_uptime, "Uptime:", ColumnVisibility::UPTIME_WIDTH),
        (columns.show_restarts, "↻:", ColumnVisibility::RESTARTS_WIDTH),
    ];
    let header = segments
        .iter()
        .filter(|(visible, _, _)| *visible)
        .map(|(_, label, width)| format!("{:<width$}", label, width = *width as usize))
        .collect::<Vec<_>>()
        .join(" ");
    frame.render_widget(Paragraph::new(header).style(Typography::muted()), rows[0]);

    // Nothing to show at all — render a muted placeholder and bail out.
    if self.parent_services.is_empty() && self.sub_services.is_empty() {
        frame.render_widget(
            Paragraph::new("No process data").style(Typography::muted()),
            rows[1],
        );
        return;
    }

    // Render the services list
    self.render_services(frame, rows[1], is_focused, columns);
}
/// Render services list
fn render_services(&mut self, frame: &mut Frame, area: Rect, is_focused: bool, columns: ColumnVisibility) {
// Build hierarchical service list for display

View File

@@ -15,6 +15,7 @@ pub struct SystemWidget {
// NixOS information
nixos_build: Option<String>,
agent_hash: Option<String>,
kernel_version: Option<String>,
// Network interfaces
network_interfaces: Vec<cm_dashboard_shared::NetworkInterfaceData>,
@@ -94,6 +95,7 @@ impl SystemWidget {
Self {
nixos_build: None,
agent_hash: None,
kernel_version: None,
network_interfaces: Vec::new(),
cpu_load_1min: None,
cpu_load_5min: None,
@@ -171,6 +173,11 @@ impl SystemWidget {
/// Returns a copy of the agent version hash, if one has been received.
pub fn get_agent_version(&self) -> Option<String> {
    self.agent_hash.as_ref().cloned()
}
/// Returns a copy of the reported kernel version, if one has been received.
pub fn get_kernel_version(&self) -> Option<String> {
    self.kernel_version.to_owned()
}
}
use super::Widget;
@@ -181,10 +188,13 @@ impl Widget for SystemWidget {
// Extract agent version
self.agent_hash = Some(agent_data.agent_version.clone());
// Extract build version
self.nixos_build = agent_data.build_version.clone();
// Extract kernel version
self.kernel_version = agent_data.kernel_version.clone();
// Extract network interfaces
self.network_interfaces = agent_data.system.network.interfaces.clone();
@@ -544,9 +554,8 @@ impl SystemWidget {
"unknown".to_string()
};
// Header: "Repo <complete timestamp>"
let repo_text = format!("Repo {}", time_display);
let repo_spans = StatusIcons::create_status_spans(self.backup_status, &repo_text);
// Header: just the timestamp
let repo_spans = StatusIcons::create_status_spans(self.backup_status, &time_display);
lines.push(Line::from(repo_spans));
// List all repositories with archive count and size

View File

@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-shared"
version = "0.1.269"
version = "0.1.278"
edition = "2021"
[dependencies]

View File

@@ -7,6 +7,8 @@ pub struct AgentData {
pub hostname: String,
pub agent_version: String,
pub build_version: Option<String>,
#[serde(default)]
pub kernel_version: Option<String>,
pub timestamp: u64,
pub system: SystemData,
pub services: Vec<ServiceData>,
@@ -203,6 +205,7 @@ impl AgentData {
hostname,
agent_version,
build_version: None,
kernel_version: None,
timestamp: chrono::Utc::now().timestamp() as u64,
system: SystemData {
network: NetworkData {