Compare commits


9 Commits

966ba27b1e Remove status icon from VPN IP and change to lowercase
All checks were successful
Build and Release / build-and-release (push) Successful in 1m19s
Change display from "IP: X.X.X.X" to "ip: X.X.X.X".
Remove status icon for vpn_route service type.

Version: v0.1.236
2025-12-01 14:40:21 +01:00
6c6c9144bd Add info-level logging for VPN external IP debugging
All checks were successful
Build and Release / build-and-release (push) Successful in 1m10s
Change debug to info/warn logging to diagnose VPN IP query issues.
Use exact service name match instead of contains.

Version: v0.1.235
2025-12-01 14:30:25 +01:00
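
The switch from a substring match to an exact comparison has no before/after hunk in the combined diff below, so here is a minimal sketch of the difference; the old contains() form and the "-helper" unit name are assumptions taken from the commit message, not from the diff:

// Sketch only: exact comparison instead of the previous substring check.
fn is_vpn_connection_unit(service_name: &str) -> bool {
    // Old (assumed): service_name.contains("openvpn-vpn-connection"),
    // which would also match derived units such as a hypothetical
    // "openvpn-vpn-connection-helper".
    service_name == "openvpn-vpn-connection"
}

fn main() {
    assert!(is_vpn_connection_unit("openvpn-vpn-connection"));
    assert!(!is_vpn_connection_unit("openvpn-vpn-connection-helper"));
}
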
3fdcec8047 Add sudo for VPN namespace access
All checks were successful
Build and Release / build-and-release (push) Successful in 1m10s
Use sudo to access vpn namespace for external IP query.
Requires corresponding sudo permission in NixOS config.

Version: v0.1.234
2025-12-01 14:14:41 +01:00
1fcaf4a670 Fix VPN namespace name for external IP query
All checks were successful
Build and Release / build-and-release (push) Successful in 1m9s
Use correct namespace name 'vpn' instead of 'openvpn-namespace'.

Version: v0.1.233
2025-12-01 14:09:07 +01:00
885e19f7fd Add external IP display for OpenVPN connections
All checks were successful
Build and Release / build-and-release (push) Successful in 1m20s
Display VPN external IP as sub-service under openvpn-vpn-connection.
Query external IP through openvpn-namespace using curl ifconfig.me.

Version: v0.1.232
2025-12-01 13:49:54 +01:00
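
The lookup that backs this sub-service appears in full in the systemd collector diff further down. As finally implemented (after the namespace and sudo fixes in the later commits above), and with logging and the IPv4 sanity check trimmed, it is roughly:

use std::process::Command;

// Condensed sketch: run curl inside the VPN network namespace and read the
// external IP from stdout. The full version in the diff below adds logging
// and a basic IPv4 check.
fn vpn_external_ip() -> Option<String> {
    let output = Command::new("timeout")
        .args(["5", "sudo", "ip", "netns", "exec", "vpn",
               "curl", "-s", "--max-time", "4", "https://ifconfig.me"])
        .output()
        .ok()?;
    let ip = String::from_utf8_lossy(&output.stdout).trim().to_string();
    (output.status.success() && !ip.is_empty()).then_some(ip)
}

fn main() {
    println!("external ip: {:?}", vpn_external_ip());
}
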
a7b69b8ae7 Fix duplicate data by clearing vectors before collection
All checks were successful
Build and Release / build-and-release (push) Successful in 1m19s
Collectors now clear their target vectors (tmpfs, drives, pools, services)
before populating to prevent duplicates when updating cached AgentData.

- Clear tmpfs list in memory collector
- Clear drives and pools in disk collector
- Clear services in systemd collector
- Bump version to v0.1.231
2025-12-01 13:21:26 +01:00
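
Because the collectors now write into a long-lived cached struct (see the caching fix below), each run has to reset its own section before repopulating it. A simplified illustration using a hypothetical one-field cache rather than the real AgentData:

// Hypothetical minimal cache: without clear(), every collection cycle would
// append another copy of each entry to the long-lived vector.
#[derive(Default)]
struct Cache {
    services: Vec<String>,
}

fn collect_services(cache: &mut Cache) {
    cache.services.clear(); // the fix: reset before repopulating
    cache.services.push("sshd.service".to_string());
}

fn main() {
    let mut cache = Cache::default();
    collect_services(&mut cache);
    collect_services(&mut cache);
    assert_eq!(cache.services.len(), 1); // would be 2 without the clear()
}
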
2d290f40b2 Fix data caching to prevent empty broadcasts
All checks were successful
Build and Release / build-and-release (push) Successful in 1m33s
CRITICAL FIX: Collectors now update cached AgentData instead of
creating new empty data each cycle. This prevents the dashboard
from seeing flashing/disappearing data.

- Add cached_agent_data field to Agent struct
- Update cached data when collectors run
- Always broadcast the full cached data every 2s
- Only individual collectors respect their intervals
- Bump version to v0.1.230
2025-12-01 13:14:53 +01:00
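
A condensed sketch of the pattern this commit describes, with simplified stand-in types (the real fields and calls are in the agent diff below): the cache is updated in place and a clone of the full state is broadcast every cycle.

// Simplified stand-ins for Agent and AgentData: the broadcast always sends
// the full cached snapshot instead of a freshly created, mostly empty struct.
#[derive(Clone, Default)]
struct AgentData {
    services: Vec<String>,
}

struct Agent {
    cached_agent_data: AgentData,
}

impl Agent {
    fn collect_and_broadcast(&mut self) {
        // Collectors (not shown) update self.cached_agent_data in place here.
        self.cached_agent_data.services = vec!["sshd.service".to_string()];
        let snapshot = self.cached_agent_data.clone();
        println!("broadcasting {} services", snapshot.services.len());
    }
}

fn main() {
    let mut agent = Agent { cached_agent_data: AgentData::default() };
    agent.collect_and_broadcast();
    agent.collect_and_broadcast(); // later cycles still broadcast the full cache
}
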
ad1fcaa27b Fix collector interval timing to prevent excessive SMART checks
All checks were successful
Build and Release / build-and-release (push) Successful in 1m46s
Collectors now respect their configured intervals instead of running
every transmission cycle (2s). This prevents disk SMART checks from
running every 2 seconds, which was causing constant disk activity.

- Add TimedCollector wrapper with interval tracking
- Only collect from collectors whose interval has elapsed
- Disk collector now properly runs every 300s instead of every 2s
- Bump version to v0.1.229
2025-12-01 13:03:45 +01:00
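
Reduced to just the timing decision, a simplified version of the TimedCollector logic shown in the agent diff below:

use std::time::{Duration, Instant};

// Simplified interval gate: collect on the first cycle, then only after the
// configured interval has elapsed since the last collection.
struct TimedCollector {
    interval: Duration,
    last_collection: Option<Instant>,
}

impl TimedCollector {
    fn should_collect(&self, now: Instant) -> bool {
        match self.last_collection {
            None => true, // first collection
            Some(last) => now.duration_since(last) >= self.interval,
        }
    }
}

fn main() {
    let disk = TimedCollector {
        interval: Duration::from_secs(300),
        last_collection: Some(Instant::now()),
    };
    // A 2s transmission tick right after a collection must not trigger another SMART check.
    assert!(!disk.should_collect(Instant::now()));
}
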
60ab4d4f9e Fix service panel column width calculation
All checks were successful
Build and Release / build-and-release (push) Successful in 1m10s
Replace hardcoded terminal width thresholds with dynamic calculation
based on actual column requirements. Column visibility now adapts
correctly at 58, 52, 43, and 34 character widths instead of the
previous arbitrary 80, 60, 45 thresholds.

- Add width constants for each column (NAME=23, STATUS=10, etc)
- Calculate cumulative widths dynamically for each layout tier
- Ensure header and data formatting use consistent width values
- Fix service name truncation to respect calculated column width
2025-11-30 12:09:44 +01:00
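
The quoted thresholds follow from the per-column width constants plus one space of column spacing; a quick arithmetic check using the same numbers as the widget diff below:

// 23 (name) + 1 + 10 (status) = 34; +1 + 8 (RAM) = 43; +1 + 8 (uptime) = 52; +1 + 5 (restarts) = 58
fn main() {
    let (name, status, ram, uptime, restarts, spacing) = (23u16, 10, 8, 8, 5, 1);
    let minimal = name + spacing + status;
    let with_ram = minimal + spacing + ram;
    let with_uptime = with_ram + spacing + uptime;
    let full = with_uptime + spacing + restarts;
    assert_eq!((minimal, with_ram, with_uptime, full), (34, 43, 52, 58));
}
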
9 changed files with 225 additions and 53 deletions

Cargo.lock (generated)

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 [[package]]
 name = "cm-dashboard"
-version = "0.1.226"
+version = "0.1.236"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.226"
+version = "0.1.236"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -325,7 +325,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.226"
+version = "0.1.236"
 dependencies = [
  "chrono",
  "serde",


@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.227"
+version = "0.1.236"
 edition = "2021"

 [dependencies]


@@ -1,6 +1,6 @@
 use anyhow::Result;
 use gethostname::gethostname;
-use std::time::Duration;
+use std::time::{Duration, Instant};
 use tokio::time::interval;
 use tracing::{debug, error, info};
@@ -19,13 +19,22 @@ use crate::collectors::{
 use crate::notifications::NotificationManager;
 use cm_dashboard_shared::AgentData;

+/// Wrapper for collectors with timing information
+struct TimedCollector {
+    collector: Box<dyn Collector>,
+    interval: Duration,
+    last_collection: Option<Instant>,
+    name: String,
+}
+
 pub struct Agent {
     hostname: String,
     config: AgentConfig,
     zmq_handler: ZmqHandler,
-    collectors: Vec<Box<dyn Collector>>,
+    collectors: Vec<TimedCollector>,
     notification_manager: NotificationManager,
     previous_status: Option<SystemStatus>,
+    cached_agent_data: AgentData,
 }

 /// Track system component status for change detection
@@ -55,36 +64,78 @@ impl Agent {
             config.zmq.publisher_port
         );

-        // Initialize collectors
-        let mut collectors: Vec<Box<dyn Collector>> = Vec::new();
+        // Initialize collectors with timing information
+        let mut collectors: Vec<TimedCollector> = Vec::new();

         // Add enabled collectors
         if config.collectors.cpu.enabled {
-            collectors.push(Box::new(CpuCollector::new(config.collectors.cpu.clone())));
+            collectors.push(TimedCollector {
+                collector: Box::new(CpuCollector::new(config.collectors.cpu.clone())),
+                interval: Duration::from_secs(config.collectors.cpu.interval_seconds),
+                last_collection: None,
+                name: "CPU".to_string(),
+            });
+            info!("CPU collector initialized with {}s interval", config.collectors.cpu.interval_seconds);
         }

         if config.collectors.memory.enabled {
-            collectors.push(Box::new(MemoryCollector::new(config.collectors.memory.clone())));
+            collectors.push(TimedCollector {
+                collector: Box::new(MemoryCollector::new(config.collectors.memory.clone())),
+                interval: Duration::from_secs(config.collectors.memory.interval_seconds),
+                last_collection: None,
+                name: "Memory".to_string(),
+            });
+            info!("Memory collector initialized with {}s interval", config.collectors.memory.interval_seconds);
         }

         if config.collectors.disk.enabled {
-            collectors.push(Box::new(DiskCollector::new(config.collectors.disk.clone())));
+            collectors.push(TimedCollector {
+                collector: Box::new(DiskCollector::new(config.collectors.disk.clone())),
+                interval: Duration::from_secs(config.collectors.disk.interval_seconds),
+                last_collection: None,
+                name: "Disk".to_string(),
+            });
+            info!("Disk collector initialized with {}s interval", config.collectors.disk.interval_seconds);
         }

         if config.collectors.systemd.enabled {
-            collectors.push(Box::new(SystemdCollector::new(config.collectors.systemd.clone())));
+            collectors.push(TimedCollector {
+                collector: Box::new(SystemdCollector::new(config.collectors.systemd.clone())),
+                interval: Duration::from_secs(config.collectors.systemd.interval_seconds),
+                last_collection: None,
+                name: "Systemd".to_string(),
+            });
+            info!("Systemd collector initialized with {}s interval", config.collectors.systemd.interval_seconds);
         }

         if config.collectors.backup.enabled {
-            collectors.push(Box::new(BackupCollector::new()));
+            collectors.push(TimedCollector {
+                collector: Box::new(BackupCollector::new()),
+                interval: Duration::from_secs(config.collectors.backup.interval_seconds),
+                last_collection: None,
+                name: "Backup".to_string(),
+            });
+            info!("Backup collector initialized with {}s interval", config.collectors.backup.interval_seconds);
         }

         if config.collectors.network.enabled {
-            collectors.push(Box::new(NetworkCollector::new(config.collectors.network.clone())));
+            collectors.push(TimedCollector {
+                collector: Box::new(NetworkCollector::new(config.collectors.network.clone())),
+                interval: Duration::from_secs(config.collectors.network.interval_seconds),
+                last_collection: None,
+                name: "Network".to_string(),
+            });
+            info!("Network collector initialized with {}s interval", config.collectors.network.interval_seconds);
         }

         if config.collectors.nixos.enabled {
-            collectors.push(Box::new(NixOSCollector::new(config.collectors.nixos.clone())));
+            collectors.push(TimedCollector {
+                collector: Box::new(NixOSCollector::new(config.collectors.nixos.clone())),
+                interval: Duration::from_secs(config.collectors.nixos.interval_seconds),
+                last_collection: None,
+                name: "NixOS".to_string(),
+            });
+            info!("NixOS collector initialized with {}s interval", config.collectors.nixos.interval_seconds);
         }

         info!("Initialized {} collectors", collectors.len());
@@ -93,6 +144,9 @@ impl Agent {
         let notification_manager = NotificationManager::new(&config.notifications, &hostname)?;
         info!("Notification manager initialized");

+        // Initialize cached agent data
+        let cached_agent_data = AgentData::new(hostname.clone(), env!("CARGO_PKG_VERSION").to_string());
+
         Ok(Self {
             hostname,
             config,
@@ -100,6 +154,7 @@ impl Agent {
             collectors,
             notification_manager,
             previous_status: None,
+            cached_agent_data,
         })
     }
@@ -149,24 +204,47 @@ impl Agent {
     async fn collect_and_broadcast(&mut self) -> Result<()> {
         debug!("Starting structured data collection");

-        // Initialize empty AgentData
-        let mut agent_data = AgentData::new(self.hostname.clone(), env!("CARGO_PKG_VERSION").to_string());
-
-        // Collect data from all collectors
-        for collector in &self.collectors {
-            if let Err(e) = collector.collect_structured(&mut agent_data).await {
-                error!("Collector failed: {}", e);
-                // Continue with other collectors even if one fails
-            }
-        }
+        // Collect data from collectors whose intervals have elapsed
+        // Update cached_agent_data with new data
+        let now = Instant::now();
+        for timed_collector in &mut self.collectors {
+            let should_collect = match timed_collector.last_collection {
+                None => true, // First collection
+                Some(last_time) => now.duration_since(last_time) >= timed_collector.interval,
+            };
+
+            if should_collect {
+                if let Err(e) = timed_collector.collector.collect_structured(&mut self.cached_agent_data).await {
+                    error!("Collector {} failed: {}", timed_collector.name, e);
+                    // Update last_collection time even on failure to prevent immediate retries
+                    timed_collector.last_collection = Some(now);
+                } else {
+                    timed_collector.last_collection = Some(now);
+                    debug!(
+                        "Collected from {} ({}s interval)",
+                        timed_collector.name,
+                        timed_collector.interval.as_secs()
+                    );
+                }
+            }
+        }
+
+        // Update timestamp on cached data
+        self.cached_agent_data.timestamp = std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap()
+            .as_secs();
+
+        // Clone for notification check (to avoid borrow issues)
+        let agent_data_snapshot = self.cached_agent_data.clone();

         // Check for status changes and send notifications
-        if let Err(e) = self.check_status_changes_and_notify(&agent_data).await {
+        if let Err(e) = self.check_status_changes_and_notify(&agent_data_snapshot).await {
             error!("Failed to check status changes: {}", e);
         }

-        // Broadcast the structured data via ZMQ
-        if let Err(e) = self.zmq_handler.publish_agent_data(&agent_data).await {
+        // Broadcast the cached structured data via ZMQ
+        if let Err(e) = self.zmq_handler.publish_agent_data(&agent_data_snapshot).await {
             error!("Failed to broadcast agent data: {}", e);
         } else {
             debug!("Successfully broadcast structured agent data");


@@ -66,6 +66,10 @@ impl DiskCollector {
     /// Collect all storage data and populate AgentData
     async fn collect_storage_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
+        // Clear drives and pools to prevent duplicates when updating cached data
+        agent_data.system.storage.drives.clear();
+        agent_data.system.storage.pools.clear();
+
         // Step 1: Get mount points and their backing devices
         let mount_devices = self.get_mount_devices().await?;


@@ -200,6 +200,9 @@ impl Collector for MemoryCollector {
         debug!("Collecting memory metrics");
         let start = std::time::Instant::now();

+        // Clear tmpfs list to prevent duplicates when updating cached data
+        agent_data.system.memory.tmpfs.clear();
+
         // Parse memory info from /proc/meminfo
         let info = self.parse_meminfo().await?;


@@ -159,6 +159,26 @@ impl SystemdCollector {
             }
         }

+        if service_name == "openvpn-vpn-connection" && status_info.active_state == "active" {
+            tracing::info!("Checking VPN external IP for service: {}", service_name);
+            match self.get_vpn_external_ip() {
+                Some(external_ip) => {
+                    tracing::info!("Got VPN external IP: {}", external_ip);
+                    let metrics = Vec::new();
+                    sub_services.push(SubServiceData {
+                        name: format!("ip: {}", external_ip),
+                        service_status: Status::Ok,
+                        metrics,
+                        service_type: "vpn_route".to_string(),
+                    });
+                }
+                None => {
+                    tracing::warn!("Failed to get VPN external IP");
+                }
+            }
+        }
+
         // Create complete service data
         let service_data = ServiceData {
             name: service_name.clone(),
@@ -836,11 +856,45 @@ impl SystemdCollector {
             _ => value, // Assume bytes if no unit
         }
     }
+
+    /// Get VPN external IP by querying through the vpn namespace
+    fn get_vpn_external_ip(&self) -> Option<String> {
+        let output = Command::new("timeout")
+            .args(&[
+                "5",
+                "sudo",
+                "ip",
+                "netns",
+                "exec",
+                "vpn",
+                "curl",
+                "-s",
+                "--max-time",
+                "4",
+                "https://ifconfig.me"
+            ])
+            .output()
+            .ok()?;
+
+        if output.status.success() {
+            let ip = String::from_utf8_lossy(&output.stdout).trim().to_string();
+            if !ip.is_empty() && ip.contains('.') {
+                return Some(ip);
+            }
+        }
+
+        let stderr = String::from_utf8_lossy(&output.stderr);
+        tracing::warn!("VPN external IP query failed. Exit code: {:?}, stderr: {}", output.status.code(), stderr);
+        None
+    }
 }

 #[async_trait]
 impl Collector for SystemdCollector {
     async fn collect_structured(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
+        // Clear services to prevent duplicates when updating cached data
+        agent_data.services.clear();
+
         // Use cached complete data if available and fresh
         if let Some(cached_complete_services) = self.get_cached_complete_services() {
             for service_data in cached_complete_services {


@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.227"
+version = "0.1.236"
 edition = "2021"

 [dependencies]


@@ -22,10 +22,25 @@ struct ColumnVisibility {
 }

 impl ColumnVisibility {
+    /// Calculate actual width needed for all columns
+    const NAME_WIDTH: u16 = 23;
+    const STATUS_WIDTH: u16 = 10;
+    const RAM_WIDTH: u16 = 8;
+    const UPTIME_WIDTH: u16 = 8;
+    const RESTARTS_WIDTH: u16 = 5;
+    const COLUMN_SPACING: u16 = 1; // Space between columns
+
     /// Determine which columns to show based on available width
+    /// Priority order: Name > Status > RAM > Uptime > Restarts
     fn from_width(width: u16) -> Self {
-        if width >= 80 {
-            // Full layout: Name (25) + Status (10) + RAM (8) + Uptime (8) + Restarts (5) = 56 chars
+        // Calculate cumulative widths for each configuration
+        let minimal = Self::NAME_WIDTH + Self::COLUMN_SPACING + Self::STATUS_WIDTH; // 34
+        let with_ram = minimal + Self::COLUMN_SPACING + Self::RAM_WIDTH; // 43
+        let with_uptime = with_ram + Self::COLUMN_SPACING + Self::UPTIME_WIDTH; // 52
+        let full = with_uptime + Self::COLUMN_SPACING + Self::RESTARTS_WIDTH; // 58
+
+        if width >= full {
+            // Show all columns
             Self {
                 show_name: true,
                 show_status: true,
@@ -33,8 +48,8 @@ impl ColumnVisibility {
                 show_uptime: true,
                 show_restarts: true,
             }
-        } else if width >= 60 {
-            // Hide restarts: Name (25) + Status (10) + RAM (8) + Uptime (8) = 51 chars
+        } else if width >= with_uptime {
+            // Hide restarts
             Self {
                 show_name: true,
                 show_status: true,
@@ -42,8 +57,8 @@ impl ColumnVisibility {
                 show_uptime: true,
                 show_restarts: false,
             }
-        } else if width >= 45 {
-            // Hide uptime and restarts: Name (25) + Status (10) + RAM (8) = 43 chars
+        } else if width >= with_ram {
+            // Hide uptime and restarts
             Self {
                 show_name: true,
                 show_status: true,
@@ -52,7 +67,7 @@ impl ColumnVisibility {
                 show_restarts: false,
             }
         } else {
-            // Minimal: Name (25) + Status (10) = 35 chars
+            // Minimal: Name + Status only
             Self {
                 show_name: true,
                 show_status: true,
@@ -130,9 +145,11 @@ impl ServicesWidget {
     /// Format parent service line - returns text without icon for span formatting
     fn format_parent_service_line(&self, name: &str, info: &ServiceInfo, columns: ColumnVisibility) -> String {
-        // Truncate long service names to fit layout (account for icon space)
-        let short_name = if name.len() > 22 {
-            format!("{}...", &name[..19])
+        // Truncate long service names to fit layout
+        // NAME_WIDTH - 3 chars for "..." = max displayable chars
+        let max_name_len = (ColumnVisibility::NAME_WIDTH - 3) as usize;
+        let short_name = if name.len() > max_name_len {
+            format!("{}...", &name[..max_name_len.saturating_sub(3)])
         } else {
             name.to_string()
         };
@@ -185,19 +202,19 @@ impl ServicesWidget {
         // Build format string based on column visibility
         let mut parts = Vec::new();
         if columns.show_name {
-            parts.push(format!("{:<23}", short_name));
+            parts.push(format!("{:<width$}", short_name, width = ColumnVisibility::NAME_WIDTH as usize));
         }
         if columns.show_status {
-            parts.push(format!("{:<10}", status_str));
+            parts.push(format!("{:<width$}", status_str, width = ColumnVisibility::STATUS_WIDTH as usize));
         }
         if columns.show_ram {
-            parts.push(format!("{:<8}", memory_str));
+            parts.push(format!("{:<width$}", memory_str, width = ColumnVisibility::RAM_WIDTH as usize));
         }
         if columns.show_uptime {
-            parts.push(format!("{:<8}", uptime_str));
+            parts.push(format!("{:<width$}", uptime_str, width = ColumnVisibility::UPTIME_WIDTH as usize));
         }
         if columns.show_restarts {
-            parts.push(format!("{:<5}", restart_str));
+            parts.push(format!("{:<width$}", restart_str, width = ColumnVisibility::RESTARTS_WIDTH as usize));
         }

         parts.join(" ")
@@ -281,6 +298,22 @@ impl ServicesWidget {
                     .bg(Theme::background()),
                 ),
             ]
+        } else if info.service_type == "vpn_route" {
+            // VPN route info - no status icon
+            vec![
+                // Indentation and tree prefix
+                ratatui::text::Span::styled(
+                    format!(" {} ", tree_symbol),
+                    Typography::tree(),
+                ),
+                // Service name (no icon)
+                ratatui::text::Span::styled(
+                    short_name,
+                    Style::default()
+                        .fg(Theme::secondary_text())
+                        .bg(Theme::background()),
+                ),
+            ]
         } else {
             vec![
                 // Indentation and tree prefix
@@ -550,19 +583,19 @@ impl ServicesWidget {
         // Build header based on visible columns
         let mut header_parts = Vec::new();
         if columns.show_name {
-            header_parts.push(format!("{:<25}", "Service:"));
+            header_parts.push(format!("{:<width$}", "Service:", width = ColumnVisibility::NAME_WIDTH as usize));
         }
         if columns.show_status {
-            header_parts.push(format!("{:<10}", "Status:"));
+            header_parts.push(format!("{:<width$}", "Status:", width = ColumnVisibility::STATUS_WIDTH as usize));
         }
         if columns.show_ram {
-            header_parts.push(format!("{:<8}", "RAM:"));
+            header_parts.push(format!("{:<width$}", "RAM:", width = ColumnVisibility::RAM_WIDTH as usize));
         }
         if columns.show_uptime {
-            header_parts.push(format!("{:<8}", "Uptime:"));
+            header_parts.push(format!("{:<width$}", "Uptime:", width = ColumnVisibility::UPTIME_WIDTH as usize));
         }
         if columns.show_restarts {
-            header_parts.push(format!("{:<5}", "↻:"));
+            header_parts.push(format!("{:<width$}", "↻:", width = ColumnVisibility::RESTARTS_WIDTH as usize));
         }

         let header = header_parts.join(" ");


@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.227"
+version = "0.1.236"
 edition = "2021"

 [dependencies]