Compare commits

...

4 Commits

Author SHA1 Message Date
2910b7d875 Update version to 0.1.22 and fix system metric status calculation
All checks were successful
Build and Release / build-and-release (push) Successful in 1m11s
- Fix /tmp usage status to use proper thresholds instead of hardcoded Ok status
- Fix wear level status to use configurable thresholds instead of hardcoded values
- Add dedicated tmp_status field to SystemWidget for proper /tmp status display
- Remove host-level hourglass icon during service operations
- Implement immediate service status updates after start/stop/restart commands
- Remove active users display and collection from NixOS section
- Fix immediate host status aggregation transmission to dashboard
2025-10-28 13:21:56 +01:00
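
The first two fixes swap hardcoded cutoffs for configurable ones. A minimal sketch of the threshold-to-status pattern, assuming a simple Status enum; the names here are illustrative, not the project's exact types:

// Sketch: derive a status from configurable thresholds instead of
// hardcoded 80.0/90.0 cutoffs.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Status { Ok, Warning, Critical }

fn usage_status(value: f32, warning_percent: f32, critical_percent: f32) -> Status {
    if value >= critical_percent {
        Status::Critical
    } else if value >= warning_percent {
        Status::Warning
    } else {
        Status::Ok
    }
}

fn main() {
    // With warning at 80% and critical at 90%, 85% usage is a Warning.
    assert_eq!(usage_status(85.0, 80.0, 90.0), Status::Warning);
}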
43242debce Update version to 0.1.21 and fix dashboard data caching
All checks were successful
Build and Release / build-and-release (push) Successful in 1m13s
- Separate dashboard updates from email notifications for immediate status aggregation
- Add metric caching to MetricCollectionManager for instant dashboard updates
- Dashboard now receives cached data every 1 second instead of waiting for collection intervals
- Fix transmission to use cached metrics rather than triggering fresh collection
- Email notifications maintain separate 60-second batching interval
- Update configurable email notification aggregation interval
2025-10-28 12:16:31 +01:00
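
A minimal sketch of the split this commit describes, assuming tokio; broadcast_cached() and flush_email_batch() are hypothetical stand-ins for the agent's actual methods, and the 1s/60s values follow the commit message:

use std::time::Duration;
use tokio::time::interval;

async fn broadcast_cached() { /* send cached metrics over ZMQ to the dashboard */ }
async fn flush_email_batch() { /* send one aggregated notification email */ }

#[tokio::main]
async fn main() {
    let mut transmission = interval(Duration::from_secs(1));  // dashboard updates from cache
    let mut notification = interval(Duration::from_secs(60)); // email notification batching
    loop {
        tokio::select! {
            _ = transmission.tick() => broadcast_cached().await,
            _ = notification.tick() => flush_email_batch().await,
        }
    }
}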
a2519b2814 Update version to 0.1.20 and fix email notification aggregation
All checks were successful
Build and Release / build-and-release (push) Successful in 1m11s
- Fix email notification aggregation to send batched notifications instead of individual emails
- Fix startup data collection to properly process initial status without triggering change notifications
- Maintain event-driven transmission while preserving aggregated notification batching
- Update version from 0.1.19 to 0.1.20 across all components
2025-10-28 10:48:29 +01:00
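
A hedged sketch of the batching idea: queue per-metric changes and flush them as a single email. The (old, new, count) tuple mirrors the pending_changes shape visible in the status-manager diff further below; everything else is illustrative:

use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq)]
enum Status { Ok, Warning, Critical }

#[derive(Default)]
struct NotificationBatch {
    // metric name -> (first old status, latest new status, change count)
    pending: HashMap<String, (Status, Status, u32)>,
}

impl NotificationBatch {
    fn queue(&mut self, name: &str, old: Status, new: Status) {
        let entry = self.pending.entry(name.to_string()).or_insert((old, old, 0));
        entry.1 = new; // keep only the final status
        entry.2 += 1;  // count intermediate flaps
    }

    // One aggregated email body instead of an email per change.
    fn flush(&mut self) -> Option<String> {
        if self.pending.is_empty() {
            return None;
        }
        let lines: Vec<String> = self
            .pending
            .drain()
            .map(|(name, (old, new, n))| format!("{name}: {old:?} -> {new:?} ({n} changes)"))
            .collect();
        Some(lines.join("\n"))
    }
}

fn main() {
    let mut batch = NotificationBatch::default();
    batch.queue("disk_wear", Status::Ok, Status::Warning);
    batch.queue("disk_wear", Status::Warning, Status::Critical);
    println!("{}", batch.flush().unwrap()); // disk_wear: Ok -> Critical (2 changes)
}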
91f037aa3e Update to v0.1.19 with event-driven status aggregation
All checks were successful
Build and Release / build-and-release (push) Successful in 2m4s
Major architectural improvements:

CORE CHANGES:
- Remove notification_interval_seconds - status aggregation now immediate
- Status calculation moved to collection phase instead of transmission
- Event-driven transmission triggers immediately on status changes
- Dual transmission strategy: immediate on change + periodic backup
- Real-time notifications without batching delays

TECHNICAL IMPROVEMENTS:
- process_metric() now returns bool indicating status change
- Immediate ZMQ broadcast when status changes detected
- Status aggregation happens during metric collection, not later
- Legacy get_nixos_build_info() method removed (unused)
- All compilation warnings fixed

BEHAVIOR CHANGES:
- Critical alerts sent instantly instead of waiting for intervals
- Dashboard receives real-time status updates
- Notifications triggered immediately on status transitions
- Backup periodic transmission every 1s ensures heartbeat

This provides much more responsive monitoring with instant alerting
while maintaining the reliability of periodic transmission as backup.
2025-10-28 10:36:34 +01:00
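
With status calculated during collection, host status can be derived the moment a metric arrives. A minimal sketch of the "worst_case" aggregation named in the HostStatusConfig diff below, assuming an ordering where Critical outranks Warning outranks Ok:

// Sketch: worst-case host status. The Ord derive makes Critical compare
// highest, so max() yields the worst status across all metrics.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Status { Ok, Warning, Critical }

fn aggregate_host_status(metric_statuses: &[Status]) -> Status {
    metric_statuses.iter().copied().max().unwrap_or(Status::Ok)
}

fn main() {
    let statuses = [Status::Ok, Status::Warning, Status::Ok];
    assert_eq!(aggregate_host_status(&statuses), Status::Warning);
}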
14 changed files with 127 additions and 146 deletions

Cargo.lock generated
View File

@@ -270,7 +270,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 [[package]]
 name = "cm-dashboard"
-version = "0.1.18"
+version = "0.1.21"
 dependencies = [
  "anyhow",
  "chrono",
@@ -291,7 +291,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.18"
+version = "0.1.21"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -314,7 +314,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.18"
+version = "0.1.21"
 dependencies = [
  "chrono",
  "serde",

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.18"
+version = "0.1.22"
 edition = "2021"

 [dependencies]

View File

@@ -71,11 +71,11 @@ impl Agent {
             info!("Initial metric collection completed - all data cached and ready");
         }

-        // Separate intervals for collection and transmission
+        // Separate intervals for collection, transmission, and email notifications
         let mut collection_interval =
             interval(Duration::from_secs(self.config.collection_interval_seconds));
         let mut transmission_interval = interval(Duration::from_secs(self.config.zmq.transmission_interval_seconds));
-        let mut notification_interval = interval(Duration::from_secs(self.config.status_aggregation.notification_interval_seconds));
+        let mut notification_interval = interval(Duration::from_secs(self.config.notifications.aggregation_interval_seconds));

        loop {
            tokio::select! {
@@ -86,13 +86,13 @@
                    }
                }
                _ = transmission_interval.tick() => {
-                    // Send all metrics via ZMQ every 1 second
+                    // Send all metrics via ZMQ (dashboard updates only)
                    if let Err(e) = self.broadcast_all_metrics().await {
                        error!("Failed to broadcast metrics: {}", e);
                    }
                }
                _ = notification_interval.tick() => {
-                    // Process batched notifications
+                    // Process batched email notifications (separate from dashboard updates)
                    if let Err(e) = self.host_status_manager.process_pending_notifications(&mut self.notification_manager).await {
                        error!("Failed to process pending notifications: {}", e);
                    }
@@ -127,8 +127,8 @@ impl Agent {
        info!("Force collected and cached {} metrics", metrics.len());

-        // Process metrics through status manager
-        self.process_metrics(&metrics).await;
+        // Process metrics through status manager (collect status data at startup)
+        let _status_changed = self.process_metrics(&metrics).await;

        Ok(())
    }
@@ -146,17 +146,24 @@ impl Agent {
        debug!("Collected and cached {} metrics", metrics.len());

-        // Process metrics through status manager
-        self.process_metrics(&metrics).await;
+        // Process metrics through status manager and trigger immediate transmission if status changed
+        let status_changed = self.process_metrics(&metrics).await;
+        if status_changed {
+            info!("Status change detected - triggering immediate metric transmission");
+            if let Err(e) = self.broadcast_all_metrics().await {
+                error!("Failed to broadcast metrics after status change: {}", e);
+            }
+        }

        Ok(())
    }

    async fn broadcast_all_metrics(&mut self) -> Result<()> {
-        debug!("Broadcasting all metrics via ZMQ");
+        debug!("Broadcasting cached metrics via ZMQ");

-        // Get all current metrics from collectors
-        let mut metrics = self.metric_manager.collect_all_metrics().await?;
+        // Get cached metrics (no fresh collection)
+        let mut metrics = self.metric_manager.get_cached_metrics();

        // Add the host status summary metric from status manager
        let host_status_metric = self.host_status_manager.get_host_status_metric();
@@ -171,7 +178,7 @@ impl Agent {
            return Ok(());
        }

-        debug!("Broadcasting {} metrics (including host status summary)", metrics.len());
+        debug!("Broadcasting {} cached metrics (including host status summary)", metrics.len());

        // Create and send message with all current data
        let message = MetricMessage::new(self.hostname.clone(), metrics);
@@ -181,11 +188,15 @@
        Ok(())
    }

-    async fn process_metrics(&mut self, metrics: &[Metric]) {
+    async fn process_metrics(&mut self, metrics: &[Metric]) -> bool {
+        let mut status_changed = false;
        for metric in metrics {
-            self.host_status_manager.process_metric(metric, &mut self.notification_manager).await;
+            if self.host_status_manager.process_metric(metric, &mut self.notification_manager).await {
+                status_changed = true;
+            }
        }
+        status_changed
    }

    /// Create agent version metric for cross-host version comparison
    fn get_agent_version_metric(&self) -> Metric {
@@ -259,7 +270,7 @@ impl Agent {
    }

    /// Handle systemd service control commands
-    async fn handle_service_control(&self, service_name: &str, action: &ServiceAction) -> Result<()> {
+    async fn handle_service_control(&mut self, service_name: &str, action: &ServiceAction) -> Result<()> {
        let action_str = match action {
            ServiceAction::Start => "start",
            ServiceAction::Stop => "stop",
@@ -289,9 +300,12 @@
        // Force refresh metrics after service control to update service status
        if matches!(action, ServiceAction::Start | ServiceAction::Stop | ServiceAction::Restart) {
-            info!("Triggering metric refresh after service control");
-            // Note: We can't call self.collect_metrics_only() here due to borrowing issues
-            // The next metric collection cycle will pick up the changes
+            info!("Triggering immediate metric refresh after service control");
+            if let Err(e) = self.collect_metrics_only().await {
+                error!("Failed to refresh metrics after service control: {}", e);
+            } else {
+                info!("Service status refreshed immediately after {} {}", action_str, service_name);
+            }
        }

        Ok(())

View File

@@ -556,8 +556,8 @@ impl Collector for DiskCollector {
        // Drive wear level (for SSDs)
        if let Some(wear) = drive.wear_level {
-            let wear_status = if wear >= 90.0 { Status::Critical }
-            else if wear >= 80.0 { Status::Warning }
+            let wear_status = if wear >= self.config.wear_critical_percent { Status::Critical }
+            else if wear >= self.config.wear_warning_percent { Status::Warning }
            else { Status::Ok };

            metrics.push(Metric {
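
The DiskConfig fields this hunk reads are not part of the compare; a hedged guess at their shape, using the previously hardcoded values as defaults:

// Assumed shape of the collector's threshold config; the field names come
// from the hunk above, but the types and defaults here are guesses.
pub struct DiskConfig {
    pub wear_warning_percent: f32,  // previously hardcoded as 80.0
    pub wear_critical_percent: f32, // previously hardcoded as 90.0
}

impl Default for DiskConfig {
    fn default() -> Self {
        Self { wear_warning_percent: 80.0, wear_critical_percent: 90.0 }
    }
}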

View File

@@ -187,7 +187,7 @@ impl MemoryCollector {
        }

        // Monitor tmpfs (/tmp) usage
-        if let Ok(tmpfs_metrics) = self.get_tmpfs_metrics() {
+        if let Ok(tmpfs_metrics) = self.get_tmpfs_metrics(status_tracker) {
            metrics.extend(tmpfs_metrics);
        }

@@ -195,7 +195,7 @@
    }

    /// Get tmpfs (/tmp) usage metrics
-    fn get_tmpfs_metrics(&self) -> Result<Vec<Metric>, CollectorError> {
+    fn get_tmpfs_metrics(&self, status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError> {
        use std::process::Command;

        let output = Command::new("df")
@@ -249,12 +249,15 @@ impl MemoryCollector {
        let mut metrics = Vec::new();
        let timestamp = chrono::Utc::now().timestamp() as u64;

+        // Calculate status using same thresholds as main memory
+        let tmp_status = self.calculate_usage_status("memory_tmp_usage_percent", usage_percent, status_tracker);
+
        metrics.push(Metric {
            name: "memory_tmp_usage_percent".to_string(),
            value: MetricValue::Float(usage_percent),
            unit: Some("%".to_string()),
            description: Some("tmpfs /tmp usage percentage".to_string()),
-            status: Status::Ok,
+            status: tmp_status,
            timestamp,
        });

View File

@@ -10,7 +10,6 @@ use crate::config::NixOSConfig;
 ///
 /// Collects NixOS-specific system information including:
 /// - NixOS version and build information
-/// - Currently active/logged in users
 pub struct NixOSCollector {
 }

@@ -19,31 +18,6 @@ impl NixOSCollector {
        Self {}
    }

-    /// Get NixOS build information
-    fn get_nixos_build_info(&self) -> Result<String, Box<dyn std::error::Error>> {
-        // Get nixos-version output directly
-        let output = Command::new("nixos-version").output()?;
-        if !output.status.success() {
-            return Err("nixos-version command failed".into());
-        }
-
-        let version_line = String::from_utf8_lossy(&output.stdout);
-        let version = version_line.trim();
-
-        if version.is_empty() {
-            return Err("Empty nixos-version output".into());
-        }
-
-        // Remove codename part (e.g., "(Warbler)")
-        let clean_version = if let Some(pos) = version.find(" (") {
-            version[..pos].to_string()
-        } else {
-            version.to_string()
-        };
-
-        Ok(clean_version)
-    }
-
    /// Get agent hash from binary path
    fn get_agent_hash(&self) -> Result<String, Box<dyn std::error::Error>> {
@@ -90,27 +64,6 @@ impl NixOSCollector {
        Err("Could not extract hash from nix store path".into())
    }

-    /// Get currently active users
-    fn get_active_users(&self) -> Result<Vec<String>, Box<dyn std::error::Error>> {
-        let output = Command::new("who").output()?;
-        if !output.status.success() {
-            return Err("who command failed".into());
-        }
-
-        let who_output = String::from_utf8_lossy(&output.stdout);
-        let mut users = std::collections::HashSet::new();
-
-        for line in who_output.lines() {
-            if let Some(username) = line.split_whitespace().next() {
-                if !username.is_empty() {
-                    users.insert(username.to_string());
-                }
-            }
-        }
-
-        Ok(users.into_iter().collect())
-    }
 }

 #[async_trait]
@@ -146,31 +99,6 @@ impl Collector for NixOSCollector {
            }
        }

-        // Collect active users
-        match self.get_active_users() {
-            Ok(users) => {
-                let users_str = users.join(", ");
-                metrics.push(Metric {
-                    name: "system_active_users".to_string(),
-                    value: MetricValue::String(users_str),
-                    unit: None,
-                    description: Some("Currently active users".to_string()),
-                    status: Status::Ok,
-                    timestamp,
-                });
-            }
-            Err(e) => {
-                debug!("Failed to get active users: {}", e);
-                metrics.push(Metric {
-                    name: "system_active_users".to_string(),
-                    value: MetricValue::String("unknown".to_string()),
-                    unit: None,
-                    description: Some("Active users (failed to detect)".to_string()),
-                    status: Status::Unknown,
-                    timestamp,
-                });
-            }
-        }
-
        // Collect config hash
        match self.get_config_hash() {

View File

@@ -143,6 +143,8 @@ pub struct NotificationConfig {
     pub from_email: String,
     pub to_email: String,
     pub rate_limit_minutes: u64,
+    /// Email notification batching interval in seconds (default: 60)
+    pub aggregation_interval_seconds: u64,
 }
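
One way the documented default of 60 could be wired up, assuming serde-based config deserialization; the project's actual wiring is not shown in this compare:

use serde::Deserialize;

fn default_aggregation_interval() -> u64 {
    60 // documented default: batch email notifications over 60 seconds
}

#[derive(Deserialize)]
pub struct NotificationConfig {
    pub from_email: String,
    pub to_email: String,
    pub rate_limit_minutes: u64,
    /// Email notification batching interval in seconds (default: 60)
    #[serde(default = "default_aggregation_interval")]
    pub aggregation_interval_seconds: u64,
}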

View File

@@ -21,6 +21,7 @@ struct TimedCollector {
 pub struct MetricCollectionManager {
     collectors: Vec<TimedCollector>,
     status_tracker: StatusTracker,
+    cached_metrics: Vec<Metric>,
 }

 impl MetricCollectionManager {
@@ -177,6 +178,7 @@ impl MetricCollectionManager {
        Ok(Self {
            collectors,
            status_tracker: StatusTracker::new(),
+            cached_metrics: Vec::new(),
        })
    }

@@ -198,6 +200,9 @@ impl MetricCollectionManager {
                }
            }
        }

+        // Cache the collected metrics
+        self.cached_metrics = all_metrics.clone();
+
        Ok(all_metrics)
    }

@@ -231,6 +236,18 @@ impl MetricCollectionManager {
                }
            }
        }

+        // Update cache with newly collected metrics
+        if !all_metrics.is_empty() {
+            // Merge new metrics with cached metrics (replace by name)
+            for new_metric in &all_metrics {
+                // Remove any existing metric with the same name
+                self.cached_metrics.retain(|cached| cached.name != new_metric.name);
+                // Add the new metric
+                self.cached_metrics.push(new_metric.clone());
+            }
+        }
+
        Ok(all_metrics)
    }

@@ -239,4 +256,9 @@ impl MetricCollectionManager {
        self.collect_metrics_timed().await
    }
+
+    /// Get cached metrics without triggering fresh collection
+    pub fn get_cached_metrics(&self) -> Vec<Metric> {
+        self.cached_metrics.clone()
+    }
 }

View File

@@ -9,7 +9,6 @@ use chrono::Utc;
 pub struct HostStatusConfig {
     pub enabled: bool,
     pub aggregation_method: String, // "worst_case"
-    pub notification_interval_seconds: u64,
 }

 impl Default for HostStatusConfig {
@@ -17,7 +16,6 @@ impl Default for HostStatusConfig {
        Self {
            enabled: true,
            aggregation_method: "worst_case".to_string(),
-            notification_interval_seconds: 30,
        }
    }
 }
@@ -160,25 +158,62 @@ impl HostStatusManager {
-    /// Process a metric - updates status (notifications handled separately via batching)
-    pub async fn process_metric(&mut self, metric: &Metric, _notification_manager: &mut crate::notifications::NotificationManager) {
-        // Just update status - notifications are handled by process_pending_notifications
-        self.update_service_status(metric.name.clone(), metric.status);
-    }
+    /// Process a metric - updates status and queues for aggregated notifications if status changed
+    pub async fn process_metric(&mut self, metric: &Metric, _notification_manager: &mut crate::notifications::NotificationManager) -> bool {
+        let old_service_status = self.service_statuses.get(&metric.name).copied();
+        let old_host_status = self.current_host_status;
+        let new_service_status = metric.status;
+
+        // Update status (this recalculates host status internally)
+        self.update_service_status(metric.name.clone(), new_service_status);
+        let new_host_status = self.current_host_status;
+
+        let mut status_changed = false;
+
+        // Check if service status actually changed (ignore first-time status setting)
+        if let Some(old_service_status) = old_service_status {
+            if old_service_status != new_service_status {
+                debug!("Service status change detected for {}: {:?} -> {:?}", metric.name, old_service_status, new_service_status);
+                // Queue change for aggregated notification (not immediate)
+                self.queue_status_change(&metric.name, old_service_status, new_service_status);
+                status_changed = true;
+            }
+        } else {
+            debug!("Initial status set for {}: {:?}", metric.name, new_service_status);
+        }
+
+        // Check if host status changed (this should trigger immediate transmission)
+        if old_host_status != new_host_status {
+            debug!("Host status change detected: {:?} -> {:?}", old_host_status, new_host_status);
+            status_changed = true;
+        }
+
+        status_changed // Return true if either service or host status changed
+    }
+
+    /// Queue status change for aggregated notification
+    fn queue_status_change(&mut self, metric_name: &str, old_status: Status, new_status: Status) {
+        // Add to pending changes for aggregated notification
+        let entry = self.pending_changes.entry(metric_name.to_string()).or_insert((old_status, old_status, 0));
+        entry.1 = new_status; // Update final status
+        entry.2 += 1; // Increment change count
+
+        // Set batch start time if this is the first change
+        if self.batch_start_time.is_none() {
+            self.batch_start_time = Some(Instant::now());
+        }
+    }

-    /// Process pending notifications - call this at notification intervals
+    /// Process pending notifications - legacy method, now rarely used
    pub async fn process_pending_notifications(&mut self, notification_manager: &mut crate::notifications::NotificationManager) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        if !self.config.enabled || self.pending_changes.is_empty() {
            return Ok(());
        }

-        let batch_start = self.batch_start_time.unwrap_or_else(Instant::now);
-        let batch_duration = batch_start.elapsed();
-
-        // Only process if enough time has passed
-        if batch_duration.as_secs() < self.config.notification_interval_seconds {
-            return Ok(());
-        }
+        // Process notifications immediately without interval batching

        // Create aggregated status changes
        let aggregated = self.create_aggregated_changes();

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.18"
+version = "0.1.22"
 edition = "2021"

 [dependencies]

View File

@@ -14,7 +14,7 @@ use app::Dashboard;
 /// Get hardcoded version
 fn get_version() -> &'static str {
-    "v0.1.18"
+    "v0.1.22"
 }

 /// Check if running inside tmux session

View File

@@ -724,24 +724,9 @@ impl TuiApp {
            spans.push(Span::styled(" ", Typography::title()));
        }

-        // Check if this host has a command status that affects the icon
-        let (status_icon, status_color) = if let Some(host_widgets) = self.host_widgets.get(host) {
-            match &host_widgets.command_status {
-                Some(CommandStatus::InProgress { .. }) => {
-                    // Show working indicator for in-progress commands
-                    ("", Theme::highlight())
-                }
-                _ => {
-                    // Normal status icon based on metrics
-                    let host_status = self.calculate_host_status(host, metric_store);
-                    (StatusIcons::get_icon(host_status), Theme::status_color(host_status))
-                }
-            }
-        } else {
-            // No host widgets yet, use normal status
-            let host_status = self.calculate_host_status(host, metric_store);
-            (StatusIcons::get_icon(host_status), Theme::status_color(host_status))
-        };
+        // Always show normal status icon based on metrics (no command status at host level)
+        let host_status = self.calculate_host_status(host, metric_store);
+        let (status_icon, status_color) = (StatusIcons::get_icon(host_status), Theme::status_color(host_status));

        // Add status icon
        spans.push(Span::styled(

View File

@@ -15,7 +15,6 @@ pub struct SystemWidget {
     // NixOS information
     nixos_build: Option<String>,
     config_hash: Option<String>,
-    active_users: Option<String>,
     agent_hash: Option<String>,

     // CPU metrics
@@ -33,6 +32,7 @@ pub struct SystemWidget {
     tmp_used_gb: Option<f32>,
     tmp_total_gb: Option<f32>,
     memory_status: Status,
+    tmp_status: Status,

     // Storage metrics (collected from disk metrics)
     storage_pools: Vec<StoragePool>,
@@ -66,7 +66,6 @@ impl SystemWidget {
        Self {
            nixos_build: None,
            config_hash: None,
-            active_users: None,
            agent_hash: None,
            cpu_load_1min: None,
            cpu_load_5min: None,
@@ -80,6 +79,7 @@
            tmp_used_gb: None,
            tmp_total_gb: None,
            memory_status: Status::Unknown,
+            tmp_status: Status::Unknown,
            storage_pools: Vec::new(),
            has_data: false,
        }
@@ -334,11 +334,6 @@ impl Widget for SystemWidget {
                    self.config_hash = Some(hash.clone());
                }
            }
-            "system_active_users" => {
-                if let MetricValue::String(users) = &metric.value {
-                    self.active_users = Some(users.clone());
-                }
-            }
            "agent_version" => {
                if let MetricValue::String(version) = &metric.value {
                    self.agent_hash = Some(version.clone());
@@ -390,6 +385,7 @@
            "memory_tmp_usage_percent" => {
                if let MetricValue::Float(usage) = metric.value {
                    self.tmp_usage_percent = Some(usage);
+                    self.tmp_status = metric.status.clone();
                }
            }
            "memory_tmp_used_gb" => {
@@ -432,10 +428,6 @@ impl SystemWidget {
            Span::styled(format!("Agent: {}", agent_version_text), Typography::secondary())
        ]));

-        let users_text = self.active_users.as_deref().unwrap_or("unknown");
-        lines.push(Line::from(vec![
-            Span::styled(format!("Active users: {}", users_text), Typography::secondary())
-        ]));

        // CPU section
        lines.push(Line::from(vec![
@@ -472,7 +464,7 @@ impl SystemWidget {
            Span::styled(" └─ ", Typography::tree()),
        ];
        tmp_spans.extend(StatusIcons::create_status_spans(
-            self.memory_status.clone(),
+            self.tmp_status.clone(),
            &format!("/tmp: {}", tmp_text)
        ));
        lines.push(Line::from(tmp_spans));

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.18"
+version = "0.1.22"
 edition = "2021"

 [dependencies]