Compare commits

...

3 Commits

Author SHA1 Message Date
ae70946c61 Implement state-aware service command validation with immediate visual feedback
All checks were successful
Build and Release / build-and-release (push) Successful in 1m12s
- Add service state detection before executing start/stop/restart commands
- Prevent redundant operations (start active services, stop inactive services)
- Show immediate directional arrows for command feedback (↑ starting, ↓ stopping, ↻ restarting; sketched below the commit list)
- Add get_service_status() method to ServicesWidget for state access
- Remove unused TerminalPopup code and dangling methods
- Clean up warnings and unused code throughout codebase

Service commands now validate current state and provide instant UX feedback while
preserving existing status icons and colors during transitions.
2025-10-28 13:48:24 +01:00
2910b7d875 Update version to 0.1.22 and fix system metric status calculation
All checks were successful
Build and Release / build-and-release (push) Successful in 1m11s
- Fix /tmp usage status to use proper thresholds instead of hardcoded Ok status
- Fix wear level status to use configurable thresholds instead of hardcoded values
- Add dedicated tmp_status field to SystemWidget for proper /tmp status display
- Remove host-level hourglass icon during service operations
- Implement immediate service status updates after start/stop/restart commands
- Remove active users display and collection from NixOS section
- Fix immediate host status aggregation transmission to dashboard
2025-10-28 13:21:56 +01:00
43242debce Update version to 0.1.21 and fix dashboard data caching
All checks were successful
Build and Release / build-and-release (push) Successful in 1m13s
- Separate dashboard updates from email notifications for immediate status aggregation
- Add metric caching to MetricCollectionManager for instant dashboard updates
- Dashboard now receives cached data every 1 second instead of waiting for collection intervals
- Fix transmission to use cached metrics rather than triggering fresh collection
- Email notifications maintain separate 60-second batching interval
- Make the email notification aggregation interval configurable
2025-10-28 12:16:31 +01:00
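
A note on the directional arrows mentioned in the first commit: their rendering site is not part of the hunks below. A minimal sketch of the mapping they describe, assuming the CommandType variants that appear later in this compare (the glyphs are taken from the commit message; feedback_arrow is a hypothetical helper, not code from this compare):

// Sketch only: CommandType mirrors the variants visible in the TUI diff below.
enum CommandType {
    ServiceStart,
    ServiceStop,
    ServiceRestart,
}

fn feedback_arrow(command: &CommandType) -> &'static str {
    match command {
        CommandType::ServiceStart => "↑",   // starting
        CommandType::ServiceStop => "↓",    // stopping
        CommandType::ServiceRestart => "↻", // restarting
    }
}
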
16 changed files with 157 additions and 351 deletions

Cargo.lock generated
View File

@@ -270,7 +270,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
[[package]]
name = "cm-dashboard"
version = "0.1.19"
version = "0.1.22"
dependencies = [
"anyhow",
"chrono",
@@ -291,7 +291,7 @@ dependencies = [
[[package]]
name = "cm-dashboard-agent"
version = "0.1.19"
version = "0.1.22"
dependencies = [
"anyhow",
"async-trait",
@@ -314,7 +314,7 @@ dependencies = [
[[package]]
name = "cm-dashboard-shared"
version = "0.1.19"
version = "0.1.22"
dependencies = [
"chrono",
"serde",

View File

@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-agent"
version = "0.1.20"
version = "0.1.23"
edition = "2021"
[dependencies]

View File

@@ -71,10 +71,11 @@ impl Agent {
info!("Initial metric collection completed - all data cached and ready");
}
-// Separate intervals for collection and transmission
+// Separate intervals for collection, transmission, and email notifications
let mut collection_interval =
interval(Duration::from_secs(self.config.collection_interval_seconds));
let mut transmission_interval = interval(Duration::from_secs(self.config.zmq.transmission_interval_seconds));
+let mut notification_interval = interval(Duration::from_secs(self.config.notifications.aggregation_interval_seconds));
loop {
tokio::select! {
@@ -85,12 +86,13 @@ impl Agent {
}
}
_ = transmission_interval.tick() => {
-// Send all metrics via ZMQ and process notifications immediately
+// Send all metrics via ZMQ (dashboard updates only)
if let Err(e) = self.broadcast_all_metrics().await {
error!("Failed to broadcast metrics: {}", e);
}
-// Process notifications immediately with each transmission
+}
+_ = notification_interval.tick() => {
+// Process batched email notifications (separate from dashboard updates)
if let Err(e) = self.host_status_manager.process_pending_notifications(&mut self.notification_manager).await {
error!("Failed to process pending notifications: {}", e);
}
@@ -158,10 +160,10 @@ impl Agent {
}
async fn broadcast_all_metrics(&mut self) -> Result<()> {
debug!("Broadcasting all metrics via ZMQ");
debug!("Broadcasting cached metrics via ZMQ");
-// Get all current metrics from collectors
-let mut metrics = self.metric_manager.collect_all_metrics().await?;
+// Get cached metrics (no fresh collection)
+let mut metrics = self.metric_manager.get_cached_metrics();
// Add the host status summary metric from status manager
let host_status_metric = self.host_status_manager.get_host_status_metric();
@@ -176,7 +178,7 @@ impl Agent {
return Ok(());
}
debug!("Broadcasting {} metrics (including host status summary)", metrics.len());
debug!("Broadcasting {} cached metrics (including host status summary)", metrics.len());
// Create and send message with all current data
let message = MetricMessage::new(self.hostname.clone(), metrics);
@@ -268,7 +270,7 @@ impl Agent {
}
/// Handle systemd service control commands
-async fn handle_service_control(&self, service_name: &str, action: &ServiceAction) -> Result<()> {
+async fn handle_service_control(&mut self, service_name: &str, action: &ServiceAction) -> Result<()> {
let action_str = match action {
ServiceAction::Start => "start",
ServiceAction::Stop => "stop",
@@ -298,9 +300,12 @@ impl Agent {
// Force refresh metrics after service control to update service status
if matches!(action, ServiceAction::Start | ServiceAction::Stop | ServiceAction::Restart) {
info!("Triggering metric refresh after service control");
// Note: We can't call self.collect_metrics_only() here due to borrowing issues
// The next metric collection cycle will pick up the changes
info!("Triggering immediate metric refresh after service control");
if let Err(e) = self.collect_metrics_only().await {
error!("Failed to refresh metrics after service control: {}", e);
} else {
info!("Service status refreshed immediately after {} {}", action_str, service_name);
}
}
Ok(())
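
The loop above now drives three independent cadences from one tokio::select!. A self-contained sketch of the pattern, with illustrative durations in place of the real config values:

use std::time::Duration;
use tokio::time::interval;

#[tokio::main]
async fn main() {
    // Three independent tickers, as in the agent loop above
    // (durations are illustrative, not the project's config values).
    let mut collection = interval(Duration::from_secs(5));
    let mut transmission = interval(Duration::from_secs(1));
    let mut notification = interval(Duration::from_secs(60));

    loop {
        tokio::select! {
            _ = collection.tick() => { /* collect metrics into the cache */ }
            _ = transmission.tick() => { /* broadcast cached metrics via ZMQ */ }
            _ = notification.tick() => { /* flush batched email notifications */ }
        }
    }
}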

View File

@@ -556,8 +556,8 @@ impl Collector for DiskCollector {
// Drive wear level (for SSDs)
if let Some(wear) = drive.wear_level {
-let wear_status = if wear >= 90.0 { Status::Critical }
-else if wear >= 80.0 { Status::Warning }
+let wear_status = if wear >= self.config.wear_critical_percent { Status::Critical }
+else if wear >= self.config.wear_warning_percent { Status::Warning }
else { Status::Ok };
metrics.push(Metric {
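
The hunk above swaps hardcoded 90/80 cutoffs for config-driven thresholds. A self-contained sketch of that mapping; the two field names match the diff, the surrounding types are assumptions:

#[derive(Debug, PartialEq)]
enum Status { Ok, Warning, Critical }

struct DiskConfig {
    wear_warning_percent: f64,
    wear_critical_percent: f64,
}

fn wear_status(wear: f64, config: &DiskConfig) -> Status {
    if wear >= config.wear_critical_percent {
        Status::Critical
    } else if wear >= config.wear_warning_percent {
        Status::Warning
    } else {
        Status::Ok
    }
}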

View File

@@ -187,7 +187,7 @@ impl MemoryCollector {
}
// Monitor tmpfs (/tmp) usage
-if let Ok(tmpfs_metrics) = self.get_tmpfs_metrics() {
+if let Ok(tmpfs_metrics) = self.get_tmpfs_metrics(status_tracker) {
metrics.extend(tmpfs_metrics);
}
@@ -195,7 +195,7 @@ impl MemoryCollector {
}
/// Get tmpfs (/tmp) usage metrics
-fn get_tmpfs_metrics(&self) -> Result<Vec<Metric>, CollectorError> {
+fn get_tmpfs_metrics(&self, status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError> {
use std::process::Command;
let output = Command::new("df")
@@ -249,12 +249,15 @@ impl MemoryCollector {
let mut metrics = Vec::new();
let timestamp = chrono::Utc::now().timestamp() as u64;
+// Calculate status using same thresholds as main memory
+let tmp_status = self.calculate_usage_status("memory_tmp_usage_percent", usage_percent, status_tracker);
metrics.push(Metric {
name: "memory_tmp_usage_percent".to_string(),
value: MetricValue::Float(usage_percent),
unit: Some("%".to_string()),
description: Some("tmpfs /tmp usage percentage".to_string()),
-status: Status::Ok,
+status: tmp_status,
timestamp,
});
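
get_tmpfs_metrics shells out to df, as the hunk shows. A minimal sketch of that measurement, assuming POSIX df -P output; the StatusTracker-based status calculation is omitted:

use std::process::Command;

fn tmp_usage_percent() -> Option<f64> {
    // df -P columns: Filesystem, 1024-blocks, Used, Available, Capacity, Mounted on
    let output = Command::new("df").args(["-P", "/tmp"]).output().ok()?;
    if !output.status.success() {
        return None;
    }
    let stdout = String::from_utf8_lossy(&output.stdout);
    let line = stdout.lines().nth(1)?; // first data row after the header
    let mut fields = line.split_whitespace();
    let used: f64 = fields.nth(2)?.parse().ok()?;  // column 3: used blocks
    let avail: f64 = fields.next()?.parse().ok()?; // column 4: available blocks
    if used + avail == 0.0 {
        return None;
    }
    Some(used / (used + avail) * 100.0)
}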

View File

@@ -10,7 +10,6 @@ use crate::config::NixOSConfig;
///
/// Collects NixOS-specific system information including:
/// - NixOS version and build information
-/// - Currently active/logged in users
pub struct NixOSCollector {
}
@@ -65,27 +64,6 @@ impl NixOSCollector {
Err("Could not extract hash from nix store path".into())
}
-/// Get currently active users
-fn get_active_users(&self) -> Result<Vec<String>, Box<dyn std::error::Error>> {
-let output = Command::new("who").output()?;
-if !output.status.success() {
-return Err("who command failed".into());
-}
-let who_output = String::from_utf8_lossy(&output.stdout);
-let mut users = std::collections::HashSet::new();
-for line in who_output.lines() {
-if let Some(username) = line.split_whitespace().next() {
-if !username.is_empty() {
-users.insert(username.to_string());
-}
-}
-}
-Ok(users.into_iter().collect())
-}
}
#[async_trait]
@@ -121,31 +99,6 @@ impl Collector for NixOSCollector {
}
}
-// Collect active users
-match self.get_active_users() {
-Ok(users) => {
-let users_str = users.join(", ");
-metrics.push(Metric {
-name: "system_active_users".to_string(),
-value: MetricValue::String(users_str),
-unit: None,
-description: Some("Currently active users".to_string()),
-status: Status::Ok,
-timestamp,
-});
-}
-Err(e) => {
-debug!("Failed to get active users: {}", e);
-metrics.push(Metric {
-name: "system_active_users".to_string(),
-value: MetricValue::String("unknown".to_string()),
-unit: None,
-description: Some("Active users (failed to detect)".to_string()),
-status: Status::Unknown,
-timestamp,
-});
-}
-}
// Collect config hash
match self.get_config_hash() {

View File

@@ -143,6 +143,8 @@ pub struct NotificationConfig {
pub from_email: String,
pub to_email: String,
pub rate_limit_minutes: u64,
+/// Email notification batching interval in seconds (default: 60)
+pub aggregation_interval_seconds: u64,
}
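
Since the workspace already depends on serde (see the Cargo.lock hunk above), the documented default of 60 could be wired up roughly like this; the derive setup is an assumption, not code from this compare:

use serde::Deserialize;

#[derive(Deserialize)]
pub struct NotificationConfig {
    pub from_email: String,
    pub to_email: String,
    pub rate_limit_minutes: u64,
    // Falls back to 60 when the field is absent from the config file.
    #[serde(default = "default_aggregation_interval")]
    pub aggregation_interval_seconds: u64,
}

fn default_aggregation_interval() -> u64 {
    60
}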

View File

@@ -21,6 +21,7 @@ struct TimedCollector {
pub struct MetricCollectionManager {
collectors: Vec<TimedCollector>,
status_tracker: StatusTracker,
+cached_metrics: Vec<Metric>,
}
impl MetricCollectionManager {
@@ -177,6 +178,7 @@ impl MetricCollectionManager {
Ok(Self {
collectors,
status_tracker: StatusTracker::new(),
+cached_metrics: Vec::new(),
})
}
@@ -198,6 +200,9 @@ impl MetricCollectionManager {
}
}
}
+// Cache the collected metrics
+self.cached_metrics = all_metrics.clone();
Ok(all_metrics)
}
@@ -231,6 +236,18 @@ impl MetricCollectionManager {
}
}
}
+// Update cache with newly collected metrics
+if !all_metrics.is_empty() {
+// Merge new metrics with cached metrics (replace by name)
+for new_metric in &all_metrics {
+// Remove any existing metric with the same name
+self.cached_metrics.retain(|cached| cached.name != new_metric.name);
+// Add the new metric
+self.cached_metrics.push(new_metric.clone());
+}
+}
Ok(all_metrics)
}
@@ -238,5 +255,10 @@ impl MetricCollectionManager {
pub async fn collect_all_metrics(&mut self) -> Result<Vec<Metric>> {
self.collect_metrics_timed().await
}
+/// Get cached metrics without triggering fresh collection
+pub fn get_cached_metrics(&self) -> Vec<Metric> {
+self.cached_metrics.clone()
+}
}
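
The merge above runs retain-and-push per metric, which is quadratic in the cache size. An alternative sketch, not the project's code, that keys the cache by metric name for constant-time upserts:

use std::collections::HashMap;

// Metric is reduced to a stand-in here; the real type also carries
// value, unit, description, status, and timestamp.
#[derive(Clone)]
struct Metric {
    name: String,
    value: f64,
}

struct MetricCache {
    by_name: HashMap<String, Metric>,
}

impl MetricCache {
    /// Replace any cached metric that shares a name with a fresh one.
    fn merge(&mut self, fresh: Vec<Metric>) {
        for metric in fresh {
            self.by_name.insert(metric.name.clone(), metric);
        }
    }

    /// Snapshot for transmission, like get_cached_metrics above.
    fn snapshot(&self) -> Vec<Metric> {
        self.by_name.values().cloned().collect()
    }
}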

View File

@@ -160,27 +160,37 @@ impl HostStatusManager {
/// Process a metric - updates status and queues for aggregated notifications if status changed
pub async fn process_metric(&mut self, metric: &Metric, _notification_manager: &mut crate::notifications::NotificationManager) -> bool {
-let old_status = self.service_statuses.get(&metric.name).copied();
-let new_status = metric.status;
+let old_service_status = self.service_statuses.get(&metric.name).copied();
+let old_host_status = self.current_host_status;
+let new_service_status = metric.status;
-// Update status
-self.update_service_status(metric.name.clone(), new_status);
+// Update status (this recalculates host status internally)
+self.update_service_status(metric.name.clone(), new_service_status);
-// Check if status actually changed (ignore first-time status setting)
-if let Some(old_status) = old_status {
-if old_status != new_status {
-debug!("Status change detected for {}: {:?} -> {:?}", metric.name, old_status, new_status);
+let new_host_status = self.current_host_status;
+let mut status_changed = false;
+// Check if service status actually changed (ignore first-time status setting)
+if let Some(old_service_status) = old_service_status {
+if old_service_status != new_service_status {
+debug!("Service status change detected for {}: {:?} -> {:?}", metric.name, old_service_status, new_service_status);
// Queue change for aggregated notification (not immediate)
-self.queue_status_change(&metric.name, old_status, new_status);
+self.queue_status_change(&metric.name, old_service_status, new_service_status);
-return true; // Status changed - caller should trigger immediate transmission
+status_changed = true;
}
} else {
-debug!("Initial status set for {}: {:?}", metric.name, new_status);
+debug!("Initial status set for {}: {:?}", metric.name, new_service_status);
}
-false // No status change (or first-time status)
+// Check if host status changed (this should trigger immediate transmission)
+if old_host_status != new_host_status {
+debug!("Host status change detected: {:?} -> {:?}", old_host_status, new_host_status);
+status_changed = true;
+}
+status_changed // Return true if either service or host status changed
}
/// Queue status change for aggregated notification
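
Reduced to its core, the change above makes process_metric report a change on either a service-level or a host-level transition. A self-contained sketch of that decision, with Status simplified:

#[derive(Clone, Copy, PartialEq, Debug)]
enum Status { Ok, Warning, Critical, Unknown }

fn status_changed(
    old_service: Option<Status>,
    new_service: Status,
    old_host: Status,
    new_host: Status,
) -> bool {
    // A first-time service status does not count as a change...
    let service_changed = matches!(old_service, Some(old) if old != new_service);
    // ...but any host-level transition does, so the dashboard updates immediately.
    service_changed || old_host != new_host
}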

View File

@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard"
version = "0.1.20"
version = "0.1.23"
edition = "2021"
[dependencies]

View File

@@ -263,12 +263,7 @@ impl Dashboard {
cmd_output.output_line
);
-// Forward to TUI if not headless
-if let Some(ref mut tui_app) = self.tui_app {
-tui_app.add_terminal_output(&cmd_output.hostname, cmd_output.output_line);
-// Note: Popup stays open for manual review - close with ESC/Q
-}
+// Command output (terminal popup removed - output not displayed)
}
last_metrics_check = Instant::now();

View File

@@ -14,7 +14,7 @@ use app::Dashboard;
/// Get hardcoded version
fn get_version() -> &'static str {
"v0.1.20"
"v0.1.23"
}
/// Check if running inside tmux session

View File

@@ -89,50 +89,6 @@ impl HostWidgets {
}
}
-/// Terminal popup for streaming command output
-#[derive(Clone)]
-pub struct TerminalPopup {
-/// Is the popup currently visible
-pub visible: bool,
-/// Command being executed
-pub _command_type: CommandType,
-/// Target hostname
-pub hostname: String,
-/// Target service/operation name
-pub target: String,
-/// Output lines collected so far
-pub output_lines: Vec<String>,
-/// Scroll offset for the output
-pub scroll_offset: usize,
-/// Start time of the operation
-pub start_time: Instant,
-}
-impl TerminalPopup {
-pub fn _new(command_type: CommandType, hostname: String, target: String) -> Self {
-Self {
-visible: true,
-_command_type: command_type,
-hostname,
-target,
-output_lines: Vec::new(),
-scroll_offset: 0,
-start_time: Instant::now(),
-}
-}
-pub fn add_output_line(&mut self, line: String) {
-self.output_lines.push(line);
-// Auto-scroll to bottom when new content arrives
-if self.output_lines.len() > 20 {
-self.scroll_offset = self.output_lines.len().saturating_sub(20);
-}
-}
-pub fn close(&mut self) {
-self.visible = false;
-}
-}
/// Main TUI application
pub struct TuiApp {
@@ -150,8 +106,6 @@ pub struct TuiApp {
should_quit: bool,
/// Track if user manually navigated away from localhost
user_navigated_away: bool,
-/// Terminal popup for streaming command output
-terminal_popup: Option<TerminalPopup>,
/// Dashboard configuration
config: DashboardConfig,
}
@@ -166,7 +120,6 @@ impl TuiApp {
focused_panel: PanelType::System, // Start with System panel focused
should_quit: false,
user_navigated_away: false,
-terminal_popup: None,
config,
}
}
@@ -297,38 +250,6 @@ impl TuiApp {
/// Handle keyboard input
pub fn handle_input(&mut self, event: Event) -> Result<Option<UiCommand>> {
if let Event::Key(key) = event {
-// If terminal popup is visible, handle popup-specific keys first
-if let Some(ref mut popup) = self.terminal_popup {
-if popup.visible {
-match key.code {
-KeyCode::Esc => {
-popup.close();
-self.terminal_popup = None;
-return Ok(None);
-}
-KeyCode::Up => {
-popup.scroll_offset = popup.scroll_offset.saturating_sub(1);
-return Ok(None);
-}
-KeyCode::Down => {
-let max_scroll = if popup.output_lines.len() > 20 {
-popup.output_lines.len() - 20
-} else {
-0
-};
-popup.scroll_offset = (popup.scroll_offset + 1).min(max_scroll);
-return Ok(None);
-}
-KeyCode::Char('q') => {
-popup.close();
-self.terminal_popup = None;
-return Ok(None);
-}
-_ => return Ok(None), // Consume all other keys when popup is open
-}
-}
-}
match key.code {
KeyCode::Char('q') => {
self.should_quit = true;
@@ -361,8 +282,9 @@ impl TuiApp {
PanelType::Services => {
// Service restart command
if let (Some(service_name), Some(hostname)) = (self.get_selected_service(), self.current_host.clone()) {
-self.start_command(&hostname, CommandType::ServiceRestart, service_name.clone());
-return Ok(Some(UiCommand::ServiceRestart { hostname, service_name }));
+if self.start_command(&hostname, CommandType::ServiceRestart, service_name.clone()) {
+return Ok(Some(UiCommand::ServiceRestart { hostname, service_name }));
+}
}
}
_ => {
@@ -374,8 +296,9 @@ impl TuiApp {
if self.focused_panel == PanelType::Services {
// Service start command
if let (Some(service_name), Some(hostname)) = (self.get_selected_service(), self.current_host.clone()) {
-self.start_command(&hostname, CommandType::ServiceStart, service_name.clone());
-return Ok(Some(UiCommand::ServiceStart { hostname, service_name }));
+if self.start_command(&hostname, CommandType::ServiceStart, service_name.clone()) {
+return Ok(Some(UiCommand::ServiceStart { hostname, service_name }));
+}
}
}
}
@@ -383,8 +306,9 @@ impl TuiApp {
if self.focused_panel == PanelType::Services {
// Service stop command
if let (Some(service_name), Some(hostname)) = (self.get_selected_service(), self.current_host.clone()) {
-self.start_command(&hostname, CommandType::ServiceStop, service_name.clone());
-return Ok(Some(UiCommand::ServiceStop { hostname, service_name }));
+if self.start_command(&hostname, CommandType::ServiceStop, service_name.clone()) {
+return Ok(Some(UiCommand::ServiceStop { hostname, service_name }));
+}
}
}
}
@@ -495,26 +419,54 @@ impl TuiApp {
self.should_quit
}
-/// Start command execution and track status for visual feedback
-pub fn start_command(&mut self, hostname: &str, command_type: CommandType, target: String) {
-if let Some(host_widgets) = self.host_widgets.get_mut(hostname) {
-host_widgets.command_status = Some(CommandStatus::InProgress {
-command_type,
-target,
-start_time: Instant::now(),
-});
+/// Get current service status for state-aware command validation
+fn get_current_service_status(&self, hostname: &str, service_name: &str) -> Option<String> {
+if let Some(host_widgets) = self.host_widgets.get(hostname) {
+return host_widgets.services_widget.get_service_status(service_name);
+}
+None
+}
-/// Mark command as completed successfully
-pub fn _complete_command(&mut self, hostname: &str) {
-if let Some(host_widgets) = self.host_widgets.get_mut(hostname) {
-// Simply clear the command status when completed
-host_widgets.command_status = None;
+/// Start command execution and track status for visual feedback (with state validation)
+pub fn start_command(&mut self, hostname: &str, command_type: CommandType, target: String) -> bool {
+// Get current service status to validate command
+let current_status = self.get_current_service_status(hostname, &target);
+// Validate if command makes sense for current state
+let should_execute = match (&command_type, current_status.as_deref()) {
+(CommandType::ServiceStart, Some("inactive") | Some("failed") | Some("dead")) => true,
+(CommandType::ServiceStop, Some("active")) => true,
+(CommandType::ServiceRestart, Some("active") | Some("inactive") | Some("failed") | Some("dead")) => true,
+(CommandType::ServiceStart, Some("active")) => {
+// Already running - show brief feedback but don't execute
+// TODO: Could show a brief "already running" message
+false
+},
+(CommandType::ServiceStop, Some("inactive") | Some("failed") | Some("dead")) => {
+// Already stopped - show brief feedback but don't execute
+// TODO: Could show a brief "already stopped" message
+false
+},
+(_, None) => {
+// Unknown service state - allow command to proceed
+true
+},
+_ => true, // Default: allow other combinations
+};
+if should_execute {
+if let Some(host_widgets) = self.host_widgets.get_mut(hostname) {
+host_widgets.command_status = Some(CommandStatus::InProgress {
+command_type,
+target,
+start_time: Instant::now(),
+});
+}
+}
+should_execute
}
/// Check for command timeouts and automatically clear them
pub fn check_command_timeouts(&mut self) {
let now = Instant::now();
@@ -538,26 +490,6 @@ impl TuiApp {
}
}
-/// Add output line to terminal popup
-pub fn add_terminal_output(&mut self, hostname: &str, line: String) {
-if let Some(ref mut popup) = self.terminal_popup {
-if popup.hostname == hostname && popup.visible {
-popup.add_output_line(line);
-}
-}
-}
-/// Close terminal popup for a specific hostname
-pub fn _close_terminal_popup(&mut self, hostname: &str) {
-if let Some(ref mut popup) = self.terminal_popup {
-if popup.hostname == hostname {
-popup.close();
-self.terminal_popup = None;
-}
-}
-}
/// Scroll the focused panel up or down
pub fn scroll_focused_panel(&mut self, direction: i32) {
if let Some(hostname) = self.current_host.clone() {
@@ -695,12 +627,6 @@ impl TuiApp {
// Render statusbar at the bottom
self.render_statusbar(frame, main_chunks[2]); // main_chunks[2] is the statusbar area
-// Render terminal popup on top of everything else
-if let Some(ref popup) = self.terminal_popup {
-if popup.visible {
-self.render_terminal_popup(frame, size, popup);
-}
-}
}
/// Render btop-style minimal title with host status colors
@@ -724,24 +650,9 @@ impl TuiApp {
spans.push(Span::styled(" ", Typography::title()));
}
-// Check if this host has a command status that affects the icon
-let (status_icon, status_color) = if let Some(host_widgets) = self.host_widgets.get(host) {
-match &host_widgets.command_status {
-Some(CommandStatus::InProgress { .. }) => {
-// Show working indicator for in-progress commands
-("", Theme::highlight())
-}
-_ => {
-// Normal status icon based on metrics
-let host_status = self.calculate_host_status(host, metric_store);
-(StatusIcons::get_icon(host_status), Theme::status_color(host_status))
-}
-}
-} else {
-// No host widgets yet, use normal status
-let host_status = self.calculate_host_status(host, metric_store);
-(StatusIcons::get_icon(host_status), Theme::status_color(host_status))
-};
+// Always show normal status icon based on metrics (no command status at host level)
+let host_status = self.calculate_host_status(host, metric_store);
+let (status_icon, status_color) = (StatusIcons::get_icon(host_status), Theme::status_color(host_status));
// Add status icon
spans.push(Span::styled(
@@ -896,112 +807,5 @@ impl TuiApp {
}
}
-/// Render terminal popup with streaming output
-fn render_terminal_popup(&self, frame: &mut Frame, area: Rect, popup: &TerminalPopup) {
-use ratatui::{
-style::{Color, Style},
-text::{Line, Span},
-widgets::{Block, Borders, Clear, Paragraph, Wrap},
-};
-// Calculate popup size (80% of screen, centered)
-let popup_width = area.width * 80 / 100;
-let popup_height = area.height * 80 / 100;
-let popup_x = (area.width - popup_width) / 2;
-let popup_y = (area.height - popup_height) / 2;
-let popup_area = Rect {
-x: popup_x,
-y: popup_y,
-width: popup_width,
-height: popup_height,
-};
-// Clear background
-frame.render_widget(Clear, popup_area);
-// Create terminal-style block
-let title = format!(" {}{} ({:.1}s) ",
-popup.hostname,
-popup.target,
-popup.start_time.elapsed().as_secs_f32()
-);
-let block = Block::default()
-.title(title)
-.borders(Borders::ALL)
-.border_style(Style::default().fg(Color::Cyan))
-.style(Style::default().bg(Color::Black));
-let inner_area = block.inner(popup_area);
-frame.render_widget(block, popup_area);
-// Render output content
-let available_height = inner_area.height as usize;
-let total_lines = popup.output_lines.len();
-// Calculate which lines to show based on scroll offset
-let start_line = popup.scroll_offset;
-let end_line = (start_line + available_height).min(total_lines);
-let visible_lines: Vec<Line> = popup.output_lines[start_line..end_line]
-.iter()
-.map(|line| {
-// Style output lines with terminal colors
-if line.contains("error") || line.contains("Error") || line.contains("failed") {
-Line::from(Span::styled(line.clone(), Style::default().fg(Color::Red)))
-} else if line.contains("warning") || line.contains("Warning") {
-Line::from(Span::styled(line.clone(), Style::default().fg(Color::Yellow)))
-} else if line.contains("building") || line.contains("Building") {
-Line::from(Span::styled(line.clone(), Style::default().fg(Color::Blue)))
-} else if line.contains("") || line.contains("success") || line.contains("completed") {
-Line::from(Span::styled(line.clone(), Style::default().fg(Color::Green)))
-} else {
-Line::from(Span::styled(line.clone(), Style::default().fg(Color::White)))
-}
-})
-.collect();
-let content = Paragraph::new(visible_lines)
-.wrap(Wrap { trim: false })
-.style(Style::default().bg(Color::Black));
-frame.render_widget(content, inner_area);
-// Render scroll indicator if needed
-if total_lines > available_height {
-let scroll_info = format!(" {}% ",
-if total_lines > 0 {
-(end_line * 100) / total_lines
-} else {
-100
-}
-);
-let scroll_area = Rect {
-x: popup_area.x + popup_area.width - scroll_info.len() as u16 - 1,
-y: popup_area.y + popup_area.height - 1,
-width: scroll_info.len() as u16,
-height: 1,
-};
-let scroll_widget = Paragraph::new(scroll_info)
-.style(Style::default().fg(Color::Cyan).bg(Color::Black));
-frame.render_widget(scroll_widget, scroll_area);
-}
-// Instructions at bottom
-let instructions = " ESC/Q: Close • ↑↓: Scroll ";
-let instructions_area = Rect {
-x: popup_area.x + 1,
-y: popup_area.y + popup_area.height - 1,
-width: instructions.len() as u16,
-height: 1,
-};
-let instructions_widget = Paragraph::new(instructions)
-.style(Style::default().fg(Color::Gray).bg(Color::Black));
-frame.render_widget(instructions_widget, instructions_area);
-}
}

View File

@@ -273,6 +273,26 @@ impl ServicesWidget {
self.parent_services.len()
}
+/// Get current status of a specific service by name
+pub fn get_service_status(&self, service_name: &str) -> Option<String> {
+// Check if it's a parent service
+if let Some(parent_info) = self.parent_services.get(service_name) {
+return Some(parent_info.status.clone());
+}
+// Check sub-services (format: parent_sub)
+for (parent_name, sub_list) in &self.sub_services {
+for (sub_name, sub_info) in sub_list {
+let full_sub_name = format!("{}_{}", parent_name, sub_name);
+if full_sub_name == service_name {
+return Some(sub_info.status.clone());
+}
+}
+}
+None
+}
/// Calculate which parent service index corresponds to a display line index
fn calculate_parent_service_index(&self, display_line_index: &usize) -> usize {
// Build the same display list to map line index to parent service index

View File

@@ -15,7 +15,6 @@ pub struct SystemWidget {
// NixOS information
nixos_build: Option<String>,
config_hash: Option<String>,
-active_users: Option<String>,
agent_hash: Option<String>,
// CPU metrics
@@ -33,6 +32,7 @@ pub struct SystemWidget {
tmp_used_gb: Option<f32>,
tmp_total_gb: Option<f32>,
memory_status: Status,
+tmp_status: Status,
// Storage metrics (collected from disk metrics)
storage_pools: Vec<StoragePool>,
@@ -66,7 +66,6 @@ impl SystemWidget {
Self {
nixos_build: None,
config_hash: None,
-active_users: None,
agent_hash: None,
cpu_load_1min: None,
cpu_load_5min: None,
@@ -80,6 +79,7 @@ impl SystemWidget {
tmp_used_gb: None,
tmp_total_gb: None,
memory_status: Status::Unknown,
+tmp_status: Status::Unknown,
storage_pools: Vec::new(),
has_data: false,
}
@@ -334,11 +334,6 @@ impl Widget for SystemWidget {
self.config_hash = Some(hash.clone());
}
}
"system_active_users" => {
if let MetricValue::String(users) = &metric.value {
self.active_users = Some(users.clone());
}
}
"agent_version" => {
if let MetricValue::String(version) = &metric.value {
self.agent_hash = Some(version.clone());
@@ -390,6 +385,7 @@ impl Widget for SystemWidget {
"memory_tmp_usage_percent" => {
if let MetricValue::Float(usage) = metric.value {
self.tmp_usage_percent = Some(usage);
+self.tmp_status = metric.status.clone();
}
}
"memory_tmp_used_gb" => {
@@ -432,10 +428,6 @@ impl SystemWidget {
Span::styled(format!("Agent: {}", agent_version_text), Typography::secondary())
]));
-let users_text = self.active_users.as_deref().unwrap_or("unknown");
-lines.push(Line::from(vec![
-Span::styled(format!("Active users: {}", users_text), Typography::secondary())
-]));
// CPU section
lines.push(Line::from(vec![
@@ -472,7 +464,7 @@ impl SystemWidget {
Span::styled(" └─ ", Typography::tree()),
];
tmp_spans.extend(StatusIcons::create_status_spans(
-self.memory_status.clone(),
+self.tmp_status.clone(),
&format!("/tmp: {}", tmp_text)
));
lines.push(Line::from(tmp_spans));

View File

@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-shared"
version = "0.1.20"
version = "0.1.23"
edition = "2021"
[dependencies]