Compare commits

..

5 Commits

Author SHA1 Message Date
d1b0e2c431 Add kB unit support for backup repository sizes
All checks were successful: Build and Release / build-and-release (push) in 1m24s
Extended size formatting to handle repositories smaller than 1MB by displaying in kB units. Size display logic now cascades: kB for < 1MB, MB for 1MB-1GB, GB for >= 1GB.
2025-12-09 19:51:03 +01:00
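
A minimal sketch of the cascading size formatting this commit describes, using the thresholds visible in the widget change further down (sizes carried as GB floats, with 0.001 GB treated as the ~1 MB cutoff); the helper name is illustrative, as the real code inlines this logic:

    /// Format a repository size given in GB, cascading kB -> MB -> GB.
    fn format_repo_size(repo_size_gb: f32) -> String {
        if repo_size_gb < 0.001 {
            // Below ~1 MB: show kilobytes
            format!("{:.0}kB", repo_size_gb * 1024.0 * 1024.0)
        } else if repo_size_gb < 1.0 {
            // Between ~1 MB and 1 GB: show megabytes
            format!("{:.0}MB", repo_size_gb * 1024.0)
        } else {
            // 1 GB and above: show gigabytes
            format!("{:.1}GB", repo_size_gb)
        }
    }

For example, format_repo_size(0.0005) yields "524kB" and format_repo_size(2.5) yields "2.5GB".
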
b1719a60fc Use nfs-backup.toml and support completed status
All checks were successful: Build and Release / build-and-release (push) in 1m24s
Update the agent to read nfs-backup.toml instead of the legacy backup-status-*.toml files. Add support for the 'completed' status string used by the backup script.

Changes:
- Read nfs-backup.toml from status directory
- Match 'completed' status as Status::Ok
- Simplify file scanning logic for single NFS backup file
2025-12-09 19:37:01 +01:00
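
The exact layout of nfs-backup.toml is not part of this diff, but the collector change below reads status, start_time, and a per-service table with archive_count, repo_size_bytes, and status. A sketch of a matching serde struct; struct names and types beyond those field accesses are assumptions:

    use std::collections::HashMap;
    use serde::Deserialize;

    // Shape implied by the collector's field accesses; the real BackupStatusToml
    // definition is not shown in this changeset.
    #[derive(Debug, Deserialize)]
    struct BackupStatusToml {
        status: String,                               // "completed", "warning", "failed", ...
        start_time: String,                           // e.g. "2025-12-09 19:30:00"
        services: HashMap<String, ServiceStatusToml>,
    }

    #[derive(Debug, Deserialize)]
    struct ServiceStatusToml {
        status: String,
        archive_count: i64,
        repo_size_bytes: u64,                         // converted to GB by the collector
    }
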
d922e8d6f3 Restructure backup display to show per-repository metrics
All checks were successful: Build and Release / build-and-release (push) in 1m15s
Remove disk-based backup display and implement repository-centric view
with per-repo archive counts and sizes. Backup now uses NFS storage
instead of direct disk monitoring.

Changes:
- Remove BackupDiskData, add BackupRepositoryData structure
- Display format: "Repo <timestamp>" with per-repo details
- Show archive count and size (MB/GB) for each repository
- Agent aggregates repo data from backup status TOML files
- Dashboard renders repo list with individual status indicators
2025-12-09 19:22:51 +01:00
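
For orientation, the rendered shape this commit aims for (per the widget change below) is a "Repo <timestamp>" header followed by one tree line per repository showing name, archive count, and size; the repository names and values here are made up:

    Repo 2025-12-09 19:22:00
      ├─ gitea (12) 3.2GB
      └─ vaultwarden (12) 512kB
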
407bc9dbc2 Reduce CPU usage with conditional rendering
All checks were successful: Build and Release / build-and-release (push) in 1m12s
Implement event-driven rendering to dramatically reduce CPU usage.
Only render when something actually changes instead of constantly
rendering at 20 FPS.

Changes:
- Increase poll timeout from 50ms to 200ms (5 FPS)
- Add needs_render flag to track when rendering is required
- Trigger rendering only on: user input, new metrics, heartbeat
  checks, or terminal resize events
- Reset render flag after each render cycle

Based on cm-player optimization approach.
2025-12-09 11:56:40 +01:00
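
The pattern, reduced to a skeleton (assumes crossterm's event::poll/event::read and a caller-supplied render closure; quit handling and the metrics/heartbeat sources of needs_render are omitted here):

    use std::time::Duration;
    use crossterm::event::{self, Event};

    // Event-driven rendering: poll at ~5 FPS and redraw only when something
    // marked the frame dirty (input, resize, or, in the real loop, new metrics).
    fn run_loop(mut render: impl FnMut()) {
        let mut needs_render = true; // render once at startup
        loop {
            if let Ok(true) = event::poll(Duration::from_millis(200)) {
                if let Ok(evt) = event::read() {
                    match evt {
                        Event::Key(_) | Event::Mouse(_) | Event::Resize(_, _) => needs_render = true,
                        _ => {}
                    }
                }
            }
            if needs_render {
                render();
                needs_render = false; // reset after drawing
            }
        }
    }
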
3c278351c9 Filter out current host from Tailscale peer list
All checks were successful: Build and Release / build-and-release (push) in 1m50s
Skip the first line of tailscale status output, which is always the current
host (shown as idle). Add an additional hostname check to prevent the current
host from appearing in the peer list. Only display actual remote peers with
their connection methods.
2025-12-09 10:47:18 +01:00
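
A sketch of the filtering described here, assuming the whitespace-separated tailscale status format noted in the code comments below (column 1 is the hostname) and a current_hostname obtained elsewhere, e.g. via the gethostname crate:

    // Keep only remote peers: drop the first line (always the local host) and
    // any remaining line whose hostname column matches the current hostname.
    fn remote_peer_lines<'a>(status_output: &'a str, current_hostname: &str) -> Vec<&'a str> {
        status_output
            .lines()
            .enumerate()
            .filter(|(idx, _)| *idx != 0)
            .map(|(_, line)| line)
            .filter(|line| {
                let parts: Vec<&str> = line.split_whitespace().collect();
                parts.len() >= 5 && parts[1] != current_hostname
            })
            .collect()
    }
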
9 changed files with 132 additions and 245 deletions

Cargo.lock generated
View File

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 [[package]]
 name = "cm-dashboard"
-version = "0.1.263"
+version = "0.1.268"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.263"
+version = "0.1.268"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -325,7 +325,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.263"
+version = "0.1.268"
 dependencies = [
  "chrono",
  "serde",

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.264"
+version = "0.1.269"
 edition = "2021"
 
 [dependencies]

View File

@@ -1,7 +1,7 @@
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData, Status};
+use cm_dashboard_shared::{AgentData, BackupData, BackupRepositoryData, Status};
 use serde::{Deserialize, Serialize};
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
 use std::fs;
 use std::path::{Path, PathBuf};
 use tracing::{debug, warn};
@@ -21,7 +21,7 @@ impl BackupCollector {
         }
     }
 
-    /// Scan directory for all backup status files
+    /// Scan directory for backup status file (nfs-backup.toml)
     async fn scan_status_files(&self) -> Result<Vec<PathBuf>, CollectorError> {
         let status_path = Path::new(&self.status_dir);
@@ -30,30 +30,15 @@ impl BackupCollector {
             return Ok(Vec::new());
         }
 
-        let mut status_files = Vec::new();
-
-        match fs::read_dir(status_path) {
-            Ok(entries) => {
-                for entry in entries {
-                    if let Ok(entry) = entry {
-                        let path = entry.path();
-                        if path.is_file() {
-                            if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
-                                if filename.starts_with("backup-status-") && filename.ends_with(".toml") {
-                                    status_files.push(path);
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-            Err(e) => {
-                warn!("Failed to read backup status directory: {}", e);
-                return Ok(Vec::new());
-            }
-        }
-
-        Ok(status_files)
+        // Look for nfs-backup.toml (new NFS-based backup)
+        let nfs_backup_file = status_path.join("nfs-backup.toml");
+        if nfs_backup_file.exists() {
+            return Ok(vec![nfs_backup_file]);
+        }
+
+        // No backup status file found
+        debug!("No nfs-backup.toml found in {}", self.status_dir);
+        Ok(Vec::new())
     }
 
     /// Read a single backup status file
@@ -76,24 +61,13 @@
     /// Calculate backup status from TOML status field
     fn calculate_backup_status(status_str: &str) -> Status {
         match status_str.to_lowercase().as_str() {
-            "success" => Status::Ok,
+            "success" | "completed" => Status::Ok,
            "warning" => Status::Warning,
            "failed" | "error" => Status::Critical,
            _ => Status::Unknown,
         }
     }
 
-    /// Calculate usage status from disk usage percentage
-    fn calculate_usage_status(usage_percent: f32) -> Status {
-        if usage_percent < 80.0 {
-            Status::Ok
-        } else if usage_percent < 90.0 {
-            Status::Warning
-        } else {
-            Status::Critical
-        }
-    }
-
     /// Convert BackupStatusToml to BackupData and populate AgentData
     async fn populate_backup_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
         let status_files = self.scan_status_files().await?;
@@ -101,76 +75,47 @@
         if status_files.is_empty() {
             debug!("No backup status files found");
             agent_data.backup = BackupData {
+                last_backup_time: None,
+                backup_status: Status::Unknown,
                 repositories: Vec::new(),
-                repository_status: Status::Unknown,
-                disks: Vec::new(),
             };
             return Ok(());
         }
 
-        let mut all_repositories = HashSet::new();
-        let mut disks = Vec::new();
+        // Aggregate repository data across all backup status files
+        let mut repo_map: HashMap<String, BackupRepositoryData> = HashMap::new();
         let mut worst_status = Status::Ok;
+        let mut latest_backup_time: Option<String> = None;
 
         for status_file in status_files {
             match self.read_status_file(&status_file).await {
                 Ok(backup_status) => {
-                    // Collect all service names
-                    for service_name in backup_status.services.keys() {
-                        all_repositories.insert(service_name.clone());
-                    }
-
                     // Calculate backup status
                     let backup_status_enum = Self::calculate_backup_status(&backup_status.status);
+                    worst_status = worst_status.max(backup_status_enum);
 
-                    // Calculate usage status from disk space
-                    let (usage_percent, used_gb, total_gb, usage_status) = if let Some(disk_space) = &backup_status.disk_space {
-                        let usage_pct = disk_space.usage_percent as f32;
-                        (
-                            usage_pct,
-                            disk_space.used_gb as f32,
-                            disk_space.total_gb as f32,
-                            Self::calculate_usage_status(usage_pct),
-                        )
-                    } else {
-                        (0.0, 0.0, 0.0, Status::Unknown)
-                    };
-
-                    // Update worst status
-                    worst_status = worst_status.max(backup_status_enum).max(usage_status);
-
-                    // Build service list for this disk
-                    let services: Vec<String> = backup_status.services.keys().cloned().collect();
-
-                    // Get min and max archive counts to detect inconsistencies
-                    let archives_min: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .min()
-                        .unwrap_or(0);
-
-                    let archives_max: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .max()
-                        .unwrap_or(0);
-
-                    // Create disk data
-                    let disk_data = BackupDiskData {
-                        serial: backup_status.disk_serial_number.unwrap_or_else(|| "Unknown".to_string()),
-                        product_name: backup_status.disk_product_name,
-                        wear_percent: backup_status.disk_wear_percent,
-                        temperature_celsius: None, // Not available in current TOML
-                        last_backup_time: Some(backup_status.start_time),
-                        backup_status: backup_status_enum,
-                        disk_usage_percent: usage_percent,
-                        disk_used_gb: used_gb,
-                        disk_total_gb: total_gb,
-                        usage_status,
-                        services,
-                        archives_min,
-                        archives_max,
-                    };
-
-                    disks.push(disk_data);
+                    // Track latest backup time
+                    if latest_backup_time.is_none() || Some(&backup_status.start_time) > latest_backup_time.as_ref() {
+                        latest_backup_time = Some(backup_status.start_time.clone());
+                    }
+
+                    // Process each service in this backup
+                    for (service_name, service_status) in backup_status.services {
+                        // Convert bytes to GB
+                        let repo_size_gb = service_status.repo_size_bytes as f32 / 1_073_741_824.0;
+
+                        // Calculate service status
+                        let service_status_enum = Self::calculate_backup_status(&service_status.status);
+                        worst_status = worst_status.max(service_status_enum);
+
+                        // Update or insert repository data
+                        repo_map.insert(service_name.clone(), BackupRepositoryData {
+                            name: service_name,
+                            archive_count: service_status.archive_count,
+                            repo_size_gb,
+                            status: service_status_enum,
+                        });
+                    }
                 }
                 Err(e) => {
                     warn!("Failed to read backup status file {:?}: {}", status_file, e);
@@ -178,12 +123,14 @@
                 }
             }
         }
 
-        let repositories: Vec<String> = all_repositories.into_iter().collect();
+        // Convert HashMap to sorted Vec
+        let mut repositories: Vec<BackupRepositoryData> = repo_map.into_values().collect();
+        repositories.sort_by(|a, b| a.name.cmp(&b.name));
 
         agent_data.backup = BackupData {
+            last_backup_time: latest_backup_time,
+            backup_status: worst_status,
             repositories,
-            repository_status: worst_status,
-            disks,
         };
 
         Ok(())

View File

@@ -948,10 +948,20 @@ impl SystemdCollector {
         let status_output = String::from_utf8_lossy(&output.stdout);
         let mut peers = Vec::new();
 
+        // Get current hostname to filter it out
+        let current_hostname = gethostname::gethostname()
+            .to_string_lossy()
+            .to_string();
+
         // Parse tailscale status output
         // Format: IP hostname user os status
         // Example: 100.110.98.3 wslbox cm@ linux active; direct 192.168.30.227:53757
-        for line in status_output.lines() {
+        // Note: First line is always the current host, skip it
+        for (idx, line) in status_output.lines().enumerate() {
+            if idx == 0 {
+                continue; // Skip first line (current host)
+            }
+
             let parts: Vec<&str> = line.split_whitespace().collect();
             if parts.len() < 5 {
                 continue; // Skip invalid lines
@@ -964,6 +974,12 @@
             // parts[4+] = status (e.g., "active;", "direct", "192.168.30.227:53757" or "idle;" or "offline")
             let hostname = parts[1];
 
+            // Skip if this is the current host (double-check in case format changes)
+            if hostname == current_hostname {
+                continue;
+            }
+
             let status_parts = &parts[4..];
 
             // Determine connection method from status

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.264"
+version = "0.1.269"
 edition = "2021"
 
 [dependencies]

View File

@@ -138,11 +138,12 @@ impl Dashboard {
         let metrics_check_interval = Duration::from_millis(100); // Check for metrics every 100ms
         let mut last_heartbeat_check = Instant::now();
         let heartbeat_check_interval = Duration::from_secs(1); // Check for host connectivity every 1 second
+        let mut needs_render = true; // Track if we need to render
 
         loop {
             // Handle terminal events (keyboard and mouse input) only if not headless
             if !self.headless {
-                match event::poll(Duration::from_millis(50)) {
+                match event::poll(Duration::from_millis(200)) {
                     Ok(true) => {
                         match event::read() {
                             Ok(event) => {
@@ -152,6 +153,7 @@
                                     // Handle keyboard input
                                     match tui_app.handle_input(event) {
                                         Ok(_) => {
+                                            needs_render = true;
                                             // Check if we should quit
                                             if tui_app.should_quit() {
                                                 info!("Quit requested, exiting dashboard");
@@ -168,10 +170,11 @@
                                 if let Err(e) = self.handle_mouse_event(mouse_event) {
                                     error!("Error handling mouse event: {}", e);
                                 }
+                                needs_render = true;
                             }
                             Event::Resize(_width, _height) => {
-                                // Terminal was resized - just continue and re-render
-                                // The next render will automatically use the new size
+                                // Terminal was resized - mark for re-render
+                                needs_render = true;
                             }
                             _ => {}
                         }
@@ -189,38 +192,6 @@
                         break;
                     }
                 }
-
-                // Render UI immediately after handling input for responsive feedback
-                if let Some(ref mut terminal) = self.terminal {
-                    if let Some(ref mut tui_app) = self.tui_app {
-                        // Clear and autoresize terminal to handle any resize events
-                        if let Err(e) = terminal.autoresize() {
-                            warn!("Error autoresizing terminal: {}", e);
-                        }
-
-                        // Check minimum terminal size to prevent panics
-                        let size = terminal.size().unwrap_or_default();
-                        if size.width < 90 || size.height < 15 {
-                            // Terminal too small, show error message
-                            let msg_text = format!("Terminal too small\n\nMinimum: 90x15\nCurrent: {}x{}", size.width, size.height);
-                            let _ = terminal.draw(|frame| {
-                                use ratatui::widgets::{Paragraph, Block, Borders};
-                                use ratatui::layout::Alignment;
-                                let msg = Paragraph::new(msg_text.clone())
-                                    .alignment(Alignment::Center)
-                                    .block(Block::default().borders(Borders::ALL));
-                                frame.render_widget(msg, frame.size());
-                            });
-                        } else if let Err(e) = terminal.draw(|frame| {
-                            let (title_area, system_area, services_area) = tui_app.render(frame, &self.metric_store);
-                            self.title_area = title_area;
-                            self.system_area = system_area;
-                            self.services_area = services_area;
-                        }) {
-                            error!("Error rendering TUI after input: {}", e);
-                        }
-                    }
-                }
             }
 
             // Check for new metrics
@@ -259,8 +230,10 @@
                     if let Some(ref mut tui_app) = self.tui_app {
                         tui_app.update_metrics(&mut self.metric_store);
                     }
+
+                    needs_render = true; // New metrics received, need to render
                 }
 
                 // Also check for command output messages
                 if let Ok(Some(cmd_output)) = self.zmq_consumer.receive_command_output().await {
                     debug!(
@@ -271,26 +244,27 @@
                     // Command output (terminal popup removed - output not displayed)
                 }
 
                 last_metrics_check = Instant::now();
             }
 
             // Check for host connectivity changes (heartbeat timeouts) periodically
             if last_heartbeat_check.elapsed() >= heartbeat_check_interval {
                 let timeout = Duration::from_secs(self.config.zmq.heartbeat_timeout_seconds);
 
                 // Clean up metrics for offline hosts
                 self.metric_store.cleanup_offline_hosts(timeout);
 
                 if let Some(ref mut tui_app) = self.tui_app {
                     let connected_hosts = self.metric_store.get_connected_hosts(timeout);
                     tui_app.update_hosts(connected_hosts);
                 }
 
                 last_heartbeat_check = Instant::now();
+                needs_render = true; // Heartbeat check happened, may have changed hosts
             }
 
-            // Render TUI (only if not headless)
-            if !self.headless {
+            // Render TUI only when needed (not headless and something changed)
+            if !self.headless && needs_render {
                 if let Some(ref mut terminal) = self.terminal {
                     if let Some(ref mut tui_app) = self.tui_app {
                         // Clear and autoresize terminal to handle any resize events
@@ -322,10 +296,8 @@
                         }
                     }
                 }
+                needs_render = false; // Reset flag after rendering
             }
-
-            // Small sleep to prevent excessive CPU usage
-            tokio::time::sleep(Duration::from_millis(10)).await;
         }
 
         info!("Dashboard main loop ended");

View File

@@ -44,9 +44,9 @@ pub struct SystemWidget {
     storage_pools: Vec<StoragePool>,
 
     // Backup metrics
-    backup_repositories: Vec<String>,
-    backup_repository_status: Status,
-    backup_disks: Vec<cm_dashboard_shared::BackupDiskData>,
+    backup_last_time: Option<String>,
+    backup_status: Status,
+    backup_repositories: Vec<cm_dashboard_shared::BackupRepositoryData>,
 
     // Overall status
     has_data: bool,
@@ -112,9 +112,9 @@ impl SystemWidget {
             tmp_status: Status::Unknown,
             tmpfs_mounts: Vec::new(),
             storage_pools: Vec::new(),
+            backup_last_time: None,
+            backup_status: Status::Unknown,
             backup_repositories: Vec::new(),
-            backup_repository_status: Status::Unknown,
-            backup_disks: Vec::new(),
             has_data: false,
             scroll_offset: 0,
             last_viewport_height: 0,
@@ -221,9 +221,9 @@ impl Widget for SystemWidget {
         // Extract backup data
         let backup = &agent_data.backup;
+        self.backup_last_time = backup.last_backup_time.clone();
+        self.backup_status = backup.backup_status;
         self.backup_repositories = backup.repositories.clone();
-        self.backup_repository_status = backup.repository_status;
-        self.backup_disks = backup.disks.clone();
 
         // Clamp scroll offset to valid range after update
         // This prevents scroll issues when switching between hosts
@@ -533,79 +533,43 @@
     fn render_backup(&self) -> Vec<Line<'_>> {
         let mut lines = Vec::new();
 
-        // First section: Repository status and list
-        if !self.backup_repositories.is_empty() {
-            let repo_text = format!("Repo: {}", self.backup_repositories.len());
-            let repo_spans = StatusIcons::create_status_spans(self.backup_repository_status, &repo_text);
-            lines.push(Line::from(repo_spans));
-
-            // List all repositories (sorted for consistent display)
-            let mut sorted_repos = self.backup_repositories.clone();
-            sorted_repos.sort();
-            let repo_count = sorted_repos.len();
-            for (idx, repo) in sorted_repos.iter().enumerate() {
-                let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
-                lines.push(Line::from(vec![
-                    Span::styled(format!(" {} ", tree_char), Typography::tree()),
-                    Span::styled(repo.clone(), Typography::secondary()),
-                ]));
-            }
-        }
-
-        // Second section: Per-disk backup information (sorted by serial for consistent display)
-        let mut sorted_disks = self.backup_disks.clone();
-        sorted_disks.sort_by(|a, b| a.serial.cmp(&b.serial));
-
-        for disk in &sorted_disks {
-            let truncated_serial = truncate_serial(&disk.serial);
-
-            let mut details = Vec::new();
-            if let Some(temp) = disk.temperature_celsius {
-                details.push(format!("T: {}°C", temp as i32));
-            }
-            if let Some(wear) = disk.wear_percent {
-                details.push(format!("W: {}%", wear as i32));
-            }
-
-            let disk_text = if !details.is_empty() {
-                format!("{} {}", truncated_serial, details.join(" "))
-            } else {
-                truncated_serial
-            };
-
-            // Overall disk status (worst of backup and usage)
-            let disk_status = disk.backup_status.max(disk.usage_status);
-            let disk_spans = StatusIcons::create_status_spans(disk_status, &disk_text);
-            lines.push(Line::from(disk_spans));
-
-            // Show backup time with status
-            if let Some(backup_time) = &disk.last_backup_time {
-                let time_text = format!("Backup: {}", backup_time);
-                let mut time_spans = vec![
-                    Span::styled(" ├─ ", Typography::tree()),
-                ];
-                time_spans.extend(StatusIcons::create_status_spans(disk.backup_status, &time_text));
-                lines.push(Line::from(time_spans));
-            }
-
-            // Show usage with status and archive count
-            let archive_display = if disk.archives_min == disk.archives_max {
-                format!("{}", disk.archives_min)
-            } else {
-                format!("{}-{}", disk.archives_min, disk.archives_max)
-            };
-            let usage_text = format!(
-                "Usage: ({}) {:.0}% {:.0}GB/{:.0}GB",
-                archive_display,
-                disk.disk_usage_percent,
-                disk.disk_used_gb,
-                disk.disk_total_gb
-            );
-            let mut usage_spans = vec![
-                Span::styled(" └─ ", Typography::tree()),
-            ];
-            usage_spans.extend(StatusIcons::create_status_spans(disk.usage_status, &usage_text));
-            lines.push(Line::from(usage_spans));
-        }
+        if self.backup_repositories.is_empty() {
+            return lines;
+        }
+
+        // Format backup time (use complete timestamp)
+        let time_display = if let Some(ref time_str) = self.backup_last_time {
+            time_str.clone()
+        } else {
+            "unknown".to_string()
+        };
+
+        // Header: "Repo <complete timestamp>"
+        let repo_text = format!("Repo {}", time_display);
+        let repo_spans = StatusIcons::create_status_spans(self.backup_status, &repo_text);
+        lines.push(Line::from(repo_spans));
+
+        // List all repositories with archive count and size
+        let repo_count = self.backup_repositories.len();
+        for (idx, repo) in self.backup_repositories.iter().enumerate() {
+            let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
+
+            // Format size: use kB for < 1MB, MB for < 1GB, otherwise GB
+            let size_display = if repo.repo_size_gb < 0.001 {
+                format!("{:.0}kB", repo.repo_size_gb * 1024.0 * 1024.0)
+            } else if repo.repo_size_gb < 1.0 {
+                format!("{:.0}MB", repo.repo_size_gb * 1024.0)
+            } else {
+                format!("{:.1}GB", repo.repo_size_gb)
+            };
+
+            let repo_text = format!("{} ({}) {}", repo.name, repo.archive_count, size_display);
+
+            let mut repo_spans = vec![
+                Span::styled(format!(" {} ", tree_char), Typography::tree()),
+            ];
+            repo_spans.extend(StatusIcons::create_status_spans(repo.status, &repo_text));
+            lines.push(Line::from(repo_spans));
        }
 
         lines
@@ -876,13 +840,10 @@
         }
 
         // Backup section
-        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
-            count += 1; // Header
-            if !self.backup_repositories.is_empty() {
-                count += 1; // Repo header
-                count += self.backup_repositories.len();
-            }
-            count += self.backup_disks.len() * 3; // Each disk has 3 lines
+        if !self.backup_repositories.is_empty() {
+            count += 1; // Header: "Backup:"
+            count += 1; // Repo count and timestamp header
+            count += self.backup_repositories.len(); // Individual repos
         }
 
         count
@@ -988,7 +949,7 @@
         lines.extend(storage_lines);
 
         // Backup section (if available)
-        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
+        if !self.backup_repositories.is_empty() {
             lines.push(Line::from(vec![
                 Span::styled("Backup:", Typography::widget_title())
             ]));

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.264"
+version = "0.1.269"
 edition = "2021"
 
 [dependencies]

View File

@@ -182,27 +182,18 @@ pub struct SubServiceMetric {
 /// Backup system data
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct BackupData {
-    pub repositories: Vec<String>,
-    pub repository_status: Status,
-    pub disks: Vec<BackupDiskData>,
-}
-
-/// Backup repository disk information
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct BackupDiskData {
-    pub serial: String,
-    pub product_name: Option<String>,
-    pub wear_percent: Option<f32>,
-    pub temperature_celsius: Option<f32>,
     pub last_backup_time: Option<String>,
     pub backup_status: Status,
-    pub disk_usage_percent: f32,
-    pub disk_used_gb: f32,
-    pub disk_total_gb: f32,
-    pub usage_status: Status,
-    pub services: Vec<String>,
-    pub archives_min: i64,
-    pub archives_max: i64,
+    pub repositories: Vec<BackupRepositoryData>,
+}
+
+/// Individual backup repository information
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackupRepositoryData {
+    pub name: String,
+    pub archive_count: i64,
+    pub repo_size_gb: f32,
+    pub status: Status,
 }
 
 impl AgentData {
@@ -245,9 +236,9 @@ impl AgentData {
             },
             services: Vec::new(),
             backup: BackupData {
+                last_backup_time: None,
+                backup_status: Status::Unknown,
                 repositories: Vec::new(),
-                repository_status: Status::Unknown,
-                disks: Vec::new(),
             },
         }
     }
} }