Compare commits


No commits in common. "main" and "v0.1.265" have entirely different histories.

10 changed files with 247 additions and 291 deletions

Cargo.lock (generated)

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 
 [[package]]
 name = "cm-dashboard"
-version = "0.1.274"
+version = "0.1.264"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.274"
+version = "0.1.264"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -325,7 +325,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.274"
+version = "0.1.264"
 dependencies = [
  "chrono",
  "serde",

Cargo.toml (cm-dashboard-agent)

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.275"
+version = "0.1.265"
 edition = "2021"
 
 [dependencies]

Backup collector (cm-dashboard-agent)

@@ -1,7 +1,7 @@
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, BackupData, BackupRepositoryData, Status};
+use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData, Status};
 use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::fs;
 use std::path::{Path, PathBuf};
 use tracing::{debug, warn};
@@ -21,7 +21,7 @@ impl BackupCollector {
         }
     }
 
-    /// Scan directory for backup status file (nfs-backup.toml)
+    /// Scan directory for all backup status files
     async fn scan_status_files(&self) -> Result<Vec<PathBuf>, CollectorError> {
         let status_path = Path::new(&self.status_dir);
@@ -30,15 +30,30 @@ impl BackupCollector {
             return Ok(Vec::new());
         }
 
-        // Look for nfs-backup.toml (new NFS-based backup)
-        let nfs_backup_file = status_path.join("nfs-backup.toml");
-        if nfs_backup_file.exists() {
-            return Ok(vec![nfs_backup_file]);
-        }
-
-        // No backup status file found
-        debug!("No nfs-backup.toml found in {}", self.status_dir);
-        Ok(Vec::new())
+        let mut status_files = Vec::new();
+
+        match fs::read_dir(status_path) {
+            Ok(entries) => {
+                for entry in entries {
+                    if let Ok(entry) = entry {
+                        let path = entry.path();
+                        if path.is_file() {
+                            if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
+                                if filename.starts_with("backup-status-") && filename.ends_with(".toml") {
+                                    status_files.push(path);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            Err(e) => {
+                warn!("Failed to read backup status directory: {}", e);
+                return Ok(Vec::new());
+            }
+        }
+
+        Ok(status_files)
     }
 
     /// Read a single backup status file
@@ -61,13 +76,24 @@ impl BackupCollector {
     /// Calculate backup status from TOML status field
     fn calculate_backup_status(status_str: &str) -> Status {
         match status_str.to_lowercase().as_str() {
-            "success" | "completed" => Status::Ok,
+            "success" => Status::Ok,
             "warning" => Status::Warning,
             "failed" | "error" => Status::Critical,
             _ => Status::Unknown,
         }
     }
 
+    /// Calculate usage status from disk usage percentage
+    fn calculate_usage_status(usage_percent: f32) -> Status {
+        if usage_percent < 80.0 {
+            Status::Ok
+        } else if usage_percent < 90.0 {
+            Status::Warning
+        } else {
+            Status::Critical
+        }
+    }
+
     /// Convert BackupStatusToml to BackupData and populate AgentData
     async fn populate_backup_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
         let status_files = self.scan_status_files().await?;
@@ -75,47 +101,76 @@ impl BackupCollector {
         if status_files.is_empty() {
             debug!("No backup status files found");
             agent_data.backup = BackupData {
-                last_backup_time: None,
-                backup_status: Status::Unknown,
                 repositories: Vec::new(),
+                repository_status: Status::Unknown,
+                disks: Vec::new(),
             };
             return Ok(());
         }
 
-        // Aggregate repository data across all backup status files
-        let mut repo_map: HashMap<String, BackupRepositoryData> = HashMap::new();
+        let mut all_repositories = HashSet::new();
+        let mut disks = Vec::new();
         let mut worst_status = Status::Ok;
-        let mut latest_backup_time: Option<String> = None;
 
         for status_file in status_files {
             match self.read_status_file(&status_file).await {
                 Ok(backup_status) => {
+                    // Collect all service names
+                    for service_name in backup_status.services.keys() {
+                        all_repositories.insert(service_name.clone());
+                    }
+
                     // Calculate backup status
                     let backup_status_enum = Self::calculate_backup_status(&backup_status.status);
-                    worst_status = worst_status.max(backup_status_enum);
 
-                    // Track latest backup time
-                    if latest_backup_time.is_none() || Some(&backup_status.start_time) > latest_backup_time.as_ref() {
-                        latest_backup_time = Some(backup_status.start_time.clone());
-                    }
+                    // Calculate usage status from disk space
+                    let (usage_percent, used_gb, total_gb, usage_status) = if let Some(disk_space) = &backup_status.disk_space {
+                        let usage_pct = disk_space.usage_percent as f32;
+                        (
+                            usage_pct,
+                            disk_space.used_gb as f32,
+                            disk_space.total_gb as f32,
+                            Self::calculate_usage_status(usage_pct),
+                        )
+                    } else {
+                        (0.0, 0.0, 0.0, Status::Unknown)
+                    };
 
-                    // Process each service in this backup
-                    for (service_name, service_status) in backup_status.services {
-                        // Convert bytes to GB
-                        let repo_size_gb = service_status.repo_size_bytes as f32 / 1_073_741_824.0;
+                    // Update worst status
+                    worst_status = worst_status.max(backup_status_enum).max(usage_status);
 
-                        // Calculate service status
-                        let service_status_enum = Self::calculate_backup_status(&service_status.status);
-                        worst_status = worst_status.max(service_status_enum);
+                    // Build service list for this disk
+                    let services: Vec<String> = backup_status.services.keys().cloned().collect();
 
-                        // Update or insert repository data
-                        repo_map.insert(service_name.clone(), BackupRepositoryData {
-                            name: service_name,
-                            archive_count: service_status.archive_count,
-                            repo_size_gb,
-                            status: service_status_enum,
-                        });
-                    }
+                    // Get min and max archive counts to detect inconsistencies
+                    let archives_min: i64 = backup_status.services.values()
+                        .map(|service| service.archive_count)
+                        .min()
+                        .unwrap_or(0);
+                    let archives_max: i64 = backup_status.services.values()
+                        .map(|service| service.archive_count)
+                        .max()
+                        .unwrap_or(0);
+
+                    // Create disk data
+                    let disk_data = BackupDiskData {
+                        serial: backup_status.disk_serial_number.unwrap_or_else(|| "Unknown".to_string()),
+                        product_name: backup_status.disk_product_name,
+                        wear_percent: backup_status.disk_wear_percent,
+                        temperature_celsius: None, // Not available in current TOML
+                        last_backup_time: Some(backup_status.start_time),
+                        backup_status: backup_status_enum,
+                        disk_usage_percent: usage_percent,
+                        disk_used_gb: used_gb,
+                        disk_total_gb: total_gb,
+                        usage_status,
+                        services,
+                        archives_min,
+                        archives_max,
+                    };
+                    disks.push(disk_data);
                 }
                 Err(e) => {
                     warn!("Failed to read backup status file {:?}: {}", status_file, e);
@@ -123,14 +178,12 @@ impl BackupCollector {
             }
         }
 
-        // Convert HashMap to sorted Vec
-        let mut repositories: Vec<BackupRepositoryData> = repo_map.into_values().collect();
-        repositories.sort_by(|a, b| a.name.cmp(&b.name));
+        let repositories: Vec<String> = all_repositories.into_iter().collect();
 
         agent_data.backup = BackupData {
-            last_backup_time: latest_backup_time,
-            backup_status: worst_status,
             repositories,
+            repository_status: worst_status,
+            disks,
         };
 
         Ok(())

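For reference, a status file that the new scanner would pick up might look roughly like the sketch below. The filename only needs to match the backup-status-*.toml pattern, the field names are inferred from what the collector reads (status, start_time, disk_serial_number, disk_product_name, disk_wear_percent, a disk_space block, and a services table with per-service archive_count), and every value is invented for illustration; the real BackupStatusToml may contain more fields.

    # backup-status-example.toml (hypothetical sample)
    status = "success"
    start_time = "2024-01-01T02:00:00"
    disk_serial_number = "ABC123"
    disk_product_name = "Example SSD"
    disk_wear_percent = 4.0

    [disk_space]
    usage_percent = 42.0
    used_gb = 210.0
    total_gb = 500.0

    [services.gitea]
    archive_count = 30

    [services.nextcloud]
    archive_count = 30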
Disk collector (cm-dashboard-agent)

@@ -114,7 +114,7 @@ impl DiskCollector {
         let mut cmd = TokioCommand::new("lsblk");
         cmd.args(&["-rn", "-o", "NAME,MOUNTPOINT"]);
 
-        let output = run_command_with_timeout(cmd, 10).await
+        let output = run_command_with_timeout(cmd, 2).await
             .map_err(|e| CollectorError::SystemRead {
                 path: "block devices".to_string(),
                 error: e.to_string(),
@@ -184,7 +184,7 @@ impl DiskCollector {
     /// Get filesystem info for a single mount point
     fn get_filesystem_info(&self, mount_point: &str) -> Result<(u64, u64), CollectorError> {
         let output = StdCommand::new("timeout")
-            .args(&["10", "df", "--block-size=1", mount_point])
+            .args(&["2", "df", "--block-size=1", mount_point])
             .output()
             .map_err(|e| CollectorError::SystemRead {
                 path: format!("df {}", mount_point),
@@ -433,7 +433,7 @@ impl DiskCollector {
             cmd.args(&["-a", &format!("/dev/{}", drive_name)]);
         }
 
-        let output = run_command_with_timeout(cmd, 15).await
+        let output = run_command_with_timeout(cmd, 3).await
            .map_err(|e| CollectorError::SystemRead {
                path: format!("SMART data for {}", drive_name),
                error: e.to_string(),
@@ -772,7 +772,7 @@ impl DiskCollector {
     fn get_drive_info_for_path(&self, path: &str) -> anyhow::Result<PoolDrive> {
         // Use lsblk to find the backing device with timeout
         let output = StdCommand::new("timeout")
-            .args(&["10", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
+            .args(&["2", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
            .output()
            .map_err(|e| anyhow::anyhow!("Failed to run lsblk: {}", e))?;

Systemd collector (cm-dashboard-agent)

@@ -230,38 +230,6 @@ impl SystemdCollector {
                 }
             }
 
-            if service_name == "nfs-server" && status_info.active_state == "active" {
-                // Add NFS exports as sub-services
-                let exports = self.get_nfs_exports();
-                for (export_path, info) in exports {
-                    let display = if !info.is_empty() {
-                        format!("{} {}", export_path, info)
-                    } else {
-                        export_path
-                    };
-                    sub_services.push(SubServiceData {
-                        name: display,
-                        service_status: Status::Info,
-                        metrics: Vec::new(),
-                        service_type: "nfs_export".to_string(),
-                    });
-                }
-            }
-
-            if (service_name == "smbd" || service_name == "samba-smbd") && status_info.active_state == "active" {
-                // Add SMB shares as sub-services
-                let shares = self.get_smb_shares();
-                for (share_name, share_path) in shares {
-                    let metrics = Vec::new();
-                    sub_services.push(SubServiceData {
-                        name: format!("{}: {}", share_name, share_path),
-                        service_status: Status::Info,
-                        metrics,
-                        service_type: "smb_share".to_string(),
-                    });
-                }
-            }
-
             // Create complete service data
             let service_data = ServiceData {
                 name: service_name.clone(),
@@ -1043,148 +1011,6 @@ impl SystemdCollector {
         }
     }
 
-    /// Get NFS exports from exportfs
-    /// Returns a list of (export_path, info_string) tuples
-    fn get_nfs_exports(&self) -> Vec<(String, String)> {
-        let output = match Command::new("timeout")
-            .args(["2", "exportfs", "-v"])
-            .output()
-        {
-            Ok(output) if output.status.success() => output,
-            _ => return Vec::new(),
-        };
-
-        let exports_output = String::from_utf8_lossy(&output.stdout);
-        let mut exports_map: std::collections::HashMap<String, Vec<(String, String)>> =
-            std::collections::HashMap::new();
-        let mut current_path: Option<String> = None;
-
-        for line in exports_output.lines() {
-            let trimmed = line.trim();
-            if trimmed.is_empty() || trimmed.starts_with('#') {
-                continue;
-            }
-
-            if trimmed.starts_with('/') {
-                // Export path line - may have network on same line or continuation
-                let parts: Vec<&str> = trimmed.splitn(2, char::is_whitespace).collect();
-                let path = parts[0].to_string();
-                current_path = Some(path.clone());
-
-                // Check if network info is on the same line
-                if parts.len() > 1 {
-                    let rest = parts[1].trim();
-                    if let Some(paren_pos) = rest.find('(') {
-                        let network = rest[..paren_pos].trim();
-                        if let Some(end_paren) = rest.find(')') {
-                            let options = &rest[paren_pos+1..end_paren];
-                            let mode = if options.contains(",rw,") || options.ends_with(",rw") {
-                                "rw"
-                            } else {
-                                "ro"
-                            };
-                            exports_map.entry(path)
-                                .or_insert_with(Vec::new)
-                                .push((network.to_string(), mode.to_string()));
-                        }
-                    }
-                }
-            } else if let Some(ref path) = current_path {
-                // Continuation line with network and options
-                if let Some(paren_pos) = trimmed.find('(') {
-                    let network = trimmed[..paren_pos].trim();
-                    if let Some(end_paren) = trimmed.find(')') {
-                        let options = &trimmed[paren_pos+1..end_paren];
-                        let mode = if options.contains(",rw,") || options.ends_with(",rw") {
-                            "rw"
-                        } else {
-                            "ro"
-                        };
-                        exports_map.entry(path.clone())
-                            .or_insert_with(Vec::new)
-                            .push((network.to_string(), mode.to_string()));
-                    }
-                }
-            }
-        }
-
-        // Build display strings: "path: mode [networks]"
-        let mut exports: Vec<(String, String)> = exports_map
-            .into_iter()
-            .map(|(path, mut entries)| {
-                if entries.is_empty() {
-                    return (path, String::new());
-                }
-                let mode = entries[0].1.clone();
-                let networks: Vec<String> = entries.drain(..).map(|(n, _)| n).collect();
-                let info = format!("{} [{}]", mode, networks.join(", "));
-                (path, info)
-            })
-            .collect();
-        exports.sort_by(|a, b| a.0.cmp(&b.0));
-        exports
-    }
-
-    /// Get SMB shares from smb.conf
-    /// Returns a list of (share_name, share_path) tuples
-    fn get_smb_shares(&self) -> Vec<(String, String)> {
-        match std::fs::read_to_string("/etc/samba/smb.conf") {
-            Ok(config) => {
-                let mut shares = Vec::new();
-                let mut current_share: Option<String> = None;
-                let mut current_path: Option<String> = None;
-
-                for line in config.lines() {
-                    let line = line.trim();
-
-                    // Skip comments and empty lines
-                    if line.is_empty() || line.starts_with('#') || line.starts_with(';') {
-                        continue;
-                    }
-
-                    // Detect share section [sharename]
-                    if line.starts_with('[') && line.ends_with(']') {
-                        // Save previous share if we have both name and path
-                        if let (Some(name), Some(path)) = (current_share.take(), current_path.take()) {
-                            // Skip special sections
-                            if name != "global" && name != "homes" && name != "printers" {
-                                shares.push((name, path));
-                            }
-                        }
-
-                        // Start new share
-                        let share_name = line[1..line.len()-1].trim().to_string();
-                        current_share = Some(share_name);
-                        current_path = None;
-                    }
-                    // Look for path = /some/path
-                    else if line.starts_with("path") && line.contains('=') {
-                        if let Some(path_value) = line.split('=').nth(1) {
-                            current_path = Some(path_value.trim().to_string());
-                        }
-                    }
-                }
-
-                // Don't forget the last share
-                if let (Some(name), Some(path)) = (current_share, current_path) {
-                    if name != "global" && name != "homes" && name != "printers" {
-                        shares.push((name, path));
-                    }
-                }
-
-                shares
-            }
-            _ => Vec::new(),
-        }
-    }
-
     /// Get nftables open ports grouped by protocol
     /// Returns: (tcp_ports_string, udp_ports_string)
     fn get_nftables_open_ports(&self) -> (String, String) {

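For context on the removed sub-services: get_nfs_exports() parsed `exportfs -v` output of roughly the shape sketched below (paths, networks, and option lists are made up). A long export path wraps onto an indented continuation line, which is the second branch the parser handled; each export was then summarized as "rw [networks]" or "ro [networks]" and attached to the nfs-server service, while get_smb_shares() read share names and paths straight from /etc/samba/smb.conf.

    /srv/nfs/media    	192.168.1.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,root_squash,no_all_squash)
    /srv/nfs/a-rather-long-export-path
    		192.168.1.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,ro,secure,root_squash,no_all_squash)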
Cargo.toml (cm-dashboard)

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.275"
+version = "0.1.265"
 edition = "2021"
 
 [dependencies]

Dashboard main loop (cm-dashboard)

@@ -138,12 +138,11 @@ impl Dashboard {
         let metrics_check_interval = Duration::from_millis(100); // Check for metrics every 100ms
         let mut last_heartbeat_check = Instant::now();
         let heartbeat_check_interval = Duration::from_secs(1); // Check for host connectivity every 1 second
-        let mut needs_render = true; // Track if we need to render
 
         loop {
             // Handle terminal events (keyboard and mouse input) only if not headless
             if !self.headless {
-                match event::poll(Duration::from_millis(200)) {
+                match event::poll(Duration::from_millis(50)) {
                     Ok(true) => {
                         match event::read() {
                             Ok(event) => {
@@ -153,7 +152,6 @@ impl Dashboard {
                                 // Handle keyboard input
                                 match tui_app.handle_input(event) {
                                     Ok(_) => {
-                                        needs_render = true;
                                         // Check if we should quit
                                         if tui_app.should_quit() {
                                             info!("Quit requested, exiting dashboard");
@@ -170,11 +168,10 @@ impl Dashboard {
                                     if let Err(e) = self.handle_mouse_event(mouse_event) {
                                         error!("Error handling mouse event: {}", e);
                                     }
-                                    needs_render = true;
                                 }
                                 Event::Resize(_width, _height) => {
-                                    // Terminal was resized - mark for re-render
-                                    needs_render = true;
+                                    // Terminal was resized - just continue and re-render
+                                    // The next render will automatically use the new size
                                 }
                                 _ => {}
                             }
@@ -192,6 +189,38 @@ impl Dashboard {
                                 break;
                             }
                         }
+
+                // Render UI immediately after handling input for responsive feedback
+                if let Some(ref mut terminal) = self.terminal {
+                    if let Some(ref mut tui_app) = self.tui_app {
+                        // Clear and autoresize terminal to handle any resize events
+                        if let Err(e) = terminal.autoresize() {
+                            warn!("Error autoresizing terminal: {}", e);
+                        }
+
+                        // Check minimum terminal size to prevent panics
+                        let size = terminal.size().unwrap_or_default();
+                        if size.width < 90 || size.height < 15 {
+                            // Terminal too small, show error message
+                            let msg_text = format!("Terminal too small\n\nMinimum: 90x15\nCurrent: {}x{}", size.width, size.height);
+                            let _ = terminal.draw(|frame| {
+                                use ratatui::widgets::{Paragraph, Block, Borders};
+                                use ratatui::layout::Alignment;
+                                let msg = Paragraph::new(msg_text.clone())
+                                    .alignment(Alignment::Center)
+                                    .block(Block::default().borders(Borders::ALL));
+                                frame.render_widget(msg, frame.size());
+                            });
+                        } else if let Err(e) = terminal.draw(|frame| {
+                            let (title_area, system_area, services_area) = tui_app.render(frame, &self.metric_store);
+                            self.title_area = title_area;
+                            self.system_area = system_area;
+                            self.services_area = services_area;
+                        }) {
+                            error!("Error rendering TUI after input: {}", e);
+                        }
+                    }
+                }
             }
 
             // Check for new metrics
@@ -230,8 +259,6 @@ impl Dashboard {
                 if let Some(ref mut tui_app) = self.tui_app {
                     tui_app.update_metrics(&mut self.metric_store);
                 }
-
-                needs_render = true; // New metrics received, need to render
             }
 
             // Also check for command output messages
@@ -260,11 +287,10 @@ impl Dashboard {
                     tui_app.update_hosts(connected_hosts);
                 }
                 last_heartbeat_check = Instant::now();
-                needs_render = true; // Heartbeat check happened, may have changed hosts
             }
 
-            // Render TUI only when needed (not headless and something changed)
-            if !self.headless && needs_render {
+            // Render TUI (only if not headless)
+            if !self.headless {
                 if let Some(ref mut terminal) = self.terminal {
                     if let Some(ref mut tui_app) = self.tui_app {
                         // Clear and autoresize terminal to handle any resize events
@@ -296,8 +322,10 @@ impl Dashboard {
                         }
                     }
                 }
-                needs_render = false; // Reset flag after rendering
             }
+
+            // Small sleep to prevent excessive CPU usage
+            tokio::time::sleep(Duration::from_millis(10)).await;
         }
 
         info!("Dashboard main loop ended");

System widget (cm-dashboard)

@@ -44,9 +44,9 @@ pub struct SystemWidget {
     storage_pools: Vec<StoragePool>,
 
     // Backup metrics
-    backup_last_time: Option<String>,
-    backup_status: Status,
-    backup_repositories: Vec<cm_dashboard_shared::BackupRepositoryData>,
+    backup_repositories: Vec<String>,
+    backup_repository_status: Status,
+    backup_disks: Vec<cm_dashboard_shared::BackupDiskData>,
 
     // Overall status
     has_data: bool,
@@ -112,9 +112,9 @@ impl SystemWidget {
             tmp_status: Status::Unknown,
             tmpfs_mounts: Vec::new(),
             storage_pools: Vec::new(),
-            backup_last_time: None,
-            backup_status: Status::Unknown,
             backup_repositories: Vec::new(),
+            backup_repository_status: Status::Unknown,
+            backup_disks: Vec::new(),
             has_data: false,
             scroll_offset: 0,
             last_viewport_height: 0,
@@ -221,9 +221,9 @@ impl Widget for SystemWidget {
         // Extract backup data
         let backup = &agent_data.backup;
-        self.backup_last_time = backup.last_backup_time.clone();
-        self.backup_status = backup.backup_status;
         self.backup_repositories = backup.repositories.clone();
+        self.backup_repository_status = backup.repository_status;
+        self.backup_disks = backup.disks.clone();
 
         // Clamp scroll offset to valid range after update
         // This prevents scroll issues when switching between hosts
@@ -533,42 +533,79 @@ impl SystemWidget {
     fn render_backup(&self) -> Vec<Line<'_>> {
         let mut lines = Vec::new();
 
-        if self.backup_repositories.is_empty() {
-            return lines;
-        }
-
-        // Format backup time (use complete timestamp)
-        let time_display = if let Some(ref time_str) = self.backup_last_time {
-            time_str.clone()
-        } else {
-            "unknown".to_string()
-        };
-
-        // Header: just the timestamp
-        let repo_spans = StatusIcons::create_status_spans(self.backup_status, &time_display);
-        lines.push(Line::from(repo_spans));
-
-        // List all repositories with archive count and size
-        let repo_count = self.backup_repositories.len();
-        for (idx, repo) in self.backup_repositories.iter().enumerate() {
-            let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
-
-            // Format size: use kB for < 1MB, MB for < 1GB, otherwise GB
-            let size_display = if repo.repo_size_gb < 0.001 {
-                format!("{:.0}kB", repo.repo_size_gb * 1024.0 * 1024.0)
-            } else if repo.repo_size_gb < 1.0 {
-                format!("{:.0}MB", repo.repo_size_gb * 1024.0)
-            } else {
-                format!("{:.1}GB", repo.repo_size_gb)
-            };
-
-            let repo_text = format!("{} ({}) {}", repo.name, repo.archive_count, size_display);
-            let mut repo_spans = vec![
-                Span::styled(format!(" {} ", tree_char), Typography::tree()),
-            ];
-            repo_spans.extend(StatusIcons::create_status_spans(repo.status, &repo_text));
-            lines.push(Line::from(repo_spans));
-        }
+        // First section: Repository status and list
+        if !self.backup_repositories.is_empty() {
+            let repo_text = format!("Repo: {}", self.backup_repositories.len());
+            let repo_spans = StatusIcons::create_status_spans(self.backup_repository_status, &repo_text);
+            lines.push(Line::from(repo_spans));
+
+            // List all repositories (sorted for consistent display)
+            let mut sorted_repos = self.backup_repositories.clone();
+            sorted_repos.sort();
+            let repo_count = sorted_repos.len();
+            for (idx, repo) in sorted_repos.iter().enumerate() {
+                let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
+                lines.push(Line::from(vec![
+                    Span::styled(format!(" {} ", tree_char), Typography::tree()),
+                    Span::styled(repo.clone(), Typography::secondary()),
+                ]));
+            }
+        }
+
+        // Second section: Per-disk backup information (sorted by serial for consistent display)
+        let mut sorted_disks = self.backup_disks.clone();
+        sorted_disks.sort_by(|a, b| a.serial.cmp(&b.serial));
+        for disk in &sorted_disks {
+            let truncated_serial = truncate_serial(&disk.serial);
+
+            let mut details = Vec::new();
+            if let Some(temp) = disk.temperature_celsius {
+                details.push(format!("T: {}°C", temp as i32));
+            }
+            if let Some(wear) = disk.wear_percent {
+                details.push(format!("W: {}%", wear as i32));
+            }
+
+            let disk_text = if !details.is_empty() {
+                format!("{} {}", truncated_serial, details.join(" "))
+            } else {
+                truncated_serial
+            };
+
+            // Overall disk status (worst of backup and usage)
+            let disk_status = disk.backup_status.max(disk.usage_status);
+            let disk_spans = StatusIcons::create_status_spans(disk_status, &disk_text);
+            lines.push(Line::from(disk_spans));
+
+            // Show backup time with status
+            if let Some(backup_time) = &disk.last_backup_time {
+                let time_text = format!("Backup: {}", backup_time);
+                let mut time_spans = vec![
+                    Span::styled(" ├─ ", Typography::tree()),
+                ];
+                time_spans.extend(StatusIcons::create_status_spans(disk.backup_status, &time_text));
+                lines.push(Line::from(time_spans));
+            }
+
+            // Show usage with status and archive count
+            let archive_display = if disk.archives_min == disk.archives_max {
+                format!("{}", disk.archives_min)
+            } else {
+                format!("{}-{}", disk.archives_min, disk.archives_max)
+            };
+            let usage_text = format!(
+                "Usage: ({}) {:.0}% {:.0}GB/{:.0}GB",
+                archive_display,
+                disk.disk_usage_percent,
+                disk.disk_used_gb,
+                disk.disk_total_gb
+            );
+            let mut usage_spans = vec![
+                Span::styled(" └─ ", Typography::tree()),
+            ];
+            usage_spans.extend(StatusIcons::create_status_spans(disk.usage_status, &usage_text));
+            lines.push(Line::from(usage_spans));
+        }
 
         lines
@@ -839,10 +876,13 @@ impl SystemWidget {
         }
 
         // Backup section
-        if !self.backup_repositories.is_empty() {
-            count += 1; // Header: "Backup:"
-            count += 1; // Repo count and timestamp header
-            count += self.backup_repositories.len(); // Individual repos
+        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
+            count += 1; // Header
+            if !self.backup_repositories.is_empty() {
+                count += 1; // Repo header
+                count += self.backup_repositories.len();
+            }
+            count += self.backup_disks.len() * 3; // Each disk has 3 lines
         }
 
         count
@@ -948,7 +988,7 @@ impl SystemWidget {
         lines.extend(storage_lines);
 
         // Backup section (if available)
-        if !self.backup_repositories.is_empty() {
+        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
             lines.push(Line::from(vec![
                 Span::styled("Backup:", Typography::widget_title())
             ]));
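To make the new layout concrete, the backup block this widget renders comes out roughly as sketched below; status icons are omitted, and the serial, service names, and numbers are invented.

    Backup:
    Repo: 3
      ├─ gitea
      ├─ nextcloud
      └─ vaultwarden
    ABC123 W: 4%
      ├─ Backup: 2024-01-01T02:00:00
      └─ Usage: (30) 42% 210GB/500GB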

Cargo.toml (cm-dashboard-shared)

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.275"
+version = "0.1.265"
 edition = "2021"
 
 [dependencies]

Shared data types (cm-dashboard-shared)

@@ -182,18 +182,27 @@ pub struct SubServiceMetric {
 /// Backup system data
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct BackupData {
-    pub last_backup_time: Option<String>,
-    pub backup_status: Status,
-    pub repositories: Vec<BackupRepositoryData>,
+    pub repositories: Vec<String>,
+    pub repository_status: Status,
+    pub disks: Vec<BackupDiskData>,
 }
 
-/// Individual backup repository information
+/// Backup repository disk information
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct BackupRepositoryData {
-    pub name: String,
-    pub archive_count: i64,
-    pub repo_size_gb: f32,
-    pub status: Status,
+pub struct BackupDiskData {
+    pub serial: String,
+    pub product_name: Option<String>,
+    pub wear_percent: Option<f32>,
+    pub temperature_celsius: Option<f32>,
+    pub last_backup_time: Option<String>,
+    pub backup_status: Status,
+    pub disk_usage_percent: f32,
+    pub disk_used_gb: f32,
+    pub disk_total_gb: f32,
+    pub usage_status: Status,
+    pub services: Vec<String>,
+    pub archives_min: i64,
+    pub archives_max: i64,
 }
 
 impl AgentData {
@@ -236,9 +245,9 @@ impl AgentData {
             },
             services: Vec::new(),
             backup: BackupData {
-                last_backup_time: None,
-                backup_status: Status::Unknown,
                 repositories: Vec::new(),
+                repository_status: Status::Unknown,
+                disks: Vec::new(),
             },
         }
     }
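One assumption both sides of this change lean on: the collector and the widget compute a "worst" status with .max(), which only works because Status is an ordered enum. A minimal, self-contained sketch of that idea with a stand-in enum (the variant order here is assumed; the real Status lives in cm-dashboard-shared and may place Unknown differently):

    // Stand-in enum: deriving Ord makes later variants compare as greater,
    // so .max() picks the more severe status. The variant order is an assumption.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    enum Status { Ok, Unknown, Warning, Critical }

    fn main() {
        let mut worst = Status::Ok;
        for s in [Status::Ok, Status::Warning, Status::Unknown] {
            worst = worst.max(s); // same fold as worst_status in the collector
        }
        assert_eq!(worst, Status::Warning);
        println!("worst status: {:?}", worst);
    }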