Compare commits

...

14 Commits

Author SHA1 Message Date
caba78004e Fix empty Storage section by properly aliasing command types
All checks were successful
Build and Release / build-and-release (push) Successful in 2m6s
v0.1.220 broke the disk collector by changing the import from
std::process::Command to tokio::process::Command, but lines 193 and
767 explicitly used std::process::Command::new(), which silently failed.

Solution: Import both as aliases (TokioCommand/StdCommand) and use the
appropriate type for each operation: async commands use TokioCommand
with run_command_with_timeout, sync commands use StdCommand with the
system timeout wrapper.

Fixes: Empty Storage section after v0.1.220 deployment
Bump version to v0.1.221
2025-11-29 21:29:33 +01:00
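
A minimal sketch of the split this commit describes, with both command types imported under aliases. The function names and the 2-second deadlines here are illustrative stand-ins; the agent's actual helper, run_command_with_timeout, appears further down in this diff.

```rust
use std::process::Command as StdCommand;
use std::time::Duration;
use tokio::process::Command as TokioCommand;
use tokio::time::timeout;

// Async path: a tokio command awaited under a tokio timeout.
async fn list_block_devices() -> std::io::Result<std::process::Output> {
    let mut cmd = TokioCommand::new("lsblk");
    cmd.args(&["-rn", "-o", "NAME,MOUNTPOINT"]);
    match timeout(Duration::from_secs(2), cmd.output()).await {
        Ok(result) => result,
        Err(_) => Err(std::io::Error::new(std::io::ErrorKind::TimedOut, "lsblk timed out")),
    }
}

// Sync path: a std command wrapped in the system `timeout` binary.
fn filesystem_usage(mount_point: &str) -> std::io::Result<std::process::Output> {
    StdCommand::new("timeout")
        .args(&["2", "df", "--block-size=1", mount_point])
        .output()
}
```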
77bf08a978 Fix blocking smartctl commands with proper async/timeout handling
All checks were successful
Build and Release / build-and-release (push) Successful in 2m2s
- Changed disk collector to use tokio::process::Command instead of std::process::Command
- Updated run_command_with_timeout to properly kill processes on timeout
- Fixes an issue where smartctl hangs on problematic drives (/dev/sda), freezing the entire agent
- Timeout now force-kills hung processes using kill -9, preventing orphaned smartctl processes

This resolves the issue where Data_3 showed an unknown status because smartctl was hanging
indefinitely while trying to read from a problematic drive, blocking the entire collector.

Bump version to v0.1.220

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-29 21:09:04 +01:00
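
A condensed sketch of the kill-on-timeout pattern this commit describes; the full helper lives in the collectors module diff below, and the function name, device argument, and 10-second deadline here are illustrative.

```rust
use std::time::Duration;
use tokio::process::Command;
use tokio::time::timeout;

// Spawn, wait with a deadline, and force-kill the child if the deadline passes,
// so a hung smartctl cannot outlive the collection cycle.
async fn smartctl_with_deadline(device: &str) -> std::io::Result<std::process::Output> {
    let child = Command::new("smartctl").args(&["-a", device]).spawn()?;
    let pid = child.id();
    match timeout(Duration::from_secs(10), child.wait_with_output()).await {
        Ok(result) => result,
        Err(_) => {
            if let Some(pid) = pid {
                // kill -9 mirrors the agent's approach and avoids orphaned processes.
                let _ = Command::new("kill").args(&["-9", &pid.to_string()]).output().await;
            }
            Err(std::io::Error::new(std::io::ErrorKind::TimedOut, "smartctl timed out"))
        }
    }
}
```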
929870f8b6 Bump version to v0.1.219
All checks were successful
Build and Release / build-and-release (push) Successful in 1m11s
2025-11-29 18:35:14 +01:00
7aae852b7b Bump version to v0.1.218
All checks were successful
Build and Release / build-and-release (push) Successful in 1m19s
2025-11-29 17:59:33 +01:00
40f3ff66d8 Show archive count range to detect inconsistencies
- Display a single number if all services have the same count
- Display a min-max range if counts differ (indicates a problem)
2025-11-29 17:59:24 +01:00
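
The display rule from this commit as a small self-contained sketch (the function name is illustrative):

```rust
/// One number when every service has the same archive count,
/// "min-max" when they differ, which flags a repository falling behind.
fn archive_count_display(archives_min: i64, archives_max: i64) -> String {
    if archives_min == archives_max {
        format!("{}", archives_min)
    } else {
        format!("{}-{}", archives_min, archives_max)
    }
}

fn main() {
    assert_eq!(archive_count_display(42, 42), "42");    // consistent
    assert_eq!(archive_count_display(40, 42), "40-42"); // inconsistent
}
```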
1c1beddb55 Bump version to v0.1.217
All checks were successful
Build and Release / build-and-release (push) Successful in 1m20s
2025-11-29 17:51:13 +01:00
620d1f10b6 Show archive count per service instead of total sum 2025-11-29 17:51:01 +01:00
a0d571a40e Bump version to v0.1.216
All checks were successful
Build and Release / build-and-release (push) Successful in 1m19s
2025-11-29 17:44:12 +01:00
977200fff3 Move archive count to Usage line in backup display 2025-11-29 17:44:05 +01:00
d692de5f83 Bump version to v0.1.215
All checks were successful
Build and Release / build-and-release (push) Successful in 1m11s
2025-11-29 17:41:49 +01:00
f5913dbd43 Add archive count to backup disk display 2025-11-29 17:41:11 +01:00
faa30a7839 Sort backup repositories and disks for stable display
All checks were successful
Build and Release / build-and-release (push) Successful in 1m21s
- Sort repositories alphabetically before rendering
- Sort backup disks by serial number
- Prevents display jumping between different orderings on updates
- Consistent display order across refreshes

Bump version to v0.1.214

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-29 17:15:17 +01:00
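
A minimal illustration of the ordering rule from this commit (the sample data is made up): repositories sort lexically, disks sort by serial, so repeated refreshes render in the same order.

```rust
fn main() {
    let mut repositories = vec!["vaultwarden", "immich", "mysql"];
    repositories.sort();
    assert_eq!(repositories, vec!["immich", "mysql", "vaultwarden"]);

    // (serial, temperature) pairs standing in for BackupDiskData.
    let mut disks = vec![("WDZS8RY0", 29), ("GGA04461", 28)];
    disks.sort_by(|a, b| a.0.cmp(b.0));
    assert_eq!(disks[0].0, "GGA04461");
}
```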
6e4a42799f Bump version to v0.1.213
All checks were successful
Build and Release / build-and-release (push) Successful in 1m22s
2025-11-29 16:46:16 +01:00
afb8d68e03 Implement multi-disk backup support
- Update BackupData structure to support multiple backup disks
- Scan /var/lib/backup/status/ directory for all status files
- Calculate status icons for backup and disk usage
- Aggregate repository status from all disks
- Update dashboard to display all backup disks with per-disk status
- Display repository list with count and aggregated status
2025-11-29 16:44:50 +01:00
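
A condensed sketch of the discovery step behind multi-disk support: every backup-status-*.toml file under /var/lib/backup/status/ is treated as one backup disk. The function name is illustrative; the full version in this diff also logs and tolerates read errors.

```rust
use std::path::PathBuf;

fn find_status_files(dir: &str) -> std::io::Result<Vec<PathBuf>> {
    let mut files = Vec::new();
    for entry in std::fs::read_dir(dir)? {
        let path = entry?.path();
        let name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
        // One status file per backup disk.
        if path.is_file() && name.starts_with("backup-status-") && name.ends_with(".toml") {
            files.push(path);
        }
    }
    Ok(files)
}
```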
10 changed files with 276 additions and 179 deletions

View File

@@ -327,9 +327,16 @@
 Storage:
 ├─ ● Data_2: GGA04461 T: 28°C
 └─ ● Parity: WDZS8RY0 T: 29°C
 Backup:
+● Repo: 4
+├─ getea
+├─ vaultwarden
+├─ mysql
+└─ immich
+● W800639Y W: 2%
+├─ ● Backup: 2025-11-29T04:00:01.324623
+└─ ● Usage: 8% 70GB/916GB
 ● WD-WCC7K1234567 T: 32°C W: 12%
-├─ Last: 2h ago (12.3GB)
-├─ Next: in 22h
+├─ ● Backup: 2025-11-29T04:00:01.324623
 └─ ● Usage: 45% 678GB/1.5TB
 ```

Cargo.lock (generated), 6 lines changed
View File

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 [[package]]
 name = "cm-dashboard"
-version = "0.1.211"
+version = "0.1.220"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.211"
+version = "0.1.220"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -324,7 +324,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.211"
+version = "0.1.220"
 dependencies = [
  "chrono",
  "serde",

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.212"
+version = "0.1.221"
 edition = "2021"
 [dependencies]

View File

@@ -1,36 +1,66 @@
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData};
+use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData, Status};
 use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::fs;
-use std::path::Path;
-use tracing::debug;
+use std::path::{Path, PathBuf};
+use tracing::{debug, warn};
 use super::{Collector, CollectorError};
 /// Backup collector that reads backup status from TOML files with structured data output
 pub struct BackupCollector {
-    /// Path to backup status file
-    status_file_path: String,
+    /// Directory containing backup status files
+    status_dir: String,
 }
 impl BackupCollector {
     pub fn new() -> Self {
         Self {
-            status_file_path: "/var/lib/backup/backup-status.toml".to_string(),
+            status_dir: "/var/lib/backup/status".to_string(),
         }
     }
-    /// Read backup status from TOML file
-    async fn read_backup_status(&self) -> Result<Option<BackupStatusToml>, CollectorError> {
-        if !Path::new(&self.status_file_path).exists() {
-            debug!("Backup status file not found: {}", self.status_file_path);
-            return Ok(None);
+    /// Scan directory for all backup status files
+    async fn scan_status_files(&self) -> Result<Vec<PathBuf>, CollectorError> {
+        let status_path = Path::new(&self.status_dir);
+        if !status_path.exists() {
+            debug!("Backup status directory not found: {}", self.status_dir);
+            return Ok(Vec::new());
         }
-        let content = fs::read_to_string(&self.status_file_path)
+        let mut status_files = Vec::new();
+        match fs::read_dir(status_path) {
+            Ok(entries) => {
+                for entry in entries {
+                    if let Ok(entry) = entry {
+                        let path = entry.path();
+                        if path.is_file() {
+                            if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
+                                if filename.starts_with("backup-status-") && filename.ends_with(".toml") {
+                                    status_files.push(path);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            Err(e) => {
+                warn!("Failed to read backup status directory: {}", e);
+                return Ok(Vec::new());
+            }
+        }
+        Ok(status_files)
+    }
+    /// Read a single backup status file
+    async fn read_status_file(&self, path: &Path) -> Result<BackupStatusToml, CollectorError> {
+        let content = fs::read_to_string(path)
             .map_err(|e| CollectorError::SystemRead {
-                path: self.status_file_path.clone(),
+                path: path.to_string_lossy().to_string(),
                 error: e.to_string(),
             })?;
@@ -40,66 +70,122 @@ impl BackupCollector {
                 error: format!("Failed to parse backup status TOML: {}", e),
             })?;
-        Ok(Some(status))
+        Ok(status)
+    }
+    /// Calculate backup status from TOML status field
+    fn calculate_backup_status(status_str: &str) -> Status {
+        match status_str.to_lowercase().as_str() {
+            "success" => Status::Ok,
+            "warning" => Status::Warning,
+            "failed" | "error" => Status::Critical,
+            _ => Status::Unknown,
+        }
+    }
+    /// Calculate usage status from disk usage percentage
+    fn calculate_usage_status(usage_percent: f32) -> Status {
+        if usage_percent < 80.0 {
+            Status::Ok
+        } else if usage_percent < 90.0 {
+            Status::Warning
+        } else {
+            Status::Critical
+        }
     }
     /// Convert BackupStatusToml to BackupData and populate AgentData
     async fn populate_backup_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
-        if let Some(backup_status) = self.read_backup_status().await? {
-            // Use raw start_time string from TOML
-            // Extract disk information
-            let repository_disk = if let Some(disk_space) = &backup_status.disk_space {
-                Some(BackupDiskData {
-                    serial: backup_status.disk_serial_number.clone().unwrap_or_else(|| "Unknown".to_string()),
-                    usage_percent: disk_space.usage_percent as f32,
-                    used_gb: disk_space.used_gb as f32,
-                    total_gb: disk_space.total_gb as f32,
-                    wear_percent: backup_status.disk_wear_percent,
-                    temperature_celsius: None, // Not available in current TOML
-                })
-            } else if let Some(serial) = &backup_status.disk_serial_number {
-                // Fallback: create minimal disk info if we have serial but no disk_space
-                Some(BackupDiskData {
-                    serial: serial.clone(),
-                    usage_percent: 0.0,
-                    used_gb: 0.0,
-                    total_gb: 0.0,
-                    wear_percent: backup_status.disk_wear_percent,
-                    temperature_celsius: None,
-                })
-            } else {
-                None
-            };
-            // Calculate total repository size from services
-            let total_size_gb = backup_status.services
-                .values()
-                .map(|service| service.repo_size_bytes as f32 / (1024.0 * 1024.0 * 1024.0))
-                .sum::<f32>();
-            let backup_data = BackupData {
-                status: backup_status.status,
-                total_size_gb: Some(total_size_gb),
-                repository_health: Some("ok".to_string()), // Derive from status if needed
-                repository_disk,
-                last_backup_size_gb: None, // Not available in current TOML format
-                start_time_raw: Some(backup_status.start_time),
-            };
-            agent_data.backup = backup_data;
-        } else {
-            // No backup status available - set default values
+        let status_files = self.scan_status_files().await?;
+        if status_files.is_empty() {
+            debug!("No backup status files found");
             agent_data.backup = BackupData {
-                status: "unavailable".to_string(),
-                total_size_gb: None,
-                repository_health: None,
-                repository_disk: None,
-                last_backup_size_gb: None,
-                start_time_raw: None,
+                repositories: Vec::new(),
+                repository_status: Status::Unknown,
+                disks: Vec::new(),
             };
+            return Ok(());
         }
+        let mut all_repositories = HashSet::new();
+        let mut disks = Vec::new();
+        let mut worst_status = Status::Ok;
+        for status_file in status_files {
+            match self.read_status_file(&status_file).await {
+                Ok(backup_status) => {
+                    // Collect all service names
+                    for service_name in backup_status.services.keys() {
+                        all_repositories.insert(service_name.clone());
+                    }
+                    // Calculate backup status
+                    let backup_status_enum = Self::calculate_backup_status(&backup_status.status);
+                    // Calculate usage status from disk space
+                    let (usage_percent, used_gb, total_gb, usage_status) = if let Some(disk_space) = &backup_status.disk_space {
+                        let usage_pct = disk_space.usage_percent as f32;
+                        (
+                            usage_pct,
+                            disk_space.used_gb as f32,
+                            disk_space.total_gb as f32,
+                            Self::calculate_usage_status(usage_pct),
+                        )
+                    } else {
+                        (0.0, 0.0, 0.0, Status::Unknown)
+                    };
+                    // Update worst status
+                    worst_status = worst_status.max(backup_status_enum).max(usage_status);
+                    // Build service list for this disk
+                    let services: Vec<String> = backup_status.services.keys().cloned().collect();
+                    // Get min and max archive counts to detect inconsistencies
+                    let archives_min: i64 = backup_status.services.values()
+                        .map(|service| service.archive_count)
+                        .min()
+                        .unwrap_or(0);
+                    let archives_max: i64 = backup_status.services.values()
+                        .map(|service| service.archive_count)
+                        .max()
+                        .unwrap_or(0);
+                    // Create disk data
+                    let disk_data = BackupDiskData {
+                        serial: backup_status.disk_serial_number.unwrap_or_else(|| "Unknown".to_string()),
+                        product_name: backup_status.disk_product_name,
+                        wear_percent: backup_status.disk_wear_percent,
+                        temperature_celsius: None, // Not available in current TOML
+                        last_backup_time: Some(backup_status.start_time),
+                        backup_status: backup_status_enum,
+                        disk_usage_percent: usage_percent,
+                        disk_used_gb: used_gb,
+                        disk_total_gb: total_gb,
+                        usage_status,
+                        services,
+                        archives_min,
+                        archives_max,
+                    };
+                    disks.push(disk_data);
+                }
+                Err(e) => {
+                    warn!("Failed to read backup status file {:?}: {}", status_file, e);
+                }
+            }
+        }
+        let repositories: Vec<String> = all_repositories.into_iter().collect();
+        agent_data.backup = BackupData {
+            repositories,
+            repository_status: worst_status,
+            disks,
+        };
         Ok(())
     }
 }
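
The deserialization target BackupStatusToml is not part of this diff. The following is a hedged sketch of what it plausibly looks like, inferred purely from the field accesses in scan_status_files() and populate_backup_data() above; the nested struct names and exact numeric types are guesses, and the real definition may carry more fields and serde attributes.

```rust
use serde::Deserialize;
use std::collections::HashMap;

// Hypothetical reconstruction: every field below corresponds to an access in
// the collector code shown above; nothing else is assumed.
#[derive(Debug, Deserialize)]
struct BackupStatusToml {
    status: String,      // mapped through calculate_backup_status()
    start_time: String,  // stored as last_backup_time
    disk_serial_number: Option<String>,
    disk_product_name: Option<String>,
    disk_wear_percent: Option<f32>,
    disk_space: Option<DiskSpaceToml>,
    services: HashMap<String, ServiceToml>,
}

#[derive(Debug, Deserialize)]
struct DiskSpaceToml {
    usage_percent: f64,
    used_gb: f64,
    total_gb: f64,
}

#[derive(Debug, Deserialize)]
struct ServiceToml {
    archive_count: i64,
}
```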

View File

@@ -3,7 +3,8 @@ use async_trait::async_trait;
 use cm_dashboard_shared::{AgentData, DriveData, FilesystemData, PoolData, HysteresisThresholds, Status};
 use crate::config::DiskConfig;
-use std::process::Command;
+use tokio::process::Command as TokioCommand;
+use std::process::Command as StdCommand;
 use std::time::Instant;
 use std::collections::HashMap;
 use tracing::debug;
@@ -114,7 +115,7 @@ impl DiskCollector {
     async fn get_mount_devices(&self) -> Result<HashMap<String, String>, CollectorError> {
         use super::run_command_with_timeout;
-        let mut cmd = Command::new("lsblk");
+        let mut cmd = TokioCommand::new("lsblk");
         cmd.args(&["-rn", "-o", "NAME,MOUNTPOINT"]);
         let output = run_command_with_timeout(cmd, 2).await
@@ -189,7 +190,7 @@ impl DiskCollector {
     /// Get filesystem info for a single mount point
     fn get_filesystem_info(&self, mount_point: &str) -> Result<(u64, u64), CollectorError> {
-        let output = std::process::Command::new("timeout")
+        let output = StdCommand::new("timeout")
             .args(&["2", "df", "--block-size=1", mount_point])
             .output()
             .map_err(|e| CollectorError::SystemRead {
@@ -420,7 +421,7 @@ impl DiskCollector {
         // Use direct smartctl (no sudo) - service has CAP_SYS_RAWIO and CAP_SYS_ADMIN capabilities
         // For NVMe drives, specify device type explicitly
-        let mut cmd = Command::new("smartctl");
+        let mut cmd = TokioCommand::new("smartctl");
         if drive_name.starts_with("nvme") {
             cmd.args(&["-d", "nvme", "-a", &format!("/dev/{}", drive_name)]);
         } else {
@@ -763,7 +764,7 @@ impl DiskCollector {
     /// Get drive information for a mount path
     fn get_drive_info_for_path(&self, path: &str) -> anyhow::Result<PoolDrive> {
         // Use lsblk to find the backing device with timeout
-        let output = Command::new("timeout")
+        let output = StdCommand::new("timeout")
             .args(&["2", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
             .output()
             .map_err(|e| anyhow::anyhow!("Failed to run lsblk: {}", e))?;

View File

@@ -1,8 +1,7 @@
 use async_trait::async_trait;
 use cm_dashboard_shared::{AgentData};
-use std::process::{Command, Output};
+use std::process::Output;
 use std::time::Duration;
-use tokio::time::timeout;
 pub mod backup;
 pub mod cpu;
@@ -16,16 +15,29 @@ pub mod systemd;
 pub use error::CollectorError;
 /// Run a command with a timeout to prevent blocking
-pub async fn run_command_with_timeout(mut cmd: Command, timeout_secs: u64) -> std::io::Result<Output> {
+/// Properly kills the process if timeout is exceeded
+pub async fn run_command_with_timeout(mut cmd: tokio::process::Command, timeout_secs: u64) -> std::io::Result<Output> {
+    use tokio::time::timeout;
     let timeout_duration = Duration::from_secs(timeout_secs);
-    match timeout(timeout_duration, tokio::task::spawn_blocking(move || cmd.output())).await {
-        Ok(Ok(result)) => result,
-        Ok(Err(e)) => Err(std::io::Error::new(std::io::ErrorKind::Other, e)),
-        Err(_) => Err(std::io::Error::new(
-            std::io::ErrorKind::TimedOut,
-            format!("Command timed out after {} seconds", timeout_secs)
-        )),
+    let child = cmd.spawn()?;
+    let pid = child.id();
+    match timeout(timeout_duration, child.wait_with_output()).await {
+        Ok(result) => result,
+        Err(_) => {
+            // Timeout - force kill the process using system kill command
+            if let Some(process_id) = pid {
+                let _ = tokio::process::Command::new("kill")
+                    .args(&["-9", &process_id.to_string()])
+                    .output()
+                    .await;
+            }
+            Err(std::io::Error::new(
+                std::io::ErrorKind::TimedOut,
+                format!("Command timed out after {} seconds", timeout_secs)
+            ))
+        }
     }
 }

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.212"
+version = "0.1.221"
 edition = "2021"
 [dependencies]

View File

@@ -45,15 +45,9 @@ pub struct SystemWidget {
     storage_pools: Vec<StoragePool>,
     // Backup metrics
-    backup_status: String,
-    backup_start_time_raw: Option<String>,
-    backup_disk_serial: Option<String>,
-    backup_disk_usage_percent: Option<f32>,
-    backup_disk_used_gb: Option<f32>,
-    backup_disk_total_gb: Option<f32>,
-    backup_disk_wear_percent: Option<f32>,
-    backup_disk_temperature: Option<f32>,
-    backup_last_size_gb: Option<f32>,
+    backup_repositories: Vec<String>,
+    backup_repository_status: Status,
+    backup_disks: Vec<cm_dashboard_shared::BackupDiskData>,
     // Overall status
     has_data: bool,
@@ -114,15 +108,9 @@ impl SystemWidget {
             tmp_status: Status::Unknown,
             tmpfs_mounts: Vec::new(),
             storage_pools: Vec::new(),
-            backup_status: "unknown".to_string(),
-            backup_start_time_raw: None,
-            backup_disk_serial: None,
-            backup_disk_usage_percent: None,
-            backup_disk_used_gb: None,
-            backup_disk_total_gb: None,
-            backup_disk_wear_percent: None,
-            backup_disk_temperature: None,
-            backup_last_size_gb: None,
+            backup_repositories: Vec::new(),
+            backup_repository_status: Status::Unknown,
+            backup_disks: Vec::new(),
             has_data: false,
         }
     }
@@ -221,25 +209,9 @@ impl Widget for SystemWidget {
         // Extract backup data
         let backup = &agent_data.backup;
-        self.backup_status = backup.status.clone();
-        self.backup_start_time_raw = backup.start_time_raw.clone();
-        self.backup_last_size_gb = backup.last_backup_size_gb;
-        if let Some(disk) = &backup.repository_disk {
-            self.backup_disk_serial = Some(disk.serial.clone());
-            self.backup_disk_usage_percent = Some(disk.usage_percent);
-            self.backup_disk_used_gb = Some(disk.used_gb);
-            self.backup_disk_total_gb = Some(disk.total_gb);
-            self.backup_disk_wear_percent = disk.wear_percent;
-            self.backup_disk_temperature = disk.temperature_celsius;
-        } else {
-            self.backup_disk_serial = None;
-            self.backup_disk_usage_percent = None;
-            self.backup_disk_used_gb = None;
-            self.backup_disk_total_gb = None;
-            self.backup_disk_wear_percent = None;
-            self.backup_disk_temperature = None;
-        }
+        self.backup_repositories = backup.repositories.clone();
+        self.backup_repository_status = backup.repository_status;
+        self.backup_disks = backup.disks.clone();
     }
 }
@@ -539,14 +511,36 @@ impl SystemWidget {
     fn render_backup(&self) -> Vec<Line<'_>> {
         let mut lines = Vec::new();
-        // First line: serial number with temperature and wear
-        if let Some(serial) = &self.backup_disk_serial {
-            let truncated_serial = truncate_serial(serial);
+        // First section: Repository status and list
+        if !self.backup_repositories.is_empty() {
+            let repo_text = format!("Repo: {}", self.backup_repositories.len());
+            let repo_spans = StatusIcons::create_status_spans(self.backup_repository_status, &repo_text);
+            lines.push(Line::from(repo_spans));
+            // List all repositories (sorted for consistent display)
+            let mut sorted_repos = self.backup_repositories.clone();
+            sorted_repos.sort();
+            let repo_count = sorted_repos.len();
+            for (idx, repo) in sorted_repos.iter().enumerate() {
+                let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
+                lines.push(Line::from(vec![
+                    Span::styled(format!(" {} ", tree_char), Typography::tree()),
+                    Span::styled(repo.clone(), Typography::secondary()),
+                ]));
+            }
+        }
+        // Second section: Per-disk backup information (sorted by serial for consistent display)
+        let mut sorted_disks = self.backup_disks.clone();
+        sorted_disks.sort_by(|a, b| a.serial.cmp(&b.serial));
+        for disk in &sorted_disks {
+            let truncated_serial = truncate_serial(&disk.serial);
             let mut details = Vec::new();
-            if let Some(temp) = self.backup_disk_temperature {
+            if let Some(temp) = disk.temperature_celsius {
                 details.push(format!("T: {}°C", temp as i32));
             }
-            if let Some(wear) = self.backup_disk_wear_percent {
+            if let Some(wear) = disk.wear_percent {
                 details.push(format!("W: {}%", wear as i32));
             }
@@ -556,44 +550,40 @@ impl SystemWidget {
                 truncated_serial
             };
-            let backup_status = match self.backup_status.as_str() {
-                "completed" | "success" => Status::Ok,
-                "running" => Status::Pending,
-                "failed" => Status::Critical,
-                _ => Status::Unknown,
-            };
-            let disk_spans = StatusIcons::create_status_spans(backup_status, &disk_text);
+            // Overall disk status (worst of backup and usage)
+            let disk_status = disk.backup_status.max(disk.usage_status);
+            let disk_spans = StatusIcons::create_status_spans(disk_status, &disk_text);
             lines.push(Line::from(disk_spans));
-            // Show backup time from TOML if available
-            if let Some(start_time) = &self.backup_start_time_raw {
-                let time_text = if let Some(size) = self.backup_last_size_gb {
-                    format!("Time: {} ({:.1}GB)", start_time, size)
-                } else {
-                    format!("Time: {}", start_time)
-                };
-                lines.push(Line::from(vec![
+            // Show backup time with status
+            if let Some(backup_time) = &disk.last_backup_time {
+                let time_text = format!("Backup: {}", backup_time);
+                let mut time_spans = vec![
                     Span::styled(" ├─ ", Typography::tree()),
-                    Span::styled(time_text, Typography::secondary())
-                ]));
+                ];
+                time_spans.extend(StatusIcons::create_status_spans(disk.backup_status, &time_text));
+                lines.push(Line::from(time_spans));
             }
-            // Usage information
-            if let (Some(used), Some(total), Some(usage_percent)) = (
-                self.backup_disk_used_gb,
-                self.backup_disk_total_gb,
-                self.backup_disk_usage_percent
-            ) {
-                let usage_text = format!("Usage: {:.0}% {:.0}GB/{:.0}GB", usage_percent, used, total);
-                let usage_spans = StatusIcons::create_status_spans(Status::Ok, &usage_text);
-                let mut full_spans = vec![
-                    Span::styled(" └─ ", Typography::tree()),
-                ];
-                full_spans.extend(usage_spans);
-                lines.push(Line::from(full_spans));
-            }
+            // Show usage with status and archive count
+            let archive_display = if disk.archives_min == disk.archives_max {
+                format!("{}", disk.archives_min)
+            } else {
+                format!("{}-{}", disk.archives_min, disk.archives_max)
+            };
+            let usage_text = format!(
+                "Usage: ({}) {:.0}% {:.0}GB/{:.0}GB",
+                archive_display,
+                disk.disk_usage_percent,
+                disk.disk_used_gb,
+                disk.disk_total_gb
+            );
+            let mut usage_spans = vec![
+                Span::styled(" └─ ", Typography::tree()),
+            ];
+            usage_spans.extend(StatusIcons::create_status_spans(disk.usage_status, &usage_text));
+            lines.push(Line::from(usage_spans));
         }
         lines
@@ -901,7 +891,7 @@ impl SystemWidget {
         lines.extend(storage_lines);
         // Backup section (if available)
-        if self.backup_status != "unavailable" && self.backup_status != "unknown" {
+        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
             lines.push(Line::from(vec![
                 Span::styled("Backup:", Typography::widget_title())
             ]));

View File

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.212"
+version = "0.1.221"
 edition = "2021"
 [dependencies]

View File

@@ -176,23 +176,27 @@ pub struct SubServiceMetric {
 /// Backup system data
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct BackupData {
-    pub status: String,
-    pub total_size_gb: Option<f32>,
-    pub repository_health: Option<String>,
-    pub repository_disk: Option<BackupDiskData>,
-    pub last_backup_size_gb: Option<f32>,
-    pub start_time_raw: Option<String>,
+    pub repositories: Vec<String>,
+    pub repository_status: Status,
+    pub disks: Vec<BackupDiskData>,
 }
 /// Backup repository disk information
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct BackupDiskData {
     pub serial: String,
-    pub usage_percent: f32,
-    pub used_gb: f32,
-    pub total_gb: f32,
+    pub product_name: Option<String>,
     pub wear_percent: Option<f32>,
     pub temperature_celsius: Option<f32>,
+    pub last_backup_time: Option<String>,
+    pub backup_status: Status,
+    pub disk_usage_percent: f32,
+    pub disk_used_gb: f32,
+    pub disk_total_gb: f32,
+    pub usage_status: Status,
+    pub services: Vec<String>,
+    pub archives_min: i64,
+    pub archives_max: i64,
 }
 impl AgentData {
@@ -233,12 +237,9 @@ impl AgentData {
             },
             services: Vec::new(),
             backup: BackupData {
-                status: "unknown".to_string(),
-                total_size_gb: None,
-                repository_health: None,
-                repository_disk: None,
-                last_backup_size_gb: None,
-                start_time_raw: None,
+                repositories: Vec::new(),
+                repository_status: Status::Unknown,
+                disks: Vec::new(),
             },
         }
     }