Compare commits

...

8 Commits

f53df5440b Remove 'Repo' prefix from backup header display
Simplify the backup section header by removing the 'Repo' prefix and
displaying only the timestamp with a status icon. Repository details
are still shown as sub-items below the timestamp.
2025-12-09 20:32:05 +01:00
d1b0e2c431 Add kB unit support for backup repository sizes
Extended size formatting to handle repositories smaller than 1MB by displaying in kB units. Size display logic now cascades: kB for < 1MB, MB for 1MB-1GB, GB for >= 1GB.
2025-12-09 19:51:03 +01:00
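
The cascade is easiest to see as a small helper. A minimal sketch mirroring the size logic in the widget diff further down (the real code inlines this in render_backup, keyed off repo_size_gb):

```rust
/// Sketch of the size cascade; `size_gb` is the repository size in GB,
/// as stored in BackupRepositoryData::repo_size_gb.
fn format_repo_size(size_gb: f32) -> String {
    if size_gb < 0.001 {
        // Below roughly 1MB: report kilobytes
        format!("{:.0}kB", size_gb * 1024.0 * 1024.0)
    } else if size_gb < 1.0 {
        // 1MB up to 1GB: report megabytes
        format!("{:.0}MB", size_gb * 1024.0)
    } else {
        // 1GB and above: report gigabytes with one decimal
        format!("{:.1}GB", size_gb)
    }
}
```
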
b1719a60fc Use nfs-backup.toml and support completed status
Update agent to read nfs-backup.toml instead of legacy backup-status-*.toml
files. Add support for 'completed' status string used by backup script.

Changes:
- Read nfs-backup.toml from status directory
- Match 'completed' status as Status::Ok
- Simplify file scanning logic for single NFS backup file
2025-12-09 19:37:01 +01:00
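
The status mapping is a one-line change in the collector's match: 'completed' simply joins 'success', as the collector diff below shows.

```rust
use cm_dashboard_shared::Status;

// Status mapping as it appears in the collector diff below; the backup
// script writes "completed", which now maps to Ok alongside "success".
fn calculate_backup_status(status_str: &str) -> Status {
    match status_str.to_lowercase().as_str() {
        "success" | "completed" => Status::Ok,
        "warning" => Status::Warning,
        "failed" | "error" => Status::Critical,
        _ => Status::Unknown,
    }
}
```
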
d922e8d6f3 Restructure backup display to show per-repository metrics
Remove disk-based backup display and implement repository-centric view
with per-repo archive counts and sizes. Backup now uses NFS storage
instead of direct disk monitoring.

Changes:
- Remove BackupDiskData, add BackupRepositoryData structure
- Display format: "Repo <timestamp>" with per-repo details
- Show archive count and size (MB/GB) for each repository
- Agent aggregates repo data from backup status TOML files
- Dashboard renders repo list with individual status indicators
2025-12-09 19:22:51 +01:00
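
The shape of the new data path, condensed from the shared-types and collector diffs below: one record per repository, aggregated into a map keyed by name, then sorted into a Vec so the dashboard gets a deterministic ordering.

```rust
use cm_dashboard_shared::Status;
use std::collections::HashMap;

// Per-repository record from cm-dashboard-shared (derives omitted in this sketch).
pub struct BackupRepositoryData {
    pub name: String,
    pub archive_count: i64,
    pub repo_size_gb: f32,
    pub status: Status,
}

// Aggregation pattern the agent uses: last insert per name wins, then a
// sort by name gives stable display order across refreshes.
fn sorted_repositories(
    repo_map: HashMap<String, BackupRepositoryData>,
) -> Vec<BackupRepositoryData> {
    let mut repositories: Vec<_> = repo_map.into_values().collect();
    repositories.sort_by(|a, b| a.name.cmp(&b.name));
    repositories
}
```
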
407bc9dbc2 Reduce CPU usage with conditional rendering
Implement event-driven rendering to dramatically reduce CPU usage.
Only render when something actually changes instead of constantly
rendering at 20 FPS.

Changes:
- Increase poll timeout from 50ms to 200ms (5 FPS)
- Add needs_render flag to track when rendering is required
- Trigger rendering only on: user input, new metrics, heartbeat
  checks, or terminal resize events
- Reset render flag after each render cycle

Based on cm-player optimization approach.
2025-12-09 11:56:40 +01:00
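
The loop structure, reduced to its render-gating core. This is a minimal sketch with hypothetical stubs (input_pending, new_metrics, draw_ui) standing in for the real crossterm/ZMQ/ratatui calls shown in the dashboard diff below:

```rust
use std::time::{Duration, Instant};

// Hypothetical stand-ins for the dashboard's real input, metrics and draw calls.
fn input_pending(timeout: Duration) -> bool { std::thread::sleep(timeout); false }
fn new_metrics() -> bool { false }
fn draw_ui() { println!("render"); }

fn main() {
    let mut needs_render = true; // First pass always draws
    let mut last_heartbeat = Instant::now();
    loop {
        // Block up to 200ms waiting for input (was 50ms): at most ~5 wakeups/s when idle
        if input_pending(Duration::from_millis(200)) {
            needs_render = true; // User input changes state
        }
        if new_metrics() {
            needs_render = true; // Fresh data arrived from an agent
        }
        if last_heartbeat.elapsed() >= Duration::from_secs(1) {
            last_heartbeat = Instant::now();
            needs_render = true; // Host connectivity may have changed
        }
        if needs_render {
            draw_ui(); // Render only when something changed
            needs_render = false; // Reset flag after each render cycle
        }
    }
}
```
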
3c278351c9 Filter out current host from Tailscale peer list
Skip the first line of tailscale status output, which is always the
current host showing as idle. Add an additional hostname check to
prevent the current host from appearing in the peer list. Only display
actual remote peers with their connection methods.
2025-12-09 10:47:18 +01:00
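
Condensed to a sketch, the filter applies two checks: positional (line 0 of tailscale status is always the local node) and by hostname, as a guard in case the output format changes. The helper name remote_peer_lines is illustrative, not from the codebase:

```rust
// Returns only lines describing remote peers; `current_hostname` is the
// local machine's name (the real code obtains it via the gethostname crate).
fn remote_peer_lines<'a>(status_output: &'a str, current_hostname: &str) -> Vec<&'a str> {
    status_output
        .lines()
        .enumerate()
        .filter(|(idx, line)| {
            if *idx == 0 {
                return false; // First line is always the current host
            }
            // Second whitespace-separated column is the hostname
            line.split_whitespace().nth(1) != Some(current_hostname)
        })
        .map(|(_, line)| line)
        .collect()
}
```
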
8da4522d85 Fix Tailscale peer detection by parsing text output
Replace JSON parsing with simpler text output parsing of the tailscale
status command. The text format clearly shows the hostname and connection
method (direct/relay/idle), making detection more reliable.

Fixes issues with an incorrect hostname (localhost instead of the actual
name) and incorrect connection method detection (showing relay when
actually using a direct connection).
2025-12-09 10:34:55 +01:00
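
The classification itself is a keyword scan over everything after the OS column; a sketch mirroring the cascade in the collector diff below (offline and unrecognized peers yield None and are skipped):

```rust
// status_parts = whitespace-split columns after IP/hostname/user/OS,
// e.g. ["active;", "direct", "192.168.30.227:53757"].
fn classify_connection(status_parts: &[&str]) -> Option<&'static str> {
    let status_str = status_parts.join(" ");
    if status_str.contains("offline") {
        None // Offline peers are not listed
    } else if status_str.contains("direct") {
        Some("direct")
    } else if status_str.contains("relay") {
        Some("relay")
    } else if status_str.contains("idle") {
        Some("idle")
    } else if status_str.contains("active") {
        Some("active")
    } else {
        None // Unknown status: skip
    }
}
```
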
5b1e39cfca Show all connected Tailscale peers with connection methods
Replace single connection method display with individual sub-service
rows for each online Tailscale peer. Each peer shows hostname and
connection type (direct, relay, or idle) allowing monitoring of all
connected devices and their connection quality.

Query tailscale status --json to enumerate all online peers and display
each as a separate sub-service under tailscaled.
2025-12-09 08:35:15 +01:00
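
Each peer then becomes one sub-service row under tailscaled. A trimmed, runnable sketch of the row construction (SubServiceData and Status are simplified stand-ins here; the real shared types also carry metrics, and the "nas" peer is a made-up example):

```rust
#[derive(Debug)]
enum Status { Info }

// Simplified stand-in for the shared SubServiceData type.
#[derive(Debug)]
struct SubServiceData {
    name: String,
    service_status: Status,
    service_type: String,
}

fn main() {
    // (hostname, connection_method) pairs as produced by the text parser
    let peers = vec![
        ("wslbox".to_string(), "direct".to_string()),
        ("nas".to_string(), "relay".to_string()),
    ];
    let mut sub_services = Vec::new();
    for (peer_name, conn_method) in peers {
        sub_services.push(SubServiceData {
            name: format!("{}: {}", peer_name, conn_method), // e.g. "wslbox: direct"
            service_status: Status::Info,
            service_type: "tailscale_peer".to_string(),
        });
    }
    println!("{:?}", sub_services);
}
```
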
9 changed files with 178 additions and 280 deletions

Cargo.lock (generated)

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 [[package]]
 name = "cm-dashboard"
-version = "0.1.261"
+version = "0.1.269"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.261"
+version = "0.1.269"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -325,7 +325,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.261"
+version = "0.1.269"
 dependencies = [
  "chrono",
  "serde",

Cargo.toml (cm-dashboard-agent)

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.262"
+version = "0.1.270"
 edition = "2021"

 [dependencies]

Backup collector source (cm-dashboard-agent)

@@ -1,7 +1,7 @@
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData, Status};
+use cm_dashboard_shared::{AgentData, BackupData, BackupRepositoryData, Status};
 use serde::{Deserialize, Serialize};
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
 use std::fs;
 use std::path::{Path, PathBuf};
 use tracing::{debug, warn};
@@ -21,7 +21,7 @@ impl BackupCollector {
         }
     }

-    /// Scan directory for all backup status files
+    /// Scan directory for backup status file (nfs-backup.toml)
     async fn scan_status_files(&self) -> Result<Vec<PathBuf>, CollectorError> {
         let status_path = Path::new(&self.status_dir);
@@ -30,30 +30,15 @@ impl BackupCollector {
             return Ok(Vec::new());
         }

-        let mut status_files = Vec::new();
-        match fs::read_dir(status_path) {
-            Ok(entries) => {
-                for entry in entries {
-                    if let Ok(entry) = entry {
-                        let path = entry.path();
-                        if path.is_file() {
-                            if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
-                                if filename.starts_with("backup-status-") && filename.ends_with(".toml") {
-                                    status_files.push(path);
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-            Err(e) => {
-                warn!("Failed to read backup status directory: {}", e);
-                return Ok(Vec::new());
-            }
-        }
+        // Look for nfs-backup.toml (new NFS-based backup)
+        let nfs_backup_file = status_path.join("nfs-backup.toml");
+        if nfs_backup_file.exists() {
+            return Ok(vec![nfs_backup_file]);
+        }

-        Ok(status_files)
+        // No backup status file found
+        debug!("No nfs-backup.toml found in {}", self.status_dir);
+        Ok(Vec::new())
     }

     /// Read a single backup status file
@@ -76,24 +61,13 @@ impl BackupCollector {
     /// Calculate backup status from TOML status field
     fn calculate_backup_status(status_str: &str) -> Status {
         match status_str.to_lowercase().as_str() {
-            "success" => Status::Ok,
+            "success" | "completed" => Status::Ok,
             "warning" => Status::Warning,
             "failed" | "error" => Status::Critical,
             _ => Status::Unknown,
         }
     }

-    /// Calculate usage status from disk usage percentage
-    fn calculate_usage_status(usage_percent: f32) -> Status {
-        if usage_percent < 80.0 {
-            Status::Ok
-        } else if usage_percent < 90.0 {
-            Status::Warning
-        } else {
-            Status::Critical
-        }
-    }

     /// Convert BackupStatusToml to BackupData and populate AgentData
     async fn populate_backup_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
         let status_files = self.scan_status_files().await?;
@@ -101,76 +75,47 @@ impl BackupCollector {
         if status_files.is_empty() {
             debug!("No backup status files found");
             agent_data.backup = BackupData {
+                last_backup_time: None,
+                backup_status: Status::Unknown,
                 repositories: Vec::new(),
-                repository_status: Status::Unknown,
-                disks: Vec::new(),
             };
             return Ok(());
         }

-        let mut all_repositories = HashSet::new();
-        let mut disks = Vec::new();
+        // Aggregate repository data across all backup status files
+        let mut repo_map: HashMap<String, BackupRepositoryData> = HashMap::new();
         let mut worst_status = Status::Ok;
+        let mut latest_backup_time: Option<String> = None;

         for status_file in status_files {
             match self.read_status_file(&status_file).await {
                 Ok(backup_status) => {
-                    // Collect all service names
-                    for service_name in backup_status.services.keys() {
-                        all_repositories.insert(service_name.clone());
-                    }
-
                     // Calculate backup status
                     let backup_status_enum = Self::calculate_backup_status(&backup_status.status);
+                    worst_status = worst_status.max(backup_status_enum);

-                    // Calculate usage status from disk space
-                    let (usage_percent, used_gb, total_gb, usage_status) = if let Some(disk_space) = &backup_status.disk_space {
-                        let usage_pct = disk_space.usage_percent as f32;
-                        (
-                            usage_pct,
-                            disk_space.used_gb as f32,
-                            disk_space.total_gb as f32,
-                            Self::calculate_usage_status(usage_pct),
-                        )
-                    } else {
-                        (0.0, 0.0, 0.0, Status::Unknown)
-                    };
+                    // Track latest backup time
+                    if latest_backup_time.is_none() || Some(&backup_status.start_time) > latest_backup_time.as_ref() {
+                        latest_backup_time = Some(backup_status.start_time.clone());
+                    }

-                    // Update worst status
-                    worst_status = worst_status.max(backup_status_enum).max(usage_status);
+                    // Process each service in this backup
+                    for (service_name, service_status) in backup_status.services {
+                        // Convert bytes to GB
+                        let repo_size_gb = service_status.repo_size_bytes as f32 / 1_073_741_824.0;

-                    // Build service list for this disk
-                    let services: Vec<String> = backup_status.services.keys().cloned().collect();
+                        // Calculate service status
+                        let service_status_enum = Self::calculate_backup_status(&service_status.status);
+                        worst_status = worst_status.max(service_status_enum);

-                    // Get min and max archive counts to detect inconsistencies
-                    let archives_min: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .min()
-                        .unwrap_or(0);
-                    let archives_max: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .max()
-                        .unwrap_or(0);
-
-                    // Create disk data
-                    let disk_data = BackupDiskData {
-                        serial: backup_status.disk_serial_number.unwrap_or_else(|| "Unknown".to_string()),
-                        product_name: backup_status.disk_product_name,
-                        wear_percent: backup_status.disk_wear_percent,
-                        temperature_celsius: None, // Not available in current TOML
-                        last_backup_time: Some(backup_status.start_time),
-                        backup_status: backup_status_enum,
-                        disk_usage_percent: usage_percent,
-                        disk_used_gb: used_gb,
-                        disk_total_gb: total_gb,
-                        usage_status,
-                        services,
-                        archives_min,
-                        archives_max,
-                    };
-                    disks.push(disk_data);
+                        // Update or insert repository data
+                        repo_map.insert(service_name.clone(), BackupRepositoryData {
+                            name: service_name,
+                            archive_count: service_status.archive_count,
+                            repo_size_gb,
+                            status: service_status_enum,
+                        });
+                    }
                 }
                 Err(e) => {
                     warn!("Failed to read backup status file {:?}: {}", status_file, e);
@@ -178,12 +123,14 @@ impl BackupCollector {
                 }
             }
         }

-        let repositories: Vec<String> = all_repositories.into_iter().collect();
+        // Convert HashMap to sorted Vec
+        let mut repositories: Vec<BackupRepositoryData> = repo_map.into_values().collect();
+        repositories.sort_by(|a, b| a.name.cmp(&b.name));

         agent_data.backup = BackupData {
+            last_backup_time: latest_backup_time,
+            backup_status: worst_status,
             repositories,
-            repository_status: worst_status,
-            disks,
         };

         Ok(())

Systemd collector source (cm-dashboard-agent)

@@ -217,14 +217,15 @@ impl SystemdCollector {
         }

         if service_name == "tailscaled" && status_info.active_state == "active" {
-            // Add Tailscale connection method as sub-service
-            if let Some(conn_method) = self.get_tailscale_connection_method() {
+            // Add Tailscale peers with their connection methods as sub-services
+            let peers = self.get_tailscale_peers();
+            for (peer_name, conn_method) in peers {
                 let metrics = Vec::new();
                 sub_services.push(SubServiceData {
-                    name: format!("Connection: {}", conn_method),
+                    name: format!("{}: {}", peer_name, conn_method),
                     service_status: Status::Info,
                     metrics,
-                    service_type: "tailscale_connection".to_string(),
+                    service_type: "tailscale_peer".to_string(),
                 });
             }
         }
@@ -936,50 +937,77 @@ impl SystemdCollector {
         None
     }

-    /// Get Tailscale connection method (direct, relay, or proxy)
-    fn get_tailscale_connection_method(&self) -> Option<String> {
+    /// Get Tailscale connected peers with their connection methods
+    /// Returns a list of (device_name, connection_method) tuples
+    fn get_tailscale_peers(&self) -> Vec<(String, String)> {
         match Command::new("timeout")
-            .args(["2", "tailscale", "status", "--json"])
+            .args(["2", "tailscale", "status"])
            .output()
        {
            Ok(output) if output.status.success() => {
-                let json_str = String::from_utf8_lossy(&output.stdout);
-
-                if let Ok(json_data) = serde_json::from_str::<serde_json::Value>(&json_str) {
-                    // Look for the self peer (current node) in the peer list
-                    if let Some(peers) = json_data["Peer"].as_object() {
-                        // Find the first active peer connection to determine connection method
-                        for (_peer_id, peer_data) in peers {
-                            if peer_data["Active"].as_bool().unwrap_or(false) {
-                                // Check if using relay
-                                let relay_node = peer_data["Relay"].as_str().unwrap_or("");
-                                if !relay_node.is_empty() {
-                                    return Some("relay".to_string());
-                                }
-
-                                // Check if using direct connection
-                                if let Some(endpoints) = peer_data["CurAddr"].as_str() {
-                                    if !endpoints.is_empty() {
-                                        return Some("direct".to_string());
-                                    }
-                                }
-                            }
-                        }
-                    }
-
-                    // Check if using proxy from backend state
-                    if let Some(backend_state) = json_data["BackendState"].as_str() {
-                        if backend_state == "Running" {
-                            // If we're running but have no direct or relay, might be proxy
-                            // This is a fallback heuristic
-                            return Some("unknown".to_string());
-                        }
-                    }
-                }
-                None
+                let status_output = String::from_utf8_lossy(&output.stdout);
+                let mut peers = Vec::new();
+
+                // Get current hostname to filter it out
+                let current_hostname = gethostname::gethostname()
+                    .to_string_lossy()
+                    .to_string();
+
+                // Parse tailscale status output
+                // Format: IP hostname user os status
+                // Example: 100.110.98.3 wslbox cm@ linux active; direct 192.168.30.227:53757
+                // Note: First line is always the current host, skip it
+                for (idx, line) in status_output.lines().enumerate() {
+                    if idx == 0 {
+                        continue; // Skip first line (current host)
+                    }
+
+                    let parts: Vec<&str> = line.split_whitespace().collect();
+                    if parts.len() < 5 {
+                        continue; // Skip invalid lines
+                    }
+
+                    // parts[0] = IP
+                    // parts[1] = hostname
+                    // parts[2] = user
+                    // parts[3] = OS
+                    // parts[4+] = status (e.g., "active;", "direct", "192.168.30.227:53757" or "idle;" or "offline")
+                    let hostname = parts[1];
+
+                    // Skip if this is the current host (double-check in case format changes)
+                    if hostname == current_hostname {
+                        continue;
+                    }
+
+                    let status_parts = &parts[4..];
+
+                    // Determine connection method from status
+                    let connection_method = if status_parts.is_empty() {
+                        continue; // Skip if no status
+                    } else {
+                        let status_str = status_parts.join(" ");
+                        if status_str.contains("offline") {
+                            continue; // Skip offline peers
+                        } else if status_str.contains("direct") {
+                            "direct"
+                        } else if status_str.contains("relay") {
+                            "relay"
+                        } else if status_str.contains("idle") {
+                            "idle"
+                        } else if status_str.contains("active") {
+                            "active"
+                        } else {
+                            continue; // Skip unknown status
+                        }
+                    };
+
+                    peers.push((hostname.to_string(), connection_method.to_string()));
+                }
+                peers
             }
-            _ => None,
+            _ => Vec::new(),
         }
     }

Cargo.toml (cm-dashboard)

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.262"
+version = "0.1.270"
 edition = "2021"

 [dependencies]

Dashboard source (cm-dashboard)

@@ -138,11 +138,12 @@ impl Dashboard {
         let metrics_check_interval = Duration::from_millis(100); // Check for metrics every 100ms
         let mut last_heartbeat_check = Instant::now();
         let heartbeat_check_interval = Duration::from_secs(1); // Check for host connectivity every 1 second
+        let mut needs_render = true; // Track if we need to render

         loop {
             // Handle terminal events (keyboard and mouse input) only if not headless
             if !self.headless {
-                match event::poll(Duration::from_millis(50)) {
+                match event::poll(Duration::from_millis(200)) {
                     Ok(true) => {
                         match event::read() {
                             Ok(event) => {
@@ -152,6 +153,7 @@ impl Dashboard {
                                 // Handle keyboard input
                                 match tui_app.handle_input(event) {
                                     Ok(_) => {
+                                        needs_render = true;
                                         // Check if we should quit
                                         if tui_app.should_quit() {
                                             info!("Quit requested, exiting dashboard");
@@ -168,10 +170,11 @@ impl Dashboard {
                                     if let Err(e) = self.handle_mouse_event(mouse_event) {
                                         error!("Error handling mouse event: {}", e);
                                     }
+                                    needs_render = true;
                                 }
                                 Event::Resize(_width, _height) => {
-                                    // Terminal was resized - just continue and re-render
-                                    // The next render will automatically use the new size
+                                    // Terminal was resized - mark for re-render
+                                    needs_render = true;
                                 }
                                 _ => {}
                             }
@@ -189,38 +192,6 @@ impl Dashboard {
                     break;
                 }
             }

-            // Render UI immediately after handling input for responsive feedback
-            if let Some(ref mut terminal) = self.terminal {
-                if let Some(ref mut tui_app) = self.tui_app {
-                    // Clear and autoresize terminal to handle any resize events
-                    if let Err(e) = terminal.autoresize() {
-                        warn!("Error autoresizing terminal: {}", e);
-                    }
-
-                    // Check minimum terminal size to prevent panics
-                    let size = terminal.size().unwrap_or_default();
-                    if size.width < 90 || size.height < 15 {
-                        // Terminal too small, show error message
-                        let msg_text = format!("Terminal too small\n\nMinimum: 90x15\nCurrent: {}x{}", size.width, size.height);
-                        let _ = terminal.draw(|frame| {
-                            use ratatui::widgets::{Paragraph, Block, Borders};
-                            use ratatui::layout::Alignment;
-                            let msg = Paragraph::new(msg_text.clone())
-                                .alignment(Alignment::Center)
-                                .block(Block::default().borders(Borders::ALL));
-                            frame.render_widget(msg, frame.size());
-                        });
-                    } else if let Err(e) = terminal.draw(|frame| {
-                        let (title_area, system_area, services_area) = tui_app.render(frame, &self.metric_store);
-                        self.title_area = title_area;
-                        self.system_area = system_area;
-                        self.services_area = services_area;
-                    }) {
-                        error!("Error rendering TUI after input: {}", e);
-                    }
-                }
-            }

             // Check for new metrics
@@ -259,8 +230,10 @@ impl Dashboard {
                 if let Some(ref mut tui_app) = self.tui_app {
                     tui_app.update_metrics(&mut self.metric_store);
                 }
+                needs_render = true; // New metrics received, need to render
             }

             // Also check for command output messages
             if let Ok(Some(cmd_output)) = self.zmq_consumer.receive_command_output().await {
                 debug!(
@@ -271,26 +244,27 @@ impl Dashboard {
                     // Command output (terminal popup removed - output not displayed)
                 }

                 last_metrics_check = Instant::now();
             }

             // Check for host connectivity changes (heartbeat timeouts) periodically
             if last_heartbeat_check.elapsed() >= heartbeat_check_interval {
                 let timeout = Duration::from_secs(self.config.zmq.heartbeat_timeout_seconds);

                 // Clean up metrics for offline hosts
                 self.metric_store.cleanup_offline_hosts(timeout);

                 if let Some(ref mut tui_app) = self.tui_app {
                     let connected_hosts = self.metric_store.get_connected_hosts(timeout);
                     tui_app.update_hosts(connected_hosts);
                 }
                 last_heartbeat_check = Instant::now();
+                needs_render = true; // Heartbeat check happened, may have changed hosts
             }

-            // Render TUI (only if not headless)
-            if !self.headless {
+            // Render TUI only when needed (not headless and something changed)
+            if !self.headless && needs_render {
                 if let Some(ref mut terminal) = self.terminal {
                     if let Some(ref mut tui_app) = self.tui_app {
                         // Clear and autoresize terminal to handle any resize events
@@ -322,10 +296,8 @@ impl Dashboard {
                         }
                     }
                 }
+                needs_render = false; // Reset flag after rendering
             }

-            // Small sleep to prevent excessive CPU usage
-            tokio::time::sleep(Duration::from_millis(10)).await;
         }

         info!("Dashboard main loop ended");

System widget source (cm-dashboard)

@@ -44,9 +44,9 @@ pub struct SystemWidget {
     storage_pools: Vec<StoragePool>,

     // Backup metrics
-    backup_repositories: Vec<String>,
-    backup_repository_status: Status,
-    backup_disks: Vec<cm_dashboard_shared::BackupDiskData>,
+    backup_last_time: Option<String>,
+    backup_status: Status,
+    backup_repositories: Vec<cm_dashboard_shared::BackupRepositoryData>,

     // Overall status
     has_data: bool,
@@ -112,9 +112,9 @@ impl SystemWidget {
             tmp_status: Status::Unknown,
             tmpfs_mounts: Vec::new(),
             storage_pools: Vec::new(),
+            backup_last_time: None,
+            backup_status: Status::Unknown,
             backup_repositories: Vec::new(),
-            backup_repository_status: Status::Unknown,
-            backup_disks: Vec::new(),
             has_data: false,
             scroll_offset: 0,
             last_viewport_height: 0,
@@ -221,9 +221,9 @@ impl Widget for SystemWidget {
         // Extract backup data
         let backup = &agent_data.backup;
+        self.backup_last_time = backup.last_backup_time.clone();
+        self.backup_status = backup.backup_status;
         self.backup_repositories = backup.repositories.clone();
-        self.backup_repository_status = backup.repository_status;
-        self.backup_disks = backup.disks.clone();

         // Clamp scroll offset to valid range after update
         // This prevents scroll issues when switching between hosts
@@ -533,79 +533,42 @@ impl SystemWidget {
     fn render_backup(&self) -> Vec<Line<'_>> {
         let mut lines = Vec::new();

-        // First section: Repository status and list
-        if !self.backup_repositories.is_empty() {
-            let repo_text = format!("Repo: {}", self.backup_repositories.len());
-            let repo_spans = StatusIcons::create_status_spans(self.backup_repository_status, &repo_text);
-            lines.push(Line::from(repo_spans));
-
-            // List all repositories (sorted for consistent display)
-            let mut sorted_repos = self.backup_repositories.clone();
-            sorted_repos.sort();
-            let repo_count = sorted_repos.len();
-            for (idx, repo) in sorted_repos.iter().enumerate() {
-                let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
-                lines.push(Line::from(vec![
-                    Span::styled(format!(" {} ", tree_char), Typography::tree()),
-                    Span::styled(repo.clone(), Typography::secondary()),
-                ]));
-            }
-        }
-
-        // Second section: Per-disk backup information (sorted by serial for consistent display)
-        let mut sorted_disks = self.backup_disks.clone();
-        sorted_disks.sort_by(|a, b| a.serial.cmp(&b.serial));
-        for disk in &sorted_disks {
-            let truncated_serial = truncate_serial(&disk.serial);
-            let mut details = Vec::new();
-
-            if let Some(temp) = disk.temperature_celsius {
-                details.push(format!("T: {}°C", temp as i32));
-            }
-            if let Some(wear) = disk.wear_percent {
-                details.push(format!("W: {}%", wear as i32));
-            }
-
-            let disk_text = if !details.is_empty() {
-                format!("{} {}", truncated_serial, details.join(" "))
-            } else {
-                truncated_serial
-            };
-
-            // Overall disk status (worst of backup and usage)
-            let disk_status = disk.backup_status.max(disk.usage_status);
-            let disk_spans = StatusIcons::create_status_spans(disk_status, &disk_text);
-            lines.push(Line::from(disk_spans));
-
-            // Show backup time with status
-            if let Some(backup_time) = &disk.last_backup_time {
-                let time_text = format!("Backup: {}", backup_time);
-                let mut time_spans = vec![
-                    Span::styled(" ├─ ", Typography::tree()),
-                ];
-                time_spans.extend(StatusIcons::create_status_spans(disk.backup_status, &time_text));
-                lines.push(Line::from(time_spans));
-            }
-
-            // Show usage with status and archive count
-            let archive_display = if disk.archives_min == disk.archives_max {
-                format!("{}", disk.archives_min)
-            } else {
-                format!("{}-{}", disk.archives_min, disk.archives_max)
-            };
-            let usage_text = format!(
-                "Usage: ({}) {:.0}% {:.0}GB/{:.0}GB",
-                archive_display,
-                disk.disk_usage_percent,
-                disk.disk_used_gb,
-                disk.disk_total_gb
-            );
-            let mut usage_spans = vec![
-                Span::styled(" └─ ", Typography::tree()),
-            ];
-            usage_spans.extend(StatusIcons::create_status_spans(disk.usage_status, &usage_text));
-            lines.push(Line::from(usage_spans));
-        }
+        if self.backup_repositories.is_empty() {
+            return lines;
+        }
+
+        // Format backup time (use complete timestamp)
+        let time_display = if let Some(ref time_str) = self.backup_last_time {
+            time_str.clone()
+        } else {
+            "unknown".to_string()
+        };
+
+        // Header: just the timestamp
+        let repo_spans = StatusIcons::create_status_spans(self.backup_status, &time_display);
+        lines.push(Line::from(repo_spans));
+
+        // List all repositories with archive count and size
+        let repo_count = self.backup_repositories.len();
+        for (idx, repo) in self.backup_repositories.iter().enumerate() {
+            let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
+
+            // Format size: use kB for < 1MB, MB for < 1GB, otherwise GB
+            let size_display = if repo.repo_size_gb < 0.001 {
+                format!("{:.0}kB", repo.repo_size_gb * 1024.0 * 1024.0)
+            } else if repo.repo_size_gb < 1.0 {
+                format!("{:.0}MB", repo.repo_size_gb * 1024.0)
+            } else {
+                format!("{:.1}GB", repo.repo_size_gb)
+            };
+
+            let repo_text = format!("{} ({}) {}", repo.name, repo.archive_count, size_display);
+            let mut repo_spans = vec![
+                Span::styled(format!(" {} ", tree_char), Typography::tree()),
+            ];
+            repo_spans.extend(StatusIcons::create_status_spans(repo.status, &repo_text));
+            lines.push(Line::from(repo_spans));
+        }

         lines
@@ -876,13 +839,10 @@ impl SystemWidget {
         }

         // Backup section
-        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
-            count += 1; // Header
-            if !self.backup_repositories.is_empty() {
-                count += 1; // Repo header
-                count += self.backup_repositories.len();
-            }
-            count += self.backup_disks.len() * 3; // Each disk has 3 lines
+        if !self.backup_repositories.is_empty() {
+            count += 1; // Header: "Backup:"
+            count += 1; // Repo count and timestamp header
+            count += self.backup_repositories.len(); // Individual repos
         }

         count
@@ -988,7 +948,7 @@ impl SystemWidget {
         lines.extend(storage_lines);

         // Backup section (if available)
-        if !self.backup_repositories.is_empty() || !self.backup_disks.is_empty() {
+        if !self.backup_repositories.is_empty() {
             lines.push(Line::from(vec![
                 Span::styled("Backup:", Typography::widget_title())
             ]));

Cargo.toml (cm-dashboard-shared)

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.262"
+version = "0.1.270"
 edition = "2021"

 [dependencies]

Shared types source (cm-dashboard-shared)

@@ -182,27 +182,18 @@ pub struct SubServiceMetric {
 /// Backup system data
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct BackupData {
-    pub repositories: Vec<String>,
-    pub repository_status: Status,
-    pub disks: Vec<BackupDiskData>,
-}
-
-/// Backup repository disk information
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct BackupDiskData {
-    pub serial: String,
-    pub product_name: Option<String>,
-    pub wear_percent: Option<f32>,
-    pub temperature_celsius: Option<f32>,
-    pub last_backup_time: Option<String>,
-    pub backup_status: Status,
-    pub disk_usage_percent: f32,
-    pub disk_used_gb: f32,
-    pub disk_total_gb: f32,
-    pub usage_status: Status,
-    pub services: Vec<String>,
-    pub archives_min: i64,
-    pub archives_max: i64,
+    pub last_backup_time: Option<String>,
+    pub backup_status: Status,
+    pub repositories: Vec<BackupRepositoryData>,
+}
+
+/// Individual backup repository information
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackupRepositoryData {
+    pub name: String,
+    pub archive_count: i64,
+    pub repo_size_gb: f32,
+    pub status: Status,
 }

 impl AgentData {
@@ -245,9 +236,9 @@ impl AgentData {
             },
             services: Vec::new(),
             backup: BackupData {
+                last_backup_time: None,
+                backup_status: Status::Unknown,
                 repositories: Vec::new(),
-                repository_status: Status::Unknown,
-                disks: Vec::new(),
             },
         }
     }