Restructure backup display to show per-repository metrics
All checks were successful
Build and Release / build-and-release (push) Successful in 1m15s
Remove disk-based backup display and implement repository-centric view with per-repo archive counts and sizes. Backup now uses NFS storage instead of direct disk monitoring.

Changes:
- Remove BackupDiskData, add BackupRepositoryData structure
- Display format: "Repo <timestamp>" with per-repo details
- Show archive count and size (MB/GB) for each repository
- Agent aggregates repo data from backup status TOML files
- Dashboard renders repo list with individual status indicators
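For orientation, a minimal sketch of the data shapes involved, inferred from how the collector code in the diff below constructs them. The real definitions live in the cm-dashboard-shared crate (not part of this diff), so the derives, the Status variant ordering, and any additional fields are assumptions; only the fields this collector populates are shown.

// Sketch only: shapes inferred from the collector changes in this commit.
// Actual definitions are in cm-dashboard-shared and may differ.
#[derive(Debug, Clone)]
pub struct BackupRepositoryData {
    pub name: String,       // repository/service name taken from the status TOML
    pub archive_count: i64, // number of archives in the repository
    pub repo_size_gb: f32,  // repository size, converted from bytes
    pub status: Status,     // per-repository status indicator
}

#[derive(Debug, Clone)]
pub struct BackupData {
    pub last_backup_time: Option<String>,        // latest start_time across status files
    pub backup_status: Status,                   // worst status seen across all files
    pub repositories: Vec<BackupRepositoryData>, // sorted by name for display
}

// Status variants as used in this diff; the ordering is an assumption chosen so
// that `max()` keeps the most severe value when aggregating.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Status {
    Ok,
    Warning,
    Critical,
    Unknown,
}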
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.266"
+version = "0.1.267"
 edition = "2021"
 
 [dependencies]
@@ -1,7 +1,7 @@
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData, Status};
+use cm_dashboard_shared::{AgentData, BackupData, BackupRepositoryData, Status};
 use serde::{Deserialize, Serialize};
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
 use std::fs;
 use std::path::{Path, PathBuf};
 use tracing::{debug, warn};
@@ -83,17 +83,6 @@ impl BackupCollector {
         }
     }
 
-    /// Calculate usage status from disk usage percentage
-    fn calculate_usage_status(usage_percent: f32) -> Status {
-        if usage_percent < 80.0 {
-            Status::Ok
-        } else if usage_percent < 90.0 {
-            Status::Warning
-        } else {
-            Status::Critical
-        }
-    }
-
     /// Convert BackupStatusToml to BackupData and populate AgentData
     async fn populate_backup_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
         let status_files = self.scan_status_files().await?;
@@ -101,76 +90,47 @@ impl BackupCollector {
         if status_files.is_empty() {
             debug!("No backup status files found");
             agent_data.backup = BackupData {
+                last_backup_time: None,
                 backup_status: Status::Unknown,
                 repositories: Vec::new(),
-                repository_status: Status::Unknown,
-                disks: Vec::new(),
             };
             return Ok(());
         }
 
-        let mut all_repositories = HashSet::new();
-        let mut disks = Vec::new();
+        // Aggregate repository data across all backup status files
+        let mut repo_map: HashMap<String, BackupRepositoryData> = HashMap::new();
         let mut worst_status = Status::Ok;
+        let mut latest_backup_time: Option<String> = None;
+
         for status_file in status_files {
             match self.read_status_file(&status_file).await {
                 Ok(backup_status) => {
-                    // Collect all service names
-                    for service_name in backup_status.services.keys() {
-                        all_repositories.insert(service_name.clone());
-                    }
-
                     // Calculate backup status
                     let backup_status_enum = Self::calculate_backup_status(&backup_status.status);
+                    worst_status = worst_status.max(backup_status_enum);
 
-                    // Calculate usage status from disk space
-                    let (usage_percent, used_gb, total_gb, usage_status) = if let Some(disk_space) = &backup_status.disk_space {
-                        let usage_pct = disk_space.usage_percent as f32;
-                        (
-                            usage_pct,
-                            disk_space.used_gb as f32,
-                            disk_space.total_gb as f32,
-                            Self::calculate_usage_status(usage_pct),
-                        )
-                    } else {
-                        (0.0, 0.0, 0.0, Status::Unknown)
-                    };
+                    // Track latest backup time
+                    if latest_backup_time.is_none() || Some(&backup_status.start_time) > latest_backup_time.as_ref() {
+                        latest_backup_time = Some(backup_status.start_time.clone());
+                    }
 
-                    // Update worst status
-                    worst_status = worst_status.max(backup_status_enum).max(usage_status);
+                    // Process each service in this backup
+                    for (service_name, service_status) in backup_status.services {
+                        // Convert bytes to GB
+                        let repo_size_gb = service_status.repo_size_bytes as f32 / 1_073_741_824.0;
 
-                    // Build service list for this disk
-                    let services: Vec<String> = backup_status.services.keys().cloned().collect();
+                        // Calculate service status
+                        let service_status_enum = Self::calculate_backup_status(&service_status.status);
+                        worst_status = worst_status.max(service_status_enum);
 
-                    // Get min and max archive counts to detect inconsistencies
-                    let archives_min: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .min()
-                        .unwrap_or(0);
-
-                    let archives_max: i64 = backup_status.services.values()
-                        .map(|service| service.archive_count)
-                        .max()
-                        .unwrap_or(0);
-
-                    // Create disk data
-                    let disk_data = BackupDiskData {
-                        serial: backup_status.disk_serial_number.unwrap_or_else(|| "Unknown".to_string()),
-                        product_name: backup_status.disk_product_name,
-                        wear_percent: backup_status.disk_wear_percent,
-                        temperature_celsius: None, // Not available in current TOML
-                        last_backup_time: Some(backup_status.start_time),
-                        backup_status: backup_status_enum,
-                        disk_usage_percent: usage_percent,
-                        disk_used_gb: used_gb,
-                        disk_total_gb: total_gb,
-                        usage_status,
-                        services,
-                        archives_min,
-                        archives_max,
-                    };
-
-                    disks.push(disk_data);
+                        // Update or insert repository data
+                        repo_map.insert(service_name.clone(), BackupRepositoryData {
+                            name: service_name,
+                            archive_count: service_status.archive_count,
+                            repo_size_gb,
+                            status: service_status_enum,
+                        });
+                    }
                 }
                 Err(e) => {
                     warn!("Failed to read backup status file {:?}: {}", status_file, e);
@@ -178,12 +138,14 @@ impl BackupCollector {
             }
         }
 
-        let repositories: Vec<String> = all_repositories.into_iter().collect();
+        // Convert HashMap to sorted Vec
+        let mut repositories: Vec<BackupRepositoryData> = repo_map.into_values().collect();
+        repositories.sort_by(|a, b| a.name.cmp(&b.name));
+
         agent_data.backup = BackupData {
+            last_backup_time: latest_backup_time,
             backup_status: worst_status,
             repositories,
-            repository_status: worst_status,
-            disks,
         };
 
         Ok(())
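The collector above deserializes per-run backup status TOML files and aggregates them into BackupData. A rough sketch of the deserialization side, limited to the fields populate_backup_data() reads in this commit; the actual BackupStatusToml also carries disk metadata (disk_serial_number, disk_product_name, disk_wear_percent, disk_space) that the new display no longer uses, and the exact types and layout here are assumptions.

use serde::Deserialize;
use std::collections::HashMap;

// Hypothetical mirror of the status-file layout; the real definition lives in the agent.
// A file would be parsed with e.g. toml::from_str::<BackupStatusToml>(&contents).
#[derive(Debug, Deserialize)]
struct BackupStatusToml {
    status: String,                               // overall result of the backup run
    start_time: String,                           // feeds the "Repo <timestamp>" display
    services: HashMap<String, ServiceStatusToml>, // one entry per repository
}

#[derive(Debug, Deserialize)]
struct ServiceStatusToml {
    status: String,        // per-service result, mapped to a Status value
    archive_count: i64,    // archives currently in the repository
    repo_size_bytes: u64,  // converted to GB (bytes / 1_073_741_824) for display
}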