Integrate backup metrics into system widget with enhanced disk monitoring
All checks were successful
Build and Release / build-and-release (push) Successful in 2m5s

Replace standalone backup widget with compact backup section in system widget displaying disk serial, temperature, wear level, timing, and usage information.

Changes:
- Remove standalone backup widget and integrate into system widget
- Update backup collector to read TOML format from backup script
- Add BackupDiskData structure with serial, usage, temperature, wear fields
- Implement compact backup display matching specification format
- Add time formatting utilities for backup timing display
- Update backup data extraction from TOML with disk space parsing

Version bump to v0.1.149
This commit is contained in:
2025-11-24 23:55:35 +01:00
parent d89b3ac881
commit da37e28b6a
11 changed files with 293 additions and 490 deletions

View File

@@ -1,13 +1,15 @@
use async_trait::async_trait;
use cm_dashboard_shared::{AgentData, BackupData};
use chrono;
use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use tracing::debug;
use super::{Collector, CollectorError};
/// Backup collector that reads backup status from JSON files with structured data output
/// Backup collector that reads backup status from TOML files with structured data output
pub struct BackupCollector {
/// Path to backup status file
status_file_path: String,
@@ -16,12 +18,12 @@ pub struct BackupCollector {
impl BackupCollector {
pub fn new() -> Self {
Self {
status_file_path: "/var/lib/backup/status.json".to_string(),
status_file_path: "/var/lib/backup/backup-status.toml".to_string(),
}
}
/// Read backup status from JSON file
async fn read_backup_status(&self) -> Result<Option<BackupStatus>, CollectorError> {
/// Read backup status from TOML file
async fn read_backup_status(&self) -> Result<Option<BackupStatusToml>, CollectorError> {
if !Path::new(&self.status_file_path).exists() {
debug!("Backup status file not found: {}", self.status_file_path);
return Ok(None);
@@ -33,24 +35,66 @@ impl BackupCollector {
error: e.to_string(),
})?;
let status: BackupStatus = serde_json::from_str(&content)
let status: BackupStatusToml = toml::from_str(&content)
.map_err(|e| CollectorError::Parse {
value: content.clone(),
error: format!("Failed to parse backup status JSON: {}", e),
error: format!("Failed to parse backup status TOML: {}", e),
})?;
Ok(Some(status))
}
/// Convert BackupStatus to BackupData and populate AgentData
/// Convert BackupStatusToml to BackupData and populate AgentData
async fn populate_backup_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
if let Some(backup_status) = self.read_backup_status().await? {
// Parse start_time to get last_run timestamp
let last_run = if let Ok(parsed_time) = chrono::DateTime::parse_from_str(&backup_status.start_time, "%Y-%m-%d %H:%M:%S UTC") {
Some(parsed_time.timestamp() as u64)
} else {
None
};
// Calculate next_scheduled (if needed - may need to be provided by backup script)
let next_scheduled = None; // TODO: Extract from backup script if available
// Extract disk information
let repository_disk = if let Some(disk_space) = &backup_status.disk_space {
Some(BackupDiskData {
serial: backup_status.disk_serial_number.clone().unwrap_or_else(|| "Unknown".to_string()),
usage_percent: disk_space.usage_percent as f32,
used_gb: disk_space.used_gb as f32,
total_gb: disk_space.total_gb as f32,
wear_percent: backup_status.disk_wear_percent,
temperature_celsius: None, // Not available in current TOML
})
} else if let Some(serial) = &backup_status.disk_serial_number {
// Fallback: create minimal disk info if we have serial but no disk_space
Some(BackupDiskData {
serial: serial.clone(),
usage_percent: 0.0,
used_gb: 0.0,
total_gb: 0.0,
wear_percent: backup_status.disk_wear_percent,
temperature_celsius: None,
})
} else {
None
};
// Calculate total repository size from services
let total_size_gb = backup_status.services
.values()
.map(|service| service.repo_size_bytes as f32 / (1024.0 * 1024.0 * 1024.0))
.sum::<f32>();
let backup_data = BackupData {
status: backup_status.status,
last_run: Some(backup_status.last_run),
next_scheduled: Some(backup_status.next_scheduled),
total_size_gb: Some(backup_status.total_size_gb),
repository_health: Some(backup_status.repository_health),
last_run,
next_scheduled,
total_size_gb: Some(total_size_gb),
repository_health: Some("ok".to_string()), // Derive from status if needed
repository_disk,
last_backup_size_gb: None, // Not available in current TOML format
};
agent_data.backup = backup_data;
@@ -62,6 +106,8 @@ impl BackupCollector {
next_scheduled: None,
total_size_gb: None,
repository_health: None,
repository_disk: None,
last_backup_size_gb: None,
};
}
@@ -77,12 +123,38 @@ impl Collector for BackupCollector {
}
}
/// Backup status structure from JSON file
/// TOML structure for backup status file
#[derive(Debug, Clone, Serialize, Deserialize)]
struct BackupStatus {
pub status: String, // "completed", "running", "failed", etc.
pub last_run: u64, // Unix timestamp
pub next_scheduled: u64, // Unix timestamp
pub total_size_gb: f32, // Total backup size in GB
pub repository_health: String, // "ok", "warning", "error"
struct BackupStatusToml {
pub backup_name: String,
pub start_time: String,
pub current_time: String,
pub duration_seconds: i64,
pub status: String,
pub last_updated: String,
pub disk_space: Option<DiskSpace>,
pub disk_product_name: Option<String>,
pub disk_serial_number: Option<String>,
pub disk_wear_percent: Option<f32>,
pub services: HashMap<String, ServiceStatus>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct DiskSpace {
pub total_bytes: u64,
pub used_bytes: u64,
pub available_bytes: u64,
pub total_gb: f64,
pub used_gb: f64,
pub available_gb: f64,
pub usage_percent: f64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ServiceStatus {
pub status: String,
pub exit_code: i64,
pub repo_path: String,
pub archive_count: i64,
pub repo_size_bytes: u64,
}