Compare commits

4 commits:

| SHA1 |
|---|
| 7b11db990c |
| 67b59e9551 |
| da37e28b6a |
| d89b3ac881 |
@@ -325,6 +325,12 @@ Storage:
 ● nvme0n1 T: 25C W: 4%
 ├─ ● /: 55% 250.5GB/456.4GB
 └─ ● /boot: 26% 0.3GB/1.0GB
+
+Backup:
+● WD-WCC7K1234567 T: 32°C W: 12%
+├─ Last: 2h ago (12.3GB)
+├─ Next: in 22h
+└─ ● Usage: 45% 678GB/1.5TB
 ```
 
 ## Important Communication Guidelines
Cargo.lock (generated, 6 changed lines)
@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"

 [[package]]
 name = "cm-dashboard"
-version = "0.1.146"
+version = "0.1.150"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [

 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.146"
+version = "0.1.150"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -324,7 +324,7 @@ dependencies = [

 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.146"
+version = "0.1.150"
 dependencies = [
  "chrono",
  "serde",
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.147"
+version = "0.1.151"
 edition = "2021"

 [dependencies]
@@ -1,13 +1,15 @@
 use async_trait::async_trait;
-use cm_dashboard_shared::{AgentData, BackupData};
+use chrono::{NaiveDateTime, DateTime};
+use cm_dashboard_shared::{AgentData, BackupData, BackupDiskData};
 use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
 use std::fs;
 use std::path::Path;
 use tracing::debug;

 use super::{Collector, CollectorError};

-/// Backup collector that reads backup status from JSON files with structured data output
+/// Backup collector that reads backup status from TOML files with structured data output
 pub struct BackupCollector {
     /// Path to backup status file
     status_file_path: String,
@@ -16,12 +18,12 @@ pub struct BackupCollector {
 impl BackupCollector {
     pub fn new() -> Self {
         Self {
-            status_file_path: "/var/lib/backup/status.json".to_string(),
+            status_file_path: "/var/lib/backup/backup-status.toml".to_string(),
         }
     }

-    /// Read backup status from JSON file
-    async fn read_backup_status(&self) -> Result<Option<BackupStatus>, CollectorError> {
+    /// Read backup status from TOML file
+    async fn read_backup_status(&self) -> Result<Option<BackupStatusToml>, CollectorError> {
         if !Path::new(&self.status_file_path).exists() {
             debug!("Backup status file not found: {}", self.status_file_path);
             return Ok(None);
@@ -33,24 +35,57 @@ impl BackupCollector {
                 error: e.to_string(),
             })?;

-        let status: BackupStatus = serde_json::from_str(&content)
+        let status: BackupStatusToml = toml::from_str(&content)
             .map_err(|e| CollectorError::Parse {
                 value: content.clone(),
-                error: format!("Failed to parse backup status JSON: {}", e),
+                error: format!("Failed to parse backup status TOML: {}", e),
             })?;

         Ok(Some(status))
     }

-    /// Convert BackupStatus to BackupData and populate AgentData
+    /// Convert BackupStatusToml to BackupData and populate AgentData
     async fn populate_backup_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
         if let Some(backup_status) = self.read_backup_status().await? {
+            // Use raw start_time string from TOML
+
+            // Extract disk information
+            let repository_disk = if let Some(disk_space) = &backup_status.disk_space {
+                Some(BackupDiskData {
+                    serial: backup_status.disk_serial_number.clone().unwrap_or_else(|| "Unknown".to_string()),
+                    usage_percent: disk_space.usage_percent as f32,
+                    used_gb: disk_space.used_gb as f32,
+                    total_gb: disk_space.total_gb as f32,
+                    wear_percent: backup_status.disk_wear_percent,
+                    temperature_celsius: None, // Not available in current TOML
+                })
+            } else if let Some(serial) = &backup_status.disk_serial_number {
+                // Fallback: create minimal disk info if we have serial but no disk_space
+                Some(BackupDiskData {
+                    serial: serial.clone(),
+                    usage_percent: 0.0,
+                    used_gb: 0.0,
+                    total_gb: 0.0,
+                    wear_percent: backup_status.disk_wear_percent,
+                    temperature_celsius: None,
+                })
+            } else {
+                None
+            };
+
+            // Calculate total repository size from services
+            let total_size_gb = backup_status.services
+                .values()
+                .map(|service| service.repo_size_bytes as f32 / (1024.0 * 1024.0 * 1024.0))
+                .sum::<f32>();
+
             let backup_data = BackupData {
                 status: backup_status.status,
-                last_run: Some(backup_status.last_run),
-                next_scheduled: Some(backup_status.next_scheduled),
-                total_size_gb: Some(backup_status.total_size_gb),
-                repository_health: Some(backup_status.repository_health),
+                total_size_gb: Some(total_size_gb),
+                repository_health: Some("ok".to_string()), // Derive from status if needed
+                repository_disk,
+                last_backup_size_gb: None, // Not available in current TOML format
+                start_time_raw: Some(backup_status.start_time),
             };

             agent_data.backup = backup_data;
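The total repository size is now computed by summing per-service byte counts. A standalone sketch of that arithmetic, with invented repo sizes (roughly 12.3 GB plus exactly 2 GiB):

```rust
fn main() {
    // Two hypothetical repos, in bytes; the values are invented for illustration.
    let repo_sizes_bytes: [u64; 2] = [13_207_024_435, 2_147_483_648];
    // Same conversion the collector uses: bytes / 1024^3 per service, summed as f32.
    let total_size_gb: f32 = repo_sizes_bytes
        .iter()
        .map(|&b| b as f32 / (1024.0 * 1024.0 * 1024.0))
        .sum();
    assert!((total_size_gb - 14.3).abs() < 0.1);
}
```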
@@ -58,10 +93,11 @@ impl BackupCollector {
             // No backup status available - set default values
             agent_data.backup = BackupData {
                 status: "unavailable".to_string(),
-                last_run: None,
-                next_scheduled: None,
                 total_size_gb: None,
                 repository_health: None,
+                repository_disk: None,
+                last_backup_size_gb: None,
+                start_time_raw: None,
             };
         }

@@ -77,12 +113,38 @@ impl Collector for BackupCollector {
     }
 }

-/// Backup status structure from JSON file
+/// TOML structure for backup status file
 #[derive(Debug, Clone, Serialize, Deserialize)]
-struct BackupStatus {
-    pub status: String, // "completed", "running", "failed", etc.
-    pub last_run: u64, // Unix timestamp
-    pub next_scheduled: u64, // Unix timestamp
-    pub total_size_gb: f32, // Total backup size in GB
-    pub repository_health: String, // "ok", "warning", "error"
+struct BackupStatusToml {
+    pub backup_name: String,
+    pub start_time: String,
+    pub current_time: String,
+    pub duration_seconds: i64,
+    pub status: String,
+    pub last_updated: String,
+    pub disk_space: Option<DiskSpace>,
+    pub disk_product_name: Option<String>,
+    pub disk_serial_number: Option<String>,
+    pub disk_wear_percent: Option<f32>,
+    pub services: HashMap<String, ServiceStatus>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+struct DiskSpace {
+    pub total_bytes: u64,
+    pub used_bytes: u64,
+    pub available_bytes: u64,
+    pub total_gb: f64,
+    pub used_gb: f64,
+    pub available_gb: f64,
+    pub usage_percent: f64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+struct ServiceStatus {
+    pub status: String,
+    pub exit_code: i64,
+    pub repo_path: String,
+    pub archive_count: i64,
+    pub repo_size_bytes: u64,
 }
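To make the new schema concrete, here is a sketch of a status file that should deserialize into `BackupStatusToml` as declared above. Every value is invented for illustration; only the field names and types come from the structs in the diff:

```rust
#[cfg(test)]
mod toml_schema_tests {
    use super::*;

    #[test]
    fn parses_illustrative_backup_status() {
        // Invented contents mirroring the BackupStatusToml/DiskSpace/ServiceStatus fields.
        let content = r#"
            backup_name = "nightly"
            start_time = "2024-01-01 02:00:00"
            current_time = "2024-01-01 02:05:10"
            duration_seconds = 310
            status = "completed"
            last_updated = "2024-01-01 02:05:10"
            disk_serial_number = "WD-WCC7K1234567"
            disk_wear_percent = 12.0

            [disk_space]
            total_bytes = 1610612736000
            used_bytes = 724775731200
            available_bytes = 885837004800
            total_gb = 1500.0
            used_gb = 675.0
            available_gb = 825.0
            usage_percent = 45.0

            [services.gitea]
            status = "completed"
            exit_code = 0
            repo_path = "/backup/gitea"
            archive_count = 30
            repo_size_bytes = 13207024435
        "#;

        let status: BackupStatusToml = toml::from_str(content).expect("schema should match");
        assert_eq!(status.services["gitea"].archive_count, 30);
        assert!(status.disk_space.is_some());
    }
}
```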
@@ -50,6 +50,7 @@ struct MergerfsPool {
 #[derive(Debug, Clone)]
 struct PoolDrive {
     name: String, // Drive name
+    mount_point: String, // e.g., "/mnt/disk1"
     temperature_celsius: Option<f32>, // Drive temperature
 }

@@ -198,16 +199,80 @@ impl DiskCollector {
     }

     /// Detect MergerFS pools from mount data
-    fn detect_mergerfs_pools(&self, _filesystem_usage: &HashMap<String, (u64, u64)>) -> anyhow::Result<Vec<MergerfsPool>> {
-        let pools = Vec::new();
-
-        // For now, return empty pools - full mergerfs detection would require parsing /proc/mounts for fuse.mergerfs
-        // This ensures we don't break existing functionality
+    fn detect_mergerfs_pools(&self, filesystem_usage: &HashMap<String, (u64, u64)>) -> anyhow::Result<Vec<MergerfsPool>> {
+        let mounts_content = std::fs::read_to_string("/proc/mounts")
+            .map_err(|e| anyhow::anyhow!("Failed to read /proc/mounts: {}", e))?;
+        let mut pools = Vec::new();
+
+        for line in mounts_content.lines() {
+            let parts: Vec<&str> = line.split_whitespace().collect();
+            if parts.len() >= 3 && parts[2] == "fuse.mergerfs" {
+                let mount_point = parts[1].to_string();
+                let device_sources = parts[0]; // e.g., "/mnt/disk1:/mnt/disk2"
+
+                // Get pool usage
+                let (total_bytes, used_bytes) = filesystem_usage.get(&mount_point)
+                    .copied()
+                    .unwrap_or((0, 0));
+
+                // Extract pool name from mount point (e.g., "/srv/media" -> "srv_media")
+                let pool_name = if mount_point == "/" {
+                    "root".to_string()
+                } else {
+                    mount_point.trim_start_matches('/').replace('/', "_")
+                };
+
+                if pool_name.is_empty() {
+                    debug!("Skipping mergerfs pool with empty name: {}", mount_point);
+                    continue;
+                }
+
+                // Parse member paths - handle both full paths and numeric references
+                let raw_paths: Vec<String> = device_sources
+                    .split(':')
+                    .map(|s| s.trim().to_string())
+                    .filter(|s| !s.is_empty())
+                    .collect();
+
+                // Convert numeric references to actual mount points if needed
+                let member_paths = if raw_paths.iter().any(|path| !path.starts_with('/')) {
+                    // Handle numeric format like "1:2" by finding corresponding /mnt/disk* paths
+                    self.resolve_numeric_mergerfs_paths(&raw_paths)?
+                } else {
+                    // Already full paths
+                    raw_paths
+                };
+
+                // For SnapRAID setups, include parity drives that are related to this pool's data drives
+                let mut all_member_paths = member_paths.clone();
+                let related_parity_paths = self.discover_related_parity_drives(&member_paths)?;
+                all_member_paths.extend(related_parity_paths);
+
+                // Categorize as data vs parity drives
+                let (data_drives, parity_drives) = match self.categorize_pool_drives(&all_member_paths) {
+                    Ok(drives) => drives,
+                    Err(e) => {
+                        debug!("Failed to categorize drives for pool {}: {}. Skipping.", mount_point, e);
+                        continue;
+                    }
+                };
+
+                pools.push(MergerfsPool {
+                    name: pool_name,
+                    mount_point,
+                    total_bytes,
+                    used_bytes,
+                    data_drives,
+                    parity_drives,
+                });
+            }
+        }
+
+        debug!("Found {} mergerfs pools", pools.len());
         Ok(pools)
     }

-    /// Group filesystems by physical drive (excluding mergerfs members)
+    /// Group filesystems by physical drive (excluding mergerfs members) - exact old logic
     fn group_by_physical_drive(
         &self,
         mount_devices: &HashMap<String, String>,
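For context, the loop keys off the third whitespace-separated field of each `/proc/mounts` entry. A minimal sketch with an invented mergerfs mount line:

```rust
fn main() {
    // Invented /proc/mounts entry: colon-separated branches, mount point, fs type.
    let line = "/mnt/disk1:/mnt/disk2 /srv/media fuse.mergerfs rw,relatime 0 0";
    let parts: Vec<&str> = line.split_whitespace().collect();
    assert_eq!(parts[2], "fuse.mergerfs"); // the filter the collector applies
    assert_eq!(parts[1], "/srv/media");    // pool mount point

    // Pool name derivation used above: "/srv/media" -> "srv_media".
    let pool_name = parts[1].trim_start_matches('/').replace('/', "_");
    assert_eq!(pool_name, "srv_media");

    // Branch list split used above.
    let branches: Vec<&str> = parts[0].split(':').collect();
    assert_eq!(branches, ["/mnt/disk1", "/mnt/disk2"]);
}
```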
@@ -216,14 +281,14 @@ impl DiskCollector {
     ) -> anyhow::Result<Vec<PhysicalDrive>> {
         let mut drive_groups: HashMap<String, Vec<Filesystem>> = HashMap::new();

-        // Get all mergerfs member paths to exclude them
+        // Get all mergerfs member paths to exclude them - exactly like old code
         let mut mergerfs_members = std::collections::HashSet::new();
         for pool in mergerfs_pools {
             for drive in &pool.data_drives {
-                mergerfs_members.insert(drive.name.clone());
+                mergerfs_members.insert(drive.mount_point.clone());
             }
             for drive in &pool.parity_drives {
-                mergerfs_members.insert(drive.name.clone());
+                mergerfs_members.insert(drive.mount_point.clone());
             }
         }

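The exclusion set is now keyed by mount point rather than device name, matching the keys used later when filesystems are grouped. A small illustration with invented paths:

```rust
use std::collections::HashSet;

fn main() {
    // Invented pool members; exclusion now stores "/mnt/disk1", not "sda".
    let mut mergerfs_members: HashSet<String> = HashSet::new();
    mergerfs_members.insert("/mnt/disk1".to_string());
    mergerfs_members.insert("/mnt/parity1".to_string());

    // A filesystem mounted at /mnt/disk1 is skipped; the root fs is kept.
    assert!(mergerfs_members.contains("/mnt/disk1"));
    assert!(!mergerfs_members.contains("/"));
}
```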
@@ -444,28 +509,23 @@ impl DiskCollector {
     }

     /// Populate pools data into AgentData
-    fn populate_pools_data(&self, mergerfs_pools: &[MergerfsPool], _smart_data: &HashMap<String, SmartData>, agent_data: &mut AgentData) -> Result<(), CollectorError> {
+    fn populate_pools_data(&self, mergerfs_pools: &[MergerfsPool], smart_data: &HashMap<String, SmartData>, agent_data: &mut AgentData) -> Result<(), CollectorError> {
         for pool in mergerfs_pools {
+            // Calculate pool health based on member drive health
+            let (pool_health, data_drive_data, parity_drive_data) = self.calculate_pool_health(pool, smart_data);
+
             let pool_data = PoolData {
                 name: pool.name.clone(),
                 mount: pool.mount_point.clone(),
-                pool_type: "mergerfs".to_string(),
-                health: "healthy".to_string(), // TODO: Calculate based on member drives
-                usage_percent: (pool.used_bytes as f32 / pool.total_bytes as f32) * 100.0,
+                pool_type: format!("mergerfs ({}+{})", pool.data_drives.len(), pool.parity_drives.len()),
+                health: pool_health,
+                usage_percent: if pool.total_bytes > 0 {
+                    (pool.used_bytes as f32 / pool.total_bytes as f32) * 100.0
+                } else { 0.0 },
                 used_gb: pool.used_bytes as f32 / (1024.0 * 1024.0 * 1024.0),
                 total_gb: pool.total_bytes as f32 / (1024.0 * 1024.0 * 1024.0),
-                data_drives: pool.data_drives.iter().map(|d| cm_dashboard_shared::PoolDriveData {
-                    name: d.name.clone(),
-                    temperature_celsius: d.temperature_celsius,
-                    health: "unknown".to_string(),
-                    wear_percent: None,
-                }).collect(),
-                parity_drives: pool.parity_drives.iter().map(|d| cm_dashboard_shared::PoolDriveData {
-                    name: d.name.clone(),
-                    temperature_celsius: d.temperature_celsius,
-                    health: "unknown".to_string(),
-                    wear_percent: None,
-                }).collect(),
+                data_drives: data_drive_data,
+                parity_drives: parity_drive_data,
             };

             agent_data.system.storage.pools.push(pool_data);
@@ -474,6 +534,55 @@ impl DiskCollector {
         Ok(())
     }

+    /// Calculate pool health based on member drive status
+    fn calculate_pool_health(&self, pool: &MergerfsPool, smart_data: &HashMap<String, SmartData>) -> (String, Vec<cm_dashboard_shared::PoolDriveData>, Vec<cm_dashboard_shared::PoolDriveData>) {
+        let mut failed_data = 0;
+        let mut failed_parity = 0;
+
+        // Process data drives
+        let data_drive_data: Vec<cm_dashboard_shared::PoolDriveData> = pool.data_drives.iter().map(|d| {
+            let smart = smart_data.get(&d.name);
+            let health = smart.map(|s| s.health.clone()).unwrap_or_else(|| "UNKNOWN".to_string());
+
+            if health == "FAILED" {
+                failed_data += 1;
+            }
+
+            cm_dashboard_shared::PoolDriveData {
+                name: d.name.clone(),
+                temperature_celsius: smart.and_then(|s| s.temperature_celsius).or(d.temperature_celsius),
+                health,
+                wear_percent: smart.and_then(|s| s.wear_percent),
+            }
+        }).collect();
+
+        // Process parity drives
+        let parity_drive_data: Vec<cm_dashboard_shared::PoolDriveData> = pool.parity_drives.iter().map(|d| {
+            let smart = smart_data.get(&d.name);
+            let health = smart.map(|s| s.health.clone()).unwrap_or_else(|| "UNKNOWN".to_string());
+
+            if health == "FAILED" {
+                failed_parity += 1;
+            }
+
+            cm_dashboard_shared::PoolDriveData {
+                name: d.name.clone(),
+                temperature_celsius: smart.and_then(|s| s.temperature_celsius).or(d.temperature_celsius),
+                health,
+                wear_percent: smart.and_then(|s| s.wear_percent),
+            }
+        }).collect();
+
+        // Calculate overall pool health
+        let pool_health = match (failed_data, failed_parity) {
+            (0, 0) => "healthy".to_string(),
+            (1, 0) | (0, 1) => "degraded".to_string(), // One failure is degraded but recoverable
+            _ => "critical".to_string(), // Multiple failures are critical
+        };
+
+        (pool_health, data_drive_data, parity_drive_data)
+    }
+
     /// Calculate filesystem usage status
     fn calculate_filesystem_usage_status(&self, usage_percent: f32) -> Status {
         // Use standard filesystem warning/critical thresholds
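The pool health policy reduces to a match on failure counts: one failed drive in either role is degraded, anything more is critical. A standalone restatement of that table:

```rust
fn main() {
    // Same (failed_data, failed_parity) policy as calculate_pool_health above.
    let health = |failed_data: u32, failed_parity: u32| match (failed_data, failed_parity) {
        (0, 0) => "healthy",
        (1, 0) | (0, 1) => "degraded", // one failure is recoverable via parity
        _ => "critical",               // multiple failures
    };
    assert_eq!(health(0, 0), "healthy");
    assert_eq!(health(0, 1), "degraded");
    assert_eq!(health(1, 1), "critical");
    assert_eq!(health(2, 0), "critical");
}
```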
@@ -499,6 +608,134 @@ impl DiskCollector {
             _ => Status::Unknown,
         }
     }

+    /// Discover parity drives that are related to the given data drives
+    fn discover_related_parity_drives(&self, data_drives: &[String]) -> anyhow::Result<Vec<String>> {
+        let mount_devices = tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(self.get_mount_devices())
+        }).map_err(|e| anyhow::anyhow!("Failed to get mount devices: {}", e))?;
+
+        let mut related_parity = Vec::new();
+
+        // Find parity drives that share the same parent directory as the data drives
+        for data_path in data_drives {
+            if let Some(parent_dir) = self.get_parent_directory(data_path) {
+                // Look for parity drives in the same parent directory
+                for (mount_point, _device) in &mount_devices {
+                    if mount_point.contains("parity") && mount_point.starts_with(&parent_dir) {
+                        if !related_parity.contains(mount_point) {
+                            related_parity.push(mount_point.clone());
+                        }
+                    }
+                }
+            }
+        }
+
+        Ok(related_parity)
+    }
+
+    /// Get parent directory of a mount path (e.g., "/mnt/disk1" -> "/mnt")
+    fn get_parent_directory(&self, path: &str) -> Option<String> {
+        if let Some(last_slash) = path.rfind('/') {
+            if last_slash > 0 {
+                return Some(path[..last_slash].to_string());
+            }
+        }
+        None
+    }
+
+    /// Categorize pool member drives as data vs parity
+    fn categorize_pool_drives(&self, member_paths: &[String]) -> anyhow::Result<(Vec<PoolDrive>, Vec<PoolDrive>)> {
+        let mut data_drives = Vec::new();
+        let mut parity_drives = Vec::new();
+
+        for path in member_paths {
+            let drive_info = self.get_drive_info_for_path(path)?;
+
+            // Heuristic: if path contains "parity", it's parity
+            if path.to_lowercase().contains("parity") {
+                parity_drives.push(drive_info);
+            } else {
+                data_drives.push(drive_info);
+            }
+        }
+
+        Ok((data_drives, parity_drives))
+    }
+
+    /// Get drive information for a mount path
+    fn get_drive_info_for_path(&self, path: &str) -> anyhow::Result<PoolDrive> {
+        // Use lsblk to find the backing device
+        let output = Command::new("lsblk")
+            .args(&["-rn", "-o", "NAME,MOUNTPOINT"])
+            .output()
+            .map_err(|e| anyhow::anyhow!("Failed to run lsblk: {}", e))?;
+
+        let output_str = String::from_utf8_lossy(&output.stdout);
+        let mut device = String::new();
+
+        for line in output_str.lines() {
+            let parts: Vec<&str> = line.split_whitespace().collect();
+            if parts.len() >= 2 && parts[1] == path {
+                device = parts[0].to_string();
+                break;
+            }
+        }
+
+        if device.is_empty() {
+            return Err(anyhow::anyhow!("Could not find device for path {}", path));
+        }
+
+        // Extract base device name (e.g., "sda1" -> "sda")
+        let base_device = self.extract_base_device(&format!("/dev/{}", device));
+
+        // Get temperature from SMART data if available
+        let temperature = if let Ok(smart_data) = tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(self.get_smart_data(&base_device))
+        }) {
+            smart_data.temperature_celsius
+        } else {
+            None
+        };
+
+        Ok(PoolDrive {
+            name: base_device,
+            mount_point: path.to_string(),
+            temperature_celsius: temperature,
+        })
+    }
+
+    /// Resolve numeric mergerfs references like "1:2" to actual mount paths
+    fn resolve_numeric_mergerfs_paths(&self, numeric_refs: &[String]) -> anyhow::Result<Vec<String>> {
+        let mut resolved_paths = Vec::new();
+
+        // Get all mount points that look like /mnt/disk* or /mnt/parity*
+        let mount_devices = tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(self.get_mount_devices())
+        }).map_err(|e| anyhow::anyhow!("Failed to get mount devices: {}", e))?;
+
+        let mut disk_mounts: Vec<String> = mount_devices.keys()
+            .filter(|path| path.starts_with("/mnt/disk") || path.starts_with("/mnt/parity"))
+            .cloned()
+            .collect();
+        disk_mounts.sort(); // Ensure consistent ordering
+
+        for num_ref in numeric_refs {
+            if let Ok(index) = num_ref.parse::<usize>() {
+                // Convert 1-based index to 0-based
+                if index > 0 && index <= disk_mounts.len() {
+                    resolved_paths.push(disk_mounts[index - 1].clone());
+                }
+            }
+        }
+
+        // Fallback: if we couldn't resolve, return the original paths
+        if resolved_paths.is_empty() {
+            resolved_paths = numeric_refs.to_vec();
+        }
+
+        Ok(resolved_paths)
+    }
 }

 #[async_trait]
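A quick check of the numeric-reference resolution, assuming mount enumeration returned three hypothetical mounts. Note the sorted candidate list also contains parity mounts, so the 1-based indices are relative to that combined ordering:

```rust
fn main() {
    // Hypothetical mounts as get_mount_devices() might report them.
    let mut disk_mounts = vec!["/mnt/disk2", "/mnt/disk1", "/mnt/parity1"];
    disk_mounts.sort(); // ["/mnt/disk1", "/mnt/disk2", "/mnt/parity1"]

    // A mergerfs source of "1:2" resolves 1-based indices into that list.
    let resolved: Vec<&str> = ["1", "2"]
        .iter()
        .filter_map(|r| r.parse::<usize>().ok())
        .filter(|&i| i > 0 && i <= disk_mounts.len())
        .map(|i| disk_mounts[i - 1])
        .collect();
    assert_eq!(resolved, ["/mnt/disk1", "/mnt/disk2"]);
}
```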
@@ -24,6 +24,8 @@ struct ServiceCacheState {
     last_collection: Option<Instant>,
     /// Cached service data
     services: Vec<ServiceInfo>,
+    /// Cached complete service data with sub-services
+    cached_service_data: Vec<ServiceData>,
     /// Interesting services to monitor (cached after discovery)
     monitored_services: Vec<String>,
     /// Cached service status information from discovery
@@ -62,6 +64,7 @@ impl SystemdCollector {
         let state = ServiceCacheState {
             last_collection: None,
             services: Vec::new(),
+            cached_service_data: Vec::new(),
             monitored_services: Vec::new(),
             service_status_cache: std::collections::HashMap::new(),
             last_discovery_time: None,
@@ -93,6 +96,7 @@ impl SystemdCollector {

         // Collect service data for each monitored service
         let mut services = Vec::new();
+        let mut complete_service_data = Vec::new();
         for service_name in &monitored_services {
             match self.get_service_status(service_name) {
                 Ok((active_status, _detailed_info)) => {
@@ -149,15 +153,19 @@ impl SystemdCollector {
                     };
                     services.push(service_info);

-                    // Add to AgentData with hierarchical structure
-                    agent_data.services.push(ServiceData {
+                    // Create complete service data
+                    let service_data = ServiceData {
                         name: service_name.clone(),
                         memory_mb,
                         disk_gb,
                         user_stopped: false, // TODO: Integrate with service tracker
                         service_status: self.calculate_service_status(service_name, &active_status),
                         sub_services,
-                    });
+                    };
+
+                    // Add to AgentData and cache
+                    agent_data.services.push(service_data.clone());
+                    complete_service_data.push(service_data);
                 }
                 Err(e) => {
                     debug!("Failed to get status for service {}: {}", service_name, e);
@@ -170,6 +178,7 @@ impl SystemdCollector {
             let mut state = self.state.write().unwrap();
             state.last_collection = Some(start_time);
             state.services = services;
+            state.cached_service_data = complete_service_data;
         }

         let elapsed = start_time.elapsed();
@@ -232,10 +241,6 @@ impl SystemdCollector {
         if needs_refresh {
             // Only check nginx sites if nginx service is active
             if state.monitored_services.iter().any(|s| s.contains("nginx")) {
-                debug!(
-                    "Refreshing nginx site latency metrics (interval: {}s)",
-                    state.nginx_check_interval_seconds
-                );
                 let fresh_metrics = self.get_nginx_sites_internal();
                 state.nginx_site_metrics = fresh_metrics;
                 state.last_nginx_check_time = Some(Instant::now());
@@ -564,6 +569,16 @@ impl SystemdCollector {
         }
     }

+    /// Get cached complete service data with sub-services if available and fresh
+    fn get_cached_complete_services(&self) -> Option<Vec<ServiceData>> {
+        if !self.should_update_cache() {
+            let state = self.state.read().unwrap();
+            Some(state.cached_service_data.clone())
+        } else {
+            None
+        }
+    }
+
     /// Get nginx sites with latency checks (internal - no caching)
     fn get_nginx_sites_internal(&self) -> Vec<(String, f32)> {
         let mut sites = Vec::new();
@@ -571,13 +586,14 @@ impl SystemdCollector {
         // Discover nginx sites from configuration
         let discovered_sites = self.discover_nginx_sites();

+        // Always add all discovered sites, even if checks fail (like old version)
         for (site_name, url) in &discovered_sites {
             match self.check_site_latency(url) {
                 Ok(latency_ms) => {
                     sites.push((site_name.clone(), latency_ms));
                 }
                 Err(_) => {
-                    // Site is unreachable - use -1.0 to indicate error
+                    // Site is unreachable - use -1.0 to indicate error (like old version)
                     sites.push((site_name.clone(), -1.0));
                 }
             }
@@ -835,18 +851,10 @@ impl SystemdCollector {
 #[async_trait]
 impl Collector for SystemdCollector {
     async fn collect_structured(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
-        // Use cached data if available and fresh
-        if let Some(cached_services) = self.get_cached_services() {
-            debug!("Using cached systemd services data");
-            for service in cached_services {
-                agent_data.services.push(ServiceData {
-                    name: service.name.clone(),
-                    memory_mb: service.memory_mb,
-                    disk_gb: service.disk_gb,
-                    user_stopped: false, // TODO: Integrate with service tracker
-                    service_status: self.calculate_service_status(&service.name, &service.status),
-                    sub_services: Vec::new(), // Cached services don't have sub-services
-                });
-            }
+        // Use cached complete data if available and fresh
+        if let Some(cached_complete_services) = self.get_cached_complete_services() {
+            for service_data in cached_complete_services {
+                agent_data.services.push(service_data);
+            }
             Ok(())
         } else {
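The underlying fix: the old cached path rebuilt `ServiceData` from the slimmer `ServiceInfo` cache and always emitted empty `sub_services`; caching the fully built objects and cloning them back preserves the hierarchy. A minimal sketch of the pattern with simplified stand-in types:

```rust
use std::sync::RwLock;

// Simplified stand-ins for ServiceData and ServiceCacheState.
#[derive(Clone)]
struct ServiceData {
    name: String,
    sub_services: Vec<String>,
}
struct CacheState {
    cached_service_data: Vec<ServiceData>,
}

fn main() {
    let state = RwLock::new(CacheState { cached_service_data: Vec::new() });

    // Collection pass: store the complete objects, sub-services included.
    state.write().unwrap().cached_service_data = vec![ServiceData {
        name: "nginx.service".into(),
        sub_services: vec!["site-a".into(), "site-b".into()],
    }];

    // Cached read: clones keep sub_services, unlike the old rebuild path.
    let cached = state.read().unwrap().cached_service_data.clone();
    assert_eq!(cached[0].sub_services.len(), 2);
    assert_eq!(cached[0].name, "nginx.service");
}
```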
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.147"
+version = "0.1.151"
 edition = "2021"

 [dependencies]
@@ -18,7 +18,7 @@ use crate::config::DashboardConfig;
 use crate::metrics::MetricStore;
 use cm_dashboard_shared::Status;
 use theme::{Components, Layout as ThemeLayout, Theme, Typography};
-use widgets::{BackupWidget, ServicesWidget, SystemWidget, Widget};
+use widgets::{ServicesWidget, SystemWidget, Widget};

|
|||||||
pub system_widget: SystemWidget,
|
pub system_widget: SystemWidget,
|
||||||
/// Services widget state
|
/// Services widget state
|
||||||
pub services_widget: ServicesWidget,
|
pub services_widget: ServicesWidget,
|
||||||
/// Backup widget state
|
|
||||||
pub backup_widget: BackupWidget,
|
|
||||||
/// Last update time for this host
|
/// Last update time for this host
|
||||||
pub last_update: Option<Instant>,
|
pub last_update: Option<Instant>,
|
||||||
}
|
}
|
||||||
@@ -43,7 +41,6 @@ impl HostWidgets {
         Self {
             system_widget: SystemWidget::new(),
             services_widget: ServicesWidget::new(),
-            backup_widget: BackupWidget::new(),
             last_update: None,
         }
     }
@@ -112,7 +109,6 @@ impl TuiApp {
         // Update all widgets with structured data directly
         host_widgets.system_widget.update_from_agent_data(agent_data);
         host_widgets.services_widget.update_from_agent_data(agent_data);
-        host_widgets.backup_widget.update_from_agent_data(agent_data);

         host_widgets.last_update = Some(Instant::now());
     }
@@ -469,40 +465,17 @@ impl TuiApp {
             return;
         }

-        // Check if backup panel should be shown
-        let show_backup = if let Some(hostname) = self.current_host.clone() {
-            let host_widgets = self.get_or_create_host_widgets(&hostname);
-            host_widgets.backup_widget.has_data()
-        } else {
-            false
-        };
-
-        // Left side: dynamic layout based on backup data availability
-        let left_chunks = if show_backup {
-            // Show both system and backup panels
-            ratatui::layout::Layout::default()
-                .direction(Direction::Vertical)
-                .constraints([
-                    Constraint::Percentage(ThemeLayout::SYSTEM_PANEL_HEIGHT), // System section
-                    Constraint::Percentage(ThemeLayout::BACKUP_PANEL_HEIGHT), // Backup section
-                ])
-                .split(content_chunks[0])
-        } else {
-            // Show only system panel (full height)
-            ratatui::layout::Layout::default()
-                .direction(Direction::Vertical)
-                .constraints([Constraint::Percentage(100)]) // System section takes full height
-                .split(content_chunks[0])
-        };
+        // Left side: system panel only (full height)
+        let left_chunks = ratatui::layout::Layout::default()
+            .direction(Direction::Vertical)
+            .constraints([Constraint::Percentage(100)]) // System section takes full height
+            .split(content_chunks[0]);

         // Render title bar
         self.render_btop_title(frame, main_chunks[0], metric_store);

-        // Render new panel layout
+        // Render system panel
         self.render_system_panel(frame, left_chunks[0], metric_store);
-        if show_backup && left_chunks.len() > 1 {
-            self.render_backup_panel(frame, left_chunks[1]);
-        }

         // Render services widget for current host
         if let Some(hostname) = self.current_host.clone() {
@@ -669,17 +642,6 @@ impl TuiApp {
         }
     }

-    fn render_backup_panel(&mut self, frame: &mut Frame, area: Rect) {
-        let backup_block = Components::widget_block("backup");
-        let inner_area = backup_block.inner(area);
-        frame.render_widget(backup_block, area);
-
-        // Get current host widgets for backup widget
-        if let Some(hostname) = self.current_host.clone() {
-            let host_widgets = self.get_or_create_host_widgets(&hostname);
-            host_widgets.backup_widget.render(frame, inner_area);
-        }
-    }
-
     /// Render offline host message with wake-up option
     fn render_offline_host_message(&self, frame: &mut Frame, area: Rect) {
@@ -1,418 +0,0 @@
-use cm_dashboard_shared::{Metric, Status};
-use super::Widget;
-use ratatui::{
-    layout::Rect,
-    widgets::Paragraph,
-    Frame,
-};
-use tracing::debug;
-
-use crate::ui::theme::{StatusIcons, Typography};
-
-/// Backup widget displaying backup status, services, and repository information
-#[derive(Clone)]
-pub struct BackupWidget {
-    /// Overall backup status
-    overall_status: Status,
-    /// Last backup duration in seconds
-    duration_seconds: Option<i64>,
-    /// Last backup timestamp
-    last_run_timestamp: Option<i64>,
-    /// Total repository size in GB
-    total_repo_size_gb: Option<f32>,
-    /// Total disk space for backups in GB
-    backup_disk_total_gb: Option<f32>,
-    /// Used disk space for backups in GB
-    backup_disk_used_gb: Option<f32>,
-    /// Backup disk product name from SMART data
-    backup_disk_product_name: Option<String>,
-    /// Backup disk serial number from SMART data
-    backup_disk_serial_number: Option<String>,
-    /// Backup disk wear percentage from SMART data
-    backup_disk_wear_percent: Option<f32>,
-    /// All individual service metrics for detailed display
-    service_metrics: Vec<ServiceMetricData>,
-    /// Last update indicator
-    has_data: bool,
-}
-
-#[derive(Debug, Clone)]
-struct ServiceMetricData {
-    name: String,
-    status: Status,
-    archive_count: Option<i64>,
-    repo_size_gb: Option<f32>,
-}
-
-impl BackupWidget {
-    pub fn new() -> Self {
-        Self {
-            overall_status: Status::Unknown,
-            duration_seconds: None,
-            last_run_timestamp: None,
-            total_repo_size_gb: None,
-            backup_disk_total_gb: None,
-            backup_disk_used_gb: None,
-            backup_disk_product_name: None,
-            backup_disk_serial_number: None,
-            backup_disk_wear_percent: None,
-            service_metrics: Vec::new(),
-            has_data: false,
-        }
-    }
-
-    /// Check if the backup widget has any data to display
-    pub fn has_data(&self) -> bool {
-        self.has_data
-    }
-
-    /// Format size with proper units (xxxkB/MB/GB/TB)
-    fn format_size_with_proper_units(size_gb: f32) -> String {
-        if size_gb >= 1000.0 {
-            // TB range
-            format!("{:.1}TB", size_gb / 1000.0)
-        } else if size_gb >= 1.0 {
-            // GB range
-            format!("{:.1}GB", size_gb)
-        } else if size_gb >= 0.001 {
-            // MB range (size_gb * 1024 = MB)
-            let size_mb = size_gb * 1024.0;
-            format!("{:.1}MB", size_mb)
-        } else if size_gb >= 0.000001 {
-            // kB range (size_gb * 1024 * 1024 = kB)
-            let size_kb = size_gb * 1024.0 * 1024.0;
-            format!("{:.0}kB", size_kb)
-        } else {
-            // B range (size_gb * 1024^3 = bytes)
-            let size_bytes = size_gb * 1024.0 * 1024.0 * 1024.0;
-            format!("{:.0}B", size_bytes)
-        }
-    }
-
-    /// Extract service name from metric name (e.g., "backup_service_gitea_status" -> "gitea")
-    #[allow(dead_code)]
-    fn extract_service_name(metric_name: &str) -> Option<String> {
-        if metric_name.starts_with("backup_service_") {
-            let name_part = &metric_name[15..]; // Remove "backup_service_" prefix
-
-            // Try to extract service name by removing known suffixes
-            if let Some(service_name) = name_part.strip_suffix("_status") {
-                Some(service_name.to_string())
-            } else if let Some(service_name) = name_part.strip_suffix("_archive_count") {
-                Some(service_name.to_string())
-            } else if let Some(service_name) = name_part.strip_suffix("_repo_size_gb") {
-                Some(service_name.to_string())
-            } else if let Some(service_name) = name_part.strip_suffix("_repo_path") {
-                Some(service_name.to_string())
-            } else {
-                None
-            }
-        } else {
-            None
-        }
-    }
-}
-
-impl Widget for BackupWidget {
-    fn update_from_agent_data(&mut self, agent_data: &cm_dashboard_shared::AgentData) {
-        self.has_data = true;
-
-        let backup = &agent_data.backup;
-        self.overall_status = Status::Ok;
-
-        if let Some(size) = backup.total_size_gb {
-            self.total_repo_size_gb = Some(size);
-        }
-
-        if let Some(last_run) = backup.last_run {
-            self.last_run_timestamp = Some(last_run as i64);
-        }
-    }
-}
-
-impl BackupWidget {
-    #[allow(dead_code)]
-    fn update_from_metrics(&mut self, metrics: &[&Metric]) {
-        debug!("Backup widget updating with {} metrics", metrics.len());
-        for metric in metrics {
-            debug!(
-                "Backup metric: {} = {:?} (status: {:?})",
-                metric.name, metric.value, metric.status
-            );
-        }
-
-        // Also debug the service_data after processing
-        debug!("Processing individual service metrics...");
-
-        // Log how many metrics are backup service metrics
-        let service_metric_count = metrics
-            .iter()
-            .filter(|m| m.name.starts_with("backup_service_"))
-            .count();
-        debug!(
-            "Found {} backup_service_ metrics out of {} total backup metrics",
-            service_metric_count,
-            metrics.len()
-        );
-
-        // Reset service metrics
-        self.service_metrics.clear();
-        let mut service_data: std::collections::HashMap<String, ServiceMetricData> =
-            std::collections::HashMap::new();
-
-        for metric in metrics {
-            match metric.name.as_str() {
-                "backup_overall_status" => {
-                    let status_str = metric.value.as_string();
-                    self.overall_status = match status_str.as_str() {
-                        "ok" => Status::Ok,
-                        "warning" => Status::Warning,
-                        "critical" => Status::Critical,
-                        _ => Status::Unknown,
-                    };
-                }
-                "backup_duration_seconds" => {
-                    self.duration_seconds = metric.value.as_i64();
-                }
-                "backup_last_run_timestamp" => {
-                    self.last_run_timestamp = metric.value.as_i64();
-                }
-                "backup_total_repo_size_gb" => {
-                    self.total_repo_size_gb = metric.value.as_f32();
-                }
-                "backup_disk_total_gb" => {
-                    self.backup_disk_total_gb = metric.value.as_f32();
-                }
-                "backup_disk_used_gb" => {
-                    self.backup_disk_used_gb = metric.value.as_f32();
-                }
-                "backup_disk_product_name" => {
-                    self.backup_disk_product_name = Some(metric.value.as_string());
-                }
-                "backup_disk_serial_number" => {
-                    self.backup_disk_serial_number = Some(metric.value.as_string());
-                }
-                "backup_disk_wear_percent" => {
-                    self.backup_disk_wear_percent = metric.value.as_f32();
-                }
-                _ => {
-                    // Handle individual service metrics
-                    if let Some(service_name) = Self::extract_service_name(&metric.name) {
-                        debug!(
-                            "Extracted service name '{}' from metric '{}'",
-                            service_name, metric.name
-                        );
-                        let entry = service_data.entry(service_name.clone()).or_insert_with(|| {
-                            ServiceMetricData {
-                                name: service_name,
-                                status: Status::Unknown,
-                                archive_count: None,
-                                repo_size_gb: None,
-                            }
-                        });
-
-                        if metric.name.ends_with("_status") {
-                            entry.status = metric.status;
-                            debug!("Set status for {}: {:?}", entry.name, entry.status);
-                        } else if metric.name.ends_with("_archive_count") {
-                            entry.archive_count = metric.value.as_i64();
-                            debug!(
-                                "Set archive_count for {}: {:?}",
-                                entry.name, entry.archive_count
-                            );
-                        } else if metric.name.ends_with("_repo_size_gb") {
-                            entry.repo_size_gb = metric.value.as_f32();
-                            debug!(
-                                "Set repo_size_gb for {}: {:?}",
-                                entry.name, entry.repo_size_gb
-                            );
-                        }
-                    } else {
-                        debug!(
-                            "Could not extract service name from metric: {}",
-                            metric.name
-                        );
-                    }
-                }
-            }
-        }
-
-        // Convert service data to sorted vector
-        let mut services: Vec<ServiceMetricData> = service_data.into_values().collect();
-        services.sort_by(|a, b| a.name.cmp(&b.name));
-        self.service_metrics = services;
-
-        // Only show backup panel if we have meaningful backup data
-        self.has_data = !metrics.is_empty() && (
-            self.last_run_timestamp.is_some() ||
-            self.total_repo_size_gb.is_some() ||
-            !self.service_metrics.is_empty()
-        );
-
-        debug!(
-            "Backup widget updated: status={:?}, services={}, total_size={:?}GB",
-            self.overall_status,
-            self.service_metrics.len(),
-            self.total_repo_size_gb
-        );
-
-        // Debug individual service data
-        for service in &self.service_metrics {
-            debug!(
-                "Service {}: status={:?}, archives={:?}, size={:?}GB",
-                service.name, service.status, service.archive_count, service.repo_size_gb
-            );
-        }
-    }
-}
-
-impl BackupWidget {
-    /// Render backup widget
-    pub fn render(&mut self, frame: &mut Frame, area: Rect) {
-        let mut lines = Vec::new();
-
-        // Latest backup section
-        lines.push(ratatui::text::Line::from(vec![
-            ratatui::text::Span::styled("Latest backup:", Typography::widget_title())
-        ]));
-
-        // Timestamp with status icon
-        let timestamp_text = if let Some(timestamp) = self.last_run_timestamp {
-            self.format_timestamp(timestamp)
-        } else {
-            "Unknown".to_string()
-        };
-        let timestamp_spans = StatusIcons::create_status_spans(
-            self.overall_status,
-            &timestamp_text
-        );
-        lines.push(ratatui::text::Line::from(timestamp_spans));
-
-        // Duration as sub-item
-        if let Some(duration) = self.duration_seconds {
-            let duration_text = self.format_duration(duration);
-            lines.push(ratatui::text::Line::from(vec![
-                ratatui::text::Span::styled(" └─ ", Typography::tree()),
-                ratatui::text::Span::styled(format!("Duration: {}", duration_text), Typography::secondary())
-            ]));
-        }
-
-        // Disk section
-        lines.push(ratatui::text::Line::from(vec![
-            ratatui::text::Span::styled("Disk:", Typography::widget_title())
-        ]));
-
-        // Disk product name with status
-        if let Some(product) = &self.backup_disk_product_name {
-            let disk_spans = StatusIcons::create_status_spans(
-                Status::Ok, // Assuming disk is OK if we have data
-                product
-            );
-            lines.push(ratatui::text::Line::from(disk_spans));
-
-            // Collect sub-items to determine tree structure
-            let mut sub_items = Vec::new();
-
-            if let Some(serial) = &self.backup_disk_serial_number {
-                sub_items.push(format!("S/N: {}", serial));
-            }
-
-            if let Some(wear) = self.backup_disk_wear_percent {
-                sub_items.push(format!("Wear: {:.0}%", wear));
-            }
-
-            if let (Some(used), Some(total)) = (self.backup_disk_used_gb, self.backup_disk_total_gb) {
-                let used_str = Self::format_size_with_proper_units(used);
-                let total_str = Self::format_size_with_proper_units(total);
-                sub_items.push(format!("Usage: {}/{}", used_str, total_str));
-            }
-
-            // Render sub-items with proper tree structure
-            let num_items = sub_items.len();
-            for (i, item) in sub_items.into_iter().enumerate() {
-                let is_last = i == num_items - 1;
-                let tree_char = if is_last { " └─ " } else { " ├─ " };
-                lines.push(ratatui::text::Line::from(vec![
-                    ratatui::text::Span::styled(tree_char, Typography::tree()),
-                    ratatui::text::Span::styled(item, Typography::secondary())
-                ]));
-            }
-        }
-
-        // Repos section
-        lines.push(ratatui::text::Line::from(vec![
-            ratatui::text::Span::styled("Repos:", Typography::widget_title())
-        ]));
-
-        // Add all repository lines (no truncation here - scroll will handle display)
-        for service in &self.service_metrics {
-            if let (Some(archives), Some(size_gb)) = (service.archive_count, service.repo_size_gb) {
-                let size_str = Self::format_size_with_proper_units(size_gb);
-                let repo_text = format!("{} ({}) {}", service.name, archives, size_str);
-                let repo_spans = StatusIcons::create_status_spans(service.status, &repo_text);
-                lines.push(ratatui::text::Line::from(repo_spans));
-            }
-        }
-
-        // Apply scroll offset
-        let total_lines = lines.len();
-        let available_height = area.height as usize;
-
-        // Show only what fits, with "X more below" if needed
-        if total_lines > available_height {
-            let lines_for_content = available_height.saturating_sub(1); // Reserve one line for "more below"
-            let mut visible_lines: Vec<_> = lines
-                .into_iter()
-                .take(lines_for_content)
-                .collect();
-
-            let hidden_below = total_lines.saturating_sub(lines_for_content);
-            if hidden_below > 0 {
-                let more_line = ratatui::text::Line::from(vec![
-                    ratatui::text::Span::styled(format!("... {} more below", hidden_below), Typography::muted())
-                ]);
-                visible_lines.push(more_line);
-            }
-
-            let paragraph = Paragraph::new(ratatui::text::Text::from(visible_lines));
-            frame.render_widget(paragraph, area);
-        } else {
-            let paragraph = Paragraph::new(ratatui::text::Text::from(lines));
-            frame.render_widget(paragraph, area);
-        }
-    }
-}
-
-impl BackupWidget {
-    /// Format timestamp for display
-    fn format_timestamp(&self, timestamp: i64) -> String {
-        let datetime = chrono::DateTime::from_timestamp(timestamp, 0)
-            .unwrap_or_else(|| chrono::Utc::now());
-        datetime.format("%Y-%m-%d %H:%M:%S").to_string()
-    }
-
-    /// Format duration in seconds to human readable format
-    fn format_duration(&self, duration_seconds: i64) -> String {
-        let minutes = duration_seconds / 60;
-        let seconds = duration_seconds % 60;
-
-        if minutes > 0 {
-            format!("{}.{}m", minutes, seconds / 6) // Show 1 decimal for minutes
-        } else {
-            format!("{}s", seconds)
-        }
-    }
-}
-
-impl Default for BackupWidget {
-    fn default() -> Self {
-        Self::new()
-    }
-}
@@ -1,12 +1,10 @@
 use cm_dashboard_shared::AgentData;

-pub mod backup;
 pub mod cpu;
 pub mod memory;
 pub mod services;
 pub mod system;

-pub use backup::BackupWidget;
 pub use services::ServicesWidget;
 pub use system::SystemWidget;

@@ -37,6 +37,17 @@ pub struct SystemWidget {
     // Storage metrics (collected from disk metrics)
     storage_pools: Vec<StoragePool>,

+    // Backup metrics
+    backup_status: String,
+    backup_start_time_raw: Option<String>,
+    backup_disk_serial: Option<String>,
+    backup_disk_usage_percent: Option<f32>,
+    backup_disk_used_gb: Option<f32>,
+    backup_disk_total_gb: Option<f32>,
+    backup_disk_wear_percent: Option<f32>,
+    backup_disk_temperature: Option<f32>,
+    backup_last_size_gb: Option<f32>,
+
     // Overall status
     has_data: bool,
 }
@@ -91,6 +102,15 @@ impl SystemWidget {
             tmp_status: Status::Unknown,
             tmpfs_mounts: Vec::new(),
             storage_pools: Vec::new(),
+            backup_status: "unknown".to_string(),
+            backup_start_time_raw: None,
+            backup_disk_serial: None,
+            backup_disk_usage_percent: None,
+            backup_disk_used_gb: None,
+            backup_disk_total_gb: None,
+            backup_disk_wear_percent: None,
+            backup_disk_temperature: None,
+            backup_last_size_gb: None,
             has_data: false,
         }
     }
@@ -170,6 +190,28 @@ impl Widget for SystemWidget {

        // Convert storage data to internal format
        self.update_storage_from_agent_data(agent_data);

        // Extract backup data
        let backup = &agent_data.backup;
        self.backup_status = backup.status.clone();
        self.backup_start_time_raw = backup.start_time_raw.clone();
        self.backup_last_size_gb = backup.last_backup_size_gb;

        if let Some(disk) = &backup.repository_disk {
            self.backup_disk_serial = Some(disk.serial.clone());
            self.backup_disk_usage_percent = Some(disk.usage_percent);
            self.backup_disk_used_gb = Some(disk.used_gb);
            self.backup_disk_total_gb = Some(disk.total_gb);
            self.backup_disk_wear_percent = disk.wear_percent;
            self.backup_disk_temperature = disk.temperature_celsius;
        } else {
            self.backup_disk_serial = None;
            self.backup_disk_usage_percent = None;
            self.backup_disk_used_gb = None;
            self.backup_disk_total_gb = None;
            self.backup_disk_wear_percent = None;
            self.backup_disk_temperature = None;
        }
    }
}
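The `else` branch that resets every `backup_disk_*` field matters: widget state persists across agent messages, so if a later payload arrives without `repository_disk`, stale serial/usage values would otherwise keep rendering. A minimal standalone model of that update rule (simplified stand-in types, not the widget's real structs):

```rust
// Simplified stand-ins for the shared types, for illustration only.
struct DiskData {
    serial: String,
    usage_percent: f32,
}

#[derive(Default)]
struct WidgetState {
    disk_serial: Option<String>,
    disk_usage: Option<f32>,
}

impl WidgetState {
    /// Copy disk data when present, and clear it when absent,
    /// so the dashboard never renders stale disk details.
    fn update(&mut self, disk: Option<&DiskData>) {
        match disk {
            Some(d) => {
                self.disk_serial = Some(d.serial.clone());
                self.disk_usage = Some(d.usage_percent);
            }
            None => {
                self.disk_serial = None;
                self.disk_usage = None;
            }
        }
    }
}

fn main() {
    let mut state = WidgetState::default();
    state.update(Some(&DiskData { serial: "WD-XYZ".into(), usage_percent: 45.0 }));
    assert!(state.disk_serial.is_some());
    state.update(None); // disk disappeared from the payload
    assert!(state.disk_serial.is_none() && state.disk_usage.is_none());
}
```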
@@ -352,6 +394,106 @@ fn render_pool_drive(drive: &StorageDrive, is_last: bool, lines: &mut Vec<Line<'
}

impl SystemWidget {
    /// Render backup section for display
    fn render_backup(&self) -> Vec<Line<'_>> {
        let mut lines = Vec::new();

        // First line: serial number with temperature and wear
        if let Some(serial) = &self.backup_disk_serial {
            let mut details = Vec::new();
            if let Some(temp) = self.backup_disk_temperature {
                details.push(format!("T: {}°C", temp as i32));
            }
            if let Some(wear) = self.backup_disk_wear_percent {
                details.push(format!("W: {}%", wear as i32));
            }

            let disk_text = if !details.is_empty() {
                format!("{} {}", serial, details.join(" "))
            } else {
                serial.clone()
            };

            let backup_status = match self.backup_status.as_str() {
                "completed" | "success" => Status::Ok,
                "running" => Status::Pending,
                "failed" => Status::Critical,
                _ => Status::Unknown,
            };

            let disk_spans = StatusIcons::create_status_spans(backup_status, &disk_text);
            lines.push(Line::from(disk_spans));

            // Show backup time from TOML if available
            if let Some(start_time) = &self.backup_start_time_raw {
                let time_text = if let Some(size) = self.backup_last_size_gb {
                    format!("Time: {} ({:.1}GB)", start_time, size)
                } else {
                    format!("Time: {}", start_time)
                };

                lines.push(Line::from(vec![
                    Span::styled(" ├─ ", Typography::tree()),
                    Span::styled(time_text, Typography::secondary())
                ]));
            }

            // Usage information
            if let (Some(used), Some(total), Some(usage_percent)) = (
                self.backup_disk_used_gb,
                self.backup_disk_total_gb,
                self.backup_disk_usage_percent
            ) {
                let usage_text = format!("Usage: {:.0}% {:.0}GB/{:.0}GB", usage_percent, used, total);
                let usage_spans = StatusIcons::create_status_spans(Status::Ok, &usage_text);
                let mut full_spans = vec![
                    Span::styled(" └─ ", Typography::tree()),
                ];
                full_spans.extend(usage_spans);
                lines.push(Line::from(full_spans));
            }
        }

        lines
    }

    /// Format time ago from timestamp
    fn format_time_ago(&self, timestamp: u64) -> String {
        let now = chrono::Utc::now().timestamp() as u64;
        let seconds_ago = now.saturating_sub(timestamp);

        let hours = seconds_ago / 3600;
        let minutes = (seconds_ago % 3600) / 60;

        if hours > 0 {
            format!("{}h ago", hours)
        } else if minutes > 0 {
            format!("{}m ago", minutes)
        } else {
            "now".to_string()
        }
    }

    /// Format time until from future timestamp
    fn format_time_until(&self, timestamp: u64) -> String {
        let now = chrono::Utc::now().timestamp() as u64;
        if timestamp <= now {
            return "overdue".to_string();
        }

        let seconds_until = timestamp - now;
        let hours = seconds_until / 3600;
        let minutes = (seconds_until % 3600) / 60;

        if hours > 0 {
            format!("in {}h", hours)
        } else if minutes > 0 {
            format!("in {}m", minutes)
        } else {
            "soon".to_string()
        }
    }

    /// Render system widget
    pub fn render(&mut self, frame: &mut Frame, area: Rect, hostname: &str, config: Option<&crate::config::DashboardConfig>) {
        let mut lines = Vec::new();
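`format_time_ago` and `format_time_until` both bucket a Unix-timestamp delta into hours, then minutes, with "now"/"soon"/"overdue" as edge cases. A free-function sketch of the same bucketing, testable without a widget instance (the delta is precomputed by the caller instead of read from `chrono::Utc::now()`; `ago` is a hypothetical name):

```rust
/// Same bucketing as the widget's format_time_ago, over a precomputed delta.
fn ago(seconds_ago: u64) -> String {
    let hours = seconds_ago / 3600;
    let minutes = (seconds_ago % 3600) / 60;
    if hours > 0 {
        format!("{}h ago", hours)
    } else if minutes > 0 {
        format!("{}m ago", minutes)
    } else {
        "now".to_string()
    }
}

fn main() {
    assert_eq!(ago(2 * 3600 + 300), "2h ago"); // minutes are dropped once hours > 0
    assert_eq!(ago(5 * 60), "5m ago");
    assert_eq!(ago(59), "now");                // under a minute collapses to "now"
}
```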
@@ -445,6 +587,16 @@ impl SystemWidget {
        let storage_lines = self.render_storage();
        lines.extend(storage_lines);

        // Backup section (if available)
        if self.backup_status != "unavailable" && self.backup_status != "unknown" {
            lines.push(Line::from(vec![
                Span::styled("Backup:", Typography::widget_title())
            ]));

            let backup_lines = self.render_backup();
            lines.extend(backup_lines);
        }

        // Apply scroll offset
        let total_lines = lines.len();
        let available_height = area.height as usize;
@@ -1,6 +1,6 @@
[package]
name = "cm-dashboard-shared"
version = "0.1.147"
version = "0.1.151"
edition = "2021"

[dependencies]
@@ -138,10 +138,22 @@ pub struct SubServiceMetric {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupData {
    pub status: String,
    pub last_run: Option<u64>,
    pub next_scheduled: Option<u64>,
    pub total_size_gb: Option<f32>,
    pub repository_health: Option<String>,
    pub repository_disk: Option<BackupDiskData>,
    pub last_backup_size_gb: Option<f32>,
    pub start_time_raw: Option<String>,
}

/// Backup repository disk information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupDiskData {
    pub serial: String,
    pub usage_percent: f32,
    pub used_gb: f32,
    pub total_gb: f32,
    pub wear_percent: Option<f32>,
    pub temperature_celsius: Option<f32>,
}

impl AgentData {
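With `last_run` and `next_scheduled` dropped in favor of `start_time_raw`, a fully populated `BackupData` might be assembled as below. The struct shapes come from the diff above; every field value, including the `start_time_raw` format, is illustrative:

```rust
use cm_dashboard_shared::{BackupData, BackupDiskData};

fn main() {
    // Illustrative values only; struct shapes follow the definitions above.
    let backup = BackupData {
        status: "completed".to_string(),
        total_size_gb: Some(678.0),
        repository_health: Some("ok".to_string()), // assumed health string
        repository_disk: Some(BackupDiskData {
            serial: "WD-WCC7K1234567".to_string(),
            usage_percent: 45.0,
            used_gb: 678.0,
            total_gb: 1500.0,
            wear_percent: Some(12.0),
            temperature_celsius: Some(32.0),
        }),
        last_backup_size_gb: Some(12.3),
        start_time_raw: Some("2025-01-15 03:00".to_string()), // assumed format
    };
    assert!(backup.repository_disk.is_some());
}
```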
@@ -180,10 +192,11 @@ impl AgentData {
            services: Vec::new(),
            backup: BackupData {
                status: "unknown".to_string(),
                last_run: None,
                next_scheduled: None,
                total_size_gb: None,
                repository_health: None,
                repository_disk: None,
                last_backup_size_gb: None,
                start_time_raw: None,
            },
        }
    }