Christoffer Martinsson 2025-10-12 19:32:47 +02:00
parent fb91d8346f
commit 9c836e0862
3 changed files with 78 additions and 39 deletions

View File

@ -312,7 +312,7 @@ impl Collector for BackupCollector {
// Try to get borgbackup metrics first, fall back to restic if not available
let borgbackup_result = self.get_borgbackup_metrics().await;
let (backup_info, overall_status) = match borgbackup_result {
let (backup_info, overall_status) = match &borgbackup_result {
Ok(borg_metrics) => {
// Parse borgbackup timestamp to DateTime
let last_success = chrono::DateTime::from_timestamp(borg_metrics.timestamp, 0);
@ -329,6 +329,7 @@ impl Collector for BackupCollector {
last_success,
last_failure: None, // borgbackup metrics don't include failure info
size_gb: borg_metrics.repository.total_repository_size_bytes as f32 / (1024.0 * 1024.0 * 1024.0),
latest_archive_size_gb: Some(borg_metrics.repository.latest_archive_size_bytes as f32 / (1024.0 * 1024.0 * 1024.0)),
snapshot_count: borg_metrics.repository.total_archives as u32,
};
@ -356,12 +357,14 @@ impl Collector for BackupCollector {
last_success: stats.last_success,
last_failure,
size_gb: stats.total_size as f32 / (1024.0 * 1024.0 * 1024.0),
latest_archive_size_gb: None, // Restic doesn't provide this easily
snapshot_count: stats.snapshot_count,
},
Err(_) => BackupInfo {
last_success: None,
last_failure,
size_gb: 0.0,
latest_archive_size_gb: None,
snapshot_count: 0,
},
};
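Editor's note: the bytes-to-GiB conversion (`x as f32 / (1024.0 * 1024.0 * 1024.0)`) now appears in several spots in this hunk and the next. A helper like the one below could centralize it; this is only a sketch, and `bytes_to_gib` is not a function in this repository.

fn bytes_to_gib(bytes: u64) -> f32 {
    // 1 GiB = 1024^3 bytes, matching the inline divisions in the diff above.
    bytes as f32 / (1024.0 * 1024.0 * 1024.0)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn one_gib_is_one() {
        assert_eq!(bytes_to_gib(1024 * 1024 * 1024), 1.0);
    }
}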
@ -380,13 +383,27 @@ impl Collector for BackupCollector {
last_message: None,
});
let backup_metrics = json!({
// Add disk information if available from borgbackup metrics
let mut backup_json = json!({
"overall_status": overall_status,
"backup": backup_info,
"service": service_data,
"timestamp": Utc::now()
});
// If we got borgbackup metrics, include disk information
if let Ok(borg_metrics) = &borgbackup_result {
backup_json["disk"] = json!({
"device": borg_metrics.backup_disk.device,
"health": borg_metrics.backup_disk.health,
"total_gb": borg_metrics.backup_disk.total_bytes as f32 / (1024.0 * 1024.0 * 1024.0),
"used_gb": borg_metrics.backup_disk.used_bytes as f32 / (1024.0 * 1024.0 * 1024.0),
"usage_percent": borg_metrics.backup_disk.usage_percent
});
}
let backup_metrics = backup_json;
Ok(CollectorOutput {
agent_type: AgentType::Backup,
data: backup_metrics,
@ -419,6 +436,7 @@ struct BackupInfo {
last_success: Option<DateTime<Utc>>,
last_failure: Option<DateTime<Utc>>,
size_gb: f32,
latest_archive_size_gb: Option<f32>,
snapshot_count: u32,
}
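Editor's note: the one-character change from `match borgbackup_result` to `match &borgbackup_result` is what keeps the `Result` available for the later `if let Ok(borg_metrics) = &borgbackup_result` block, since matching on a reference only borrows the value instead of moving it. A minimal standalone sketch of the same pattern (the names below are illustrative, not taken from this codebase):

fn describe(result: Result<u64, String>) -> String {
    // Matching on `&result` only borrows it; `match result` would move the
    // String out of the Err arm and make the second look below impossible.
    let summary = match &result {
        Ok(n) => format!("ok({n})"),
        Err(e) => format!("err({e})"),
    };
    // Because only a borrow happened above, `result` can still be inspected.
    if let Ok(n) = &result {
        return format!("{summary}, value {n} still usable");
    }
    summary
}

fn main() {
    println!("{}", describe(Ok(42)));
    println!("{}", describe(Err("mount failed".to_string())));
}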

View File

@ -97,6 +97,8 @@ pub struct BackupMetrics {
pub overall_status: BackupStatus,
pub backup: BackupInfo,
pub service: BackupServiceInfo,
#[serde(default)]
pub disk: Option<BackupDiskInfo>,
pub timestamp: DateTime<Utc>,
}
@ -105,6 +107,8 @@ pub struct BackupInfo {
pub last_success: Option<DateTime<Utc>>,
pub last_failure: Option<DateTime<Utc>>,
pub size_gb: f32,
#[serde(default)]
pub latest_archive_size_gb: Option<f32>,
pub snapshot_count: u32,
}
@ -115,6 +119,15 @@ pub struct BackupServiceInfo {
pub last_message: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupDiskInfo {
pub device: String,
pub health: String,
pub total_gb: f32,
pub used_gb: f32,
pub usage_percent: f32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BackupStatus {
Healthy,

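Editor's note on the two `#[serde(default)]` additions: agents built before this commit send payloads without `disk` or `latest_archive_size_gb`, and those payloads should keep deserializing with the new fields as `None`. A small self-contained sketch of that behaviour (the struct names are trimmed-down stand-ins, not the real types), assuming `serde` and `serde_json` as used elsewhere in the crate:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Metrics {
    size_gb: f32,
    #[serde(default)]
    latest_archive_size_gb: Option<f32>,
    #[serde(default)]
    disk: Option<Disk>,
}

#[derive(Debug, Deserialize)]
struct Disk {
    device: String,
    usage_percent: f32,
}

fn main() {
    // An old-format payload: the fields added in this commit are absent.
    let old_payload = r#"{ "size_gb": 12.5 }"#;
    let parsed: Metrics = serde_json::from_str(old_payload)
        .expect("payloads from older agents still deserialize");
    assert!(parsed.latest_archive_size_gb.is_none());
    assert!(parsed.disk.is_none());
    println!("{parsed:?}");
}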
View File

@ -35,60 +35,68 @@ fn render_metrics(frame: &mut Frame, _host: &HostDisplayData, metrics: &BackupMe
let mut data = WidgetData::new(
"Backups",
Some(WidgetStatus::new(widget_status)),
vec!["Aspect".to_string(), "Details".to_string()]
vec!["Backup".to_string(), "Status".to_string(), "Details".to_string()]
);
let repo_status = repo_status_level(metrics);
data.add_row(
Some(WidgetStatus::new(repo_status)),
vec![],
vec![
"Repo".to_string(),
format!(
"Snapshots: {} • Size: {:.1} GiB",
metrics.backup.snapshot_count, metrics.backup.size_gb
),
],
);
let service_status = service_status_level(metrics);
data.add_row(
Some(WidgetStatus::new(service_status)),
vec![],
vec![
"Service".to_string(),
format!(
"Enabled: {} • Pending jobs: {}",
metrics.service.enabled, metrics.service.pending_jobs
),
],
);
if let Some(last_failure) = metrics.backup.last_failure.as_ref() {
data.add_row(
Some(WidgetStatus::new(StatusLevel::Error)),
vec![],
vec![
"Last failure".to_string(),
format_timestamp(Some(last_failure)),
],
);
}
if let Some(message) = metrics.service.last_message.as_ref() {
let status_level = match metrics.overall_status {
BackupStatus::Failed => StatusLevel::Error,
BackupStatus::Warning => StatusLevel::Warning,
BackupStatus::Unknown => StatusLevel::Unknown,
BackupStatus::Healthy => StatusLevel::Ok,
// Latest backup
let (latest_status, latest_time) = if let Some(last_success) = metrics.backup.last_success.as_ref() {
let hours_ago = chrono::Utc::now().signed_duration_since(*last_success).num_hours();
let time_str = if hours_ago < 24 {
format!("{}h ago", hours_ago)
} else {
format!("{}d ago", hours_ago / 24)
};
(StatusLevel::Ok, time_str)
} else {
(StatusLevel::Warning, "Never".to_string())
};
data.add_row(
Some(WidgetStatus::new(status_level)),
Some(WidgetStatus::new(latest_status)),
vec![],
vec![
"Last message".to_string(),
message.clone(),
"Latest".to_string(),
latest_time,
format!("{:.1} GiB", metrics.backup.latest_archive_size_gb.unwrap_or(metrics.backup.size_gb)),
],
);
// Repository total
data.add_row(
Some(WidgetStatus::new(StatusLevel::Ok)),
vec![],
vec![
"Repo".to_string(),
format!("{} archives", metrics.backup.snapshot_count),
format!("{:.1} GiB total", metrics.backup.size_gb),
],
);
// Disk usage
if let Some(disk) = &metrics.disk {
let disk_status = match disk.health.as_str() {
"ok" => StatusLevel::Ok,
"failed" => StatusLevel::Error,
_ => StatusLevel::Warning,
};
data.add_row(
Some(WidgetStatus::new(disk_status)),
vec![],
vec![
"Disk usage".to_string(),
disk.health.clone(),
format!("{:.0} GB, {:.0}% used", disk.total_gb, disk.usage_percent),
],
);
} else {
data.add_row(
Some(WidgetStatus::new(StatusLevel::Unknown)),
vec![],
vec![
"Disk usage".to_string(),
"Unknown".to_string(),
"".to_string(),
],
);
}
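Editor's note: the `hours_ago`/days-ago formatting in the "Latest" row above is easy to get off by one at the 24-hour boundary. A possible extraction into a pure helper, shown here only as a sketch (this function does not exist in the repository), would make that cutoff unit-testable:

use chrono::{DateTime, Duration, Utc};

// Returns only the human-readable age string; status selection stays with the caller.
fn format_age(last_success: Option<DateTime<Utc>>, now: DateTime<Utc>) -> String {
    match last_success {
        Some(ts) => {
            let hours_ago = now.signed_duration_since(ts).num_hours();
            if hours_ago < 24 {
                format!("{hours_ago}h ago")
            } else {
                format!("{}d ago", hours_ago / 24)
            }
        }
        None => "Never".to_string(),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn switches_from_hours_to_days_at_24h() {
        let now = Utc::now();
        assert_eq!(format_age(Some(now - Duration::hours(23)), now), "23h ago");
        assert_eq!(format_age(Some(now - Duration::hours(24)), now), "1d ago");
        assert_eq!(format_age(None, now), "Never");
    }
}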