Compare commits

...

5 Commits

Author SHA1 Message Date
7362464b46 Deduplicate NFS exports and remove client info
Build and Release / build-and-release (push) Successful in 1m48s
Fix NFS export display to show each export path only once instead of
once per client. Use HashMap to deduplicate by path and sort results
alphabetically. Remove IP addresses and client specifications from
display, showing only export paths with their options.

Prevents duplicate entries when a single export is shared with multiple
clients or networks.
2025-12-11 10:06:59 +01:00
c8b79576fa Add NFS/SMB share monitoring and increase disk timeouts
Build and Release / build-and-release (push) Successful in 1m36s
Add sub-service display for NFS exports and SMB shares under their
respective services. NFS shows active exports from exportfs with
options. SMB shows configured shares from smb.conf with paths.

Increase disk operation timeouts to handle multiple drives:
- lsblk: 2s → 10s
- smartctl: 3s → 15s (critical for multi-drive systems)
- df: 2s → 10s

Prevents timeouts when querying SMART data from systems with multiple
drives (3+ data drives plus parity).
2025-12-11 09:30:06 +01:00
f53df5440b Remove 'Repo' prefix from backup header display
Build and Release / build-and-release (push) Successful in 1m10s
Simplify backup section header by removing the 'Repo' prefix and
displaying only the timestamp with status icon. Repository details
are still shown as sub-items below the timestamp.
2025-12-09 20:32:05 +01:00
d1b0e2c431 Add kB unit support for backup repository sizes
Build and Release / build-and-release (push) Successful in 1m24s
Extended size formatting to handle repositories smaller than 1MB by
displaying in kB units. Size display logic now cascades: kB for < 1MB,
MB for 1MB-1GB, GB for >= 1GB.
2025-12-09 19:51:03 +01:00
b1719a60fc Use nfs-backup.toml and support completed status
Build and Release / build-and-release (push) Successful in 1m24s
Update agent to read nfs-backup.toml instead of legacy backup-status-*.toml
files. Add support for 'completed' status string used by backup script.

Changes:
- Read nfs-backup.toml from status directory
- Match 'completed' status as Status::Ok
- Simplify file scanning logic for single NFS backup file
2025-12-09 19:37:01 +01:00
8 changed files with 169 additions and 39 deletions

Cargo.lock (generated)

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 [[package]]
 name = "cm-dashboard"
-version = "0.1.267"
+version = "0.1.271"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.267"
+version = "0.1.271"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -325,7 +325,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.267"
+version = "0.1.271"
 dependencies = [
  "chrono",
  "serde",


@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.267"
+version = "0.1.272"
 edition = "2021"
 
 [dependencies]


@@ -21,7 +21,7 @@ impl BackupCollector {
         }
     }
 
-    /// Scan directory for all backup status files
+    /// Scan directory for backup status file (nfs-backup.toml)
     async fn scan_status_files(&self) -> Result<Vec<PathBuf>, CollectorError> {
         let status_path = Path::new(&self.status_dir);
 
@@ -30,30 +30,15 @@ impl BackupCollector {
             return Ok(Vec::new());
         }
 
-        let mut status_files = Vec::new();
-
-        match fs::read_dir(status_path) {
-            Ok(entries) => {
-                for entry in entries {
-                    if let Ok(entry) = entry {
-                        let path = entry.path();
-                        if path.is_file() {
-                            if let Some(filename) = path.file_name().and_then(|n| n.to_str()) {
-                                if filename.starts_with("backup-status-") && filename.ends_with(".toml") {
-                                    status_files.push(path);
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-            Err(e) => {
-                warn!("Failed to read backup status directory: {}", e);
-                return Ok(Vec::new());
-            }
+        // Look for nfs-backup.toml (new NFS-based backup)
+        let nfs_backup_file = status_path.join("nfs-backup.toml");
+        if nfs_backup_file.exists() {
+            return Ok(vec![nfs_backup_file]);
         }
 
-        Ok(status_files)
+        // No backup status file found
+        debug!("No nfs-backup.toml found in {}", self.status_dir);
+        Ok(Vec::new())
     }
 
     /// Read a single backup status file
@@ -76,7 +61,7 @@ impl BackupCollector {
     /// Calculate backup status from TOML status field
     fn calculate_backup_status(status_str: &str) -> Status {
         match status_str.to_lowercase().as_str() {
-            "success" => Status::Ok,
+            "success" | "completed" => Status::Ok,
             "warning" => Status::Warning,
             "failed" | "error" => Status::Critical,
             _ => Status::Unknown,
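
For illustration, the relaxed status matching from b1719a60fc can be pinned down in a small test; this is a hypothetical sketch, assuming it sits in the same file as BackupCollector so the private helper is in scope:

    #[cfg(test)]
    mod tests {
        use super::*;

        #[test]
        fn status_string_mapping() {
            // "completed" (written by the backup script) now maps to Ok,
            // alongside the existing "success"; matching is case-insensitive.
            assert!(matches!(BackupCollector::calculate_backup_status("completed"), Status::Ok));
            assert!(matches!(BackupCollector::calculate_backup_status("Success"), Status::Ok));
            assert!(matches!(BackupCollector::calculate_backup_status("failed"), Status::Critical));
            assert!(matches!(BackupCollector::calculate_backup_status("paused"), Status::Unknown));
        }
    }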


@@ -114,7 +114,7 @@ impl DiskCollector {
         let mut cmd = TokioCommand::new("lsblk");
         cmd.args(&["-rn", "-o", "NAME,MOUNTPOINT"]);
 
-        let output = run_command_with_timeout(cmd, 2).await
+        let output = run_command_with_timeout(cmd, 10).await
             .map_err(|e| CollectorError::SystemRead {
                 path: "block devices".to_string(),
                 error: e.to_string(),
@@ -184,7 +184,7 @@ impl DiskCollector {
     /// Get filesystem info for a single mount point
     fn get_filesystem_info(&self, mount_point: &str) -> Result<(u64, u64), CollectorError> {
         let output = StdCommand::new("timeout")
-            .args(&["2", "df", "--block-size=1", mount_point])
+            .args(&["10", "df", "--block-size=1", mount_point])
            .output()
            .map_err(|e| CollectorError::SystemRead {
                path: format!("df {}", mount_point),
@@ -433,7 +433,7 @@ impl DiskCollector {
             cmd.args(&["-a", &format!("/dev/{}", drive_name)]);
         }
 
-        let output = run_command_with_timeout(cmd, 3).await
+        let output = run_command_with_timeout(cmd, 15).await
             .map_err(|e| CollectorError::SystemRead {
                 path: format!("SMART data for {}", drive_name),
                 error: e.to_string(),
@@ -772,7 +772,7 @@ impl DiskCollector {
     fn get_drive_info_for_path(&self, path: &str) -> anyhow::Result<PoolDrive> {
         // Use lsblk to find the backing device with timeout
         let output = StdCommand::new("timeout")
-            .args(&["2", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
+            .args(&["10", "lsblk", "-rn", "-o", "NAME,MOUNTPOINT"])
            .output()
            .map_err(|e| anyhow::anyhow!("Failed to run lsblk: {}", e))?;
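
The run_command_with_timeout helper used above is not part of this diff, so its exact signature and error type are assumptions; a minimal sketch of such a wrapper over tokio::time::timeout, taking the timeout in seconds as the second argument, might look like this:

    use std::process::Output;
    use tokio::process::Command as TokioCommand;
    use tokio::time::{timeout, Duration};

    // Sketch only: run the command, treating anything slower than `secs` as an error.
    async fn run_command_with_timeout(mut cmd: TokioCommand, secs: u64) -> anyhow::Result<Output> {
        let output = timeout(Duration::from_secs(secs), cmd.output())
            .await
            .map_err(|_| anyhow::anyhow!("command timed out after {}s", secs))??;
        Ok(output)
    }

The larger budgets matter because smartctl -a runs once per drive, so a machine with 3+ data drives plus parity can blow past the old 3s limit during a single poll.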


@@ -230,6 +230,39 @@ impl SystemdCollector {
                 }
             }
 
+            if service_name == "nfs-server" && status_info.active_state == "active" {
+                // Add NFS exports as sub-services
+                let exports = self.get_nfs_exports();
+                for (export_path, options) in exports {
+                    let metrics = Vec::new();
+                    let display = if !options.is_empty() {
+                        format!("{} ({})", export_path, options)
+                    } else {
+                        export_path
+                    };
+                    sub_services.push(SubServiceData {
+                        name: display,
+                        service_status: Status::Info,
+                        metrics,
+                        service_type: "nfs_export".to_string(),
+                    });
+                }
+            }
+
+            if service_name == "smbd" && status_info.active_state == "active" {
+                // Add SMB shares as sub-services
+                let shares = self.get_smb_shares();
+                for (share_name, share_path) in shares {
+                    let metrics = Vec::new();
+                    sub_services.push(SubServiceData {
+                        name: format!("{}: {}", share_name, share_path),
+                        service_status: Status::Info,
+                        metrics,
+                        service_type: "smb_share".to_string(),
+                    });
+                }
+            }
+
             // Create complete service data
             let service_data = ServiceData {
                 name: service_name.clone(),
@@ -1011,6 +1044,117 @@ impl SystemdCollector {
         }
     }
 
+    /// Get NFS exports from exportfs
+    /// Returns a list of (export_path, options) tuples
+    fn get_nfs_exports(&self) -> Vec<(String, String)> {
+        match Command::new("timeout")
+            .args(["2", "exportfs", "-v"])
+            .output()
+        {
+            Ok(output) if output.status.success() => {
+                let exports_output = String::from_utf8_lossy(&output.stdout);
+                let mut exports_map: std::collections::HashMap<String, String> = std::collections::HashMap::new();
+
+                for line in exports_output.lines() {
+                    let line = line.trim();
+                    if line.is_empty() || line.starts_with('#') {
+                        continue;
+                    }
+
+                    // Format: "/path/to/export hostname(options)" or "/path/to/export 192.168.1.0/24(options)"
+                    // exportfs -v shows each export once per client/network
+                    // We want to deduplicate by path and just show one entry
+                    let parts: Vec<&str> = line.split_whitespace().collect();
+                    if parts.is_empty() {
+                        continue;
+                    }
+
+                    let export_path = parts[0].to_string();
+
+                    // Extract options from parentheses (from the client specification)
+                    let options = if parts.len() > 1 {
+                        let client_spec = parts[1];
+                        if let Some(start) = client_spec.find('(') {
+                            if let Some(end) = client_spec.find(')') {
+                                client_spec[start+1..end].to_string()
+                            } else {
+                                String::new()
+                            }
+                        } else {
+                            String::new()
+                        }
+                    } else {
+                        String::new()
+                    };
+
+                    // Only store the first occurrence of each path (deduplicates multiple clients)
+                    exports_map.entry(export_path).or_insert(options);
+                }
+
+                // Convert HashMap to Vec
+                let mut exports: Vec<(String, String)> = exports_map.into_iter().collect();
+                // Sort by path for consistent display
+                exports.sort_by(|a, b| a.0.cmp(&b.0));
+                exports
+            }
+            _ => Vec::new(),
+        }
+    }
+
+    /// Get SMB shares from smb.conf
+    /// Returns a list of (share_name, share_path) tuples
+    fn get_smb_shares(&self) -> Vec<(String, String)> {
+        match std::fs::read_to_string("/etc/samba/smb.conf") {
+            Ok(config) => {
+                let mut shares = Vec::new();
+                let mut current_share: Option<String> = None;
+                let mut current_path: Option<String> = None;
+
+                for line in config.lines() {
+                    let line = line.trim();
+
+                    // Skip comments and empty lines
+                    if line.is_empty() || line.starts_with('#') || line.starts_with(';') {
+                        continue;
+                    }
+
+                    // Detect share section [sharename]
+                    if line.starts_with('[') && line.ends_with(']') {
+                        // Save previous share if we have both name and path
+                        if let (Some(name), Some(path)) = (current_share.take(), current_path.take()) {
+                            // Skip special sections
+                            if name != "global" && name != "homes" && name != "printers" {
+                                shares.push((name, path));
+                            }
+                        }
+
+                        // Start new share
+                        let share_name = line[1..line.len()-1].trim().to_string();
+                        current_share = Some(share_name);
+                        current_path = None;
+                    }
+                    // Look for path = /some/path
+                    else if line.starts_with("path") && line.contains('=') {
+                        if let Some(path_value) = line.split('=').nth(1) {
+                            current_path = Some(path_value.trim().to_string());
+                        }
+                    }
+                }
+
+                // Don't forget the last share
+                if let (Some(name), Some(path)) = (current_share, current_path) {
+                    if name != "global" && name != "homes" && name != "printers" {
+                        shares.push((name, path));
+                    }
+                }
+
+                shares
+            }
+            _ => Vec::new(),
+        }
+    }
+
     /// Get nftables open ports grouped by protocol
     /// Returns: (tcp_ports_string, udp_ports_string)
     fn get_nftables_open_ports(&self) -> (String, String) {
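
Stripped of the exportfs plumbing, the deduplication described in 7362464b46 is simply "first options string seen per path wins, then sort by path"; a standalone sketch of that core (the helper name and input lines below are hypothetical):

    use std::collections::HashMap;

    // Sketch: reduce exportfs -v style lines ("<path> <client>(<options>)") to one
    // (path, options) entry per export path, sorted by path.
    fn dedup_exports(lines: &[&str]) -> Vec<(String, String)> {
        let mut by_path: HashMap<String, String> = HashMap::new();
        for line in lines {
            let mut parts = line.split_whitespace();
            let Some(path) = parts.next() else { continue };
            let options = parts
                .next()
                .and_then(|client| {
                    let start = client.find('(')? + 1;
                    let end = client.find(')')?;
                    Some(client[start..end].to_string())
                })
                .unwrap_or_default();
            // First occurrence wins, so multiple clients collapse to one entry.
            by_path.entry(path.to_string()).or_insert(options);
        }
        let mut exports: Vec<(String, String)> = by_path.into_iter().collect();
        exports.sort_by(|a, b| a.0.cmp(&b.0));
        exports
    }

Fed two lines for the same path, e.g. "/srv/share 192.168.1.10(rw,sync)" and "/srv/share 192.168.1.11(rw,sync)", it yields a single ("/srv/share", "rw,sync") entry, which the sub-service code above would display as "/srv/share (rw,sync)".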


@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.267"
+version = "0.1.272"
 edition = "2021"
 
 [dependencies]


@@ -544,9 +544,8 @@ impl SystemWidget {
             "unknown".to_string()
         };
 
-        // Header: "Repo <complete timestamp>"
-        let repo_text = format!("Repo {}", time_display);
-        let repo_spans = StatusIcons::create_status_spans(self.backup_status, &repo_text);
+        // Header: just the timestamp
+        let repo_spans = StatusIcons::create_status_spans(self.backup_status, &time_display);
         lines.push(Line::from(repo_spans));
 
         // List all repositories with archive count and size
@@ -554,8 +553,10 @@ impl SystemWidget {
         for (idx, repo) in self.backup_repositories.iter().enumerate() {
             let tree_char = if idx == repo_count - 1 { "└─" } else { "├─" };
 
-            // Format size: use MB for < 1GB, otherwise GB
-            let size_display = if repo.repo_size_gb < 1.0 {
+            // Format size: use kB for < 1MB, MB for < 1GB, otherwise GB
+            let size_display = if repo.repo_size_gb < 0.001 {
+                format!("{:.0}kB", repo.repo_size_gb * 1024.0 * 1024.0)
+            } else if repo.repo_size_gb < 1.0 {
                 format!("{:.0}MB", repo.repo_size_gb * 1024.0)
             } else {
                 format!("{:.1}GB", repo.repo_size_gb)


@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.267"
+version = "0.1.272"
 edition = "2021"
 
 [dependencies]