Remove debug logging from disk collector
All checks were successful
Build and Release / build-and-release (push) Successful in 1m11s

Removed all debug! statements from the disk collector to reduce log noise.

Bump version to v0.1.226
Christoffer Martinsson 2025-11-30 00:44:38 +01:00
parent 0b1d8c0a73
commit c62c7fa698
5 changed files with 11 additions and 25 deletions
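
As background on the "log noise" mentioned above: the removed calls were tracing debug! events (the disk collector diff below also drops the use tracing::debug; import). Debug-level events only reach the log output when the active subscriber's filter enables them, so the sketch below is purely illustrative of that mechanism and is not code from this repository; the tracing-subscriber initialization, the env-filter feature, and the "info" default are assumptions.

    // Illustrative sketch only (assumed setup, not part of this commit):
    // with tracing-subscriber's env-filter feature, debug! events are dropped
    // unless RUST_LOG (or the default directive) enables the debug level.
    use tracing::{debug, info};
    use tracing_subscriber::EnvFilter;

    fn main() {
        // Fall back to "info" when RUST_LOG is unset, so debug! output is suppressed.
        tracing_subscriber::fmt()
            .with_env_filter(
                EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")),
            )
            .init();

        debug!("Starting clean storage collection"); // filtered out at the default level
        info!("storage collection finished");        // still emitted
    }

This commit takes the simpler route of deleting the statements outright, so the noise is gone regardless of how the subscriber is configured.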

Cargo.lock (generated)

@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 [[package]]
 name = "cm-dashboard"
-version = "0.1.224"
+version = "0.1.225"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.224"
+version = "0.1.225"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -325,7 +325,7 @@ dependencies = [
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.224"
+version = "0.1.225"
 dependencies = [
  "chrono",
  "serde",

Cargo.toml (cm-dashboard-agent)

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.225"
+version = "0.1.226"
 edition = "2021"
 
 [dependencies]

Disk collector source

@@ -5,9 +5,7 @@ use cm_dashboard_shared::{AgentData, DriveData, FilesystemData, PoolData, Hyster
 use crate::config::DiskConfig;
 use tokio::process::Command as TokioCommand;
 use std::process::Command as StdCommand;
-use std::time::Instant;
 use std::collections::HashMap;
-use tracing::debug;
 use super::{Collector, CollectorError};
@@ -68,9 +66,6 @@
     /// Collect all storage data and populate AgentData
     async fn collect_storage_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
-        let start_time = Instant::now();
-        debug!("Starting clean storage collection");
         // Step 1: Get mount points and their backing devices
         let mount_devices = self.get_mount_devices().await?;
@@ -105,9 +100,6 @@
         self.populate_drives_data(&physical_drives, &smart_data, agent_data)?;
         self.populate_pools_data(&mergerfs_pools, &smart_data, agent_data)?;
-        let elapsed = start_time.elapsed();
-        debug!("Storage collection completed in {:?}", elapsed);
         Ok(())
     }
@@ -142,7 +134,6 @@
             }
         }
-        debug!("Found {} mounted block devices", mount_devices.len());
         Ok(mount_devices)
     }
@@ -155,8 +146,8 @@
                 Ok((total, used)) => {
                     filesystem_usage.insert(mount_point.clone(), (total, used));
                 }
-                Err(e) => {
-                    debug!("Failed to get filesystem info for {}: {}", mount_point, e);
+                Err(_e) => {
+                    // Silently skip filesystems we can't read
                 }
             }
         }
@@ -177,8 +168,6 @@
             // Only add if we don't already have usage data for this mount point
             if !filesystem_usage.contains_key(&mount_point) {
                 if let Ok((total, used)) = self.get_filesystem_info(&mount_point) {
-                    debug!("Added MergerFS filesystem usage for {}: {}GB total, {}GB used",
-                        mount_point, total as f32 / (1024.0 * 1024.0 * 1024.0), used as f32 / (1024.0 * 1024.0 * 1024.0));
                     filesystem_usage.insert(mount_point, (total, used));
                 }
             }
@@ -253,9 +242,8 @@
             } else {
                 mount_point.trim_start_matches('/').replace('/', "_")
             };
             if pool_name.is_empty() {
-                debug!("Skipping mergerfs pool with empty name: {}", mount_point);
                 continue;
             }
@@ -283,8 +271,7 @@
             // Categorize as data vs parity drives
             let (data_drives, parity_drives) = match self.categorize_pool_drives(&all_member_paths) {
                 Ok(drives) => drives,
-                Err(e) => {
-                    debug!("Failed to categorize drives for pool {}: {}. Skipping.", mount_point, e);
+                Err(_e) => {
                     continue;
                 }
             };
@@ -299,8 +286,7 @@
                 });
             }
         }
-        debug!("Found {} mergerfs pools", pools.len());
         Ok(pools)
     }

Cargo.toml (cm-dashboard)

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.225"
+version = "0.1.226"
 edition = "2021"
 
 [dependencies]

Cargo.toml (cm-dashboard-shared)

@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.225"
+version = "0.1.226"
 edition = "2021"
 
 [dependencies]