From 14618c59c61b2f8d731697f01b8388ace825a809 Mon Sep 17 00:00:00 2001
From: Christoffer Martinsson
Date: Thu, 27 Nov 2025 22:45:44 +0100
Subject: [PATCH] Fix data duplication in cached collector architecture

Critical bug fix: collectors were appending to Vecs instead of replacing
them, causing duplicate entries with each collection cycle.

Fixed by adding .clear() calls before populating:
- Memory collector: tmpfs Vec (was showing 11+ duplicates)
- Disk collector: drives and pools Vecs
- Systemd collector: services Vec
- Network collector: already correct (assigns a new Vec)

This prevents duplicate entries from accumulating without bound in the
dashboard UI.
---
 agent/src/collectors/disk.rs    | 6 ++++++
 agent/src/collectors/memory.rs  | 5 ++++-
 agent/src/collectors/systemd.rs | 3 +++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/agent/src/collectors/disk.rs b/agent/src/collectors/disk.rs
index 588bec8..dfbd5fa 100644
--- a/agent/src/collectors/disk.rs
+++ b/agent/src/collectors/disk.rs
@@ -530,6 +530,9 @@ impl DiskCollector {
 
     /// Populate drives data into AgentData
     fn populate_drives_data(&self, physical_drives: &[PhysicalDrive], smart_data: &HashMap, agent_data: &mut AgentData) -> Result<(), CollectorError> {
+        // Clear existing drives data to prevent duplicates in cached architecture
+        agent_data.system.storage.drives.clear();
+
         for drive in physical_drives {
             let smart = smart_data.get(&drive.name);
 
@@ -567,6 +570,9 @@ impl DiskCollector {
 
     /// Populate pools data into AgentData
     fn populate_pools_data(&self, mergerfs_pools: &[MergerfsPool], smart_data: &HashMap, agent_data: &mut AgentData) -> Result<(), CollectorError> {
+        // Clear existing pools data to prevent duplicates in cached architecture
+        agent_data.system.storage.pools.clear();
+
         for pool in mergerfs_pools {
             // Calculate pool health and statuses based on member drive health
             let (pool_health, health_status, usage_status, data_drive_data, parity_drive_data) = self.calculate_pool_health(pool, smart_data);
diff --git a/agent/src/collectors/memory.rs b/agent/src/collectors/memory.rs
index e186704..9151ee2 100644
--- a/agent/src/collectors/memory.rs
+++ b/agent/src/collectors/memory.rs
@@ -97,9 +97,12 @@ impl MemoryCollector {
 
     /// Populate tmpfs data into AgentData
    async fn populate_tmpfs_data(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
+        // Clear existing tmpfs data to prevent duplicates in cached architecture
+        agent_data.system.memory.tmpfs.clear();
+
         // Discover all tmpfs mount points
         let tmpfs_mounts = self.discover_tmpfs_mounts()?;
-        
+
         if tmpfs_mounts.is_empty() {
             debug!("No tmpfs mounts found to monitor");
             return Ok(());
diff --git a/agent/src/collectors/systemd.rs b/agent/src/collectors/systemd.rs
index 9ba8663..4e17cf4 100644
--- a/agent/src/collectors/systemd.rs
+++ b/agent/src/collectors/systemd.rs
@@ -915,6 +915,9 @@ impl SystemdCollector {
 #[async_trait]
 impl Collector for SystemdCollector {
     async fn collect_structured(&self, agent_data: &mut AgentData) -> Result<(), CollectorError> {
+        // Clear existing services data to prevent duplicates in cached architecture
+        agent_data.services.clear();
+
         // Use cached complete data if available and fresh
         if let Some(cached_complete_services) = self.get_cached_complete_services() {
             for service_data in cached_complete_services {