Remove unused code and eliminate compiler warnings

- Remove unused fields from CommandStatus variants
- Clean up unused methods and unused collector fields
- Fix lifetime syntax warning in SystemWidget
- Delete unused cache module completely
- Remove redundant render methods from widgets

All agent and dashboard warnings eliminated while preserving
panel switching and scrolling functionality.
This commit is contained in:
2025-10-25 14:15:52 +02:00
parent 8dd943e8f1
commit 4b54a59e35
23 changed files with 38 additions and 1410 deletions

View File

@@ -1,26 +1,21 @@
use anyhow::Result;
use cm_dashboard_shared::{Metric, StatusTracker};
use std::collections::HashMap;
use std::time::Instant;
use tracing::{debug, error, info};
use tracing::{error, info};
use crate::cache::MetricCacheManager;
use crate::collectors::{
backup::BackupCollector, cpu::CpuCollector, disk::DiskCollector, memory::MemoryCollector,
nixos::NixOSCollector, systemd::SystemdCollector, Collector,
};
use crate::config::{AgentConfig, CollectorConfig};
/// Manages all metric collectors with intelligent caching
/// Manages all metric collectors
pub struct MetricCollectionManager {
collectors: Vec<Box<dyn Collector>>,
cache_manager: MetricCacheManager,
last_collection_times: HashMap<String, Instant>,
status_tracker: StatusTracker,
}
impl MetricCollectionManager {
pub async fn new(config: &CollectorConfig, agent_config: &AgentConfig) -> Result<Self> {
pub async fn new(config: &CollectorConfig, _agent_config: &AgentConfig) -> Result<Self> {
let mut collectors: Vec<Box<dyn Collector>> = Vec::new();
// Benchmark mode - only enable specific collector based on env var
@@ -109,153 +104,37 @@ impl MetricCollectionManager {
}
}
// Initialize cache manager with configuration
let cache_manager = MetricCacheManager::new(agent_config.cache.clone());
// Start background cache tasks
cache_manager.start_background_tasks().await;
info!(
"Metric collection manager initialized with {} collectors and caching enabled",
"Metric collection manager initialized with {} collectors",
collectors.len()
);
Ok(Self {
collectors,
cache_manager,
last_collection_times: HashMap::new(),
status_tracker: StatusTracker::new(),
})
}
/// Force collection from ALL collectors immediately (used at startup)
pub async fn collect_all_metrics_force(&mut self) -> Result<Vec<Metric>> {
let mut all_metrics = Vec::new();
let now = Instant::now();
info!(
"Force collecting from ALL {} collectors for startup",
self.collectors.len()
);
// Force collection from every collector regardless of intervals
for collector in &self.collectors {
let collector_name = collector.name();
match collector.collect(&mut self.status_tracker).await {
Ok(metrics) => {
info!(
"Force collected {} metrics from {} collector",
metrics.len(),
collector_name
);
// Cache all new metrics
for metric in &metrics {
self.cache_manager.cache_metric(metric.clone()).await;
}
all_metrics.extend(metrics);
self.last_collection_times
.insert(collector_name.to_string(), now);
}
Err(e) => {
error!(
"Collector '{}' failed during force collection: {}",
collector_name, e
);
// Continue with other collectors even if one fails
}
}
}
info!(
"Force collection completed: {} total metrics cached",
all_metrics.len()
);
Ok(all_metrics)
self.collect_all_metrics().await
}
/// Collect metrics from all collectors with intelligent caching
/// Collect metrics from all collectors
pub async fn collect_all_metrics(&mut self) -> Result<Vec<Metric>> {
let mut all_metrics = Vec::new();
let now = Instant::now();
// Collecting metrics from collectors (debug logging disabled for performance)
// Keep track of which collector types we're collecting fresh data from
let mut collecting_fresh = std::collections::HashSet::new();
// For each collector, check if we need to collect based on time intervals
for collector in &self.collectors {
let collector_name = collector.name();
// Determine cache interval for this collector type based on data volatility
let cache_interval_secs = match collector_name {
"cpu" | "memory" => 5, // Fast updates for volatile metrics
"systemd" => 30, // Service status changes less frequently
"disk" => 300, // SMART data changes very slowly (5 minutes)
"backup" => 600, // Backup status changes rarely (10 minutes)
_ => 30, // Default: moderate frequency
};
let should_collect =
if let Some(last_time) = self.last_collection_times.get(collector_name) {
now.duration_since(*last_time).as_secs() >= cache_interval_secs
} else {
true // First collection
};
if should_collect {
collecting_fresh.insert(collector_name.to_string());
match collector.collect(&mut self.status_tracker).await {
Ok(metrics) => {
// Collector returned fresh metrics (debug logging disabled for performance)
// Cache all new metrics
for metric in &metrics {
self.cache_manager.cache_metric(metric.clone()).await;
}
all_metrics.extend(metrics);
self.last_collection_times
.insert(collector_name.to_string(), now);
}
Err(e) => {
error!("Collector '{}' failed: {}", collector_name, e);
// Continue with other collectors even if one fails
}
match collector.collect(&mut self.status_tracker).await {
Ok(metrics) => {
all_metrics.extend(metrics);
}
Err(e) => {
error!("Collector failed: {}", e);
}
} else {
let _elapsed = self
.last_collection_times
.get(collector_name)
.map(|t| now.duration_since(*t).as_secs())
.unwrap_or(0);
// Collector skipped (debug logging disabled for performance)
}
}
// For 2-second intervals, skip cached metrics to avoid duplicates
// (Cache system disabled for realtime updates)
// Collected metrics total (debug logging disabled for performance)
Ok(all_metrics)
}
/// Get all cached metrics from the cache manager
pub async fn get_all_cached_metrics(&self) -> Result<Vec<Metric>> {
let cached_metrics = self.cache_manager.get_all_cached_metrics().await?;
debug!(
"Retrieved {} cached metrics for broadcast",
cached_metrics.len()
);
Ok(cached_metrics)
}
pub fn get_cache_manager(&self) -> &MetricCacheManager {
&self.cache_manager
}
}