Remove unused code and eliminate compiler warnings
- Remove unused fields from CommandStatus variants
- Clean up unused methods and unused collector fields
- Fix lifetime syntax warning in SystemWidget
- Delete unused cache module completely
- Remove redundant render methods from widgets

All agent and dashboard warnings eliminated while preserving panel switching and scrolling functionality.
@@ -86,9 +86,9 @@ impl Agent {
                 }
             }
             _ = transmission_interval.tick() => {
-                // Send all cached metrics via ZMQ every 1 second
-                if let Err(e) = self.broadcast_all_cached_metrics().await {
-                    error!("Failed to broadcast cached metrics: {}", e);
+                // Send all metrics via ZMQ every 1 second
+                if let Err(e) = self.broadcast_all_metrics().await {
+                    error!("Failed to broadcast metrics: {}", e);
                 }
             }
             _ = notification_interval.tick() => {
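For orientation, the loop this hunk edits has roughly the following shape. This is a minimal, self-contained sketch: the interval names and broadcast_all_metrics come from the diff, while the stub Agent, the tick periods, and the String error type are illustrative assumptions.

use std::time::Duration;
use tokio::time::interval;

// Stub agent; only the select!-loop shape is the point here.
struct Agent;

impl Agent {
    async fn broadcast_all_metrics(&mut self) -> Result<(), String> {
        // The real agent collects metrics and publishes them over ZMQ here.
        Ok(())
    }

    async fn run(&mut self) {
        let mut transmission_interval = interval(Duration::from_secs(1));
        let mut notification_interval = interval(Duration::from_secs(60));
        loop {
            tokio::select! {
                _ = transmission_interval.tick() => {
                    // Send all metrics every 1 second (previously: cached metrics)
                    if let Err(e) = self.broadcast_all_metrics().await {
                        eprintln!("Failed to broadcast metrics: {}", e);
                    }
                }
                _ = notification_interval.tick() => {
                    // Batched notifications are flushed on their own cadence.
                }
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let mut agent = Agent;
    agent.run().await; // runs until the process is killed, as a daemon does
}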
@@ -152,34 +152,34 @@ impl Agent {
         Ok(())
     }
 
-    async fn broadcast_all_cached_metrics(&mut self) -> Result<()> {
-        debug!("Broadcasting all cached metrics via ZMQ");
+    async fn broadcast_all_metrics(&mut self) -> Result<()> {
+        debug!("Broadcasting all metrics via ZMQ");
 
-        // Get all cached metrics from the metric manager
-        let mut cached_metrics = self.metric_manager.get_all_cached_metrics().await?;
+        // Get all current metrics from collectors
+        let mut metrics = self.metric_manager.collect_all_metrics().await?;
 
         // Add the host status summary metric from status manager
         let host_status_metric = self.host_status_manager.get_host_status_metric();
-        cached_metrics.push(host_status_metric);
+        metrics.push(host_status_metric);
 
-        if cached_metrics.is_empty() {
-            debug!("No cached metrics to broadcast");
+        if metrics.is_empty() {
+            debug!("No metrics to broadcast");
             return Ok(());
         }
 
-        debug!("Broadcasting {} cached metrics (including host status summary)", cached_metrics.len());
+        debug!("Broadcasting {} metrics (including host status summary)", metrics.len());
 
-        // Create and send message with all cached data
-        let message = MetricMessage::new(self.hostname.clone(), cached_metrics);
+        // Create and send message with all current data
+        let message = MetricMessage::new(self.hostname.clone(), metrics);
         self.zmq_handler.publish_metrics(&message).await?;
 
-        debug!("Cached metrics broadcasted successfully");
+        debug!("Metrics broadcasted successfully");
         Ok(())
     }
 
     async fn process_metrics(&mut self, metrics: &[Metric]) {
         for metric in metrics {
-            self.host_status_manager.process_metric(metric, &mut self.notification_manager, self.metric_manager.get_cache_manager()).await;
+            self.host_status_manager.process_metric(metric, &mut self.notification_manager).await;
         }
     }
 
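Pieced together from the + lines, the post-change broadcast path collects fresh metrics, appends the host status summary, and skips the publish when there is nothing to send. A compilable distillation of that flow (Metric and MetricMessage are stand-ins for the cm_dashboard_shared types; the ZMQ publish itself is omitted):

#[derive(Clone, Debug)]
struct Metric {
    name: String,
}

struct MetricMessage {
    hostname: String,
    metrics: Vec<Metric>,
}

impl MetricMessage {
    fn new(hostname: String, metrics: Vec<Metric>) -> Self {
        Self { hostname, metrics }
    }
}

// Mirrors broadcast_all_metrics: append host status, bail out if empty, build message.
fn build_broadcast(hostname: &str, mut metrics: Vec<Metric>, host_status: Metric) -> Option<MetricMessage> {
    metrics.push(host_status);
    if metrics.is_empty() {
        return None;
    }
    Some(MetricMessage::new(hostname.to_string(), metrics))
}

fn main() {
    let msg = build_broadcast(
        "myhost",
        vec![Metric { name: "cpu_load".into() }],
        Metric { name: "host_status".into() },
    )
    .expect("at least the host status metric is present");
    println!("would publish {} metrics from {}", msg.metrics.len(), msg.hostname);
}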
10 agent/src/cache/cached_metric.rs vendored
@@ -1,10 +0,0 @@
-use cm_dashboard_shared::Metric;
-use std::time::Instant;
-
-/// A cached metric with metadata
-#[derive(Debug, Clone)]
-pub struct CachedMetric {
-    pub metric: Metric,
-    pub collected_at: Instant,
-    pub access_count: u64,
-}
33 agent/src/cache/manager.rs vendored
@@ -1,33 +0,0 @@
-use super::ConfigurableCache;
-use cm_dashboard_shared::{CacheConfig, Metric};
-use std::sync::Arc;
-use tracing::info;
-
-/// Manages metric caching with background tasks
-pub struct MetricCacheManager {
-    cache: Arc<ConfigurableCache>,
-}
-
-impl MetricCacheManager {
-    pub fn new(config: CacheConfig) -> Self {
-        let cache = Arc::new(ConfigurableCache::new(config.clone()));
-
-        Self { cache }
-    }
-
-    /// Start background cache management tasks
-    pub async fn start_background_tasks(&self) {
-        // Temporarily disabled to isolate CPU usage issue
-        info!("Cache manager background tasks disabled for debugging");
-    }
-
-    /// Store metric in cache
-    pub async fn cache_metric(&self, metric: Metric) {
-        self.cache.store_metric(metric).await;
-    }
-
-    /// Get all cached metrics (including expired ones) for broadcasting
-    pub async fn get_all_cached_metrics(&self) -> Vec<Metric> {
-        self.cache.get_all_cached_metrics().await
-    }
-}
94 agent/src/cache/mod.rs vendored
@@ -1,94 +0,0 @@
-use cm_dashboard_shared::{CacheConfig, Metric};
-use std::collections::HashMap;
-use std::fs;
-use std::path::Path;
-use std::sync::Arc;
-use tokio::sync::RwLock;
-use tracing::{info, warn, debug};
-
-/// Simple persistent cache for metrics
-pub struct SimpleCache {
-    metrics: RwLock<HashMap<String, Metric>>,
-    persist_path: String,
-}
-
-impl SimpleCache {
-    pub fn new(config: CacheConfig) -> Self {
-        let cache = Self {
-            metrics: RwLock::new(HashMap::new()),
-            persist_path: config.persist_path,
-        };
-
-        // Clear cache file on startup to ensure fresh data
-        cache.clear_cache_file();
-        cache
-    }
-
-    /// Store metric in cache
-    pub async fn store_metric(&self, metric: Metric) {
-        let mut metrics = self.metrics.write().await;
-        metrics.insert(metric.name.clone(), metric);
-    }
-
-    /// Get all cached metrics
-    pub async fn get_all_cached_metrics(&self) -> Vec<Metric> {
-        let metrics = self.metrics.read().await;
-        metrics.values().cloned().collect()
-    }
-
-    /// Save cache to disk
-    pub async fn save_to_disk(&self) {
-        // Cache persistence disabled to prevent stale data issues during debugging
-        debug!("Cache persistence disabled - not saving to disk");
-    }
-
-    /// Load cache from disk (DISABLED)
-    fn load_from_disk(&self) {
-        // Cache loading disabled to prevent stale data issues during debugging
-        info!("Cache loading disabled - starting with fresh cache");
-    }
-
-    /// Clear cache file on startup to ensure fresh data
-    fn clear_cache_file(&self) {
-        if Path::new(&self.persist_path).exists() {
-            match fs::remove_file(&self.persist_path) {
-                Ok(_) => info!("Cleared cache file {} on startup", self.persist_path),
-                Err(e) => warn!("Failed to clear cache file {}: {}", self.persist_path, e),
-            }
-        }
-    }
-}
-
-
-#[derive(Clone)]
-pub struct MetricCacheManager {
-    cache: Arc<SimpleCache>,
-}
-
-impl MetricCacheManager {
-    pub fn new(config: CacheConfig) -> Self {
-        Self {
-            cache: Arc::new(SimpleCache::new(config)),
-        }
-    }
-
-    pub async fn store_metric(&self, metric: Metric) {
-        self.cache.store_metric(metric).await;
-    }
-
-    pub async fn cache_metric(&self, metric: Metric) {
-        self.store_metric(metric).await;
-    }
-
-    pub async fn start_background_tasks(&self) {
-        // No background tasks needed for simple cache
-    }
-
-    pub async fn get_all_cached_metrics(&self) -> Result<Vec<Metric>, anyhow::Error> {
-        Ok(self.cache.get_all_cached_metrics().await)
-    }
-
-    pub async fn save_to_disk(&self) {
-        self.cache.save_to_disk().await;
-    }
-}
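The deleted SimpleCache reduces to a name-keyed map behind an async RwLock, so repeated stores under one metric name keep only the latest value. A runnable distillation of that pattern (String stands in for the real Metric type; persistence, which the deleted file had already disabled, is left out):

use std::collections::HashMap;
use tokio::sync::RwLock;

// Distilled version of the deleted SimpleCache: last write wins per metric name.
struct SimpleCache {
    metrics: RwLock<HashMap<String, String>>,
}

impl SimpleCache {
    fn new() -> Self {
        Self { metrics: RwLock::new(HashMap::new()) }
    }

    async fn store_metric(&self, name: &str, value: &str) {
        self.metrics.write().await.insert(name.to_string(), value.to_string());
    }

    async fn get_all_cached_metrics(&self) -> Vec<String> {
        self.metrics.read().await.values().cloned().collect()
    }
}

#[tokio::main]
async fn main() {
    let cache = SimpleCache::new();
    cache.store_metric("cpu_load", "0.42").await;
    cache.store_metric("cpu_load", "0.37").await; // overwrites: only the latest survives
    assert_eq!(cache.get_all_cached_metrics().await.len(), 1);
}

Since the agent now broadcasts freshly collected metrics on every tick, this layer only added a staleness window, which is why the module could be deleted outright.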
@@ -107,9 +107,6 @@ impl BackupCollector {
 
 #[async_trait]
 impl Collector for BackupCollector {
-    fn name(&self) -> &str {
-        "backup"
-    }
 
     async fn collect(&self, _status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError> {
         let backup_status_option = self.read_backup_status().await?;
@@ -15,7 +15,6 @@ use crate::config::CpuConfig;
 /// - No process spawning
 /// - <0.1ms collection time target
 pub struct CpuCollector {
-    name: String,
     load_thresholds: HysteresisThresholds,
     temperature_thresholds: HysteresisThresholds,
 }
@@ -34,7 +33,6 @@ impl CpuCollector {
         );
 
         Self {
-            name: "cpu".to_string(),
             load_thresholds,
             temperature_thresholds,
         }
@@ -197,9 +195,6 @@ impl CpuCollector {
 
 #[async_trait]
 impl Collector for CpuCollector {
-    fn name(&self) -> &str {
-        &self.name
-    }
 
     async fn collect(&self, status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError> {
         debug!("Collecting CPU metrics");
@@ -325,33 +325,6 @@ impl DiskCollector {
         Some(device_name.to_string())
     }
 
-    /// Get directory size using du command (efficient for single directory)
-    fn get_directory_size(&self, path: &str) -> Result<u64> {
-        let output = Command::new("du")
-            .arg("-s")
-            .arg("--block-size=1")
-            .arg(path)
-            .output()?;
-
-        // du returns success even with permission denied warnings in stderr
-        // We only care if the command completely failed or produced no stdout
-        let output_str = String::from_utf8(output.stdout)?;
-
-        if output_str.trim().is_empty() {
-            return Err(anyhow::anyhow!(
-                "du command produced no output for {}",
-                path
-            ));
-        }
-
-        let size_str = output_str
-            .split_whitespace()
-            .next()
-            .ok_or_else(|| anyhow::anyhow!("Failed to parse du output"))?;
-
-        let size_bytes = size_str.parse::<u64>()?;
-        Ok(size_bytes)
-    }
-
     /// Get filesystem info using df command
     fn get_filesystem_info(&self, path: &str) -> Result<(u64, u64)> {
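For reference, the removed helper's approach as a standalone program: shell out to du, whose first whitespace-separated output field is the byte count. A sketch with condensed error handling; the /tmp path in main is just an example.

use std::process::Command;

// Directory size in bytes via `du -s --block-size=1 <path>`, as the removed
// DiskCollector helper did. du prints "<bytes>\t<path>" on stdout.
fn get_directory_size(path: &str) -> Result<u64, Box<dyn std::error::Error>> {
    let output = Command::new("du")
        .arg("-s")
        .arg("--block-size=1")
        .arg(path)
        .output()?;

    // du may warn on stderr (e.g. permission denied) yet still print a total,
    // so only an empty stdout is treated as failure.
    let stdout = String::from_utf8(output.stdout)?;
    let size_str = stdout
        .split_whitespace()
        .next()
        .ok_or("du command produced no output")?;
    Ok(size_str.parse::<u64>()?)
}

fn main() {
    match get_directory_size("/tmp") {
        Ok(bytes) => println!("/tmp uses {} bytes", bytes),
        Err(e) => eprintln!("du failed: {}", e),
    }
}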
@@ -382,23 +355,6 @@ impl DiskCollector {
         Ok((total_bytes, used_bytes))
     }
 
-    /// Calculate status based on usage percentage
-    fn calculate_usage_status(&self, used_bytes: u64, total_bytes: u64) -> Status {
-        if total_bytes == 0 {
-            return Status::Unknown;
-        }
-
-        let usage_percent = (used_bytes as f64 / total_bytes as f64) * 100.0;
-
-        // Thresholds for disk usage
-        if usage_percent >= 95.0 {
-            Status::Critical
-        } else if usage_percent >= 85.0 {
-            Status::Warning
-        } else {
-            Status::Ok
-        }
-    }
-
     /// Parse size string (e.g., "120G", "45M") to GB value
     fn parse_size_to_gb(&self, size_str: &str) -> f32 {
@@ -435,9 +391,6 @@ impl DiskCollector {
 
 #[async_trait]
 impl Collector for DiskCollector {
-    fn name(&self) -> &str {
-        "disk"
-    }
 
     async fn collect(&self, status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError> {
         let start_time = Instant::now();
@@ -15,7 +15,6 @@ use crate::config::MemoryConfig;
 /// - No regex or complex parsing
 /// - <0.1ms collection time target
 pub struct MemoryCollector {
-    name: String,
     usage_thresholds: HysteresisThresholds,
 }
 
@@ -42,7 +41,6 @@ impl MemoryCollector {
         );
 
         Self {
-            name: "memory".to_string(),
             usage_thresholds,
         }
     }
@@ -284,9 +282,6 @@ impl MemoryCollector {
 
 #[async_trait]
 impl Collector for MemoryCollector {
-    fn name(&self) -> &str {
-        &self.name
-    }
 
     async fn collect(&self, status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError> {
         debug!("Collecting memory metrics");
@@ -16,9 +16,6 @@ pub use error::CollectorError;
 /// Base trait for all collectors with extreme efficiency requirements
 #[async_trait]
 pub trait Collector: Send + Sync {
-    /// Name of this collector
-    fn name(&self) -> &str;
-
     /// Collect all metrics this collector provides
     async fn collect(&self, status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError>;
 
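Assuming the three dropped lines are the name() accessor (an inference, but it lines up with the per-collector hunks above and with the simplified manager below, which no longer calls collector.name()), the trait keeps only its collect entry point. A minimal self-contained sketch of the resulting shape using the same async_trait pattern; the Metric, CollectorError, and StatusTracker stand-ins are illustrative:

use async_trait::async_trait;

// Stand-ins for the shared crate's types.
type Metric = String;
#[derive(Debug)]
struct CollectorError;
struct StatusTracker;

// After the cleanup the trait is reduced to the one method callers still use.
#[async_trait]
trait Collector: Send + Sync {
    /// Collect all metrics this collector provides
    async fn collect(&self, status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError>;
}

struct CpuCollector;

#[async_trait]
impl Collector for CpuCollector {
    async fn collect(&self, _status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError> {
        Ok(vec!["cpu_load=0.42".to_string()])
    }
}

#[tokio::main]
async fn main() {
    let collectors: Vec<Box<dyn Collector>> = vec![Box::new(CpuCollector)];
    let mut tracker = StatusTracker;
    for collector in &collectors {
        println!("{:?}", collector.collect(&mut tracker).await);
    }
}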
@@ -12,12 +12,11 @@ use crate::config::NixOSConfig;
 /// - NixOS version and build information
 /// - Currently active/logged in users
 pub struct NixOSCollector {
-    config: NixOSConfig,
 }
 
 impl NixOSCollector {
-    pub fn new(config: NixOSConfig) -> Self {
-        Self { config }
+    pub fn new(_config: NixOSConfig) -> Self {
+        Self {}
     }
 
     /// Get NixOS build information
@@ -116,9 +115,6 @@ impl NixOSCollector {
 
 #[async_trait]
 impl Collector for NixOSCollector {
-    fn name(&self) -> &str {
-        "nixos"
-    }
 
     async fn collect(&self, _status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError> {
         debug!("Collecting NixOS system information");
@@ -42,7 +42,6 @@ struct ServiceStatusInfo {
     load_state: String,
     active_state: String,
     sub_state: String,
-    description: String,
 }
 
 impl SystemdCollector {
@@ -170,18 +169,12 @@ impl SystemdCollector {
         let load_state = fields.get(1).unwrap_or(&"unknown").to_string();
         let active_state = fields.get(2).unwrap_or(&"unknown").to_string();
         let sub_state = fields.get(3).unwrap_or(&"unknown").to_string();
-        let description = if fields.len() > 4 {
-            fields[4..].join(" ")
-        } else {
-            "".to_string()
-        };
 
         // Cache the status information
         status_cache.insert(service_name.to_string(), ServiceStatusInfo {
             load_state: load_state.clone(),
             active_state: active_state.clone(),
             sub_state: sub_state.clone(),
-            description,
         });
 
         all_service_names.insert(service_name.to_string());
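The parser splits each systemctl list-units style row on whitespace and indexes the LOAD/ACTIVE/SUB columns; the description tail (fields[4..]) is what this commit stops caching. A self-contained sketch of that field extraction (the sample line in main is illustrative):

// Parse one `systemctl list-units --no-legend` style row:
// UNIT LOAD ACTIVE SUB DESCRIPTION...
fn parse_unit_line(line: &str) -> Option<(String, String, String, String)> {
    let fields: Vec<&str> = line.split_whitespace().collect();
    let unit = fields.first()?.to_string();
    let load_state = fields.get(1).unwrap_or(&"unknown").to_string();
    let active_state = fields.get(2).unwrap_or(&"unknown").to_string();
    let sub_state = fields.get(3).unwrap_or(&"unknown").to_string();
    // The description tail (fields[4..]) is no longer kept.
    Some((unit, load_state, active_state, sub_state))
}

fn main() {
    let line = "sshd.service loaded active running OpenSSH server daemon";
    let parsed = parse_unit_line(line).unwrap();
    assert_eq!(parsed.3, "running");
    println!("{:?}", parsed);
}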
@@ -432,9 +425,6 @@ impl SystemdCollector {
 
 #[async_trait]
 impl Collector for SystemdCollector {
-    fn name(&self) -> &str {
-        "systemd"
-    }
 
     async fn collect(&self, _status_tracker: &mut StatusTracker) -> Result<Vec<Metric>, CollectorError> {
         let start_time = Instant::now();
@@ -4,7 +4,6 @@ use tracing::{error, info};
 use tracing_subscriber::EnvFilter;
 
 mod agent;
-mod cache;
 mod collectors;
 mod communication;
 mod config;
@@ -1,26 +1,21 @@
 use anyhow::Result;
 use cm_dashboard_shared::{Metric, StatusTracker};
-use std::collections::HashMap;
-use std::time::Instant;
-use tracing::{debug, error, info};
+use tracing::{error, info};
 
-use crate::cache::MetricCacheManager;
 use crate::collectors::{
     backup::BackupCollector, cpu::CpuCollector, disk::DiskCollector, memory::MemoryCollector,
     nixos::NixOSCollector, systemd::SystemdCollector, Collector,
 };
 use crate::config::{AgentConfig, CollectorConfig};
 
-/// Manages all metric collectors with intelligent caching
+/// Manages all metric collectors
 pub struct MetricCollectionManager {
     collectors: Vec<Box<dyn Collector>>,
-    cache_manager: MetricCacheManager,
-    last_collection_times: HashMap<String, Instant>,
     status_tracker: StatusTracker,
 }
 
 impl MetricCollectionManager {
-    pub async fn new(config: &CollectorConfig, agent_config: &AgentConfig) -> Result<Self> {
+    pub async fn new(config: &CollectorConfig, _agent_config: &AgentConfig) -> Result<Self> {
         let mut collectors: Vec<Box<dyn Collector>> = Vec::new();
 
         // Benchmark mode - only enable specific collector based on env var
@@ -109,153 +104,37 @@ impl MetricCollectionManager {
             }
         }
 
-        // Initialize cache manager with configuration
-        let cache_manager = MetricCacheManager::new(agent_config.cache.clone());
-
-        // Start background cache tasks
-        cache_manager.start_background_tasks().await;
-
         info!(
-            "Metric collection manager initialized with {} collectors and caching enabled",
+            "Metric collection manager initialized with {} collectors",
             collectors.len()
         );
 
         Ok(Self {
             collectors,
-            cache_manager,
-            last_collection_times: HashMap::new(),
             status_tracker: StatusTracker::new(),
         })
     }
 
     /// Force collection from ALL collectors immediately (used at startup)
     pub async fn collect_all_metrics_force(&mut self) -> Result<Vec<Metric>> {
-        let mut all_metrics = Vec::new();
-        let now = Instant::now();
-
-        info!(
-            "Force collecting from ALL {} collectors for startup",
-            self.collectors.len()
-        );
-
-        // Force collection from every collector regardless of intervals
-        for collector in &self.collectors {
-            let collector_name = collector.name();
-
-            match collector.collect(&mut self.status_tracker).await {
-                Ok(metrics) => {
-                    info!(
-                        "Force collected {} metrics from {} collector",
-                        metrics.len(),
-                        collector_name
-                    );
-
-                    // Cache all new metrics
-                    for metric in &metrics {
-                        self.cache_manager.cache_metric(metric.clone()).await;
-                    }
-
-                    all_metrics.extend(metrics);
-                    self.last_collection_times
-                        .insert(collector_name.to_string(), now);
-                }
-                Err(e) => {
-                    error!(
-                        "Collector '{}' failed during force collection: {}",
-                        collector_name, e
-                    );
-                    // Continue with other collectors even if one fails
-                }
-            }
-        }
-
-        info!(
-            "Force collection completed: {} total metrics cached",
-            all_metrics.len()
-        );
-        Ok(all_metrics)
+        self.collect_all_metrics().await
     }
 
-    /// Collect metrics from all collectors with intelligent caching
+    /// Collect metrics from all collectors
     pub async fn collect_all_metrics(&mut self) -> Result<Vec<Metric>> {
         let mut all_metrics = Vec::new();
-        let now = Instant::now();
-
-        // Collecting metrics from collectors (debug logging disabled for performance)
-
-        // Keep track of which collector types we're collecting fresh data from
-        let mut collecting_fresh = std::collections::HashSet::new();
 
-        // For each collector, check if we need to collect based on time intervals
         for collector in &self.collectors {
-            let collector_name = collector.name();
-
-            // Determine cache interval for this collector type based on data volatility
-            let cache_interval_secs = match collector_name {
-                "cpu" | "memory" => 5, // Fast updates for volatile metrics
-                "systemd" => 30,       // Service status changes less frequently
-                "disk" => 300,         // SMART data changes very slowly (5 minutes)
-                "backup" => 600,       // Backup status changes rarely (10 minutes)
-                _ => 30,               // Default: moderate frequency
-            };
-
-            let should_collect =
-                if let Some(last_time) = self.last_collection_times.get(collector_name) {
-                    now.duration_since(*last_time).as_secs() >= cache_interval_secs
-                } else {
-                    true // First collection
-                };
-
-            if should_collect {
-                collecting_fresh.insert(collector_name.to_string());
-                match collector.collect(&mut self.status_tracker).await {
-                    Ok(metrics) => {
-                        // Collector returned fresh metrics (debug logging disabled for performance)
-
-                        // Cache all new metrics
-                        for metric in &metrics {
-                            self.cache_manager.cache_metric(metric.clone()).await;
-                        }
-
-                        all_metrics.extend(metrics);
-                        self.last_collection_times
-                            .insert(collector_name.to_string(), now);
-                    }
-                    Err(e) => {
-                        error!("Collector '{}' failed: {}", collector_name, e);
-                        // Continue with other collectors even if one fails
-                    }
+            match collector.collect(&mut self.status_tracker).await {
+                Ok(metrics) => {
+                    all_metrics.extend(metrics);
                 }
-            } else {
-                let _elapsed = self
-                    .last_collection_times
-                    .get(collector_name)
-                    .map(|t| now.duration_since(*t).as_secs())
-                    .unwrap_or(0);
-                // Collector skipped (debug logging disabled for performance)
+                Err(e) => {
+                    error!("Collector failed: {}", e);
+                }
             }
         }
 
-        // For 2-second intervals, skip cached metrics to avoid duplicates
-        // (Cache system disabled for realtime updates)
-
-        // Collected metrics total (debug logging disabled for performance)
         Ok(all_metrics)
     }
-
-
-    /// Get all cached metrics from the cache manager
-    pub async fn get_all_cached_metrics(&self) -> Result<Vec<Metric>> {
-        let cached_metrics = self.cache_manager.get_all_cached_metrics().await?;
-        debug!(
-            "Retrieved {} cached metrics for broadcast",
-            cached_metrics.len()
-        );
-        Ok(cached_metrics)
-    }
-
-    pub fn get_cache_manager(&self) -> &MetricCacheManager {
-        &self.cache_manager
-    }
 }
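After this hunk, every call hits every collector: the per-collector interval gating, the collecting_fresh bookkeeping, and the cache handle are all gone. The surviving control flow as a compilable sketch with stub types (only the loop shape mirrors the + lines; the concrete types are illustrative):

// Stand-ins for the crate's types; only the control flow is the point.
type Metric = String;
struct StatusTracker;
struct StubCollector; // the real manager holds Vec<Box<dyn Collector>>

impl StubCollector {
    async fn collect(&self, _tracker: &mut StatusTracker) -> Result<Vec<Metric>, String> {
        Ok(vec!["cpu_load=0.42".into()])
    }
}

struct MetricCollectionManager {
    collectors: Vec<StubCollector>,
    status_tracker: StatusTracker,
}

impl MetricCollectionManager {
    // Every call now queries every collector; failures are logged and skipped.
    async fn collect_all_metrics(&mut self) -> Result<Vec<Metric>, String> {
        let mut all_metrics = Vec::new();
        for collector in &self.collectors {
            match collector.collect(&mut self.status_tracker).await {
                Ok(metrics) => all_metrics.extend(metrics),
                Err(e) => eprintln!("Collector failed: {}", e),
            }
        }
        Ok(all_metrics)
    }
}

#[tokio::main]
async fn main() {
    let mut mgr = MetricCollectionManager {
        collectors: vec![StubCollector],
        status_tracker: StatusTracker,
    };
    println!("{:?}", mgr.collect_all_metrics().await);
}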
@@ -70,7 +70,7 @@ impl HostStatusManager {
 
     /// Update the status of a specific service and recalculate host status
     /// Updates real-time status and buffers changes for email notifications
-    pub fn update_service_status(&mut self, service: String, status: Status, cache_manager: Option<&crate::cache::MetricCacheManager>) {
+    pub fn update_service_status(&mut self, service: String, status: Status) {
         if !self.config.enabled {
             return;
         }
@@ -82,14 +82,6 @@ impl HostStatusManager {
             return;
         }
 
-        // Save cache when status changes (clone cache manager reference for async)
-        if let Some(cache) = cache_manager {
-            let cache = cache.clone();
-            tokio::spawn(async move {
-                cache.save_to_disk().await;
-            });
-        }
-
         // Initialize batch if this is the first change
         if self.batch_start_time.is_none() {
             self.batch_start_time = Some(Instant::now());
@@ -169,9 +161,9 @@ impl HostStatusManager {
 
 
     /// Process a metric - updates status (notifications handled separately via batching)
-    pub async fn process_metric(&mut self, metric: &Metric, _notification_manager: &mut crate::notifications::NotificationManager, cache_manager: &crate::cache::MetricCacheManager) {
+    pub async fn process_metric(&mut self, metric: &Metric, _notification_manager: &mut crate::notifications::NotificationManager) {
         // Just update status - notifications are handled by process_pending_notifications
-        self.update_service_status(metric.name.clone(), metric.status, Some(cache_manager));
+        self.update_service_status(metric.name.clone(), metric.status);
     }
 
     /// Process pending notifications - call this at notification intervals