Compare commits
4 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | c9d12793ef |  |
|  | 8f80015273 |  |
|  | 7a95a9d762 |  |
|  | 7b11db990c |  |
Cargo.lock (generated), 6 changes
```diff
@@ -279,7 +279,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
 
 [[package]]
 name = "cm-dashboard"
-version = "0.1.149"
+version = "0.1.154"
 dependencies = [
  "anyhow",
  "chrono",
@@ -301,7 +301,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.149"
+version = "0.1.154"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -324,7 +324,7 @@ dependencies = [
 
 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.149"
+version = "0.1.154"
 dependencies = [
  "chrono",
  "serde",
```
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.150"
+version = "0.1.154"
 edition = "2021"
 
 [dependencies]
```
```diff
@@ -50,6 +50,7 @@ struct MergerfsPool {
 #[derive(Debug, Clone)]
 struct PoolDrive {
     name: String, // Drive name
+    mount_point: String, // e.g., "/mnt/disk1"
     temperature_celsius: Option<f32>, // Drive temperature
 }
 
@@ -75,11 +76,17 @@ impl DiskCollector {
         let mount_devices = self.get_mount_devices().await?;
 
         // Step 2: Get filesystem usage for each mount point using df
-        let filesystem_usage = self.get_filesystem_usage(&mount_devices).map_err(|e| CollectorError::Parse {
+        let mut filesystem_usage = self.get_filesystem_usage(&mount_devices).map_err(|e| CollectorError::Parse {
             value: "filesystem usage".to_string(),
             error: format!("Failed to get filesystem usage: {}", e),
         })?;
 
+        // Step 2.5: Add MergerFS mount points that weren't in lsblk output
+        self.add_mergerfs_filesystem_usage(&mut filesystem_usage).map_err(|e| CollectorError::Parse {
+            value: "mergerfs filesystem usage".to_string(),
+            error: format!("Failed to get mergerfs filesystem usage: {}", e),
+        })?;
+
         // Step 3: Detect MergerFS pools
         let mergerfs_pools = self.detect_mergerfs_pools(&filesystem_usage).map_err(|e| CollectorError::Parse {
             value: "mergerfs pools".to_string(),
@@ -155,6 +162,30 @@ impl DiskCollector {
         Ok(filesystem_usage)
     }
 
+    /// Add filesystem usage for MergerFS mount points that aren't in lsblk
+    fn add_mergerfs_filesystem_usage(&self, filesystem_usage: &mut HashMap<String, (u64, u64)>) -> anyhow::Result<()> {
+        let mounts_content = std::fs::read_to_string("/proc/mounts")
+            .map_err(|e| anyhow::anyhow!("Failed to read /proc/mounts: {}", e))?;
+
+        for line in mounts_content.lines() {
+            let parts: Vec<&str> = line.split_whitespace().collect();
+            if parts.len() >= 3 && parts[2] == "fuse.mergerfs" {
+                let mount_point = parts[1].to_string();
+
+                // Only add if we don't already have usage data for this mount point
+                if !filesystem_usage.contains_key(&mount_point) {
+                    if let Ok((total, used)) = self.get_filesystem_info(&mount_point) {
+                        debug!("Added MergerFS filesystem usage for {}: {}GB total, {}GB used",
+                            mount_point, total as f32 / (1024.0 * 1024.0 * 1024.0), used as f32 / (1024.0 * 1024.0 * 1024.0));
+                        filesystem_usage.insert(mount_point, (total, used));
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+
     /// Get filesystem info for a single mount point
     fn get_filesystem_info(&self, mount_point: &str) -> Result<(u64, u64), CollectorError> {
         let output = Command::new("df")
@@ -198,16 +229,80 @@ impl DiskCollector {
     }
 
     /// Detect MergerFS pools from mount data
-    fn detect_mergerfs_pools(&self, _filesystem_usage: &HashMap<String, (u64, u64)>) -> anyhow::Result<Vec<MergerfsPool>> {
-        let pools = Vec::new();
-
-        // For now, return empty pools - full mergerfs detection would require parsing /proc/mounts for fuse.mergerfs
-        // This ensures we don't break existing functionality
+    fn detect_mergerfs_pools(&self, filesystem_usage: &HashMap<String, (u64, u64)>) -> anyhow::Result<Vec<MergerfsPool>> {
+        let mounts_content = std::fs::read_to_string("/proc/mounts")
+            .map_err(|e| anyhow::anyhow!("Failed to read /proc/mounts: {}", e))?;
+        let mut pools = Vec::new();
+
+        for line in mounts_content.lines() {
+            let parts: Vec<&str> = line.split_whitespace().collect();
+            if parts.len() >= 3 && parts[2] == "fuse.mergerfs" {
+                let mount_point = parts[1].to_string();
+                let device_sources = parts[0]; // e.g., "/mnt/disk1:/mnt/disk2"
+
+                // Get pool usage
+                let (total_bytes, used_bytes) = filesystem_usage.get(&mount_point)
+                    .copied()
+                    .unwrap_or((0, 0));
+
+                // Extract pool name from mount point (e.g., "/srv/media" -> "srv_media")
+                let pool_name = if mount_point == "/" {
+                    "root".to_string()
+                } else {
+                    mount_point.trim_start_matches('/').replace('/', "_")
+                };
+
+                if pool_name.is_empty() {
+                    debug!("Skipping mergerfs pool with empty name: {}", mount_point);
+                    continue;
+                }
+
+                // Parse member paths - handle both full paths and numeric references
+                let raw_paths: Vec<String> = device_sources
+                    .split(':')
+                    .map(|s| s.trim().to_string())
+                    .filter(|s| !s.is_empty())
+                    .collect();
+
+                // Convert numeric references to actual mount points if needed
+                let member_paths = if raw_paths.iter().any(|path| !path.starts_with('/')) {
+                    // Handle numeric format like "1:2" by finding corresponding /mnt/disk* paths
+                    self.resolve_numeric_mergerfs_paths(&raw_paths)?
+                } else {
+                    // Already full paths
+                    raw_paths
+                };
+
+                // For SnapRAID setups, include parity drives that are related to this pool's data drives
+                let mut all_member_paths = member_paths.clone();
+                let related_parity_paths = self.discover_related_parity_drives(&member_paths)?;
+                all_member_paths.extend(related_parity_paths);
+
+                // Categorize as data vs parity drives
+                let (data_drives, parity_drives) = match self.categorize_pool_drives(&all_member_paths) {
+                    Ok(drives) => drives,
+                    Err(e) => {
+                        debug!("Failed to categorize drives for pool {}: {}. Skipping.", mount_point, e);
+                        continue;
+                    }
+                };
+
+                pools.push(MergerfsPool {
+                    name: pool_name,
+                    mount_point,
+                    total_bytes,
+                    used_bytes,
+                    data_drives,
+                    parity_drives,
+                });
+            }
+        }
+
+        debug!("Found {} mergerfs pools", pools.len());
         Ok(pools)
     }
 
-    /// Group filesystems by physical drive (excluding mergerfs members)
+    /// Group filesystems by physical drive (excluding mergerfs members) - exact old logic
     fn group_by_physical_drive(
         &self,
         mount_devices: &HashMap<String, String>,
@@ -216,14 +311,14 @@ impl DiskCollector {
     ) -> anyhow::Result<Vec<PhysicalDrive>> {
         let mut drive_groups: HashMap<String, Vec<Filesystem>> = HashMap::new();
 
-        // Get all mergerfs member paths to exclude them
+        // Get all mergerfs member paths to exclude them - exactly like old code
         let mut mergerfs_members = std::collections::HashSet::new();
         for pool in mergerfs_pools {
             for drive in &pool.data_drives {
-                mergerfs_members.insert(drive.name.clone());
+                mergerfs_members.insert(drive.mount_point.clone());
             }
             for drive in &pool.parity_drives {
-                mergerfs_members.insert(drive.name.clone());
+                mergerfs_members.insert(drive.mount_point.clone());
             }
         }
 
@@ -342,12 +437,14 @@ impl DiskCollector {
             // Return unknown data rather than failing completely
             return Ok(SmartData {
                 health: "UNKNOWN".to_string(),
+                serial_number: None,
                 temperature_celsius: None,
                 wear_percent: None,
             });
         }
 
         let mut health = "UNKNOWN".to_string();
+        let mut serial_number = None;
         let mut temperature = None;
         let mut wear_percent = None;
 
@@ -360,6 +457,15 @@ impl DiskCollector {
             }
         }
 
+        // Serial number parsing
+        if line.starts_with("Serial Number:") {
+            if let Some(serial_part) = line.split("Serial Number:").nth(1) {
+                if let Some(serial_str) = serial_part.split_whitespace().next() {
+                    serial_number = Some(serial_str.to_string());
+                }
+            }
+        }
+
         // Temperature parsing for different drive types
         if line.contains("Temperature_Celsius") || line.contains("Airflow_Temperature_Cel") {
             // Traditional SATA drives: attribute table format
@@ -402,6 +508,7 @@ impl DiskCollector {
 
         Ok(SmartData {
             health,
+            serial_number,
            temperature_celsius: temperature,
            wear_percent,
         })
@@ -444,28 +551,25 @@ impl DiskCollector {
     }
 
     /// Populate pools data into AgentData
-    fn populate_pools_data(&self, mergerfs_pools: &[MergerfsPool], _smart_data: &HashMap<String, SmartData>, agent_data: &mut AgentData) -> Result<(), CollectorError> {
+    fn populate_pools_data(&self, mergerfs_pools: &[MergerfsPool], smart_data: &HashMap<String, SmartData>, agent_data: &mut AgentData) -> Result<(), CollectorError> {
         for pool in mergerfs_pools {
+            // Calculate pool health and statuses based on member drive health
+            let (pool_health, health_status, usage_status, data_drive_data, parity_drive_data) = self.calculate_pool_health(pool, smart_data);
+
             let pool_data = PoolData {
                 name: pool.name.clone(),
                 mount: pool.mount_point.clone(),
-                pool_type: "mergerfs".to_string(),
-                health: "healthy".to_string(), // TODO: Calculate based on member drives
-                usage_percent: (pool.used_bytes as f32 / pool.total_bytes as f32) * 100.0,
+                pool_type: format!("mergerfs ({}+{})", pool.data_drives.len(), pool.parity_drives.len()),
+                health: pool_health,
+                usage_percent: if pool.total_bytes > 0 {
+                    (pool.used_bytes as f32 / pool.total_bytes as f32) * 100.0
+                } else { 0.0 },
                 used_gb: pool.used_bytes as f32 / (1024.0 * 1024.0 * 1024.0),
                 total_gb: pool.total_bytes as f32 / (1024.0 * 1024.0 * 1024.0),
-                data_drives: pool.data_drives.iter().map(|d| cm_dashboard_shared::PoolDriveData {
-                    name: d.name.clone(),
-                    temperature_celsius: d.temperature_celsius,
-                    health: "unknown".to_string(),
-                    wear_percent: None,
-                }).collect(),
-                parity_drives: pool.parity_drives.iter().map(|d| cm_dashboard_shared::PoolDriveData {
-                    name: d.name.clone(),
-                    temperature_celsius: d.temperature_celsius,
-                    health: "unknown".to_string(),
-                    wear_percent: None,
-                }).collect(),
+                data_drives: data_drive_data,
+                parity_drives: parity_drive_data,
+                health_status,
+                usage_status,
             };
 
             agent_data.system.storage.pools.push(pool_data);
@@ -474,6 +578,84 @@ impl DiskCollector {
         Ok(())
     }
 
+    /// Calculate pool health based on member drive status
+    fn calculate_pool_health(&self, pool: &MergerfsPool, smart_data: &HashMap<String, SmartData>) -> (String, cm_dashboard_shared::Status, cm_dashboard_shared::Status, Vec<cm_dashboard_shared::PoolDriveData>, Vec<cm_dashboard_shared::PoolDriveData>) {
+        let mut failed_data = 0;
+        let mut failed_parity = 0;
+
+        // Process data drives
+        let data_drive_data: Vec<cm_dashboard_shared::PoolDriveData> = pool.data_drives.iter().map(|d| {
+            let smart = smart_data.get(&d.name);
+            let health = smart.map(|s| s.health.clone()).unwrap_or_else(|| "UNKNOWN".to_string());
+            let temperature = smart.and_then(|s| s.temperature_celsius).or(d.temperature_celsius);
+
+            if health == "FAILED" {
+                failed_data += 1;
+            }
+
+            // Calculate drive statuses using config thresholds
+            let health_status = self.calculate_health_status(&health);
+            let temperature_status = temperature.map(|t| self.temperature_thresholds.evaluate(t)).unwrap_or(cm_dashboard_shared::Status::Unknown);
+
+            cm_dashboard_shared::PoolDriveData {
+                name: d.name.clone(),
+                serial_number: smart.and_then(|s| s.serial_number.clone()),
+                temperature_celsius: temperature,
+                health,
+                wear_percent: smart.and_then(|s| s.wear_percent),
+                health_status,
+                temperature_status,
+            }
+        }).collect();
+
+        // Process parity drives
+        let parity_drive_data: Vec<cm_dashboard_shared::PoolDriveData> = pool.parity_drives.iter().map(|d| {
+            let smart = smart_data.get(&d.name);
+            let health = smart.map(|s| s.health.clone()).unwrap_or_else(|| "UNKNOWN".to_string());
+            let temperature = smart.and_then(|s| s.temperature_celsius).or(d.temperature_celsius);
+
+            if health == "FAILED" {
+                failed_parity += 1;
+            }
+
+            // Calculate drive statuses using config thresholds
+            let health_status = self.calculate_health_status(&health);
+            let temperature_status = temperature.map(|t| self.temperature_thresholds.evaluate(t)).unwrap_or(cm_dashboard_shared::Status::Unknown);
+
+            cm_dashboard_shared::PoolDriveData {
+                name: d.name.clone(),
+                serial_number: smart.and_then(|s| s.serial_number.clone()),
+                temperature_celsius: temperature,
+                health,
+                wear_percent: smart.and_then(|s| s.wear_percent),
+                health_status,
+                temperature_status,
+            }
+        }).collect();
+
+        // Calculate overall pool health string and status
+        let (pool_health, health_status) = match (failed_data, failed_parity) {
+            (0, 0) => ("healthy".to_string(), cm_dashboard_shared::Status::Ok),
+            (1, 0) | (0, 1) => ("degraded".to_string(), cm_dashboard_shared::Status::Warning),
+            _ => ("critical".to_string(), cm_dashboard_shared::Status::Critical),
+        };
+
+        // Calculate pool usage status using config thresholds
+        let usage_percent = if pool.total_bytes > 0 {
+            (pool.used_bytes as f32 / pool.total_bytes as f32) * 100.0
+        } else { 0.0 };
+
+        let usage_status = if usage_percent >= self.config.usage_critical_percent {
+            cm_dashboard_shared::Status::Critical
+        } else if usage_percent >= self.config.usage_warning_percent {
+            cm_dashboard_shared::Status::Warning
+        } else {
+            cm_dashboard_shared::Status::Ok
+        };
+
+        (pool_health, health_status, usage_status, data_drive_data, parity_drive_data)
+    }
+
     /// Calculate filesystem usage status
     fn calculate_filesystem_usage_status(&self, usage_percent: f32) -> Status {
         // Use standard filesystem warning/critical thresholds
@@ -499,6 +681,134 @@ impl DiskCollector {
             _ => Status::Unknown,
         }
     }
 
+    /// Discover parity drives that are related to the given data drives
+    fn discover_related_parity_drives(&self, data_drives: &[String]) -> anyhow::Result<Vec<String>> {
+        let mount_devices = tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(self.get_mount_devices())
+        }).map_err(|e| anyhow::anyhow!("Failed to get mount devices: {}", e))?;
+
+        let mut related_parity = Vec::new();
+
+        // Find parity drives that share the same parent directory as the data drives
+        for data_path in data_drives {
+            if let Some(parent_dir) = self.get_parent_directory(data_path) {
+                // Look for parity drives in the same parent directory
+                for (mount_point, _device) in &mount_devices {
+                    if mount_point.contains("parity") && mount_point.starts_with(&parent_dir) {
+                        if !related_parity.contains(mount_point) {
+                            related_parity.push(mount_point.clone());
+                        }
+                    }
+                }
+            }
+        }
+
+        Ok(related_parity)
+    }
+
+    /// Get parent directory of a mount path (e.g., "/mnt/disk1" -> "/mnt")
+    fn get_parent_directory(&self, path: &str) -> Option<String> {
+        if let Some(last_slash) = path.rfind('/') {
+            if last_slash > 0 {
+                return Some(path[..last_slash].to_string());
+            }
+        }
+        None
+    }
+
+    /// Categorize pool member drives as data vs parity
+    fn categorize_pool_drives(&self, member_paths: &[String]) -> anyhow::Result<(Vec<PoolDrive>, Vec<PoolDrive>)> {
+        let mut data_drives = Vec::new();
+        let mut parity_drives = Vec::new();
+
+        for path in member_paths {
+            let drive_info = self.get_drive_info_for_path(path)?;
+
+            // Heuristic: if path contains "parity", it's parity
+            if path.to_lowercase().contains("parity") {
+                parity_drives.push(drive_info);
+            } else {
+                data_drives.push(drive_info);
+            }
+        }
+
+        Ok((data_drives, parity_drives))
+    }
+
+    /// Get drive information for a mount path
+    fn get_drive_info_for_path(&self, path: &str) -> anyhow::Result<PoolDrive> {
+        // Use lsblk to find the backing device
+        let output = Command::new("lsblk")
+            .args(&["-rn", "-o", "NAME,MOUNTPOINT"])
+            .output()
+            .map_err(|e| anyhow::anyhow!("Failed to run lsblk: {}", e))?;
+
+        let output_str = String::from_utf8_lossy(&output.stdout);
+        let mut device = String::new();
+
+        for line in output_str.lines() {
+            let parts: Vec<&str> = line.split_whitespace().collect();
+            if parts.len() >= 2 && parts[1] == path {
+                device = parts[0].to_string();
+                break;
+            }
+        }
+
+        if device.is_empty() {
+            return Err(anyhow::anyhow!("Could not find device for path {}", path));
+        }
+
+        // Extract base device name (e.g., "sda1" -> "sda")
+        let base_device = self.extract_base_device(&format!("/dev/{}", device));
+
+        // Get temperature from SMART data if available
+        let temperature = if let Ok(smart_data) = tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(self.get_smart_data(&base_device))
+        }) {
+            smart_data.temperature_celsius
+        } else {
+            None
+        };
+
+        Ok(PoolDrive {
+            name: base_device,
+            mount_point: path.to_string(),
+            temperature_celsius: temperature,
+        })
+    }
+
+    /// Resolve numeric mergerfs references like "1:2" to actual mount paths
+    fn resolve_numeric_mergerfs_paths(&self, numeric_refs: &[String]) -> anyhow::Result<Vec<String>> {
+        let mut resolved_paths = Vec::new();
+
+        // Get all mount points that look like /mnt/disk* or /mnt/parity*
+        let mount_devices = tokio::task::block_in_place(|| {
+            tokio::runtime::Handle::current().block_on(self.get_mount_devices())
+        }).map_err(|e| anyhow::anyhow!("Failed to get mount devices: {}", e))?;
+
+        let mut disk_mounts: Vec<String> = mount_devices.keys()
+            .filter(|path| path.starts_with("/mnt/disk") || path.starts_with("/mnt/parity"))
+            .cloned()
+            .collect();
+        disk_mounts.sort(); // Ensure consistent ordering
+
+        for num_ref in numeric_refs {
+            if let Ok(index) = num_ref.parse::<usize>() {
+                // Convert 1-based index to 0-based
+                if index > 0 && index <= disk_mounts.len() {
+                    resolved_paths.push(disk_mounts[index - 1].clone());
+                }
+            }
+        }
+
+        // Fallback: if we couldn't resolve, return the original paths
+        if resolved_paths.is_empty() {
+            resolved_paths = numeric_refs.to_vec();
+        }
+
+        Ok(resolved_paths)
+    }
 }
 
 #[async_trait]
@@ -512,6 +822,7 @@ impl Collector for DiskCollector {
 #[derive(Debug, Clone)]
 struct SmartData {
     health: String,
+    serial_number: Option<String>,
     temperature_celsius: Option<f32>,
     wear_percent: Option<f32>,
 }
```
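The agent-side change above centers on scanning `/proc/mounts` for `fuse.mergerfs` entries, both in `add_mergerfs_filesystem_usage` and in the rewritten `detect_mergerfs_pools`. The following is a minimal standalone sketch of that parsing step only; `parse_mergerfs_line` and the sample mount line are illustrative and not part of the diff.

```rust
/// Parse one /proc/mounts line and, if it is a mergerfs mount, return the
/// mount point and the colon-separated branch paths from the source field.
fn parse_mergerfs_line(line: &str) -> Option<(String, Vec<String>)> {
    // /proc/mounts columns: source, mount point, fstype, options, dump, pass
    let parts: Vec<&str> = line.split_whitespace().collect();
    if parts.len() >= 3 && parts[2] == "fuse.mergerfs" {
        let mount_point = parts[1].to_string();
        // Source looks like "/mnt/disk1:/mnt/disk2"; some setups report
        // numeric branch references like "1:2" instead of full paths.
        let branches = parts[0]
            .split(':')
            .map(|s| s.trim().to_string())
            .filter(|s| !s.is_empty())
            .collect();
        Some((mount_point, branches))
    } else {
        None
    }
}

fn main() {
    // Illustrative line, not taken from a real host.
    let sample = "/mnt/disk1:/mnt/disk2 /srv/media fuse.mergerfs rw,relatime 0 0";
    if let Some((mount, branches)) = parse_mergerfs_line(sample) {
        println!("mergerfs pool at {} with branches {:?}", mount, branches);
    }
}
```

The numeric-reference case is why the diff also adds `resolve_numeric_mergerfs_paths`, which maps indices like `1:2` onto the sorted `/mnt/disk*` and `/mnt/parity*` mount points.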
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.150"
+version = "0.1.154"
 edition = "2021"
 
 [dependencies]
```
```diff
@@ -57,7 +57,9 @@ struct StoragePool {
     name: String,
     mount_point: String,
     pool_type: String, // "single", "mergerfs (2+1)", "RAID5 (3+1)", etc.
-    drives: Vec<StorageDrive>,
+    drives: Vec<StorageDrive>, // For physical drives
+    data_drives: Vec<StorageDrive>, // For MergerFS pools
+    parity_drives: Vec<StorageDrive>, // For MergerFS pools
     filesystems: Vec<FileSystem>, // For physical drive pools: individual filesystem children
     usage_percent: Option<f32>,
     used_gb: Option<f32>,
@@ -227,6 +229,8 @@ impl SystemWidget {
             mount_point: drive.name.clone(),
             pool_type: "drive".to_string(),
             drives: Vec::new(),
+            data_drives: Vec::new(),
+            parity_drives: Vec::new(),
             filesystems: Vec::new(),
             usage_percent: None,
             used_gb: None,
@@ -267,7 +271,81 @@ impl SystemWidget {
             pools.insert(drive.name.clone(), pool);
         }
 
-        // Convert pools
+        // Convert pools (MergerFS, RAID, etc.)
+        for pool in &agent_data.system.storage.pools {
+            // Use agent-calculated status (combined health and usage status)
+            let pool_status = if pool.health_status == Status::Critical || pool.usage_status == Status::Critical {
+                Status::Critical
+            } else if pool.health_status == Status::Warning || pool.usage_status == Status::Warning {
+                Status::Warning
+            } else if pool.health_status == Status::Ok && pool.usage_status == Status::Ok {
+                Status::Ok
+            } else {
+                Status::Unknown
+            };
+
+            let mut storage_pool = StoragePool {
+                name: pool.name.clone(),
+                mount_point: pool.mount.clone(),
+                pool_type: pool.pool_type.clone(),
+                drives: Vec::new(),
+                data_drives: Vec::new(),
+                parity_drives: Vec::new(),
+                filesystems: Vec::new(),
+                usage_percent: Some(pool.usage_percent),
+                used_gb: Some(pool.used_gb),
+                total_gb: Some(pool.total_gb),
+                status: pool_status,
+            };
+
+            // Add data drives - use agent-calculated status
+            for drive in &pool.data_drives {
+                // Use combined health and temperature status
+                let drive_status = if drive.health_status == Status::Critical || drive.temperature_status == Status::Critical {
+                    Status::Critical
+                } else if drive.health_status == Status::Warning || drive.temperature_status == Status::Warning {
+                    Status::Warning
+                } else if drive.health_status == Status::Ok && drive.temperature_status == Status::Ok {
+                    Status::Ok
+                } else {
+                    Status::Unknown
+                };
+
+                let display_name = drive.serial_number.clone().unwrap_or(drive.name.clone());
+                let storage_drive = StorageDrive {
+                    name: display_name,
+                    temperature: drive.temperature_celsius,
+                    wear_percent: drive.wear_percent,
+                    status: drive_status,
+                };
+                storage_pool.data_drives.push(storage_drive);
+            }
+
+            // Add parity drives - use agent-calculated status
+            for drive in &pool.parity_drives {
+                // Use combined health and temperature status
+                let drive_status = if drive.health_status == Status::Critical || drive.temperature_status == Status::Critical {
+                    Status::Critical
+                } else if drive.health_status == Status::Warning || drive.temperature_status == Status::Warning {
+                    Status::Warning
+                } else if drive.health_status == Status::Ok && drive.temperature_status == Status::Ok {
+                    Status::Ok
+                } else {
+                    Status::Unknown
+                };
+
+                let display_name = drive.serial_number.clone().unwrap_or(drive.name.clone());
+                let storage_drive = StorageDrive {
+                    name: display_name,
+                    temperature: drive.temperature_celsius,
+                    wear_percent: drive.wear_percent,
+                    status: drive_status,
+                };
+                storage_pool.parity_drives.push(storage_drive);
+            }
+
+            pools.insert(pool.name.clone(), storage_pool);
+        }
+
         // Store pools
         let mut pool_list: Vec<StoragePool> = pools.into_values().collect();
@@ -306,8 +384,8 @@ impl SystemWidget {
                 pool.name.clone()
             }
         } else {
-            // For mergerfs pools, show pool name with format
-            format!("{} ({})", pool.mount_point, pool.pool_type)
+            // For mergerfs pools, show pool name with format like "mergerfs (2+1):"
+            format!("{}:", pool.pool_type)
         };
 
         let pool_spans = StatusIcons::create_status_spans(pool.status.clone(), &pool_label);
@@ -336,30 +414,71 @@ impl SystemWidget {
                 lines.push(Line::from(fs_spans));
             }
         } else {
-            // For mergerfs pools, show data drives and parity drives in tree structure
-            if !pool.drives.is_empty() {
-                // Group drives by type based on naming conventions or show all as data drives
-                let (data_drives, parity_drives): (Vec<_>, Vec<_>) = pool.drives.iter()
-                    .partition(|d| !d.name.contains("parity") && !d.name.starts_with("sdc"));
-
-                if !data_drives.is_empty() {
-                    lines.push(Line::from(vec![
-                        Span::styled(" ├─ Data Disks:", Typography::secondary())
-                    ]));
-                    for (i, drive) in data_drives.iter().enumerate() {
-                        render_pool_drive(drive, i == data_drives.len() - 1 && parity_drives.is_empty(), &mut lines);
-                    }
-                }
-
-                if !parity_drives.is_empty() {
-                    lines.push(Line::from(vec![
-                        Span::styled(" └─ Parity:", Typography::secondary())
-                    ]));
-                    for (i, drive) in parity_drives.iter().enumerate() {
-                        render_pool_drive(drive, i == parity_drives.len() - 1, &mut lines);
-                    }
-                }
-            }
+            // For mergerfs pools, show structure matching CLAUDE.md format:
+            // ● mergerfs (2+1):
+            // ├─ Total: ● 63% 2355.2GB/3686.4GB
+            // ├─ Data Disks:
+            // │ ├─ ● sdb T: 24°C W: 5%
+            // │ └─ ● sdd T: 27°C W: 5%
+            // ├─ Parity: ● sdc T: 24°C W: 5%
+            // └─ Mount: /srv/media
+
+            // Pool total usage
+            let total_text = format!("Total: {:.0}% {:.1}GB/{:.1}GB",
+                pool.usage_percent.unwrap_or(0.0),
+                pool.used_gb.unwrap_or(0.0),
+                pool.total_gb.unwrap_or(0.0)
+            );
+            let mut total_spans = vec![
+                Span::styled(" ├─ ", Typography::tree()),
+            ];
+            total_spans.extend(StatusIcons::create_status_spans(Status::Ok, &total_text));
+            lines.push(Line::from(total_spans));
+
+            // Data Disks section
+            if !pool.data_drives.is_empty() {
+                lines.push(Line::from(vec![
+                    Span::styled(" ├─ ", Typography::tree()),
+                    Span::styled("Data Disks:", Typography::secondary())
+                ]));
+                for (i, drive) in pool.data_drives.iter().enumerate() {
+                    let is_last = i == pool.data_drives.len() - 1;
+                    let tree_symbol = if is_last { " │ └─ " } else { " │ ├─ " };
+                    render_mergerfs_drive(drive, tree_symbol, &mut lines);
+                }
+            }
+
+            // Parity section
+            if !pool.parity_drives.is_empty() {
+                for drive in &pool.parity_drives {
+                    let mut drive_details = Vec::new();
+                    if let Some(temp) = drive.temperature {
+                        drive_details.push(format!("T: {}°C", temp as i32));
+                    }
+                    if let Some(wear) = drive.wear_percent {
+                        drive_details.push(format!("W: {}%", wear as i32));
+                    }
+
+                    let drive_text = if !drive_details.is_empty() {
+                        format!("{} {}", drive.name, drive_details.join(" "))
+                    } else {
+                        drive.name.clone()
+                    };
+
+                    let mut parity_spans = vec![
+                        Span::styled(" ├─ ", Typography::tree()),
+                        Span::styled("Parity: ", Typography::secondary()),
+                    ];
+                    parity_spans.extend(StatusIcons::create_status_spans(drive.status.clone(), &drive_text));
+                    lines.push(Line::from(parity_spans));
+                }
+            }
+
+            // Mount point
+            lines.push(Line::from(vec![
+                Span::styled(" └─ Mount: ", Typography::tree()),
+                Span::styled(&pool.mount_point, Typography::secondary())
+            ]));
         }
     }
 
@@ -367,6 +486,29 @@ impl SystemWidget {
     }
 }
 
+/// Helper function to render a drive in a MergerFS pool
+fn render_mergerfs_drive<'a>(drive: &StorageDrive, tree_symbol: &'a str, lines: &mut Vec<Line<'a>>) {
+    let mut drive_details = Vec::new();
+    if let Some(temp) = drive.temperature {
+        drive_details.push(format!("T: {}°C", temp as i32));
+    }
+    if let Some(wear) = drive.wear_percent {
+        drive_details.push(format!("W: {}%", wear as i32));
+    }
+
+    let drive_text = if !drive_details.is_empty() {
+        format!("{} {}", drive.name, drive_details.join(" "))
+    } else {
+        drive.name.clone()
+    };
+
+    let mut drive_spans = vec![
+        Span::styled(tree_symbol, Typography::tree()),
+    ];
+    drive_spans.extend(StatusIcons::create_status_spans(drive.status.clone(), &drive_text));
+    lines.push(Line::from(drive_spans));
+}
+
 /// Helper function to render a drive in a storage pool
 fn render_pool_drive(drive: &StorageDrive, is_last: bool, lines: &mut Vec<Line<'_>>) {
     let tree_symbol = if is_last { " └─" } else { " ├─" };
```
|
|||||||
[package]
|
[package]
|
||||||
name = "cm-dashboard-shared"
|
name = "cm-dashboard-shared"
|
||||||
version = "0.1.150"
|
version = "0.1.154"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
|||||||
```diff
@@ -96,15 +96,20 @@ pub struct PoolData {
     pub total_gb: f32,
     pub data_drives: Vec<PoolDriveData>,
     pub parity_drives: Vec<PoolDriveData>,
+    pub health_status: Status,
+    pub usage_status: Status,
 }
 
 /// Drive in a storage pool
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct PoolDriveData {
     pub name: String,
+    pub serial_number: Option<String>,
     pub temperature_celsius: Option<f32>,
     pub wear_percent: Option<f32>,
     pub health: String,
+    pub health_status: Status,
+    pub temperature_status: Status,
 }
 
 /// Service monitoring data
```
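The new `health_status` and `usage_status` fields are filled on the agent side by `calculate_pool_health`. A small sketch of that classification follows, with a simplified `Status` enum and assumed warning/critical usage thresholds of 80% and 90%; the real thresholds come from the collector's config.

```rust
// Simplified Status enum; the shared crate's enum also includes Unknown.
#[derive(Debug, PartialEq)]
enum Status { Ok, Warning, Critical }

// Pool health from counts of FAILED member drives, mirroring the match in the diff.
fn pool_health(failed_data: u32, failed_parity: u32) -> (&'static str, Status) {
    match (failed_data, failed_parity) {
        (0, 0) => ("healthy", Status::Ok),
        (1, 0) | (0, 1) => ("degraded", Status::Warning),
        _ => ("critical", Status::Critical),
    }
}

// Usage status from used/total bytes; warn_pct and crit_pct are assumptions here.
fn usage_status(used_bytes: u64, total_bytes: u64, warn_pct: f32, crit_pct: f32) -> Status {
    let pct = if total_bytes > 0 {
        (used_bytes as f32 / total_bytes as f32) * 100.0
    } else {
        0.0
    };
    if pct >= crit_pct {
        Status::Critical
    } else if pct >= warn_pct {
        Status::Warning
    } else {
        Status::Ok
    }
}

fn main() {
    assert_eq!(pool_health(0, 1), ("degraded", Status::Warning));
    assert_eq!(usage_status(95, 100, 80.0, 90.0), Status::Critical);
    println!("pool classification sketch checks out");
}
```

The dashboard then folds `health_status` and `usage_status` (and, per drive, `health_status` and `temperature_status`) into a single displayed status by taking the worse of the two.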