Christoffer Martinsson 43fb838c9b
Fix duplicate drive display in mergerfs pools
- Restructure storage rendering logic to prevent drive duplication
- Use specific mergerfs check instead of generic multi-drive condition
- Ensure drives only appear once under organized data/parity sections
2025-11-23 17:46:09 +01:00


use cm_dashboard_shared::{Metric, MetricValue, Status};
use ratatui::{
layout::Rect,
text::{Line, Span, Text},
widgets::Paragraph,
Frame,
};
use super::Widget;
use crate::ui::theme::{StatusIcons, Typography};
/// System widget displaying NixOS info, CPU, RAM, and Storage in a unified layout
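/// Metrics are pushed in via `update_from_metrics`; the storage pool list is
/// rebuilt from scratch on every update, while the scalar fields keep their
/// last value until a newer metric overwrites them.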
#[derive(Clone)]
pub struct SystemWidget {
// NixOS information
nixos_build: Option<String>,
config_hash: Option<String>,
agent_hash: Option<String>,
// CPU metrics
cpu_load_1min: Option<f32>,
cpu_load_5min: Option<f32>,
cpu_load_15min: Option<f32>,
cpu_frequency: Option<f32>,
cpu_status: Status,
// Memory metrics
memory_usage_percent: Option<f32>,
memory_used_gb: Option<f32>,
memory_total_gb: Option<f32>,
tmp_usage_percent: Option<f32>,
tmp_used_gb: Option<f32>,
tmp_total_gb: Option<f32>,
memory_status: Status,
tmp_status: Status,
// Storage metrics (collected from disk metrics)
storage_pools: Vec<StoragePool>,
// Overall status
has_data: bool,
}
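/// A logical storage pool (single filesystem, physical drive, or mergerfs set)
/// assembled from `disk_*` metrics.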
#[derive(Clone)]
struct StoragePool {
name: String,
mount_point: String,
pool_type: String, // "single", "mergerfs (2+1)", "RAID5 (3+1)", etc.
pool_health: Option<String>, // "healthy", "degraded", "critical", "rebuilding"
drives: Vec<StorageDrive>,
filesystems: Vec<FileSystem>, // For physical drive pools: individual filesystem children
usage_percent: Option<f32>,
used_gb: Option<f32>,
total_gb: Option<f32>,
status: Status,
health_status: Status, // Separate status for pool health vs usage
}
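/// Per-drive health details (temperature, wear) for a pool member.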
#[derive(Clone)]
struct StorageDrive {
name: String,
temperature: Option<f32>,
wear_percent: Option<f32>,
status: Status,
}
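/// A filesystem mounted under a physical drive pool, shown as a child entry in the tree.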
#[derive(Clone)]
struct FileSystem {
mount_point: String,
usage_percent: Option<f32>,
used_gb: Option<f32>,
total_gb: Option<f32>,
available_gb: Option<f32>,
status: Status,
}
impl SystemWidget {
pub fn new() -> Self {
Self {
nixos_build: None,
config_hash: None,
agent_hash: None,
cpu_load_1min: None,
cpu_load_5min: None,
cpu_load_15min: None,
cpu_frequency: None,
cpu_status: Status::Unknown,
memory_usage_percent: None,
memory_used_gb: None,
memory_total_gb: None,
tmp_usage_percent: None,
tmp_used_gb: None,
tmp_total_gb: None,
memory_status: Status::Unknown,
tmp_status: Status::Unknown,
storage_pools: Vec::new(),
has_data: false,
}
}
/// Format CPU load averages
fn format_cpu_load(&self) -> String {
match (self.cpu_load_1min, self.cpu_load_5min, self.cpu_load_15min) {
(Some(l1), Some(l5), Some(l15)) => {
format!("{:.2} {:.2} {:.2}", l1, l5, l15)
}
_ => "— — —".to_string(),
}
}
/// Format CPU frequency
fn format_cpu_frequency(&self) -> String {
match self.cpu_frequency {
Some(freq) => format!("{:.0} MHz", freq),
None => "— MHz".to_string(),
}
}
/// Format memory usage
fn format_memory_usage(&self) -> String {
match (self.memory_usage_percent, self.memory_used_gb, self.memory_total_gb) {
(Some(pct), Some(used), Some(total)) => {
format!("{:.0}% {:.1}GB/{:.1}GB", pct, used, total)
}
_ => "—% —GB/—GB".to_string(),
}
}
/// Format /tmp usage
fn format_tmp_usage(&self) -> String {
match (self.tmp_usage_percent, self.tmp_used_gb, self.tmp_total_gb) {
(Some(pct), Some(used), Some(total)) => {
let used_str = if used < 0.1 {
format!("{:.0}B", used * 1024.0) // Show as MB if very small
} else {
format!("{:.1}GB", used)
};
format!("{:.0}% {}/{:.1}GB", pct, used_str, total)
}
_ => "—% —GB/—GB".to_string(),
}
}
/// Get the current agent hash for rebuild completion detection
pub fn _get_agent_hash(&self) -> Option<&String> {
self.agent_hash.as_ref()
}
/// Get mount point for a pool name
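/// Known pool names map to fixed mount points; anything else falls back to
/// "/{pool_name}". A `_mount_point` metric, if present, overrides this value later.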
fn get_mount_point_for_pool(&self, pool_name: &str) -> String {
match pool_name {
"root" => "/".to_string(),
"steampool" => "/mnt/steampool".to_string(),
"steampool_1" => "/steampool_1".to_string(),
"steampool_2" => "/steampool_2".to_string(),
_ => format!("/{}", pool_name), // Default fallback
}
}
/// Parse storage metrics into pools and drives
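/// Metric names are grouped by the pool name they encode: drive-level metrics
/// (temperature, wear) become `StorageDrive` entries, `_fs_` metrics become
/// `FileSystem` children, and the remaining pool-level metrics fill in the
/// pool totals, mount point, type, and health.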
fn update_storage_from_metrics(&mut self, metrics: &[&Metric]) {
let mut pools: std::collections::HashMap<String, StoragePool> = std::collections::HashMap::new();
for metric in metrics {
if metric.name.starts_with("disk_") {
if let Some(pool_name) = self.extract_pool_name(&metric.name) {
let pool = pools.entry(pool_name.clone()).or_insert_with(|| StoragePool {
name: pool_name.clone(),
mount_point: self.get_mount_point_for_pool(&pool_name), // May be overridden by a _mount_point metric
pool_type: "single".to_string(), // Default, will be updated
pool_health: None,
drives: Vec::new(),
filesystems: Vec::new(),
usage_percent: None,
used_gb: None,
total_gb: None,
status: Status::Unknown,
health_status: Status::Unknown,
});
// Parse different metric types
if metric.name.contains("_usage_percent") && !metric.name.contains("_fs_") {
// Only use drive-level metrics for pool totals, not filesystem metrics
if let MetricValue::Float(usage) = metric.value {
pool.usage_percent = Some(usage);
pool.status = metric.status.clone();
}
} else if metric.name.contains("_used_gb") && !metric.name.contains("_fs_") {
// Only use drive-level metrics for pool totals, not filesystem metrics
if let MetricValue::Float(used) = metric.value {
pool.used_gb = Some(used);
}
} else if metric.name.contains("_total_gb") && !metric.name.contains("_fs_") {
// Only use drive-level metrics for pool totals, not filesystem metrics
if let MetricValue::Float(total) = metric.value {
pool.total_gb = Some(total);
}
} else if metric.name.contains("_mount_point") {
if let MetricValue::String(mount_point) = &metric.value {
pool.mount_point = mount_point.clone();
}
} else if metric.name.contains("_pool_type") {
if let MetricValue::String(pool_type) = &metric.value {
pool.pool_type = pool_type.clone();
}
} else if metric.name.contains("_pool_health") {
if let MetricValue::String(health) = &metric.value {
pool.pool_health = Some(health.clone());
pool.health_status = metric.status.clone();
}
} else if metric.name.contains("_temperature") {
if let Some(drive_name) = self.extract_drive_name(&metric.name) {
// Find existing drive or create new one
let drive_exists = pool.drives.iter().any(|d| d.name == drive_name);
if !drive_exists {
pool.drives.push(StorageDrive {
name: drive_name.clone(),
temperature: None,
wear_percent: None,
status: Status::Unknown,
});
}
if let Some(drive) = pool.drives.iter_mut().find(|d| d.name == drive_name) {
if let MetricValue::Float(temp) = metric.value {
drive.temperature = Some(temp);
drive.status = metric.status.clone();
}
}
}
} else if metric.name.contains("_wear_percent") {
if let Some(drive_name) = self.extract_drive_name(&metric.name) {
// Find existing drive or create new one
let drive_exists = pool.drives.iter().any(|d| d.name == drive_name);
if !drive_exists {
pool.drives.push(StorageDrive {
name: drive_name.clone(),
temperature: None,
wear_percent: None,
status: Status::Unknown,
});
}
if let Some(drive) = pool.drives.iter_mut().find(|d| d.name == drive_name) {
if let MetricValue::Float(wear) = metric.value {
drive.wear_percent = Some(wear);
drive.status = metric.status.clone();
}
}
}
} else if metric.name.contains("_fs_") {
// Handle filesystem metrics for physical drive pools (disk_{pool}_fs_{fs_name}_{metric})
if let (Some(fs_name), Some(metric_type)) = self.extract_filesystem_metric(&metric.name) {
// Find or create filesystem entry
let fs_exists = pool.filesystems.iter().any(|fs| {
let fs_id = if fs.mount_point == "/" {
"root".to_string()
} else {
fs.mount_point.trim_start_matches('/').replace('/', "_")
};
fs_id == fs_name
});
if !fs_exists {
// Create filesystem entry with correct mount point
let mount_point = if metric_type == "mount_point" {
if let MetricValue::String(mount) = &metric.value {
mount.clone()
} else {
// Fallback: handle special cases
if fs_name == "root" {
"/".to_string()
} else {
format!("/{}", fs_name.replace('_', "/"))
}
}
} else {
// Fallback for non-mount_point metrics: generate mount point from fs_name
if fs_name == "root" {
"/".to_string()
} else {
format!("/{}", fs_name.replace('_', "/"))
}
};
pool.filesystems.push(FileSystem {
mount_point,
usage_percent: None,
used_gb: None,
total_gb: None,
available_gb: None,
status: Status::Unknown,
});
}
// Update the filesystem with the metric value
if let Some(filesystem) = pool.filesystems.iter_mut().find(|fs| {
let fs_id = if fs.mount_point == "/" {
"root".to_string()
} else {
fs.mount_point.trim_start_matches('/').replace('/', "_")
};
fs_id == fs_name
}) {
match metric_type.as_str() {
"usage_percent" => {
if let MetricValue::Float(usage) = metric.value {
filesystem.usage_percent = Some(usage);
filesystem.status = metric.status.clone();
}
}
"used_gb" => {
if let MetricValue::Float(used) = metric.value {
filesystem.used_gb = Some(used);
}
}
"total_gb" => {
if let MetricValue::Float(total) = metric.value {
filesystem.total_gb = Some(total);
}
}
"available_gb" => {
if let MetricValue::Float(available) = metric.value {
filesystem.available_gb = Some(available);
}
}
"mount_point" => {
if let MetricValue::String(mount) = &metric.value {
filesystem.mount_point = mount.clone();
}
}
_ => {}
}
}
}
}
}
}
}
// Convert to sorted vec for consistent ordering
let mut pool_list: Vec<StoragePool> = pools.into_values().collect();
pool_list.sort_by(|a, b| a.name.cmp(&b.name)); // Sort alphabetically by name
self.storage_pools = pool_list;
}
/// Extract pool name from disk metric name
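/// Illustrative examples of the expected naming scheme:
///   disk_srv_media_usage_percent       -> "srv_media"
///   disk_srv_media_data_0_temperature  -> "srv_media"
///   disk_root_fs_home_used_gb          -> "root"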
fn extract_pool_name(&self, metric_name: &str) -> Option<String> {
// Pattern: disk_{pool_name}_{various suffixes}
// Since pool_name can contain underscores, work backwards from known metric suffixes
if metric_name.starts_with("disk_") {
// Handle filesystem metrics: disk_{pool}_fs_{filesystem}_{metric}
if metric_name.contains("_fs_") {
if let Some(fs_pos) = metric_name.find("_fs_") {
return Some(metric_name[5..fs_pos].to_string()); // Skip "disk_", extract pool name before "_fs_"
}
}
// Handle pool-level metrics (usage_percent, used_gb, total_gb, mount_point, pool_type, pool_health)
else if let Some(suffix_pos) = metric_name.rfind("_usage_percent")
.or_else(|| metric_name.rfind("_used_gb"))
.or_else(|| metric_name.rfind("_total_gb"))
.or_else(|| metric_name.rfind("_available_gb"))
.or_else(|| metric_name.rfind("_mount_point"))
.or_else(|| metric_name.rfind("_pool_type"))
.or_else(|| metric_name.rfind("_pool_health")) {
return Some(metric_name[5..suffix_pos].to_string()); // Skip "disk_"
}
// Handle drive-specific metrics: disk_{pool}_{drive_role}_{metric} (for mergerfs) or disk_{pool}_{drive}_{metric} (for physical drives)
else if let Some(suffix_pos) = metric_name.rfind("_temperature")
.or_else(|| metric_name.rfind("_wear_percent"))
.or_else(|| metric_name.rfind("_health")) {
// For mergerfs pools, metrics look like: disk_srv_media_data_0_temperature or disk_srv_media_parity_0_temperature
// We need to extract just "srv_media" as the pool name
let before_suffix = &metric_name[..suffix_pos];
// Check if this looks like a mergerfs drive metric (contains data_ or parity_)
if before_suffix.contains("_data_") {
if let Some(data_pos) = before_suffix.find("_data_") {
return Some(metric_name[5..data_pos].to_string()); // Extract pool name before "_data_"
}
} else if before_suffix.contains("_parity_") {
if let Some(parity_pos) = before_suffix.find("_parity_") {
return Some(metric_name[5..parity_pos].to_string()); // Extract pool name before "_parity_"
}
}
// Fallback for physical drive metrics: find the second-to-last underscore
if let Some(drive_start) = before_suffix.rfind('_') {
if drive_start > 5 {
return Some(metric_name[5..drive_start].to_string()); // Skip "disk_"
}
}
}
// Fallback: extract first component after disk_ prefix
else if let Some(captures) = metric_name.strip_prefix("disk_") {
if let Some(pos) = captures.find('_') {
return Some(captures[..pos].to_string());
}
}
}
None
}
/// Extract filesystem name and metric type from filesystem metric names
/// Pattern: disk_{pool}_fs_{filesystem_name}_{metric_type}
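/// Illustrative example: "disk_root_fs_home_usage_percent" yields
/// (Some("home"), Some("usage_percent")).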
fn extract_filesystem_metric(&self, metric_name: &str) -> (Option<String>, Option<String>) {
if metric_name.starts_with("disk_") && metric_name.contains("_fs_") {
// Find the _fs_ part
if let Some(fs_start) = metric_name.find("_fs_") {
let after_fs = &metric_name[fs_start + 4..]; // Skip "_fs_"
// Look for known metric suffixes (these can contain underscores)
let known_suffixes = ["usage_percent", "used_gb", "total_gb", "available_gb", "mount_point"];
for suffix in known_suffixes {
if after_fs.ends_with(suffix) {
// Extract filesystem name by removing suffix and underscore
if let Some(underscore_pos) = after_fs.rfind(&format!("_{}", suffix)) {
let fs_name = after_fs[..underscore_pos].to_string();
return (Some(fs_name), Some(suffix.to_string()));
}
}
}
}
}
(None, None)
}
/// Extract drive name from disk metric name
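/// Illustrative examples:
///   disk_srv_media_data_0_temperature -> "data_0"
///   disk_root_sda_wear_percent        -> "sda"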
fn extract_drive_name(&self, metric_name: &str) -> Option<String> {
// Pattern: disk_{pool_name}_{drive_name}_{metric_type}
// For mergerfs: disk_{pool_name}_{data|parity}_{index}_{metric_type}
// Since pool_name can contain underscores, work backwards from known metric suffixes
if metric_name.starts_with("disk_") {
if let Some(suffix_pos) = metric_name.rfind("_temperature")
.or_else(|| metric_name.rfind("_wear_percent"))
.or_else(|| metric_name.rfind("_health")) {
let before_suffix = &metric_name[..suffix_pos];
// For mergerfs drive metrics: extract the role_index part (e.g., "data_0", "parity_1")
if before_suffix.contains("_data_") || before_suffix.contains("_parity_") {
if let Some(role_start) = before_suffix.rfind("_data_").or_else(|| before_suffix.rfind("_parity_")) {
return Some(before_suffix[role_start + 1..].to_string()); // e.g., "data_0" or "parity_1"
}
}
// Fallback for physical drive metrics: get the last component
if let Some(drive_start) = before_suffix.rfind('_') {
return Some(before_suffix[drive_start + 1..].to_string());
}
}
}
None
}
/// Render storage section with enhanced tree structure
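/// A rough sketch of the output for a mergerfs pool (illustrative values;
/// status icons and exact spacing are supplied by `StatusIcons`/`Typography`):
///
///   /srv/media (mergerfs (2+1)):
///    ├─ Pool Status: Healthy (3 drives)
///    ├─ Total: 43% 1200.0GB/2800.0GB
///    ├─ Data Disks:
///    │ ├─ data_0 T: 38°C
///    │ ├─ data_1 T: 40°C
///    └─ Parity:
///      └─ parity_0 T: 41°C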
fn render_storage(&self) -> Vec<Line<'_>> {
let mut lines = Vec::new();
for pool in &self.storage_pools {
// Pool header line with type and health
let pool_label = if pool.pool_type == "single" {
format!("{}:", pool.mount_point)
} else {
format!("{} ({}):", pool.mount_point, pool.pool_type)
};
let pool_spans = StatusIcons::create_status_spans(
pool.health_status.clone(),
&pool_label
);
lines.push(Line::from(pool_spans));
// Pool health line (for multi-disk pools)
if pool.pool_type != "single" {
if let Some(health) = &pool.pool_health {
let health_text = match health.as_str() {
"healthy" => {
if pool.drives.len() > 1 {
format!("Pool Status: Healthy ({} drives)", pool.drives.len())
} else {
"Pool Status: Healthy".to_string()
}
}
"degraded" => "Pool Status: ⚠ Degraded".to_string(),
"critical" => "Pool Status: ✗ Critical".to_string(),
"rebuilding" => "Pool Status: ⟳ Rebuilding".to_string(),
_ => format!("Pool Status: ? {}", health),
};
let mut health_spans = vec![
Span::raw(" "),
Span::styled("├─ ", Typography::tree()),
];
health_spans.extend(StatusIcons::create_status_spans(pool.health_status.clone(), &health_text));
lines.push(Line::from(health_spans));
}
}
// Total usage line (always show for pools)
let usage_text = match (pool.usage_percent, pool.used_gb, pool.total_gb) {
(Some(pct), Some(used), Some(total)) => {
format!("Total: {:.0}% {:.1}GB/{:.1}GB", pct, used, total)
}
_ => "Total: —% —GB/—GB".to_string(),
};
let has_drives = !pool.drives.is_empty();
let has_filesystems = !pool.filesystems.is_empty();
let has_children = has_drives || has_filesystems;
let tree_symbol = if has_children { "├─" } else { "└─" };
let mut usage_spans = vec![
Span::raw(" "),
Span::styled(tree_symbol, Typography::tree()),
Span::raw(" "),
];
usage_spans.extend(StatusIcons::create_status_spans(pool.status.clone(), &usage_text));
lines.push(Line::from(usage_spans));
// Drive lines with enhanced grouping
if pool.pool_type.contains("mergerfs") && pool.drives.len() > 1 {
// Group drives by type for mergerfs pools
let (data_drives, parity_drives): (Vec<_>, Vec<_>) = pool.drives.iter()
.partition(|drive| {
// Simple heuristic: drives with 'parity' in the name (or sdc, a common parity drive) are parity
!drive.name.to_lowercase().contains("parity") && drive.name != "sdc"
});
// Show data drives
if !data_drives.is_empty() {
lines.push(Line::from(vec![
Span::raw(" "),
Span::styled("├─ ", Typography::tree()),
Span::styled("Data Disks:", Typography::secondary()),
]));
for (i, drive) in data_drives.iter().enumerate() {
let is_last = i == data_drives.len() - 1;
if is_last && parity_drives.is_empty() {
self.render_drive_line(&mut lines, drive, "│ └─");
} else {
self.render_drive_line(&mut lines, drive, "│ ├─");
}
}
}
// Show parity drives
if !parity_drives.is_empty() {
lines.push(Line::from(vec![
Span::raw(" "),
Span::styled("└─ ", Typography::tree()),
Span::styled("Parity:", Typography::secondary()),
]));
for (i, drive) in parity_drives.iter().enumerate() {
let is_last = i == parity_drives.len() - 1;
if is_last {
self.render_drive_line(&mut lines, drive, " └─");
} else {
self.render_drive_line(&mut lines, drive, " ├─");
}
}
}
} else if pool.pool_type != "single" && pool.drives.len() > 1 {
// Regular drive listing for non-mergerfs multi-drive pools
for (i, drive) in pool.drives.iter().enumerate() {
let is_last = i == pool.drives.len() - 1;
let tree_symbol = if is_last { "└─" } else { "├─" };
self.render_drive_line(&mut lines, drive, tree_symbol);
}
} else if pool.pool_type.starts_with("drive (") {
// Physical drive pools: show drive info + filesystem children
// First show drive information
for drive in &pool.drives {
let mut drive_info = Vec::new();
if let Some(temp) = drive.temperature {
drive_info.push(format!("T: {:.0}°C", temp));
}
if let Some(wear) = drive.wear_percent {
drive_info.push(format!("W: {:.0}%", wear));
}
let drive_text = if drive_info.is_empty() {
format!("Drive: {}", drive.name)
} else {
format!("Drive: {}", drive_info.join(" "))
};
let has_filesystems = !pool.filesystems.is_empty();
let tree_symbol = if has_filesystems { "├─" } else { "└─" };
let mut drive_spans = vec![
Span::raw(" "),
Span::styled(tree_symbol, Typography::tree()),
Span::raw(" "),
];
drive_spans.extend(StatusIcons::create_status_spans(drive.status.clone(), &drive_text));
lines.push(Line::from(drive_spans));
}
// Then show filesystem children
for (i, filesystem) in pool.filesystems.iter().enumerate() {
let is_last = i == pool.filesystems.len() - 1;
let tree_symbol = if is_last { "└─" } else { "├─" };
let fs_text = match (filesystem.usage_percent, filesystem.used_gb, filesystem.total_gb) {
(Some(pct), Some(used), Some(total)) => {
format!("{}: {:.0}% {:.1}GB/{:.1}GB", filesystem.mount_point, pct, used, total)
}
(Some(pct), _, Some(total)) => {
format!("{}: {:.0}% —GB/{:.1}GB", filesystem.mount_point, pct, total)
}
(Some(pct), _, _) => {
format!("{}: {:.0}% —GB/—GB", filesystem.mount_point, pct)
}
(_, Some(used), Some(total)) => {
format!("{}: —% {:.1}GB/{:.1}GB", filesystem.mount_point, used, total)
}
_ => format!("{}: —% —GB/—GB", filesystem.mount_point),
};
let mut fs_spans = vec![
Span::raw(" "),
Span::styled(tree_symbol, Typography::tree()),
Span::raw(" "),
];
fs_spans.extend(StatusIcons::create_status_spans(filesystem.status.clone(), &fs_text));
lines.push(Line::from(fs_spans));
}
} else {
// Single drive or simple pools
for (i, drive) in pool.drives.iter().enumerate() {
let is_last = i == pool.drives.len() - 1;
let tree_symbol = if is_last { "└─" } else { "├─" };
self.render_drive_line(&mut lines, drive, tree_symbol);
}
}
}
lines
}
/// Helper to render a single drive line
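/// The rendered text looks like "nvme0n1 T: 42°C W: 3%" (drive name illustrative),
/// preceded by the given tree symbol and the drive's status icon.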
fn render_drive_line<'a>(&self, lines: &mut Vec<Line<'a>>, drive: &StorageDrive, tree_symbol: &'a str) {
let mut drive_info = Vec::new();
if let Some(temp) = drive.temperature {
drive_info.push(format!("T: {:.0}°C", temp));
}
if let Some(wear) = drive.wear_percent {
drive_info.push(format!("W: {:.0}%", wear));
}
let drive_text = if drive_info.is_empty() {
drive.name.clone()
} else {
format!("{} {}", drive.name, drive_info.join(""))
};
let mut drive_spans = vec![
Span::raw(" "),
Span::styled(tree_symbol, Typography::tree()),
Span::raw(" "),
];
drive_spans.extend(StatusIcons::create_status_spans(drive.status.clone(), &drive_text));
lines.push(Line::from(drive_spans));
}
}
impl Widget for SystemWidget {
fn update_from_metrics(&mut self, metrics: &[&Metric]) {
self.has_data = !metrics.is_empty();
for metric in metrics {
match metric.name.as_str() {
// NixOS metrics
"system_nixos_build" => {
if let MetricValue::String(build) = &metric.value {
self.nixos_build = Some(build.clone());
}
}
"system_config_hash" => {
if let MetricValue::String(hash) = &metric.value {
self.config_hash = Some(hash.clone());
}
}
"agent_version" => {
if let MetricValue::String(version) = &metric.value {
self.agent_hash = Some(version.clone());
}
}
// CPU metrics
"cpu_load_1min" => {
if let MetricValue::Float(load) = metric.value {
self.cpu_load_1min = Some(load);
self.cpu_status = metric.status.clone();
}
}
"cpu_load_5min" => {
if let MetricValue::Float(load) = metric.value {
self.cpu_load_5min = Some(load);
}
}
"cpu_load_15min" => {
if let MetricValue::Float(load) = metric.value {
self.cpu_load_15min = Some(load);
}
}
"cpu_frequency_mhz" => {
if let MetricValue::Float(freq) = metric.value {
self.cpu_frequency = Some(freq);
}
}
// Memory metrics
"memory_usage_percent" => {
if let MetricValue::Float(usage) = metric.value {
self.memory_usage_percent = Some(usage);
self.memory_status = metric.status.clone();
}
}
"memory_used_gb" => {
if let MetricValue::Float(used) = metric.value {
self.memory_used_gb = Some(used);
}
}
"memory_total_gb" => {
if let MetricValue::Float(total) = metric.value {
self.memory_total_gb = Some(total);
}
}
// Tmpfs metrics
"memory_tmp_usage_percent" => {
if let MetricValue::Float(usage) = metric.value {
self.tmp_usage_percent = Some(usage);
self.tmp_status = metric.status.clone();
}
}
"memory_tmp_used_gb" => {
if let MetricValue::Float(used) = metric.value {
self.tmp_used_gb = Some(used);
}
}
"memory_tmp_total_gb" => {
if let MetricValue::Float(total) = metric.value {
self.tmp_total_gb = Some(total);
}
}
_ => {}
}
}
// Update storage from all disk metrics
self.update_storage_from_metrics(metrics);
}
}
impl SystemWidget {
/// Render system widget
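/// Sections are rendered top to bottom: NixOS info, CPU, RAM, then Storage.
/// When the content exceeds `area`, the tail is truncated and replaced with a
/// "... N more below" marker line.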
pub fn render(&mut self, frame: &mut Frame, area: Rect, hostname: &str, config: Option<&crate::config::DashboardConfig>) {
let mut lines = Vec::new();
// NixOS section
lines.push(Line::from(vec![
Span::styled(format!("NixOS {}:", hostname), Typography::widget_title())
]));
let build_text = self.nixos_build.as_deref().unwrap_or("unknown");
lines.push(Line::from(vec![
Span::styled(format!("Build: {}", build_text), Typography::secondary())
]));
let agent_version_text = self.agent_hash.as_deref().unwrap_or("unknown");
lines.push(Line::from(vec![
Span::styled(format!("Agent: {}", agent_version_text), Typography::secondary())
]));
// Display detected connection IP
if let Some(config) = config {
if let Some(host_details) = config.hosts.get(hostname) {
let detected_ip = host_details.get_connection_ip(hostname);
lines.push(Line::from(vec![
Span::styled(format!("IP: {}", detected_ip), Typography::secondary())
]));
}
}
// CPU section
lines.push(Line::from(vec![
Span::styled("CPU:", Typography::widget_title())
]));
let load_text = self.format_cpu_load();
let cpu_spans = StatusIcons::create_status_spans(
self.cpu_status.clone(),
&format!("Load: {}", load_text)
);
lines.push(Line::from(cpu_spans));
let freq_text = self.format_cpu_frequency();
lines.push(Line::from(vec![
Span::styled(" └─ ", Typography::tree()),
Span::styled(format!("Freq: {}", freq_text), Typography::secondary())
]));
// RAM section
lines.push(Line::from(vec![
Span::styled("RAM:", Typography::widget_title())
]));
let memory_text = self.format_memory_usage();
let memory_spans = StatusIcons::create_status_spans(
self.memory_status.clone(),
&format!("Usage: {}", memory_text)
);
lines.push(Line::from(memory_spans));
let tmp_text = self.format_tmp_usage();
let mut tmp_spans = vec![
Span::styled(" └─ ", Typography::tree()),
];
tmp_spans.extend(StatusIcons::create_status_spans(
self.tmp_status.clone(),
&format!("/tmp: {}", tmp_text)
));
lines.push(Line::from(tmp_spans));
// Storage section
lines.push(Line::from(vec![
Span::styled("Storage:", Typography::widget_title())
]));
// Storage items - let main overflow logic handle truncation
let storage_lines = self.render_storage();
lines.extend(storage_lines);
// Truncate to the available widget height
let total_lines = lines.len();
let available_height = area.height as usize;
// Show only what fits; if content overflows, reserve the last line for an "X more below" marker
if total_lines > available_height {
let lines_for_content = available_height.saturating_sub(1); // Reserve one line for "more below"
let mut visible_lines: Vec<Line> = lines
.into_iter()
.take(lines_for_content)
.collect();
let hidden_below = total_lines.saturating_sub(lines_for_content);
if hidden_below > 0 {
let more_line = Line::from(vec![
Span::styled(format!("... {} more below", hidden_below), Typography::muted())
]);
visible_lines.push(more_line);
}
let paragraph = Paragraph::new(Text::from(visible_lines));
frame.render_widget(paragraph, area);
} else {
// All content fits, render normally
let paragraph = Paragraph::new(Text::from(lines));
frame.render_widget(paragraph, area);
}
}
}