Compare commits
6 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | de252d27b9 |  |
|  | db0e41a7d3 |  |
|  | ec460496d8 |  |
|  | 33e700529e |  |
|  | d644b7d40a |  |
|  | f635ba9c75 |  |
@@ -113,13 +113,13 @@ jobs:
 NIX_HASH="sha256-$(python3 -c "import base64, binascii; print(base64.b64encode(binascii.unhexlify('$NEW_HASH')).decode())")"

 # Update the NixOS configuration
-sed -i "s|version = \"v[^\"]*\"|version = \"$VERSION\"|" hosts/common/cm-dashboard.nix
-sed -i "s|sha256 = \"sha256-[^\"]*\"|sha256 = \"$NIX_HASH\"|" hosts/common/cm-dashboard.nix
+sed -i "s|version = \"v[^\"]*\"|version = \"$VERSION\"|" hosts/services/cm-dashboard.nix
+sed -i "s|sha256 = \"sha256-[^\"]*\"|sha256 = \"$NIX_HASH\"|" hosts/services/cm-dashboard.nix

 # Commit and push changes
 git config user.name "Gitea Actions"
 git config user.email "actions@gitea.cmtec.se"
-git add hosts/common/cm-dashboard.nix
+git add hosts/services/cm-dashboard.nix
 git commit -m "Auto-update cm-dashboard to $VERSION

 - Update version to $VERSION with automated release
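
The `NIX_HASH` step above converts the hex SHA-256 digest in `$NEW_HASH` into the `sha256-<base64>` form that Nix expressions expect. A dependency-free Rust sketch of the same conversion, for readers who want it without the inline Python (the digest below is illustrative, the well-known hash of empty input):

```rust
// Hex SHA-256 digest -> "sha256-<base64>" as Nix expects.
fn hex_to_nix_hash(hex: &str) -> Option<String> {
    if hex.len() % 2 != 0 {
        return None;
    }
    // Decode the hex string into raw bytes.
    let bytes = (0..hex.len())
        .step_by(2)
        .map(|i| u8::from_str_radix(&hex[i..i + 2], 16).ok())
        .collect::<Option<Vec<u8>>>()?;

    // Standard base64 with '=' padding, matching python's base64.b64encode.
    const TABLE: &[u8; 64] =
        b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    let mut b64 = String::new();
    for chunk in bytes.chunks(3) {
        let n = (u32::from(chunk[0]) << 16)
            | (u32::from(*chunk.get(1).unwrap_or(&0)) << 8)
            | u32::from(*chunk.get(2).unwrap_or(&0));
        b64.push(TABLE[(n >> 18) as usize & 63] as char);
        b64.push(TABLE[(n >> 12) as usize & 63] as char);
        b64.push(if chunk.len() > 1 { TABLE[(n >> 6) as usize & 63] as char } else { '=' });
        b64.push(if chunk.len() > 2 { TABLE[n as usize & 63] as char } else { '=' });
    }
    Some(format!("sha256-{}", b64))
}

fn main() {
    let hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
    // Prints: sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=
    println!("{}", hex_to_nix_hash(hex).unwrap());
}
```
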
@@ -49,8 +49,12 @@ hostname2 = [
 ### Navigation
 - **Tab**: Switch between hosts
 - **↑↓ or j/k**: Select services
+- **s**: Start selected service (UserStart)
+- **S**: Stop selected service (UserStop)
 - **J**: Show service logs (journalctl)
 - **L**: Show custom log files
+- **R**: Rebuild current host
+- **B**: Run backup on current host
 - **q**: Quit dashboard

 ## Core Architecture Principles
@@ -115,7 +119,7 @@ This automatically:
 - Uploads binaries via Gitea API

 ### NixOS Configuration Updates
-Edit `~/projects/nixosbox/hosts/common/cm-dashboard.nix`:
+Edit `~/projects/nixosbox/hosts/services/cm-dashboard.nix`:

 ```nix
 version = "v0.1.X";
Cargo.lock (generated): 6 changes
@@ -270,7 +270,7 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"

 [[package]]
 name = "cm-dashboard"
-version = "0.1.68"
+version = "0.1.73"
 dependencies = [
  "anyhow",
  "chrono",
@@ -292,7 +292,7 @@ dependencies = [

 [[package]]
 name = "cm-dashboard-agent"
-version = "0.1.68"
+version = "0.1.73"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -315,7 +315,7 @@ dependencies = [

 [[package]]
 name = "cm-dashboard-shared"
-version = "0.1.68"
+version = "0.1.73"
 dependencies = [
  "chrono",
  "serde",
@@ -88,7 +88,9 @@ cm-dashboard • ● cmbox ● srv01 ● srv02 ● steambox
 - **s**: Start selected service (UserStart)
 - **S**: Stop selected service (UserStop)
 - **J**: Show service logs (journalctl in tmux popup)
+- **L**: Show custom log files (tail -f custom paths in tmux popup)
 - **R**: Rebuild current host
+- **B**: Run backup on current host
 - **q**: Quit

 ### Status Indicators
@@ -173,9 +175,10 @@ subscriber_ports = [6130]
 [hosts]
 predefined_hosts = ["cmbox", "srv01", "srv02"]

-[ui]
-ssh_user = "cm"
+[ssh]
+rebuild_user = "cm"
 rebuild_alias = "nixos-rebuild-cmtec"
+backup_alias = "cm-backup-run"
 ```

 ## Technical Implementation
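
The renamed `[ssh]` table lines up field-for-field with the `SshConfig` struct changed in the config.rs hunk further down this compare. A minimal sketch of how the new table deserializes, assuming the `serde` and `toml` crates:

```rust
use serde::Deserialize;

/// Mirrors the SshConfig struct changed in the config.rs hunk below.
#[derive(Debug, Deserialize)]
struct SshConfig {
    rebuild_user: String,
    rebuild_alias: String,
    backup_alias: String,
}

fn main() {
    let raw = r#"
        rebuild_user = "cm"
        rebuild_alias = "nixos-rebuild-cmtec"
        backup_alias = "cm-backup-run"
    "#;
    let cfg: SshConfig = toml::from_str(raw).expect("valid [ssh] table");
    println!("{:?}", cfg);
}
```
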
@@ -329,7 +332,7 @@ This triggers automated:
 - Tarball upload to Gitea

 ### NixOS Integration
-Update `~/projects/nixosbox/hosts/common/cm-dashboard.nix`:
+Update `~/projects/nixosbox/hosts/services/cm-dashboard.nix`:

 ```nix
 version = "v0.1.43";
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-agent"
-version = "0.1.69"
+version = "0.1.74"
 edition = "2021"

 [dependencies]
@@ -4,7 +4,7 @@ use std::time::Duration;
 use tokio::time::interval;
 use tracing::{debug, error, info};

-use crate::communication::{AgentCommand, ServiceAction, ZmqHandler};
+use crate::communication::{AgentCommand, ZmqHandler};
 use crate::config::AgentConfig;
 use crate::metrics::MetricCollectionManager;
 use crate::notifications::NotificationManager;
@@ -315,75 +315,10 @@ impl Agent {
                 info!("Processing Ping command - agent is alive");
                 // Could send a response back via ZMQ if needed
             }
-            AgentCommand::ServiceControl { service_name, action } => {
-                info!("Processing ServiceControl command: {} {:?}", service_name, action);
-                if let Err(e) = self.handle_service_control(&service_name, &action).await {
-                    error!("Failed to execute service control: {}", e);
-                }
-            }
         }
         Ok(())
     }

-    /// Handle systemd service control commands
-    async fn handle_service_control(&mut self, service_name: &str, action: &ServiceAction) -> Result<()> {
-        let (action_str, is_user_action) = match action {
-            ServiceAction::Start => ("start", false),
-            ServiceAction::Stop => ("stop", false),
-            ServiceAction::Status => ("status", false),
-            ServiceAction::UserStart => ("start", true),
-            ServiceAction::UserStop => ("stop", true),
-        };
-
-        info!("Executing systemctl {} {} (user action: {})", action_str, service_name, is_user_action);
-
-        // Handle user-stopped service tracking before systemctl execution (stop only)
-        match action {
-            ServiceAction::UserStop => {
-                info!("Marking service '{}' as user-stopped", service_name);
-                if let Err(e) = self.service_tracker.mark_user_stopped(service_name) {
-                    error!("Failed to mark service as user-stopped: {}", e);
-                } else {
-                    // Sync to global tracker
-                    UserStoppedServiceTracker::update_global(&self.service_tracker);
-                }
-            }
-            _ => {}
-        }
-
-        let output = tokio::process::Command::new("sudo")
-            .arg("systemctl")
-            .arg(action_str)
-            .arg(format!("{}.service", service_name))
-            .output()
-            .await?;
-
-        if output.status.success() {
-            info!("Service {} {} completed successfully", service_name, action_str);
-            if !output.stdout.is_empty() {
-                debug!("stdout: {}", String::from_utf8_lossy(&output.stdout));
-            }
-
-            // Note: User-stopped flag will be cleared by systemd collector
-            // when service actually reaches 'active' state, not here
-        } else {
-            let stderr = String::from_utf8_lossy(&output.stderr);
-            error!("Service {} {} failed: {}", service_name, action_str, stderr);
-            return Err(anyhow::anyhow!("systemctl {} {} failed: {}", action_str, service_name, stderr));
-        }
-
-        // Force refresh metrics after service control to update service status
-        if matches!(action, ServiceAction::Start | ServiceAction::Stop | ServiceAction::UserStart | ServiceAction::UserStop) {
-            info!("Triggering immediate metric refresh after service control");
-            if let Err(e) = self.collect_metrics_only().await {
-                error!("Failed to refresh metrics after service control: {}", e);
-            } else {
-                info!("Service status refreshed immediately after {} {}", action_str, service_name);
-            }
-        }
-
-        Ok(())
-    }
-
     /// Check metrics for user-stopped services that are now active and clear their flags
     fn clear_user_stopped_flags_for_active_services(&mut self, metrics: &[Metric]) {
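
The deleted `handle_service_control` reduced to one core pattern: shell out to `systemctl` through `sudo` and turn a non-zero exit status into an error. For reference, a minimal standalone sketch of that pattern, with `tokio` and `anyhow` assumed as in the removed code:

```rust
use tokio::process::Command;

/// Run `sudo systemctl <action> <service>.service`, failing on non-zero exit.
async fn systemctl(action: &str, service: &str) -> anyhow::Result<()> {
    let output = Command::new("sudo")
        .arg("systemctl")
        .arg(action)
        .arg(format!("{}.service", service))
        .output()
        .await?;
    if output.status.success() {
        Ok(())
    } else {
        anyhow::bail!(
            "systemctl {} {} failed: {}",
            action,
            service,
            String::from_utf8_lossy(&output.stderr)
        )
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Illustrative call; any unit name works here.
    systemctl("status", "sshd").await
}
```
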
@@ -98,19 +98,4 @@ pub enum AgentCommand {
     ToggleCollector { name: String, enabled: bool },
     /// Request status/health check
     Ping,
-    /// Control systemd service
-    ServiceControl {
-        service_name: String,
-        action: ServiceAction,
-    },
-}
-
-/// Service control actions
-#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
-pub enum ServiceAction {
-    Start,
-    Stop,
-    Status,
-    UserStart, // User-initiated start (clears user-stopped flag)
-    UserStop,  // User-initiated stop (marks as user-stopped)
 }
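
What remains of the agent's `AgentCommand` still derives `serde::Serialize`/`Deserialize`, so commands keep crossing the wire as externally tagged JSON. A sketch of the round trip, including only the variants visible as context in this hunk (`serde` and `serde_json` assumed):

```rust
/// Only the variants visible in the hunk above; others elided.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
enum AgentCommand {
    ToggleCollector { name: String, enabled: bool },
    Ping,
}

fn main() {
    // "systemd" is an illustrative collector name, not taken from the diff.
    let cmd = AgentCommand::ToggleCollector { name: "systemd".into(), enabled: true };
    println!("{}", serde_json::to_string(&cmd).unwrap());
    // -> {"ToggleCollector":{"name":"systemd","enabled":true}}
    println!("{}", serde_json::to_string(&AgentCommand::Ping).unwrap());
    // -> "Ping"
}
```
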
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard"
-version = "0.1.69"
+version = "0.1.74"
 edition = "2021"

 [dependencies]
@@ -9,14 +9,13 @@ use std::io;
 use std::time::{Duration, Instant};
 use tracing::{debug, error, info, warn};

-use crate::communication::{AgentCommand, ServiceAction, ZmqCommandSender, ZmqConsumer};
+use crate::communication::{ZmqConsumer};
 use crate::config::DashboardConfig;
 use crate::metrics::MetricStore;
 use crate::ui::{TuiApp, UiCommand};

 pub struct Dashboard {
     zmq_consumer: ZmqConsumer,
-    zmq_command_sender: ZmqCommandSender,
     metric_store: MetricStore,
     tui_app: Option<TuiApp>,
     terminal: Option<Terminal<CrosstermBackend<io::Stdout>>>,
@@ -58,14 +57,6 @@ impl Dashboard {
             }
         };

-        // Initialize ZMQ command sender
-        let zmq_command_sender = match ZmqCommandSender::new(&config.zmq) {
-            Ok(sender) => sender,
-            Err(e) => {
-                error!("Failed to initialize ZMQ command sender: {}", e);
-                return Err(e);
-            }
-        };

         // Try to connect to hosts but don't fail if none are available
         match zmq_consumer.connect_to_predefined_hosts(&config.hosts).await {
@@ -124,7 +115,6 @@ impl Dashboard {

         Ok(Self {
             zmq_consumer,
-            zmq_command_sender,
             metric_store,
             tui_app,
             terminal,
@@ -134,12 +124,6 @@ impl Dashboard {
         })
     }

-    /// Send a command to a specific agent
-    pub async fn send_command(&mut self, hostname: &str, command: AgentCommand) -> Result<()> {
-        self.zmq_command_sender
-            .send_command(hostname, command)
-            .await
-    }
-
     pub async fn run(&mut self) -> Result<()> {
         info!("Starting dashboard main loop");
@@ -212,34 +196,18 @@ impl Dashboard {
                     metric_message.metrics.len()
                 );

-                // Check if this is the first time we've seen this host
+                // Track first contact with host (no command needed - agent sends data every 2s)
                 let is_new_host = !self
                     .initial_commands_sent
                     .contains(&metric_message.hostname);

                 if is_new_host {
                     info!(
-                        "First contact with host {}, sending initial CollectNow command",
+                        "First contact with host {} - data will update automatically",
                         metric_message.hostname
                     );
-                    // Send CollectNow command for immediate refresh
-                    if let Err(e) = self
-                        .send_command(&metric_message.hostname, AgentCommand::CollectNow)
-                        .await
-                    {
-                        error!(
-                            "Failed to send initial CollectNow command to {}: {}",
-                            metric_message.hostname, e
-                        );
-                    } else {
-                        info!(
-                            "✓ Sent initial CollectNow command to {}",
-                            metric_message.hostname
-                        );
-                        self.initial_commands_sent
-                            .insert(metric_message.hostname.clone());
-                    }
+                    self.initial_commands_sent
+                        .insert(metric_message.hostname.clone());
                 }

                 // Update metric store
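
After this hunk, `initial_commands_sent` no longer drives any ZMQ traffic; it only gates a one-time log line per host. The bookkeeping is just `HashSet::insert`, whose return value says whether the hostname was new (sketch; the set's exact type is assumed from the field usage above):

```rust
use std::collections::HashSet;

/// insert() returns true exactly once per hostname, which is all the
/// "first contact" logic needs now that no command is sent in response.
fn note_first_contact(seen: &mut HashSet<String>, hostname: &str) -> bool {
    seen.insert(hostname.to_string())
}

fn main() {
    let mut seen = HashSet::new();
    assert!(note_first_contact(&mut seen, "srv01"));  // first metric batch: log it
    assert!(!note_first_contact(&mut seen, "srv01")); // later batches: silent
    println!("hosts seen: {:?}", seen);
}
```
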
@@ -312,22 +280,6 @@ impl Dashboard {
     /// Execute a UI command by sending it to the appropriate agent
     async fn execute_ui_command(&self, command: UiCommand) -> Result<()> {
         match command {
-            UiCommand::ServiceStart { hostname, service_name } => {
-                info!("Sending user start command for service {} on {}", service_name, hostname);
-                let agent_command = AgentCommand::ServiceControl {
-                    service_name: service_name.clone(),
-                    action: ServiceAction::UserStart,
-                };
-                self.zmq_command_sender.send_command(&hostname, agent_command).await?;
-            }
-            UiCommand::ServiceStop { hostname, service_name } => {
-                info!("Sending user stop command for service {} on {}", service_name, hostname);
-                let agent_command = AgentCommand::ServiceControl {
-                    service_name: service_name.clone(),
-                    action: ServiceAction::UserStop,
-                };
-                self.zmq_command_sender.send_command(&hostname, agent_command).await?;
-            }
             UiCommand::TriggerBackup { hostname } => {
                 info!("Trigger backup requested for {}", hostname);
                 // TODO: Implement backup trigger command
@@ -5,40 +5,6 @@ use zmq::{Context, Socket, SocketType};

 use crate::config::ZmqConfig;

-/// Commands that can be sent to agents
-#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
-pub enum AgentCommand {
-    /// Request immediate metric collection
-    CollectNow,
-    /// Change collection interval
-    SetInterval { seconds: u64 },
-    /// Enable/disable a collector
-    ToggleCollector { name: String, enabled: bool },
-    /// Request status/health check
-    Ping,
-    /// Control systemd service
-    ServiceControl {
-        service_name: String,
-        action: ServiceAction,
-    },
-    /// Rebuild NixOS system
-    SystemRebuild {
-        git_url: String,
-        git_branch: String,
-        working_dir: String,
-        api_key_file: Option<String>,
-    },
-}
-
-/// Service control actions
-#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
-pub enum ServiceAction {
-    Start,
-    Stop,
-    Status,
-    UserStart, // User-initiated start (clears user-stopped flag)
-    UserStop,  // User-initiated stop (marks as user-stopped)
-}
-
 /// ZMQ consumer for receiving metrics from agents
 pub struct ZmqConsumer {
@@ -84,6 +50,7 @@ impl ZmqConsumer {
         }
     }

+
     /// Connect to predefined hosts using their configuration
     pub async fn connect_to_predefined_hosts(&mut self, hosts: &std::collections::HashMap<String, crate::config::HostDetails>) -> Result<()> {
         let default_port = self.config.subscriber_ports[0];
@@ -104,27 +71,13 @@ impl ZmqConsumer {
         Ok(())
     }

-    /// Connect to a host using its configuration details with fallback support
+    /// Connect to a host using its configuration details
     pub async fn connect_to_host_with_details(&mut self, hostname: &str, host_details: &crate::config::HostDetails, port: u16) -> Result<()> {
-        // Get primary connection IP
+        // Get primary connection IP only - no fallbacks
         let primary_ip = host_details.get_connection_ip(hostname);

-        // Try primary connection
-        if let Ok(()) = self.connect_to_host(&primary_ip, port).await {
-            info!("Connected to {} via primary address: {}", hostname, primary_ip);
-            return Ok(());
-        }
-
-        // Try fallback IPs if primary fails
-        let fallbacks = host_details.get_fallback_ips(hostname);
-        for fallback_ip in fallbacks {
-            if let Ok(()) = self.connect_to_host(&fallback_ip, port).await {
-                info!("Connected to {} via fallback address: {}", hostname, fallback_ip);
-                return Ok(());
-            }
-        }
-
-        Err(anyhow::anyhow!("Failed to connect to {} using all available addresses", hostname))
+        // Connect directly without fallback attempts
+        self.connect_to_host(&primary_ip, port).await
     }

     /// Receive command output from any connected agent (non-blocking)
@@ -215,42 +168,3 @@ impl ZmqConsumer {
     }
 }

-/// ZMQ command sender for sending commands to agents
-pub struct ZmqCommandSender {
-    context: Context,
-}
-
-impl ZmqCommandSender {
-    pub fn new(_config: &ZmqConfig) -> Result<Self> {
-        let context = Context::new();
-
-        info!("ZMQ command sender initialized");
-
-        Ok(Self { context })
-    }
-
-    /// Send a command to a specific agent
-    pub async fn send_command(&self, hostname: &str, command: AgentCommand) -> Result<()> {
-        // Create a new PUSH socket for this command (ZMQ best practice)
-        let socket = self.context.socket(SocketType::PUSH)?;
-
-        // Set socket options
-        socket.set_linger(1000)?; // Wait up to 1 second on close
-        socket.set_sndtimeo(5000)?; // 5 second send timeout
-
-        // Connect to agent's command port (6131)
-        let address = format!("tcp://{}:6131", hostname);
-        socket.connect(&address)?;
-
-        // Serialize command
-        let serialized = serde_json::to_vec(&command)?;
-
-        // Send command
-        socket.send(&serialized, 0)?;
-
-        info!("Sent command {:?} to agent at {}", command, hostname);
-
-        // Socket will be automatically closed when dropped
-        Ok(())
-    }
-}
@@ -31,76 +31,15 @@ pub struct HostDetails {
     pub mac_address: Option<String>,
     /// Primary IP address (local network)
     pub ip: Option<String>,
-    /// Tailscale network IP address
-    pub tailscale_ip: Option<String>,
-    /// Preferred connection type: "local", "tailscale", or "auto" (fallback)
-    #[serde(default = "default_connection_type")]
-    pub connection_type: String,
 }

-fn default_connection_type() -> String {
-    "auto".to_string()
-}
-
 impl HostDetails {
-    /// Get the preferred IP address for connection based on connection_type
+    /// Get the IP address for connection (uses ip field or hostname as fallback)
     pub fn get_connection_ip(&self, hostname: &str) -> String {
-        match self.connection_type.as_str() {
-            "tailscale" => {
-                if let Some(ref ts_ip) = self.tailscale_ip {
-                    ts_ip.clone()
-                } else {
-                    // Fallback to local IP or hostname
-                    self.ip.as_ref().unwrap_or(&hostname.to_string()).clone()
-                }
-            }
-            "local" => {
-                if let Some(ref local_ip) = self.ip {
-                    local_ip.clone()
-                } else {
-                    hostname.to_string()
-                }
-            }
-            "auto" | _ => {
-                // Try local first, then tailscale, then hostname
-                if let Some(ref local_ip) = self.ip {
-                    local_ip.clone()
-                } else if let Some(ref ts_ip) = self.tailscale_ip {
-                    ts_ip.clone()
-                } else {
-                    hostname.to_string()
-                }
-            }
-        }
+        self.ip.as_ref().unwrap_or(&hostname.to_string()).clone()
     }

-    /// Get fallback IP addresses for connection retry
-    pub fn get_fallback_ips(&self, hostname: &str) -> Vec<String> {
-        let mut fallbacks = Vec::new();
-
-        // Add all available IPs except the primary one
-        let primary = self.get_connection_ip(hostname);
-
-        // Add fallbacks in priority order: local first, then tailscale
-        if let Some(ref local_ip) = self.ip {
-            if local_ip != &primary {
-                fallbacks.push(local_ip.clone());
-            }
-        }
-
-        if let Some(ref ts_ip) = self.tailscale_ip {
-            if ts_ip != &primary {
-                fallbacks.push(ts_ip.clone());
-            }
-        }
-
-        // Always include hostname as final fallback if not already primary
-        if hostname != primary {
-            fallbacks.push(hostname.to_string());
-        }
-
-        fallbacks
-    }
 }

 /// System configuration
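
With `tailscale_ip`, `connection_type`, and `get_fallback_ips` gone, address resolution collapses to "configured `ip`, else the hostname". A standalone sketch of the surviving behavior:

```rust
/// Stand-in for the trimmed-down HostDetails from this hunk.
struct HostDetails {
    ip: Option<String>,
}

impl HostDetails {
    /// Equivalent to the diff's `self.ip.as_ref().unwrap_or(&hostname.to_string()).clone()`.
    fn get_connection_ip(&self, hostname: &str) -> String {
        self.ip.clone().unwrap_or_else(|| hostname.to_string())
    }
}

fn main() {
    let configured = HostDetails { ip: Some("192.168.1.10".into()) };
    let bare = HostDetails { ip: None };
    assert_eq!(configured.get_connection_ip("srv01"), "192.168.1.10");
    assert_eq!(bare.get_connection_ip("srv01"), "srv01"); // hostname fallback
    println!("ok");
}
```
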
@@ -112,11 +51,12 @@ pub struct SystemConfig {
     pub nixos_config_api_key_file: Option<String>,
 }

-/// SSH configuration for rebuild operations
+/// SSH configuration for rebuild and backup operations
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct SshConfig {
     pub rebuild_user: String,
     pub rebuild_alias: String,
+    pub backup_alias: String,
 }

 /// Service log file configuration per host
@@ -23,8 +23,6 @@ use widgets::{BackupWidget, ServicesWidget, SystemWidget, Widget};
 /// Commands that can be triggered from the UI
 #[derive(Debug, Clone)]
 pub enum UiCommand {
-    ServiceStart { hostname: String, service_name: String },
-    ServiceStop { hostname: String, service_name: String },
     TriggerBackup { hostname: String },
 }

@@ -272,20 +270,84 @@ impl TuiApp {
                     .ok(); // Ignore errors, tmux will handle them
                 }
             }
+            KeyCode::Char('B') => {
+                // Backup command - works on any panel for current host
+                if let Some(hostname) = self.current_host.clone() {
+                    let connection_ip = self.get_connection_ip(&hostname);
+                    // Create command that shows logo, runs backup, and waits for user input
+                    let logo_and_backup = format!(
+                        "bash -c 'cat << \"EOF\"\nBackup Operation\nTarget: {} ({})\n\nEOF\nssh -tt {}@{} \"bash -ic {}\"\necho\necho \"========================================\"\necho \"Backup completed. Press any key to close...\"\necho \"========================================\"\nread -n 1 -s\nexit'",
+                        hostname,
+                        connection_ip,
+                        self.config.ssh.rebuild_user,
+                        connection_ip,
+                        self.config.ssh.backup_alias
+                    );
+
+                    std::process::Command::new("tmux")
+                        .arg("split-window")
+                        .arg("-v")
+                        .arg("-p")
+                        .arg("30")
+                        .arg(&logo_and_backup)
+                        .spawn()
+                        .ok(); // Ignore errors, tmux will handle them
+                }
+            }
             KeyCode::Char('s') => {
-                // Service start command
+                // Service start command via SSH with progress display
                 if let (Some(service_name), Some(hostname)) = (self.get_selected_service(), self.current_host.clone()) {
-                    if self.start_command(&hostname, CommandType::ServiceStart, service_name.clone()) {
-                        return Ok(Some(UiCommand::ServiceStart { hostname, service_name }));
-                    }
+                    // Start transition tracking for visual feedback
+                    self.start_command(&hostname, CommandType::ServiceStart, service_name.clone());
+
+                    let connection_ip = self.get_connection_ip(&hostname);
+                    let service_start_command = format!(
+                        "bash -c 'cat << \"EOF\"\nService Start: {}.service\nTarget: {} ({})\n\nEOF\nssh -tt {}@{} \"sudo systemctl start {}.service && echo \\\"Service started successfully\\\" && sudo systemctl status {}.service --no-pager -l\"\necho\necho \"========================================\"\necho \"Operation completed. Press any key to close...\"\necho \"========================================\"\nread -n 1 -s\nexit'",
+                        service_name,
+                        hostname,
+                        connection_ip,
+                        self.config.ssh.rebuild_user,
+                        connection_ip,
+                        service_name,
+                        service_name
+                    );
+
+                    std::process::Command::new("tmux")
+                        .arg("split-window")
+                        .arg("-v")
+                        .arg("-p")
+                        .arg("30")
+                        .arg(&service_start_command)
+                        .spawn()
+                        .ok(); // Ignore errors, tmux will handle them
                 }
             }
             KeyCode::Char('S') => {
-                // Service stop command
+                // Service stop command via SSH with progress display
                 if let (Some(service_name), Some(hostname)) = (self.get_selected_service(), self.current_host.clone()) {
-                    if self.start_command(&hostname, CommandType::ServiceStop, service_name.clone()) {
-                        return Ok(Some(UiCommand::ServiceStop { hostname, service_name }));
-                    }
+                    // Start transition tracking for visual feedback
+                    self.start_command(&hostname, CommandType::ServiceStop, service_name.clone());
+
+                    let connection_ip = self.get_connection_ip(&hostname);
+                    let service_stop_command = format!(
+                        "bash -c 'cat << \"EOF\"\nService Stop: {}.service\nTarget: {} ({})\n\nEOF\nssh -tt {}@{} \"sudo systemctl stop {}.service && echo \\\"Service stopped successfully\\\" && sudo systemctl status {}.service --no-pager -l\"\necho\necho \"========================================\"\necho \"Operation completed. Press any key to close...\"\necho \"========================================\"\nread -n 1 -s\nexit'",
+                        service_name,
+                        hostname,
+                        connection_ip,
+                        self.config.ssh.rebuild_user,
+                        connection_ip,
+                        service_name,
+                        service_name
+                    );
+
+                    std::process::Command::new("tmux")
+                        .arg("split-window")
+                        .arg("-v")
+                        .arg("-p")
+                        .arg("30")
+                        .arg(&service_stop_command)
+                        .spawn()
+                        .ok(); // Ignore errors, tmux will handle them
                 }
             }
             KeyCode::Char('J') => {
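
All three new handlers ('B', 's', 'S') share the same fire-and-forget shape: build a shell string, hand it to `tmux split-window`, and ignore the spawn result. A condensed sketch of that shared shape (the helper name is mine, not the codebase's):

```rust
use std::process::Command;

/// Open a pane below the current one, 30% of the window height, running
/// the given shell command. Spawn errors are deliberately ignored; tmux
/// reports failures in its own UI.
fn open_tmux_pane(shell_command: &str) {
    Command::new("tmux")
        .arg("split-window")
        .arg("-v") // vertical split: new pane below
        .arg("-p")
        .arg("30") // pane takes 30% of the window
        .arg(shell_command)
        .spawn()
        .ok();
}

fn main() {
    // Illustrative command; the real handlers build an ssh invocation here.
    open_tmux_pane("echo 'hello from a pane'; read -n 1 -s");
}
```
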
@@ -1,6 +1,6 @@
 [package]
 name = "cm-dashboard-shared"
-version = "0.1.69"
+version = "0.1.74"
 edition = "2021"

 [dependencies]