Replace complex SystemRebuild with simple SSH + tmux popup approach

- Remove all SystemRebuild command infrastructure from agent and dashboard
- Replace with direct tmux popup execution: ssh {user}@{host} {alias} (see the sketch below)
- Add configurable SSH user and rebuild alias in dashboard config
- Eliminate agent process crashes during rebuilds
- Simplify architecture by removing ZMQ command streaming complexity
- Clean up all related dead code and fix compilation warnings

Benefits:
- Process isolation: rebuild runs independently via SSH
- Crash resilience: agent/dashboard can restart without affecting rebuilds
- Configuration flexibility: SSH user and alias configurable per deployment
- Operational simplicity: standard tmux popup interface
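
Illustrative sketch of the new flow. The struct, field, and function names (RebuildSettings, ssh_user, rebuild_alias, open_rebuild_popup) and the example host and alias values are placeholders, not identifiers from this repository; the assumption is only that the popup is opened with tmux's display-popup -E, which runs a shell command in a popup and closes it when the command exits.

use std::process::{Command, ExitStatus};

// Hypothetical config fields; the real dashboard config keys may be named differently.
struct RebuildSettings {
    ssh_user: String,      // SSH user, configurable per deployment
    rebuild_alias: String, // remote alias that performs the rebuild
}

// Ask tmux to open a popup running `ssh {user}@{host} {alias}`.
// `-E` closes the popup automatically when the command exits.
// Returns the exit status of the `tmux display-popup` invocation.
fn open_rebuild_popup(settings: &RebuildSettings, host: &str) -> std::io::Result<ExitStatus> {
    let ssh_cmd = format!("ssh {}@{} {}", settings.ssh_user, host, settings.rebuild_alias);
    Command::new("tmux")
        .args(["display-popup", "-E"])
        .arg(ssh_cmd)
        .status()
}

fn main() -> std::io::Result<()> {
    // Placeholder values; must be invoked from inside an attached tmux session.
    let settings = RebuildSettings {
        ssh_user: "admin".into(),
        rebuild_alias: "rebuild-system".into(),
    };
    open_rebuild_popup(&settings, "example-host")?;
    Ok(())
}

Because the SSH command is run by the tmux server rather than by the dashboard or agent process, either can restart without interrupting an in-flight rebuild, which is the process-isolation and crash-resilience benefit listed above.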
2025-10-27 14:25:45 +01:00
parent ac5d2d4db5
commit e61a845965
9 changed files with 73 additions and 425 deletions

@@ -22,7 +22,7 @@ pub struct Dashboard {
     terminal: Option<Terminal<CrosstermBackend<io::Stdout>>>,
     headless: bool,
     initial_commands_sent: std::collections::HashSet<String>,
-    config: DashboardConfig,
+    _config: DashboardConfig,
 }
 impl Dashboard {
@@ -91,7 +91,7 @@ impl Dashboard {
             (None, None)
         } else {
             // Initialize TUI app
-            let tui_app = TuiApp::new();
+            let tui_app = TuiApp::new(config.clone());
             // Setup terminal
             if let Err(e) = enable_raw_mode() {
@@ -133,7 +133,7 @@ impl Dashboard {
             terminal,
             headless,
             initial_commands_sent: std::collections::HashSet::new(),
-            config,
+            _config: config,
         })
     }
@@ -245,24 +245,10 @@ impl Dashboard {
         // Update TUI with new hosts and metrics (only if not headless)
         if let Some(ref mut tui_app) = self.tui_app {
-            let mut connected_hosts = self
+            let connected_hosts = self
                 .metric_store
                 .get_connected_hosts(Duration::from_secs(30));
-            // Add hosts that are rebuilding but may be temporarily disconnected
-            // Use extended timeout (5 minutes) for rebuilding hosts
-            let rebuilding_hosts = self
-                .metric_store
-                .get_connected_hosts(Duration::from_secs(300));
-            for host in rebuilding_hosts {
-                if !connected_hosts.contains(&host) {
-                    // Check if this host is rebuilding in the UI
-                    if tui_app.is_host_rebuilding(&host) {
-                        connected_hosts.push(host);
-                    }
-                }
-            }
             tui_app.update_hosts(connected_hosts);
             tui_app.update_metrics(&self.metric_store);
@@ -290,14 +276,14 @@ impl Dashboard {
         // Render TUI (only if not headless)
         if !self.headless {
-            if let (Some(ref mut terminal), Some(ref mut tui_app)) =
-                (&mut self.terminal, &mut self.tui_app)
-            {
-                if let Err(e) = terminal.draw(|frame| {
-                    tui_app.render(frame, &self.metric_store);
-                }) {
-                    error!("Error rendering TUI: {}", e);
-                    break;
+            if let Some(ref mut terminal) = self.terminal {
+                if let Some(ref mut tui_app) = self.tui_app {
+                    if let Err(e) = terminal.draw(|frame| {
+                        tui_app.render(frame, &self.metric_store);
+                    }) {
+                        error!("Error rendering TUI: {}", e);
+                        break;
+                    }
                 }
             }
         }
@@ -337,16 +323,6 @@ impl Dashboard {
             };
             self.zmq_command_sender.send_command(&hostname, agent_command).await?;
         }
-        UiCommand::SystemRebuild { hostname } => {
-            info!("Sending system rebuild command to {}", hostname);
-            let agent_command = AgentCommand::SystemRebuild {
-                git_url: self.config.system.nixos_config_git_url.clone(),
-                git_branch: self.config.system.nixos_config_branch.clone(),
-                working_dir: self.config.system.nixos_config_working_dir.clone(),
-                api_key_file: self.config.system.nixos_config_api_key_file.clone(),
-            };
-            self.zmq_command_sender.send_command(&hostname, agent_command).await?;
-        }
         UiCommand::TriggerBackup { hostname } => {
             info!("Trigger backup requested for {}", hostname);
             // TODO: Implement backup trigger command