#![allow(dead_code)]

use std::collections::HashMap;
use std::future::Future;
use std::os::unix::process::ExitStatusExt;
use std::process::ExitStatus;
use std::process::Stdio;
use std::sync::Arc;
use std::sync::OnceLock;

use async_trait::async_trait;
use enum_as_inner::EnumAsInner;
use hyperactor::ProcId;
use hyperactor::WorldId;
use hyperactor::channel;
use hyperactor::channel::ChannelAddr;
use hyperactor::channel::ChannelError;
use hyperactor::channel::ChannelTransport;
use hyperactor::channel::ChannelTx;
use hyperactor::channel::Rx;
use hyperactor::channel::Tx;
use hyperactor::channel::TxStatus;
use hyperactor::sync::flag;
use hyperactor::sync::monitor;
use ndslice::view::Extent;
use nix::sys::signal;
use nix::unistd::Pid;
use serde::Deserialize;
use serde::Serialize;
use tokio::io;
use tokio::process::Command;
use tokio::sync::Mutex;
use tokio::task::JoinSet;

use super::Alloc;
use super::AllocSpec;
use super::Allocator;
use super::AllocatorError;
use super::ProcState;
use super::ProcStopReason;
use crate::assign::Ranks;
use crate::bootstrap;
use crate::bootstrap::Allocator2Process;
use crate::bootstrap::MESH_ENABLE_LOG_FORWARDING;
use crate::bootstrap::MESH_TAIL_LOG_LINES;
use crate::bootstrap::Process2Allocator;
use crate::bootstrap::Process2AllocatorMessage;
use crate::logging::OutputTarget;
use crate::logging::StreamFwder;
use crate::shortuuid::ShortUuid;

pub const CLIENT_TRACE_ID_LABEL: &str = "CLIENT_TRACE_ID";

/// An allocator that runs each proc in its own local child process,
/// spawned from a configurable [`Command`].
pub struct ProcessAllocator {
    cmd: Arc<Mutex<Command>>,
}

impl ProcessAllocator {
    /// Create a new allocator. The provided command is used to spawn
    /// each child process; it is expected to launch the mesh bootstrap
    /// entry point.
    pub fn new(cmd: Command) -> Self {
        Self {
            cmd: Arc::new(Mutex::new(cmd)),
        }
    }
}

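// Usage sketch (hypothetical caller code; `spec` is an `AllocSpec` the caller
// has already built, and the bootstrap binary path is an assumption):
//
//     let mut allocator = ProcessAllocator::new(Command::new("/path/to/bootstrap"));
//     let mut alloc = allocator.allocate(spec).await?;
//     while let Some(state) = alloc.next().await {
//         // Handle ProcState::Created / Running / Stopped / Failed events.
//     }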
#[async_trait]
impl Allocator for ProcessAllocator {
    type Alloc = ProcessAlloc;

    #[hyperactor::instrument(fields(name = "process_allocate", monarch_client_trace_id = spec.constraints.match_labels.get(CLIENT_TRACE_ID_LABEL).cloned().unwrap_or_default()))]
    async fn allocate(&mut self, spec: AllocSpec) -> Result<ProcessAlloc, AllocatorError> {
        // Reject unsupported transports before binding the bootstrap channel.
        if spec.transport == ChannelTransport::Local {
            return Err(AllocatorError::Other(anyhow::anyhow!(
                "ProcessAllocator does not support local transport"
            )));
        }

        let (bootstrap_addr, rx) = channel::serve(ChannelAddr::any(ChannelTransport::Unix))
            .map_err(anyhow::Error::from)?;

        let name = ShortUuid::generate();
        let world_id = WorldId(name.to_string());
        tracing::info!(
            name = "ProcessAllocStatus",
            alloc_name = %world_id,
            addr = %bootstrap_addr,
            status = "Allocated",
        );
        Ok(ProcessAlloc {
            name: name.clone(),
            world_id,
            spec: spec.clone(),
            bootstrap_addr,
            rx,
            active: HashMap::new(),
            ranks: Ranks::new(spec.extent.num_ranks()),
            created: Vec::new(),
            cmd: Arc::clone(&self.cmd),
            children: JoinSet::new(),
            running: true,
            failed: false,
            client_context: ClientContext {
                trace_id: spec
                    .constraints
                    .match_labels
                    .get(CLIENT_TRACE_ID_LABEL)
                    .cloned()
                    .unwrap_or_default(),
            },
        })
    }
}

/// Client-provided context that is propagated to every spawned process.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientContext {
    /// Trace id used to correlate child-process logs with the client session.
    pub trace_id: String,
}
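// The trace id originates from the `CLIENT_TRACE_ID` match label on the
// spec's constraints (see `allocate` above) and is re-exported to each child
// through `bootstrap::CLIENT_TRACE_ID_ENV` in `maybe_spawn` below.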

/// The allocation produced by a [`ProcessAllocator`]: it owns the spawned
/// child processes and reports their lifecycle as [`ProcState`] events via
/// [`Alloc::next`].
pub struct ProcessAlloc {
    name: ShortUuid,
    world_id: WorldId,
    spec: AllocSpec,
    bootstrap_addr: ChannelAddr,
    rx: channel::ChannelRx<Process2Allocator>,
    active: HashMap<usize, Child>,
    ranks: Ranks<usize>,
    created: Vec<ShortUuid>,
    cmd: Arc<Mutex<Command>>,
    children: JoinSet<(usize, ProcStopReason)>,
    running: bool,
    failed: bool,
    client_context: ClientContext,
}

/// The state of the control channel to a child process.
#[derive(EnumAsInner)]
enum ChannelState {
    NotConnected,
    Connected(ChannelTx<Allocator2Process>),
    Failed(ChannelError),
}

/// A handle to a single spawned child process: its control channel, stdio
/// forwarders, and stop state.
struct Child {
    local_rank: usize,
    channel: ChannelState,
    group: monitor::Group,
    exit_flag: Option<flag::Flag>,
    stdout_fwder: Arc<std::sync::Mutex<Option<StreamFwder>>>,
    stderr_fwder: Arc<std::sync::Mutex<Option<StreamFwder>>>,
    stop_reason: Arc<OnceLock<ProcStopReason>>,
    process_pid: Arc<std::sync::Mutex<Option<i32>>>,
}

impl Child {
    /// Wrap a freshly spawned process in a `Child` handle, returning the
    /// handle together with a future that resolves to the process's stop
    /// reason once it has exited.
    fn monitored(
        local_rank: usize,
        mut process: tokio::process::Child,
        log_channel: Option<ChannelAddr>,
        tail_size: usize,
    ) -> (Self, impl Future<Output = ProcStopReason>) {
        let (group, handle) = monitor::group();
        let (exit_flag, exit_guard) = flag::guarded();
        let stop_reason = Arc::new(OnceLock::new());
        let process_pid = Arc::new(std::sync::Mutex::new(process.id().map(|id| id as i32)));

        let stdout_pipe = process.stdout.take();
        let stderr_pipe = process.stderr.take();

        let child = Self {
            local_rank,
            channel: ChannelState::NotConnected,
            group,
            exit_flag: Some(exit_flag),
            stdout_fwder: Arc::new(std::sync::Mutex::new(None)),
            stderr_fwder: Arc::new(std::sync::Mutex::new(None)),
            stop_reason: Arc::clone(&stop_reason),
            process_pid: process_pid.clone(),
        };

        let child_stdout_fwder = child.stdout_fwder.clone();
        let child_stderr_fwder = child.stderr_fwder.clone();

        if let Some(stdout) = stdout_pipe {
            let pid = process.id().unwrap_or_default();
            let log_channel_clone = log_channel.clone();
            *child_stdout_fwder.lock().expect("stdout_fwder mutex poisoned") = Some(StreamFwder::start(
                stdout,
                None,
                OutputTarget::Stdout,
                tail_size,
                log_channel_clone,
                pid,
                local_rank,
            ));
        }

        if let Some(stderr) = stderr_pipe {
            let pid = process.id().unwrap_or_default();
            *child_stderr_fwder.lock().expect("stderr_fwder mutex poisoned") = Some(StreamFwder::start(
                stderr,
                None,
                OutputTarget::Stderr,
                tail_size,
                log_channel,
                pid,
                local_rank,
            ));
        }

        let monitor = async move {
            let reason = tokio::select! {
                // The group failed (e.g. watchdog fired or the channel was
                // lost): make sure the process is killed, then reap it.
                _ = handle => {
                    Self::ensure_killed(process_pid);
                    Self::exit_status_to_reason(process.wait().await)
                }
                // The process exited on its own.
                result = process.wait() => {
                    Self::exit_status_to_reason(result)
                }
            };
            exit_guard.signal();

            stop_reason.get_or_init(|| reason).clone()
        };

        (child, monitor)
    }

    /// Best-effort SIGTERM to the process, tolerating the case where it has
    /// already exited.
    fn ensure_killed(pid: Arc<std::sync::Mutex<Option<i32>>>) {
        if let Some(pid) = pid.lock().unwrap().take() {
            if let Err(e) = signal::kill(Pid::from_raw(pid), signal::SIGTERM) {
                match e {
                    nix::errno::Errno::ESRCH => {
                        tracing::debug!("pid {} already exited", pid);
                    }
                    _ => {
                        tracing::error!("failed to kill {}: {}", pid, e);
                    }
                }
            }
        }
    }

    fn exit_status_to_reason(result: io::Result<ExitStatus>) -> ProcStopReason {
        match result {
            Ok(status) if status.success() => ProcStopReason::Stopped,
            Ok(status) => {
                if let Some(signal) = status.signal() {
                    ProcStopReason::Killed(signal, status.core_dumped())
                } else if let Some(code) = status.code() {
                    ProcStopReason::Exited(code, String::new())
                } else {
                    ProcStopReason::Unknown
                }
            }
            Err(e) => {
                tracing::error!("error waiting for process: {}", e);
                ProcStopReason::Unknown
            }
        }
    }
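
    // Examples of the mapping above:
    //   exit status 0       -> ProcStopReason::Stopped
    //   exit status 3       -> ProcStopReason::Exited(3, "") (the stderr tail
    //                          is attached later, in `ProcessAlloc::next`)
    //   SIGTERM (signal 15) -> ProcStopReason::Killed(15, false)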

    #[hyperactor::instrument_infallible]
    fn stop(&self, reason: ProcStopReason) {
        let _ = self.stop_reason.set(reason);
        self.group.fail();
    }

    /// Connect the control channel to the child at `addr`. Returns false if
    /// a channel was already established (or has already failed).
    fn connect(&mut self, addr: ChannelAddr) -> bool {
        if !self.channel.is_not_connected() {
            return false;
        }

        match channel::dial(addr) {
            Ok(channel) => {
                let mut status = channel.status().clone();
                self.channel = ChannelState::Connected(channel);
                // Fail the group if the channel ever closes.
                self.group.spawn(async move {
                    let _ = status
                        .wait_for(|status| matches!(status, TxStatus::Closed))
                        .await;
                    Result::<(), ()>::Err(())
                });
            }
            Err(err) => {
                self.channel = ChannelState::Failed(err);
                self.stop(ProcStopReason::Watchdog);
            }
        };
        true
    }

    /// Give the child a bounded grace period to exit; if it doesn't, fail
    /// the group with a watchdog stop reason.
    fn spawn_watchdog(&mut self) {
        let Some(exit_flag) = self.exit_flag.take() else {
            tracing::info!("exit flag already taken, not spawning watchdog");
            return;
        };
        let group = self.group.clone();
        let stop_reason = self.stop_reason.clone();
        tracing::info!("spawning watchdog");
        tokio::spawn(async move {
            let exit_timeout =
                hyperactor::config::global::get(hyperactor::config::PROCESS_EXIT_TIMEOUT);
            #[allow(clippy::disallowed_methods)]
            if tokio::time::timeout(exit_timeout, exit_flag).await.is_err() {
                tracing::info!("watchdog timeout, killing process");
                let _ = stop_reason.set(ProcStopReason::Watchdog);
                group.fail();
            }
            tracing::info!("watchdog task exit");
        });
    }

    #[hyperactor::instrument_infallible]
    fn post(&mut self, message: Allocator2Process) {
        if let ChannelState::Connected(channel) = &mut self.channel {
            channel.post(message);
        } else {
            self.stop(ProcStopReason::Watchdog);
        }
    }

    #[cfg(test)]
    fn fail_group(&self) {
        self.group.fail();
    }

    /// Detach and return the stdout/stderr forwarders, if any.
    fn take_stream_monitors(&self) -> (Option<StreamFwder>, Option<StreamFwder>) {
        let out = self
            .stdout_fwder
            .lock()
            .expect("stdout_fwder mutex poisoned")
            .take();
        let err = self
            .stderr_fwder
            .lock()
            .expect("stderr_fwder mutex poisoned")
            .take();
        (out, err)
    }
}

impl Drop for Child {
    fn drop(&mut self) {
        Self::ensure_killed(self.process_pid.clone());
    }
}

impl ProcessAlloc {
    #[hyperactor::instrument_infallible]
    fn stop(&mut self, proc_id: &ProcId, reason: ProcStopReason) -> Result<(), anyhow::Error> {
        self.get_mut(proc_id)?.stop(reason);
        Ok(())
    }

    fn get(&self, proc_id: &ProcId) -> Result<&Child, anyhow::Error> {
        self.active.get(&self.index(proc_id)?).ok_or_else(|| {
            anyhow::anyhow!(
                "proc {} not currently active in alloc {}",
                proc_id,
                self.name
            )
        })
    }

    fn get_mut(&mut self, proc_id: &ProcId) -> Result<&mut Child, anyhow::Error> {
        self.active.get_mut(&self.index(proc_id)?).ok_or_else(|| {
            anyhow::anyhow!(
                "proc {} not currently active in alloc {}",
                proc_id,
                self.name
            )
        })
    }

    pub(crate) fn name(&self) -> &ShortUuid {
        &self.name
    }

    /// Map a proc id back to its index within this alloc, verifying that the
    /// proc actually belongs to it.
    fn index(&self, proc_id: &ProcId) -> Result<usize, anyhow::Error> {
        anyhow::ensure!(
            proc_id
                .world_name()
                .expect("proc must be ranked for allocation index")
                .parse::<ShortUuid>()?
                == self.name,
            "proc {} does not belong to alloc {}",
            proc_id,
            self.name
        );
        Ok(proc_id
            .rank()
            .expect("proc must be ranked for allocation index"))
    }
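
    // For example, if this alloc's name is `abc123`, a ranked proc id in
    // world `abc123` with rank 3 maps to index 3; a proc from any other
    // world is rejected as not belonging to this alloc.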

    /// Spawn one more child process if the alloc is not yet at capacity,
    /// returning the corresponding `ProcState` event.
    #[hyperactor::instrument_infallible]
    async fn maybe_spawn(&mut self) -> Option<ProcState> {
        if self.active.len() >= self.spec.extent.num_ranks() {
            return None;
        }
        let mut cmd = self.cmd.lock().await;

        let enable_forwarding = hyperactor::config::global::get(MESH_ENABLE_LOG_FORWARDING);
        let tail_size = hyperactor::config::global::get(MESH_TAIL_LOG_LINES);
        if enable_forwarding || tail_size > 0 {
            cmd.stdout(Stdio::piped()).stderr(Stdio::piped());
        } else {
            cmd.stdout(Stdio::inherit()).stderr(Stdio::inherit());
            tracing::info!(
                "child stdio NOT captured (forwarding and tailing disabled); \
                inheriting parent console"
            );
        }
        // No log channel is configured here: StreamFwder only tails output
        // locally rather than forwarding it.
        let log_channel: Option<ChannelAddr> = None;

        let index = self.created.len();
        self.created.push(ShortUuid::generate());
        let create_key = &self.created[index];

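        // Tell the child how to reach the allocator and who it is:
        //   BOOTSTRAP_ADDR_ENV  -> the allocator's bootstrap channel address
        //   CLIENT_TRACE_ID_ENV -> the client trace id (may be empty)
        //   BOOTSTRAP_INDEX_ENV -> this child's index within the alloc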
        cmd.env(
            bootstrap::BOOTSTRAP_ADDR_ENV,
            self.bootstrap_addr.to_string(),
        );
        cmd.env(
            bootstrap::CLIENT_TRACE_ID_ENV,
            self.client_context.trace_id.as_str(),
        );
        cmd.env(bootstrap::BOOTSTRAP_INDEX_ENV, index.to_string());

        tracing::debug!("spawning process {:?}", cmd);
        match cmd.spawn() {
            Err(err) => {
                let message = format!(
                    "failed to spawn {} (index: {}, command: {:?}): {}",
                    create_key, index, cmd, err
                );
                tracing::error!(message);
                self.failed = true;
                Some(ProcState::Failed {
                    world_id: self.world_id.clone(),
                    description: message,
                })
            }
            Ok(mut process) => {
                let pid = process.id().unwrap_or(0);
                match self.ranks.assign(index) {
                    Err(_index) => {
                        tracing::info!("could not assign rank to {}", create_key);
                        let _ = process.kill().await;
                        None
                    }
                    Ok(rank) => {
                        let (handle, monitor) =
                            Child::monitored(rank, process, log_channel, tail_size);

                        self.active.insert(index, handle);

                        self.children.spawn(async move { (index, monitor.await) });

                        let point = self.spec.extent.point_of_rank(rank).unwrap();
                        Some(ProcState::Created {
                            create_key: create_key.clone(),
                            point,
                            pid,
                        })
                    }
                }
            }
        }
    }

    fn remove(&mut self, index: usize) -> Option<Child> {
        self.ranks.unassign(index);
        self.active.remove(&index)
    }
}

#[async_trait]
impl Alloc for ProcessAlloc {
    #[hyperactor::instrument_infallible]
    async fn next(&mut self) -> Option<ProcState> {
        if !self.running && self.active.is_empty() {
            return None;
        }

        loop {
            // While running and healthy, keep spawning until we reach the
            // target number of ranks.
            if self.running
                && !self.failed
                && let state @ Some(_) = self.maybe_spawn().await
            {
                return state;
            }

            let transport = self.transport().clone();

            tokio::select! {
                Ok(Process2Allocator(index, message)) = self.rx.recv() => {
                    let child = match self.active.get_mut(&index) {
                        None => {
                            tracing::info!("message {:?} from zombie {}", message, index);
                            continue;
                        }
                        Some(child) => child,
                    };

                    match message {
                        Process2AllocatorMessage::Hello(addr) => {
                            if !child.connect(addr.clone()) {
                                tracing::error!("received multiple hellos from {}", index);
                                continue;
                            }

                            child.post(Allocator2Process::StartProc(
                                self.spec.proc_name.clone().map_or(
                                    ProcId::Ranked(WorldId(self.name.to_string()), index),
                                    |name| ProcId::Direct(addr.clone(), name)),
                                transport,
                            ));
                        }

                        Process2AllocatorMessage::StartedProc(proc_id, mesh_agent, addr) => {
                            break Some(ProcState::Running {
                                create_key: self.created[index].clone(),
                                proc_id,
                                mesh_agent,
                                addr,
                            });
                        }
                        Process2AllocatorMessage::Heartbeat => {
                            tracing::trace!("recv heartbeat from {index}");
                        }
                    }
                },

                Some(Ok((index, mut reason))) = self.children.join_next() => {
                    // Drain the child's stream forwarders so a stderr tail
                    // can be attached to the stop reason.
                    let stderr_content = if let Some(child) = self.remove(index) {
                        let mut stderr_lines = Vec::new();

                        let (stdout_mon, stderr_mon) = child.take_stream_monitors();

                        if let Some(stdout_monitor) = stdout_mon {
                            let (_lines, result) = stdout_monitor.abort().await;
                            if let Err(e) = result {
                                tracing::warn!("stdout monitor abort error: {}", e);
                            }
                        }

                        if let Some(stderr_monitor) = stderr_mon {
                            let (lines, result) = stderr_monitor.abort().await;
                            stderr_lines = lines;
                            if let Err(e) = result {
                                tracing::warn!("stderr monitor abort error: {}", e);
                            }
                        }

                        stderr_lines.join("\n")
                    } else {
                        String::new()
                    };

                    if let ProcStopReason::Exited(code, _) = &mut reason {
                        reason = ProcStopReason::Exited(*code, stderr_content);
                    }

                    tracing::info!("child stopped with ProcStopReason::{:?}", reason);

                    break Some(ProcState::Stopped {
                        create_key: self.created[index].clone(),
                        reason,
                    });
                },
            }
        }
    }

    fn spec(&self) -> &AllocSpec {
        &self.spec
    }

    fn extent(&self) -> &Extent {
        &self.spec.extent
    }

    fn world_id(&self) -> &WorldId {
        &self.world_id
    }

    async fn stop(&mut self) -> Result<(), AllocatorError> {
        tracing::info!(
            name = "ProcessAllocStatus",
            alloc_name = %self.world_id(),
            status = "Stopping",
        );
        // Ask each active child to stop; the watchdog kills any child that
        // does not exit within the grace period.
        for (_index, child) in self.active.iter_mut() {
            child.post(Allocator2Process::StopAndExit(0));
            child.spawn_watchdog();
        }

        self.running = false;
        tracing::info!(
            name = "ProcessAllocStatus",
            alloc_name = %self.world_id(),
            status = "Stop::Sent",
            "StopAndExit was sent to all child processes; check their logs for stop progress."
        );
        Ok(())
    }
}
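
// Shutdown sketch (hypothetical caller code): request a stop, then keep
// draining `next()` until it returns `None`, so every `Stopped` event is
// observed and the children are fully reaped:
//
//     alloc.stop().await?;
//     while let Some(state) = alloc.next().await {
//         // Expect ProcState::Stopped events here.
//     }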

impl Drop for ProcessAlloc {
    fn drop(&mut self) {
        tracing::info!(
            name = "ProcessAllocStatus",
            alloc_name = %self.world_id(),
            status = "Dropped",
            "dropping ProcessAlloc of name: {}, world id: {}",
            self.name,
            self.world_id
        );
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[cfg(fbcode_build)]
    crate::alloc_test_suite!(ProcessAllocator::new(Command::new(
        crate::testresource::get("monarch/hyperactor_mesh/bootstrap")
    )));

    #[cfg(fbcode_build)]
    #[tokio::test]
    async fn test_sigterm_on_group_fail() {
        let bootstrap_binary = crate::testresource::get("monarch/hyperactor_mesh/bootstrap");
        let mut allocator = ProcessAllocator::new(Command::new(bootstrap_binary));

        let mut alloc = allocator
            .allocate(AllocSpec {
                extent: ndslice::extent!(replica = 1),
                constraints: Default::default(),
                proc_name: None,
                transport: ChannelTransport::Unix,
                proc_allocation_mode: Default::default(),
            })
            .await
            .unwrap();

        // Drive the alloc until the proc reports Running.
        let proc_id = {
            loop {
                match alloc.next().await {
                    Some(ProcState::Running { proc_id, .. }) => {
                        break proc_id;
                    }
                    Some(ProcState::Failed { description, .. }) => {
                        panic!("Process allocation failed: {}", description);
                    }
                    Some(_other) => {}
                    None => {
                        panic!("Allocation ended unexpectedly");
                    }
                }
            }
        };

        // Failing the group should SIGTERM the child...
        if let Some(child) = alloc.active.get(
            &proc_id
                .rank()
                .expect("proc must be ranked for allocation lookup"),
        ) {
            child.fail_group();
        }

        // ...which surfaces as Killed(SIGTERM, core_dumped = false).
        assert!(matches!(
            alloc.next().await,
            Some(ProcState::Stopped {
                reason: ProcStopReason::Killed(15, false),
                ..
            })
        ));
    }
}