hyperactor_mesh/alloc/process.rs

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#![allow(dead_code)] // some things currently used only in tests

use std::collections::HashMap;
use std::future::Future;
use std::os::unix::process::ExitStatusExt;
use std::process::ExitStatus;
use std::process::Stdio;
use std::sync::Arc;
use std::sync::OnceLock;

use async_trait::async_trait;
use enum_as_inner::EnumAsInner;
use hyperactor::ProcId;
use hyperactor::WorldId;
use hyperactor::channel;
use hyperactor::channel::ChannelAddr;
use hyperactor::channel::ChannelError;
use hyperactor::channel::ChannelTransport;
use hyperactor::channel::ChannelTx;
use hyperactor::channel::Rx;
use hyperactor::channel::Tx;
use hyperactor::channel::TxStatus;
use hyperactor::sync::flag;
use hyperactor::sync::monitor;
use ndslice::view::Extent;
use nix::sys::signal;
use nix::unistd::Pid;
use serde::Deserialize;
use serde::Serialize;
use tokio::io;
use tokio::process::Command;
use tokio::sync::Mutex;
use tokio::task::JoinSet;

use super::Alloc;
use super::AllocSpec;
use super::Allocator;
use super::AllocatorError;
use super::ProcState;
use super::ProcStopReason;
use crate::assign::Ranks;
use crate::bootstrap;
use crate::bootstrap::Allocator2Process;
use crate::bootstrap::MESH_ENABLE_LOG_FORWARDING;
use crate::bootstrap::MESH_TAIL_LOG_LINES;
use crate::bootstrap::Process2Allocator;
use crate::bootstrap::Process2AllocatorMessage;
use crate::logging::OutputTarget;
use crate::logging::StreamFwder;
use crate::shortuuid::ShortUuid;

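/// Label key under which a client trace ID may be supplied via
/// [`AllocSpec`] constraints; the allocator propagates it to children
/// for cross-process log correlation.
///
/// A minimal sketch of supplying a trace ID (assumes `match_labels`
/// is a string-to-string map, per its use in `allocate` below):
///
/// ```ignore
/// spec.constraints
///     .match_labels
///     .insert(CLIENT_TRACE_ID_LABEL.to_string(), "trace-123".to_string());
/// ```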
pub const CLIENT_TRACE_ID_LABEL: &str = "CLIENT_TRACE_ID";

/// An allocator that allocates procs by executing managed (local)
/// processes. ProcessAllocator is configured with a [`Command`] (template)
/// to spawn external processes. These processes must invoke [`hyperactor_mesh::bootstrap`] or
/// [`hyperactor_mesh::bootstrap_or_die`], which is responsible for coordinating
/// with the allocator.
///
/// The process allocator tees the stdout and stderr of each proc to the parent process.
pub struct ProcessAllocator {
    cmd: Arc<Mutex<Command>>,
}

impl ProcessAllocator {
    /// Create a new allocator using the provided command (template).
    /// The command is used to spawn child processes that host procs.
    /// The binary should yield control to [`hyperactor_mesh::bootstrap`]
    /// or [`hyperactor_mesh::bootstrap_or_die`] after initialization.
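    ///
    /// # Example
    ///
    /// A minimal sketch; the binary path is hypothetical and must
    /// point at a program that calls [`hyperactor_mesh::bootstrap`]
    /// or [`hyperactor_mesh::bootstrap_or_die`]:
    ///
    /// ```ignore
    /// use tokio::process::Command;
    ///
    /// let allocator = ProcessAllocator::new(Command::new("path/to/bootstrap"));
    /// ```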
    pub fn new(cmd: Command) -> Self {
        Self {
            cmd: Arc::new(Mutex::new(cmd)),
        }
    }
}

#[async_trait]
impl Allocator for ProcessAllocator {
    type Alloc = ProcessAlloc;

    #[hyperactor::instrument(fields(name = "process_allocate", monarch_client_trace_id = spec.constraints.match_labels.get(CLIENT_TRACE_ID_LABEL).cloned().unwrap_or_else(|| "".to_string())))]
    async fn allocate(&mut self, spec: AllocSpec) -> Result<ProcessAlloc, AllocatorError> {
        let (bootstrap_addr, rx) = channel::serve(ChannelAddr::any(ChannelTransport::Unix))
            .map_err(anyhow::Error::from)?;

        if spec.transport == ChannelTransport::Local {
            return Err(AllocatorError::Other(anyhow::anyhow!(
                "ProcessAllocator does not support local transport"
            )));
        }

        let name = ShortUuid::generate();
        let world_id = WorldId(name.to_string());
        tracing::info!(
            name = "ProcessAllocStatus",
            alloc_name = %world_id,
            addr = %bootstrap_addr,
            status = "Allocated",
        );
        Ok(ProcessAlloc {
            name: name.clone(),
            world_id,
            spec: spec.clone(),
            bootstrap_addr,
            rx,
            active: HashMap::new(),
            ranks: Ranks::new(spec.extent.num_ranks()),
            created: Vec::new(),
            cmd: Arc::clone(&self.cmd),
            children: JoinSet::new(),
            running: true,
            failed: false,
            client_context: ClientContext {
                trace_id: spec
                    .constraints
                    .match_labels
                    .get(CLIENT_TRACE_ID_LABEL)
                    .cloned()
                    .unwrap_or_else(|| "".to_string()),
            },
        })
    }
}

/// Client context saved in [`ProcessAlloc`]; it is also passed in the
/// `RemoteProcessAllocator`'s `Allocate` method.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientContext {
    /// Trace ID for correlating logs across client and worker processes.
    pub trace_id: String,
}

/// An allocation produced by [`ProcessAllocator`].
pub struct ProcessAlloc {
    name: ShortUuid,
    world_id: WorldId, // to provide storage
    spec: AllocSpec,
    bootstrap_addr: ChannelAddr,
    rx: channel::ChannelRx<Process2Allocator>,
    active: HashMap<usize, Child>,
    // Maps process index to its rank.
    ranks: Ranks<usize>,
    // Created processes by index.
    created: Vec<ShortUuid>,
    cmd: Arc<Mutex<Command>>,
    children: JoinSet<(usize, ProcStopReason)>,
    running: bool,
    failed: bool,
    client_context: ClientContext,
}

#[derive(EnumAsInner)]
enum ChannelState {
    NotConnected,
    Connected(ChannelTx<Allocator2Process>),
    Failed(ChannelError),
}

struct Child {
    local_rank: usize,
    channel: ChannelState,
    group: monitor::Group,
    exit_flag: Option<flag::Flag>,
    stdout_fwder: Arc<std::sync::Mutex<Option<StreamFwder>>>,
    stderr_fwder: Arc<std::sync::Mutex<Option<StreamFwder>>>,
    stop_reason: Arc<OnceLock<ProcStopReason>>,
    process_pid: Arc<std::sync::Mutex<Option<i32>>>,
}

impl Child {
    fn monitored(
        local_rank: usize,
        mut process: tokio::process::Child,
        log_channel: Option<ChannelAddr>,
        tail_size: usize,
    ) -> (Self, impl Future<Output = ProcStopReason>) {
        let (group, handle) = monitor::group();
        let (exit_flag, exit_guard) = flag::guarded();
        let stop_reason = Arc::new(OnceLock::new());
        let process_pid = Arc::new(std::sync::Mutex::new(process.id().map(|id| id as i32)));

        // Take ownership of the child's stdio pipes.
        //
        // NOTE:
        // - These Options are `Some(...)` **only if** the parent
        //   spawned the child with
        //   `stdout(Stdio::piped())/stderr(Stdio::piped())`, which
        //   the caller decides via its `need_stdio` calculation:
        //     need_stdio = enable_forwarding || tail_size > 0
        // - If `need_stdio == false` the parent used
        //   `Stdio::inherit()` and both will be `None`. In that case
        //   we intentionally *skip* installing `StreamFwder`s and
        //   the child writes directly to the parent's console with
        //   no interception, no tail.
        // - Even when we do install `StreamFwder`s, if `log_channel
        //   == None` (forwarding disabled) we still mirror to the
        //   parent console and keep an in-memory tail, but we don't
        //   send anything over the mesh log channel. (In the v0 path
        //   there's also no `FileAppender`.)
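        //
        // In table form (mirrors the caller's `need_stdio` computation):
        //
        //   enable_forwarding | tail_size | child stdio      | StreamFwder?
        //   ------------------+-----------+------------------+-------------
        //   true              | any       | Stdio::piped()   | yes
        //   false             | > 0       | Stdio::piped()   | yes
        //   false             | 0         | Stdio::inherit() | no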
        let stdout_pipe = process.stdout.take();
        let stderr_pipe = process.stderr.take();

        let child = Self {
            local_rank,
            channel: ChannelState::NotConnected,
            group,
            exit_flag: Some(exit_flag),
            stdout_fwder: Arc::new(std::sync::Mutex::new(None)),
            stderr_fwder: Arc::new(std::sync::Mutex::new(None)),
            stop_reason: Arc::clone(&stop_reason),
            process_pid: process_pid.clone(),
        };

        // Set up logging monitors asynchronously without blocking process creation.
        let child_stdout_fwder = child.stdout_fwder.clone();
        let child_stderr_fwder = child.stderr_fwder.clone();

        if let Some(stdout) = stdout_pipe {
            let pid = process.id().unwrap_or_default();
            let stdout_fwder = child_stdout_fwder.clone();
            let log_channel_clone = log_channel.clone();
            *stdout_fwder.lock().expect("stdout_fwder mutex poisoned") = Some(StreamFwder::start(
                stdout,
                None, // No file appender in v0.
                OutputTarget::Stdout,
                tail_size,
                log_channel_clone, // Optional channel address.
                pid,
                local_rank,
            ));
        }

        if let Some(stderr) = stderr_pipe {
            let pid = process.id().unwrap_or_default();
            let stderr_fwder = child_stderr_fwder.clone();
            *stderr_fwder.lock().expect("stderr_fwder mutex poisoned") = Some(StreamFwder::start(
                stderr,
                None, // No file appender in v0.
                OutputTarget::Stderr,
                tail_size,
                log_channel, // Optional channel address.
                pid,
                local_rank,
            ));
        }

        let monitor = async move {
            let reason = tokio::select! {
                _ = handle => {
                    Self::ensure_killed(process_pid);
                    Self::exit_status_to_reason(process.wait().await)
                }
                result = process.wait() => {
                    Self::exit_status_to_reason(result)
                }
            };
            exit_guard.signal();

            stop_reason.get_or_init(|| reason).clone()
        };

        (child, monitor)
    }

    fn ensure_killed(pid: Arc<std::sync::Mutex<Option<i32>>>) {
        if let Some(pid) = pid.lock().unwrap().take() {
            if let Err(e) = signal::kill(Pid::from_raw(pid), signal::SIGTERM) {
                match e {
                    nix::errno::Errno::ESRCH => {
                        // Process already gone.
                        tracing::debug!("pid {} already exited", pid);
                    }
                    _ => {
                        tracing::error!("failed to kill {}: {}", pid, e);
                    }
                }
            }
        }
    }

    fn exit_status_to_reason(result: io::Result<ExitStatus>) -> ProcStopReason {
        match result {
            Ok(status) if status.success() => ProcStopReason::Stopped,
            Ok(status) => {
                if let Some(signal) = status.signal() {
                    ProcStopReason::Killed(signal, status.core_dumped())
                } else if let Some(code) = status.code() {
                    ProcStopReason::Exited(code, String::new())
                } else {
                    ProcStopReason::Unknown
                }
            }
            Err(e) => {
                tracing::error!("error waiting for process: {}", e);
                ProcStopReason::Unknown
            }
        }
    }

    #[hyperactor::instrument_infallible]
    fn stop(&self, reason: ProcStopReason) {
        let _ = self.stop_reason.set(reason); // first stop wins
        self.group.fail();
    }

    fn connect(&mut self, addr: ChannelAddr) -> bool {
        if !self.channel.is_not_connected() {
            return false;
        }

        match channel::dial(addr) {
            Ok(channel) => {
                let mut status = channel.status().clone();
                self.channel = ChannelState::Connected(channel);
                // Monitor the channel, killing the process if it becomes unavailable
                // (fails keepalive).
                self.group.spawn(async move {
                    let _ = status
                        .wait_for(|status| matches!(status, TxStatus::Closed))
                        .await;
                    Result::<(), ()>::Err(())
                });
            }
            Err(err) => {
                self.channel = ChannelState::Failed(err);
                self.stop(ProcStopReason::Watchdog);
            }
        };
        true
    }

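    /// Arm a watchdog for this child: if the process has not exited
    /// within `PROCESS_EXIT_TIMEOUT` of this call (typically made right
    /// after posting `Allocator2Process::StopAndExit`), the stop reason
    /// is recorded as `ProcStopReason::Watchdog` and the group is
    /// failed, which in turn causes the monitor to SIGTERM the process.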
    fn spawn_watchdog(&mut self) {
        let Some(exit_flag) = self.exit_flag.take() else {
            tracing::info!("exit flag already taken, not spawning watchdog");
            return;
        };
        let group = self.group.clone();
        let stop_reason = self.stop_reason.clone();
        tracing::info!("spawning watchdog");
        tokio::spawn(async move {
            let exit_timeout =
                hyperactor_config::global::get(hyperactor::config::PROCESS_EXIT_TIMEOUT);
            #[allow(clippy::disallowed_methods)]
            if tokio::time::timeout(exit_timeout, exit_flag).await.is_err() {
                tracing::info!("watchdog timeout, killing process");
                let _ = stop_reason.set(ProcStopReason::Watchdog);
                group.fail();
            }
            tracing::info!("watchdog task exit");
        });
    }

    #[hyperactor::instrument_infallible]
    fn post(&mut self, message: Allocator2Process) {
        if let ChannelState::Connected(channel) = &mut self.channel {
            channel.post(message);
        } else {
            self.stop(ProcStopReason::Watchdog);
        }
    }

    #[cfg(test)]
    fn fail_group(&self) {
        self.group.fail();
    }

    fn take_stream_monitors(&self) -> (Option<StreamFwder>, Option<StreamFwder>) {
        let out = self
            .stdout_fwder
            .lock()
            .expect("stdout_fwder mutex poisoned")
            .take();
        let err = self
            .stderr_fwder
            .lock()
            .expect("stderr_fwder mutex poisoned")
            .take();
        (out, err)
    }
}

impl Drop for Child {
    fn drop(&mut self) {
        Self::ensure_killed(self.process_pid.clone());
    }
}

impl ProcessAlloc {
    // TODO: also implement graceful exit. Currently procs and
    // processes are 1:1, so `stop` fully exits the process.

    #[hyperactor::instrument_infallible]
    fn stop(&mut self, proc_id: &ProcId, reason: ProcStopReason) -> Result<(), anyhow::Error> {
        self.get_mut(proc_id)?.stop(reason);
        Ok(())
    }

    fn get(&self, proc_id: &ProcId) -> Result<&Child, anyhow::Error> {
        self.active.get(&self.index(proc_id)?).ok_or_else(|| {
            anyhow::anyhow!(
                "proc {} not currently active in alloc {}",
                proc_id,
                self.name
            )
        })
    }

    fn get_mut(&mut self, proc_id: &ProcId) -> Result<&mut Child, anyhow::Error> {
        self.active.get_mut(&self.index(proc_id)?).ok_or_else(|| {
            anyhow::anyhow!(
                "proc {} not currently active in alloc {}",
                &proc_id,
                self.name
            )
        })
    }

    /// The "world name" assigned to this alloc.
    pub(crate) fn name(&self) -> &ShortUuid {
        &self.name
    }

    fn index(&self, proc_id: &ProcId) -> Result<usize, anyhow::Error> {
        anyhow::ensure!(
            proc_id
                .world_name()
                .expect("proc must be ranked for allocation index")
                .parse::<ShortUuid>()?
                == self.name,
            "proc {} does not belong to alloc {}",
            proc_id,
            self.name
        );
        Ok(proc_id
            .rank()
            .expect("proc must be ranked for allocation index"))
    }

    #[hyperactor::instrument_infallible]
    async fn maybe_spawn(&mut self) -> Option<ProcState> {
        if self.active.len() >= self.spec.extent.num_ranks() {
            return None;
        }
        let mut cmd = self.cmd.lock().await;

        // When `MESH_ENABLE_LOG_FORWARDING` is set, the client
        // execution context is probably a notebook. In that case, for
        // output from this process's children to reach the client, we
        // **must** use pipes and copy output from child to parent
        // (**`Stdio::inherit`** does not work!). So, this variable is
        // being used as a proxy for "use pipes" here.
        let enable_forwarding = hyperactor_config::global::get(MESH_ENABLE_LOG_FORWARDING);
        let tail_size = hyperactor_config::global::get(MESH_TAIL_LOG_LINES);
        if enable_forwarding || tail_size > 0 {
            cmd.stdout(Stdio::piped()).stderr(Stdio::piped());
        } else {
            cmd.stdout(Stdio::inherit()).stderr(Stdio::inherit());
            tracing::info!(
                "child stdio NOT captured (forwarding and tail both disabled); \
                 inheriting parent console"
            );
        }
        // Regardless of the value of `MESH_ENABLE_LOG_FORWARDING`
        // (c.f. `enable_forwarding`), we do not do log forwarding on
        // these procs. This is because, now that we are on the v1
        // path, the only procs we spawn via this code path are those
        // to support `HostMeshAgent`s.
        let log_channel: Option<ChannelAddr> = None;

        let index = self.created.len();
        self.created.push(ShortUuid::generate());
        let create_key = &self.created[index];

        // Capture config and pass to child via `Bootstrap::V0ProcMesh`.
        let client_config = hyperactor_config::global::attrs();
        let bootstrap = bootstrap::Bootstrap::V0ProcMesh {
            config: Some(client_config),
        };
        bootstrap.to_env(&mut cmd);

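        // Environment seen by the child (set below, plus whatever
        // `bootstrap.to_env` installed above):
        //   - `BOOTSTRAP_ADDR_ENV`:      address of this allocator's bootstrap channel
        //   - `CLIENT_TRACE_ID_ENV`:     client trace ID, for log correlation
        //   - `BOOTSTRAP_INDEX_ENV`:     this child's index within the alloc
        //   - `HYPERACTOR_PROCESS_NAME`: human-readable "host rank @ host" tag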
        cmd.env(
            bootstrap::BOOTSTRAP_ADDR_ENV,
            self.bootstrap_addr.to_string(),
        );
        cmd.env(
            bootstrap::CLIENT_TRACE_ID_ENV,
            self.client_context.trace_id.as_str(),
        );
        cmd.env(bootstrap::BOOTSTRAP_INDEX_ENV, index.to_string());

        cmd.env(
            "HYPERACTOR_PROCESS_NAME",
            format!(
                "host rank:{} @{}",
                index,
                hostname::get()
                    .unwrap_or_else(|_| "unknown_host".into())
                    .into_string()
                    .unwrap_or("unknown_host".to_string())
            ),
        );

        tracing::debug!("spawning process {:?}", cmd);
        match cmd.spawn() {
            Err(err) => {
                // Retry likely won't help here, so fail permanently.
                let message = format!(
                    "spawn {} index: {}, command: {:?}: {}",
                    create_key, index, cmd, err
                );
                tracing::error!(message);
                self.failed = true;
                Some(ProcState::Failed {
                    world_id: self.world_id.clone(),
                    description: message,
                })
            }
            Ok(mut process) => {
                let pid = process.id().unwrap_or(0);
                match self.ranks.assign(index) {
                    Err(_index) => {
                        tracing::info!("could not assign rank to {}", create_key);
                        let _ = process.kill().await;
                        None
                    }
                    Ok(rank) => {
                        let (handle, monitor) =
                            Child::monitored(rank, process, log_channel, tail_size);

                        // Insert into the active map BEFORE spawning the monitor task.
                        // This prevents a race where the monitor completes before insertion.
                        self.active.insert(index, handle);

                        // Now spawn the monitor task.
                        self.children.spawn(async move { (index, monitor.await) });

                        // Map the assigned rank to its point in the allocation's extent.
                        let point = self.spec.extent.point_of_rank(rank).unwrap();
                        Some(ProcState::Created {
                            create_key: create_key.clone(),
                            point,
                            pid,
                        })
                    }
                }
            }
        }
    }

    fn remove(&mut self, index: usize) -> Option<Child> {
        self.ranks.unassign(index);
        self.active.remove(&index)
    }
}

#[async_trait]
impl Alloc for ProcessAlloc {
    #[hyperactor::instrument_infallible]
    async fn next(&mut self) -> Option<ProcState> {
        if !self.running && self.active.is_empty() {
            return None;
        }

        loop {
            // Do not allocate new processes if we are in a failed state.
            if self.running
                && !self.failed
                && let state @ Some(_) = self.maybe_spawn().await
            {
                return state;
            }

            let transport = self.transport().clone();

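            // Per-child bootstrap handshake, as driven by the select
            // below:
            //
            //   child -> allocator : Hello(addr)
            //   allocator -> child : StartProc(proc_id, transport)
            //   child -> allocator : StartedProc(proc_id, mesh_agent, addr)
            //                        => surfaced as ProcState::Running
            //   child -> allocator : Heartbeat (periodic liveness)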
            tokio::select! {
                Ok(Process2Allocator(index, message)) = self.rx.recv() => {
                    let child = match self.active.get_mut(&index) {
                        None => {
                            tracing::info!("message {:?} from zombie {}", message, index);
                            continue;
                        }
                        Some(child) => child,
                    };

                    match message {
                        Process2AllocatorMessage::Hello(addr) => {
                            if !child.connect(addr.clone()) {
                                tracing::error!("received multiple hellos from {}", index);
                                continue;
                            }

                            child.post(Allocator2Process::StartProc(
                                self.spec.proc_name.clone().map_or(
                                    ProcId::Ranked(WorldId(self.name.to_string()), index),
                                    |name| ProcId::Direct(addr.clone(), name)),
                                transport,
                            ));
                        }

                        Process2AllocatorMessage::StartedProc(proc_id, mesh_agent, addr) => {
                            break Some(ProcState::Running {
                                create_key: self.created[index].clone(),
                                proc_id,
                                mesh_agent,
                                addr,
                            });
                        }
                        Process2AllocatorMessage::Heartbeat => {
                            tracing::trace!("recv heartbeat from {index}");
                        }
                    }
                },

                Some(Ok((index, mut reason))) = self.children.join_next() => {
                    let stderr_content = if let Some(child) = self.remove(index) {
                        let mut stderr_lines = Vec::new();

                        let (stdout_mon, stderr_mon) = child.take_stream_monitors();

                        // Clean up the stdout monitor.
                        if let Some(stdout_monitor) = stdout_mon {
                            let (_lines, result) = stdout_monitor.abort().await;
                            if let Err(e) = result {
                                tracing::warn!("stdout monitor abort error: {}", e);
                            }
                        }

                        // Clean up the stderr monitor and keep its content for logging.
                        if let Some(stderr_monitor) = stderr_mon {
                            let (lines, result) = stderr_monitor.abort().await;
                            stderr_lines = lines;
                            if let Err(e) = result {
                                tracing::warn!("stderr monitor abort error: {}", e);
                            }
                        }

                        stderr_lines.join("\n")
                    } else {
                        String::new()
                    };

                    if let ProcStopReason::Exited(code, _) = &mut reason {
                        reason = ProcStopReason::Exited(*code, stderr_content);
                    }

                    tracing::info!("child stopped with ProcStopReason::{:?}", reason);

                    break Some(ProcState::Stopped {
                        create_key: self.created[index].clone(),
                        reason,
                    });
                },
            }
        }
    }

    fn spec(&self) -> &AllocSpec {
        &self.spec
    }

    fn extent(&self) -> &Extent {
        &self.spec.extent
    }

    fn world_id(&self) -> &WorldId {
        &self.world_id
    }

    async fn stop(&mut self) -> Result<(), AllocatorError> {
        tracing::info!(
            name = "ProcessAllocStatus",
            alloc_name = %self.world_id(),
            status = "Stopping",
        );
        // We rely on the teardown here, and that the process should
        // exit on its own. We should have a hard timeout here as well,
        // so that we never rely on the system functioning correctly
        // for liveness.
        for (_index, child) in self.active.iter_mut() {
            child.post(Allocator2Process::StopAndExit(0));
            child.spawn_watchdog();
        }

        self.running = false;
        tracing::info!(
            name = "ProcessAllocStatus",
            alloc_name = %self.world_id(),
            status = "Stop::Sent",
            "StopAndExit was sent to allocators; check their logs for the stop progress."
        );
        Ok(())
    }
}

impl Drop for ProcessAlloc {
    fn drop(&mut self) {
        tracing::info!(
            name = "ProcessAllocStatus",
            alloc_name = %self.world_id(),
            status = "Dropped",
            "dropping ProcessAlloc of name: {}, world id: {}",
            self.name,
            self.world_id
        );
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[cfg(fbcode_build)] // we use an external binary, produced by buck
    crate::alloc_test_suite!(ProcessAllocator::new(Command::new(
        crate::testresource::get("monarch/hyperactor_mesh/bootstrap")
    )));

    #[cfg(fbcode_build)]
    #[tokio::test]
    async fn test_sigterm_on_group_fail() {
        let bootstrap_binary = crate::testresource::get("monarch/hyperactor_mesh/bootstrap");
        let mut allocator = ProcessAllocator::new(Command::new(bootstrap_binary));

        let mut alloc = allocator
            .allocate(AllocSpec {
                extent: ndslice::extent!(replica = 1),
                constraints: Default::default(),
                proc_name: None,
                transport: ChannelTransport::Unix,
                proc_allocation_mode: Default::default(),
            })
            .await
            .unwrap();

        let proc_id = {
            loop {
                match alloc.next().await {
                    Some(ProcState::Running { proc_id, .. }) => {
                        break proc_id;
                    }
                    Some(ProcState::Failed { description, .. }) => {
                        panic!("Process allocation failed: {}", description);
                    }
                    Some(_other) => {}
                    None => {
                        panic!("Allocation ended unexpectedly");
                    }
                }
            }
        };

        if let Some(child) = alloc.active.get(
            &proc_id
                .rank()
                .expect("proc must be ranked for allocation lookup"),
        ) {
            child.fail_group();
        }

        assert!(matches!(
            alloc.next().await,
            Some(ProcState::Stopped {
                reason: ProcStopReason::Killed(15, false),
                ..
            })
        ));
    }
}