// monarch_hyperactor/runtime.rs

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

9use std::future::Future;
10use std::pin::Pin;
11use std::sync::OnceLock;
12use std::sync::RwLock;
13use std::sync::RwLockReadGuard;
14use std::sync::atomic::AtomicUsize;
15use std::sync::atomic::Ordering;
16use std::time::Duration;
17
18use anyhow::Result;
19use hyperactor::Proc;
20use hyperactor::channel::ChannelAddr;
21use hyperactor::channel::ChannelTransport;
22use hyperactor::mailbox::BoxedMailboxSender;
23use hyperactor::mailbox::PanickingMailboxSender;
24use hyperactor::reference;
25use once_cell::sync::Lazy;
26use once_cell::unsync::OnceCell as UnsyncOnceCell;
27use pyo3::PyResult;
28use pyo3::Python;
29use pyo3::exceptions::PyRuntimeError;
30use pyo3::prelude::*;
31use pyo3::types::PyAnyMethods;
32use pyo3_async_runtimes::TaskLocals;
33use tokio::task;
34
// This must be a RwLock and only return a guard for reading the runtime.
// Otherwise multiple threads can deadlock fighting for the Runtime object if they hold it
// while blocking on something.
//
// Starts as `None`; `get_tokio_runtime` lazily builds the runtime and
// `shutdown_tokio_runtime` takes it back out at interpreter exit.
static INSTANCE: std::sync::LazyLock<RwLock<Option<tokio::runtime::Runtime>>> =
    std::sync::LazyLock::new(|| RwLock::new(None));
40
/// Returns a read guard to the global tokio runtime, creating it on first use.
///
/// Double-checked locking over `INSTANCE`: the fast path takes only a read
/// lock; if the runtime is absent we upgrade to a write lock, initialize it,
/// then downgrade back to a read lock so the mapped guard can be returned.
pub fn get_tokio_runtime<'l>() -> std::sync::MappedRwLockReadGuard<'l, tokio::runtime::Runtime> {
    // First try to get a read lock and check if runtime exists
    {
        let read_guard = INSTANCE.read().unwrap();
        if read_guard.is_some() {
            return RwLockReadGuard::map(read_guard, |lock: &Option<tokio::runtime::Runtime>| {
                lock.as_ref().unwrap()
            });
        }
        // Drop the read lock by letting it go out of scope
    }

    // Runtime doesn't exist, upgrade to write lock to initialize.
    // Re-check under the write lock: another thread may have initialized the
    // runtime between our read-lock release and write-lock acquisition.
    let mut write_guard = INSTANCE.write().unwrap();
    if write_guard.is_none() {
        *write_guard = Some(
            tokio::runtime::Builder::new_multi_thread()
                .thread_name_fn(|| {
                    // Give every worker thread a unique, identifiable name.
                    static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
                    let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
                    format!("monarch-pytokio-worker-{}", id)
                })
                .enable_all()
                .build()
                .unwrap(),
        );
    }

    // Downgrade write lock to read lock and return the reference
    let read_guard = std::sync::RwLockWriteGuard::downgrade(write_guard);
    RwLockReadGuard::map(read_guard, |lock: &Option<tokio::runtime::Runtime>| {
        lock.as_ref().unwrap()
    })
}
75
76#[pyfunction]
77pub fn shutdown_tokio_runtime(py: Python<'_>) {
78    // Called from Python's atexit, which holds the GIL. Release it so tokio
79    // worker threads can acquire it to complete their Python work.
80    py.detach(|| {
81        if let Some(x) = INSTANCE.write().unwrap().take() {
82            x.shutdown_timeout(Duration::from_secs(1));
83        }
84    });
85}
86
87/// A global runtime proc used by this crate.
88pub(crate) fn get_proc_runtime() -> &'static Proc {
89    static RUNTIME_PROC: OnceLock<Proc> = OnceLock::new();
90    RUNTIME_PROC.get_or_init(|| {
91        let addr = ChannelAddr::any(ChannelTransport::Local);
92        let proc_id = reference::ProcId::unique(addr, "monarch_hyperactor_runtime");
93        Proc::configured(proc_id, BoxedMailboxSender::new(PanickingMailboxSender))
94    })
95}
96
/// Stores the native thread ID of the main Python thread.
/// This is lazily initialized on first call to `is_main_thread`
/// (via `get_main_thread_native_id`, which queries Python's `threading` module).
static MAIN_THREAD_NATIVE_ID: OnceLock<i64> = OnceLock::new();
100
101/// Returns the native thread ID of the main Python thread.
102/// On first call, looks it up via `threading.main_thread().native_id`.
103fn get_main_thread_native_id() -> i64 {
104    *MAIN_THREAD_NATIVE_ID.get_or_init(|| {
105        Python::attach(|py| {
106            let threading = py.import("threading").expect("failed to import threading");
107            let main_thread = threading
108                .call_method0("main_thread")
109                .expect("failed to get main_thread");
110            main_thread
111                .getattr("native_id")
112                .expect("failed to get native_id")
113                .extract::<i64>()
114                .expect("native_id is not an i64")
115        })
116    })
117}
118
/// Returns the current thread's native ID in a cross-platform way.
#[cfg(target_os = "linux")]
fn get_current_thread_id() -> i64 {
    // gettid(2) kernel thread ID; compared against Python's `native_id`
    // in `is_main_thread`.
    nix::unistd::gettid().as_raw() as i64
}
124
/// Returns the current thread's native ID in a cross-platform way.
#[cfg(target_os = "macos")]
fn get_current_thread_id() -> i64 {
    let mut tid: u64 = 0;
    // pthread_threadid_np with thread=0 (null pthread_t) gets the current thread's ID.
    // SAFETY: a null pthread_t queries the calling thread; `tid` is a valid
    // out-pointer for the duration of the call.
    unsafe {
        let ret = libc::pthread_threadid_np(0, &mut tid);
        debug_assert_eq!(
            ret, 0,
            "pthread_threadid_np failed with error code: {}",
            ret
        );
    }
    // macOS thread IDs are u64 so we need to convert to i64.
    debug_assert!(tid <= i64::MAX as u64, "thread ID {} exceeds i64::MAX", tid);
    tid as i64
}
142
/// Returns the current thread's native ID in a cross-platform way.
// Fail the build early on unsupported targets instead of erroring at call sites.
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
compile_error!("get_current_thread_id is only implemented for Linux and macOS");
146
147/// Returns true if the current thread is the main Python thread.
148/// Compares the current thread's native ID against the main Python thread's native ID.
149pub fn is_main_thread() -> bool {
150    let current_tid = get_current_thread_id();
151    current_tid == get_main_thread_native_id()
152}
153
154pub fn initialize(py: Python) -> Result<()> {
155    let atexit = py.import("atexit")?;
156    let shutdown_fn = wrap_pyfunction!(shutdown_tokio_runtime, py)?;
157    atexit.call_method1("register", (shutdown_fn,))?;
158    Ok(())
159}
160
/// Block the current thread on a future, but make sure to check for signals
/// originating from the Python signal handler.
///
/// Python's signal handler just sets a flag that it expects the Python
/// interpreter to handle later via a call to `PyErr_CheckSignals`. When we
/// enter into potentially long-running native code, we need to make sure to be
/// checking for signals frequently, otherwise we will ignore them. This will
/// manifest as `ctrl-C` not doing anything.
///
/// One additional wrinkle is that `PyErr_CheckSignals` only works on the main
/// Python thread; if it's called on any other thread it silently does nothing.
/// So, we check if we're on the main thread by comparing native thread IDs.
///
/// Returns the future's output; errors with a `PyRuntimeError` if the spawned
/// task fails to join, or with the `PyErr` raised by `check_signals` (e.g.
/// `KeyboardInterrupt`) when a signal arrives first.
pub fn signal_safe_block_on<F>(py: Python, future: F) -> PyResult<F::Output>
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    let runtime = get_tokio_runtime();
    // Release the GIL, otherwise the work in `future` that tries to acquire the
    // GIL on another thread may deadlock.
    py.detach(|| {
        if is_main_thread() {
            // Spawn the future onto the tokio runtime
            let handle = runtime.spawn(future);
            // Block the current thread on waiting for *either* the future to
            // complete or a signal.
            runtime.block_on(async {
                tokio::select! {
                    result = handle => result.map_err(|e| PyRuntimeError::new_err(format!("JoinErr: {:?}", e))),
                    // The signal arm polls `check_signals` every 100ms; the
                    // loop never exits normally, only via the `?` on a PyErr.
                    signal = async {
                        let sleep_for = std::time::Duration::from_millis(100);
                        loop {
                            // Acquiring the GIL in a loop is sad, hopefully once
                            // every 100ms is fine.
                            Python::attach(|py| py.check_signals())?;
                            tokio::time::sleep(sleep_for).await;
                        }
                    } => signal
                }
            })
        } else {
            // If we're not on the main thread, we can just block it. We've
            // released the GIL, so the Python main thread will continue on, and
            // `PyErr_CheckSignals` doesn't do anything anyway.
            Ok(runtime.block_on(future))
        }
    })
}
209
210/// A test function that sleeps indefinitely in a loop.
211/// This is used for testing signal handling in signal_safe_block_on.
212/// The function will sleep forever until interrupted by a signal.
213#[pyfunction]
214pub fn sleep_indefinitely_for_unit_tests(py: Python) -> PyResult<()> {
215    // Create a future that sleeps indefinitely
216    let future = async {
217        loop {
218            tracing::info!("idef sleeping for 100ms");
219            tokio::time::sleep(Duration::from_millis(100)).await;
220        }
221    };
222
223    // Use signal_safe_block_on to run the future, which should make it
224    // interruptible by signals like SIGINT
225    signal_safe_block_on(py, future)
226}
227
228/// Initialize the runtime module and expose Python functions
229pub fn register_python_bindings(runtime_mod: &Bound<'_, PyModule>) -> PyResult<()> {
230    let sleep_indefinitely_fn =
231        wrap_pyfunction!(sleep_indefinitely_for_unit_tests, runtime_mod.py())?;
232    sleep_indefinitely_fn.setattr(
233        "__module__",
234        "monarch._rust_bindings.monarch_hyperactor.runtime",
235    )?;
236    runtime_mod.add_function(sleep_indefinitely_fn)?;
237    Ok(())
238}
239
240struct SimpleRuntime;
241
242impl pyo3_async_runtimes::generic::Runtime for SimpleRuntime {
243    type JoinError = task::JoinError;
244    type JoinHandle = task::JoinHandle<()>;
245
246    fn spawn<F>(fut: F) -> Self::JoinHandle
247    where
248        F: Future<Output = ()> + Send + 'static,
249    {
250        get_tokio_runtime().spawn(async move {
251            fut.await;
252        })
253    }
254}
255
// Per-task storage for pyo3 `TaskLocals`, used by `SimpleRuntime`'s
// `ContextExt` implementation to propagate the Python event loop context
// into spawned futures.
tokio::task_local! {
    static TASK_LOCALS: UnsyncOnceCell<TaskLocals>;
}
259
260impl pyo3_async_runtimes::generic::ContextExt for SimpleRuntime {
261    fn scope<F, R>(locals: TaskLocals, fut: F) -> Pin<Box<dyn Future<Output = R> + Send>>
262    where
263        F: Future<Output = R> + Send + 'static,
264    {
265        let cell = UnsyncOnceCell::new();
266        cell.set(locals).unwrap();
267
268        Box::pin(TASK_LOCALS.scope(cell, fut))
269    }
270
271    fn get_task_locals() -> Option<TaskLocals> {
272        TASK_LOCALS
273            .try_with(|c| {
274                c.get()
275                    .map(|locals| monarch_with_gil_blocking(|py| locals.clone_ref(py)))
276            })
277            .unwrap_or_default()
278    }
279}
280
/// Convert a Rust future into a Python awaitable, driven by the global tokio
/// runtime via `SimpleRuntime`.
pub fn future_into_py<F, T>(py: Python, fut: F) -> PyResult<Bound<PyAny>>
where
    F: Future<Output = PyResult<T>> + Send + 'static,
    T: for<'py> IntoPyObject<'py>,
{
    pyo3_async_runtimes::generic::future_into_py::<SimpleRuntime, F, T>(py, fut)
}
288
289/// Global lock to serialize GIL acquisition from Rust threads in async contexts.
290///
291/// Under high concurrency, many async tasks can simultaneously try to acquire the GIL.
292/// Each call blocks the current tokio worker thread, which can cause runtime starvation
293/// and apparent deadlocks (nothing else gets polled).
294///
295/// This wrapper serializes GIL acquisition among callers that opt in, so at most one
296/// tokio task is blocked in `Python::attach` at a time, improving fairness under
297/// contention.
298///
299/// Note: this does not globally prevent other sync code from calling `Python::attach`
300/// directly. Use `monarch_with_gil` or `monarch_with_gil_blocking` for Python interaction
301/// that occurs on async hot paths.
302static GIL_LOCK: Lazy<tokio::sync::Mutex<()>> = Lazy::new(|| tokio::sync::Mutex::new(()));
303
// Thread-local depth counter for re-entrant GIL acquisition.
//
// This tracks when we're already inside a `monarch_with_gil` or `monarch_with_gil_blocking`
// call. On re-entry (e.g., when Python calls back into Rust while we're already executing
// under `Python::attach`), we bypass the `GIL_LOCK` to avoid deadlocks.
//
// Without this, the following scenario would deadlock:
// 1. Rust async code calls `monarch_with_gil`, acquires `GIL_LOCK`
// 2. Inside the closure, Python code is executed
// 3. Python code calls back into Rust (e.g., via a PyO3 callback)
// 4. The callback tries to call `monarch_with_gil` again
// 5. DEADLOCK: waiting for `GIL_LOCK` which is held by the same logical call chain
thread_local! {
    /// Current nesting depth of `monarch_with_gil*` calls on this thread.
    static GIL_DEPTH: std::cell::Cell<u32> = const { std::cell::Cell::new(0) };
}
319
/// RAII guard that decrements the GIL depth counter when dropped.
struct GilDepthGuard {
    // Depth value to restore on drop (the value before the increment).
    prev_depth: u32,
}

impl Drop for GilDepthGuard {
    fn drop(&mut self) {
        // Restore rather than decrement: robust even if drops happen out of
        // order relative to increments on this thread.
        GIL_DEPTH.with(|d| d.set(self.prev_depth));
    }
}
330
331/// Increments the GIL depth counter and returns a guard that restores it on drop.
332fn increment_gil_depth() -> GilDepthGuard {
333    let prev_depth = GIL_DEPTH.with(|d| {
334        let current = d.get();
335        d.set(current + 1);
336        current
337    });
338    GilDepthGuard { prev_depth }
339}
340
341/// Returns true if we're already inside a `monarch_with_gil` call (re-entrant).
342fn is_reentrant() -> bool {
343    GIL_DEPTH.with(|d| d.get() > 0)
344}
345
346/// Async wrapper around `Python::attach` intended for async call sites.
347///
348/// Why: under high concurrency, many async tasks can simultaneously
349/// try to acquire the GIL. Each call blocks the current tokio worker
350/// thread, which can cause runtime starvation / apparent deadlocks
351/// (nothing else gets polled).
352///
353/// This wrapper serializes GIL acquisition among async callers so at most one tokio
354/// task is blocked in `Python::attach` at a time, preventing runtime starvation
355/// under GIL contention.
356///
357/// Note: this does not globally prevent other sync code from calling
358/// `Python::attach` directly. Use this wrapper for Python
359/// interaction that occurs on async hot paths.
360///
361/// # Re-entrancy Safety
362///
363/// This function is re-entrant safe. If called while already inside a `monarch_with_gil`
364/// or `monarch_with_gil_blocking` call (e.g., from a Python→Rust callback), it bypasses
365/// the `GIL_LOCK` to avoid deadlocks.
366///
367/// # Example
368/// ```ignore
369/// let result = monarch_with_gil(|py| {
370///     // Do work with Python GIL
371///     Ok(42)
372/// })
373/// .await?;
374/// ```
375pub async fn monarch_with_gil<F, R>(f: F) -> R
376where
377    F: for<'py> FnOnce(Python<'py>) -> R + Send,
378{
379    // If we're already inside a monarch_with_gil call (re-entrant), skip the lock
380    // to avoid deadlock from Python→Rust callbacks
381    if is_reentrant() {
382        let _depth_guard = increment_gil_depth();
383        return Python::attach(f);
384    }
385
386    // Not re-entrant: acquire the serialization lock
387    let _lock_guard = GIL_LOCK.lock().await;
388    let _depth_guard = increment_gil_depth();
389    Python::attach(f)
390}
391
/// Blocking wrapper around `Python::attach` for use in synchronous contexts.
///
/// Unlike `monarch_with_gil`, this function does NOT use the `GIL_LOCK` async
/// mutex. Since it is a blocking call, it simply acquires the GIL and releases
/// it when the closure returns.
///
/// It still increments the thread-local GIL depth, so nested
/// `monarch_with_gil` calls made from inside `f` correctly bypass `GIL_LOCK`.
///
/// # Example
/// ```ignore
/// let result = monarch_with_gil_blocking(|py| {
///     // Do work with Python GIL
///     Ok(42)
/// })?;
/// ```
pub fn monarch_with_gil_blocking<F, R>(f: F) -> R
where
    F: for<'py> FnOnce(Python<'py>) -> R + Send,
{
    let _depth_guard = increment_gil_depth();
    Python::attach(f)
}