miri/shims/native_lib/trace/parent.rs

use std::sync::atomic::{AtomicPtr, AtomicUsize};

use ipc_channel::ipc;
use nix::sys::{ptrace, signal, wait};
use nix::unistd;

use super::CALLBACK_STACK_SIZE;
use super::messages::{Confirmation, StartFfiInfo, TraceRequest};
use crate::shims::native_lib::{AccessEvent, AccessRange, MemEvents};

/// The flags to use when calling `waitid()`.
const WAIT_FLAGS: wait::WaitPidFlag =
    wait::WaitPidFlag::WUNTRACED.union(wait::WaitPidFlag::WEXITED);
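// Note: for `waitid()`, `WUNTRACED` is equivalent to `WSTOPPED` (report children
// that have stopped), and `WEXITED` reports children that have terminated.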

/// The default word size on a given platform, in bytes.
#[cfg(target_arch = "x86")]
const ARCH_WORD_SIZE: usize = 4;
#[cfg(target_arch = "x86_64")]
const ARCH_WORD_SIZE: usize = 8;

// x86 max instruction length is 15 bytes:
// https://www.intel.com/content/www/us/en/developer/articles/technical/intel-sdm.html
// See vol. 3B section 24.25.
const ARCH_MAX_INSTR_SIZE: usize = 15;

/// The address of the page set to be edited, initialised to a sentinel null
/// pointer.
static PAGE_ADDR: AtomicPtr<u8> = AtomicPtr::new(std::ptr::null_mut());
/// The host pagesize, initialised to a sentinel zero value.
pub static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
/// How many consecutive pages to unprotect. 1 by default, unlikely to be set
/// higher than 2.
static PAGE_COUNT: AtomicUsize = AtomicUsize::new(1);

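// NB: These three statics are also how the supervisor passes arguments to
// `mempr_off`/`mempr_on` in the *child*: since the child is forked from this
// process, the statics live at the same addresses there, and `handle_segfault`
// pokes values into the child's copies via `ptrace::write`.
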
/// Allows us to get common arguments from the `user_regs_struct` across
/// architectures. Normally this would land us in ABI hell, but thankfully all
/// of our use cases consist of functions with a small number of register-sized
/// integer arguments.
/// See <https://man7.org/linux/man-pages/man2/syscall.2.html> for sources.
trait ArchIndependentRegs {
    /// Gets the address of the instruction pointer.
    fn ip(&self) -> usize;
    /// Set the instruction pointer; remember to also set the stack pointer, or
    /// else the stack might get messed up!
    fn set_ip(&mut self, ip: usize);
    /// Set the stack pointer, ideally to a zeroed-out area.
    fn set_sp(&mut self, sp: usize);
}
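
// Illustrative sketch (hypothetical names): `handle_segfault` below uses this
// trait to redirect the child's execution, roughly like so:
//
//     let mut regs = ptrace::getregs(pid).unwrap();
//     regs.set_sp(scratch_stack_mid); // park the stack somewhere safe first
//     regs.set_ip(target_fn as usize);
//     ptrace::setregs(pid, regs).unwrap();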

// It's fine / desirable behaviour for values to wrap here; we just care about
// preserving the bit pattern.
#[cfg(target_arch = "x86_64")]
#[rustfmt::skip]
impl ArchIndependentRegs for libc::user_regs_struct {
    #[inline]
    fn ip(&self) -> usize { self.rip.try_into().unwrap() }
    #[inline]
    fn set_ip(&mut self, ip: usize) { self.rip = ip.try_into().unwrap() }
    #[inline]
    fn set_sp(&mut self, sp: usize) { self.rsp = sp.try_into().unwrap() }
}

#[cfg(target_arch = "x86")]
#[rustfmt::skip]
impl ArchIndependentRegs for libc::user_regs_struct {
    #[inline]
    fn ip(&self) -> usize { self.eip.cast_unsigned().try_into().unwrap() }
    #[inline]
    fn set_ip(&mut self, ip: usize) { self.eip = ip.cast_signed().try_into().unwrap() }
    #[inline]
    fn set_sp(&mut self, sp: usize) { self.esp = sp.cast_signed().try_into().unwrap() }
}

/// A unified event representing something happening on the child process. Wraps
/// `nix`'s `WaitStatus` and our custom signals so it can all be done with one
/// `match` statement.
pub enum ExecEvent {
    /// Child process requests that we begin monitoring it.
    Start(StartFfiInfo),
    /// Child requests that we stop monitoring and pass over the events we
    /// detected.
    End,
    /// The child process with the specified pid was stopped by the given signal.
    Status(unistd::Pid, signal::Signal),
    /// The child process with the specified pid entered or exited a syscall.
    Syscall(unistd::Pid),
    /// A child process exited or was killed; if we have a return code, it is
    /// specified.
    Died(Option<i32>),
}
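
// Of these, `Start` arrives over the IPC channel, `End` is signalled by the
// child raising SIGUSR1, and the rest are derived from `waitid()` statuses in
// `ChildListener::next` below.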

/// A listener for the FFI start info channel along with relevant state.
pub struct ChildListener {
    /// The matching channel for the child's `Supervisor` struct.
    message_rx: ipc::IpcReceiver<TraceRequest>,
    /// The channel over which we send confirmations back to the child.
    confirm_tx: ipc::IpcSender<Confirmation>,
    /// Whether an FFI call is currently ongoing.
    attached: bool,
    /// If `Some`, overrides the return code with the given value.
    override_retcode: Option<i32>,
    /// Last code obtained from a child exiting.
    last_code: Option<i32>,
}

impl ChildListener {
    pub fn new(
        message_rx: ipc::IpcReceiver<TraceRequest>,
        confirm_tx: ipc::IpcSender<Confirmation>,
    ) -> Self {
        Self { message_rx, confirm_tx, attached: false, override_retcode: None, last_code: None }
    }
}

impl Iterator for ChildListener {
    type Item = ExecEvent;

    // Allows us to monitor the child process by just iterating over the listener.
    // NB: This should never return None!
    fn next(&mut self) -> Option<Self::Item> {
        // Do not block if the child has nothing to report for `waitid`.
        let opts = WAIT_FLAGS | wait::WaitPidFlag::WNOHANG;
        loop {
            // Listen to any child, not just the main one. Important if we want
            // to allow the C code to fork further, along with being a bit of
            // defensive programming since Linux sometimes assigns threads of
            // the same process different PIDs with unpredictable rules...
            match wait::waitid(wait::Id::All, opts) {
                Ok(stat) =>
                    match stat {
                        // Child exited normally with a specific code set.
                        wait::WaitStatus::Exited(_, code) => self.last_code = Some(code),
                        // Child was killed by a signal, without giving a code.
                        wait::WaitStatus::Signaled(_, _, _) => self.last_code = None,
                        // Child entered or exited a syscall.
                        wait::WaitStatus::PtraceSyscall(pid) =>
                            if self.attached {
                                return Some(ExecEvent::Syscall(pid));
                            },
                        // Child with the given pid was stopped by the given signal.
                        // It's somewhat unclear when each of these two is returned;
                        // we just treat them the same.
                        wait::WaitStatus::Stopped(pid, signal)
                        | wait::WaitStatus::PtraceEvent(pid, signal, _) =>
                            if self.attached {
                                // This is our end-of-FFI signal!
                                if signal == signal::SIGUSR1 {
                                    self.attached = false;
                                    return Some(ExecEvent::End);
                                } else {
                                    return Some(ExecEvent::Status(pid, signal));
                                }
                            } else {
                                // Just pass along the signal.
                                ptrace::cont(pid, signal).unwrap();
                            },
                        _ => (),
                    },
                // This case should only trigger when all children died.
                Err(_) => return Some(ExecEvent::Died(self.override_retcode.or(self.last_code))),
            }

            // Similarly, do a non-blocking poll of the IPC channel.
            if let Ok(req) = self.message_rx.try_recv() {
                match req {
                    TraceRequest::StartFfi(info) =>
                    // Should never trigger - but better to panic explicitly than deadlock!
                        if self.attached {
                            panic!("Attempting to begin FFI multiple times!");
                        } else {
                            self.attached = true;
                            return Some(ExecEvent::Start(info));
                        },
                    TraceRequest::OverrideRetcode(code) => {
                        self.override_retcode = Some(code);
                        self.confirm_tx.send(Confirmation).unwrap();
                    }
                }
            }

            // Not ideal, but doing anything else might sacrifice performance.
            std::thread::yield_now();
        }
    }
}

/// An error came up while waiting on the child process to do something.
/// It likely died, with this return code if we have one.
#[derive(Debug)]
pub struct ExecEnd(pub Option<i32>);

/// Whether to call `ptrace::cont()` immediately. Used exclusively by `wait_for_signal`.
enum InitialCont {
    Yes,
    No,
}

/// This is the main loop of the supervisor process. It runs in a separate
/// process from the rest of Miri (but because we fork, addresses for anything
/// created before the fork - like statics - are the same).
pub fn sv_loop(
    listener: ChildListener,
    init_pid: unistd::Pid,
    event_tx: ipc::IpcSender<MemEvents>,
    confirm_tx: ipc::IpcSender<Confirmation>,
) -> Result<!, ExecEnd> {
    // Get the pagesize set and make sure it isn't still on the zero sentinel value!
    let page_size = PAGE_SIZE.load(std::sync::atomic::Ordering::Relaxed);
    assert_ne!(page_size, 0);

    // Things that we return to the child process.
    let mut acc_events = Vec::new();

    // Memory allocated for the MiriMachine.
    let mut ch_pages = Vec::new();
    let mut ch_stack = None;

    // An instance of the Capstone disassembler, so we don't spawn one on every access.
    let cs = get_disasm();

    // The pid of the last process we interacted with, used by default if we don't have a
    // reason to use a different one.
    let mut curr_pid = init_pid;

    // There's an initial sigstop we need to deal with.
    wait_for_signal(Some(curr_pid), signal::SIGSTOP, InitialCont::No)?;
    ptrace::cont(curr_pid, None).unwrap();

    for evt in listener {
        match evt {
            // Child started ffi, so prep memory.
            ExecEvent::Start(ch_info) => {
                // All the pages that the child process is "allowed to" access.
                ch_pages = ch_info.page_ptrs;
                // And the temporary callback stack it allocated for us to use later.
                ch_stack = Some(ch_info.stack_ptr);

                // We received the signal and are no longer in the main listener loop,
                // so we can let the child move on to the end of the ffi prep where it will
                // raise a SIGSTOP. We need it to be signal-stopped *and waited for* in
                // order to do most ptrace operations!
                confirm_tx.send(Confirmation).unwrap();
                // We can't trust simply calling `Pid::this()` in the child process to give the right
                // PID for us, so we get it this way.
                curr_pid = wait_for_signal(None, signal::SIGSTOP, InitialCont::No).unwrap();
                // Continue until next syscall.
                ptrace::syscall(curr_pid, None).unwrap();
            }
            // Child wants to end tracing.
            ExecEvent::End => {
                // Hand over the access info we traced.
                event_tx.send(MemEvents { acc_events }).unwrap();
                // And reset our values.
                acc_events = Vec::new();
                ch_stack = None;

                // No need to monitor syscalls anymore, they'd just be ignored.
                ptrace::cont(curr_pid, None).unwrap();
            }
            // Child process was stopped by a signal
            ExecEvent::Status(pid, signal) =>
                match signal {
                    // If it was a segfault, check if it was an artificial one
                    // caused by it trying to access the MiriMachine memory.
                    signal::SIGSEGV =>
                        handle_segfault(
                            pid,
                            &ch_pages,
                            ch_stack.unwrap(),
                            page_size,
                            &cs,
                            &mut acc_events,
                        )?,
                    // Something weird happened.
                    _ => {
                        eprintln!("Process unexpectedly got {signal}; continuing...");
                        // In case we're not tracing
                        if ptrace::syscall(pid, None).is_err() {
                            // If *this* fails too, something really weird happened
                            // and it's probably best to just panic.
                            signal::kill(pid, signal::SIGCONT).unwrap();
                        }
                    }
                },
            // Child entered or exited a syscall. For now we ignore this and just continue.
            ExecEvent::Syscall(pid) => {
                ptrace::syscall(pid, None).unwrap();
            }
            ExecEvent::Died(code) => {
                return Err(ExecEnd(code));
            }
        }
    }

    unreachable!()
}

/// Spawns a Capstone disassembler for the host architecture.
#[rustfmt::skip]
fn get_disasm() -> capstone::Capstone {
    use capstone::prelude::*;
    let cs_pre = Capstone::new();
    {
        #[cfg(target_arch = "x86_64")]
        {cs_pre.x86().mode(arch::x86::ArchMode::Mode64)}
        #[cfg(target_arch = "x86")]
        {cs_pre.x86().mode(arch::x86::ArchMode::Mode32)}
    }
    .detail(true)
    .build()
    .unwrap()
}

/// Waits for `wait_signal`. If `init_cont` is `InitialCont::Yes`, it will first do
/// a `ptrace::cont`. We want to avoid that in some cases, like at the beginning of
/// FFI.
///
/// If `pid` is `None`, only one wait will be done and `init_cont` should be
/// `InitialCont::No`.
fn wait_for_signal(
    pid: Option<unistd::Pid>,
    wait_signal: signal::Signal,
    init_cont: InitialCont,
) -> Result<unistd::Pid, ExecEnd> {
    if matches!(init_cont, InitialCont::Yes) {
        ptrace::cont(pid.unwrap(), None).unwrap();
    }
    // Repeatedly call `waitid` until we get the signal we want, or the process dies.
    loop {
        let wait_id = match pid {
            Some(pid) => wait::Id::Pid(pid),
            None => wait::Id::All,
        };
        let stat = wait::waitid(wait_id, WAIT_FLAGS).map_err(|_| ExecEnd(None))?;
        let (signal, pid) = match stat {
            // Report the cause of death, if we know it.
            wait::WaitStatus::Exited(_, code) => {
                return Err(ExecEnd(Some(code)));
            }
            wait::WaitStatus::Signaled(_, _, _) => return Err(ExecEnd(None)),
            wait::WaitStatus::Stopped(pid, signal)
            | wait::WaitStatus::PtraceEvent(pid, signal, _) => (signal, pid),
            // This covers PtraceSyscall and variants that are impossible with
            // the flags set (e.g. WaitStatus::StillAlive).
            _ => {
                ptrace::cont(pid.unwrap(), None).unwrap();
                continue;
            }
        };
        if signal == wait_signal {
            return Ok(pid);
        } else {
            ptrace::cont(pid, signal).map_err(|_| ExecEnd(None))?;
        }
    }
}

/// Add the memory events from executing `op` while there is a memory access at
/// `addr` to `acc_events`. Returns whether `op` was a memory operand.
fn capstone_find_events(
    addr: usize,
    op: &capstone::arch::ArchOperand,
    acc_events: &mut Vec<AccessEvent>,
) -> bool {
    use capstone::prelude::*;
    match op {
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        arch::ArchOperand::X86Operand(x86_operand) => {
            match x86_operand.op_type {
                // We only care about memory accesses
                arch::x86::X86OperandType::Mem(_) => {
                    let push = AccessRange { addr, size: x86_operand.size.into() };
                    // It's called a "RegAccessType" but it also applies to memory
                    let acc_ty = x86_operand.access.unwrap();
                    // The same instruction might do both reads and writes, so potentially add both.
                    // We do not know the order in which they happened, but writing and then reading
                    // makes little sense so we put the read first. That is also the more
                    // conservative choice.
                    if acc_ty.is_readable() {
                        acc_events.push(AccessEvent::Read(push.clone()));
                    }
                    if acc_ty.is_writable() {
                        // FIXME: This could be made certain; either determine all cases where
                        // only reads happen, or have an intermediate mempr_* function to first
                        // map the page(s) as readonly and check if a segfault occurred.

                        // Per https://docs.rs/iced-x86/latest/iced_x86/enum.OpAccess.html,
                        // we know that the possible access types are Read, CondRead, Write,
                        // CondWrite, ReadWrite, and ReadCondWrite. Since we got a segfault
                        // we know some kind of access happened so Cond{Read, Write}s are
                        // certain reads and writes; the only uncertainty is with an RW op
                        // as it might be a ReadCondWrite with the write condition unmet.
                        acc_events.push(AccessEvent::Write(push, !acc_ty.is_readable()));
                    }

                    return true;
                }
                _ => (),
            }
        }
        // FIXME: arm64
        _ => unimplemented!(),
    }

    false
}
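
// Worked example for the above (illustrative): on x86-64, `add dword ptr [rdi], eax`
// has one memory operand that is both read and written, so we push a `Read` and a
// `Write` (flagged as not-certain, since the write might be conditional) for the
// same 4-byte range; a plain store like `mov dword ptr [rdi], eax` pushes only a
// certain `Write`.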

/// Extract the events from the given instruction.
fn capstone_disassemble(
    instr: &[u8],
    addr: usize,
    cs: &capstone::Capstone,
    acc_events: &mut Vec<AccessEvent>,
) -> capstone::CsResult<()> {
    // The arch_detail is what we care about, but it relies on these temporaries
    // that we can't drop. 0x1000 is the default base address for Capstone, and
    // we're expecting 1 instruction.
    let insns = cs.disasm_count(instr, 0x1000, 1)?;
    let ins_detail = cs.insn_detail(&insns[0])?;
    let arch_detail = ins_detail.arch_detail();

    let mut found_mem_op = false;

    for op in arch_detail.operands() {
        if capstone_find_events(addr, &op, acc_events) {
            if found_mem_op {
                panic!("more than one memory operand found; we don't know which one accessed what");
            }
            found_mem_op = true;
        }
    }

    Ok(())
}
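
// Illustrative: the two bytes [0x89, 0x07] decode in 64-bit mode to
// `mov dword ptr [rdi], eax`, so feeding them to `capstone_disassemble` should
// record a single 4-byte write event at `addr`.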

/// Grabs the access that caused a segfault and logs it down if it's to our memory,
/// or kills the child and returns the appropriate error otherwise.
fn handle_segfault(
    pid: unistd::Pid,
    ch_pages: &[usize],
    ch_stack: usize,
    page_size: usize,
    cs: &capstone::Capstone,
    acc_events: &mut Vec<AccessEvent>,
) -> Result<(), ExecEnd> {
    // Get information on what caused the segfault. This contains the address
    // that triggered it.
    let siginfo = ptrace::getsiginfo(pid).unwrap();
    // x86 instructions have at most one memory operand (thankfully!)
    // SAFETY: si_addr is safe to call.
    let addr = unsafe { siginfo.si_addr().addr() };
    let page_addr = addr.strict_sub(addr.strict_rem(page_size));

    if !ch_pages.iter().any(|pg| (*pg..pg.strict_add(page_size)).contains(&addr)) {
        // This was a real segfault (not one of the Miri memory pages), so print some debug info and
        // quit.
        let regs = ptrace::getregs(pid).unwrap();
        eprintln!("Segfault occurred during FFI at {addr:#018x}");
        eprintln!("Expected access on pages: {ch_pages:#018x?}");
        eprintln!("Register dump: {regs:#x?}");
        ptrace::kill(pid).unwrap();
        return Err(ExecEnd(None));
    }

    // Overall structure:
    // - Get the address that caused the segfault
    // - Unprotect the memory: we force the child to execute `mempr_off`, passing parameters via
    //   global atomic variables. This is what we use the temporary callback stack for.
    // - Step 1 instruction
    // - Parse executed code to estimate size & type of access
    // - Reprotect the memory by executing `mempr_on` in the child, using the callback stack again.
    // - Continue

    // Ensure the stack is properly zeroed out!
    for a in (ch_stack..ch_stack.strict_add(CALLBACK_STACK_SIZE)).step_by(ARCH_WORD_SIZE) {
        ptrace::write(pid, std::ptr::with_exposed_provenance_mut(a), 0).unwrap();
    }

    // Guard against both architectures with upwards and downwards-growing stacks.
    let stack_ptr = ch_stack.strict_add(CALLBACK_STACK_SIZE / 2);
    let regs_bak = ptrace::getregs(pid).unwrap();
    let mut new_regs = regs_bak;

    // Read at least one instruction from the ip. It's possible that the instruction
    // that triggered the segfault was short and at the end of the mapped text area,
    // so some of these reads may fail; in that case, just write empty bytes. If all
    // reads failed, the disassembler will report an error.
    let instr = (0..(ARCH_MAX_INSTR_SIZE.div_ceil(ARCH_WORD_SIZE)))
        .flat_map(|ofs| {
            // This reads one word of memory; we divided by `ARCH_WORD_SIZE` above to compensate for that.
            ptrace::read(
                pid,
                std::ptr::without_provenance_mut(
                    regs_bak.ip().strict_add(ARCH_WORD_SIZE.strict_mul(ofs)),
                ),
            )
            .unwrap_or_default()
            .to_ne_bytes()
        })
        .collect::<Vec<_>>();

    // Now figure out the size + type of access and log it down.
    capstone_disassemble(&instr, addr, cs, acc_events).expect("Failed to disassemble instruction");

    // Move the instr ptr into the deprotection code.
    #[expect(clippy::as_conversions)]
    new_regs.set_ip(mempr_off as usize);
    // Don't mess up the stack by accident!
    new_regs.set_sp(stack_ptr);

    // Modify the PAGE_ADDR global on the child process to point to the page
    // that we want unprotected.
    ptrace::write(
        pid,
        (&raw const PAGE_ADDR).cast_mut().cast(),
        libc::c_long::try_from(page_addr.cast_signed()).unwrap(),
    )
    .unwrap();

    // Check if we also own the next page, and if so unprotect it in case
    // the access spans the page boundary.
    let flag = if ch_pages.contains(&page_addr.strict_add(page_size)) { 2 } else { 1 };
    ptrace::write(pid, (&raw const PAGE_COUNT).cast_mut().cast(), flag).unwrap();

    ptrace::setregs(pid, new_regs).unwrap();

    // Our mempr_* functions end with a raise(SIGSTOP).
    wait_for_signal(Some(pid), signal::SIGSTOP, InitialCont::Yes)?;

    // Step 1 instruction.
    ptrace::setregs(pid, regs_bak).unwrap();
    ptrace::step(pid, None).unwrap();
    // Don't use wait_for_signal here since 1 instruction doesn't give room
    // for any uncertainty + we don't want it `cont()`ing randomly by accident
    // Also, don't let it continue with unprotected memory if something errors!
    let _ = wait::waitid(wait::Id::Pid(pid), WAIT_FLAGS).map_err(|_| ExecEnd(None))?;

    // Zero out again to be safe
    for a in (ch_stack..ch_stack.strict_add(CALLBACK_STACK_SIZE)).step_by(ARCH_WORD_SIZE) {
        ptrace::write(pid, std::ptr::with_exposed_provenance_mut(a), 0).unwrap();
    }

    let regs_bak = ptrace::getregs(pid).unwrap();
    new_regs = regs_bak;

    // Reprotect everything and continue.
    #[expect(clippy::as_conversions)]
    new_regs.set_ip(mempr_on as usize);
    new_regs.set_sp(stack_ptr);
    ptrace::setregs(pid, new_regs).unwrap();
    wait_for_signal(Some(pid), signal::SIGSTOP, InitialCont::Yes)?;

    ptrace::setregs(pid, regs_bak).unwrap();
    ptrace::syscall(pid, None).unwrap();
    Ok(())
}

// We only get dropped into these functions via offsetting the instr pointer
// manually, so we *must not ever* unwind from them.

/// Disables protections on the page whose address is currently in `PAGE_ADDR`.
///
/// SAFETY: `PAGE_ADDR` should be set to a page-aligned pointer to an owned page,
/// `PAGE_SIZE` should be the host pagesize, and the `PAGE_SIZE` * `PAGE_COUNT`
/// bytes starting at `PAGE_ADDR` must be owned and allocated memory. No other
/// threads should be running.
pub unsafe extern "C" fn mempr_off() {
    use std::sync::atomic::Ordering;

    // Again, cannot allow unwinds to happen here.
    let len = PAGE_SIZE.load(Ordering::Relaxed).saturating_mul(PAGE_COUNT.load(Ordering::Relaxed));
    // SAFETY: Upheld by "caller".
    unsafe {
        // It's up to the caller to make sure this doesn't actually overflow, but
        // we mustn't unwind from here, so...
        if libc::mprotect(
            PAGE_ADDR.load(Ordering::Relaxed).cast(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
        ) != 0
        {
            // Can't return or unwind, but we can do this.
            std::process::exit(-1);
        }
    }
    // If this fails somehow we're doomed.
    if signal::raise(signal::SIGSTOP).is_err() {
        std::process::exit(-1);
    }
}

/// Reenables protection on the page set by `PAGE_ADDR`.
///
/// SAFETY: See `mempr_off()`.
pub unsafe extern "C" fn mempr_on() {
    use std::sync::atomic::Ordering;

    let len = PAGE_SIZE.load(Ordering::Relaxed).wrapping_mul(PAGE_COUNT.load(Ordering::Relaxed));
    // SAFETY: Upheld by "caller".
    unsafe {
        if libc::mprotect(PAGE_ADDR.load(Ordering::Relaxed).cast(), len, libc::PROT_NONE) != 0 {
            std::process::exit(-1);
        }
    }
    if signal::raise(signal::SIGSTOP).is_err() {
        std::process::exit(-1);
    }
}