miri/shims/unix/sync.rs

use rustc_abi::Size;

use crate::concurrency::sync::LAZY_INIT_COOKIE;
use crate::*;

/// Do a bytewise comparison of the two places, using relaxed atomic reads. This is used to check if
/// a synchronization primitive matches its static initializer value.
///
/// The reads happen in chunks of 4 bytes, so all racing accesses must also use that access size.
fn bytewise_equal_atomic_relaxed<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    left: &MPlaceTy<'tcx>,
    right: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, bool> {
    let size = left.layout.size;
    assert_eq!(size, right.layout.size);

    // We do this in chunks of 4, so that we are okay to race with (sufficiently aligned)
    // 4-byte atomic accesses.
    assert!(size.bytes().is_multiple_of(4));
    for i in 0..(size.bytes() / 4) {
        let offset = Size::from_bytes(i.strict_mul(4));
        let load = |place: &MPlaceTy<'tcx>| {
            let chunk = place.offset(offset, ecx.machine.layouts.u32, ecx)?;
            ecx.read_scalar_atomic(&chunk, AtomicReadOrd::Relaxed)?.to_u32()
        };
        let left = load(left)?;
        let right = load(right)?;
        if left != right {
            return interp_ok(false);
        }
    }

    interp_ok(true)
}
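
// Illustrative example (hypothetical sizes, not part of the shim): for an
// 8-byte primitive, the loop above performs two relaxed u32 loads per side:
// chunk 0 compares bytes 0..4 of `left` and `right`, chunk 1 compares bytes
// 4..8. Any concurrent access to these bytes must likewise be a (sufficiently
// aligned) 4-byte atomic, or the data race detector will flag it.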

// # pthread_mutexattr_t
// We store some data directly inside the type, ignoring the platform layout:
// - kind: i32

#[inline]
fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "macos" | "freebsd" | "android" => 0,
        os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
    })
}

fn mutexattr_get_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )?
    .to_i32()
}

fn mutexattr_set_kind<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        Scalar::from_i32(kind),
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )
}

/// To differentiate "the mutex kind has not been changed" from
/// "the mutex kind has been set to PTHREAD_MUTEX_DEFAULT and that is
/// equal to some other mutex kind", we make the default value of this
/// field *not* PTHREAD_MUTEX_DEFAULT but this special flag.
const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;
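
// Note: this value is assumed not to collide with any real PTHREAD_MUTEX_*
// kind constant; on common platforms the kinds are small integers (e.g. 0..=3
// on glibc), far from 0x8000000. `pthread_mutexattr_settype` below
// double-checks this with an `assert_ne!`.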

/// Translates the mutex kind from what is stored in pthread_mutexattr_t to our enum.
fn mutexattr_translate_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, MutexKind> {
    interp_ok(if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL") {
        MutexKind::Normal
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
        MutexKind::ErrorCheck
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
        MutexKind::Recursive
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
        || kind == PTHREAD_MUTEX_KIND_UNCHANGED
    {
        // We check this *last* since PTHREAD_MUTEX_DEFAULT may be numerically equal to one of the
        // others, and we want an explicit `mutexattr_settype` to work as expected.
        MutexKind::Default
    } else {
        throw_unsup_format!("unsupported type of mutex: {kind}");
    })
}
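
// Example of why `PTHREAD_MUTEX_DEFAULT` is checked *last*: on glibc,
// PTHREAD_MUTEX_DEFAULT and PTHREAD_MUTEX_NORMAL are both 0, so an explicit
// `pthread_mutexattr_settype(attr, PTHREAD_MUTEX_NORMAL)` must hit the NORMAL
// branch first and yield `MutexKind::Normal`; only the UNCHANGED flag (or a
// DEFAULT value distinct from all other kinds) falls through to
// `MutexKind::Default`.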

// # pthread_mutex_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32

/// The mutex kind.
#[derive(Debug, Clone, Copy)]
enum MutexKind {
    Normal,
    Default,
    Recursive,
    ErrorCheck,
}

#[derive(Debug, Clone)]
struct PthreadMutex {
    mutex_ref: MutexRef,
    kind: MutexKind,
}

/// To ensure an initialized mutex that was moved somewhere else can be distinguished from
/// a statically initialized mutex that is used the first time, we pick some offset within
/// `pthread_mutex_t` and use it as an "initialized" flag.
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_MUTEX_INITIALIZER (but only once):
    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
    if !ecx.machine.pthread_mutex_sanity.replace(true) {
        let check_static_initializer = |name| {
            let static_initializer = ecx.eval_path(&["libc", name]);
            let init_field =
                static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
            let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
            assert_ne!(
                init, LAZY_INIT_COOKIE,
                "{name} is incompatible with our initialization cookie"
            );
        };

        check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
        // Check non-standard initializers.
        match &*ecx.tcx.sess.target.os {
            "linux" => {
                check_static_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP");
            }
            "illumos" | "solaris" | "macos" | "freebsd" | "android" => {
                // No non-standard initializers.
            }
            os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
        }
    }

    interp_ok(offset)
}
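
// Sketch of the lazy-init protocol built on this offset (as used by
// `lazy_sync_init`/`lazy_sync_get_data` below): initialization stores
// LAZY_INIT_COOKIE in the u32 at `offset`. On later accesses: cookie present
// and interpreter data found at this address => reuse the data; cookie present
// but no data here (the bytes were copied from elsewhere) => "moved after
// first use" error; no cookie => interpret the bytes as a static initializer
// and initialize lazily.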

/// Eagerly create and initialize a new mutex.
fn mutex_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
    kind: MutexKind,
) -> InterpResult<'tcx, PthreadMutex> {
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    let data = PthreadMutex { mutex_ref: MutexRef::new(), kind };
    ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data.clone())?;
    interp_ok(data)
}

/// Returns the mutex data stored at the address that `mutex_ptr` points to.
/// Will raise an error if the mutex has been moved since its first use.
fn mutex_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadMutex>
where
    'tcx: 'a,
{
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    ecx.lazy_sync_get_data(
        &mutex,
        mutex_init_offset(ecx)?,
        || throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
        |ecx| {
            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
            interp_ok(PthreadMutex { mutex_ref: MutexRef::new(), kind })
        },
    )
}

/// Returns the kind of a static initializer.
fn mutex_kind_from_static_initializer<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
    // All the static initializers recognized here *must* be checked in `mutex_init_offset`!
    let is_initializer =
        |name| bytewise_equal_atomic_relaxed(ecx, mutex, &ecx.eval_path(&["libc", name]));

    // PTHREAD_MUTEX_INITIALIZER is recognized on all targets.
    if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
        return interp_ok(MutexKind::Default);
    }
    // Support additional platform-specific initializers.
    match &*ecx.tcx.sess.target.os {
        "linux" =>
            if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::Recursive);
            } else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::ErrorCheck);
            },
        _ => {}
    }
    throw_unsup_format!("unsupported static initializer used for `pthread_mutex_t`");
}
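
// Illustrative user-side code (a sketch assuming the `libc` crate; the `Sync`
// wrapper a real static would need is omitted) that ends up in this function:
//
//     static LOCK: UnsafeCell<libc::pthread_mutex_t> =
//         UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER);
//     // The first `pthread_mutex_lock(LOCK.get())` finds no LAZY_INIT_COOKIE,
//     // so `mutex_get_data` classifies the bytes here: MutexKind::Default.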

// # pthread_rwlock_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32

#[derive(Debug, Clone)]
struct PthreadRwLock {
    rwlock_ref: RwLockRef,
}

fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_RWLOCK_INITIALIZER (but only once):
    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
    if !ecx.machine.pthread_rwlock_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

fn rwlock_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
    'tcx: 'a,
{
    let rwlock = ecx.deref_pointer_as(rwlock_ptr, ecx.libc_ty_layout("pthread_rwlock_t"))?;
    ecx.lazy_sync_get_data(
        &rwlock,
        rwlock_init_offset(ecx)?,
        || throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
        |ecx| {
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &rwlock,
                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
            }
            interp_ok(PthreadRwLock { rwlock_ref: RwLockRef::new() })
        },
    )
}

// # pthread_condattr_t
// We store some data directly inside the type, ignoring the platform layout:
// - clock: i32

#[inline]
fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS does not have a clock attribute.
        os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
    })
}

fn condattr_get_clock_id<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, Scalar> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )
}

fn condattr_set_clock_id<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    clock_id: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        Scalar::from_i32(clock_id),
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )
}

// # pthread_cond_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32

fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
        // macOS stores a signature in the first bytes, so we move to offset 4.
        "macos" => 4,
        os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

    // Sanity-check this against PTHREAD_COND_INITIALIZER (but only once):
    // the `init` field must start out not equal to LAZY_INIT_COOKIE.
    if !ecx.machine.pthread_condvar_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_COND_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

#[derive(Debug, Clone)]
struct PthreadCondvar {
    condvar_ref: CondvarRef,
    clock: TimeoutClock,
}

fn cond_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
    clock: TimeoutClock,
) -> InterpResult<'tcx, PthreadCondvar> {
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    let data = PthreadCondvar { condvar_ref: CondvarRef::new(), clock };
    ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data.clone())?;
    interp_ok(data)
}

fn cond_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
    'tcx: 'a,
{
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    ecx.lazy_sync_get_data(
        &cond,
        cond_init_offset(ecx)?,
        || throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
        |ecx| {
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &cond,
                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
            }
            // This used the static initializer. The clock there is always CLOCK_REALTIME.
            interp_ok(PthreadCondvar {
                condvar_ref: CondvarRef::new(),
                clock: TimeoutClock::RealTime,
            })
        },
    )
}
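
// Illustrative consequence (sketch): a condvar only ever initialized via
// `PTHREAD_COND_INITIALIZER` measures `pthread_cond_timedwait` timeouts
// against CLOCK_REALTIME; selecting a different clock (e.g. CLOCK_MONOTONIC,
// where supported) requires `pthread_cond_init` with an attribute set up via
// `pthread_condattr_setclock`.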

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;

        interp_ok(())
    }

    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: &OpTy<'tcx>,
        kind_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.to_i32()?;
        if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
        {
            // Make sure we do not mix this up with the "unchanged" kind.
            assert_ne!(kind, PTHREAD_MUTEX_KIND_UNCHANGED);
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_mutexattr is UB, so check to make sure it's not uninit.
        mutexattr_get_kind(this, attr_op)?;

        // To catch double-destroys, we de-initialize the mutexattr.
        // This is technically not right and might lead to false positives. For example, the below
        // code is *likely* sound, even assuming uninit numbers are UB, but Miri complains.
        //
        // let mut x: MaybeUninit<libc::pthread_mutexattr_t> = MaybeUninit::zeroed();
        // libc::pthread_mutexattr_init(x.as_mut_ptr());
        // libc::pthread_mutexattr_destroy(x.as_mut_ptr());
        // x.assume_init();
        //
        // However, the way libstd uses the pthread APIs works in our favor here, so we can get away with this.
        // This can always be revisited to have some external state to catch double-destroys
        // but not complain about the above code. See https://github.com/rust-lang/miri/pull/1933
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_mutex_init(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        let kind = if this.ptr_is_null(attr)? {
            MutexKind::Default
        } else {
            mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
        };

        mutex_create(this, mutex_op, kind)?;

        interp_ok(())
    }

    fn pthread_mutex_lock(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        let ret = if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                this.mutex_enqueue_and_block(
                    mutex.mutex_ref,
                    Some((Scalar::from_i32(0), dest.clone())),
                );
                return interp_ok(());
            } else {
                // Trying to acquire the same mutex again.
                match mutex.kind {
                    MutexKind::Default =>
                        throw_ub_format!(
                            "trying to acquire a default mutex already locked by the current thread"
                        ),
                    MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
                    MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref);
            0
        };
        this.write_scalar(Scalar::from_i32(ret), dest)?;
        interp_ok(())
    }
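
    // Summary of `pthread_mutex_lock`'s relock behavior above, for a mutex
    // already owned by the current thread: Default => UB error, Normal =>
    // deadlock (as POSIX specifies for PTHREAD_MUTEX_NORMAL), ErrorCheck =>
    // EDEADLK, Recursive => the lock count is bumped and 0 is returned.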

    fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        interp_ok(Scalar::from_i32(if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                this.eval_libc_i32("EBUSY")
            } else {
                match mutex.kind {
                    MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
                        this.eval_libc_i32("EBUSY"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
            // The mutex is unlocked. Let's lock it.
            this.mutex_lock(&mutex.mutex_ref);
            0
        }))
    }

    fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
            // The mutex was locked by the current thread.
            interp_ok(Scalar::from_i32(0))
        } else {
            // The mutex was locked by another thread or not locked at all. See
            // the “Unlock When Not Owner” column in
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
            match mutex.kind {
                MutexKind::Default =>
                    throw_ub_format!(
                        "unlocked a default mutex that was not locked by the current thread"
                    ),
                MutexKind::Normal =>
                    throw_ub_format!(
                        "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                    ),
                MutexKind::ErrorCheck | MutexKind::Recursive =>
                    interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
            }
        }
    }

    fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if mutex.mutex_ref.owner().is_some() {
            throw_ub_format!("destroyed a locked mutex");
        }

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        this.write_uninit(
            &this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?,
        )?;
        // FIXME: delete interpreter state associated with this mutex.

        interp_ok(())
    }

    fn pthread_rwlock_rdlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            this.rwlock_enqueue_and_block_reader(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_wrlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            // Note: this will deadlock if the lock is already locked by this
            // thread in any way.
            //
            // Relevant documentation:
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
            // An in-depth discussion on this topic:
            // https://github.com/rust-lang/rust/issues/53127
            //
            // FIXME: Detect and report the deadlock proactively. (We currently
            // report the deadlock only when no thread can continue execution,
            // but we could detect that this lock is already locked and report
            // an error.)
            this.rwlock_enqueue_and_block_writer(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if this.rwlock_reader_unlock(&rwlock.rwlock_ref)?
            || this.rwlock_writer_unlock(&rwlock.rwlock_ref)?
        {
            interp_ok(())
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
        }
    }

    fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            throw_ub_format!("destroyed a locked rwlock");
        }

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        this.write_uninit(
            &this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?,
        )?;
        // FIXME: delete interpreter state associated with this rwlock.

        interp_ok(())
    }

    fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // no clock attribute on macOS
        if this.tcx.sess.target.os != "macos" {
            // The default value of the clock attribute shall refer to the system
            // clock.
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html
            let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
            condattr_set_clock_id(this, attr_op, default_clock_id)?;
        }

        interp_ok(())
    }

    fn pthread_condattr_setclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clock_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let clock_id = this.read_scalar(clock_id_op)?;
        if this.parse_clockid(clock_id).is_some() {
            condattr_set_clock_id(this, attr_op, clock_id.to_i32()?)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }
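
    // Note on `pthread_condattr_setclock` above (an assumption about Miri's
    // clock support): `parse_clockid` accepts only the clocks Miri can handle
    // for timeouts, at least CLOCK_REALTIME and CLOCK_MONOTONIC; everything
    // else takes the EINVAL path, matching POSIX.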

    fn pthread_condattr_getclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clk_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let clock_id = condattr_get_clock_id(this, attr_op)?;
        this.write_scalar(
            clock_id,
            &this.deref_pointer_as(clk_id_op, this.libc_ty_layout("clockid_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Destroying an uninit pthread_condattr is UB, so check to make sure it's not uninit.
        // There's no clock attribute on macOS.
        if this.tcx.sess.target.os != "macos" {
            condattr_get_clock_id(this, attr_op)?;
        }

        // De-init the entire thing.
        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_cond_init(
        &mut self,
        cond_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        // Default clock if `attr` is null, and on macOS where there is no clock attribute.
        let clock_id = if this.ptr_is_null(attr)? || this.tcx.sess.target.os == "macos" {
            this.eval_libc("CLOCK_REALTIME")
        } else {
            condattr_get_clock_id(this, attr_op)?
        };
        let Some(clock) = this.parse_clockid(clock_id) else {
            // This is UB, since an unsupported clock cannot end up in the attribute when it was
            // set via `pthread_condattr_setclock` (which rejects such clocks with EINVAL).
            throw_ub_format!("pthread_cond_init: invalid attributes (unsupported clock)")
        };

        cond_create(this, cond_op, clock)?;

        interp_ok(())
    }

    fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
        this.condvar_signal(&condvar)?;
        interp_ok(())
    }

    fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
        while this.condvar_signal(&condvar)? {}
        interp_ok(())
    }

    fn pthread_cond_wait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = cond_get_data(this, cond_op)?.clone();
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        this.condvar_wait(
            data.condvar_ref,
            mutex_ref,
            None, // no timeout
            Scalar::from_i32(0),
            Scalar::from_i32(0), // retval_timeout -- unused
            dest.clone(),
        )?;

        interp_ok(())
    }

    fn pthread_cond_timedwait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        abstime_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = cond_get_data(this, cond_op)?.clone();
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        // Extract the timeout.
        let duration = match this
            .read_timespec(&this.deref_pointer_as(abstime_op, this.libc_ty_layout("timespec"))?)?
        {
            Some(duration) => duration,
            None => {
                let einval = this.eval_libc("EINVAL");
                this.write_scalar(einval, dest)?;
                return interp_ok(());
            }
        };
        if data.clock == TimeoutClock::RealTime {
            this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
        }

        this.condvar_wait(
            data.condvar_ref,
            mutex_ref,
            Some((data.clock, TimeoutAnchor::Absolute, duration)),
            Scalar::from_i32(0),
            this.eval_libc("ETIMEDOUT"), // retval_timeout
            dest.clone(),
        )?;

        interp_ok(())
    }
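
    // Typical caller pattern for `pthread_cond_timedwait` above (an
    // illustrative user-side sketch via the `libc` crate; `cond` and `mutex`
    // are assumed raw pointers): the timeout is an *absolute* timespec on the
    // condvar's clock, hence `TimeoutAnchor::Absolute`.
    //
    //     let mut now: libc::timespec = unsafe { std::mem::zeroed() };
    //     unsafe { libc::clock_gettime(libc::CLOCK_REALTIME, &mut now) };
    //     now.tv_sec += 1; // give up one second from now
    //     let r = unsafe { libc::pthread_cond_timedwait(cond, mutex, &now) };
    //     // r == libc::ETIMEDOUT once the deadline has passed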

    fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.
        let condvar = &cond_get_data(this, cond_op)?.condvar_ref;
        if condvar.is_awaited() {
            throw_ub_format!("destroying an awaited condition variable");
        }

        // This might lead to false positives, see comment in pthread_mutexattr_destroy
        this.write_uninit(&this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?)?;
        // FIXME: delete interpreter state associated with this condvar.

        interp_ok(())
    }
}