diff --git a/src/lazy.rs b/src/lazy.rs
index 100ce1ea..693bb7ff 100644
--- a/src/lazy.rs
+++ b/src/lazy.rs
@@ -1,4 +1,8 @@
-use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+#![allow(dead_code)]
+use core::{
+    ffi::c_void,
+    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
+};
 
 // This structure represents a lazily initialized static usize value. Useful
 // when it is preferable to just rerun initialization instead of locking.
@@ -21,22 +25,22 @@ use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
 pub(crate) struct LazyUsize(AtomicUsize);
 
 impl LazyUsize {
+    // The initialization is not completed.
+    const UNINIT: usize = usize::max_value();
+
     pub const fn new() -> Self {
         Self(AtomicUsize::new(Self::UNINIT))
     }
 
-    // The initialization is not completed.
-    pub const UNINIT: usize = usize::max_value();
-
     // Runs the init() function at most once, returning the value of some run of
     // init(). Multiple callers can run their init() functions in parallel.
    // init() should always return the same value, if it succeeds.
     pub fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize {
         // Relaxed ordering is fine, as we only have a single atomic variable.
-        let mut val = self.0.load(Relaxed);
+        let mut val = self.0.load(Ordering::Relaxed);
         if val == Self::UNINIT {
             val = init();
-            self.0.store(val, Relaxed);
+            self.0.store(val, Ordering::Relaxed);
         }
         val
     }
@@ -54,3 +58,53 @@ impl LazyBool {
         self.0.unsync_init(|| init() as usize) != 0
     }
 }
+
+/// This structure represents a lazily initialized static pointer value.
+///
+/// It's intended to be used for weak linking of a C function that may
+/// or may not be present at runtime.
+///
+/// Based off of the DlsymWeak struct in libstd:
+/// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
+/// except that the caller must manually cast self.ptr() to a function pointer.
+pub struct LazyPtr {
+    addr: AtomicPtr<c_void>,
+}
+
+impl LazyPtr {
+    /// A non-null pointer value which indicates we are uninitialized.
+    ///
+    /// This constant should ideally not be a valid pointer. However,
+    /// if by chance initialization function passed to the `unsync_init`
+    /// method does return UNINIT, there will not be undefined behavior.
+    /// The initialization function will just be called each time `get()`
+    /// is called. This would be inefficient, but correct.
+    const UNINIT: *mut c_void = !0usize as *mut c_void;
+
+    /// Construct new `LazyPtr` in uninitialized state.
+    pub const fn new() -> Self {
+        Self {
+            addr: AtomicPtr::new(Self::UNINIT),
+        }
+    }
+
+    // Runs the init() function at most once, returning the value of some run of
+    // init(). Multiple callers can run their init() functions in parallel.
+    // init() should always return the same value, if it succeeds.
+    pub fn unsync_init(&self, init: impl Fn() -> *mut c_void) -> *mut c_void {
+        // Despite having only a single atomic variable (self.addr), we still
+        // cannot always use Ordering::Relaxed, as we need to make sure a
+        // successful call to `init` is "ordered before" any data read through
+        // the returned pointer (which occurs when the function is called).
+        // Our implementation mirrors that of the one in libstd, meaning that
+        // the use of non-Relaxed operations is probably unnecessary.
+        match self.addr.load(Ordering::Acquire) {
+            Self::UNINIT => {
+                let addr = init();
+                self.addr.store(addr, Ordering::Release);
+                addr
+            }
+            addr => addr,
+        }
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index bc3695b6..eb8c7fc5 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -306,6 +306,7 @@ cfg_if!
         #[path = "solaris.rs"] mod imp;
     } else if #[cfg(target_os = "netbsd")] {
         mod util_libc;
+        mod lazy;
         #[path = "netbsd.rs"] mod imp;
     } else if #[cfg(target_os = "fuchsia")] {
         #[path = "fuchsia.rs"] mod imp;
diff --git a/src/netbsd.rs b/src/netbsd.rs
index b8a770f5..4211a865 100644
--- a/src/netbsd.rs
+++ b/src/netbsd.rs
@@ -1,9 +1,6 @@
 //! Implementation for NetBSD
-use crate::{
-    util_libc::{sys_fill_exact, Weak},
-    Error,
-};
-use core::{mem::MaybeUninit, ptr};
+use crate::{lazy::LazyPtr, util_libc::sys_fill_exact, Error};
+use core::{ffi::c_void, mem::MaybeUninit, ptr};
 
 fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
     static MIB: [libc::c_int; 2] = [libc::CTL_KERN, libc::KERN_ARND];
@@ -27,10 +24,18 @@ fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
 type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t;
 
+// getrandom(2) was introduced in NetBSD 10.0
+static GETRANDOM: LazyPtr = LazyPtr::new();
+
+fn dlsym_getrandom() -> *mut c_void {
+    static NAME: &[u8] = b"getrandom\0";
+    let name_ptr = NAME.as_ptr() as *const libc::c_char;
+    unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }
+}
+
 pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    // getrandom(2) was introduced in NetBSD 10.0
-    static GETRANDOM: Weak = unsafe { Weak::new("getrandom\0") };
-    if let Some(fptr) = GETRANDOM.ptr() {
+    let fptr = GETRANDOM.unsync_init(dlsym_getrandom);
+    if !fptr.is_null() {
         let func: GetRandomFn = unsafe { core::mem::transmute(fptr) };
         return sys_fill_exact(dest, |buf| unsafe {
             func(buf.as_mut_ptr() as *mut u8, buf.len(), 0)
         });
diff --git a/src/util_libc.rs b/src/util_libc.rs
index 129362d5..765d5fd4 100644
--- a/src/util_libc.rs
+++ b/src/util_libc.rs
@@ -1,12 +1,6 @@
 #![allow(dead_code)]
 use crate::Error;
-use core::{
-    mem::MaybeUninit,
-    num::NonZeroU32,
-    ptr::NonNull,
-    sync::atomic::{fence, AtomicPtr, Ordering},
-};
-use libc::c_void;
+use core::{mem::MaybeUninit, num::NonZeroU32};
 
 cfg_if! {
     if #[cfg(any(target_os = "netbsd", target_os = "openbsd", target_os = "android"))] {
@@ -76,62 +70,6 @@ pub fn sys_fill_exact(
     Ok(())
 }
 
-// A "weak" binding to a C function that may or may not be present at runtime.
-// Used for supporting newer OS features while still building on older systems.
-// Based off of the DlsymWeak struct in libstd:
-// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
-// except that the caller must manually cast self.ptr() to a function pointer.
-pub struct Weak {
-    name: &'static str,
-    addr: AtomicPtr<c_void>,
-}
-
-impl Weak {
-    // A non-null pointer value which indicates we are uninitialized. This
-    // constant should ideally not be a valid address of a function pointer.
-    // However, if by chance libc::dlsym does return UNINIT, there will not
-    // be undefined behavior. libc::dlsym will just be called each time ptr()
-    // is called. This would be inefficient, but correct.
-    // TODO: Replace with core::ptr::invalid_mut(1) when that is stable.
-    const UNINIT: *mut c_void = 1 as *mut c_void;
-
-    // Construct a binding to a C function with a given name. This function is
-    // unsafe because `name` _must_ be null terminated.
-    pub const unsafe fn new(name: &'static str) -> Self {
-        Self {
-            name,
-            addr: AtomicPtr::new(Self::UNINIT),
-        }
-    }
-
-    // Return the address of a function if present at runtime. Otherwise,
-    // return None. Multiple callers can call ptr() concurrently. It will
-    // always return _some_ value returned by libc::dlsym. However, the
-    // dlsym function may be called multiple times.
-    pub fn ptr(&self) -> Option<NonNull<c_void>> {
-        // Despite having only a single atomic variable (self.addr), we still
-        // cannot always use Ordering::Relaxed, as we need to make sure a
-        // successful call to dlsym() is "ordered before" any data read through
-        // the returned pointer (which occurs when the function is called).
-        // Our implementation mirrors that of the one in libstd, meaning that
-        // the use of non-Relaxed operations is probably unnecessary.
-        match self.addr.load(Ordering::Relaxed) {
-            Self::UNINIT => {
-                let symbol = self.name.as_ptr() as *const _;
-                let addr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, symbol) };
-                // Synchronizes with the Acquire fence below
-                self.addr.store(addr, Ordering::Release);
-                NonNull::new(addr)
-            }
-            addr => {
-                let func = NonNull::new(addr)?;
-                fence(Ordering::Acquire);
-                Some(func)
-            }
-        }
-    }
-}
-
 // SAFETY: path must be null terminated, FD must be manually closed.
 pub unsafe fn open_readonly(path: &str) -> Result<libc::c_int, Error> {
     debug_assert_eq!(path.as_bytes().last(), Some(&0));