From e1e18ab719be724e3ebf4810a0b6f43de645da33 Mon Sep 17 00:00:00 2001
From: Tamir Duberstein
Date: Mon, 24 Nov 2025 16:13:19 -0500
Subject: [PATCH 1/4] Unify lazy atomics in entropy backends

Replace ad-hoc atomic lazy caches with shared lazy helpers in the UEFI,
Linux/Android fallback, NetBSD, RDRAND, and RNDR backends. Split the old
`LazyUsize`-based helper into dedicated `LazyBool` and `LazyPtr` types so
boolean and pointer caches use the same initialization contract. This
reduces duplicated cache logic and keeps backend probing/fallback
semantics aligned while preserving the existing retry-until-cached
behavior.
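As an illustration of the new pattern, backend-side caches now live in
function-local statics (a minimal sketch; `probe` is a hypothetical
feature check, not code from this patch):

    #[path = "../utils/lazy_bool.rs"]
    mod lazy;

    fn is_good() -> bool {
        static GOOD: lazy::LazyBool = lazy::LazyBool::new();
        // `probe` may run more than once if callers race; once a
        // result is stored, all later calls reuse it.
        GOOD.unsync_init(probe)
    }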
---
 src/backends/efi_rng.rs                     | 18 +++---
 src/backends/linux_android_with_fallback.rs | 35 ++++-------
 src/backends/netbsd.rs                      | 42 ++++------
 src/backends/rdrand.rs                      | 24 +++++---
 src/backends/rndr.rs                        |  3 +-
 src/utils/lazy.rs                           | 64 --------------------
 src/utils/lazy_bool.rs                      | 37 ++++++++++++
 src/utils/lazy_ptr.rs                       | 65 +++++++++++++++++++++
 8 files changed, 154 insertions(+), 134 deletions(-)
 delete mode 100644 src/utils/lazy.rs
 create mode 100644 src/utils/lazy_bool.rs
 create mode 100644 src/utils/lazy_ptr.rs

diff --git a/src/backends/efi_rng.rs b/src/backends/efi_rng.rs
index 9d5888402..f667c7fa2 100644
--- a/src/backends/efi_rng.rs
+++ b/src/backends/efi_rng.rs
@@ -2,8 +2,7 @@
 use crate::Error;
 use core::{
     mem::MaybeUninit,
-    ptr::{self, NonNull, null_mut},
-    sync::atomic::{AtomicPtr, Ordering::Relaxed},
+    ptr::{self, NonNull},
 };
 use r_efi::{
     efi::{BootServices, Handle},
@@ -17,8 +16,6 @@ pub use crate::util::{inner_u32, inner_u64};
 #[cfg(not(target_os = "uefi"))]
 compile_error!("`efi_rng` backend can be enabled only for UEFI targets!");
 
-static RNG_PROTOCOL: AtomicPtr<rng::Protocol> = AtomicPtr::new(null_mut());
-
 #[cold]
 #[inline(never)]
 fn init() -> Result<NonNull<rng::Protocol>, Error> {
@@ -36,7 +33,7 @@ fn init() -> Result<NonNull<rng::Protocol>, Error> {
         ((*boot_services.as_ptr()).locate_handle)(
             r_efi::efi::BY_PROTOCOL,
             &mut guid,
-            null_mut(),
+            ptr::null_mut(),
             &mut buf_size,
             handles.as_mut_ptr(),
         )
@@ -88,7 +85,6 @@ fn init() -> Result<NonNull<rng::Protocol>, Error> {
             continue;
         }
 
-        RNG_PROTOCOL.store(protocol.as_ptr(), Relaxed);
         return Ok(protocol);
     }
     Err(Error::NO_RNG_HANDLE)
@@ -96,10 +92,12 @@
 
 #[inline]
 pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    let protocol = match NonNull::new(RNG_PROTOCOL.load(Relaxed)) {
-        Some(p) => p,
-        None => init()?,
-    };
+    #[path = "../utils/lazy_ptr.rs"]
+    mod lazy;
+
+    static RNG_PROTOCOL: lazy::LazyPtr<rng::Protocol> = lazy::LazyPtr::new();
+
+    let protocol = RNG_PROTOCOL.try_unsync_init(init)?;
 
     let mut alg_guid = rng::ALGORITHM_RAW;
     let ret = unsafe {
diff --git a/src/backends/linux_android_with_fallback.rs b/src/backends/linux_android_with_fallback.rs
index 48ce62886..c32baeead 100644
--- a/src/backends/linux_android_with_fallback.rs
+++ b/src/backends/linux_android_with_fallback.rs
@@ -4,8 +4,7 @@
 use crate::Error;
 use core::{
     ffi::c_void,
     mem::{MaybeUninit, transmute},
-    ptr::NonNull,
-    sync::atomic::{AtomicPtr, Ordering},
+    ptr::{self, NonNull},
 };
 use use_file::utils;
 
@@ -17,18 +16,12 @@ type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;
 /// Sentinel value which indicates that `getrandom` syscall is not available
 /// or not supported by kernel.
 const NOT_AVAILABLE: NonNull<c_void> = unsafe { NonNull::new_unchecked(usize::MAX as *mut c_void) };
 
-static GETRANDOM_FN: AtomicPtr<c_void> = AtomicPtr::new(core::ptr::null_mut());
-
 #[cold]
 #[inline(never)]
 fn init() -> NonNull<c_void> {
     // Use static linking to `libc::getrandom` on MUSL targets and `dlsym` everywhere else
     #[cfg(not(target_env = "musl"))]
-    let raw_ptr = {
-        static NAME: &[u8] = b"getrandom\0";
-        let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
-        unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }
-    };
+    let raw_ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) };
     #[cfg(target_env = "musl")]
     let raw_ptr = {
         let fptr: GetRandomFn = libc::getrandom;
@@ -37,10 +30,9 @@ fn init() -> NonNull<c_void> {
 
     let res_ptr = match NonNull::new(raw_ptr) {
         Some(fptr) => {
-            let getrandom_fn = unsafe { transmute::<NonNull<c_void>, GetRandomFn>(fptr) };
-            let dangling_ptr = NonNull::dangling().as_ptr();
+            let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
             // Check that `getrandom` syscall is supported by kernel
-            let res = unsafe { getrandom_fn(dangling_ptr, 0, 0) };
+            let res = unsafe { getrandom_fn(ptr::dangling_mut(), 0, 0) };
             if cfg!(getrandom_test_linux_fallback) {
                 NOT_AVAILABLE
             } else if res.is_negative() {
@@ -65,7 +57,6 @@ fn init() -> NonNull<c_void> {
         panic!("Fallback is triggered with enabled `getrandom_test_linux_without_fallback`")
     }
 
-    GETRANDOM_FN.store(res_ptr.as_ptr(), Ordering::Release);
     res_ptr
 }
 
@@ -77,23 +68,17 @@ fn use_file_fallback(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
 
 #[inline]
 pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    // Despite being only a single atomic variable, we still cannot always use
-    // Ordering::Relaxed, as we need to make sure a successful call to `init`
-    // is "ordered before" any data read through the returned pointer (which
-    // occurs when the function is called). Our implementation mirrors that of
-    // the one in libstd, meaning that the use of non-Relaxed operations is
-    // probably unnecessary.
-    let raw_ptr = GETRANDOM_FN.load(Ordering::Acquire);
-    let fptr = match NonNull::new(raw_ptr) {
-        Some(p) => p,
-        None => init(),
-    };
+    #[path = "../utils/lazy_ptr.rs"]
+    mod lazy;
+
+    static GETRANDOM_FN: lazy::LazyPtr<c_void> = lazy::LazyPtr::new();
 
+    let fptr = GETRANDOM_FN.unsync_init(init);
     if fptr == NOT_AVAILABLE {
         use_file_fallback(dest)
     } else {
         // note: `transmute` is currently the only way to convert a pointer into a function reference
-        let getrandom_fn = unsafe { transmute::<NonNull<c_void>, GetRandomFn>(fptr) };
+        let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
         utils::sys_fill_exact(dest, |buf| unsafe {
             getrandom_fn(buf.as_mut_ptr().cast(), buf.len(), 0)
         })
diff --git a/src/backends/netbsd.rs b/src/backends/netbsd.rs
index 0da0d492c..ad42475b2 100644
--- a/src/backends/netbsd.rs
+++ b/src/backends/netbsd.rs
@@ -8,8 +8,7 @@
 use core::{
     cmp,
     ffi::c_void,
     mem::{self, MaybeUninit},
-    ptr,
-    sync::atomic::{AtomicPtr, Ordering},
+    ptr::{self, NonNull},
 };
 
 pub use crate::util::{inner_u32, inner_u64};
@@ -42,36 +41,29 @@ unsafe extern "C" fn polyfill_using_kern_arand(
 
 type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;
 
-static GETRANDOM: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
-
 #[cold]
 #[inline(never)]
-fn init() -> *mut c_void {
-    static NAME: &[u8] = b"getrandom\0";
-    let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
-    let mut ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) };
-    if ptr.is_null() || cfg!(getrandom_test_netbsd_fallback) {
-        // Verify `polyfill_using_kern_arand` has the right signature.
-        const POLYFILL: GetRandomFn = polyfill_using_kern_arand;
-        ptr = POLYFILL as *mut c_void;
+fn init() -> NonNull<c_void> {
+    let ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) };
+    if !cfg!(getrandom_test_netbsd_fallback) {
+        if let Some(ptr) = NonNull::new(ptr) {
+            return ptr;
+        }
     }
-    GETRANDOM.store(ptr, Ordering::Release);
-    ptr
+    // Verify `polyfill_using_kern_arand` has the right signature.
+    const POLYFILL: GetRandomFn = polyfill_using_kern_arand;
+    unsafe { NonNull::new_unchecked(POLYFILL as *mut c_void) }
 }
 
 #[inline]
 pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    // Despite being only a single atomic variable, we still cannot always use
-    // Ordering::Relaxed, as we need to make sure a successful call to `init`
-    // is "ordered before" any data read through the returned pointer (which
-    // occurs when the function is called). Our implementation mirrors that of
-    // the one in libstd, meaning that the use of non-Relaxed operations is
-    // probably unnecessary.
-    let mut fptr = GETRANDOM.load(Ordering::Acquire);
-    if fptr.is_null() {
-        fptr = init();
-    }
-    let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr) };
+    #[path = "../utils/lazy_ptr.rs"]
+    mod lazy;
+
+    static GETRANDOM_FN: lazy::LazyPtr<c_void> = lazy::LazyPtr::new();
+
+    let fptr = GETRANDOM_FN.unsync_init(init);
+    let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
 
     utils::sys_fill_exact(dest, |buf| unsafe {
         fptr(buf.as_mut_ptr().cast::<c_void>(), buf.len(), 0)
     })
diff --git a/src/backends/rdrand.rs b/src/backends/rdrand.rs
index d39c31512..6e615eb89 100644
--- a/src/backends/rdrand.rs
+++ b/src/backends/rdrand.rs
@@ -2,9 +2,6 @@
 use crate::{Error, util::slice_as_uninit};
 use core::mem::{MaybeUninit, size_of};
 
-#[path = "../utils/lazy.rs"]
-mod lazy;
-
 #[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))]
 compile_error!("`rdrand` backend can be enabled only for x86 and x86-64 targets!");
 
@@ -20,8 +17,6 @@ cfg_if! {
     }
 }
 
-static RDRAND_GOOD: lazy::LazyBool = lazy::LazyBool::new();
-
 // Recommendation from "Intel® Digital Random Number Generator (DRNG) Software
 // Implementation Guide" - Section 5.2.1 and "Intel® 64 and IA-32 Architectures
 // Software Developer’s Manual" - Volume 1 - Section 7.3.17.1.
@@ -72,7 +67,9 @@ fn self_test() -> bool {
     fails <= 2
 }
 
-fn is_rdrand_good() -> bool {
+#[cold]
+#[inline(never)]
+fn init() -> bool {
     #[cfg(not(target_feature = "rdrand"))]
     {
         // SAFETY: All Rust x86 targets are new enough to have CPUID, and we
@@ -115,6 +112,15 @@ fn is_rdrand_good() -> bool {
     unsafe { self_test() }
 }
 
+fn is_rdrand_good() -> bool {
+    #[path = "../utils/lazy_bool.rs"]
+    mod lazy;
+
+    static RDRAND_GOOD: lazy::LazyBool = lazy::LazyBool::new();
+
+    RDRAND_GOOD.unsync_init(init)
+}
+
 #[target_feature(enable = "rdrand")]
 fn rdrand_exact(dest: &mut [MaybeUninit<u8>]) -> Option<()> {
     // We use chunks_exact_mut instead of chunks_mut as it allows almost all
@@ -162,7 +168,7 @@ fn rdrand_u64() -> Option<u64> {
 
 #[inline]
 pub fn inner_u32() -> Result<u32, Error> {
-    if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
+    if !is_rdrand_good() {
         return Err(Error::NO_RDRAND);
     }
     // SAFETY: After this point, we know rdrand is supported.
@@ -171,7 +177,7 @@ pub fn inner_u32() -> Result<u32, Error> {
 
 #[inline]
 pub fn inner_u64() -> Result<u64, Error> {
-    if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
+    if !is_rdrand_good() {
         return Err(Error::NO_RDRAND);
     }
     // SAFETY: After this point, we know rdrand is supported.
@@ -180,7 +186,7 @@ pub fn inner_u64() -> Result<u64, Error> {
 
 #[inline]
 pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
+    if !is_rdrand_good() {
         return Err(Error::NO_RDRAND);
     }
     // SAFETY: After this point, we know rdrand is supported.
diff --git a/src/backends/rndr.rs b/src/backends/rndr.rs
index 1a24b49f7..50849d506 100644
--- a/src/backends/rndr.rs
+++ b/src/backends/rndr.rs
@@ -69,8 +69,9 @@ fn is_rndr_available() -> bool {
 
 #[cfg(not(target_feature = "rand"))]
 fn is_rndr_available() -> bool {
-    #[path = "../utils/lazy.rs"]
+    #[path = "../utils/lazy_bool.rs"]
     mod lazy;
+
     static RNDR_GOOD: lazy::LazyBool = lazy::LazyBool::new();
 
     cfg_if::cfg_if! {
diff --git a/src/utils/lazy.rs b/src/utils/lazy.rs
deleted file mode 100644
index b191aa6d7..000000000
--- a/src/utils/lazy.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-//! Helpers built around pointer-sized atomics.
-use core::sync::atomic::{AtomicUsize, Ordering};
-
-// This structure represents a lazily initialized static usize value. Useful
-// when it is preferable to just rerun initialization instead of locking.
-// unsync_init will invoke an init() function until it succeeds, then return the
-// cached value for future calls.
-//
-// unsync_init supports init() "failing". If the init() method returns UNINIT,
-// that value will be returned as normal, but will not be cached.
-//
-// Users should only depend on the _value_ returned by init() functions.
-// Specifically, for the following init() function:
-//     fn init() -> usize {
-//         a();
-//         let v = b();
-//         c();
-//         v
-//     }
-// the effects of c() or writes to shared memory will not necessarily be
-// observed and additional synchronization methods may be needed.
-struct LazyUsize(AtomicUsize);
-
-impl LazyUsize {
-    // The initialization is not completed.
-    const UNINIT: usize = usize::MAX;
-
-    const fn new() -> Self {
-        Self(AtomicUsize::new(Self::UNINIT))
-    }
-
-    // Runs the init() function at least once, returning the value of some run of
-    // init(). Multiple callers can run their init() functions in parallel.
-    // init() should always return the same value, if it succeeds.
-    fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize {
-        #[cold]
-        fn do_init(this: &LazyUsize, init: impl FnOnce() -> usize) -> usize {
-            let val = init();
-            this.0.store(val, Ordering::Relaxed);
-            val
-        }
-
-        // Relaxed ordering is fine, as we only have a single atomic variable.
-        let val = self.0.load(Ordering::Relaxed);
-        if val != Self::UNINIT {
-            val
-        } else {
-            do_init(self, init)
-        }
-    }
-}
-
-// Identical to LazyUsize except with bool instead of usize.
-pub(crate) struct LazyBool(LazyUsize);
-
-impl LazyBool {
-    pub const fn new() -> Self {
-        Self(LazyUsize::new())
-    }
-
-    pub fn unsync_init(&self, init: impl FnOnce() -> bool) -> bool {
-        self.0.unsync_init(|| usize::from(init())) != 0
-    }
-}
diff --git a/src/utils/lazy_bool.rs b/src/utils/lazy_bool.rs
new file mode 100644
index 000000000..b9392258d
--- /dev/null
+++ b/src/utils/lazy_bool.rs
@@ -0,0 +1,37 @@
+//! Lazily caches a `bool` in an `AtomicU8`.
+//!
+//! Initialization is intentionally unsynchronized: concurrent callers may race
+//! and run `init` more than once. Once a value is produced, it is cached and
+//! reused by subsequent calls.
+//!
+//! Uses `Ordering::Relaxed` because this helper only publishes the cached
+//! value itself.
+
+use core::sync::atomic::{AtomicU8, Ordering};
+
+pub(crate) struct LazyBool(AtomicU8);
+
+impl LazyBool {
+    const UNINIT: u8 = 2;
+
+    pub const fn new() -> Self {
+        Self(AtomicU8::new(Self::UNINIT))
+    }
+
+    #[cold]
+    fn do_init(&self, init: impl FnOnce() -> bool) -> bool {
+        let val = u8::from(init());
+        self.0.store(val, Ordering::Relaxed);
+        val != 0
+    }
+
+    #[inline]
+    pub fn unsync_init(&self, init: impl FnOnce() -> bool) -> bool {
+        let val = self.0.load(Ordering::Relaxed);
+        if val != Self::UNINIT {
+            val != 0
+        } else {
+            self.do_init(init)
+        }
+    }
+}
diff --git a/src/utils/lazy_ptr.rs b/src/utils/lazy_ptr.rs
new file mode 100644
index 000000000..027a2f469
--- /dev/null
+++ b/src/utils/lazy_ptr.rs
@@ -0,0 +1,65 @@
+//! Lazily caches a non-null pointer in an `AtomicPtr`.
+//!
+//! Initialization is intentionally unsynchronized: concurrent callers may race
+//! and run `init` more than once. Once a value is produced, it is cached and
+//! reused by subsequent calls.
+//!
+//! For fallible initialization (`try_unsync_init`), only successful values are
+//! cached; errors are returned to the caller and are not cached.
+//!
+//! Uses `Ordering::Relaxed` because this helper only publishes the cached
+//! pointer value. Callers must not rely on this mechanism to synchronize
+//! unrelated memory side effects performed by `init`.
+
+use core::{
+    ptr::{self, NonNull},
+    sync::atomic::{AtomicPtr, Ordering},
+};
+
+pub(crate) struct LazyPtr<T>(AtomicPtr<T>);
+
+#[allow(
+    dead_code,
+    reason = "callers typically use only one of `{try_,}unsync_init`"
+)]
+impl<T> LazyPtr<T> {
+    pub const fn new() -> Self {
+        Self(AtomicPtr::new(ptr::null_mut()))
+    }
+
+    #[cold]
+    fn do_init(&self, init: impl FnOnce() -> NonNull<T>) -> NonNull<T> {
+        let val = init();
+        self.0.store(val.as_ptr(), Ordering::Relaxed);
+        val
+    }
+
+    #[cold]
+    fn try_do_init<E>(
+        &self,
+        init: impl FnOnce() -> Result<NonNull<T>, E>,
+    ) -> Result<NonNull<T>, E> {
+        let val = init()?;
+        self.0.store(val.as_ptr(), Ordering::Relaxed);
+        Ok(val)
+    }
+
+    #[inline]
+    pub fn unsync_init(&self, init: impl FnOnce() -> NonNull<T>) -> NonNull<T> {
+        match NonNull::new(self.0.load(Ordering::Relaxed)) {
+            Some(val) => val,
+            None => self.do_init(init),
+        }
+    }
+
+    #[inline]
+    pub fn try_unsync_init<E>(
+        &self,
+        init: impl FnOnce() -> Result<NonNull<T>, E>,
+    ) -> Result<NonNull<T>, E> {
+        match NonNull::new(self.0.load(Ordering::Relaxed)) {
+            Some(val) => Ok(val),
+            None => self.try_do_init(init),
+        }
+    }
+}
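[Usage sketch, not part of the series: the `try_unsync_init` contract
introduced above, assuming `LazyPtr` is in scope via the `#[path]`
include shown in the backends; `locate` is a hypothetical fallible
lookup. An `Err` from `init` is returned to the caller and never
cached, so each call retries until the first success, which is then
cached for all later calls:

    use core::{ffi::c_void, ptr::NonNull};
    use crate::Error;

    static CACHE: LazyPtr<c_void> = LazyPtr::new();

    fn get() -> Result<NonNull<c_void>, Error> {
        CACHE.try_unsync_init(|| locate().ok_or(Error::UNSUPPORTED))
    }

    fn locate() -> Option<NonNull<c_void>> {
        None // stand-in for a probe that can fail
    }
]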
From 734f0cee0a618ebcc334c51d6ee05127fd0cd897 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?=
 =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?=
Date: Wed, 11 Feb 2026 16:29:25 +0300
Subject: [PATCH 2/4] tweak `LazyBool`

---
 src/utils/lazy_bool.rs | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

diff --git a/src/utils/lazy_bool.rs b/src/utils/lazy_bool.rs
index b9392258d..e58f12dd8 100644
--- a/src/utils/lazy_bool.rs
+++ b/src/utils/lazy_bool.rs
@@ -1,37 +1,39 @@
-//! Lazily caches a `bool` in an `AtomicU8`.
-//!
-//! Initialization is intentionally unsynchronized: concurrent callers may race
-//! and run `init` more than once. Once a value is produced, it is cached and
-//! reused by subsequent calls.
-//!
-//! Uses `Ordering::Relaxed` because this helper only publishes the cached
-//! value itself.
-
-use core::sync::atomic::{AtomicU8, Ordering};
+use core::sync::atomic::{AtomicU8, Ordering::Relaxed};
 
+/// Lazily caches a `bool` in an `AtomicU8`.
+///
+/// Initialization is intentionally unsynchronized: concurrent callers may race
+/// and run `init` more than once. Once a value is produced, it is cached and
+/// reused by subsequent calls.
+///
+/// Uses `Relaxed` ordering because this helper only publishes the cached
+/// value itself.
 pub(crate) struct LazyBool(AtomicU8);
 
 impl LazyBool {
-    const UNINIT: u8 = 2;
+    const UNINIT: u8 = u8::MAX;
 
+    /// Create a new `LazyBool`.
     pub const fn new() -> Self {
         Self(AtomicU8::new(Self::UNINIT))
     }
 
+    /// Call the `init` closure and return the result after caching it.
     #[cold]
-    fn do_init(&self, init: impl FnOnce() -> bool) -> bool {
+    fn cold_init(&self, init: impl FnOnce() -> bool) -> bool {
         let val = u8::from(init());
-        self.0.store(val, Ordering::Relaxed);
+        self.0.store(val, Relaxed);
         val != 0
     }
 
+    /// Retrieve the cached value if it was already initialized or call the `init` closure
+    /// and return the result after caching it.
     #[inline]
     pub fn unsync_init(&self, init: impl FnOnce() -> bool) -> bool {
-        let val = self.0.load(Ordering::Relaxed);
-        if val != Self::UNINIT {
-            val != 0
-        } else {
-            self.do_init(init)
+        let val = self.0.load(Relaxed);
+        if val == Self::UNINIT {
+            return self.cold_init(init);
         }
+        val != 0
     }
 }
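[Note on the next patch, which defines `unsync_init` in terms of
`try_unsync_init` with `E = Infallible`: because `Result<T, Infallible>`
has no `Err` values, a plain `let` can destructure it without an `else`
branch on recent stable Rust (the exhaustiveness rules stabilized in
1.82). A standalone sketch:

    use core::convert::Infallible;

    fn demo(res: Result<u32, Infallible>) -> u32 {
        // `Ok` is irrefutable here: the error type is uninhabited.
        let Ok(v) = res;
        v
    }
]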
From 3c4a7f2704b147a968e6c56274459de0abea22c7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?=
 =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?=
Date: Wed, 11 Feb 2026 16:42:36 +0300
Subject: [PATCH 3/4] Tweak `LazyPtr`

---
 src/utils/lazy_ptr.rs | 71 +++++++++++++++++++------------------------
 1 file changed, 31 insertions(+), 40 deletions(-)

diff --git a/src/utils/lazy_ptr.rs b/src/utils/lazy_ptr.rs
index 027a2f469..821dcff73 100644
--- a/src/utils/lazy_ptr.rs
+++ b/src/utils/lazy_ptr.rs
@@ -1,65 +1,56 @@
-//! Lazily caches a non-null pointer in an `AtomicPtr`.
-//!
-//! Initialization is intentionally unsynchronized: concurrent callers may race
-//! and run `init` more than once. Once a value is produced, it is cached and
-//! reused by subsequent calls.
-//!
-//! For fallible initialization (`try_unsync_init`), only successful values are
-//! cached; errors are returned to the caller and are not cached.
-//!
-//! Uses `Ordering::Relaxed` because this helper only publishes the cached
-//! pointer value. Callers must not rely on this mechanism to synchronize
-//! unrelated memory side effects performed by `init`.
-
 use core::{
+    convert::Infallible,
     ptr::{self, NonNull},
-    sync::atomic::{AtomicPtr, Ordering},
+    sync::atomic::{AtomicPtr, Ordering::Relaxed},
 };
 
+/// Lazily caches a non-null pointer in an `AtomicPtr`.
+///
+/// Initialization is intentionally unsynchronized: concurrent callers may race
+/// and run `init` more than once. Once a value is produced, it is cached and
+/// reused by subsequent calls.
+///
+/// For fallible initialization (`try_unsync_init`), only successful values are
+/// cached; errors are returned to the caller and are not cached.
+///
+/// Uses `Ordering::Relaxed` because this helper only publishes the cached
+/// pointer value. Callers must not rely on this mechanism to synchronize
+/// unrelated memory side effects performed by `init`.
 pub(crate) struct LazyPtr<T>(AtomicPtr<T>);
 
-#[allow(
-    dead_code,
-    reason = "callers typically use only one of `{try_,}unsync_init`"
-)]
 impl<T> LazyPtr<T> {
+    /// Create a new `LazyPtr`.
     pub const fn new() -> Self {
         Self(AtomicPtr::new(ptr::null_mut()))
     }
 
+    /// Call the `init` closure and return the result after caching it in the case of succeess.
     #[cold]
-    fn do_init(&self, init: impl FnOnce() -> NonNull<T>) -> NonNull<T> {
-        let val = init();
-        self.0.store(val.as_ptr(), Ordering::Relaxed);
-        val
-    }
-
-    #[cold]
-    fn try_do_init<E>(
-        &self,
-        init: impl FnOnce() -> Result<NonNull<T>, E>,
-    ) -> Result<NonNull<T>, E> {
+    fn cold_init<E>(&self, init: impl FnOnce() -> Result<NonNull<T>, E>) -> Result<NonNull<T>, E> {
         let val = init()?;
-        self.0.store(val.as_ptr(), Ordering::Relaxed);
+        self.0.store(val.as_ptr(), Relaxed);
         Ok(val)
     }
 
-    #[inline]
-    pub fn unsync_init(&self, init: impl FnOnce() -> NonNull<T>) -> NonNull<T> {
-        match NonNull::new(self.0.load(Ordering::Relaxed)) {
-            Some(val) => val,
-            None => self.do_init(init),
-        }
-    }
-
+    /// Retrieve the cached value if it was already initialized or call the potentially fallible
+    /// `init` closure and return the result after caching it in the case of succeess.
     #[inline]
     pub fn try_unsync_init<E>(
         &self,
         init: impl FnOnce() -> Result<NonNull<T>, E>,
     ) -> Result<NonNull<T>, E> {
-        match NonNull::new(self.0.load(Ordering::Relaxed)) {
+        let p = self.0.load(Relaxed);
+        match NonNull::new(p) {
             Some(val) => Ok(val),
-            None => self.try_do_init(init),
+            None => self.cold_init(init),
         }
     }
+
+    /// Retrieve the cached value if it was already initialized or call the `init` closure
+    /// and return the result after caching it.
+    #[inline]
+    pub fn unsync_init(&self, init: impl FnOnce() -> NonNull<T>) -> NonNull<T> {
+        let Ok(p): Result<_, Infallible> = self.try_unsync_init(|| Ok(init()));
+        p
+    }
 }
From d79beb709824f10e10d912830a881c49cbb249c8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?=
 =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?=
Date: Wed, 11 Feb 2026 16:46:41 +0300
Subject: [PATCH 4/4] fix CI failures

---
 src/utils/lazy_ptr.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/utils/lazy_ptr.rs b/src/utils/lazy_ptr.rs
index 821dcff73..92d12999f 100644
--- a/src/utils/lazy_ptr.rs
+++ b/src/utils/lazy_ptr.rs
@@ -24,7 +24,7 @@ impl<T> LazyPtr<T> {
         Self(AtomicPtr::new(ptr::null_mut()))
     }
 
-    /// Call the `init` closure and return the result after caching it in the case of succeess.
+    /// Call the `init` closure and return the result after caching it in the case of success.
     #[cold]
     fn cold_init<E>(&self, init: impl FnOnce() -> Result<NonNull<T>, E>) -> Result<NonNull<T>, E> {
         let val = init()?;
@@ -33,7 +33,7 @@ impl<T> LazyPtr<T> {
     }
 
     /// Retrieve the cached value if it was already initialized or call the potentially fallible
-    /// `init` closure and return the result after caching it in the case of succeess.
+    /// `init` closure and return the result after caching it in the case of success.
     #[inline]
     pub fn try_unsync_init<E>(
         &self,
@@ -49,6 +49,7 @@ impl<T> LazyPtr<T> {
     /// Retrieve the cached value if it was already initialized or call the `init` closure
     /// and return the result after caching it.
     #[inline]
+    #[allow(dead_code, reason = "Some modules use only `try_unsync_init`")]
     pub fn unsync_init(&self, init: impl FnOnce() -> NonNull<T>) -> NonNull<T> {
         let Ok(p): Result<_, Infallible> = self.try_unsync_init(|| Ok(init()));
         p
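[Usage sketch, not part of the series: how a backend consumes the final
API, modeled on the backends above; `resolve` is an illustrative
stand-in for a real `dlsym`-style lookup:

    #[path = "../utils/lazy_ptr.rs"]
    mod lazy;

    use core::{ffi::c_void, ptr::NonNull};

    fn getrandom_fn() -> NonNull<c_void> {
        static CACHE: lazy::LazyPtr<c_void> = lazy::LazyPtr::new();
        // Racing callers may each run `resolve`; once a pointer is
        // stored, later calls return it without calling `resolve`.
        CACHE.unsync_init(resolve)
    }

    fn resolve() -> NonNull<c_void> {
        // stand-in for the real lookup
        NonNull::dangling()
    }
]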