// This implementation is based on:
// https://github.com/Amanieu/parking_lot/tree/fa294cd677936bf365afa0497039953b10c722f5/lock_api
// and
// https://github.com/mvdnes/spin-rs/tree/7516c8037d3d15712ba4d8499ab075e97a19d778

use core::marker::PhantomData;
use core::sync::atomic::{AtomicBool, Ordering};
use lock_api::{GuardSend, RawMutex};

use crate::relax::{Backoff, Relax, Spin};
/// Provides mutual exclusion based on spinning on an `AtomicBool`.
///
/// It's recommended to use this type either in combination with [`lock_api::Mutex`] or
/// through the [`Spinlock`] type.
///
/// ## Example
///
/// ```rust
/// use lock_api::RawMutex;
/// use spinning_top::RawSpinlock;
///
/// let lock: RawSpinlock = RawSpinlock::INIT;
/// assert_eq!(lock.try_lock(), true); // lock it
/// assert_eq!(lock.try_lock(), false); // can't be locked a second time
/// unsafe { lock.unlock(); } // unlock it
/// assert_eq!(lock.try_lock(), true); // now it can be locked again
/// ```
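///
/// Combining this type with [`lock_api::Mutex`] directly looks like the sketch below,
/// which mirrors the [`Spinlock`] alias that this crate defines further down:
///
/// ```rust
/// use spinning_top::RawSpinlock;
///
/// // Equivalent to this crate's `Spinlock<i32>` type alias.
/// type MyMutex = lock_api::Mutex<RawSpinlock, i32>;
///
/// let mutex = MyMutex::new(42);
/// assert_eq!(*mutex.lock(), 42);
/// ```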
#[derive(Debug)]
pub struct RawSpinlock<R: Relax = Spin> {
    /// Whether the spinlock is locked.
    locked: AtomicBool,
    relax: PhantomData<R>,
}

impl<R: Relax> RawSpinlock<R> {
    // Can fail to lock even if the spinlock is not locked, because
    // `compare_exchange_weak` is allowed to fail spuriously (e.g. on architectures
    // that implement compare-and-swap as a load-linked/store-conditional loop).
    // In return, it may compile to more efficient code than `try_lock` when
    // called in a loop.
    #[inline]
    fn try_lock_weak(&self) -> bool {
        // The orderings are the same as in `try_lock` and are still correct here.
        self.locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
}

unsafe impl<R: Relax> RawMutex for RawSpinlock<R> {
    const INIT: RawSpinlock<R> = RawSpinlock {
        locked: AtomicBool::new(false),
        relax: PhantomData,
    };

    // A spinlock guard can be sent to another thread and unlocked there
    type GuardMarker = GuardSend;

    #[inline]
    fn lock(&self) {
        let mut relax = R::default();

        while !self.try_lock_weak() {
            // Wait until the lock looks unlocked before retrying
            // Code from https://github.com/mvdnes/spin-rs/commit/d3e60d19adbde8c8e9d3199c7c51e51ee5a20bf6
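            // Spinning on a plain load keeps the cache line in the shared state,
            // so waiting cores don't bounce it back and forth with failed
            // compare-exchange writes (test-and-test-and-set).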
            while self.is_locked() {
                // Tell the CPU that we're inside a busy-wait loop
                relax.relax();
            }
        }
    }

    #[inline]
    fn try_lock(&self) -> bool {
        // Code taken from:
        // https://github.com/Amanieu/parking_lot/blob/fa294cd677936bf365afa0497039953b10c722f5/lock_api/src/lib.rs#L49-L53
        //
        // The reason for using a strong compare_exchange is explained here:
        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
        //
        // The second Ordering argument specifies the ordering when the compare_exchange
        // fails. Since we don't access any critical data if we fail to acquire the lock,
        // we can use a Relaxed ordering in this case.
        self.locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    #[inline]
    unsafe fn unlock(&self) {
        // Release ordering pairs with the Acquire in `lock`/`try_lock`: all writes
        // made in the critical section become visible to the next lock holder.
        self.locked.store(false, Ordering::Release);
    }

    #[inline]
    fn is_locked(&self) -> bool {
        // Relaxed is sufficient because this operation does not provide synchronization, only atomicity.
        self.locked.load(Ordering::Relaxed)
    }
}

/// A mutual exclusion (Mutex) type based on busy-waiting.
///
/// Calling `lock` (or `try_lock`) on this type returns a [`SpinlockGuard`], which
/// automatically frees the lock when it goes out of scope.
///
/// ## Example
///
/// ```rust
/// use spinning_top::Spinlock;
///
/// fn main() {
///     // Wrap some data in a spinlock
///     let data = String::from("Hello");
///     let spinlock = Spinlock::new(data);
///     make_uppercase(&spinlock); // only pass a shared reference
///
///     // We have ownership of the spinlock, so we can extract the data without locking
///     // Note: this consumes the spinlock
///     let data = spinlock.into_inner();
///     assert_eq!(data.as_str(), "HELLO");
/// }
///
/// fn make_uppercase(spinlock: &Spinlock<String>) {
///     // Lock the spinlock to get a mutable reference to the data
///     let mut locked_data = spinlock.lock();
///     assert_eq!(locked_data.as_str(), "Hello");
///     locked_data.make_ascii_uppercase();
///
///     // the lock is automatically freed at the end of the scope
/// }
/// ```
///
/// ## Usage in statics
///
/// `Spinlock::new` is a `const` function. This makes the `Spinlock` type
/// usable in statics:
///
/// ```rust
/// use spinning_top::Spinlock;
///
/// static DATA: Spinlock<u32> = Spinlock::new(0);
///
/// fn main() {
///     let mut data = DATA.lock();
///     *data += 1;
///     assert_eq!(*data, 1);
/// }
/// ```
pub type Spinlock<T> = lock_api::Mutex<RawSpinlock<Spin>, T>;

/// A RAII guard that frees the spinlock when it goes out of scope.
///
/// Allows access to the locked data through the [`core::ops::Deref`] and [`core::ops::DerefMut`] operations.
///
/// ## Example
///
/// ```rust
/// use spinning_top::{guard::SpinlockGuard, Spinlock};
///
/// let spinlock = Spinlock::new(Vec::new());
///
/// // begin a new scope
/// {
///     // lock the spinlock to create a `SpinlockGuard`
///     let mut guard: SpinlockGuard<_> = spinlock.lock();
///
///     // guard can be used like a `&mut Vec` since it implements `DerefMut`
///     guard.push(1);
///     guard.push(2);
///     assert_eq!(guard.len(), 2);
/// } // guard is dropped -> frees the spinlock again
///
/// // spinlock is unlocked again
/// assert!(spinlock.try_lock().is_some());
/// ```
pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock<Spin>, T>;

/// A RAII guard returned by `SpinlockGuard::map`.
///
/// ## Example
/// ```rust
/// use spinning_top::{
///     guard::{MappedSpinlockGuard, SpinlockGuard},
///     Spinlock,
/// };
///
/// let spinlock = Spinlock::new(Some(3));
///
/// // Begin a new scope.
/// {
///     // Lock the spinlock to create a `SpinlockGuard`.
///     let mut guard: SpinlockGuard<_> = spinlock.lock();
///
///     // Map the internal value of `guard`. `guard` is moved.
///     let mut mapped: MappedSpinlockGuard<'_, _> =
///         SpinlockGuard::map(guard, |g| g.as_mut().unwrap());
///     assert_eq!(*mapped, 3);
///
///     *mapped = 5;
///     assert_eq!(*mapped, 5);
/// } // `mapped` is dropped -> frees the spinlock again.
///
/// // The change is reflected in the original lock.
/// assert_eq!(*spinlock.lock(), Some(5));
/// ```
pub type MappedSpinlockGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawSpinlock<Spin>, T>;

/// A [`lock_api::ArcMutexGuard`] based on [`RawSpinlock`]`<`[`Spin`]`>`.
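///
/// ## Example
///
/// A minimal sketch; it assumes the `arc_lock` feature is enabled and uses
/// [`lock_api::Mutex::lock_arc`], which locks through an `Arc` and returns an
/// owned guard.
///
/// ```rust
/// use std::sync::Arc;
/// use spinning_top::{guard::ArcSpinlockGuard, Spinlock};
///
/// let spinlock = Arc::new(Spinlock::new(42));
///
/// // The guard holds a clone of the `Arc`, so it isn't tied to a borrow.
/// let guard: ArcSpinlockGuard<i32> = spinlock.lock_arc();
/// assert_eq!(*guard, 42);
/// drop(guard); // frees the spinlock again
///
/// assert!(spinlock.try_lock().is_some());
/// ```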
#[cfg(feature = "arc_lock")]
pub type ArcSpinlockGuard<T> = lock_api::ArcMutexGuard<RawSpinlock<Spin>, T>;

/// A mutual exclusion (Mutex) type based on busy-waiting with exponential backoff.
///
/// Calling `lock` (or `try_lock`) on this type returns a [`BackoffSpinlockGuard`], which
/// automatically frees the lock when it goes out of scope.
///
/// ## Example
///
/// ```rust
/// use spinning_top::BackoffSpinlock;
///
/// fn main() {
///     // Wrap some data in a spinlock
///     let data = String::from("Hello");
///     let spinlock = BackoffSpinlock::new(data);
///     make_uppercase(&spinlock); // only pass a shared reference
///
///     // We have ownership of the spinlock, so we can extract the data without locking
///     // Note: this consumes the spinlock
///     let data = spinlock.into_inner();
///     assert_eq!(data.as_str(), "HELLO");
/// }
///
/// fn make_uppercase(spinlock: &BackoffSpinlock<String>) {
///     // Lock the spinlock to get a mutable reference to the data
///     let mut locked_data = spinlock.lock();
///     assert_eq!(locked_data.as_str(), "Hello");
///     locked_data.make_ascii_uppercase();
///
///     // the lock is automatically freed at the end of the scope
/// }
/// ```
///
/// ## Usage in statics
///
/// `BackoffSpinlock::new` is a `const` function. This makes the `BackoffSpinlock` type
/// usable in statics:
///
/// ```rust
/// use spinning_top::BackoffSpinlock;
///
/// static DATA: BackoffSpinlock<u32> = BackoffSpinlock::new(0);
///
/// fn main() {
///     let mut data = DATA.lock();
///     *data += 1;
///     assert_eq!(*data, 1);
/// }
/// ```
pub type BackoffSpinlock<T> = lock_api::Mutex<RawSpinlock<Backoff>, T>;

/// A RAII guard that frees the exponential backoff spinlock when it goes out of scope.
///
/// Allows access to the locked data through the [`core::ops::Deref`] and [`core::ops::DerefMut`] operations.
///
/// ## Example
///
/// ```rust
/// use spinning_top::{guard::BackoffSpinlockGuard, BackoffSpinlock};
///
/// let spinlock = BackoffSpinlock::new(Vec::new());
///
/// // begin a new scope
/// {
///     // lock the spinlock to create a `BackoffSpinlockGuard`
///     let mut guard: BackoffSpinlockGuard<_> = spinlock.lock();
///
///     // guard can be used like a `&mut Vec` since it implements `DerefMut`
///     guard.push(1);
///     guard.push(2);
///     assert_eq!(guard.len(), 2);
/// } // guard is dropped -> frees the spinlock again
///
/// // spinlock is unlocked again
/// assert!(spinlock.try_lock().is_some());
/// ```
pub type BackoffSpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock<Backoff>, T>;

/// A RAII guard returned by `BackoffSpinlockGuard::map`.
///
/// ## Example
/// ```rust
/// use spinning_top::{
///     guard::{BackoffSpinlockGuard, MappedBackoffSpinlockGuard},
///     BackoffSpinlock,
/// };
///
/// let spinlock = BackoffSpinlock::new(Some(3));
///
/// // Begin a new scope.
/// {
///     // Lock the spinlock to create a `BackoffSpinlockGuard`.
///     let mut guard: BackoffSpinlockGuard<_> = spinlock.lock();
///
///     // Map the internal value of `guard`. `guard` is moved.
///     let mut mapped: MappedBackoffSpinlockGuard<'_, _> =
///         BackoffSpinlockGuard::map(guard, |g| g.as_mut().unwrap());
///     assert_eq!(*mapped, 3);
///
///     *mapped = 5;
///     assert_eq!(*mapped, 5);
/// } // `mapped` is dropped -> frees the spinlock again.
///
/// // The change is reflected in the original lock.
/// assert_eq!(*spinlock.lock(), Some(5));
/// ```
pub type MappedBackoffSpinlockGuard<'a, T> =
    lock_api::MappedMutexGuard<'a, RawSpinlock<Backoff>, T>;

/// A [`lock_api::ArcMutexGuard`] based on [`RawSpinlock`]`<`[`Backoff`]`>`.
#[cfg(feature = "arc_lock")]
pub type ArcBackoffSpinlockGuard<T> = lock_api::ArcMutexGuard<RawSpinlock<Backoff>, T>;

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn create_and_lock() {
        let spinlock = Spinlock::new(42);
        let data = spinlock.try_lock();
        assert!(data.is_some());
        assert_eq!(*data.unwrap(), 42);
    }

    #[test]
    fn mutual_exclusion() {
        let spinlock = Spinlock::new(1);
        let data = spinlock.try_lock();
        assert!(data.is_some());
        assert!(spinlock.try_lock().is_none());
        assert!(spinlock.try_lock().is_none()); // still None
        core::mem::drop(data);
        assert!(spinlock.try_lock().is_some());
    }

    #[test]
    fn three_locks() {
        let spinlock1 = Spinlock::new(1);
        let spinlock2 = Spinlock::new(2);
        let spinlock3 = Spinlock::new(3);
        let data1 = spinlock1.try_lock();
        let data2 = spinlock2.try_lock();
        let data3 = spinlock3.try_lock();
        assert!(data1.is_some());
        assert!(data2.is_some());
        assert!(data3.is_some());
        assert!(spinlock1.try_lock().is_none());
        assert!(spinlock1.try_lock().is_none()); // still None
        assert!(spinlock2.try_lock().is_none());
        assert!(spinlock3.try_lock().is_none());
        core::mem::drop(data3);
        assert!(spinlock3.try_lock().is_some());
    }

    #[test]
    fn mapped_lock() {
        let spinlock = Spinlock::new([1, 2, 3]);
        let data = spinlock.lock();
        let mut mapped = SpinlockGuard::map(data, |d| &mut d[0]);
        assert_eq!(*mapped, 1);
        *mapped = 4;
        assert_eq!(*mapped, 4);
        core::mem::drop(mapped);
        assert!(!spinlock.is_locked());
        assert_eq!(*spinlock.lock(), [4, 2, 3]);
    }
}