use core::sync::atomic::{AtomicBool, Ordering};
use core::cell::UnsafeCell;
use core::hint::spin_loop as cpu_relax;
use core::marker::Sync;
use core::ops::{Drop, Deref, DerefMut};
use core::fmt;
use core::option::Option::{self, None, Some};
use core::default::Default;

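/// A spin-based mutual exclusion primitive.
///
/// Unlike `std::sync::Mutex`, locking never blocks the calling thread:
/// `lock` busy-waits (spins) until the lock becomes available, which makes
/// this type usable in `no_std` environments.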
pub struct Mutex<T: ?Sized>
{
    lock: AtomicBool,
    data: UnsafeCell<T>,
}

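/// A guard granting access to the data protected by a `Mutex`.
///
/// The lock is released when the guard falls out of scope.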
#[derive(Debug)]
pub struct MutexGuard<'a, T: ?Sized + 'a>
{
    lock: &'a AtomicBool,
    data: &'a mut T,
}

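// As with `std::sync::Mutex`: the lock provides the synchronization, so the
// mutex is `Sync` and `Send` whenever the wrapped data is `Send`.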
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}

impl<T> Mutex<T>
{
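    /// Creates a new spinlock wrapping the supplied data.
    ///
    /// Being a `const fn`, it can be used to initialise `static` items.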
    pub const fn new(user_data: T) -> Mutex<T>
    {
        Mutex
        {
            lock: AtomicBool::new(false),
            data: UnsafeCell::new(user_data),
        }
    }

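    /// Consumes this mutex, returning the underlying data.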
    pub fn into_inner(self) -> T {
        // `self` is owned here, so no guards can be outstanding and no
        // locking is required.
        let Mutex { data, .. } = self;
        data.into_inner()
    }
}

impl<T: ?Sized> Mutex<T>
{
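    /// Spins until the lock is acquired.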
    fn obtain_lock(&self)
    {
        while self.lock
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // Wait until the lock looks free before retrying, spinning on a
            // relaxed load to reduce cache-line contention.
            while self.lock.load(Ordering::Relaxed)
            {
                cpu_relax();
            }
        }
    }

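    /// Locks the spinlock, spinning until it becomes available, and returns
    /// a guard that dereferences to the protected data.
    ///
    /// The lock is released when the returned guard is dropped.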
    pub fn lock(&self) -> MutexGuard<T>
    {
        self.obtain_lock();
        MutexGuard
        {
            lock: &self.lock,
            data: unsafe { &mut *self.data.get() },
        }
    }

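    /// Force-unlocks the mutex regardless of any outstanding guard.
    ///
    /// This is unsafe: it is only sound when a guard has been deliberately
    /// leaked (e.g. via `mem::forget`); otherwise it can release a lock that
    /// another caller still holds.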
    pub unsafe fn force_unlock(&self) {
        self.lock.store(false, Ordering::Release);
    }

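    /// Attempts to acquire the lock without spinning.
    ///
    /// Returns `Some(guard)` if the lock was free, or `None` if it is
    /// currently held elsewhere.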
    pub fn try_lock(&self) -> Option<MutexGuard<T>>
    {
        if self.lock
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            Some(
                MutexGuard {
                    lock: &self.lock,
                    data: unsafe { &mut *self.data.get() },
                }
            )
        }
        else
        {
            None
        }
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T>
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
    {
        match self.try_lock()
        {
            Some(guard) => write!(f, "Mutex {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, " }}")),
            None => write!(f, "Mutex {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default> Default for Mutex<T> {
    fn default() -> Mutex<T> {
        Mutex::new(Default::default())
    }
}

impl<'a, T: ?Sized> Deref for MutexGuard<'a, T>
{
    type Target = T;
    fn deref<'b>(&'b self) -> &'b T { &*self.data }
}

impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T>
{
    fn deref_mut<'b>(&'b mut self) -> &'b mut T { &mut *self.data }
}

impl<'a, T: ?Sized> Drop for MutexGuard<'a, T>
{
    /// Dropping the guard releases the lock it was created from.
    fn drop(&mut self)
    {
        self.lock.store(false, Ordering::Release);
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::thread;

    use super::*;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let m = Mutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        static M: Mutex<()> = Mutex::new(());
        static mut CNT: u32 = 0;
        const J: u32 = 1000;
        const K: u32 = 3;

        fn inc() {
            for _ in 0..J {
                unsafe {
                    let _g = M.lock();
                    CNT += 1;
                }
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            thread::spawn(move || { inc(); tx2.send(()).unwrap(); });
            let tx2 = tx.clone();
            thread::spawn(move || { inc(); tx2.send(()).unwrap(); });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(unsafe { CNT }, J * K * 2);
    }

    #[test]
    fn try_lock() {
        let mutex = Mutex::new(42);

        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));

        let b = mutex.try_lock();
        assert!(b.is_none());

        ::core::mem::drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

    #[test]
    fn test_into_inner() {
        let m = Mutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = Mutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_mutex_arc_nested() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = Arc::new(Mutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<Mutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        }).join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutex_force_lock() {
        let lock = Mutex::new(());
        ::std::mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }
}