@@ -13,7 +13,9 @@ use prelude::v1::*;
 use cell::UnsafeCell;
 use fmt;
 use marker;
+use mem;
 use ops::{Deref, DerefMut};
+use ptr;
 use sys_common::mutex as sys;
 use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
 
@@ -243,6 +245,50 @@ impl<T: ?Sized> Mutex<T> {
     pub fn is_poisoned(&self) -> bool {
         self.inner.poison.get()
     }
+
+    /// Consumes this mutex, returning the underlying data.
+    ///
+    /// # Failure
+    ///
+    /// If another user of this mutex panicked while holding the mutex, then
+    /// this call will return an error instead.
+    #[unstable(feature = "mutex_into_inner", reason = "recently added", issue = "28968")]
+    pub fn into_inner(self) -> LockResult<T> where T: Sized {
+        // We know statically that there are no outstanding references to
+        // `self` so there's no need to lock the inner StaticMutex.
+        //
+        // To get the inner value, we'd like to call `data.into_inner()`,
+        // but because `Mutex` impl-s `Drop`, we can't move out of it, so
+        // we'll have to destructure it manually instead.
+        unsafe {
+            // Like `let Mutex { inner, data } = self`.
+            let (inner, data) = {
+                let Mutex { ref inner, ref data } = self;
+                (ptr::read(inner), ptr::read(data))
+            };
+            mem::forget(self);
+            inner.lock.destroy();  // Keep in sync with the `Drop` impl.
+
+            poison::map_result(inner.poison.borrow(), |_| data.into_inner())
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
+    ///
+    /// # Failure
+    ///
+    /// If another user of this mutex panicked while holding the mutex, then
+    /// this call will return an error instead.
+    #[unstable(feature = "mutex_get_mut", reason = "recently added", issue = "28968")]
+    pub fn get_mut(&mut self) -> LockResult<&mut T> {
+        // We know statically that there are no other references to `self`, so
+        // there's no need to lock the inner StaticMutex.
+        let data = unsafe { &mut *self.data.get() };
+        poison::map_result(self.inner.poison.borrow(), |_| data)
+    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
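For orientation, here is a quick sketch of how the two new methods are meant to be used (illustrative only, not part of this commit; since both methods are unstable, it assumes a nightly compiler with the corresponding feature gates enabled):

```rust
// Illustrative sketch, not part of this commit. The feature gates below are
// only required while the methods remain unstable.
#![feature(mutex_into_inner, mutex_get_mut)]

use std::sync::Mutex;

fn main() {
    let mut m = Mutex::new(vec![1, 2, 3]);

    // `&mut self` guarantees no other thread holds the lock,
    // so `get_mut` reaches the data without locking.
    m.get_mut().unwrap().push(4);

    // Consuming the mutex returns the data by value
    // (or a poison error that still carries the data).
    let v = m.into_inner().unwrap();
    assert_eq!(v, vec![1, 2, 3, 4]);
}
```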
@@ -251,6 +297,8 @@ impl<T: ?Sized> Drop for Mutex<T> {
         // This is actually safe b/c we know that there is no further usage of
         // this mutex (it's up to the user to arrange for a mutex to get
         // dropped, that's not our job)
+        //
+        // IMPORTANT: This code must be kept in sync with `Mutex::into_inner`.
         unsafe { self.inner.lock.destroy() }
     }
 }
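The `ptr::read`/`mem::forget` pair in `into_inner` is the standard workaround for moving a field out of a type that implements `Drop`; here is a minimal standalone sketch of that pattern on a hypothetical type (not from this commit):

```rust
use std::{mem, ptr};

struct Guarded {
    value: String,
}

impl Drop for Guarded {
    fn drop(&mut self) {
        println!("cleaning up {}", self.value);
    }
}

impl Guarded {
    // The compiler forbids moving `self.value` out directly because `Guarded`
    // implements `Drop`. Instead, read the field out bitwise and then forget
    // `self` so its destructor never sees the duplicated value.
    fn into_value(self) -> String {
        unsafe {
            let value = ptr::read(&self.value);
            mem::forget(self);
            value
        }
    }
}

fn main() {
    let g = Guarded { value: String::from("demo") };
    let s = g.into_value(); // no "cleaning up" line is printed
    assert_eq!(s, "demo");
}
```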
@@ -371,10 +419,14 @@ mod tests {
 
     use sync::mpsc::channel;
     use sync::{Arc, Mutex, StaticMutex, Condvar};
+    use sync::atomic::{AtomicUsize, Ordering};
     use thread;
 
     struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
 
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
     unsafe impl<T: Send> Send for Packet<T> {}
     unsafe impl<T> Sync for Packet<T> {}
 
@@ -435,6 +487,69 @@ mod tests {
         *m.try_lock().unwrap() = ();
     }
 
+    #[test]
+    fn test_into_inner() {
+        let m = Mutex::new(NonCopy(10));
+        assert_eq!(m.into_inner().unwrap(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = Mutex::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner().unwrap();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_into_inner_poison() {
+        let m = Arc::new(Mutex::new(NonCopy(10)));
+        let m2 = m.clone();
+        let _ = thread::spawn(move || {
+            let _lock = m2.lock().unwrap();
+            panic!("test panic in inner thread to poison mutex");
+        }).join();
+
+        assert!(m.is_poisoned());
+        match Arc::try_unwrap(m).unwrap().into_inner() {
+            Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
+            Ok(x) => panic!("into_inner of poisoned Mutex is Ok: {:?}", x),
+        }
+    }
+
+    #[test]
+    fn test_get_mut() {
+        let mut m = Mutex::new(NonCopy(10));
+        *m.get_mut().unwrap() = NonCopy(20);
+        assert_eq!(m.into_inner().unwrap(), NonCopy(20));
+    }
+
+    #[test]
+    fn test_get_mut_poison() {
+        let m = Arc::new(Mutex::new(NonCopy(10)));
+        let m2 = m.clone();
+        let _ = thread::spawn(move || {
+            let _lock = m2.lock().unwrap();
+            panic!("test panic in inner thread to poison mutex");
+        }).join();
+
+        assert!(m.is_poisoned());
+        match Arc::try_unwrap(m).unwrap().get_mut() {
+            Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
+            Ok(x) => panic!("get_mut of poisoned Mutex is Ok: {:?}", x),
+        }
+    }
+
     #[test]
     fn test_mutex_arc_condvar() {
         let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));