Mutexes, take 5 #11866

Closed
wants to merge 9 commits
1 change: 1 addition & 0 deletions src/etc/licenseck.py
@@ -41,6 +41,7 @@
     "libstd/sync/mpsc_queue.rs", # BSD
     "libstd/sync/spsc_queue.rs", # BSD
     "libstd/sync/mpmc_bounded_queue.rs", # BSD
+    "libextra/sync/mpsc_intrusive.rs", # BSD
 ]

 def check_license(name, contents):
48 changes: 28 additions & 20 deletions src/libextra/sync.rs → src/libextra/sync/mod.rs
@@ -17,9 +17,8 @@
  * in std.
  */

-
 use std::cast;
 use std::comm;
-use std::unstable::sync::Exclusive;
+use std::sync::arc::UnsafeArc;
 use std::sync::atomics;
 use std::unstable::finally::Finally;
@@ -32,6 +31,10 @@ use arc::MutexArc;
  * Internals
  ****************************************************************************/

+pub mod mutex;
+pub mod one;
+mod mpsc_intrusive;
+
 // Each waiting task receives on one of these.
 #[doc(hidden)]
 type WaitEnd = Port<()>;
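The new submodules pull the low-level pieces out into their own files: `mutex` is a raw OS mutex, `one` is a one-time initialization primitive, and `mpsc_intrusive` stays private to the crate. As a minimal sketch of how the raw mutex reads, inferred only from the `mutex::Mutex::new()` construction and the guard-returning `lock()` call in the `Sem` hunks below (the exact signatures are assumptions, not confirmed by this diff):

extern mod extra;

use extra::sync::mutex::Mutex;

fn main() {
    let mut m = Mutex::new();
    {
        // Assumed guard-based API, mirroring the `let _g = lock.lock()`
        // pattern used by Sem::with below; the guard unlocks on drop.
        let _guard = m.lock();
        // ... critical section ...
    }
}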
@@ -54,7 +57,7 @@ impl WaitQueue {
             comm::Data(ch) => {
                 // Send a wakeup signal. If the waiter was killed, its port will
                 // have closed. Keep trying until we get a live task.
-                if ch.try_send_deferred(()) {
+                if ch.try_send(()) {
                     true
                 } else {
                     self.signal()
@@ -69,7 +72,7 @@
         loop {
             match self.head.try_recv() {
                 comm::Data(ch) => {
-                    if ch.try_send_deferred(()) {
+                    if ch.try_send(()) {
                         count += 1;
                     }
                 }
@@ -81,36 +84,45 @@

     fn wait_end(&self) -> WaitEnd {
         let (wait_end, signal_end) = Chan::new();
-        assert!(self.tail.try_send_deferred(signal_end));
+        assert!(self.tail.try_send(signal_end));
         wait_end
     }
 }

 // The building-block used to make semaphores, mutexes, and rwlocks.
 #[doc(hidden)]
 struct SemInner<Q> {
+    lock: mutex::Mutex,
     count: int,
     waiters: WaitQueue,
     // Can be either unit or another waitqueue. Some sems shouldn't come with
     // a condition variable attached, others should.
     blocked: Q
 }

 #[doc(hidden)]
-struct Sem<Q>(Exclusive<SemInner<Q>>);
+struct Sem<Q>(UnsafeArc<SemInner<Q>>);

 #[doc(hidden)]
 impl<Q:Send> Sem<Q> {
     fn new(count: int, q: Q) -> Sem<Q> {
-        Sem(Exclusive::new(SemInner {
-            count: count, waiters: WaitQueue::new(), blocked: q }))
+        Sem(UnsafeArc::new(SemInner {
+            count: count,
+            waiters: WaitQueue::new(),
+            blocked: q,
+            lock: mutex::Mutex::new(),
+        }))
     }

+    unsafe fn with(&self, f: |&mut SemInner<Q>|) {
+        let Sem(ref arc) = *self;
+        let state = arc.get();
+        let _g = (*state).lock.lock();
+        f(cast::transmute(state));
+    }
+
     pub fn acquire(&self) {
         unsafe {
             let mut waiter_nobe = None;
-            let Sem(ref lock) = *self;
-            lock.with(|state| {
+            self.with(|state| {
                 state.count -= 1;
                 if state.count < 0 {
                     // Create waiter nobe, enqueue ourself, and tell
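The `with` helper added above is the heart of this take: it rebuilds what `Exclusive` used to provide, a reference-counted box whose contents are only touched while an OS mutex is held. The same pattern in isolation, as a minimal sketch (the `Shared` and `Locked` names are hypothetical, not part of this PR):

extern mod extra;

use std::sync::arc::UnsafeArc;
use extra::sync::mutex;

struct Locked<T> {
    lock: mutex::Mutex,
    data: T,
}

struct Shared<T>(UnsafeArc<Locked<T>>);

impl<T: Send> Shared<T> {
    fn new(data: T) -> Shared<T> {
        Shared(UnsafeArc::new(Locked {
            lock: mutex::Mutex::new(),
            data: data,
        }))
    }

    // Same shape as Sem::with: take the OS lock, then hand out mutable
    // access to the contents for the duration of the closure.
    unsafe fn with(&self, f: |&mut T|) {
        let Shared(ref arc) = *self;
        let state = arc.get();
        let _g = (*state).lock.lock();
        f(&mut (*state).data);
    }
}

fn main() {
    let s = Shared::new(0);
    unsafe {
        s.with(|n| *n += 1);
    }
}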
@@ -129,8 +141,7 @@ impl<Q:Send> Sem<Q> {

     pub fn release(&self) {
         unsafe {
-            let Sem(ref lock) = *self;
-            lock.with(|state| {
+            self.with(|state| {
                 state.count += 1;
                 if state.count <= 0 {
                     state.waiters.signal();
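For reference, the counting-semaphore contract these two methods implement: `acquire` decrements the count and blocks on its `WaitEnd` when the count goes negative; `release` increments it and signals one waiter. A hypothetical use through the public `Semaphore` wrapper in this module, assuming its existing `access` convenience method that brackets a closure with `acquire`/`release`:

extern mod extra;

use extra::sync::Semaphore;

fn main() {
    // Two tokens: up to two tasks may be inside `access` at once; a
    // third caller would block in acquire() until one of them releases.
    let sem = Semaphore::new(2);
    sem.access(|| {
        // ... bounded critical region ...
    });
}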
@@ -210,8 +221,7 @@ impl<'a> Condvar<'a> {
         let mut out_of_bounds = None;
         // Release lock, 'atomically' enqueuing ourselves in so doing.
         unsafe {
-            let Sem(ref queue) = *self.sem;
-            queue.with(|state| {
+            self.sem.with(|state| {
                 if condvar_id < state.blocked.len() {
                     // Drop the lock.
                     state.count += 1;
@@ -253,8 +263,7 @@
         unsafe {
             let mut out_of_bounds = None;
             let mut result = false;
-            let Sem(ref lock) = *self.sem;
-            lock.with(|state| {
+            self.sem.with(|state| {
                 if condvar_id < state.blocked.len() {
                     result = state.blocked[condvar_id].signal();
                 } else {
@@ -276,8 +285,7 @@
         let mut out_of_bounds = None;
         let mut queue = None;
         unsafe {
-            let Sem(ref lock) = *self.sem;
-            lock.with(|state| {
+            self.sem.with(|state| {
                 if condvar_id < state.blocked.len() {
                     // To avoid :broadcast_heavy, we make a new waitqueue,
                     // swap it out with the old one, and broadcast on the
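All four `Condvar` methods now go through `Sem::with` instead of destructuring the `Exclusive` by hand; their caller-facing behavior is unchanged. A sketch of the `Mutex`/`Condvar` API these hunks sit behind (`lock_cond` and the `_on` variants predate this PR; the signatures here are assumed from the surrounding code):

extern mod extra;

use extra::sync::Mutex;

fn main() {
    let m = Mutex::new();
    m.lock_cond(|cond| {
        // wait() releases the mutex, blocks, and reacquires it on wakeup;
        // signal() wakes one waiter and broadcast() wakes all of them.
        // The wait_on/signal_on/broadcast_on variants above take a
        // condvar_id to pick one of several condvars on the same mutex.
        cond.signal();
    });
}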
139 changes: 139 additions & 0 deletions src/libextra/sync/mpsc_intrusive.rs
@@ -0,0 +1,139 @@
/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/

//! A mostly lock-free multi-producer, single-consumer queue.
//!
//! This module implements an intrusive MPSC queue. This queue is incredibly
//! unsafe (due to its use of unsafe pointers for nodes), and hence is not
//! public.

// http://www.1024cores.net/home/lock-free-algorithms
// /queues/intrusive-mpsc-node-based-queue

use std::cast;
use std::sync::atomics;

// NB: all links are done as AtomicUint instead of AtomicPtr to allow for static
// initialization.

pub struct Node<T> {
    next: atomics::AtomicUint,
    data: T,
}

pub struct DummyNode {
    next: atomics::AtomicUint,
}

pub struct Queue<T> {
    head: atomics::AtomicUint,
    tail: *mut Node<T>,
    stub: DummyNode,
}

impl<T: Send> Queue<T> {
    pub fn new() -> Queue<T> {
        Queue {
            head: atomics::AtomicUint::new(0),
            tail: 0 as *mut Node<T>,
            stub: DummyNode {
                next: atomics::AtomicUint::new(0),
            },
        }
    }

    pub unsafe fn push(&mut self, node: *mut Node<T>) {
        (*node).next.store(0, atomics::Release);
        let prev = self.head.swap(node as uint, atomics::AcqRel);

        // Note that this code is slightly modified from the original to
        // allow for static initialization of these queues: a `prev` of 0
        // means nothing has ever been pushed, so the stub is not yet
        // linked in and must act as the previous tail here.
        if prev == 0 {
            self.stub.next.store(node as uint, atomics::Release);
        } else {
            let prev = prev as *mut Node<T>;
            (*prev).next.store(node as uint, atomics::Release);
        }
    }

    /// You'll note that the other MPSC queue in std::sync is non-intrusive
    /// and returns a `PopResult` from `pop` to indicate when the queue is
    /// inconsistent. An "inconsistent state" in the other queue means that
    /// a pusher has performed its swap but hasn't yet finished linking the
    /// rest of the chain.
    ///
    /// This queue suffers from the same problem, but I haven't yet been
    /// able to untangle exactly when it happens. This code is translated
    /// verbatim from the website above, and is more complicated than the
    /// non-intrusive version.
    ///
    /// For now, consumers of this queue must be prepared for this fact:
    /// just because `pop` returns `None` does not mean that there is no
    /// data in the queue.
    pub unsafe fn pop(&mut self) -> Option<*mut Node<T>> {
        let tail = self.tail;
        // A null tail means we have never popped before; start at the stub.
        let mut tail = if !tail.is_null() {tail} else {
            cast::transmute(&self.stub)
        };
        let mut next = (*tail).next(atomics::Relaxed);
        if tail as uint == &self.stub as *DummyNode as uint {
            if next.is_null() {
                return None;
            }
            self.tail = next;
            tail = next;
            next = (*next).next(atomics::Relaxed);
        }
        if !next.is_null() {
            self.tail = next;
            return Some(tail);
        }
        let head = self.head.load(atomics::Acquire) as *mut Node<T>;
        if tail != head {
            // A pusher is mid-push: its swap on head has happened but the
            // link from the previous node hasn't been written yet.
            return None;
        }
        // Head and tail coincide on the last real node; re-enqueue the
        // stub so that node gains a successor and can be handed out.
        let stub = cast::transmute(&self.stub);
        self.push(stub);
        next = (*tail).next(atomics::Relaxed);
        if !next.is_null() {
            self.tail = next;
            return Some(tail);
        }
        return None
    }
}

impl<T: Send> Node<T> {
    pub fn new(t: T) -> Node<T> {
        Node {
            data: t,
            next: atomics::AtomicUint::new(0),
        }
    }
    pub unsafe fn next(&mut self, ord: atomics::Ordering) -> *mut Node<T> {
        cast::transmute::<uint, *mut Node<T>>(self.next.load(ord))
    }
}
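A single-threaded sketch of the queue's contract, using only the `Queue` and `Node` API defined above. It includes the caveat from `pop`'s docs that `None` can mean "a pusher is mid-push" rather than "empty", so callers must be able to retry; the stack-allocated nodes are for illustration only, since real nodes must outlive their time in the queue:

fn main() {
    unsafe {
        let mut q: Queue<int> = Queue::new();
        let mut a = Node::new(1);
        let mut b = Node::new(2);
        q.push(&mut a as *mut Node<int>);
        q.push(&mut b as *mut Node<int>);

        // A None from pop() may only mean "momentarily inconsistent",
        // so keep popping until both nodes have come back out.
        let mut popped = 0;
        while popped < 2 {
            match q.pop() {
                Some(node) => { popped += 1; assert!((*node).data > 0); }
                None => { /* a pusher hasn't finished linking; retry */ }
            }
        }
    }
}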