use std::cell::UnsafeCell;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;

/// A "lock" around data `D`, which employs a *helping* strategy.
///
/// Used to ensure that concurrent `unpark` invocations lead to (1) `poll` being
/// invoked on only a single thread at a time, and (2) `poll` being invoked at
/// least once after each `unpark` (unless the future has completed).
pub struct UnparkMutex<D> {
    // The state of task execution (state machine described below)
    status: AtomicUsize,

    // The actual task data, accessible only in the POLLING state
    inner: UnsafeCell<Option<D>>,
}

// `UnparkMutex<D>` functions in many ways like a `Mutex<D>`, except that on
// acquisition failure, the current lock holder performs the desired work --
// re-polling.
//
// As such, these impls mirror those for `Mutex<D>`. In particular, a reference
// to `UnparkMutex` can be used to gain `&mut` access to the inner data, which
// must therefore be `Send`.
unsafe impl<D: Send> Send for UnparkMutex<D> {}
unsafe impl<D: Send> Sync for UnparkMutex<D> {}

// There are four possible task states, listed below with their possible
// transitions:

// The task is blocked, waiting on an event
const WAITING: usize = 0;       // --> POLLING

// The task is actively being polled by a thread; arrival of additional events
// of interest should move it to the REPOLL state
const POLLING: usize = 1;       // --> WAITING, REPOLL, or COMPLETE

// The task is actively being polled, but will need to be re-polled upon
// completion to ensure that all events were observed.
const REPOLL: usize = 2;        // --> POLLING

// The task has finished executing (either successfully or with an error/panic)
const COMPLETE: usize = 3;      // No transitions out
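
// Summarizing the transitions above in diagram form (this is the protocol
// referred to by the SAFETY comment in `notify` below):
//
//              notify            wait (no notification arrived)
//    WAITING --------> POLLING --------------------------------> WAITING
//                       |   ^
//                notify |   | wait (REPOLL observed) / start_poll
//                       v   |
//                      REPOLL
//
//    POLLING --complete--> COMPLETE   (no transitions out)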

impl<D> UnparkMutex<D> {
    pub fn new() -> UnparkMutex<D> {
        UnparkMutex {
            status: AtomicUsize::new(WAITING),
            inner: UnsafeCell::new(None),
        }
    }

    /// Attempt to "notify" the mutex that a poll should occur.
    ///
    /// An `Ok` result indicates that the `POLLING` state has been entered, and
    /// the caller can proceed to poll the future. An `Err` result indicates
    /// that polling is not necessary (because the task is finished or the
    /// polling has been delegated).
    pub fn notify(&self) -> Result<D, ()> {
        let mut status = self.status.load(SeqCst);
        loop {
            match status {
                // The task is idle, so try to run it immediately.
                WAITING => {
                    match self.status.compare_exchange(WAITING, POLLING,
                                                       SeqCst, SeqCst) {
                        Ok(_) => {
                            let data = unsafe {
                                // SAFETY: we've ensured mutual exclusion via
                                // the status protocol; we are the only thread
                                // that has transitioned to the POLLING state,
                                // and we won't transition back to WAITING
                                // until
                                // the lock is "released" by this thread. See
                                // the protocol diagram above.
                                (*self.inner.get()).take().unwrap()
                            };
                            return Ok(data);
                        }
                        Err(cur) => status = cur,
                    }
                }

                // The task is being polled, so we need to record that it should
                // be *repolled* when complete.
                POLLING => {
                    match self.status.compare_exchange(POLLING, REPOLL,
                                                       SeqCst, SeqCst) {
                        Ok(_) => return Err(()),
                        Err(cur) => status = cur,
                    }
                }

                // The task is already scheduled for polling, or is complete, so
                // we've got nothing to do.
                _ => return Err(()),
            }
        }
    }

    /// Alert the mutex that polling is about to begin, clearing any accumulated
    /// re-poll requests.
    ///
    /// # Safety
    ///
    /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
    /// successful calls to `notify` and `wait`/`complete`.
    pub unsafe fn start_poll(&self) {
        self.status.store(POLLING, SeqCst);
    }

    /// Alert the mutex that polling completed with NotReady.
    ///
    /// # Safety
    ///
    /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
    /// successful calls to `notify` and `wait`/`complete`.
    pub unsafe fn wait(&self, data: D) -> Result<(), D> {
        *self.inner.get() = Some(data);

        match self.status.compare_exchange(POLLING, WAITING, SeqCst, SeqCst) {
            // no unparks came in while we were running
            Ok(_) => Ok(()),

            // guaranteed to be in REPOLL state; just clobber the
            // state and run again.
            Err(status) => {
                assert_eq!(status, REPOLL);
                self.status.store(POLLING, SeqCst);
                Err((*self.inner.get()).take().unwrap())
            }
        }
    }

    /// Alert the mutex that the task has completed execution and should not be
    /// notified again.
    ///
    /// # Safety
    ///
    /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
    /// successful calls to `notify` and `wait`/`complete`.
    pub unsafe fn complete(&self) {
        self.status.store(COMPLETE, SeqCst);
    }
}
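
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal single-threaded sketch of the intended driver loop, not part
    // of the original module: `notify` hands the parked data to whichever
    // caller wins the race, the holder "polls", and then either re-parks the
    // data with `wait` or retires the task with `complete`. Treating initial
    // ownership of the data as already holding the poll lock (and therefore
    // calling `start_poll` first) mirrors how a spawned task's first run is
    // typically driven, but is an assumption of this sketch rather than
    // something documented in this file.
    #[test]
    fn notify_wait_repoll_complete_cycle() {
        let mutex = UnparkMutex::new();
        let mut data = 0u32; // stands in for the task being polled

        unsafe {
            mutex.start_poll();
            data += 1;                  // "poll" once
            mutex.wait(data).unwrap();  // no notification arrived: data parks
        }

        // Notifying a parked task hands the data back out and enters POLLING.
        let mut data = mutex.notify().expect("parked task goes to the notifier");
        assert_eq!(data, 1);

        unsafe {
            mutex.start_poll();         // clear any stale re-poll requests
            data += 1;                  // "poll" again

            // A notification arriving mid-poll cannot hand the data out (we
            // still own it); it is coalesced into a REPOLL request instead.
            assert!(mutex.notify().is_err());

            // `wait` sees the pending REPOLL and hands the data straight back
            // instead of parking it, so the caller polls once more.
            data = mutex.wait(data).unwrap_err();
            data += 1;
            mutex.wait(data).unwrap();  // now it parks cleanly
        }

        // Retire the task: after `complete`, notifications are ignored.
        let data = mutex.notify().unwrap();
        assert_eq!(data, 3);
        unsafe { mutex.complete() };
        assert!(mutex.notify().is_err());
    }
}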