Wednesday, August 1, 2018

Why is memory_order_seq_cst needed in a lock-free queue?

In this lock-free queue, only one thread executes push() and pop(); the other threads execute steal(). However, I can't understand why std::atomic_thread_fence(std::memory_order_seq_cst) is needed in steal(). In my opinion, steal() has only one store operation, the _top.compare_exchange_strong, and that already uses memory_order_seq_cst. So why is the fence needed? (For reference, a minimal usage sketch follows the class below.)

#include <atomic>   // std::atomic, std::atomic_thread_fence, std::memory_order_*
#include <cstddef>  // size_t
#include <new>      // std::nothrow

// LOG, DISALLOW_COPY_AND_ASSIGN and BAIDU_CACHELINE_ALIGNMENT are brpc (butil) macros.
template <typename T>
class WorkStealingQueue {
public:
    WorkStealingQueue() : _bottom(1), _top(1) { }
    ~WorkStealingQueue() { delete [] _buffer; }

    int init(size_t capacity) {
        if (capacity & (capacity - 1)) {
            LOG(ERROR) << "Invalid capacity=" << capacity
                       << " which must be power of 2";
            return -1;
        }

        _buffer = new(std::nothrow) T[capacity];
        if (_buffer == NULL) {
            return -1;
        }
        _capacity = capacity;
        return 0;
    }

    // Steal one item from the queue.
    // Returns true if an item was stolen and written to `val'.
    // May run in parallel with push(), pop() or another steal().
    bool steal(T* val) {
        size_t t = _top.load(std::memory_order_acquire);
        size_t b = _bottom.load(std::memory_order_acquire);
        if (t >= b) {
            // Permit false negative for performance considerations.
            return false;
        }

        do {
            std::atomic_thread_fence(std::memory_order_seq_cst);
            b = _bottom.load(std::memory_order_acquire);
            if (t >= b) {
                return false;
            }
            *val = _buffer[t & (_capacity - 1)];
        } while (!_top.compare_exchange_strong(t, t + 1,
                                               std::memory_order_seq_cst,
                                               std::memory_order_relaxed));
        return true;
    }

    // Pop an item from the queue.
    // Returns true if an item was popped and written to `val'.
    // May run in parallel with steal().
    // Never runs in parallel with push() or another pop().
    bool pop(T* val) {
        const size_t b = _bottom.load(std::memory_order_relaxed);
        size_t t = _top.load(std::memory_order_relaxed);
        if (t >= b) {
            // Fast check since pop() is called on every scheduling pass.
            // A stale, smaller _top should not enter this branch.
            return false;
        }

        const size_t newb = b - 1;
        _bottom.store(newb, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);

        t = _top.load(std::memory_order_relaxed);
        if (t > newb) {
            _bottom.store(b, std::memory_order_relaxed);
            return false;
        }

        *val = _buffer[newb & (_capacity - 1)];
        if (t != newb) {
            return true;
        }

        // Single last element, compete with steal()
        const bool popped = _top.compare_exchange_strong(
            t, t + 1, std::memory_order_seq_cst, std::memory_order_relaxed);
        _bottom.store(b, std::memory_order_relaxed);
        return popped;
    }

    // Push an item into the queue.
    // Returns true if the item was pushed.
    // May run in parallel with steal().
    // Never runs in parallel with pop() or another push().
    bool push(const T& x) {
        const size_t b = _bottom.load(std::memory_order_relaxed);
        const size_t t = _top.load(std::memory_order_acquire);
        if (b >= t + _capacity) { // Full queue.
            return false;
        }

        _buffer[b & (_capacity - 1)] = x;
        _bottom.store(b + 1, std::memory_order_release);
        return true;
    }

private:
    DISALLOW_COPY_AND_ASSIGN(WorkStealingQueue);

    std::atomic<size_t> _bottom;
    size_t _capacity;
    T* _buffer;
    std::atomic<size_t> BAIDU_CACHELINE_ALIGNMENT _top;
};
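For reference, this is roughly how I use the queue: one owner thread is the only caller of push() and pop(), while several worker threads call steal() concurrently. Below is a minimal sketch; the macro stand-ins are my own simplifications (and would have to appear before the class above), not brpc's real definitions.

#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

// Stand-ins for the brpc (butil) macros, only so this sketch is self-contained.
// They are simplifications and would need to be defined before the class above.
#define LOG(severity) std::cerr
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
    TypeName(const TypeName&) = delete;    \
    void operator=(const TypeName&) = delete
#define BAIDU_CACHELINE_ALIGNMENT  // cache-line alignment omitted in this sketch

// ... WorkStealingQueue<T> as defined above ...

int main() {
    WorkStealingQueue<int> q;
    if (q.init(1024) != 0) {
        return 1;
    }

    std::atomic<int> stolen(0);
    std::atomic<bool> stop(false);

    // Thief threads: only ever call steal(); they may run in parallel
    // with each other and with the owner thread.
    std::vector<std::thread> thieves;
    for (int i = 0; i < 4; ++i) {
        thieves.emplace_back([&] {
            int v;
            while (!stop.load(std::memory_order_relaxed)) {
                if (q.steal(&v)) {
                    stolen.fetch_add(1, std::memory_order_relaxed);
                }
            }
        });
    }

    // Owner thread: the only caller of push() and pop().
    int popped = 0;
    for (int i = 0; i < 100000; ++i) {
        while (!q.push(i)) {
            // Queue full: let the thieves drain it.
        }
        int v;
        if (q.pop(&v)) {
            ++popped;
        }
    }

    stop.store(true, std::memory_order_relaxed);
    for (std::thread& t : thieves) {
        t.join();
    }
    std::cout << "popped=" << popped << " stolen=" << stolen.load() << std::endl;
    return 0;
}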

