use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;

/// A pool of `T` values with one mutex per slot. Slots are claimed with a
/// single atomic operation, so concurrent callers rarely lock the same slot.
pub struct LowContentionPool<T> {
    stack: Vec<Mutex<T>>,
    /// Number of slots currently holding a value (the stack pointer).
    size: AtomicUsize,
}

impl<T: Default> LowContentionPool<T> {
    /// Creates a pool with `size` slots, each initialized to `T::default()`.
    pub fn new(size: usize) -> Self {
        let mut stack = Vec::with_capacity(size);
        for _ in 0..size {
            stack.push(Mutex::new(T::default()));
        }
        let size = AtomicUsize::new(size);

        Self { stack, size }
    }

    /// Takes a value out of the pool, leaving `T::default()` in its slot.
    pub fn get(&self) -> T {
        // Reserve a slot: `fetch_sub` returns the previous counter value,
        // so this call owns the slot at `size - 1`.
        let size = self.size.fetch_sub(1, Ordering::AcqRel);
        // Implementation error if this fails: `get` was called on an empty
        // pool, or unbalanced get/set calls corrupted the counter.
        assert!(size >= 1 && size <= self.stack.len());
        let mut locked = self.stack[size - 1].lock().unwrap();
        std::mem::take(&mut locked)
    }

    /// Returns a value to the pool.
    pub fn set(&self, value: T) {
        // Reserve the next free slot: `fetch_add` returns the previous
        // counter value, which is the index this call writes to.
        let size = self.size.fetch_add(1, Ordering::AcqRel);
        // Implementation error if this fails: `set` was called on a full
        // pool, or unbalanced get/set calls corrupted the counter.
        assert!(size < self.stack.len());
        let mut locked = self.stack[size].lock().unwrap();
        *locked = value;
    }
}
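
// A minimal usage sketch (an assumption, not part of the original code): a
// hypothetical pool of byte buffers shared across a few worker threads. The
// buffer type, thread count, and `main` wrapper are illustrative; the only
// requirement is that every `get` is eventually matched by a `set`.
use std::sync::Arc;
use std::thread;

fn main() {
    // Four slots, each starting out as an empty `Vec<u8>`.
    let pool = Arc::new(LowContentionPool::<Vec<u8>>::new(4));

    let handles: Vec<_> = (0..4)
        .map(|i| {
            let pool = Arc::clone(&pool);
            thread::spawn(move || {
                // Take a buffer, use it, and hand it back so the counter
                // stays balanced and the asserts never fire.
                let mut buf = pool.get();
                buf.extend_from_slice(format!("worker {i}").as_bytes());
                pool.set(buf);
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}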