use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{self, Receiver, Sender};
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

#[cfg(test)]
mod tests {
    use super::*;

    /// Smoke test: 42 jobs submitted to an 8-thread pool all run before
    /// `join` returns.
    #[test]
    fn it_works() {
        // BUG FIX: the original called `ThreadPool::with_threads(8)`, but no
        // such associated function existed — `with_threads` is a free
        // function (in scope via `use super::*`) returning a `Builder`.
        let thread_pool = with_threads(8).build();
        let test_count = Arc::new(AtomicUsize::new(0));
        for _ in 0..42 {
            let test_count = test_count.clone();
            thread_pool.execute(move || {
                test_count.fetch_add(1, Ordering::Relaxed);
            });
        }
        // Blocks until queued_count and active_count both reach zero.
        thread_pool.join();
        assert_eq!(42, test_count.load(Ordering::Relaxed));
    }
}

pub type Thunk = Box<dyn FnOnce() + Send + 'static>;

/// Convenience entry point: begin configuring a pool with `threads` workers.
///
/// Equivalent to `Builder::new().num_threads(threads)`; call `.build()` on
/// the result to actually spawn the pool.
pub fn with_threads(threads: usize) -> Builder {
    let builder = Builder::new();
    builder.num_threads(threads)
}

/// A simple thread pool.
///
/// Jobs submitted via [`ThreadPool::execute`] are pushed onto an mpsc channel
/// and consumed by worker threads spawned when the pool is built.
pub struct ThreadPool {
    // Sending half of the job queue; workers share the receiving half
    // behind a mutex (see `ThreadSharedData::job_receiver`).
    jobs: Sender<Thunk>,
    // Counters, condvar, and config shared with every worker thread.
    shared: Arc<ThreadSharedData>,
}

impl ThreadPool {
    /// Returns a [`Builder`] for configuring a pool.
    ///
    /// NOTE(review): contrary to the usual `new` convention this returns a
    /// `Builder`, not a `ThreadPool`; call `.build()` on the result.
    pub fn new() -> Builder {
        Builder::new()
    }

    /// Returns a [`Builder`] pre-configured with `threads` worker threads.
    ///
    /// Added (backward-compatibly) so that `ThreadPool::with_threads(n)` —
    /// the form used by the unit test — resolves; it mirrors the free
    /// function `with_threads`.
    pub fn with_threads(threads: usize) -> Builder {
        Builder::new().num_threads(threads)
    }

    /// Queues `job` for execution on some worker thread.
    ///
    /// # Panics
    /// Panics if the job channel is disconnected, i.e. every worker has
    /// exited and dropped the receiver.
    pub fn execute<F>(&self, job: F)
    where
        F: FnOnce() + Send + 'static,
    {
        // Count the job as queued *before* sending it, so `has_work()` can
        // never observe a job that is in the channel but not yet counted.
        self.shared.queued_count.fetch_add(1, Ordering::SeqCst);
        self.jobs
            .send(Box::new(job))
            .expect("unable to send job into queue");
    }

    /// Blocks until there are no queued and no actively running jobs.
    pub fn join(&self) {
        // Fast path: nothing outstanding, skip the lock entirely.
        if !self.shared.has_work() {
            return;
        }
        let mut lock = self.shared.empty_trigger.lock().unwrap();
        // Re-check under the lock and loop: condvar waits can wake
        // spuriously, and new work may have been observed before we slept.
        while self.shared.has_work() {
            lock = self.shared.empty_condvar.wait(lock).unwrap();
        }
    }
}

/// State shared between the pool handle and all worker threads.
pub(self) struct ThreadSharedData {
    // Optional name applied to each spawned worker thread.
    name: Option<String>,
    // Optional stack size (bytes) for each spawned worker thread.
    stack_size: Option<usize>,
    // Jobs sent into the channel but not yet picked up by a worker.
    queued_count: AtomicUsize,
    // Jobs currently executing on some worker thread.
    active_count: AtomicUsize,
    // Upper bound on concurrently active workers; workers exit when
    // `active_count` reaches this (see `spawn_in_pool`).
    max_thread_count: AtomicUsize,
    // Number of worker threads that died due to a panicking job.
    panic_thread_count: AtomicUsize,
    // Mutex + condvar pair used by `join` to sleep until the pool drains.
    empty_trigger: Mutex<()>,
    // Notified by workers when `has_work()` becomes false.
    empty_condvar: Condvar,
    // The single shared receiver; workers lock it to `recv` one job at a time.
    job_receiver: Mutex<Receiver<Thunk>>,
}

impl ThreadSharedData {
    /// True while any job is either waiting in the queue or running.
    fn has_work(&self) -> bool {
        self.queued_count.load(Ordering::SeqCst) > 0 || self.active_count.load(Ordering::SeqCst) > 0
    }

    /// Wakes every thread blocked in `ThreadPool::join` once the pool is idle.
    fn no_work_notify_all(&self) {
        if !self.has_work() {
            // Deliberate lock-then-release: `*...lock()` dereferences the
            // guard, which is a temporary and is dropped at the end of this
            // statement. Acquiring the mutex here forces us to wait until any
            // joiner that saw `has_work() == true` has entered `wait()` (and
            // released the lock), so the notify below cannot be missed.
            *self
                .empty_trigger
                .lock()
                .expect("Unable to notify all joining threads");
            self.empty_condvar.notify_all();
        }
    }
}

/// Guard owned by each worker thread; if the worker unwinds because a job
/// panicked, the guard's `Drop` impl spawns a replacement thread.
struct Sentinel<'a> {
    // Shared pool state, used by `drop` to fix counters and respawn.
    shared: &'a Arc<ThreadSharedData>,
    // While true, `drop` performs cleanup; `cancel` clears it on normal exit.
    active: bool,
}

impl<'a> Sentinel<'a> {
    /// Creates an armed sentinel for the worker owning `shared_data`.
    fn new(shared_data: &'a Arc<ThreadSharedData>) -> Sentinel {
        Sentinel {
            shared: shared_data,
            active: true,
        }
    }

    /// Disarms the sentinel on a worker's normal exit path.
    ///
    /// Note: `self` is consumed, so `Drop::drop` still runs at the end of
    /// this function — but with `active == false` it does nothing.
    fn cancel(mut self) {
        self.active = false;
    }
}

impl<'a> Drop for Sentinel<'a> {
    /// Runs when the worker's closure unwinds (a job panicked) while the
    /// sentinel is still armed: repairs the active-job counter, records the
    /// panic, wakes joiners if the pool drained, and spawns a replacement
    /// worker so the pool keeps its thread count.
    fn drop(&mut self) {
        if self.active {
            // The panicking job incremented active_count but never got to
            // decrement it; undo that here.
            self.shared.active_count.fetch_sub(1, Ordering::SeqCst);
            if thread::panicking() {
                self.shared
                    .panic_thread_count
                    .fetch_add(1, Ordering::SeqCst);
            }
            // The dead job may have been the last outstanding work.
            self.shared.no_work_notify_all();
            // Replace this (dying) worker thread.
            spawn_in_pool(self.shared.clone());
        }
    }
}

/// Fluent configuration for a [`ThreadPool`]; finish with [`Builder::build`].
pub struct Builder {
    // Name applied to every worker thread (default: unnamed).
    name: Option<String>,
    // Stack size in bytes for each worker (default: platform default).
    stack_size: Option<usize>,
    // Worker count (default: number of logical CPUs, via `num_cpus`).
    num_threads: Option<usize>,
}

impl Builder {
    pub fn new() -> Builder {
        Self {
            name: None,
            stack_size: None,
            num_threads: None,
        }
    }

    pub fn name(mut self, name: String) -> Self {
        self.name = Some(name);
        self
    }

    pub fn stack_size(mut self, stack_size: usize) -> Self {
        self.stack_size = Some(stack_size);
        self
    }

    pub fn num_threads(mut self, num_threads: usize) -> Self {
        self.num_threads = Some(num_threads);
        self
    }

    pub fn build(self) -> ThreadPool {
        let (tx, rx) = mpsc::channel::<Thunk>();
        let num_threads = {
            if let Some(num_threads) = self.num_threads {
                num_threads
            } else {
                num_cpus::get()
            }
        };
        let shared_data = Arc::new(ThreadSharedData {
            name: self.name,
            stack_size: self.stack_size,
            queued_count: AtomicUsize::new(0),
            active_count: AtomicUsize::new(0),
            max_thread_count: AtomicUsize::new(num_threads),
            panic_thread_count: AtomicUsize::new(0),
            empty_trigger: Mutex::new(()),
            empty_condvar: Condvar::new(),
            job_receiver: Mutex::new(rx),
        });
        (0..num_threads).for_each(|_| spawn_in_pool(shared_data.clone()));
        ThreadPool {
            jobs: tx,
            shared: shared_data,
        }
    }
}

/// Spawns one worker thread that loops pulling jobs off the shared channel
/// until the pool shrinks below it or the sender is dropped.
fn spawn_in_pool(shared_data: Arc<ThreadSharedData>) {
    let mut build = thread::Builder::new();
    // Apply the pool-wide thread name / stack size, if configured.
    if let Some(ref name) = shared_data.name {
        build = build.name(name.to_string());
    }
    if let Some(stack_size) = shared_data.stack_size {
        build = build.stack_size(stack_size);
    }
    build
        .spawn(move || {
            // Armed sentinel: if a job panics and unwinds this closure, its
            // Drop impl fixes the counters and spawns a replacement worker.
            let sentinel = Sentinel::new(&shared_data);
            let mut job;
            let mut thread_counter_val;
            let mut max_thread_count_val;
            loop {
                // Exit when enough workers are already active — this is how
                // the pool can shrink if max_thread_count is lowered.
                thread_counter_val = shared_data.active_count.load(Ordering::Acquire);
                max_thread_count_val = shared_data.max_thread_count.load(Ordering::Relaxed);
                if thread_counter_val >= max_thread_count_val {
                    break;
                }
                // The receiver guard lives only inside this block expression:
                // it is held across the (blocking) `recv`, but dropped before
                // the job executes, so running a job never holds the lock.
                job = match {
                    let mutex_guard = shared_data
                        .job_receiver
                        .lock()
                        .expect("unable to lock job receiver");
                    mutex_guard.recv()
                } {
                    Ok(job) => job,
                    // Sender dropped: the pool handle is gone, shut down.
                    Err(..) => break,
                };
                // Move the job from "queued" to "active" before running it so
                // `has_work()` never reports idle while a job is in flight.
                shared_data.queued_count.fetch_sub(1, Ordering::SeqCst);
                shared_data.active_count.fetch_add(1, Ordering::SeqCst);
                job();
                shared_data.active_count.fetch_sub(1, Ordering::SeqCst);
                // Wake joiners if that was the last outstanding job.
                shared_data.no_work_notify_all();
            }
            // Normal exit: disarm the sentinel so no replacement is spawned.
            sentinel.cancel();
        })
        .unwrap();
}
