use std::collections::LinkedList;
use std::sync::{Arc, mpsc, Mutex};
use std::thread;
use std::thread::sleep;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use crate::config::configuration::Config;
use crate::threadpool::task::Task;

use crate::threadpool::worker::{Worker};


//#[derive(Debug)]
/// A thread pool with a fixed set of "core" workers plus on-demand extra
/// workers, fed through an internal task queue and an mpsc channel.
///
/// Flow (as visible in this file): `execute` pushes a `Task` into
/// `task_queue`; a dispatcher thread started by `delivery_task` drains the
/// queue and forwards tasks over `sender`; workers share `receiver`.
pub struct ThreadPool {
    max_thread_num          :   usize,  // upper bound on total workers (core + extra)
    core_thread_num         :   usize,  // workers created eagerly in `new`
    max_task_num            :   usize,  // queue capacity; tasks beyond this are dropped
    thread_keep_alive_time  :   usize,  // unit: second
    // Pending tasks awaiting dispatch; shared with the dispatcher thread.
    task_queue              :   Arc<Mutex<LinkedList<Task>>>,
    workers                 :   Vec<Worker>,
    // Producing half of the channel, used by the dispatcher thread.
    sender                  :   mpsc::Sender<Task>,
    // Consuming half, cloned (via Arc) into every worker.
    receiver                :   Arc<Mutex<mpsc::Receiver<Task>>>,
}

// Configuration keys (looked up in `Config`) and their fallback defaults.
pub const MAX_THREAD_NUM            :   &str = "max_thread_num";
pub const CORE_THREAD_NUM           :   &str = "core_thread_num";
pub const CORE_THREAD_NUM_DEFAULT   :   usize =2;
// NOTE(review): "MUM" looks like a typo for "NUM", but the constant is `pub`,
// so renaming it would break external callers — left as-is.
pub const MAX_TASK_MUM              :   &str = "max_task_num";
pub const MAX_TASK_MUM_DEFAULT      :   usize = 1024;
// NOTE(review): no `*_DEFAULT` constant exists for this key; `new` falls back
// to a literal 60 seconds.
pub const THREAD_KEEP_ALIVE_TIME    :   &str = "thread_keep_alive_time";


impl ThreadPool {

    pub fn new(config:&Config) -> Self {
        let core_thread_num = config.get_value_with_default_usize(CORE_THREAD_NUM,
                                                                  CORE_THREAD_NUM_DEFAULT);
        if core_thread_num<1 {
            panic!("core thread pool number can not be less than 1!");
        }
        let cpus = num_cpus::get();
        let max_thread_num  = config.get_value_with_default_usize(MAX_THREAD_NUM,
                                                              cpus+1);
        let max_task_num  = config.get_value_with_default_usize(MAX_TASK_MUM,
                                                                    1000);
        let thread_keep_alive_time = config.get_value_with_default_usize(MAX_TASK_MUM,
                                                                         60);
        let mut workers  = Vec::with_capacity(core_thread_num);

        let (sender,receiver) = mpsc::channel();
        let receiver = Arc::new(Mutex::new(receiver));

        for i in 0..core_thread_num {
            let mut worker = Worker::new(i, thread_keep_alive_time,true);
            let receiver = Arc::clone(&receiver);
            worker.work(receiver);
            workers.push(worker);
        }

        return ThreadPool{
            max_thread_num,
            core_thread_num,
            max_task_num,
            thread_keep_alive_time,
            task_queue: Default::default(),
            workers,
            sender,
            receiver,
        };
    }

    pub fn delivery_task(&mut self) {

        let sender  = self.sender.clone();
        let mut task_queue = Arc::clone(&self.task_queue);
        thread::spawn(move ||{
            loop {
                sleep(Duration::new(1,0));
                let current_time = SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs();
                println!(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>there are {} task in queue {}", task_queue.lock().unwrap().len(), current_time);
                let task = task_queue.lock().unwrap().pop_front();
                match task {
                    None => {
                        println!("there is not any request at present.");
                    }
                    Some(t) => {
                        sender.send(t).expect("delivery task failed.");
                    }
                }
            }
        });
    }

    pub fn execute<F>(&mut self, f:F) where F:FnOnce() +Send + Sync + 'static,  {

        let mut is_free_worker = true;
        // 这里可能存在这样的情况，for循环检查core worker时他们在working,但是在创建新worker时，他们停止了working,
        // 这不影响功能，但是可能会多出一个空闲的线程。
        for worker in &self.workers {
            is_free_worker = is_free_worker && !*worker.is_working();
        }

       if &self.workers.len() < &self.max_thread_num && !is_free_worker{
            let mut worker = Worker::new(&self.workers.len()+1, self.thread_keep_alive_time, false);
            let receiver = Arc::clone(&self.receiver);
            worker.work(receiver);
            self.workers.push(worker);
            //let _ =&self.sender.send(task);
        }
        if &self.max_task_num > &self.task_queue.lock().unwrap().len() {
            //let task = RefCell::new(Task::new(f));
            let task = Task::new(f);
            let _ = &self.task_queue.lock().unwrap().push_back(task);
        }else {
            eprintln!("the task queue have been full, so your request will be abandon.");
        }









        /*for worker in &self.workers {
            //如果有worker 处于空闲状态，就直接发任务
            if !worker.is_working(){
                let _ =&self.sender.send(task);
                is_sent = true;
                break;//如果不加break, 那么task 就会报错，因为在不加break 时，task要进入第二次循环，这个时候task所有权可能已经被第一个worker 获得了，第二次就会报错。
            }
        }*/
        /*if !is_sent {
            if &self.workers.len() < &self.max_thread_num {
                let mut worker = Worker::new(&self.workers.len()+1, self.thread_keep_alive_time, false);
                let receiver = Arc::clone(&self.receiver);
                worker.work(receiver);
                self.workers.push(worker);
                let _ =&self.sender.send(*(task.try_borrow().unwrap()));
            }else {
                //放入任务队列
                &self.task_queue.push_back(*(task.try_borrow().unwrap()));
            }
        }*/
    }
}