use std::{thread, time::Duration, sync::Arc};

use crossbeam::atomic::AtomicCell;

/// One worker slot of a [`TaskParallel`] pool: a dedicated single-threaded
/// tokio runtime whose blocking task drains `sender`'s paired receiver.
pub(crate) struct ChannelItem<Input> {
    /// Runtime hosting this worker's blocking receive loop.
    pub rt: tokio::runtime::Runtime,
    /// Producer side of the unbounded crossbeam channel feeding this worker.
    pub sender: crossbeam::channel::Sender<Input>,
    /// Intended per-worker dispatch counter (read by
    /// `TaskParallel::wait_for_finished` for its per-channel report).
    /// NOTE(review): nothing in this file increments it — always 0 as written.
    pub count_send: AtomicCell<u32>,
}

/// Round-robin dispatcher that fans `Input` items out to a fixed set of
/// worker channels, each serviced by its own single-threaded tokio runtime.
pub struct TaskParallel<Input: Send + Sync + 'static> {
    /// Number of worker channels (clamped to at least 2 in `new`).
    count_parallel: u8,
    /// Pool name, used only in log messages.
    name: String,
    /// One entry per worker; `push` selects among these round-robin.
    list_channel: Vec<ChannelItem<Input>>,
    /// Monotonic round-robin cursor used by `push`.
    index: AtomicCell<usize>,
    /// Total number of items successfully sent across all channels.
    count_send: AtomicCell<u32>,
    /// Receive counter: total items processed, shared with all worker threads.
    count_receive: Arc<AtomicCell<u32>>,
}
impl<Input: Send + Sync + 'static> TaskParallel<Input> {
    /// Builds a pool of `count_parallel` workers (clamped to at least 2),
    /// each with its own single-threaded tokio runtime running a blocking
    /// loop that receives items and applies `func` to them.
    ///
    /// # Panics
    /// Panics if a tokio runtime cannot be built.
    pub fn new<Func: Fn(Input) + Send + 'static + Clone>(count_parallel: u8, name: String, func: Func) -> Self {
        let count_parallel = count_parallel.max(2);
        if count_parallel > 40 {
            warn!("TaskParallel::new: {name}, count_parallel={count_parallel} > 40.");
        }
        // Shared receive counter; each worker bumps it after finishing an item.
        let count_receive = Arc::new(AtomicCell::new(0_u32));

        let mut list_channel = Vec::with_capacity(count_parallel as usize);
        for _ in 0..count_parallel {
            let func_clone = func.clone();
            let rt = match tokio::runtime::Builder::new_current_thread()
                .max_blocking_threads(1)
                .on_thread_start(move || {
                    // core_affinity::set_for_current(cid);
                })
                .enable_all()
                .build()
            {
                Ok(v) => v,
                Err(e) => panic!(
                    "ChannelItem::new: tokio::runtime::Builder::new_current_thread, error {e:?}"
                ),
            };
            let (sender, receiver) = crossbeam::channel::unbounded();
            let count_receive_clone = count_receive.clone();
            rt.spawn_blocking(move || loop {
                let data = match receiver.recv() {
                    Ok(v) => v,
                    // RecvError means the channel is empty AND every sender is
                    // dropped — that condition is permanent, so exit the loop.
                    // (The previous sleep-10s-and-retry spun forever, leaking
                    // the worker thread after the pool was dropped.)
                    Err(_e) => break,
                };
                func_clone(data);
                count_receive_clone.fetch_add(1);
            });

            list_channel.push(ChannelItem {
                rt,
                sender,
                count_send: AtomicCell::new(0),
            });
        }

        Self {
            count_parallel,
            name,
            list_channel,
            index: AtomicCell::new(0),
            count_send: AtomicCell::new(0),
            count_receive,
        }
    }

    /// Dispatches `data` to the next worker channel, round-robin.
    pub fn push(&self, data: Input) {
        // Modulo keeps the index in bounds even under concurrent callers.
        // The old fetch_add + compare-then-store(0) reset raced: two threads
        // could both pass the fetch_add before either reset, producing an
        // out-of-bounds index and a panic on `self.list_channel[index]`.
        let index = self.index.fetch_add(1) % self.list_channel.len();
        let item = &self.list_channel[index];

        // Count BEFORE sending so `count_receive` can never run ahead of
        // `count_send` (previously a fast worker could process the item and
        // bump count_receive before the sender had counted it, making the
        // `count_send - count_receive` log in `is_receive_all` underflow).
        self.count_send.fetch_add(1);
        // Also maintain the per-channel counter (it was never incremented
        // before, so wait_for_finished's per-channel report always showed 0).
        item.count_send.fetch_add(1);

        if let Err(e) = item.sender.send(data) {
            // The item never entered the queue; roll the counters back.
            self.count_send.fetch_sub(1);
            item.count_send.fetch_sub(1);
            error!("TaskParallel::push error: {e:?}.");
        }
    }

    /// Returns `(total_sent, total_received)` as a snapshot.
    pub fn count(&self) -> (u32, u32) {
        let count_send = self.count_send.load();
        let count_receive = self.count_receive.load();
        (count_send, count_receive)
    }

    /// Logs current progress and returns `true` once every sent item has
    /// been processed. The two counters are read non-atomically, so the
    /// "left" figure is a best-effort snapshot.
    pub fn is_receive_all(&self, prefix: &str) -> bool {
        let count_send = self.count_send.load();
        let count_receive = self.count_receive.load();
        // saturating_sub: a concurrent push between the two loads can make
        // the receive snapshot exceed the send snapshot; never underflow.
        info!(
            "TaskParallel {} {prefix}: count_send={count_send}, count_receive={count_receive}, left={}",
            self.name,
            count_send.saturating_sub(count_receive)
        );

        count_send == count_receive
    }

    /// Blocks until all sent items are processed or `count_loop` polls of
    /// `secs_loop` seconds each have elapsed, then shuts the workers down.
    pub fn wait_for_finished(self, secs_loop: u64, count_loop: u64) {
        let len = self.list_channel.len();
        let mut buf = format!(
            "TaskParallel::wait_for_finished: you send {} tasks to {len} channels:\n",
            self.count_send.load()
        );
        for (index, item) in self.list_channel.iter().enumerate() {
            buf.push_str(&format!("\t{} / {len}: {} tasks.\n", index + 1, item.count_send.load()));
        }
        // The summary was previously built but never emitted — log it.
        info!("{buf}");

        let mut is_receive_all = false;
        for index in 0..count_loop {
            thread::sleep(Duration::from_secs(secs_loop));
            let prefix = format!("{} / {count_loop}", index + 1);
            is_receive_all = self.is_receive_all(&prefix);
            if is_receive_all {
                break;
            }
        }
        for item in self.list_channel {
            item.rt.shutdown_background();
        }
        // Report honestly whether we drained the queues or gave up.
        if is_receive_all {
            warn!("TaskParallel {}: finish when is_receive_all=true", self.name);
        } else {
            warn!(
                "TaskParallel {}: finish after {count_loop} loops, is_receive_all=false",
                self.name
            );
        }
    }

}

#[cfg(test)]
mod test {
    use std::{thread, time::Duration};

    use super::TaskParallel;

    /// Worker payload: blocks its channel's thread for `100 + mills` ms.
    fn sleep_func(mills: u64) {
        // println!("sleep: {}", 1000 + mills);
        thread::sleep(Duration::from_millis(100 + mills));
    }

    #[test]
    fn test_task_parallel() {
        // 20 workers share 1000 sleep tasks; poll up to 30 times, 60 s apart.
        let tp = TaskParallel::new(20, "test".to_string(), sleep_func);
        (0..1000).for_each(|millis| tp.push(millis));
        tp.wait_for_finished(60, 30);
    }
}