use crate::component::request::Request;
use crate::component::response::Response;
use crate::component::ParseResult;
use crate::downloader::Downloader;
use crate::middleware::{DownloaderMiddleware, DownloaderMwErrResult, DownloaderMwResult};
use crate::pipeline::{ItemProduct, Pipeline};
use crate::scheduler::Scheduler;
use crate::spider::Spider;
use std::sync::{mpsc, Arc, Mutex};
use std::thread;
use log::debug;

/// The item pipelines attached to a crawler; each pipeline is mutex-guarded
/// so the worker threads can share one pipeline set.
pub type Pipelines<ItemType> = Vec<Mutex<Box<dyn Pipeline<ItemType>>>>;
/// The downloader-middleware chain, mutex-guarded for the same reason.
type DownloaderMws<ItemType> = Vec<Mutex<Box<dyn DownloaderMiddleware<ItemType>>>>;

/// Outcome of one request after it has been handed to the downloader (and
/// its middleware chain) but before anything reaches a pipeline.
/// This is the return value of `download_one()`.
pub enum IntermediateProcessResult<ItemType: Clone> {
    /// Reschedule this request.
    Request(Request<ItemType>),
    /// A response, ready to be parsed.
    Response(Response<ItemType>),
    /// Drop the request entirely.
    Ignore,
}

/// A crawler bundles one spider, its item pipelines, its downloader
/// middlewares, and one downloader.
pub struct Crawler<ItemType: Clone> {
    spider: Box<dyn Spider<ItemType = ItemType>>,
    downloader: Downloader,
    pipelines: Pipelines<ItemType>,
    downloader_mws: DownloaderMws<ItemType>,
}

impl<ItemType: Clone> Crawler<ItemType> {
    /// Build a crawler from a spider and its pipelines.
    ///
    /// No downloader middleware is installed by default; register middlewares
    /// afterwards with `add_downloader_middleware()`.
    pub fn new(
        spider: Box<dyn Spider<ItemType = ItemType>>,
        pipelines: Pipelines<ItemType>,
    ) -> Self {
        Self {
            downloader: Downloader::new(),
            downloader_mws: Vec::new(),
            spider,
            pipelines,
        }
    }

    /// Append one downloader middleware to the end of the middleware chain.
    pub fn add_downloader_middleware(&mut self, dm: Box<dyn DownloaderMiddleware<ItemType>>) {
        let guarded = Mutex::new(dm);
        self.downloader_mws.push(guarded);
    }
}

/// A unit of work dispatched to a worker: one request plus a handle to the
/// crawler that owns it.
pub struct Task<ItemType: Clone> {
    request: Request<ItemType>,
    // Arc (not Box): every task for the same crawler shares one Crawler
    // value across worker threads.
    crawler: Arc<Crawler<ItemType>>,
}

/// What a worker reports back to the engine: the new tasks produced by one
/// processed task, plus the id of the worker (which is now idle again).
struct FinalProcessResult<ItemType: Clone> {
    new_tasks: Vec<Task<ItemType>>,
    worker_id: usize,
}

///     engine                           threads
///
///     result_rx                 <--- result_tx, result_tx, result_tx
///     task_tx, task_tx, task_tx ---> task_rx, task_rx, task_rx
///
///
///
// new() spawns n worker threads that block waiting for the engine to send
// tasks:
//  - each worker receives its worker_id, a task_rx and a result_tx clone
//  - task_rx: waits for tasks dispatched by the engine
//  - result_tx: after finishing a task the worker sends a FinalProcessResult
//  - FinalProcessResult: the newly produced tasks plus the worker_id
//
// run() drives the engine:
//  - init_requests() seeds the scheduler with tasks from the crawler's spider
//  - tasks are dispatched to the workers
//  - the engine loops on result_rx, collecting each FinalProcessResult
//    (which also means that worker is idle again) and handing out more tasks
//  - when all work is done it drops every task_tx (shutting the workers
//    down) and joins the worker threads
pub struct Engine<ItemType: Clone + 'static> {
    crawler: Arc<Crawler<ItemType>>,
    /// The request scheduler (task queue).
    scheduler: Scheduler<ItemType>,
    /// Join handles of the worker threads.
    workers: Vec<thread::JoinHandle<()>>,
    /// One task sender per worker, used to dispatch tasks.
    task_txs: Vec<mpsc::Sender<Task<ItemType>>>,
    /// Receives FinalProcessResult { new_tasks, worker_id } from the workers.
    result_rx: mpsc::Receiver<FinalProcessResult<ItemType>>,
}

impl<ItemType: Clone + 'static + Send + Sync> Engine<ItemType> {
    /// Create the engine and spawn `n_workers` worker threads.
    ///
    /// Each worker gets its own `task_rx` plus a clone of the shared
    /// `result_tx`; the engine keeps the matching `task_txs` and the single
    /// `result_rx`.
    ///
    /// # Panics
    /// Panics when `n_workers == 0`.
    // TODO: read n_workers from a configuration file
    pub fn new(crawler: Crawler<ItemType>, n_workers: usize) -> Self {
        assert!(n_workers > 0);
        debug!("创建爬虫引擎, 启动 {} 个工作线程", n_workers);
        let mut task_txs = Vec::with_capacity(n_workers);
        let mut workers = Vec::with_capacity(n_workers);
        let (result_tx, result_rx) = mpsc::channel();
        for i in 0..n_workers {
            let (task_tx, task_rx) = mpsc::channel();
            task_txs.push(task_tx);
            let result_tx = result_tx.clone();
            // Spawn the worker; it blocks on task_rx until run() dispatches.
            workers.push(thread::spawn(move || Self::work(i, task_rx, result_tx)));
        }
        // The original `result_tx` is dropped here, so `result_rx`
        // disconnects exactly when every worker thread has exited.
        Self {
            crawler: Arc::new(crawler),
            scheduler: Scheduler::new(),
            workers,
            task_txs,
            result_rx,
        }
    }

    /// Worker-thread body:
    ///   - block until the engine sends a task (`recv` fails once the engine
    ///     drops our `task_tx`, which is the shutdown signal)
    ///   - process the task via `process_one_task()`, yielding new requests
    ///   - wrap the requests as tasks and report them, together with our
    ///     worker id, as a `FinalProcessResult`
    fn work(
        worker_id: usize,
        task_rx: mpsc::Receiver<Task<ItemType>>,
        result_tx: mpsc::Sender<FinalProcessResult<ItemType>>,
    ) {
        while let Ok(task) = task_rx.recv() {
            let crawler = task.crawler.clone();
            debug!("[{}号 worker] 开始处理一个任务", worker_id);
            // Every new request becomes a task bound to the same crawler.
            let new_tasks = Self::process_one_task(task)
                .into_iter()
                .map(|request| Task {
                    request,
                    crawler: crawler.clone(),
                })
                .collect();
            // The engine holds result_rx for its whole lifetime, so a send
            // failure here is a bug worth panicking on.
            result_tx
                .send(FinalProcessResult {
                    new_tasks,
                    worker_id,
                })
                .unwrap();
        }
    }

    /// Process one task:
    ///   - `Self::download_one()` runs the download plus middleware hooks
    ///   - the returned `Vec<Request>` is one of:
    ///       1. just the original request (reschedule),
    ///       2. the requests extracted from the parsed response,
    ///       3. empty.
    fn process_one_task(task: Task<ItemType>) -> Vec<Request<ItemType>> {
        let crawler = task.crawler.clone();
        let parser = task.request.parser;
        let metadata = task.request.metadata.clone();
        match Self::download_one(task) {
            // Reschedule the request as-is.
            IntermediateProcessResult::Request(request) => vec![request],
            // Parse the response: prefer the request-specific parser,
            // falling back to the spider's default parser.
            IntermediateProcessResult::Response(response) => {
                let ParseResult { reqs, items } = match parser {
                    Some(parser) => (parser)(response, metadata),
                    None => crawler.spider.parse(response),
                };
                for item in items {
                    Self::pipeline_chain(item, crawler.clone());
                }
                reqs
            }
            IntermediateProcessResult::Ignore => vec![],
        }
    }

    /// Run an item through every pipeline in order. A pipeline may transform
    /// the item or drop it (`ItemProduct::Ignore` stops the chain).
    fn pipeline_chain(mut item: ItemType, crawler: Arc<Crawler<ItemType>>) {
        for pl in &crawler.pipelines {
            let mut pl = pl.lock().unwrap();
            match pl.process(item) {
                ItemProduct::Item(next) => item = next,
                ItemProduct::Ignore => break,
            }
        }
    }

    /// Download one request, running the downloader-middleware hooks before
    /// (`process_request`) and after (`process_response` / `process_error`)
    /// the actual download.
    fn download_one(task: Task<ItemType>) -> IntermediateProcessResult<ItemType> {
        let Task {
            mut request,
            crawler,
        } = task;
        // Pre-download hook: each middleware may answer, reschedule, rewrite
        // or drop the request.
        for dm in &crawler.downloader_mws {
            let mut dm = dm.lock().unwrap();
            match dm.process_request(request) {
                // Reschedule.
                DownloaderMwResult::FinalRequest(r) => {
                    return IntermediateProcessResult::Request(r)
                }
                // Skip the download; hand back a ready-made response.
                DownloaderMwResult::Response(r) => return IntermediateProcessResult::Response(r),
                // Keep going through the middleware chain.
                DownloaderMwResult::IntermediateRequest(r) => request = r,
                // Drop the request.
                DownloaderMwResult::Ignore => return IntermediateProcessResult::Ignore,
            }
        }

        // Keep a copy for the post-download hooks, which take `&Request`.
        let req = request.clone();
        match crawler.downloader.download(request) {
            Ok(response) => {
                Self::downloader_middleware_response_chain(&req, response, crawler.clone())
            }
            Err(e) => {
                for dm in &crawler.downloader_mws {
                    // Run the error hook in its own scope so the middleware
                    // lock is released before the response chain (which locks
                    // each middleware again) is entered.
                    let err_result = {
                        let mut dm = dm.lock().unwrap();
                        dm.process_error(&req, &e)
                    };
                    match err_result {
                        DownloaderMwErrResult::Continue => (),
                        DownloaderMwErrResult::Request(r) => {
                            return IntermediateProcessResult::Request(r)
                        }
                        DownloaderMwErrResult::Response(r) => {
                            return Self::downloader_middleware_response_chain(
                                &req,
                                r,
                                crawler.clone(),
                            )
                        }
                    }
                }
                // No middleware recovered from the error: drop the request.
                IntermediateProcessResult::Ignore
            }
        }
    }

    /// Post-download hook chain: run `process_response` of every downloader
    /// middleware over the response.
    ///   - a request result (final or intermediate) aborts the chain and
    ///     reschedules,
    ///   - a response result replaces the response and continues the chain,
    ///   - `Ignore` drops everything.
    pub fn downloader_middleware_response_chain(
        request: &Request<ItemType>,
        mut response: Response<ItemType>,
        crawler: Arc<Crawler<ItemType>>,
    ) -> IntermediateProcessResult<ItemType> {
        for dm in &crawler.downloader_mws {
            let mut dm = dm.lock().unwrap();
            match dm.process_response(request, response) {
                DownloaderMwResult::FinalRequest(r)
                | DownloaderMwResult::IntermediateRequest(r) => {
                    return IntermediateProcessResult::Request(r);
                }
                DownloaderMwResult::Response(r) => response = r,
                DownloaderMwResult::Ignore => return IntermediateProcessResult::Ignore,
            }
        }
        IntermediateProcessResult::Response(response)
    }

    /// Run the engine.
    ///  - `init_requests()` seeds the scheduler from the spider
    ///  - tasks are dispatched to workers; the engine loops on `result_rx`
    ///  - the loop ends only when no task is in flight AND the scheduler is
    ///    empty, so results produced by still-busy workers are never lost
    ///  - finally every `task_tx` is dropped (worker shutdown signal), the
    ///    worker threads are joined, and each pipeline's `close_spider` runs
    pub fn run(&mut self) {
        self.init_requests();

        // Workers currently processing a task / workers waiting for one.
        let mut busy: usize = 0;
        let mut idle: Vec<usize> = Vec::new();

        for worker_id in 0..self.workers.len() {
            if self.send_one_task(worker_id) {
                busy += 1;
            } else {
                // Scheduler exhausted: remaining workers start out idle.
                idle.push(worker_id);
            }
        }

        // Keep running while any task is in flight: an in-flight task may
        // still produce new requests even when the scheduler is momentarily
        // empty. (The previous version exited as soon as one worker went
        // idle with an empty scheduler, silently dropping the results of
        // tasks other workers were still processing.)
        while busy > 0 {
            let result = match self.result_rx.recv() {
                Ok(result) => result,
                // All worker threads have died; nothing more can arrive.
                Err(_) => break,
            };
            busy -= 1;
            idle.push(result.worker_id);
            for task in result.new_tasks {
                self.scheduler.enqueue(task);
            }
            // Hand out tasks to idle workers while the scheduler has any.
            while let Some(&worker_id) = idle.last() {
                if self.send_one_task(worker_id) {
                    idle.pop();
                    busy += 1;
                } else {
                    break;
                }
            }
        }

        // Dropping every task sender closes each worker's recv loop.
        self.task_txs.clear();

        // Wait for the worker threads to finish.
        while let Some(worker) = self.workers.pop() {
            worker.join().unwrap();
        }

        // Tell every pipeline the crawl is over.
        for pl in &self.crawler.pipelines {
            pl.lock().unwrap().close_spider();
        }
    }

    /// Seed the scheduler with the spider's start requests and call
    /// `open_spider` on every pipeline.
    fn init_requests(&mut self) {
        let requests = self.crawler.spider.start_requests();
        for pl in &self.crawler.pipelines {
            pl.lock().unwrap().open_spider();
        }
        for request in requests {
            self.scheduler.enqueue(Task {
                request,
                crawler: self.crawler.clone(),
            });
        }
    }

    /// Pop one task off the scheduler and send it to `worker_id`.
    /// Returns `false` when the scheduler is empty (nothing was sent).
    fn send_one_task(&mut self, worker_id: usize) -> bool {
        match self.scheduler.dequeue() {
            Some(task) => {
                self.task_txs[worker_id].send(task).unwrap();
                true
            }
            None => false,
        }
    }
}
