use async_trait::async_trait;
use dioxus::logger::tracing;
use futures::stream::{FuturesUnordered, StreamExt};
use reqwest::Client;
use scraper::{Html, Selector};
use spider::base::{Runnable, Stopable, Task, TaskError};
use std::future::Future;
use std::pin::Pin;
use url::Url;

/// Current lifecycle state of a spider task.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum SpiderTaskStatus {
    /// The task has been created but has not started running yet.
    Pending,
    /// The task is currently executing.
    Running,
    /// The task is no longer running (finished normally or interrupted).
    Stopped,
}

/// Errors that can occur while the spider is running.
#[derive(Debug, thiserror::Error)]
pub enum SpiderError {
    /// An HTTP request failed (wraps `reqwest::Error`).
    #[error(transparent)]
    RequestError(#[from] reqwest::Error),
    /// A page, URL, or CSS selector could not be parsed; carries the message.
    #[error("{0}")]
    ParseError(String),
    /// The user-supplied result callback reported a failure.
    #[error(transparent)]
    ResultHandlerError(#[from] ResultHandlerError),
}

/// Error type returned by the result-handling callback passed to `SpiderRunner`.
#[derive(Debug, thiserror::Error)]
pub enum ResultHandlerError {
    /// An arbitrary, caller-defined error message.
    #[error("{0}")]
    Custom(String),
}


/// Article parser: extracts an article's title, body, and breadcrumb
/// navigation from an HTML page.
///
/// Each part is located with its own pre-compiled CSS selector.
pub struct ArticleParser {
    /// Selector that locates the article title element.
    title_selector: Selector,
    /// Selector that locates the article body element.
    article_selector: Selector,
    /// Selector that locates the breadcrumb navigation element.
    breadcrumb_selector: Selector,
}

impl ArticleParser {
    /// Creates a new article parser from three CSS selector strings.
    ///
    /// # Arguments
    /// * `title_selector_css` - CSS selector for the article title
    /// * `article_selector_css` - CSS selector for the article body
    /// * `breadcrumb_selector_css` - CSS selector for the breadcrumb navigation
    ///
    /// # Errors
    /// Returns `SpiderError::ParseError` if any selector string is invalid.
    pub fn new(
        title_selector_css: &str,
        article_selector_css: &str,
        breadcrumb_selector_css: &str,
    ) -> Result<Self, SpiderError> {
        Ok(Self {
            title_selector: Selector::parse(title_selector_css)
                .map_err(|e| SpiderError::ParseError(format!("解析标题选择器失败: {}", e)))?,
            article_selector: Selector::parse(article_selector_css)
                .map_err(|e| SpiderError::ParseError(format!("解析文章选择器失败: {}", e)))?,
            breadcrumb_selector: Selector::parse(breadcrumb_selector_css)
                .map_err(|e| SpiderError::ParseError(format!("解析面包屑选择器失败: {}", e)))?,
        })
    }

    /// Parses `html` and extracts the title text, the body's raw HTML, and
    /// the breadcrumb text.
    ///
    /// Only the first match of each selector is used; a selector that
    /// matches nothing yields an empty string rather than an error.
    pub fn parse(&self, html: &str) -> Result<ParseResult, SpiderError> {
        let document = Html::parse_document(html);
        let title = document
            .select(&self.title_selector)
            .next()
            .map(|el| el.text().collect::<String>())
            .unwrap_or_default();
        // The body is kept as raw HTML (not flattened to text).
        let content = document
            .select(&self.article_selector)
            .next()
            .map(|el| el.html())
            .unwrap_or_default();
        let breadcrumb = document
            .select(&self.breadcrumb_selector)
            .next()
            .map(|el| el.text().collect::<String>())
            .unwrap_or_default();
        Ok(ParseResult {
            title,
            content,
            breadcrumb,
        })
    }
}

/// Spider runner: executes the page-crawling and parsing workflow.
///
/// Supports concurrent article fetching, a pluggable page parser, and an
/// async callback for handling each batch of results.
pub struct SpiderRunner {
    /// Unique identifier of this crawl task.
    pub task_id: i32,
    /// URL of the first list page to crawl.
    root_list_url: String,
    /// CSS selector matching article links on a list page.
    list_css_selector: String,
    /// Given the current list URL, returns the next list page's URL, or
    /// `None` when there are no more pages.
    next_page_creator: Box<dyn Fn(&str) -> Option<String> + Send + Sync>,
    /// Parser applied to each fetched article page.
    article_parser: ArticleParser,
    /// Current state of this crawl task.
    status: SpiderTaskStatus,
    /// Async callback invoked with each list page's parsed results.
    result_callback: Box<dyn Fn(Vec<ParseResult>) -> Pin<Box<dyn Future<Output = Result<(), ResultHandlerError>> + Send>> + Send + Sync>,
    /// Maximum number of articles fetched in parallel.
    concurrency: usize,
}

impl Task for SpiderRunner {
    /// Type tag reported to the task framework.
    fn type_name(&self) -> &'static str {
        "SpiderRunner"
    }

    /// Serializes this task as `"SpiderRunner:<task_id>"`.
    fn serialize(&self) -> Result<String, spider::base::TaskError> {
        let mut repr = String::from("SpiderRunner:");
        repr.push_str(&self.task_id.to_string());
        Ok(repr)
    }
}

#[async_trait]
impl Runnable for SpiderRunner {
    /// Framework entry point that delegates to the inherent crawl loop.
    async fn run(&self) -> Result<(), TaskError> {
        // NOTE(review): the inherent `SpiderRunner::run` takes `&mut self`,
        // but this trait method only has `&self`, so `self.run()` here cannot
        // borrow mutably — this either fails to compile or resolves back to
        // this trait method and recurses. The status field likely needs
        // interior mutability (e.g. a lock/atomic) so the crawl can run
        // through `&self` — verify against the `spider` crate's trait.
        // NOTE(review): the `?` also requires `TaskError: From<SpiderError>`;
        // confirm that conversion exists.
        self.run().await?;
        Ok(())
    }
}
#[async_trait]
impl Stopable for SpiderRunner {
    /// Framework stop hook.
    async fn stop(&self) -> Result<(), TaskError> {
        // NOTE(review): this is a no-op — it neither signals the crawl loop
        // in `SpiderRunner::run` to exit nor updates `status`. A shared
        // cancellation flag (e.g. an AtomicBool checked each loop iteration)
        // would be needed for a real stop.
        Ok(())
    }
}

impl SpiderRunner {
    /// Builds a new spider runner in the `Pending` state.
    ///
    /// # Arguments
    /// * `task_id` - unique identifier for this crawl task
    /// * `root_list_url` - URL of the first list page to crawl
    /// * `list_css_selector` - CSS selector matching article links on a list page
    /// * `next_page_creator` - maps the current list URL to the next one,
    ///   or `None` when there are no more pages
    /// * `article_parser` - parser applied to each article page
    /// * `result_callback` - async callback invoked with each page's parsed results
    /// * `concurrency` - number of articles fetched in parallel; `None` means 1 (serial)
    pub fn new(
        task_id: i32,
        root_list_url: &str,
        list_css_selector: &str,
        next_page_creator: impl Fn(&str) -> Option<String> + Send + Sync + 'static,
        article_parser: ArticleParser,
        result_callback: impl Fn(Vec<ParseResult>) -> Pin<Box<dyn Future<Output = Result<(), ResultHandlerError>> + Send>> + Send + Sync + 'static,
        concurrency: Option<usize>,
    ) -> Self {
        Self {
            task_id,
            root_list_url: root_list_url.to_string(),
            list_css_selector: list_css_selector.to_string(),
            next_page_creator: Box::new(next_page_creator),
            article_parser,
            status: SpiderTaskStatus::Pending,
            result_callback: Box::new(result_callback),
            // Default to serial crawling when no concurrency is specified.
            concurrency: concurrency.unwrap_or(1),
        }
    }

    /// Crawls list pages starting at `root_list_url`: collects article links
    /// from each list page, fetches and parses the articles concurrently,
    /// hands the results to `result_callback`, then advances with
    /// `next_page_creator` until it returns `None`.
    ///
    /// # Errors
    /// Propagates selector-parsing, list-page HTTP, and callback errors.
    /// Failures on individual articles are logged and skipped inside
    /// `process_articles_concurrently`.
    pub async fn run(&mut self) -> Result<(), SpiderError> {
        self.status = SpiderTaskStatus::Running;
        let mut current_url = self.root_list_url.clone();
        tracing::info!("SpiderRunner started for list_css_selector: {} with concurrency: {}", 
            &self.list_css_selector, self.concurrency);

        let list_selector = Selector::parse(&self.list_css_selector)
            .map_err(|e| SpiderError::ParseError(format!("解析列表选择器失败: {}", e)))?;

        loop {
            // Collect the article links on the current list page.
            let article_links = fetch_article_links(&current_url, &list_selector).await?;
            tracing::info!("Found {} articles on page: {}", article_links.len(), &current_url);

            if !article_links.is_empty() {
                // Fetch and parse the articles, bounded by `self.concurrency`.
                let results = process_articles_concurrently(self.concurrency, &self.article_parser, article_links).await?;

                // Hand this page's results to the caller-supplied handler.
                if !results.is_empty() {
                    (self.result_callback)(results).await?
                }
            }

            // Advance to the next list page, or finish.
            match (self.next_page_creator)(&current_url) {
                Some(next_url) => current_url = next_url,
                None => break,
            }
        }

        self.status = SpiderTaskStatus::Stopped;
        Ok(())
    }

    /// Returns the task's current status.
    pub fn get_status(&self) -> SpiderTaskStatus {
        self.status
    }
}


/// Fetches and parses the given article URLs with at most `concurrency`
/// requests in flight at once, returning the successfully parsed results.
///
/// Failures on individual articles are logged and skipped — they do not
/// abort the batch.
async fn process_articles_concurrently(concurrency: usize, article_parser: &ArticleParser, article_links: Vec<String>) -> Result<Vec<ParseResult>, SpiderError> {
    let mut results = Vec::with_capacity(article_links.len());

    let mut tasks = FuturesUnordered::new();
    // One HTTP client, cheaply cloned per task, so the connection pool is shared.
    let shared_client = Client::new();

    // Semaphore bounds how many fetches run concurrently; shared via Arc.
    let semaphore = std::sync::Arc::new(tokio::sync::Semaphore::new(concurrency));

    for href in article_links {
        let sem = semaphore.clone();
        let client = shared_client.clone();

        // `href` is owned and moved into the task — no clone needed.
        tasks.push(async move {
            // Hold the permit for the full fetch+parse; released on drop.
            let _permit = sem
                .acquire()
                .await
                .expect("semaphore is never closed while tasks are running");
            let result = async {
                tracing::info!("Processing article: {}", &href);
                // Fetch the article page, then parse out title/body/breadcrumb.
                let article_html = fetch_page_with_client(&client, &href).await?;
                article_parser.parse(&article_html)
            }
            .await;

            (href, result)
        });
    }

    // Drain the tasks as they complete, in completion order.
    while let Some((href, result)) = tasks.next().await {
        match result {
            Ok(parsed) => {
                results.push(parsed);
            },
            Err(e) => {
                // Log and continue — one bad article shouldn't kill the batch.
                tracing::error!("Failed to process article {}: {:?}", href, e);
            }
        }
    }

    Ok(results)
}

/// Fetches the page at `url` using a freshly constructed HTTP client.
pub async fn fetch_page(url: &str) -> Result<String, SpiderError> {
    fetch_page_with_client(&Client::new(), url).await
}

/// Fetches `url` with the given client and returns the response body as text.
pub async fn fetch_page_with_client(client: &Client, url: &str) -> Result<String, SpiderError> {
    let body = client.get(url).send().await?.text().await?;
    Ok(body)
}

/// Downloads the list page at `url` and collects the deduplicated,
/// normalized article links matched by `list_selector`.
async fn fetch_article_links(url: &str, list_selector: &Selector) -> Result<Vec<String>, SpiderError> {
    let html = fetch_page(url).await?;
    let document = Html::parse_document(&html);
    let base_url = Url::parse(url).map_err(|e| SpiderError::ParseError(format!("解析基础URL失败: {}", e)))?;

    // A set deduplicates links that appear more than once on the page.
    let mut article_links = std::collections::HashSet::new();
    for element in document.select(list_selector) {
        if let Some(href) = element.value().attr("href") {
            // Absolute hrefs parse directly; relative ones are resolved
            // against the page URL. Unresolvable hrefs become "" and are
            // filtered out below.
            let absolute = match Url::parse(href) {
                Ok(absolute_url) => absolute_url.to_string(),
                Err(_) => base_url
                    .join(href)
                    .map(|u| u.to_string())
                    .unwrap_or_default(),
            };
            let normalized = normalize_url(&absolute);
            if !normalized.is_empty() {
                article_links.insert(normalized);
            }
        }
    }

    Ok(article_links.into_iter().collect())
}

/// Result of parsing a single article page.
///
/// Fields are empty strings when the corresponding selector matched nothing
/// (see `ArticleParser::parse`).
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct ParseResult {
    /// Article title text.
    pub title: String,
    /// Article body as raw HTML.
    pub content: String,
    /// Breadcrumb navigation text.
    pub breadcrumb: String,
}

/// Canonicalizes a URL for deduplication: strips the fragment, trims
/// trailing slashes from the path, and lowercases the host.
///
/// Input that does not parse as a URL is returned unchanged.
fn normalize_url(url: &str) -> String {
    match Url::parse(url) {
        Ok(mut parsed_url) => {
            // Drop the fragment — `#anchor` never changes the fetched document.
            parsed_url.set_fragment(None);

            // Trim trailing slashes so `/a/` and `/a` dedupe to one entry.
            let path = parsed_url.path().trim_end_matches('/').to_string();
            parsed_url.set_path(&path);

            // Lowercase the host, but only when one exists: the original code
            // called `set_host(Some(""))` for host-less URLs (e.g. `mailto:`),
            // which could corrupt them. (The `url` crate already lowercases
            // hosts at parse time, so this is belt-and-braces.)
            if let Some(host) = parsed_url.host_str() {
                let lowered = host.to_lowercase();
                // Failure is ignored — keeping the original host is acceptable.
                let _ = parsed_url.set_host(Some(&lowered));
            }

            parsed_url.to_string()
        }
        Err(_) => url.to_string(),
    }
}