//! A simple crawler engine built on Reqwest.

use crate::config::CrawlerConfig;
use crate::fuzzer::{Fuzzer, FuzzConfig};
use crate::js_renderer::{JsRenderer, JsRendererConfig};
use anyhow::Result;
use log::{debug, info, warn};
use reqwest::Client;
use select::document::Document;
use select::predicate::Name;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use tokio::sync::mpsc;
use url::Url;

/// A single HTTP response captured by the crawler.
#[derive(Debug, Clone)]
pub struct CrawlerResponse {
    /// The requested URL.
    pub url: String,
    /// HTTP status code.
    pub status: u16,
    /// Response headers (only headers with string-convertible values).
    pub headers: HashMap<String, String>,
    /// Raw response body bytes.
    pub body: Vec<u8>,
    /// Value of the `content-type` header, if present.
    pub content_type: Option<String>,
    /// Round-trip time of the request, in milliseconds.
    pub response_time_ms: u64,
}

/// Crawler engine: fetches pages, extracts links, and forwards every
/// response over an unbounded channel for downstream processing.
pub struct CrawlerEngine {
    /// Crawler configuration (limits, filters, feature toggles).
    config: CrawlerConfig,
    /// Shared reqwest HTTP client.
    client: Client,
    /// Channel used to hand each `CrawlerResponse` to the consumer.
    response_tx: mpsc::UnboundedSender<CrawlerResponse>,
    /// Optional JS renderer (only compiled with the `js-rendering` feature).
    /// NOTE(review): not read anywhere in this file's crawl loop — presumably
    /// consumed elsewhere or still TODO; confirm.
    #[cfg(feature = "js-rendering")]
    js_renderer: Option<Arc<JsRenderer>>,
    /// Optional fuzzer, enabled via config.
    /// NOTE(review): also not read in this file — confirm intended usage.
    fuzzer: Option<Arc<Fuzzer>>,
}

impl CrawlerEngine {
    /// Builds a new crawler engine.
    ///
    /// Constructs the shared HTTP client from `config` and, when enabled,
    /// the optional JS renderer and fuzzer. Initialization failures of the
    /// optional components are logged and degrade gracefully to `None`;
    /// only an HTTP client build error is fatal.
    ///
    /// # Errors
    /// Returns an error if the reqwest client cannot be built.
    pub fn new(
        config: CrawlerConfig,
        response_tx: mpsc::UnboundedSender<CrawlerResponse>,
    ) -> Result<Self> {
        // Build the HTTP client. Invalid TLS certificates are accepted on
        // purpose: a crawler must be able to scan misconfigured hosts.
        let client = Client::builder()
            .timeout(std::time::Duration::from_secs(config.timeout_secs))
            .redirect(if config.follow_redirects {
                reqwest::redirect::Policy::limited(config.max_redirects)
            } else {
                reqwest::redirect::Policy::none()
            })
            .user_agent(&config.user_agent)
            .danger_accept_invalid_certs(true)
            .build()?;

        // Optional JS renderer (only compiled with the `js-rendering` feature).
        #[cfg(feature = "js-rendering")]
        let js_renderer = if config.enable_js {
            let js_config = JsRendererConfig {
                timeout_secs: config.timeout_secs,
                ..Default::default()
            };
            match JsRenderer::new(js_config) {
                Ok(renderer) => {
                    info!("JavaScript rendering enabled");
                    Some(Arc::new(renderer))
                }
                Err(e) => {
                    // Degrade gracefully: crawl without JS rendering.
                    warn!("Failed to initialize JS renderer: {}", e);
                    None
                }
            }
        } else {
            None
        };

        // Optional fuzzer.
        let fuzzer = if config.enable_fuzz {
            let fuzz_config = FuzzConfig {
                dict_path: config.fuzz_dict_path.clone(),
                ..Default::default()
            };
            match Fuzzer::new(fuzz_config) {
                Ok(fuzzer) => {
                    info!("Fuzzing mode enabled");
                    Some(Arc::new(fuzzer))
                }
                Err(e) => {
                    // Degrade gracefully: crawl without fuzzing.
                    warn!("Failed to initialize fuzzer: {}", e);
                    None
                }
            }
        } else {
            None
        };

        Ok(Self {
            config,
            client,
            response_tx,
            #[cfg(feature = "js-rendering")]
            js_renderer,
            fuzzer,
        })
    }

    /// Crawls starting from `start_url`, depth-first, honoring the configured
    /// page, depth, extension, and same-domain limits. Every fetched response
    /// is forwarded over `response_tx`; HTML responses are parsed for
    /// `<a href>` links to enqueue.
    ///
    /// # Errors
    /// Returns an error only if `start_url` cannot be parsed or has no host.
    /// Individual fetch or body-read failures are logged and skipped.
    pub async fn crawl(&self, start_url: &str) -> Result<()> {
        info!("Starting crawler for URL: {}", start_url);

        let base_url = Url::parse(start_url)?;
        let base_domain = base_url
            .host_str()
            .ok_or_else(|| anyhow::anyhow!("Invalid URL: no host"))?
            .to_string();

        let mut visited = HashSet::new();
        // (url, depth) stack — popping from the back gives depth-first order.
        let mut to_visit = vec![(start_url.to_string(), 0)];
        let mut pages_crawled = 0;

        while let Some((url, depth)) = to_visit.pop() {
            // Stop once the page budget is spent.
            if pages_crawled >= self.config.max_pages {
                info!("Reached max pages limit: {}", self.config.max_pages);
                break;
            }

            if depth > self.config.max_depth {
                debug!("Skipping URL due to depth limit: {}", url);
                continue;
            }

            // Skip URLs already fetched (or attempted).
            if visited.contains(&url) {
                continue;
            }

            // Skip static assets by extension.
            if self.should_ignore_url(&url) {
                debug!("Ignoring URL due to extension: {}", url);
                continue;
            }

            visited.insert(url.clone());

            let start_time = std::time::Instant::now();
            match self.client.get(&url).send().await {
                Ok(response) => {
                    let response_time_ms = start_time.elapsed().as_millis() as u64;
                    let status = response.status().as_u16();

                    // Collect headers whose values are valid visible ASCII;
                    // others are silently dropped.
                    let mut headers = HashMap::new();
                    for (key, value) in response.headers() {
                        if let Ok(value_str) = value.to_str() {
                            headers.insert(key.to_string(), value_str.to_string());
                        }
                    }

                    let content_type = headers.get("content-type").cloned();

                    // Read the body. A failure here (timeout, connection reset
                    // mid-transfer) only skips this page instead of aborting
                    // the whole crawl with `?`.
                    let body_bytes = match response.bytes().await {
                        Ok(bytes) => bytes.to_vec(),
                        Err(e) => {
                            warn!("Failed to fetch {}: {}", url, e);
                            continue;
                        }
                    };

                    info!("Crawled: {} - Status: {} - Time: {}ms", url, status, response_time_ms);

                    let is_html = content_type
                        .as_deref()
                        .map_or(false, |ct| ct.contains("text/html"));

                    // Decode the body for link extraction (HTML only) before
                    // the bytes are moved into the outgoing message; this
                    // avoids cloning the full body and headers per page.
                    let body_text = if is_html {
                        Some(String::from_utf8_lossy(&body_bytes).into_owned())
                    } else {
                        None
                    };

                    let crawler_response = CrawlerResponse {
                        url: url.clone(),
                        status,
                        headers,
                        body: body_bytes,
                        content_type,
                        response_time_ms,
                    };

                    if let Err(e) = self.response_tx.send(crawler_response) {
                        warn!("Failed to send crawler response: {}", e);
                    }

                    pages_crawled += 1;

                    // Extract `<a href>` links from HTML pages.
                    if let Some(body_text) = body_text {
                        if let Ok(parsed_url) = Url::parse(&url) {
                            let document = Document::from(body_text.as_str());

                            for node in document.find(Name("a")) {
                                if let Some(href) = node.attr("href") {
                                    if let Ok(absolute_url) = parsed_url.join(href) {
                                        let absolute_url_str = absolute_url.to_string();

                                        // Enforce the same-domain restriction.
                                        if !self.config.crawl_external {
                                            if let Some(host) = absolute_url.host_str() {
                                                if host != base_domain {
                                                    continue;
                                                }
                                            }
                                        }

                                        if !visited.contains(&absolute_url_str) {
                                            to_visit.push((absolute_url_str, depth + 1));
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
                Err(e) => {
                    warn!("Failed to fetch {}: {}", url, e);
                }
            }
        }

        info!("Crawler finished. Crawled {} pages", pages_crawled);
        Ok(())
    }

    /// Returns `true` if the URL ends with a `.` followed by one of the
    /// configured ignore extensions (URL is lowercased first; the configured
    /// extension is compared as-is, matching the previous behavior).
    ///
    /// NOTE(review): URLs carrying a query string ("style.css?v=1") are NOT
    /// matched — confirm whether that is intended.
    fn should_ignore_url(&self, url: &str) -> bool {
        let url_lower = url.to_lowercase();
        self.config.ignore_extensions.iter().any(|ext| {
            // Equivalent to `ends_with(&format!(".{}", ext))` without the
            // per-check allocation.
            url_lower
                .strip_suffix(ext.as_str())
                .map_or(false, |rest| rest.ends_with('.'))
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_should_ignore_url() {
        let config = CrawlerConfig {
            ignore_extensions: vec!["css".to_string(), "js".to_string(), "png".to_string()],
            ..Default::default()
        };

        let (tx, _rx) = mpsc::unbounded_channel();
        let engine = CrawlerEngine::new(config, tx).unwrap();

        // (url, should be ignored?)
        let cases = [
            ("https://example.com/style.css", true),
            ("https://example.com/script.js", true),
            ("https://example.com/page.html", false),
            ("https://example.com/api/users", false),
        ];
        for (url, expected) in cases {
            assert_eq!(engine.should_ignore_url(url), expected, "url: {}", url);
        }
    }
}
