use std::collections::HashMap;

use colored::Colorize;
use log::debug;
use rig::{embeddings::EmbeddingsBuilder, providers::ollama::Client};
use rig_sqlite::SqliteVectorStore;
use rusqlite::ffi::sqlite3_auto_extension;
use scraper::{Html, Selector};
use sqlite_vec::sqlite3_vec_init;
use tokio::sync::mpsc;
use tokio_rusqlite::Connection;
use url::Url;

use crate::Document;

use super::property;

/// A same-site web crawler.
///
/// URLs are popped from the back of `urls` (LIFO, so the traversal is
/// depth-first-ish) and newly discovered links are deduplicated through
/// `urls_map` before being queued. Pages are yielded lazily through the
/// `Iterator` impl below.
pub struct WebCrawl {
    /// Crawl root; only links whose absolute form starts with this prefix
    /// are followed.
    base_url: String,
    /// Seen-URL set used for deduplication. Only the keys matter — the
    /// values are always the empty string (a `HashSet` in spirit).
    urls_map: HashMap<String, String>,
    /// Pending URLs still to be fetched; `next()` pops from the back.
    urls: Vec<String>,
}

impl WebCrawl {
    /// Creates a crawler rooted at `base_url`; the root is the first URL
    /// to be fetched.
    ///
    /// Takes `&str` rather than `&String` (the idiomatic borrowed form);
    /// existing `WebCrawl::new(&some_string)` call sites keep compiling
    /// unchanged via deref coercion.
    pub fn new(base_url: &str) -> WebCrawl {
        WebCrawl {
            base_url: base_url.to_string(),
            urls_map: HashMap::new(),
            urls: vec![base_url.to_string()],
        }
    }

    /// Lazily crawls pages and yields the trimmed text of every `<p>`
    /// element whose length is at least `min_length`.
    ///
    /// NOTE: the length check uses `str::len`, i.e. BYTES, not characters —
    /// significant for multi-byte (e.g. CJK) content, where 90 bytes is
    /// only ~30 characters.
    pub fn paragraph_iter<'a>(
        &'a mut self,
        min_length: usize,
    ) -> impl Iterator<Item = String> + 'a {
        // Parse the selector once per call instead of once per crawled page
        // (the original rebuilt it inside the closure). A static, valid
        // selector literal cannot fail to parse.
        let paragraph = Selector::parse("p").expect("static selector is valid");
        // `&mut WebCrawl` is itself an Iterator (blanket impl), so we can
        // flat_map over the crawled pages directly.
        self.flat_map(move |page| {
            page.select(&paragraph)
                .map(|p| p.text().collect::<String>().trim().to_string())
                .filter(|text| text.len() >= min_length)
                .collect::<Vec<_>>()
        })
    }
}

impl Iterator for WebCrawl {
    type Item = Html;

    /// Pops pending URLs, fetches each with a blocking HTTP client, and
    /// yields the first successfully parsed HTML page.
    ///
    /// Side effects: newly discovered links under `base_url` are queued in
    /// `self.urls` and recorded in `self.urls_map` for deduplication.
    /// Returns `None` once the pending stack is exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        // Build the client once per `next()` call rather than once per URL
        // (the original re-created it on every loop iteration; client
        // construction is comparatively expensive).
        let client = reqwest::blocking::Client::new();

        while let Some(url) = self.urls.pop() {
            debug!("{}", url.red());
            let resp = match client
                .get(url.clone())
                .header(reqwest::header::USER_AGENT, "Mozilla/5.0 (X11; Linux x86_64; rv:138.0) Gecko/20100101 Firefox/138.0")
                .send()
            {
                Ok(resp) => resp,
                // BUG FIX: the original used `.send().ok()?`, which aborted
                // the ENTIRE crawl on the first network error. One bad URL
                // should not end the iteration — skip it and keep going.
                Err(err) => {
                    debug!("request for {} failed: {}", url, err);
                    continue;
                }
            };

            // Guard clauses: only successful, text/html responses are parsed.
            if !resp.status().is_success() {
                continue;
            }
            let is_html = resp
                .headers()
                .get(reqwest::header::CONTENT_TYPE)
                .and_then(|v| v.to_str().ok())
                .map_or(false, |v| v.contains("text/html"));
            if !is_html {
                continue;
            }
            let html_str = match resp.text() {
                Ok(body) => body,
                Err(_) => continue,
            };

            // NOTE(review): kept as parse_fragment for behavioral parity,
            // though parse_document is the usual choice for full pages.
            let fragment: Html = Html::parse_fragment(html_str.as_str());

            // Harvest same-site links for later visits.
            let anchor = Selector::parse("a").expect("static selector is valid");
            for element in fragment.select(&anchor) {
                let href = match element.value().attr("href") {
                    Some(href) => href,
                    None => continue,
                };
                let mut target_url: String = href.to_string();
                if !target_url.starts_with("http") {
                    // Resolve relative hrefs against the crawl root and drop
                    // the `#fragment` so the same page isn't queued twice.
                    // `base_url` is caller-supplied, so its parse is an
                    // invariant; hrefs are untrusted, so a join failure is
                    // skipped instead of panicking (original `unwrap`).
                    let base: Url = Url::parse(&self.base_url).expect("base_url must be an absolute URL");
                    let mut joined = match base.join(&target_url) {
                        Ok(joined) => joined,
                        Err(_) => continue,
                    };
                    joined.set_fragment(None);
                    target_url = joined.as_str().to_string();
                }

                // Stay on-site and deduplicate through urls_map.
                if target_url.starts_with(&self.base_url)
                    && !self.urls_map.contains_key(&target_url)
                {
                    debug!("href:{}", target_url);
                    self.urls_map.insert(target_url.clone(), "".to_string());
                    self.urls.push(target_url);
                }
            }

            return Some(fragment);
        }
        None
    }
}

#[test]
// This test performs live HTTP requests against a remote documentation
// site, so it is excluded from the default test run; execute it explicitly
// with `cargo test -- --ignored`.
#[ignore = "requires network access to docs.chainmaker.org.cn"]
fn test() -> Result<(), anyhow::Error> {
    // Smoke-crawl the ChainMaker docs and print every paragraph whose
    // trimmed text is at least 90 bytes long.
    let base_url = "https://docs.chainmaker.org.cn/v2.3.3/html/";
    let mut w = WebCrawl::new(&base_url.to_string());
    for p in w.paragraph_iter(90) {
        println!(">{}", p);
    }

    Ok(())
}
