from sentence_transformers import SentenceTransformer
from langchain.text_splitter import RecursiveCharacterTextSplitter
import chromadb
from llm.doubao import doubao_qa
from llm.local import ollama_qa
import requests
from pprint import pprint
import random
from bs4 import BeautifulSoup
import re
import hashlib
from markdownify import markdownify as md
from strsimpy.normalized_levenshtein import NormalizedLevenshtein

# json: imported for JSON handling (not used in the code visible here — verify before removing)
import json

# serper.dev search endpoint used by search_serper().
SERPER_API = "https://google.serper.dev/search"
# NOTE(review): hard-coded API credential committed to source — rotate this
# key and load it from an environment variable / secrets store instead.
X_API_KEY = "2083cd014458a8ba01651de7f2f8ec8e834e3c98"
# Embedding model shared by indexing and querying (both sides must use the
# same model so stored vectors are comparable to query vectors).
model = SentenceTransformer("all-MiniLM-L6-v2")

# On-disk Chroma vector store; the "rag" collection holds the web-page chunks.
client = chromadb.PersistentClient(path="./chromadb_data")
collection = client.get_or_create_collection("rag")


def get_query_embedding(query):
    """Embed *query* with the module-level SentenceTransformer.

    Returns the embedding as a plain Python list so it can be passed
    straight to ChromaDB.
    """
    vector = model.encode(query)
    return vector.tolist()


def retrieve_related_chunks(query_embedding, n_results=3):
    """Query the vector store for the chunks nearest to *query_embedding*.

    Args:
        query_embedding: dense embedding of the user query (list of floats).
        n_results: maximum number of chunks to return.

    Returns:
        List of document strings, nearest first.

    Raises:
        LookupError: if the collection returned no matching documents.
    """
    results = collection.query(query_embeddings=[query_embedding], n_results=n_results)
    documents = results.get("documents")
    # chromadb returns one list of documents per query embedding; we sent one.
    if not documents or not documents[0]:
        # Raise instead of the former bare exit(1): callers get a chance to
        # handle an empty index, and a failure is diagnosable from the message.
        raise LookupError("vector store returned no related chunks")
    return documents[0]


def search_serper(query, num=20):
    """Search Google via the serper.dev API and return organic-result snippets.

    Args:
        query: search query string.
        num: maximum number of results to request and return.

    Returns:
        List of {"url": str, "content": str} dicts (snippet text only);
        empty list on any request failure.
    """
    headers = {
        "X-API-KEY": X_API_KEY,
        "Content-Type": "application/json",
    }
    # Bug fix: "num" must be sent in the request payload — without it serper
    # returns its default page size (10), so num > 10 was silently truncated.
    data = {"q": query, "num": num}
    try:
        response = requests.post(SERPER_API, headers=headers, json=data, timeout=10)
        # Raise on HTTP 4xx/5xx so they are handled like network errors.
        response.raise_for_status()
        results = response.json()
    except (requests.RequestException, ValueError) as e:
        # Narrowed from a bare `except Exception`: only network/HTTP/JSON
        # failures are expected here; anything else is a bug and should surface.
        print(e)
        return []
    docs = []
    for item in results.get("organic", [])[:num]:
        snippet = item.get("snippet", "")
        url = item.get("link", "")
        if snippet:
            docs.append({"url": url, "content": snippet})
    return docs


def keyword_similarity(query, content):
    """Score how similar *content* is to *query*, in [0, 1].

    Uses normalized Levenshtein similarity, which is defined as
    1 - normalized_distance — identical to the previous explicit subtraction.
    """
    scorer = NormalizedLevenshtein()
    return scorer.similarity(query, content)


def fetch_web_content(url):
    """Download *url* and return its body as whitespace-normalised Markdown.

    Args:
        url: page to fetch.

    Returns:
        Markdown text with all whitespace runs collapsed to single spaces,
        or "" if the page could not be fetched or converted.
    """
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()
        # Parse so BeautifulSoup can repair malformed markup, then convert the
        # repaired HTML straight to Markdown.  Bug fix: the previous
        # prettify() round-trip inserted line breaks *inside* inline elements,
        # which the collapse below turned into spurious spaces mid-sentence —
        # especially damaging for CJK text, where words carry no spaces.
        soup = BeautifulSoup(resp.text, "html.parser")
        md_text = md(str(soup))
        # Collapse every run of whitespace into a single space.
        md_text = re.sub(r"\s+", " ", md_text)
        return md_text.strip()
    except Exception as e:
        # Best-effort scraping: any failure (network, HTTP, parsing) skips
        # this one page instead of aborting the whole pipeline.
        print(f"failed to fetch {url}: {e}")
        return ""


def preprocess_and_store(docs):
    """Split each document into overlapping chunks and store them in ChromaDB.

    Chunks already present in the collection are skipped, so re-running the
    pipeline does not create duplicates.

    Args:
        docs: iterable of {"url": str, "content": str} dicts.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=200,
        chunk_overlap=30,
        separators=["\n\n", "\n", "。", ".", "，", ","],
    )
    for doc in docs:
        url = doc["url"]
        chunks = splitter.split_text(doc["content"])
        if not chunks:
            continue
        # Deterministic id per (url, chunk) pair; a dict also dedupes
        # identical chunks within the same document (first one wins, matching
        # the old per-chunk "already exists" skip).
        by_id = {}
        for chunk in chunks:
            doc_id = hashlib.md5((url + chunk).encode("utf-8")).hexdigest()
            by_id.setdefault(doc_id, chunk)
        # One round trip to find which ids already exist, instead of one
        # collection.get() call per chunk.
        existing = set(collection.get(ids=list(by_id)).get("ids") or [])
        new_items = [(i, c) for i, c in by_id.items() if i not in existing]
        if not new_items:
            continue
        new_ids = [i for i, _ in new_items]
        new_chunks = [c for _, c in new_items]
        embeddings = [get_query_embedding(c) for c in new_chunks]
        # Batch insert: one add() per document instead of one per chunk.
        collection.add(documents=new_chunks, embeddings=embeddings, ids=new_ids)


if __name__ == "__main__":
    query = "北京今日有什么新闻？"
    # Step 1: web search — collect result snippets for the query.
    docs = search_serper(query, num=20)
    # Step 2: score each snippet against the query keywords.
    for doc in docs:
        doc["sim"] = keyword_similarity(query, doc["content"])
    # Step 3: keep only the top-5 highest scoring results.
    top_docs = sorted(docs, key=lambda d: d["sim"], reverse=True)[:5]
    print(top_docs)
    # Step 4: download the full page body for each selected result,
    # dropping pages that could not be fetched.
    full_docs = [
        {"url": doc["url"], "content": body}
        for doc in top_docs
        if (body := fetch_web_content(doc["url"]))
    ]
    # Step 5: chunk the page bodies and index them in the vector store.
    preprocess_and_store(full_docs)

    # Step 6: embed the query, retrieve the nearest chunks as context,
    # and ask the LLM to answer from that context.
    query_embedding = get_query_embedding(query)
    related_chunks = retrieve_related_chunks(query_embedding, n_results=3)
    query_context = "\n".join(related_chunks)
    print(f"query_context:{query_context}")
    prompt = f"已知信息:{query_context}\n\n请根据上述的内容回答用户下面的问题:{query}"
    print(f"prompt:{prompt}")
    answer = doubao_qa(prompt)
    print(f"answer:{answer}")
