import requests
import markdown
from bs4 import BeautifulSoup
import chromadb
from chromadb.utils import embedding_functions
import re

# Store pre-chunked text into a ChromaDB collection.
def store_text_to_chroma(text_chunks, collection_name, db_path):
    """Upsert text chunks into a persistent ChromaDB collection.

    Args:
        text_chunks: list of dicts, each with an "id" and a "description" key.
        collection_name: target collection name (created if it does not exist).
        db_path: filesystem path of the persistent ChromaDB store.
    """
    client = chromadb.PersistentClient(path=db_path)
    # Cosine distance for the HNSW index; embeddings come from the
    # collection's default embedding function.
    collection = client.get_or_create_collection(
        name=collection_name,
        metadata={"hnsw:space": "cosine"}
    )

    documents = [chunk["description"] for chunk in text_chunks]
    ids = [chunk["id"] for chunk in text_chunks]

    # chromadb raises on an upsert with an empty id list, so only upsert
    # when there is actually data; the summary line is printed either way.
    if ids:
        collection.upsert(
            documents=documents,
            ids=ids
        )

    print(f"成功存入 {len(ids)} 个文本块到集合 '{collection_name}'")

# Split text into sentence-based chunks, bounded by sentence count and length.
def chunk_text_by_sentences(text, max_sentences_per_chunk=5, max_length_per_chunk=512):
    """Split *text* into chunks of whole sentences.

    A chunk is closed when adding the next sentence would exceed either
    ``max_sentences_per_chunk`` sentences or ``max_length_per_chunk``
    characters (measured on the space-joined chunk). A single sentence
    longer than ``max_length_per_chunk`` still becomes its own (oversized)
    chunk rather than being split mid-sentence.

    Returns:
        list[str]: chunks in original order; an empty list for blank input.
    """
    # Heuristic sentence splitter: break on whitespace that follows '.' or
    # '?', but not after abbreviation-like patterns such as "e.g." or "Mr.".
    sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
    chunks = []
    current_chunk = []

    for sentence in sentences:
        # Skip empty fragments so blank input cannot create empty chunks.
        if not sentence.strip():
            continue
        fits_count = len(current_chunk) < max_sentences_per_chunk
        fits_length = len(" ".join(current_chunk + [sentence])) <= max_length_per_chunk
        if fits_count and fits_length:
            current_chunk.append(sentence)
        else:
            # Bug fix: only flush a non-empty chunk. The original appended
            # " ".join([]) == "" whenever the very first sentence was already
            # oversized, storing an empty document.
            if current_chunk:
                chunks.append(" ".join(current_chunk))
            current_chunk = [sentence]

    if current_chunk:
        chunks.append(" ".join(current_chunk))

    return chunks

# Fetch a single page, convert it to plain text, chunk it, and store it.
def process_and_store_page(url, collection_name, db_path):
    """Download *url*, strip markup, split into chunks, and persist them.

    The response body is treated as Markdown, rendered to HTML, reduced to
    plain text, sentence-chunked, and upserted into ChromaDB with ids of the
    form ``{url}_chunk_{i}``. Network failures are reported, not raised.
    """
    try:
        print(f"正在处理网页: {url}")
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            markdown_content = response.text
            html_content = markdown.markdown(markdown_content)
            soup = BeautifulSoup(html_content, 'html.parser')
            # Collapse ALL whitespace runs (newlines, tabs, 3+ spaces) into
            # single spaces. The original chained replace("\n", " ") and a
            # single-pass replace("  ", " "), which left longer space runs
            # only partially collapsed.
            text_content = re.sub(r"\s+", " ", soup.get_text()).strip()

            chunks = chunk_text_by_sentences(text_content, max_sentences_per_chunk=5, max_length_per_chunk=512)
            text_chunks = [{"id": f"{url}_chunk_{i}", "description": chunk} for i, chunk in enumerate(chunks)]

            store_text_to_chroma(text_chunks, collection_name=collection_name, db_path=db_path)
        else:
            print(f"请求失败，状态码: {response.status_code}。请检查网页链接的合法性或稍后重试。")
    except requests.exceptions.RequestException as e:
        print(f"处理网页 {url} 时出错: {e}。可能是网络问题或链接无效。请检查链接的合法性或稍后重试。")

# Read URLs from a text file, one per line.
def read_urls_from_file(file_path):
    """Return the non-blank lines of *file_path* as a list of URL strings.

    Blank / whitespace-only lines are skipped so that stray empty lines in
    the URL file do not produce bogus empty-string requests downstream.
    """
    with open(file_path, "r", encoding="utf-8") as file:
        return [line.strip() for line in file if line.strip()]

# Print every record stored in the vector database collection.
def print_chroma_db_contents(collection_name, db_path):
    """Dump all records (document, metadata, embedding) of a collection.

    Uses ``collection.get()`` instead of a similarity query: get() enumerates
    every record directly and returns flat lists, whereas the original
    query(query_texts=[""], n_results=count) performed a pointless embedding
    lookup and raised when the collection was empty (n_results == 0).
    Note: get_or_create_collection never returns None, so the collection
    itself always exists here.
    """
    client = chromadb.PersistentClient(path=db_path)
    collection = client.get_or_create_collection(name=collection_name)

    if collection.count() == 0:
        print(f"集合 '{collection_name}' 中没有记录。")
        return

    # get() returns flat lists (query() would nest one list per query text).
    results = collection.get(include=["documents", "metadatas", "embeddings"])

    print(f"集合 '{collection_name}' 中的记录如下：")
    for doc, meta, embedding in zip(results["documents"], results["metadatas"], results["embeddings"]):
        print(f"描述: {doc}")
        print(f"元数据: {meta}")
        print(f"嵌入向量: {embedding}")
        print("-" * 40)

# Script entry point: ingest every URL from urls.txt, then dump the store.
if __name__ == "__main__":
    target_collection = "text_collection3"
    chroma_path = "./chroma_db3"

    # Process each listed page in order, storing its chunks as we go.
    for page_url in read_urls_from_file("urls.txt"):
        process_and_store_page(page_url, target_collection, chroma_path)

    print("所有网页处理完成。")
    print_chroma_db_contents(target_collection, chroma_path)