import re
import json

def clean_content(documents: list):
    """Normalize document text and make metadata Chroma-compatible.

    For every document: collapse runs of two or more newlines in
    ``page_content`` down to a single blank line and strip surrounding
    whitespace; serialize any metadata value that is not a
    str/int/float/bool into a JSON string (falling back to ``str()``
    when the value is not JSON-serializable). Documents are modified
    in place and returned.
    """
    primitive_types = (str, int, float, bool)
    cleaned_docs = []

    for document in documents:
        # 1. Normalize the body text: 2+ consecutive newlines become
        #    exactly two (one paragraph break), then trim the edges.
        body = re.sub(r"\n{2,}", "\n\n", document.page_content).strip()

        # 2. Chroma only accepts primitive metadata values; encode the rest.
        for field, value in document.metadata.items():
            if isinstance(value, primitive_types):
                continue
            try:
                document.metadata[field] = json.dumps(value, ensure_ascii=False)
            except (TypeError, ValueError):
                # json.dumps failed (non-serializable object) — fall back to str().
                document.metadata[field] = str(value)

        # 3. Write the cleaned text back and collect the document.
        document.page_content = body
        cleaned_docs.append(document)

    return cleaned_docs


from langchain_community.document_loaders import (
    TextLoader,
    UnstructuredMarkdownLoader,
    UnstructuredPDFLoader,
    UnstructuredWordDocumentLoader
)

def load_documents():
    """Load documents of multiple types: text, markdown, PDF and Word.

    Every loader is wrapped in the same error handling (previously only
    the PDF and Word loaders were), so a single missing or corrupt file
    logs an error and contributes an empty list instead of aborting the
    whole ingestion run.

    Returns:
        list: all successfully loaded documents, concatenated.
    """

    def _safe_load(label, loader_factory):
        # Build the loader lazily and call .load(); log and return [] on
        # any failure so one bad source does not stop the others.
        try:
            return loader_factory().load()
        except Exception as e:
            print(f"加载{label}时出错: {e}")
            return []

    # Plain text file
    text_documents = _safe_load(
        "文本文件",
        lambda: TextLoader("../knowledge_base/sample.txt", encoding="utf-8"),
    )

    # Markdown file
    md_documents = _safe_load(
        "Markdown文件",
        lambda: UnstructuredMarkdownLoader("../knowledge_base/sample.md"),
    )

    # PDF file
    pdf_documents = _safe_load(
        "PDF",
        lambda: UnstructuredPDFLoader(
            "../knowledge_base/sample.pdf",
            mode="elements",  # element mode: one document per layout element
            strategy="hi_res",  # high-resolution parsing strategy
            # strategy="fast",
            languages=["eng", "chi_sim"],  # OCR languages: English + simplified Chinese
        ),
    )

    # Word document
    word_documents = _safe_load(
        "Word文档",
        lambda: UnstructuredWordDocumentLoader("../knowledge_base/sample.docx"),
    )

    # Summary of what was loaded (fixed copy-paste "text" in the
    # md/pdf/word messages).
    print(f"Loaded {len(text_documents)} text documents.")
    print(f"Loaded {len(md_documents)} md documents.")
    print(f"Loaded {len(pdf_documents)} pdf documents.")
    print(f"Loaded {len(word_documents)} word documents.")
    return text_documents + md_documents + pdf_documents + word_documents


# Load everything, then clean it in one pass.
documents = clean_content(load_documents())
print(f"Total documents: {len(documents)}")
# print(documents[:1])

# Inspect the first document to verify the text decoded correctly.
for doc_number, sample in enumerate(documents[:1], start=1):
    divider = "-" * 50
    print(divider)
    print(f"Document {doc_number}:")
    print(f"Content preview: {sample.page_content[:200]}")
    print(f"Metadata: {sample.metadata}")
    print(divider)

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Chunk the documents for embedding.
_SEPARATORS = ["\n\n", "。"]  # preferred break points: blank line, then Chinese full stop

text_splitter = RecursiveCharacterTextSplitter(
    separators=_SEPARATORS,
    chunk_size=500,  # maximum characters per chunk
    chunk_overlap=50,  # characters shared between adjacent chunks
)
texts = text_splitter.split_documents(documents)
print(f"Split into {len(texts)} chunks.")

import torch
from langchain_huggingface import HuggingFaceEmbeddings

# Load the embedding model from a local checkpoint.
_device = "cuda" if torch.cuda.is_available() else "cpu"
embedding_model = HuggingFaceEmbeddings(
    model_name="./bge-base-zh-v1.5",
    model_kwargs={"device": _device},
    # Normalized vectors are better suited to cosine-similarity search.
    encode_kwargs={"normalize_embeddings": True},
)
# 直接从 Hugging Face Hub 加载模型
# embedding_model = HuggingFaceEmbeddings(
#     model_name="BAAI/bge-base-zh-v1.5",  # 指定模型在Hub上的路径
#     model_kwargs={"device": "cuda" if torch.cuda.is_available() else "cpu",
#                   'use_safetensors': True},
#     encode_kwargs={"normalize_embeddings": True}  # 输出归一化向量，更适合余弦相似度计算
# )
# Pull the raw text out of each Document.
page_content_list = [chunk.page_content for chunk in texts]
print(f"Total texts: {len(page_content_list)}")

# Embed every chunk in a single batch call.
embeddings = embedding_model.embed_documents(page_content_list)
print(f"Embedded {len(embeddings)} vectors.")

# Preview the first six chunks alongside the head of their vectors
# (same six items the original index-0..5 loop printed).
for content, vector in list(zip(page_content_list, embeddings))[:6]:
    print("Text:\n", content)
    print("Embedding:\n", vector[:5])
    print()