from config.config import Config
from langchain_community.document_loaders import (
    PyPDFLoader, 
    WebBaseLoader,
    TextLoader
)

import re
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings


def pdf_loader(file_path):
    """Load a PDF file into LangChain documents (one per page).

    Args:
        file_path: Path to the PDF file on disk.

    Returns:
        List of Document objects produced by PyPDFLoader.
    """
    # Distinct local name: the original assignment shadowed this function.
    loader = PyPDFLoader(file_path)
    return loader.load()

def web_loader(url):
    """Fetch a web page and load it into LangChain documents.

    Args:
        url: URL (or list of URLs) to download and parse.

    Returns:
        List of Document objects produced by WebBaseLoader.
    """
    # Distinct local name: the original assignment shadowed this function.
    loader = WebBaseLoader(url)
    return loader.load()

def text_loader(file_path):
    """Load a plain-text file into LangChain documents.

    Args:
        file_path: Path to the text file on disk.

    Returns:
        List of Document objects produced by TextLoader.
    """
    # Distinct local name: the original assignment shadowed this function.
    loader = TextLoader(file_path)
    return loader.load()

def clean_text(text):
    """Normalize raw extracted text before chunking.

    Removes "Page X of Y" header/footer artifacts, strips decorative
    special characters, and collapses whitespace runs to single spaces.

    Args:
        text: Raw text extracted from a PDF/web/plain-text source.

    Returns:
        Cleaned text with leading/trailing whitespace stripped.
    """
    # Remove page header/footer artifacts first; \s+ tolerates the
    # pattern being broken across newlines in raw PDF extractions, and
    # doing it before whitespace collapsing avoids leaving double spaces.
    text = re.sub(r'Page\s+\d+\s+of\s+\d+', '', text)
    # Drop special characters but KEEP CJK punctuation (。？！、；：，) —
    # doc_splitter relies on 。？！ as sentence-level separators, which the
    # original character class silently deleted.
    text = re.sub(r'[^\w\s.,!?;:\'\"\-。？！、；：，]', '', text)
    # Collapse any whitespace run (newlines, tabs, spaces) to one space.
    text = re.sub(r'\s+', ' ', text)
    return text.strip()

def doc_splitter(all_docs):
    """Split documents into overlapping character chunks for embedding.

    Args:
        all_docs: List of Document objects to split.

    Returns:
        List of chunked Document objects.
    """
    psych_splitter = RecursiveCharacterTextSplitter(
        chunk_size=Config.rag["chunk_size"],        # target characters per chunk
        chunk_overlap=Config.rag["chunk_overlap"],  # characters shared between adjacent chunks
        length_function=len,                        # measure length in characters
        separators=[
            "\n\n", "\n", "。", "？", "！",          # paragraph/line breaks and CJK sentence ends
            r"(?<=\. )", r"(?<=\? )", r"(?<=\! )",  # English sentence ends (lookbehind keeps punctuation)
        ],
        # Required so the lookbehind patterns above are treated as regexes;
        # the splitter's default (False) would match them as literal strings
        # that never occur in real text, making them dead separators.
        is_separator_regex=True,
    )

    return psych_splitter.split_documents(all_docs)

def make_metadata(chunked_docs):
    """Attach source and indexing metadata to every chunk, in place.

    Args:
        chunked_docs: Chunks produced by doc_splitter; each item exposes
            a ``.metadata`` dict and a ``.page_content`` string.

    Returns:
        The same sequence, with metadata enriched (mutation is in place).
    """
    for i, chunk in enumerate(chunked_docs):
        # Document provenance.
        chunk.metadata["source"] = "CBT_Manual_v2"
        chunk.metadata["doc_type"] = "therapy_guide"
        chunk.metadata["page"] = chunk.metadata.get("page", 0)

        # Position of the chunk in the split sequence (the original
        # computed `i` via enumerate but never used it).
        chunk.metadata["chunk_id"] = i

        # Content preview; append an ellipsis only when the content was
        # actually truncated (the original added "..." unconditionally).
        preview = chunk.page_content[:150]
        if len(chunk.page_content) > 150:
            preview += "..."
        chunk.metadata["summary"] = preview

        # Domain tags used for filtered retrieval.
        chunk.metadata["tags"] = ["anxiety", "cbt", "techniques"]
    return chunked_docs

def save_database(chunked_docs):
    """Embed the given chunks and persist them as a local FAISS index.

    Args:
        chunked_docs: Document chunks to embed and index.
    """
    # Embedding model, configured from the RAG section of the app config.
    embedder = OpenAIEmbeddings(
        model="text-embedding-3-small",
        base_url=Config.rag["base_url"],
        api_key=Config.rag["api_key"],
    )

    # Build the vector store from scratch and write it to VECTOR_DB_PATH.
    index = FAISS.from_documents(documents=chunked_docs, embedding=embedder)
    index.save_local(Config.data["VECTOR_DB_PATH"])

def update_vector_store(new_docs):
    """Incrementally add new document chunks to the persisted FAISS index.

    Args:
        new_docs: Document chunks to append to the existing index.
    """
    embedder = OpenAIEmbeddings(
        model="text-embedding-3-small",
        base_url=Config.rag["base_url"],
        api_key=Config.rag["api_key"],
    )

    db_path = Config.data["VECTOR_DB_PATH"]

    # Load the existing index; FAISS's pickle-based metadata format
    # requires explicitly opting in to deserializing our own trusted file.
    store = FAISS.load_local(
        db_path,
        embedder,
        allow_dangerous_deserialization=True,
    )

    # Append the new chunks, then overwrite the index on disk.
    store.add_documents(new_docs)
    store.save_local(db_path)