import os
from typing import List, Dict
from langchain.text_splitter import CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain.docstore.document import Document
from langchain.vectorstores import FAISS
import shutil


def load_article_data(file_path: str) -> List[str]:
    """
    Load a text file and split it into paragraphs on blank lines.

    Args:
        file_path (str): Path to the UTF-8 text file to read.

    Returns:
        List[str]: Whitespace-stripped, non-empty paragraphs, in file order.
    """
    with open(file_path, 'r', encoding='utf-8') as fh:
        raw_text = fh.read()

    # Paragraphs are delimited by a blank line ("\n\n"); drop chunks that
    # are empty after stripping (e.g. runs of consecutive blank lines).
    paragraphs = []
    for chunk in raw_text.split('\n\n'):
        cleaned = chunk.strip()
        if cleaned:
            paragraphs.append(cleaned)
    return paragraphs


def create_documents_from_lines(lines: List[str]) -> List[str]:
    """
    Normalize a list of paragraph strings.

    Strips leading/trailing whitespace from each entry and drops entries
    that are empty after stripping.

    Args:
        lines (List[str]): Raw paragraph strings.

    Returns:
        List[str]: Cleaned, non-empty strings, preserving input order.
    """
    cleaned = []
    for text in lines:
        stripped = text.strip()
        if stripped:
            cleaned.append(stripped)
    return cleaned


def clear_vectorstore_if_exists(vectorstore_path: str):
    """
    Delete an existing vector-store directory, if present.

    No-op when the path does not exist.

    Args:
        vectorstore_path (str): Directory holding the persisted vector store.
    """
    if not os.path.exists(vectorstore_path):
        return
    shutil.rmtree(vectorstore_path)
    print(f"已清除现有向量数据库: {vectorstore_path}")


def vectorize_and_store_documents(documents: List[str],
                                  vectorstore_path: str = "faiss_index",
                                  config_path: str = "config.txt") -> FAISS:
    """
    Embed the given texts and persist them in a FAISS vector store.

    Any pre-existing store at ``vectorstore_path`` is removed first, so
    the saved index always reflects exactly the documents passed in.

    Args:
        documents (List[str]): Texts to embed; must be non-empty.
        vectorstore_path (str): Directory where the FAISS index is saved.
        config_path (str): File whose contents are the DashScope API key.

    Returns:
        FAISS: The populated, already-saved vector store.

    Raises:
        FileNotFoundError: If ``config_path`` does not exist.
        ValueError: If the config file is empty after stripping whitespace.
    """
    # Read the API key from the config file; fail fast on an empty key
    # instead of letting the embedding call fail with an opaque error.
    with open(config_path, "r", encoding="utf-8") as f:
        dashscope_api_key = f.read().strip()
    if not dashscope_api_key:
        raise ValueError(f"API key file is empty: {config_path}")

    # Imported lazily so the module loads even without langchain_community
    # installed (only this function needs it).
    from langchain_community.embeddings import DashScopeEmbeddings

    embeddings = DashScopeEmbeddings(
        model="text-embedding-v2",
        dashscope_api_key=dashscope_api_key,
    )

    # Remove any stale index so save_local writes a fresh one.
    clear_vectorstore_if_exists(vectorstore_path)

    # Build the in-memory index from raw texts (one embedding per string).
    vectorstore = FAISS.from_texts(documents, embeddings)

    # Persist to disk for later retrieval.
    vectorstore.save_local(vectorstore_path)
    print(f"向量数据库已保存到: {vectorstore_path}")

    return vectorstore


def main():
    """
    Driver: load Article.txt, normalize it into paragraph strings,
    vectorize them into a FAISS store, then run a small retrieval test.
    """
    source_path = "Article.txt"

    # Step 1: read the article and split it on blank lines.
    print("正在加载Article.txt文件...")
    paragraphs = load_article_data(source_path)
    print(f"成功加载 {len(paragraphs)} 段数据")

    # Step 2: strip/filter the paragraphs into plain strings.
    print("正在转换为字符串列表...")
    texts = create_documents_from_lines(paragraphs)
    print(f"成功创建 {len(texts)} 个文档段落")

    # Step 3: embed and persist to the FAISS index.
    print("正在进行向量化并存储到向量数据库...")
    store = vectorize_and_store_documents(texts)
    print("处理完成！")

    # Step 4: sanity-check retrieval with a sample query.
    query = "开题"
    hits = store.similarity_search(query, k=3)
    print(f"\n查询测试 - 查询: {query}")
    for rank, hit in enumerate(hits, start=1):
        print(f"结果 {rank}: {hit.page_content}")


if __name__ == "__main__":
    main()
