from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from settings import settings
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS

def load_and_vectorize_texts(folder_path, api_key):
    """
    Load all .txt files from a folder, split them into overlapping chunks,
    embed each chunk with DashScope, and store the embeddings in a FAISS
    vector database.

    :param folder_path: Path to the folder containing the text files.
    :param api_key: DashScope API key. Falls back to settings.DASH_API_KEY
        when falsy (None / empty string) for backward compatibility.
    :return: FAISS vector database built from the chunk embeddings.
    """
    # Load all plain-text (.txt) files from the folder; other files are skipped.
    docs = []
    for filename in os.listdir(folder_path):
        if filename.endswith(".txt"):
            file_path = os.path.join(folder_path, filename)
            try:
                loader = TextLoader(file_path, encoding='utf-8')
                docs.extend(loader.load())
            except Exception as e:
                # Best-effort: report the failure and continue with the
                # remaining files rather than aborting the whole run.
                print(f"Error loading {file_path}: {e}")

    # Split documents into smaller chunks. The separators include Chinese
    # punctuation so splits land on sentence/clause boundaries in Chinese text.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        separators=["\n", "。", "！", "？", "，", "、"]
    )
    texts = text_splitter.split_documents(docs)

    # Create embeddings model.
    # FIX: the api_key parameter was previously ignored in favor of
    # settings.DASH_API_KEY; honor the caller-supplied key, keeping the
    # settings value as a fallback so existing callers are unaffected.
    embeddings_model = DashScopeEmbeddings(
        model="text-embedding-v1",
        dashscope_api_key=api_key or settings.DASH_API_KEY
    )

    # Create and populate the FAISS vector database
    db = FAISS.from_documents(texts, embeddings_model)

    return db
# if __name__ == "__main__":
#     db = load_and_vectorize_texts(settings.DATA_PATH, settings.DASH_API_KEY)
#     print(db)