# from langchain.document_loaders import DirectoryLoader
from langchain_community.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter, Language
from langchain.schema import Document
# from langchain.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
import openai 
from dotenv import load_dotenv
import os
import shutil
from langchain_ollama import OllamaEmbeddings
import psycopg2

# Load environment variables. Assumes that project contains .env file with API keys
load_dotenv()

# ---- Set OpenAI API key.
# Respect a key already provided via the environment / .env file; only fall
# back to the placeholder when none is configured. (The previous hard-coded
# assignment silently clobbered any real key load_dotenv() had just loaded.)
# NOTE(review): never commit a real key — keep it in .env.
os.environ.setdefault('OPENAI_API_KEY', 'sk-2131232132')
openai.api_key = os.environ['OPENAI_API_KEY']

# Directory where the Chroma vector store is persisted.
CHROMA_PATH = "chroma"
# Directory containing the markdown source documents to ingest.
DATA_PATH = "data/books"


def main():
    """Entry point: build the vector store from the documents on disk."""
    generate_data_store()


def generate_data_store():
    """Load the source documents, chunk them, and persist the chunks to pgvector."""
    save_to_pgvector(split_text(load_documents()))


def load_documents():
    """Read every markdown file under DATA_PATH and return the loaded Documents."""
    return DirectoryLoader(DATA_PATH, glob="*.md").load()


def split_text(documents: list[Document]):
    """Split prose documents into small overlapping chunks for embedding.

    Args:
        documents: Loaded LangChain documents.

    Returns:
        list[Document]: Chunk documents; each carries a ``start_index``
        entry in its metadata because of ``add_start_index=True``.
    """
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=300,
        chunk_overlap=100,
        length_function=len,
        add_start_index=True,
    )
    chunks = text_splitter.split_documents(documents)
    print(f"Split {len(documents)} documents into {len(chunks)} chunks.")

    # Print a sample chunk for sanity-checking. The previous code indexed
    # chunks[10] unconditionally, which raised IndexError on small corpora.
    if len(chunks) > 10:
        document = chunks[10]
        print(document.page_content)
        print(document.metadata)

    return chunks

def split_code(documents: list[Document]):
    """Split Python source documents into overlapping chunks.

    Uses a language-aware splitter so chunk boundaries prefer Python
    syntactic units over arbitrary character offsets.

    Args:
        documents: Loaded LangChain documents containing Python code.

    Returns:
        list[Document]: The chunked documents.
    """
    # Initialize the language-aware code splitter.
    code_splitter = RecursiveCharacterTextSplitter.from_language(
        language=Language.PYTHON,
        chunk_size=500,   # at most 500 characters per chunk
        chunk_overlap=50  # 50 characters of overlap between chunks
    )

    chunks = code_splitter.split_documents(documents)
    print(f"Split {len(documents)} documents into {len(chunks)} chunks.")

    # Print a sample chunk for sanity-checking; guard against small inputs
    # (indexing chunks[10] unconditionally raised IndexError before).
    if len(chunks) > 10:
        document = chunks[10]
        print(document.page_content)
        print(document.metadata)

    return chunks

def save_to_chroma(chunks: list[Document]):
    """Embed the chunks with Ollama and persist them to a fresh Chroma store."""
    # Wipe any previous store so the rebuild starts from a clean slate.
    if os.path.exists(CHROMA_PATH):
        shutil.rmtree(CHROMA_PATH)

    embeddings = OllamaEmbeddings(
        base_url='http://localhost:11434',
        model="mxbai-embed-large",
    )

    # Embed every chunk and write the new database to CHROMA_PATH.
    db = Chroma.from_documents(chunks, embeddings, persist_directory=CHROMA_PATH)
    db.persist()
    print(f"Saved {len(chunks)} chunks to {CHROMA_PATH}.")

def save_to_pgvector(chunks: list[Document]):
    """Embed the chunks with Ollama and store them in a pgvector table.

    Drops and recreates the ``documents`` table on every call (mirroring the
    destructive rebuild that ``save_to_chroma`` performs for Chroma), then
    bulk-inserts one row per chunk.

    Args:
        chunks: Chunked LangChain documents to embed and store.
    """
    # Database connection parameters.
    # NOTE(review): credentials are hard-coded — consider moving to .env.
    db_params = {
        'dbname': 'vector_db',
        'user': 'postgres',
        'password': '123',
        'host': 'localhost',
        'port': '5432'
    }

    # Initialize the Ollama embedding model.
    embedding_model = OllamaEmbeddings(base_url='http://localhost:11434', model="mxbai-embed-large")

    conn = psycopg2.connect(**db_params)
    try:
        cur = conn.cursor()

        # Clear out the existing table first (mirrors Chroma's delete-and-rebuild).
        cur.execute("CREATE EXTENSION IF NOT EXISTS vector;")
        cur.execute("DROP TABLE IF EXISTS documents;")

        # mxbai-embed-large is assumed to emit 1024-dim vectors — TODO confirm
        # against the deployed model version.
        cur.execute("""
            CREATE TABLE documents (
                id SERIAL PRIMARY KEY,
                text_content TEXT NOT NULL,
                embedding VECTOR(1024)
            );
        """)

        # HNSW index to speed up approximate nearest-neighbour search.
        cur.execute("CREATE INDEX IF NOT EXISTS documents_embedding_idx ON documents USING hnsw (embedding vector_l2_ops);")

        # Embed all chunk texts in one batch call.
        texts = [chunk.page_content for chunk in chunks]
        embeddings = embedding_model.embed_documents(texts)

        # Bulk insert; pgvector accepts the '[x, y, ...]' string representation.
        batch_data = [(text, str(emb)) for text, emb in zip(texts, embeddings)]
        cur.executemany(
            "INSERT INTO documents (text_content, embedding) VALUES (%s, %s)",
            batch_data
        )

        # Commit the transaction.
        conn.commit()
        print(f"Saved {len(chunks)} chunks to pgvector database.")

        # Optional sanity check: show the first few stored rows.
        cur.execute("SELECT id, text_content, embedding FROM documents LIMIT 3;")
        for row in cur.fetchall():
            # row[2] comes back as a string, so the slice below prints its
            # first 5 characters rather than 5 vector dimensions.
            print(f"ID: {row[0]}, Text: {row[1][:50]}..., Embedding (first 5 dims): {row[2][:5]}...")

        cur.close()
    finally:
        # Bug fix: the close calls were previously indented INSIDE the
        # row-print loop, so the connection leaked whenever any earlier
        # statement raised. Always release it here instead.
        conn.close()
    
# Run the ingestion pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
