import os

# In the student environment, substitute the pysqlite3 package for the stdlib
# sqlite3 module before anything else imports it (chromadb below requires a
# newer SQLite than some systems ship).
# NOTE(review): os.environ values are always strings, so ANY non-empty value
# (even "0" or "false") enables this branch — confirm that is intended.
if os.environ.get('CUR_ENV_IS_STUDENT', False):
    import sys
    __import__('pysqlite3')
    sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')


import chromadb
from chromadb.config import Settings
class MyVectorDBConnector:
    """Thin wrapper around a chromadb collection plus an embedding function."""

    def __init__(self, collection_name, embedding_fn):
        """Create (or fetch) the named collection on a fresh chroma client.

        Args:
            collection_name: name of the chromadb collection to use.
            embedding_fn: callable mapping a list of texts to a list of vectors.
        """
        client = chromadb.Client(Settings(allow_reset=True))

        # Demo-only: wipe any previous state so each run starts clean;
        # a real application would not reset on every construction.
        client.reset()

        self.collection = client.get_or_create_collection(name=collection_name)
        self.embedding_fn = embedding_fn

    def add_documents(self, documents):
        """Embed *documents* and store both text and vectors in the collection."""
        vectors = self.embedding_fn(documents)
        # Positional ids: id0, id1, ... (re-adding would reuse the same ids).
        doc_ids = [f"id{i}" for i in range(len(documents))]
        self.collection.add(
            embeddings=vectors,
            documents=documents,
            ids=doc_ids,
        )

    def search(self, query, top_n):
        """Query the collection and return the top_n nearest matches."""
        return self.collection.query(
            query_embeddings=self.embedding_fn([query]),
            n_results=top_n,
        )

from rag03 import prompt_template, build_prompt, get_completion
class RAG_Bot:
    """Minimal retrieval-augmented chat bot: retrieve, build a prompt, ask the LLM."""

    def __init__(self, vector_db, llm_api, n_results=2):
        """Store the vector store, the LLM callable, and the retrieval depth."""
        self.vector_db = vector_db
        self.llm_api = llm_api
        self.n_results = n_results

    def chat(self, user_query):
        """Answer *user_query* using context retrieved from the vector store."""
        # Step 1: retrieve the n_results most relevant chunks.
        hits = self.vector_db.search(user_query, self.n_results)

        # Step 2: fill the shared prompt template with context + question.
        prompt = build_prompt(
            prompt_template,
            context=hits['documents'][0],
            query=user_query,
        )

        # Step 3: let the LLM produce the final answer.
        return self.llm_api(prompt)
    
from nltk.tokenize import sent_tokenize
import json

def split_text(paragraphs, chunk_size=300, overlap_size=100):
    """Split paragraphs into overlapping sentence chunks.

    Sentences are consumed in order; each chunk greedily packs sentences up
    to ``chunk_size`` characters and is prefixed with up to ``overlap_size``
    characters of trailing sentences from the preceding text, so adjacent
    chunks share context.

    Args:
        paragraphs: iterable of paragraph strings.
        chunk_size: soft upper bound (in characters) for a chunk's content.
        overlap_size: soft upper bound (in characters) for the backward overlap.

    Returns:
        list[str]: the chunk strings.
    """
    sentences = [s.strip() for p in paragraphs for s in sent_tokenize(p)]
    chunks = []
    i = 0
    while i < len(sentences):
        chunk = sentences[i]

        # Walk backwards, prepending earlier sentences while they still fit
        # within the overlap budget.
        overlap = ''
        prev = i - 1
        while prev >= 0 and len(sentences[prev]) + len(overlap) <= overlap_size:
            overlap = sentences[prev] + ' ' + overlap
            prev -= 1
        chunk = overlap + chunk

        # Walk forwards, appending later sentences while the chunk stays
        # within chunk_size.  (Local renamed from `next`, which shadowed the
        # builtin; the unused `prev_len` local was also removed.)
        nxt = i + 1
        while nxt < len(sentences) and len(sentences[nxt]) + len(chunk) <= chunk_size:
            chunk = chunk + ' ' + sentences[nxt]
            nxt += 1

        chunks.append(chunk)
        i = nxt  # always >= i + 1, so the loop terminates
    return chunks
       

if __name__ == "__main__":
    from rag04 import get_embeddings
    # Build the vector-store wrapper: collection "demo", embeddings from rag04.
    vector_db = MyVectorDBConnector("demo", get_embeddings)
    # Extract the source document, chunk it, and index it.
    from rag01 import extract_text_from_pdf
    # paragraphs = extract_text_from_pdf("llama2.pdf", page_numbers=[2,3], min_line_length=10)
    # vector_db.add_documents(paragraphs)
    paragraphs = extract_text_from_pdf("llama2.pdf",min_line_length=10)
    chunks = split_text(paragraphs, 300, 100)
    vector_db.add_documents(chunks)

    # user_query = "Llama 2有多少参数"
    # user_query = "Does Llama 2 have a conversational variant"
    # results = vector_db.search(user_query, 2)
    # for para in results['documents'][0]:
    #     print(para+"\n")

    # Build the RAG bot (default n_results=2) and run a single query.
    bot = RAG_Bot(
        vector_db, 
        llm_api=get_completion)

    # Query text (Chinese): "how many parameters does llama2 have"
    user_query = "llama2 有多少个参数"

    response = bot.chat(user_query)
    print(response)