import abc

import chromadb
from dotenv import load_dotenv

from rag.chunk import ChunkHandler

load_dotenv()


class LLMService(abc.ABC):
    """Abstract base for LLM-backed RAG services.

    Wires together a chat model, an embedding model, and a persistent
    ChromaDB collection used as the vector store. Subclasses must
    implement :meth:`embed` and are expected to assign ``self.client``
    (an OpenAI-compatible client) before :meth:`chat` is called.
    """

    def __init__(self, model: str, embedding_model: str, db_collection: str, init_db: bool = False):
        """Initialize the service and open/create the vector store.

        Args:
            model: Name of the chat-completion model passed to the client.
            embedding_model: Name of the embedding model (used by subclasses).
            db_collection: ChromaDB collection name to create or reuse.
            init_db: When True, embed all chunks and populate the
                collection immediately via :meth:`create_db`.
        """
        self.model = model
        self.embedding_model = embedding_model
        # Subclasses are responsible for constructing the actual API client;
        # chat() will fail with AttributeError if this is still None.
        self.client = None
        self.chromadb_client = chromadb.PersistentClient("./chroma.db")
        self.chromadb_collection = self.chromadb_client.get_or_create_collection(db_collection)
        self.chunk_handler = ChunkHandler()
        if init_db:
            self.create_db()

    @abc.abstractmethod
    def embed(self, text: str, store: bool) -> list[float]:
        """Return the embedding vector for *text*.

        Args:
            text: Text to embed.
            store: True when embedding a document for storage, False when
                embedding a query (some providers distinguish the two modes).

        Returns:
            The embedding as a list of floats.
        """
        return []

    def create_db(self) -> None:
        """Embed every chunk from the chunk handler and upsert it into
        the ChromaDB collection, keyed by its positional index."""
        print("Creating database...")
        for idx, c in enumerate(self.chunk_handler.get_chunks()):
            print(f"Process: {c}")
            embedding = self.embed(c, store=True)
            self.chromadb_collection.upsert(
                ids=str(idx),  # positional index only; ids carry no semantic meaning here
                documents=c,
                embeddings=embedding,
            )

    def query_db(self, question: str) -> list[str]:
        """Return the document chunks most similar to *question*.

        Args:
            question: Natural-language query to embed and search for.

        Returns:
            The top-5 matching document chunks for the first (only) query.

        Raises:
            ValueError: If the collection returned no documents, e.g. the
                database was never initialised.
        """
        question_embedding = self.embed(question, store=False)
        result = self.chromadb_collection.query(
            query_embeddings=question_embedding,
            n_results=5,
        )
        # Explicit check instead of `assert`: asserts are stripped under
        # `python -O`, which would turn an empty store into an IndexError below.
        if not result["documents"]:
            raise ValueError("Vector store returned no documents; was the database initialised?")
        return result["documents"][0]

    def prompt(self, question: str) -> str:
        """Build a RAG prompt containing the retrieved context chunks.

        NOTE(review): the question itself is not embedded in the prompt
        (a line doing so was commented out in the original) — presumably
        it is sent as a separate chat message; confirm against callers.
        """
        chunks = self.query_db(question)
        parts = ["Please answer user's question according to context\n"]
        parts.append("Context:\n")
        for c in chunks:
            parts.append(f"{c}\n")
            parts.append("-------------\n")
        # Join once instead of repeated += (avoids quadratic concatenation).
        return "".join(parts)

    def chat(self, messages):
        """Send *messages* (OpenAI chat format: list of role/content dicts)
        to the chat model and return the assistant's reply text.
        """
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            timeout=20,  # seconds; fail fast rather than hang on a slow API
        )
        return response.choices[0].message.content
