import os

from openai import OpenAI

from rag.rag_base import LLMService


class OpenAIService(LLMService):
    """LLM service backed by any OpenAI-compatible API.

    Any provider that speaks the OpenAI wire format works, e.g. Alibaba's
    text-embedding-v4:
    https://bailian.console.aliyun.com/?spm=5176.29597918.J_SEsSjsNv72yRuRFS2VknO.2.40697b08d5yUNW&tab=model#/model-market?capabilities=%5B%22TR%22%5D&z_type_=%7B%22capabilities%22%3A%22array%22%7D
    """

    def __init__(
            self,
            model: str | None = None,
            embedding_model: str | None = None,
            db_collection: str = "openai",
    ):
        """Create the service.

        Args:
            model: Chat model name; falls back to the ``MODEL`` env var.
            embedding_model: Embedding model name; falls back to the
                ``EMBEDDING_MODEL`` env var.
            db_collection: Vector-DB collection name passed to the base class.
        """
        # Resolve env vars here, at call time, rather than in the parameter
        # defaults: a default like `model=os.getenv('MODEL')` is evaluated
        # once at import time, so environment changes made after import
        # (e.g. by dotenv loading) would be silently ignored.
        super().__init__(
            model=model if model is not None else os.getenv('MODEL'),
            embedding_model=embedding_model if embedding_model is not None else os.getenv('EMBEDDING_MODEL'),
            db_collection=db_collection,
        )
        self.client = OpenAI(base_url=os.getenv('BASE_URL'), api_key=os.getenv('API_KEY'))

    def embed(self, text: str, store: bool, dimensions: int = 1024) -> list[float]:
        """Return the embedding vector for *text*.

        Args:
            text: Text to embed.
            store: Not used by this implementation; presumably part of the
                ``LLMService`` interface — TODO confirm against the base class.
            dimensions: Vector dimensionality. Only text-embedding-v3 and
                text-embedding-v4 support this parameter.

        Returns:
            The embedding as a list of floats.

        Raises:
            ValueError: If the API returns an empty embedding.
        """
        completion = self.client.embeddings.create(
            model=self.embedding_model,
            input=text,
            dimensions=dimensions,
            encoding_format="float",
        )
        embedding = completion.data[0].embedding
        # Explicit check instead of `assert`: asserts are stripped under -O,
        # which would let an empty vector propagate silently.
        if not embedding:
            raise ValueError("embedding API returned an empty vector")
        return embedding


if __name__ == '__main__':
    # Smoke-test the service end to end: build a RAG prompt for a sample
    # question and send it through the chat endpoint.
    service = OpenAIService()
    # service.create_db()  # initialise the vector database first

    question = "接待沙丁鱼时音乐演出是啥？"

    system_prompt = service.prompt(question)
    print(f"prompt: \n{system_prompt}")
    chat_messages = [
        {'role': 'system', 'content': system_prompt},
        {'role': 'user', 'content': question},
    ]
    print(service.chat(chat_messages))
