from tools.config import get_config
from openai import OpenAI
import asyncio


class LLMModel:
    """Thin wrapper around an OpenAI-compatible chat-completion endpoint.

    Configuration is looked up by ``model_name`` under the ``llm`` section of
    the project config. Expected config keys (per SOURCE usage): ``model``,
    ``temperature``, ``role_content``, ``key``, ``base_url``, ``round_num``.
    """

    def __init__(self, model_name: str):
        # NOTE: despite its name, this attribute holds the whole config dict
        # for the chosen model (name kept as-is for backward compatibility).
        self.model_name = get_config('llm').get(model_name)
        self.model = self.model_name.get('model')
        self.temperature = self.model_name.get('temperature')
        self.role_content = self.model_name.get('role_content')
        self.client = OpenAI(
            api_key=self.model_name.get('key'),
            base_url=self.model_name.get('base_url'),
        )
        # Running conversation history; index 0 is always the system prompt.
        self.message = [{"role": "system", "content": self.role_content}]

    def make_messages(self, message: list) -> list:
        """Trim the history in place so it stays under the round limit.

        The system prompt at index 0 is always preserved; the oldest
        non-system entries are dropped first.

        :param message: full message history (mutated in place)
        :return: the trimmed history (same list object)
        """
        round_num = self.model_name.get('round_num')
        # BUGFIX: the original popped at most ONE entry per call, so a
        # history that had grown past the limit was never trimmed back
        # under it. Loop until the limit is respected, but never pop the
        # system prompt itself.
        while len(message) >= round_num and len(message) > 1:
            message.pop(1)  # drop the oldest entry after the system prompt
        return message

    async def chat(self, query: str, role_content: str = None):
        """Single stateless question/answer round (no history kept).

        :param query: the user question
        :param role_content: optional system-prompt override for this call
        :return: the assistant's reply text
        """
        # The OpenAI SDK call here is blocking; run it in a worker thread
        # so the asyncio event loop is not stalled while waiting on I/O.
        completion = await asyncio.to_thread(
            self.client.chat.completions.create,
            model=self.model,
            messages=[
                {"role": "system", "content": role_content if role_content else self.role_content},
                {"role": "user", "content": query}
            ],
            temperature=self.temperature,
        )
        return completion.choices[0].message.content

    async def long_chat(self, query: str, role_content: str = None):
        """Stateful chat round that accumulates history across calls.

        :param query: the user question
        :param role_content: optional system-prompt override (replaces the
            stored system prompt for this and subsequent calls)
        :return: the assistant's reply text
        """
        if role_content:
            self.message[0]['content'] = role_content
        self.message.append({"role": "user", "content": query})
        # Blocking SDK call moved off the event loop (see chat()).
        completion = await asyncio.to_thread(
            self.client.chat.completions.create,
            model=self.model,
            messages=self.make_messages(self.message),
            temperature=self.temperature,
        )
        reply = completion.choices[0].message.content
        # BUGFIX: store a plain dict rather than the SDK message object so
        # the history stays uniformly JSON-serializable dicts.
        self.message.append({"role": "assistant", "content": reply})
        return reply


async def main():
    """Demo: a two-turn stateful conversation against the "kimi" config."""
    model = LLMModel(model_name="kimi")
    first_reply = await model.long_chat(
        query="我现在需要做一个小红书运营的账号，但是我不知道怎么做，你帮我设计一套运营方案，我要做的赛道是早教产品",
        role_content="你是一个小红书运营商，擅长制定小红书运营方案，并且你有年入百万的经历"
    )
    # Second turn relies on the history accumulated by the first call.
    follow_up = await model.long_chat('你知道我今年几岁了吗')
    print(first_reply, follow_up)


# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    asyncio.run(main())
