from openai import OpenAI
import os
from dotenv import load_dotenv

"""
封装了大模型接口的基本问答智能体
"""

load_dotenv()

class BaseAgent:
    """Base question-answering agent wrapping an OpenAI-compatible chat API.

    Configuration is read from environment variables (loaded by the
    module-level ``load_dotenv()`` call): ``BASE_URL``, ``LLM_KEY`` and
    ``MODEL``. Subclasses customize behavior by overriding
    ``get_system_prompt``.
    """

    # Shared client, created once at class-definition time from the
    # environment. All instances reuse this single connection object.
    client = OpenAI(
        base_url=os.getenv("BASE_URL"),
        api_key=os.getenv("LLM_KEY"),
    )

    def _build_messages(self, prompt, history=None, system_prompt=None):
        """Assemble the chat message list sent to the model.

        Order: system prompt, then at most the 5 most recent history
        entries, then the current user prompt.

        :param prompt: current user message text.
        :param history: optional list of prior message dicts
            (``{"role": ..., "content": ...}``).
        :param system_prompt: optional override; falls back to
            ``get_system_prompt()`` when falsy.
        :return: list of message dicts.
        """
        messages = [
            {
                "role": "system",
                "content": system_prompt or self.get_system_prompt(),
            }
        ]
        if history:
            # Only keep the last 5 turns to bound the context size.
            messages.extend(history[-5:])
        messages.append({
            "role": "user",
            "content": prompt,
        })
        return messages

    def complete(self, prompt, history=None, system_prompt=None):
        """Blocking (non-streaming) completion.

        :param prompt: user message text.
        :param history: optional prior messages; only the last 5 are used.
        :param system_prompt: optional system-prompt override (added for
            consistency with ``stream_complete``; defaults preserve the
            previous behavior).
        :return: the full ChatCompletion response object.
        """
        response = self.client.chat.completions.create(
            model=os.getenv("MODEL"),
            messages=self._build_messages(prompt, history, system_prompt),
            stream=False,
        )
        return response

    def stream_complete(self, prompt, history=None, system_prompt=None):
        """Streaming completion; returns an iterable of response chunks.

        :param prompt: user message text.
        :param history: optional prior messages; only the last 5 are used.
        :param system_prompt: optional system-prompt override.
        :return: a stream of ChatCompletionChunk objects.
        """
        response = self.client.chat.completions.create(
            # NOTE(review): placeholder values copied from the OpenRouter
            # sample code — fill in real values or drop these headers.
            extra_headers={
                "HTTP-Referer": "<YOUR_SITE_URL>",  # Optional. Site URL for rankings on openrouter.ai.
                "X-Title": "<YOUR_SITE_NAME>",  # Optional. Site title for rankings on openrouter.ai.
            },
            model=os.getenv("MODEL"),
            messages=self._build_messages(prompt, history, system_prompt),
            stream=True,
        )
        return response

    def get_system_prompt(self):
        """Return the default system prompt; subclasses may override."""
        return "你是洞察基础AI，在每一个回答的前面都先介绍自己"


if __name__ == '__main__':
    # Quick sanity check that environment variables from .env are loaded.
    print(os.getenv("TEST"))