# conversation_manager.py
import os
from typing import List, Dict, Optional
import json
from openai import OpenAI

class APIClientManager:
    """Conversation manager backed by the NVIDIA (OpenAI-compatible) chat API.

    Keeps a running history of prompt/response pairs so that each new request
    can be replayed to the model as multi-turn context.
    """

    def __init__(self,
                 api_key: str,
                 base_url: str = "https://integrate.api.nvidia.com/v1",
                 model: str = "meta/llama3-8b-instruct",
                 enti_num: int = 5):
        """
        Initialize the API client.

        :param api_key: NVIDIA API key.
        :param base_url: Base URL of the OpenAI-compatible endpoint.
        :param model: Model identifier used for chat completions.
        :param enti_num: Number of entities, default 5.
            NOTE(review): not read anywhere in this class — presumably
            consumed by external callers; confirm before removing.
        """
        self.client = OpenAI(base_url=base_url, api_key=api_key)
        self.model = model
        self.enti_num = enti_num
        # Each entry is {"prompt": <user text>, "response": <model text>}.
        self.conversation_history: List[Dict] = []
        # System message prepended to every request. Kept in Chinese on
        # purpose: it is the instruction actually sent to the model.
        self.system_prompt = "你是一个严谨的传记事实核查助手。请根据用户的要求进行多轮事实核查。"

    def reset_history(self) -> None:
        """Discard the accumulated conversation history."""
        self.conversation_history = []

    def get_full_history(self) -> List[Dict]:
        """Return a copy of the full conversation history.

        :return: List of {"prompt": ..., "response": ...} dicts. Entries are
            shallow-copied so callers cannot mutate internal state.
        """
        return [entry.copy() for entry in self.conversation_history]

    def get_history(self) -> List[Dict]:
        """Alias of :meth:`get_full_history`, kept for backward compatibility.

        :return: Copy of the conversation history.
        """
        # Was a byte-for-byte duplicate of get_full_history(); delegate
        # instead so the copying logic lives in one place.
        return self.get_full_history()

    def generate_full_response(self, prompt: str,
                               conversation_history: Optional[List[Dict]] = None,
                               max_tokens: int = 1024, temperature: float = 0.5,
                               top_p: float = 1) -> str:
        """
        Generate a complete (non-streaming) reply and record the exchange.

        :param prompt: Current user prompt.
        :param conversation_history: Optional explicit history (list of
            {"prompt", "response"} dicts) to send as context. Note this is
            independent of ``self.conversation_history``, which only records
            the exchange afterwards.
        :param max_tokens: Maximum number of tokens to generate.
        :param temperature: Sampling temperature.
        :param top_p: Nucleus-sampling parameter.
        :return: The model's reply, or "" on failure — errors are printed,
            not raised (best-effort contract relied on by callers).
        """
        try:
            messages = self._build_messages(prompt, conversation_history)

            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                temperature=temperature,
                top_p=top_p,
                max_tokens=max_tokens,
                stream=False
            )

            full_response = response.choices[0].message.content

            # Record the exchange so later calls can replay it as context.
            self.conversation_history.append({
                "prompt": prompt,
                "response": full_response
            })

            return full_response
        except Exception as e:
            # Deliberate broad catch: API/transport failures degrade to "".
            print(f"API调用出现错误: {e}")
            return ""

    def _build_messages(self, current_prompt: str,
                        history: Optional[List[Dict]] = None) -> List[Dict]:
        """
        Build the chat-completions message list.

        :param current_prompt: Current user prompt (appended last).
        :param history: Optional prior {"prompt", "response"} exchanges,
            expanded into alternating user/assistant messages.
        :return: Messages in API order: system, [history...], current user.
        """
        messages = [{"role": "system", "content": self.system_prompt}]

        # Replay prior exchanges as alternating user/assistant turns.
        if history:
            for entry in history:
                messages.append({"role": "user", "content": entry["prompt"]})
                messages.append({"role": "assistant", "content": entry["response"]})

        messages.append({"role": "user", "content": current_prompt})

        return messages

    def generate_biography(self, subject: str, length: int = 100) -> tuple:
        """
        Generate an initial biography for *subject*.

        :param subject: Subject of the biography.
        :param length: Approximate length in characters.
        :return: ``(biography_text, None)`` — a 2-tuple for caller
            compatibility (second element is always None). The text is ""
            if the API call failed.
        """
        prompt = f"请写一段关于{subject}的人物传记，约{length}字。要求内容准确、全面。"
        print('原始传记提示：', prompt)

        biography = self.generate_full_response(prompt, max_tokens=500)
        print('生成的传记：', biography)

        return biography, None  # tuple kept for backward compatibility

    def save_conversation(self, filename: str = "conversation_history.json") -> None:
        """
        Save the full conversation history to a JSON file.

        :param filename: Destination path (UTF-8, pretty-printed JSON).
        """
        with open(filename, "w", encoding="utf-8") as f:
            json.dump(self.conversation_history, f, ensure_ascii=False, indent=2)
        # Fixed: previously printed a stray literal instead of the filename.
        print(f"对话历史已保存至 {filename}")