# Standard library
import os
from typing import Optional, List, Dict, Any, Iterator

# Third-party
import requests
from dotenv import load_dotenv

# LangChain: conversation chain and memory for the chat example below.
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_core.language_models import BaseLLM
from langchain_core.outputs import Generation, LLMResult

load_dotenv()

class DeepSeekLLM(BaseLLM):
    """LangChain-compatible LLM wrapper around the DeepSeek chat-completions API.

    Authenticates with the ``DEEPSEEK_API_KEY`` environment variable
    (loaded via dotenv at module import) unless ``api_key`` is passed
    explicitly.
    """

    # API key; defaults to the environment variable loaded by dotenv.
    api_key: str = os.getenv("DEEPSEEK_API_KEY", "")
    # Model name sent to the DeepSeek endpoint.
    model: str = "deepseek-chat"
    # Sampling temperature forwarded on every request.
    temperature: float = 0.7
    # Upper bound on completion length forwarded on every request.
    max_tokens: int = 2048
    # Request timeout in seconds; prevents a stalled HTTP call from
    # blocking the caller forever.
    request_timeout: float = 60.0

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Fail fast at construction time instead of on the first request.
        if not self.api_key:
            raise ValueError("DEEPSEEK_API_KEY 未设置")

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Generate one completion per prompt.

        Args:
            prompts: Prompts to complete, one API call each.
            stop: Optional stop sequences forwarded to the API.

        Returns:
            An ``LLMResult`` with one single-``Generation`` list per prompt.
        """
        generations = []
        for prompt in prompts:
            text = self._invoke(prompt, stop=stop, **kwargs)
            # Use proper Generation objects rather than bare dicts so the
            # result validates against LLMResult's declared schema.
            generations.append([Generation(text=text)])
        return LLMResult(generations=generations)

    def _invoke(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> str:
        """Call the DeepSeek chat-completions endpoint for a single prompt.

        Raises:
            requests.HTTPError: If the API returns a non-2xx status.
            requests.Timeout: If no response arrives within ``request_timeout``.
        """
        url = "https://api.deepseek.com/v1/chat/completions"
        headers = {"Authorization": f"Bearer {self.api_key}"}
        data: Dict[str, Any] = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
        }
        # Fix: forward stop sequences to the API (previously accepted but
        # silently ignored).
        if stop:
            data["stop"] = stop
        # Fix: bound the request so a dead connection cannot hang forever.
        response = requests.post(
            url, headers=headers, json=data, timeout=self.request_timeout
        )
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]

    @property
    def _llm_type(self) -> str:
        """Identifier used by LangChain for logging/serialization."""
        return "deepseek"

# Usage example: interactive chat REPL backed by DeepSeekLLM, with
# conversation memory so each turn sees the prior dialogue.
# (An earlier memory-less variant that was kept here as commented-out
# code has been removed; the memory-backed version below supersedes it.)
if __name__ == "__main__":
    llm = DeepSeekLLM()
    memory = ConversationBufferMemory()
    conversation = ConversationChain(llm=llm, memory=memory)

    print("DeepSeek 聊天机器人已启动（输入'退出'结束对话）")
    while True:
        try:
            user_input = input("你: ")
            if user_input.lower() in ["退出", "exit", "quit"]:
                break
            response = conversation.predict(input=user_input)
            print(f"AI助手: {response}")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C or end-of-input (e.g. piped/closed stdin) ends the
            # session cleanly; previously EOFError fell into the generic
            # handler and spun the loop printing errors forever.
            print("\n对话结束")
            break
        except Exception as e:
            # Keep the REPL alive on transient failures (network, API errors).
            print(f"出错: {e}")