import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from langchain.llms.base import LLM
from typing import Optional, List, Any
import yaml


# class LocalLLM(LLM):
#     """本地大模型封装"""
#
#     def __init__(self, config_path: str = "config.yaml"):
#         super().__init__()
#         with open(config_path, 'r', encoding='utf-8') as f:
#             config = yaml.safe_load(f)
#
#         self.llm_config = config['llm']
#         self.tokenizer = None
#         self.model = None
#         self._load_model()
#
#     def _load_model(self):
#         """加载本地模型"""
#         model_path = self.llm_config['model_path']
#
#         self.tokenizer = AutoTokenizer.from_pretrained(
#             model_path,
#             trust_remote_code=True
#         )
#
#         self.model = AutoModelForCausalLM.from_pretrained(
#             model_path,
#             torch_dtype=torch.float16 if self.llm_config['device'] == 'cuda' else torch.float32,
#             device_map="auto" if self.llm_config['device'] == 'cuda' else None,
#             trust_remote_code=True
#         )
#
#         if self.llm_config['device'] == 'cuda' and torch.cuda.is_available():
#             self.model = self.model.cuda()
#
#     @property
#     def _llm_type(self) -> str:
#         return "local_llm"
#
#     def _call(
#             self,
#             prompt: str,
#             stop: Optional[List[str]] = None,
#             run_manager: Optional[Any] = None,
#             **kwargs: Any,
#     ) -> str:
#         """调用本地模型生成回复"""
#         inputs = self.tokenizer(prompt, return_tensors="pt")
#
#         if self.llm_config['device'] == 'cuda':
#             inputs = {k: v.cuda() for k, v in inputs.items()}
#
#         with torch.no_grad():
#             outputs = self.model.generate(
#                 **inputs,
#                 max_new_tokens=self.llm_config['max_tokens'],
#                 temperature=self.llm_config['temperature'],
#                 do_sample=True,
#                 pad_token_id=self.tokenizer.eos_token_id
#             )
#
#         response = self.tokenizer.decode(
#             outputs[0][inputs['input_ids'].shape[1]:],
#             skip_special_tokens=True
#         )
#
#         return response.strip()


from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, HumanMessage, SystemMessage
from typing import Optional, List, Any
import yaml
import os


class OpenAILLM:
    """Wrapper around an OpenAI-compatible chat endpoint (DashScope by default).

    Reads model settings from the ``llm`` section of a YAML config file and
    exposes a streaming entry point (``__call__``) plus a blocking multi-turn
    API (``chat``).
    """

    def __init__(self, config_path: str = "config.yaml"):
        """Load the ``llm`` config section and build the underlying chat model.

        Args:
            config_path: Path to a YAML file containing an ``llm`` mapping.

        Raises:
            KeyError: if the YAML file has no ``llm`` section.
            ValueError: if no API key is available (see ``_initialize_chat_model``).
        """
        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)

        self.llm_config = config['llm']
        self.chat_model = self._initialize_chat_model()

    def _initialize_chat_model(self) -> "ChatOpenAI":
        """Build a ``ChatOpenAI`` client pointed at the configured endpoint.

        The API key is taken from the config file, falling back to the
        ``DASHSCOPE_API_KEY`` environment variable.
        """
        api_key = self.llm_config.get('api_key') or os.getenv('DASHSCOPE_API_KEY')
        base_url = self.llm_config.get('base_url', 'https://dashscope.aliyuncs.com/compatible-mode/v1')

        if not api_key:
            # BUG FIX: the old message told users to set OPENAI_API_KEY even
            # though the code actually reads DASHSCOPE_API_KEY (see above).
            raise ValueError("请在config.yaml中设置api_key或设置DASHSCOPE_API_KEY环境变量")

        return ChatOpenAI(
            model=self.llm_config.get('model_name', 'qwen-plus'),
            temperature=self.llm_config.get('temperature', 0.7),
            max_tokens=self.llm_config.get('max_tokens', 2048),
            openai_api_key=api_key,
            openai_api_base=base_url,
            request_timeout=self.llm_config.get('request_timeout', 60)
        )

    def __call__(self, prompt, **kwargs):
        """Stream a reply for *prompt*.

        Args:
            prompt: Either a plain string (wrapped in a ``HumanMessage``) or a
                ready-made list of messages passed through unchanged.

        Returns:
            A generator yielding ``{"data": <text chunk>}`` dicts; empty
            chunks are skipped. (The old ``-> str`` annotation was wrong.)
        """
        if isinstance(prompt, str):
            messages = [HumanMessage(content=prompt)]
        else:
            messages = prompt

        def generate_response():
            # BUG FIX: the try/except must live *inside* the generator body.
            # Generators execute lazily, so the old try wrapped around the
            # `return generate_response()` statement could never catch
            # exceptions raised while actually streaming chunks.
            try:
                for chunk in self.chat_model.stream(messages):
                    if chunk.content:
                        yield {"data": chunk.content}
            except Exception as e:
                print(f"调用OpenAI API时出错: {e}")

        return generate_response()

    def chat(self, messages: List["BaseMessage"]) -> str:
        """Send a full message history and return the reply text.

        Returns a fixed apology string (and logs the error) if the API
        call fails, so callers never see an exception.
        """
        try:
            response = self.chat_model.invoke(messages)
            return response.content
        except Exception as e:
            print(f"调用OpenAI API时出错: {e}")
            return "抱歉，模型调用失败，请稍后再试。"

if __name__ == "__main__":
    # Smoke test: stream a reply and print chunks as they arrive.
    llm = OpenAILLM()
    print(">>> 流式输出")
    # BUG FIX: __call__ returns a generator of {"data": ...} dicts, so the
    # old `print(llm(...))` only printed "<generator object ...>" instead
    # of the model output. Iterate and print each chunk incrementally.
    for piece in llm("介绍一下你自己"):
        print(piece["data"], end="", flush=True)
    print()