from typing import Optional, Any
from typing import List
import torch
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from transformers import AutoModel, AutoTokenizer, AutoModelForCausalLM
from config.config_env import DEVICE_IN


class QwenService(LLM):
    """Singleton LangChain ``LLM`` wrapper around a locally loaded Qwen chat model.

    The model/tokenizer are loaded once via :meth:`load_model`; subsequent
    constructor calls return the same shared instance.
    """

    _instance = None  # singleton instance
    max_token: int = 2048
    temperature: float = 0.95
    top_p = 0.7
    history_len: int = 3
    device = DEVICE_IN
    # NOTE(review): class-level mutable list — shared across all instances and
    # never used inside this class; kept only in case external code reads it.
    history = []
    tokenizer: object = None
    model: object = None

    def __new__(cls, *args, **kwargs):
        # Lazily create the single shared instance.
        if cls._instance is None:
            cls._instance = super(QwenService, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # Guard: ``LLM`` is a pydantic model, so re-running ``super().__init__()``
        # on the shared singleton would reset already-populated fields
        # (e.g. a loaded ``model``/``tokenizer``) back to their ``None`` defaults.
        if not self.__dict__:
            super().__init__()

    @property
    def _llm_type(self) -> str:
        return "Qwen LLM"

    def _call(self, prompt: str, stop: Optional[List[str]] = None,
              run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any) -> str:
        """Generate a chat completion for ``prompt``.

        Args:
            prompt: The user message; wrapped in a fixed system/user chat template.
            stop: Optional stop strings — the response is truncated at the first
                occurrence of any of them (LangChain ``_call`` contract).
            run_manager: Unused callback manager (accepted for interface parity).

        Returns:
            The decoded model response with special tokens stripped.
        """
        messages = [
            {"role": "system", "content": "你是一个智能助手。"},
            {"role": "user", "content": prompt}
        ]
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.device)
        # Pass the full encoding so ``attention_mask`` reaches ``generate``
        # instead of only ``input_ids`` (avoids padding/mask warnings).
        generated_ids = self.model.generate(
            **model_inputs,
            max_new_tokens=512,
        )
        # Strip the echoed prompt tokens, keeping only newly generated ones.
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]

        response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        # Honor the ``stop`` sequences (previously silently ignored): truncate
        # at the earliest occurrence of any stop string.
        if stop:
            for s in stop:
                idx = response.find(s)
                if idx != -1:
                    response = response[:idx]
        return response

    def load_model(self, model_name_or_path: str, isqiamtize4, device):
        """Load tokenizer and model from ``model_name_or_path`` (idempotent).

        Args:
            model_name_or_path: Local path or hub id of the Qwen checkpoint.
            isqiamtize4: Accepted for backward compatibility but currently
                unused (presumably a misspelled 4-bit-quantization flag —
                TODO confirm and implement or remove at the call sites).
            device: ``device_map`` passed to ``from_pretrained`` (e.g. "auto").
        """
        # Idempotent: skip reloading if both components are already in place.
        if self.model is not None and self.tokenizer is not None:
            return
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True,
                                                          device_map=device)
        self.model = self.model.eval()






if __name__ == '__main__':
    # Interactive console demo: load the model once, then answer stdin prompts.
    # Raw string so the Windows backslashes are not treated as escapes.
    MODEL_PATH = r"D:\code\model\llm\qwen1.5"

    service = QwenService()
    service.load_model(model_name_or_path=MODEL_PATH, isqiamtize4=False, device="auto")
    # (Removed a second, unused AutoModelForCausalLM.from_pretrained call that
    # loaded the full model weights again into a variable named "tokenizer".)

    # Fall back to CPU when CUDA is unavailable so the demo still runs.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    while True:
        user_input = input()
        messages = [
            {"role": "system", "content": "你是一个智能助手。"},
            {"role": "user", "content": user_input}
        ]
        text = service.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = service.tokenizer([text], return_tensors="pt").to(device)
        generated_ids = service.model.generate(
            model_inputs.input_ids,
            max_new_tokens=512,
        )
        # Keep only the newly generated tokens (drop the echoed prompt).
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]

        response = service.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        print(response)
