from typing import Any, List, Optional
from langchain.llms.base import LLM
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers.generation import GenerationConfig
from torch import inference_mode
from time import time


class ChatGLM(LLM):
    """LangChain LLM wrapper around a locally loaded ChatGLM chat model.

    Call ``load_model()`` once before using the instance through LangChain.
    """

    # Populated by load_model(); typed as plain ``object`` so pydantic does
    # not attempt to validate the HF tokenizer/model instances.
    tokenizer: object = None
    model: object = None
    # Sampling temperature forwarded to ``model.chat()`` on every call.
    temperature: float = 1.0

    def load_model(self, pretrained_dir: str = 'plms/chatglm3-6b') -> None:
        """Load the tokenizer and the fp16 model onto the GPU in eval mode.

        Args:
            pretrained_dir: Local path (or hub id) of the pretrained weights.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_dir, trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(pretrained_dir, trust_remote_code=True).half().cuda().eval()

    @property
    def _llm_type(self) -> str:
        return "ChatGLM"

    @inference_mode()
    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        """Run a single-turn chat and return the model's reply.

        Prints a tokens/sec throughput line for quick profiling. ``stop`` is
        accepted for interface compatibility but not applied by this model.
        """
        start = time()
        # Single-turn usage: no conversation history is carried between calls.
        # Forward the configured temperature (previously declared but ignored).
        response, _history = self.model.chat(
            self.tokenizer, prompt, history=None, temperature=self.temperature
        )
        spend_time = time() - start
        num_tokens = len(self.tokenizer.tokenize(response))
        # Guard against division by zero for near-instant responses.
        speed = num_tokens / spend_time if spend_time > 0 else float('inf')
        print(f"generate ({num_tokens} tokens, {spend_time:.2f} sec, {speed:.2f} tokens/sec)")
        return response


class ChatQwen(LLM):
    """LangChain LLM wrapper around a locally loaded Qwen chat model.

    Call ``load_model()`` once before using the instance through LangChain.
    """

    # Populated by load_model(); typed as plain ``object`` so pydantic does
    # not attempt to validate the HF tokenizer/model instances.
    tokenizer: object = None
    model: object = None
    # Sampling temperature, applied via the model's GenerationConfig.
    temperature: float = 1.0

    def load_model(self, pretrained_dir: str) -> None:
        """Load the tokenizer and model (sharded via ``device_map='auto'``).

        The pretrained generation config is loaded and its temperature is
        overridden with ``self.temperature`` before being attached to the
        model, so every subsequent ``chat()`` call samples with it.

        Args:
            pretrained_dir: Local path (or hub id) of the pretrained weights.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_dir, trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(pretrained_dir, device_map='auto', trust_remote_code=True).eval()
        generation_config = GenerationConfig.from_pretrained(pretrained_dir, trust_remote_code=True)
        generation_config.temperature = self.temperature
        self.model.generation_config = generation_config

    @property
    def _llm_type(self) -> str:
        return "ChatQwen"

    @inference_mode()
    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        """Run a single-turn chat and return the model's reply.

        Prints a tokens/sec throughput line for quick profiling. ``stop`` is
        accepted for interface compatibility but not applied by this model.
        """
        start_time = time()
        # Single-turn usage: no conversation history is carried between calls.
        response, _history = self.model.chat(self.tokenizer, prompt, history=None)
        spend_time = time() - start_time
        num_tokens = len(self.tokenizer.tokenize(response))
        # Guard against division by zero for near-instant responses.
        speed = num_tokens / spend_time if spend_time > 0 else float('inf')
        print(f"generate ({num_tokens} tokens, {spend_time:.2f} sec, {speed:.2f} tokens/sec)")
        return response
    
    
class ChatYi(LLM):
    """LangChain LLM wrapper around a locally loaded Yi chat model.

    Call ``load_model()`` once before using the instance through LangChain.
    """

    # Populated by load_model(); typed as plain ``object`` so pydantic does
    # not attempt to validate the HF tokenizer/model instances.
    tokenizer: object = None
    model: object = None
    # NOTE(review): declared but not currently used by generate(); kept for
    # interface parity with the sibling wrappers.
    temperature: float = 1.0

    def load_model(self, pretrained_dir: str) -> None:
        """Load the tokenizer and model (sharded via ``device_map='auto'``).

        Args:
            pretrained_dir: Local path (or hub id) of the pretrained weights.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_dir, use_fast=False)
        self.model = AutoModelForCausalLM.from_pretrained(pretrained_dir, device_map="auto", torch_dtype='auto').eval()

    @property
    def _llm_type(self) -> str:
        return "ChatYi"

    @inference_mode()
    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        """Run a single-turn chat and return the model's reply.

        Prints a tokens/sec throughput line for quick profiling. ``stop`` is
        accepted for interface compatibility but not applied by this model.
        """
        start_time = time()
        messages = [{"role": "user", "content": prompt}]
        input_ids = self.tokenizer.apply_chat_template(
            conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt'
        )
        # Move inputs to the model's own device rather than hard-coding
        # 'cuda': with device_map='auto' the first shard may live elsewhere.
        output_ids = self.model.generate(input_ids.to(self.model.device))
        # Decode only the newly generated continuation, not the prompt.
        response = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
        spend_time = time() - start_time
        num_tokens = len(self.tokenizer.tokenize(response))
        # Guard against division by zero for near-instant responses.
        speed = num_tokens / spend_time if spend_time > 0 else float('inf')
        print(f"generate ({num_tokens} tokens, {spend_time:.2f} sec, {speed:.2f} tokens/sec)")
        return response