from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer, GenerationConfig
import torch, os, sys 
from typing import List, Dict, Tuple, Union
from enum import Enum
from threading import Thread

from vllm import LLM, SamplingParams

# Device selection: prefer the first CUDA GPU when present, otherwise CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DEVICE_ID = "0"
# Fully-qualified device string (e.g. "cuda:0"); plain "cpu" when no GPU.
CUDA_DEVICE = DEVICE if DEVICE == "cpu" else f"{DEVICE}:{DEVICE_ID}"


def torch_gc():
    """Release cached CUDA memory on the configured device (no-op without a GPU)."""
    if not torch.cuda.is_available():
        return
    with torch.cuda.device(CUDA_DEVICE):
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

class Qwen:
    """Chat wrapper around a 4-bit (NF4) quantised Qwen model served via transformers."""

    # Directory containing this file; model weights are resolved relative to it.
    abs_path: str = os.path.dirname(os.path.abspath(__file__))
    device: str = "cuda" if torch.cuda.is_available() else 'cpu'
    # Reported by isrunning(); never set to True anywhere in this file —
    # NOTE(review): confirm whether callers are expected to toggle it.
    running: bool = False

    def __init__(self, model_name: str = "Qwen1.5-1.8B-Chat"):
        """Load the tokenizer and an NF4-quantised causal LM from <abs_path>/<model_name>.

        Args:
            model_name: directory name of the model under ``abs_path``.
        """
        model_path = os.path.join(self.abs_path, model_name)
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            # device_map="auto",
            quantization_config=bnb_config,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)

        # Request counter placeholder; not updated anywhere in this file.
        self.count = 0

    def isrunning(self) -> bool:
        """Return the running flag (static ``False`` as written)."""
        return self.running

    def chat(self, query: List[Dict[str, str]], max_new_tokens: int = 512) -> str:
        """Run one non-streaming chat completion.

        Args:
            query: conversation history as role/content dicts,
                e.g. ``[{"role": "user", "content": "hi"}]``.
            max_new_tokens: generation budget; defaults to 512, matching the
                previously hard-coded value.

        Returns:
            The assistant reply as a plain string.
        """
        messages = self._messages(query)

        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.device)

        # Pass the attention mask alongside the ids: generate() warns (and can
        # produce wrong results with padded batches) when only input_ids are given.
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            max_new_tokens=max_new_tokens,
        )
        # Strip the prompt tokens so only the newly generated reply is decoded.
        generated_ids = [
            output_ids[len(input_ids):]
            for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]

        response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

        return response

    def stream_chat(self, query: List[Dict[str, str]], max_new_tokens: int = 512):
        """Start generation on a background thread and return a token iterator.

        The caller iterates the returned ``TextIteratorStreamer`` to receive
        decoded text fragments as they are produced.

        Args:
            query: conversation history as role/content dicts.
            max_new_tokens: generation budget (default 512, as before).

        Returns:
            The streamer yielding decoded text chunks (prompt excluded).
        """
        streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
        messages = self._messages(query)

        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.device)
        # dict(model_inputs) spreads input_ids AND attention_mask into generate().
        generation_kwargs = dict(model_inputs, streamer=streamer, max_new_tokens=max_new_tokens)
        thread = Thread(target=self.model.generate, kwargs=generation_kwargs)
        thread.start()
        return streamer

    def _messages(self, query):
        """Prepend the fixed system prompt to the caller's conversation."""
        messages = [{'role':'system', 'content':'you are a helpful ai chatbot'}]
        messages += query
        return messages


class Qwen_vllm:
    """Batched chat wrapper around a Qwen model served through the vLLM offline engine."""

    # Directory containing this file; model weights are resolved relative to it.
    abs_path: str = os.path.dirname(os.path.abspath(__file__))
    device: str = "cuda" if torch.cuda.is_available() else 'cpu'
    # Reported by isrunning(); never set to True anywhere in this file —
    # NOTE(review): confirm whether callers are expected to toggle it.
    running: bool = False

    def __init__(self, model_name: str = "Qwen/Qwen1.5-1.8B-Chat"):
        """Build the vLLM engine, tokenizer, and default sampling parameters.

        The original constructor loaded the weights three times (once through
        transformers+bitsandbytes and twice through vLLM), discarding all but
        the last engine; only the final vLLM engine is constructed now.

        Args:
            model_name: model directory under ``abs_path``.
        """
        model_path = os.path.join(self.abs_path, model_name)

        # Sampling defaults are seeded from the model's own generation_config.
        self.generation_config = GenerationConfig.from_pretrained(model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)

        sampling_kwargs = {
            # NOTE(review): SamplingParams expects a list here; eos_token_id may
            # be a single int for some models — verify for the target checkpoint.
            "stop_token_ids": self.generation_config.eos_token_id,
            "early_stopping": False,
            "top_p": self.generation_config.top_p,
            "top_k": -1 if self.generation_config.top_k == 0 else self.generation_config.top_k,
            "temperature": 0.0,
            # vLLM's SamplingParams takes "max_tokens"; the transformers-only
            # name "max_new_tokens" used before raised TypeError here.
            "max_tokens": 512,
            "repetition_penalty": self.generation_config.repetition_penalty,
            "frequency_penalty": 0.0,
            "presence_penalty": 0.0,
            "n": 1,
            "best_of": 2,
            "use_beam_search": True,
        }
        self.sampling_params = SamplingParams(**sampling_kwargs)

        print("loading vllm model...")
        self.model = LLM(
            model=model_path,
            tokenizer=model_path,
            tensor_parallel_size=1,
            trust_remote_code=True,
            gpu_memory_utilization=0.95,
            enforce_eager=True,
            dtype="bfloat16",
        )

        # Request counter placeholder; not updated anywhere in this file.
        self.count = 0

    def isrunning(self) -> bool:
        """Return the running flag (static ``False`` as written)."""
        return self.running

    def chat(self, querys: List[List[Dict[str, str]]]):
        """Generate replies for a batch of conversations.

        Args:
            querys: list of conversations, each a list of role/content dicts, e.g.
                ``[[{"role": "user", "content": "good morning"},
                    {"role": "assistant", "content": "good morning. Can I help you?"},
                    {"role": "user", "content": "I need a cup of coffee. How much is it?"}]]``

        Returns:
            The list of vLLM ``RequestOutput`` objects, one per conversation;
            the reply text for conversation ``i`` is ``result[i].outputs[0].text``
            (this is how ``__main__`` consumes it).
        """
        querys = self._messages(querys)

        batch_texts = self.tokenizer.apply_chat_template(
            querys,
            tokenize=False,
            add_generation_prompt=True,
        )
        # vLLM consumes the prompt strings directly. The previous HF-style
        # re-tokenisation and prompt-stripping block referenced an undefined
        # variable (generated_ids) and always raised UnboundLocalError.
        response = self.model.generate(
            batch_texts,
            sampling_params=self.sampling_params,
        )

        torch_gc()

        return response

    def stream_chat(self, query: List[Dict[str, str]]):
        """Stream a reply token-by-token via ``TextIteratorStreamer``.

        NOTE(review): ``self.model`` is a vLLM ``LLM`` whose ``generate`` does
        not accept HuggingFace keyword arguments (input_ids/streamer/...), so
        this method fails at runtime as written — it appears copied from the
        transformers-based ``Qwen`` class. Kept unchanged pending a decision on
        a vLLM-native streaming implementation.
        """
        streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
        messages = self._messages(query)

        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.device)
        generation_kwargs = dict(model_inputs, streamer=streamer, max_new_tokens=512)
        thread = Thread(target=self.model.generate, kwargs=generation_kwargs)
        thread.start()
        return streamer

    def _messages(self, querys):
        """Prepend the fixed system prompt to every conversation in the batch."""
        messages = [{'role':'system', 'content':'you are a helpful ai chatbot.'}]
        querys = [messages + query for query in querys]
        return querys
    


if __name__ == "__main__":
    path = 'Qwen1.5-1.8B-Chat'

    llm = Qwen(path)

    # Simple REPL: read one user line per turn and print the model reply.
    while True:
        user_text = input("user:")
        query = [{"role": "user", "content": user_text}]
        # Qwen.chat takes a single conversation (List[Dict]) and returns a plain
        # string. The previous code wrapped the conversation in an extra list
        # (a vLLM-style batch, which corrupts Qwen._messages) and then indexed
        # response[0].outputs[0].text, which only exists on vLLM RequestOutput
        # objects and raised AttributeError for Qwen's string result.
        response = llm.chat(query)
        print("response:", response)
        print()

