from .llm_serve import LLMBase
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
import torch


class Qwen2(LLMBase):
    """vLLM-backed chat wrapper around a Qwen2 instruct checkpoint."""

    def __init__(self, model_path='', history=False):
        # NOTE(review): `history` is accepted for interface compatibility but
        # is not read anywhere in this class — confirm callers expect that.
        self.model_id_or_path = model_path

        # Tokenizer is only used to render the chat template into a prompt
        # string; generation itself is delegated to vLLM.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id_or_path, trust_remote_code=True)

        # Default decoding hyperparameters of Qwen2-7B-Instruct.
        # max_tokens (not set here) would cap the generation length.
        self.sampling_params = SamplingParams(temperature=0.7, top_p=0.8, repetition_penalty=1.052)

        # Model name or path; GPTQ/AWQ quantized checkpoints also work.
        # NOTE(review): max_model_len=64 is extremely small and
        # gpu_memory_utilization=1 leaves no headroom — confirm both are
        # intentional for the target deployment.
        self.llm = LLM(model=self.model_id_or_path, trust_remote_code=True, dtype=torch.float16, tensor_parallel_size=4,
                       max_model_len=64, gpu_memory_utilization=1, enforce_eager=True)

        print(self.sampling_params, flush=True)
        print('-' * 20, flush=True)
        print(self.llm.__dict__, flush=True)

    def chat(self, input, history=None):
        """Generate one assistant reply for `input`, optionally continuing `history`.

        Args:
            input: the new user message text.
            history: optional list of {"role": ..., "content": ...} dicts; it is
                extended in place with the new user turn and the assistant reply.

        Returns:
            The generated assistant text ('' if vLLM produced no output).
        """
        # Fixes vs. the original implementation:
        #  * mutable default `history=[]` was shared across calls;
        #  * the system message was insert()-ed into the caller's history,
        #    accumulating one copy per call;
        #  * the user turn was appended to history both before and after
        #    generation, duplicating it;
        #  * `prompt` was clobbered by `output.prompt` (the templated text)
        #    inside the loop, so the wrong string was recorded in history;
        #  * `generated_text` raised NameError when `outputs` was empty.
        if history is None:
            history = []
        user_message = {"role": "user", "content": input}

        # Build the prompt messages without mutating `history`.
        messages = [{"role": "system", "content": "You are a helpful assistant."}]
        messages.extend(history)
        messages.append(user_message)
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        outputs = self.llm.generate([text], self.sampling_params)

        generated_text = ''
        for output in outputs:
            generated_text = output.outputs[0].text
            print(f"Prompt: {output.prompt!r}, Generated text: {generated_text!r}")

        response = generated_text if generated_text else ''
        history.extend([
            user_message,
            {"role": "assistant", "content": response}
        ])
        return response


if __name__ == '__main__':
    # NOTE(review): this value is assigned but never used — presumably a
    # leftover from manual experimentation (the constructor hard-codes
    # tensor_parallel_size=4); confirm whether it should be wired through.
    tensor_parallel_size = 2
