
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModelForSequenceClassification,BitsAndBytesConfig
import pymupdf
from vllm import LLM, SamplingParams
# from accelerate import infer_auto_device_map
# from transformers.generation import GenerationConfig

import os

class llmwrapper:
    """Thin wrapper around a Qwen-style chat model loaded via transformers.

    Holds the tokenizer, the model, and the running multi-turn conversation
    history (used by :meth:`chat`; :meth:`chat2` is single-turn).
    """

    # Class-level default kept for backward compatibility with code that
    # reads ``llmwrapper.history``; __init__ always shadows it with an
    # instance attribute.
    history = None

    def __init__(self, model_path):
        """Load tokenizer and model from *model_path*.

        Args:
            model_path: Local path or hub id of the model. ``trust_remote_code``
                is required because Qwen-style models ship custom code
                (e.g. ``tokenizer.from_list_format`` / ``model.chat``).

        Raises:
            Exception: any loading error is logged and re-raised unchanged.
        """
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(
                model_path, trust_remote_code=True
            )
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
                torch_dtype="auto",
                device_map="auto",
                trust_remote_code=True,
            ).eval()
            self.history = None
            print("完成初始化")
        except Exception as e:
            print("发生异常", str(e))
            # Bare raise preserves the original traceback; `raise e` would
            # rewrite it to point here.
            raise

    def chat(self, query, image=None):
        """Run one multimodal chat turn via the model's remote-code chat API.

        Conversation state is carried across calls in ``self.history``;
        call :meth:`newChat` to start a fresh conversation.

        Args:
            query: User text for this turn.
            image: Optional image reference (path/URL) understood by the
                tokenizer's ``from_list_format``; ``None`` for text-only.
                (Default added for convenience; positional calls still work.)

        Returns:
            Tuple ``(response, inputs)`` — the model's reply and the
            list-format prompt string that was fed to it.
        """
        # Build the Qwen-VL list-format prompt once; include the image item
        # only when an image was supplied.
        items = []
        if image is not None:
            items.append({'image': image})
        items.append({'text': query})
        inputs = self.tokenizer.from_list_format(items)

        # Remote-code chat API returns (reply, updated_history).
        response, history = self.model.chat(
            self.tokenizer, query=inputs, history=self.history
        )
        self.history = history

        return response, inputs

    def chat2(self, query):
        """Run a single-turn chat via ``apply_chat_template`` + ``generate``.

        Does not read or update ``self.history``.

        Args:
            query: User text.

        Returns:
            Tuple ``(response, text)`` — the decoded reply (special tokens
            stripped) and the rendered prompt string.
        """
        messages = [
            {"role": "system", "content": "你是一位助手."},
            {"role": "user", "content": query},
        ]

        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )

        # Use the model's own device instead of hard-coding 'cuda' so this
        # also works on CPU-only hosts and respects device_map placement.
        model_inputs = self.tokenizer([text], return_tensors="pt").to(
            self.model.device
        )

        generated_ids = self.model.generate(
            model_inputs.input_ids,
            max_new_tokens=2048,
        )
        # Strip the prompt tokens so only the newly generated continuation
        # is decoded.
        generated_ids = [
            output_ids[len(input_ids):]
            for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]

        response = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True
        )[0]
        return response, text

    def newChat(self):
        """Reset the multi-turn conversation state used by :meth:`chat`."""
        self.history = None

# print(torch.__version__)
# print(torch.cuda.is_available())

# #history = None

# def dealwithImage(model, tokenizer, image, device):
#     # 1st dialogue turn
#     query = tokenizer.from_list_format([
#         {'image': image},
#         {'text': '请帮我提取样本编号的信息，以JSON格式提供。'},
#     ])

#     response, history = model.chat(tokenizer, query=query, history=None)
#     print(response)

#     # # 图中是一名年轻女子在沙滩上和她的狗玩耍，狗的品种可能是拉布拉多。她们坐在沙滩上，狗的前腿抬起来，似乎在和人类击掌。两人之间充满了信任和爱。

#     # # 2nd dialogue turn
#     # response, history = model.chat(tokenizer, '图片中有分析结果表吗？如果有，请以JSON格式提供。如果没有，请输出"[]"', history=history)
#     # print(response)
#     # # # <ref>击掌</ref><box>(517,508),(589,611)</box>
#     # # image = tokenizer.draw_bbox_on_latest_picture(response, history)
#     # # if image:
#     # # image.save('1.jpg')
#     # # else:
#     # # print("no box")

# # testChatGlm4V2()

# def pdf_to_image(pdf_path, model, tokenizer, device):
#     # 获取pdf文件的文件夹
#     base_path = os.path.dirname(pdf_path)
#     # 保存的图片路径
#     image_path = os.path.join(base_path, 'images')
#     if not os.path.exists(image_path):
#         os.makedirs(image_path)
#     docu = pymupdf.open(pdf_path)
#     for page_number in range(len(docu)):
#         page = docu[page_number]

#         # 需要把图片转化为最大边长不超过1120 
#         zoom_x = 0.8
#         zoom_y = 0.8
#         w = page.rect.width
#         h = page.rect.height
#         print(w, h)
#         if w > h:
#             zoom_x = 1120 / w
#             zoom_y = zoom_x
#         else:
#             zoom_y = 1120 / h
#             zoom_x = zoom_y
#         mat = pymupdf.Matrix(zoom_x, zoom_y)
#         pix = page.get_pixmap(matrix=mat)
#         print(type(pix))
#         mode = "RGB"
#         img = Image.frombytes(mode, [pix.width, pix.height], pix.samples)
#         print(type(img))

#         image_file_name = os.path.join(image_path, f"{page_number}.jpg")

#         img.save(image_file_name)

#         # 处理图片
#         dealwithImage(model, tokenizer, image_file_name, device)

#     docu.close()

# # pdf_to_image("D:\\xiaojuan.pdf")

# def test(pdf_path):
#     device = "cuda"
#     torch.manual_seed(1234)

#     # Note: The default behavior now has injection attack prevention off.
#     tokenizer = AutoTokenizer.from_pretrained("/opt/ai/llm/models/Qwen-VL-Chat", trust_remote_code=True)

#     # use bf16
#     # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
#     # use fp16
#     # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL-Chat", device_map="auto", trust_remote_code=True, fp16=True).eval()
#     # use cpu only
#     # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-VL-Chat", device_map="cpu", trust_remote_code=True).eval()
#     # use cuda device
#     model = AutoModelForCausalLM.from_pretrained("/opt/ai/llm/models/Qwen-VL-Chat", device_map="cuda", trust_remote_code=True).eval()

#     # Specify hyperparameters for generation (No need to do this if you are using transformers>=4.32.0)
#     # model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)

#     # device_map = infer_auto_device_map(model, max_memory={0: "10GiB", 1: "10GiB", "cpu": "30GiB"})

#     # if torch.cuda.device_count() > 1:
#     #     from accelerate import dispatch_model
#     #     from accelerate.utils import infer_auto_device_map, get_balanced_memory

#     #     device_map = infer_auto_device_map(model, max_memory=get_balanced_memory(model))
#     #     model = dispatch_model(model, device_map=device_map)
#     #     print(f"Model has been moved to {device_map} devices.")
#     # else:
#     #     print(f"GPU不够")
#     #     return

#     # model = AutoModelForSequenceClassification.from_pretrained(
#     #     # "THUDM/glm-4v-9b",
#     #     model_path,
#     #     torch_dtype=torch.bfloat16,
#     #     trust_remote_code=True
#     # )

#     # model = torch.nn.DataParallel(model)
#     # model.to(device)
#     # model.eval()

#     pdf_to_image(pdf_path, model, tokenizer, device)

#     # gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}

#     # query = '描述这张图片'
#     # # image = Image.open("D:\\xiaojuan.jpg").convert('RGB')
#     # # print(type(image))
#     # inputs = tokenizer.apply_chat_template([{"role": "user", "image": image, "content": query}],
#     #                                     add_generation_prompt=True, tokenize=True, return_tensors="pt",
#     #                                     return_dict=True)  # chat mode

#     # inputs = inputs.to(device)
#     # with torch.no_grad():
#     #     outputs = model.generate(**inputs, **gen_kwargs)
#     #     outputs = outputs[:, inputs['input_ids'].shape[1]:]
#     #     print(tokenizer.decode(outputs[0]))

# # test("xiaojuan.pdf")


# from transformers import AutoModelForCausalLM, AutoTokenizer
# device = "cuda" # the device to load the model onto

# model = AutoModelForCausalLM.from_pretrained(
#     "Qwen/Qwen2-57B-A14B-Instruct",
#     torch_dtype="auto",
#     device_map="auto"
# )
# tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-57B-A14B-Instruct")

# prompt = "Give me a short introduction to large language model."
# messages = [
#     {"role": "system", "content": "You are a helpful assistant."},
#     {"role": "user", "content": prompt}
# ]
# text = tokenizer.apply_chat_template(
#     messages,
#     tokenize=False,
#     add_generation_prompt=True
# )
# model_inputs = tokenizer([text], return_tensors="pt").to(device)

# generated_ids = model.generate(
#     model_inputs.input_ids,
#     max_new_tokens=512
# )
# generated_ids = [
#     output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
# ]

# response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
