'''
:@Author: junpeng_chen
:@Date: 7/25/2023, 1:32:17 PM
:@LastEditors: junpeng_chen
:@LastEditTime: 7/25/2023, 1:32:17 PM
:Description: 
:Copyright: Copyright (c) 2023 junpeng_chen. All rights reserved.
'''
from transformers import AutoTokenizer, AutoModel
from peft import LoraConfig, get_peft_model, TaskType
import torch
from utils.log import logger
import os


class ChatGLMUtils:
    """
    Helpers for loading and running a ChatGLM model (training is not handled here).
    """

    def __init__(self) -> None:
        # Both are populated by load_model(); None until then.
        self.model = None
        self.tokenizer = None

    def load_model(self, checkpoint="/home/cike/ytc/GLM2/chatglm2-6b",
                   load_lora=True, local_loading=False, model_path=None, cuda_index=0):
        """
        Load the ChatGLM model and tokenizer.

        :param checkpoint: HF checkpoint directory of the base model
        :param load_lora: if True, wrap the base model with a LoRA adapter
        :param local_loading: if True, additionally restore weights from model_path
        :param model_path: path to a local state-dict file (e.g. pytorch_model.bin)
        :param cuda_index: CUDA device index to place the model on
        :raises Exception: re-raises any loading error after logging it
        """
        try:
            self.model, loading_info = AutoModel.from_pretrained(
                checkpoint,
                load_in_8bit=False,
                torch_dtype=torch.float16,
                output_loading_info=True,
                trust_remote_code=True,
            )
            self.tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
            if load_lora:
                self.model = self.load_lora_config(self.model)
            if local_loading:
                # Guard model_path against None: os.path.exists(None) raises
                # TypeError instead of falling through to the warning branch.
                if model_path is not None and os.path.exists(model_path):
                    logger.info("Restarting from %s", model_path)
                    # map_location="cpu" lets a checkpoint saved on GPU load on
                    # CPU-only hosts; weights move to the target device below.
                    state_dict = torch.load(model_path, map_location="cpu")
                    self.model.load_state_dict(state_dict, strict=False)
                else:
                    logger.warning("Checkpoint %s not found", model_path)
            device = f'cuda:{cuda_index}' if torch.cuda.is_available() else 'cpu'
            logger.info("device: %s", device)
            self.model.to(device)
            self.model.half().eval()
        except Exception as err:
            logger.error("some errors happened while chatglm loading[{}]: {}".format(type(err), str(err)))
            raise

    def predict(self, prompt_input, do_sample=False):
        '''
        Run one round of model inference.

        :param prompt_input: fully constructed prompt containing the actual input data
        :param do_sample: chat() defaults to do_sample=True; passing False makes the
            output deterministic across calls (equivalent to temperature=0 in generate)
        :return: generated text from the model
        :raises Exception: re-raises any inference error after logging it
        '''
        try:
            response, history = self.model.chat(self.tokenizer, prompt_input, history=[], do_sample=do_sample)
            return response
        except Exception as err:
            logger.error("some errors happened when chatglm predicted[{}]: {}".format(type(err), str(err)))
            raise

    def load_lora_config(self, model):
        """
        Wrap the given model with a LoRA adapter configuration.

        :param model: base model to wrap
        :return: PEFT-wrapped model
        :raises Exception: re-raises any configuration error after logging it
        """
        try:
            config = LoraConfig(
                r=8,
                lora_alpha=16,
                lora_dropout=0.05,
                target_modules=["query_key_value"],
                bias="none",
                # Use the imported TaskType enum rather than the raw string,
                # consistent with the module's imports.
                task_type=TaskType.CAUSAL_LM,
            )
            return get_peft_model(model, config)
        except Exception as err:
            logger.error("some errors happened when lora was loaded[{}]: {}".format(type(err), str(err)))
            raise


# if __name__ == "__main__":
#     glm_util = ChatGLMUtils()
#     glm_util.load_model(local_loading=True,
#                         model_path=f"/home/cike/ytc/GLM2/checkpoints/Lora7_25_combine/checkpoint-64000/pytorch_model.bin")
#     while True:
#         literal = str(input("请输入字面量："))
#         intput_prompt = "假设你是数值格式转换器，输入字面量，需要输出格式化后的数值。格式要求：如果不存在数值则只生成none即可；单个数值点直接转换为数字；多个数值点则转换为list列表形式；数值范围则识别左边界点和右边界点，其中若某一边无边界点则用none标识无边界，最后将边界点用英文括号括起来，英文小括号表示不包括边界点，英文大括号表示包括边界点；输入：{}；输出：\n".format(
#             literal)
#         res = glm_util.predict(intput_prompt)
#         print(res)

