#
import os
from typing import Dict, Tuple
import torch
import transformers
from modelscope import AutoModelForCausalLM, AutoTokenizer

'''
Note: configure the following environment variables before running:
export HF_HOME=/home/psdz/diskc/yantao/awork/zywy/rlm/llms/hf_home
export HF_ENDPOINT="https://hf-mirror.com"
'''

class Glm4Engine(object):
    """Inference helpers for several GLM-4 9B model variants.

    Each public method loads the corresponding model/tokenizer from scratch
    and runs a single-turn generation for ``query``.  Requires the HF_HOME /
    HF_ENDPOINT environment variables to be configured beforehand (see the
    module-level note).
    """

    # Model selectors accepted by infer().
    BASE_MODEL = 1         # GLM-4 9B base model (loaded via ModelScope)
    CHAT_MODEL = 2         # GLM-4 9B chat model (Hugging Face)
    B0414_MODEL = 3        # Official GLM-4 9B 0414 release
    B0414_LORA_MODEL = 4   # Locally fine-tuned checkpoint

    # ModelScope cache directory for the base model.
    # Alternative local copy:
    # /home/psdz/diskc/yantao/awork/gyb/work/modelscope/models/ZhipuAI/glm-4-9b-hf
    CACHE_DIR = '/home/psdz/diskc/yantao/awork/zywy/rlm/llms/ms_home'

    @staticmethod
    def infer(query: str, model: int = 1) -> Tuple[int, str]:
        """Dispatch ``query`` to the selected model variant.

        Args:
            query: user prompt to send to the model.
            model: one of BASE_MODEL / CHAT_MODEL / B0414_MODEL /
                B0414_LORA_MODEL (defaults to BASE_MODEL).

        Returns:
            ``(0, response_text)`` on success, ``(1, error_message)`` for an
            unknown selector.  (Bug fix: the original returned ``None``
            despite the declared ``Tuple[int, str]`` return type.)
        """
        if model == Glm4Engine.BASE_MODEL:
            resp = Glm4Engine.glm4_9b_infer(query=query)
            print(f'网络输出：{resp};')
        elif model == Glm4Engine.CHAT_MODEL:
            resp = Glm4Engine.glm4_9b_chat_infer(query=query)
            print(f'Chat网络输出: {resp};')
        elif model == Glm4Engine.B0414_MODEL:
            resp = Glm4Engine.glm4_9b_0414_infer(query=query)
            print(f'Official 9b 0414: {resp};')
        elif model == Glm4Engine.B0414_LORA_MODEL:
            resp = Glm4Engine.glm4_9b_0414_lora_infer(query=query)
            print(f'LoRA: {resp};')
        else:
            return 1, f'unknown model selector: {model}'
        return 0, resp

    @staticmethod
    def _chat_generate(model, tokenizer, query: str, device: str) -> str:
        """Shared chat-template generation path.

        Builds a single-turn chat prompt, generates, strips the prompt
        tokens from the output and decodes the continuation.  Used by the
        chat / 0414 / fine-tuned inference methods, which previously
        duplicated this code verbatim.
        """
        inputs = tokenizer.apply_chat_template(
            [{"role": "user", "content": query}],
            add_generation_prompt=True,
            tokenize=True,
            return_tensors="pt",
            return_dict=True,
        ).to(device)
        # NOTE: do_sample=True with top_k=1 is effectively greedy decoding.
        gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
        with torch.no_grad():
            outputs = model.generate(**inputs, **gen_kwargs)
            # Keep only the newly generated tokens (drop the prompt).
            outputs = outputs[:, inputs['input_ids'].shape[1]:]
            return tokenizer.decode(outputs[0], skip_special_tokens=True)

    @staticmethod
    def glm4_9b_0414_infer(query: str) -> str:
        """Infer with the official GLM-4 9B 0414 release (Hugging Face)."""
        # NOTE(review): setting CUDA_VISIBLE_DEVICES after torch is imported
        # only takes effect if CUDA has not been initialized yet — confirm.
        os.environ['CUDA_VISIBLE_DEVICES'] = '1'
        MODEL_PATH = 'THUDM/GLM-4-9b-0414'
        device = "cuda" if torch.cuda.is_available() else "cpu"
        tokenizer = transformers.AutoTokenizer.from_pretrained(
            MODEL_PATH, resume_download=True, trust_remote_code=True)
        model = transformers.AutoModelForCausalLM.from_pretrained(
            MODEL_PATH,
            resume_download=True,  # resume interrupted downloads
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
            device_map="auto",
        ).eval()
        return Glm4Engine._chat_generate(model, tokenizer, query, device)

    @staticmethod
    def glm4_9b_0414_lora_infer(query: str) -> str:
        """Infer with the locally fine-tuned GLM-4 checkpoint.

        NOTE(review): despite the name, this loads a full checkpoint via
        AutoModelForCausalLM rather than a PEFT/LoRA adapter — presumably
        the adapter was merged at save time; verify against the training
        pipeline.
        """
        model_dir = '/home/psdz/diskc/yantao/awork/zywy/rlm/llms/GLM-4/finetune/work/outputs001/checkpoint-15000'
        model = transformers.AutoModelForCausalLM.from_pretrained(
            model_dir, trust_remote_code=False, device_map="auto")
        model.eval()
        # The checkpoint directory also holds the tokenizer files.
        tokenizer = AutoTokenizer.from_pretrained(model_dir)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        return Glm4Engine._chat_generate(model, tokenizer, query, device)

    @staticmethod
    def glm4_9b_chat_infer(query: str) -> str:
        """Infer with the GLM-4 9B chat model (Hugging Face)."""
        os.environ['CUDA_VISIBLE_DEVICES'] = '1'
        MODEL_PATH = "THUDM/glm-4-9b-chat-hf"
        device = "cuda" if torch.cuda.is_available() else "cpu"
        tokenizer = transformers.AutoTokenizer.from_pretrained(
            MODEL_PATH, resume_download=True, trust_remote_code=True)
        model = transformers.AutoModelForCausalLM.from_pretrained(
            MODEL_PATH,
            resume_download=True,  # resume interrupted downloads
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
            device_map="auto",
        ).eval()
        return Glm4Engine._chat_generate(model, tokenizer, query, device)

    @staticmethod
    def glm4_9b_infer(query: str) -> str:
        """Infer with the GLM-4 9B base model via ModelScope.

        Uses plain tokenization (no chat template), so it cannot share
        ``_chat_generate``.
        """
        os.environ['CUDA_VISIBLE_DEVICES'] = '1'
        MODEL_PATH = "ZhipuAI/glm-4-9b-hf"
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_PATH,
            cache_dir=Glm4Engine.CACHE_DIR,
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
            device_map="auto",
        ).eval()
        device = "cuda" if torch.cuda.is_available() else "cpu"
        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_PATH, cache_dir=Glm4Engine.CACHE_DIR, trust_remote_code=True)

        # Plain encode; wrap each field as a batch-of-one tensor on `device`.
        encoding = tokenizer(query)
        inputs = {key: torch.tensor([value]).to(device)
                  for key, value in encoding.items()}

        # NOTE: do_sample=True with top_k=1 is effectively greedy decoding.
        gen_kwargs = {"max_length": 2500, "do_sample": True, "top_k": 1}
        with torch.no_grad():
            outputs = model.generate(**inputs, **gen_kwargs)
            # Keep only the newly generated tokens (drop the prompt).
            outputs = outputs[:, inputs['input_ids'].shape[1]:]
            resp = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return resp