'''
Load a trained (LoRA) model, run inference, and compare its output
against the base model on the same prompt.
'''
import json
import os
import torch

# # number of available GPUs
# if torch.cuda.device_count() > 3:
#     os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'

from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForSeq2Seq


def infer_after_train(model_path):
    """Compare a LoRA-finetuned model against its base model on one prompt.

    Loads the adapter model saved at ``model_path``, resolves the base model
    from the adapter's ``adapter_config.json``, runs the same prompt through
    both models with greedy decoding, and prints the two completions side by
    side.

    Args:
        model_path: Directory of the saved finetuned/adapter model; must
            contain ``adapter_config.json`` with ``base_model_name_or_path``.

    Raises:
        FileNotFoundError: if ``adapter_config.json`` is missing.
        KeyError: if the config lacks ``base_model_name_or_path``.
    """
    print('4 加载保存的模型')
    # 'auto' lets accelerate shard/place layers across available devices.
    device = 'auto'
    model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device, torch_dtype=torch.float16)
    model.eval()

    # Resolve the base model path from the adapter config instead of
    # hard-coding per-machine paths.
    with open(os.path.join(model_path, 'adapter_config.json'), 'r', encoding='utf-8') as file:
        adapter_config = json.load(file)
    base_model_path = adapter_config['base_model_name_or_path']
    print(base_model_path)

    tokenizer = AutoTokenizer.from_pretrained(base_model_path, use_fast=False, trust_remote_code=True)
    base_model = AutoModelForCausalLM.from_pretrained(base_model_path, device_map=device, torch_dtype=torch.float16)
    base_model.eval()

    # Inference
    print('5 推理')
    # Local import: only needed here, and keeps module import side effects low.
    from train_data_convert import get_dataset_mycode
    train_dataset = get_dataset_mycode(is_map=False)
    example = train_dataset[80]
    example['question'] = '这段代码含义是什么'
    input_text = f"问题：{example['question']}\n结合以下资料回答问题，不超过5句话:\n{example['content']}"
    inputs = tokenizer(input_text, return_tensors="pt")

    # BUG FIX: with device_map='auto' the models may live on GPU while the
    # tokenizer returns CPU tensors; move inputs onto each model's device
    # before generate(), otherwise this crashes on a CUDA machine.
    # Greedy decoding (do_sample=False) makes the two runs deterministic and
    # directly comparable. NOTE: max_length counts prompt tokens as well.
    with torch.no_grad():
        outputs = model.generate(**inputs.to(model.device), max_length=3000, do_sample=False)

    with torch.no_grad():
        base_model_outputs = base_model.generate(**inputs.to(base_model.device), max_length=3000, do_sample=False)

    # Decode and strip the prompt prefix by character length.
    # NOTE(review): this assumes the decoded text starts with the exact
    # prompt string — usually true with skip_special_tokens=True, but can
    # drift if the tokenizer normalizes text; verify if output looks clipped.
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    base_model_output_text = tokenizer.decode(base_model_outputs[0], skip_special_tokens=True)

    print('-输入-'+ '-'*100)
    print(f"{input_text}")
    print('-base_model_output-' + '-' * 100)
    print(f"{base_model_output_text[len(input_text):]}")
    print('-lora_model_output-' + '-'* 100)
    print(f" {output_text[len(input_text):]}")


if __name__ == '__main__':
    # Pick the adapter checkpoint directory per OS.
    if os.name == 'nt':  # windows
        model_path = r"D:\code\other\LLMs\local_data\train_output\epoch_10"
    else:
        # BUG FIX: the original assigned epoch_ds_1 and immediately
        # overwrote it with checkpoint-760; keep only the effective value.
        # previous run: /home/ps/zhangxiancai/llm_deploy/LLMs/local_data/train_output/epoch_ds_1
        model_path = r"/home/ps/zhangxiancai/llm_deploy/LLMs/local_data/train_output/checkpoint-760"

    infer_after_train(model_path)