import os
import argparse
import re
import time
import numpy as np

import pickle
import torch
import transformers
from tqdm import tqdm

from http import HTTPStatus

# Paths used by this script
model_dir = r'E:\TJU\Grade3\NLP\Project\cheatGPT-ryan\models'  # local directory holding the pretrained scoring models
load_dir = r'..\results\perturb'  # input: pickled perturbed texts
save_dir = r'..\results\score'    # output: per-sample loss arrays (.npy)

# Model loading
def load_score_model(model_name):
    """
    Load the scoring model and its tokenizer from the local model directory.

    @ param model_name: str, name of the scoring model to load
        (a subdirectory of ``model_dir``)

    @ return model: transformers.PreTrainedModel, the loaded scoring model
    @ return tokenizer: transformers.PreTrainedTokenizer, the loaded tokenizer
    """

    print(f'Loading SCORING model {model_name}...')
    model_path = os.path.join(model_dir, model_name)

    # Load the model; use fp16 for the large 2.7B variants to save memory.
    model_kwargs = {}
    if '2.7B' in model_name:
        model_kwargs.update(dict(torch_dtype=torch.float16))

    model = transformers.AutoModelForCausalLM.from_pretrained(model_path, **model_kwargs)

    # Load the tokenizer. OPT models require the slow tokenizer; the correct
    # keyword is `use_fast` — the previous `fast` key was not recognized by
    # AutoTokenizer.from_pretrained, so the fast tokenizer was used anyway.
    optional_tok_kwargs = {}
    if "opt-" in model_name:
        print("Using non-fast tokenizer for OPT")
        optional_tok_kwargs['use_fast'] = False
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_path, **optional_tok_kwargs)
    # Causal LMs (GPT-2/OPT/Neo) ship without a pad token; reuse EOS for padding.
    tokenizer.pad_token_id = tokenizer.eos_token_id

    return model, tokenizer

def get_ll_of_sample(sample):
    """
    Compute the scoring-model loss for every segment of every perturbed text.

    @ param sample: List[List[str]], texts to score, shaped
        (num_perturbations, num_segments)

    @ return ll_of_sample: np.ndarray of the same shape
        (num_perturbations, num_segments). NOTE(review): despite the name,
        each entry is the model's cross-entropy loss, i.e. the mean
        *negative* log-likelihood per token — downstream consumers are
        assumed to account for the sign; confirm before changing.

    Relies on the module-level globals `model`, `tokenizer` and `device`
    set up in the __main__ block.
    """
    # Robustness: an empty sample returns an empty array instead of
    # crashing on sample[0].
    if not sample:
        return np.zeros((0, 0))

    ll_of_sample = np.zeros((len(sample), len(sample[0])))
    with torch.no_grad():
        # Iterate over every perturbed copy of the text...
        for perturb_idx, seg_set in enumerate(tqdm(sample, desc="Processing samples")):
            # ...and over each segment within that copy.
            for seg_idx, seg in enumerate(seg_set):
                tokenized = tokenizer(seg, return_tensors="pt").to(device)
                labels = tokenized.input_ids
                ll_of_sample[perturb_idx, seg_idx] = model(**tokenized, labels=labels).loss.item()

    return ll_of_sample

def _score_files(load_path, save_subdir, source_tag, filename_prefix):
    """
    Score every pickled perturbed sample in `load_path` and save each result.

    @ param load_path: str, directory containing pickled perturbed samples
    @ param save_subdir: str, directory to write the per-sample .npy files to
    @ param source_tag: str, label used in progress messages
        ('human' or the source model name)
    @ param filename_prefix: str, prefix for output filenames
        ('' for human text, '<source_model>_' for LLM text)
    """
    # Sort numerically by the first number in each filename so that
    # `--checkpoint` indices are deterministic across runs (previously the
    # LLM branch used the raw, platform-dependent os.listdir order).
    file_list = sorted(os.listdir(load_path), key=lambda x: int(re.findall(r'\d+', x)[0]))

    for idx, filename in enumerate(file_list):
        if idx < args.checkpoint:
            continue  # resume support: skip already-processed files
        file = os.path.join(load_path, filename)
        # Read the perturbed texts for this sample.
        # NOTE(review): pickle.load on files produced elsewhere — safe only
        # because the inputs come from our own pipeline.
        with open(file, 'rb') as f:
            sample = pickle.load(f)
        print(f'Get the ll of {source_tag} sample {idx}...')
        ll_of_sample = get_ll_of_sample(sample)
        # Save under the original file stem. Previously a fixed placeholder
        # name was used (the computed stem was never referenced), so every
        # sample overwrote the previous one's output.
        stem = filename.split('.')[0]
        save_path = os.path.join(save_subdir, f"{filename_prefix}{stem}.npy")
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        np.save(save_path, ll_of_sample)

def load_and_save():
    """
    Score all perturbed samples (human and/or LLM text, per the CLI flags)
    and save the per-sample loss arrays as .npy files under `save_dir`.

    Relies on the module-level `args` parsed in the __main__ block.
    """
    load_path_root = os.path.join(load_dir, f"{args.dataset}")

    # Human-written texts (skipped with --only_LLMs).
    if not args.only_LLMs:
        _score_files(
            os.path.join(load_path_root, 'human'),
            os.path.join(save_dir, f"{args.dataset}", "human", f"{args.scoring_model_name}"),
            'human',
            '')

    # Machine-generated texts (skipped with --only_human).
    if not args.only_human:
        _score_files(
            os.path.join(load_path_root, 'LLMs', f"{args.source_model_name}"),
            os.path.join(save_dir, f"{args.dataset}", "LLMs", f"{args.scoring_model_name}"),
            f"{args.source_model_name}",
            f"{args.source_model_name}_")

    return

if __name__ == '__main__':
    # Prefer the GPU when one is available, otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Command-line interface.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default="xsum")  # dataset name
    parser.add_argument('--source_model_name', type=str, required=True, help='Please specify the source model name.')  # generator model
    parser.add_argument('--scoring_model_name', type=str, required=True, help='Please specify the scoring model name.')  # scoring model
    parser.add_argument('--only_human', action='store_true')    # score human text only
    parser.add_argument('--only_LLMs', action='store_true')     # score machine text only
    parser.add_argument('--checkpoint', type=int, default=0)    # file index to resume from
    args = parser.parse_args()

    # Load the scoring model and its tokenizer.
    model, tokenizer = load_score_model(args.scoring_model_name)

    # Move the model onto the selected device.
    print('Moving model to DEVICE...')
    model.to(device)

    load_and_save()