import torch
import json
import time
from torch.utils.data import DataLoader
from model.model import GPT 
from model.dataloader import get_dataset_info 
import torch.utils.data as Data 
import collections

# --- evaluation configuration -------------------------------------------
batch_size = 32
out_file = '/data/whl/cl/gpt2/dataset/resgpt2_1.json'  # metrics JSON path; falsy -> print to stdout
# Single vocabulary lookup; the original called get_dataset_info() twice
# and threw the first result away.
word2id, id2word, vocab_size = get_dataset_info()
f1_threshold = 0.9  # char-level F1 at/above this counts as an exact match

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def make_data(datas):
    """Parse raw dataset lines into (input, output) string pairs.

    Each non-empty line is expected to be ``input<TAB>output``.  Blank
    lines are skipped (the original crashed unpacking them), and only the
    first tab is treated as the separator so an output containing tabs
    no longer raises ValueError.

    Args:
        datas: iterable of raw text lines (e.g. from ``readlines()``).

    Returns:
        list of (input_str, output_str) tuples, both stripped.
    """
    train_datas = []
    for data in datas:
        data = data.strip()
        if not data:
            continue  # skip blank/whitespace-only lines
        input_data, output_data = data.split('\t', 1)
        train_datas.append((input_data.strip(), output_data.strip()))
    return train_datas


class EvalDataSet(Data.Dataset):
    """Dataset of (decoder_input, decoder_output) token-id sequences.

    Each element of ``datas`` is a pair of token-id lists, as built in
    ``main`` from the vocabulary lookup.
    """

    def __init__(self, datas):
        # datas: list of (input_ids, output_ids) tuples
        self.datas = datas

    def __getitem__(self, item):
        input_ids, output_ids = self.datas[item]
        return {
            "decoder_input": input_ids,
            "decoder_input_len": len(input_ids),
            "decoder_output": output_ids,
            "decoder_output_len": len(output_ids),
        }

    def __len__(self):
        return len(self.datas)

    def padding_batch(self, batch):
        """DataLoader collate_fn: pad each sample to the batch max length.

        Args:
            batch: list of dicts as returned by ``__getitem__``.

        Returns:
            (decoder_inputs, decoder_outputs): LongTensors of shape
            (batch, max_len), padded with the ``<pad>`` token id.
        """
        # Look up the pad id once per batch instead of unpacking the
        # whole vocabulary info every call.
        word2id, _, _ = get_dataset_info()
        pad_id = word2id["<pad>"]

        decoder_input_maxlen = max(d["decoder_input_len"] for d in batch)
        decoder_output_maxlen = max(d["decoder_output_len"] for d in batch)

        # Build padded *copies*.  The original extended the lists in
        # place, which mutated the very lists held by self.datas — a
        # second pass over the dataset would then pad cumulatively.
        decoder_inputs = torch.tensor(
            [d["decoder_input"]
             + [pad_id] * (decoder_input_maxlen - d["decoder_input_len"])
             for d in batch],
            dtype=torch.long)
        decoder_outputs = torch.tensor(
            [d["decoder_output"]
             + [pad_id] * (decoder_output_maxlen - d["decoder_output_len"])
             for d in batch],
            dtype=torch.long)

        return decoder_inputs, decoder_outputs


def generate_predictions(model, data_loader):
    """Greedy-decode every sample yielded by ``data_loader``.

    Args:
        model: GPT model exposing ``greedy_decoder(batch_of_one)``.
        data_loader: DataLoader yielding (dec_inputs, dec_outputs) batches.

    Returns:
        (preds, avg_inference_time, avg_length) where ``preds`` maps the
        global sample index to ``{"pred": ..., "real": ...}`` strings,
        and the averages are per-sample over the whole dataset.

    Note: ``real`` is joined from the padded batch, so it may include
    ``<pad>`` tokens for shorter samples — unchanged from the original.
    """
    model.eval()
    preds = {}
    total_length = 0
    start_time = time.time()

    # Enter no_grad once around the whole loop instead of re-entering
    # the context manager for every single sample.
    with torch.no_grad():
        for i, (dec_inputs, dec_outputs) in enumerate(data_loader):
            for j, dec_input in enumerate(dec_inputs):
                dec_input = dec_input.to(device)
                # Model generates the prediction for one sample at a time.
                pred_ids = model.greedy_decoder(dec_input.unsqueeze(0))

                pred_answer = "".join(
                    id2word[token.item()] for token in pred_ids.squeeze(0))
                real_answer = "".join(
                    id2word[token] for token in dec_outputs[j].tolist())

                # Global sample index: unique even for a short final batch.
                preds[i * batch_size + j] = {
                    "pred": pred_answer,
                    "real": real_answer,
                }
                total_length += len(pred_answer)

    elapsed = time.time() - start_time
    n_samples = len(data_loader.dataset)
    avg_inference_time = elapsed / n_samples
    avg_length = total_length / n_samples

    return preds, avg_inference_time, avg_length


def evaluate(predictions, threshold=None):
    """Score predictions with char-level F1 and exact-match accuracy.

    Args:
        predictions: mapping of index -> {"pred": str, "real": str}.
        threshold: F1 score at/above which a sample counts as an exact
            match; defaults to the module-level ``f1_threshold``.

    Returns:
        (accuracy, avg_f1).  An empty ``predictions`` yields (0.0, 0.0)
        instead of the original ZeroDivisionError.
    """
    if threshold is None:
        threshold = f1_threshold
    if not predictions:
        return 0.0, 0.0

    def compute_f1(gold, pred):
        # Cheap empty checks first — no point building Counters for them.
        if not gold or not pred:
            return int(gold == pred)
        common = collections.Counter(gold) & collections.Counter(pred)
        num_same = sum(common.values())
        if num_same == 0:
            return 0
        precision = num_same / len(pred)
        recall = num_same / len(gold)
        return 2 * (precision * recall) / (precision + recall)

    exact_matches = 0
    f1_total = 0
    for value in predictions.values():
        f1_score = compute_f1(value["real"], value["pred"])
        f1_total += f1_score
        if f1_score >= threshold:
            exact_matches += 1

    accuracy = exact_matches / len(predictions)
    avg_f1 = f1_total / len(predictions)
    return accuracy, avg_f1

def compute_perplexity(loss, total_tokens):
    """Return perplexity = exp(loss / total_tokens) as a torch scalar.

    Plain Python numbers are promoted to float32 tensors before the
    division so ``torch.exp`` can be applied.
    """
    loss_t = (loss if isinstance(loss, torch.Tensor)
              else torch.tensor(loss, dtype=torch.float32))
    tokens_t = (total_tokens if isinstance(total_tokens, torch.Tensor)
                else torch.tensor(total_tokens, dtype=torch.float32))
    return torch.exp(loss_t / tokens_t)


def main():
    """Run evaluation: load data, greedy-decode, score, dump metrics JSON."""
    dataset_path = '/data/whl/cl/gpt2/dataset/16_dataset.txt'
    try:
        with open(dataset_path, 'r', encoding='utf-8') as f:
            datas = f.readlines()
    except FileNotFoundError:
        # Must return here: the original only printed (and named the wrong
        # file) and then crashed with a NameError on `datas` below.
        print(f"无法找到数据集文件'{dataset_path}'，请检查文件是否存在。")
        return

    train_data = make_data(datas)
    # Vocabulary lookup per character; out-of-vocabulary characters are
    # silently dropped.
    train_num_data = [([word2id[word] for word in input_line if word in word2id],
                       [word2id[word] for word in output_line if word in word2id])
                      for input_line, output_line in train_data]

    dataset = EvalDataSet(train_num_data)
    data_loader = DataLoader(dataset, batch_size=batch_size,
                             collate_fn=dataset.padding_batch)

    pretrained_model_path = 'ResGPT2.pt'
    model = GPT().to(device)
    # map_location keeps a CUDA-saved checkpoint loadable on CPU-only hosts.
    model.load_state_dict(
        torch.load(pretrained_model_path, map_location=device), strict=False)

    predictions, avg_inference_time, avg_length = generate_predictions(model, data_loader)

    accuracy, avg_f1 = evaluate(predictions)

    # NOTE(review): total_loss is fixed at 0.0, so perplexity always comes
    # out as 1.0 — presumably a placeholder until a real eval loss is fed in.
    total_loss = 0.0
    total_tokens = sum(len(data["decoder_input"]) for data in dataset)
    perplexity = compute_perplexity(total_loss, total_tokens)

    eval_results = {
        'accuracy': accuracy * 100,
        'f1': avg_f1 * 100,
        'avg_inference_time': avg_inference_time,
        'avg_length': avg_length,
        'perplexity': perplexity.item() if isinstance(perplexity, torch.Tensor) else perplexity
    }

    if out_file:
        with open(out_file, 'w', encoding='utf-8') as out_file_obj:
            json.dump(eval_results, out_file_obj, indent=2)
    else:
        print(json.dumps(eval_results, indent=2))


if __name__ == '__main__':
    main()
