import torch
from model.moe import MixtureOfExperts
from model.dataloader import get_dataset_info
import torch.utils.data as Data
import torch.nn as nn
from torch.cuda.amp import autocast
import gc
import os

def cleanup():
    """Release unused memory: run the GC, then return cached CUDA blocks.

    Collecting Python garbage first drops any dead tensor references so
    that ``empty_cache`` can actually hand the freed blocks back to the
    driver. Safe to call on CPU-only machines (``empty_cache`` is a no-op
    when CUDA was never initialised).
    """
    gc.collect()
    torch.cuda.empty_cache()

# ---- Evaluation configuration ----
test_data_file = '/data/whl/cl/gpt2/dataset/test_output.txt'  # tab-separated: text fields + expert index
batch_size = 4
word2id, id2word, vocab_size = get_dataset_info()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_load_path = 'moe_model.pt'  # path of the trained MoE checkpoint

# Pin the encoding so reading does not depend on the platform default
# (locale-dependent on Windows); the dataset presumably contains
# non-ASCII (Chinese) text given the char-level word2id vocabulary.
with open(test_data_file, 'r', encoding='utf-8') as f:
    test_datas = f.readlines()

def make_data(datas):
    """Parse raw tab-separated lines into ``(input_text, expert_index)`` pairs.

    Each line has the form ``field1\\tfield2\\t...\\tindex``: every field
    except the last is concatenated into the model input string, and the
    last field is the integer index of the target LoRA expert.

    Args:
        datas: iterable of raw lines (e.g. from ``readlines()``).

    Returns:
        List of ``(input_text, lora_file_index)`` tuples. Blank or
        whitespace-only lines are skipped (the original code crashed on
        them with ``int('')``).
    """
    parsed = []
    for line in datas:
        line = line.strip()
        if not line:
            # Skip empty lines instead of raising ValueError on int('').
            continue
        fields = line.split('\t')
        input_text = ''.join(fields[:-1])
        lora_file_index = int(fields[-1])
        parsed.append((input_text.strip(), lora_file_index))
    return parsed

class moeDataSet(Data.Dataset):
    """Dataset of ``(input_text, lora_file_index)`` pairs for MoE evaluation.

    Tokenisation is character-level through the module-global ``word2id``
    table; characters missing from the vocabulary map to ``<unk>``.
    """

    def __init__(self, datas):
        # List of (input_text, lora_file_index) tuples from make_data().
        self.datas = datas

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, idx):
        text, lora_file_index = self.datas[idx]
        # Map each character to its vocabulary id, falling back to <unk>.
        token_ids = [word2id.get(ch, word2id["<unk>"]) for ch in text]
        return {
            "decoder_input": token_ids,
            "decoder_input_len": len(token_ids),
            "lora_file_index": lora_file_index,
        }

    def padding_batch(self, batch):
        """Collate fn: right-pad every sample to the batch max with <pad>.

        Note: extends each sample's ``decoder_input`` list in place, as
        the original implementation did.
        """
        max_len = max(sample["decoder_input_len"] for sample in batch)

        for sample in batch:
            shortfall = max_len - sample["decoder_input_len"]
            sample["decoder_input"].extend([word2id["<pad>"]] * shortfall)

        input_ids = torch.tensor([sample["decoder_input"] for sample in batch], dtype=torch.long)
        expert_targets = torch.tensor([sample["lora_file_index"] for sample in batch], dtype=torch.long)

        return input_ids, expert_targets

# Build the evaluation pipeline: parse the raw lines, wrap them in the
# Dataset, and batch through the custom padding collate function.
# shuffle=False keeps evaluation deterministic and in file order.
test_datas = make_data(test_datas)
test_dataset = moeDataSet(test_datas)
test_loader = Data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, collate_fn=test_dataset.padding_batch)

def load_moe_weights(moe_model, moe_weights_path):
    """Load a saved state dict into ``moe_model``, fixing DataParallel prefixes.

    Checkpoints saved from an ``nn.DataParallel``-wrapped model carry a
    ``module.`` prefix on every key; checkpoints from a bare model do not.
    The prefix is added or stripped to match how the *current* model is
    wrapped — checked via ``isinstance`` rather than the GPU count, which
    the original used and which broke on CPU-only machines (0 GPUs meant
    a prefixed checkpoint was never stripped).

    Args:
        moe_model: the (possibly DataParallel-wrapped) model to load into.
        moe_weights_path: filesystem path of the checkpoint.

    Raises:
        FileNotFoundError: if ``moe_weights_path`` does not exist.
    """
    if not os.path.exists(moe_weights_path):
        raise FileNotFoundError(f"找不到 MoE 权重文件: {moe_weights_path}")

    # map_location='cpu' lets a CUDA-saved checkpoint load on a machine
    # without GPUs; load_state_dict then copies each tensor onto the
    # device of the corresponding model parameter.
    state_dict = torch.load(moe_weights_path, map_location="cpu")

    has_module_prefix = any(k.startswith('module.') for k in state_dict.keys())
    is_parallel = isinstance(moe_model, nn.DataParallel)

    if is_parallel and not has_module_prefix:
        # Saved bare, loading into DataParallel: add the prefix.
        state_dict = {f"module.{k}": v for k, v in state_dict.items()}
    elif not is_parallel and has_module_prefix:
        # Saved from DataParallel, loading into a bare model: strip it.
        state_dict = {k[len('module.'):]: v for k, v in state_dict.items()}

    moe_model.load_state_dict(state_dict)



# Build the MoE gate model. The DataParallel wrap must happen BEFORE
# load_moe_weights, since that function inspects the wrapper to decide
# whether checkpoint keys need a 'module.' prefix.
moe_model = MixtureOfExperts(num_experts=3)
if torch.cuda.device_count() > 1:
    moe_model = nn.DataParallel(moe_model)

moe_model = moe_model.to(device)

load_moe_weights(moe_model, model_load_path)
moe_model.eval()  # evaluation mode: disables dropout / batch-norm updates

criterion = nn.CrossEntropyLoss()  # expert selection treated as classification
total_loss = 0
correct_predictions = 0
total_samples = 0

# Evaluate: accumulate cross-entropy loss and top-1 expert-selection accuracy.
with torch.no_grad():  # inference only: no autograd graph
    for decoder_inputs, lora_file_indices in test_loader:
        decoder_inputs = decoder_inputs.to(device)
        lora_file_indices = lora_file_indices.to(device)

        # Broadcast (batch, seq) -> (batch, 3, seq, 20) via expand (no copy).
        # The meaning of the 3 and 20 dims is defined by model.moe —
        # presumably (batch, experts, seq, feature); TODO confirm against
        # MixtureOfExperts and the num_experts=3 used at construction.
        decoder_inputs = decoder_inputs.unsqueeze(1).unsqueeze(3).expand(-1, 3, -1, 20)

        # NOTE(review): torch.cuda.amp.autocast is deprecated in favour of
        # torch.amp.autocast('cuda'), and this call assumes CUDA is in use.
        with autocast():
            selected_expert_idx = moe_model(decoder_inputs.float())
            loss = criterion(selected_expert_idx, lora_file_indices)

        total_loss += loss.item()

        # Top-1 prediction over the per-expert logits (dim 1).
        _, predicted = torch.max(selected_expert_idx, 1)
        correct_predictions += (predicted == lora_file_indices).sum().item()
        total_samples += lora_file_indices.size(0)

avg_loss = total_loss / len(test_loader)  # mean per-batch loss
accuracy = correct_predictions / total_samples

print(f'Test Loss: {avg_loss:.4f}, Accuracy: {accuracy:.4f}')

cleanup()
