import torch
from model.moe import MixtureOfExperts
from model.dataloader import get_dataset_info 
import torch.utils.data as Data 
import torch.optim as optim
import torch.nn as nn
from torch.cuda.amp import autocast, GradScaler
import gc

def cleanup():
    """Free host-side garbage, then return cached CUDA blocks to the driver.

    Collecting first matters: it drops dead tensor references so that
    ``empty_cache`` can actually release their memory. ``empty_cache`` is a
    no-op when CUDA was never initialized, so this is safe on CPU-only runs.
    """
    gc.collect()
    torch.cuda.empty_cache()

# ---------------------------------------------------------------------------
# Training configuration and raw-data loading.
# ---------------------------------------------------------------------------
data_file = '/data/whl/cl/gpt2/dataset/train_output.txt'  # tab-separated: text fields + expert index
batch_size = 4
word2id, id2word, vocab_size = get_dataset_info()  # char-level vocab shared with the GPT-2 pipeline
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_epochs = 200
learning_rate = 0.001
num_experts = 3  # must match the number of LoRA adapters being routed between
loss_file_path = 'loss_moe.txt'  # per-epoch average loss, appended across runs

model_save_path = 'moe_model.pt'

# Pin the encoding to UTF-8: the dataset is non-ASCII text (char-level vocab),
# and the bare open() previously decoded with the platform locale default,
# which silently corrupts or crashes on non-UTF-8 locales.
with open(data_file, 'r', encoding='utf-8') as f:
    datas = f.readlines()

def make_data(datas):
    """Parse raw tab-separated lines into (text, expert_index) pairs.

    Each line looks like ``field1<TAB>...<TAB>index``: every field except the
    last is concatenated (with no separator) to form the input text, and the
    final field is parsed as the integer index of the target LoRA expert.
    """
    pairs = []
    for raw in datas:
        fields = raw.strip().split('\t')
        text = ''.join(fields[:-1]).strip()
        pairs.append((text, int(fields[-1])))
    return pairs

class moeDataSet(Data.Dataset):
    """Wraps (text, expert_index) pairs for the router-training DataLoader.

    Tokenization is character-level via the module-global ``word2id``;
    unknown characters map to ``<unk>``. ``padding_batch`` is meant to be
    passed as the DataLoader's ``collate_fn``.
    """

    def __init__(self, datas):
        self.datas = datas

    def __getitem__(self, item):
        text, expert_idx = self.datas[item]
        # Char-level encoding; OOV characters fall back to the <unk> id.
        token_ids = [word2id.get(ch, word2id["<unk>"]) for ch in text]
        return {
            "decoder_input": token_ids,
            "decoder_input_len": len(token_ids),
            "lora_file_index": expert_idx,
        }

    def __len__(self):
        return len(self.datas)

    def padding_batch(self, batch):
        """Collate: right-pad every sample to the batch max length.

        Returns ``(decoder_inputs, lora_file_indices)`` as long tensors of
        shape ``(B, max_len)`` and ``(B,)`` respectively.
        """
        max_len = max(d["decoder_input_len"] for d in batch)
        pad_id = word2id["<pad>"]
        for d in batch:
            # In-place pad, matching the original collate behavior.
            d["decoder_input"] += [pad_id] * (max_len - d["decoder_input_len"])

        decoder_inputs = torch.tensor(
            [d["decoder_input"] for d in batch], dtype=torch.long
        )
        expert_targets = torch.tensor(
            [d["lora_file_index"] for d in batch], dtype=torch.long
        )
        return decoder_inputs, expert_targets

# ---------------------------------------------------------------------------
# Build the dataset/loader and the router model, then the training machinery.
# ---------------------------------------------------------------------------
train_datas = make_data(datas)  
train_dataset = moeDataSet(train_datas) 
# padding_batch pads each batch to its own max length at collate time.
train_loader = Data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, collate_fn=train_dataset.padding_batch)

moe_model = MixtureOfExperts(num_experts=num_experts)

# Wrap for multi-GPU data parallelism before moving to the device.
if torch.cuda.device_count() > 1:
    moe_model = nn.DataParallel(moe_model)

moe_model = moe_model.to(device)

# CrossEntropyLoss expects raw per-expert scores from the model and an
# integer class (expert) index as the target.
criterion = nn.CrossEntropyLoss()  
optimizer = optim.Adam(moe_model.parameters(), lr=learning_rate)
# Gradient scaler for mixed-precision (AMP) training.
scaler = GradScaler()

# ---------------------------------------------------------------------------
# Training loop: learn to predict which LoRA expert each input belongs to.
# ---------------------------------------------------------------------------
for epoch in range(num_epochs):
    moe_model.train()
    total_loss = 0
    for i, (decoder_inputs, lora_file_indices) in enumerate(train_loader):
        decoder_inputs = decoder_inputs.to(device)
        lora_file_indices = lora_file_indices.to(device)

        # Broadcast (B, seq) token ids to (B, 3, seq, 20) by inserting two
        # axes and expanding. Presumably this matches the input shape
        # MixtureOfExperts expects -- TODO confirm against model.moe.
        decoder_inputs = decoder_inputs.unsqueeze(1).unsqueeze(3).expand(-1, 3, -1, 20)

        optimizer.zero_grad()

        # Mixed-precision forward pass.
        with autocast():
            # NOTE(review): despite the name, this should be raw per-expert
            # scores (CrossEntropyLoss takes logits), not an argmax index.
            selected_expert_idx = moe_model(decoder_inputs.float())
            loss = criterion(selected_expert_idx, lora_file_indices)

        # Standard AMP recipe: scale, backward, unscale+step, update scale.
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        total_loss += loss.item()

    avg_loss = total_loss / len(train_loader)

    # Append this epoch's average loss (space-separated) for later plotting.
    with open(loss_file_path, 'a') as f:
        f.write(f'{avg_loss:.4f} ') 

    print(f'Epoch {epoch + 1}, Total Loss: {total_loss / len(train_loader):.4f}')

    # Checkpoint every epoch. When DataParallel wraps the model, save the
    # inner .module so state_dict keys are not prefixed with "module.".
    if torch.cuda.device_count() > 1:
        torch.save(moe_model.module.state_dict(), model_save_path)  
    else:
        torch.save(moe_model.state_dict(), model_save_path)

    print(f"模型已保存到 {model_save_path}")

    # Reclaim host garbage and cached CUDA memory between epochs.
    cleanup()