import torch
import json
from tqdm import tqdm
import matplotlib.pyplot as plt

from model import BertForPermutationAndMaskedLM
from load_dataset import load_tokenized_dataset, create_masked_inputs
from value import evaluate_bert_with_permutation
    
def train_bert_with_permutation(model, tokenizer, dataloader, optimizer, epochs, device, save_path="my-bert-model"):
    """Train a BERT model jointly on MLM and word-order (permutation) recovery.

    Args:
        model: BertForPermutationAndMaskedLM; its forward must accept
            ``labels`` (MLM) and ``permutation_labels`` and return the
            combined loss as the first element of its output tuple.
        tokenizer: tokenizer passed through to ``create_masked_inputs``.
        dataloader: yields dict batches with keys ``permuted_input_ids``,
            ``input_ids``, ``attention_mask`` and ``permutation_labels``
            (tensor values movable via ``.to(device)``).
        optimizer: torch optimizer over ``model.parameters()``.
        epochs: number of full passes over ``dataloader``.
        device: torch device batches are moved to.
        save_path: directory where model, tokenizer and the loss history
            (``loss_list.json``) are saved after training.

    Returns:
        list[float]: per-batch loss values accumulated across all epochs.
    """
    model.train()
    loss_list = []

    for epoch in range(epochs):
        total_loss = 0.0
        progress_bar = tqdm(enumerate(dataloader), total=len(dataloader))

        for step, batch in progress_bar:
            # Move all batch tensors to the target device.
            batch = {k: v.to(device) for k, v in batch.items()}

            # The permuted inputs drive the word-order-recovery task; MLM
            # masking is applied on top of them, and create_masked_inputs
            # also produces the MLM labels.
            inputs, mlm_labels = create_masked_inputs(
                batch["permuted_input_ids"], tokenizer, device
            )

            outputs = model(
                inputs,
                attention_mask=batch["attention_mask"],
                labels=mlm_labels,
                permutation_labels=batch["permutation_labels"],
            )
            loss = outputs[0]  # combined MLM + permutation loss

            # Backpropagation and optimizer step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Detach to a Python float once; reuse for both the running
            # average and the history (avoids a second GPU sync).
            loss_value = loss.item()
            total_loss += loss_value
            progress_bar.set_description(
                f"Epoch {epoch+1}, Loss: {total_loss/(step+1):.4f}"
            )
            loss_list.append(loss_value)

        # Report the epoch's mean loss.
        avg_loss = total_loss / len(dataloader)
        print(f"Epoch {epoch+1}/{epochs}, Average Loss: {avg_loss:.4f}")
        # Optional per-epoch evaluation:
        # evaluate_bert_with_permutation(model, dataloader, tokenizer, device)

    # Persist model, tokenizer and the per-batch loss history.
    model.save_pretrained(save_path)
    tokenizer.save_pretrained(save_path)
    with open(save_path + "/loss_list.json", "w") as f:
        json.dump(loss_list, f)

    return loss_list


def plot_loss(loss_list, title):
    """Plot per-batch training losses and save the figure as '<title>.png'.

    Args:
        loss_list: sequence of loss values, one per training batch.
        title: figure title; also used as the output filename stem.
    """
    # Start a fresh figure so repeated calls don't draw onto the stale
    # implicit "current" figure left over from a previous plot.
    plt.figure()
    plt.plot(loss_list, label='Training Loss')
    plt.title(title)
    plt.xlabel('Batch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig(title + '.png')
    plt.show()

if __name__ == "__main__":
    from transformers import BertConfig
    import os

    # Hyperparameters.
    lr = 2e-5
    epochs = 20
    batch_size = 16
    model_path = "my-bert"
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Decide once whether a previously saved model/tokenizer exists,
    # instead of probing the filesystem twice for the same decision.
    resume = os.path.exists(model_path)

    # Load the dataset (with word-order permutation augmentation); reuse
    # the saved tokenizer when resuming from a checkpoint.
    if resume:
        tokenizer, train_dataloader = load_tokenized_dataset(
            pretrained_model_name_or_path=model_path,
            add_permutation=True,
            batch_size=batch_size)
    else:
        tokenizer, train_dataloader = load_tokenized_dataset(
            add_permutation=True,
            batch_size=batch_size)

    # Load the custom model from disk, or build a small untrained one.
    if resume:
        model = BertForPermutationAndMaskedLM.from_pretrained(model_path)
    else:
        # Construct the config directly via keyword arguments rather than
        # round-tripping a dict through BertConfig.from_dict.
        model_config = BertConfig(
            vocab_size=tokenizer.vocab_size,
            hidden_size=256,
            num_hidden_layers=2,
            num_attention_heads=4,
        )
        model = BertForPermutationAndMaskedLM(model_config)  # untrained weights
    model.to(device)

    # Optimizer.
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)

    # Train, then plot the per-batch loss curve.
    loss_list = train_bert_with_permutation(model, tokenizer, train_dataloader, optimizer, epochs, device)
    plot_loss(loss_list, 'Training Loss')