from config import *
from util import *
from model import make_model
from data_loader import *
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR
from torch.optim.lr_scheduler import CosineAnnealingLR
# Custom learning-rate schedule: linear warmup followed by inverse-step decay.
def lr_lambda_fn(step, warmup):
    """Return the LR multiplier for ``LambdaLR`` at the given step.

    Ramps linearly from 0 up to 5 over the first ``warmup`` steps, then
    decays as ``warmup / step * 5``; the multiplier is floored at 0.1.

    Args:
        step: current scheduler step (LambdaLR passes the epoch index, starting at 0).
        warmup: number of warmup steps; may be fractional (caller passes EPOCH/4).

    Returns:
        float multiplier in [0.1, 5] applied to the optimizer's base LR.
    """
    if warmup <= 0:
        # Degenerate schedule (e.g. EPOCH < 4 makes warmup 0): the original
        # code divided by zero here; fall back to the floor multiplier.
        return 0.1
    if step <= warmup:
        lr = step / warmup * 5
    else:
        lr = warmup / step * 5
    return max(lr, 0.1)

def run_epoch(loader, model, loss_fn, optimizer=None):
    """Run one full pass over ``loader`` and return the mean per-batch loss.

    Trains when ``optimizer`` is given (backprop + step per batch); otherwise
    performs a pure evaluation pass with autograd disabled.

    Args:
        loader: yields (src_x, src_mask, tgt_x, tgt_mask, tgt_y, tgt_text)
            batches; tgt_text is left on CPU (only used by evaluate()).
        model: the seq2seq model; moved to the global ``device`` (from config).
        loss_fn: e.g. CrossEntropyLoss over flattened logits/targets.
        optimizer: optional optimizer; None means evaluation only.

    Returns:
        Average loss per batch, or 0.0 for an empty loader.
    """
    total_batches = 0
    total_loss = 0.0
    model.to(device)
    # Skip autograd bookkeeping entirely during evaluation passes; the
    # original always built the graph, wasting memory on validation.
    grad_mode = torch.enable_grad() if optimizer is not None else torch.no_grad()
    with grad_mode:
        for src_x, src_mask, tgt_x, tgt_mask, tgt_y, tgt_text in loader:
            src_x = src_x.to(device)
            src_mask = src_mask.to(device)
            tgt_x = tgt_x.to(device)
            tgt_mask = tgt_mask.to(device)
            tgt_y = tgt_y.to(device)
            output = model(src_x, src_mask, tgt_x, tgt_mask)
            # Flatten (batch, seq, vocab) logits and (batch, seq) targets
            # for the cross-entropy call.
            loss = loss_fn(output.reshape(-1, output.shape[-1]), tgt_y.reshape(-1))
            total_batches += 1
            total_loss += loss.item()
            if optimizer is not None:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
    # Guard against an empty loader instead of dividing by zero.
    return total_loss / total_batches if total_batches else 0.0

# Evaluation: greedy-decode the whole loader and score hypotheses against references.
def evaluate(loader, model, max_len=MAX_LEN):
    """Greedy-decode every batch in ``loader`` and return the corpus BLEU.

    Args:
        loader: yields (src_x, src_mask, tgt_x, tgt_mask, tgt_y, tgt_text);
            only src_x, src_mask and the reference tgt_text are used here.
        model: the trained seq2seq model (set to eval mode here).
        max_len: maximum decode length passed to batch_greedy_decode.

    Returns:
        BLEU score of the decoded sentences against the references.
    """
    model.eval()
    references = []
    hypotheses = []
    # Decoding needs no gradients; avoid building the autograd graph.
    with torch.no_grad():
        for src_x, src_mask, tgt_x, tgt_mask, tgt_y, tgt_text in loader:
            hypotheses += batch_greedy_decode(model, src_x, src_mask, max_len)
            references += tgt_text

    print("预测句子：", hypotheses)
    print("真实句子：", references)
    # Compute BLEU once; the original recomputed it for the return value.
    score = bleu_score(hypotheses, [references])
    print("BLEU:", score)
    return score


if __name__ == '__main__':
    # Vocab sizes drive the embedding / output-projection dimensions.
    en_id2vocab, _ = get_vocab("en")
    zh_id2vocab, _ = get_vocab("zh")

    SRC_VOCAB_SIZE = len(en_id2vocab)
    TGT_VOCAB_SIZE = len(zh_id2vocab)

    model = make_model(SRC_VOCAB_SIZE, TGT_VOCAB_SIZE, D_MODEL, N_HEAD, D_FF, N, DROPOUT)

    train_dataset = Dataset("train")
    train_loader = data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True,
                                   collate_fn=train_dataset.collate_fn)

    val_dataset = Dataset("val")
    # Validation order must not be shuffled: evaluation gains nothing from it
    # and a deterministic order keeps val metrics reproducible across runs.
    val_loader = data.DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False,
                                 collate_fn=val_dataset.collate_fn)

    # PAD positions are excluded from the loss; label smoothing per config.
    loss_fn = CrossEntropyLoss(ignore_index=PAD_ID, label_smoothing=LABEL_SMOOTHING)
    optimizer = Adam(model.parameters(), lr=LR)
    # Warm up over the first quarter of the epochs, then decay (see lr_lambda_fn);
    # the scheduler is stepped once per epoch, so "step" here is the epoch index.
    lr_scheduler = LambdaLR(optimizer, lr_lambda=lambda step: lr_lambda_fn(step, EPOCH / 4))

    best_bleu = 0
    for e in range(EPOCH):
        # Training pass.
        model.train()
        train_loss = run_epoch(train_loader, model, loss_fn, optimizer)
        print(f"loss:{train_loss},epoch:{e}")
        lr_scheduler.step()

        # Validation pass (no optimizer -> no weight updates).
        model.eval()
        val_loss = run_epoch(val_loader, model, loss_fn, None)
        # Report the validation loss instead of silently discarding it.
        print(f"val_loss:{val_loss},epoch:{e}")

        # BLEU evaluation requires a full greedy decode, so run it sparsely
        # and checkpoint only when it improves.
        if e % 50 == 0 or e == EPOCH - 1:
            val_bleu = evaluate(val_loader, model, MAX_LEN)
            if val_bleu > best_bleu:
                torch.save(model.state_dict(), "/root/project/Code/sshcode/Transformer/Transformer_model/best.pt")
                best_bleu = val_bleu