import torch
import numpy as np
import matplotlib.pyplot as plt

from data import *
from model import *

# Output directories (created if missing) for figures and training artifacts.
IMG_PATH = BASE_PATH / 'img'
IMG_PATH.mkdir(exist_ok=True)
LOG_PATH = BASE_PATH / 'log'
LOG_PATH.mkdir(exist_ok=True)

# Checkpoint file written once training finishes.
MODEL_FILE = LOG_PATH / 'model.pth'

# hyperparams
IN_DIM      = 128    # input feature dimension (PCA target dim passed to get_dataloaders)
BATCH_SIZE  = 128    # samples per mini-batch
SPLIT_RATIO = 0.25   # presumably the test-set fraction of the split -- confirm in data.get_dataloaders
EPOCHS      = 200    # total training epochs
LR          = 0.001  # Adam learning rate


# Build train/test loaders from the project data module, with features reduced to IN_DIM via PCA.
train_loader, test_loader = get_dataloaders(batch_size=BATCH_SIZE, split_ratio=SPLIT_RATIO, pca_dim=IN_DIM)

# Model, optimizer (weight_decay adds L2 regularization), and the project's custom
# two-target loss. NOTE(review): alpha/beta presumably weight the g- and a-terms
# of the loss -- confirm against Myloss in model.py.
model = ANN(IN_DIM)
optimizer = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=1e-2)
criterion = Myloss(alpha=1, beta=50)

# Per-epoch history for loss and R² on both regression targets (g and a/alpha).
train_losses = []
train_accs_g = []
train_accs_a = []
eval_losses  = []
eval_accs_g  = []
eval_accs_a  = []
for e in range(EPOCHS):
    ''' Train '''
    train_loss  = 0
    train_acc_g = 0
    train_acc_a = 0

    model.train()   # training mode (enables dropout/batch-norm updates, if any)
    # renamed loop variable: `input` shadowed the builtin
    for inputs, label in train_loader:
        # label columns: [:,0] -> g target, [:,1] -> a (alpha) target
        g = label[:, 0]
        a = label[:, 1]

        optimizer.zero_grad()
        out = model(inputs)
        g_hat = out[:, 0]
        a_hat = out[:, 1]
        loss = criterion(g, g_hat, a, a_hat)
        loss.backward()
        optimizer.step()

        # accumulate batch loss; .item() called once per batch (was computed twice)
        train_loss += loss.item()

        # R² score per target; presumably higher is better with 1.0 perfect --
        # confirm against r2() in the project (the plots below use ylim(-1, 1.1))
        train_acc_a += r2(a_hat, a).item()
        train_acc_g += r2(g_hat, g).item()

    # average over the number of batches in the epoch
    train_losses.append(train_loss  / len(train_loader))
    train_accs_g.append(train_acc_g / len(train_loader))
    train_accs_a.append(train_acc_a / len(train_loader))

    ''' Eval '''
    eval_loss  = 0
    eval_acc_g = 0
    eval_acc_a = 0

    model.eval()  # inference mode
    with torch.no_grad():  # no gradients needed for evaluation
        for inputs, label in test_loader:
            g = label[:, 0]
            a = label[:, 1]

            out = model(inputs)
            g_hat = out[:, 0]
            a_hat = out[:, 1]
            loss = criterion(g, g_hat, a, a_hat)

            # accumulate loss and per-target R² (same metrics as training)
            eval_loss  += loss.item()
            eval_acc_g += r2(g_hat, g).item()
            eval_acc_a += r2(a_hat, a).item()

    eval_losses.append(eval_loss  / len(test_loader))
    eval_accs_g.append(eval_acc_g / len(test_loader))
    eval_accs_a.append(eval_acc_a / len(test_loader))

    # progress report every 10 epochs
    if (e + 1) % 10 == 0:
        print(f'[{e+1}/{EPOCHS}]')
        print(f'  train loss: {train_losses[-1]:.7f}, r2-g: {train_accs_g[-1]:.5f}, r2-a: {train_accs_a[-1]:.5f}')
        print(f'  test  loss: {eval_losses[-1] :.7f}, r2-g: {eval_accs_g[-1] :.5f}, r2-a: {eval_accs_a[-1] :.5f}')

# Persist the entire model object (pickle-based).
# NOTE(review): torch.save(model.state_dict(), ...) is more robust across code
# refactors -- kept as-is so existing torch.load(MODEL_FILE) callers keep working.
torch.save(model, MODEL_FILE)

# --- Plot training curves ---
# (was guarded by `if 'plot':`, a constant-truthy string that always ran;
# removed the misleading guard)
plt.figure(figsize=(20, 15))

# subplot 1: total loss, train vs. eval
plt.subplot(221)
plt.plot(np.arange(len(train_losses)), train_losses, label='train loss')
plt.plot(np.arange(len(eval_losses)),  eval_losses,  label='eval loss')
plt.legend()
plt.title('total loss')
plt.xlabel('epochs')
plt.ylabel('Model Losses')

# subplot 2: R² of g (R² can be negative, so clamp the view)
plt.subplot(222)
plt.plot(np.arange(len(train_accs_g)), train_accs_g, label='train accuracy of g')
plt.plot(np.arange(len(eval_accs_g)),  eval_accs_g,  label='eval accuracy of g')
plt.ylim((-1, 1.1))
plt.legend()
plt.title('R2 of g')
plt.xlabel('epochs')
plt.ylabel('R2 of g')

# subplot 3: R² of alpha
plt.subplot(223)
plt.plot(np.arange(len(train_accs_a)), train_accs_a, label='train accuracy of alpha')
plt.plot(np.arange(len(eval_accs_a)),  eval_accs_a,  label='eval accuracy of alpha')
plt.ylim((-1, 1.1))
plt.legend()
plt.title('R2 of alpha')
plt.xlabel('epochs')
plt.ylabel('R2 of alpha')

# NOTE(review): filename intentionally left byte-identical (including the
# 'accurary' misspelling) so anything expecting this exact path keeps working.
plt.savefig(IMG_PATH / 'loss&accurary.png', dpi=300, bbox_inches='tight')
