# -*- coding: utf-8 -*-
"""
   File Name:  train_model.py
   Author :    liccoo
   Time:       2022/8/24 12:47
"""
import math
import os

import numpy as np
import torch

from tensorboard import program
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm


def compute_relative_error(_output, _label):
    """
    Compute element-wise relative and absolute errors between a prediction
    and its ground truth.

    NOTE(review): divides by `_label`, so any zero in the label tensor yields
    inf/nan in the relative error — callers should guarantee non-zero labels
    (confirm against the data pipeline).

    :param _output: predicted tensor
    :param _label: ground-truth tensor, same shape as `_output`
    :return: (max relative error, mean relative error, mean absolute error)
             as Python floats
    """
    # abs((o - l) / l) == abs(o - l) / abs(l); the former needs one abs call
    # (the original's double-abs was redundant).
    rela_err = torch.abs((_output - _label) / _label)
    max_rela_err = torch.max(rela_err)  # worst-case relative error
    mean_rela_err_mean = torch.mean(rela_err)  # average relative error
    mean_abs_err_mean = torch.mean(torch.abs(_output - _label))  # average absolute error
    return max_rela_err.item(), mean_rela_err_mean.item(), mean_abs_err_mean.item()


def save_model(_model, _model_name, _exp_path, _epoch, _epochs, _save_num):
    """
    Persist the whole model object at evenly spaced checkpoints.

    The model is written `_save_num` times over the full run (once every
    `_epochs / _save_num` epochs) and additionally on every 100th epoch.
    The file name encodes which checkpoint this is out of the total:
    ``{model_name}_{checkpoint_index}_{save_num}.pt``.

    :param _model: the torch module to serialize (saved in full, not just state_dict)
    :param _model_name: base name used for the checkpoint file
    :param _exp_path: directory the checkpoint is written into
    :param _epoch: current epoch index (0-based)
    :param _epochs: total number of epochs in the run
    :param _save_num: how many evenly spaced checkpoints to produce
    :return: None
    """
    # True on epochs where (epoch + 1) is a multiple of epochs / save_num.
    interval_hit = (_epoch * _save_num) % _epochs == (_epochs - _save_num)
    # Extra safety net: also save on every 100th epoch (epoch 99, 199, ...).
    century_hit = _epoch % 100 == 99
    if interval_hit or century_hit:
        # Which of the _save_num checkpoints this one is (1-based).
        checkpoint_idx = math.ceil((_epoch * _save_num) / _epochs)
        torch.save(_model, f'{_exp_path}/{_model_name}_{checkpoint_idx}_{_save_num}.pt')


def train_model(args, path, train_loader, dev_loader, model):
    """
    Train `model` on `train_loader`, validating on `dev_loader` once per epoch.

    Loss curves and mean relative errors are logged to TensorBoard under
    ``path/tensorboard`` (a TensorBoard server is launched automatically),
    and checkpoints are written via `save_model`.

    :param args: namespace providing lr, gamma, step_size, epochs, device,
                 port, model (name) and save_num attributes
    :param path: experiment directory (receives tensorboard logs + checkpoints)
    :param train_loader: training DataLoader yielding (input, label) batches
    :param dev_loader: validation DataLoader yielding (input, label) batches
    :param model: torch.nn.Module to optimize
    :return: None
    """
    # TensorBoard event-file location
    tensorboard_path = os.path.join(path, 'tensorboard')
    writer = SummaryWriter(tensorboard_path)

    # Auto-launch a TensorBoard server on the requested port
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', tensorboard_path, '--bind_all', '--port', f'{args.port}'])
    url = tb.launch()
    print(f"TensorBoard listening on {url}")  # fixed: message wrongly said "Tensorflow"

    # Loss function, optimizer and learning-rate decay
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Step decay: lr_next / lr_current == args.gamma every args.step_size epochs
    scheduler = StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)

    # Move model and criterion to the target device (GPU acceleration)
    device = torch.device(f"{args.device}")
    model.to(device)
    criterion.to(device)

    # ------------------------------------------------------------------ #
    print("----------Start training----------")
    for epoch in range(args.epochs):
        model.train()
        loop = tqdm(enumerate(train_loader, 0), total=len(train_loader), leave=True)  # progress bar
        # Per-batch losses for this epoch; a plain list has O(1) append
        # (np.append copied the whole array each batch — O(n^2) overall).
        losses = []
        train_mean_relative_error = 0.0  # safe default if train_loader is empty
        for batch_idx, (input_, label) in loop:
            input_, label = input_.to(device), label.to(device)
            # predict the result
            output = model(input_)
            # get the loss
            loss = criterion(output, label)
            # set the grad to zero
            optimizer.zero_grad()
            # backward propagation -- Autograd
            loss.backward()
            # update the parameters
            optimizer.step()

            losses.append(loss.item())

            # mean relative error of the current batch (the last batch's
            # value is what gets logged for the epoch, as before)
            _, train_mean_relative_error, _ = compute_relative_error(output, label)

            # update the progress-bar readout
            loop.set_description(f'Epoch [{epoch}/{args.epochs}]')
            loop.set_postfix(loss=np.mean(losses), acc=train_mean_relative_error)

            # visualize the network graph exactly once
            if epoch == 0 and batch_idx == 0:
                writer.add_graph(model, input_)

        # checkpoint
        save_model(model, args.model, path, epoch, args.epochs, args.save_num)

        # ---- validation ----
        model.eval()
        val_losses = []
        val_mean_relative_error = 0.0  # safe default if dev_loader is empty
        # fixed: no_grad during validation — no autograd graph is needed
        with torch.no_grad():
            for val_batch_idx, (val_input_, val_label) in enumerate(dev_loader, 0):
                # fixed: use the `device` object (was the raw args.device string)
                val_input_, val_label = val_input_.to(device), val_label.to(device)
                # predict the result
                val_output = model(val_input_)
                # get the loss
                val_loss = criterion(val_output, val_label)
                val_losses.append(val_loss.item())

                # mean relative error of the current batch (last batch is logged)
                _, val_mean_relative_error, _ = compute_relative_error(val_output, val_label)

        # TensorBoard: per-epoch loss and relative-error curves
        writer.add_scalars('Loss', {'train': np.mean(losses), 'test': np.mean(val_losses)}, epoch)
        writer.add_scalars('Relative_error',
                           {'train': train_mean_relative_error, 'test': val_mean_relative_error}, epoch)

        # learning-rate decay
        scheduler.step()

    print("----------Finish training----------")

    # ------------------------------------------------------------------ #

    # flush and close the TensorBoard writer
    writer.close()


def train_tandem_model(args, path, train_loader, dev_loader, forward_model, inverse_model):
    """
    Train the inverse network of a tandem architecture.

    Only `inverse_model`'s parameters are optimized: the spectrum is mapped
    to predicted parameters by `inverse_model`, those are pushed through the
    (pre-trained) `forward_model`, and the MSE between the reconstructed and
    the input spectrum (`loss_s`) drives backprop. The parameter MSE
    (`loss_p`) is computed for logging only.

    :param args: namespace providing lr, gamma, step_size, epochs, device,
                 port, model (name) and save_num attributes
    :param path: experiment directory (receives tensorboard logs + checkpoints)
    :param train_loader: training DataLoader yielding (spectrum, parameters)
    :param dev_loader: validation DataLoader yielding (spectrum, parameters)
    :param forward_model: pre-trained surrogate mapping parameters -> spectrum
    :param inverse_model: torch.nn.Module to optimize
    :return: None
    """
    # TensorBoard event-file location
    tensorboard_path = os.path.join(path, 'tensorboard')
    writer = SummaryWriter(tensorboard_path)

    # Auto-launch a TensorBoard server on the requested port
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', tensorboard_path, '--bind_all', '--port', f'{args.port}'])
    url = tb.launch()
    print(f"TensorBoard listening on {url}")  # fixed: message wrongly said "Tensorflow"

    # Loss function, optimizer and learning-rate decay
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(inverse_model.parameters(), lr=args.lr)
    # Step decay: lr_next / lr_current == args.gamma every args.step_size epochs
    scheduler = StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)

    # Move models and criterion to the target device (GPU acceleration)
    device = torch.device(f"{args.device}")
    inverse_model.to(device)
    # fixed: forward_model consumes device tensors (para_pre) but was never
    # moved — this crashed under CUDA unless the caller moved it beforehand
    forward_model.to(device)
    # NOTE(review): the forward surrogate is assumed frozen/pre-trained, so
    # keep it in eval mode (dropout/batchnorm off) — confirm with the caller
    forward_model.eval()
    criterion.to(device)

    # ------------------------------------------------------------------ #
    print("----------Start training----------")
    for epoch in range(args.epochs):
        inverse_model.train()
        loop = tqdm(enumerate(train_loader, 0), total=len(train_loader), leave=True)  # progress bar
        # Per-batch losses; plain lists have O(1) append (np.append copied
        # the whole array each batch — O(n^2) overall).
        losses_s = []  # spectrum-reconstruction MSE per batch
        losses_p = []  # parameter MSE per batch (logging only)
        train_mean_relative_error = 0.0  # safe defaults if train_loader is empty
        train_para_relative_error = 0.0
        for batch_idx, (spectrum, para) in loop:
            spectrum, para = spectrum.to(device), para.to(device)
            # inverse net predicts parameters, forward net reconstructs spectrum
            para_pre = inverse_model(spectrum)
            output = forward_model(para_pre)
            # spectrum loss drives training; parameter loss is monitored only
            loss_s = criterion(output, spectrum)
            loss_p = criterion(para_pre, para)
            # set the grad to zero
            optimizer.zero_grad()
            # backward propagation -- Autograd (through the frozen forward net)
            loss_s.backward()
            # update the inverse net's parameters
            optimizer.step()

            losses_s.append(loss_s.item())
            losses_p.append(loss_p.item())

            # fixed: argument order was (spectrum, output), dividing by the
            # prediction — now (prediction, truth), matching the validation
            # call below and train_model's convention
            _, train_mean_relative_error, _ = compute_relative_error(output, spectrum)
            _, train_para_relative_error, _ = compute_relative_error(para_pre, para)

            # update the progress-bar readout
            loop.set_description(f'Epoch [{epoch}/{args.epochs}]')
            loop.set_postfix(loss=np.mean(losses_s), acc=train_mean_relative_error)

            # visualize the network graph exactly once
            if epoch == 0 and batch_idx == 0:
                writer.add_graph(inverse_model, spectrum)

        # checkpoint (saves the inverse model)
        save_model(inverse_model, args.model, path, epoch, args.epochs, args.save_num)

        # ---- validation ----
        inverse_model.eval()
        val_losses_s = []  # spectrum MSE per batch
        val_losses_p = []  # parameter MSE per batch
        val_mean_relative_error = 0.0  # safe defaults if dev_loader is empty
        val_para_relative_error = 0.0
        # fixed: no_grad during validation — no autograd graph is needed
        with torch.no_grad():
            for val_batch_idx, (val_spectrum, val_para) in enumerate(dev_loader, 0):
                # fixed: use the `device` object (was the raw args.device string)
                val_spectrum, val_para = val_spectrum.to(device), val_para.to(device)
                # predict parameters, then reconstruct the spectrum
                val_para_pre = inverse_model(val_spectrum)
                val_output = forward_model(val_para_pre)

                # get the losses
                val_loss_s = criterion(val_output, val_spectrum)
                val_loss_p = criterion(val_para_pre, val_para)

                val_losses_s.append(val_loss_s.item())
                val_losses_p.append(val_loss_p.item())

                # mean relative errors of the current batch (last batch is logged)
                _, val_mean_relative_error, _ = compute_relative_error(val_output, val_spectrum)
                _, val_para_relative_error, _ = compute_relative_error(val_para_pre, val_para)

        # TensorBoard: per-epoch loss and relative-error curves
        writer.add_scalars('Loss_s', {'train': np.mean(losses_s), 'test': np.mean(val_losses_s)}, epoch)
        writer.add_scalars('Loss_p', {'train': np.mean(losses_p), 'test': np.mean(val_losses_p)}, epoch)
        writer.add_scalars('Relative_error_s',
                           {'train': train_mean_relative_error, 'test': val_mean_relative_error}, epoch)
        writer.add_scalars('Relative_error_p',
                           {'train': train_para_relative_error, 'test': val_para_relative_error}, epoch)

        # learning-rate decay
        scheduler.step()

    print("----------Finish training----------")

    # ------------------------------------------------------------------ #

    # flush and close the TensorBoard writer
    writer.close()
