# -*- coding: utf-8 -*-
from os import environ

environ["PBR_VERSION"] = "4.0.2"
import torch.nn.functional as F
from torch.utils.data import DataLoader
from model.SmaAt_UNet import SmaAt_UNet
from data.segmented_data import SegmentedData
from utils.config import *
from utils.myloss import *
from utils.logger import Logger
from tendo.singleton import SingleInstance
from datetime import datetime
import os
from utils.s2d import space_to_depth2
from utils.d2s import depth_to_space

dtype = torch.float32  # precision used for all tensors moved to the device

# One log file per month; rotates daily ('D') and keeps 30 backups.
train_logger = Logger(filename=f'{log_path}/train_{datetime.now().year}-{datetime.now().month}.log', level='info',
                      when='D', back_count=30).logger

train_logger.info(f'Training Start Time:{datetime.now()}')
# NOTE: the config variable is spelled `leaning_rate` (star-imported from
# utils.config) and cannot be renamed here; only the log text is corrected.
train_logger.info(f'learning rate:{leaning_rate}')
train_logger.info(f'weight decay:{weight_decay}')
train_logger.info(f'batch size:{batch_size}')
train_logger.info(f'max epoch:{maxepoch}')


def my_mse_loss(x, y):
    """Return the mean squared error between tensors x and y as a scalar tensor."""
    diff = x - y
    return (diff * diff).mean()


def check_accuracy_part(loader, model):
    """Evaluate `model` on every batch of `loader` and return two scores.

    Returns:
        (mae_x, mae_r): `mae_r` is the model prediction's error; `mae_x` is the
        persistence baseline's error (repeating the last input frame). Each is
        a weighted-MAE plus 0.5 * area-loss term, averaged over all batches.
    """
    maerList = []  # per-batch error of the model prediction
    maexList = []  # per-batch error of the last-frame persistence baseline
    model.eval()  # set model to evaluation mode
    with torch.no_grad():
        for data in loader:
            # The dataset may yield None for unusable samples; skip them.
            if data is None:
                continue
            x, y = data

            x_gpu = x.to(device=device, dtype=dtype)
            # Pack 2x2 spatial blocks into channels before the model and
            # unpack afterwards — the network runs at half spatial resolution.
            x_gpu = space_to_depth2(x_gpu, 2)

            out = model(x_gpu)
            out = depth_to_space(out, 2)

            # mae_r = metrics.mae(out.cpu().numpy().squeeze(), y.numpy().squeeze())
            # NOTE(review): the area_loss term below uses the last INPUT frame
            # (x[:, -1:, ...]) rather than the model output `out`, so the same
            # area term is added to both scores — confirm this is intended.
            mae_r = weighted_mae_loss2(out.cpu().squeeze(), y.squeeze()) + \
                    0.5 * area_loss(x[:, -1:, ...].squeeze(), y.squeeze())
            maerList.append(mae_r)
            # mae_x = metrics.mae(x.numpy()[:, -1:, ...].squeeze(), y.numpy().squeeze())
            mae_x = weighted_mae_loss2(x[:, -1:, ...].squeeze(), y.squeeze()) + \
                    0.5 * area_loss(x[:, -1:, ...].squeeze(), y.squeeze())
            maexList.append(mae_x)

        # assumes the loader yields at least one usable batch — np.stack
        # raises on an empty list; TODO confirm upstream guarantees this
        maer_stack = np.stack(maerList)
        mae_r = np.mean(maer_stack)
        maex_stack = np.stack(maexList)
        mae_x = np.mean(maex_stack)
        return float(mae_x), float(mae_r)


def train_part():
    """Build model/optimizer/data loaders, optionally resume from checkpoint,
    and run the training loop, checkpointing every epoch and saving the best
    weights by validation score.

    Relies on module-level names star-imported from utils.config: batch_size,
    device, device_ids, thread_count, weight_root, model_date, initTrain,
    weight_decay, leaning_rate, maxepoch.
    """
    # 4 in/out channels — presumably 4 stacked time frames; confirm with the data layer.
    model = SmaAt_UNet(in_channels=4, out_channels=4)
    optimizer = torch.optim.Adam(model.parameters(), weight_decay=weight_decay, lr=leaning_rate)
    model.cuda()
    # state_dict() returns live references to the parameter tensors, so this
    # dict (re-captured below for DataParallel) reflects the latest weights at
    # every torch.save call later in the loop.
    model_state_dict = model.state_dict()
    weight_path = os.path.join(weight_root, f"{model_date}.pth")
    resume_weight_path = os.path.join(weight_root, f"resume.pth")

    # Scale the effective batch size with the number of GPUs.
    batchSize = batch_size * len(device_ids)

    # create data generator
    data_obj_train = SegmentedData(mode='train')
    train_generator = DataLoader(data_obj_train, batch_size=batchSize, shuffle=True, num_workers=thread_count,
                                 pin_memory=True)
    train_logger.info(f"Train Data Count:{len(data_obj_train)}")

    data_obj_val = SegmentedData(mode='val')
    # Validation uses a larger batch (no gradients kept) and no shuffling.
    val_generator = DataLoader(data_obj_val, batch_size=batchSize * 3, shuffle=False, num_workers=thread_count,
                               pin_memory=True)
    train_logger.info(f"Validation Data Count:{len(data_obj_val)}")

    if initTrain:
        # Fresh run: any first validation score will beat infinity.
        min_maer = float("inf")
    else:
        # Load previous model state
        # NOTE(review): same path as resume_weight_path above — could reuse it.
        last_weight_path = os.path.join(weight_root, f"resume.pth")
        train_logger.info(f"LOADING LAST CHECKPOINTS:{last_weight_path}")
        base_checkpoints = torch.load(last_weight_path)
        train_logger.info("SUCCESS")
        # torch.save(base_checkpoints, weight_path)

        state_dict = base_checkpoints['state_dict']
        model.load_state_dict(state_dict)
        optimizer.load_state_dict(base_checkpoints['optim_state'])

        # Re-evaluate the resumed model so new epochs must beat its score.
        maex, min_maer = check_accuracy_part(val_generator, model)
        message = f'LAST MODEL EVAL: mae_x = {maex:.4f} mae_r = {min_maer:.4f} '
        train_logger.info(message)

    if len(device_ids) > 1:
        train_logger.info("Parallel training")
        model = torch.nn.DataParallel(model, device_ids=device_ids)
        # Save the underlying module's state dict so checkpoints stay loadable
        # without the DataParallel 'module.' prefix.
        model_state_dict = model.module.state_dict()

    for epoch in range(maxepoch):
        loss_list = []
        for data in train_generator:
            # Dataset may yield None for unusable samples; skip them.
            if data is None:
                continue
            inputData, targetData = data

            model.train()  # put model to training mode
            inputData_cuda0 = inputData.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            # Model operates at half resolution: pack 2x2 blocks to channels
            # on the way in, unpack on the way out.
            inputData_cuda = space_to_depth2(inputData_cuda0, down_scale=2)
            targetData_cuda = targetData.to(device=device, dtype=dtype)

            outData_cuda = model(inputData_cuda)
            outData_cuda = depth_to_space(outData_cuda, 2)

            # Dilate the target with a 7x7 max-pool (stride 1, padding 3) —
            # presumably to tolerate small spatial misalignment; confirm intent.
            targetData_cuda = F.max_pool2d(targetData_cuda, 7, 1, padding=3)

            # loss = 0.4 * F.mse_loss(outData_cuda.squeeze(), targetData_cuda.squeeze(), reduction='mean') + \
            #        0.4 * weighted_mae_loss2(outData_cuda, targetData_cuda) + 0.2 * area_loss(outData_cuda,
            #                                                                                  targetData_cuda)

            # loss = weighted_mae_loss2(outData_cuda, targetData_cuda) + 0.5 * area_loss(outData_cuda, targetData_cuda)
            # NOTE(review): reduction='sum' makes the MSE term scale with batch
            # size, unlike the commented-out 'mean' variants — confirm intended.
            loss = F.mse_loss(outData_cuda.squeeze(), targetData_cuda.squeeze(), reduction='sum') + \
                   area_loss(outData_cuda, targetData_cuda)
            loss_list.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        mae_x, mae_r = check_accuracy_part(val_generator, model)
        avg_loss = np.mean(loss_list)

        val_message = f'Epoch {epoch:d}/{maxepoch:d} loss = {avg_loss:.4f} mae_x = {mae_x:.4f} mae_r = {mae_r:.4f} '
        train_logger.info(val_message)

        # Always refresh the resume checkpoint; model_state_dict holds live
        # references, so this captures the current weights.
        checkpoints = {
            'state_dict': model_state_dict,
            'optim_state': optimizer.state_dict(),
        }
        torch.save(checkpoints, resume_weight_path)

        # Save "best" only when the model beats both its own best so far and
        # the persistence baseline for this epoch.
        if mae_r < min(min_maer, mae_x):
            train_logger.info(f"save weights, mae: {min_maer:.4f} -> {mae_r:.4f}")
            min_maer = mae_r

            torch.save(checkpoints, weight_path)
            # torch.save(checkpoints, os.path.join(weight_root, f"{datetime.now().strftime('%d%H%M')}.pth"))


def net_train():
    """Log a session header, run one full training pass, then release cached GPU memory."""
    started = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    train_logger.info(f"{started} MaxEpoch: {maxepoch} ")
    train_part()
    torch.cuda.empty_cache()


if __name__ == "__main__":

    run = True
    try:
        # Guard against a second concurrent training process on this machine.
        me = SingleInstance()
        train_logger.info("ReviseNet Train start...")

    # tendo's SingleInstance signals a duplicate either by raising
    # SingleInstanceException (an Exception subclass) or, in older versions,
    # by calling sys.exit() — so catch both explicitly instead of a bare
    # `except:`, which would also swallow KeyboardInterrupt.
    except (Exception, SystemExit):
        train_logger.warning('ReviseNet Train Process already  exist! ')
        run = False
    if run:
        net_train()