# -*- coding: utf-8 -*-
# Author: GuoYouMing
from os import environ

environ["PBR_VERSION"] = "4.0.2"
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import numpy as np
from model.reviseNet import Net
from utils import metrics
from data.segmented_data import SegmentedData
from data.databuild_bk import Datasets
from utils.config import *
from tendo.singleton import SingleInstance

dtype = torch.float32

train_logger = Logger(filename=f'{logpath}/train_{datetime.now().year}-{datetime.now().month}.log', level='info',
                      when='D', back_count=30).logger

# Log the full run configuration up front so every training run is
# reproducible from its log file alone.
_startup_messages = (
    f'Current Model Time:{currentTime}',
    f'Training Start Time:{datetime.now()}',
    f'Tcategorys:{Tcategorys}',
    f'if init train:{initTrain}',
    f'device:{device}',
    f'leaning rate:{leaning_rate}',
    f'weight decay:{weight_decay}',
    f'batch size:{batch_size}',
    f'max epoch:{maxepoch}',
    f'htimes:{htimes}',
    f'last_train_date:{last_train_date}',
    f'auto update:{auto_update}',
)
for _msg in _startup_messages:
    train_logger.info(_msg)


def check_accuracy_part(loader, model):
    """Evaluate *model* over *loader* and return ``(mae_x, mae_r)``.

    ``mae_x`` is the mean absolute error of the raw input against the
    target (the "do nothing" baseline); ``mae_r`` is the MAE of the model
    output against the target.  Batches the dataset yields as ``None`` and
    batches whose input lies outside ``[Tem_min + C_K, Tem_max + C_K]``
    are skipped.

    Args:
        loader: iterable of ``(x, y)`` tensor pairs (or ``None`` sentinels).
        model: the network to evaluate; switched to eval mode here.

    Returns:
        Tuple of two floats.  ``(nan, nan)`` when every batch was skipped,
        so the caller's ``mae_r < min(...)`` comparison is simply False
        and no checkpoint gets saved.
    """
    mae_r_per_batch = []
    mae_x_per_batch = []
    model.eval()  # set model to evaluation mode (freeze dropout/batch-norm)
    with torch.no_grad():
        for data in loader:
            if data is None:
                continue
            x, y = data
            # Skip physically implausible samples (temperature out of range).
            if x.max() > (Tem_max + C_K) or x.min() < (Tem_min + C_K):
                continue
            x_gpu = x.to(device=device, dtype=dtype)
            out = model(x_gpu)
            mae_r_per_batch.append(metrics.mae(out.cpu().numpy().squeeze(), y.numpy().squeeze()))
            mae_x_per_batch.append(metrics.mae(x.numpy().squeeze(), y.numpy().squeeze()))

    # BUGFIX: the original np.stack(maerList) raised ValueError when every
    # batch was skipped.  np.mean on the (non-empty) list is equivalent to
    # mean of the stacked array, so the stacking step is dropped entirely.
    if not mae_r_per_batch:
        return float('nan'), float('nan')
    return float(np.mean(mae_x_per_batch)), float(np.mean(mae_r_per_batch))


def train_part(Tcategory):
    """Train (or fine-tune) one ReviseNet model for a single temperature
    category, checkpointing whenever the validation MAE improves.

    Args:
        Tcategory: temperature-category key; selects the dataset split and
            names the checkpoint file.
    """
    model = Net()
    optimizer = torch.optim.Adam(model.parameters(), weight_decay=weight_decay, lr=leaning_rate)
    model.cuda()
    # NOTE(review): state_dict() returns references to the live parameter
    # tensors, so this dict tracks the weights as training mutates them —
    # presumably intentional, since it is what gets checkpointed below.
    # It is rebound to model.module's dict if DataParallel wraps the model.
    model_state_dict = model.state_dict()
    weight_path = pathjoin(weight_root, f"{Tcategory}/{currentTime.strftime('%Y%m%d')}_{Tcategory}.pth")

    # Effective batch size scales with the number of GPUs in use.
    batchSize = batch_size * len(device_ids)

    # create data generator
    data_obj_train = SegmentedData(Tcategory=Tcategory, mode='train')
    train_generator = DataLoader(data_obj_train, batch_size=batchSize, shuffle=True, num_workers=thread_count)
    train_logger.info(f"Train Data Count:{len(data_obj_train)}")

    data_obj_val = SegmentedData(Tcategory=Tcategory, mode='val')
    val_generator = DataLoader(data_obj_val, batch_size=batchSize, shuffle=False, num_workers=thread_count)
    train_logger.info(f"Validation Data Count:{len(data_obj_val)}")

    if initTrain:
        # Fresh training: any finite validation MAE beats the initial bar.
        min_maer = float("inf")
    else:
        # Load previous model state
        last_weight_path = pathjoin(weight_root, f"{Tcategory}/{last_train_date}_{Tcategory}.pth")
        train_logger.info(f"LOADING LAST CHECKPOINTS:{last_weight_path}")
        base_checkpoints = torch.load(last_weight_path)
        train_logger.info("SUCCESS")
        # Copy the previous checkpoint to today's path first, so a file
        # exists for this date even if no epoch improves on the old model.
        torch.save(base_checkpoints, weight_path)

        state_dict = base_checkpoints['state_dict']
        model.load_state_dict(state_dict)
        optimizer.load_state_dict(base_checkpoints['optim_state'])

        # Baseline the save threshold on the previous model's validation MAE.
        # NOTE(review): the +0.02 appears to tolerate a small regression
        # relative to the old model before the first save — confirm intent.
        maex, min_maer = check_accuracy_part(val_generator, model)
        min_maer += 0.02
        message = f'LAST MODEL EVAL: mae_x = {maex:.2f} mae_r = {min_maer:.2f} '
        train_logger.info(message)

    if len(device_ids) > 1:
        train_logger.info("Parallel training")
        model = torch.nn.DataParallel(model, device_ids=device_ids)
        # Checkpoint the underlying module's weights so the saved file can
        # be loaded later without the DataParallel "module." key prefix.
        model_state_dict = model.module.state_dict()

    for epoch in range(maxepoch):
        for data in train_generator:
            # The dataset may yield None for unusable samples; skip them.
            if data is None:
                continue
            inputData, targetData = data

            model.train()  # put model to training mode
            inputData_cuda = inputData.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            targetData_cuda = targetData.to(device=device, dtype=dtype)

            outData_cuda = model(inputData_cuda)
            # L1 loss matches the MAE metric used for validation below.
            loss = F.l1_loss(outData_cuda.squeeze(), targetData_cuda.squeeze())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # verification at epoch end
        mae_x, mae_r = check_accuracy_part(val_generator, model)

        val_message = f'Epoch {epoch:d}/{maxepoch:d} mae_x = {mae_x:.4f} mae_r = {mae_r:.4f} '
        train_logger.info(val_message)

        # Save only when the model beats BOTH the best MAE so far and the
        # raw-input baseline (a model worse than "do nothing" is not kept).
        if mae_r < min(min_maer, mae_x):
            train_logger.info(f"save weights, mae: {min_maer:.4f} -> {mae_r:.4f}")
            min_maer = mae_r
            checkpoints = {
                'state_dict': model_state_dict,
                'optim_state': optimizer.state_dict(),
            }
            torch.save(checkpoints, weight_path)


def net_train():
    """Build the training dataset, train one model per temperature
    category, then (when auto_update is on) advance last_train_date in
    the config file to this run's date."""
    train_logger.info("Making Dataset...")
    dataset = Datasets(dataEndTime=currentTime)
    dataset.data_split()
    dataset.build_cldas()
    train_logger.info("Making Dataset Done")

    for Tcategory in Tcategorys:
        localtime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        train_logger.info(f"{localtime} Tem-Class：{Tcategory} MaxEpoch: {maxepoch} ")
        train_part(Tcategory=Tcategory)
        # Release cached GPU memory between categories.
        torch.cuda.empty_cache()

    train_logger.info("TRAIN COMPLETED!")
    if auto_update:
        new_date = currentTime.strftime('%Y%m%d')
        config.set('model info', 'last_train_date', new_date)
        with open(cfg_file, "w+") as f:
            config.write(f)
        train_logger.info(f"MODEL UPDATED TO:{new_date}")


if __name__ == "__main__":
    # tendo's SingleInstance guarantees a single training process at a
    # time; depending on the tendo version it raises SingleInstanceException
    # or calls sys.exit() when another instance holds the lock, so both
    # Exception and SystemExit are handled here.  The original bare
    # "except:" also swallowed KeyboardInterrupt, hiding Ctrl-C — fixed.
    run = True
    try:
        me = SingleInstance()
        train_logger.info("ReviseNet Train start...")
    except (SystemExit, Exception):
        train_logger.warning('ReviseNet Train Process already  exist! ')
        run = False
    if run:
        net_train()