"""
    train

Author: Zhengwei Li
Date  : 2018/12/24
"""
import os
import numpy as np
import argparse
import math
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import time
from datetime import datetime
from data import dataset
from model import network

# Repository root: two levels up from this file (this script lives in a
# sub-directory; data/ and ckpt paths below are resolved relative to this).
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


def get_args():
    """Build and parse the command-line options for training.

    Returns the parsed ``argparse.Namespace`` (also printed to stdout).
    """
    parser = argparse.ArgumentParser(description='Fast portrait matting !')

    # Paths and dataset selection
    parser.add_argument('--dataDir',
                        default='{}/data/datasets_debug'.format(PROJECT_ROOT),
                        help='dataset directory')
    parser.add_argument('--saveDir',
                        default='{}/data/ckpt'.format(PROJECT_ROOT),
                        help='model save dir')
    parser.add_argument('--trainData', default='human_matting_data',
                        help='train dataset name')

    # Checkpoint naming / resume
    parser.add_argument('--load', default='human_matting',
                        help='save model dir')
    parser.add_argument('--finetuning', action='store_true', default=False,
                        help='finetuning the training')

    # Data-loading and optimisation hyper-parameters
    parser.add_argument('--nThreads', type=int, default=0,
                        help='number of threads for data loading')
    parser.add_argument('--train_batch', type=int, default=2,
                        help='input batch size for train')
    parser.add_argument('--patch_size', type=int, default=320,
                        help='patch size for train')
    parser.add_argument('--lr', type=float, default=1e-2,
                        help='learning rate')
    parser.add_argument('--lrDecay', type=int, default=100)
    parser.add_argument('--lrdecayType', default='keep')
    parser.add_argument('--nEpochs', type=int, default=4,
                        help='number of epochs to train')
    parser.add_argument('--save_epoch', type=int, default=1,
                        help='number of epochs to save model')

    # Which stage of the two-stage pipeline to train
    parser.add_argument('--train_phase', default='pre_train_t_net',
                        help='train phase')

    parsed = parser.parse_args()
    print(parsed)
    return parsed


class TrainLog:
    """Manages checkpoint and log files for training.

    Directory layout under ``<saveDir>/<load>`` (e.g. ``../data/ckpt/human_matting``)::

        pre_train_t_net/
            ckpt_best.pth, ckpt_lastest.pth,
            model_obj_best.pth, model_obj_lastest.pth,
            log_one.txt, log_best.txt
        end_to_end/
            (same six files)
        ckpt_best.pth, ckpt_lastest.pth,
        model_obj_best.pth, model_obj_lastest.pth,
        log_one.txt, log_best.txt

    Checkpoints and logs are always written at the root level, plus a copy
    inside the sub-directory of the current ``args.train_phase``.
    """

    def __init__(self, args):
        self.args = args

        # Root save dir, e.g. ../data/ckpt/human_matting
        self.save_dir = os.path.join(args.saveDir, args.load)
        self.safe_makedirs(self.save_dir)

        self.save_pre_train_t_net = os.path.join(self.save_dir, 'pre_train_t_net')
        self.safe_makedirs(self.save_pre_train_t_net)

        self.save_end_to_end = os.path.join(self.save_dir, 'end_to_end')
        self.safe_makedirs(self.save_end_to_end)

        # Log files: one copy at the root, one per training phase.
        self.logFile_one = self.create_logfile(self.save_dir, 'one')
        self.logFile_one_pre = self.create_logfile(self.save_pre_train_t_net, 'one')
        self.logFile_one_end = self.create_logfile(self.save_end_to_end, 'one')

        self.logFile_best = self.create_logfile(self.save_dir, 'best')
        self.logFile_best_pre = self.create_logfile(self.save_pre_train_t_net, 'best')
        self.logFile_best_end = self.create_logfile(self.save_end_to_end, 'best')

    @staticmethod
    def create_logfile(path_dir, suffix_name):
        """Open (creating if needed) ``log_<suffix>.txt`` in append mode."""
        # 'a' creates the file when missing, so the previous
        # os.path.exists()/'w' fallback was redundant and race-prone.
        return open('{}/log_{}.txt'.format(path_dir, suffix_name), 'a')

    @staticmethod
    def safe_makedirs(path_dir):
        """Create *path_dir* (and parents) if it does not already exist."""
        # exist_ok avoids the check-then-create race of os.path.exists().
        os.makedirs(path_dir, exist_ok=True)

    def _save_to(self, model, epoch, target_dir, suffix_name):
        """Write the state-dict checkpoint and the pickled model object to *target_dir*."""
        ckpt_path = "{}/ckpt_{}.pth".format(target_dir, suffix_name)
        obj_path = "{}/model_obj_{}.pth".format(target_dir, suffix_name)
        torch.save({'epoch': epoch, 'state_dict': model.state_dict()}, ckpt_path)
        torch.save(model, obj_path)

    def __save_model(self, model, epoch, suffix_name='lastest'):
        # Always keep a copy at the root directory ...
        self._save_to(model, epoch, self.save_dir, suffix_name)
        # ... plus one inside the current phase's sub-directory.  (The old
        # code duplicated the save logic three times and saved the root
        # files twice when train_phase matched neither phase.)
        if self.args.train_phase == 'pre_train_t_net':
            self._save_to(model, epoch, self.save_pre_train_t_net, suffix_name)
        elif self.args.train_phase == 'end_to_end':
            self._save_to(model, epoch, self.save_end_to_end, suffix_name)

    def save_model(self, model, epoch, mode='one'):
        """Save *model*; mode 'one' -> '..._lastest.pth', anything else -> '..._best.pth'."""
        if mode == 'one':
            self.__save_model(model, epoch, suffix_name='lastest')
        else:
            self.__save_model(model, epoch, suffix_name='best')

    def load_model(self, model):
        """Load the latest root-level checkpoint into *model*.

        Returns ``(start_epoch, model)``.  Note this always reads the root
        ``ckpt_lastest.pth``, not a phase sub-directory.
        """
        lastest_out_path = "{}/ckpt_lastest.pth".format(self.save_dir)
        ckpt = torch.load(lastest_out_path)
        start_epoch = ckpt['epoch']
        model.load_state_dict(ckpt['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(lastest_out_path, ckpt['epoch']))

        return start_epoch, model

    def save_log(self, log, mode='one'):
        """Append *log* (plus newline) to the root log and the current phase's log.

        mode 'one' targets log_one.txt, anything else targets log_best.txt.
        """
        if mode == 'one':
            targets = [self.logFile_one]
            if self.args.train_phase == 'pre_train_t_net':
                targets.append(self.logFile_one_pre)
            if self.args.train_phase == 'end_to_end':
                targets.append(self.logFile_one_end)
        else:
            targets = [self.logFile_best]
            if self.args.train_phase == 'pre_train_t_net':
                targets.append(self.logFile_best_pre)
            if self.args.train_phase == 'end_to_end':
                targets.append(self.logFile_best_end)
        for log_file in targets:
            log_file.write(log + '\n')
            # Flush immediately: training may crash mid-epoch and the old
            # unflushed buffers silently dropped the tail of the log.
            log_file.flush()


def set_lr(args, epoch, optimizer):
    """Compute the learning rate for *epoch* and apply it to *optimizer*.

    Decay schedules (selected by ``args.lrdecayType``):
        keep : constant ``args.lr``
        step : halve every ``args.lrDecay`` epochs
        exp  : continuous exponential decay with half-life ``args.lrDecay``
        poly : polynomial decay to 0 at ``args.nEpochs`` (power 0.9)

    Returns:
        The learning rate that was written into every param group.

    Raises:
        ValueError: on an unknown decay type.  (Previously an unknown type
        left ``lr`` unbound and crashed with UnboundLocalError.)
    """
    lr_decay = args.lrDecay
    decay_type = args.lrdecayType
    if decay_type == 'keep':
        lr = args.lr
    elif decay_type == 'step':
        epoch_iter = (epoch + 1) // lr_decay
        lr = args.lr / 2 ** epoch_iter
    elif decay_type == 'exp':
        k = math.log(2) / lr_decay
        lr = args.lr * math.exp(-k * epoch)
    elif decay_type == 'poly':
        lr = args.lr * math.pow((1 - epoch / args.nEpochs), 0.9)
    else:
        raise ValueError('unknown lrdecayType: {}'.format(decay_type))

    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    return lr


def loss_function(args, img, trimap_pre, trimap_gt, alpha_pre, alpha_gt):
    """Compute the matting losses for the current training phase.

    Args:
        args: namespace with ``train_phase`` ('pre_train_t_net' | 'end_to_end').
        img: input image batch, 3 channels (used for the composition loss).
        trimap_pre: predicted trimap logits, 3 channels per pixel.
        trimap_gt: ground-truth trimap with class indices in channel 0
            (presumably {0, 1, 2} — background / unknown / foreground).
        alpha_pre, alpha_gt: predicted / ground-truth alpha mattes, 1 channel.

    Returns:
        (loss, L_alpha, L_composition, L_t) — ``loss`` is the phase-specific
        training objective; the others are reported for logging.

    Raises:
        ValueError: if ``args.train_phase`` is not a known phase.
        (Previously an unknown phase crashed with UnboundLocalError.)
    """
    # Trimap classification loss L_t: per-pixel 3-class cross entropy.
    criterion = nn.CrossEntropyLoss()
    L_t = criterion(trimap_pre, trimap_gt[:, 0, :, :].long())

    # Alpha prediction loss: smoothed L1-like distance; eps keeps sqrt
    # differentiable at zero difference.
    eps = 1e-6
    L_alpha = torch.sqrt(torch.pow(alpha_pre - alpha_gt, 2.) + eps).mean()

    # Composition loss: compare the alpha-matted foregrounds obtained by
    # broadcasting each alpha over the 3 image channels.
    fg = torch.cat((alpha_gt, alpha_gt, alpha_gt), 1) * img
    fg_pre = torch.cat((alpha_pre, alpha_pre, alpha_pre), 1) * img
    L_composition = torch.sqrt(torch.pow(fg - fg_pre, 2.) + eps).mean()

    L_p = 0.5 * L_alpha + 0.5 * L_composition

    # Phase selection: T-net pre-training optimises only the trimap loss;
    # end-to-end adds a small trimap term to the prediction loss.
    if args.train_phase == 'pre_train_t_net':
        loss = L_t
    elif args.train_phase == 'end_to_end':
        loss = L_p + 0.01 * L_t
    else:
        raise ValueError('unknown train_phase: {}'.format(args.train_phase))

    return loss, L_alpha, L_composition, L_t


def mse(img1, img2):
    """Return the mean squared error between two arrays."""
    diff = img1 - img2
    return np.mean(diff ** 2)


def run_train(args):
    """Full training loop for the matting network.

    Builds the model, train/eval dataloaders and Adam optimizer, then runs
    ``args.nEpochs`` epochs.  Each epoch iterates a train phase and an eval
    phase, logs averaged losses via TrainLog, and saves the latest plus the
    metric-best checkpoints.

    NOTE(review): the running sums (loss_, L_alpha_, ..., mse_, sad_) are not
    reset between the train and eval phases, and after the train phase they
    are divided by (i + 1) and then accumulated further during eval — so the
    eval-phase log line mixes both phases.  Confirm this is intended before
    trusting the logged numbers.
    """
    print("============> Environment init ...")
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        print("No GPU is is available ")
        device = torch.device('cpu')

    print("============> Building model ...")
    model = network.net()
    model.to(device)

    print("============> Loading datasets ...")
    dataloader_dict = dict()
    # Load the training data (the dataset class is looked up by name,
    # args.trainData, on the project-local `dataset` module).
    train_data = getattr(dataset, args.trainData)(root_dir=args.dataDir,
                                                  imglist='{}/train/train.txt'.format(args.dataDir),
                                                  patch_size=args.patch_size, train_mode='train')
    trainloader = DataLoader(train_data, batch_size=args.train_batch, drop_last=True,
                             shuffle=True, num_workers=args.nThreads, pin_memory=True)
    dataloader_dict.update({'train': trainloader})

    # Load the evaluation data
    eval_data = getattr(dataset, args.trainData)(root_dir=args.dataDir,
                                                 imglist='{}/eval/eval.txt'.format(args.dataDir),
                                                 patch_size=args.patch_size, train_mode='eval')
    evalloader = DataLoader(eval_data, batch_size=args.train_batch, drop_last=True,
                            shuffle=True, num_workers=args.nThreads, pin_memory=True)
    dataloader_dict.update({'eval': evalloader})

    print("============> Set optimizer ...")
    lr = args.lr
    # Only parameters with requires_grad are optimised (frozen layers skipped).
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                           lr=lr, betas=(0.9, 0.999), weight_decay=0.0005)

    print("============> Start Train ...")
    start_epoch = 1
    trainlog = TrainLog(args)
    if args.finetuning:
        # Resume from the latest root checkpoint; when switching to the
        # end_to_end phase the epoch counter restarts at 1.
        start_epoch, model = trainlog.load_model(model)
        if args.train_phase == 'end_to_end':
            start_epoch = 1

    # Best-metric trackers; both are (re)seeded at epoch == start_epoch below.
    best_sad = 0
    best_mse = 0
    for epoch in range(start_epoch, args.nEpochs + 1):

        # Running sums for the epoch's losses and metrics.
        loss_ = 0
        L_alpha_ = 0
        L_composition_ = 0
        L_cross_ = 0
        sad_ = 0
        mse_ = 0
        if args.lrdecayType != 'keep':
            lr = set_lr(args, epoch, optimizer)

        t0 = time.time()

        # Each epoch has a training phase and a validation phase
        for phase in dataloader_dict.keys():
            if phase == 'train':
                model.train()
            else:
                model.eval()

            for i, sample_batched in enumerate(dataloader_dict[phase]):
                img, trimap_gt, alpha_gt = sample_batched['image'], sample_batched['trimap'], sample_batched['alpha']
                img, trimap_gt, alpha_gt = img.to(device), trimap_gt.to(device), alpha_gt.to(device)

                optimizer.zero_grad()
                # Gradients are only tracked during the train phase.
                with torch.set_grad_enabled(phase == 'train'):
                    trimap_pre, alpha_pre = model(img)
                    loss, L_alpha, L_composition, L_cross = loss_function(args, img, trimap_pre, trimap_gt,
                                                                          alpha_pre, alpha_gt)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                loss_ += loss.item()
                L_alpha_ += L_alpha.item()
                L_composition_ += L_composition.item()
                L_cross_ += L_cross.item()

                # Objective quality metrics
                if args.train_phase == 'pre_train_t_net':
                    # NOTE(review): trimap_pre looks like 3-channel logits while
                    # trimap_gt is 1-channel, so this subtraction broadcasts;
                    # verify this MSE metric is meaningful.  Also the result
                    # stays a tensor (no .item()), unlike the other sums.
                    mse_ += torch.mean(torch.pow(trimap_pre - trimap_gt, 2))
                if args.train_phase == 'end_to_end':
                    # SAD between the alpha-composited foregrounds, scaled by 1e3.
                    alpha_gt = torch.cat((alpha_gt, alpha_gt, alpha_gt), 1) * img
                    alpha_pre = torch.cat((alpha_pre, alpha_pre, alpha_pre), 1) * img
                    sad_ += ((alpha_pre - alpha_gt).float()).abs().sum().item() / 1000

            t1 = time.time()

            if epoch % args.save_epoch == 0:
                # Average the sums over the batches of this phase's loader.
                speed = t1 - t0
                loss_ = loss_ / (i + 1)
                L_alpha_ = L_alpha_ / (i + 1)
                L_composition_ = L_composition_ / (i + 1)
                L_cross_ = L_cross_ / (i + 1)

                current_time = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
                log = "{} {} [{}/{}]\tstage: {}\tspeed: {:.5f}s\tLr: {:.5f}\t" \
                      "loss: {:.5f}\tloss_p: {:.5f}\tloss_t: {:.5f}\t" \
                    .format(current_time, phase, epoch, args.nEpochs, args.train_phase, speed,
                            lr, loss_, L_alpha_ + L_composition_, L_cross_)

                if args.train_phase == 'pre_train_t_net':
                    mse_ = mse_ / (i + 1)
                    log = '{}mse: {:.5f}\t'.format(log, mse_)
                    # Seed the best metric on the first epoch of this run.
                    if epoch == start_epoch:
                        best_mse = mse_
                    if mse_ <= best_mse:  # save the model with the best metric so far
                        trainlog.save_log(log, mode='best')
                        trainlog.save_model(model, epoch, mode='best')

                if args.train_phase == 'end_to_end':
                    sad_ = sad_ / (i + 1)
                    log = '{}sad: {:.5f}\t'.format(log, sad_)
                    # Seed the best metric on the first epoch of this run.
                    if epoch == start_epoch:
                        best_sad = sad_
                    if sad_ <= best_sad:  # save the model with the best metric so far
                        trainlog.save_log(log, mode='best')
                        trainlog.save_model(model, epoch, mode='best')

                print(log)
                trainlog.save_log(log)
                trainlog.save_model(model, epoch)


if __name__ == "__main__":
    run_train(get_args())
