import sys
import os
import time
from optparse import OptionParser
from torch.autograd import Variable
import torchvision
import numpy as np
from data.dataset import Data
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torch import optim
import cv2
from torchvision import transforms
from eval import eval_net
from unet import Unet as UNet
from utils import get_ids, split_ids, split_train_val, get_imgs_and_masks, batch
from visdom import Visdom  # visualization dashboard client (requires a running visdom server)
from torch import functional as F  # NOTE(review): `F` is unused; probably meant `torch.nn.functional` — confirm and remove or fix

# Visdom client for live loss plotting; connecting happens at import time,
# so a visdom server must already be running when this module is loaded.
viz = Visdom()

# Hyper-parameters and paths (module-level configuration)
crop_size = 312   # side length of the square training crops
lr = 0.1          # SGD learning rate
# BUG FIX: the original `gpu = True,` had a trailing comma and therefore
# bound `gpu` to the tuple (True,). It still evaluated truthy in `if gpu:`,
# but any comparison such as `gpu == True` or logging of the flag was wrong.
gpu = True
dir_img = './data/train/'           # training-image root directory
dir_checkpoint = './checkpoints/'   # where periodic checkpoints are written
data_lst = 'train.txt'              # list file naming the training samples
batch_size = 1                      # inner iterations per optimizer "step" below


def cross_entropy_loss2d(inputs, targets, cuda=True, balance=1.1):
    """Class-balanced binary cross-entropy, summed over all pixels.

    Each pixel's loss is weighted by the frequency of the *opposite* class
    within its own sample, so the rarer class contributes proportionally
    more. Positive pixels get weight neg/valid; negative pixels get
    balance * pos/valid.

    :param inputs: predicted probabilities in [0, 1], shape (n, c, h, w)
    :param targets: ground-truth masks of 0/1 values, same shape as inputs
    :param cuda: move the weight tensor to the GPU (must match inputs' device)
    :param balance: extra multiplier on the negative-class weight
    :return: scalar loss tensor (sum over all elements, not averaged)
    """
    n, c, h, w = inputs.size()
    weights = np.zeros((n, c, h, w), dtype=np.float32)
    for i in range(n):
        t = targets[i].detach().cpu().numpy()
        pos = (t == 1).sum()
        neg = (t == 0).sum()
        valid = neg + pos
        if valid == 0:
            # No labelled pixels in this sample: leave its weights at zero
            # instead of dividing by zero (original code would produce NaN).
            continue
        weights[i, t == 1] = neg * 1. / valid
        weights[i, t == 0] = pos * balance / valid
    weights = torch.from_numpy(weights)
    if cuda:
        weights = weights.cuda()
    # reduction='sum' replaces the deprecated `size_average=False` argument
    # and is numerically identical (total, un-averaged loss).
    loss = nn.BCELoss(weights, reduction='sum')(inputs, targets)
    return loss


def train_net(net):
    """Train `net` with SGD on the module-level dataset configuration.

    Runs 9999 outer "steps"; each step draws `batch_size` mini-batches from
    the DataLoader (restarting it when exhausted), performs one SGD update
    per mini-batch, plots the accumulated step loss to Visdom, and saves a
    checkpoint every 2000 steps.

    :param net: the model to train (moved to GPU here when `gpu` is set)

    Relies on module globals: dir_img, data_lst, crop_size, batch_size,
    lr, gpu, dir_checkpoint, viz.
    """
    # NOTE(review): the original built a RandomHorizontalFlip transform here
    # but never applied it to the data; it has been removed. Re-add it inside
    # the Data pipeline if augmentation is actually wanted.
    train_img = Data(dir_img, data_lst, yita=0.6, crop_size=crop_size)
    trainloader = torch.utils.data.DataLoader(train_img, batch_size=batch_size, shuffle=True)
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0002)

    criterion = nn.BCELoss()
    cur = 0
    data_iter = iter(trainloader)
    iter_per_epoch = len(trainloader)
    if gpu:
        net.cuda()
    net.train()

    start_step = 1
    start_time = time.time()
    for step in range(start_step, 10000):
        epoch_loss = 0
        for i in range(batch_size):
            if cur == iter_per_epoch:
                # DataLoader exhausted: start a new pass over the data.
                cur = 0
                data_iter = iter(trainloader)
            images, true_masks = next(data_iter)
            if gpu:
                images = images.cuda()
                true_masks = true_masks.cuda()
            # (The deprecated Variable wrapper was removed; tensors autograd
            # directly since PyTorch 0.4.)
            masks_pred = net(images)
            loss = criterion(masks_pred, true_masks)
            epoch_loss += loss.item()
            # BUG FIX: gradients must be cleared before every backward/step
            # pair. Previously zero_grad() ran only once per outer step, so
            # with batch_size > 1 each optimizer.step() after the first
            # applied stale, accumulated gradients.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            cur += 1
        # update must be the string 'append' per the visdom line() API
        # (the original passed the list ["append"]).
        viz.line([epoch_loss], [step], win='loss', opts=dict(title='train loss'), update='append')
        print('%d Step finished ! Time cost %d s ! Loss: %f' % (step, time.time()-start_time, epoch_loss))
        start_time = time.time()
        if step % 2000 == 0:
            # BUG FIX: integer division — step/2000 yielded names like
            # 'CP1.0.pth'; step//2000 gives 'CP1.pth'.
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(step // 2000))
            print('Checkpoint {} saved !'.format(step // 2000))



def get_args():
    """Parse command-line options for the training script.

    :return: an optparse options object with attributes epochs, batchsize,
             lr, gpu, load, scale, max_iter, iter_size, average_loss and
             step_size.
    """
    parser = OptionParser()
    add = parser.add_option
    add('-e', '--epochs', dest='epochs', type='int', default=10,
        help='number of epochs')
    add('-b', '--batch-size', dest='batchsize', type='int', default=10,
        help='batch size')
    add('-l', '--learning-rate', dest='lr', type='float', default=0.1,
        help='learning rate')
    add('-g', '--gpu', dest='gpu', action='store_true', default=False,
        help='use cuda')
    add('-c', '--load', dest='load', default=False,
        help='load file model')
    add('-s', '--scale', dest='scale', type='float', default=0.5,
        help='downscaling factor of the images')
    add('--max-iter', type='int', default=40000,
        help='max iters to train_mask network, default is 40000')
    add('--iter-size', type='int', default=10,
        help='iter size equal to the batch size, default 10')
    add('--average-loss', type='int', default=50,
        help='smoothed loss, default is 50')
    add('--step-size', type='int', default=10000,
        help='the number of iters to decrease the learning rate, default is 10000')

    options, _ = parser.parse_args()
    return options

if __name__ == '__main__':
    # Entry point: build the model, optionally resume weights, then train.
    options = get_args()

    model = UNet()

    # Resume from a saved state dict when -c/--load PATH was given.
    if options.load:
        model.load_state_dict(torch.load(options.load))
        print('Model loaded from {}'.format(options.load))

    if options.gpu:
        model.cuda()

    try:
        train_net(net=model)
    except KeyboardInterrupt:
        # Persist the current weights so an interrupted run can be resumed,
        # then exit immediately (os._exit as a hard fallback).
        torch.save(model.state_dict(), 'INTERRUPTED.pth')
        print('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
