# coding=utf8
from __future__ import print_function, division
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import argparse
from torch.autograd import Variable
import torch.utils.data as torchdata
from models.ssd.config import v2

from models.ssd.layers import MultiBoxLoss
from models.ssd.ssd import build_ssd
import numpy as np
import time
from torch.optim import lr_scheduler
import logging
from utils.training import train, trainlog
from GLSdata.GLSdataset import GLSdata, collate_fn,collate_fn2
import cPickle

# Pin the process to the first visible GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Command-line configuration for the SSD training run.
arg_parser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training')
arg_parser.add_argument('--img_root', default='/media/hszc/data1/glass_data/patches/mask_no_crayon/imgs', help='Image folder for training')
arg_parser.add_argument('--anno_path_train', default='/media/hszc/data1/glass_data/patches/mask_no_crayon/train_annos.pkl',
                        help='csv anno path')
arg_parser.add_argument('--anno_path_val', default='/media/hszc/data1/glass_data/patches/mask_no_crayon/val_annos.pkl',
                        help='csv anno path')
arg_parser.add_argument('--batch_size', default=16, type=int, help='Batch size for training')
arg_parser.add_argument('--cuda', default=True, type=bool, help='Use cuda to train model')
arg_parser.add_argument('--save_dir', default='./saved_weights/test1', help='Location to save checkpoint models')
arg_parser.add_argument('--num_workers', default=4, type=int, help='num_workers for preprocessing image')
arg_parser.add_argument('--resume', default='./saved_weights/test1/bestweights-[0.6569].pth', type=str, help='Resume from checkpoint')
arg_parser.add_argument('--start_iter', default=0, type=int,
                        help='Begin counting iterations starting from this value (should be used with resume)')
arg_parser.add_argument('--lr', '--learning-rate', default=0.0001, type=float, help='initial learning rate')
args = arg_parser.parse_args()




# Make newly created tensors live on the GPU when CUDA was requested and is present.
use_gpu = args.cuda and torch.cuda.is_available()
torch.set_default_tensor_type('torch.cuda.FloatTensor' if use_gpu else 'torch.FloatTensor')

cfg = v2  # SSD config (anchor / feature-map layout) imported from models.ssd.config

# Per-channel pixel means subtracted during preprocessing.
means = (104, 117, 123)  # only support voc now
num_classes = 2  # background + 1 foreground class (conf head predicts 2 scores, see L69 comment)
batch_size = args.batch_size
# Hyper-parameters; NOTE(review): the optimizer below hard-codes momentum/weight
# decay instead of reading these three — kept for parity with the original.
weight_decay = 0.0005
gamma = 0.1
momentum = 0.9

# Build the SSD network in 'train' phase for 300x300 inputs.
ssd_net = build_ssd('train', 300, num_classes)  # return 'nn.Module', forward return [loc pred, conf pred ,prior]
net = ssd_net

print(ssd_net)

# Smoke-test the forward pass with a dummy batch to surface shape errors early.
# out[0]: bs, num_anchors, 4   location regression
# out[1]: bs, num_anchors, 2   class probabilities
# out[2]: num_anchors, 4       prior (anchor) boxes
dummy = Variable(torch.Tensor(1, 3, 300, 300))
out = ssd_net(dummy)

net = torch.nn.DataParallel(ssd_net)
cudnn.benchmark = True  # set to False when inputsize changes

if args.resume:
    print('Resuming training, loading {}...'.format(args.resume))
    # map_location keeps loading on CPU so checkpoints restore regardless of device.
    net.load_state_dict(torch.load(args.resume, map_location=lambda storage, loc: storage))
    print('finish loading')

# Fix: only move to GPU when requested AND available; the original called
# .cuda() unconditionally, which crashes on CPU-only machines even though
# the args.cuda check above already handles the CPU path.
if args.cuda and torch.cuda.is_available():
    net = net.cuda()


def xavier(param):
    """Fill ``param`` in-place with Xavier/Glorot uniform values.

    Fix: uses the in-place ``init.xavier_uniform_`` API; the non-underscore
    ``init.xavier_uniform`` the original called is deprecated (same math,
    same in-place behavior).
    """
    init.xavier_uniform_(param)


def weights_init(m):
    """Hook for ``nn.Module.apply``: Xavier-init Conv2d weights, zero biases.

    Non-convolutional modules are left untouched.
    """
    if not isinstance(m, nn.Conv2d):
        return
    xavier(m.weight.data)
    m.bias.data.zero_()


if not args.resume:
    print('Initializing weights...')
    # Xavier-init only the heads added on top of the (pretrained) backbone.
    for head in (ssd_net.extras, ssd_net.loc, ssd_net.conf):
        head.apply(weights_init)

# Load the pickled annotation structures for each split.
# Fix: open files in binary mode via a context manager (the original leaked
# the file handle and used text mode).
# NOTE(review): pickle executes arbitrary code on load — only ever point
# these paths at trusted, locally produced files.
with open(args.anno_path_train, 'rb') as f:
    annos = cPickle.load(f)
with open(args.anno_path_val, 'rb') as f:
    annos_val = cPickle.load(f)

# Fix: the validation dataset previously reused the *training* annotations,
# leaving --anno_path_val unused; each split now loads its own file.
data_set = {'train': GLSdata(annos, transforms=None),
            'val': GLSdata(annos_val, transforms=None)}

data_loader = {'train': torchdata.DataLoader(data_set['train'], batch_size, num_workers=args.num_workers,
                                             shuffle=True, collate_fn=collate_fn, pin_memory=True),
               'val': torchdata.DataLoader(data_set['val'], batch_size, num_workers=args.num_workers,
                                           shuffle=False, collate_fn=collate_fn2, pin_memory=True)}

optimizer = optim.SGD(net.parameters(), lr=args.lr,
                      momentum=0.9, weight_decay=5e-4)
# NOTE(review): positional args presumably follow the ssd.pytorch MultiBoxLoss
# signature (overlap_thresh=0.5, neg/pos ratio=3, use_gpu=args.cuda) — confirm
# against models/ssd/layers.
criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False, args.cuda)

# Decay the learning rate by 10x every 400 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=400, gamma=0.1)
save_inter = 1

if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)

# Fix: join with the directory separator; plain '+' produced paths like
# './saved_weights/test1trainlog.log'.
logfile = os.path.join(args.save_dir, 'trainlog.log')

if __name__ == '__main__':
    trainlog(logfile)  # route the run's logging output to the log file

    # Gather the full training configuration in one place, then launch.
    train_config = dict(
        model=net,
        epoch_num=800,
        start_epoch=0,
        optimizer=optimizer,
        criterion=criterion,
        exp_lr_scheduler=exp_lr_scheduler,
        data_set=data_set,
        data_loader=data_loader,
        save_dir=args.save_dir,
        print_inter=10,
        val_inter=1,
    )
    train(**train_config)
