# -*- coding: utf-8 -*-
from __future__ import print_function, division

## This code trains a baseline ResNet-50 model (the header said Duke, but the
## default --data_dir points at a Market-1501 directory — verify which dataset is intended).
## It will be further extended with a triplet loss soon.
## There are two ways to train the model: use all the training samples, or
## use only part of them and keep the rest for validation.

import argparse
import torch
import torch.nn as nn
import torch.optim as optim 
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
from PIL import Image
import time
import os
import json
from shutil import copyfile
from model import ft_net
from market import Market
from preprocessor import Preprocessor

version = torch.__version__
 
## set default parameters

parser = argparse.ArgumentParser(description='Training')

parser.add_argument('--gpu_ids', default='1', type=str)
parser.add_argument('--data_dir', default='./market_dataset', type=str)
parser.add_argument('--batch_size', default=16, type=int)

opt = parser.parse_args()

data_dir = opt.data_dir

str_ids = opt.gpu_ids.split(',') 

gpu_ids = []
for str_id in str_ids:
    gid = int(str_id)
    if gid >= 0:
        gpu_ids.append(gid)
if len(gpu_ids)>0:
    torch.cuda.set_device(gpu_ids[0])


## load the data and do some data augmentation

transform_train_list=[
    transforms.Resize((288,144), interpolation=3),
    transforms.RandomCrop((256,128)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]

transform_test_list=[
    transforms.Resize(size=(256,128), interpolation=3),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])    
]

data_transforms = {
    'train': transforms.Compose(transform_train_list),
    'test': transforms.Compose(transform_test_list)
}
dataset = Market(data_dir)

image_datasets={}
image_datasets['train']=Preprocessor(dataset.train, root=os.path.join(dataset.images_dir, dataset.train_path),
                     transform=data_transforms['train'])

#datasets.ImageFolder(os.path.join(data_dir,'train'), data_transforms['train'])

#image_datasets['train'] = datasets.ImageFolder(os.path.join(data_dir,'train_all'),
                                                #data_transforms['train'])
#image_datasets['val'] =
dataloaders = {}
dataloaders['train'] = torch.utils.data.DataLoader(image_datasets['train'], batch_size=opt.batch_size,
                                                shuffle=True, num_workers=8) 

dataset_size = len(image_datasets['train'])


def _parse_data( inputs):
    imgs, _, pids, _ = inputs
    inputs = imgs.cuda()
    pids = pids.cuda()
    return inputs, pids

use_gpu = torch.cuda.is_available()
since=time.time()

inputss =next(iter(dataloaders['train']))
inputs, classes = _parse_data(inputss)
print(time.time()-since)

## train the model

y_loss = {}
y_loss['train'] = []
y_err = {}
y_err['train'] =[]

def train_model(model, criterion_c, optimizer, scheduler, criterion_t=None, num_epochs=40):
    since=time.time()

    best_model_wts = model.state_dict()
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch,num_epochs-1))
        print('-' * 10)
        scheduler.step()
        model.train(True)
        running_loss = 0.00
        running_corrects = 0.00
        for data in dataloaders['train']:
            inputs, labels =_parse_data(data) 
            now_batch_size, c, h, w = inputs.shape
            if now_batch_size == 1:
                continue
            if use_gpu:
                inputs = Variable(inputs.cuda())
                labels = Variable(labels.cuda())

            optimizer.zero_grad()

            outputs = model(inputs)

            _, preds = torch.max(outputs.data, 1)
            if not criterion_t:
                loss = criterion_c(outputs, labels)
            else:
                loss_c = criterion_c(outputs, labels)   
                loss_t = criterion_t()
                loss = criterion_c + 0.5*criterion_t  # we set beat as 0.5, which is inplemented by HHL
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            running_corrects += float(torch.sum(labels.data==preds))   
        epoch_loss = running_loss/dataset_size
        epoch_corrects = running_corrects/dataset_size   

        print('Training Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_corrects))

        y_loss['train'].append(epoch_loss)
        y_err['train'].append(1-epoch_corrects)

        last_model_wts = model.state_dict()
        if epoch % 10 ==9:
            save_network(model, epoch)
            
        draw_curve(epoch)

    time_elapsed = time.time()-since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60 ))

    model.load_state_dict(last_model_wts)
    save_network(model, 'last')


## draw Curve

plt.switch_backend('agg')
x_epoch=[]
fig = plt.figure()
ax0 = fig.add_subplot(121, title='loss')
ax1 = fig.add_subplot(122, title='top1err')
def draw_curve(current_epoch):
    x_epoch.append(current_epoch)
    ax0.plot(x_epoch, y_loss['train'], 'bo-', label='train')
    ax1.plot(x_epoch, y_err['train'], 'ro-', label='train')
    if current_epoch==0:
        ax0.legend()
        ax1.legend()
    fig.savefig(os.path.join('./model_duke_train.jpg'))

## save the model
def save_network(network, epoch_label):
    save_filename = 'net_%s.pth' %epoch_label
    save_path = os.path.join('./pcb_baseline/model_path',save_filename)
    torch.save(network.cpu().state_dict(), save_path)
    if torch.cuda.is_available:
        network.cuda(gpu_ids[0])


## load the pretrained model and reset the fully connected layer
model = ft_net(751)
if use_gpu:
    model = model.cuda()

criterion_c = nn.CrossEntropyLoss()
criterion_t = None

#criterion_t = TripletLoss(margin=0.3).to(device) ## we set margin as 0.3, which is inplemented by HHL

ignored_params = list(map(id, model.model.fc.parameters()))+list(map(id, model.classifier.parameters()))

base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())

optimizer_ft = optim.SGD([
    {'params': base_params, 'lr':0.01},
    {'params': model.model.fc.parameters(), 'lr': 0.1},
    {'params': model.classifier.parameters(), 'lr': 0.1}
], weight_decay=5e-4, momentum=0.9, nesterov=True)

exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.1) # original code is 40


## main training code 
dir_name = os.path.join('./pcb_baseline/model_resnet')
if not os.path.isdir(dir_name):
    os.mkdir(dir_name)
    copyfile('./pcb_baseline/train_wareid.py', dir_name + '/train.py')
    copyfile('./pcb_baseline/model.py', dir_name + '/model.py')

with open('%s/opts.json'%dir_name,'w') as fp:
    json.dump(vars(opt), fp, indent=1)

model = train_model(model, criterion_c, optimizer_ft, exp_lr_scheduler, num_epochs=60)