# -*- coding: utf-8 -*-

from __future__ import print_function, division

import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import scipy.io
# Fine-tuning utilities: adapt a trained re-ID model to extra identities.
# Preprocessing pipeline applied to every fine-tuning image:
# resize to 256x128, convert to a CHW float tensor in [0, 1], then
# normalize with the standard ImageNet channel statistics.
transform_train_list = [
        # interpolation=3 is PIL.Image.BICUBIC (torchvision's legacy int code).
        transforms.Resize((256, 128), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]

# Move tensors to CUDA during fine-tuning when True.
use_gpu = True

def finetune_model(model, label_base, finetune_dir, num_epochs=10, batch_size=4,
                   market_dir='./dataset_market/train_v1/', finetune_rate_scale=0.01):
    """Fine-tune ``model`` on a folder of new identities while replaying Market data.

    Runs SGD for ``num_epochs`` epochs over two ImageFolder datasets per epoch:
    the new identities in ``finetune_dir`` (whose labels are remapped into the
    classifier's reserved slots) and the original Market training set, to limit
    forgetting. The backbone is trained with a 10x smaller learning rate than
    the ``model.model.fc`` / ``model.classifier`` heads.

    Args:
        model: network with ``model.model.fc`` and ``model.classifier`` heads
            (PCB variants are not supported).
        label_base: total number of classifier outputs; ImageFolder label ``k``
            for the new identities becomes ``label_base - 1 - k``.
        finetune_dir: ImageFolder-layout directory of the new identities.
        num_epochs: number of passes over both datasets (default 10).
        batch_size: mini-batch size for both dataloaders (default 4).
        market_dir: ImageFolder-layout directory of the Market training set.
        finetune_rate_scale: global multiplier on the learning rates.

    Returns:
        The fine-tuned ``model`` (trained in place).
    """
    finetune_dataset = datasets.ImageFolder(finetune_dir, transforms.Compose(transform_train_list))

    criterion = nn.CrossEntropyLoss()
    # Separate head parameters (higher LR) from backbone parameters (lower LR).
    ignored_params = list(map(id, model.model.fc.parameters())) + \
                     list(map(id, model.classifier.parameters()))
    base_params = [p for p in model.parameters() if id(p) not in ignored_params]
    optimizer = optim.SGD([
             {'params': base_params, 'lr': 0.01 * finetune_rate_scale},
             {'params': model.model.fc.parameters(), 'lr': 0.1 * finetune_rate_scale},
             {'params': model.classifier.parameters(), 'lr': 0.1 * finetune_rate_scale},
         ], weight_decay=5e-4, momentum=0.9, nesterov=True)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.1)

    finetune_loader = torch.utils.data.DataLoader(finetune_dataset, batch_size=batch_size,
                                                  shuffle=False, num_workers=4)
    market_dataset = datasets.ImageFolder(market_dir, transforms.Compose(transform_train_list))
    market_loader = torch.utils.data.DataLoader(market_dataset, batch_size=batch_size,
                                                shuffle=False, num_workers=4)

    model.train(True)
    dataloaders = {'finetune': finetune_loader, 'market': market_loader}
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        for loader_name, loader in dataloaders.items():
            print('dataloader: {}'.format(loader_name))
            for inputs, labels in loader:
                if loader is finetune_loader:
                    # Remap ImageFolder's 0..K-1 labels onto the classifier's
                    # highest slots, counting down from label_base - 1.
                    # NOTE(review): assumes those slots are reserved for new
                    # identities — confirm against the classifier setup.
                    labels = label_base - 1 - labels
                if labels.shape[0] == 1:
                    # Skip size-1 batches; presumably these break a BatchNorm
                    # layer in train mode — TODO confirm and drop_last instead.
                    print('bug, can\'t be 1D tensor. ')
                    print(labels.shape)
                    print(inputs.shape)
                    continue
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward / backward / update
                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
        # Step the LR schedule once per completed epoch, AFTER optimizer.step()
        # as PyTorch >= 1.1 requires. The original stepped before training and
        # again at each epoch start, skipping the first scheduled LR value.
        exp_lr_scheduler.step()
    return model