#coding=utf8
from __future__ import division
from torch import nn
import torch
import torch.utils.data as torchdata
from torchvision import datasets,transforms
import os,time
import pandas as pd
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
from utils.train import trainlog,train,train_eval_epoch
from dataset.augment import FaceAug
from dataset.facedata import LFW_np,MS1M
from convert_weight.incep_res_v1_converter import tf_to_ptch
from models.inception_res_v1 import InceptionResnetV1
import numpy as np
import torchvision.models as models

from torch.optim.lr_scheduler import MultiStepLR
from models.auxloss import loose_r_loss


# Restrict this run to the first two GPUs (DataParallel below uses both).
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

# Dummy inputs with the network's expected shape: (batch=10, channels=1,
# H=112, W=96). NOTE(review): x1/x2 are never used later in this script —
# presumably left over from a tracing/export experiment; kept for
# compatibility. torch.autograd.Variable has been a deprecated no-op since
# PyTorch 0.4 — plain tensors participate in autograd directly.
x1 = torch.empty(10, 1, 112, 96, dtype=torch.float32)
x2 = torch.empty(10, 1, 112, 96, dtype=torch.float32)


# Build the Inception-ResNet-V1 backbone for single-channel (grayscale)
# faces with a 75322-identity classification head.
model = InceptionResnetV1(num_classes=75322, image_channel=1)
model.eval()

# The TF-trained weights are a pickled dict stored inside a .npy file;
# `[()]` unwraps the 0-d object array. allow_pickle=True is required since
# NumPy 1.16.3, where pickle loading became opt-in — without it np.load
# raises ValueError on object arrays.
pt_weights = model.state_dict()
tf_weights = np.load('./trained_weights.npy', allow_pickle=True)[()]
# Map TensorFlow parameter names/layouts onto this model's state_dict keys.
converted_weights = tf_to_ptch(tf_weights, pt_weights)

model.load_state_dict(converted_weights)
# Wrap for multi-GPU data parallelism, then move parameters to GPU.
model = torch.nn.DataParallel(model)
model = model.cuda()

# Run-time knobs forwarded to the fine-tuning loop below.
usecuda = 2      # GPU-usage flag consumed by train_eval_epoch
save_inter = 1   # checkpoint interval, in epochs
batch_size = 32  # NOTE(review): unused here — the loaders hard-code 640/64

# ---- data ----
# LFW verification pairs stored as flat numpy arrays on disk.
images = np.load('/home/hszc/zhangchi/channel-prune/data/npy_2/images.npy')
labels = np.load('/home/hszc/zhangchi/channel-prune/data/npy_2/labels.npy')

# Group into image pairs and move the trailing channel axis into NCHW
# order: (n_pairs, 2, 1, 112, 96).
images = np.transpose(images.reshape(-1, 2, 112, 96, 1), (0, 1, 4, 2, 3))

data_set = {
    'train': MS1M(root_path='/media/hszc/data1/face_data/ms1m/ms1m_aligned',
                  list_csv='/home/hszc/zhangchi/channel-prune/train_list.csv',
                  transform=FaceAug(tg_size=(112, 96))),
    'val': LFW_np(images, labels),
}

# Loaders: large shuffled batches for MS1M training, fixed-order batches
# for LFW evaluation; pin_memory speeds up host-to-GPU transfer.
data_loader = {
    'train': torchdata.DataLoader(data_set['train'], batch_size=640,
                                  num_workers=4, shuffle=True,
                                  pin_memory=True),
    'val': torchdata.DataLoader(data_set['val'], batch_size=64,
                                num_workers=4, shuffle=False,
                                pin_memory=True),
}


# ---- criterion, optimizer, LR schedule ----
optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9,
                      weight_decay=1e-5)
# [classification loss, auxiliary loss from models.auxloss]
criterion = [nn.CrossEntropyLoss(), loose_r_loss()]
# Decay LR by 10x at epochs 70 and 100.
exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[70, 100],
                                            gamma=0.1)

# ---- checkpoints & logging ----
save_dir = './finetune_result/'
logfile = './finetune_result/trainlog.log'
# Create the output directory up front; opening the log file (and later
# checkpoint saves) would otherwise fail with FileNotFoundError on a
# fresh machine.
os.makedirs(save_dir, exist_ok=True)
trainlog(logfile)


# Fine-tune for 100 epochs; each call runs one training pass, one
# evaluation pass, and periodic checkpointing to save_dir.
# NOTE(review): with range(100) the scheduler milestone at epoch 100 is
# never reached — confirm whether 100 epochs or that milestone is intended.
for epoch in range(100):
    train_eval_epoch(model, optimizer, criterion, epoch, exp_lr_scheduler,
                     data_set, data_loader, usecuda, save_inter, save_dir)