#coding=utf-8
from __future__ import division
import re
import mxnet as mx
import os, urllib ,sys
import cv2
import numpy as np
import pandas as pd
import logging
sys.path.append('../utils/')
# from mxnet_helper import config_logging,download_model , get_fine_tune_model, get_iterators,\
#                         get_lr_scheduler
from mxnet_helper import get_fine_tune_model,download_model,get_iterators,get_lr_scheduler,get_bilinear_model

# ---------------- configuration ----------------
data_shape = (3, 320, 320)        # input size fed to the network (C, H, W)
valresize = 360                   # resize parameter for validation images (passed to get_iterators)

num_classes = 100                 # number of target categories
batch_per_gpu = 16                # mini-batch size per device
isgpu = True                      # run on GPU(s) instead of CPU
num_gpus = 1                      # how many GPUs share the global batch
batch_size = num_gpus * batch_per_gpu   # effective global batch size
modelname = 'resnext-101'         # base architecture / checkpoint folder name
# training regime -- one of: 'finetune', 'last_fc_only', 'finetune_lastfc',
# 'bilinear_step1', 'bilinear_step2', 'bilinear_step3', 'continue'
mode = 'finetune'
modelfolder = '/media/hszc/model/zhangchi/BaiduImage_model/'   # checkpoint root directory
########################################
# download / load pretrained base model #
########################################
# One-off helpers to fetch the original ImageNet checkpoints; left commented
# out because the files are assumed to already exist under modelfolder.
# download_model('http://data.mxnet.io/models/imagenet-11k/resnet-152/resnet-152',0,modelfolder,modelname)
# download_model('http://data.mxnet.io/models/imagenet/resnet/50-layers/resnet-50', 0,modelfolder,modelname)
# download_model('http://data.mxnet.io/models/imagenet/vgg/vgg16', 0,modelfolder,modelname)
print('loading model...')
# Load the epoch-0 checkpoint of the base model: returns the symbol graph
# plus the trained weight (arg) and auxiliary-state dicts.
sym, arg_params, aux_params = mx.model.load_checkpoint('{0}/{1}/ori_model/{1}'.format(modelfolder,modelname), 0)



###########################
# prepare finetune models #
###########################
if mode == 'finetune':
    (new_sym, new_args) = get_fine_tune_model(sym, arg_params, num_classes,'flatten0')
    fixed_param_names = None
    modedir = os.path.join(modelfolder,modelname ,'finetuned_model')
elif mode == 'last_fc_only':
    (new_sym, new_args) = get_fine_tune_model(sym, arg_params, num_classes)
    modedir = os.path.join(modelfolder, modelname,'lastfc_model')
    layers_to_tune = 'fc1'
    re_prog = re.compile(layers_to_tune)
    fixed_param_names = [name for name in new_sym.list_arguments() if not re_prog.match(name)]
    para_to_tune = [name for name in new_sym.list_arguments() if re_prog.match(name)]
    print('fixed_param_names:\n',fixed_param_names)
    print('para_to_tune:\n',para_to_tune)

elif mode == 'finetune_lastfc':
    new_sym, new_args, aux_params = mx.model.load_checkpoint('{0}/{1}/lastfc_model/{1}'.format(modelfolder,modelname), 127)
    fixed_param_names = None
    modedir = os.path.join(modelfolder, modelname,'finetune_lastfc_model')

elif mode == 'bilinear_step1':
    new_sym, new_args, fixed_param_names = get_bilinear_model(sym, arg_params, num_classes,step=1)
    modedir = os.path.join(modelfolder,modelname ,'bilinear_step1_model')
elif mode == 'bilinear_step2':
    new_sym, arg_params, aux_params = mx.model.load_checkpoint('{0}/{1}/bilinear_step1_model/{1}'.format(modelfolder,modelname), 100)
    new_sym, new_args, fixed_param_names = get_bilinear_model(new_sym, arg_params, num_classes,step=2)
    modedir = os.path.join(modelfolder,modelname ,'bilinear_step2_model')
elif mode == 'bilinear_step3':
    new_sym, new_args, aux_params = mx.model.load_checkpoint('{0}/{1}/bilinear_step2_model/{1}'.format(modelfolder,modelname), 110)
    modedir = os.path.join(modelfolder,modelname ,'bilinear_step3_model')
    fixed_param_names = None
elif mode == 'continue':
    new_sym, new_args, aux_params = mx.model.load_checkpoint('{0}/{1}/bilinear_step1_model/{1}'.format(modelfolder, modelname), 99)
    _, _, fixed_param_names = get_bilinear_model(sym, arg_params, num_classes,step=1)
    modedir = os.path.join(modelfolder,modelname ,'bilinear_step1_model')
else:
    raise ValueError('train mode wrong.')

# Sanity-check the (possibly rewired) symbol graph: list every internal node
# and infer each node's output shape for a dummy batch, so layer shapes can
# be verified after network surgery. shape_dict maps output names
# (e.g. 'fc1_output') to their inferred shapes -- print entries when debugging.
internals = new_sym.get_internals()
print(internals)  # parenthesised: works identically under Python 2 and 3
# Batch size 10 is arbitrary for shape inference; reuse the configured
# data_shape instead of repeating the hard-coded (3,320,320).
_, out_shapes, _ = internals.infer_shape(data=(10,) + data_shape)
shape_dict = dict(zip(internals.list_outputs(), out_shapes))

# Create the per-mode output directory on first run.
if not os.path.exists(modedir):
    os.makedirs(modedir)

# ---- logging: every record goes to a per-mode logfile AND to the console ----
head = '%(asctime)-15s %(message)s'

logfilepath = '{0}/trainlog.log'.format(modedir)
# Root logger writes DEBUG+ records to the training log file.
logging.basicConfig(filename =logfilepath, level=logging.DEBUG, format=head)

# Mirror the same records to the console so progress is visible live.
# (Removed an unused `logging.getLogger('mylogger')` local that was never
# referenced anywhere in the script.)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(head))
logging.getLogger('').addHandler(console)
#
#
# ---- prepare data iterators ----
# Build train/val iterators from pre-packed RecordIO files. The unused
# `train_lst_path` variable was removed (assigned but never referenced).
print('preparing image iters...')
train_rec_path = '../data/trainall_train.rec'
val_rec_path = '../data/trainall_val.rec'
train_idx_path = '../data/trainall_train.idx'
val_idx_path = '../data/trainall_val.idx'
(train, val) = get_iterators(train_rec_path, train_idx_path,
                   val_rec_path , val_idx_path, batch_size,data_shape,valresize)
#
############
# training #
############
begin_epoch=0
ini_lr = 0.01
lr_factor = 0.1
step_epochs = [10,20]   # at these epochs the learning rate is scaled by lr_factor (new lr = current lr * lr_factor)
num_examples = 14963
# Recover the learning rate that corresponds to begin_epoch plus a step-decay
# scheduler, so a resumed run continues with the correct lr.
lr , lr_scheduler = get_lr_scheduler(begin_epoch,
                                     ini_lr,
                                     lr_factor,
                                     step_epochs,
                                     num_examples ,
                                     batch_size)

optimizer_params = {
            'learning_rate': lr,   # lr matching begin_epoch (begin_epoch is not necessarily 0)
            'momentum' : 0.0,
            'wd' : 0,
            'lr_scheduler': lr_scheduler}



checkpoint = mx.callback.do_checkpoint('{0}/{1}'.format(modedir,modelname),1) # save a checkpoint every epoch (period argument is 1)
speedometer = mx.callback.Speedometer(batch_size, 10)  # log throughput every 10 batches

batchcallbacks = speedometer
epochcallbacks = checkpoint

# Choose compute devices: one context per GPU, or a single CPU context.
if isgpu:
    devs = [mx.gpu(i) for i in range(num_gpus)]
else:
    devs = mx.cpu()

# fixed_param_names freezes those weights during training (None = train all).
mod = mx.mod.Module(symbol=new_sym, context=devs,fixed_param_names = fixed_param_names)
# allow_missing=True lets parameters absent from new_args (e.g. the freshly
# added classifier layer) be filled by the Xavier initializer, while all
# other weights come from new_args/aux_params.
mod.fit(train, val,
        begin_epoch = begin_epoch,
        num_epoch=30,
        arg_params=new_args,
        aux_params=aux_params,
        allow_missing=True,
        batch_end_callback=batchcallbacks,
        epoch_end_callback=epochcallbacks,
        kvstore='device',
        optimizer='sgd',
        optimizer_params=optimizer_params,
        initializer=mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
        eval_metric='acc')
