#coding=utf8
from __future__ import division
import os
import pandas as pd
from sklearn.model_selection import StratifiedKFold,train_test_split
k_folds = 7
# test_size = 0.15 # val size when training model on a fold

# # Generate the k-fold data splits
# mx_trainlst = pd.read_csv('../data/trainall_train.lst',header=None,names=['idx','mxlabel','img_path'],sep='\t')
#
#
# skf = StratifiedKFold(n_splits= k_folds, shuffle=True, random_state=37)
# if not os.path.exists('{0}_fold_data'.format(k_folds)):
#     os.mkdir('{0}_fold_data'.format(k_folds))
#
# dataset = []
# for i,(train_idx, test_idx) in enumerate(skf.split(mx_trainlst[['idx','img_path']], mx_trainlst['mxlabel'])):
#     dataset = mx_trainlst.loc[train_idx,:]
#     dataset.index = range(dataset.shape[0])
#
#     trainpart, valpart = train_test_split(dataset, test_size=test_size, stratify=dataset['mxlabel'])
#     trainpart.to_csv('{0}_fold_data/trainall_train{1}.lst'.format(k_folds, i),
#                      index=False, sep='\t', header=None)
#     valpart.to_csv('{0}_fold_data/trainall_val{1}.lst'.format(k_folds, i),
#                      index=False, sep='\t', header=None)
#
#     mx_trainlst.loc[test_idx,:].to_csv('{0}_fold_data/trainall_test{1}.lst'.format(k_folds,i),
#                                        index=False,sep='\t',header=None)
#     print('fold-{0},train_len:{1},val_len:{2},test_len:{3}'\
#           .format(i, len(trainpart), len(valpart),len(test_idx)))


# # run the following .sh file in a terminal (inside the 'k_fold_data' folder)
# with open('{0}_fold_data/gen_rec.sh'.format(k_folds),'a') as file:
#     for i in range(k_folds):
#         print i
#         file.write('python ~/anaconda2/lib/python2.7/site-packages/mxnet/tools/im2rec.py '
#                    '--resize 512 trainall_train{0} /media/hszc/data/BaiduImage/dataset/trainall/\n'.format(i))
#         file.write('python ~/anaconda2/lib/python2.7/site-packages/mxnet/tools/im2rec.py '
#                    '--resize 512 trainall_val{0} /media/hszc/data/BaiduImage/dataset/trainall/\n'.format(i))

#

import re
import mxnet as mx
import os, urllib ,sys
import cv2
import numpy as np
import pandas as pd
import logging
sys.path.append('../utils/')
from mxnet_helper import download_model , get_fine_tune_model, get_iterators,\
                        get_lr_scheduler

data_shape = (3,320,320)  # (channels, height, width) of the images fed into the model
valresize = 360  # resize parameter passed to get_iterators for validation images — presumably shorter-side resize before cropping; confirm in mxnet_helper

num_classes = 100  # number of output classes for the fine-tuned classifier head
batch_per_gpu = 16  # per-device mini-batch size
isgpu = True  # if True, train on num_gpus GPUs; otherwise fall back to CPU
num_gpus = 2
batch_size = batch_per_gpu * num_gpus  # effective global batch size across devices
modelname = 'resnext-101'  # name of the pretrained backbone checkpoint to fine-tune
modelfolder = '/media/hszc/model/zhangchi/BaiduImage_model/'  # root directory holding pretrained and fine-tuned checkpoints

# k-fold training: fine-tune the pretrained backbone once per fold.
for fold_idx in range(k_folds):
    # Resume hack: folds 0 and 1 were apparently trained in a previous run.
    # NOTE(review): remove or parameterize this skip when starting fresh.
    if fold_idx <= 1:
        continue

    # Load the pretrained checkpoint (epoch 0) and replace the classifier:
    # a fresh num_classes-way output head is attached on top of 'flatten0'.
    sym, arg_params, aux_params = mx.model.load_checkpoint(
        '{0}/{1}/ori_model/{1}'.format(modelfolder, modelname), 0)
    (new_sym, new_args) = get_fine_tune_model(sym, arg_params, num_classes, 'flatten0')
    fixed_param_names = None  # None => every layer is trainable (full fine-tune)

    # Per-fold output directory for checkpoints and logs.
    modedir = os.path.join(modelfolder, modelname, 'fold_{0}_finetune'.format(fold_idx))
    if not os.path.exists(modedir):
        os.makedirs(modedir)  # makedirs (not mkdir) so missing parents are created too

    # Per-fold logging.
    # BUG FIX: the original called logging.basicConfig() inside the loop, but
    # basicConfig is a no-op once the root logger already has handlers, so every
    # fold after the first kept logging into the first fold's file. It also
    # added a new StreamHandler to the root logger each iteration, duplicating
    # console output. Instead we tear down the previous fold's handlers and
    # attach exactly one FileHandler + one StreamHandler per fold.
    head = '%(asctime)-15s %(message)s'
    formatter = logging.Formatter(head)
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG)
    for old_handler in list(root_logger.handlers):  # drop handlers from the previous fold
        root_logger.removeHandler(old_handler)
        old_handler.close()

    logfilepath = '{0}/trainlog.log'.format(modedir)
    file_handler = logging.FileHandler(logfilepath)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    root_logger.addHandler(file_handler)

    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    root_logger.addHandler(console)

    # Prepare the RecordIO iterators for this fold's train/val splits.
    print('preparing image iters...')
    train_rec_path = '{0}_fold_data/trainall_train{1}.rec'.format(k_folds, fold_idx)
    val_rec_path = '{0}_fold_data/trainall_val{1}.rec'.format(k_folds, fold_idx)
    train_idx_path = '{0}_fold_data/trainall_train{1}.idx'.format(k_folds, fold_idx)
    val_idx_path = '{0}_fold_data/trainall_val{1}.idx'.format(k_folds, fold_idx)
    (train, val) = get_iterators(train_rec_path, train_idx_path,
                                 val_rec_path, val_idx_path, batch_size,
                                 data_shape, valresize)

    ############
    # training #
    ############
    begin_epoch = 0
    ini_lr = 0.01
    lr_factor = 0.1
    step_epochs = [10, 20]  # epochs at which the lr is multiplied by lr_factor
    num_examples = 14963    # training-set size; used to convert epoch steps to batch steps
    lr, lr_scheduler = get_lr_scheduler(begin_epoch,
                                        ini_lr,
                                        lr_factor,
                                        step_epochs,
                                        num_examples,
                                        batch_size)

    optimizer_params = {
        'learning_rate': lr,  # lr matching begin_epoch (may be mid-schedule when resuming)
        'momentum': 0.0,
        'wd': 0,
        'lr_scheduler': lr_scheduler}

    # Save a checkpoint every 2 epochs; report throughput every 10 batches.
    checkpoint = mx.callback.do_checkpoint('{0}/{1}'.format(modedir, modelname), 2)
    speedometer = mx.callback.Speedometer(batch_size, 10)

    batchcallbacks = speedometer
    epochcallbacks = checkpoint

    if isgpu:
        devs = [mx.gpu(i) for i in range(num_gpus)]
    else:
        devs = mx.cpu()

    mod = mx.mod.Module(symbol=new_sym, context=devs, fixed_param_names=fixed_param_names)
    # allow_missing=True lets the freshly-initialized classifier head be filled
    # in by the initializer while the backbone keeps its pretrained weights.
    mod.fit(train, val,
            begin_epoch=begin_epoch,
            num_epoch=30,
            arg_params=new_args,
            aux_params=aux_params,
            allow_missing=True,
            batch_end_callback=batchcallbacks,
            epoch_end_callback=epochcallbacks,
            kvstore='device',
            optimizer='sgd',
            optimizer_params=optimizer_params,
            initializer=mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
            eval_metric='acc')





