#coding=utf-8
from __future__ import division
import re
import mxnet as mx
import os, urllib ,sys
import cv2
import numpy as np
import pandas as pd
import logging
sys.path.append('../utils/')
from mxnet_helper import config_logging,download_model , get_fine_tune_model, get_iterators,\
                        get_lr_scheduler

data_shape = (3,320,320)  # input size fed to the model: (channels, height, width)
valresize = 360  # resize parameter for validation images, passed to get_iterators -- exact semantics defined in mxnet_helper; confirm there

# configure
num_classes = 100  # output size of the new fc1 classification head
batch_per_gpu = 16
isgpu = True
num_gpus = 2
batch_size = batch_per_gpu* num_gpus  # global batch size across all devices
modelname = 'Resnet50'  # also used as the on-disk directory name for checkpoints
mode = 'finetune'  #'finetune', 'last_fc_only', 'finetune_lastfc'
###############################
# download pretrained models  #
###############################
# download_model('http://data.mxnet.io/models/imagenet-11k/resnet-152/resnet-152',0,modelname)
download_model('http://data.mxnet.io/models/imagenet/resnet/50-layers/resnet-50', 0, modelname)
print('loading model...')
# Load the pretrained ResNet-50 checkpoint (epoch 0) from '<modelname>/ori_model/'.
sym, arg_params, aux_params = mx.model.load_checkpoint('{0}/ori_model/{0}'.format(modelname), 0)
all_layers = sym.get_internals()
# Last conv feature map: [batch, 2048, 10, 10] for a 320x320 input (320/32 = 10).
net = all_layers['relu1_output']
# Reshape special value -3 merges two consecutive dims: [batch, 2048, 100].
net = mx.sym.reshape(net, shape=(0, 0, -3), name='reshape')
# Bilinear pooling: per-sample X . X^T -> [batch, 2048, 2048].
net = mx.symbol.batch_dot(net, net, transpose_b=True, name='bilinear')
net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc1')
new_sym = mx.symbol.SoftmaxOutput(data=net, name='softmax')
# Drop the pretrained fc1 weights: the new fc1 has a different output size and
# must be trained from scratch (allow_missing=True handles it at fit time).
# Plain dict comprehension; the original wrapped it in a redundant dict() call.
new_args = {k: arg_params[k] for k in arg_params if 'fc1' not in k}

# Sanity-check the shapes flowing through the new bilinear head.
# NOTE(review): infer_shape is probed with a 640x320 input rather than the
# training data_shape (3,320,320) -- presumably a deliberate non-square test;
# confirm before relying on the printed shapes for training.
internals = new_sym.get_internals()
print(internals)
_, out_shapes, _ = internals.infer_shape(data=(10, 3, 640, 320))
shape_dict = dict(zip(internals.list_outputs(), out_shapes))
# Print shapes for the layers of interest; add 'relu1' to also inspect the
# raw feature map. Single-argument print(...) is identical under Python 2
# (parentheses parse as grouping) and Python 3 compatible.
for layer_name, layer_shape in shape_dict.items():
    if any(key in layer_name for key in ('reshape', 'bilinear', 'fc1')):
        print((layer_name, layer_shape))

# print (new_sym.list_arguments())
# print len(new_sym.infer_shape(data=(10,3,320,320))[0])

modedir = 'test'
fixed_param_names = None


# Toy sanity demo of mx.nd.batch_dot:
# A is (2, 2, 3), B is (2, 3, 2) -> batch_dot(A, B) is (2, 2, 2),
# i.e. an independent matrix product per leading (batch) index.
A = mx.nd.array(np.arange(1, 13).reshape(2, 2, 3))
B = mx.nd.array(np.arange(13, 25).reshape(2, 3, 2))

# Single-argument print(...) keeps the same output under Python 2 while
# remaining valid Python 3 (the original used Py2-only print statements).
print(A.asnumpy())
print(B.asnumpy())

print(mx.nd.batch_dot(A, B).asnumpy())

# ###########################
# # prepare finetune models #
# ###########################
# if mode == 'finetune':
#     (new_sym, new_args) = get_fine_tune_model(sym, arg_params, num_classes)
#     fixed_param_names = None
#     modedir = 'finetuned_model'
# elif mode == 'last_fc_only':
#     (new_sym, new_args) = get_fine_tune_model(sym, arg_params, num_classes)
#     modedir = 'lastfc_model'
#     layers_to_tune = 'fc1'
#     re_prog = re.compile(layers_to_tune)
#     fixed_param_names = [name for name in new_sym.list_arguments() if not re_prog.match(name)]
#     para_to_tune = [name for name in new_sym.list_arguments() if re_prog.match(name)]
#     print('fixed_param_names:\n',fixed_param_names)
#     print('para_to_tune:\n',para_to_tune)
# elif mode == 'finetune_lastfc':
#     new_sym, new_args, aux_params = mx.model.load_checkpoint('{0}/lastfc_model/{0}'.format(modelname), 127)
#     fixed_param_names = None
#     modedir = 'finetune_lastfc_model'
#
# else:
#     raise ValueError('train mode wrong.')
# if not os.path.exists('{0}/{1}'.format(modelname,modedir)):
#     os.mkdir('{0}/{1}'.format(modelname,modedir))
#
#
# # logging
# logger = logging.getLogger('mylogger')
# head = '%(asctime)-15s %(message)s'
#
# logfilepath = '{0}/{1}/damnlog.log'.format(modelname,modedir)
# logging.basicConfig(filename =logfilepath, level=logging.DEBUG, format=head)
#
# console = logging.StreamHandler()
# console.setLevel(logging.DEBUG)
# formatter = logging.Formatter(head)
# console.setFormatter(formatter)
# logging.getLogger('').addHandler(console)
#
#
# # prepare iterators
# print('preparing image iters...')
# train_rec_path = '../data/trainall_train.rec'
# val_rec_path = '../data/trainall_val.rec'
# train_lst_path = '../data/trainall_train.lst'
# train_idx_path = '../data/trainall_train.idx'
# val_idx_path = '../data/trainall_val.idx'
# (train, val) = get_iterators(train_rec_path, train_idx_path,
#                    val_rec_path , val_idx_path, batch_size,data_shape,valresize)
#
# ############
# # training #
# ############
# begin_epoch=0
# ini_lr = 0.01
# lr_factor = 0.1
# step_epochs = [10,20]   # at these epochs the learning rate is multiplied by lr_factor (new lr = current lr * lr_factor)
# num_examples = 14963
# lr , lr_scheduler = get_lr_scheduler(begin_epoch,
#                                      ini_lr,
#                                      lr_factor,
#                                      step_epochs,
#                                      num_examples ,
#                                      batch_size)
#
# optimizer_params = {
#             'learning_rate': lr,   # lr corresponding to begin_epoch (begin_epoch is not necessarily 0)
#             'momentum' : 0,
#             'wd' : 0,
#             'lr_scheduler': lr_scheduler}
#
#
#
# checkpoint = mx.callback.do_checkpoint('{0}/{1}/{0}'.format(modelname,modedir),1) # save a checkpoint every epoch (period=1; the original comment said "every two epochs" -- inconsistent with the argument)
# speedometer = mx.callback.Speedometer(batch_size, 10)
#
# batchcallbacks = speedometer
# # epochcallbacks = checkpoint
# epochcallbacks = None
#
# if isgpu:
#     devs = [mx.gpu(i) for i in range(num_gpus)]
# else:
#     devs = mx.cpu()
#
# mod = mx.mod.Module(symbol=new_sym, context=devs,fixed_param_names = fixed_param_names)
# mod.fit(train, val,
#         num_epoch=30,
#         arg_params=new_args,
#         aux_params=aux_params,
#         allow_missing=True,
#         batch_end_callback=batchcallbacks,
#         epoch_end_callback=epochcallbacks,
#         kvstore='device',
#         optimizer='sgd',
#         optimizer_params=optimizer_params,
#         initializer=mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
#         eval_metric='acc')
#
