#coding=utf-8
from __future__ import division
import re
import math
import mxnet as mx
import os, urllib ,sys
import cv2
import numpy as np
import pandas as pd
import logging
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])

def get_fine_tune_model(sym, arg_params, num_classes,layername):
    """Replace the classifier head of a pretrained symbol.

    Cuts the network at `layername`, appends a fresh fully-connected
    layer ('fc1') with `num_classes` outputs plus a softmax, and returns
    the new symbol together with the pretrained weights minus any
    parameter belonging to the replaced 'fc1' layer.
    """
    internals = sym.get_internals()
    body = internals[layername + '_output']
    body = mx.symbol.FullyConnected(data=body, num_hidden=num_classes, name='fc1')
    new_sym = mx.symbol.SoftmaxOutput(data=body, name='softmax')
    # keep every pretrained weight except those of the old classifier head
    new_args = {k: v for k, v in arg_params.items() if 'fc1' not in k}
    return new_sym, new_args

def get_fixed_params(sym, layers_to_tune):
    """Return the argument names of `sym` that should stay frozen.

    Each entry of `layers_to_tune` is treated as a regex; every argument
    whose name matches (from the start) one of those patterns is
    considered trainable, and the remaining arguments are returned as
    the fixed (non-trainable) set.
    """
    arg_names = sym.list_arguments()
    para_to_tune = []
    for pattern in layers_to_tune:
        prog = re.compile(pattern)
        para_to_tune.extend(name for name in arg_names if prog.match(name))
    fixed_param_names = list(set(arg_names) - set(para_to_tune))

    print('fixed_param_names: ', fixed_param_names)
    print('para_to_tune: ', para_to_tune)
    return fixed_param_names


def get_bilinear_model(sym, arg_params, num_classes, step=1):
    """Build a two-step bilinear-CNN fine-tuning symbol.

    step 1: take the pretrained 'relu1' output, add a 1x1 conv
            (dimension reduction) + BN + ReLU, flatten and attach a new
            fc1/softmax head; only conv_1/bn_1/fc1 are trainable.
    step 2: take the step-1 'relu_1' output, apply bilinear pooling
            (spatial outer product + signed sqrt + L2 normalization)
            and attach a new fc1/softmax head; only fc1 is trainable.

    Returns (new_sym, new_args, fixed_param_names).
    Raises ValueError for any other `step` value (previously this path
    crashed with a NameError on the unbound `new_sym`).
    """
    all_layers = sym.get_internals()

    if step == 1:
        net = all_layers['relu1_output']
        net = mx.sym.Convolution(net, kernel=(1, 1), dilate=(1, 1), no_bias=True,
                                 num_filter=512, pad=(0, 0), stride=(1, 1),
                                 workspace=512, name='conv_1')
        net = mx.sym.BatchNorm(net, eps=2e-05, fix_gamma=False, momentum=0.9,
                               use_global_stats=False, name='bn_1')
        net = mx.sym.Activation(net, act_type='relu', name='relu_1')
        net = mx.sym.flatten(net, name='flatten')
        net = mx.sym.FullyConnected(data=net, num_hidden=num_classes, name='fc1')
        new_sym = mx.sym.SoftmaxOutput(data=net, name='softmax')
        # reuse every pretrained weight except the replaced classifier head
        new_args = {k: v for k, v in arg_params.items() if 'fc1' not in k}
        fixed_param_names = get_fixed_params(new_sym,
                                             layers_to_tune=['conv_1', 'bn_1', 'fc1'])
    elif step == 2:
        net = all_layers['relu_1_output']
        # bilinear pooling: collapse spatial dims, then channel outer product
        net = mx.sym.reshape(net, shape=(0, 0, -3), name='reshape')
        net = mx.sym.batch_dot(net, net, transpose_b=True, name='bilinear')
        net = mx.sym.flatten(net, name='flatten')
        # signed square-root followed by L2 normalization (standard B-CNN)
        net = mx.sym.sign(net) * mx.sym.sqrt(mx.sym.abs(net))
        net = mx.sym.L2Normalization(net, name='l2norm')
        net = mx.sym.FullyConnected(data=net, num_hidden=num_classes, name='fc1')
        new_sym = mx.sym.SoftmaxOutput(data=net, name='softmax')
        new_args = {k: v for k, v in arg_params.items() if 'fc1' not in k}
        fixed_param_names = get_fixed_params(new_sym, layers_to_tune=['fc1'])
    else:
        raise ValueError('step must be 1 or 2, got {!r}'.format(step))

    return new_sym, new_args, fixed_param_names

def download_model(url, epoch, modelfolder, modelname):
    """Download a pretrained MXNet model (symbol + params) if absent.

    Files land under <modelfolder>/<modelname>/ori_model/ using MXNet's
    naming scheme (<name>-symbol.json, <name>-NNNN.params). Files that
    already exist are kept and reported instead of re-downloaded.
    """
    target_dir = '{0}/{1}/ori_model'.format(modelfolder, modelname)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    # NOTE(review): urllib.urlretrieve is the Python 2 API
    # (urllib.request.urlretrieve in Python 3).
    jsonfile = '{0}/{1}-symbol.json'.format(target_dir, modelname)
    if os.path.exists(jsonfile):
        print('json file already exists.')
    else:
        urllib.urlretrieve(url + '-symbol.json', jsonfile)

    paramfile = '{0}/{1}-{2:0>4}.params'.format(target_dir, modelname, epoch)
    if os.path.exists(paramfile):
        print('params file already exists.')
    else:
        urllib.urlretrieve(url + '-{:0>4}.params'.format(epoch), paramfile)

def get_iterators(train_rec_path, train_idx_path,
                   val_rec_path , val_idx_path, batch_size,data_shape,valresize):
    """Create the (train, val) pair of MXNet ImageIter iterators.

    The training iterator shuffles and applies random crop / resize /
    mirror augmentation; the validation iterator only resizes the
    shorter edge to `valresize` and is fully deterministic.
    """
    train_iter = mx.img.ImageIter(
        path_imgrec=train_rec_path,
        path_imgidx=train_idx_path,
        label_width=1,
        data_shape=data_shape,
        batch_size=batch_size,
        shuffle=True,
        rand_crop=True,
        rand_resize=True,
        rand_mirror=True,
    )
    val_iter = mx.img.ImageIter(
        path_imgrec=val_rec_path,
        path_imgidx=val_idx_path,
        label_width=1,
        data_shape=data_shape,
        batch_size=batch_size,
        resize=valresize,
        rand_crop=False,
        rand_resize=False,
        rand_mirror=False,
    )
    return (train_iter, val_iter)



def get_lr_scheduler(begin_epoch,
                     ini_lr,
                     lr_factor,
                     step_epochs,
                     num_examples ,
                     batch_size):
    """Build the (possibly resumed) learning rate and its scheduler.

    When resuming at `begin_epoch`, the decay factor is re-applied once
    for every step boundary already passed so the returned `lr` matches
    where training left off. Remaining boundaries are converted to
    iteration counts for a MultiFactorScheduler.

    Returns (lr, lr_scheduler); lr_scheduler is None when every decay
    step is already behind `begin_epoch` (the original code crashed in
    MultiFactorScheduler on an empty step list).
    """
    # Batches per epoch must be a positive int: with `from __future__
    # import division` the original produced float steps, which
    # MultiFactorScheduler does not accept.
    epoch_size = max(int(math.ceil(num_examples / batch_size)), 1)

    lr = ini_lr
    for s in step_epochs:
        if begin_epoch >= s:
            lr *= lr_factor
    if lr != ini_lr:
        logging.info('Adjust learning rate to %e for epoch %d', lr, begin_epoch)

    steps = [epoch_size * (x - begin_epoch) for x in step_epochs
             if x - begin_epoch > 0]
    if not steps:
        # nothing left to schedule
        return lr, None
    lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=lr_factor)
    return lr, lr_scheduler


def get_image(imgpath, img_sz, crop_sz ,isflip=False, crop_type='center'):
    """Load an image and return it as a [1, 3, crop_sz, crop_sz] RGB array.

    The image is resized so its shorter side equals `img_sz` (aspect
    ratio preserved), optionally mirrored horizontally, then cropped to
    crop_sz x crop_sz at the position named by `crop_type`:
    'center', 'top-left', 'top-right', 'bottom-left', 'bottom-right';
    any other value falls back to a center crop. Output is channel-first
    with a leading batch axis.
    """
    img = cv2.cvtColor(cv2.imread(imgpath), cv2.COLOR_BGR2RGB)   # [h, w, 3]

    # resize the shorter axis to img_sz, keeping the aspect ratio
    rows, cols = img.shape[:2]
    if cols < rows:
        resize_width = img_sz
        resize_height = resize_width * rows / cols
    else:
        resize_height = img_sz
        resize_width = resize_height * cols / rows
    # NOTE: cv2.resize takes (width, height)
    img = cv2.resize(img, (int(resize_width), int(resize_height)),
                     interpolation=cv2.INTER_CUBIC)
    h, w, _ = img.shape

    if isflip:
        img = img[:, ::-1, :]

    if crop_type == 'top-left':
        x0 = 0
        y0 = 0
    elif crop_type == 'top-right':
        x0 = w - crop_sz
        y0 = 0
    elif crop_type == 'bottom-left':
        # BUG FIX: this branch previously tested 'bottom-right' twice, so
        # 'bottom-left' fell through to the center crop and the real
        # bottom-right branch was unreachable.
        x0 = 0
        y0 = h - crop_sz
    elif crop_type == 'bottom-right':
        x0 = w - crop_sz
        y0 = h - crop_sz
    else:
        # 'center' and any unrecognized value: center crop
        x0 = int((w - crop_sz) / 2)
        y0 = int((h - crop_sz) / 2)

    img = img[y0:y0 + crop_sz, x0:x0 + crop_sz]   # [crop_sz, crop_sz, 3]
    img = np.swapaxes(img, 0, 2)                  # [3, w, h]
    img = np.swapaxes(img, 1, 2)                  # [3, h, w]
    img = img[np.newaxis, :]                      # [1, 3, h, w]
    return img

def gen_prediction(mod,imgfolder,img_sz,crop_sz,isflip,crop_type):
    """Run top-1 prediction over every image file in `imgfolder`.

    Returns (preds, imgnames): the predicted class index and the
    filename stem (text before the first '.') for each image, in
    os.listdir order.
    """
    preds = []
    imgnames = []
    for idx, imgfile in enumerate(os.listdir(imgfolder)):
        imgnames.append(imgfile.split('.')[0])
        imgpath = os.path.join(imgfolder, imgfile)
        img = get_image(imgpath, img_sz, crop_sz, isflip, crop_type)
        mod.forward(Batch([mx.nd.array(img)]))
        prob = mod.get_outputs()[0].asnumpy()
        prob = np.squeeze(prob)
        # top-1 class = first entry of the descending-probability order
        a = np.argsort(prob)[::-1]
        preds.append(a[0])
        # progress log; was Python-2-only `print idx`, inconsistent with
        # the parenthesized print calls used elsewhere in this file
        print(idx)

    return preds, imgnames
def gen_prediction2(mod,img_path_list,batchsize,img_sz,crop_sz,isflip,crop_type):
    """Batched top-1 prediction for a list of image paths.

    Images are processed `batchsize` at a time; the final short batch is
    zero-padded up to `batchsize` before the forward pass and the padded
    rows are dropped again from the output. Returns (preds, prob_arr):
    argmax class indices and the raw output rows for every input image.
    """
    preds = []
    prob_rows = []

    n_batches = int(math.ceil(float(len(img_path_list)) / float(batchsize)))
    # batch indices at which to print a ~10% progress message
    progress_marks = [int(n_batches / 10.0 * perc) for perc in range(1, 10)]

    for b in range(n_batches):
        if b < n_batches - 1:
            batch_paths = img_path_list[b * batchsize:(b + 1) * batchsize]
            valid_idx = range(batchsize)
        else:
            batch_paths = img_path_list[b * batchsize:]
            valid_idx = range(len(batch_paths))

        if b in progress_marks:
            print ('{0} finished...,total {1}'.format(b, n_batches))

        # load and stack this batch's images into [k, 3, h, w]
        imgs = np.array([np.squeeze(get_image(p, img_sz, crop_sz, isflip, crop_type), [0])
                         for p in batch_paths])

        # zero-pad the batch up to `batchsize` rows so the module's
        # fixed batch shape is always satisfied
        padded = np.zeros((batchsize, imgs.shape[1], imgs.shape[2], imgs.shape[3]))
        padded[valid_idx] = imgs

        mod.forward(Batch([mx.nd.array(padded)]))
        prob = mod.get_outputs()[0].asnumpy()[valid_idx]
        preds += list(np.argmax(prob, axis=1))
        prob_rows += list(prob)

    return preds, np.array(prob_rows)


def label_mapping(preds,imgnames,train_lst_path):
    """Map numeric mxnet labels back to folder-name labels.

    `train_lst_path` is a .lst file (tab-separated columns: idx,
    mxlabel, imgfile) whose image paths start with the label folder
    name, e.g. 'cat/001.jpg'. Returns a DataFrame with the imgid/preds
    columns left-joined against the (label, mxlabel) lookup derived from
    the .lst file (label is NaN for preds not present there).
    """
    prediction = pd.DataFrame({'preds': preds, 'imgid': imgnames})
    # was Python-2-only `print prediction`; the parenthesized form works
    # in both Python 2 and 3 for a single argument
    print(prediction)

    refer = pd.read_csv(train_lst_path, sep='\t', header=None,
                        names=['idx', 'mxlabel', 'imgfile'])
    # label = top-level folder of the image path
    refer['label'] = refer['imgfile'].str.split('/').str[0]
    refer = refer[['label', 'mxlabel']].drop_duplicates('label')

    prediction = pd.merge(prediction, refer, left_on='preds',
                          right_on='mxlabel', how='left')
    print(prediction)
    return prediction


