#coding=utf8
import os
import sys
import numpy as np
import glob
import tensorflow as tf
from collections import namedtuple
import pandas as pd

###########
#  mxnet  #
###########
import mxnet as mx
Batch = namedtuple('Batch', ['data'])
sys.path.append('utils/')
from mxnet_helper import get_image


TRAIN_FOLDER = '/media/hszc/data/BaiduImage/dataset/trainall'
TEST_FOLDER = '/media/hszc/data/BaiduImage/dataset/test1'
IMG_SZ = 360
CROP_SZ = 320


# Extract features from an mxnet model checkpoint
def mx_extract_ft(mx_ckpt_path, mx_ckpt_epoch, mx_layername, img_path_list):
    """Extract per-image features from an mxnet checkpoint.

    Args:
        mx_ckpt_path: checkpoint path prefix passed to mx.model.load_checkpoint.
        mx_ckpt_epoch: checkpoint epoch number to load.
        mx_layername: internal symbol name; its '_output' is used as feature.
        img_path_list: iterable of image file paths.

    Returns:
        List of feature vectors, one flat list of floats per image.
    """
    # Load symbol + weights and truncate the network at the feature layer.
    devs = mx.gpu(0)
    sym, arg_params, aux_params = mx.model.load_checkpoint(mx_ckpt_path, mx_ckpt_epoch)
    all_layers = sym.get_internals()
    mx_ft = all_layers[mx_layername + '_output']
    mx_extractor = mx.mod.Module(symbol=mx_ft, context=devs, label_names=[])
    # Bind with batch size 1. Use CROP_SZ instead of a hard-coded 320 so the
    # input shape stays in sync with get_image()'s crop size below.
    mx_extractor.bind(for_training=False, data_shapes=[('data', (1, 3, CROP_SZ, CROP_SZ))])
    mx_extractor.set_params(arg_params, aux_params)

    # Precompute the ~10% progress milestones once instead of rebuilding the
    # list on every loop iteration as the original did.
    milestones = set(int(len(img_path_list) / 10.0 * perc) for perc in range(1, 10))

    mx_ft_list = []
    print('mx_model extracting from {0} samples:'.format(len(img_path_list)))
    for idx, img_path in enumerate(img_path_list):
        if idx in milestones:
            print ('{0} finished...'.format(idx))

        img = get_image(img_path, IMG_SZ, CROP_SZ)
        mx_extractor.forward(Batch([mx.nd.array(img)]))
        ft_arr = mx_extractor.get_outputs()[0].asnumpy().reshape(-1)
        mx_ft_list.append(list(ft_arr))

    return mx_ft_list



##################################
# # create img path label list
###################################
# # train
# img_path_list = []
# img_label_list = []
# for folder in os.listdir(TRAIN_FOLDER):
#     label = folder
#     folder_path = os.path.join(TRAIN_FOLDER,folder)
#     for img_file in os.listdir(folder_path):
#         img_path = os.path.join(folder_path,img_file)
#
#         img_path_list.append(img_path)
#         img_label_list.append(folder)
# df = pd.DataFrame({'img_path_list':img_path_list, 'img_label_list':img_label_list})
# df.to_csv('CNNft/train_img_list.csv',index=False)
#
# # test
# img_path_list = []
# for img_file in os.listdir(TEST_FOLDER):
#     img_path = os.path.join(TEST_FOLDER,img_file)
#     img_path_list.append(img_path)
# df = pd.DataFrame({'img_path_list':img_path_list})
# df.to_csv('CNNft/test_img_list.csv',index=False)

# mxnet model
def do_mx_extract_ft():
    """Extract mxnet features for the train and test image lists and save them
    as ft_train.npy / ft_test.npy under CNNft/<model>/<mode>_<epoch>/<layer>/."""
    mx_model_name = 'Resnet50'
    mx_model_mode = 'finetuned_model'
    mx_ckpt_path = '/media/hszc/model/zhangchi/BaiduImage_model/{0}/{1}/{0}'.format(mx_model_name, mx_model_mode)
    mx_ckpt_epoch = 20
    mx_layername = 'flatten0'

    # Build the output directory path once instead of re-formatting the same
    # string four times (the original repeated this format in four places).
    out_dir = 'CNNft/{0}/{1}_{2}/{3}'.format(mx_model_name, mx_model_mode, mx_ckpt_epoch, mx_layername)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # train set
    img_path_list = pd.read_csv('CNNft/train_img_list.csv')['img_path_list']
    ft_list = mx_extract_ft(mx_ckpt_path, mx_ckpt_epoch, mx_layername, img_path_list)
    print('mx_train_ft', np.array(ft_list).shape)
    np.save(os.path.join(out_dir, 'ft_train.npy'), np.array(ft_list))

    # test set
    img_path_list = pd.read_csv('CNNft/test_img_list.csv')['img_path_list']
    ft_list = mx_extract_ft(mx_ckpt_path, mx_ckpt_epoch, mx_layername, img_path_list)
    print('mx_test_ft', np.array(ft_list).shape)
    np.save(os.path.join(out_dir, 'ft_test.npy'), np.array(ft_list))

#
################
#  tensorflow  #
################

import tensorflow as tf
import importlib
from PIL import Image
import math




def preprocess(im_path):
    """Load an image and return a normalized float array of shape (1, 299, 299, 3).

    Resizes to 341x341, crops the (21, 21)-(320, 320) region to 299x299,
    scales pixels from [0, 255] to [-1, 1], and adds a leading batch axis.
    """
    im = Image.open(im_path)
    # Robustness fix: grayscale / RGBA / palette images made the ndim assert
    # below fail. Normalize to RGB first; true-RGB inputs are unchanged.
    if im.mode != 'RGB':
        im = im.convert('RGB')
    im = im.resize((341, 341))
    im = im.crop((21, 21, 320, 320))
    im = np.array(im, dtype=np.float32) / 255.0
    im = (im - 0.5) * 2.0  # map [0, 1] -> [-1, 1]
    assert im.ndim == 3, "Not RGB Image"
    return im[np.newaxis, :, :, :]

def tf_extract_ft(ft_tensor, sess, tf_batch_size, img_path_list, images_pl=None, phase_train=None):
    """Run batched feature extraction through a live TF session.

    Args:
        ft_tensor: tensor to evaluate (the feature endpoint).
        sess: tf.Session with the model already restored.
        tf_batch_size: number of images per forward pass.
        img_path_list: iterable of image file paths.
        images_pl: image placeholder; when None it is resolved from the
            session graph by name ('images:0').
        phase_train: training-phase placeholder; when None it is resolved
            by name ('phase_train:0').

    Returns:
        np.ndarray of shape (num_images, feature_dim).
    """
    # BUG FIX: the original referenced `images_pl` and `phase_train` as free
    # variables, but those names are locals of do_tf_extract_ft — every call
    # raised NameError. Accept them as optional arguments and fall back to a
    # graph lookup by the placeholder names used when the graph was built.
    if images_pl is None:
        images_pl = sess.graph.get_tensor_by_name('images:0')
    if phase_train is None:
        phase_train = sess.graph.get_tensor_by_name('phase_train:0')

    tf_ft_list = []
    iter_num = int(math.ceil(float(len(img_path_list)) / float(tf_batch_size)))
    print("tf_model begin to extract from {0} iters".format(iter_num))

    # Precompute the ~10% progress milestones once, outside the loop.
    milestones = set(int(iter_num / 10.0 * perc) for perc in range(1, 10))

    for i in range(iter_num):
        # Slicing clamps at the sequence end, so a single expression also
        # covers the (possibly short) final batch — no special case needed.
        batch_path_list = img_path_list[i * tf_batch_size:(i + 1) * tf_batch_size]

        im_list = [preprocess(img_path) for img_path in batch_path_list]

        feed_img = np.vstack(im_list)
        fea = sess.run(ft_tensor, feed_dict={images_pl: feed_img, phase_train: False})
        # Drop the 1x1 spatial dims and flatten each sample's features.
        tf_ft_list += list(np.squeeze(fea, [1, 2]).reshape(len(im_list), -1))
        if i in milestones:
            print ('{0} finished...'.format(i))
            print(np.array(tf_ft_list).shape)
    return np.array(tf_ft_list)


def do_tf_extract_ft():
    """Extract tensorflow features for the train and test image lists and save
    them as ft_train.npy / ft_test.npy under CNNft/<model>/<mode>_<epoch>/<layer>/."""
    tf_model_name = 'inception_resnet_v2'
    tf_model_mode = 'finetuned_model'
    tf_layername = 'PreLogitsFlatten'
    tf_ckpt_epoch = 0
    tf_ckpt_path = '/media/hszc/model/zhangchi/BaiduImage_model/{0}/{1}/{0}_model.ckpt-6000'\
                        .format(tf_model_name, tf_model_mode)
    tf_batch_size = 16

    with tf.Graph().as_default():
        # Build the input placeholders and the network on the GPU.
        model = importlib.import_module('fintune_training_m.models.{0}'.format(tf_model_name))
        images_pl = tf.placeholder(shape=(None, 299, 299, 3), dtype=tf.float32, name='images')
        phase_train = tf.placeholder(tf.bool, name='phase_train')
        with tf.device('/gpu:0'):
            net, endpoints = model.inference(images_pl, False,
                                             num_class=100)

        saver = tf.train.Saver(tf.global_variables())

        config = tf.ConfigProto()
        config.allow_soft_placement = True
        config.log_device_placement = False
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 1.0
        # Use the session as a context manager so it is always closed
        # (the original never closed it, leaking GPU resources).
        with tf.Session(config=config) as sess:
            saver.restore(sess, tf_ckpt_path)
            print("finish load model")

            # Build the output directory path once instead of re-formatting
            # the same string in four places.
            out_dir = 'CNNft/{0}/{1}_{2}/{3}'.format(tf_model_name, tf_model_mode, tf_ckpt_epoch, tf_layername)
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)

            # train set
            img_path_list = pd.read_csv('CNNft/train_img_list.csv')['img_path_list']
            ft_arr = tf_extract_ft(endpoints[tf_layername], sess, tf_batch_size, img_path_list)
            print(ft_arr.shape)
            np.save(os.path.join(out_dir, 'ft_train.npy'), ft_arr)

            # test set
            img_path_list = pd.read_csv('CNNft/test_img_list.csv')['img_path_list']
            ft_arr = tf_extract_ft(endpoints[tf_layername], sess, tf_batch_size, img_path_list)
            print(ft_arr.shape)
            np.save(os.path.join(out_dir, 'ft_test.npy'), ft_arr)

# Script entry point: run the mxnet extraction first, then the tensorflow one.
# Each writes ft_train.npy / ft_test.npy under a model-specific CNNft/ subdir.
if __name__ == '__main__':
    do_mx_extract_ft()
    do_tf_extract_ft()
