#!/usr/bin/env python
# -*- coding:utf8 -*-

from Sppnet_train import *
from layers import *

import os
import numpy
import theano
import Image

class featureMap:
    """Rebuild the trained SPP conv net from dumped parameters and expose a
    theano function mapping raw image batches to layer-5 feature maps.

    exist_params is the nested list of (W, b) pairs produced by
    ParamDumpHelper, ordered from the output layer backwards:
    index 1 = hidden layer, indices 2..6 = conv layers 5..1.
    (Indexing assumption inferred from usage below — TODO confirm against
    the dump order in Sppnet_train.)
    """

    def __init__(self, exist_params):
        # Fixed seed so any randomly-initialised weights are reproducible.
        self.rng = numpy.random.RandomState(1234)
        self.exist_params = exist_params

    def layer_params(self, nkerns=None, batch_size=0):
        """Precompute image/filter shapes for the 5 conv layers.

        nkerns:     kernel count per conv layer; defaults to [20,20,40,40,80].
        batch_size: number of images in one batch (leading shape dim).
        """
        if nkerns is None:
            # Avoid the shared-mutable-default-argument pitfall.
            nkerns = [20, 20, 40, 40, 80]
        src_channel = 3  # RGB input
        # All conv layers use 7x7 filters and no pooling, so each layer
        # shrinks the spatial size by 6: 64 -> 58 -> 52 -> 46 -> 40 -> 34.
        self.layer1_image_shape  = (batch_size, src_channel, 64, 64)
        self.layer1_filter_shape = (nkerns[0],  src_channel, 7, 7)
        self.layer2_image_shape  = (batch_size, nkerns[0], 58, 58)
        self.layer2_filter_shape = (nkerns[1], nkerns[0], 7, 7)
        self.layer3_image_shape  = (batch_size, nkerns[1], 52, 52)
        self.layer3_filter_shape = (nkerns[2], nkerns[1], 7, 7)
        self.layer4_image_shape  = (batch_size, nkerns[2], 46, 46)
        self.layer4_filter_shape = (nkerns[3], nkerns[2], 7, 7)
        self.layer5_image_shape  = (batch_size, nkerns[3], 40, 40)
        self.layer5_filter_shape = (nkerns[4], nkerns[3], 7, 7)
        self.result_image_shape  = (batch_size, nkerns[4], 34, 34)

    def build_layer_architecture(self, n_spp, n_out, n_hidden, acti_func=relu):
        """Wire up the conv stack, SPP layer and hidden layer with the trained
        weights, and compile self.generator (flattened batch -> layer-5 maps).

        n_spp:     input width of the hidden layer (length of the SPP output).
        n_out:     number of classes; kept for interface parity with training
                   code even though no softmax layer is built here.
        n_hidden:  hidden-layer width.
        acti_func: activation shared by all layers.
        """
        self.index      = T.lscalar()
        self.step_rate  = T.dscalar()
        self.x = T.matrix('x')
        self.y = T.ivector('y')

        print('building the model ...')

        layer1_input = self.x.reshape(self.layer1_image_shape)
        self.layer1 = ConvLayer(self.rng,
                input        = layer1_input,
                image_shape  = self.layer1_image_shape,
                filter_shape = self.layer1_filter_shape,
                W = self.exist_params[6][0],
                b = self.exist_params[6][1],
                activation   = acti_func)

        self.layer2 = ConvLayer(self.rng,
                input        = self.layer1.output,
                image_shape  = self.layer2_image_shape,
                filter_shape = self.layer2_filter_shape,
                W = self.exist_params[5][0],
                b = self.exist_params[5][1],
                activation   = acti_func)

        self.layer3 = ConvLayer(self.rng,
                input        = self.layer2.output,
                image_shape  = self.layer3_image_shape,
                filter_shape = self.layer3_filter_shape,
                W = self.exist_params[4][0],
                b = self.exist_params[4][1],
                activation   = acti_func)

        self.layer4 = ConvLayer(self.rng,
                input        = self.layer3.output,
                image_shape  = self.layer4_image_shape,
                filter_shape = self.layer4_filter_shape,
                W = self.exist_params[3][0],
                b = self.exist_params[3][1],
                activation   = acti_func)

        self.layer5 = ConvLayer(self.rng,
                input        = self.layer4.output,
                image_shape  = self.layer5_image_shape,
                filter_shape = self.layer5_filter_shape,
                W = self.exist_params[2][0],
                b = self.exist_params[2][1],
                activation   = acti_func)

        # Spatial pyramid pooling over the last conv output.
        spp_input = self.layer5.output

        self.spp_layer = SppLayer_train(
                input = spp_input,
                bins= [4,2,1]
                )

        self.hidden_layer = HiddenLayer(self.rng,
                input = self.spp_layer.output,
                n_in  = n_spp,
                n_out = n_hidden,
                W = self.exist_params[1][0],
                b = self.exist_params[1][1],
                activation = acti_func)

        # The generator exposes the raw conv feature maps (layer 5),
        # not the hidden-layer embedding.
        self.generator = theano.function(inputs=[self.x],
                outputs=self.layer5.output)

    def generate_teaFeatures(self, x):
        """Run the compiled generator on a batch of flattened images."""
        print('\tgenerating ...')
        deepid_data = self.generator(x)
        return deepid_data

def featuremap_generating(dataset_folder, params_file,featuremap_path, nkerns, n_hidden, acti_func=relu,patch_dim=64):
    """Run the trained SPP net over every image in dataset_folder and dump
    the layer-5 feature maps to featuremap_path.

    dataset_folder:  directory of patch_dim x patch_dim RGB images.
    params_file:     ParamDumpHelper file holding trained (W, b) dumps.
    featuremap_path: output path for the gzip-pickled feature maps.
    nkerns:          kernel counts for the 5 conv layers.
    n_hidden:        hidden-layer width.
    """
    image_vector_len = patch_dim*patch_dim*3  # flattened RGB patch length
    pd_helper = ParamDumpHelper(params_file)
    exist_params = pd_helper.get_params_from_file()
    if len(exist_params) != 0:
        exist_params = exist_params[-1]  # use the most recent dump
    else:
        print('error, no trained params')
        return
    arrs = []
    for dataset_file in os.listdir(dataset_folder):
        dataset_path = os.path.join(dataset_folder, dataset_file)
        im = Image.open(dataset_path)
        # NOTE(review): dtype='string' mirrors the training-side loader;
        # the real cast to float32 happens below — confirm against Sppnet_train.
        arr_img = np.asarray(im, dtype='string')
        # HWC -> CHW, then flatten to one row per image.
        arr_img = arr_img.transpose(2,0,1).reshape((image_vector_len,))
        arrs.append(arr_img)
    arrs = np.asarray(arrs, dtype='float32')
    arrs = np.vstack(arrs)
    net=featureMap(exist_params)
    net.layer_params(nkerns,arrs.shape[0])  # batch size = number of images
    # n_spp=1680 matches the SPP output length for bins [4,2,1] used in training.
    net.build_layer_architecture(n_spp=1680,n_hidden=n_hidden,acti_func=acti_func,n_out=2)
    feature_map = net.generate_teaFeatures(arrs)
    featureMap_to_file(feature_map,featuremap_path)


def featuremap_generating_single(src_image, params_file, featureMap_path,
                                 nkerns, n_hidden, acti_func=relu,patch_dim=64):
    """Run the trained SPP net over a single image and dump its layer-5
    feature maps (also printed for inspection) to featureMap_path.

    src_image:       path to one patch_dim x patch_dim RGB image.
    params_file:     ParamDumpHelper file holding trained (W, b) dumps.
    featureMap_path: output path for the gzip-pickled feature map.
    """
    arrs = []
    pd_helper = ParamDumpHelper(params_file)
    exist_params = pd_helper.get_params_from_file()
    if len(exist_params) != 0:
        exist_params = exist_params[-1]  # use the most recent dump
    else:
        print('error : no trained params')
        return

    image_vector_len=patch_dim*patch_dim*3
    im = Image.open(src_image)
    # NOTE(review): dtype='string' mirrors the batch loader; the real cast
    # to float32 happens below — confirm against Sppnet_train.
    arr_img = np.asarray(im, dtype='string')
    arr_img = arr_img.transpose(2,0,1).reshape((image_vector_len,))
    arrs.append(arr_img)
    arrs = np.asarray(arrs, dtype='float32')
    arrs = np.vstack(arrs)
    net=featureMap(exist_params)
    net.layer_params(nkerns,arrs.shape[0])  # batch of one image
    # Fixed: the old 2-positional-arg call matched the non-SPP variant of
    # build_layer_architecture; the SPP signature needs n_spp and n_out too.
    net.build_layer_architecture(n_spp=1680,n_hidden=n_hidden,acti_func=acti_func,n_out=2)
    feature_map = net.generate_teaFeatures(arrs)

    print(feature_map)

    featureMap_to_file(feature_map,featureMap_path)

def get_featureMap(featureMap_path):
    """Return the object gzip-pickled at featureMap_path, or [] if the file
    does not exist.

    Fixed: the file handle now closes even if pickle.load raises.
    """
    if not os.path.exists(featureMap_path):
        return []
    f = gzip.open(featureMap_path, "rb")
    try:
        return pickle.load(f)
    finally:
        f.close()

def load_data_xy(dataset_path):
    """Load and return a pickled (x, y) pair from dataset_path.

    Fixed: the file handle now closes even if pickle.load raises.
    """
    print('loading data of %s' % (dataset_path))
    with open(dataset_path, 'rb') as f:
        x, y = pickle.load(f)
    return x,y

def featureMap_to_file(featureMap,featureMap_path):
    """Gzip-pickle featureMap to featureMap_path.

    Fixed: the file handle now closes even if pickle.dump raises.
    """
    with gzip.open(featureMap_path, 'wb') as f:
        pickle.dump(featureMap, f)

def output_featuremap():
    """Generate and dump feature maps for the positive visualization set
    using the e20/l0001/h160 SPP parameter dump (hard-coded paths)."""
    dataset_folder ='/home/eelab/wxm/data/tea/visualization_pos'
    params_file    = '/home/eelab/wxm/data/tea/result/param_e20_l0001_h160_spp'
    result_folder  = '/home/eelab/wxm/data/tea/result/featureMap_pos_e20_0001_c5_spp'
    nkerns  =[20,20,40,40,80]  # kernel counts of the 5 conv layers
    n_hidden = 160
    featuremap_generating(dataset_folder, params_file, result_folder, nkerns, n_hidden, acti_func=relu)

def show_featureMap():
    """Display the first 9 feature maps of each of the first 14 images,
    one 3x3 figure per image (blocks on each show())."""
    # Feature map tensor is 4-D: batch_size x maps-per-image x map rows x map cols.
    featureMap = get_featureMap("/home/eelab/wxm/data/tea/result/featureMap_pos_e20_0001_c5_spp")
    for j in range(14):
        flag = 1  # 1-based subplot index within the 3x3 grid
        for i in range(9):
            subplot(3,3,flag)
            imshow(featureMap[j][i])
            flag += 1
            axis("off")
        show()

if __name__ == '__main__':
    # Sanity-print one map's width from the dumped file, then display maps.
    featureMap = get_featureMap("/home/eelab/wxm/data/tea/result/featureMap_pos_e20_0001_c5_spp")
    print(len(featureMap[0][0][0]))
    show_featureMap()