# -*- coding:utf8 -*-
import theano
import numpy
import theano.tensor as T
import math

from theano.tensor.nnet import conv
from theano.tensor.signal import downsample

def relu(x):
    """Rectified linear unit: element-wise max(x, 0), as a Theano expression."""
    return T.switch(T.lt(x, 0), 0, x)

class LogisticRegression(object):
    """Multi-class logistic regression (softmax output) layer.

    Computes p(y|x) = softmax(x.W + b) and predicts the argmax class.
    """

    def __init__(self, input, n_in, n_out, W=None, b=None):
        # Zero initialization is fine for softmax regression: unlike hidden
        # layers there is no symmetry-breaking problem.
        if W is None:
            W = theano.shared(
                value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX),
                name="W",
                borrow=True)
        if b is None:
            b = theano.shared(
                value=numpy.zeros((n_out,), dtype=theano.config.floatX),
                name="b",
                borrow=True)
        self.W = W
        self.b = b

        # Class-membership probabilities, one row per example.
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
        # Predicted class = index of the largest probability per row.
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        self.params = [self.W, self.b]

    def negative_log_likelihood(self, y):
        """Mean negative log-likelihood of the int target vector ``y``."""
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    # Backward-compatible alias for the historical misspelling, so existing
    # callers of the old name keep working.
    negtive_log_likehood = negative_log_likelihood

    def errors(self, y):
        """Mean 0-1 loss over the minibatch; ``y`` must be an int vector."""
        if y.ndim != self.y_pred.ndim:
            # Original message had typos and passed the *builtin* ``type``
            # instead of the tensors' types; report the actual types instead.
            raise TypeError('y should have the same shape as self.y_pred',
                            ('y', y.type, 'y_pred', self.y_pred.type))
        if y.dtype.startswith('int'):
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()

class HiddenLayer(object):
    """Fully-connected layer: output = activation(input . W + b)."""

    def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh):
        self.input = input

        if W is None:
            # Glorot-style uniform init in [-sqrt(6/(fan_in+fan_out)), +bound].
            bound = numpy.sqrt(6. / (n_in + n_out))
            W_values = numpy.asarray(
                rng.uniform(low=-bound, high=bound, size=(n_in, n_out)),
                dtype=theano.config.floatX)
            # Sigmoid units use a 4x wider initial range.
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4
            W = theano.shared(value=W_values, name='W', borrow=True)

        if b is None:
            b = theano.shared(
                value=numpy.zeros((n_out,), dtype=theano.config.floatX),
                name='b', borrow=True)

        self.W = W
        self.b = b

        pre_activation = T.dot(input, self.W) + self.b
        if activation is None:
            self.output = pre_activation
        else:
            self.output = activation(pre_activation)
        self.params = [self.W, self.b]

class MLP(object):
    """One-hidden-layer MLP: tanh hidden layer followed by a softmax output."""

    def __init__(self, rng, input, n_in, n_hidden, n_out):
        self.hiddenLayer = HiddenLayer(rng=rng, input=input,
                                       n_in=n_in, n_out=n_hidden,
                                       activation=T.tanh)
        self.logRegressionLayer = LogisticRegression(
            input=self.hiddenLayer.output,
            n_in=n_hidden,
            n_out=n_out)
        # L1 / squared-L2 regularization terms over both weight matrices.
        self.L1 = abs(self.hiddenLayer.W).sum() + abs(self.logRegressionLayer.W).sum()
        self.L2_sqr = (self.hiddenLayer.W ** 2).sum() + (self.logRegressionLayer.W ** 2).sum()
        # The output layer historically misspelled this method
        # ('negtive_log_likehood'); accept either spelling so construction
        # works against both old and corrected LogisticRegression versions.
        self.negative_log_likelihood = getattr(
            self.logRegressionLayer, 'negative_log_likelihood',
            getattr(self.logRegressionLayer, 'negtive_log_likehood', None))
        self.errors = self.logRegressionLayer.errors
        self.params = self.hiddenLayer.params + self.logRegressionLayer.params

class ConvLayer(object):
    """Convolution layer (no pooling): output = activation(conv(input, W) + b)."""

    def __init__(self, rng, input, filter_shape, image_shape, W=None, b=None,
                 activation=T.tanh):
        # Number of input feature maps must agree between image and filters.
        assert filter_shape[1] == image_shape[1]
        self.input = input

        # fan_in:  inputs feeding each unit; fan_out: units fed per input,
        # both used for the Glorot uniform bound below.
        fan_in = numpy.prod(filter_shape[1:])
        fan_out = filter_shape[0] * numpy.prod(filter_shape[2:])

        if W is None:
            bound = numpy.sqrt(6. / (fan_in + fan_out))
            initial = rng.uniform(low=-bound, high=bound, size=filter_shape)
            W = theano.shared(
                value=numpy.asarray(initial, dtype=theano.config.floatX),
                borrow=True)
        if b is None:
            b = theano.shared(
                value=numpy.zeros((filter_shape[0],), dtype=theano.config.floatX),
                borrow=True)
        self.W = W
        self.b = b

        # Convolve, then broadcast the per-filter bias over batch and space.
        pre_activation = conv.conv2d(input=self.input, filters=self.W,
                                     filter_shape=filter_shape,
                                     image_shape=image_shape)
        pre_activation = pre_activation + self.b.dimshuffle('x', 0, 'x', 'x')

        self.params = [self.W, self.b]
        self.output = pre_activation if activation is None else activation(pre_activation)

class PoolLayer(object):
    """Non-overlapping max-pooling layer."""

    def __init__(self, input, poolsize=(2, 2)):
        # ignore_border=True drops any partial window at the right/bottom edge.
        self.output = downsample.max_pool_2d(input,
                                             ds=poolsize,
                                             ignore_border=True)

class ConvPoolLayer(object):
    """Convolution followed by max-pooling:
    output = maxpool(activation(conv(input, W) + b)).
    """

    def __init__(self, rng, input, image_shape, filter_shape, W=None, b=None,
                 activation=T.tanh, poolsize=(2, 2)):
        # Number of input feature maps must agree between image and filters.
        assert image_shape[1] == filter_shape[1]
        self.input = input

        # Glorot bound; fan_out is divided by the pool area since pooling
        # reduces the number of outputs each weight contributes to.
        fan_in = numpy.prod(filter_shape[1:])
        fan_out = filter_shape[0] * numpy.prod(filter_shape[2:]) / numpy.prod(poolsize)

        if W is None:
            bound = numpy.sqrt(6. / (fan_in + fan_out))
            initial = rng.uniform(low=-bound, high=bound, size=filter_shape)
            W = theano.shared(
                numpy.asarray(initial, dtype=theano.config.floatX),
                borrow=True)
        if b is None:
            b = theano.shared(
                value=numpy.zeros((filter_shape[0],), dtype=theano.config.floatX),
                borrow=True)
        self.W = W
        self.b = b

        # Convolve, add the broadcast per-filter bias, then apply the
        # non-linearity *before* pooling (max commutes with monotone
        # activations, so this matches the usual conv->pool ordering).
        pre_activation = conv.conv2d(input=input,
                                     filters=self.W,
                                     filter_shape=filter_shape,
                                     image_shape=image_shape)
        pre_activation = pre_activation + self.b.dimshuffle('x', 0, 'x', 'x')
        activated = pre_activation if activation is None else activation(pre_activation)

        self.output = downsample.max_pool_2d(input=activated,
                                             ds=poolsize,
                                             ignore_border=True)
        self.params = [self.W, self.b]

class SppLayer_train(object):
    """Spatial pyramid pooling (He et al., 2014) over a square feature map.

    Max-pools the input at each of the first three pyramid levels in ``bins``
    and concatenates the flattened results along axis 1, yielding a
    fixed-length vector per example regardless of input size.
    """

    def __init__(self, input, bins, feature_map_size=34):
        # NOTE(review): an unresolved git merge conflict left both 34 (HEAD)
        # and 54 here as the feature-map size. The HEAD value is kept as the
        # default and the size is now a parameter so callers can override it.
        self.input = input
        self.a = feature_map_size
        self.bins = bins
        self.num_bins = len(bins)

        # Per the SPP paper: window = ceil(a / n), stride = floor(a / n) for
        # an n x n pyramid level. Explicit float division keeps ceil/floor
        # meaningful under Python 2 integer division as well.
        # Only the first three levels were used originally; preserved here.
        self.strides = [int(math.floor(float(self.a) / n)) for n in bins[:3]]
        self.windows = [int(math.ceil(float(self.a) / n)) for n in bins[:3]]

        self.pooled_out = []
        for window, stride in zip(self.windows, self.strides):
            pooled = downsample.max_pool_2d(input=self.input,
                                            ds=(window, window),
                                            st=(stride, stride),
                                            ignore_border=True)
            # Flatten each example's pooled maps into one row:
            # shape (batch_size, features).
            self.pooled_out.append(pooled.flatten(2))

        self.output = T.concatenate(self.pooled_out, axis=1)

