# Module author metadata (informational only; not read at runtime).
__author__ = 'carlxie'

from layer.Layer import Layer
from layer.util import *


class ConvLayer(Layer):
    """Convolutional layer: slides K learnable F x F kernels over the
    previous layer's 3-D output (width x height x depth) with a given
    stride and zero padding.

    Constructor parameters
    ----------------------
    preLayer : Layer  previous layer; provides .output, .width, .height, .depth
    K : int           number of kernels (this layer's output depth)
    F : int           kernel size (square, F x F)
    S : int           stride (default 1)
    P : int           zero padding added on each spatial side (default 0)
    """

    def __init__(self, preLayer, K, F, S=1, P=0):
        # The output spatial size (W - F + 2P) / S + 1 must be an integer.
        assert (preLayer.width - F + 2 * P) % S == 0
        assert (preLayer.height - F + 2 * P) % S == 0
        super(ConvLayer, self).__init__(
            preLayer,
            LayerType.CONV_TYPE,
            # Floor division keeps the sizes ints under Python 3 too
            # (identical result in Python 2 since the operands are ints).
            (preLayer.width - F + 2 * P) // S + 1,
            (preLayer.height - F + 2 * P) // S + 1,
            K
        )
        self.stride = S
        self.padding = P
        self.w_grads = []  # per-sample weight grads, cached until update_weights
        self.b_grads = []  # per-sample bias grads, cached until update_weights
        # Small uniform init in [-0.1, 0.1); shape (F, F, input_depth, K).
        self.weights = 0.2 * np.random.random((F, F, preLayer.output.shape[-1], K)) - 0.1
        self.biases = np.zeros((1, 1, K))

    def get_padding_input(self):
        """Return the previous layer's output, zero-padded by self.padding
        on each spatial side.  Returns the output itself (no copy) when
        padding is 0."""
        preLayerOutput = self.preLayer.output
        if self.padding == 0:
            return preLayerOutput
        p = self.padding
        padded = np.zeros((self.preLayer.width + 2 * p,
                           self.preLayer.height + 2 * p,
                           self.preLayer.depth))
        # One slice assignment covers every depth channel at once (the
        # original looped over channels to do the same copy).
        padded[p:-p, p:-p, :] = preLayerOutput
        return padded

    def forward_prop(self):
        """Convolve the (padded) input with every kernel, add the per-kernel
        bias, and store the result in both self.score and self.output."""
        self.score = np.zeros((self.width, self.height, self.depth))
        input_data = self.get_padding_input()
        shape = self.output.shape
        for depthIdx in range(shape[2]):
            for heightIdx in range(shape[1]):
                for widthIdx in range(shape[0]):
                    self.score[widthIdx, heightIdx, depthIdx] += \
                        self.calc(input_data, heightIdx, widthIdx, depthIdx)
            # One scalar bias per kernel, broadcast over the output map.
            self.score[:, :, depthIdx] += self.biases[:, :, depthIdx]
        self.output = self.score

    def calc(self, data, hIdx, wIdx, dIdx):
        """Dot product of kernel dIdx with the input window whose top-left
        corner is (wIdx * stride, hIdx * stride) in padded coordinates."""
        (kWidth, kHeight, kDepth) = self.weights[:, :, :, dIdx].shape
        val = 0.0
        w = wIdx * self.stride
        h = hIdx * self.stride
        for d in range(kDepth):
            for i in range(kWidth):
                for j in range(kHeight):
                    val += self.weights[i, j, d, dIdx] * data[w + i, h + j, d]
        return val

    def compute_w_grads(self):
        """Gradient of the loss w.r.t. every kernel weight, given self.delta
        (shape: output width x height x K)."""
        w_grad = np.zeros(self.weights.shape)
        (w, h, d, num_kernels) = w_grad.shape
        # Hoist the (possibly padded) input out of the quadruple loop — the
        # original recomputed it once per weight element.
        data = self.get_padding_input()
        for num in range(num_kernels):
            for dIdx in range(d):
                for wIdx in range(w):
                    for hIdx in range(h):
                        w_grad[wIdx, hIdx, dIdx, num] = \
                            self.compute_single_w_grad(wIdx, hIdx, dIdx, num, data)
        return w_grad

    def compute_single_w_grad(self, wIdx, hIdx, dIdx, num, data=None):
        """d(loss)/d(weights[wIdx, hIdx, dIdx, num]): kernel `num`'s delta
        map correlated with the input values that this weight touched.

        `data` lets the caller pass the padded input once instead of
        recomputing it per weight; when omitted it is fetched from the
        previous layer (backward compatible with the old signature)."""
        (w, h, _) = self.delta.shape
        if data is None:
            data = self.get_padding_input()
        d_w = 0.0
        for i in range(w):
            for j in range(h):
                d_w += self.delta[i, j, num] * data[wIdx + i * self.stride,
                                                    hIdx + j * self.stride,
                                                    dIdx]
        return d_w

    def compute_b_grads(self):
        """d(loss)/d(bias k) = sum of delta over the k-th output map."""
        b_grad = np.empty(self.biases.shape)
        for i in range(b_grad.shape[-1]):
            # np.sum over the whole 2-D map replaces the nested builtin sum.
            b_grad[:, :, i] = np.sum(self.delta[:, :, i])
        return b_grad

    def back_prop(self):
        """Cache this sample's gradients and propagate delta to the previous
        layer (skipped when the previous layer has no score, i.e. is input)."""
        self.w_grads.append(self.compute_w_grads())
        self.b_grads.append(self.compute_b_grads())
        # `is not None`, not `!= None`: comparing a numpy array with != is
        # elementwise and raises "truth value is ambiguous" when score is
        # an ndarray.
        if self.preLayer.score is not None:
            self.preLayer.delta = self.propagate_delta()

    def propagate_delta(self):
        """Delta for the previous layer: for each input position, sum the
        weighted deltas of every output position it contributed to.

        NOTE(review): positions are taken in unpadded coordinates; the
        `padding` offset is not applied here, so backprop with P > 0 looks
        incorrect — confirm before training with padding enabled."""
        delta = np.empty(self.preLayer.output.shape)
        (w, h, d) = self.preLayer.output.shape
        for k in range(d):
            for i in range(w):
                for j in range(h):
                    delta[i, j, k] = self.compute_delta(i, j, k)
        return delta

    def compute_delta(self, wIdx, hIdx, dIdx):
        """Sum the contributions of all K kernels to the delta at input
        position (wIdx, hIdx, dIdx)."""
        weighted_delta = 0.0
        for kernelIdx in range(self.depth):
            weight = self.weights[:, :, :, kernelIdx]
            weighted_delta += self.compute_weighted_delta(weight, wIdx, hIdx,
                                                          dIdx, kernelIdx)
        return weighted_delta

    def compute_weighted_delta(self, w, wIdx, hIdx, dIdx, kernelIdx):
        """For one kernel, sum w[i, j, dIdx] * delta at the output position
        that consumed input (wIdx, hIdx) through kernel offset (i, j)."""
        val = 0.0
        (width, height, _) = w.shape
        (wBound, hBound, _) = self.delta.shape
        for i in range(width):
            if wIdx - i < 0:
                break
            if self.qualified(wIdx, i, wBound):
                for j in range(height):
                    if hIdx - j < 0:
                        break
                    if self.qualified(hIdx, j, hBound):
                        # Floor division keeps the indices ints on Python 3.
                        val += w[i, j, dIdx] * self.delta[(wIdx - i) // self.stride,
                                                          (hIdx - j) // self.stride,
                                                          kernelIdx]
        return val

    def qualified(self, index1, index2, bound):
        """True when (index1 - index2) lands exactly on an output position
        inside [0, bound).  The bound must be checked on the *output* index
        (offset // stride), not the raw offset: the original compared the
        raw offset against bound, which wrongly excluded valid positions
        whenever stride > 1 (equivalent for stride == 1)."""
        offset = index1 - index2
        return offset % self.stride == 0 and offset // self.stride < bound

    def update_weights(self, eta):
        """Apply one SGD step using the mean of the cached gradients, then
        clear the caches for the next batch."""
        self.weights -= eta * sum(self.w_grads) / len(self.w_grads)
        self.biases -= eta * sum(self.b_grads) / len(self.b_grads)
        self.w_grads = []  # clear cache
        self.b_grads = []  # clear cache

def test_forward_pass():
    """Smoke test for ConvLayer.forward_prop: a 5x5x3 input volume, one
    3x3x3 kernel, stride 2, padding 1 (the classic conv-demo setup), with
    bias 1; prints the resulting 3x3 score and output maps."""
    b = np.array([
        [2, 1, 1, 1, 1],
        [2, 0, 2, 0, 1],
        [1, 0, 0, 1, 2],
        [2, 0, 2, 1, 0],
        [0, 2, 2, 2, 1]])
    c = np.array([
        [1, 1, 0, 2, 1],
        [2, 0, 1, 1, 0],
        [2, 2, 1, 1, 2],
        [0, 1, 1, 2, 1],
        [2, 1, 2, 2, 2]])
    a = np.array([
        [0, 0, 2, 2, 0],
        [0, 1, 1, 1, 1],
        [1, 2, 1, 2, 0],
        [2, 0, 2, 2, 0],
        [0, 0, 2, 1, 1]
    ])
    k1 = np.array([
        [ 0,  1, -1],
        [-1,  0,  0],
        [ 0,  0, -1]
    ])
    k2 = np.array([
        [-1,  1,  1],
        [-1, -1, -1],
        [ 1,  0,  1]
    ])
    k3 = np.array([
        [ 0,  1,  1],
        [ 1, -1,  0],
        [ 1,  0, -1]
    ])
    # One kernel of shape (3, 3, 3): k1/k2/k3 are its per-channel slices.
    weights = np.zeros((3, 3, 3, 1))
    weights[:, :, 0, 0] = k1
    weights[:, :, 1, 0] = k2
    weights[:, :, 2, 0] = k3

    input_data = np.zeros((5, 5, 3))
    input_data[:, :, 0] = a
    input_data[:, :, 1] = b
    input_data[:, :, 2] = c

    biases = np.empty((1, 1, 1))
    biases[:, :, 0] = 1

    layer = Layer(None, LayerType.INPUT_TYPE, 5, 5, 3)
    layer.output = input_data

    # K=1 kernel, F=3, stride 2, padding 1  ->  3x3 output.
    convLayer = ConvLayer(layer, 1, 3, 2, 1)
    convLayer.weights = weights
    convLayer.biases = biases
    convLayer.forward_prop()
    # print(x) with a single argument behaves identically under Python 2
    # and Python 3; the original `print x` statements were Python-2-only.
    print(convLayer.score[:, :, 0])
    print(convLayer.output[:, :, 0])

def test_backward_pass():
    """Smoke test for ConvLayer.back_prop: a 5x5x3 previous layer with
    all-ones scores, 3 all-ones 3x3x3 kernels, stride 1, all-ones delta;
    prints two channels of the delta propagated to the previous layer."""
    score = np.ones((5, 5, 3))

    input_data = vec_tanh(score)
    layer = Layer(None, LayerType.CONV_TYPE, 5, 5, 3)
    layer.output = input_data
    layer.score = score  # non-None score so back_prop propagates delta

    weights = np.ones((3, 3, 3, 3))
    biases = np.zeros((1, 1, 3))

    # K=3 kernels, F=3, stride 1, no padding  ->  3x3x3 output.
    convLayer = ConvLayer(layer, 3, 3, 1)
    convLayer.weights = weights
    convLayer.biases = biases

    convLayer.delta = np.ones((3, 3, 3))

    convLayer.back_prop()
    # print(x) with a single argument behaves identically under Python 2
    # and Python 3; the original `print x` statements were Python-2-only.
    print(convLayer.preLayer.delta[:, :, 0])
    print(convLayer.preLayer.delta[:, :, 1])

# Manual smoke-test entry point: prints the deltas produced by back_prop.
if __name__ == "__main__":
    test_backward_pass()
