__author__ = 'carlxie'

from layer.Layer import Layer
from layer.util import *

class FullConLayer(Layer):
    """Fully connected (dense) layer.

    Flattens the previous layer's (w, h, d) output volume into a column
    vector, computes ``weights.T . x + biases``, and stores the result in a
    (num_neurons, 1, 1) output volume. Gradients produced by back_prop are
    accumulated per sample and averaged when update_weights is called
    (mini-batch style).
    """

    def __init__(self, preLayer, num_neurons):
        super(FullConLayer, self).__init__(preLayer, LayerType.FULL_CONNECT_TYPE, num_neurons, 1, 1)
        (w, h, d) = self.preLayer.output.shape
        self.input_size = w * h * d
        # small symmetric random init in [-0.1, 0.1)
        self.weights = 0.2 * np.random.random((self.input_size, num_neurons)) - 0.1
        self.biases = np.zeros((num_neurons, 1))
        self.output = np.empty((num_neurons, 1, 1))
        self.score = None   # pre-activation scores from the last forward pass
        self.w_grads = []   # accumulated weight gradients, one entry per back_prop
        self.b_grads = []   # accumulated bias gradients, one entry per back_prop

    def get_input(self):
        """Return the previous layer's output flattened to (input_size, 1)."""
        return self.preLayer.output.reshape((self.input_size, 1))

    def forward_prop(self):
        """Compute ``score = W^T x + b`` and write it into self.output."""
        data = self.get_input()
        self.score = self.weights.T.dot(data) + self.biases
        self.output[:, 0, 0] = self.score[:, 0]

    def back_prop(self):
        """Propagate self.delta to the previous layer and cache gradients.

        NOTE(review): assumes self.delta (shape (num_neurons, 1)) has been
        set by the downstream layer before this is called — confirm
        against the training loop.
        """
        layerType = self.preLayer.layerType
        self.preLayer.delta = self.weights.dot(self.delta)
        # conv / pooling layers expect their delta in volume shape
        if layerType == LayerType.MAX_POOL_TYPE or layerType == LayerType.CONV_TYPE:
            self.preLayer.delta = self.preLayer.delta.reshape(self.preLayer.output.shape)
        self.w_grads.append(self.get_input().dot(self.delta.T))
        self.b_grads.append(self.delta)

    def update_weights(self, eta):
        """Apply the averaged accumulated gradients with learning rate eta.

        Fix: guard against an empty gradient cache — previously calling
        update_weights before any back_prop raised ZeroDivisionError on
        ``len(self.w_grads)``.
        """
        if not self.w_grads:
            return  # nothing accumulated; a no-op is the safe behavior
        self.weights -= eta * sum(self.w_grads) / len(self.w_grads)
        self.biases -= eta * sum(self.b_grads) / len(self.b_grads)
        self.w_grads = []  # clear cache for the next batch
        self.b_grads = []  # clear cache for the next batch

if __name__ == "__main__":
    # Smoke test: chain three fully connected layers on random input
    # and print the final 3-element output vector.
    clayer = Layer(None, LayerType.INPUT_TYPE, 10, 1, 1)
    clayer.output = np.random.random((10, 1, 1))
    flayer = FullConLayer(clayer, 5)
    flayer.forward_prop()
    flayer2 = FullConLayer(flayer, 10)
    flayer2.forward_prop()
    flayer3 = FullConLayer(flayer2, 3)
    flayer3.forward_prop()
    # parenthesized single-argument print behaves identically under
    # Python 2 and Python 3 (the bare print statement is py2-only)
    print(flayer3.output[:, 0, 0])


