#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import numpy as np


class FullConnectedLayer(object):
    def __init__(self, input_size, output_size, activator):
        '''
        Fully connected layer: output = activator.forward(W * x + b).

        :param input_size: dimension of the input vector
        :param output_size: dimension of the output vector
        :param activator: object providing forward(x) and backward(x)
                          (the activation function and its derivative)
        '''
        self.input_size = input_size
        self.output_size = output_size
        self.activator = activator

        # Weight matrix, shape (output_size, input_size),
        # initialized uniformly in [-0.1, 0.1).
        self.W = np.random.uniform(-0.1, 0.1, (output_size, input_size))
        # Bias column vector, shape (output_size, 1).
        self.b = np.zeros((output_size, 1))
        # Output column vector, shape (output_size, 1).
        self.output = np.zeros((output_size, 1))

    def forward(self, input_array):
        '''
        Forward pass: a = f(W * x + b).

        Stores the input (needed later by backward) and the activated
        output in self.input / self.output.

        :param input_array: input column vector of shape (input_size, 1)
        :return: None (result is kept in self.output)
        '''
        # Keep the input around: backward() needs it for the gradients.
        self.input = input_array
        self.output = self.activator.forward(np.dot(self.W, input_array) + self.b)

    def backward(self, delta_array):
        '''
        Backward pass: compute the error term to propagate to the
        previous layer and the gradients of W and b.

        :param delta_array: error term coming from the next layer,
                            shape (output_size, 1)
        :return: None (results kept in self.delta / self.W_grad / self.b_grad)
        '''
        # delta for the previous layer: f'(a_prev) ⊙ (W^T · delta).
        # NOTE: activator.backward receives self.input (the previous
        # layer's activated output), so the activator's derivative is
        # expected to be expressed in terms of the activation value.
        self.delta = self.activator.backward(self.input) * np.dot(self.W.T, delta_array)
        # dL/dW = delta · x^T ; dL/db = delta.
        self.W_grad = np.dot(delta_array, self.input.T)
        self.b_grad = delta_array

    def update(self, learning_rate):
        '''
        Update weights with gradient descent.

        NOTE(review): '+=' assumes the stored gradients already point in
        the descent direction (delta convention: delta ∝ label - output)
        — confirm against the network's output-layer delta definition.

        :param learning_rate: step size
        :return: None
        '''
        self.W += learning_rate * self.W_grad
        self.b += learning_rate * self.b_grad

    def dump(self):
        '''Print the current weights and biases (debug helper).'''
        # Single parenthesized argument: valid in both Python 2 and 3.
        print('W: %s\nb:%s' % (self.W, self.b))
