import numpy as np
import nn
import utils
from optimizor.Optimizor import numerical_gradient

"""
basic Dense layer in one dimension
"""


class Dense:
    """Fully connected layer computing ``activation(x @ W) + b``.

    NOTE(review): the bias is added *after* the activation here (unusual —
    most frameworks apply ``activation(x @ W + b)``), but the backward
    methods below are consistent with this forward convention, so it is
    preserved as-is.

    The layer assumes a single 1-D sample of length ``input_channel``
    (``__check_input_size`` uses ``len(inputs)`` and ``__call__`` returns
    row 0 of the result) — presumably no batching; verify against callers.
    """

    def __init__(self, input_channel, output_channel, activation=lambda x: x):
        # Cached forward input; set on each __call__ and read by the
        # gradient methods below.
        self.input = None
        self.activation = activation
        self.input_channel = input_channel
        self.output_channel = output_channel
        # Weights uniformly initialized in [-1, 1).
        self.weight = 2*np.random.random((input_channel, output_channel)) - 1
        # Use float zeros: np.full((1, n), 0) yields an *integer* array,
        # and an in-place float update on an int buffer silently truncates.
        self.bias = utils.Tensor(np.zeros((1, output_channel)))

    def __call__(self, inputs):
        """Run the forward pass and return the output as a utils.Tensor.

        Raises ValueError when ``len(inputs) != input_channel``.
        """
        self.__check_input_size(inputs)
        inputs = self.__check_inputs_class(inputs)
        self.input = inputs
        self.output = self.activation(self.input.matmul(self.weight)) + self.bias
        # Row 0 only — assumes a single (non-batched) sample.
        return utils.Tensor(self.output[0])

    def __check_input_size(self, inputs):
        # Validate the 1-D input length against the declared channel count.
        if len(inputs) != self.input_channel:
            raise ValueError("Your inputs length " + str(len(inputs)) + " should be equal to your input_channel " + str(
                self.input_channel) + ".")

    @staticmethod
    def __check_inputs_class(inputs):
        # Coerce raw sequences/arrays to utils.Tensor; pass Tensors through.
        if isinstance(inputs, utils.Tensor):
            return inputs
        else:
            return utils.Tensor(inputs)

    def front_weight(self, weight):
        """Forward pass using an externally supplied weight matrix.

        Used by numerical-gradient checking: re-evaluates the layer on the
        cached ``self.input`` with ``weight`` substituted for ``self.weight``.
        """
        return self.activation(self.input.matmul(weight)) + self.bias

    def __apply_activation_grad(self, startgrad):
        """Return ``startgrad`` scaled by the activation derivative at x @ W.

        Identity activation (the default lambda) falls through unchanged.
        Deliberately returns a fresh product instead of ``*=`` so the
        caller's ``startgrad`` is never mutated in place — previously,
        calling grad_input then grad_weight with the same upstream gradient
        could double-apply the derivative if Tensor multiplies in place.
        """
        pre_activation = self.input.matmul(self.weight)
        if self.activation == nn.sigmod:
            return startgrad * nn.grad_sigmod(pre_activation)
        if self.activation == nn.relu:
            return startgrad * nn.grad_relu(pre_activation)
        return startgrad

    def grad_input(self, startgrad):
        """Backpropagate ``startgrad`` to the layer input: dL/dx = g @ W^T."""
        grad = self.__apply_activation_grad(startgrad)
        return grad.matmul(self.weight.transpose())

    def grad_weight(self, startgrad):
        """Backpropagate ``startgrad`` to the weights: dL/dW = x^T @ g."""
        grad = self.__apply_activation_grad(startgrad)
        return self.input.reshape((1, len(self.input))).transpose().matmul(grad)


