import numpy as np
import utils.Tensor
from optimizor import numerical_gradient

"""
all activation function
"""


def relu(inputs):
    """Apply the rectified linear unit element-wise.

    Each element ``v`` maps to ``v`` when positive and ``0`` otherwise
    (via ``(v > 0) * v``, so the zero keeps the element's own type).

    Args:
        inputs: iterable of numeric values.

    Returns:
        A ``utils.Tensor`` wrapping the rectified values.
    """
    rectified = [(value > 0) * value for value in inputs]
    return utils.Tensor(rectified)


def softmax(inputs):
    """Compute the softmax of ``inputs``.

    Fixes over the previous version:
    - Subtracts ``max(inputs)`` before exponentiating, the standard
      numerical-stability trick: ``np.exp`` overflows to ``inf`` for large
      arguments, which previously made the result ``nan``. Shifting by a
      constant does not change the mathematical result.
    - Computes each ``np.exp`` once instead of twice (the old code
      re-exponentiated every element in the second pass).

    Args:
        inputs: non-empty iterable of numeric values.

    Returns:
        A ``utils.Tensor`` of probabilities summing to 1.
    """
    shift = max(inputs)  # hoisted: constant offset for numerical stability
    exps = [np.exp(x - shift) for x in inputs]
    total = sum(exps)
    return utils.Tensor([e / total for e in exps])


def sigmod(inputs):
    """Compute the logistic sigmoid 1 / (1 + e^-x) element-wise.

    Replaces the previous ``np.full`` + ``list(map(...))`` broadcast dance
    with a single vectorized expression. The ``(1, n)`` row-vector shape of
    the original output is preserved for callers (e.g. ``grad_sigmod``).

    NOTE(review): function name keeps the original "sigmod" spelling — it is
    part of the public interface; renaming would break callers.

    Args:
        inputs: iterable of numeric values.

    Returns:
        A ``utils.Tensor`` wrapping a ``(1, len(inputs))`` float array of
        values in (0, 1).
    """
    x = np.asarray(inputs, dtype=float).reshape(1, -1)
    return utils.Tensor(1.0 / (1.0 + np.exp(-x)))

def grad_relu(inputs):
    """Element-wise derivative of ReLU.

    Yields 1 where the input is strictly positive and 0 otherwise
    (the derivative at exactly 0 is taken as 0 here).

    Args:
        inputs: iterable of numeric values.

    Returns:
        A ``utils.Tensor`` of 0/1 gradient values.
    """
    gradient = [(value > 0) * 1 for value in inputs]
    return utils.Tensor(gradient)

def grad_softmax(inputs):
    """Numerically estimate the gradient of ``softmax`` at ``inputs``.

    Fixes over the previous version:
    - Removed a leftover debug ``print`` of the gradient.
    - ``numerical_gradient`` was called twice (once for the print, once for
      the return) — finite differences are expensive, so compute it once.

    Args:
        inputs: iterable of numeric values accepted by ``softmax``.

    Returns:
        Whatever ``optimizor.numerical_gradient`` returns for
        ``softmax`` at ``inputs`` (presumably a gradient of matching
        shape — TODO confirm against ``numerical_gradient``).
    """
    return numerical_gradient(softmax, inputs)


def grad_sigmod(inputs):
    """Derivative of the sigmoid: s * (1 - s) where s = sigmod(inputs).

    The ones row-vector and the exact operation order are kept as in the
    original, since the arithmetic goes through ``utils.Tensor`` operator
    overloads whose exact semantics are defined elsewhere.

    Args:
        inputs: iterable of numeric values.

    Returns:
        ``sigmod(inputs) * (1 - sigmod(inputs))`` element-wise, in whatever
        type ``utils.Tensor`` multiplication produces.
    """
    ones_row = np.full((1, len(inputs)), 1)
    s = sigmod(inputs)
    return s * (ones_row - s)
