from implement.functions.activation.leaky_relu import LeakyReLU
from implement.functions.activation.log_softmax import LogSoftmax
from implement.functions.activation.sigmoid import Sigmoid
from implement.functions.activation.softmax import Softmax
from implement.functions.activation.tanh import Tanh
from implement.functions.activation.relu import ReLu
from implement.functions.batch_norm import BatchNorm
from implement.functions.clip import Clip
from implement.functions.conv.average_pooling import AveragePooling
from implement.functions.conv.conv2d import Conv2d
from implement.functions.conv.deconv2d import Deconv2d
from implement.functions.conv.im2col import Im2col
from implement.functions.conv.pooling import Pooling
from implement.functions.dropout import Dropout
from implement.functions.tensor_opera.broadcast_to import BroadcastTo
from implement.functions.loss.softmax_cross_entropy import SoftmaxCrossEntropy
from implement.functions.max import Max
from implement.functions.min import Min
from implement.functions.tensor_opera.get_item import GetItem
from implement.functions.layer_opera.matmul import MatMul
from implement.functions.loss.mean_squared_error import MeanSquaredError
from implement.functions.operand.add import Add
from implement.functions.operand.cos import Cos
from implement.functions.operand.div import Div
from implement.functions.operand.exp import Exp
from implement.functions.operand.log import Log
from implement.functions.operand.mul import Mul
from implement.functions.operand.neg import Neg
from implement.functions.operand.pow import Pow
from implement.functions.operand.sin import Sin
from implement.functions.operand.square import Square
from implement.functions.operand.sub import Sub
from implement.functions.tensor_opera.reshape import Reshape
from implement.functions.tensor_opera.sum_to import SumTo
from implement.functions.tensor_opera.transpose import Transpose
from implement.functions.tensor_opera.sum import Sum
from implement.functions.layer_opera.linear import Linear
import utils

# Functional wrappers around the arithmetic Function classes
def add(x0, x1):
    """Element-wise addition of x0 and x1."""
    op = Add()
    return op.add(x0, x1)


def exp(x):
    """Element-wise exponential of x."""
    op = Exp()
    return op.exp(x)


def mul(x0, x1):
    """Element-wise multiplication of x0 and x1."""
    op = Mul()
    return op.mul(x0, x1)


def square(x):
    """Element-wise square of x."""
    op = Square()
    return op.square(x)


def neg(x):
    """Element-wise negation of x."""
    op = Neg()
    return op.neg(x)


def sub(x0, x1):
    """Element-wise subtraction: x0 - x1."""
    op = Sub()
    return op.sub(x0, x1)


def rsub(x0, x1):
    """Reversed-operand subtraction: x1 - x0 (for __rsub__ support)."""
    op = Sub()
    return op.sub(x1, x0)


def div(x0, x1):
    """Element-wise division: x0 / x1."""
    op = Div()
    return op.div(x0, x1)


def rdiv(x0, x1):
    """Reversed-operand division: x1 / x0 (for __rdiv__/__rtruediv__ support)."""
    op = Div()
    return op.div(x1, x0)


def pow(x, c):
    """Raise x element-wise to the constant power c.

    NOTE: intentionally shadows the builtin ``pow`` within this module.
    """
    op = Pow(c)
    return op.pow(x)


def sin(x):
    """Element-wise sine of x."""
    op = Sin()
    return op.sin(x)


def cos(x):
    """Element-wise cosine of x."""
    op = Cos()
    return op.cos(x)


def tanh(x):
    """Element-wise hyperbolic tangent of x."""
    op = Tanh()
    return op.tanh(x)


def reshape(x, shape):
    """Return x reshaped to the given shape."""
    op = Reshape(shape)
    return op.reshape(x)


def transpose(x, axes=None):
    """Permute the axes of x (full reversal when axes is None)."""
    op = Transpose(axes)
    return op.transpose(x)


def sum(x, axis=None, keepdims=False):
    """Sum the elements of x along the given axis.

    NOTE: intentionally shadows the builtin ``sum`` within this module.
    """
    op = Sum(axis, keepdims)
    return op.sum(x)


def broadcast_to(x, shape):
    """Broadcast x to the given shape."""
    op = BroadcastTo(shape)
    return op.broadcast_to(x)


def sum_to(x, shape):
    """Sum the elements of x down to the given shape (inverse of broadcast)."""
    op = SumTo(shape)
    return op.sum_to(x)


def mean_squared_error(x0, x1):
    """Mean squared error between x0 and x1."""
    op = MeanSquaredError()
    return op.mean_squared_error(x0, x1)


def matmul(x, W):
    """Matrix product of x and W."""
    op = MatMul()
    return op.matmul(x, W)


def linear(x, W, b=None):
    """Affine transformation x @ W (+ b when a bias is given)."""
    op = Linear()
    return op.linear(x, W, b)


def sigmoid(x):
    """Element-wise logistic sigmoid of x."""
    op = Sigmoid()
    return op.sigmoid(x)


def get_item(x, slices):
    """Differentiable indexing/slicing of x."""
    op = GetItem(slices)
    return op.get_item(x)


def log(x):
    """Element-wise natural logarithm of x."""
    op = Log()
    return op.log(x)


def relu(x):
    """Element-wise rectified linear unit: max(x, 0)."""
    op = ReLu()
    return op.relu(x)


def softmax(x, axis=1):
    """Softmax of x along the given axis."""
    op = Softmax(axis)
    return op.softmax(x)


def log_softmax(x, axis=1):
    """Log-softmax of x along the given axis."""
    op = LogSoftmax(axis)
    return op.log_softmax(x)


def leaky_relu(x, slope=0.2):
    """Leaky ReLU: x for x > 0, slope * x otherwise."""
    op = LeakyReLU(slope)
    return op.leaky_relu(x)


def softmax_cross_entropy(x, t):
    """Softmax followed by cross-entropy loss against targets t."""
    op = SoftmaxCrossEntropy()
    return op.softmax_cross_entropy(x, t)


def batch_norm(x, gamma, beta, mean, var, decay=0.9, eps=2e-5):
    """Batch normalization of x with scale gamma, shift beta, and running
    statistics mean/var updated with the given decay."""
    op = BatchNorm(mean, var, decay, eps)
    return op.batch_norm(x, gamma, beta)


def max(x, axis=None, keepdims=False):
    """Maximum of x along the given axis.

    NOTE: intentionally shadows the builtin ``max`` within this module.
    """
    op = Max(axis, keepdims)
    return op.max(x)


def min(x, axis=None, keepdims=False):
    """Minimum of x along the given axis.

    NOTE: intentionally shadows the builtin ``min`` within this module.
    """
    op = Min(axis, keepdims)
    return op.min(x)


def clip(x, x_min, x_max):
    """Clamp each element of x into the interval [x_min, x_max]."""
    op = Clip(x_min, x_max)
    return op.clip(x)


def dropout(x, dropout_ratio=0.5):
    """Dropout: randomly zero elements of x at the given ratio."""
    op = Dropout()
    return op.dropout(x, dropout_ratio=dropout_ratio)


def conv2d(x, W, b=None, stride=1, pad=0):
    """2-D convolution of x with filters W and optional bias b."""
    op = Conv2d(stride, pad)
    return op(x, W, b)


def deconv2d(x, W, b=None, stride=1, pad=0, outsize=None):
    """2-D transposed convolution (deconvolution) of x with filters W."""
    op = Deconv2d(stride, pad, outsize)
    return op(x, W, b)


def pooling(x, kernel_size, stride=1, pad=0):
    """Max pooling over kernel_size windows of x."""
    op = Pooling(kernel_size, stride, pad)
    return op(x)


def average_pooling(x, kernel_size, stride=1, pad=0):
    """Average pooling over kernel_size windows of x."""
    op = AveragePooling(kernel_size, stride, pad)
    return op(x)


def im2col(x, kernel_size, stride=1, pad=0, to_matrix=True):
    """Rearrange image patches of x into columns (im2col transform)."""
    op = Im2col(kernel_size, stride, pad, to_matrix)
    return op(x)



def binary_cross_entropy(p, t):
    """Binary cross-entropy loss of predictions p against binary targets t,
    averaged over the batch (first axis of t).

    If t's rank differs from p's, t is reshaped to p's shape first.
    """
    if p.ndim != t.ndim:
        t = t.reshape(*p.shape)
    N = len(t)
    # Clamp symmetrically away from 0 and 1 so neither log(p) nor
    # log(1 - p) sees an exact zero. The previous upper bound of 0.999
    # was asymmetric with the 1e-15 lower bound and needlessly distorted
    # confident positive predictions.
    p = clip(p, 1e-15, 1 - 1e-15)
    tlog_p = t * log(p) + (1 - t) * log(1 - p)
    y = -1 * sum(tlog_p) / N
    return y


def average(x, axis=None, keepdims=False):
    """Mean of x over the given axis, implemented as a scaled sum."""
    x = utils.common.to_variable(x)
    total = sum(x, axis, keepdims)
    # The reduction shrinks the element count, so the size ratio equals
    # 1 / (number of elements averaged together).
    scale = total.data.size / x.data.size
    return total * scale


def flatten(x):
    """Collapse every dimension after the batch dimension into one."""
    batch_size = x.shape[0]
    return reshape(x, (batch_size, -1))
