r'''Functions that require hand-written backward passes; each inherits from Function.

'''
import numpy as np

#from autograd.tensor import Tensor
import autograd.tensor as tensor

class Function:
    """Base class for differentiable operations.

    Subclasses implement ``forward`` (on raw ndarrays) and ``backward``
    (gradient of the loss w.r.t. each input, given the gradient w.r.t.
    the output). Calling the instance wraps the result in a Tensor and
    records this Function as its ``grad_fn``.
    """

    def __init__(self):
        # Values stashed by forward() for use in backward().
        self.saved_tensors = []

    def save_for_backward(self, *x):
        """Stash values needed later by backward()."""
        self.saved_tensors.extend(x)

    def forward(self, *args, **kwargs):
        raise NotImplementedError("You must implement the forward function for custom Function.")

    def backward(self, *args, **kwargs):
        # BUG FIX: the message previously said "forward" (copy-paste error).
        raise NotImplementedError("You must implement the backward function for custom Function.")

    def __call__(self, *ts, **kwargs):
        # Unwrap the underlying ndarrays before handing them to forward().
        data = self.forward(*[t.data for t in ts], **kwargs)
        requires_grad = any(t.requires_grad for t in ts)
        outputs = tensor.Tensor(data, requires_grad=requires_grad, grad_fn=self)

        self.inputs = ts
        # Keep a reference (not a copy) so later updates to `outputs`
        # remain visible through self.outputs.
        self.outputs = outputs

        return outputs


# Activation layers
class Relu(Function):
    """Rectified linear unit: elementwise max(x, 0)."""

    def forward(self, x):
        self.save_for_backward(x)
        return np.maximum(x, 0)

    def backward(self, grad):
        (inp,) = self.saved_tensors
        # Gradient flows only through positions where the input was positive.
        mask = inp > 0
        return grad * mask

class Sigmoid(Function):
    """Logistic sigmoid: 1 / (1 + exp(-x))."""

    def forward(self, x):
        out = np.array(1 / (1 + np.exp(-x)))
        # Save the output, not the input: sigmoid'(x) = out * (1 - out).
        self.save_for_backward(out)
        return out

    def backward(self, grad):
        out, = self.saved_tensors
        # BUG FIX: the chain rule requires multiplying by the incoming
        # gradient; the old code returned only the local derivative.
        return grad * out * (1 - out)

class Tanh(Function):
    """Hyperbolic tangent activation."""

    def forward(self, x):
        # BUG FIX: computing (e^x - e^-x)/(e^x + e^-x) directly overflows
        # for large |x|; np.tanh is the same function, computed stably.
        out = np.tanh(x)
        # Save the output: tanh'(x) = 1 - tanh(x)^2.
        self.save_for_backward(out)
        return out

    def backward(self, grad):
        out, = self.saved_tensors
        return grad * (1 - out**2)

class Softmax(Function):
    """Softmax along `axis`, numerically stabilized by max-subtraction."""

    def forward(self, x, axis=-1):
        # BUG FIX: forward receives a plain ndarray (unwrapped by
        # Function.__call__), which has no .exp() method — use np.exp.
        b = x.max(axis=axis, keepdims=True)
        e = np.exp(x - b)
        out = e / e.sum(axis=axis, keepdims=True)
        self.save_for_backward(out, axis)
        return out

    def backward(self, grad):
        # BUG FIX: was an unimplemented `pass`.
        # Jacobian-vector product of softmax:
        # dx = y * (grad - sum(grad * y, axis)).
        out, axis = self.saved_tensors
        return out * (grad - (grad * out).sum(axis=axis, keepdims=True))

class LogSoftmax(Function):
    """log(softmax(x)) along `axis`, computed via the stable log-sum-exp trick."""

    def forward(self, x, axis=-1):
        # Was an empty stub; implemented consistently with Softmax above.
        b = x.max(axis=axis, keepdims=True)
        logsumexp = b + np.log(np.exp(x - b).sum(axis=axis, keepdims=True))
        out = x - logsumexp
        self.save_for_backward(out, axis)
        return out

    def backward(self, grad):
        # d/dx log_softmax: grad - softmax(x) * sum(grad, axis).
        out, axis = self.saved_tensors
        return grad - np.exp(out) * grad.sum(axis=axis, keepdims=True)

#im2col
class Im2Col(Function):
    """Rearrange sliding windows of a (padded) image batch into matrix rows."""

    def forward(self, input, filter_shape, stride):
        # `input` arrives already padded.
        # input.shape = (N, C, H, W)
        #   -> col.shape = (N, C, oh, ow, fh, fw) -> (N * oh * ow, C * fh * fw)
        N, C, H, W = input.shape
        (fh, fw) = filter_shape
        oh = (H - fh)//stride + 1
        ow = (W - fw)//stride + 1

        col = np.zeros([N, C, oh, ow, fh, fw])

        # Copy each window into its (h, w) slot.
        for h in range(oh):
            for w in range(ow):
                col[:, :, h, w, :, :] = input[:, :, h*stride:h*stride+fh, w*stride:w*stride+fw]

        col = col.transpose(0, 2, 3, 1, 4, 5).reshape(N*oh*ow, -1)

        self.save_for_backward(input.shape, fh, fw, stride, oh, ow)

        return col

    def backward(self, grad):
        (N, C, H, W), fh, fw, stride, oh, ow = self.saved_tensors
        # `grad` has the matrix shape used for the matmul; restore it first:
        # (N * oh * ow, C * fh * fw) -> (N, oh, ow, C, fh, fw) -> (N, C, oh, ow, fh, fw)
        # then scatter-add windows back into image shape (N, C, H, W);
        # overlapping windows accumulate.
        grad = grad.reshape(N, oh, ow, C, fh, fw).transpose(0, 3, 1, 2, 4, 5)
        # BUG FIX: the gradient must have the input's shape (N, C, H, W).
        # The old (H + stride - 1, W + stride - 1) buffer returned the wrong
        # shape whenever stride > 1; (oh-1)*stride + fh <= H, so (H, W) is
        # always large enough.
        grad_im = np.zeros([N, C, H, W])
        for h in range(oh):
            for w in range(ow):
                grad_im[:, :, h*stride:h*stride+fh, w*stride:w*stride+fw] += grad[:, :, h, w, :, :]

        return grad_im

#padding

class Pad2D(Function):
    """Zero-pad the last two (spatial) axes of an (N, C, H, W) array."""

    def forward(self, x, padding=0):
        self.save_for_backward(padding)
        x_p = np.pad(x, ((0, 0), (0, 0), (padding, padding), (padding, padding)))

        return x_p

    def backward(self, grad):
        # BUG FIX: saved_tensors is a list — the old code bound the whole
        # list to `padding`, making the slice below a TypeError. Unpack it.
        padding, = self.saved_tensors
        (_, _, H, W) = grad.shape
        # Strip the padded border; works for padding == 0 as well (0:H, 0:W).
        return grad[:, :, padding:H-padding, padding:W-padding]

#pooling

class MaxPool2d(Function):
    """Max pooling over square windows of an (N, C, H, W) batch."""

    def forward(self, input, kernel_size, stride):
        N, C, H, W = input.shape
        ph, pw = kernel_size, kernel_size
        oh = (H - ph)//stride + 1
        ow = (W - pw)//stride + 1

        self.save_for_backward(input.shape, ph, pw, stride, oh, ow, input)
        # BUG FIX: output shape is (N, C, oh, ow). The old buffer was
        # (N, C, oh, ow, ph, pw), broadcasting each window max across a
        # spurious (ph, pw) trailing block.
        out = np.zeros([N, C, oh, ow])
        for h in range(oh):
            for w in range(ow):
                out[:, :, h, w] = input[:, :, h*stride:h*stride+ph, w*stride:w*stride+pw].max(axis=(-2, -1))

        return out

    def backward(self, grad):
        (N, C, H, W), ph, pw, stride, oh, ow, input = self.saved_tensors
        grad_im = np.zeros([N, C, H, W])
        for h in range(oh):
            for w in range(ow):
                # Route each output gradient to the argmax position of its window.
                window = input[:, :, h*stride:h*stride+ph, w*stride:w*stride+pw].reshape(-1, ph*pw)
                arg_max = window.argmax(axis=-1)
                # BUG FIX: scatter into a fresh zero buffer. The old code
                # seeded the buffer with the already-accumulated grad_im
                # slice and then += it back, double-counting previous
                # contributions whenever windows overlap (stride < kernel).
                scatter = np.zeros((N*C, ph*pw))
                scatter[np.arange(N*C), arg_max] = grad[:, :, h, w].reshape(-1)
                grad_im[:, :, h*stride:h*stride+ph, w*stride:w*stride+pw] += scatter.reshape(N, C, ph, pw)

        return grad_im

# loss

def softmax(x):
    """Softmax over the last axis, stabilized by max-subtraction.

    BUG FIX: exponentiating raw logits overflows to inf (and yields NaN)
    for large values; subtracting the row max is mathematically identical
    and numerically safe.
    """
    shifted = x - np.max(x, axis=-1, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=-1, keepdims=True)

def softmax_1(x, axis=-1):
    """Stable softmax for objects exposing .max/.exp/.sum (e.g. a Tensor).

    Subtracts the per-axis maximum before exponentiating so the
    exponentials stay bounded.
    """
    shifted = x - x.max(axis=axis, keepdims=True)
    exp_shifted = shifted.exp()
    return exp_shifted / exp_shifted.sum(axis=axis, keepdims=True)


class CrossEntropyLoss(Function):
    r"""Mean cross-entropy of softmax(input) against integer class targets.

    shape:
        - Input : (N, C) raw scores (logits)
        - Target: (N,) integer class indices
    """
    def forward(self, input, target):
        # Numerically stable softmax computed inline (BUG FIX: the
        # module-level helper exponentiated raw logits and could overflow).
        shifted = input - input.max(axis=-1, keepdims=True)
        e = np.exp(shifted)
        z = e / e.sum(axis=-1, keepdims=True)  # z.shape = (N, C)
        self.save_for_backward(z, target)
        # Negative log-likelihood of the target class, averaged over the batch.
        loss = -np.log(z[np.arange(z.shape[0]), target]).mean()
        return loss

    def backward(self, grad):
        z, target = self.saved_tensors
        # BUG FIX: work on a copy — the old in-place `z[...] -= 1` corrupted
        # the saved softmax, so a second backward call produced wrong grads.
        dz = z.copy()
        dz[np.arange(dz.shape[0]), target] -= 1

        return grad * dz / dz.shape[0]


# utils

def get_enum(reduction):
    # type: (str) -> int
    """Map a reduction name to its integer code: none->0, mean->1, sum->2.

    Raises ValueError for any other name.
    """
    codes = {'none': 0, 'mean': 1, 'sum': 2}
    try:
        return codes[reduction]
    except KeyError:
        raise ValueError("{} is not a valid value for reduction".format(reduction))
