r"""
各种接口,numpy实现,在Pytorch中是调用torch._C
linear,conv,rnn
relu,tanh, softmax
loss
pad, dropout, pool, norm, 
"""
import numpy as np

from autograd import Tensor
from autograd import functions as _vf

r"""# _vf 相当于pytorch中的底层实现, 此处通过python函数进行包装, 可以提供给nn.module,也可以单独使用,相当于pytorch中的nn.Conv2d 和 F.conv2d
"""
#常用基础网络层

#线性函数

def linear(input, weight, bias=None):
    output = input @ weight.T
    if bias is not None:
        ret = output + bias
        return ret
    return output

#卷积函数

def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    pass

def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    N, C, H, W = input.shape
    fn, fc, fh, fw = weight.shape
    oh = (H - fh)//stride + 1
    ow = (W - fw)//stride + 1
    if padding != 0:
        input = _vf.Pad2D()(input, padding)
    col_in = _vf.Im2Col()(input, filter_shape=(fh,fw), stride=stride)
    col_w = weight.reshape([fn,-1])
    
    col_out = col_in @ col_w.T + bias
    out = col_out.reshape([fn,-1,oh,ow])

    return out

def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    pass


def im2col(input, fh, fw, stride):
    #input是做过pad之后送进来的
    #input.shape=(N,H,W) -> output.shape=(N,oh,ow,fh,fw) -> (N * oh * ow, fh * fw)
    #input.shape=(N,C,H,W) -> output.shape=(N,C,oh,ow,fh,fw) -> (N * oh * ow, C * fh * fw)
    N, C, H, W = input.shape
    oh = (H - fh)//stride + 1
    ow = (W - fw)//stride + 1

    col = np.zeros([N, C, oh, ow, fh, fw])

    for h in range(oh):
        for w in range(ow):
            col[:, :, h, w, :, :] = input[:, :, h*stride:h*stride+fh, w*stride:w*stride+fw]
    
    col = col.transpose(0,2,3,1,4,5).reshape(N*oh*ow, -1)
    return col

def col2im(col, im, fh, fw, stride):
    N, C, H, W = im.shape
    oh = (H - fh)//stride + 1
    ow = (W - fw)//stride + 1    
    #grad的shape是矩阵计算时的shape,需要先复原
    #(N * oh * ow, C * fh * fw) -> (N,oh,ow,C,fh,fw) -> (N,C,oh,ow,fh,fw)
    #(N,C,oh,ow,fh,fw)-> im.shape=(N,C,H,W) 梯度会累加
    col = col.reshape(N, oh, ow, C, fh, fw).transpose(0, 3, 1, 2, 4, 5)
    #grad_im = np.zeros([N, C, H + stride - 1, W + stride - 1])
    for h in range(oh):
        for w in range(ow):
            im[:,:,h*stride:h*stride+fh, w*stride:w*stride+fw] += col[:,:,h,w,:,:]
    
    return im

#循环神经网络

def rnn_tanh_cell(input, hx, weight_ih, weight_hh, bias):
    output = tanh(input @ weight_ih.T + hx @ weight_hh.T + bias)

    return output

def lstm_cell(input, hx, c, weight_ih, weight_hh, bias):
    _, H = hx.shape
    out = input @ weight_ih.T + hx @ weight_hh.T + bias
    
    #slice
    f_ = out[:, :H]
    g_ = out[:, H:2*H]
    i_ = out[:, 2*H:3*H]
    o_ = out[:, 3*H:]

    f = sigmoid(f_)
    g = tanh(g_)
    i = sigmoid(i_)
    o = sigmoid(o_)

    c_next = f * c + g * i
    h_next = o * tanh(c_next)

    return h_next, c_next

#池化函数

def avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
    pass


#激活函数

def relu(input: Tensor, inplace: bool = False) -> Tensor:
    r"""relu(input, inplace=False) -> Tensor

    Applies the rectified linear unit function element-wise. See
    :class:`~torch.nn.ReLU` for more details.
    """
    return _vf.Relu()(input)

def sigmoid(input: Tensor) -> Tensor:
    r"""sigmoid
    """
    return _vf.Sigmoid()(input)

def tanh(input: Tensor) -> Tensor:
    r"""tanh
    """
    return _vf.Tanh()(input)

def softmax(input: Tensor, dim):
    pass

def log_softmax():
    pass

#损失函数

def nll_loss(input, target, reduction='mean'):
    dim = input.dim()
    if dim < 2:
        raise ValueError('Expected 2 or more dimensions (got {})'.format(dim))
    if input.shape[0] != target.shape[0]:
        raise ValueError('Expected input batch_size ({}) to match target batch_size ({}).'.format(input.shape[0], target.shape[0]))
    ret = input[np.arange(target.shape[0]), target.data]
    if reduction == 'none':
        return ret
    elif reduction == 'mean':
        return ret.mean()
    elif reduction == 'sum':
        return ret.sum()
    else:
        raise ValueError("{} is not a valid value for reduction".format(reduction))     

def cross_entropy(input, target, reduction='mean'):
    r""" warp of celoss
    """
    return _vf.CrossEntropyLoss()(input, target)

