import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable,Function

# Build two float tensors of shape (2, 2, 3) holding the values 0..11,
# with the first axis of each independently permuted at random.
# (np.random.shuffle permutes only along axis 0.)
b = np.arange(12).reshape((2, 2, 3))
a = np.arange(12).reshape((2, 2, 3))
np.random.shuffle(b)
np.random.shuffle(a)
a = torch.from_numpy(a).float()
b = torch.from_numpy(b).float()


#####################################
#########  torch: approach 1  #########
##### Define a loss with tensor math ops; an nn.Module also maintains extra variables and state ###
class Smoothl1Loss_v1(nn.Module):
    """Smooth-L1 (Huber-style) loss built from plain tensor math.

    For each element d = ypre - ytrue the per-element loss is
        0.5 * d^2      if |d| < 1
        |d| - 0.5      otherwise
    scaled by ``alpha``, summed over the last two dimensions and
    averaged over the batch dimension.
    """

    def __init__(self, alpha):
        super().__init__()
        self.alpha = alpha  # global scale on the loss

    def forward(self, ypre, ytrue):
        """ypre, ytrue: [batch, anchor, 4] box tensors; returns a scalar."""
        diff = ypre - ytrue                       # [b, a, 4]
        abs_diff = diff.abs()
        quad_mask = (abs_diff < 1).float()        # 1 where the quadratic branch applies
        per_elem = 0.5 * diff * diff * quad_mask + (abs_diff - 0.5) * (1 - quad_mask)
        per_sample = (self.alpha * per_elem).sum(dim=2).sum(dim=1)  # [b]
        return per_sample.mean()
######### Plain-function implementation #############
# 2. Define a function directly — no parameters, gradients or other state to maintain.
# Note: all the math must be done with tensor operations.
def my_mse_loss(x, y):
    """Mean squared error between two tensors, averaged over all elements."""
    diff = x - y
    return (diff * diff).mean()
#################################
###### torch.autograd.Function #########
## Extending torch.autograd: implementing a custom layer (even writing backward for a non-differentiable op).
# Implement forward and backward yourself — useful when an algorithm is missing from nn.functional
# and must be built from numpy/scipy routines.

class LinearFunction(Function):
    # 创建torch.autograd.Function类的一个子类
    # 必须是staticmethod
    @staticmethod
    # 第一个是ctx，第二个是input，其他是可选参数。
    # ctx在这里类似self，ctx的属性可以在backward中调用。
    # 自己定义的Function中的forward()方法，所有的Variable参数将会转成tensor！因此这里的input也是tensor．在传入forward前，autograd engine会自动将Variable unpack成Tensor。
    def forward(ctx, input, weight, bias=None):
        print(type(input))
        ctx.save_for_backward(input, weight, bias)  # 将Tensor转变为Variable保存到ctx中
        output = input.mm(weight.t())  # torch.t()方法，对2D tensor进行转置
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output) # unsqueeze(0) 扩展处第0维
            # expand_as(tensor)等价于expand(tensor.size()), 将原tensor按照新的size进行扩展
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # grad_output为反向传播上一级计算得到的梯度值
        input, weight, bias = ctx.saved_variables
        grad_input = grad_weight = grad_bias = None
        # 分别代表输入,权值,偏置三者的梯度
        # 判断三者对应的Variable是否需要进行反向求导计算梯度
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.mm(weight) # 复合函数求导，链式法则
        if ctx.needs_input_grad[1]:
            grad_weight = grad_output.t().mm(grad_input) # 复合函数求导，链式法则
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_output.sum(0).squeeze(0)
        return grad_input, grad_weight, grad_bias
        
class Linear(nn.Module):
    """nn.Module wrapper around LinearFunction: ``y = x @ W^T (+ b)``."""

    def __init__(self, input_features, output_features, bias=True):
        super(Linear, self).__init__()
        self.input_features = input_features
        self.output_features = output_features
        # nn.Parameter is a special kind of Tensor that gets automatically
        # registered as a Module parameter on assignment, and requires
        # gradients by default.
        self.weight = nn.Parameter(torch.Tensor(output_features, input_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(output_features))
        else:
            # Always register optional parameters; they may be None.
            self.register_parameter('bias', None)
        # Not a very smart way to initialize weights
        self.weight.data.uniform_(-0.1, 0.1)
        # BUG FIX: was `if bias is not None:` — `bias` is the bool flag and is
        # never None, so bias=False crashed on `None.data.uniform_`. Check the
        # registered parameter instead.
        if self.bias is not None:
            self.bias.data.uniform_(-0.1, 0.1)

    def forward(self, input):
        # Delegate to the custom autograd Function (see the autograd docs).
        return LinearFunction.apply(input, self.weight, self.bias)

# It's recommended to wrap the new op in a plain function.
def linear(input, weight, bias=None):
    """Functional wrapper that runs LinearFunction's forward.

    BUG FIX: the original used the legacy ``LinearFunction()(input, ...)``
    call style; instantiating a Function and calling it raises a
    RuntimeError on modern PyTorch. The static ``apply`` classmethod is
    the supported entry point.
    """
    return LinearFunction.apply(input, weight, bias)
# Alternatively, alias the static apply method:
# linear = LinearFunction.apply

# #检查实现的backward()是否正确
# from torch.autograd import gradcheck
# # gradchek takes a tuple of tensor as input, check if your gradient
# # evaluated with these tensors are close enough to numerical
# # approximations and returns True if they all verify this condition.
# input = (Variable(torch.randn(20,20).double(), requires_grad=True),)
# test = gradcheck(LinearFunction(), input, eps=1e-6, atol=1e-4)
# print(test)  #　没问题的话输出True

class MyReLU(torch.autograd.Function):
    '''
    Custom autograd ReLU implemented by subclassing torch.autograd.Function
    and providing the forward and backward passes, which operate on Tensors.
    '''

    @staticmethod
    def forward(ctx, input):
        '''
        Forward pass: elementwise max(input, 0).
        '''
        # BUG FIX: this save was commented out, but backward() unpacks
        # ctx.saved_tensors — without it every backward call failed at the
        # unpack with an empty tuple.
        ctx.save_for_backward(input)
        return input.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        '''
        Backward pass: receive the gradient of the loss w.r.t. the output
        and return the gradient w.r.t. the input — pass it through where
        the input was non-negative, zero it elsewhere.
        '''
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0
        return grad_input


# Demo: train a 2-layer net (linear -> custom ReLU -> linear) with manual SGD.
dtype = torch.FloatTensor
# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 2, 10, 5, 2

# Create random Tensors to hold input and outputs, and wrap them in Variables.
# (On modern PyTorch, Variable is a deprecated no-op alias for Tensor.)
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)
y = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)

# Create random Tensors for weights, and wrap them in Variables.
# requires_grad=True so autograd accumulates gradients into .grad.
w1 = Variable(torch.randn(D_in,H).type(dtype), requires_grad=True)
w2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)

learning_rate = 1e-6
for t in range(2):
    # To apply our Function, we use Function.apply method. We alias this as 'relu'.
    relu = MyReLU.apply

    # Forward pass: compute predicted y using operations on Variables; we compute
    # ReLU using our custom autograd operation.
    mid = x.mm(w1)
    midr = relu(mid)
    y_pred = midr.mm(w2)

    # Compute and print loss (sum of squared errors).
    loss = (y_pred - y).pow(2).sum()
    # print(t, loss.item())

    # Use autograd to compute the backward pass.
    loss.backward()

    # Update weights using gradient descent.
    # Operating on .data keeps the update out of the autograd graph.
    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data

    # Manually zero the gradients after updating weights
    # (otherwise .grad accumulates across iterations).
    w1.grad.data.zero_()
    w2.grad.data.zero_()

class Mymm(Function):
    """Custom autograd Function computing elementwise ``x * x``."""

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)  # stash the value backward needs
        return input * input

    @staticmethod
    def backward(ctx, grad_outputs):
        # grad_outputs: gradient returned by the next op downstream of this
        # one in the forward graph. (Debug print removed.)
        input, = ctx.saved_tensors
        # BUG FIX: the original returned 2*input, discarding grad_outputs and
        # breaking the chain rule. d(x^2)/dx = 2x, so the input gradient is
        # grad_outputs * 2 * input.
        return grad_outputs * 2 * input