import numpy as np
import math
from Way import *
# Undo NumPy broadcasting: reduce/expand array `a` so its shape matches `b`,
# so a gradient produced by a broadcast op matches the shape of its operand.
def keep(a, b):
    """Collapse or tile array *a* until it has exactly the shape of *b*.

    Gradients of broadcast operations come back with the shape of the
    broadcast result; this sums surplus axes away (and tiles size-1 axes
    back up) so the gradient matches the original operand's shape.
    """
    out = a.copy()

    # Step 1: align the number of dimensions (NumPy aligns shapes from
    # the right, so surplus/missing axes are always the leftmost ones).
    extra = b.ndim - out.ndim
    if extra > 0:
        # b has more axes: prepend singleton axes on the left.
        out = np.expand_dims(out, axis=tuple(range(extra)))
    elif extra < 0:
        # b has fewer axes: sum the leftmost surplus axes away.
        out = out.sum(axis=tuple(range(-extra)), keepdims=False)

    # Step 2: fix up each axis length, walking from the last axis back.
    for ax in range(out.ndim - 1, -1, -1):
        want = b.shape[ax]
        have = out.shape[ax]
        if have > want:
            # Axis too large: collapse by summation (axis kept as size 1).
            out = out.sum(axis=ax, keepdims=True)
        elif have < want:
            # Axis too small: tile the entries up to the target size.
            out = np.repeat(out, want // have, axis=ax)

    return out
# Tensor class with reverse-mode automatic differentiation
class Var:
    """A minimal autograd tensor wrapping a NumPy array.

    Every operation returns a new ``Var`` that records its operands
    (``left``/``right``) and an integer opcode ``fh``, so ``backward()``
    can later walk the graph and accumulate gradients into ``grad``.

    Opcode table (``fh``):
        -2 reshape   -1 axis sum    0 full sum    1 add        2 subtract
         3 multiply   4 power       5 matmul      6 transpose  7 divide
         8 relu       9 log        10 conv2d     11 max-pool  12 avg-pool
    """

    def __init__(self, data, require_grad=False):
        # Raw value, always stored as a NumPy array.
        self.data = np.array(data)
        # Gradient accumulated by backward(); None until computed.
        self.grad = None
        # Computation-graph links: the operands of the producing operation.
        self.left = None
        self.right = None
        # Whether gradients should be tracked through this node.
        self.require_grad = require_grad
        # Opcode of the producing operation (see class docstring).
        self.fh = None
        # Number of dimensions of the underlying array.
        self.ndim = len(self.data.shape)

    def __repr__(self):
        return 'Var:{}'.format(self.data)

    # Sum of all elements (forward pass), recording the op for backward.
    def sum(self):
        temp = Var(self.data.sum(), self.require_grad)
        if self.require_grad is True:
            temp.__add(0, self, None)
        # BUG FIX: always return the result; previously a Var that did not
        # require gradients returned None from sum().
        return temp

    # Sum along one axis (forward pass), recording the op for backward.
    def sum_axis(self, axis=0, keep_axis=True):
        temp = Var(self.data.sum(axis, keepdims=keep_axis), self.require_grad)
        if self.require_grad is True:
            temp.__add(-1, self, Var(axis))
        return temp

    # Every operation wraps its result (and plain operands) in a Var.
    # Addition.
    def __add__(self, other):
        if isinstance(other, Var) is False:  # promote plain values to Var
            other = Var(other)
        require = self.require_grad or other.require_grad  # track if either side does
        # Result node.
        temp = Var(self.data + other.data, require_grad=require)
        if require is True:
            temp.__add(1, self, other)  # register in the computation graph
        return temp

    def __radd__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        temp = Var(self.data + other.data, require_grad=require)
        if require is True:
            temp.__add(1, other, self)
        return temp

    # Subtraction.
    def __sub__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        temp = Var(self.data - other.data, require_grad=require)
        if require is True:
            temp.__add(2, self, other)
        return temp

    def __rsub__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        temp = Var(other.data - self.data, require_grad=require)
        if require is True:
            temp.__add(2, other, self)
        return temp

    # Element-wise multiplication.
    def __mul__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        temp = Var(self.data * other.data, require_grad=require)
        if require is True:
            temp.__add(3, self, other)
        return temp

    def __rmul__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        temp = Var(self.data * other.data, require_grad=require)
        if require is True:
            temp.__add(3, other, self)
        return temp

    # Power.
    def __pow__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        temp = Var(self.data ** other.data, require_grad=require)
        if require is True:
            temp.__add(4, self, other)
        return temp

    def __rpow__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        temp = Var(other.data ** self.data, require_grad=require)
        if require is True:
            temp.__add(4, other, self)
        return temp

    # Matrix multiplication.
    def __matmul__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        temp = Var(self.data @ other.data, require_grad=require)
        if require is True:
            temp.__add(5, self, other)
        return temp

    def __rmatmul__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        temp = Var(other.data @ self.data, require_grad=require)
        if require is True:
            temp.__add(5, other, self)
        return temp

    # Division. NOTE: eps is added to the denominator for numerical safety,
    # so results are approximate for exact divisions.
    def __truediv__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        eps = 1e-8
        temp = Var(self.data / (other.data + eps), require_grad=require)
        if require is True:
            temp.__add(7, self, other)
        return temp

    def __rtruediv__(self, other):
        if isinstance(other, Var) is False:
            other = Var(other)
        require = self.require_grad or other.require_grad
        eps = 1e-8
        temp = Var(other.data / (self.data + eps), require_grad=require)
        if require is True:
            temp.__add(7, other, self)
        return temp

    # Unary minus, implemented as 0 - self.
    def __neg__(self):
        other = Var(0)
        require = self.require_grad or other.require_grad
        temp = Var(other.data - self.data, require_grad=require)
        if self.require_grad is True:
            temp.__add(2, other, self)
        return temp

    # Record an operation in the computation graph.
    def __add(self, num, left, right):
        self.fh = num
        self.left = left
        self.right = right

    # Logarithm with arbitrary base (natural log by default).
    # NOTE(review): the clip to (eps, 1 - eps) assumes the input is a
    # probability-like value in [0, 1] — confirm against callers.
    def log(self, num=math.e):
        eps = 1e-10
        # BUG FIX: clip a local copy instead of mutating self.data in place,
        # which silently corrupted the caller's tensor.
        clipped = np.clip(self.data, eps, 1 - eps)
        other = Var(num)
        require = self.require_grad or other.require_grad
        # log_base(x) = ln(x) / ln(base)
        temp = Var(np.log(clipped) / np.log(num), require_grad=require)
        if require is True:
            temp.__add(9, other, self)  # left = base, right = argument
        return temp

    # Swap two axes.
    def transpose(self, axis_1, axis_2):
        # BUG FIX: propagate this node's require_grad instead of always True.
        temp = Var(self.data.swapaxes(axis_1, axis_2), require_grad=self.require_grad)
        if self.require_grad is True:
            temp.__add(6, self, Var([axis_1, axis_2]))
        return temp

    # Sigmoid, composed from differentiable primitives so it is traced.
    def sigmoid(self):
        return 1 / (1 + math.e ** (-self))

    # ReLU.
    def relu(self):
        temp = Var(np.maximum(0, self.data), require_grad=self.require_grad)
        if self.require_grad is True:
            temp.__add(8, self, None)
        return temp

    # Softmax along one axis, composed from traced primitives.
    def softmax(self, dim=0, keep_axis=True):
        a = math.e ** self
        b = a.sum_axis(dim, keep_axis)
        temp = a / b
        return temp

    # Cross-entropy loss: -(labels * log(self)).sum().
    # NOTE(review): `labels` is expected to be a Var (or broadcastable value);
    # confirm against callers.
    def criterion(self, labels):
        return -(labels * self.log()).sum()

    # Reshape; the original shape is stored on the right node for backward.
    def reshape(self, shape):
        right = Var(self.data.shape)
        temp = Var(self.data.reshape(shape), require_grad=self.require_grad)
        if self.require_grad:
            temp.__add(-2, self, right)
        return temp

    # 2-D convolution (delegates the forward pass to Way.conv2d).
    def conv2d(self, x, out_channels=1, in_channels=1, padding=0, stride=1):
        if isinstance(x, Var) is False:
            x = Var(x)
        require = self.require_grad or x.require_grad
        temp = Var(Way.conv2d(self.data, x.data, out_channels, in_channels, padding, stride), require_grad=require)
        if require is True:
            temp.__add(10, self, x)
        return temp

    # 2-D max pooling (delegates the forward pass to Way.pool2d).
    def max_pool2d(self, k_h, k_w, padding=0, stride=None):
        temp = Var(Way.pool2d(k_h, k_w, self.data, padding, stride, 'max'), require_grad=self.require_grad)
        if self.require_grad is True:
            # Normalize stride: default is the kernel size (non-overlapping).
            if stride is None:
                s_h, s_w = k_h, k_w
            else:
                if isinstance(stride, int):
                    s_h, s_w = stride, stride
                else:
                    s_h, s_w = stride
            right = Var([k_h, k_w, s_h, s_w, padding])
            temp.__add(11, self, right)
        return temp

    # 2-D average pooling (delegates the forward pass to Way.pool2d).
    def avg_pool2d(self, k_h, k_w, padding=0, stride=None):
        temp = Var(Way.pool2d(k_h, k_w, self.data, padding, stride, 'avg'), require_grad=self.require_grad)
        if self.require_grad is True:
            # Normalize stride: default is the kernel size (non-overlapping).
            if stride is None:
                s_h, s_w = k_h, k_w
            else:
                if isinstance(stride, int):
                    s_h, s_w = stride, stride
                else:
                    s_h, s_w = stride
            right = Var([k_h, k_w, s_h, s_w, padding])
            temp.__add(12, self, right)
        return temp

    # Local backward step: dispatch on the opcode and push this node's
    # gradient onto its operands, accumulating into any existing gradient.
    def __diff(self):

        # Reshape: restore the gradient to the stored original shape.
        if self.fh == -2:
            if self.left.require_grad is True:
                grad = self.grad.reshape(self.right.data.astype(np.int32))
                # BUG FIX: accumulate instead of overwriting an existing grad.
                if self.left.grad is not None:
                    self.left.grad += grad
                else:
                    self.left.grad = grad

        # Axis sum: broadcast the gradient back to the operand's shape.
        if self.fh == -1:
            if self.left.require_grad is True:
                if self.left.grad is not None:
                    self.left.grad += keep(self.grad, self.left.data)
                else:
                    self.left.grad = keep(self.grad, self.left.data)

        # Full sum: every element receives the (scalar) gradient.
        if self.fh == 0:
            if self.left.require_grad is True:
                if self.left.grad is not None:
                    self.left.grad += self.grad * np.ones(self.left.data.shape)
                else:
                    self.left.grad = self.grad * np.ones(self.left.data.shape)

        # Addition: gradient flows through unchanged to both operands.
        if self.fh == 1:
            if self.left.require_grad is True:
                if self.left.grad is not None:
                    self.left.grad += keep(self.grad * 1, self.left.data)
                else:
                    self.left.grad = keep(self.grad * 1, self.left.data)
            if self.right.require_grad is True:
                if self.right.grad is not None:
                    self.right.grad += keep(self.grad * 1, self.right.data)
                else:
                    self.right.grad = keep(self.grad * 1, self.right.data)

        # Subtraction: +grad to the left operand, -grad to the right.
        if self.fh == 2:
            if self.right.require_grad is True:
                if self.right.grad is not None:
                    self.right.grad -= keep(self.grad * 1, self.right.data)
                else:
                    self.right.grad = -keep(self.grad * 1, self.right.data)
            if self.left.require_grad is True:
                if self.left.grad is not None:
                    self.left.grad += keep(self.grad * 1, self.left.data)
                else:
                    self.left.grad = keep(self.grad * 1, self.left.data)

        # Multiplication: each side receives grad times the other operand.
        if self.fh == 3:
            if self.left.require_grad is True:
                if self.left.grad is not None:
                    self.left.grad += keep(self.grad * self.right.data, self.left.data)
                else:
                    self.left.grad = keep(self.grad * self.right.data, self.left.data)
            if self.right.require_grad is True:
                if self.right.grad is not None:
                    self.right.grad += keep(self.grad * self.left.data, self.right.data)
                else:
                    # BUG FIX: reduce to the RIGHT operand's shape (was
                    # self.left.data, which broke broadcasting cases).
                    self.right.grad = keep(self.grad * self.left.data, self.right.data)

        # Power: d/dx x**y = y * x**(y-1); d/dy x**y = ln(x) * x**y.
        if self.fh == 4:
            eps = 1e-8
            # safe_left guards the log against non-positive bases; the base
            # gradient below still uses the raw value (matches original math).
            safe_left = np.maximum(self.left.data, eps)
            if self.left.require_grad is True:
                if self.left.grad is not None:
                    self.left.grad += keep(self.grad * self.left.data ** (self.right.data - 1) * self.right.data,
                                           self.left.data)
                else:
                    self.left.grad = keep(self.grad * self.left.data ** (self.right.data - 1) * self.right.data,
                                          self.left.data)
            if self.right.require_grad is True:
                if self.right.grad is not None:
                    self.right.grad += keep(self.grad * (np.log(safe_left) * (self.left.data ** self.right.data)),
                                            self.right.data)
                else:
                    self.right.grad = keep(self.grad * (np.log(safe_left) * (self.left.data ** self.right.data)),
                                           self.right.data)

        # Matmul: dL/dA = dL/dC @ B^T ; dL/dB = A^T @ dL/dC.
        if self.fh == 5:
            if self.left.require_grad is True:
                if self.left.grad is not None:
                    self.left.grad += keep(self.grad @ self.right.data.swapaxes(-1, -2), self.left.data)
                else:
                    self.left.grad = keep(self.grad @ self.right.data.swapaxes(-1, -2), self.left.data)
            if self.right.require_grad is True:
                if self.right.grad is not None:
                    self.right.grad += keep(self.left.data.swapaxes(-1, -2) @ self.grad, self.right.data)
                else:
                    self.right.grad = keep(self.left.data.swapaxes(-1, -2) @ self.grad, self.right.data)

        # Transpose: swap the same two axes back on the gradient.
        if self.fh == 6:
            if self.left.require_grad is True:
                if self.left.grad is not None:
                    self.left.grad += self.grad.swapaxes(int(self.right.data[0]), int(self.right.data[1]))
                else:
                    self.left.grad = self.grad.swapaxes(int(self.right.data[0]), int(self.right.data[1]))

        # Division: d/dx (x/y) = 1/y ; d/dy (x/y) = -x/y**2.
        if self.fh == 7:
            eps = 1e-8
            if self.left.require_grad is True:
                if self.left.grad is not None:
                    self.left.grad += keep(self.grad / (self.right.data + eps), self.left.data)
                else:
                    self.left.grad = keep(self.grad / (self.right.data + eps), self.left.data)
            if self.right.require_grad is True:
                if self.right.grad is not None:
                    self.right.grad += keep(self.grad * ((0 - self.left.data) / (self.right.data ** 2 + eps)),
                                            self.right.data)
                else:
                    self.right.grad = keep(self.grad * ((0 - self.left.data) / (self.right.data ** 2 + eps)),
                                           self.right.data)

        # ReLU: gradient is 1 where the input was positive, else 0.
        if self.fh == 8:
            if self.left.require_grad is True:
                mask = (self.left.data > 0).astype(float)
                if self.left.grad is not None:
                    self.left.grad += keep(self.grad * mask, self.left.data)
                else:
                    self.left.grad = keep(self.grad * mask, self.left.data)

        # Log: d/dx log_b(x) = 1 / (x * ln(b)). left = base, right = argument;
        # no gradient is propagated to the base.
        if self.fh == 9:
            if self.right.require_grad is True:
                if self.right.grad is not None:
                    self.right.grad += keep(self.grad * 1 / (self.right.data * np.log(self.left.data)), self.right.data)
                else:
                    self.right.grad = keep(self.grad * 1 / (self.right.data * np.log(self.left.data)), self.right.data)

        # Convolution gradients (naive loop implementation).
        if self.fh == 10:
            if self.left.require_grad is True:
                # Kernel gradient: correlate the input with the output grad.
                right = self.right.data
                grad = self.grad
                k_h, k_w = grad.shape[2:4]
                h, w = right.shape[2:4]
                h -= k_h - 1
                w -= k_w - 1
                ans = list()
                # Slide the output gradient over the input.
                for batch in range(grad.shape[0]):
                    for one in range(grad.shape[1]):
                        for two in range(right.shape[1]):
                            for three in range(h):
                                for four in range(w):
                                    temp = right[batch, two, three:three + k_h, four:four + k_w] * grad[batch, one, :,
                                                                                                  :]
                                    ans.append(temp.sum())
                ans = np.array(ans)
                ans = ans.reshape((grad.shape[0], grad.shape[1], right.shape[1], h, w))
                # Sum over the batch dimension to get the kernel gradient.
                if self.left.grad is not None:
                    self.left.grad += np.sum(ans, axis=0)
                else:
                    self.left.grad = np.sum(ans, axis=0)
            if self.right.require_grad is True:
                # Input gradient: full convolution of the output grad with
                # the 180-degree-rotated kernel.
                w = np.copy(self.left.data)
                dy = np.copy(self.grad)
                k_h = w.shape[2]
                k_w = w.shape[3]
                for c_out in range(w.shape[0]):
                    # Two 90-degree rotations = 180-degree rotation.
                    w[c_out] = np.rot90(w[c_out], 2, axes=(1, 2))
                # Zero-pad dy by (kernel - 1) on each spatial side.
                dy_padded = np.zeros((dy.shape[0], dy.shape[1], dy.shape[2] + 2 * (k_h - 1), dy.shape[3] + 2 * (k_w - 1)))
                for c_out in range(w.shape[0]):
                    dy_padded[:, c_out, :, :] = np.pad(dy[:, c_out, :, :], ((0, 0), ((k_h - 1), (k_h - 1)), ((k_w - 1), (k_w - 1))),
                                                       mode='constant')
                # Convolve the padded gradient with the rotated kernel.
                h, w0 = dy_padded.shape[2:4]
                h -= k_h - 1
                w0 -= k_w - 1
                ans = list()
                for batch in range(dy_padded.shape[0]):
                    for two in range(w.shape[1]):
                        for three in range(h):
                            for four in range(w0):
                                temp = dy_padded[batch, :, three:three + k_h, four:four + k_w] * w[:, two, :,
                                                                                               :]
                                ans.append(temp.sum())
                ans = np.array(ans)
                ans = ans.reshape((dy_padded.shape[0], w.shape[1], h, w0))
                # Accumulate the input gradient.
                if self.right.grad is not None:
                    self.right.grad += ans
                else:
                    self.right.grad = ans

        # Max pooling: route each output gradient to the argmax position of
        # its window. NOTE(review): assignment (not +=) inside the window
        # means overlapping windows (stride < kernel) overwrite each other.
        if self.fh == 11:
            if self.left.require_grad is True:
                grad = np.zeros(self.left.data.shape)
                right = self.right.data
                k_h, k_w, s_h, s_w, padding = right
                h, w = self.left.data.shape[2:4]
                h = (h - k_h) // s_h + 1
                w = (w - k_w) // s_w + 1
                for batch in range(self.left.data.shape[0]):
                    for one in range(self.left.data.shape[1]):
                        for three in range(h):
                            for four in range(w):
                                temp = self.left.data[batch, one, three * s_h:three * s_h + k_h, four * s_w:four * s_w + k_w]
                                ans = np.argmax(temp)
                                ans = np.unravel_index(ans, temp.shape)
                                grad[batch, one, three * s_h:three * s_h + k_h, four * s_w:four * s_w + k_w][ans] = self.grad[batch, one, three, four]
                if self.left.grad is None:
                    self.left.grad = grad
                else:
                    self.left.grad += grad

        # Average pooling: spread each output gradient uniformly over its
        # pooling window.
        if self.fh == 12:
            if self.left.require_grad is True:
                grad = np.zeros(self.left.data.shape)
                right = self.right.data
                k_h, k_w, s_h, s_w, padding = right
                h, w = self.left.data.shape[2:4]
                h = int((h - k_h) // s_h + 1)
                w = int((w - k_w) // s_w + 1)
                for batch in range(self.left.data.shape[0]):
                    for one in range(self.left.data.shape[1]):
                        for three in range(h):
                            for four in range(w):
                                # BUG FIX: use the single output-cell gradient
                                # (was the whole channel slice) and accumulate
                                # with += so overlapping windows sum correctly;
                                # previously the branch also ran only when
                                # left.grad was None, skipping accumulation.
                                grad[batch, one, three * s_h:three * s_h + k_h,
                                     four * s_w:four * s_w + k_w] += self.grad[batch, one, three, four] / (k_h * k_w)
                if self.left.grad is None:
                    self.left.grad = grad
                else:
                    self.left.grad += grad

    # Backward pass: seed the output gradient with ones, then walk the graph.
    # NOTE(review): `save_graph` and the inner `delete` helper are currently
    # unused — the graph is never freed here; confirm intended behavior.
    def backward(self, save_graph=False):
        it = self
        it.grad = np.ones(self.data.shape)

        # Clear the computation graph (currently never invoked).
        def delete(it):
            if it.left is None and it.right is None:
                return
            else:
                if it.left is not None and it.left.require_grad is True:
                    delete(it.left)
                if it.right is not None and it.right.require_grad is True:
                    delete(it.right)
                it.grad = None
                it.left = None
                it.right = None
                if it.fh == 10:
                    it.out_channels = None
                    it.in_channels = None
                it.fh = None

        # Depth-first traversal: differentiate this node, then recurse into
        # grad-requiring operands; interior grads are reset to 0 afterwards.
        def back(it):
            if it.left is None and it.right is None:
                return
            else:
                it.__diff()
                if it.left is not None and it.left.require_grad is True:
                    back(it.left)
                if it.right is not None and it.right.require_grad is True:
                    back(it.right)
                it.grad = 0
        back(it)

    # NOTE: a deprecated, commented-out forward() graph re-evaluation helper
    # used to live here; it has been removed.



