import numpy as np
from typing import List, Tuple, Union, Optional, Any, Set, Callable
import  weakref

class Tensor:
    """
    A minimal autograd tensor, modeled after PyTorch's torch.Tensor.

    Wraps a NumPy array and, when requires_grad is set, records the
    producing operation and parent tensors so gradients can be
    propagated with backward().
    """
    def __init__(self, data: Union[np.ndarray, List, float, int, 'Tensor'], 
                 requires_grad: bool = False, 
                 _children: Tuple['Tensor', ...] = (), 
                 _op: str = ''):
        """
        Initialize the tensor.

        Args:
            data: a list, NumPy array, scalar, or another Tensor.
            requires_grad: whether gradients should be tracked for this tensor.
            _children: parent tensors that produced this one (builds the
                computation graph).
            _op: name of the producing operation (debugging aid).
        """
        if isinstance(data, Tensor):
            # Alias the source tensor's buffer; no copy is made.
            self.data = data.data
        elif isinstance(data, np.ndarray):
            # ndarray input keeps its original dtype; only non-array input
            # is converted to float32 below.
            self.data = data
        else:
            self.data = np.array(data, dtype=np.float32)
        
        self.requires_grad = requires_grad
        self.grad: Optional[np.ndarray] = None
        self._backward: Callable[[], None] = lambda: None  # backward function for this node
        self._prev: Set['Tensor'] = set(_children)  # parent nodes in the computation graph
        self._op: str = _op  # operation name, for debugging
        
    def __repr__(self) -> str:
        return f"Tensor({self.data}, requires_grad={self.requires_grad})"
    
    @property
    def shape(self) -> Tuple[int, ...]:
        """Shape of the underlying array."""
        return self.data.shape
    
    @property
    def dtype(self) -> np.dtype:
        """Dtype of the underlying array."""
        return self.data.dtype
    
    @property
    def T(self) -> 'Tensor':
        """Matrix transpose (see module-level transpose(): swaps the last
        two axes; a 1-D tensor becomes a column vector)."""
        return transpose(self)
    
    def backward(self, grad: Optional[np.ndarray] = None) -> None:
        """
        Run reverse-mode autodiff starting from this tensor.

        Args:
            grad: externally supplied seed gradient; defaults to ones when
                this tensor has exactly one element.

        Raises:
            RuntimeError: if this tensor is non-scalar and grad is None.
        """
        if not self.requires_grad:
            return
            
        if grad is None:
            if np.prod(self.shape) == 1:  # single-element tensor: seed gradient with 1
                grad = np.ones_like(self.data)
            else:
                raise RuntimeError("非标量张量的backward需要指定梯度")
        
        # Set or accumulate the seed gradient.
        if self.grad is None:
            self.grad = np.zeros_like(self.data)
        self.grad += grad
        
        # Build a topological ordering of the graph.
        # NOTE(review): recursive DFS may hit Python's recursion limit on
        # very deep graphs — confirm depth bounds for intended workloads.
        topo: List['Tensor'] = []
        visited: Set['Tensor'] = set()
        
        def build_topo(v: 'Tensor') -> None:
            if v not in visited:
                visited.add(v)
                for child in v._prev:
                    build_topo(child)
                topo.append(v)
        
        build_topo(self)
        
        # Backward pass in reverse topological order.
        for node in reversed(topo):
            node._backward()
        
        # Tear down the graph to avoid memory leaks; as a consequence a
        # second backward() over the same graph is a no-op.
        for node in topo:
            node._backward = lambda: None
            node._prev.clear()
    
    # Basic arithmetic operators
    def __add__(self, other: Union['Tensor', int, float]) -> 'Tensor':
        """Element-wise addition."""
        other = other if isinstance(other, Tensor) else Tensor(other)
        return add(self, other)
    
    def __mul__(self, other: Union['Tensor', int, float]) -> 'Tensor':
        """Element-wise multiplication."""
        other = other if isinstance(other, Tensor) else Tensor(other)
        return multiply(self, other)
    
    def __pow__(self, power: Union[int, float]) -> 'Tensor':
        """Element-wise power."""
        return power_tensor(self, power)
    
    def sum(self, dim: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> 'Tensor':
        """Sum over the given dimension(s); over all elements when dim is None."""
        return sum_tensor(self, dim, keepdims)
    
    def mean(self) -> 'Tensor':
        """Mean over all elements (scalar result)."""
        return mean_tensor(self)
    
    def reshape(self, *shape: int) -> 'Tensor':
        # Accepts the target shape as separate ints, PyTorch-style.
        shape = tuple(shape)
        return reshape_tensor(self, shape)
    
    def argmax(self) -> int:
        # Index of the maximum element as a plain int (not a Tensor).
        # NOTE(review): for multi-dimensional tensors this is the index into
        # the flattened array, per numpy argmax default semantics.
        return int(self.data.argmax())

    def max(self, dim: Optional[int] = None, keepdims: bool = False) -> 'Tensor':
        """Maximum value.

        NOTE(review): this op does not participate in autograd — the
        returned tensor has requires_grad=False and no _backward.
        """
        if dim is None:
            return Tensor(np.max(self.data))
        else:
            return Tensor(np.max(self.data, axis=dim, keepdims=keepdims))
        
    # Additional operators
    def __sub__(self, other: Union['Tensor', int, float]) -> 'Tensor':
        """Element-wise subtraction."""
        other = other if isinstance(other, Tensor) else Tensor(other)
        return subtract(self, other)
    
    def __truediv__(self, other: Union['Tensor', int, float]) -> 'Tensor':
        """Element-wise division."""
        other = other if isinstance(other, Tensor) else Tensor(other)
        return divide(self, other)
    
    def __matmul__(self, other: 'Tensor') -> 'Tensor':
        """Matrix multiplication (the @ operator)."""
        other = other if isinstance(other, Tensor) else Tensor(other)
        return matmul(self, other)
    
    def __getitem__(self, idx: Union[int, slice, Tuple]) -> 'Tensor':
        """Indexing/slicing; gradients are scattered back to the
        selected positions on backward."""
        result = Tensor(self.data[idx], 
                     requires_grad=self.requires_grad,
                     _children=(self,) if self.requires_grad else (),
                     _op='getitem')
        
        if self.requires_grad:
            def _backward() -> None:
                if self.grad is None:
                    self.grad = np.zeros_like(self.data)
                # Scatter the gradient back into the indexed positions.
                # NOTE(review): plain assignment does not accumulate for
                # repeated indices in fancy indexing — np.add.at would be
                # needed there; basic int/slice indexing is fine.
                grad_placeholder = np.zeros_like(self.data)
                grad_placeholder[idx] = result.grad
                self.grad += grad_placeholder
            
            result._backward = _backward
            
        return result

    def exp(self) -> 'Tensor':
        """Element-wise exponential e**x."""
        return exp_tensor(self)

    def log(self) -> 'Tensor':
        """Element-wise natural logarithm log(x)."""
        return log_tensor(self)

    def __neg__(self) -> 'Tensor':
        """Element-wise negation (-x)."""
        return negate(self)
    
    def pad(self, pad_width: tuple) -> 'Tensor':
        """
        Zero-padding with backward support.

        Args:
            pad_width: numpy.pad-style spec, e.g. ((0, 0), (0, 0), (1, 1), (1, 1)).
        
        Returns:
            A new padded Tensor.
        """
        padded_data = np.pad(self.data, pad_width, mode='constant')
        requires_grad = self.requires_grad

        out = Tensor(padded_data, requires_grad=requires_grad,
                    _children=(self,) if requires_grad else (),
                    _op='pad')

        if requires_grad:
            def _backward():
                grad = out.grad
                if self.grad is None:
                    self.grad = np.zeros_like(self.data)

                # Build slices that cut the original region back out of
                # the padded gradient.
                slices = []
                for (before, after), dim_len in zip(pad_width, self.shape):
                    start = before
                    end = start + dim_len
                    slices.append(slice(start, end))

                grad_input = grad[tuple(slices)]
                self.grad += grad_input

            out._backward = _backward

        return out
    
    @staticmethod
    def stack(tensors: List['Tensor'], dim: int = 0) -> 'Tensor':
        """
        Stack equally-shaped tensors along a new dimension.

        Args:
            tensors: list of Tensors; all must share the same shape.
            dim: position at which the new dimension is inserted
                (negative values allowed).

        Returns:
            A new Tensor whose shape has len(tensors) inserted at dim.
        """
        assert all(isinstance(t, Tensor) for t in tensors), "只能堆叠 Tensor 类型"
        assert len(tensors) > 0, "至少需要一个 tensor"

        base_shape = tensors[0].data.shape
        for t in tensors:
            assert t.data.shape == base_shape, f"所有 tensor 的 shape 必须相同，发现不一致：{t.data.shape} vs {base_shape}"

        ndim = len(base_shape) + 1  # rank grows by one after stacking
        assert -ndim <= dim < ndim, f"dim 越界，期望范围 [{-ndim}, {ndim - 1}]，但得到 {dim}"
        if dim < 0:
            dim += ndim

        # Give each tensor the new axis, then concatenate along it.
        data_list = [np.expand_dims(t.data, axis=dim) for t in tensors]
        stacked_data = np.concatenate(data_list, axis=dim)

        requires_grad = any(t.requires_grad for t in tensors)
        out = Tensor(stacked_data, requires_grad=requires_grad, _children=set(tensors), _op='stack')

        if requires_grad:
            def _backward():
                if out.grad is None:
                    return
                # Route slice i of the output gradient back to tensors[i].
                for i, t in enumerate(tensors):
                    if t.requires_grad:
                        grad_i = np.take(out.grad, i, axis=dim)
                        if t.grad is None:
                            t.grad = grad_i
                        else:
                            t.grad = t.grad + grad_i  # avoid in-place aliasing

            out._backward = _backward

        return out
    
    def transpose(self, dim1: int, dim2: int) -> 'Tensor':
        """Swap two axes (non-negative axis indices only)."""
        assert 0 <= dim1 < self.data.ndim and 0 <= dim2 < self.data.ndim, "维度越界"
        new_data = self.data.swapaxes(dim1, dim2)
        
        out = Tensor(new_data,
                    requires_grad=self.requires_grad,
                    _children=(self,) if self.requires_grad else (),
                    _op='transpose')
        
        if self.requires_grad:
            def _backward():
                if out.grad is None:
                    return
                # The inverse of a swap is the same swap.
                grad_transposed = out.grad.swapaxes(dim1, dim2)
                self.grad = self.grad + grad_transposed if self.grad is not None else grad_transposed
            out._backward = _backward

        return out


# 张量运算函数
def add(a: Tensor, b: Tensor) -> Tensor:
    """
    Element-wise addition of two tensors.

    Args:
        a, b: input tensors (NumPy broadcasting applies).

    Returns:
        A new Tensor holding a + b, wired into the autograd graph when
        either input requires gradients.
    """
    needs_grad = a.requires_grad or b.requires_grad
    result = Tensor(a.data + b.data,
                    requires_grad=needs_grad,
                    _children=(a, b) if needs_grad else (),
                    _op='+')

    if needs_grad:
        def _backward() -> None:
            grad = result.grad
            if grad is None:
                return
            # d(a+b)/da = d(a+b)/db = 1; just reduce broadcast axes.
            for operand in (a, b):
                if operand.requires_grad:
                    if operand.grad is None:
                        operand.grad = np.zeros_like(operand.data)
                    operand.grad += unbroadcast(grad, operand.shape)

        result._backward = _backward

    return result

def unbroadcast(grad: np.ndarray, shape: Tuple[int, ...]) -> np.ndarray:
    """
    Reduce a broadcast gradient back to the original tensor shape.

    Leading axes that broadcasting prepended are summed away entirely;
    axes that were expanded from size 1 are summed with keepdims so the
    result matches `shape` exactly.
    """
    # Collapse any extra leading dimensions added by broadcasting.
    for _ in range(len(grad.shape) - len(shape)):
        grad = grad.sum(axis=0)
    # Collapse dimensions that were stretched from size 1.
    for axis, orig_size in enumerate(shape):
        if orig_size == 1 and grad.shape[axis] > 1:
            grad = grad.sum(axis=axis, keepdims=True)
    return grad


def multiply(a: Tensor, b: Tensor) -> Tensor:
    """
    Element-wise multiplication of two tensors.

    Args:
        a, b: input tensors (NumPy broadcasting applies).

    Returns:
        A new Tensor holding a * b, wired into the autograd graph when
        either input requires gradients.
    """
    needs_grad = a.requires_grad or b.requires_grad
    result = Tensor(a.data * b.data,
                    requires_grad=needs_grad,
                    _children=(a, b) if needs_grad else (),
                    _op='*')

    if needs_grad:
        def _backward() -> None:
            # Guard against backward being invoked with no upstream grad.
            if result.grad is None:
                return

            if a.requires_grad:
                if a.grad is None:
                    a.grad = np.zeros_like(a.data)
                # d(a*b)/da = b; unbroadcast reduces any broadcast axes
                # (shared with add/subtract/divide for consistency).
                a.grad += unbroadcast(b.data * result.grad, a.shape)

            if b.requires_grad:
                if b.grad is None:
                    b.grad = np.zeros_like(b.data)
                # d(a*b)/db = a
                b.grad += unbroadcast(a.data * result.grad, b.shape)

        result._backward = _backward

    return result

def power_tensor(tensor: Tensor, power: Union[int, float]) -> Tensor:
    """
    Element-wise power x ** p.

    Args:
        tensor: input tensor.
        power: the exponent (plain int or float, not a Tensor).

    Returns:
        A new Tensor holding tensor ** power.
    """
    needs_grad = tensor.requires_grad
    result = Tensor(tensor.data ** power,
                    requires_grad=needs_grad,
                    _children=(tensor,) if needs_grad else (),
                    _op=f'**{power}')

    if needs_grad:
        def _backward() -> None:
            if tensor.grad is None:
                tensor.grad = np.zeros_like(tensor.data)
            # d(x**p)/dx = p * x**(p-1)
            local_grad = power * tensor.data ** (power - 1)
            tensor.grad += local_grad * result.grad

        result._backward = _backward

    return result

def sum_tensor(tensor: Tensor, dim: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> Tensor:
    """
    Sum of tensor elements, optionally over specific dimension(s).

    Args:
        tensor: input tensor.
        dim: axis or axes to reduce; None reduces all elements.
        keepdims: whether the reduced axes are kept as size 1.

    Returns:
        A new Tensor holding the sum.
    """
    # Defensive conversion in case tensor.data is not already an ndarray.
    tensor_data = tensor.data
    if not isinstance(tensor_data, np.ndarray):
        tensor_data = np.array(tensor_data, copy=True)

    result = Tensor(np.sum(tensor_data, axis=dim, keepdims=keepdims),
                    requires_grad=tensor.requires_grad,
                    _children=(tensor,) if tensor.requires_grad else (),
                    _op='sum')

    if tensor.requires_grad:
        def _backward() -> None:
            # Guard against backward being invoked with no upstream grad.
            if result.grad is None:
                return
            if tensor.grad is None:
                tensor.grad = np.zeros_like(tensor_data)

            result_grad = np.asarray(result.grad)

            if dim is None:
                # Full reduction: every input element receives the scalar grad.
                tensor.grad += np.ones_like(tensor_data) * result_grad
                return

            if keepdims:
                # Reduced axes were kept as size 1 — direct broadcast works.
                tensor.grad += result_grad
                return

            # keepdims=False: rebuild the reduced axes as size 1 so the
            # gradient broadcasts back to the input shape. Normalizing
            # negative axes *before* use fixes the previous bug where
            # sorted mixed positive/negative axes were inserted at the
            # wrong positions.
            axes = dim if isinstance(dim, (list, tuple)) else (dim,)
            reduced = {d % tensor_data.ndim for d in axes}
            grad_shape = [1 if i in reduced else s
                          for i, s in enumerate(tensor_data.shape)]
            tensor.grad += np.broadcast_to(result_grad.reshape(grad_shape),
                                           tensor_data.shape)

        result._backward = _backward

    return result

def mean_tensor(tensor: Tensor) -> Tensor:
    """
    Arithmetic mean over all elements of a tensor.

    Args:
        tensor: input tensor.

    Returns:
        A scalar Tensor holding the mean.
    """
    needs_grad = tensor.requires_grad
    result = Tensor(np.mean(tensor.data),
                    requires_grad=needs_grad,
                    _children=(tensor,) if needs_grad else (),
                    _op='mean')

    if needs_grad:
        def _backward() -> None:
            if tensor.grad is None:
                tensor.grad = np.zeros_like(tensor.data)
            # Each element contributes 1/N to the mean.
            spread = np.ones_like(tensor.data) * result.grad
            tensor.grad += spread / tensor.data.size

        result._backward = _backward

    return result

def reshape_tensor(tensor: Tensor, shape: Tuple[int, ...]) -> Tensor:
    """
    View a tensor's data in a new shape (element count must match).

    Args:
        tensor: input tensor.
        shape: the target shape.

    Returns:
        A new Tensor with the requested shape.
    """
    needs_grad = tensor.requires_grad
    result = Tensor(tensor.data.reshape(shape),
                    requires_grad=needs_grad,
                    _children=(tensor,) if needs_grad else (),
                    _op='reshape')

    if needs_grad:
        def _backward() -> None:
            if tensor.grad is None:
                tensor.grad = np.zeros_like(tensor.data)
            # Undo the reshape on the upstream gradient.
            tensor.grad += result.grad.reshape(tensor.shape)

        result._backward = _backward

    return result

def subtract(a: Tensor, b: Tensor) -> Tensor:
    """
    Element-wise subtraction of two tensors.

    Args:
        a, b: input tensors (NumPy broadcasting applies).

    Returns:
        A new Tensor holding a - b, wired into the autograd graph when
        either input requires gradients.
    """
    needs_grad = a.requires_grad or b.requires_grad
    result = Tensor(a.data - b.data,
                    requires_grad=needs_grad,
                    _children=(a, b) if needs_grad else (),
                    _op='-')

    if needs_grad:
        def _backward() -> None:
            # Guard against backward being invoked with no upstream grad.
            if result.grad is None:
                return

            if a.requires_grad:
                if a.grad is None:
                    a.grad = np.zeros_like(a.data)
                # d(a-b)/da = 1; unbroadcast reduces any broadcast axes
                # (replaces the duplicated inline reduction logic).
                a.grad += unbroadcast(result.grad, a.shape)

            if b.requires_grad:
                if b.grad is None:
                    b.grad = np.zeros_like(b.data)
                # d(a-b)/db = -1
                b.grad += unbroadcast(-result.grad, b.shape)

        result._backward = _backward

    return result

def divide(a: Tensor, b: Tensor) -> Tensor:
    """
    Element-wise division of two tensors.

    Args:
        a, b: input tensors (NumPy broadcasting applies).

    Returns:
        A new Tensor holding a / b, wired into the autograd graph when
        either input requires gradients.
    """
    needs_grad = a.requires_grad or b.requires_grad
    result = Tensor(a.data / b.data,
                    requires_grad=needs_grad,
                    _children=(a, b) if needs_grad else (),
                    _op='/')

    if needs_grad:
        def _backward() -> None:
            # Guard against backward being invoked with no upstream grad.
            if result.grad is None:
                return

            if a.requires_grad:
                if a.grad is None:
                    a.grad = np.zeros_like(a.data)
                # d(a/b)/da = 1/b. The previous code summed the ENTIRE
                # gradient and reshaped it to a.shape whenever shapes
                # differed, which is wrong for any non-scalar broadcast;
                # unbroadcast performs the correct axis-wise reduction.
                a.grad += unbroadcast(result.grad / b.data, a.shape)

            if b.requires_grad:
                if b.grad is None:
                    b.grad = np.zeros_like(b.data)
                # d(a/b)/db = -a / b**2
                grad_b = -a.data * result.grad / (b.data ** 2)
                b.grad += unbroadcast(grad_b, b.shape)

        result._backward = _backward

    return result

def matmul(a: Tensor, b: Tensor) -> Tensor:
    """
    Matrix product of two tensors (batched per np.matmul rules).

    Args:
        a, b: input tensors with ndim >= 2 (the backward pass swaps the
            last two axes, which does not support 1-D operands).

    Returns:
        A new Tensor holding a @ b.
    """
    result = Tensor(np.matmul(a.data, b.data),
                    requires_grad=(a.requires_grad or b.requires_grad),
                    _children=(a, b) if (a.requires_grad or b.requires_grad) else (),
                    _op='@')

    if a.requires_grad or b.requires_grad:
        # Weak references keep this closure from extending the operands'
        # lifetimes beyond the graph's.
        a_ref = weakref.ref(a)
        b_ref = weakref.ref(b)

        def _backward() -> None:
            grad = result.grad
            if grad is None:
                return
            grad = np.asarray(grad)

            a_ = a_ref()
            b_ = b_ref()
            if a_ is None or b_ is None:
                return

            if a_.requires_grad:
                if a_.grad is None:
                    a_.grad = np.zeros_like(a_.data)
                # dL/dA = dL/dC @ B^T; reduce broadcast batch axes so the
                # gradient matches A's shape (previously batched broadcast
                # produced a shape-mismatched accumulation).
                grad_a = np.matmul(grad, np.swapaxes(b_.data, -1, -2))
                a_.grad += unbroadcast(grad_a, a_.shape)

            if b_.requires_grad:
                if b_.grad is None:
                    b_.grad = np.zeros_like(b_.data)
                # dL/dB = A^T @ dL/dC
                grad_b = np.matmul(np.swapaxes(a_.data, -1, -2), grad)
                b_.grad += unbroadcast(grad_b, b_.shape)

        result._backward = _backward

    return result

def exp_tensor(tensor: Tensor) -> Tensor:
    """
    Element-wise exponential e**x.

    Args:
        tensor: input tensor.

    Returns:
        A new Tensor holding exp(tensor).
    """
    needs_grad = tensor.requires_grad
    result = Tensor(np.exp(tensor.data),
                    requires_grad=needs_grad,
                    _children=(tensor,) if needs_grad else (),
                    _op='exp')

    if needs_grad:
        def _backward() -> None:
            if tensor.grad is None:
                tensor.grad = np.zeros_like(tensor.data)
            # d(e^x)/dx = e^x, i.e. the forward output itself.
            out_data = result.data
            out_grad = result.grad
            # Defensive conversion in case these are not ndarrays.
            if not isinstance(out_data, np.ndarray):
                out_data = np.array(out_data, copy=True)
            if not isinstance(out_grad, np.ndarray):
                out_grad = np.array(out_grad, copy=True)
            tensor.grad += out_data * out_grad

        result._backward = _backward

    return result

def log_tensor(tensor: Tensor) -> Tensor:
    """
    Element-wise natural logarithm log(x).

    Args:
        tensor: input tensor (values should be positive).

    Returns:
        A new Tensor holding log(tensor).
    """
    needs_grad = tensor.requires_grad
    result = Tensor(np.log(tensor.data),
                    requires_grad=needs_grad,
                    _children=(tensor,) if needs_grad else (),
                    _op='log')

    if needs_grad:
        def _backward() -> None:
            if tensor.grad is None:
                tensor.grad = np.zeros_like(tensor.data)
            # d(log x)/dx = 1/x
            tensor.grad += result.grad / tensor.data

        result._backward = _backward

    return result

def negate(tensor: Tensor) -> Tensor:
    """
    Element-wise negation (-x).

    Args:
        tensor: input tensor.

    Returns:
        A new Tensor holding -tensor.
    """
    needs_grad = tensor.requires_grad
    result = Tensor(-tensor.data,
                    requires_grad=needs_grad,
                    _children=(tensor,) if needs_grad else (),
                    _op='neg')

    if needs_grad:
        def _backward() -> None:
            if tensor.grad is None:
                tensor.grad = np.zeros_like(tensor.data)
            # d(-x)/dx = -1
            tensor.grad -= result.grad

        result._backward = _backward

    return result

def transpose(tensor: Tensor) -> Tensor:
    """
    Transpose used by Tensor.T.

    A 1-D tensor becomes a column vector (n,) -> (n, 1); tensors with two
    or more dimensions have their last two axes swapped; a 0-D tensor is
    returned unchanged.
    """
    ndim = len(tensor.shape)
    if ndim == 0:
        # A scalar transposes to itself; no new node is created.
        return tensor

    needs_grad = tensor.requires_grad
    if ndim == 1:
        new_data = tensor.data.reshape(-1, 1)
    else:
        new_data = np.swapaxes(tensor.data, -2, -1)

    result = Tensor(new_data,
                    requires_grad=needs_grad,
                    _children=(tensor,) if needs_grad else (),
                    _op='transpose')

    if needs_grad:
        def _backward() -> None:
            if tensor.grad is None:
                tensor.grad = np.zeros_like(tensor.data)
            # The gradient flows back through the inverse reshaping:
            # flatten for the 1-D case, re-swap the axes otherwise.
            if ndim == 1:
                tensor.grad += result.grad.reshape(-1)
            else:
                tensor.grad += np.swapaxes(result.grad, -2, -1)

        result._backward = _backward

    return result