from typing import Callable, List, NamedTuple, Optional, Union

import numpy as np


# A dependency edge in the computation graph: each Tensor may depend on
# predecessor Tensors, each paired with the function that maps the output
# gradient back to that predecessor.
class Dependency(NamedTuple):
    tensor: "Tensor"  # the predecessor Tensor this node depends on
    grad_fn: Callable[[np.ndarray], np.ndarray]  # maps upstream grad to grad w.r.t. `tensor`


# Types coercible to a numpy array: scalar, list, or ndarray.
Arrayable = Union[float, list, np.ndarray]


def ensure_array(arrayable: Arrayable) -> np.ndarray:
    """Coerce *arrayable* to a numpy array (existing arrays pass through as-is)."""
    if not isinstance(arrayable, np.ndarray):
        arrayable = np.array(arrayable, dtype=np.float64)
    return arrayable


# Types coercible to a Tensor: Tensor, scalar, or numpy array.
Tensorable = Union["Tensor", float, np.ndarray]


def ensure_tensor(tensorable: Tensorable) -> "Tensor":
    """Coerce *tensorable* to a Tensor (existing Tensors pass through as-is)."""
    return tensorable if isinstance(tensorable, Tensor) else Tensor(tensorable)


# -----------------------------
# 核心操作函数 (带梯度支持)
# -----------------------------


def _tensor_sum(t: "Tensor") -> "Tensor":
    """
    Sum all elements of *t* into a 0-d (scalar) Tensor.

    Forward: out = t.data.sum()                      # shape ()
    Backward: every input element contributes with weight 1, so the
    upstream scalar gradient is simply broadcast back to t's shape.

    Example:
        t = Tensor([[1, 2], [3, 4]], requires_grad=True)
        s = t.sum()        # s.data = 10.0, s.shape = ()
        s.backward()
        t.grad.data == [[1, 1], [1, 1]]
    """
    total = t.data.sum()
    if not t.requires_grad:
        return Tensor(total, False, [])

    def grad_fn(grad: np.ndarray) -> np.ndarray:
        # grad is a 0-d array; expand it to match t.data's shape.
        return np.ones_like(t.data) * grad

    return Tensor(total, True, [Dependency(t, grad_fn)])


def _add(t1: "Tensor", t2: "Tensor") -> "Tensor":
    """
    Elementwise addition with numpy broadcasting support.

    Forward: out = t1.data + t2.data
    Backward: dL/dt1 = dL/dout and dL/dt2 = dL/dout, except that any
    dimensions created or stretched by broadcasting must be summed out
    so each gradient matches its input's original shape:
      1. leading dims prepended by broadcasting are summed away;
      2. dims of size 1 in the input are summed with keepdims=True.

    Example:
        t1 = Tensor([[1, 2]], requires_grad=True)   # shape (1,2)
        t2 = Tensor([3, 4], requires_grad=True)     # shape (2,)
        out = t1 + t2                               # shape (1,2)
        out.backward(Tensor([[1, 1]]))
        t1.grad == [[1,1]], t2.grad == [1,1]
    """
    data = t1.data + t2.data
    depends_on: List[Dependency] = []

    def make_grad_fn(src: "Tensor") -> Callable[[np.ndarray], np.ndarray]:
        # Build the grad_fn that undoes broadcasting for one operand.
        def grad_fn(grad: np.ndarray) -> np.ndarray:
            # Sum away leading dims that broadcasting prepended.
            for _ in range(grad.ndim - src.data.ndim):
                grad = grad.sum(axis=0)
            # Sum (keepdims) along dims the operand had as size 1.
            for axis, size in enumerate(src.shape):
                if size == 1:
                    grad = grad.sum(axis=axis, keepdims=True)
            return grad

        return grad_fn

    for operand in (t1, t2):
        if operand.requires_grad:
            depends_on.append(Dependency(operand, make_grad_fn(operand)))

    return Tensor(data, t1.requires_grad or t2.requires_grad, depends_on)


def _mul(t1: "Tensor", t2: "Tensor") -> "Tensor":
    """
    Elementwise multiplication with numpy broadcasting support.

    Forward: out = t1.data * t2.data
    Backward (chain rule):
        dL/dt1 = dL/dout * t2.data
        dL/dt2 = dL/dout * t1.data
    followed by the same un-broadcasting as addition (sum away leading
    dims added by broadcasting; sum keepdims over size-1 dims).

    Example:
        t1 = Tensor([2, 3], requires_grad=True)
        t2 = Tensor(4.0, requires_grad=True)  # scalar
        out = t1 * t2                         # [8, 12]
        out.sum().backward()
        t1.grad == [4, 4], t2.grad == 5 (since 2 + 3 = 5)
    """
    data = t1.data * t2.data
    depends_on: List[Dependency] = []

    def make_grad_fn(src: "Tensor", other: "Tensor") -> Callable[[np.ndarray], np.ndarray]:
        # grad_fn for one operand; `other` supplies the chain-rule factor.
        # Read other.data inside the closure (at backward time) to match
        # the original's late evaluation.
        def grad_fn(grad: np.ndarray) -> np.ndarray:
            grad = grad * other.data  # chain rule
            # Sum away leading dims that broadcasting prepended.
            for _ in range(grad.ndim - src.data.ndim):
                grad = grad.sum(axis=0)
            # Sum (keepdims) along dims the operand had as size 1.
            for axis, size in enumerate(src.shape):
                if size == 1:
                    grad = grad.sum(axis=axis, keepdims=True)
            return grad

        return grad_fn

    if t1.requires_grad:
        depends_on.append(Dependency(t1, make_grad_fn(t1, t2)))
    if t2.requires_grad:
        depends_on.append(Dependency(t2, make_grad_fn(t2, t1)))

    return Tensor(data, t1.requires_grad or t2.requires_grad, depends_on)


def _neg(t: "Tensor") -> "Tensor":
    """
    Elementwise negation.

    Forward: out = -t.data
    Backward: dL/dt = -dL/dout
    """
    deps: List[Dependency] = []
    if t.requires_grad:

        def grad_fn(grad: np.ndarray) -> np.ndarray:
            return -grad

        deps.append(Dependency(t, grad_fn))
    return Tensor(-t.data, t.requires_grad, deps)


def _sub(t1: "Tensor", t2: "Tensor") -> "Tensor":
    """Subtraction by composition: t1 - t2 == t1 + (-t2)."""
    return _add(t1, _neg(t2))


def _matmul(t1: "Tensor", t2: "Tensor") -> "Tensor":
    """
    2-D matrix multiplication (@).

    For t1: (n, m) and t2: (m, p), out has shape (n, p).

    Backward:
        dL/dt1 = dL/dout @ t2.T
        dL/dt2 = t1.T @ dL/dout

    Note: this implementation only supports plain 2-D matmul (it could
    be extended to batched/higher-dim inputs, but is kept simple here).

    Example:
        A = Tensor([[1, 2]], requires_grad=True)   # (1,2)
        B = Tensor([[3], [4]], requires_grad=True) # (2,1)
        C = A @ B                                  # (1,1) -> [[11]]
        C.sum().backward()
        A.grad == [[3, 4]], B.grad == [[1], [2]]
    """
    out = t1.data @ t2.data
    deps: List[Dependency] = []

    if t1.requires_grad:

        def grad_fn1(grad: np.ndarray) -> np.ndarray:
            return grad @ t2.data.T

        deps.append(Dependency(t1, grad_fn1))

    if t2.requires_grad:

        def grad_fn2(grad: np.ndarray) -> np.ndarray:
            return t1.data.T @ grad

        deps.append(Dependency(t2, grad_fn2))

    return Tensor(out, t1.requires_grad or t2.requires_grad, deps)


def _slice(t: "Tensor", idxs) -> "Tensor":
    """
    Indexing/slicing (e.g. t[0], t[:, 1:3]).

    Forward: data = t.data[idxs]
    Backward: scatter the upstream gradient back into a zero array of the
    original shape; positions outside the slice receive gradient 0.

    Fix: uses np.add.at instead of plain assignment so that advanced
    (fancy) indexing with repeated indices ACCUMULATES gradients —
    `bigger_grad[idxs] = grad` keeps only the last write for duplicate
    indices, which silently loses gradient mass (e.g. t[[0, 0]]).
    For basic slicing the two are equivalent.

    Example:
        t = Tensor([1, 2, 3], requires_grad=True)
        s = t[1]          # 2.0
        s.backward()
        t.grad == [0, 1, 0]
    """
    data = t.data[idxs]
    requires_grad = t.requires_grad
    if requires_grad:

        def grad_fn(grad: np.ndarray) -> np.ndarray:
            # Zero gradient of the original shape.
            bigger_grad = np.zeros_like(t.data)
            # Accumulate (not assign) so duplicate indices sum correctly.
            np.add.at(bigger_grad, idxs, grad)
            return bigger_grad

        depends_on = [Dependency(t, grad_fn)]
    else:
        depends_on = []
    return Tensor(data, requires_grad, depends_on)


# -----------------------------
# Tensor 类
# -----------------------------


class Tensor:
    def __init__(
        self,
        data: Arrayable,
        requires_grad: bool = False,
        depends_on: List[Dependency] = None,
    ) -> None:
        """
        初始化 Tensor

        参数:
            data: 数据 (标量、列表、numpy 数组)
            requires_grad: 是否需要计算梯度
            depends_on: 依赖列表 (用于反向传播)
        """
        self._data = ensure_array(data)
        self.requires_grad = requires_grad
        self.depends_on = depends_on or []

        self.shape = self._data.shape
        self.grad: Optional["Tensor"] = None
        # 如果需要梯度，初始化为零
        if self.requires_grad:
            self.zero_grad()

    @property
    def data(self) -> np.ndarray:
        return self._data

    @data.setter
    def data(self, new_data: np.ndarray) -> None:
        """设置新数据时，清除梯度 (因为计算图已断)"""
        self._data = new_data
        self.grad = None

    def __repr__(self) -> str:
        return f"Tensor({self.data}, requires_grad={self.requires_grad})"

    # -----------------------------
    # 运算符重载
    # -----------------------------

    def __add__(self, other) -> "Tensor":
        return _add(self, ensure_tensor(other))

    def __radd__(self, other) -> "Tensor":
        return _add(ensure_tensor(other), self)

    def __iadd__(self, other) -> "Tensor":
        """原地加法 (注意：会破坏计算图，不推荐用于需要梯度的情况)"""
        self.data = self.data + ensure_tensor(other).data
        return self

    def __mul__(self, other) -> "Tensor":
        return _mul(self, ensure_tensor(other))

    def __rmul__(self, other) -> "Tensor":
        return _mul(ensure_tensor(other), self)

    def __imul__(self, other) -> "Tensor":
        self.data = self.data * ensure_tensor(other).data
        return self

    def __matmul__(self, other) -> "Tensor":
        return _matmul(self, ensure_tensor(other))

    def __neg__(self) -> "Tensor":
        return _neg(self)

    def __sub__(self, other) -> "Tensor":
        return _sub(self, ensure_tensor(other))

    def __rsub__(self, other) -> "Tensor":
        return _sub(ensure_tensor(other), self)

    def __isub__(self, other) -> "Tensor":
        self.data = self.data - ensure_tensor(other).data
        return self

    def __getitem__(self, idxs) -> "Tensor":
        return _slice(self, idxs)

    # -----------------------------
    # 梯度相关方法
    # -----------------------------

    def zero_grad(self) -> None:
        """将梯度重置为零"""
        self.grad = Tensor(np.zeros_like(self.data, dtype=np.float64))

    def backward(self, grad: "Tensor" = None) -> None:
        """
        反向传播

        参数:
            grad: 当前 Tensor 的梯度 (上游梯度)
                  如果是标量 (loss)，可省略 (默认为1.0)

        规则:
            - 只能在 requires_grad=True 的 Tensor 上调用
            - 如果是标量 (shape == ())，grad 默认为 1.0
            - 否则必须显式提供 grad
        """
        assert self.requires_grad, "只能在 requires_grad=True 的 Tensor 上调用 backward"
        if grad is None:
            if self.shape == ():  # 标量
                grad = Tensor(1.0)
            else:
                raise RuntimeError("非标量 Tensor 的 backward 必须提供 grad 参数")
        # 累加梯度 (支持多次 backward)
        self.grad.data += grad.data

        # 递归反向传播到依赖的 Tensor
        for dependency in self.depends_on:
            # 计算传给前驱的梯度
            backward_grad = dependency.grad_fn(grad.data)
            # 递归调用
            dependency.tensor.backward(Tensor(backward_grad))

    def sum(self) -> "Tensor":
        """对所有元素求和"""
        return _tensor_sum(self)


# -----------------------------
# 使用示例
# -----------------------------

if __name__ == "__main__":
    print("=== 示例 1: 简单加法与梯度 ===")
    # NOTE(review): the banner text says "加法" (addition) but this example
    # actually demonstrates multiplication.
    a = Tensor(2.0, requires_grad=True)
    b = Tensor(3.0, requires_grad=True)
    c = a * b  # c = 6.0
    d = c.sum()  # d = 6.0 (scalar)
    d.backward()
    print(f"a.grad = {a.grad}")  # expected 3.0 (= b)
    print(f"b.grad = {b.grad}")  # expected 2.0 (= a)

    print("\n=== 示例 2: 矩阵乘法 ===")
    A = Tensor([[1.0, 2.0]], requires_grad=True)  # (1,2)
    B = Tensor([[3.0], [4.0]], requires_grad=True)  # (2,1)
    C = A @ B  # (1,1) → [[11.0]]
    loss = C.sum()
    loss.backward()
    print(f"A.grad =\n{A.grad.data}")  # expected [[3, 4]] (= B.T)
    print(f"B.grad =\n{B.grad.data}")  # expected [[1], [2]] (= A.T)

    print("\n=== 示例 3: 广播与求和 ===")
    x = Tensor([[1.0, 2.0, 3.0]], requires_grad=True)  # (1,3)
    y = x.sum()  # scalar 6.0
    y.backward()
    print(f"x.grad =\n{x.grad.data}")  # expected [[1, 1, 1]]

    print("\n=== 示例 4: 切片 ===")
    z = Tensor([1.0, 2.0, 3.0], requires_grad=True)
    w = z[1]  # 2.0
    w.backward()
    print(f"z.grad = {z.grad}")  # expected [0, 1, 0]

    print("\n=== 示例 5: 多次 backward (梯度累加) ===")
    p = Tensor(2.0, requires_grad=True)
    q = p * p  # 4.0
    q.backward()
    print(f"第一次 backward 后 p.grad = {p.grad}")  # expected 4.0 (= 2p)
    q.backward()
    print(f"第二次 backward 后 p.grad = {p.grad}")  # expected 8.0 (accumulated)
