import numpy as np
from typing import List, Optional, Union, Any


CUPY_AVAILABLE = False
cp = np  # fallback: alias cupy's name to numpy so `cp` always resolves

# Global switch: whether to run on the GPU
USE_GPU = False  # set to True to enable GPU (requires cupy)

def get_array_module(tensor):
    """Return the active array backend (cupy in GPU mode, numpy otherwise).

    The `tensor` argument is currently unused: the backend is selected
    purely from the module-level USE_GPU / CUPY_AVAILABLE flags.
    """
    gpu_enabled = USE_GPU and CUPY_AVAILABLE
    return cp if gpu_enabled else np

class Tensor:
    """A minimal reverse-mode automatic-differentiation tensor.

    Wraps an ndarray (numpy, or cupy when GPU mode is enabled) and records
    each operation applied to it in a dynamic computation graph.  Calling
    backward() on a result propagates gradients to every ancestor created
    with requires_grad=True.
    """

    def __init__(self, data, requires_grad=False, _children=(), _op=''):
        """
        :param data: array-like, list, or scalar
        :param requires_grad: if True, gradients are accumulated into .grad
        :param _children: tuple of parent Tensors in the graph (internal)
        :param _op: label of the producing operation, for debugging (internal)
        """
        xp = get_array_module(None)
        self.data = xp.array(data)
        self.requires_grad = requires_grad
        self.grad = None  # set (same shape as data) during backward()
        self._backward = lambda: None  # overwritten by the op that created this node
        self._prev = set(_children)
        self._op = _op

    def _accumulate(self, grad):
        """Add `grad` into self.grad, initializing it on the first call."""
        if self.grad is None:
            # Copy so a later in-place `+=` can never mutate an upstream
            # gradient array that `grad` may alias (e.g. out.grad).
            self.grad = grad.copy()
        else:
            self.grad += grad

    def __add__(self, other):
        """Elementwise addition; scalars/arrays are wrapped, broadcasting allowed."""
        other = other if isinstance(other, Tensor) else Tensor(other)
        out = Tensor(self.data + other.data,
                     requires_grad=self.requires_grad or other.requires_grad,
                     _children=(self, other),
                     _op='+')

        def _backward():
            # d(a+b)/da = d(a+b)/db = 1; sum gradients over broadcast axes.
            if self.requires_grad:
                self._accumulate(self._reduce_grad(out.grad, self.data.shape))
            if other.requires_grad:
                other._accumulate(other._reduce_grad(out.grad, other.data.shape))

        out._backward = _backward
        return out

    __radd__ = __add__  # addition commutes, so scalar + Tensor reuses __add__

    def __mul__(self, other):
        """Elementwise multiplication; scalars are wrapped, broadcasting allowed."""
        other = other if isinstance(other, Tensor) else Tensor(other)
        out = Tensor(self.data * other.data,
                     requires_grad=self.requires_grad or other.requires_grad,
                     _children=(self, other),
                     _op='*')

        def _backward():
            # d(a*b)/da = b and d(a*b)/db = a.
            if self.requires_grad:
                self._accumulate(
                    self._reduce_grad(out.grad * other.data, self.data.shape))
            if other.requires_grad:
                other._accumulate(
                    other._reduce_grad(out.grad * self.data, other.data.shape))

        out._backward = _backward
        return out

    __rmul__ = __mul__  # multiplication commutes, so scalar * Tensor reuses __mul__

    def __neg__(self):
        """Unary negation, expressed as multiplication by -1."""
        return self * -1

    def __sub__(self, other):
        """Elementwise subtraction: self - other == self + (-other)."""
        other = other if isinstance(other, Tensor) else Tensor(other)
        return self + (other * -1)

    def __rsub__(self, other):
        """scalar - Tensor."""
        return Tensor(other) + (self * -1)

    def __truediv__(self, other):
        """Elementwise division.

        A Tensor divisor goes through `other ** -1.0` (float exponent so
        integer-typed data is valid); a plain number divisor is folded into
        a single multiply by its reciprocal.
        """
        if isinstance(other, Tensor):
            return self * (other ** -1.0)
        return self * (1.0 / other)

    def __rtruediv__(self, other):
        """scalar / Tensor."""
        return Tensor(other) * (self ** -1.0)

    def __pow__(self, power):
        """Raise elementwise to a constant scalar power."""
        assert isinstance(power, (int, float)), "power must be int or float"
        out = Tensor(self.data ** power,
                     requires_grad=self.requires_grad,
                     _children=(self,),
                     _op=f'**{power}')

        def _backward():
            # d(x**p)/dx = p * x**(p-1)
            if self.requires_grad:
                grad = power * (self.data ** (power - 1)) * out.grad
                self._accumulate(self._reduce_grad(grad, self.data.shape))

        out._backward = _backward
        return out

    def __matmul__(self, other):
        """Matrix multiplication: self @ other."""
        other = other if isinstance(other, Tensor) else Tensor(other)
        out = Tensor(self.data @ other.data,
                     requires_grad=self.requires_grad or other.requires_grad,
                     _children=(self, other),
                     _op='@')

        def _backward():
            if self.requires_grad:
                # dL/dA = dL/dC @ B^T
                self._accumulate(
                    self._reduce_grad(out.grad @ other.data.T, self.data.shape))
            if other.requires_grad:
                # dL/dB = A^T @ dL/dC
                other._accumulate(
                    other._reduce_grad(self.data.T @ out.grad, other.data.shape))

        out._backward = _backward
        return out

    def sigmoid(self):
        """Sigmoid activation: sigma(x) = 1 / (1 + exp(-x))."""
        xp = get_array_module(self.data)
        s = 1 / (1 + xp.exp(-self.data))
        out = Tensor(s,
                     requires_grad=self.requires_grad,
                     _children=(self,),
                     _op='sigmoid')

        def _backward():
            if self.requires_grad:
                # sigma'(x) = sigma(x) * (1 - sigma(x)); reuse the forward value.
                grad = out.data * (1 - out.data) * out.grad
                self._accumulate(self._reduce_grad(grad, self.data.shape))

        out._backward = _backward
        return out

    def sum(self, axis=None, keepdims=False):
        """Sum of elements.

        :param axis: int or tuple of ints to reduce over (None = all axes)
        :param keepdims: keep reduced axes as size-1 dimensions
        """
        xp = get_array_module(self.data)
        out = Tensor(xp.sum(self.data, axis=axis, keepdims=keepdims),
                     requires_grad=self.requires_grad,
                     _children=(self,),
                     _op='sum')

        def _backward():
            if self.requires_grad:
                grad = out.grad
                if axis is not None and not keepdims:
                    # Re-insert the reduced axes as size-1 dims; without this
                    # the gradient cannot broadcast back to self.data's shape
                    # (e.g. (2,) against (2, 3) after a sum over axis=1).
                    axes = axis if isinstance(axis, tuple) else (axis,)
                    shape = list(self.data.shape)
                    for ax in axes:
                        shape[ax] = 1
                    grad = grad.reshape(shape)
                self._accumulate(xp.ones_like(self.data) * grad)

        out._backward = _backward
        return out

    def mean(self, axis=None, keepdims=False):
        """Mean of elements over `axis` (None = all), computed as sum / count."""
        if axis is None:
            count = self.data.size
        elif isinstance(axis, int):
            count = self.data.shape[axis]
        else:
            count = int(np.prod([self.data.shape[i] for i in axis]))
        return self.sum(axis=axis, keepdims=keepdims) / count

    def mse_loss(self, target):
        """Mean squared error: mean((self - target) ** 2).

        :param target: array-like or Tensor, broadcastable to self's shape;
                       treated as a constant (no gradient flows into it)
        :return: scalar Tensor
        """
        if isinstance(target, Tensor):
            # Re-wrap the raw data so the target is detached from any graph.
            target = Tensor(target.data, requires_grad=False)
        else:
            target = Tensor(target, requires_grad=False)
        diff = self - target
        return (diff * diff).sum() / diff.data.size  # mean over all elements

    def _reduce_grad(self, grad, target_shape):
        """Sum `grad` over broadcast axes so it matches `target_shape`."""
        xp = get_array_module(grad)
        ndim_diff = len(grad.shape) - len(target_shape)

        # Axes prepended by broadcasting are summed away entirely.
        if ndim_diff > 0:
            grad = xp.sum(grad, axis=tuple(range(ndim_diff)))

        # Size-1 axes that broadcasting stretched are summed with keepdims.
        for i, (g, t) in enumerate(zip(grad.shape, target_shape)):
            if g != t:
                grad = xp.sum(grad, axis=i, keepdims=True)

        return grad

    def backward(self):
        """Run reverse-mode autodiff starting from this tensor.

        Seeds this tensor's gradient with ones, then invokes each node's
        _backward in reverse topological order.

        :raises RuntimeError: if this tensor does not require gradients
        """
        if not self.requires_grad:
            raise RuntimeError("backward called on tensor that does not require gradients")

        topo = []
        visited = set()

        def build_topo(node):
            if node not in visited:
                visited.add(node)
                for child in node._prev:
                    build_topo(child)
                topo.append(node)

        build_topo(self)

        # Seed gradient (ones, matching this tensor's shape).
        xp = get_array_module(self.data)
        self.grad = xp.ones_like(self.data)

        for node in reversed(topo):
            node._backward()

    def zero_grad(self):
        """Clear .grad on this tensor and every ancestor (each visited once)."""
        visited = set()
        stack = [self]
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            node.grad = None
            stack.extend(node._prev)

    def __repr__(self):
        device = 'cuda' if USE_GPU and CUPY_AVAILABLE else 'cpu'
        return (f"Tensor(data={self.data}, "
                f"shape={self.data.shape}, "
                f"requires_grad={self.requires_grad}, "
                f"grad={self.grad is not None}, "
                f"device={device})")