import numpy as np
from typing import Tuple, Union, Optional, List, Callable

from ml_lib.core import Tensor
from ml_lib.nn.module import Module, Parameter

class Linear(Module):
    """Fully connected (affine) layer: y = x @ W.T + b."""

    def __init__(self, in_features: int, out_features: int, bias: bool = True):
        """
        Args:
            in_features: size of each input sample
            out_features: size of each output sample
            bias: whether to include a learnable bias term
        """
        super().__init__()
        # Glorot/Xavier uniform initialization for the weight matrix.
        bound = np.sqrt(6 / (in_features + out_features))
        init = np.random.uniform(-bound, bound, (out_features, in_features))
        self.weight_data = Tensor(init.astype(np.float32), requires_grad=True)
        self.weight = Parameter(self.weight_data)
        if not bias:
            self.bias = None
        else:
            self.bias_data = Tensor(
                np.zeros(out_features, dtype=np.float32),
                requires_grad=True
            )
            self.bias = Parameter(self.bias_data)

    def forward(self, x: Tensor) -> Tensor:
        """
        Apply the affine transformation.

        Args:
            x: input tensor of shape (batch_size, in_features)

        Returns:
            Output tensor of shape (batch_size, out_features).
        """
        result = x @ self.weight.data.T  # (batch_size, out_features)
        if self.bias is None:
            return result
        return result + self.bias.data  # bias added via broadcasting

# NOTE: Legacy BatchNorm1d with a hand-written backward pass; superseded by
# the autograd-based BatchNorm1d implementation further below. Kept for reference.
# class BatchNorm1d(Module):
#     """
#     1D batch normalization layer (manual backward)
#     """
#     def __init__(self, num_features, eps=1e-5, momentum=0.1):
#         """
#         初始化批归一化层
        
#         Args:
#             num_features: 特征数量
#             eps: 添加到方差的小量，防止除零
#             momentum: 用于计算运行时均值和方差的动量
#         """
#         super().__init__()
#         self.num_features = num_features
#         self.eps = eps
#         self.momentum = momentum
        
#         # 可学习参数
#         self.gamma = Parameter(np.ones(num_features))
#         self.beta = Parameter(np.zeros(num_features))
        
#         # 运行时统计量，不参与梯度更新
#         self.running_mean = np.zeros(num_features)
#         self.running_var = np.ones(num_features)
        
#     def forward(self, x: Tensor) -> Tensor:
#         """
#         前向传播
        
#         Args:
#             x: 输入张量，形状为 (batch_size, num_features)
            
#         Returns:
#             归一化后的张量
#         """
#         x_data = x.data.data if hasattr(x.data, 'data') else x.data
#         gamma_data = self.gamma.data.data if hasattr(self.gamma.data, 'data') else self.gamma.data
#         beta_data = self.beta.data.data if hasattr(self.beta.data, 'data') else self.beta.data
        
#         if self.training:
#             # 计算批次统计量
#             batch_mean = np.mean(x_data, axis=0)
#             batch_var = np.var(x_data, axis=0)
            
#             # 更新运行时统计量
#             self.running_mean = self.momentum * batch_mean + (1 - self.momentum) * self.running_mean
#             self.running_var = self.momentum * batch_var + (1 - self.momentum) * self.running_var
            
#             # 归一化
#             x_norm = (x_data - batch_mean) / np.sqrt(batch_var + self.eps)
#             out = gamma_data * x_norm + beta_data
            
#             # 保存前向传播中间结果用于反向传播
#             self.batch_mean = batch_mean
#             self.batch_var = batch_var
#             self.x_norm = x_norm
#             self.x_data = x_data
#         else:
#             # 使用运行时统计量进行归一化
#             x_norm = (x_data - self.running_mean) / np.sqrt(self.running_var + self.eps)
#             out = gamma_data * x_norm + beta_data
        
#         result = Tensor(out, requires_grad=x.requires_grad or self.gamma.requires_grad or self.beta.requires_grad)
        
#         if result.requires_grad and self.training:
#             def _backward():
#                 # 获取输出梯度
#                 if not hasattr(result, 'grad') or result.grad is None:
#                     return
                
#                 dout = result.grad
#                 batch_size = x_data.shape[0]
                
#                 # 计算gamma和beta的梯度
#                 if self.gamma.requires_grad:
#                     if self.gamma.grad is None:
#                         self.gamma.grad = np.zeros_like(gamma_data)
#                     self.gamma.grad += np.sum(dout * self.x_norm, axis=0)
                
#                 if self.beta.requires_grad:
#                     if self.beta.grad is None:
#                         self.beta.grad = np.zeros_like(beta_data)
#                     self.beta.grad += np.sum(dout, axis=0)
                
#                 # 计算x的梯度
#                 if x.requires_grad:
#                     # 完整的批归一化反向传播公式实现
#                     dx_norm = dout * gamma_data
#                     dvar = np.sum(dx_norm * (self.x_data - self.batch_mean) * -0.5 * np.power(self.batch_var + self.eps, -1.5), axis=0)
#                     dmean = np.sum(dx_norm * -1 / np.sqrt(self.batch_var + self.eps), axis=0) + dvar * np.mean(-2 * (self.x_data - self.batch_mean), axis=0)
                    
#                     if x.grad is None:
#                         x.grad = np.zeros_like(x_data)
                    
#                     x.grad += dx_norm / np.sqrt(self.batch_var + self.eps) + dvar * 2 * (self.x_data - self.batch_mean) / batch_size + dmean / batch_size
            
#             result._backward = _backward
            
#         return result
    
#     def __repr__(self):
#         return f"BatchNorm1d(num_features={self.num_features}, eps={self.eps}, momentum={self.momentum})"        

class BatchNorm1d(Module):
    """
    1D batch normalization layer (autograd-friendly).

    Normalizes each feature over the batch dimension, then applies a
    learnable per-feature scale (gamma) and shift (beta).
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        """
        Args:
            num_features: number of features (C in a (N, C) input)
            eps: small constant added to the variance to avoid division by zero
            momentum: weight given to the *current* batch statistics when
                updating the running mean/variance (PyTorch convention)
        """
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum

        # Learnable affine parameters, wrapped as Tensors with grad enabled.
        gamma_tensor = Tensor(np.ones(num_features, dtype=np.float32), requires_grad=True)
        beta_tensor = Tensor(np.zeros(num_features, dtype=np.float32), requires_grad=True)
        self.gamma = Parameter(gamma_tensor)
        self.beta = Parameter(beta_tensor)

        # Running statistics used at inference time; plain numpy arrays,
        # deliberately kept outside the autograd graph. Shape: (num_features,).
        self.running_mean = np.zeros(num_features, dtype=np.float32)
        self.running_var = np.ones(num_features, dtype=np.float32)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x: input tensor of shape (batch_size, num_features)

        Returns:
            Normalized, scaled and shifted tensor with the same shape as x.
        """
        if self.training:
            # Batch statistics computed with Tensor ops so gradients flow.
            mean = x.mean(axis=0, keepdims=True)
            var = ((x - mean) ** 2).mean(axis=0, keepdims=True)
            std = (var + self.eps) ** 0.5
            x_norm = (x - mean) / std
            out = self.gamma * x_norm + self.beta

            # Update running statistics with numpy (outside the graph).
            # BUGFIX: mean.data / var.data are shaped (1, num_features) because
            # of keepdims=True; without reshape(-1) the running buffers would
            # silently change shape to (1, num_features) after the first
            # training step.
            self.running_mean = (self.momentum * mean.data.reshape(-1)
                                 + (1 - self.momentum) * self.running_mean)
            self.running_var = (self.momentum * var.data.reshape(-1)
                                + (1 - self.momentum) * self.running_var)

            return out
        else:
            # Inference: normalize with the (gradient-free) running statistics.
            running_mean_tensor = Tensor(self.running_mean.reshape(1, -1))
            running_var_tensor = Tensor(self.running_var.reshape(1, -1))
            std = (running_var_tensor + self.eps) ** 0.5
            x_norm = (x - running_mean_tensor) / std
            out = self.gamma * x_norm + self.beta
            return out
    
class Flatten(Module):
    """Flattens all dimensions except the batch dimension: (N, ...) -> (N, -1)."""

    def forward(self, x: Tensor) -> Tensor:
        self.input_shape = x.data.shape  # remembered for the backward reshape
        x_data = x.data
        N = x_data.shape[0]
        out_data = x_data.reshape(N, -1)
        # BUGFIX: register x as a child so the autograd graph traversal reaches
        # it (matches the _children pattern used by relu/sigmoid/tanh below).
        out = Tensor(out_data,
                     requires_grad=x.requires_grad,
                     _children=(x,) if x.requires_grad else ())

        if x.requires_grad:
            def _backward():
                if out.grad is not None:
                    # BUGFIX: accumulate instead of overwrite, so gradients
                    # from multiple uses of x add up, consistent with the
                    # other ops in this file.
                    if x.grad is None:
                        x.grad = np.zeros_like(x_data)
                    x.grad += out.grad.reshape(self.input_shape)
            out._backward = _backward

        return out
        
class ReLU(Module):
    """Rectified linear unit activation module: f(x) = max(0, x)."""

    def __init__(self):
        super().__init__()

    def forward(self, x: Tensor) -> Tensor:
        """Apply ReLU element-wise via the functional API.

        Args:
            x: input tensor

        Returns:
            Tensor with negative entries clamped to zero.
        """
        return relu(x)

    def __repr__(self):
        return "ReLU()"
    
class Sigmoid(Module):
    """Sigmoid activation module: f(x) = 1 / (1 + exp(-x))."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """Apply the sigmoid element-wise via the functional API.

        Args:
            x: input tensor

        Returns:
            Tensor of values in (0, 1).
        """
        return sigmoid(x)

    def __repr__(self):
        return "Sigmoid()"
    
class Tanh(Module):
    """Hyperbolic tangent activation module: f(x) = tanh(x)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """Apply tanh element-wise via the functional API.

        Args:
            x: input tensor

        Returns:
            Tensor of values in (-1, 1).
        """
        return tanh(x)

    def __repr__(self):
        return "Tanh()"
    
class Sequential(Module):
    """Container that runs its sub-modules in order, piping each output
    into the next module's input."""

    def __init__(self, *modules):
        super().__init__()
        # Register under stringified indices, preserving insertion order.
        for index, layer in enumerate(modules):
            self._modules[str(index)] = layer

    def forward(self, x):
        """Feed x through every registered module, in registration order."""
        for layer in self._modules.values():
            x = layer(x)
        return x

# Functional API

def relu(x):
    """
    ReLU activation: f(x) = max(0, x).

    Args:
        x: input tensor

    Returns:
        Tensor with negative entries clamped to zero; participates in the
        autograd graph when ``x.requires_grad`` is set.
    """
    children = (x,) if x.requires_grad else ()
    result = Tensor(np.maximum(0, x.data),
                    requires_grad=x.requires_grad,
                    _children=children)

    if x.requires_grad:
        def _backward():
            grad_out = getattr(result, 'grad', None)
            if grad_out is None:
                return
            if x.grad is None:
                x.grad = np.zeros_like(x.data)
            x_arr = np.asarray(x.data)
            grad_arr = np.asarray(grad_out)
            # Gradient flows only where the input was strictly positive.
            mask = (x_arr > 0).astype(np.float32)
            x.grad += mask * grad_arr

        result._backward = _backward

    return result

def sigmoid(x):
    """
    Sigmoid activation: f(x) = 1 / (1 + exp(-x)).

    Computed in a numerically stable way: the naive ``exp(-x)`` overflows
    for large negative inputs. Splitting on the sign keeps the exponent
    argument non-positive in both branches, so ``np.exp`` never overflows.

    Args:
        x: input tensor

    Returns:
        Tensor of sigmoid values in (0, 1); participates in the autograd
        graph when ``x.requires_grad`` is set.
    """
    z = np.asarray(x.data)
    e = np.exp(-np.abs(z))  # exponent argument is always <= 0: no overflow
    # z >= 0: 1 / (1 + exp(-z));  z < 0: exp(z) / (1 + exp(z)) — same value.
    s = np.where(z >= 0, 1 / (1 + e), e / (1 + e))
    result = Tensor(s,
                   requires_grad=x.requires_grad,
                   _children=(x,) if x.requires_grad else ())

    if x.requires_grad:
        def _backward():
            if hasattr(result, 'grad') and result.grad is not None:
                if x.grad is None:
                    x.grad = np.zeros_like(x.data)
                result_grad = result.grad
                if not isinstance(result_grad, np.ndarray):
                    result_grad = np.array(result_grad)
                # d/dx sigmoid(x) = s * (1 - s)
                x.grad += s * (1 - s) * result_grad

        result._backward = _backward

    return result

def tanh(x):
    """
    Tanh activation: f(x) = tanh(x).

    Args:
        x: input tensor

    Returns:
        Tensor of values in (-1, 1); participates in the autograd graph
        when ``x.requires_grad`` is set.
    """
    t = np.tanh(x.data)
    children = (x,) if x.requires_grad else ()
    result = Tensor(t, requires_grad=x.requires_grad, _children=children)

    if x.requires_grad:
        def _backward():
            grad_out = getattr(result, 'grad', None)
            if grad_out is None:
                return
            if x.grad is None:
                x.grad = np.zeros_like(x.data)
            grad_arr = np.asarray(grad_out)
            # d/dx tanh(x) = 1 - tanh(x)^2
            x.grad += (1 - t ** 2) * grad_arr

        result._backward = _backward

    return result


class Conv2d(Module):
    """2D convolution layer (naive per-position implementation).

    NOTE(review): the forward pass loops over every (sample, out-channel,
    output position) and builds one scalar Tensor per position, so it is
    O(N * C_out * H_out * W_out) Python-level ops — correct but very slow;
    intended for small inputs / teaching use.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        """
        Args:
            in_channels: number of input channels
            out_channels: number of output channels (filters)
            kernel_size: int or (kh, kw) tuple; an int is expanded to a square kernel
            stride: step between successive windows (single int, used for both axes)
            padding: symmetric zero-padding added to height and width
            bias: whether to add a learnable per-output-channel bias
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        self.stride = stride
        self.padding = padding
        self.bias_enabled = bias

        # He (Kaiming) initialization: std = sqrt(2 / fan_in), where
        # fan_in = in_channels * kh * kw (= prod of weight_shape[1:]).
        weight_shape = (out_channels, in_channels, self.kernel_size[0], self.kernel_size[1])
        weight_data = Tensor(np.random.randn(*weight_shape).astype(np.float32) * np.sqrt(2. / np.prod(weight_shape[1:])), requires_grad=True)
        
        self.weight = Parameter(weight_data)

        if self.bias_enabled:
            bias_data = Tensor(np.zeros(out_channels, dtype=np.float32), requires_grad=True)
            self.bias = Parameter(bias_data)
        else:
            self.bias = None

    def forward(self, x: Tensor) -> Tensor:
        """Convolve x with the layer's filters.

        Args:
            x: input tensor; assumes shape (N, C_in, H_in, W_in) — confirmed
               by the 4-way unpack below.

        Returns:
            Output tensor of shape (N, out_channels, H_out, W_out).
        """
        N, C_in, H_in, W_in = x.shape
        K_h, K_w = self.kernel_size
        S = self.stride
        P = self.padding

        if P > 0:
            x = x.pad(((0, 0), (0, 0), (P, P), (P, P)))  # zero pad height and width

        # H_in/W_in were captured *before* padding, so the +2P term is correct.
        H_out = (H_in + 2 * P - K_h) // S + 1
        W_out = (W_in + 2 * P - K_w) // S + 1

        outputs = []  # collects one scalar Tensor per output position

        for n in range(N):
            for c_out in range(self.out_channels):
                for i in range(H_out):
                    for j in range(W_out):
                        h_start = i * S
                        w_start = j * S
                        # Slicing / elementwise ops stay Tensor-valued so the
                        # autograd graph is preserved — relies on the project
                        # Tensor supporting __getitem__, *, and sum().
                        window = x[n, :, h_start:h_start+K_h, w_start:w_start+K_w]    # Tensor
                        kernel = self.weight.data[c_out]                                  # Tensor
                        conv = (window * kernel).sum()                               # Tensor, keeps the graph
                        if self.bias_enabled:
                            conv = conv + self.bias.data[c_out]                           # Tensor, bias also kept in graph
                        outputs.append(conv)

        # Reassemble the flat list of scalar Tensors into the output shape;
        # the append order (n, c_out, i, j) matches the reshape layout.
        out = Tensor.stack(outputs).reshape(N, self.out_channels, H_out, W_out)
        return out


class MaxPool2d(Module):
    """2D max pooling layer (naive per-window implementation).

    NOTE(review): like Conv2d above, this loops in Python over every
    (sample, channel, output position) — correct but slow.
    """
    def __init__(self, kernel_size, stride=None, padding=0):
        """
        Args:
            kernel_size: int or (kh, kw) tuple; an int is expanded to a square window
            stride: int or (sh, sw) tuple; defaults to kernel_size when None
            padding: symmetric zero-padding added to height and width
        """
        super().__init__()
        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        # Default stride equals the kernel size (non-overlapping windows);
        # the second line normalizes either form to a (sh, sw) tuple.
        self.stride = stride if stride is not None else kernel_size
        self.stride = self.stride if isinstance(self.stride, tuple) else (self.stride, self.stride)
        self.padding = padding

    def forward(self, x: Tensor) -> Tensor:
        """Apply max pooling.

        Args:
            x: input tensor; assumes shape (N, C, H, W) — confirmed by the
               4-way unpack below.

        Returns:
            Output tensor of shape (N, C, H_out, W_out).
        """
        K_h, K_w = self.kernel_size
        S_h, S_w = self.stride
        P = self.padding

        if P > 0:
            x = x.pad(((0, 0), (0, 0), (P, P), (P, P)))  # Tensor padding

        N, C, H, W = x.shape
        H_out = (H - K_h) // S_h + 1
        W_out = (W - K_w) // S_w + 1

        out_vals = []
        # NOTE(review): max_indices is collected but never used or returned —
        # presumably intended for a future backward pass; confirm before removing.
        max_indices = []

        for n in range(N):
            for c in range(C):
                for i in range(H_out):
                    for j in range(W_out):
                        h_start = i * S_h
                        w_start = j * S_w
                        window = x[n, c, h_start:h_start + K_h, w_start:w_start + K_w]  # Tensor

                        # Flatten for argmax
                        # assumes argmax yields a plain integer index usable in
                        # // and % arithmetic below — TODO confirm against Tensor API
                        flat_window = window.reshape(-1)
                        max_idx = flat_window.argmax()
                        max_val = flat_window[max_idx]

                        out_vals.append(max_val)
                        # Map the flat window index back to absolute (h, w) coords.
                        max_indices.append((n, c, h_start + max_idx // K_w, w_start + max_idx % K_w))

        # Append order (n, c, i, j) matches the reshape layout.
        out = Tensor.stack(out_vals).reshape(N, C, H_out, W_out)
        return out