import numpy as np
import time
from typing import List, Tuple, Callable


class FullyConnectedNN:
    """Fully connected feed-forward neural network trained with full-batch
    gradient descent on mean squared error.

    Hidden layers use the configured nonlinearity; the output layer is linear.
    Data is column-major: arrays have shape (n_features, n_samples).
    The class also tracks rough operation counts so the accompanying
    complexity analysis can report per-architecture costs.
    """

    def __init__(self, layer_dims: List[int], activation: str = 'relu',
                 learning_rate: float = 0.01, random_seed: int = 42):
        """Build and initialize the network.

        Args:
            layer_dims: Neurons per layer, e.g.
                [input_dim, hidden1, hidden2, ..., output_dim].
            activation: Hidden-layer activation: 'relu', 'sigmoid' or 'tanh'.
            learning_rate: Gradient-descent step size.
            random_seed: Seed for the weight-initialization RNG.

        Raises:
            ValueError: If ``activation`` is not a supported name.
        """
        # Use a private RandomState instead of np.random.seed() so that
        # constructing a network does not clobber the caller's global random
        # state.  RandomState(seed) produces the exact same sequence the old
        # global seeding did, so initial weights are unchanged.
        self._rng = np.random.RandomState(random_seed)
        self.layer_dims = layer_dims
        self.L = len(layer_dims) - 1  # number of weight layers (input excluded)
        self.learning_rate = learning_rate

        # Parameters/gradients keyed 'W1', 'b1', ...; forward cache keyed 'A0', 'Z1', ...
        self.parameters = {}
        self.gradients = {}
        self.cache = {}

        # Hidden-layer nonlinearity and its derivative.
        self.activation_fn, self.activation_derivative = self._get_activation_functions(activation)

        self._initialize_parameters()

        # Operation counters (reset on each forward/backward pass).
        self.forward_operations = 0
        self.backward_operations = 0
        self.weight_updates = 0
        self.bias_updates = 0

    def _get_activation_functions(self, activation: str) -> Tuple[Callable, Callable]:
        """Return the (activation, derivative) pair for the given name."""
        if activation == 'relu':
            return self._relu, self._relu_derivative
        elif activation == 'sigmoid':
            return self._sigmoid, self._sigmoid_derivative
        elif activation == 'tanh':
            return self._tanh, self._tanh_derivative
        else:
            raise ValueError(f"不支持的激活函数: {activation}")

    def _relu(self, x: np.ndarray) -> np.ndarray:
        return np.maximum(0, x)

    def _relu_derivative(self, x: np.ndarray) -> np.ndarray:
        # Conventional subgradient: 0 at x == 0.
        return (x > 0).astype(float)

    def _sigmoid(self, x: np.ndarray) -> np.ndarray:
        # Clip to avoid overflow in exp() for large-magnitude inputs.
        return 1 / (1 + np.exp(-np.clip(x, -250, 250)))

    def _sigmoid_derivative(self, x: np.ndarray) -> np.ndarray:
        s = self._sigmoid(x)
        return s * (1 - s)

    def _tanh(self, x: np.ndarray) -> np.ndarray:
        return np.tanh(x)

    def _tanh_derivative(self, x: np.ndarray) -> np.ndarray:
        return 1 - np.tanh(x) ** 2

    def _initialize_parameters(self):
        """He-initialize weights; zero-initialize biases."""
        for l in range(1, self.L + 1):
            self.parameters[f'W{l}'] = self._rng.randn(
                self.layer_dims[l], self.layer_dims[l - 1]) * np.sqrt(2. / self.layer_dims[l - 1])
            self.parameters[f'b{l}'] = np.zeros((self.layer_dims[l], 1))

    def forward(self, X: np.ndarray) -> np.ndarray:
        """Run a forward pass and cache intermediates for backprop.

        Args:
            X: Input batch of shape (n_features, n_samples).

        Returns:
            Output-layer activations, shape (output_dim, n_samples).
        """
        m = X.shape[1]  # batch size, used for operation accounting
        self.cache['A0'] = X
        A_prev = X

        self.forward_operations = 0

        # Hidden layers: affine transform followed by the nonlinearity.
        for l in range(1, self.L):
            Z = np.dot(self.parameters[f'W{l}'], A_prev) + self.parameters[f'b{l}']
            A = self.activation_fn(Z)

            # Cache pre- and post-activation values for backward().
            self.cache[f'Z{l}'] = Z
            self.cache[f'A{l}'] = A

            A_prev = A

            # Fix: counts now include the batch size m, matching both the
            # accounting in backward() and the O(sum n_l * n_{l-1} * m)
            # complexity this file reports.
            self.forward_operations += self.layer_dims[l] * self.layer_dims[l - 1] * m  # matmul
            self.forward_operations += self.layer_dims[l] * m  # bias add
            self.forward_operations += self.layer_dims[l] * m  # activation

        # Output layer: linear (identity) activation.
        Z_output = np.dot(self.parameters[f'W{self.L}'], A_prev) + self.parameters[f'b{self.L}']
        A_output = Z_output

        self.cache[f'Z{self.L}'] = Z_output
        self.cache[f'A{self.L}'] = A_output

        self.forward_operations += self.layer_dims[self.L] * self.layer_dims[self.L - 1] * m  # matmul
        self.forward_operations += self.layer_dims[self.L] * m  # bias add

        return A_output

    def compute_loss(self, Y_hat: np.ndarray, Y: np.ndarray) -> float:
        """Mean squared error: sum((Y_hat - Y)^2) / (2m)."""
        m = Y.shape[1]
        return float(np.sum((Y_hat - Y) ** 2) / (2 * m))

    def backward(self, X: np.ndarray, Y: np.ndarray):
        """Backpropagate MSE gradients into ``self.gradients``.

        Args:
            X: Input batch, shape (n_features, n_samples).
            Y: Targets, shape (output_dim, n_samples).
        """
        m = X.shape[1]

        self.backward_operations = 0
        self.weight_updates = 0
        self.bias_updates = 0

        # With a linear output layer and MSE loss, dL/dZ_L = A_L - Y.
        A_L = self.cache[f'A{self.L}']
        dZ = A_L - Y

        for l in reversed(range(1, self.L + 1)):
            A_prev = self.cache[f'A{l - 1}']

            # Gradients for this layer's weights and biases.
            dW = np.dot(dZ, A_prev.T) / m
            db = np.sum(dZ, axis=1, keepdims=True) / m

            self.gradients[f'dW{l}'] = dW
            self.gradients[f'db{l}'] = db

            # dW/db cost for this layer.
            self.backward_operations += self.layer_dims[l] * self.layer_dims[l - 1] * m
            self.backward_operations += self.layer_dims[l] * m

            if l > 1:
                # Propagate the error to layer l-1 through W{l} and the
                # activation derivative.
                dA_prev = np.dot(self.parameters[f'W{l}'].T, dZ)
                dZ = dA_prev * self.activation_derivative(self.cache[f'Z{l - 1}'])

                # Bug fix: these costs were previously counted when l < L
                # instead of when they are actually computed (whenever l > 1),
                # over-counting the input layer and skipping the cost of the
                # dA_prev computed at the output layer.
                self.backward_operations += self.layer_dims[l] * self.layer_dims[l - 1] * m  # dA_prev
                self.backward_operations += self.layer_dims[l - 1] * m  # dZ

    def update_parameters(self):
        """Apply one gradient-descent step to every weight and bias."""
        for l in range(1, self.L + 1):
            self.parameters[f'W{l}'] -= self.learning_rate * self.gradients[f'dW{l}']
            self.parameters[f'b{l}'] -= self.learning_rate * self.gradients[f'db{l}']

            # Count the individual parameter updates performed.
            self.weight_updates += self.layer_dims[l] * self.layer_dims[l - 1]
            self.bias_updates += self.layer_dims[l]

    def train(self, X: np.ndarray, Y: np.ndarray, epochs: int = 1000,
              verbose: bool = True) -> List[float]:
        """Train with full-batch gradient descent.

        Args:
            X: Training inputs, shape (n_features, n_samples).
            Y: Training targets, shape (output_dim, n_samples).
            epochs: Number of passes over the batch.
            verbose: Print a header plus the loss every 100 epochs.
                (Fix: the header is now also suppressed when False.)

        Returns:
            Per-epoch loss history. Previously nothing was returned, so
            existing callers that ignore the result are unaffected.
        """
        if verbose:
            print("开始训练...")
            print(f"网络结构: {self.layer_dims}")
            print(f"总参数数量: {self.count_parameters()}")

        history: List[float] = []
        for epoch in range(epochs):
            Y_hat = self.forward(X)
            loss = self.compute_loss(Y_hat, Y)
            history.append(loss)

            self.backward(X, Y)
            self.update_parameters()

            if verbose and epoch % 100 == 0:
                print(f"Epoch {epoch}, Loss: {loss:.6f}")
        return history

    def count_parameters(self) -> int:
        """Return the total number of trainable parameters."""
        total_params = 0
        for l in range(1, self.L + 1):
            total_params += self.layer_dims[l] * self.layer_dims[l - 1]  # weights
            total_params += self.layer_dims[l]  # biases
        return total_params

    def get_operation_counts(self) -> dict:
        """Return the counters gathered by the last forward/backward/update."""
        return {
            'forward_operations': self.forward_operations,
            'backward_operations': self.backward_operations,
            'weight_updates': self.weight_updates,
            'bias_updates': self.bias_updates,
            'total_parameters': self.count_parameters()
        }


def analyze_complexity():
    """Run one training iteration on networks of increasing size and report
    the operation counts and wall-clock time for each architecture."""
    print("\n" + "=" * 50)
    print("时间复杂度分析")
    print("=" * 50)

    # Architectures ordered from smallest to largest.
    architectures = [
        [2, 4, 1],
        [2, 8, 4, 1],
        [2, 16, 8, 4, 1],
        [2, 32, 16, 8, 4, 1],
    ]

    collected = []

    for idx, arch in enumerate(architectures, start=1):
        print(f"\n测试网络结构 {idx}: {arch}")

        net = FullyConnectedNN(arch, activation='relu')

        # Synthetic batch of 100 samples with matching in/out widths.
        inputs = np.random.randn(arch[0], 100)
        targets = np.random.randn(arch[-1], 100)

        # Time a single forward / backward / update iteration.
        t0 = time.time()
        net.forward(inputs)
        net.backward(inputs, targets)
        net.update_parameters()
        elapsed = time.time() - t0

        stats = net.get_operation_counts()
        stats['structure'] = arch
        stats['time'] = elapsed
        collected.append(stats)

        print(f"前向传播操作数: {stats['forward_operations']}")
        print(f"反向传播操作数: {stats['backward_operations']}")
        print(f"权重更新次数: {stats['weight_updates']}")
        print(f"偏置更新次数: {stats['bias_updates']}")
        print(f"总参数数量: {stats['total_parameters']}")
        print(f"执行时间: {stats['time']:.6f}秒")

    return collected


def print_complexity_analysis(results):
    """Pretty-print the statistics gathered by analyze_complexity together
    with a summary of the asymptotic time complexity."""
    print("\n" + "=" * 60)
    print("时间复杂度总结")
    print("=" * 60)

    print("\n各网络结构操作统计:")
    print("结构\t\t总参数\t前向操作\t反向操作\t权重更新\t偏置更新")
    for entry in results:
        arch_label = '->'.join(str(n) for n in entry['structure'])
        row = (
            f"{arch_label}\t{entry['total_parameters']}\t{entry['forward_operations']}"
            f"\t\t{entry['backward_operations']}\t\t{entry['weight_updates']}"
            f"\t\t{entry['bias_updates']}"
        )
        print(row)

    # Static summary text, emitted line by line.
    summary_lines = [
        "\n时间复杂度分析:",
        "1. 前向传播时间复杂度: O(∑(n_l × n_{l-1} × m))",
        "2. 反向传播时间复杂度: O(∑(n_l × n_{l-1} × m))",
        "3. 参数更新时间复杂度: O(∑(n_l × n_{l-1}))",
        "4. 总时间复杂度: O(L × n_max² × m)",
        "\n其中:",
        "  n_l: 第l层神经元个数",
        "  L: 网络层数",
        "  n_max: 最大层神经元个数",
        "  m: 样本数量",
        "\n观察结果:",
        "- 权重更新次数 = 总权重参数数量 = ∑(n_l × n_{l-1})",
        "- 偏置更新次数 = 总偏置参数数量 = ∑(n_l)",
        "- 时间复杂度与网络深度L和宽度n_l呈多项式关系",
        "- 对于全连接层，时间复杂度主要受相邻层神经元乘积影响",
    ]
    for line in summary_lines:
        print(line)


if __name__ == "__main__":
    # Demo: train a small network on the XOR truth table, then run the
    # complexity analysis across several architectures.
    print("全连接神经网络实现示例")
    print("=" * 40)

    network = FullyConnectedNN([2, 4, 1], activation='relu', learning_rate=0.01)

    # XOR samples as columns; targets as a single row vector.
    X = np.array([[0, 0, 1, 1],
                  [0, 1, 0, 1]])
    Y = np.array([[0, 1, 1, 0]])

    print(f"输入数据形状: {X.shape}")
    print(f"输出数据形状: {Y.shape}")

    network.train(X, Y, epochs=1000, verbose=True)

    # Inspect the trained network's predictions on the training set.
    predictions = network.forward(X)
    print(f"\n预测结果: {predictions.flatten()}")
    print(f"真实标签: {Y.flatten()}")

    # Compare operation counts and timing across network sizes.
    stats = analyze_complexity()
    print_complexity_analysis(stats)