import numpy as np
import pandas as pd
from sklearn.metrics import (
    accuracy_score,
)  # , precision_score, recall_score, f1_score #仅需要准确率
import os


class NeuralNetwork:
    """A small fully-connected network: Input -> ReLU -> ReLU -> Sigmoid.

    Uses the column-vector convention throughout: every activation is a
    (layer_size x 1) array, and each weight matrix W maps the previous
    layer's activations to the current layer. Trained one sample at a
    time with plain gradient descent on the squared-error loss.
    """

    def __init__(self, input_size, hidden1_size, hidden2_size, output_size):
        """Allocate small random weights and zero biases for all three layers.

        Args:
            input_size: number of input features.
            hidden1_size: neurons in the first hidden layer.
            hidden2_size: neurons in the second hidden layer.
            output_size: neurons in the output layer (1 for binary tasks).
        """
        # Layer 1: input -> hidden1. Scale 0.01 keeps initial pre-activations
        # near zero so the sigmoid output starts close to 0.5.
        self.W1 = np.random.randn(hidden1_size, input_size) * 0.01
        self.b1 = np.zeros((hidden1_size, 1))

        # Layer 2: hidden1 -> hidden2
        self.W2 = np.random.randn(hidden2_size, hidden1_size) * 0.01
        self.b2 = np.zeros((hidden2_size, 1))

        # Layer 3: hidden2 -> output
        self.W3 = np.random.randn(output_size, hidden2_size) * 0.01
        self.b3 = np.zeros((output_size, 1))

        self.input_size = input_size

    def relu(self, x):
        """Element-wise rectified linear unit: max(x, 0)."""
        return np.maximum(x, 0)

    def relu_derivative(self, x):
        """Derivative of ReLU with respect to its INPUT z: 1 if z > 0 else 0."""
        return np.where(x > 0, 1, 0)

    def sigmoid(self, x):
        """Numerically-safe logistic sigmoid (input clipped to avoid exp overflow)."""
        safe = np.clip(x, -500, 500)
        return 1 / (1 + np.exp(-safe))

    def sigmoid_derivative(self, x):
        """Derivative of sigmoid expressed via its OUTPUT a = sigmoid(z): a*(1-a)."""
        return x * (1 - x)

    def forward(self, X):
        """Run one forward pass on a single column vector.

        Args:
            X: input sample of shape (input_size x 1).

        Returns:
            The output activation a3, shape (output_size x 1).

        Raises:
            ValueError: when X's row count does not match W1's input width.

        Side effects: caches z1/a1/z2/a2/z3/a3 on self for backward().
        """
        if X.shape[0] != self.W1.shape[1]:
            raise ValueError(
                f"Input X dimension {X.shape[0]} does not match W1 input dimension {self.W1.shape[1]}"
            )

        # Hidden layer 1 (ReLU)
        self.z1 = self.W1 @ X + self.b1
        self.a1 = self.relu(self.z1)

        # Hidden layer 2 (ReLU)
        self.z2 = self.W2 @ self.a1 + self.b2
        self.a2 = self.relu(self.z2)

        # Output layer (sigmoid)
        self.z3 = self.W3 @ self.a2 + self.b3
        self.a3 = self.sigmoid(self.z3)

        return self.a3

    def backward(self, X, y, learning_rate=0.01):
        """One gradient-descent step for a single sample.

        Loss: L = 0.5 * (a3 - y)^2. Error terms delta_k = dL/dz_k are
        propagated from the output layer back to layer 1, then every
        parameter is updated in place.

        Args:
            X: the sample used in the preceding forward() call (input_size x 1).
            y: its target label (output_size x 1).
            learning_rate: SGD step size.
        """
        m = 1  # single-sample update

        # Output layer: delta3 = dL/da3 * da3/dz3 = (a3 - y) * a3 * (1 - a3)
        delta3 = (self.a3 - y) * self.sigmoid_derivative(self.a3)
        grad_W3 = (delta3 @ self.a2.T) / m
        grad_b3 = np.sum(delta3, axis=1, keepdims=True) / m

        # Hidden layer 2: back-propagate through W3, gate with relu'(z2)
        delta2 = (self.W3.T @ delta3) * self.relu_derivative(self.z2)
        grad_W2 = (delta2 @ self.a1.T) / m
        grad_b2 = np.sum(delta2, axis=1, keepdims=True) / m

        # Hidden layer 1: back-propagate through W2, gate with relu'(z1)
        delta1 = (self.W2.T @ delta2) * self.relu_derivative(self.z1)
        grad_W1 = (delta1 @ X.T) / m
        grad_b1 = np.sum(delta1, axis=1, keepdims=True) / m

        # In-place vanilla gradient-descent update of every parameter.
        for param, grad in (
            (self.W3, grad_W3),
            (self.b3, grad_b3),
            (self.W2, grad_W2),
            (self.b2, grad_b2),
            (self.W1, grad_W1),
            (self.b1, grad_b1),
        ):
            param -= learning_rate * grad


def load_data(train_file_path, test_file_path):
    """Load the train and test splits from CSV files.

    Each CSV is expected to have feature columns followed by a final
    label column (the data is assumed valid; no schema validation here).

    Args:
        train_file_path: path to the training CSV.
        test_file_path: path to the test CSV.

    Returns:
        (X_train, y_train, X_test, y_test) as numpy arrays.

    Raises:
        SystemExit: with code 1 when a file is missing or unreadable,
            which is fatal for this script.
    """
    try:
        print(f"开始加载训练数据从: {train_file_path}")
        train_df = pd.read_csv(train_file_path)
        # All columns except the last are features; the last is the label.
        X_train = train_df.iloc[:, :-1].values
        y_train = train_df.iloc[:, -1].values
        print(f"训练数据加载完成。特征形状: {X_train.shape}, 标签形状: {y_train.shape}")

        print(f"开始加载测试数据从: {test_file_path}")
        test_df = pd.read_csv(test_file_path)
        X_test = test_df.iloc[:, :-1].values
        y_test = test_df.iloc[:, -1].values
        print(f"测试数据加载完成。特征形状: {X_test.shape}, 标签形状: {y_test.shape}")

        return X_train, y_train, X_test, y_test
    except FileNotFoundError as e:
        print(f"错误: 数据文件未找到 - {e}")
        # raise SystemExit rather than calling the site-module builtin
        # exit(): it behaves identically here but also works under
        # `python -S`, frozen builds, and embedded interpreters.
        raise SystemExit(1)
    except Exception as e:
        print(f"加载数据时发生错误: {e}")
        raise SystemExit(1)


def evaluate_model(nn_model, X_test_data, y_test_data):
    """Compute classification accuracy of `nn_model` on a test set.

    Each row of X_test_data is fed through the network as a column
    vector; the sigmoid output is thresholded at 0.5 into a 0/1 class.

    Args:
        nn_model: object exposing forward(X) -> (1 x 1) probability array.
        X_test_data: (num_samples x num_features) feature matrix.
        y_test_data: length-num_samples array of 0/1 labels.

    Returns:
        {"accuracy": float} — fraction of correctly classified samples.
    """
    predictions_classes = []
    num_test_samples = X_test_data.shape[0]
    num_features = X_test_data.shape[1]

    print(f"开始在测试集上评估模型，测试样本数: {num_test_samples}")
    for i in range(num_test_samples):
        X_sample = X_test_data[i, :].reshape(num_features, 1)
        predicted_prob = nn_model.forward(X_sample)
        # Threshold the sigmoid probability at 0.5.
        predicted_class = 1 if predicted_prob.item() > 0.5 else 0
        predictions_classes.append(predicted_class)

    # Accuracy is just the mean of exact matches; computing it with numpy
    # avoids pulling in sklearn's accuracy_score for a one-line mean
    # (identical result for 0/1 labels).
    accuracy = float(
        np.mean(np.asarray(predictions_classes) == np.asarray(y_test_data))
    )
    print("测试集评估完成。")

    return {"accuracy": accuracy}


if __name__ == "__main__":
    # --- 1. Configuration ---
    TRAIN_FILE_PATH = "spam_dataset_train.csv"
    TEST_FILE_PATH = "spam_dataset_test.csv"

    # Network architecture. The input-layer size is read from the data
    # below; only the hidden/output widths are fixed here.
    HIDDEN_LAYER_1_SIZE = 4  # neurons in hidden layer 1
    HIDDEN_LAYER_2_SIZE = 3  # neurons in hidden layer 2
    OUTPUT_LAYER_SIZE = 1  # single sigmoid output for binary classification

    NUM_EPOCHS = 600
    LEARNING_RATE = 0.01
    PRINT_LOSS_EVERY_N_EPOCHS = 20

    print("--- 神经网络 (5-4-3-1) 训练与评估开始 ---")

    # --- 2. Verify the data files exist before doing any work ---
    print("\n--- 阶段1: 数据文件检查 ---")
    if not os.path.exists(TRAIN_FILE_PATH):
        print(f"错误: 训练数据文件 '{TRAIN_FILE_PATH}' 未找到。")
        # SystemExit rather than the site-module builtin exit(); identical
        # behavior but also works under `python -S` and frozen builds.
        raise SystemExit(1)
    if not os.path.exists(TEST_FILE_PATH):
        print(f"错误: 测试数据文件 '{TEST_FILE_PATH}' 未找到。")
        raise SystemExit(1)
    print("数据文件检查通过。")

    # --- 3. Load the data ---
    print("\n--- 阶段2: 加载数据 ---")
    X_train, y_train, X_test, y_test = load_data(TRAIN_FILE_PATH, TEST_FILE_PATH)

    num_train_samples = X_train.shape[0]
    INPUT_LAYER_SIZE = X_train.shape[1]  # feature count drives the input layer

    # --- 4. Build the network ---
    print("\n--- 阶段3: 初始化神经网络 ---")
    nn = NeuralNetwork(
        input_size=INPUT_LAYER_SIZE,
        hidden1_size=HIDDEN_LAYER_1_SIZE,
        hidden2_size=HIDDEN_LAYER_2_SIZE,
        output_size=OUTPUT_LAYER_SIZE,
    )
    print(
        f"神经网络初始化完成。结构: Input={INPUT_LAYER_SIZE}, Hidden1={HIDDEN_LAYER_1_SIZE}, Hidden2={HIDDEN_LAYER_2_SIZE}, Output={OUTPUT_LAYER_SIZE}"
    )

    # --- 5. Train: per-sample SGD with a fresh shuffle every epoch ---
    print("\n--- 阶段4: 训练网络 ---")
    print(
        f"总轮数: {NUM_EPOCHS}, 学习率: {LEARNING_RATE}, 训练样本数: {num_train_samples}"
    )

    for epoch in range(NUM_EPOCHS):
        total_epoch_loss = 0
        # Shuffle so each epoch visits the samples in a new order.
        permutation = np.random.permutation(num_train_samples)
        X_train_shuffled = X_train[permutation]
        y_train_shuffled = y_train[permutation]

        for i in range(num_train_samples):
            X_sample_train = X_train_shuffled[i, :].reshape(INPUT_LAYER_SIZE, 1)
            y_sample_train = np.array([[y_train_shuffled[i]]])

            prediction_prob = nn.forward(X_sample_train)
            # Squared-error loss on this sample: 0.5 * (prediction - target)^2
            sample_loss = 0.5 * np.square(
                prediction_prob.item() - y_sample_train.item()
            )
            total_epoch_loss += sample_loss
            nn.backward(X_sample_train, y_sample_train, learning_rate=LEARNING_RATE)

        average_epoch_loss = total_epoch_loss / num_train_samples
        if (epoch + 1) % PRINT_LOSS_EVERY_N_EPOCHS == 0 or epoch == NUM_EPOCHS - 1:
            print(
                f"轮次 {epoch + 1}/{NUM_EPOCHS}, 平均训练损失: {average_epoch_loss:.6f}"
            )
    print("网络训练完成。")

    # --- 6. Evaluate on the held-out test set ---
    print("\n--- 阶段5: 在测试集上评估模型性能 ---")
    evaluation_metrics = evaluate_model(nn, X_test, y_test)
    print("\n--- 模型评估结果 (测试集) ---")
    print(f"准确率 (Accuracy):  {evaluation_metrics['accuracy']:.4f}")

    print("\n--- 神经网络 (5-4-3-1) 训练与评估结束 ---")
