import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import os  # 导入os模块用于文件操作


class NeuralNetwork:
    """Minimal two-layer feed-forward network: ReLU hidden layer, sigmoid output.

    Trained one sample at a time with plain gradient descent on a squared-error
    loss. All vectors are column vectors (shape (n, 1)).
    """

    def __init__(self, input_size=5, hidden_size=3, output_size=1):
        # Small random weights break symmetry; biases start at zero.
        self.W1 = np.random.randn(hidden_size, input_size) * 0.01  # hidden x input
        self.W2 = np.random.randn(output_size, hidden_size) * 0.01  # output x hidden
        self.b1 = np.zeros((hidden_size, 1))  # hidden-layer bias
        self.b2 = np.zeros((output_size, 1))  # output-layer bias
        self.input_size = input_size

    def relu(self, x):
        """Element-wise ReLU activation: max(x, 0)."""
        return np.maximum(x, 0)

    def relu_derivative(self, x):
        """Derivative of ReLU evaluated at pre-activation x (1 where x > 0, else 0)."""
        return np.where(x > 0, 1, 0)

    def sigmoid(self, x):
        """Numerically-safe logistic function (input clipped to avoid exp overflow)."""
        safe_x = np.clip(x, -500, 500)
        return 1.0 / (1.0 + np.exp(-safe_x))

    def sigmoid_derivative(self, x):
        """Derivative of sigmoid, expressed in terms of its OUTPUT.

        Note: x here is the activation a = sigmoid(z), so the derivative
        is simply a * (1 - a).
        """
        return x * (1 - x)

    def forward(self, X):
        """Run one forward pass for a single sample.

        X: (input_size, 1) column vector.
        Returns the sigmoid output a2 and caches z1/a1/z2/a2 for backward().
        Raises ValueError when X's row count does not match W1.
        """
        expected_dim = self.W1.shape[1]
        if X.shape[0] != expected_dim:
            raise ValueError(
                f"Input X dimension {X.shape[0]} does not match W1 input dimension {expected_dim}"
            )

        # Hidden layer: z1 = W1 @ X + b1, then ReLU.
        self.z1 = self.W1 @ X + self.b1
        self.a1 = self.relu(self.z1)

        # Output layer: z2 = W2 @ a1 + b2, then sigmoid.
        self.z2 = self.W2 @ self.a1 + self.b2
        self.a2 = self.sigmoid(self.z2)

        return self.a2

    def backward(self, X, y, learning_rate=0.01):
        """Perform one gradient-descent step for a single (X, y) pair.

        Uses the squared-error loss L = 0.5 * (a2 - y)^2 and the cached
        activations from the most recent forward() call.

        X: (input_size, 1) input sample.
        y: (1, 1) target label.
        learning_rate: step size for the parameter update.
        """
        m = 1  # single-sample "batch"

        # Output-layer error term: delta2 = dL/dz2 = (a2 - y) * sigmoid'(z2),
        # where sigmoid' is expressed via the output a2.
        delta2 = (self.a2 - y) * self.sigmoid_derivative(self.a2)

        # Gradients for W2 / b2 (z2 = W2 @ a1 + b2).
        grad_W2 = (delta2 @ self.a1.T) / m  # (output_size x hidden_size)
        grad_b2 = np.sum(delta2, axis=1, keepdims=True) / m  # (output_size x 1)

        # Hidden-layer error term: propagate delta2 back through W2,
        # then gate by the ReLU derivative at z1.
        delta1 = (self.W2.T @ delta2) * self.relu_derivative(self.z1)

        # Gradients for W1 / b1 (z1 = W1 @ X + b1).
        grad_W1 = (delta1 @ X.T) / m  # (hidden_size x input_size)
        grad_b1 = np.sum(delta1, axis=1, keepdims=True) / m  # (hidden_size x 1)

        # Gradient-descent parameter update.
        self.W2 -= learning_rate * grad_W2
        self.b2 -= learning_rate * grad_b2
        self.W1 -= learning_rate * grad_W1
        self.b1 -= learning_rate * grad_b1


def load_data(train_file_path, test_file_path):
    """Load the train and test splits from CSV files.

    Each CSV is expected to contain feature columns followed by a final
    label column.

    Parameters:
    train_file_path (str): path of the training-set CSV.
    test_file_path (str): path of the test-set CSV.

    Returns:
    tuple: (X_train, y_train, X_test, y_test); on any failure (missing
           file, empty file, parse error) every element is None.
    """
    failure = (None, None, None, None)

    def _read_split(path, label):
        # Read one CSV split; returns (features, labels) or None if empty.
        print(f"开始加载{label}数据从: {path}")
        frame = pd.read_csv(path)
        if frame.empty:
            print(f"错误: {label}数据文件 {path} 为空。")
            return None
        features = frame.iloc[:, :-1].values  # all but the last column
        labels = frame.iloc[:, -1].values  # last column is the label
        print(f"{label}数据加载完成。特征形状: {features.shape}, 标签形状: {labels.shape}")
        return features, labels

    try:
        train_split = _read_split(train_file_path, "训练")
        if train_split is None:
            return failure

        test_split = _read_split(test_file_path, "测试")
        if test_split is None:
            # Mirror the training-split behavior: all-None on failure.
            return failure

        return train_split[0], train_split[1], test_split[0], test_split[1]
    except FileNotFoundError as e:
        print(f"错误: 数据文件未找到 - {e}")
        return failure
    except Exception as e:
        print(f"加载数据时发生错误: {e}")
        return failure


def evaluate_model(nn_model, X_test_data, y_test_data):
    """Score a trained network on held-out data.

    Parameters:
    nn_model (NeuralNetwork): model exposing forward() for a
        (num_features, 1) column-vector sample.
    X_test_data (np.array): test-set feature matrix, one row per sample.
    y_test_data (np.array): test-set ground-truth labels (0/1).

    Returns:
    dict: accuracy, precision, recall and F1 score.
    """
    n_samples, n_features = X_test_data.shape

    print(f"开始在测试集上评估模型，测试样本数: {n_samples}")
    # Forward each row as a column vector and threshold the probability at 0.5.
    predicted_labels = [
        int(nn_model.forward(X_test_data[idx, :].reshape(n_features, 1)).item() > 0.5)
        for idx in range(n_samples)
    ]

    # zero_division=0: when a denominator is zero (no positive predictions
    # or no positive truths), report 0 instead of warning.
    results = {
        "accuracy": accuracy_score(y_test_data, predicted_labels),
        "precision": precision_score(y_test_data, predicted_labels, zero_division=0),
        "recall": recall_score(y_test_data, predicted_labels, zero_division=0),
        "f1_score": f1_score(y_test_data, predicted_labels, zero_division=0),
    }
    print("测试集评估完成。")

    return results


if __name__ == "__main__":
    # --- 1. 配置参数 ---
    TRAIN_FILE_PATH = "spam_dataset_train.csv"  # 训练数据集文件路径
    TEST_FILE_PATH = "spam_dataset_test.csv"  # 测试数据集文件路径
    # 网络结构配置 (可以根据数据调整)
    # INPUT_LAYER_SIZE 将从数据中动态获取
    HIDDEN_LAYER_SIZE = 3
    OUTPUT_LAYER_SIZE = 1

    NUM_EPOCHS = 600  # 训练轮数
    LEARNING_RATE = 0.01  # 学习率
    PRINT_LOSS_EVERY_N_EPOCHS = 20  # 每多少轮打印一次损失

    # --- 加载数据 ---
    X_train, y_train, X_test, y_test = load_data(TRAIN_FILE_PATH, TEST_FILE_PATH)

    num_train_samples = X_train.shape[0]
    INPUT_LAYER_SIZE = X_train.shape[1]  # 从训练数据获取特征数/输入层大小

    # --- 初始化神经网络 ---
    nn = NeuralNetwork(
        input_size=INPUT_LAYER_SIZE,
        hidden_size=HIDDEN_LAYER_SIZE,
        output_size=OUTPUT_LAYER_SIZE,
    )
    print(
        f"神经网络初始化完成。结构: Input={INPUT_LAYER_SIZE}, Hidden={HIDDEN_LAYER_SIZE}, Output={OUTPUT_LAYER_SIZE}"
    )

    # --- 训练网络 ---
    print(f"\n--- 训练网络 ---")
    print(
        f"总轮数: {NUM_EPOCHS}, 学习率: {LEARNING_RATE}, 训练样本数: {num_train_samples}"
    )

    for epoch in range(NUM_EPOCHS):
        total_epoch_loss = 0
        # 可选：在每个epoch开始时打乱训练数据
        permutation = np.random.permutation(num_train_samples)
        X_train_shuffled = X_train[permutation]
        y_train_shuffled = y_train[permutation]

        for i in range(num_train_samples):
            # 准备单个训练样本
            X_sample_train = X_train_shuffled[i, :].reshape(INPUT_LAYER_SIZE, 1)
            # 确保标签是 (1,1) 的形状，因为输出层大小是1
            y_sample_train = np.array([[y_train_shuffled[i]]])

            # 前向传播
            prediction_prob = nn.forward(X_sample_train)

            # 计算当前样本损失 (用于累加epoch总损失)
            # 使用均方误差损失: L = 1/2 * (prediction - actual)^2
            sample_loss = 0.5 * np.square(
                prediction_prob.item() - y_sample_train.item()
            )
            total_epoch_loss += sample_loss

            # 反向传播更新参数
            nn.backward(X_sample_train, y_sample_train, learning_rate=LEARNING_RATE)

        # 计算并打印当前epoch的平均损失
        average_epoch_loss = total_epoch_loss / num_train_samples
        if (epoch + 1) % PRINT_LOSS_EVERY_N_EPOCHS == 0 or epoch == NUM_EPOCHS - 1:
            print(
                f"轮次 {epoch + 1}/{NUM_EPOCHS}, 平均训练损失: {average_epoch_loss:.6f}"
            )

    print("网络训练完成。")

    # --- 评估模型 ---
    print("\n--- 评估模型 ---")
    evaluation_metrics = evaluate_model(nn, X_test, y_test)
    print("\n--- 模型评估结果 (测试集) ---")
    print(f"准确率 (Accuracy):  {evaluation_metrics['accuracy']:.4f}")
    print(f"精确率 (Precision): {evaluation_metrics['precision']:.4f}")
    print(f"召回率 (Recall):    {evaluation_metrics['recall']:.4f}")
    print(f"F1分数 (F1-score):  {evaluation_metrics['f1_score']:.4f}")
