import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import matplotlib.pyplot as plt

class NeuralNetwork:
    """Two-layer feed-forward neural network for multi-class classification.

    Architecture: input -> sigmoid hidden layer -> softmax output layer,
    trained with full-batch gradient descent on the cross-entropy loss.
    """

    def __init__(self, input_size, hidden_size, output_size, learning_rate=0.01,
                 random_state=None):
        """
        Initialize the neural network.

        Parameters:
        input_size: number of input features
        hidden_size: number of hidden units
        output_size: number of output classes
        learning_rate: gradient-descent step size
        random_state: optional seed for reproducible weight initialization
        """
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.learning_rate = learning_rate

        # Small random weights break symmetry between units; biases start at zero.
        rng = np.random.default_rng(random_state)
        self.W1 = rng.standard_normal((self.input_size, self.hidden_size)) * 0.01
        self.b1 = np.zeros((1, self.hidden_size))
        self.W2 = rng.standard_normal((self.hidden_size, self.output_size)) * 0.01
        self.b2 = np.zeros((1, self.output_size))

        # Training history: loss per epoch; (train_acc, val_acc) per evaluation.
        self.loss_history = []
        self.accuracy_history = []

    def sigmoid(self, x):
        """Sigmoid activation; the input is clipped to prevent overflow in exp."""
        return 1 / (1 + np.exp(-np.clip(x, -250, 250)))

    def sigmoid_derivative(self, x):
        """Derivative of the sigmoid, expressed in terms of its OUTPUT value x."""
        return x * (1 - x)

    def softmax(self, x):
        """Row-wise softmax; the row max is subtracted for numerical stability."""
        exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))
        return exp_x / np.sum(exp_x, axis=1, keepdims=True)

    def forward(self, X):
        """Forward pass. Caches intermediate activations for backward().

        Returns the (n_samples, output_size) matrix of class probabilities.
        """
        # Hidden layer
        self.z1 = np.dot(X, self.W1) + self.b1
        self.a1 = self.sigmoid(self.z1)

        # Output layer
        self.z2 = np.dot(self.a1, self.W2) + self.b2
        self.a2 = self.softmax(self.z2)

        return self.a2

    def compute_loss(self, y_true, y_pred):
        """Mean cross-entropy loss; y_true is one-hot, y_pred are probabilities."""
        m = y_true.shape[0]
        # Small epsilon guards against log(0).
        log_likelihood = -np.log(y_pred[range(m), np.argmax(y_true, axis=1)] + 1e-8)
        loss = np.sum(log_likelihood) / m
        return loss

    def backward(self, X, y_true):
        """Backpropagation. Must be preceded by a forward() call on the same X.

        Returns the gradients (dW1, db1, dW2, db2), each averaged over the batch.
        """
        m = X.shape[0]

        # Softmax + cross-entropy gradient simplifies to (probabilities - targets).
        dz2 = self.a2 - y_true
        dW2 = (1 / m) * np.dot(self.a1.T, dz2)
        db2 = (1 / m) * np.sum(dz2, axis=0, keepdims=True)

        # Hidden-layer gradient via the chain rule through the sigmoid.
        dz1 = np.dot(dz2, self.W2.T) * self.sigmoid_derivative(self.a1)
        dW1 = (1 / m) * np.dot(X.T, dz1)
        db1 = (1 / m) * np.sum(dz1, axis=0, keepdims=True)

        return dW1, db1, dW2, db2

    def update_parameters(self, dW1, db1, dW2, db2):
        """Gradient-descent update of all weights and biases."""
        self.W1 -= self.learning_rate * dW1
        self.b1 -= self.learning_rate * db1
        self.W2 -= self.learning_rate * dW2
        self.b2 -= self.learning_rate * db2

    def predict(self, X):
        """Return the predicted class index for each row of X."""
        probabilities = self.forward(X)
        return np.argmax(probabilities, axis=1)

    def accuracy(self, y_true, y_pred):
        """Fraction of matching labels (both given as integer class indices)."""
        return np.mean(y_true == y_pred)

    def train(self, X_train, y_train, X_val, y_val, epochs=1000, verbose=True,
              eval_interval=100):
        """Train the network with full-batch gradient descent.

        Parameters:
        X_train: training features
        y_train: training labels, ONE-HOT encoded
        X_val: validation features
        y_val: validation labels as INTEGER class indices (not one-hot)
        epochs: number of full-batch updates
        verbose: print progress at each evaluation
        eval_interval: record/print accuracies every this many epochs
        """
        for epoch in range(epochs):
            # Forward pass
            y_pred = self.forward(X_train)

            # Loss is recorded every epoch
            loss = self.compute_loss(y_train, y_pred)
            self.loss_history.append(loss)

            # Backward pass and parameter update
            dW1, db1, dW2, db2 = self.backward(X_train, y_train)
            self.update_parameters(dW1, db1, dW2, db2)

            # Periodic evaluation on train and validation sets
            if epoch % eval_interval == 0:
                train_pred = self.predict(X_train)
                val_pred = self.predict(X_val)

                train_acc = self.accuracy(np.argmax(y_train, axis=1), train_pred)
                val_acc = self.accuracy(y_val, val_pred)
                self.accuracy_history.append((train_acc, val_acc))

                if verbose:
                    print(f"Epoch {epoch}, Loss: {loss:.4f}, Train Acc: {train_acc:.4f}, Val Acc: {val_acc:.4f}")

def prepare_data():
    """Load, standardize, one-hot encode and split the iris dataset.

    Returns:
        (X_train, X_val, X_test, y_train, y_val, y_test, target_names)
        where the y_* arrays are one-hot encoded. The overall split is
        60% train / 20% validation / 20% test, stratified by class.
    """
    # Load the iris dataset
    iris = load_iris()
    X = iris.data
    y = iris.target

    # Standardize features to zero mean and unit variance
    scaler = StandardScaler()
    X = scaler.fit_transform(X)

    # One-hot encode the integer class labels
    encoder = OneHotEncoder(sparse_output=False)
    y_onehot = encoder.fit_transform(y.reshape(-1, 1))

    # Hold out 20% as the test set, stratified by class
    X_train, X_test, y_train, y_test = train_test_split(
        X, y_onehot, test_size=0.2, random_state=42, stratify=y
    )

    # Split 25% of the remaining data off as a validation set
    # (0.25 of 80% = 20% of the full dataset)
    X_train, X_val, y_train, y_val = train_test_split(
        X_train, y_train, test_size=0.25, random_state=42, stratify=np.argmax(y_train, axis=1)
    )
    # NOTE: removed a leftover debug print that dumped the whole y_val array
    # under a mislabeled "training set size" message; main() reports the sizes.

    return X_train, X_val, X_test, y_train, y_val, y_test, iris.target_names

def plot_training_history(nn):
    """Show the recorded training loss and accuracy curves side by side."""
    fig, axes = plt.subplots(1, 2, figsize=(12, 4))
    loss_ax, acc_ax = axes

    # Left panel: loss per epoch.
    loss_ax.plot(nn.loss_history)
    loss_ax.set_title('Training Loss')
    loss_ax.set_xlabel('Epoch')
    loss_ax.set_ylabel('Loss')
    loss_ax.grid(True)

    # Right panel: accuracies, sampled every 100 epochs during training.
    sampled_epochs = [i * 100 for i in range(len(nn.accuracy_history))]
    train_curve = [pair[0] for pair in nn.accuracy_history]
    val_curve = [pair[1] for pair in nn.accuracy_history]

    acc_ax.plot(sampled_epochs, train_curve, label='Training Accuracy')
    acc_ax.plot(sampled_epochs, val_curve, label='Validation Accuracy')
    acc_ax.set_title('Training and Validation Accuracy')
    acc_ax.set_xlabel('Epoch')
    acc_ax.set_ylabel('Accuracy')
    acc_ax.legend()
    acc_ax.grid(True)

    plt.tight_layout()
    plt.show()

def main():
    """Entry point: prepare the data, train the network, report results."""
    # Load and split the dataset
    X_train, X_val, X_test, y_train, y_val, y_test, class_names = prepare_data()

    print("数据集信息:")
    print(f"训练集大小: {X_train.shape[0]}")
    print(f"验证集大小: {X_val.shape[0]}")
    print(f"测试集大小: {X_test.shape[0]}")
    print(f"特征数量: {X_train.shape[1]}")
    print(f"类别数量: {len(class_names)}")
    print(f"类别名称: {class_names}")

    # Network dimensions are derived from the data
    input_size = X_train.shape[1]
    hidden_size = 10
    output_size = len(class_names)
    nn = NeuralNetwork(input_size, hidden_size, output_size, learning_rate=0.1)

    print(f"\n神经网络结构: {input_size}-{hidden_size}-{output_size}")
    print("开始训练...")

    # Validation labels are passed as integer class indices, not one-hot
    val_labels = np.argmax(y_val, axis=1)
    nn.train(X_train, y_train, X_val, val_labels, epochs=2000, verbose=True)

    # Visualize the training history
    plot_training_history(nn)

    # Evaluate on the held-out test set
    test_predictions = nn.predict(X_test)
    true_labels = np.argmax(y_test, axis=1)
    test_accuracy = nn.accuracy(true_labels, test_predictions)

    print(f"\n测试集准确率: {test_accuracy:.4f}")

    # Show a sample of predictions against the ground truth
    print("\n前10个测试样本的预测结果:")
    print("真实标签 -> 预测标签")
    for idx in range(min(10, len(test_predictions))):
        true_label = class_names[true_labels[idx]]
        pred_label = class_names[test_predictions[idx]]
        print(f"{true_label} -> {pred_label}")

# Run the demo only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    main()