#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：DP1.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/29 19:51
神经网络

'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
from collections import deque
import matplotlib.pyplot as plt

class SimpleNeuralNetwork:
    """A minimal fully-connected neural network implemented with NumPy only.

    Architecture: input -> one or more ReLU hidden layers -> softmax output,
    trained by full-batch gradient descent on the cross-entropy loss.
    """

    def __init__(self, input_size, hidden_sizes, output_size, learning_rate=0.01):
        """Initialize weights and biases for every layer transition.

        Args:
            input_size: number of input features.
            hidden_sizes: non-empty list of hidden-layer widths.
            output_size: number of output classes.
            learning_rate: gradient-descent step size.
        """
        self.learning_rate = learning_rate

        # One weight matrix and one bias row per layer transition.
        # Small random weights (scale 0.1) break symmetry; biases start at zero.
        self.weights = []
        self.biases = []

        # Input layer -> first hidden layer.
        self.weights.append(np.random.randn(input_size, hidden_sizes[0]) * 0.1)
        self.biases.append(np.zeros((1, hidden_sizes[0])))

        # Hidden layer -> next hidden layer.
        for i in range(1, len(hidden_sizes)):
            self.weights.append(np.random.randn(hidden_sizes[i - 1], hidden_sizes[i]) * 0.1)
            self.biases.append(np.zeros((1, hidden_sizes[i])))

        # Last hidden layer -> output layer.
        self.weights.append(np.random.randn(hidden_sizes[-1], output_size) * 0.1)
        self.biases.append(np.zeros((1, output_size)))

    def relu(self, x):
        """Element-wise ReLU activation: max(0, x)."""
        return np.maximum(0, x)

    def relu_derivative(self, x):
        """Derivative of ReLU: 1 where x > 0, else 0."""
        return (x > 0).astype(float)

    def softmax(self, x):
        """Row-wise softmax; subtracts each row's max for numerical stability."""
        exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))
        return exp_x / np.sum(exp_x, axis=1, keepdims=True)

    def forward(self, X):
        """Forward pass; caches intermediates needed by backward().

        Args:
            X: (n_samples, input_size) input matrix.

        Returns:
            (n_samples, output_size) matrix of class probabilities.
        """
        self.layer_inputs = []   # pre-activation values z per layer (index 0 = raw input)
        self.layer_outputs = []  # post-activation values per layer (index 0 = raw input)

        current_input = X
        self.layer_inputs.append(current_input)
        self.layer_outputs.append(current_input)

        # Hidden layers: affine transform followed by ReLU.
        for i in range(len(self.weights) - 1):
            z = np.dot(current_input, self.weights[i]) + self.biases[i]
            current_input = self.relu(z)
            self.layer_inputs.append(z)
            self.layer_outputs.append(current_input)

        # Output layer: affine transform followed by softmax.
        z_output = np.dot(current_input, self.weights[-1]) + self.biases[-1]
        output = self.softmax(z_output)
        self.layer_inputs.append(z_output)
        self.layer_outputs.append(output)

        return output

    def backward(self, X, y, output):
        """Backpropagate the error and apply one gradient-descent update.

        Args:
            X: (n_samples, input_size) inputs used in the forward pass.
            y: (n_samples, output_size) one-hot target matrix.
            output: softmax probabilities returned by forward(X).
        """
        m = X.shape[0]  # batch size

        # Combined softmax + cross-entropy gradient w.r.t. the output
        # pre-activation: simply (probabilities - targets).
        delta = output - y

        # Per-layer gradients, filled from the output layer backwards.
        dW = [None] * len(self.weights)
        db = [None] * len(self.biases)

        # Output-layer gradients (averaged over the batch).
        dW[-1] = np.dot(self.layer_outputs[-2].T, delta) / m
        db[-1] = np.sum(delta, axis=0, keepdims=True) / m

        # Propagate the error back through the hidden layers.
        for i in range(len(self.weights) - 2, -1, -1):
            delta = np.dot(delta, self.weights[i + 1].T) * self.relu_derivative(self.layer_inputs[i + 1])
            dW[i] = np.dot(self.layer_outputs[i].T, delta) / m
            db[i] = np.sum(delta, axis=0, keepdims=True) / m

        # Gradient-descent parameter update.
        for i in range(len(self.weights)):
            self.weights[i] -= self.learning_rate * dW[i]
            self.biases[i] -= self.learning_rate * db[i]

    def train(self, X, y, epochs=1000):
        """Run full-batch gradient descent for a fixed number of epochs.

        Args:
            X: (n_samples, input_size) training inputs.
            y: (n_samples, output_size) one-hot labels.
            epochs: number of full-batch passes.

        Returns:
            List of per-epoch loss values.
        """
        losses = []

        for epoch in range(epochs):
            # Forward pass.
            output = self.forward(X)

            # Mean per-sample cross-entropy: sum over the class axis first,
            # then average over samples. (Fix: the previous np.mean over all
            # elements also divided by output_size, which was inconsistent
            # with the `output - y` gradient used in backward().)
            loss = -np.mean(np.sum(y * np.log(output + 1e-8), axis=1))
            losses.append(loss)

            # Backward pass + parameter update.
            self.backward(X, y, output)

            if epoch % 100 == 0:
                accuracy = np.mean(np.argmax(output, axis=1) == np.argmax(y, axis=1))
                print(f"Epoch {epoch}, Loss: {loss:.4f}, Accuracy: {accuracy:.4f}")

        return losses


# 示例：使用神经网络解决分类问题
def neural_network_example():
    """Train the network on the XOR problem, print predictions, plot the loss."""
    # Sample data: the XOR truth table.
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])  # one-hot labels

    # Build the network. Renamed local from `nn` to `net` to avoid
    # shadowing the `torch.nn` module imported at the top of the file.
    net = SimpleNeuralNetwork(input_size=2, hidden_sizes=[4, 4], output_size=2, learning_rate=0.1)

    # Train on the full batch.
    losses = net.train(X, y, epochs=1000)

    # Show the predicted class for every input.
    predictions = net.forward(X)
    print("\nPredictions:")
    for i in range(len(X)):
        print(f"Input: {X[i]}, Predicted: {np.argmax(predictions[i])}, Expected: {np.argmax(y[i])}")

    # Plot the training loss curve.
    plt.plot(losses)
    plt.title('Neural Network Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.show()


# Run the demo only when executed as a script, not when imported —
# training and plt.show() are heavy side effects to trigger on import.
if __name__ == "__main__":
    neural_network_example()