import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.font_manager as fm
from matplotlib.patches import FancyBboxPatch
import matplotlib.patches as mpatches

# Configure fonts that can render CJK glyphs, and fix minus-sign rendering.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False  # render '-' correctly with CJK fonts

class NeuralNetworkAnimation:
    """Animated demonstration of forward/backward propagation on a 2-3-1 MLP.

    The network uses sigmoid activations in both layers and is trained with
    plain gradient descent on a single hard-coded sample.  All drawing is
    performed on a matplotlib figure owned by the instance; node coordinates
    produced by :meth:`draw_network` are cached in ``self.elements`` for the
    animation frame callbacks.
    """

    def __init__(self):
        # Network topology: 2 input nodes, 3 hidden nodes, 1 output node.
        self.input_size = 2
        self.hidden_size = 3
        self.output_size = 1

        # Random weight/bias initialization (seeded for reproducibility).
        np.random.seed(42)
        self.W1 = np.random.randn(self.input_size, self.hidden_size) * 0.5
        self.b1 = np.random.randn(1, self.hidden_size) * 0.5
        self.W2 = np.random.randn(self.hidden_size, self.output_size) * 0.5
        self.b2 = np.random.randn(1, self.output_size) * 0.5

        # Cached activations and gradients, populated by the passes below.
        self.a1 = None
        self.a2 = None
        self.dW1 = None
        self.db1 = None
        self.dW2 = None
        self.db2 = None

        # Single training sample and its target output.
        self.X = np.array([[0.5, 0.8]])
        self.y = np.array([[1.0]])

        # Gradient-descent step size.
        self.learning_rate = 0.5

        # Figure and axes used for all drawing.
        self.fig, self.ax = plt.subplots(figsize=(12, 8))
        self.ax.set_xlim(0, 10)
        self.ax.set_ylim(0, 6)
        self.ax.set_aspect('equal')
        self.ax.axis('off')

        # Node coordinates keyed by layer name, filled in by draw_network().
        self.elements = {}

    def sigmoid(self, x):
        """Sigmoid activation; the argument is clipped to avoid exp overflow."""
        return 1 / (1 + np.exp(-np.clip(x, -250, 250)))

    def sigmoid_derivative(self, x):
        """Derivative of the sigmoid expressed in terms of its OUTPUT value x.

        Callers must pass an already-activated value (sigmoid(z)), not z.
        """
        return x * (1 - x)

    def forward_pass(self):
        """Run a forward pass, caching ``self.a1``/``self.a2``; returns a2."""
        # Input layer -> hidden layer.
        z1 = np.dot(self.X, self.W1) + self.b1
        self.a1 = self.sigmoid(z1)

        # Hidden layer -> output layer.
        z2 = np.dot(self.a1, self.W2) + self.b2
        self.a2 = self.sigmoid(z2)

        return self.a2

    def backward_pass(self):
        """Backpropagate the squared error; caches gradients on ``self``.

        Requires a preceding :meth:`forward_pass` (uses ``self.a1``/``a2``).
        Returns ``(delta_output, delta_hidden)`` for display purposes.
        """
        # Output-layer error signal.
        error_output = self.a2 - self.y
        delta_output = error_output * self.sigmoid_derivative(self.a2)

        # Hidden-layer error signal (backpropagated through W2).
        error_hidden = delta_output.dot(self.W2.T)
        delta_hidden = error_hidden * self.sigmoid_derivative(self.a1)

        # Parameter gradients.
        self.dW2 = self.a1.T.dot(delta_output)
        self.db2 = np.sum(delta_output, axis=0, keepdims=True)
        self.dW1 = self.X.T.dot(delta_hidden)
        self.db1 = np.sum(delta_hidden, axis=0, keepdims=True)

        return delta_output, delta_hidden

    def update_weights(self):
        """Apply one gradient-descent step using the cached gradients."""
        self.W2 -= self.learning_rate * self.dW2
        self.b2 -= self.learning_rate * self.db2
        self.W1 -= self.learning_rate * self.dW1
        self.b1 -= self.learning_rate * self.db1

    def draw_network(self, title="神经网络结构"):
        """Redraw the network diagram and cache node coordinates."""
        self.ax.clear()
        self.ax.set_xlim(0, 10)
        self.ax.set_ylim(0, 6)
        self.ax.set_aspect('equal')
        self.ax.axis('off')
        self.ax.set_title(title, fontsize=16, pad=20)

        # Input-layer nodes.
        input_nodes = []
        for i in range(self.input_size):
            x, y = 2, 4 - i * 1.5
            circle = plt.Circle((x, y), 0.3, color='lightblue', ec='black', lw=1.5)
            self.ax.add_patch(circle)
            self.ax.text(x, y, f'$x_{i+1}$', ha='center', va='center', fontsize=12, weight='bold')
            input_nodes.append((x, y))

        # Hidden-layer nodes.
        hidden_nodes = []
        for i in range(self.hidden_size):
            x, y = 5, 5 - i * 2
            circle = plt.Circle((x, y), 0.3, color='lightgreen', ec='black', lw=1.5)
            self.ax.add_patch(circle)
            self.ax.text(x, y, f'$h_{i+1}$', ha='center', va='center', fontsize=12, weight='bold')
            hidden_nodes.append((x, y))

        # Output-layer nodes.
        output_nodes = []
        for i in range(self.output_size):
            x, y = 8, 3
            circle = plt.Circle((x, y), 0.3, color='lightcoral', ec='black', lw=1.5)
            self.ax.add_patch(circle)
            self.ax.text(x, y, f'$y_{i+1}$', ha='center', va='center', fontsize=12, weight='bold')
            output_nodes.append((x, y))

        # Connection lines: input -> hidden (indices not needed here).
        for x1, y1 in input_nodes:
            for x2, y2 in hidden_nodes:
                line = plt.Line2D([x1, x2], [y1, y2], color='gray', lw=1)
                self.ax.add_line(line)

        # Connection lines: hidden -> output.
        for x1, y1 in hidden_nodes:
            for x2, y2 in output_nodes:
                line = plt.Line2D([x1, x2], [y1, y2], color='gray', lw=1)
                self.ax.add_line(line)

        # Layer legend.
        legend_elements = [
            mpatches.Patch(color='lightblue', label='输入层'),
            mpatches.Patch(color='lightgreen', label='隐藏层'),
            mpatches.Patch(color='lightcoral', label='输出层')
        ]
        self.ax.legend(handles=legend_elements, loc='upper right', bbox_to_anchor=(1, 1))

        self.elements['input_nodes'] = input_nodes
        self.elements['hidden_nodes'] = hidden_nodes
        self.elements['output_nodes'] = output_nodes

    def animate_forward_pass(self, frame):
        """Frame callback for the forward-propagation animation (3 frames)."""
        if frame == 0:
            self.draw_network("前向传播: 输入数据")
            # Show the raw input values beneath the input nodes.
            for i, (x, y) in enumerate(self.elements['input_nodes']):
                self.ax.text(x, y-0.5, f'{self.X[0][i]:.2f}', ha='center', va='center', fontsize=10, color='blue')
        elif frame == 1:
            self.draw_network("前向传播: 计算隐藏层激活值")
            # Show the input->hidden weights on the connecting edges.
            for i, (x1, y1) in enumerate(self.elements['input_nodes']):
                for j, (x2, y2) in enumerate(self.elements['hidden_nodes']):
                    mid_x, mid_y = (x1+x2)/2, (y1+y2)/2
                    self.ax.text(mid_x, mid_y+0.2, f'{self.W1[i][j]:.2f}', ha='center', va='center', fontsize=8, color='red')

            # Reuse forward_pass() (sets self.a1 and self.a2) rather than
            # duplicating the layer math inline — same values, one code path.
            self.forward_pass()

            # Show hidden-layer activations.
            for j, (x, y) in enumerate(self.elements['hidden_nodes']):
                self.ax.text(x, y-0.5, f'{self.a1[0][j]:.2f}', ha='center', va='center', fontsize=10, color='green')

            # Show hidden-layer biases.
            for j, (x, y) in enumerate(self.elements['hidden_nodes']):
                self.ax.text(x-0.5, y, f'b={self.b1[0][j]:.2f}', ha='center', va='center', fontsize=8, color='purple')
        elif frame == 2:
            self.draw_network("前向传播: 计算输出层激活值")
            # Show the hidden->output weights on the connecting edges.
            for i, (x1, y1) in enumerate(self.elements['hidden_nodes']):
                for j, (x2, y2) in enumerate(self.elements['output_nodes']):
                    mid_x, mid_y = (x1+x2)/2, (y1+y2)/2
                    self.ax.text(mid_x, mid_y+0.2, f'{self.W2[i][j]:.2f}', ha='center', va='center', fontsize=8, color='red')

            # Recompute through the shared forward pass (weights unchanged
            # between frames, so values match frame 1's).
            self.forward_pass()

            # Show the output-layer activation.
            for j, (x, y) in enumerate(self.elements['output_nodes']):
                self.ax.text(x, y-0.5, f'{self.a2[0][j]:.2f}', ha='center', va='center', fontsize=10, color='green')

            # Show the output-layer bias.
            for j, (x, y) in enumerate(self.elements['output_nodes']):
                self.ax.text(x-0.5, y, f'b={self.b2[0][j]:.2f}', ha='center', va='center', fontsize=8, color='purple')

            # Show target, prediction and squared-error loss.
            loss = 0.5 * np.sum((self.y - self.a2) ** 2)
            self.ax.text(9, 5, f'目标: {self.y[0][0]:.2f}\n输出: {self.a2[0][0]:.2f}\n损失: {loss:.4f}', 
                        ha='center', va='center', fontsize=10, bbox=dict(boxstyle="round,pad=0.3", facecolor="yellow"))
        return []

    def animate_backward_pass(self, frame):
        """Frame callback for the backpropagation animation (3 frames).

        Requires a completed forward pass before frame 0; frame 1 computes
        the gradients and frame 2 applies the weight update.
        """
        if frame == 0:
            self.draw_network("反向传播: 计算输出层误差")
            # Show the current output activation.
            for j, (x, y) in enumerate(self.elements['output_nodes']):
                self.ax.text(x, y-0.5, f'{self.a2[0][j]:.2f}', ha='center', va='center', fontsize=10, color='green')

            # Show target, prediction and raw output error.
            error = self.a2 - self.y
            self.ax.text(9, 5, f'目标: {self.y[0][0]:.2f}\n输出: {self.a2[0][0]:.2f}\n误差: {error[0][0]:.4f}', 
                        ha='center', va='center', fontsize=10, bbox=dict(boxstyle="round,pad=0.3", facecolor="yellow"))
        elif frame == 1:
            self.draw_network("反向传播: 计算隐藏层误差")
            # Show hidden-layer activations.
            for j, (x, y) in enumerate(self.elements['hidden_nodes']):
                self.ax.text(x, y-0.5, f'{self.a1[0][j]:.2f}', ha='center', va='center', fontsize=10, color='green')

            # Compute the gradients (cached on self for frame 2).
            delta_output, delta_hidden = self.backward_pass()

            # Show the hidden-layer error signals.
            for j, (x, y) in enumerate(self.elements['hidden_nodes']):
                self.ax.text(x+0.5, y, f'δ={delta_hidden[0][j]:.4f}', ha='center', va='center', fontsize=8, color='red')
        elif frame == 2:
            self.draw_network("反向传播: 更新权重和偏置")
            # Show old -> new values for the input->hidden weights.
            for i, (x1, y1) in enumerate(self.elements['input_nodes']):
                for j, (x2, y2) in enumerate(self.elements['hidden_nodes']):
                    mid_x, mid_y = (x1+x2)/2, (y1+y2)/2
                    old_w = self.W1[i][j]
                    new_w = old_w - self.learning_rate * self.dW1[i][j]
                    self.ax.text(mid_x, mid_y+0.2, f'{old_w:.2f}→{new_w:.2f}', ha='center', va='center', fontsize=8, color='red')

            # Show old -> new values for the hidden-layer biases.
            for j, (x, y) in enumerate(self.elements['hidden_nodes']):
                old_b = self.b1[0][j]
                new_b = old_b - self.learning_rate * self.db1[0][j]
                self.ax.text(x-0.5, y, f'{old_b:.2f}→{new_b:.2f}', ha='center', va='center', fontsize=8, color='purple')

            # Apply the gradient step for real.
            self.update_weights()
        return []

    def create_animation(self):
        """Build and return the (forward, backward) FuncAnimation pair.

        NOTE(review): both animations target the same figure; if both are
        kept alive their timers run concurrently — callers should show them
        one at a time.
        """
        # Forward-propagation animation.
        forward_frames = 3
        forward_anim = animation.FuncAnimation(
            self.fig, self.animate_forward_pass, frames=forward_frames,
            interval=2000, repeat=False, blit=False
        )

        # Backpropagation animation.
        backward_frames = 3
        backward_anim = animation.FuncAnimation(
            self.fig, self.animate_backward_pass, frames=backward_frames,
            interval=2000, repeat=False, blit=False
        )

        return forward_anim, backward_anim

def demonstrate_multiple_iterations():
    """Train the demo network for 10 iterations and plot the loss curve."""
    fig, ax = plt.subplots(figsize=(12, 8))

    # Build the network.  Its constructor opens its own (unused) figure,
    # which would otherwise pop up as a blank window next to the loss plot —
    # close it immediately.
    nn = NeuralNetworkAnimation()
    plt.close(nn.fig)

    # Loss history.
    losses = []
    iterations = []

    # Run several training iterations.
    for i in range(10):
        # Forward pass.
        output = nn.forward_pass()

        # Squared-error loss for this iteration.
        loss = 0.5 * np.sum((nn.y - output) ** 2)
        losses.append(loss)
        iterations.append(i)

        # Backward pass.
        nn.backward_pass()

        # Gradient-descent update.
        nn.update_weights()

    # Plot the loss curve.
    ax.plot(iterations, losses, 'b-o', linewidth=2, markersize=6)
    ax.set_xlabel('迭代次数')
    ax.set_ylabel('损失')
    ax.set_title('训练过程中的损失变化')
    ax.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

def main():
    """Entry point: run each demo stage in sequence."""
    print("正在创建神经网络前向传播和反向传播动画演示...")

    # Stage 1: static diagram of the network topology.
    static_demo = NeuralNetworkAnimation()
    static_demo.draw_network("神经网络结构")
    plt.show()

    # Stage 2: animated forward propagation.  Keep a reference to the
    # animation object so it is not garbage-collected before show().
    print("演示前向传播过程...")
    forward_demo = NeuralNetworkAnimation()
    forward_anim = animation.FuncAnimation(
        forward_demo.fig, forward_demo.animate_forward_pass, frames=3,
        interval=2000, repeat=False, blit=False
    )
    plt.show()

    # Stage 3: animated backpropagation.  A forward pass must run first so
    # the activations the frames display exist.
    print("演示反向传播过程...")
    backward_demo = NeuralNetworkAnimation()
    backward_demo.forward_pass()
    backward_anim = animation.FuncAnimation(
        backward_demo.fig, backward_demo.animate_backward_pass, frames=3,
        interval=2000, repeat=False, blit=False
    )
    plt.show()

    # Stage 4: loss curve over several training iterations.
    print("演示多个训练迭代过程...")
    demonstrate_multiple_iterations()

    print("动画演示完成！")

# Script entry point: run the full demo only when executed directly.
if __name__ == '__main__':
    main()