import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchviz import make_dot
import matplotlib.pyplot as plt
import numpy as np

# Select the compute device: CUDA GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# 定义一个简单的神经网络
class SimpleNet(nn.Module):
    """Minimal 2-3-1 feed-forward network for the XOR demo.

    Architecture: Linear(2 -> 3) + ReLU hidden layer, then
    Linear(3 -> 1) + Sigmoid output.
    """

    def __init__(self):
        super().__init__()
        # Attribute names fc1/fc2 are part of the state-dict keys and the
        # printed module repr, so they must stay as-is.
        self.fc1 = nn.Linear(2, 3)  # input (2 features) -> hidden (3 units)
        self.fc2 = nn.Linear(3, 1)  # hidden (3 units) -> output (1 unit)

    def forward(self, x):
        """Map a (batch, 2) input to a (batch, 1) sigmoid activation."""
        hidden = F.relu(self.fc1(x))
        return torch.sigmoid(self.fc2(hidden))

def create_sample_data():
    """Build the XOR toy dataset.

    Returns:
        (X, y): a (4, 2) float32 tensor of all binary input pairs and a
        (4, 1) float32 tensor of XOR targets (1 exactly when inputs differ).
    """
    pairs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
    X = torch.tensor([list(p) for p in pairs], dtype=torch.float32)
    # XOR truth table derived directly from the inputs.
    y = torch.tensor([[float(a != b)] for a, b in pairs], dtype=torch.float32)
    return X, y

def train_network():
    """Train SimpleNet on the XOR dataset and report progress.

    Returns:
        (model, X, y, losses): the trained network, the inputs/targets
        (already moved to the module-level ``device``), and the per-epoch
        loss history.
    """
    model = SimpleNet().to(device)
    print("网络结构:")
    print(model)

    # MSE loss with plain SGD, lr=0.1.
    criterion = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.1)

    X, y = create_sample_data()
    X, y = X.to(device), y.to(device)

    # Show the untrained network's predictions for comparison.
    print("\n=== 训练前的前向传播 ===")
    with torch.no_grad():
        preds = model(X)
        print(f"训练前输出: {preds.squeeze().cpu().numpy()}")
        print(f"训练前损失: {criterion(preds, y).item():.6f}")

    print("\n=== 开始训练 ===")
    epochs = 1000
    losses = []

    for epoch in range(epochs):
        optimizer.zero_grad()              # clear stale gradients
        loss = criterion(model(X), y)      # forward pass + loss
        loss.backward()                    # backprop
        optimizer.step()                   # parameter update

        losses.append(loss.item())

        # Progress report every 100 epochs.
        if epoch % 100 == 99:
            print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.6f}')

    # Show the trained network's predictions.
    print("\n=== 训练后的前向传播 ===")
    with torch.no_grad():
        preds = model(X)
        print(f"训练后输出: {preds.squeeze().cpu().numpy()}")
        print(f"训练后损失: {criterion(preds, y).item():.6f}")

    return model, X, y, losses

def visualize_loss_curve(losses):
    """Plot the per-epoch training-loss history and show the figure."""
    fig = plt.figure(figsize=(10, 6))
    ax = fig.gca()
    ax.plot(losses)
    ax.set_title('训练过程中的损失变化')
    ax.set_xlabel('训练轮次')
    ax.set_ylabel('损失值')
    ax.grid(True, alpha=0.3)
    plt.show()

def visualize_network_structure():
    """Draw a schematic 2-3-1 network diagram with matplotlib."""
    fig, ax = plt.subplots(figsize=(10, 6))

    # One entry per layer: (node positions, fill color, LaTeX symbol).
    layers = [
        ([(1, 3), (1, 1)], 'lightblue', 'x'),            # input layer
        ([(3, 4), (3, 2), (3, 0)], 'lightgreen', 'h'),   # hidden layer
        ([(5, 2)], 'lightcoral', 'y'),                   # output layer
    ]

    # Draw every node as a labelled circle.
    for nodes, color, symbol in layers:
        for idx, (cx, cy) in enumerate(nodes, start=1):
            ax.add_patch(plt.Circle((cx, cy), 0.2, color=color,
                                    ec='black', lw=1.5))
            ax.text(cx, cy, f'${symbol}_{idx}$', ha='center', va='center',
                    fontsize=12, weight='bold')

    # Fully connect each layer to the next one.
    for (src, _, _), (dst, _, _) in zip(layers, layers[1:]):
        for x1, y1 in src:
            for x2, y2 in dst:
                ax.plot([x1, x2], [y1, y2], 'gray', lw=1)

    ax.set_xlim(0, 6)
    ax.set_ylim(-1, 5)
    ax.set_aspect('equal')
    ax.axis('off')
    ax.set_title('神经网络结构图', fontsize=16)

    # Legend mapping colors to layer roles.
    import matplotlib.patches as mpatches
    legend_entries = [('lightblue', '输入层'),
                      ('lightgreen', '隐藏层'),
                      ('lightcoral', '输出层')]
    ax.legend(handles=[mpatches.Patch(color=c, label=lbl)
                       for c, lbl in legend_entries],
              loc='upper right')

    plt.tight_layout()
    plt.show()

def print_computation_graph(model, X):
    """Render the autograd graph of one forward pass to computation_graph.png.

    Args:
        model: the network whose graph is visualized.
        X: optional input batch; its first row is used as the probe input.
           Falls back to a fixed sample when None.  (Previously this
           parameter was accepted but silently ignored.)

    Returns:
        The graphviz Digraph on success, or None when torchviz is missing
        or rendering fails.
    """
    try:
        # Import locally so a missing torchviz is actually caught by the
        # ImportError branch below (a module-level import would crash the
        # whole script before this function ever runs).
        from torchviz import make_dot

        # Probe input: one row of the caller's data, or a fixed sample.
        if X is not None:
            x = X[:1].detach().clone().requires_grad_(True)
        else:
            x = torch.tensor([[0.5, 0.8]], dtype=torch.float32,
                             requires_grad=True)

        # Keep the probe on the model's device — the model may live on
        # CUDA while a fresh tensor defaults to CPU.
        try:
            x = x.to(next(model.parameters()).device)
        except StopIteration:
            pass  # parameter-less model: leave the probe where it is

        y = model(x)

        dot = make_dot(y, params=dict(model.named_parameters()))

        # Write computation_graph.png and remove the intermediate dot file.
        dot.render("computation_graph", format="png", cleanup=True)
        print("计算图已保存为 computation_graph.png")

        # In a Jupyter environment dot.view() would display it inline.
        return dot
    except ImportError:
        print("警告: 未安装 torchviz，无法生成计算图可视化")
        print("可以通过以下命令安装: pip install torchviz")
        return None
    except Exception as e:
        print(f"生成计算图时出错: {e}")
        return None

def print_model_parameters(model):
    """Print name, shape, and current values of every model parameter."""
    print("\n=== 模型参数 ===")
    divider = "-" * 40
    for pname, tensor in model.named_parameters():
        print(f"参数名称: {pname}")
        print(f"参数形状: {tensor.shape}")
        print(f"参数值:\n{tensor.data}")
        print(divider)

def print_gradients(model):
    """Print gradient shape/values per parameter, or note their absence."""
    print("\n=== 梯度信息 ===")
    divider = "-" * 40
    for pname, p in model.named_parameters():
        if p.grad is None:
            # Parameter has no gradient yet (no backward pass ran).
            print(f"参数名称: {pname} (无梯度信息)")
        else:
            print(f"参数名称: {pname}")
            print(f"梯度形状: {p.grad.shape}")
            print(f"梯度值:\n{p.grad}")
        print(divider)

def demonstrate_autograd():
    """Walk through autograd on z = sum(x**2) and verify dz/dx by hand."""
    print("\n=== 自动求导机制演示 ===")

    # Leaf tensor that records operations for differentiation.
    x = torch.tensor([2.0, 3.0], requires_grad=True)
    print(f"输入 x: {x}")

    y = x ** 2
    print(f"计算 y = x^2: {y}")

    z = y.sum()
    print(f"计算 z = sum(y): {z}")

    # Populate x.grad via reverse-mode autodiff.
    z.backward()
    print(f"x 的梯度 (dz/dx): {x.grad}")

    # Analytic check: z = x[0]^2 + x[1]^2, hence dz/dx[i] = 2*x[i]
    # (2*2 = 4 and 2*3 = 6).
    print("数学验证:")
    for i in range(2):
        xi = x[i].item()
        print(f"dz/dx[{i}] = 2*x[{i}] = 2*{xi} = {2*xi}")

def main():
    """Run the full demo: autograd, diagram, training, inspection, graph."""
    print("PyTorch神经网络示例")
    print("=" * 50)

    demonstrate_autograd()           # autograd walkthrough
    visualize_network_structure()    # static diagram of the 2-3-1 net

    model, X, y, losses = train_network()
    visualize_loss_curve(losses)
    print_model_parameters(model)

    # One extra forward/backward pass so fresh gradients exist to inspect.
    model.zero_grad()
    loss = nn.MSELoss()(model(X), y)
    loss.backward()
    print_gradients(model)

    print("\n=== 计算图 ===")
    graph = print_computation_graph(model, X)
    if graph is None:
        print("计算图生成失败或不可用")
    else:
        print("计算图已成功生成")

    print("\n程序执行完成!")

if __name__ == '__main__':
    main()