import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import time
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns

# Fix random seeds so runs are reproducible
torch.manual_seed(42)
np.random.seed(42)

# Select compute device (GPU when available, otherwise CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Training-time preprocessing: light augmentation plus per-channel normalization
transform_train = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),  # random horizontal flip (augmentation)
    transforms.RandomRotation(10),           # random rotation up to +/-10 degrees
    transforms.ToTensor(),                   # PIL image -> float tensor in [0, 1]
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))  # CIFAR10 per-channel mean/std
])

# Test-time preprocessing: normalization only, no augmentation
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# 加载CIFAR10数据集
def load_cifar10_data():
    """Download (if needed) and wrap CIFAR10 train/test sets in DataLoaders.

    Returns:
        (trainloader, testloader, classes): batch-128 loaders and the tuple
        of the ten CIFAR10 class names.
    """
    print("正在加载CIFAR10数据集...")

    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    # Datasets share the module-level transforms: augmented train, plain test.
    trainset = torchvision.datasets.CIFAR10(
        root='./data', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.CIFAR10(
        root='./data', train=False, download=True, transform=transform_test)

    # Shuffle only the training loader; evaluation order is fixed.
    trainloader = DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
    testloader = DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)

    print(f"训练集大小: {len(trainset)}")
    print(f"测试集大小: {len(testset)}")
    print(f"类别: {classes}")

    return trainloader, testloader, classes

# 定义卷积神经网络
class CIFAR10Net(nn.Module):
    """VGG-style CNN for CIFAR10 classification.

    Three convolutional blocks, each (conv-BN-ReLU) x2 -> max-pool -> dropout,
    growing channels 3 -> 32 -> 64 -> 128 while halving spatial size
    32 -> 16 -> 8 -> 4, followed by a two-layer classifier head.

    Args:
        num_classes: number of output logits (default 10 for CIFAR10).
    """

    def __init__(self, num_classes=10):
        super(CIFAR10Net, self).__init__()
        # Block 1: 3 -> 32 channels, output 16x16 after pooling
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(32)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.dropout1 = nn.Dropout2d(0.25)

        # Block 2: 32 -> 64 channels, output 8x8 after pooling
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(64)
        self.conv4 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.dropout2 = nn.Dropout2d(0.25)

        # Block 3: 64 -> 128 channels, output 4x4 after pooling
        self.conv5 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn5 = nn.BatchNorm2d(128)
        self.conv6 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn6 = nn.BatchNorm2d(128)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.dropout3 = nn.Dropout2d(0.25)

        # Classifier head: 128*4*4 = 2048 features -> 512 -> num_classes
        self.fc1 = nn.Linear(128 * 4 * 4, 512)
        self.dropout4 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(512, num_classes)

    def forward(self, x):
        """Compute class logits for a batch of (N, 3, 32, 32) images.

        Returns:
            Tensor of shape (N, num_classes) with unnormalized logits.
        """
        # Block 1
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.pool1(x)
        x = self.dropout1(x)

        # Block 2
        x = F.relu(self.bn3(self.conv3(x)))
        x = F.relu(self.bn4(self.conv4(x)))
        x = self.pool2(x)
        x = self.dropout2(x)

        # Block 3
        x = F.relu(self.bn5(self.conv5(x)))
        x = F.relu(self.bn6(self.conv6(x)))
        x = self.pool3(x)
        x = self.dropout3(x)

        # Flatten all dims but the batch dim. Using flatten(x, 1) instead of
        # view(-1, 128*4*4) preserves the batch size: with a wrong input
        # spatial size, view(-1, ...) would silently reshuffle samples into a
        # wrong batch dimension, whereas flatten makes fc1 raise a clear error.
        x = torch.flatten(x, 1)

        # Classifier head
        x = F.relu(self.fc1(x))
        x = self.dropout4(x)
        x = self.fc2(x)

        return x

# 训练函数
def train_model(model, trainloader, criterion, optimizer, epoch):
    """Run one training epoch.

    Args:
        model: network to train (already on the module-level `device`).
        trainloader: DataLoader over the training set.
        criterion: loss function.
        optimizer: optimizer updating `model`'s parameters.
        epoch: zero-based epoch index, used only for log messages.

    Returns:
        (mean batch loss, accuracy in percent) over the epoch.
    """
    model.train()  # enable dropout and batch-norm running-stat updates
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    print(f"Epoch {epoch+1} 开始训练...")
    tic = time.time()

    for batch_num, (inputs, targets) in enumerate(trainloader, start=1):
        # Move the batch to the compute device.
        inputs = inputs.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()               # clear accumulated gradients
        outputs = model(inputs)             # forward pass
        loss = criterion(outputs, targets)
        loss.backward()                     # backward pass
        optimizer.step()                    # parameter update

        # Running statistics for logging.
        loss_sum += loss.item()
        predicted = outputs.max(1)[1]
        n_seen += targets.size(0)
        n_correct += predicted.eq(targets).sum().item()

        # Progress report every 100 batches.
        if batch_num % 100 == 0:
            print(f'Epoch: {epoch+1} | Batch: {batch_num} | Loss: {loss_sum/batch_num:.3f} | Acc: {100.*n_correct/n_seen:.3f}%')

    elapsed = time.time() - tic
    train_loss = loss_sum / len(trainloader)
    train_acc = 100. * n_correct / n_seen

    print(f'Epoch {epoch+1} 训练完成 | Loss: {train_loss:.3f} | Acc: {train_acc:.3f}% | Time: {elapsed:.2f}s')

    return train_loss, train_acc

# 测试函数
def test_model(model, testloader, criterion, classes):
    """Evaluate the model on the test set and print a classification report.

    Args:
        model: trained network (already on the module-level `device`).
        testloader: DataLoader over the test set.
        criterion: loss function.
        classes: sequence of class names for the report.

    Returns:
        (mean batch loss, accuracy in percent, predictions list, labels list).
    """
    model.eval()  # disable dropout; use batch-norm running stats
    loss_total = 0
    hits = 0
    seen = 0
    preds_all = []
    labels_all = []

    print("开始测试...")
    with torch.no_grad():  # inference only — no gradient tracking needed
        for inputs, targets in testloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            loss_total += loss.item()
            predicted = outputs.max(1)[1]
            seen += targets.size(0)
            hits += predicted.eq(targets).sum().item()

            # Collect per-sample results for the classification report.
            preds_all.extend(predicted.cpu().numpy())
            labels_all.extend(targets.cpu().numpy())

    avg_loss = loss_total / len(testloader)
    accuracy = 100. * hits / seen

    print(f'测试结果 | Loss: {avg_loss:.3f} | Acc: {accuracy:.3f}%')

    # Per-class precision/recall/F1 summary.
    print("\n分类报告:")
    print(classification_report(labels_all, preds_all, target_names=classes))

    return avg_loss, accuracy, preds_all, labels_all

# 可视化训练过程
def plot_training_history(train_losses, train_accuracies, test_losses, test_accuracies):
    """Plot per-epoch loss and accuracy curves for train vs. test.

    All four arguments are per-epoch sequences of equal length.
    """
    epochs = range(1, len(train_losses) + 1)

    plt.figure(figsize=(12, 5))

    # One panel per metric: (subplot index, train series, test series,
    # train label, test label, title, y-axis label).
    panels = [
        (1, train_losses, test_losses, '训练损失', '测试损失', '训练和测试损失', '损失'),
        (2, train_accuracies, test_accuracies, '训练准确率', '测试准确率', '训练和测试准确率', '准确率 (%)'),
    ]
    for idx, train_series, test_series, train_label, test_label, title, ylabel in panels:
        plt.subplot(1, 2, idx)
        plt.plot(epochs, train_series, 'bo-', label=train_label)
        plt.plot(epochs, test_series, 'ro-', label=test_label)
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.legend()
        plt.grid(True)

    plt.tight_layout()
    plt.show()

# 绘制混淆矩阵
def plot_confusion_matrix(y_true, y_pred, classes):
    """Render the confusion matrix of predictions vs. ground truth as a heatmap.

    Args:
        y_true: sequence of true class indices.
        y_pred: sequence of predicted class indices.
        classes: class names used as axis tick labels.
    """
    matrix = confusion_matrix(y_true, y_pred)
    plt.figure(figsize=(10, 8))
    # Annotate each cell with its integer count.
    sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues',
                xticklabels=classes, yticklabels=classes)
    plt.title('混淆矩阵')
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')
    plt.show()

# 主函数
def main():
    """End-to-end pipeline: load data, build, train, evaluate, visualize, save."""
    print("PyTorch CIFAR10分类器")
    print("=" * 50)

    # Data
    trainloader, testloader, classes = load_cifar10_data()

    # Model
    model = CIFAR10Net(num_classes=10).to(device)
    print(f"\n模型结构:")
    print(model)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Report parameter counts before training.
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"\n总参数数量: {total_params:,}")
    print(f"可训练参数数量: {trainable_params:,}")

    # Training loop, keeping per-epoch metrics for later plotting.
    print("\n开始训练...")
    num_epochs = 10
    history = {'train_loss': [], 'train_acc': [], 'test_loss': [], 'test_acc': []}

    t0 = time.time()
    for epoch in range(num_epochs):
        epoch_train_loss, epoch_train_acc = train_model(
            model, trainloader, criterion, optimizer, epoch)
        history['train_loss'].append(epoch_train_loss)
        history['train_acc'].append(epoch_train_acc)

        # Evaluate after every epoch to track generalization.
        epoch_test_loss, epoch_test_acc, _, _ = test_model(
            model, testloader, criterion, classes)
        history['test_loss'].append(epoch_test_loss)
        history['test_acc'].append(epoch_test_acc)

        print("-" * 50)

    total_time = time.time() - t0
    print(f"总训练时间: {total_time:.2f}秒")

    # One final evaluation pass to collect predictions for the confusion matrix.
    print("\n最终测试结果:")
    test_loss, test_acc, all_predictions, all_targets = test_model(
        model, testloader, criterion, classes)

    # Visualizations
    print("\n绘制训练历史...")
    plot_training_history(history['train_loss'], history['train_acc'],
                          history['test_loss'], history['test_acc'])

    print("绘制混淆矩阵...")
    plot_confusion_matrix(all_targets, all_predictions, classes)

    # Persist the trained weights.
    torch.save(model.state_dict(), 'cifar10_model.pth')
    print("\n模型已保存为 'cifar10_model.pth'")

    print("\n程序执行完成!")

# 显示CIFAR10样本数据
def show_sample_data():
    """Display the first ten CIFAR10 training images with their class labels."""
    # Use raw (unnormalized) tensors so the images render with true colors.
    to_tensor_only = transforms.Compose([
        transforms.ToTensor()
    ])

    trainset = torchvision.datasets.CIFAR10(
        root='./data', train=True, download=True, transform=to_tensor_only)

    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    # 2x5 grid, one sample per cell.
    fig, axes = plt.subplots(2, 5, figsize=(12, 6))
    for idx, ax in enumerate(axes.ravel()):
        img, label = trainset[idx]
        ax.imshow(img.permute(1, 2, 0))  # CHW -> HWC for matplotlib
        ax.set_title(f'Label: {classes[label]}')
        ax.axis('off')

    plt.tight_layout()
    plt.show()

if __name__ == '__main__':
    # Preview a few raw CIFAR10 images before training starts
    print("显示CIFAR10样本数据...")
    show_sample_data()
    
    # Run the full train/evaluate/visualize pipeline
    main()