import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, random_split
import torch.nn as nn
import torchvision.models as models
import torch.optim as optim
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import numpy as np


# Preprocessing pipeline: resize every image to the 224x224 input size that
# ResNet expects, convert it to a tensor, and normalize with the ImageNet
# channel statistics (matching the pretrained weights used below).
_resize = transforms.Resize((224, 224))
_to_tensor = transforms.ToTensor()
_normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225],
)
transform = transforms.Compose([_resize, _to_tensor, _normalize])

# Load the dataset.
# NOTE: the original path 'Dogs_Vs_Cats\cat-dog' used a Windows backslash;
# '\c' is an invalid escape sequence (SyntaxWarning on Python 3.12+, slated
# to become an error) and the path is not portable. Forward slashes work on
# both Windows and POSIX.
data_dir = 'Dogs_Vs_Cats/cat-dog'
# ImageFolder (a torch.utils.data.Dataset subclass) maps each subdirectory
# of data_dir to one class; dataset[idx] returns (image_tensor, label),
# e.g. (tensor([...]), 0) where labels follow alphabetical folder order.
dataset = datasets.ImageFolder(data_dir, transform=transform)

# Split into train / validation / test (80% / 10% / remainder).
# test_size absorbs any integer-rounding leftover so the three parts always
# sum exactly to len(dataset), which random_split requires.
train_size = int(0.8 * len(dataset))
val_size = int(0.1 * len(dataset))
test_size = len(dataset) - train_size - val_size
train_dataset, val_dataset, test_dataset = random_split(
    dataset, [train_size, val_size, test_size]
)

# DataLoaders feed each Dataset split to the model in mini-batches.
# Only the training split is shuffled; validation and test keep a fixed
# order so their metrics are reproducible across passes.
batch_size = 32
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
val_loader = DataLoader(val_dataset, shuffle=False, batch_size=batch_size)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)

# Build the model: ResNet-18 pretrained on ImageNet, with the final fully
# connected layer replaced by a fresh 2-way head (cat vs. dog).
# torchvision >= 0.13 deprecates `pretrained=True` in favor of the weights
# enum; IMAGENET1K_V1 is exactly the checkpoint `pretrained=True` loaded.
model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 2)

# Pick the training device and move the model BEFORE constructing the
# optimizer, so the optimizer is built from the device-resident parameters.
# (Module.to() moves parameters in place, but this ordering is the
# documented-safe convention.)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Loss and optimizer: cross-entropy over the two logits, SGD with momentum.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Train for a fixed number of epochs, recording per-epoch loss and accuracy
# on both the training and validation splits for the plots below.
num_epochs = 10
train_losses = []
train_accs = []
val_losses = []
val_accs = []

for epoch in range(num_epochs):
    # ---- training phase ----
    running_loss = 0.0
    running_corrects = 0

    model.train()  # enable dropout / batch-norm running-stat updates
    for inputs, labels in train_loader:
        # Each batch is (images, labels) as produced by ImageFolder.
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()

        outputs = model(inputs)  # raw logits, shape (batch, 2)
        # torch.max over dim 1 returns (max values, argmax indices);
        # the indices are the predicted class ids.
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, labels)

        loss.backward()
        optimizer.step()

        # Weight the mean batch loss by the batch size so the epoch average
        # is exact even when the final batch is smaller.
        running_loss += loss.item() * inputs.size(0)
        # .item() keeps the accumulator a plain Python int (device-free),
        # instead of a tensor that would later need a .cpu() hop.
        running_corrects += torch.sum(preds == labels).item()

    epoch_loss = running_loss / len(train_loader.dataset)
    epoch_acc = running_corrects / len(train_loader.dataset)
    train_losses.append(epoch_loss)
    train_accs.append(epoch_acc)

    print(f'Epoch {epoch + 1}/{num_epochs}, Train Loss: {epoch_loss:.4f}, Train Acc: {epoch_acc:.4f}')

    # ---- validation phase ----
    running_loss = 0.0
    running_corrects = 0

    model.eval()  # inference behavior for dropout / batch norm
    # Inside torch.no_grad() PyTorch skips gradient bookkeeping, saving
    # memory and time; gradient tracking resumes after the block.
    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)

            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels).item()

    val_loss = running_loss / len(val_loader.dataset)
    val_acc = running_corrects / len(val_loader.dataset)
    val_losses.append(val_loss)
    val_accs.append(val_acc)

    print(f'Validation Loss: {val_loss:.4f}, Validation Acc: {val_acc:.4f}')

# Plot per-epoch loss (left) and accuracy (right) curves side by side.
plt.figure(figsize=(12, 4))
epochs = range(num_epochs)

ax_loss = plt.subplot(1, 2, 1)
ax_loss.plot(epochs, train_losses, label='Train Loss')
ax_loss.plot(epochs, val_losses, label='Validation Loss')
ax_loss.set_xlabel('Epoch')
ax_loss.set_ylabel('Loss')
ax_loss.legend()

ax_acc = plt.subplot(1, 2, 2)
ax_acc.plot(epochs, train_accs, label='Train Acc')
ax_acc.plot(epochs, val_accs, label='Validation Acc')
ax_acc.set_xlabel('Epoch')
ax_acc.set_ylabel('Accuracy')
ax_acc.legend()

plt.show()


# Confusion matrix over the validation split (not the test split):
# rows are true labels, columns are predicted labels.
all_preds = []
all_labels = []

model.eval()
with torch.no_grad():
    for batch in val_loader:
        images = batch[0].to(device)
        targets = batch[1].to(device)
        logits = model(images)
        # argmax over the class dimension gives the predicted class ids
        # (same indices torch.max(..., 1) would return).
        predictions = logits.argmax(dim=1)
        all_preds.extend(predictions.cpu().numpy())
        all_labels.extend(targets.cpu().numpy())

cm = confusion_matrix(all_labels, all_preds)
print(cm)


# Final evaluation on the held-out test split: average loss and accuracy.
model.eval()
running_loss = 0.0
running_corrects = 0

with torch.no_grad():
    for batch in test_loader:
        images, targets = batch[0].to(device), batch[1].to(device)
        logits = model(images)
        predictions = logits.argmax(dim=1)
        batch_loss = criterion(logits, targets)

        # Scale mean batch loss by batch size so the final average is exact
        # even when the last batch is smaller.
        running_loss += batch_loss.item() * images.size(0)
        running_corrects += torch.sum(predictions == targets.data)

test_loss = running_loss / len(test_loader.dataset)
test_acc = running_corrects.double() / len(test_loader.dataset)

print(f'Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.4f}')