# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torchvision.models as models
# Select GPU when available, otherwise fall back to CPU.
# BUG FIX: the original fallback was "" (empty string), which makes
# torch.device("") raise RuntimeError on any machine without CUDA.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class VGG16_finetune(nn.Module):
    """VGG16 backbone with a custom classification head for transfer learning.

    The convolutional feature extractor and adaptive pooling come from the
    ImageNet-pretrained VGG16; only the fully-connected head is replaced so
    the network outputs ``num_class`` logits for the target task.
    """

    def __init__(self, num_class=5):
        super(VGG16_finetune, self).__init__()
        self.num_class = num_class
        # Load the pretrained backbone (torchvision <= 0.12 API).
        # NOTE: on torchvision >= 0.13 the equivalent is
        #   models.vgg16(weights=models.VGG16_Weights.DEFAULT)
        backbone = models.vgg16(pretrained=True)
        # Reuse the pretrained convolutional features and adaptive avg-pool.
        self.features = backbone.features
        self.avgpool = backbone.avgpool
        # Fresh fully-connected head for this task:
        # 25088 (= 512 * 7 * 7 pooled feature map) -> 512 -> 128 -> num_class.
        head_layers = [
            nn.Linear(25088, 512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 128),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(128, self.num_class),
        ]
        self.classifier = nn.Sequential(*head_layers)

    def forward(self, x):
        """Run features -> pooling -> flatten -> classifier head."""
        feats = self.avgpool(self.features(x))
        return self.classifier(torch.flatten(feats, 1))

# Instantiate the model and move it to the selected device.
model = VGG16_finetune(num_class = 5).to(device)
# Freeze the feature-extractor parameters so no gradients are computed for them.
for param in model.features.parameters():
    param.requires_grad = False
# Learning rate
learning_rate = 1e-3
# Number of training epochs
num_epochs = 10
# Adam optimizer (RMSProp + Momentum). Only the new classifier head is
# optimized; the convolutional layers keep their pretrained weights.
optimizer = torch.optim.Adam(model.classifier.parameters(), lr=learning_rate)
# Cross-entropy loss for multi-class classification
loss_fn = torch.nn.CrossEntropyLoss()

# Batch size
batch_size = 8
# Dataset directories (one sub-folder per class, as required by ImageFolder)
flower_train_path = './data/train/'
flower_test_path = './data/val/'
# Convert images to tensors first, then resize to 224x224 (VGG input size).
# NOTE(review): Resize after ToTensor operates on a tensor; this requires a
# torchvision version whose Resize accepts tensor inputs — confirm.
dataset_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((224,224))
])
# Load training and validation datasets
flower_train = ImageFolder(flower_train_path, transform=dataset_transform)
flower_test = ImageFolder(flower_test_path,transform=dataset_transform)
# Wrap the datasets in batched loaders; shuffle only the training data.
train_loader = DataLoader(dataset = flower_train, batch_size = batch_size, shuffle = True)
test_loader = DataLoader(dataset = flower_test, batch_size = batch_size, shuffle = False)


# 计算模型预测精度:测试集数据，模型
def evaluate_accuracy(data_iter, model):
    """Compute classification accuracy of ``model`` over all of ``data_iter``.

    Args:
        data_iter: iterable yielding (images, labels) batches.
        model: network to evaluate; it is switched to eval mode here.

    Returns:
        float: fraction of correctly classified samples.
    """
    total = 0
    correct = 0
    # No gradient tracking needed for evaluation.
    with torch.no_grad():
        # Eval mode: disables dropout, uses running batch-norm statistics.
        model.eval()
        for images, labels in data_iter:
            # `device` is the module-level device selected at import time.
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # Predicted class = index of the max logit for each sample.
            _, predicts = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            # .item() so we accumulate a plain int rather than a 0-dim tensor.
            correct += (predicts == labels).cpu().sum().item()
            # BUG FIX: the original had a `break` here, which stopped after
            # the first batch — accuracy was computed on at most one batch.
    return correct / total

# 定义模型训练过程:指定数据集，优化器，损失函数和轮次
def train(data_loader=train_loader,
          optimizer=optimizer,
          loss_fn=loss_fn,
          epochs=num_epochs):
    """Fine-tune the module-level ``model`` on ``data_loader``.

    After each epoch, prints the average training loss, the training
    accuracy, and the accuracy on the module-level ``test_loader``.
    Relies on the globals ``model``, ``device``, ``test_loader`` and
    ``evaluate_accuracy``.

    Args:
        data_loader: batched (images, labels) training loader.
        optimizer: optimizer updating the trainable parameters.
        loss_fn: loss function applied to (logits, labels).
        epochs: number of full passes over the training data.
    """
    for epoch in range(epochs):
        print('current epoch = {}'.format(epoch))
        # Per-epoch counters.
        train_accuracy_total = 0   # samples seen this epoch
        train_correct = 0          # samples predicted correctly
        train_loss_sum = 0.0       # summed per-batch losses
        num_batches = 0            # renamed from `iter`, which shadowed the builtin
        # Train mode once per epoch (the original re-set it every batch);
        # also undoes the eval mode set by evaluate_accuracy() last epoch.
        model.train()
        for images, labels in data_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            loss = loss_fn(outputs, labels)
            # Clear stale gradients before backpropagating.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss_sum += loss.item()
            # Predicted class = argmax of the logits.
            _, predicts = torch.max(outputs.data, dim=1)
            train_accuracy_total += labels.size(0)
            train_correct += (predicts == labels).cpu().sum().item()
            num_batches += 1
        # Held-out accuracy after each epoch.
        test_acc = evaluate_accuracy(test_loader, model)
        # BUG FIX: divide by the true batch count (guarded against an empty
        # loader) instead of the original `iter + 0.01` fudge, which skewed
        # the reported average loss.
        avg_loss = train_loss_sum / max(num_batches, 1)
        print(
            'epoch:{0},   loss:{1:.4f},   train accuracy:{2:.3f},  test accuracy:{3:.3f}'
            .format(epoch, avg_loss,
                    train_correct / max(train_accuracy_total, 1), test_acc))
    print('------------finish training-------------')
