"""
# -*- coding: utf-8 -*-
# @Time    : 2023/6/15 15:22
# @Author  : 王摇摆
# @FileName: VGGNetModel.py
# @Software: PyCharm
# @Blog    ：https://blog.csdn.net/weixin_44943389?type=blog
"""
import time
import torch
from DataPreview import dataloader, image_datasets
import csv

train_losses = []  # per-epoch training-set loss
train_accs = []  # per-epoch training-set accuracy
valid_losses = []  # per-epoch validation-set loss
valid_accs = []  # per-epoch validation-set accuracy


class Models(torch.nn.Module):
    """VGG-13-style CNN for 2-class classification of 64x64 RGB images.

    Four convolutional stages (64 -> 128 -> 256 -> 512 channels), each
    ending in a 2x2 max-pool that halves the spatial size, so a 64x64
    input reaches the classifier as a 4x4x512 feature map. The head is
    three fully-connected layers with Dropout(0.5) regularization.

    Input:  tensor of shape (N, 3, 64, 64)
    Output: logits of shape (N, 2)
    """

    def __init__(self):
        super(Models, self).__init__()
        # Feature extractor: 64x64 -> 32 -> 16 -> 8 -> 4 spatial size.
        self.Conv = torch.nn.Sequential(
            torch.nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),

            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),

            torch.nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),

            torch.nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))

        # Classifier head over the flattened 4*4*512 feature map.
        self.Classes = torch.nn.Sequential(
            torch.nn.Linear(4 * 4 * 512, 1024),
            torch.nn.ReLU(),
            torch.nn.Dropout(p=0.5),
            torch.nn.Linear(1024, 1024),
            torch.nn.ReLU(),
            torch.nn.Dropout(p=0.5),
            torch.nn.Linear(1024, 2)
        )

    def forward(self, input):
        """Return class logits of shape (N, 2) for a batch of images.

        NOTE: `input` shadows the builtin of the same name; kept for
        backward compatibility with existing keyword callers.
        """
        x = self.Conv(input)
        # BUG FIX: view(-1, 4*4*512) silently folds a wrong spatial size
        # into the batch dimension. Deriving the batch size from the input
        # keeps the batch intact and raises on any size mismatch instead.
        x = x.view(x.size(0), 4 * 4 * 512)
        x = self.Classes(x)
        return x


# --- Build the model, loss, optimizer and pick the training device ---
print('1. 模型已创建完毕')
model = Models()

# Cross-entropy over the 2-class logits produced by the classifier head.
loss_f = torch.nn.CrossEntropyLoss()
print('2. 损失函数已创建完毕')

# Adam with a small fixed learning rate.
optimizer = torch.optim.Adam(model.parameters(), lr=0.00001)
print('3. 优化器已创建完毕')

# Number of training epochs.
epoch_n = 1
print('4. 超参数已创建完毕')

print('====================测试GPU是否可用=========================')
Use_gpu = torch.cuda.is_available()
print(Use_gpu)
if Use_gpu:
    model = model.cuda()
    print('您的GPU可用，将使用GPU进行模型训练！')
else:
    print('您的GPU不可用，将使用CPU进行模型训练！')
time_open = time.time()  # wall-clock reference for total elapsed time

print('\n====================模型开始训练！=========================')
for epoch in range(epoch_n):
    print("\n训练批次：Epoch:{}/{}".format(epoch + 1, epoch_n))
    print("-" * 10)
    epoch_start_time = time.time()  # wall-clock start of this epoch

    for phase in ["train", "valid"]:
        if phase == "train":
            print("【训练集开始训练！Training...】")
            model.train(True)   # training mode: dropout active
        else:
            print("【验证集开始训练！Training...】")
            model.train(False)  # eval mode: dropout disabled

        running_loss = 0.0     # sum of per-sample losses in this phase
        running_corrects = 0   # correct predictions in this phase
        samples_seen = 0       # samples processed (replaces hard-coded 16)

        # Gradients are only needed while training; disabling them for the
        # valid phase saves memory and compute.
        with torch.set_grad_enabled(phase == "train"):
            for batch, data in enumerate(dataloader[phase], 1):
                batch_start_time = time.time()  # per-batch timing
                x, y = data
                # BUG FIX: `x, y = x, y` was a no-op — the tensors were
                # never moved to the GPU even though the model is, which
                # raises a device-mismatch error whenever CUDA is used.
                if Use_gpu:
                    x, y = x.cuda(), y.cuda()
                y_pred = model(x)
                _, pred = torch.max(y_pred.data, 1)
                loss = loss_f(y_pred, y)
                if phase == "train":
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                # BUG FIX: accumulating `loss.data` keeps tensors (and on
                # GPU, device memory) alive; .item() extracts plain floats.
                # Weighting by the actual batch size makes the epoch loss
                # exact even for a final partial batch.
                batch_size = x.size(0)
                running_loss += loss.item() * batch_size
                running_corrects += torch.sum(pred == y.data).item()
                samples_seen += batch_size
                if batch % 10 == 0 and phase == "train":
                    batch_time = time.time() - batch_start_time
                    total_time = time.time() - time_open
                    print("Batch {}, Train Loss:{:.4f}, Train ACC:{:.4f}, "
                          "本次Batch Time:{:.2f}秒，目前已用时{:.2f}秒"
                          .format(batch, running_loss / samples_seen,
                                  100 * running_corrects / samples_seen,
                                  batch_time, total_time))

        # BUG FIX: the old normalization (running_loss * 16 / N) assumed a
        # batch size of exactly 16; dividing the per-sample sum by the
        # dataset size is correct for any batch size.
        epoch_loss = running_loss / len(image_datasets[phase])
        epoch_acc = 100 * running_corrects / len(image_datasets[phase])
        print("第{}个epoch的{}的训练结果是： Loss:{:.4f} ACC:{:.4f}%".format(epoch + 1, phase, epoch_loss, epoch_acc))

        if phase == "train":
            train_losses.append(epoch_loss)
            train_accs.append(epoch_acc)
        else:
            valid_losses.append(epoch_loss)
            valid_accs.append(epoch_acc)

    # BUG FIX: this print lived inside the phase loop, so it also fired
    # after the train phase with a misleading partial "epoch" time.
    epoch_time = time.time() - epoch_start_time
    print("第{}个epoch耗时：{:.2f}秒".format(epoch + 1, epoch_time))

time_end = time.time() - time_open
print("epoch已全部执行完毕！程序总耗时：{:.2f}秒".format(time_end))

# Persist the per-epoch metrics so the training curves can be plotted later.
csv_filename = "VGGNet_results.csv"

# Explicit encoding keeps the file readable regardless of platform default;
# newline='' is required by the csv module on Windows.
with open(csv_filename, mode='w', newline='', encoding='utf-8') as file:
    writer = csv.writer(file)
    writer.writerow(['Epoch', 'Train Loss', 'Train Accuracy', 'Valid Loss', 'Valid Accuracy'])

    # BUG FIX: indexing the lists with range(epoch_n) raises IndexError if
    # training was interrupted mid-run; zip() stops at the shortest list
    # and writes exactly the epochs that completed both phases.
    rows = zip(train_losses, train_accs, valid_losses, valid_accs)
    for epoch_idx, (t_loss, t_acc, v_loss, v_acc) in enumerate(rows, start=1):
        writer.writerow([epoch_idx, t_loss, t_acc, v_loss, v_acc])

print("数据已保存到文件:", csv_filename)
exit()

'''
其实吧，根据自定义的batch_size的大小，就能手动算出来batch变量变化范围是从多少到多少，就能大概估摸着有多少输出的东西
'''
