from torch.utils.data import DataLoader
from math import ceil

from utils.myloss import Myloss
from models.model import VGG16
from dataset.read_yolo_dataset import ReadYolo
from augmentation.data_augment import DataAugment
from utils.collate import colle

import torch
import argparse
import time

# Hyper-parameter configuration (CLI-overridable).
parser = argparse.ArgumentParser(description="VGG16 Training")
# BUG FIX: `type=float` was missing — without it a CLI-supplied --lr
# arrives as a str and torch.optim.SGD rejects it; the default alone
# happened to be a float, which masked the bug.
parser.add_argument("--lr", default=1e-2, type=float, help="learning rate")
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument("--batch_size", default=16, type=int, help="batch-size")
parser.add_argument("--epochs", default=300, type=int, help="epochs")
parser.add_argument("--weight_decay",
                    default=5e-4,
                    type=float,
                    help="weight decay for SGD")

args = parser.parse_args()
# Train on GPU when one is available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Dataset loading: ReadYolo reads YOLO-format annotations; DataAugment is
# applied as the transform. (Project-local classes — semantics assumed from
# names; verify against dataset/read_yolo_dataset.py.)
data_augment = DataAugment()
dataset = ReadYolo(phase="train", trans=data_augment, device=device)
pic_num = len(dataset)  # total number of training samples

# Model construction: VGG16 in training mode, moved to the selected device.
net = VGG16()
net.train()
net = net.to(device)

# Optimizer and LR schedule. NOTE(review): CyclicLR's base_lr/max_lr
# (0.001–0.1) override the initial --lr passed to SGD once stepping starts.
optimizer = torch.optim.SGD(net.parameters(),lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.001, max_lr=0.1)

# Custom loss (project-local).
loss = Myloss()

# loss = torch.nn.CrossEntropyLoss()
# (kept for reference) CrossEntropyLoss cannot handle the batches produced
# by the custom collate function below, hence the custom Myloss.

# Batched loader over the training set; `colle` is the project's custom
# collate function. drop_last=False keeps the final partial batch.
data = DataLoader(dataset,
                  batch_size=args.batch_size,
                  shuffle=True,
                  drop_last=False,
                  collate_fn=colle)


# Training loop
def train():
    """Run the full training loop for args.epochs epochs.

    Uses the module-level globals set up above: ``data`` (DataLoader),
    ``net``, ``loss``, ``optimizer``, ``scheduler``, ``args`` and
    ``pic_num``. Saves checkpoints under ./weights/: every 20 epochs,
    and additionally whenever the batch loss drops to <= 5e-6.
    """
    epochs = args.epochs
    # Total number of optimizer steps over the whole run.
    # drop_last=False in the DataLoader, so each epoch has
    # ceil(pic_num / batch_size) batches.
    total_batches = epochs * ceil(pic_num / args.batch_size)
    batch_count = 0
    for epoch in range(epochs):
        for imgs, targets in data:
            start = time.time()
            pred = net(imgs)  # imgs: (batch_size, 3, 224, 224) — per original note
            batch_loss = loss(pred, targets)
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
            # CyclicLR is designed to be stepped once per batch.
            scheduler.step()
            batch_time = time.time() - start   # seconds spent on this batch
            batch_count += 1
            remaining = total_batches - batch_count  # batches left in the run
            # print("epoch:{0}/{1} ".format(epoch + 1, epochs), "\t loss:",
            #       float(batch_loss), "\t每个batch所需时间: {:.6f}".format(batch_time),
            #       "s\t剩余批次:", remaining)
            if batch_loss <= 5e-6:
                # BUG FIX: checkpoint filename typo "selsected" -> "selected".
                torch.save(net.state_dict(), "./weights/selected_params_epoch{}.pth".format(epoch + 1))
                print("loss符合要求")
        # Save a checkpoint every 20 epochs (epochs 20, 40, ...).
        if (epoch + 1) % 20 == 0:
            torch.save(net.state_dict(), "./weights/VGG16_epoch{}_params.pth".format(epoch + 1))
            print("epoch:{0}/{1} ".format(epoch + 1, epochs), "\t loss:", float(batch_loss))
    print("训练结束！!")


if __name__ == '__main__':
    # Script entry point: kick off training when executed directly.
    train()
