
import numpy as np
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from nets.vgg16 import vgg16
from nets.fused_mobilenet_v3 import mobilenet_v3
from nets.ghostnet import ghostnet
from nets.efficientnet import EfficientNet
from nets.shufflenet_v2 import shufflenetv2
from utils.dataloader import DataGenerator, detection_collate
from utils.utils import get_classes
from utils.utils_fit import train, lm_softmax_train
from utils.callbacks import LossAccHistory


#----------------------------------------------------#
#   When training on your own dataset, be sure to
#   change classes_path to the txt file listing
#   your own class names (one per line).
#----------------------------------------------------#
classes_path    = 'model_data/news_classes.txt'

#--------------------#
#   Batch sizes
#--------------------#
train_bs    = 16
valid_bs    = 8

#--------------------------------#
#   epochs      number of training epochs
#--------------------------------#
epochs   = 50

#-------------------------------------------------------------------------------#
#   backbone        model architecture label; available options include:
#                   LeNet, AlexNet, vgg16,
#                   squeezenet, mobilenetv2, mobilenetv3, shufflenetv2, ghostnet,
#                   DenseNet, resnet50, resnext50
#   NOTE(review): this string is informational only — the model actually
#   trained is whichever constructor is uncommented in the main block below.
#-------------------------------------------------------------------------------#
backbone        = "mobilenetv3-Large-Margin Softmax"

#----------------------------------------------------#
#   input_shape     input image size, [height, width]
#----------------------------------------------------#
input_shape     = [224, 224]

#------------------------------------------------------------------#
#   momentum        momentum parameter used inside the optimizer
#   weight_decay    weight decay; helps prevent overfitting
#------------------------------------------------------------------#
momentum            = 0.9
weight_decay        = 5e-4

#------------------------------------------------------------------#
#   save_dir        folder where weights and log files are saved
#------------------------------------------------------------------#
save_dir            = 'logs'

#------------------------------------------------------------------#
#   num_workers     number of worker processes for data loading.
#                   More workers speed up reading but use more
#                   memory; set to 2 or 0 on low-memory machines.
#------------------------------------------------------------------#
num_workers = 4

#-------------------------------------#
#   Dataset annotation files
#   train_txt_path   training image paths and labels
#   valid_txt_path   validation image paths and labels
#-------------------------------------#
train_txt_path   = "cls_train.txt"
valid_txt_path   = "cls_valid.txt"

if __name__ == "__main__":

    #----------------------------------------------------#
    #   Read the class-name list and the number of classes.
    #----------------------------------------------------#
    class_names_list, class_num = get_classes(classes_path)

    #----------------------------------------------------#
    #   Build the model — uncomment exactly one backbone.
    #----------------------------------------------------#
    model = vgg16(pretrained=False, num_classes=class_num)
    # model = mobilenet_v3(pretrained=False, mode='small', num_classes=class_num)
    # model = ghostnet(num_classes=class_num)
    # model = EfficientNet.from_name('efficientnet-b0', override_params={'num_classes': class_num})
    # model = shufflenetv2(ratio=1, class_num=class_num)

    #----------------------------------------------------#
    #   Read the dataset annotation txt files.
    #   Explicit utf-8 so non-ASCII paths/labels decode
    #   identically on every platform (default encoding
    #   is locale-dependent).
    #----------------------------------------------------#
    with open(train_txt_path, "r", encoding="utf-8") as f:
        train_lines = f.readlines()
    with open(valid_txt_path, "r", encoding="utf-8") as f:
        val_lines = f.readlines()

    #   Sample counts for train / validation.
    num_train = len(train_lines)
    num_val = len(val_lines)

    #   Shuffle the training list reproducibly (fixed seed),
    #   then restore a non-deterministic RNG state so the rest
    #   of training is not pinned to seed 10101.
    np.random.seed(10101)
    np.random.shuffle(train_lines)
    np.random.seed(None)

    #   Dataset wrappers: random augmentation for training only.
    train_data = DataGenerator(train_lines, input_shape=input_shape, random=True)
    valid_data = DataGenerator(val_lines, input_shape=input_shape, random=False)

    #   DataLoaders. Validation is NOT shuffled: shuffling it adds
    #   nothing to the metrics, and combined with drop_last=True it
    #   would evaluate a different random subset every epoch.
    train_loader = DataLoader(dataset=train_data, batch_size=train_bs, shuffle=True, num_workers=num_workers,
                              drop_last=True,
                              pin_memory=True, collate_fn=detection_collate)
    valid_loader = DataLoader(dataset=valid_data, batch_size=valid_bs, shuffle=False, num_workers=num_workers,
                              drop_last=True,
                              pin_memory=True, collate_fn=detection_collate)

    #   Batches per epoch (floor division; drop_last discards the
    #   final partial batch).
    epoch_step = num_train // train_bs
    epoch_step_val = num_val // valid_bs

    if epoch_step == 0 or epoch_step_val == 0:
        raise ValueError("数据集过小，无法继续进行训练，请扩充数据集。")

    #   Loss/accuracy curve logger. Uses the configured save_dir
    #   (was previously hard-coded to 'logs', silently ignoring
    #   the save_dir setting above).
    loss_acc_history = LossAccHistory(save_dir, model, input_shape)

    #   Loss function, optimizer and learning-rate schedule.
    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.002, momentum=momentum, dampening=0.1, weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.66)

    #   Launch training. Switch to lm_softmax_train (below) for the
    #   Large-Margin Softmax variant of the backbone.
    train(model, loss_func, optimizer, scheduler, train_loader,
          valid_loader, epoch_step, epoch_step_val, epochs, 10, save_dir, loss_acc_history)

    # lm_softmax_train(model, loss_func, optimizer, scheduler, train_loader,
    #       valid_loader, epoch_step, epoch_step_val, epochs, 10, save_dir, loss_acc_history)

