import os
import time
import datetime
from ptflops import get_model_complexity_info
import torch
from torch.utils.data import DataLoader

import torch
from I_FCN import VGG16
from seg_dataset import mydata
from train_utils.train_and_eval import create_lr_scheduler, train_one_epoch, evaluate



def create_model(aux, num_classes, pretrain=False, device='cuda'):
    """Build the VGG16 segmentation model and print its FLOPs/params.

    Parameters:
        aux: auxiliary-loss flag. NOTE(review): currently unused — confirm
            whether I_FCN.VGG16 should consume it.
        num_classes: number of output classes (incl. background).
            NOTE(review): currently unused; presumably VGG16 hard-codes its
            head — verify against I_FCN.VGG16.
        pretrain: placeholder for loading pretrained weights (unused).
        device: device the model is moved to before profiling. Defaults to
            'cuda' to preserve the original behavior; pass 'cpu' to allow
            profiling on machines without a GPU.

    Returns:
        The VGG16 model, already on `device`.
    """
    model = VGG16().to(device)

    # ptflops profiles a single (3, 224, 224) input; results are only
    # printed here, not returned.
    flops, params = get_model_complexity_info(
        model, (3, 224, 224), as_strings=True, print_per_layer_stat=True)
    print('FLOPs:{}'.format(flops))
    print('Params:{}'.format(params))

    return model


def main(args):
    """Train the VGG16 segmentation model and checkpoint it every epoch.

    Builds the thyroid-segmentation datasets, trains for `args.epochs`
    epochs with AdamW + a per-step warmup LR schedule, evaluates on the
    test split after every epoch, appends metrics to a results file, and
    saves a full checkpoint (model/optimizer/scheduler/epoch) per epoch.

    Parameters:
        args: argparse.Namespace from parse_args().
    """
    device = torch.device(args.device if torch.cuda.is_available() else "cpu")
    batch_size = args.batch_size
    # segmentation num_classes + background
    num_classes = args.num_classes + 1

    # Results file for per-epoch train loss / lr / evaluation metrics.
    results_file = "F:\\lunwen\\vgg_224{}.txt".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    iss = 224  # input image size passed to the dataset
    train_data = mydata('D:\\study\\pytorch_study\\seg_thryoid_picture\\datasets', iss, 'train')
    test_data = mydata('D:\\study\\pytorch_study\\seg_thryoid_picture\\datasets', iss, 'test')
    # NOTE(review): val_data is constructed but never wrapped in a DataLoader;
    # the per-epoch evaluation below runs on the *test* split — confirm intent.
    val_data = mydata('D:\\study\\pytorch_study\\seg_thryoid_picture\\datasets', iss, 'val')

    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, drop_last=True)

    model = create_model(aux=args.aux, num_classes=num_classes)
    model.to(device)

    # Fix: honor the --lr CLI flag (its default is 1e-4, identical to the
    # previously hard-coded value, so default behavior is unchanged).
    # NOTE(review): weight_decay is hard-coded to 5e-2 and ignores --wd
    # (default 1e-4) — confirm which value is intended before changing it.
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=5e-2)
    scaler = torch.cuda.amp.GradScaler() if args.amp else None

    # LR schedule steps once per *batch step*, not once per epoch.
    lr_scheduler = create_lr_scheduler(optimizer, len(train_loader), args.epochs, warmup=True)

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1
        if args.amp:
            scaler.load_state_dict(checkpoint["scaler"])

    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        mean_loss, lr = train_one_epoch(model, optimizer, train_loader, device, epoch,
                                        lr_scheduler=lr_scheduler, print_freq=args.print_freq,
                                        scaler=scaler)

        confmat = evaluate(model, test_loader, device=device, num_classes=num_classes)
        val_info = str(confmat)
        print(val_info)
        # Append this epoch's train loss, lr and evaluation metrics.
        with open(results_file, "a") as f:
            train_info = f"[epoch: {epoch}]\n" \
                         f"train_loss: {mean_loss:.4f}\n" \
                         f"lr: {lr:.6f}\n"
            f.write(train_info + val_info + "\n\n")

        save_file = {"model": model.state_dict(),
                     "optimizer": optimizer.state_dict(),
                     "lr_scheduler": lr_scheduler.state_dict(),
                     "epoch": epoch,
                     "args": args}
        if args.amp:
            save_file["scaler"] = scaler.state_dict()
        # NOTE(review): checkpoints are saved to F:\lunwen\save_weights, but the
        # __main__ guard only creates ./save_weights — confirm the target
        # directory exists before a long run.
        torch.save(save_file, "F:\\lunwen\\save_weights\\vgg_224{}.pth".format(epoch))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print("training time {}".format(total_time_str))


def parse_args(argv=None):
    """Parse command-line arguments for training.

    Parameters:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] (backward-compatible); passing
            an explicit list makes the function testable.

    Returns:
        argparse.Namespace with the training configuration.
    """
    import argparse
    parser = argparse.ArgumentParser(description="pytorch deeplabv3 training")

    parser.add_argument("--data-path", default="/data/", help="VOCdevkit root")
    parser.add_argument("--num-classes", default=1, type=int)
    # Fix: type=bool is a trap — bool("False") is True, so ANY value on the
    # command line enabled the flag. store_true keeps the False default and
    # makes a bare `--aux` enable it.
    parser.add_argument("--aux", action="store_true", help="auxilier loss")
    parser.add_argument("--device", default="cuda", help="training device")
    parser.add_argument("-b", "--batch-size", default=8, type=int)
    parser.add_argument("--epochs", default=20, type=int, metavar="N",
                        help="number of total epochs to train")

    parser.add_argument('--lr', default=0.0001, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    # Mixed precision training parameters (same type=bool fix as --aux above).
    parser.add_argument("--amp", action="store_true",
                        help="Use torch.cuda.amp for mixed precision training")

    return parser.parse_args(argv)


if __name__ == '__main__':
    args = parse_args()

    # Idiom: makedirs(exist_ok=True) is race-free and replaces the
    # check-then-mkdir pair with a single idempotent call.
    # NOTE(review): main() actually writes checkpoints to
    # F:\lunwen\save_weights, not this local directory — confirm which
    # directory is the intended checkpoint target.
    os.makedirs("./save_weights", exist_ok=True)

    main(args)