import torch
import argparse
import os
import time

from torch import optim
from models.unet import UNet
from utils.trainer import Trainer
from torchvision.transforms import transforms
from utils.loader import get_loaders
from utils.logger import get_logger
from metrics import DSC
from metrics import IOU
from utils.loss import Loss


def get_args():
    """
    调参，可直接修改default
    :return: args
    """
    # ?
    parse = argparse.ArgumentParser()
    # task info
    parse.add_argument('--model_name', type=str, default='UNet')
    parse.add_argument('--task_name', type=str, default='breast_UNet') #Cell_UNet
    parse.add_argument('--pretrained', type=str, default=None)
    parse.add_argument('--pretrain_file', type=str, default=None)
    parse.add_argument('--mode_task', type=str, default=None,help='seg/consist/contrastive')

    # dataset
    parse.add_argument('--dataset_path', type=str, default=r'datasets/breast/train', help='dataset root path')
    parse.add_argument('--dataset_name', type=str, default='breast')
    parse.add_argument('--img_size', type=int, default=256)
    parse.add_argument('--in_channels', type=int, default=1, help='image channels')
    parse.add_argument('--num_workers', type=int, default=3)

    # train parameters
    parse.add_argument('--epochs', type=int, default=60, help='(60)epoch number')
    parse.add_argument('--batch_size', type=int, default=8, help='batch size')
    parse.add_argument('--val_epoch', type=int, default=4, help='val every n epoch')
    parse.add_argument('--lr', type=float, default=0.001, help='learning rate')
    parse.add_argument('--beta1', type=float, default=0.5)  # momentum1 in Adam
    parse.add_argument('--beta2', type=float, default=0.999)  # momentum2 in Adam
    # test
    parse.add_argument('--batch_size_test', type=int, default=16, help='batch size test')

    # GPU
    parse.add_argument('--device', type=str, default='cuda')
    parse.add_argument('--DataParallel', type=bool, default=False, help='multi gpus')
    parse.add_argument('--cuda_ids', type=str, default='1', help='0/1/0,1')

    # save
    parse.add_argument('--save_model_epoch', type=int, default=500, help='(9999)save model every n epoch')
    parse.add_argument('--save_path', type=str, default=None, help='the path of model weight file')
    parse.add_argument('--weights', type=str, default=None)
    parse.add_argument('--save_pred_img', type=bool, default=True)

    # log
    parse.add_argument('--tensorboard_dir', type=str, default=None, help='tensorboard dir')
    parse.add_argument('--log_file', type=str, default=None, help='log dir')
    parse.add_argument('--current_time',type=str,default=None)
    #?
    args = parse.parse_args()
    update_args(args)

    return args


def update_args(args):
    """
    Fill in derived defaults: task mode, timestamp, and save/log/run paths.

    Only fields that are still ``None`` are filled, so values given
    explicitly on the command line are respected.

    :param args: parsed ``argparse.Namespace``; mutated in place
    :return: None
    """
    # seg/consist/contrastive/segcontra
    # BUGFIX: previously this assignment was unconditional, silently
    # discarding any --mode_task passed on the CLI. Default (None) still
    # resolves to 'segcontra', so existing behavior is preserved.
    if args.mode_task is None:
        args.mode_task = 'segcontra'
    args.current_time = time.strftime('%Y_%m%d_%H%M%S')
    if args.device is None:
        args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if args.save_path is None:
        args.save_path = os.path.join('./results', args.task_name, 'weights', args.mode_task, args.current_time)
    if args.tensorboard_dir is None:
        args.tensorboard_dir = os.path.join('./results', args.task_name, 'runs', args.mode_task, args.current_time)
    if args.log_file is None:
        # Log file name carries its own timestamp, separate from current_time.
        args.log_file = os.path.join('./results', args.task_name, 'logs', args.mode_task, args.current_time,
                                     f'{time.strftime("%Y%m%d%H%M", time.localtime(time.time()))}.log')
    if args.pretrain_file is None:
        # NOTE(review): the final path component is intentionally empty here —
        # a concrete checkpoint file name is expected to be filled in later.
        args.pretrain_file = os.path.join('./results', args.task_name, 'weights', 'seg', "")


def train():
    """End-to-end training entry point: build data pipeline, model,
    optimizer, metrics and logger, then hand off to Trainer."""
    args = get_args()
    print(args)
    # -------------------- hyper-parameter override zone --------------------
    args.dataset_path = r'datasets/breast/train_128'
    args.batch_size = 4
    args.img_size = 128
    args.epochs = 100
    args.pretrained = False
    args.pretrain_file = os.path.join('./results', args.task_name, 'weights', 'seg', "")
    args.cuda_ids = "0"  # 0/1/0,1
    # -------------------- hyper-parameter override zone --------------------

    # Pin device ordering so CUDA_VISIBLE_DEVICES indices match nvidia-smi.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_ids
    print("look logfile", args.log_file)

    x_transforms = transforms.Compose([
        transforms.Resize((args.img_size, args.img_size)),
        # ToTensor converts to torch.FloatTensor and scales pixels by 1/255.
        transforms.ToTensor(),
        # PIL-image transforms must come before ToTensor; tensor transforms
        # (e.g. Normalize: out = (in - mean) / std per channel) go after it.
        # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # 3-channel
        # transforms.Normalize([0.5], [0.5])  # single-channel
    ])
    y_transforms = transforms.Compose([
        transforms.Resize((args.img_size, args.img_size)),
        transforms.ToTensor()
    ])

    model = UNet(in_channels=args.in_channels, classes=2)
    criterion = Loss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))

    # ReduceLROnPlateau: new_lr = factor * old_lr (factor defaults to 0.1)
    # once the monitored metric — 'max' mode, i.e. a score that should rise —
    # has failed to improve for `patience` validation steps.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=2)

    train_loader, val_loader, train_img_num, val_img_num = get_loaders(
        args.dataset_path, args.batch_size, args.batch_size_test,
        x_transforms, y_transforms, args.num_workers, channels=args.in_channels)

    # BUGFIX: the counts from get_loaders are used as-is. They were previously
    # overwritten with len(loader) * args.batch_size, which overcounts a
    # partial final batch and applied the *train* batch size to the val
    # loader even though it is built with args.batch_size_test.

    # Dice coefficient and IoU metrics
    metric_dice = DSC()
    metric_iou = IOU()
    logger = get_logger(args.log_file)
    logger.info(f'''Starting training:
                 Model:           {args.model_name}
                 Dataset:         {args.dataset_name}
                 Mode_task:       {args.mode_task}
                 Pretrained:      {args.pretrained}
                 Pretrain_file:   {args.pretrain_file}
                 dataset_path:    {args.dataset_path}
                 Train_img_num:   {train_img_num}
                 Val_img_num:     {val_img_num}
                 Input Shape:     {(args.in_channels, args.img_size, args.img_size)}
                 Epochs:          {args.epochs}
                 Batch Size:      {args.batch_size}
                 Learning Rate:   {args.lr}
                 Test Batch Size: {args.batch_size_test}
                 DataParallel:    {args.DataParallel}
                 Device:          {args.device}
                 Cuda Ids:        {args.cuda_ids}
                 Save Model:      {'per ' + str(args.save_model_epoch) + ' epochs'}
                 CurrentTime:     {args.current_time}
             ''')

    trainer = Trainer(model, criterion, optimizer, scheduler, metric_dice, metric_iou,
                      train_loader, val_loader, args, logger)

    start = time.time()
    trainer.train()
    end = time.time()

    logger.info(f'Train Time:{int((end - start) / 60 // 60)}h:{int((end - start) / 60 % 60)}m:{int((end - start) % 60)}s')


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    train()
