import argparse
import logging
import os
import sys
import cv2
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from torchsummary import summary
from utils.dataset import BasicDataset
from torch.utils.data import DataLoader
from deeplabv3 import resnet50
# Hard-coded, machine-specific dataset locations — adjust for your environment.
dir_img = 'C:/Users/sunyi/Desktop/haiyu_test/img2/'  # input images
dir_mask = 'C:/Users/sunyi/Desktop/haiyu_test/label2/'  # ground-truth masks
dir_checkpoint = 'checkpoints/'  # per-epoch weight checkpoints are written here

def get_args(argv=None):
    """Parse command-line options for this script.

    Args:
        argv: Optional list of argument strings. ``None`` (the default)
            parses ``sys.argv[1:]`` as before; passing a list makes the
            function testable without touching ``sys.argv``.

    Returns:
        argparse.Namespace with ``epochs``, ``batchsize``, ``lr``, ``load``,
        ``scale`` and ``val`` attributes.
    """
    parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-e', '--epochs', metavar='E', type=int, default=1,
                        help='Number of epochs', dest='epochs')
    parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=1,
                        help='Batch size', dest='batchsize')
    parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=0.1,
                        help='Learning rate', dest='lr')
    # BUG FIX: the original used default=True for a str-typed option; argparse
    # does not run `type` on defaults, so args.load was the *bool* True unless
    # the flag was given.  An empty string is the "no checkpoint" default.
    parser.add_argument('-f', '--load', dest='load', type=str, default='',
                        help='Load models from a .pth file')
    parser.add_argument('-s', '--scale', dest='scale', type=float, default=1,
                        help='Downscaling factor of the images')
    parser.add_argument('-v', '--validation', dest='val', type=float, default=10.0,
                        help='Percent of the data that is used as validation (0-100)')

    return parser.parse_args(argv)


def train_net(net,
              device,
              epochs=1,
              batch_size=1,
              lr=0.001,
              val_percent=0.1,
              save_cp=True,
              img_scale=0.5,
              pred_dir='C:/Users/sunyi/Desktop/haiyu_test/unet_pred_img3/'):
    """Run the network over the whole dataset in eval mode, write per-image
    class-index predictions as PNGs, and print pixel accuracy per epoch.

    Despite its name this function never back-propagates: the model is kept
    in ``eval()`` mode and no optimizer step is taken.  (The original code
    built an RMSprop optimizer and an LR scheduler it never used; both have
    been removed.)

    Args:
        net: model mapping (N, C_in, H, W) images to (N, n_classes, H, W) logits.
        device: torch.device the model lives on.
        epochs: number of passes over the dataset.
        batch_size: DataLoader batch size.  NOTE(review): prediction saving
            and the ``testset.ids`` indexing assume batch_size == 1 and
            shuffle=False — confirm before increasing.
        lr: unused; kept for interface compatibility (still logged).
        val_percent: unused; kept for interface compatibility.
        save_cp: if True, save the model weights after every epoch.
        img_scale: downscale factor forwarded to BasicDataset.
        pred_dir: directory where per-image prediction PNGs are written
            (previously hard-coded; the default preserves the old path).
    """
    testset = BasicDataset(dir_img, dir_mask, img_scale)
    n_test = len(testset)
    n_val = 0
    test_loader = DataLoader(testset, batch_size=batch_size, shuffle=False,
                             num_workers=1, pin_memory=True)

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {lr}
        Training size:   {n_test}
        Validation size: {n_val}
        Checkpoints:     {save_cp}
        Device:          {device.type}
        Images scaling:  {img_scale}
    ''')

    for epoch in range(epochs):
        net.eval()
        epoch_corr = 0
        n_pixels = 0  # BUG FIX: count actual pixels instead of assuming 512x512 images
        cnt = 0
        with tqdm(total=n_test, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
            with torch.no_grad():  # inference only: skip autograd bookkeeping
                for batch in test_loader:
                    cnt += 1
                    imgs = batch['image'].to(device=device, dtype=torch.float32)
                    # assumes BasicDataset scales masks into [0, 1]; multiply
                    # back to integer class labels — TODO confirm against dataset
                    true_masks = (batch['mask'] * 255).to(device=device, dtype=torch.long)
                    masks_pred = net(imgs)
                    true_masks = true_masks.squeeze(1)
                    # index of the max logit = predicted class per pixel
                    pred = masks_pred.max(1, keepdim=True)[1]
                    epoch_corr += pred.eq(true_masks.view_as(pred)).sum().item()
                    n_pixels += true_masks.numel()
                    # drop the channel and batch dims (assumes batch_size == 1)
                    pred = pred.squeeze(1).squeeze(0).cpu().numpy()
                    # cnt - 1 maps the batch back to its dataset id; relies on
                    # shuffle=False and batch_size == 1
                    pred_filename = pred_dir + testset.ids[cnt - 1] + '.png'
                    cv2.imwrite(pred_filename, pred)
                    pbar.update(imgs.shape[0])  # BUG FIX: original never advanced the bar
            print(epoch_corr / n_pixels)
            print(str(cnt))
        if save_cp:
            # exist_ok avoids the old try/except-pass that also swallowed
            # genuine errors such as permission failures
            os.makedirs(dir_checkpoint, exist_ok=True)
            torch.save(net.state_dict(),
                       dir_checkpoint + f'CP_epoch{epoch + 1}.pth')
            logging.info(f'Checkpoint {epoch + 1} saved !')


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    args = get_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')
    # SECURITY NOTE: torch.load unpickles arbitrary objects — only load
    # checkpoint files from a trusted source.
    net = torch.load('G:/train_data/model_ppunet2/96_net.pt')
    # BUG FIX: move the model to the target device *before* summary(), and
    # pass the device explicitly — torchsummary defaults to device='cuda',
    # which crashed on CPU-only machines or CPU-resident checkpoints.
    net.to(device=device)
    summary(net, (3, 512, 512), device=device.type)
    try:
        train_net(net=net,
                  epochs=args.epochs,
                  batch_size=args.batchsize,
                  lr=args.lr,
                  device=device,
                  img_scale=args.scale,
                  val_percent=args.val / 100)
    except KeyboardInterrupt:
        # Preserve the weights on Ctrl+C so a long run is not lost.
        torch.save(net.state_dict(), 'INTERRUPTED.pth')
        logging.info('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            # Hard exit in case something (e.g. a DataLoader worker)
            # swallows SystemExit.
            os._exit(0)
