from torch.utils.data import dataset
from tqdm import tqdm
import network
import utils
import os
import argparse
from datasets import VOCSegmentation, Cityscapes, cityscapes
from torchvision import transforms
import torch
import torch.nn as nn
from PIL import Image
from glob import glob

# ---------------------------------------------------------------------------
# Inference configuration defaults. Each value below is exposed as the default
# of the corresponding --flag in get_argparser(), so the script runs with no
# command-line arguments at all.
#
# NOTE(review): `input` shadows the Python builtin, and `dataset` / `model`
# shadow the names imported at the top of this file. Safe only because the
# shadowed names are never used afterwards — but worth renaming eventually.
# ---------------------------------------------------------------------------
input = '/media/jiji/fe0e60a9-bc54-4761-a52c-dd4ec10ba6db/PycharmProjects/DeepLabV3Plus-Pytorch/samples/'                        # path to a single image OR a directory of images
# Example of pointing at a single image instead of a directory:
# input = '/media/jiji/fe0e60a9-bc54-4761-a52c-dd4ec10ba6db/PycharmProjects/DeepLabV3Plus-Pytorch/samples/1_image.png'
dataset = 'voc'                     # training-set name, one of ['voc', 'cityscapes']
model = 'deeplabv3plus_mobilenet'   # model name (must be a factory in network.modeling)
separable_conv = False              # apply separable convolutions to the decoder and ASPP
output_stride = 16                  # DeepLab output stride, one of [8, 16]
save_val_results_to = r'/media/jiji/fe0e60a9-bc54-4761-a52c-dd4ec10ba6db/PycharmProjects/DeepLabV3Plus-Pytorch/result'          # directory where colorized segmentation masks are written
crop_val = False                    # whether to resize + center-crop inputs before inference
val_batch_size = 4                  # batch size for validation (unused by this script's loop)
crop_size = 513                     # square crop size used when crop_val is True
ckpt = '/media/jiji/fe0e60a9-bc54-4761-a52c-dd4ec10ba6db/PycharmProjects/DeepLabV3Plus-Pytorch/checkpoints/best_deeplabv3plus_mobilenet_voc_os16.pth'                         # checkpoint .pth file to restore weights from
gpu_id = '0'                        # GPU id exported via CUDA_VISIBLE_DEVICES

def get_argparser():
    """Build the command-line parser for inference options.

    Every flag defaults to the corresponding module-level constant, so the
    script is runnable with no arguments at all.
    """
    parser = argparse.ArgumentParser()

    # Dataset options
    parser.add_argument("--input", type=str, default=input,
                        help="path to a single image or image directory")
    parser.add_argument("--dataset", type=str, default=dataset,
                        choices=['voc', 'cityscapes'], help='Name of training set')

    # DeepLab options: every public lowercase callable exported by
    # network.modeling is treated as a model factory.
    modeling_ns = network.modeling.__dict__
    available_models = sorted(
        name
        for name, obj in modeling_ns.items()
        if name.islower() and not name.startswith('_') and callable(obj)
    )
    parser.add_argument("--model", type=str, default=model,
                        choices=available_models, help='model name')
    parser.add_argument("--separable_conv", action='store_true', default=separable_conv,
                        help="apply separable conv to decoder and aspp")
    parser.add_argument("--output_stride", type=int, default=output_stride, choices=[8, 16])

    # Train options
    parser.add_argument("--save_val_results_to", default=save_val_results_to,
                        help="save segmentation results to the specified dir")
    parser.add_argument("--crop_val", action='store_true', default=crop_val,
                        help='crop validation (default: False)')
    parser.add_argument("--val_batch_size", type=int, default=val_batch_size,
                        help='batch size for validation (default: 4)')
    parser.add_argument("--crop_size", type=int, default=crop_size)

    parser.add_argument("--ckpt", default=ckpt, type=str,
                        help="resume from checkpoint")
    parser.add_argument("--gpu_id", type=str, default=gpu_id,
                        help="GPU ID")
    return parser

def main():
    """Run segmentation inference on a single image or every image in a directory.

    Loads the model named by --model, restores weights from --ckpt if that
    file exists, then writes a colorized PNG prediction per input image into
    --save_val_results_to (created if missing).
    """
    opts = get_argparser().parse_args()
    # --dataset is restricted by argparse choices, so exactly one branch runs.
    if opts.dataset.lower() == 'voc':
        opts.num_classes = 21
        decode_fn = VOCSegmentation.decode_target
    elif opts.dataset.lower() == 'cityscapes':
        opts.num_classes = 19
        decode_fn = Cityscapes.decode_target

    os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Device: %s" % device)

    # ---- Collect input images: one file, or all matching files in a directory ----
    image_files = []
    if os.path.isdir(opts.input):
        for ext in ['png', 'jpeg', 'jpg', 'JPEG']:
            # recursive=True removed: the pattern has no '**', so it was a no-op.
            files = glob(os.path.join(opts.input, f'*.{ext}'))
            if files:
                image_files.extend(files)
    elif os.path.isfile(opts.input):
        image_files.append(opts.input)

    # ---- Build the model ----
    model = network.modeling.__dict__[opts.model](num_classes=opts.num_classes,
                                                  output_stride=opts.output_stride,
                                                  pretrained_backbone=False)
    if opts.separable_conv and 'plus' in opts.model:
        network.convert_to_separable_conv(model.classifier)
    utils.set_bn_momentum(model.backbone, momentum=0.01)

    if opts.ckpt is not None and os.path.isfile(opts.ckpt):
        # https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
        checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
        model.load_state_dict(checkpoint["model_state"])
        print("Resume model from %s" % opts.ckpt)
        del checkpoint  # free CPU copy of the weights before moving model to GPU
    else:
        print("[!] Retrain")
    # Wrapping/moving is identical in both branches, so do it once here.
    model = nn.DataParallel(model)
    model.to(device)

    # ImageNet normalization statistics expected by the pretrained backbones.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    if opts.crop_val:
        transform = transforms.Compose([
                transforms.Resize(opts.crop_size),
                transforms.CenterCrop(opts.crop_size),
                # BUG FIX: was transforms.Toensor() (typo) — crashed with
                # AttributeError whenever --crop_val was set.
                transforms.ToTensor(),
                transforms.Normalize(mean=mean, std=std),
            ])
    else:
        transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=mean, std=std),
            ])

    if opts.save_val_results_to is not None:
        # Create the output directory for the colorized predictions.
        os.makedirs(opts.save_val_results_to, exist_ok=True)

    with torch.no_grad():
        model = model.eval()
        for img_path in tqdm(image_files):
            # splitext (not split('.')[0]) so filenames containing dots keep
            # their full stem, e.g. 'a.b.png' -> 'a.b'.
            img_name = os.path.splitext(os.path.basename(img_path))[0]
            img = Image.open(img_path).convert('RGB')
            img = transform(img).unsqueeze(0)  # to tensor of shape NCHW
            img = img.to(device)

            pred = model(img).max(1)[1].cpu().numpy()[0]  # argmax over classes -> HW label map
            colorized_preds = decode_fn(pred).astype('uint8')  # label map -> RGB image
            colorized_preds = Image.fromarray(colorized_preds)
            if opts.save_val_results_to:
                colorized_preds.save(os.path.join(opts.save_val_results_to, img_name + '.png'))

if __name__ == '__main__':
    main()
