from torch.utils.data import dataset
from tqdm import tqdm
from cityspace import network
from cityspace import utils
import os
import random
import argparse
import numpy as np

from torch.utils import data
from cityspace.datasets import VOCSegmentation, Cityscapes, cityscapes
from torchvision import transforms as T
from cityspace.metrics import StreamSegMetrics

import torch
import torch.nn as nn

from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
from glob import glob
import time

'''
Batch prediction of semantic segmentation using off-the-shelf
DeepLabV3+ weights pretrained on Cityscapes.
'''


def _collect_image_files(input_path):
    """Return a list of image paths: all png/jpeg/jpg under a directory
    (searched recursively), or the single file itself.

    :param input_path: directory or single image file path.
    :return: list of file path strings (possibly empty).
    """
    image_files = []
    if os.path.isdir(input_path):
        for ext in ('png', 'jpeg', 'jpg', 'JPEG'):
            image_files.extend(
                glob(os.path.join(input_path, '**/*.%s' % ext), recursive=True))
    elif os.path.isfile(input_path):
        image_files.append(input_path)
    return image_files


def _load_model(opts, device):
    """Build the segmentation network, load checkpoint weights if present,
    and wrap it in DataParallel on the target device.

    :param opts: config dict; reads "model", "num_classes", "output_stride",
                 "separable_conv", "ckpt".
    :param device: torch.device to move the model to.
    :return: DataParallel-wrapped model on ``device``.
    """
    # All models are constructed at network.modeling.
    model = network.modeling.__dict__[opts["model"]](
        num_classes=opts["num_classes"], output_stride=opts["output_stride"])
    if opts["separable_conv"] and 'plus' in opts["model"]:
        network.convert_to_separable_conv(model.classifier)
    utils.set_bn_momentum(model.backbone, momentum=0.01)

    if opts["ckpt"] is not None and os.path.isfile(opts["ckpt"]):
        # Load on CPU first to avoid GPU OOM / device-mismatch on restore.
        # https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402
        checkpoint = torch.load(opts["ckpt"], map_location=torch.device('cpu'))
        model.load_state_dict(checkpoint["model_state"])
        print("Resume model from %s" % opts["ckpt"])
        del checkpoint  # free checkpoint memory before inference
    else:
        print("[!] Retrain")
    model = nn.DataParallel(model)
    model.to(device)
    return model


def _build_transform(opts):
    """Return the preprocessing pipeline (ImageNet normalization, optional
    resize+center-crop when "crop_val" is set)."""
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    if opts["crop_val"]:
        return T.Compose([
            T.Resize(opts["crop_size"]),
            T.CenterCrop(opts["crop_size"]),
            T.ToTensor(),
            normalize,
        ])
    return T.Compose([T.ToTensor(), normalize])


def main():
    """Run batch semantic-segmentation inference with a pretrained
    DeepLabV3+ model and save, per input image:

    - ``<name>_color.png`` — color-coded class map (dataset palette),
    - ``<name>_bw.png``    — raw class-id map as 8-bit grayscale,
    - ``<name>.bin``       — per-pixel softmax probabilities (HWC, float32).
    """
    # Command-line arguments inlined as a config dict.
    # NOTE: raw strings so the Windows backslashes are not treated as
    # (invalid) escape sequences; the values are unchanged.
    opts = {
        "input": r"E:\change_data\c_data_0905key2/rgb",  # folder or single file
        "dataset": "cityscapes",
        "model": "deeplabv3plus_mobilenet",
        "separable_conv": False,
        "output_stride": 16,
        "save_val_results_to": r"E:\change_data\c_data_0905key2/",
        "crop_val": False,
        "val_batch_size": 4,
        "crop_size": 513,
        "ckpt": r"D:\guomengqi/biyecode\mapimg_change\checkpoints/best_deeplabv3plus_mobilenet_cityscapes_os16.pth",
        "gpu_id": '0',
    }

    dataset_name = opts["dataset"].lower()
    if dataset_name == 'voc':
        opts["num_classes"] = 21
        decode_fn = VOCSegmentation.decode_target
    elif dataset_name == 'cityscapes':
        opts["num_classes"] = 19
        decode_fn = Cityscapes.decode_target
    else:
        # Fail fast instead of leaving decode_fn/num_classes unbound.
        raise ValueError("Unsupported dataset: %s" % opts["dataset"])

    os.environ['CUDA_VISIBLE_DEVICES'] = opts["gpu_id"]
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Device: %s" % device)

    image_files = _collect_image_files(opts["input"])
    print(f"Loaded {len(image_files)} images from {opts['input']}")

    model = _load_model(opts, device)
    transform = _build_transform(opts)

    # Output directories for colorized/grayscale maps and probability blobs.
    save_folder = opts["save_val_results_to"]
    color_dir = os.path.join(save_folder, "label_2d_img_raw")
    bin_dir = os.path.join(save_folder, "label_2d_bin_raw")
    os.makedirs(color_dir, exist_ok=True)
    os.makedirs(bin_dir, exist_ok=True)

    with torch.no_grad():
        model.eval()
        for img_path in tqdm(image_files):
            img_name = os.path.splitext(os.path.basename(img_path))[0]
            image = Image.open(img_path).convert('RGB')
            batch = transform(image).unsqueeze(0).to(device)  # NCHW, batch of 1

            output = model(batch)
            pred = output.max(1)[1].cpu().numpy()[0]  # HW class-id map
            probabilities = torch.nn.functional.softmax(
                output, dim=1).cpu().numpy()[0]  # CHW softmax scores

            # Colorized prediction (dataset palette).
            colorized = Image.fromarray(decode_fn(pred).astype('uint8'))
            colorized.save(os.path.join(color_dir, img_name + "_color.png"))

            # Raw class ids as 8-bit grayscale.
            grayscale = Image.fromarray(pred.astype(np.uint8))
            grayscale.save(os.path.join(color_dir, img_name + "_bw.png"))

            # Probabilities flattened HWC to a raw .bin file.
            with open(os.path.join(bin_dir, img_name + ".bin"), 'wb') as f:
                probabilities.transpose(1, 2, 0).flatten().tofile(f)


if __name__ == '__main__':
    # Time the whole batch run end-to-end.
    started = time.time()
    main()
    elapsed = time.time() - started
    print("time:", elapsed)
