

import json
from PIL import Image
import numpy as np


import cv2
import argparse
from model.build_BiSeNet import BiSeNet
import os
import torch
import cv2
from imgaug import augmenters as iaa
from PIL import Image
from torchvision import transforms
import numpy as np
from utils import reverse_one_hot, get_label_info, colour_code_segmentation

# Path to the trained BiSeNet weights loaded in Model.__init__.
MODEL_PATH = "best_dice_loss.pth"
# When True, process_image writes a mask/input side-by-side preview instead of
# the single-channel mask; rebound to True in the __main__ block below.
DEBUG = False
# Number of segmentation classes (background + foreground).
CLASS_NUM = 2


def colour_code_segmentation_my(image, label_values):
    """Map a 2-D array of class indices to a 3-channel colour image.

    Args:
        image: array of integer class ids (0 = background, 1 = foreground).
        label_values: unused; kept only for signature compatibility with
            utils.colour_code_segmentation.

    Returns:
        uint8 array of shape ``image.shape + (3,)``:
        class 0 -> (0, 0, 0), class 1 -> (1, 200, 1).
    """
    colour_codes = np.array([[0, 0, 0], [1, 200, 1]], dtype=np.uint8)
    # Fancy indexing broadcasts the per-class colour over every pixel.
    return colour_codes[image.astype(int)]


class Model():
    """BiSeNet segmentation wrapper.

    On construction it parses a fixed set of options, loads the pretrained
    weights from MODEL_PATH onto the GPU, exports the network to ONNX, and
    puts the model in eval mode.  Calling the instance runs inference on a
    single BGR image and returns a colour-coded mask.
    """

    def __init__(self):
        # basic parameters (parsed from a hard-coded list, not sys.argv)
        parser = argparse.ArgumentParser()
        parser.add_argument('--image', action='store_true',
                            default=False, help='predict on image')
        parser.add_argument('--video', action='store_true',
                            default=False, help='predict on video')
        parser.add_argument('--checkpoint_path', type=str,
                            default='/project/train/models/best_dice_loss.pth')
        parser.add_argument('--num_classes', type=int, default=2)
        parser.add_argument('--data', type=str, default=None,
                            help='Path to image or video for prediction')
        parser.add_argument('--crop_height', type=int, default=720,
                            help='Height of cropped/resized input image to network')
        parser.add_argument('--crop_width', type=int, default=960,
                            help='Width of cropped/resized input image to network')
        parser.add_argument('--cuda', type=str, default='0',
                            help='GPU ids used for training')
        parser.add_argument('--use_gpu', type=bool, default=True,
                            help='Whether to user gpu for training')
        parser.add_argument('--csv_path', type=str, default="luotu.txt")
        # required=True — a default would never be used, so none is given
        parser.add_argument('--save_path', type=str,
                            required=True, help='Path to save predict image')
        parser.add_argument('--context_path', type=str, default="resnet18")
        params = [
            '--image',
            '--data', 'd.jpg',
            '--cuda', '0',
            '--save_path', 'out.png',
            '--context_path', 'resnet18'
        ]
        self.args = parser.parse_args(params)

        # build model
        os.environ['CUDA_VISIBLE_DEVICES'] = "0"
        model = BiSeNet(CLASS_NUM, self.args.context_path)
        self.model = model.cuda()
        # load pretrained model if exists
        print('load model from %s ...' % MODEL_PATH)
        self.model.load_state_dict(torch.load(MODEL_PATH))
        print('Done!')
        self.model.eval()

        # Export to ONNX.  Dummy input is NCHW, so it must be
        # (1, 3, crop_height, crop_width) = (1, 3, 720, 960); the original
        # code passed (960, 720), i.e. H and W swapped.
        dummy_input = torch.randn(
            1, 3, self.args.crop_height, self.args.crop_width).cuda()
        onnx_path = 'action.onnx'  # was 'ction.onnx' (typo)
        print("----- exporting pt model to onnx -----")
        torch.onnx.export(self.model, dummy_input, onnx_path,
                          export_params=True,
                          input_names=['input'], output_names=['output'])
        print('finish!!!')

    @torch.inference_mode()
    def __call__(self, image_bgr):
        """Segment a BGR image.

        Args:
            image_bgr (numpy.ndarray): (h, w, 3) BGR image.

        Returns:
            uint8 array of shape (h, w, 3): colour-coded segmentation mask
            at the original image size (see colour_code_segmentation_my).
        """
        # Network was trained on RGB input.
        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
        src_h, src_w = image_rgb.shape[:2]
        image = cv2.resize(image_rgb,
                           (self.args.crop_width, self.args.crop_height),
                           interpolation=cv2.INTER_CUBIC)
        image = transforms.ToTensor()(image)
        image = transforms.Normalize(
            (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))(image).unsqueeze(0)

        predict = self.model(image.cuda()).squeeze()
        predict = reverse_one_hot(predict)
        predict = predict.cpu().numpy().astype(np.uint8)
        # Class-label maps must not be interpolated between classes:
        # use nearest-neighbour when scaling back to the source size.
        predict = cv2.resize(predict, (src_w, src_h),
                             interpolation=cv2.INTER_NEAREST)
        predict = colour_code_segmentation_my(predict, None).astype(np.uint8)
        if DEBUG:
            print(predict.shape)
        return predict


def init():
    """Build the segmentation model and return it as the algorithm handle."""
    return Model()


def process_image(handle=None, input_image=None, args=None, **kwargs):
    """Run inference on input_image and write the resulting mask to disk.

    Attributes:
        handle: algorithm handle returned by init()
        input_image (numpy.ndarray): image to be processed, format: (h, w, c), BGR
        args: string in JSON format, format: {
            "mask_output_path": "/path/to/output/mask.png"
        }
    Returns:
        JSON string: {"mask": "<mask_output_path>"}
    """
    opts = json.loads(args)
    mask_output_path = opts['mask_output_path']

    mask = handle(input_image)
    if DEBUG:
        # Debug preview: predicted mask stacked on top of the resized input.
        preview_mask = cv2.resize(mask, (960, 640))
        preview_input = cv2.resize(input_image, (960, 640))
        cv2.imwrite(mask_output_path, cv2.vconcat([preview_mask, preview_input]))
    else:
        # Only the first channel carries the class id (0/1 per pixel, see
        # colour_code_segmentation_my); split lazily — the original computed
        # it even on the DEBUG branch where it was unused.
        first_channel, _, _ = cv2.split(mask)
        cv2.imwrite(mask_output_path, first_channel)
    return json.dumps({'mask': mask_output_path}, indent=4)


if __name__ == '__main__':
    model = init()

    import glob
    # Rebind the module-level flag so process_image writes debug previews.
    DEBUG = True
    # enumerate replaces the original manual `i = 0 / i += 1` counter.
    for i, p in enumerate(glob.glob("/home/data/*/*.jpg")):
        print(p)
        img = cv2.imread(p)
        request = json.dumps({'mask_output_path': f'out/{i}.png'})
        process_image(model, img, request)
