import os
import cv2
import numpy as np
from tqdm import tqdm
from segmentor.mmseg.apis import inference_segmentor, init_segmentor

# Overlay Cityscapes-trainId segmentation results on KITTI training images.
# Classes of interest (Cityscapes trainIds): 11=person, 12=rider, 13=car,
# 17=motorcycle, 18=bicycle. Everything else is left unhighlighted.
root_path = "detector/data/kitti/training"
image_path = "image_2"
name_path = "detector/data/kitti/ImageSets/train.txt"
all_ids = []
with open(name_path, 'r') as f:
    all_ids.extend([s.strip('\n').zfill(6) + '.png' for s in f.readlines()])
config_file = './segmentor/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py'
checkpoint_file = './segmentor/checkpoints/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth'
model = init_segmentor(config_file, checkpoint_file, device='cuda:1')
save_path = "detector/data/kitti/training/deeplabv3plus_vis/image_2/train"
# cv2.imwrite silently returns False when the target directory is missing,
# so create it up front instead of losing every output image.
os.makedirs(save_path, exist_ok=True)
for sample_id in tqdm(all_ids):  # avoid shadowing the builtin `id`
    path = os.path.join(root_path, image_path, sample_id)
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    if img is None:
        # Missing/corrupt image: skip instead of crashing on zeros_like(None).
        print(f"Warning: could not read image {path}, skipping.")
        continue
    # Boolean mask per trainId; result is (H, W) after squeezing the batch dim.
    result = np.array(inference_segmentor(model, path), dtype=np.uint8).squeeze(0)
    mask = np.zeros_like(img, dtype=np.uint8)  # already background (0,0,0)
    # NOTE: the original explicitly painted background [0,0,0], a no-op on a
    # zero-initialized mask; only the highlighted classes need assignment.
    mask[np.logical_or(result == 17, result == 18)] = [204, 0, 102]  # Bicycle / motorcycle
    mask[result == 13] = [0, 0, 128]    # Car
    mask[result == 11] = [0, 215, 255]  # Person
    mask[result == 12] = [255, 144, 30]  # Rider
    # Blend: keep the full image and add the (mostly black) mask at 0.7 weight.
    mask_img = cv2.addWeighted(src1=img, alpha=1.0, src2=mask, beta=0.7, gamma=0)
    if not cv2.imwrite(os.path.join(save_path, sample_id), mask_img):
        print(f"Warning: failed to write {os.path.join(save_path, sample_id)}")
