import os
import argparse
import numpy as np
import torch
torch.multiprocessing.set_start_method("spawn", force=True)
from torch.utils import data
# from networks.CE2P import Res_Deeplab
# from dataset.datasets import LIPDataSet
import os
import sys
sys.path.append(os.getcwd())
import torchvision.transforms as transforms
from utils.miou import compute_mean_ioU
from copy import deepcopy
import cv2
from PIL import Image
from utils.transforms import BGR2RGB_transform

import torch
import argparse
import numpy as np
from PIL import Image
from tqdm import tqdm
from utils.transforms import transform_parsing
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
import pickle
import networks
from utils.transforms import transform_logits
# from datasets.simple_extractor_dataset import SimpleFolderDataset
from utils.transforms import get_affine_transform

def vis_parsing_maps(im, parsing_anno, parsing, id="",save_im=False, save_path='',im_name='1.png', ):
    """Overlay a color-coded parsing annotation onto an image and optionally save outputs.

    Args:
        im: source image; converted with COLOR_RGB2BGR before blending, so RGB
            channel order is assumed — TODO confirm against callers.
        parsing_anno: 2-D integer label map (class ids; 0 is treated as background).
        parsing: raw per-class probability volume; currently unused here but kept
            for interface compatibility (a pickle-dump path existed previously).
        id: subdirectory name created under the anno/weights_img/prob output roots.
        save_im: when True, write the raw label map (PNG) and the blended
            visualization (JPEG, quality 100) to disk.
        save_path: root output directory.
        im_name: file name of the source image; its extension is replaced with
            .png for the annotation output.
    """
    # Fixed palette for up to 24 classes; index pi colors class pi.
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
                   [255, 0, 85], [255, 0, 170],
                   [0, 255, 0], [85, 255, 0], [170, 255, 0],
                   [0, 255, 85], [0, 255, 170],
                   [0, 0, 255], [85, 0, 255], [170, 0, 255],
                   [0, 85, 255], [0, 170, 255],
                   [255, 255, 0], [255, 255, 85], [255, 255, 170],
                   [255, 0, 255], [255, 85, 255], [255, 170, 255],
                   [0, 255, 255], [85, 255, 255], [170, 255, 255]]
    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)

    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    # White canvas; only labeled (non-zero) pixels receive a palette color.
    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255

    num_of_class = np.max(vis_parsing_anno)

    for pi in range(1, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]

    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # Blend 40% of the (BGR-converted) image with 60% of the color annotation.
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)

    # Build output paths with os.path.join and create them with
    # makedirs(exist_ok=True): unlike the exists()+mkdir pattern this also
    # creates missing parent directories and has no check-then-act race.
    ann_path = os.path.join(save_path, 'anno', id)
    vis_path = os.path.join(save_path, 'weights_img', id)
    prob_path = os.path.join(save_path, 'prob', id)
    for out_dir in (ann_path, vis_path, prob_path):
        os.makedirs(out_dir, exist_ok=True)

    if save_im:
        # splitext is robust to extensions of any length (im_name[:-4] assumed 4).
        base_name = os.path.splitext(im_name)[0]
        cv2.imwrite(os.path.join(ann_path, base_name + '.png'), vis_parsing_anno)
        cv2.imwrite(os.path.join(vis_path, im_name), vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])


# os.environ["CUDA_VISIBLE_DEVICES"] = "3"
# Input crops and output root for the ensembled predictions.
DATA_DIRECTORY = './data/cvpr/test_crop_gaussian/crop13'
SAVE_DIRECTORY = './data/cvpr/test_crop_gaussian/ensemble/' #pred_hair_hat
# Probability dumps of the three models being ensembled (substituted for
# "crop13" in each image path below).
pred_dir = ['pred_crop13/prob/','pred_crop13_ocr/prob/','pred_crop13_gcnet/prob/']
# makedirs(exist_ok=True): creates missing parents and avoids the
# exists()+mkdir check-then-act race.
os.makedirs(SAVE_DIRECTORY, exist_ok=True)
print(SAVE_DIRECTORY)
restore_from = 'log_small_face_all_augmentation/schp_7_checkpoint.pth.tar'   #'./log_small_face/checkpoint_42.pth.tar'
# Network input size (height, width); also fixes the warp target below.
crop_size=[512, 512]
interp = torch.nn.Upsample(size=(crop_size[0],crop_size[1]), mode='bilinear', align_corners=True)
NUM_CLASSES = 18

# One entry per identity directory under DATA_DIRECTORY.
ids = [os.path.join(DATA_DIRECTORY, path) for path in os.listdir(DATA_DIRECTORY)]
save_phrasing = [os.path.join(SAVE_DIRECTORY, path) for path in os.listdir(DATA_DIRECTORY)]
def _box2cs(box, aspect_ratio):
    x, y, w, h = box[:4]
    return _xywh2cs(x, y, w, h, aspect_ratio)
def _xywh2cs(x, y, w, h, aspect_ratio):
    center = np.zeros((2), dtype=np.float32)
    center[0] = x + w * 0.5
    center[1] = y + h * 0.5
    if w > aspect_ratio * h:
        h = w * 1.0 / aspect_ratio
    elif w < aspect_ratio * h:
        w = h * aspect_ratio
    scale = np.array([w * 1.0, h * 1.0], dtype=np.float32)
    return center, scale

# Main ensemble loop: for every image in every identity directory, average the
# per-model probability volumes, take the argmax class map, warp it back to the
# original image geometry, and save the visualization.
for id in tqdm(ids):
    imgs = [os.path.join(id, path) for path in os.listdir(id)]
    for im_path in imgs:
        im = cv2.imread(im_path, cv2.IMREAD_COLOR)
        h, w, _ = im.shape
        # Aspect ratio of the network input (width / height).
        aspect_ratio = crop_size[1] * 1.0 / crop_size[0]
        # Center/scale for the whole image; only the forward transform is
        # needed — we invert it below to map predictions back.
        person_center, s = _box2cs(box=[0, 0, w - 1, h - 1], aspect_ratio=aspect_ratio)
        r = 0
        trans = get_affine_transform(person_center, s, r, crop_size, inv=0)
        # NOTE: the original also warped the image into a 512x512 `input`
        # tensor here, but that result was never used — removed as dead code.

        # Load each model's probability dump for this image.
        # BUG FIX: the inner index previously reused `i`, shadowing the outer
        # image index; iterate the directories directly instead.
        annos = []
        for model_dir in pred_dir:
            anno_path = im_path.replace("crop13", model_dir).replace(".jpg", ".pkl")
            # Context manager guarantees the file is closed even on error.
            with open(anno_path, 'rb') as pkl_file:
                annos.append(pickle.load(pkl_file))
        # Ensemble: mean over models, then argmax over the class axis.
        parsing = np.stack(annos).mean(0)
        parsing_preds = np.asarray(np.argmax(parsing, axis=1), dtype=np.uint8)
        image = cv2.imread(im_path)
        parsing_preds = np.squeeze(parsing_preds)
        # Map the crop-space prediction back to original image coordinates
        # with nearest-neighbor so class ids are never interpolated.
        trans_ann = cv2.invertAffineTransform(trans)
        parsing_result = cv2.warpAffine(
                parsing_preds,
                trans_ann,
                (int(w), int(h)),
                flags=cv2.INTER_NEAREST,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=(0))
        # id[-4:] / im_path[-13:] rely on fixed-width directory and file names
        # in this dataset — TODO confirm the naming convention holds.
        vis_parsing_maps(image, parsing_result, parsing, save_im=True,
                         save_path=SAVE_DIRECTORY, id=id[-4:], im_name=im_path[-13:])