import os
import argparse
import numpy as np
import torch
torch.multiprocessing.set_start_method("spawn", force=True)
from torch.utils import data
# from networks.CE2P import Res_Deeplab
# from dataset.datasets import LIPDataSet
import os
import torchvision.transforms as transforms
from utils.miou import compute_mean_ioU
from copy import deepcopy
import cv2
from PIL import Image
from utils.transforms import BGR2RGB_transform
import os
import torch
import argparse
import numpy as np
from PIL import Image
from tqdm import tqdm
from utils.transforms import transform_parsing
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
import pickle
import networks
from utils.transforms import transform_logits
# from datasets.simple_extractor_dataset import SimpleFolderDataset
from utils.transforms import get_affine_transform

class SimpleFolderDataset(Dataset):
    """Dataset over a two-level folder tree: ``root/<id>/<image file>``.

    Each item is an image affine-warped to ``input_size`` (the source
    aspect ratio is preserved by padding the smaller side) plus a meta
    dict carrying everything needed to invert the warp later.
    """

    def __init__(self, root, input_size=[512, 512], transform=None):
        self.root = root
        self.transform = transform
        # Target aspect ratio (width / height) enforced by _xywh2cs.
        self.aspect_ratio = input_size[1] * 1.0 / input_size[0]
        self.input_size = np.asarray(input_size)
        # First level: one sub-directory per id; second level: image files.
        self.ids = [os.path.join(self.root, sub) for sub in os.listdir(self.root)]
        self.file_list = []
        for id_dir in self.ids:
            self.file_list += [os.path.join(id_dir, img_path) for img_path in os.listdir(id_dir)]
        # Mirror each id directory under the output root.  (The original
        # line passed the *list* self.ids to os.listdir and referenced an
        # undefined loop variable, so it raised at construction time.)
        self.save_phrasing = [os.path.join(SAVE_DIRECTORY, os.path.basename(id_dir))
                              for id_dir in self.ids]

    def __len__(self):
        return len(self.file_list)

    def _box2cs(self, box):
        """Convert an (x, y, w, h) box to the (center, scale) pair."""
        x, y, w, h = box[:4]
        return self._xywh2cs(x, y, w, h)

    def _xywh2cs(self, x, y, w, h):
        """Return the box center and an aspect-corrected (w, h) scale."""
        center = np.zeros((2), dtype=np.float32)
        center[0] = x + w * 0.5
        center[1] = y + h * 0.5
        # Pad the smaller side so w / h matches self.aspect_ratio.
        if w > self.aspect_ratio * h:
            h = w * 1.0 / self.aspect_ratio
        elif w < self.aspect_ratio * h:
            w = h * self.aspect_ratio
        scale = np.array([w, h], dtype=np.float32)
        return center, scale

    def __getitem__(self, index):
        """Load one image, warp it to input_size, apply self.transform.

        Returns:
            (tensor, meta): transformed image and the warp parameters
            needed to map predictions back to the original image.
        """
        img_path = self.file_list[index]
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        h, w, _ = img.shape
        # Center/scale of the full image; rotation is fixed to 0.
        person_center, s = self._box2cs([0, 0, w - 1, h - 1])
        r = 0
        trans = get_affine_transform(person_center, s, r, self.input_size)
        image = cv2.warpAffine(
            img,
            trans,
            (int(self.input_size[1]), int(self.input_size[0])),
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=(0, 0, 0))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = self.transform(image)
        meta = {
            'name': img_path,
            'center': person_center,
            'height': h,
            'width': w,
            'scale': s,
            'rotation': r
        }

        return image, meta

def multi_scale_testing(model, batch_input_im, crop_size=[512, 512], flip=False, multi_scales=[1]):
    """Run ``model`` on a batch and fuse per-scale parsing outputs.

    Note: per-scale resizing is currently disabled, so every entry in
    ``multi_scales`` feeds the model the same input.

    Returns:
        parsing: (H, W) integer label map (argmax over classes), numpy.
        fused:   (H, W, C) averaged score map, numpy.
    """
    # Channel pairs swapped when averaging the horizontally flipped pass
    # (left/right-symmetric labels).
    flipped_idx = (15, 14, 17, 16, 19, 18)

    # Normalise the batch to exactly 4 dims (N, C, H, W).
    if len(batch_input_im.shape) > 4:
        batch_input_im = batch_input_im.squeeze()
    if len(batch_input_im.shape) == 3:
        batch_input_im = batch_input_im.unsqueeze(0)

    upsample = torch.nn.Upsample(size=crop_size, mode='bilinear', align_corners=True)

    per_scale = []
    for _scale in multi_scales:
        scores = model(batch_input_im)[0][-1]
        fused = scores[0]
        if flip:
            mirrored = scores[1]
            mirrored[14:20, :, :] = mirrored[flipped_idx, :, :]
            fused += mirrored.flip(dims=[-1])
            fused *= 0.5
        per_scale.append(upsample(fused.unsqueeze(0))[0])

    fused = torch.stack(per_scale).mean(0)
    fused = fused.permute(1, 2, 0)  # HWC
    labels = torch.argmax(fused, dim=2)
    return labels.data.cpu().numpy(), fused.data.cpu().numpy()


def vis_parsing_maps(im, parsing_anno, parsing, id="", save_im=False, save_path='', im_name='1.png'):
    """Overlay a parsing label map on an image and optionally save results.

    Args:
        im: RGB image (array-like, HxWx3).
        parsing_anno: (H, W) integer label map to visualise.
        parsing: raw score map — unused here, kept for interface
            compatibility with the caller.
        id: sub-directory name created under anno/ and weights_img/.
        save_im: when True, write the raw label map (.png) and the blended
            visualisation to disk.
        save_path: output root directory.
        im_name: source file name; its stem (name minus 4-char extension)
            names the outputs.
    """
    # Palette for up to 24 parts; index 0 (background) is never drawn.
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
                   [255, 0, 85], [255, 0, 170],
                   [0, 255, 0], [85, 255, 0], [170, 255, 0],
                   [0, 255, 85], [0, 255, 170],
                   [0, 0, 255], [85, 0, 255], [170, 0, 255],
                   [0, 85, 255], [0, 170, 255],
                   [255, 255, 0], [255, 255, 85], [255, 255, 170],
                   [255, 0, 255], [255, 85, 255], [255, 170, 255],
                   [0, 255, 255], [85, 255, 255], [170, 255, 255]]
    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)

    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    # Start from a white canvas; each labelled pixel gets its class colour.
    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255

    num_of_class = np.max(vis_parsing_anno)

    for pi in range(1, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]

    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # Blend 40% image + 60% colour mask; convert RGB->BGR for cv2 output.
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)

    # makedirs(exist_ok=True) replaces the race-prone exists()+mkdir pairs
    # and also creates any missing parent directories in one call.
    ann_path = os.path.join(save_path, 'anno', id)
    vis_path = os.path.join(save_path, 'weights_img', id)
    os.makedirs(ann_path, exist_ok=True)
    os.makedirs(vis_path, exist_ok=True)

    if save_im:
        stem = im_name[:-4]
        # Raw label map as a lossless png (pixel value == class id).
        cv2.imwrite(os.path.join(ann_path, stem + '.png'), vis_parsing_anno)
        # Blended visualisation at maximum JPEG quality.
        cv2.imwrite(os.path.join(vis_path, im_name), vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

# os.environ["CUDA_VISIBLE_DEVICES"] = "3"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# --- Script configuration: input crops, matching masks, output root. ---
DATA_DIRECTORY = './data/cvpr/test_crop_gaussian/crop13'
MASK_DIRECTORY = './data/cvpr/test_crop_gaussian/crop_mask13/'
SAVE_DIRECTORY = './data/cvpr/test_crop_gaussian/pred_crop/' #pred_hair_hat
if (not os.path.exists(SAVE_DIRECTORY)):
    os.mkdir(SAVE_DIRECTORY)
print(SAVE_DIRECTORY)
# h, w = map(int, args.input_size.split(','))
# Checkpoint to restore: a .pth.tar archive with a 'state_dict' entry.
restore_from = 'log_gaussian_face/schp_7_checkpoint.pth.tar'  #'./log_small_face/checkpoint_42.pth.tar'
crop_size=[512, 512]
# Fixed-size upsampler used by the main loop to bring every scale's
# output back to crop_size before fusing.
interp = torch.nn.Upsample(size=(crop_size[0],crop_size[1]), mode='bilinear', align_corners=True)
# input_size = (h, w)
NUM_CLASSES = 18
model = networks.init_model('CE2P_gaussian', num_classes=NUM_CLASSES, pretrained=None)# 
state_dict = torch.load(restore_from)['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
# The checkpoint keys carry a DataParallel "module." prefix; strip the
# first 7 characters of every key so the dict loads into a bare model.
for k, v in state_dict.items():
    name = k[7:]  # remove `module.`
    new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.cuda()
model.eval()
# ImageNet mean/std normalisation for the RGB channels.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
    transforms.ToTensor(),
    # BGR2RGB_transform(),
    normalize,
])


# One entry per id sub-directory.  NOTE(review): maskids and save_phrasing
# join DATA_DIRECTORY's listing onto the other roots — this assumes the
# mask/output trees mirror the data tree's sub-directory names; confirm.
ids = [os.path.join(DATA_DIRECTORY, path) for path in os.listdir(DATA_DIRECTORY)]
maskids = [os.path.join(MASK_DIRECTORY, path) for path in os.listdir(DATA_DIRECTORY)]
save_phrasing = [os.path.join(SAVE_DIRECTORY, path) for path in os.listdir(DATA_DIRECTORY)]
def _box2cs(box, aspect_ratio):
    """Convert an (x, y, w, h) box into the (center, scale) pair used by
    the affine-transform helpers."""
    return _xywh2cs(box[0], box[1], box[2], box[3], aspect_ratio)
def _xywh2cs(x, y, w, h, aspect_ratio):
    center = np.zeros((2), dtype=np.float32)
    center[0] = x + w * 0.5
    center[1] = y + h * 0.5
    if w > aspect_ratio * h:
        h = w * 1.0 / aspect_ratio
    elif w < aspect_ratio * h:
        w = h * aspect_ratio
    scale = np.array([w * 1.0, h * 1.0], dtype=np.float32)
    return center, scale

with torch.no_grad():
    # Iterate over the id sub-directories, then over each image inside.
    for n, id in enumerate(tqdm(ids)):
        imgs = [os.path.join(id, path) for path in os.listdir(id)]
        for i, im_path in enumerate(imgs):
            im = cv2.imread(im_path, cv2.IMREAD_COLOR)

            # The matching mask lives under MASK_DIRECTORY with the same
            # <id>/<name> layout, stored as .png instead of .jpg.
            ip = im_path.split('/')
            end_prefix = ip[-2] + '/' + ip[-1].replace('jpg', 'png')
            mask_path = MASK_DIRECTORY + end_prefix
            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)

            h, w, _ = im.shape
            # Affine-crop the whole image to crop_size, preserving aspect.
            aspect_ratio = crop_size[1] * 1.0 / crop_size[0]
            person_center, s = _box2cs(box=[0, 0, w - 1, h - 1], aspect_ratio=aspect_ratio)
            r = 0
            trans = get_affine_transform(person_center, s, r, crop_size, inv=0)
            input = cv2.warpAffine(
                im,
                trans,
                (int(crop_size[1]), int(crop_size[0])),
                flags=cv2.INTER_LINEAR,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=(0, 0, 0))
            mask_t = cv2.warpAffine(
                mask,
                trans,
                (int(crop_size[1]), int(crop_size[0])),
                flags=cv2.INTER_LINEAR,
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=(0, 0, 0))
            image = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
            image = transform(image)

            # Append the [0, 1]-normalised mask as a 4th input channel.
            mask_t = mask_t / 255.
            mask_t = np.array(mask_t, dtype=np.float32)
            mask_t = torch.from_numpy(mask_t)
            image = torch.cat((image, mask_t.unsqueeze(0)), dim=0)
            image = image.cuda()
            image = torch.unsqueeze(image, 0)

            # Left/right-symmetric label pairs for this 18-class model.
            flipped_idx = (3, 2, 5, 4, 12, 11, 14, 13)
            idx = (2, 3, 4, 5, 11, 12, 13, 14)
            flip = True
            ms_outputs = []
            multi_scales = [0.75, 1, 1.25, 1.5, 2]
            for sc in multi_scales:
                # BUGFIX: scale the ORIGINAL tensor on every iteration.
                # The previous code rebound `image` to the scaled result,
                # so the factors compounded across iterations
                # (0.75, 0.75, 0.9375, 1.41, 2.81 instead of the listed
                # scales).  The loop variable is also renamed from `s`,
                # which shadowed the person scale computed above.
                interp_im = torch.nn.Upsample(scale_factor=sc, mode='bilinear', align_corners=True)
                scaled_im = interp_im(image)

                if flip:
                    # Run original + mirrored image in one batch, swap the
                    # symmetric channels, un-flip and average the two.
                    flip_image = scaled_im.flip(dims=[-1])
                    batch_image_im = torch.cat((scaled_im, flip_image), dim=0)
                    outputs = model(batch_image_im.cuda())
                    parsing_output = outputs[0][1]
                    parsing = parsing_output[0]
                    parsing1 = parsing_output[1]
                    parsing1[idx, :, :] = parsing1[flipped_idx, :, :]
                    parsing += parsing1.flip(dims=[-1])
                    parsing *= 0.5
                    parsing = torch.unsqueeze(parsing, 0)
                else:
                    outputs = model(scaled_im.cuda())
                    parsing = outputs[0][1]
                # Bring every scale back to crop_size before fusing.
                parsing = interp(parsing)
                ms_outputs.append(parsing)
            # Average the per-scale score maps, then take the argmax label.
            parsing = torch.stack(ms_outputs).mean(0)
            parsing = interp(parsing).data.cpu().numpy()
            parsing_preds = np.asarray(np.argmax(parsing, axis=1), dtype=np.uint8)
            image = cv2.imread(im_path)
            parsing_preds = np.squeeze(parsing_preds)
            # Map the prediction back onto the original image geometry by
            # inverting the forward affine transform (nearest-neighbour to
            # keep labels discrete).
            trans_ann = cv2.invertAffineTransform(trans)
            parsing_result = cv2.warpAffine(
                    parsing_preds,
                    trans_ann,
                    (int(w), int(h)),
                    flags=cv2.INTER_NEAREST,
                    borderMode=cv2.BORDER_CONSTANT,
                    borderValue=(0))
            vis_parsing_maps(image, parsing_result, parsing, save_im=True,
                             save_path=SAVE_DIRECTORY, id=id[-4:], im_name=im_path[-13:])