import os
import argparse
import numpy as np
import torch
torch.multiprocessing.set_start_method("spawn", force=True)
from torch.utils import data
# from networks.CE2P import Res_Deeplab
# from dataset.datasets import LIPDataSet
import os
import torchvision.transforms as transforms
from utils.miou import compute_mean_ioU
from copy import deepcopy
import cv2
from PIL import Image
from utils.transforms import BGR2RGB_transform
import os
import torch
import argparse
import numpy as np
from PIL import Image
from tqdm import tqdm

from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset

import networks
from utils.transforms import transform_logits
# from datasets.simple_extractor_dataset import SimpleFolderDataset
from utils.transforms import get_affine_transform

class SimpleFolderDataset(Dataset):
    """Dataset over a two-level directory layout: root/<person_id>/<image>.

    Each item is the image affine-warped to ``input_size`` (aspect ratio is
    preserved by padding the person box — see ``_xywh2cs``) plus the metadata
    needed to map the parsing output back to the original resolution.
    """

    def __init__(self, root, input_size=[512, 512], transform=None):
        self.root = root
        self.transform = transform
        # Width/height ratio the person box is padded to before warping.
        self.aspect_ratio = input_size[1] * 1.0 / input_size[0]
        self.input_size = np.asarray(input_size)
        # One sub-directory per id under root; flatten into a flat file list.
        self.ids = [os.path.join(self.root, path) for path in os.listdir(self.root)]
        self.file_list = []
        for id_dir in self.ids:
            self.file_list += [os.path.join(id_dir, img_path)
                               for img_path in os.listdir(id_dir)]

    def __len__(self):
        return len(self.file_list)

    def _box2cs(self, box):
        """Convert an ``[x, y, w, h]`` box to a (center, scale) pair."""
        x, y, w, h = box[:4]
        return self._xywh2cs(x, y, w, h)

    def _xywh2cs(self, x, y, w, h):
        """Return the box center and a scale whose w/h equals ``aspect_ratio``.

        The shorter side is grown (never cropped) so the warped image keeps
        its content.
        """
        center = np.zeros((2), dtype=np.float32)
        center[0] = x + w * 0.5
        center[1] = y + h * 0.5
        if w > self.aspect_ratio * h:
            h = w * 1.0 / self.aspect_ratio
        elif w < self.aspect_ratio * h:
            w = h * self.aspect_ratio
        scale = np.array([w, h], dtype=np.float32)
        return center, scale

    def __getitem__(self, index):
        img_path = self.file_list[index]
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        if img is None:
            # Fail loudly here instead of crashing later on img.shape.
            raise IOError('cannot read image: %s' % img_path)
        h, w, _ = img.shape
        # The whole image is treated as the person bounding box.
        person_center, s = self._box2cs([0, 0, w - 1, h - 1])
        r = 0
        trans = get_affine_transform(person_center, s, r, self.input_size)
        input = cv2.warpAffine(
            img,
            trans,
            (int(self.input_size[1]), int(self.input_size[0])),
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=(0, 0, 0))
        # OpenCV loads BGR; the transform/normalization expects RGB.
        input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
        input = self.transform(input)
        meta = {
            'name': img_path,
            'center': person_center,
            'height': h,
            'width': w,
            'scale': s,
            'rotation': r
        }

        return input, meta

def multi_scale_testing(model, batch_input_im, crop_size=[512, 512], flip=False, multi_scales=[1]):
    """Evaluate the parsing model once per entry of ``multi_scales`` and fuse.

    NOTE: the per-scale resize is currently disabled, so every iteration
    feeds the same input; the outputs are still averaged.  Returns
    ``(label_map HxW, fused_scores HxWxC)`` as numpy arrays at ``crop_size``.
    """
    # Left/right part channels that must be swapped for a mirrored input.
    flipped_idx = (15, 14, 17, 16, 19, 18)

    # Normalize the input to exactly N x C x H x W.
    if batch_input_im.dim() > 4:
        batch_input_im = batch_input_im.squeeze()
    if batch_input_im.dim() == 3:
        batch_input_im = batch_input_im.unsqueeze(0)

    upsample = torch.nn.Upsample(size=crop_size, mode='bilinear', align_corners=True)
    per_scale = []
    for _ in multi_scales:
        scores = model(batch_input_im)[0][-1]
        fused = scores[0]
        if flip:
            # Second batch entry is assumed to be the mirrored image.
            mirrored = scores[1]
            mirrored[14:20, :, :] = mirrored[flipped_idx, :, :]
            fused = (fused + mirrored.flip(dims=[-1])) * 0.5
        per_scale.append(upsample(fused.unsqueeze(0))[0])

    fused_scores = torch.stack(per_scale).mean(0).permute(1, 2, 0)  # HWC
    labels = torch.argmax(fused_scores, dim=2)
    return labels.data.cpu().numpy(), fused_scores.data.cpu().numpy()


def vis_parsing_maps(im, parsing_anno, id="", save_im=False, save_path='', im_name='1.png'):
    """Overlay a parsing label map on an image and optionally save both.

    When ``save_im`` is True, writes the raw label map (PNG) under
    ``<save_path>/anno/<id>/`` and the color blend under
    ``<save_path>/weights_img/<id>/``.
    """
    # One BGR color per part label; label 0 (background) stays white.
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
                   [255, 0, 85], [255, 0, 170],
                   [0, 255, 0], [85, 255, 0], [170, 255, 0],
                   [0, 255, 85], [0, 255, 170],
                   [0, 0, 255], [85, 0, 255], [170, 0, 255],
                   [0, 85, 255], [0, 170, 255],
                   [255, 255, 0], [255, 255, 85], [255, 255, 170],
                   [255, 0, 255], [255, 85, 255], [255, 170, 255],
                   [0, 255, 255], [85, 255, 255], [170, 255, 255]]
    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)

    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    # White canvas; labeled pixels are painted with their part color below.
    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255

    num_of_class = np.max(vis_parsing_anno)

    for pi in range(1, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]

    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # Blend 40% image with 60% color mask.
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)

    # makedirs(exist_ok=True) creates missing parents and avoids the
    # exists()+mkdir() race of the previous implementation.
    ann_path = os.path.join(save_path, 'anno', id)
    vis_path = os.path.join(save_path, 'weights_img', id)
    os.makedirs(ann_path, exist_ok=True)
    os.makedirs(vis_path, exist_ok=True)

    print(im_name + "======")
    print(np.unique(vis_parsing_anno))
    if save_im:
        # splitext handles any extension length (the old im_name[:-4]
        # silently mangled names with non-3-char extensions).
        base, _ = os.path.splitext(im_name)
        cv2.imwrite(os.path.join(ann_path, base + '.png'), vis_parsing_anno)
        cv2.imwrite(os.path.join(vis_path, im_name), vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])


# ---------------------------------------------------------------------------
# Inference driver: load the checkpoint, run the parsing model over every
# image in DATA_DIRECTORY/<id>/, and save label maps + visualizations under
# SAVE_DIRECTORY via vis_parsing_maps().
# ---------------------------------------------------------------------------
INPUT_SIZE = (769, 769)
DATA_DIRECTORY = './data/cvpr/test/image/'
SAVE_DIRECTORY = './data/cvpr/test/pred_hair_hat/'
# exist_ok creates missing parents and avoids the check-then-create race.
os.makedirs(SAVE_DIRECTORY, exist_ok=True)
restore_from = './log_hair_hat/checkpoint_54.pth.tar'
NUM_CLASSES = 18

model = networks.init_model('resnet101', num_classes=NUM_CLASSES, pretrained=None)
state_dict = torch.load(restore_from)['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
    # Checkpoints saved from a DataParallel wrapper prefix keys with
    # 'module.'; strip it only when present so plain checkpoints load too.
    name = k[len('module.'):] if k.startswith('module.') else k
    new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.cuda()
model.eval()

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

transform = transforms.Compose([
    transforms.Resize((384, 384)),
    transforms.ToTensor(),
    normalize,
])

# Upsample the network logits back to the transform resolution.
interp = torch.nn.Upsample(size=(384, 384), mode='bilinear', align_corners=True)
ids = [os.path.join(DATA_DIRECTORY, path) for path in os.listdir(DATA_DIRECTORY)]

with torch.no_grad():
    for n, id_dir in enumerate(ids):
        # basename is robust where the old id[-4:] slice assumed 4-char ids.
        person_id = os.path.basename(os.path.normpath(id_dir))
        imgs = [os.path.join(id_dir, path) for path in os.listdir(id_dir)]
        for im_path in imgs:
            # convert('RGB') guards against grayscale/RGBA inputs that would
            # break the 3-channel normalization.
            image = Image.open(im_path).convert('RGB')
            image = transform(image).cuda()
            image = torch.unsqueeze(image, 0)
            outputs = model(image)
            parsing = outputs[0][1]
            parsing = interp(parsing).data.cpu().numpy()
            parsing_preds = np.asarray(np.argmax(parsing, axis=1), dtype=np.uint8)
            # Resize labels back to the original resolution; nearest keeps
            # them integral class ids.
            orig_img = cv2.imread(im_path)
            parsing_preds = np.squeeze(parsing_preds)
            parsing_preds = cv2.resize(parsing_preds,
                                       (orig_img.shape[1], orig_img.shape[0]),
                                       interpolation=cv2.INTER_NEAREST)
            vis_parsing_maps(orig_img, parsing_preds, save_im=True,
                             save_path=SAVE_DIRECTORY, id=person_id,
                             im_name=os.path.basename(im_path))