#coding=utf8
# Copyright (c) 2016 Tinydot. inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.


import random
from PIL import Image, ImageEnhance, ImageOps
import torchvision.transforms as torch_tr
import numpy as np
import copy
from .instanceseg import instance_edt_center_offset, create_pos_map
import os
import json
from multiprocessing import cpu_count, Process, Queue
import sys
import time


def _f(value):
    return round(value, 4)


def do_scale_crop_in_image(img, mask, scale_amt, x1, y1, target_w, target_h, expand=None):
    """Resize img/mask by scale_amt, optionally pad, then crop a window.

    The image is resampled bicubically and the mask with nearest-neighbour
    so label values are preserved. `expand`, when given, is a dict with
    "border", "pad_color" (image fill) and "ignore_mask" (mask fill).
    Returns the (target_w x target_h) crops whose upper-left corner is
    (x1, y1).
    """
    assert img.size == mask.size
    scaled_size = tuple(int(side * scale_amt) for side in img.size)
    scaled_img = img.resize(scaled_size, Image.BICUBIC)
    scaled_mask = mask.resize(scaled_size, Image.NEAREST)
    if expand is not None:
        pad_border = tuple(expand["border"])
        scaled_img = ImageOps.expand(scaled_img, border=pad_border,
                                     fill=tuple(expand["pad_color"]))
        scaled_mask = ImageOps.expand(scaled_mask, border=pad_border,
                                      fill=expand["ignore_mask"])
    window = (x1, y1, x1 + target_w, y1 + target_h)
    return scaled_img.crop(window), scaled_mask.crop(window)


def args_scale_crop_in_image(target_w, target_h, w, h,
                             scale_amt_min,
                             scale_amt_max,
                             centroid=None,
                             ignore_mask=255,
                             ignore_color=(0, 0, 0),
                             no_scale_prob=0.1):
    """Draw random parameters for `do_scale_crop_in_image`.

    target_w/target_h: size of the crop to produce; w/h: source image size.
    scale_amt_min/scale_amt_max: uniform range for the random scale factor.
    centroid: optional (x, y) point the crop window must contain.
    ignore_mask/ignore_color: fill values recorded for padding, if needed.
    no_scale_prob: probability of keeping the original scale (1.0).

    Returns ("do_scale_crop_in_image", kwargs) so the sampled choice can be
    serialized and replayed later.
    """
    # with probability no_scale_prob skip scaling entirely
    if random.random() < no_scale_prob:
        scale_amt = 1.0
    else:
        scale_amt = _f(random.uniform(scale_amt_min, scale_amt_max))
    w, h = [int(i * scale_amt) for i in [w, h]]
    ret = {}
    # padding: if the scaled image is smaller than the crop in either
    # dimension, pad symmetrically; the +1 after integer division makes the
    # two borders together at least cover the deficit
    if target_h > h:
        pad_h = (target_h - h) // 2 + 1
    else:
        pad_h = 0
    if target_w > w:
        pad_w = (target_w - w) // 2 + 1
    else:
        pad_w = 0
    border = (pad_w, pad_h, pad_w, pad_h)
    if pad_h or pad_w:
        ret.update({"expand": {"border": border,
                               "pad_color": ignore_color,
                               "ignore_mask": ignore_mask}})

        # after padding the effective canvas is treated as target size
        w, h = target_w, target_h

    if centroid is not None:
        # choose a window that still contains the (scaled) centroid, then
        # clamp it to lie fully inside the image
        centroid = [int(c * scale_amt) for c in centroid]
        c_x, c_y = centroid
        max_x = w - target_w
        max_y = h - target_h
        x1 = random.randint(c_x - target_w, c_x)
        x1 = min(max_x, max(0, x1))
        y1 = random.randint(c_y - target_h, c_y)
        y1 = min(max_y, max(0, y1))
    else:
        # uniform random crop anywhere inside the (possibly padded) image
        if w == target_w:
            x1 = 0
        else:
            x1 = random.randint(0, w - target_w)
        if h == target_h:
            y1 = 0
        else:
            y1 = random.randint(0, h - target_h)

    ret.update({"x1": x1, "y1": y1,
                "target_w": target_w,
                "target_h": target_h, "scale_amt": scale_amt})
    return "do_scale_crop_in_image", ret


def do_horizontally_flip(img, mask, do_flip):
    """Mirror both image and mask left-right when do_flip is True."""
    if not do_flip:
        return img, mask
    return (img.transpose(Image.FLIP_LEFT_RIGHT),
            mask.transpose(Image.FLIP_LEFT_RIGHT))


def args_horizontally_flip():
    """Decide with probability 0.5 whether to mirror horizontally.

    Returns the replayable ("do_horizontally_flip", kwargs) pair.
    """
    flip = random.random() < 0.5
    return "do_horizontally_flip", {"do_flip": flip}


def args_color_jitter(color_aug=0.25):
    if color_aug <= 0 or random.random() < 0.5:
        return "do_color_jitter", {"func_list": [],
                                   "brightness_factor": None,
                                   "contrast_factor": None,
                                   "saturation_factor": None,
                                   "hue_factor": None,
                                   }
    brightness_factor = _f(np.random.uniform(max(0.0, 1.0 - color_aug), 1.0 + color_aug))
    contrast_factor = _f(np.random.uniform(max(0.0, 1.0 - color_aug), 1.0 + color_aug))
    saturation_factor = _f(np.random.uniform(max(0.0, 1.0 - color_aug), 1.0 + color_aug))
    hue_factor = _f(np.random.uniform(-color_aug, color_aug))
    func_list = ["adjust_brightness", "adjust_contrast", "adjust_saturation", "adjust_hue"]
    np.random.shuffle(func_list)
    return "do_color_jitter", {"brightness_factor": brightness_factor,
                               "contrast_factor": contrast_factor,
                               "saturation_factor": saturation_factor,
                               "hue_factor": hue_factor,
                               "func_list": func_list,
                               }


def adjust_brightness(img, brightness_factor):
    """Scale image brightness; a factor of 1.0 leaves it unchanged."""
    return ImageEnhance.Brightness(img).enhance(brightness_factor)


def adjust_contrast(img, contrast_factor):
    """Scale image contrast; a factor of 1.0 leaves it unchanged."""
    return ImageEnhance.Contrast(img).enhance(contrast_factor)


def adjust_saturation(img, saturation_factor):
    """Scale color saturation; a factor of 1.0 leaves it unchanged."""
    return ImageEnhance.Color(img).enhance(saturation_factor)


def adjust_hue(img, hue_factor):
    """Rotate the hue channel of a PIL image by hue_factor (in [-0.5, 0.5]).

    Single-channel / non-color modes ('L', '1', 'I', 'F') are returned
    unchanged, since they carry no hue information.

    Raises:
        ValueError: if hue_factor lies outside [-0.5, 0.5].
    """
    if not (-0.5 <= hue_factor <= 0.5):
        # bug fix: the original template had no placeholder, so
        # .format(hue_factor) silently dropped the offending value
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
    input_mode = img.mode
    if input_mode in {'L', '1', 'I', 'F'}:
        return img
    h, s, v = img.convert('HSV').split()
    np_h = np.array(h, dtype=np.uint8)
    # uint8 overflow wrap-around implements the circular hue rotation
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')
    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return img


def do_color_jitter(img, mask, brightness_factor, contrast_factor, saturation_factor, hue_factor, func_list):
    """Apply the color adjustments named in func_list to img, in order.

    Factors that are None are skipped; the mask is returned unchanged.

    Bug fixes vs the original:
      * the guard was inverted (`len(transforms) > 0` returned the input
        untouched), so the jitter was never actually applied;
      * the lambdas captured the loop variables late, so every composed
        step would have applied the LAST function/factor pair.
    The functions are now applied directly, which fixes both issues (and
    drops the unnecessary torchvision Compose indirection).
    """
    funcs = {"adjust_brightness": (adjust_brightness, brightness_factor),
             "adjust_contrast":   (adjust_contrast, contrast_factor),
             "adjust_saturation": (adjust_saturation, saturation_factor),
             "adjust_hue":        (adjust_hue, hue_factor)}
    for name in func_list:
        func_adj, func_arg = funcs[name]
        if func_arg is None:
            continue
        img = func_adj(img, func_arg)
    return img, mask


def do_instance_max_offset(img, mask, meta, pre_process_mask, min_count, weight, return_center):
    """Build per-pixel center/offset maps for an instance mask.

    With meta=None the instance centers are computed from scratch and
    (img, offset, meta) is returned so the meta can be recorded. With a
    precomputed meta dict ({id: {"pos": center, ...}}), dense maps are
    rebuilt from it: each instance pixel gets either its center coordinates
    (return_center=True) or the vector from the pixel to the center.

    When weight is not None an additional per-pixel weight map is returned:
    1 for every instance pixel, plus a bonus that grows as the offset norm
    shrinks below `weight` (i.e. pixels close to the center weigh more);
    background pixels get 0.

    Bug fix: `np.int` / `np.float` were deprecated aliases removed in
    NumPy 1.24 — replaced with the builtin `int` / `float`, which is what
    the aliases resolved to.
    """
    instance_mask = pre_process_mask(mask)
    if meta is None:
        offset, meta = instance_edt_center_offset(instance_mask,
                                                  return_offset=True,
                                                  min_count=min_count)
        return img, offset, meta
    w, h = instance_mask.shape
    map_offset = np.zeros((w, h, 2)).astype(int)
    map_center = np.zeros((w, h, 2)).astype(int)
    map_static_pos = create_pos_map(w, h, norm=False)
    for u in meta.keys():
        pos = instance_mask == int(u)
        f_center = meta[u]["pos"]
        # vector from each pixel of instance u to its center
        map_offset[pos] = f_center - map_static_pos[pos]
        if return_center:
            map_center[pos] = f_center
    if weight is not None:
        offset_w = np.clip(weight - np.linalg.norm(map_offset, axis=-1), 0.0, weight)
        offset_w[instance_mask == 0] = 0.0
        instance_weight = (instance_mask > 0).astype(float) + offset_w
        return img, mask, map_center if return_center else map_offset, instance_weight
    return img, mask, map_center if return_center else map_offset


def create_gaussian_map(sigma, max_sigma=8):
    """Build a square Gaussian kernel of side 6*sigma + 3.

    sigma is clamped to max_sigma. Note the extra +1 added to both absolute
    distances inside the exponent (kept from the original): the kernel's
    peak value is exp(-1 / sigma**2), not 1 — callers that need an exact 1
    at the center must set it themselves.
    Returns (clamped_sigma, kernel).
    """
    sigma = min(sigma, max_sigma)
    size = 6 * sigma + 3
    center = 3 * sigma + 1
    dist_x = np.abs(np.arange(size, dtype=float) - center) + 1
    dist_y = dist_x[:, np.newaxis]
    kernel = np.exp(-(dist_x ** 2 + dist_y ** 2) / (2 * sigma ** 2))
    return sigma, kernel


def center_to_heatmap(mask, meta=None,
                      pre_process_mask=None, min_count=-1, max_sigma=8):
    """Render per-instance centers as a Gaussian heatmap.

    mask: 2-D instance-id array. NOTE(review): the output shape and the
    bounds below come from `mask` itself, so mask must already be an
    ndarray even when pre_process_mask is supplied (the processed copy is
    only used for center extraction).
    meta: optional {id: {"pos": center, "dis": radius}} dict; when None it
    is computed via instance_edt_center_offset.
    Returns a float32 array with one Gaussian bump per center; each exact
    center pixel is forced to 1.
    """
    instance_mask = mask
    cts_meta = meta
    if callable(pre_process_mask):
        instance_mask = pre_process_mask(mask)
    if meta is None:
        cts_meta = instance_edt_center_offset(instance_mask,
                                              return_offset=False, min_count=min_count)

    # per instance
    ret_heat_map = np.zeros_like(mask, dtype=np.float32)
    height, width = mask.shape
    for k, v in cts_meta.items():
        # "pos" is indexed (row, col) here; sigma scales with the
        # instance's recorded radius, capped at max_sigma
        y, x = int(round(v["pos"][0])), int(round(v["pos"][1]))
        sigma, g = create_gaussian_map(max(1, round(v["dis"])), max_sigma)

        # upper left
        ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1))
        # bottom right
        br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2))

        # start and end indices in default Gaussian image
        gaussian_x0, gaussian_x1 = max(0, -ul[0]), min(br[0], width) - ul[0]
        gaussian_y0, gaussian_y1 = max(0, -ul[1]), min(br[1], height) - ul[1]

        # start and end indices in center heatmap image
        center_x0, center_x1 = max(0, ul[0]), min(br[0], width)
        center_y0, center_y1 = max(0, ul[1]), min(br[1], height)

        # paste with element-wise max so overlapping instances keep the
        # stronger response
        ret_heat_map[center_y0:center_y1, center_x0:center_x1] = np.maximum(
            ret_heat_map[center_y0:center_y1, center_x0:center_x1],
            g[gaussian_y0:gaussian_y1, gaussian_x0:gaussian_x1],
        )
        ret_heat_map[y, x] = 1

    for k, v in cts_meta.items():
        # sanity check: no later paste may have overwritten a center pixel
        y, x = int(round(v["pos"][0])), int(round(v["pos"][1]))
        assert ret_heat_map[y, x] == 1
    return ret_heat_map


# Registry mapping recorded meta names back to their transform callables,
# so serialized pipelines can be replayed by name.
__funcs__ = {
    "do_scale_crop_in_image": do_scale_crop_in_image,
    "do_horizontally_flip": do_horizontally_flip,
    "do_color_jitter": do_color_jitter,
    "do_instance_max_offset": do_instance_max_offset,
}


def calc_transform_as_meta(img, mask, pre_process_mask, cfg,
                           is_val, centroid=None, min_count=-1):
    """Sample an augmentation pipeline, apply it, and record it as meta.

    cfg: dict with required "crop_w"/"crop_h"/"image_w"/"image_h" and
    optional "scale_min"/"scale_max"/"color_aug".
    is_val: when True no random augmentations are sampled — only the
    instance-offset step runs.
    Returns (meta_list, (img, mask, offset)); the meta_list can be replayed
    later with do_transform_from_meta to reproduce the same result.
    """
    cfg = cfg or {}
    transforms = [
        (args_scale_crop_in_image, {"target_w":      cfg["crop_w"],
                                    "target_h":      cfg["crop_h"],
                                    "w":             cfg["image_w"],
                                    "h":             cfg["image_h"],
                                    "scale_amt_min": cfg.get("scale_min", 0.5),
                                    "scale_amt_max": cfg.get("scale_max", 2.0),
                                    "centroid":      centroid},),
        (args_horizontally_flip, {}),
        (args_color_jitter, {"color_aug": cfg.get("color_aug", 0.25)}),
    ]
    meta_list = []
    if not is_val:
        # sample concrete (name, kwargs) parameters for every augmentation
        meta_list = [
            f(**a) for f, a in transforms
        ]
    for a in meta_list:
        # deep-copy so the recorded meta is not polluted with img/mask
        args = copy.deepcopy(a[1])
        args["img"] = img
        args["mask"] = mask
        img, mask = __funcs__[a[0]](**args)
    # do offset convert
    img, offset, meta = do_instance_max_offset(img, mask, meta=None,
                                               pre_process_mask=pre_process_mask,
                                               min_count=min_count,
                                               weight=None,
                                               return_center=False)
    # record the computed centers so replay can rebuild the offsets
    meta_list.append((
        "do_instance_max_offset", {"meta": meta, "min_count": min_count}
    ))
    return meta_list, (img, mask, offset)


def do_transform_from_meta(img, mask, pre_process_mask, meta_values, weight, return_center):
    """Replay a recorded transform pipeline on (img, mask).

    meta_values is a list of (name, kwargs) pairs as produced by
    calc_transform_as_meta; the final entry must be the
    "do_instance_max_offset" step, whose result is returned directly.
    """
    assert "do_instance_max_offset" == meta_values[-1][0]
    for name, recorded_kwargs in meta_values:
        call_kwargs = copy.deepcopy(recorded_kwargs)
        call_kwargs["img"] = img
        call_kwargs["mask"] = mask
        is_offset_step = (name == "do_instance_max_offset")
        if is_offset_step:
            # runtime-only arguments that are never serialized
            call_kwargs["pre_process_mask"] = pre_process_mask
            call_kwargs["weight"] = weight
            call_kwargs["return_center"] = return_center
        result = __funcs__[name](**call_kwargs)
        if is_offset_step:
            return result
        img, mask = result
    assert False, "error, cannot run to here"


def do_transform_from_meta_file(process_fc, meta_file, weight, return_center):
    """Load a recorded transform meta JSON and replay it.

    The JSON must contain "img" and "mask" paths plus the recorded "meta"
    pipeline; see do_transform_from_meta for the remaining arguments.
    """
    assert os.path.isfile(meta_file), "%s not find" % meta_file
    with open(meta_file) as rf:
        recorded = json.load(rf)
    source_img = Image.open(recorded["img"])
    source_mask = Image.open(recorded["mask"])
    return do_transform_from_meta(source_img, source_mask, process_fc,
                                  recorded["meta"], weight, return_center)


def do_transform_cityscape_from_meta_file(meta_file, weight, return_center):
    """Replay a recorded meta file with Cityscapes mask decoding.

    Pixels whose id is below 1000 or an exact multiple of 1000 are zeroed,
    leaving only per-instance ids in the mask.
    """

    def mask_process_fc(image):
        arr = np.array(image)
        arr[arr % 1000 == 0] = 0  # ignore
        arr[arr < 1000] = 0
        return arr

    return do_transform_from_meta_file(mask_process_fc, meta_file, weight, return_center)


def random_sampling(alist, num):
    sampling = []
    len_list = len(alist)
    assert len_list, 'len_list is zero!'
    indices = np.arange(len_list)
    np.random.shuffle(indices)

    for i in range(num):
        item = alist[indices[i % len_list]]
        sampling.append(item)
    return sampling


def transform_worker(t, e, c, centroid, img_mask_fc, mask_process_fc, fmt_nu_span, ag):
    """Compute one (image, epoch) transform meta and dump it to JSON.

    t: image identifier passed to img_mask_fc; e: epoch index used in the
    output file name; c: epoch count (unused here, kept for the task
    tuple); centroid: optional crop anchor; fmt_nu_span: %-format for the
    zero-padded epoch field; ag: parsed argparse namespace providing
    crop/image sizes, is_val and result_dir.
    """
    img_path, mask_path = img_mask_fc(t)
    img, mask = Image.open(img_path), Image.open(mask_path)
    meta, data = calc_transform_as_meta(img, mask, mask_process_fc,
                                        {"crop_w": ag.crop_w, "crop_h": ag.crop_h,
                                         "image_w": ag.image_w, "image_h": ag.image_h},
                                        is_val=ag.is_val,
                                        centroid=centroid)
    p_img, p_mask, p_offset = data
    # replay the freshly recorded meta and verify it reproduces the same
    # offsets before persisting it
    r_img, r_mask, r_offset = do_transform_from_meta(img, mask, mask_process_fc,
                                                     meta, None, return_center=False)
    assert np.all(p_offset == r_offset)
    f_name, _ = os.path.splitext(os.path.basename(mask_path))
    # result path: <result_dir>/<mask_basename>_<epoch>_tr.json
    meta_f_path = ("%s_" + fmt_nu_span + "_tr.json") % (os.path.join(ag.result_dir, f_name), e)
    meta_data = {"meta": meta, "img": img_path, "mask": mask_path}
    with open(meta_f_path, "w+") as mf:
        json.dump(meta_data, mf)


def comm_app_worker(worker_id, ag, img_mask_fc, mask_process_fc, tar_count,
                    task_queue: Queue, ret_queue: Queue):
    """Worker-process loop: consume tasks until the "EXIT" sentinel.

    Each task is (target, epoch, epoch_count, centroid); a
    (worker_id, "done", ...) or (worker_id, "exit", None) message is pushed
    to ret_queue for the progress reporter.
    """

    def w_debug(msg, **k):
        k["file"] = sys.stderr
        print("\rW %02d> %s" % (worker_id, msg), **k)

    w_debug("started, wait task")
    # zero-padded epoch field: wide enough for tar_count, minimum 5 digits
    fmt_nu_span = "%" + "0%dd" % max(len("0%d" % tar_count), 5)
    running = True
    while running:
        task = task_queue.get()
        if task == "EXIT":
            w_debug("exit")
            ret_queue.put((worker_id, "exit", None))
            running = False
        else:
            t, e, c, centroid = task
            transform_worker(t, e, c, centroid, img_mask_fc, mask_process_fc,
                             fmt_nu_span, ag)
            ret_queue.put((worker_id, "done", (t, e, c)))
    w_debug("exit success!")

def comm_app_stat(worker_count, tar_count, ret_queue: Queue):
    """Progress reporter: drain worker results until every worker exits."""
    def s_debug(msg, **k):
        k["file"] = sys.stderr
        print("\rstat> %s" % msg, **k)
    exited_workers = 0
    ins_count = 0
    t_time = time.time()
    # zero-padded field wide enough for "0<tar_count>"
    fmt_span = "%"+"0%dd" % len("0%s"%tar_count)
    s_debug("wait workers")
    while True:
        w_id, stat, data = ret_queue.get()
        if stat == "exit":
            exited_workers += 1
            s_debug("worker %02d exit, %d/%d" % (w_id, exited_workers, worker_count))
        else:
            # a "done" message carries (target, epoch_idx, epoch_count)
            t, e, c = data
            ins_count += 1
            # single-line progress: per-image epoch, overall count, elapsed
            # seconds (the float is truncated by the %d format)
            s_debug(("[w %02d] %s\t\tinner [%04d/%04d]\tall ["+
                     fmt_span+"/"+fmt_span+"]\ttime:\t["+fmt_span + "s]") % (
                     w_id, os.path.basename(t), e, c, ins_count, tar_count, time.time() - t_time),
                    end="")
        if exited_workers >= worker_count:
            break
    s_debug("all workers[%d] exited, stat exit" % worker_count)


def comm_app_transform_meta(arg_fc,
                            img_mask_fc,
                            mask_process_fc,
                            args,
                            centroid_fc=None,
                            _debug=None):
    """Multi-process CLI driver that pre-computes transform meta files.

    arg_fc: adds dataset-specific options to the parser (it must define
    --epochs, and --suffix for the directory-scan path below);
    img_mask_fc: maps an image path to (image_path, mask_path);
    mask_process_fc: converts a mask image into an instance-id array;
    args: argv list (None lets argparse read sys.argv);
    centroid_fc: optional (file, epochs) -> task-list builder used with
    --centroids; _debug: optional logging function (defaults to print).
    """
    def default_debug(*a, **k):
        print(*a, **k)
    debug = _debug or default_debug
    import argparse
    from .instanceseg import load_files_form_dir
    import time
    import os
    parser = argparse.ArgumentParser("convert seg data to transform meta")
    parser.add_argument("dataset_dir",
                        help="dataset directory", type=str)
    parser.add_argument("result_dir",
                        help="result directory", type=str)
    parser.add_argument("--centroids", "-c",
                        help="use centroid file", action="store_true", default=False)
    parser.add_argument("--is_val",
                        help="is val model", action="store_true", default=False)
    parser.add_argument("--threads", "-t",
                        help="number of cpus to use", type=int, default=0)
    parser = arg_fc(parser)
    ag = parser.parse_args(args)
    target = ag.dataset_dir
    # with --centroids, dataset_dir names a centroid JSON file instead of
    # a directory
    if ag.centroids:
        if not callable(centroid_fc):
            debug("centroid is not supported")
            return
        if not os.path.isfile(target):
            debug("centroid file: %s not find" % target)
            return
    if not os.path.isdir(ag.result_dir):
        debug("result dir: %s not find" % ag.result_dir)
        return
    epochs = ag.epochs
    if ag.is_val:
        debug("val model, force epochs to 1")
        epochs = 1
    # each task is (target_file, epoch_index, epoch_count, centroid-or-None)
    target_files = []
    if ag.centroids:
        assert not ag.is_val, "centroid file can not used in val model"
        target_files = centroid_fc(target, epochs)
    else:
        if not os.path.isdir(target):
            debug("target dir: %s not find" % target)
            return
        for f in load_files_form_dir(target, ag.suffix):
            for e in range(epochs):
                target_files.append((
                    f, e, epochs, None
                ))
    if len(target_files) < 1:
        debug("no data find in %s" % target)
        return
    t_all_start = time.time()
    tar_count = len(target_files)
    debug("start convert %d images" % tar_count)
    worker_count = ag.threads
    if worker_count <= 0:
        # default: half the CPUs, at least one
        worker_count = max(1, cpu_count()//2)
    worker_count = max(1, min(worker_count, tar_count//2))
    debug("start %d work" % worker_count)
    # bounded queue keeps the producer from racing far ahead of the workers
    task_queue = Queue(maxsize=worker_count*2)
    ret_queue = Queue()
    worker_list = []
    for w in range(worker_count):
        worker_list.append(Process(
            target=comm_app_worker, args=(w, ag, img_mask_fc, mask_process_fc, tar_count,
                                          task_queue, ret_queue)
        ))
    for p in worker_list:
        p.start()
    # a dedicated process drains ret_queue and prints progress
    p_stat = Process(target=comm_app_stat, args=(worker_count, tar_count, ret_queue))
    p_stat.start()
    for t, e, c, centroid in target_files:
        while task_queue.full():
            time.sleep(0.1)
        task_queue.put((t, e, c, centroid))

    debug("\rall task send complete, wait")
    # keep feeding EXIT sentinels until every worker has consumed one and
    # terminated
    while any([p.is_alive() for p in worker_list]):
        if task_queue.full():
            time.sleep(0.1)
            continue
        task_queue.put("EXIT")

    debug("\rall workers exit complete")
    for p in worker_list:
        p.join()
    p_stat.join()
    t_all_end = time.time()
    debug("\r ALL complete, time cost: %.2f, epochs: %d, ins: %d" % (
        t_all_end - t_all_start, ag.epochs, tar_count
    ))


def app_cityscape_transform_meta(args=None):
    """CLI entry point: pre-compute transform meta for Cityscapes.

    Wires the Cityscapes-specific path mapping, mask decoding and
    class-balanced centroid sampling into comm_app_transform_meta.
    args: optional argv list (None lets argparse read sys.argv).
    """

    def debug(*a, **k):
        # log to stderr so stdout stays clean
        k["file"] = sys.stderr
        print(*a, **k)

    def args_fc(parser):
        # Cityscapes options; defaults match full-resolution leftImg8bit
        # frames (2048x1024)
        parser.add_argument("--suffix",
                            help="instance file suffix", type=str, default="_leftImg8bit.png")
        parser.add_argument("--crop_w",  type=int, default=2048)
        parser.add_argument("--crop_h",  type=int, default=1024)
        parser.add_argument("--image_w", type=int, default=2048)
        parser.add_argument("--image_h", type=int, default=1024)
        parser.add_argument("--epochs",
                            help="epochs to generate", type=int, default=60)
        return parser

    def img_mask_fc(image_file):
        # map a leftImg8bit image path to its gtFine instance-id mask path
        return image_file, \
               image_file.replace("_leftImg8bit.png",
                                  "_gtFine_instanceIds.png").replace("/leftImg8bit/", "/gtFine/")

    def mask_process_fc(image):
        # discard pixels whose id is below 1000 or an exact multiple of
        # 1000, keeping only per-instance ids
        image = np.array(image)
        image[image % 1000 == 0] = 0  # ignore
        image[image < 1000] = 0
        return image

    def centroid_fc(centroids_file, epochs):
        # Build a class-balanced task list from a centroid JSON of shape
        # {class_id: [entry, ...]}; each entry is indexed as
        # entry[0] = image path, entry[2] = centroid (see usage below)
        with open(centroids_file, 'r') as json_data:
            centroids_data = json.load(json_data)
        centroids = {}
        images_count = set()
        for idx in centroids_data:
            centroids[int(idx)] = centroids_data[idx]
            for _ in centroids_data[idx]:
                images_count.add(_[0])
        # total budget: every distinct image once per epoch, split evenly
        # across classes
        total_count = len(images_count) * epochs
        class_count = len(centroids)
        per_class_num = total_count//class_count
        debug("classes: %d, num_per_class: %d, epochs: %d" % (class_count,
                                                              per_class_num, epochs))
        centroids_files = []
        for k in centroids.keys():
            centroids_files.extend(random_sampling(
                centroids[k], per_class_num
            ))
        # group the sampled centroids by image; an image's centroid list
        # becomes its own per-image "epoch" sequence
        target_images = {}
        for f in centroids_files:
            im, cet = f[0], f[2]
            if im in target_images:
                target_images[im].append(cet)
            else:
                target_images[im] = [cet]
        keys = [k for k in target_images.keys()]
        keys.sort()
        ret_files = []
        for k in keys:
            ep = len(target_images[k])
            for e, ct in enumerate(target_images[k]):
                ret_files.append((
                    k, e, ep, ct
                ))
        return ret_files

    return comm_app_transform_meta(args_fc, img_mask_fc, mask_process_fc, args,
                                   centroid_fc=centroid_fc, _debug=debug)
