# encoding=utf-8
'''
@author: kohill
@Date: 2018-7-11
This file is trying to implement a dataset wrapper for formula detection task,
also, some other useful functions are included, like coco converter.
'''
import os, logging
import numpy as np
import mxnet as mx
import xml.etree.ElementTree as ET
import cv2
from random import randint
import shutil


def lsdir(rootdir="", suffix=".png"):
    """Recursively yield paths of files under *rootdir* ending with *suffix*.

    Parameters
    ----------
    rootdir : str
        Directory to walk; must already exist.
    suffix : str
        File-name suffix to match, e.g. ".png" or ".xml".

    Yields
    ------
    str
        Full path of every matching file.
    """
    # `os` is already imported at module level; the redundant
    # function-local import was removed.
    assert os.path.exists(rootdir)
    for dirpath, _, filenames in os.walk(rootdir):
        for filename in filenames:
            if filename.endswith(suffix):
                yield os.path.join(dirpath, filename)


def shortname(path):
    """Return the final path component (file name) of *path*.

    ``os.path.basename`` is defined as ``os.path.split(path)[1]``, so this
    is behaviorally identical to the original while avoiding the redundant
    function-local ``import os``.
    """
    return os.path.basename(path)


# Maps unicode symbols appearing in raw labels to LaTeX commands or plain
# ASCII replacements.  `normalize` appends a space after every replacement
# and iterates the keys in insertion order, so key ORDER matters for the
# chained replacements near the end (e.g. "\\[" -> "[" -> " ").
latex_dict_map = {
    # Greek letters.
    "α": "\\alpha",
    "β": "\\beta",
    "γ": "\\gamma",
    "δ": "\\delta",
    "ε": "\\epsilon",
    "ζ": "\\zeta",  # bug fix: was "\\zelta", which is not a LaTeX command
    "η": "\\eta",
    "θ": "\\theta",
    "ι": "\\iota",  # bug fix: was "\\lota", which is not a LaTeX command
    "κ": "\\kappa",
    "λ": "\\lambda",
    "μ": "\\mu",
    "ν": "\\nu",
    "ξ": "\\xi",
    "ο": "\\omicron",
    "∃": "\\exists",
    "π": "\\pi",
    "、": ",",
    "ω": "\\omega",
    "Ω": "\\Omega",
    "σ": "\\sigma",

    # All dot-like glyphs collapse to \cdot.
    "∙": "\\cdot",
    "•": "\\cdot",
    "⋅": "\\cdot",
    "·": "\\cdot",

    # Full-width punctuation.
    "．": ".",
    "。": ".",

    'ρ': "\\rho",
    "∪": "\\cup",
    "′": "'",
    "×": "\\times",
    "≠": "\\neq",
    "±": "\\pm",
    "∀": "\\forall",
    "；": ";",
    "∁": "\\complement",
    "∉": "\\notin",
    "⊗": "\\bigotimes",
    "⊕": "\\bigoplus",
    "⊆": "\\subseteq",
    "∅": "\\varnothing",
    "△": "\\triangle",
    "□": "\\square",
    "∠": "\\angle",
    "\\stackrel{∧}": "\\hat",
    "≈": "\\approx",
    "∩": "\\cap",
    "￢": "\\lnot",
    "→": "\\rightarrow",
    "φ": "\\varphi",
    "ϕ": "\\phi",
    "≌": "\\cong",
    "≥": "\\geq",
    "Γ": "\\Gamma",
    "^'": "'",
    "“": '"',
    "”": '"',
    "═": "=",
    "∣": "|",
    "丨": "|",
    "＇": "'",
    "∨": "v",
    "∽": "~",
    "～": "~",
    "！": "!",
    # Circled / Roman numerals become plain digits.
    "①": "1",
    "②": "2",
    "③": "3",
    "④": "4",
    "⑤": "5",
    "⑥": "6",
    "⑧": "8",  # added: was missing while every neighbouring numeral is mapped
    "⑨": "9",
    "⑩": " 1 0 ",
    "⑦": "7",
    "Ⅰ": "1",
    "∥": "|",
    "Ⅱ": "2",
    "Ⅲ": "3",
    "（": "(",
    "）": ")",
    "，": ",",
    "\\[": "[",
    # NOTE(review): "\\]" mapping to "[" looks like a typo for "]", but the
    # downstream chained replacements currently erase both forms the same
    # way; changing it would alter emitted tokens — confirm before fixing.
    "\\]": "[",
    "\\;": ";",
    "▱": "",
    "º": "",
    "℃": "\\textcelsius",

    # "\\circ" replaces the deprecated single-backslash escape "\circ";
    # the resulting string value is byte-identical.
    "°": " ^ \\circ ",
    "…": "",
    "※": "",
    "％": "%",
    # "[" and "$" are stripped to spaces (must come AFTER "\\[" above).
    "[": " ",
    "$": " "
}


def check_brace(latex_str: str) -> bool:
    """Return True when *latex_str* has equally many "{" and "}".

    Only the counts are compared, so an out-of-order string such as "}{"
    still passes — callers (``normalize``) strip escaped braces first and
    only need this cheap sanity check.

    The original compared characters with ``is``, which relies on CPython
    string interning and raises SyntaxWarning on modern Pythons; this uses
    value comparison via ``str.count`` instead.
    """
    return latex_str.count("{") == latex_str.count("}")


def is_chinese(uchar):
    """Return True when *uchar* is a CJK unified ideograph (U+4E00..U+9FA5)."""
    return u'\u4e00' <= uchar <= u'\u9fa5'


def list2str(d):
    """Concatenate the items of iterable *d* with ``+``.

    Returns "" for an empty iterable.  The first item replaces the seed
    value, so the result has the type of the items (not necessarily str).
    """
    result = ""
    seeded = False
    for item in d:
        if seeded:
            result += item
        else:
            result = item
            seeded = True
    return result


def list2str_blank(l):
    """Join the string items of *l* with single spaces.

    Returns "" for an empty list (the manual ``l[0]`` version raised
    IndexError); ``str.join`` is also O(n) instead of quadratic.
    """
    return " ".join(l)


def normalize(latex_str: str):
    """Normalize a raw LaTeX label into a list of tokens.

    Returns [] whenever the string is rejected: MathType/edittable markup,
    unbalanced braces, non-ASCII characters after symbol mapping, "array"
    environments, or \\left/\\right constructs.

    Changes vs. the original: the no-op ``copy.copy`` on an immutable str
    was removed, all ``is``-vs-literal comparisons (interning-dependent,
    SyntaxWarning on modern Pythons) were replaced with ``==``, and dead
    commented-out code was dropped.  Behavior is otherwise preserved.
    """
    latex_str_ori = latex_str  # keep the original text for log messages

    # Reject editor-generated markup outright.
    if "MathType" in latex_str or "edittable" in latex_str:
        return []

    # Brace balance is checked ignoring escaped literal braces.
    if not check_brace(latex_str_ori.replace("\\{", "").replace("\\}", "")):
        logging.info("Error in {}, unbalance braces.".format(latex_str_ori))
        return []

    # Map unicode symbols to LaTeX commands; the trailing space keeps the
    # inserted replacement separated from following text.
    for key in latex_dict_map:
        latex_str = latex_str.replace(key, latex_dict_map[key] + " ")
    # Drop CJK characters entirely.
    latex_str = list2str(filter(lambda x: not is_chinese(x), latex_str))

    # Everything left must be printable ASCII.
    try:
        for c in latex_str:
            assert 0 < ord(c) < 127, "'{}' is not a ascii character in latex code'{}'.".format(c,
                                                                                               latex_str_ori)
    except AssertionError as e:
        logging.exception(e)
        return []
    if "array" in latex_str:
        return []
    if "\\left" in latex_str or "\\right" in latex_str:
        return []

    # Rewrite escaped/special sequences into single named tokens.
    latex_str = latex_str.replace("\\{", " \\leftbrace ")
    latex_str = latex_str.replace("\\}", " \\rightbrace ")
    latex_str = latex_str.replace("\\\\", " \\backslash ")

    # Ensure every remaining command starts its own whitespace-split token.
    latex_str = latex_str.replace("\\", " \\")

    latex_str = latex_str.replace("{", " { ")
    latex_str = latex_str.replace("}", " } ")
    latex_str = latex_str.replace("(", " ( ")
    # NOTE(review): only ") " (paren + space) is padded here; a bare ")"
    # is still split by the per-character loop below, so tokens come out
    # the same either way.
    latex_str = latex_str.replace(") ", " ) ")
    latex_str = latex_str.replace("_", " _ ")
    latex_str = latex_str.replace("^", " ^ ")

    # Tokenize: words beginning with "\" stay whole (if legal), everything
    # else is split into single characters.
    latex_sentence = []
    try:
        for w in latex_str.strip().split():
            w = w.strip()
            if w[0] == "\\" and len(w) != 1 and (
                    "a" <= w[1] <= "z" or "A" <= w[1] <= "Z"
                    or w[1] == "%" or w[1] == "{" or w[1] == "}"):
                # A well-formed LaTeX command token.
                latex_sentence.append(w)
            elif w[0] == "\\" and len(w) != 1:
                # Backslash followed by an unexpected character.
                assert False, (latex_str_ori, latex_str)
            elif w[0] == "\\" and len(w) == 1:
                # A lone backslash token.
                assert False, latex_str
            else:  # w does not start with "\"
                assert "\\" not in w, latex_str
                for c in w:
                    latex_sentence.append(c)
        # Strip one pair of wrapping braces, e.g. "{ x }" -> "x".
        try:
            if latex_sentence[0] == "{" and latex_sentence[-1] == "}":
                latex_sentence = latex_sentence[1:-1]
        except IndexError:
            latex_sentence = []
    except AssertionError as e:
        logging.exception(e)
        return []
    return latex_sentence


def random_split(root_dir, train_dir, val_dir, image_suffix=".png"):
    """Randomly move XML/image annotation pairs into train and val dirs.

    Each XML file found under *root_dir* (with its companion image, whose
    path is the XML path with *image_suffix* substituted) has a 1-in-10
    chance of going to *val_dir*; otherwise it goes to *train_dir*.
    """
    xml_files = list(lsdir(root_dir, suffix=".xml"))

    # Best-effort directory creation: an already-existing directory is
    # reported but not fatal.
    for target in (train_dir, val_dir):
        try:
            os.makedirs(target)
        except OSError as e:
            print(e)

    for xml_file in xml_files:
        dest = val_dir if randint(0, 9) == 0 else train_dir
        shutil.move(xml_file, dest)
        shutil.move(xml_file[:-4] + image_suffix, dest)


class FormulaDataset(object):
    """Dataset of formula images annotated with VOC-style polygon XML.

    Each sample pairs an image with its bounding boxes.  Boxes whose label
    starts with "###" carry a LaTeX transcription (class 0); boxes whose
    label starts with "@@@" mark picture regions without text (class 1).
    """

    def parser_xml(self, xml_path):
        """Parse one annotation XML file, returning None on bad input.

        Returns a dict with keys 'img_path', 'bboxes', 'latex' and
        'latex_normalized'; each bbox row is
        [xmin, ymin, xmax, ymax, class_id, 0, bbox_id].
        """
        oneimg = {}
        oneimg['bboxes'] = []
        oneimg["latex"] = []
        oneimg["latex_normalized"] = []
        try:
            dom = ET.parse(xml_path)
        except Exception as e:
            logging.warning("{}_{}".format(e, xml_path))
            return None
        root = dom.getroot()
        img_name = root.findall('filename')[0].text
        # The image is assumed to sit next to the XML with a .png suffix.
        oneimg['img_path'] = xml_path[:-4] + ".png"
        for objects in root.findall('object'):
            name = objects.find('name').text
            points = list(objects.find('polygon'))
            if len(points) != 4:
                logging.warning("find illegal label in file:{}.xml. ".format(img_name))
                return None
            # Each polygon point is serialized as "x,y"; take the
            # axis-aligned bounding box of the four corners.
            toxy = lambda x: [int(x[0]), int(x[1])]
            points = np.array([toxy(p.text.strip().split(",")) for p in points])
            xmin = np.min(points[:, 0])
            ymin = np.min(points[:, 1])
            xmax = np.max(points[:, 0])
            ymax = np.max(points[:, 1])
            if str(name).startswith("###"):  # formula with LaTeX transcription
                # NOTE(review): self.bbox_count is never incremented, so every
                # formula box shares id 1 — confirm whether that is intended.
                oneimg['bboxes'].append([xmin, ymin, xmax, ymax, 0, 0, self.bbox_count])
                latex_label = str(name)[3:]
                oneimg["latex"].append(latex_label)
                latex_normalized = normalize(latex_label)
                latex_normalized = list(filter(lambda x: x, latex_normalized))
                oneimg["latex_normalized"].append(latex_normalized)
            elif str(name).startswith("@@@"):  # picture region, no text
                oneimg['bboxes'].append([xmin, ymin, xmax, ymax, 1, 0, -1])
                latex_label = ""
                oneimg["latex"].append(latex_label)
                latex_normalized = normalize(latex_label)
                latex_normalized = list(filter(lambda x: x, latex_normalized))
                oneimg["latex_normalized"].append(latex_normalized)

        return oneimg

    def create_words_dict(self, objs):
        """Build (words2index, index2words) from all normalized sentences.

        Ids 0 and 1 are reserved for <START> and <END>; vocabulary words are
        numbered from 2 in sorted order and <PAD> takes the next free id.
        """
        words2index = {}
        for obj in objs:
            for sentence in obj["latex_normalized"]:
                for w in sentence:
                    words2index[w] = 0
        for n, key in enumerate(sorted(words2index.keys())):
            words2index[key] = n + 2
        words2index["<START>"] = 0
        words2index["<END>"] = 1
        words2index["<PAD>"] = int(max(words2index.values()) + 1)
        index2words = {int(v): k for k, v in words2index.items()}
        return words2index, index2words

    def __init__(self,
                 XML_ROOT,
                 transform=None,
                 words2index=None,
                 index2words=None
                 ):
        """Walk *XML_ROOT* recursively and load every parsable annotation.

        transform: optional callable ``(img, bboxes) -> (img, bboxes)``
        applied in ``__getitem__``.
        words2index/index2words: pass BOTH to reuse an existing vocabulary;
        otherwise one is built from the loaded annotations.
        """
        super(FormulaDataset, self).__init__()
        self.objs = []
        self.bbox_count = 1
        self.bboxid2latex = {}
        for dirpath, _, filenames in os.walk(XML_ROOT):
            for name in filenames:
                if str(name).endswith(".xml"):
                    oneimg = self.parser_xml(os.path.join(dirpath, name))
                    # Skip unparsable files and images without any box.
                    if oneimg is not None and len(oneimg["bboxes"]) > 0:
                        self.objs.append(oneimg)
        if words2index is None or index2words is None:
            self._words2index, self._index2words = self.create_words_dict(self.objs)
        else:
            self._words2index, self._index2words = words2index, index2words
        self.classes = ["formula", "pictures"]
        self._transform = transform

    def __len__(self):
        """Number of loaded images."""
        return len(self.objs)

    def __getitem__(self, idx):
        """Return (image, bboxes) for sample *idx*.

        Each bbox row is [x0, y0, x1, y1, class, 0, tok_0, ..., tok_k]
        padded with -1 up to 6 + max_sentence_len columns.
        """
        oneimg = self.objs[idx]

        img_path = oneimg["img_path"]
        assert os.path.exists(img_path), img_path

        # BGR -> RGB.
        img_ori = cv2.imread(img_path)[:, :, ::-1]
        bboxes = np.array(oneimg['bboxes'])
        # Pad every row to hold the longest sentence; -1 marks "no token".
        bboxes_r = np.empty(shape=(bboxes.shape[0], 6 + self.max_sentence_len))
        bboxes_r.fill(-1)
        bboxes_r[:, :6] = bboxes[:, :6]
        for i in range(len(bboxes)):
            sentence = oneimg["latex_normalized"][i]
            try:
                sentence_int = list(map(lambda x: self.words2index[x], sentence))
            except KeyError:
                # Sentence contains an out-of-vocabulary token; drop it.
                sentence_int = []
            if len(sentence_int) > 0:
                bboxes_r[i, 6:(6 + len(sentence_int))] = sentence_int
        if self._transform is not None:
            # Bug fix: the transformed boxes used to be assigned to a dead
            # local while the raw bboxes_r were returned.
            img_ori, bboxes_r = self._transform(img_ori, bboxes_r)
        return mx.nd.array(img_ori), bboxes_r

    def at_with_image_path(self, idx):
        """Return (img_path, bboxes) for sample *idx* without loading pixels."""
        oneimg = self.objs[idx]
        img_path = oneimg["img_path"]
        bboxes = np.array(oneimg['bboxes'])
        return img_path, bboxes

    @property
    def words_count(self):
        """Vocabulary size including the <START>/<END>/<PAD> specials."""
        return len(self.words2index)

    @property
    def max_sentence_len(self):
        """Longest normalized sentence in the dataset (computed lazily)."""
        try:
            return self._max_sentence_len
        except AttributeError:
            self._max_sentence_len = 0
            for obj in self.objs:
                for l in obj["latex_normalized"]:
                    if len(l) > self._max_sentence_len:
                        self._max_sentence_len = len(l)
                    if len(l) > 100:
                        # Print suspiciously long sentences for inspection.
                        print(list2str_blank(l))
            return self._max_sentence_len

    @property
    def words2index(self):
        """Mapping from token string to integer id."""
        return self._words2index

    @property
    def index2words(self):
        """Mapping from integer id back to token string."""
        return self._index2words

    def write2json(self, save_prefix, split="train"):
        """Crop every formula box to an image and dump JSON annotations.

        Writes ``<save_prefix>/imgs/<n>.png`` crops plus
        ``annotations.json`` and ``vocabularies.json``.  The *split*
        parameter is currently unused: each crop is assigned randomly to
        train/val/test with an 8/1/1 ratio (seeded, hence reproducible).
        """
        # Bug fix: `json` was previously only available via the __main__
        # guard's import; import it locally so the method is self-contained.
        import json
        import random
        from random import randint
        from tqdm import tqdm

        random.seed(46)  # deterministic split assignment
        count = 0
        objs = {"images": []}
        # Bug fix: iterate self.objs instead of the global `train_dataset`,
        # which only existed when the module was run as a script.
        for obj in tqdm(self.objs):
            img = cv2.imread(obj["img_path"])
            bboxes = obj['bboxes']
            for nbbox, bbox in enumerate(bboxes):
                oneimg = {}
                x0, y0, x1, y1, cls = bbox[:5]
                if cls == 0:  # formulas only; picture boxes are skipped
                    roi = img[y0:y1, x0:x1]
                    filepath = save_prefix + "/imgs/{}.png".format(count)
                    oneimg["imgid"] = count
                    oneimg["filename"] = "{}.png".format(count)
                    oneimg["filepath"] = os.path.dirname(filepath)
                    cv2.imwrite(filepath, roi)
                    t = randint(1, 10)
                    if t >= 3:
                        oneimg["split"] = "train"
                    elif t == 1:
                        oneimg["split"] = "val"
                    else:
                        oneimg["split"] = "test"
                    oneimg["sentences"] = [{"tokens": obj["latex_normalized"][nbbox], "imgid": count}]
                    objs["images"].append(oneimg)
                    count += 1
        # Bug fix: close the output files deterministically.
        with open(save_prefix + "/annotations.json", "wt") as f:
            json.dump(objs, f)
        with open(save_prefix + "/vocabularies.json", "wt") as f:
            json.dump({"words2index": self.words2index, "index2words": self.index2words}, f)


if __name__ == '__main__':
    # Script entry point: build the dataset from a hard-coded annotation
    # directory and export cropped formula images plus JSON annotation files.
    import json
    from pprint import pprint

    # NOTE(review): the module-level names `json` and `train_dataset`
    # created here are referenced from inside FormulaDataset.write2json —
    # keep them if refactoring this block.
    train_dataset = FormulaDataset(XML_ROOT="/home/kohill/Downloads/1106")
    train_dataset.write2json("/media/kohill/data/kohill/formula_image2latex_data1106/all", )
    print(train_dataset.max_sentence_len)
