import os

import cv2
import json
import mxnet as mx
import tqdm
from sklearn.model_selection import train_test_split

class latex_dataset(mx.gluon.data.Dataset):
    """Dataset of (image filename, LaTeX token list) pairs for im2markup.

    Reads ``formulas.norm.lst`` (one normalized formula per line) and
    ``train_filter.lst`` (whitespace-separated ``image_name formula_index``
    per line) from *root*, then keeps only formulas that pass the blacklist
    filter and contain fewer than 64 tokens.
    """

    # Raw-formula substrings that exclude a sample.
    # NOTE(review): these look like ad-hoc data-cleaning blacklist strings
    # inherited from a previous run — confirm they are still needed.
    _BLACKLIST = ("Object", "0.4", "0.14", "0.5")

    def __init__(self, root="/media/kohill/data/formula/data/images_and_formula"):
        self.root = root
        formulas_path = os.path.join(root, "formulas.norm.lst")
        # 'with' guarantees the handles are closed (the original leaked them).
        with open(formulas_path, "rt") as f:
            formulas = list(f)
        train_filter_path = os.path.join(root, "train_filter.lst")
        with open(train_filter_path, "rt") as f:
            # image filename -> formula line index (as a string)
            train_filter = {fields[0]: fields[1]
                            for fields in (line.strip().split() for line in f)}
        objs = [(name, formulas[int(idx)]) for name, idx in train_filter.items()]
        objs = [(name, formula) for name, formula in objs
                if not any(bad in formula for bad in self._BLACKLIST)]
        objs = [(name, formula.strip().split()) for name, formula in objs]
        # self.objs: list of (image filename, list of formula tokens)
        self.objs = [(name, tokens) for name, tokens in objs if len(tokens) < 64]

    def at_with_image_path(self, idx):
        """Return ``(absolute image path, token list, token count)`` for *idx*."""
        name, label = self.objs[idx]
        img_path = os.path.join(self.root, "images_processed", name)
        return img_path, label, len(label)

    def __len__(self):
        return len(self.objs)

    def create_dictionary(self):
        """Build the token<->index vocabulary from all labels.

        Sets ``self.words2index`` and ``self.index2words``.  Indices 0 and 1
        are reserved for '<START>' and '<END>'; '<PAD>' gets the last index.
        """
        words = set()
        for idx in tqdm.tqdm(range(len(self))):
            _, label, _ = self.at_with_image_path(idx)
            words |= set(label)
        # '<START>' first, '<END>' second, then the sorted vocabulary —
        # same ordering the two insert(0, ...) calls produced originally.
        words_list = ['<START>', '<END>'] + sorted(words)
        # enumerate instead of list.index() per token — original was O(n^2).
        words2index = {w: i for i, w in enumerate(words_list)}
        words2index["<PAD>"] = len(words2index)
        self.words2index = words2index
        self.index2words = {i: w for w, i in words2index.items()}

    def save2json(self, path):
        """Dump splits, vocabulary and annotations to *path* as one JSON file.

        Requires :meth:`create_dictionary` to have been called first.
        """
        all_imgs = {}
        all_imgs["images_list"] = [name for name, _ in self.objs]
        # Deterministic 90/10 train/val split (fixed random_state).
        train_imgs, val_imgs = train_test_split(
            all_imgs["images_list"], test_size=.1, random_state=42)
        all_imgs["train_images_list"] = train_imgs
        all_imgs["val_images_list"] = val_imgs
        all_imgs["words2index"] = self.words2index
        all_imgs["index2words"] = self.index2words
        all_imgs["annotations"] = dict(self.objs)
        with open(path, "wt") as f:
            json.dump(all_imgs, f)

    def save2json2(self, save_prefix):
        """Write a karpathy-style ``annotations.json`` under *save_prefix*.

        Each image is assigned a split pseudo-randomly with a fixed seed for
        reproducibility: ~80% train, ~10% val, ~10% test.
        """
        import random
        random.seed(46)
        images = []
        for count, (name, tokens) in enumerate(tqdm.tqdm(self.objs)):
            img_path = os.path.join(self.root, "images_processed", name)
            # draw in 1..10: >=3 -> train (8/10), ==1 -> val, ==2 -> test
            t = random.randint(1, 10)
            if t >= 3:
                split = "train"
            elif t == 1:
                split = "val"
            else:
                split = "test"
            images.append({
                "imgid": count,
                "filename": os.path.basename(img_path),
                "filepath": os.path.dirname(img_path),
                "split": split,
                "sentences": [{"tokens": list(tokens), "imgid": count}],
            })
        with open(os.path.join(save_prefix, "annotations.json"), "wt") as f:
            json.dump({"images": images}, f)
if __name__ == '__main__':
    # Build the vocabulary, then export the karpathy-style annotations file.
    ds = latex_dataset()
    ds.create_dictionary()
    ds.save2json2("./data_im2markup")