import mxnet as mx

import os
import json
from pprint import pprint
import logging
import sympy,cv2
import matplotlib.pyplot as plt
import numpy as np
def lsdir(rootdir="", suffix=".png"):
    """Yield full paths of files under *rootdir* whose names end with *suffix*.

    Walks the directory tree recursively (lazily, as a generator).

    :param rootdir: directory to search; must exist.
    :param suffix: file-name suffix to match (e.g. ``".png"``).
    :raises FileNotFoundError: if *rootdir* does not exist.  (The original
        used ``assert``, which is stripped under ``python -O``.)
    """
    if not os.path.exists(rootdir):
        raise FileNotFoundError(rootdir)
    for dirpath, _dirnames, filenames in os.walk(rootdir):
        for filename in filenames:
            if filename.endswith(suffix):
                yield os.path.join(dirpath, filename)

class image_pad():
    """Callable that scales an image to fit a target canvas and zero-pads it.

    The image is resized by a single uniform factor so it fits inside
    ``dest_shape`` without distortion, placed in the top-left corner of a
    black float32 canvas, and the canvas is returned.
    """

    def __init__(self, dest_shape=(768, 768)):
        # Target canvas size as (height, width).
        self.dest_shape = dest_shape

    def __call__(self, img_ori: np.ndarray):
        target_h, target_w = self.dest_shape
        # Uniform scale: the tighter of the two axis ratios, so the whole
        # image fits inside the canvas.
        scale = min(target_h / img_ori.shape[0], target_w / img_ori.shape[1])
        resized = cv2.resize(img_ori, dsize=(0, 0), fx=scale, fy=scale)  # type: np.ndarray
        canvas = np.zeros(shape=(int(target_h), int(target_w), 3), dtype=np.float32)
        h, w, c = resized.shape
        # Top-left placement; the remainder of the canvas stays zero.
        canvas[:h, :w, :c] = resized
        return canvas

class latex_dataset(mx.gluon.data.Dataset):
    """Gluon dataset pairing formula images with tokenized LaTeX labels.

    Expects the following layout under *root*:
      - ``images_filter`` file (default ``train_filter.lst``): lines of
        ``<image_name> <formula_index>`` linking images to formula lines.
      - ``formulas.norm.lst``: one normalized formula per line; line number
        is the formula index.
      - ``words_dictionary.txt``: token vocabulary (can be (re)built via
        ``create_dictionary`` when ``create_dict`` is True).
      - ``images_processed/``: directory containing the image files.

    Samples are the subset of images whose formula tokenizes entirely with
    the loaded vocabulary and is short enough to fit ``max_len``.
    """
    def __init__(self,root="/data1/zyx/yks/dataset/images_and_formula",resize = True,create_dict = False,images_filter = "train_filter.lst"):
        self.root = root
        train_filter = os.path.join(root,images_filter)
        # Optionally rebuild the vocabulary file before loading it.
        if create_dict:self.create_dictionary()
        self.load_dictionary()
        # formula index -> {'name': image file name, later 'latex'/'latex_ori'}
        image_dict = {}
        with open(train_filter,"rt") as f:
            for l in f:
                l = l.strip().split()
                image_name,index = l
                index = int(index)
                image_dict[index] = {'name':image_name}
        with open(os.path.join(root,"formulas.norm.lst"),"rt") as f:
            for image_n,l in enumerate(f):
                l_split = l.strip().split()
                words_num = []
                try:
                    # Tokenize; raises KeyError on any out-of-vocabulary word.
                    for word in l_split:
                        n = self.dict[word]
                        words_num.append(n)
                    # Brace tokens are dropped from the label sequence.
                    words_num = list(filter(lambda x: self.index2words[x] != '{' and self.index2words[x] != '}', words_num))
                    # Keep only formulas short enough to also fit the
                    # <START>/<END> markers within max_len.
                    if len(words_num) < self.max_len-2:
                        image_dict[image_n]["latex"] = words_num
                        image_dict[image_n]["latex_ori"] = l
                except KeyError as e:
                    # Either the formula contains an out-of-vocabulary token,
                    # or no image maps to this formula index -- skip silently.
                    pass
        self.objs = image_dict
        # Usable samples are exactly those that received a 'latex' label above.
        self.keys = list(filter(lambda x:'latex' in self.objs[x].keys(),self.objs.keys()))
        # NOTE(review): stored but never read in this file -- padding via
        # image_pad happens unconditionally in at_with_image_path.
        self._doresize = resize
    def load_dictionary(self):
        """Load token<->index maps from ``words_dictionary.txt``.

        Indices from the file are shifted by +2 so that 0 and 1 can be
        reserved for the <START> and <END> markers.
        """
        self.dict = {}
        self.index2words = {}
        with open(os.path.join(self.root,"words_dictionary.txt")) as f:
            for l in f:
                l = l.strip().split()
                index,key  = l[:2]
                index = int(index) +2 # 0 is `start` and 1 is `end`
                self.dict[key] = index
                self.index2words[index] = key
        self.index2words[0] = "<START>"
        self.index2words[1] = "<END>"
        self.dict["<START>"]=0
        self.dict["<END>"]=1
    @property
    def words_count(self):
        # Vocabulary size (e.g. for an embedding/output layer).  The +2 adds
        # headroom beyond the highest index in use -- NOTE(review): since
        # load_dictionary already shifted indices by +2, +1 would seem
        # sufficient; confirm against the consuming model.
        return max(self.index2words.keys()) + 2
    @property
    def max_len(self):
        # Fixed length every padded label sequence is emitted at.
        return 64
    def create_dictionary(self):
        """Build ``words_dictionary.txt`` from ``formulas.norm.lst``.

        Collects tokens from formulas that avoid a small blacklist of
        layout/punctuation tokens, force-includes all ASCII alphanumerics,
        counts occurrences, and writes tokens seen more than 10 times,
        sorted by descending frequency.
        """
        root = self.root
        all_words = set()
        with open(os.path.join(root,"formulas.norm.lst"),"rt") as f:
            for l in f:
                l_split = l.strip().split()
                # Skip whole formulas containing any blacklisted token.
                if not set([";","\\;",'\\',',','\\,', ', ', '\\operatorname','~','\\begin{array}']) & set(l_split):
                    all_words = all_words | set(l_split)

        words_dict = {}
        for key in all_words:
            words_dict[key] = {"count":0}
        #  make sure digital and all alphabets are in dictionary.
        for key in set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
            words_dict[key] = {"count":1000}
        with open(os.path.join(root,"formulas.norm.lst"),"rt") as f:
            for l in f:
                l_split = l.strip().split()
                for k in l_split:
                    try:
                        words_dict[k]["count"] += 1
                    except KeyError:
                        # Token was filtered out above; don't count it.
                        pass
        words_list = [(x,words_dict[x]) for x in words_dict.keys()]
        words_list.sort(key = lambda x:-1 * x[1]['count'])
        # remove words whose appear counts less than 10.
        words_list = list(filter(lambda x:x[1]["count"] > 10,words_list))
        with open(os.path.join(root,"words_dictionary.txt"),"wt") as f:
            for i in range(len(words_list)):
                f.write("{} {} {}\r\n".format(i,words_list[i][0],words_list[i][1]['count']))
    def __getitem__(self, idx):
        """Return ``(image_CHW_float32, padded_label_float32, label_len)``."""
        _,image,label,label_len = self.at_with_image_path(idx)
        img_float = image.astype(np.float32)
        # Per-channel mean subtraction; values look like BGR ImageNet-style
        # means -- NOTE(review): confirm against the pretrained backbone.
        mean = np.array([103.06,115.90, 123.15])[np.newaxis,np.newaxis].astype(np.float32)
        img_float = img_float - mean
        # Reverse channel order (presumably BGR from cv2.imread -> RGB).
        img_float = img_float[:,:,(2,1,0)]
        # HWC -> CHW for the network.
        img_float = np.transpose(img_float,(2,0,1))
        label = np.array(label)
        return img_float.astype(np.float32),label.astype(np.float32),label_len
    def at_with_image_path(self,idx):
        """Return ``(image_path, uint8 image, padded label, [label_len])``.

        The label is wrapped in <START>/<END> markers and right-padded with
        <END> up to ``max_len``; ``label_len`` is the unpadded length
        (markers included).
        """
        key = self.keys[idx]
        _item = self.objs[key]
        image = cv2.imread(os.path.join(self.root,"images_processed",_item['name']))
        label = list(_item['latex'])
        label = list(label)
        label.insert(0,self.dict["<START>"])
        label.append(self.dict["<END>"])
        label_len = len(label)
        assert len(label) <= self.max_len
        label = np.array(label)
        # Pad with the <END> index so the tail decodes as "sequence over".
        label_padded = np.empty((self.max_len,),dtype=np.int32)
        label_padded.fill(self.dict["<END>"])
        label_padded[:label.shape[0]] = label
        # Fit onto a fixed 64x256 canvas; image_pad returns float32, cast
        # back to uint8 here (mean subtraction happens in __getitem__).
        image = image_pad(dest_shape=(64,256))(image)
        return os.path.join(self.root,"images_processed",_item['name']),image.astype(np.uint8),np.array(label_padded),np.array([label_len])

    def __len__(self):
        # Number of usable (image, formula) pairs.
        return len(self.keys)
def str_sum(l):
    """Fold the elements of *l* together with ``+`` (left to right).

    For a list of strings this concatenates them; for numbers it sums them.
    Raises IndexError on an empty sequence.
    """
    acc = l[0]
    for item in l[1:]:
        acc += item
    return acc
def viz(dataset):
    """Walk every sample of *dataset*, printing its label (as indices and as
    decoded text) and image shape, and showing the image with matplotlib."""
    total = len(dataset)
    print(total)
    for sample_idx in range(total):
        path, image, label, label_len = dataset.at_with_image_path(sample_idx)
        print(label, label_len)
        # Decode token indices back to their string tokens.
        tokens = [dataset.index2words[token_id] for token_id in label]
        print(str_sum(tokens))
        print(image.shape)
        print("")
        plt.imshow(image)
        plt.show()
if __name__ == '__main__':
    # Build the dataset with its default paths and step through every sample.
    dataset = latex_dataset()
    viz(dataset)