import os
import json
from torch.utils.data import Dataset
from dataset.data_util import normalize_bbox
from PIL import Image
import numpy as np
import torch
import torch.nn as nn
from dataset.vocab import WordVocab
def get_label_list(file_dir):
    """Scan every ``*.json`` file in *file_dir* and build a label -> id map.

    Each JSON file is expected to contain a list of records, each with an
    ``"ner_tags"`` list; tags from all files are collected and each distinct
    tag is assigned an integer id.

    Args:
        file_dir: directory containing the annotation JSON files.

    Returns:
        dict mapping each distinct NER tag to an integer id. Tags are
        sorted first so ids are deterministic across runs (plain ``set``
        iteration order is not).
    """
    labels = []
    for file in sorted(os.listdir(file_dir)):
        # endswith(".json") instead of file[-4:] == "json", which also
        # matched names like "xjson"
        if not file.endswith(".json"):
            continue
        file_path = os.path.join(file_dir, file)
        with open(file_path, 'r', encoding='utf-8') as fr:
            data = json.load(fr)
        # extend inside the per-file branch: the original extended the
        # previous file's labels again for every non-JSON entry, and raised
        # NameError when the first directory entry was not a JSON file
        for tokens in data:
            labels.extend(tokens["ner_tags"])
    return {label: i for i, label in enumerate(sorted(set(labels)))}
# NOTE(review): import-time side effect — this builds the label map from a
# hard-coded path the moment the module is imported, and importing fails
# outright if the path does not exist on this machine. `label_to_id` is read
# by DataHandler.__getitem__, so making this lazy/configurable would change
# the module interface; flagging rather than changing it here.
file_dir = "/mnt/myproject/pretrain/my-lm/data/handled_data/training_data/"
label_to_id = get_label_list(file_dir)

class DataHandler(Dataset):
    """Dataset for a single annotated document: one JSON token/bbox/label
    file plus its page image.

    One instance corresponds to one (corpus file, image) pair and yields one
    sample: token ids, normalized boxes, the image tensor and label ids, all
    padded/truncated to ``seq_len``.
    """

    def __init__(self, corpus_path, image_path, vocab, seq_len, encoding="utf-8", on_memory=True):
        """
        Args:
            corpus_path: path to a JSON file shaped like
                ``[{"tokens": [...], "ner_tags": [...], "bbox": [[x0,y0,x1,y1], ...]}, ...]``.
            image_path: path to the page image for this document.
            vocab: vocabulary exposing ``stoi``, ``unk_index`` and ``pad_index``.
            seq_len: fixed output sequence length (pad/truncate target).
            encoding: text encoding of the corpus file.
            on_memory: kept for API compatibility; the corpus is always
                loaded eagerly regardless of this flag.
        """
        self.vocab = vocab
        self.seq_len = seq_len
        self.on_memory = on_memory
        self.corpus_path = corpus_path
        self.image_path = image_path
        self.encoding = encoding
        self.corpus_lines = self.get_corpus()

    def get_corpus(self):
        """Read the JSON corpus file and flatten tokens/boxes/labels.

        Returns:
            dict with keys ``token_line`` (list of token strings),
            ``box_line`` (list of 4-element boxes) and ``token_label``
            (list of tag strings).
        """
        token_line, box_line, label_line = [], [], []
        # honor the encoding passed to __init__ (was hard-coded to utf-8)
        with open(self.corpus_path, "r", encoding=self.encoding) as f:
            data = json.load(f)
        for record in data:
            token_line.extend(record["tokens"])
            label_line.extend(record["ner_tags"])
            box_line.extend(record["bbox"])
        return {"token_line": token_line,
                "box_line": box_line,
                "token_label": label_line}

    def get_images(self):
        """Load the page image as a float tensor resized to 256x256.

        Returns:
            torch.FloatTensor of shape (256, 256, 3) — HWC channel order,
            values in [0, 255]; no normalization or CHW permute is applied.
        """
        # context manager releases the underlying file handle promptly
        with Image.open(self.image_path) as img_pil:
            img_pil = img_pil.convert("RGB").resize((256, 256), Image.BILINEAR)
            return torch.tensor(np.array(img_pil)).float()

    def __len__(self):
        # One corpus file == one sample. The original returned
        # len(self.corpus_lines), i.e. the number of dict keys (3), which
        # misreports the dataset size to any DataLoader.
        return 1

    def __getitem__(self, index=0):
        """Build the single padded sample for this document.

        Args:
            index: ignored (there is only one sample per instance); accepted
                with a default so the standard ``Dataset`` protocol works and
                existing no-argument callers keep working.

        Returns:
            dict of tensors: ``curpus_input`` (token ids), ``curpus_label``
            (label ids padded with ``CrossEntropyLoss().ignore_index``),
            ``box_input`` (normalized boxes) and ``img_input`` (image).
            Key spelling "curpus" is kept for compatibility with callers.
        """
        pad_len = self.seq_len

        # Map tokens to ids WITHOUT mutating the cached corpus — the original
        # wrote ids back into corpus_lines, so a second call re-mapped the
        # integer ids through stoi and produced garbage.
        token_ids = [self.vocab.stoi.get(tok, self.vocab.unk_index)
                     for tok in self.corpus_lines["token_line"]]
        tokens_input = token_ids[:pad_len]
        tokens_input += [self.vocab.pad_index] * (pad_len - len(tokens_input))

        # Copy each box so padding never aliases cached lists; the original
        # reused one [0,0,0,0] object for every pad slot.
        box_input = [list(box) for box in self.corpus_lines["box_line"][:pad_len]]
        box_input += [[0, 0, 0, 0] for _ in range(pad_len - len(box_input))]
        normalized_box = normalize_bbox(box_input)

        img_input = self.get_images()

        # Size the label padding from the label list itself — the original
        # used len(boxes), which under/over-pads if boxes and labels differ.
        pad_token_label_id = nn.CrossEntropyLoss().ignore_index
        label_id = [label_to_id[label]
                    for label in self.corpus_lines["token_label"]][:pad_len]
        label_id += [pad_token_label_id] * (pad_len - len(label_id))

        # img_input is already a float tensor; re-wrapping it with
        # torch.tensor() (as the original did) copies and warns.
        return {"curpus_input": torch.tensor(tokens_input),
                "curpus_label": torch.tensor(label_id),
                "box_input": torch.tensor(normalized_box),
                "img_input": img_input}

if __name__ == "__main__":
    # Smoke test: build the label map from the training data and print how
    # many distinct NER tags were found.
    # (Removed a large block of commented-out scratch code that iterated the
    # *.json files, paired each with its .png, and ran DataHandler on the
    # first one — recover it from version control if needed.)
    file_dir = "/mnt/myproject/pretrain/my-lm/data/handled_data/training_data/"
    label_to_id = get_label_list(file_dir)
    print(len(label_to_id))