import json
from torch.utils.data import Dataset
import tqdm
import random
import torch
from dataset.vocab import WordVocab
import os
from dataset.mask_images import mask_img
from dataset.data_util import normalize_bbox
class maksed_data(Dataset):
    """Masked-pretraining dataset (LayoutLM-style) for a single document.

    Each instance wraps ONE document: a ``.json`` file holding token/bbox
    records plus a sibling ``.png`` page image. ``__getitem__`` produces
    masked token ids, their mask labels, normalized bounding boxes, the
    page image with masked regions blanked, and a per-position covered flag,
    all truncated/padded to ``seq_len`` and returned as tensors.
    """

    def __init__(self, corpus_path, vocab, seq_len, encoding="utf-8", on_memory=True):
        """
        :param corpus_path: path to the document's ``.json`` file
        :param vocab: WordVocab with ``stoi``/``sos_index``/``eos_index``/
                      ``pad_index``/``mask_index``/``unk_index``
        :param seq_len: fixed output sequence length (truncate/pad target)
        :param encoding: text encoding used to read the json file
        :param on_memory: kept for interface compatibility (unused here)
        """
        self.vocab = vocab
        self.seq_len = seq_len
        self.on_memory = on_memory
        self.corpus_path = corpus_path
        self.encoding = encoding
        self.corpus_lines = self.get_corpus()

    def get_corpus(self):
        """Load the json document and flatten it into parallel lists.

        :return: dict with ``token_line`` (list of token strings) and
                 ``box_line`` (list of ``[x0, y0, x1, y1]``), aligned
                 index-for-index.
        """
        token_line = []
        box_line = []
        # FIX: honor the encoding passed to __init__ instead of hardcoding utf-8.
        with open(self.corpus_path, "r", encoding=self.encoding) as f:
            data = json.load(f)
        for entry in data:
            token_line.extend(entry["tokens"])
            box_line.extend(entry["bbox"])
        return {"token_line": token_line, "box_line": box_line}

    def __len__(self):
        # FIX: the old code returned len(self.corpus_lines) -- the number of
        # dict KEYS (always 2). One instance wraps exactly one document, so
        # this dataset yields a single sample.
        return 1

    def __getitem__(self, index=0):
        """Build the (single) training sample for this document.

        :param index: accepted for Dataset-protocol compatibility (FIX: the
                      old signature took no index, breaking DataLoader use);
                      ignored because the instance holds one document.
        :return: dict of tensors keyed by ``curpus_input``, ``curpus_label``,
                 ``box_input``, ``img_input``, ``covered_label``.
        """
        t1_random, t1_label = self.random_word(self.corpus_lines["token_line"])
        # Labels BEFORE [CLS]/[SEP] framing: nonzero == position was masked.
        box_label = t1_label

        # [CLS] tag = SOS tag, [SEP] tag = EOS tag; labels padded at the same spots.
        t1 = [self.vocab.sos_index] + t1_random + [self.vocab.eos_index]
        t1_label = [self.vocab.pad_index] + t1_label + [self.vocab.pad_index]

        layoutlm_input = t1[:self.seq_len]
        layoutlm_label = t1_label[:self.seq_len]
        padding = [self.vocab.pad_index] * (self.seq_len - len(layoutlm_input))
        layoutlm_input.extend(padding)
        layoutlm_label.extend(padding)

        # Indices of masked tokens: their boxes get blanked in the page image.
        boxes_id = [i for i, lab in enumerate(box_label) if lab != 0]
        # FIX: derive the image path from the real extension; the old
        # .replace("json", "png") also rewrote any "json" inside directory names.
        img_path = os.path.splitext(self.corpus_path)[0] + ".png"
        img_input = mask_img(img_path, self.corpus_lines["box_line"], boxes_id)

        # 1 where the token was masked (its image region is covered), else 0.
        covered_label = [1 if lab != 0 else 0 for lab in box_label]
        covered_label = covered_label[:self.seq_len]
        covered_label.extend([self.vocab.pad_index] * (self.seq_len - len(covered_label)))

        # Pad boxes with fresh [0,0,0,0] rows (FIX: the old code reused ONE
        # shared list object for every padding row -- aliasing hazard).
        boxes = self.corpus_lines["box_line"]
        box_input = boxes[:self.seq_len]
        box_input.extend([[0, 0, 0, 0] for _ in range(self.seq_len - len(box_input))])
        normalized_box = normalize_bbox(box_input)

        # NOTE: the output keys keep their original (typo'd) spelling --
        # downstream code indexes by these exact strings.
        output = {"curpus_input": layoutlm_input,
                  "curpus_label": layoutlm_label,
                  "box_input": normalized_box,
                  "img_input": img_input,
                  "covered_label": covered_label}
        return {key: torch.tensor(value) for key, value in output.items()}

    def random_word(self, sentence):
        """BERT-style 15% token masking.

        :param sentence: list of token strings
        :return: (ids, labels) -- ids are vocab indices with ~15% of
                 positions replaced (80% [MASK], 10% random id, 10% kept);
                 labels carry the original id at replaced positions, 0 elsewhere.

        FIX: the old code did " ".join(sentence).split(), which silently
        re-tokenized any token containing whitespace and broke the 1:1
        alignment with the bbox list.
        """
        tokens = list(sentence)
        output_label = []
        for i, token in enumerate(tokens):
            prob = random.random()
            if prob < 0.15:
                prob /= 0.15
                if prob < 0.8:
                    # 80%: replace with the mask token
                    tokens[i] = self.vocab.mask_index
                elif prob < 0.9:
                    # 10%: replace with a random vocab id
                    tokens[i] = random.randrange(len(self.vocab))
                else:
                    # 10%: keep the current token (as its id)
                    tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)
                output_label.append(self.vocab.stoi.get(token, self.vocab.unk_index))
            else:
                tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)
                output_label.append(0)
        return tokens, output_label



if __name__=="__main__":
    vocab_path = "/Users/qianghaozhang/Documents/PythonProject/NLP//my-lm/data/handled_data/vocab.pkl"
    corpus_path='/Users/qianghaozhang/Documents/PythonProject/NLP//my-lm/data/handled_data/training_data/'
    for guid, file in enumerate(sorted(os.listdir(corpus_path))):
        if file[-4:] =="json":

            file_path = os.path.join(corpus_path, file)
            vocab = WordVocab.load_vocab(vocab_path)
            data = maksed_data(file_path, vocab, seq_len=100, encoding="utf-8", on_memory=True)
            output = data.__getitem__()
            print(output)