from script import *
from dataloaderraw import *
import torch


def get_caption(data):
    """Look up pre-computed captions for each image in the batch and return
    BERT token-id and attention-mask tensors for the tokenized phrases.

    Args:
        data: batch dict; reads data['infos'] (list of per-image dicts, each
            with an 'id' key) and data['img'] (tensor whose dim 0 is taken as
            the batch size).

    Returns:
        (tokens, masks): two int64 tensors of shape (batch_size, max_len),
        where tokens holds BERT vocabulary ids padded with zeros and masks
        is 1 over the real tokens, 0 over padding.
    """
    # Load infos
    infos = data['infos']
    captions = []
    # Merge the pre-computed val/test/train caption results into a single
    # image-id -> caption-entry dict.
    with open("/mnt/hdd0/home/fyc/Transformer/data/swin_caption_result2/swin_obj_dect_trans_rl2_val.json") as j:
        with open("/mnt/hdd0/home/fyc/Transformer/data/swin_caption_result2/swin_obj_dect_trans_rl2_test.json") as k:
            with open("/mnt/hdd0/home/fyc/Transformer/data/swin_caption_result2/swin_obj_dect_trans_rl2_train.json") as l:
                a = json.load(j)
                b = json.load(k)
                c = json.load(l)
                # Later keys win on collision: train overrides test overrides val.
                load_dict = dict(dict(a, **b), **c)
                batch_size = data['img'].shape[0]
                for i in range(infos.__len__()):
                    # NOTE(review): this appends the SAME caption entry 5 times
                    # per image (the loop body does not depend on the loop
                    # variable) — confirm this duplication is intentional.
                    # NOTE(review): the loop variable `k` shadows the test-split
                    # file handle opened above (harmless here since json.load
                    # already ran, but fragile).
                    for k in range(5):
                        captions.append(load_dict[str(infos[i]['id'])]['caption'])

                new_phrases = get_phrase2(captions)
                # new_phrases = phrase_to_vector(phrases)
                berttokenizer = BertTokenizer.from_pretrained("/mnt/hdd0/home/fyc/Transformer/data/bert-base-cased",
                                                              do_lower_case=False, do_basic_tokenize=False)
                # max_len = length of the longest tokenized phrase; used as the
                # common padded width for every row.
                max_len = 0
                for i in range(new_phrases.__len__()):
                    if new_phrases[i].__len__() > max_len:
                        max_len = new_phrases[i].__len__()
                # NOTE(review): tokens/masks have batch_size rows, but the fill
                # loop below runs over len(new_phrases) == 5 * len(infos); this
                # raises IndexError unless batch_size >= 5 * len(infos).
                # Verify the intended relationship between data['img'].shape[0]
                # and len(infos) against the caller.
                tokens = torch.zeros(batch_size, max_len, dtype=torch.int64)
                masks = torch.zeros(batch_size, max_len, dtype=torch.int64)
                for i in range(new_phrases.__len__()):
                    token_ids = berttokenizer.convert_tokens_to_ids(new_phrases[i])
                    # NOTE(review): the loop variable `j` shadows the val-split
                    # file handle opened above.
                    for j in range(token_ids.__len__()):
                        tokens[i][j] = torch.tensor(token_ids[j])
                    masks[i][:len(token_ids)] = 1
            
                return tokens, masks


def get_phrase(captions):
    """Extract noun-chunk phrases from each group of five captions.

    Each element of *captions* is expected to hold (at least) five caption
    strings; they are joined into one comma-separated sentence and parsed
    with spaCy, and the sentence's noun chunks are collected.

    Returns a list (same length as *captions*) of lists of spaCy noun-chunk
    spans.
    """
    nlp = spacy.load("en_core_web_sm")
    phrases = []
    for group in captions:
        # Join captions 0..4 with ',' and terminate the sentence with '.'.
        text = ','.join(group[i] for i in range(5)) + '.'
        doc = nlp(text)
        phrases.append([chunk for chunk in doc.noun_chunks])
    return phrases


def get_phrase2(captions):
    """Join each group of five captions into one sentence and BERT-tokenize it.

    Each element of *captions* must hold (at least) five caption strings;
    they are joined with ' , ' separators, terminated with ' .', and passed
    through _tokenize, which wraps the sub-tokens in [CLS]/[SEP].

    Returns a list (same length as *captions*) of token lists.
    """
    sentences = []
    for group in captions:
        parts = []
        for i in range(5):
            parts.append(group[i])
            # Comma between captions, full stop after the last one.
            parts.append(' , ' if i != 4 else ' .')
        sentences.append(''.join(parts))
    return [_tokenize(sentence) for sentence in sentences]


def _tokenize(tokens):
    """BERT-subtokenize a whitespace-separated string.

    Splits *tokens* on whitespace, runs each word through the BERT
    WordPiece tokenizer, and wraps the resulting sub-tokens in
    [CLS] ... [SEP].

    Args:
        tokens: a string of whitespace-separated words.

    Returns:
        List of sub-token strings, starting with '[CLS]' and ending
        with '[SEP]'.
    """
    # Fix: the original re-loaded the tokenizer from disk on EVERY call,
    # and this function is called once per phrase from get_phrase2.
    # Cache the tokenizer on the function object so the expensive
    # from_pretrained() load happens only once per process.
    berttokenizer = getattr(_tokenize, "_cached_tokenizer", None)
    if berttokenizer is None:
        berttokenizer = BertTokenizer.from_pretrained("/mnt/hdd0/home/fyc/Transformer/data/bert-base-cased",
                                                      do_lower_case=False, do_basic_tokenize=False)
        _tokenize._cached_tokenizer = berttokenizer
    re_tokens = ['[CLS]']
    for token in tokens.strip().split():
        subtoken = berttokenizer.tokenize(token)
        re_tokens += subtoken
    re_tokens.append('[SEP]')
    return re_tokens