import spacy
from transformers import BertTokenizer
import torch
import json


def convert_to_caption(labels, ix_to_words):
    """Turn model label output into BERT token-id / mask tensors of noun phrases.

    Pipeline: look up the pre-generated captions for `labels` (5 per image),
    extract noun-phrase chunks with spaCy, wrap them in [CLS]/[SEP] markers,
    then convert the word pieces to BERT vocabulary ids.

    Args:
        labels: sequence of image-info dicts; each entry must carry an 'id'
            key used to look up its generated caption (see get_captions).
        ix_to_words: unused; kept for backward compatibility with callers
            (it belonged to the commented-out decoding path this replaced).

    Returns:
        (tokens, masks): two int64 tensors of shape (num_captions, max_len).
        `tokens[i, :k]` holds the BERT ids of caption i's k word pieces,
        zero-padded on the right; `masks[i, :k]` is 1 over the same span.
    """
    captions = get_captions(labels)
    num_captions = len(captions)
    phrases = get_phrase(captions)
    new_phrases = phrase_to_vector(phrases)
    # do_lower_case/do_basic_tokenize are off: phrases are already split into
    # the exact tokens we want ids for.
    berttokenizer = BertTokenizer.from_pretrained("/mnt/hdd0/home/fyc/Transformer/data/bert-base-cased", do_lower_case=False, do_basic_tokenize=False)
    # Pad every caption to the length of the longest phrase sequence.
    max_len = max((len(p) for p in new_phrases), default=0)
    tokens = torch.zeros(num_captions, max_len, dtype=torch.int64)
    masks = torch.zeros(num_captions, max_len, dtype=torch.int64)
    for i, phrase_tokens in enumerate(new_phrases):
        token_ids = berttokenizer.convert_tokens_to_ids(phrase_tokens)
        if token_ids:
            # One slice assignment instead of a per-element Python loop.
            tokens[i, :len(token_ids)] = torch.tensor(token_ids, dtype=torch.int64)
            masks[i, :len(token_ids)] = 1

    return tokens, masks


def get_phrase(captions):
    """Extract noun-phrase chunks from each caption with spaCy.

    Args:
        captions: list of caption strings.

    Returns:
        A list parallel to `captions`; element i is the list of spaCy Span
        objects produced by `doc.noun_chunks` for caption i (possibly empty).
    """
    # NOTE(review): the model is reloaded on every call, which is slow;
    # hoisting it to module level would change import-time behavior, so it
    # is left as-is here.
    nlp = spacy.load("en_core_web_sm")
    return [list(nlp(caption).noun_chunks) for caption in captions]


def phrase_to_vector(phrases):
    """Flatten each caption's noun phrases into a BERT-style token list.

    Each caption's phrases are joined as
    ``[CLS] phrase_0 [SEP] phrase_1 [SEP] ... phrase_n [SEP]``
    and split on whitespace. A caption with no phrases yields an empty list.

    Args:
        phrases: list (one entry per caption) of objects exposing a `.text`
            attribute (e.g. spaCy Spans from get_phrase).

    Returns:
        List of token lists, one per caption.
    """
    new_phrases = []
    for spans in phrases:
        texts = [span.text for span in spans]
        if texts:
            # join() replaces the original quadratic `+=` string building;
            # the marker placement is identical.
            marked = '[CLS] ' + ' [SEP] '.join(texts) + ' [SEP]'
        else:
            marked = ''
        new_phrases.append(marked.split())

    return new_phrases


def get_captions(info):
    """Look up the pre-generated caption for each image, repeated 5 times.

    Loads three JSON result files (train/val/test), merges their
    'imgToEval' sections into one id -> eval dict (later files win on
    duplicate keys, matching the original nested dict(...) merge), and
    returns each image's caption repeated 5 times.

    Args:
        info: sequence of dicts, each with an 'id' key identifying an image.

    Returns:
        List of caption strings of length 5 * len(info).

    Raises:
        KeyError: if an image id from `info` is missing from the results.
    """
    with open("/mnt/hdd0/home/fyc/Transformer/data/swin_caption_result/swin_obj_dect_trans_rl2_train.json", "r") as load_f_train, \
            open("/mnt/hdd0/home/fyc/Transformer/data/swin_caption_result/feature_fusion3_rl_val.json", "r") as load_f_val, \
            open("/mnt/hdd0/home/fyc/Transformer/data/swin_caption_result/feature_fusion3_test.json", "r") as load_f_test:
        load_dict_train = json.load(load_f_train)
        load_dict_val = json.load(load_f_val)
        load_dict_test = json.load(load_f_test)

    # PEP 448 merge; equivalent to the original dict(dict(a, **b), **c).
    load_dict = {
        **load_dict_train['imgToEval'],
        **load_dict_val['imgToEval'],
        **load_dict_test['imgToEval'],
    }
    captions = []
    for entry in info:
        # Each image contributes the same caption 5 times (COCO has 5
        # reference captions per image, so downstream code expects 5 rows).
        captions.extend([load_dict[str(entry['id'])]['caption']] * 5)
    return captions


