import os
# Pin all CUDA work to GPU 1. NOTE: this must run before `import torch`
# below — torch reads CUDA_VISIBLE_DEVICES at import/initialization time.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# from PIL import Image
# from torchvision import transforms
import numpy as np
import spacy
import torch
import json
# att_feat = np.load(os.path.join("/mnt/hdd0/home/fyc/Transformer/data/cocobu_att",  '170550.npz'))['feat']
# # nlp = spacy.load("en_core_web_sm")
# print(att_feat)

# model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
# img = '/mnt/hdd0/home/fyc/COCO_dataset/train2014/COCO_train2014_000000011694.jpg'
# results = model(img)
# a = results.pandas().xyxy[0].name
# print('---------------------------')

# Load the generated-caption JSON for the val/test/train splits produced by
# the Swin object-detection Transformer (RL-tuned) model. One flat `with`
# replaces three nested ones; all three files share a directory prefix.
_RESULT_DIR = "/mnt/hdd0/home/fyc/Transformer/data/swin_caption_result2"

with open(os.path.join(_RESULT_DIR, "swin_obj_dect_trans_rl2_val.json"), encoding="utf-8") as j, \
     open(os.path.join(_RESULT_DIR, "swin_obj_dect_trans_rl2_test.json"), encoding="utf-8") as k, \
     open(os.path.join(_RESULT_DIR, "swin_obj_dect_trans_rl2_train.json"), encoding="utf-8") as l:
    a = json.load(j)  # val-split captions
    b = json.load(k)  # test-split captions
    c = json.load(l)  # train-split captions

print('---------------------------')

#
# text = u"Plates on a table filled with breakfast foods and cups of coffee and orange juice"
#
# doc = nlp(text)
#
# for np in doc.noun_chunks:
#     print(np)
#
#

