"""Scratch/inspection script (all experiments commented out).

Ad-hoc snippets for examining:
  (a) the training options stored in a saved torch checkpoint,
  (b) a pickled Vocabulary object,
  (c) files under a COCO annotations directory (.txt previews, .npy shapes).

Uncomment a section to run it; only the imports below are live code.
NOTE(review): paths ("/data/yangy/xuyc/...", "./model_best.pth.tar") are
machine-specific — adjust before uncommenting.
"""
import os
import numpy as np
import torch

# --- Print the training options ('opt') stored in a checkpoint ---
# model_path = "./model_best.pth.tar"
# checkpoint = torch.load(model_path)
# opt = checkpoint['opt']
# print(opt)

# --- Load a pickled Vocabulary and look up the word for index 8 ---
# from vocab import Vocabulary
# import pickle
# vocab = pickle.load(open(os.path.join(
#         "vocab", 'nus_vocab.pkl'), 'rb'))
# print(vocab.idx2word[8])

# ###########################################################
# ## 3/21: walk the data directory to get familiar with each file in it
# data_path = '/data/yangy/xuyc/COCO/annotations'
# files  = None
# for root, dirs, fs in os.walk(data_path):
#     files = fs
# print(files)
# for file in files:
#     path = os.path.join(data_path,file)
#     name, behind = os.path.splitext(file)
#     # Preview the first 10 lines of each .txt file, then its line count:
#     # if behind == ".txt":
#     #     with open(path, "r") as f:
#     #         print(file)
#     #         cot = 0
#     #         for line in f:
#     #             cot += 1
#     #             if cot <= 10:
#     #                 print("Line %d" % cot, line.strip())
#     #         print(cot)
#     #     print("*" * 15)
#     # For each .npy file, show its shape and the 20 smallest values:
#     if behind == ".npy":
#         print(file, end=": ")
#         data = np.load(path)
#         print(data.shape, end=" ")
#         print(np.sort(data)[0:20])
#         del data
#         print("*"*50)