import os
import json
import numpy as np

from data import get_test_loader
import time
import numpy as np
from vocab import Vocabulary, deserialize_vocab  # NOQA
from model import SCAN, xattn_score_t2i, xattn_score_i2t
import torch



def evalrank(model_path, device, data_path=None, split='dev', fold5=False,
             batch_size=2, max_batches=3):
    """
    Load a trained SCAN checkpoint's options/vocabulary and print the first
    few batches of the chosen evaluation split for inspection.

    NOTE(review): despite the name, this debug version computes no retrieval
    metrics and never builds the model; `fold5` is accepted only for
    interface compatibility and is currently unused.

    Args:
        model_path: path to a checkpoint saved with torch.save (a dict that
            contains at least the training options under key 'opt').
        device: torch.device passed as ``map_location`` to ``torch.load``.
        data_path: optional override for ``opt.data_path`` stored in the
            checkpoint.
        split: dataset split to load ('dev', 'test', 'testall', ...).
        fold5: unused here; kept so existing callers keep working.
        batch_size: batch size for the test loader (previously hard-coded
            to 2).
        max_batches: highest batch index to print before stopping
            (previously hard-coded to 3, i.e. four batches are shown).
    """
    # load the options saved alongside the model weights
    checkpoint = torch.load(model_path, map_location=device)
    opt = checkpoint['opt']
    print(opt)
    if data_path is not None:
        opt.data_path = data_path

    # load vocabulary used by the model
    vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
    opt.vocab_size = len(vocab)

    # small batch size keeps the printed batches easy to read
    opt.batch_size = batch_size

    print('Loading dataset')
    data_loader = get_test_loader(split, opt.data_name, vocab,
                                  opt.batch_size, opt.workers, opt)
    # peek at the first batches only, then stop
    for i, (images, captions, lengths, ids, labels, tokens) in enumerate(data_loader):
        print(lengths, ids, labels, tokens)
        if i >= max_batches:
            break


if __name__ == '__main__':
    data_path = '/data/yangy/xuyc/data1/data'
    # BUG FIX: "cuda: 5" (with a space) is not a valid device string and
    # makes torch.device raise RuntimeError; the correct form is "cuda:5".
    device = torch.device("cuda:5" if torch.cuda.is_available() else "cpu")
    evalrank("./runs/coco_scan/log/model_best.pth.tar", device=device,
             data_path=data_path, split="testall", fold5=False)

############################################################
### 3/22: check how labels are read back from JSON
# with open("label_cocoid.json", "r") as f:
#     data = json.load(f)
# print(data['391895'])  # finding: int keys saved to a JSON file come back as str keys

############################################################
### 3/21: walk through the individual files under the data directory
# data_path = '/data/yangy/xuyc/data1/data/coco_precomp'
# files  = None
# for root, dirs, fs in os.walk(data_path):
#     files = fs
# print(files)
# for file in files:
#     path = os.path.join(data_path,file)
#     name, behind = os.path.splitext(file)
#     if behind == ".txt":
#         with open(path, "r") as f:
#             print(file)
#             cot = 0
#             for line in f:
#                 cot += 1
#                 if cot <= 10:
#                     print("第%d行"%cot, line.strip())
#             print(cot)
#         print("*" * 15)
    # if behind == ".npy":
    #     print(file)
    #     data = np.load(path)
    #     print(data.shape)
    #     del data
# file = "testall_ims.npy"
# path = os.path.join(data_path,file)
# data = np.load(path)
