import os

# Pin the process to GPU 0 before any CUDA-aware library is imported.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import argparse
import json

import numpy as np
import torch
from six.moves import cPickle

import models
from dataloader import *
from dataloaderraw import *
from pycocotools.coco import COCO
# NOTE(review): `utils` is referenced later (want_to_continue, decode_sequence)
# but never imported explicitly -- it is assumed to come from the wildcard
# imports above. Confirm dataloader/dataloaderraw re-export it.

# ---------------------------------------------------------------------------
# Command-line interface for the captioning evaluation script. Empty/zero
# values for several options mean "fall back to the checkpoint's value".
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()

# Input paths: trained weights, CNN backbone name, and the pickled
# training-time infos (options + vocabulary).
parser.add_argument(
    '--model', type=str,
    default='/mnt/hdd0/home/fyc/code/swin_transformer_checkpoint/qinliangji38.6/model.pth',
    help='path to model to evaluate')
parser.add_argument(
    '--cnn_model', type=str, default='resnet101',
    help='resnet101, resnet152')
parser.add_argument(
    '--infos_path', type=str,
    default='/mnt/hdd0/home/fyc/code/swin_transformer_checkpoint/qinliangji38.6/infos_swin_obj_dect_trans_rl2.pkl',
    help='path to infos to evaluate')

# Basic options
parser.add_argument(
    '--batch_size', type=int, default=0,
    help='if > 0 then overrule, otherwise load from checkpoint.')
parser.add_argument(
    '--num_images', type=int, default=5000,
    help='how many images to use when periodically evaluating the loss? (-1 = all)')
parser.add_argument(
    '--language_eval', type=int, default=1,
    help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
parser.add_argument(
    '--dump_images', type=int, default=0,
    help='Dump images into vis/imgs folder for vis? (1=yes,0=no)')
parser.add_argument(
    '--dump_json', type=int, default=1,
    help='Dump json with predictions into vis folder? (1=yes,0=no)')
parser.add_argument(
    '--dump_path', type=int, default=0,
    help='Write image paths along with predictions into vis json? (1=yes,0=no)')

# Sampling options
parser.add_argument(
    '--sample_max', type=int, default=1,
    help='1 = sample argmax words. 0 = sample from distributions.')
parser.add_argument(
    '--max_ppl', type=int, default=0,
    help='beam search by max perplexity or max probability.')
parser.add_argument(
    '--beam_size', type=int, default=5,
    help='used when sample_max = 1, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
parser.add_argument(
    '--group_size', type=int, default=1,
    help="used for diverse beam search. if group_size is 1, then it's normal beam search")
parser.add_argument(
    '--diversity_lambda', type=float, default=0.5,
    help='used for diverse beam search. Usually from 0.2 to 0.8. Higher value of lambda produces a more diverse list')
parser.add_argument(
    '--temperature', type=float, default=1.0,
    help='temperature when sampling from distributions (i.e. when sample_max = 0). Lower = "safer" predictions.')
parser.add_argument(
    '--decoding_constraint', type=int, default=0,
    help='If 1, not allowing same word in a row')

# For evaluation on a folder of images:
parser.add_argument(
    '--image_folder', type=str, default='',
    help='If this is nonempty then will predict on the images in this folder path')
parser.add_argument(
    '--image_root', type=str, default='/mnt/hdd0/home/fyc/COCO_dataset',
    help='In case the image paths have to be preprended with a root path to an image folder')

# For evaluation on MSCOCO images from some split:
parser.add_argument(
    '--input_fc_dir', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocobu_fc',
    help='path to the h5file containing the preprocessed dataset')
parser.add_argument(
    '--input_att_dir', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocobu_att',
    help='path to the h5file containing the preprocessed dataset')
parser.add_argument(
    '--input_box_dir', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocobu_box',
    help='path to the h5file containing the preprocessed dataset')
parser.add_argument(
    '--input_label_h5', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocotalk_label.h5',
    help='path to the h5file containing the preprocessed dataset')
parser.add_argument(
    '--input_json', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocotalk.json',
    help='path to the json file containing additional info and vocab. empty = fetch from model checkpoint.')
parser.add_argument(
    '--cnn_weight_dir', type=str, default='',
    help='path to the directory containing the weights of a model trained on imagenet')
parser.add_argument(
    '--split', type=str, default='test',
    help='if running on MSCOCO images, which split to use: val|test|train')
parser.add_argument(
    '--coco_json', type=str, default='',
    help='if nonempty then use this file in DataLoaderRaw (see docs there). Used only in MSCOCO test evaluation, where we have a specific json file of only test set images.')
parser.add_argument(
    '--input_rel_box_dir', type=str, default='/mnt/hdd0/home/fyc/Transformer/data/cocobu_box_relative',
    help='this directory contains the bboxes in relative coordinates for the corresponding image features in --input_att_dir')

# Misc
parser.add_argument(
    '--id', type=str, default='',
    help='an id identifying this run/job. used only if language_eval = 1 for appending to intermediate files')
parser.add_argument(
    '--verbose_beam', type=int, default=1,
    help='if we need to print out all beam search beams.')
parser.add_argument(
    '--verbose_loss', type=int, default=0,
    help='if we need to calculate loss.')

opt = parser.parse_args()

# Load the infos pickle saved alongside the checkpoint: it carries the
# training-time options and the vocabulary.
with open(opt.infos_path, 'rb') as infos_file:
    infos = cPickle.load(infos_file)

# Where the CLI left a value empty (or batch_size at 0), fall back to the
# value recorded in the checkpoint's options.
saved = infos['opt']
if not opt.input_fc_dir:
    opt.input_fc_dir = saved.input_fc_dir
    opt.input_att_dir = saved.input_att_dir
    opt.input_box_dir = saved.input_box_dir
    opt.input_label_h5 = saved.input_label_h5
if not opt.input_json:
    opt.input_json = saved.input_json
if not opt.batch_size:
    opt.batch_size = saved.batch_size
if not opt.id:
    opt.id = saved.id

# Cross-check the remaining options against the checkpoint; anything missing
# on the CLI side is copied over. Keys in `ignore` may legitimately differ
# between training and evaluation.
ignore = ["id", "batch_size", "beam_size", "start_from", "language_eval"]
checkpoint_opts = vars(infos['opt'])
cli_opts = vars(opt)
for key, saved_value in checkpoint_opts.items():
    if key in ignore:
        continue
    if key not in cli_opts:
        cli_opts[key] = saved_value  # copy over options from model
    elif cli_opts[key] != saved_value:
        # Mismatch: let the user decide whether evaluation should proceed.
        if not utils.want_to_continue(key + ' option not consistent'):
            exit()

vocab = infos['vocab']  # ix -> word mapping saved with the checkpoint

# Setup the model: rebuild the architecture from the merged options, restore
# the trained weights from --model, then move to GPU and switch to
# evaluation (inference) mode.
model = models.setup(opt)
model.load_state_dict(torch.load(opt.model))
model.cuda()
model.eval()

# Pick the data source: preprocessed COCO features by default, or raw images
# when an --image_folder was given.
if opt.image_folder:
    loader = DataLoaderRaw({'folder_path': opt.image_folder,
                            'coco_json': opt.coco_json,
                            'batch_size': opt.batch_size,
                            'cnn_model': opt.cnn_model,
                            'cnn_weight_dir': opt.cnn_weight_dir})
else:
    loader = DataLoader(opt)
# When eval uses a provided pretrained model, the vocab may differ from the
# one in your cocotalk.json, so always use the vocab from the infos file.
loader.ix_to_word = infos['vocab']
# Decode captions for the whole split. The loader wraps around after one
# epoch, so the first repeated image id means every image has been seen:
# dump the accumulated predictions and stop. (The original `epoch_down` flag
# was never set anywhere and has been removed as dead code.)
predictions = {}
while True:
    # NOTE(review): the split is hard-coded to 'train' here and in the cache
    # filename below, even though --split defaults to 'test' -- confirm intended.
    data = loader.get_batch('train')

    # The label file stores seq_per_img captions per image; take one feature
    # row per image (indices 0, seq_per_img, 2*seq_per_img, ...).
    rows = np.arange(loader.batch_size) * loader.seq_per_img
    tmp = [data['img'][rows],
           data['fc_feats'][rows],
           data['att_feats'][rows],
           data['att_masks'][rows] if data['att_masks'] is not None else None]
    img, fc_feats, att_feats, att_masks = [
        torch.from_numpy(arr).cuda() if arr is not None else arr for arr in tmp]

    # The forward pass in 'sample' mode populates model.done_beams, which is
    # what the per-beam decoding below reads; `seq` itself is not used.
    with torch.no_grad():
        seq = model(img, fc_feats, att_feats, att_masks,
                    opt=vars(opt), mode='sample')[0].data

    # Decode every beam for every image: captions[i] is a list of strings.
    captions = []
    for i in range(img.shape[0]):
        beams = [utils.decode_sequence(vocab, beam['seq'].unsqueeze(0))[0]
                 for beam in model.done_beams[i]]
        captions.append(beams)

    for k, sent in enumerate(captions):
        image_id = data['infos'][k]['id']
        key = str(image_id)
        if key in predictions:
            # One full epoch is done: write the results file and exit.
            results_dir = '/mnt/hdd0/home/fyc/Transformer/data/swin_caption_result2'
            model_id = vars(opt).get('id')
            # makedirs(exist_ok=True) also creates missing parents and avoids
            # the check-then-create race of the original isdir()+mkdir().
            os.makedirs(results_dir, exist_ok=True)
            cache_path = os.path.join(results_dir, model_id + '_' + 'train' + '.json')
            # `with` guarantees the file is flushed and closed before exit();
            # the original leaked the handle from open() inside json.dump().
            with open(cache_path, 'w') as out_file:
                json.dump(predictions, out_file)
            exit()
        # Plain insertion is O(1); the original rebuilt the whole dict via
        # dict({...}, **predictions) on every image, i.e. O(n^2) per epoch.
        predictions[key] = {'image_id': image_id, 'caption': sent,
                            'file_path': data['infos'][k]['file_path']}

