import argparse
import os
import math
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path

import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist

from models import load_pretrained
from models.model_captioning import XVLM

from models.tokenization_bert import BertTokenizer
from models.tokenization_roberta import RobertaTokenizer

import utils
from utils.hdfs_io import hmkdir, hexists


import torch.utils.data as data

from PIL import Image
from torchvision import transforms
from tqdm import tqdm

class ImageInput(data.Dataset):
    """Dataset yielding (transformed image, file path) pairs for every .png in a directory."""

    def __init__(self, dir, transform):
        """
        Args:
            dir: directory scanned (non-recursively) for files ending in '.png'.
            transform: callable applied to each loaded PIL image
                (e.g. a torchvision transform pipeline).
        """
        self.dir = dir
        # Sort for a deterministic order — os.listdir order is filesystem-dependent,
        # so without sorting the dataset index -> file mapping can vary between runs.
        self.datas = sorted(os.path.join(dir, x) for x in os.listdir(dir) if x.endswith('.png'))
        self.transform = transform

    def __getitem__(self, idx):
        # Convert to RGB so grayscale/RGBA PNGs still produce 3-channel input.
        image = Image.open(self.datas[idx]).convert('RGB')
        image = self.transform(image)
        return image, self.datas[idx]

    def __len__(self):
        return len(self.datas)

class Caption:
    """Caption all .png images in a directory with a pretrained X-VLM model."""

    def __init__(self, args, config):
        """
        Args:
            args: parsed CLI namespace (device, seed, checkpoint, input_dir, bs, ...).
            config: captioning config dict (image_res, num_beams, max/min_length, ...).
        """
        self.args = args
        self.config = config

        # CLIP-style channel mean/std normalization.
        normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
        self.transform = transforms.Compose([
            transforms.Resize((config['image_res'], config['image_res']), interpolation=Image.BICUBIC),
            transforms.ToTensor(),
            normalize,
        ])

    def demo(self):
        """Run caption generation over every image in args.input_dir.

        Returns:
            dict mapping image file stem (basename without the '.png' suffix)
            to its generated caption string.
        """
        device = torch.device(self.args.device)

        # Seed everything so beam-search generation is reproducible per rank.
        seed = self.args.seed + utils.get_rank()
        torch.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        cudnn.benchmark = True

        model = XVLM(config=self.config)
        model.load_pretrained(self.args.checkpoint, self.config, is_eval=True,
                              load_capt_pretrain=self.args.load_capt_pretrain)
        model = model.to(device)
        model.eval()  # inference only: disable dropout / freeze batchnorm stats

        dataset = ImageInput(self.args.input_dir, self.transform)
        # Fix: was `args.bs`, which silently read the module-level global `args`
        # and broke any use of Caption outside this script's __main__.
        data_loader = data.DataLoader(dataset, self.args.bs, num_workers=8,
                                      shuffle=False, pin_memory=True)

        caption_list = {}

        with torch.no_grad():  # no gradients needed; avoids holding activations during generation
            for image, img_files in tqdm(data_loader):
                image = image.to(device, non_blocking=True)

                caption = model.generate(image, sample=False, num_beams=self.config['num_beams'],
                                         max_length=self.config['max_length'],
                                         min_length=self.config['min_length'])

                for x, cap in zip(img_files, caption):
                    # Key results by filename without the 4-char '.png' extension.
                    caption_list[os.path.basename(x)[:-4]] = cap
        return caption_list

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', type=str, default='weights/coco_capt_cider_step_44275.th')
    parser.add_argument('--config', default='./configs/Captioning.yaml')
    parser.add_argument('--output_dir', default='output/x-vlm_cap')

    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # NOTE(review): store_false means --distributed defaults to True and the flag
    # turns it OFF — looks inverted, but kept as-is since downstream code may rely on it.
    parser.add_argument('--distributed', action='store_false')

    parser.add_argument('--load_capt_pretrain', action='store_true')
    parser.add_argument('--bs', default=1, type=int)

    parser.add_argument('--input_dir', type=str, default='./input',
                        help="directory of .png images to caption")

    args = parser.parse_args()

    # Use context managers so file handles are always closed.
    with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.Loader)

    hmkdir(args.output_dir)

    # Snapshot the effective config next to the results for reproducibility.
    with open(os.path.join(args.output_dir, 'config.yaml'), 'w') as f:
        yaml.dump(config, f)

    cap = Caption(args, config)
    caption_res = cap.demo()

    with open(os.path.join(args.output_dir, 'caption_res.yaml'), 'w') as f:
        yaml.dump(caption_res, f)

    print('ok')