import argparse
import os
import math
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path

import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist

from models import load_pretrained
from models.model_captioning_encoder import XVLM

from torch.nn import functional as F

import utils
from utils.hdfs_io import hmkdir, hexists

import torch.utils.data as data

from PIL import Image
from torchvision import transforms
from tqdm import tqdm

from loguru import logger
#from threading import Thread
from concurrent.futures import ThreadPoolExecutor

from rs import rank_sim3 as rank_sim_c


class ImageBoxInput(data.Dataset):
    """Dataset yielding (image, proposal boxes, ground-truth boxes) per image.

    The bbox file (loaded via ``torch.load``) is expected to be a dict with
    keys ``'BBs_pos'`` and ``'BBGTs'``, each mapping image id -> boxes.
    """

    def __init__(self, img_root, bbox_path, transform, coco=None):
        """
        Args:
            img_root: directory containing the ``.jpg`` images.
            bbox_path: path to the torch-serialized bbox dict.
            transform: callable applied to each loaded PIL image.
            coco: if True, filenames are the image id zero-padded to 12 digits
                (COCO naming convention). Defaults to ``None``, which falls
                back to the module-level CLI flag ``args.coco`` for backward
                compatibility with script usage.
        """
        self.img_root = img_root
        self.bbox_path = bbox_path
        self.coco = coco

        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # bbox files from trusted sources.
        self.bbox_data = torch.load(bbox_path)
        self.img_ids = list(self.bbox_data['BBGTs'].keys())

        self.transform = transform

    def __getitem__(self, idx):
        imid = self.img_ids[idx]
        # Previous version read the global `args.coco` unconditionally; keep
        # that as a fallback so existing script behavior is unchanged.
        coco = self.coco if self.coco is not None else args.coco
        img_path = os.path.join(self.img_root, f'{str(imid).zfill(12) if coco else imid}.jpg')

        image = Image.open(img_path).convert('RGB')
        image = self.transform(image)
        return image, self.bbox_data['BBs_pos'][imid], self.bbox_data['BBGTs'][imid]

    def __len__(self):
        return len(self.bbox_data['BBGTs'])

class Caption:
    """Compute pairwise rank-similarity ("SLA") matrices between embeddings of
    proposal boxes (BB) and ground-truth boxes (BBGT) for each image, using a
    pretrained XVLM encoder, and save one ``sla_data_<idx>.npy`` per image.

    "Rank similarity" between two embeddings is the overlap size of their
    top-k activation index sets (optionally normalized by k).
    """

    def __init__(self, args, config):
        self.args = args
        self.config = config

        # NOTE(review): images are only converted/normalized here, NOT resized;
        # each box crop is resized later via F.interpolate in get_embeddedings.
        normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            normalize
        ])

        self.device = torch.device(self.args.device)

        self.patch_size = (self.config['image_res'], self.config['image_res'])
        # Reusable device-side buffer holding one batch of resized box crops.
        self.img_buf = torch.empty((self.args.bs_patch, 3, self.config['image_res'], self.config['image_res']), device=self.device)

        # Per-box embedding shape produced by the encoder — presumably
        # 144 patch tokens + 1 CLS token at dim 1024; TODO confirm vs model.
        self.embed_shape = (145, 1024)
        self.embed_len = self.embed_shape[0] * self.embed_shape[1]
        self.cuda_true = torch.tensor(True, device=self.device)

        seed = self.args.seed + utils.get_rank()
        torch.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        cudnn.benchmark = True

    def build_model(self):
        """Build the XVLM encoder, load the checkpoint, and move it to GPU
        wrapped in DataParallel."""
        model = XVLM(config=self.config)
        model.load_pretrained(self.args.checkpoint, self.config)
        model = torch.nn.DataParallel(model)
        self.model = model.cuda()

    def rank_similarity(self, emb_BB, emb_BBGT):
        """Dense rank similarity: [N_BB, N_BBGT] matrix of top-k index-set
        overlap counts.

        NOTE(review): unlike rank_similarity_p / rank_similarity_cnp this
        returns RAW counts, not counts normalized by topk.
        """
        topk_BB = emb_BB.topk(self.args.topk, dim=1)[1]
        topk_BBGT = emb_BBGT.topk(self.args.topk, dim=1)[1]

        # One-hot "is in top-k" masks per embedding.
        t_BB = torch.zeros_like(emb_BB, dtype=torch.bool)
        t_BBGT = torch.zeros_like(emb_BBGT, dtype=torch.bool)

        t_BB.scatter_(1, topk_BB, True)
        t_BBGT.scatter_(1, topk_BBGT, True)

        sim_t = t_BB.unsqueeze(1) & t_BBGT.unsqueeze(0)  # [N_BB, N_BBGT, N]

        rank_sim = sim_t.sum(dim=2)
        return rank_sim

    def rank_similarity_p(self, emb_BB, emb_BBGT, bs=1000):
        """Batched variant of rank_similarity that processes N_BB rows in
        chunks of ``bs`` to bound peak memory; returns overlap counts
        normalized by topk, as float."""
        topk_BB = emb_BB.topk(self.args.topk, dim=1)[1]
        topk_BBGT = emb_BBGT.topk(self.args.topk, dim=1)[1]

        N_BB = emb_BB.shape[0]
        N_BBGT = emb_BBGT.shape[0]

        t_BB = torch.zeros_like(emb_BB, dtype=torch.uint8)
        t_BBGT = torch.zeros_like(emb_BBGT, dtype=torch.uint8)

        t_BB.scatter_(1, topk_BB, 1)
        t_BBGT.scatter_(1, topk_BBGT, 1)

        # int16 is sufficient: overlap <= topk; hedged — assumes topk < 32768.
        sim_t = torch.empty((N_BB, N_BBGT), dtype=torch.int16)

        for i in range(N_BB // bs):
            l, h = i * bs, (i + 1) * bs
            sim_t[l:h, :] = (t_BB[l:h].unsqueeze(1) & t_BBGT.unsqueeze(0)).sum(dim=2)

        # Remainder rows (also covers N_BB < bs).
        l = (N_BB // bs) * bs
        sim_t[l:, :] = (t_BB[l:].unsqueeze(1) & t_BBGT.unsqueeze(0)).sum(dim=2)

        rank_sim = sim_t.float() / self.args.topk
        return rank_sim

    def rank_similarity_cnp(self, emb_BB, emb_BBGT):
        """C-extension variant (rs.rank_sim3): fastest path, used by the worker
        threads. Returns a float16 numpy array of normalized overlap counts.
        Requires CPU tensors (embeddings are already .cpu() from
        get_embeddedings)."""
        topk_BB = emb_BB.topk(self.args.topk, dim=1)[1].int().numpy()
        topk_BBGT = emb_BBGT.topk(self.args.topk, dim=1)[1].int().numpy()

        sim_t = rank_sim_c(topk_BB, topk_BBGT, self.embed_len, self.args.topk)

        rank_sim = sim_t.astype(np.float16) / self.args.topk
        return rank_sim

    def rank_similarity_thread(self, embs_BB, embs_BBGT, imid):
        """Worker-thread job: compute the similarity matrix and save it as
        ``sla_data_<imid>.npy`` in the output directory."""
        rank_sim = self.rank_similarity_cnp(embs_BB.flatten(1), embs_BBGT.flatten(1))
        np.save(os.path.join(self.args.output_dir, f'sla_data_{imid}.npy'), rank_sim)

    def _fill_patch(self, image, bbox_p, pid, im_h, im_w):
        """Crop box ``bbox_p`` — presumably (x0, y0, x1, y1) inclusive, TODO
        confirm against the bbox file — out of ``image`` (C,H,W), resize to
        self.patch_size and write it into slot ``pid`` of self.img_buf.
        Boxes that are degenerate after clipping become all-zero patches."""
        l, r = max(0, bbox_p[0]), min(im_w, bbox_p[2] + 1)
        t, b = max(0, bbox_p[1]), min(im_h, bbox_p[3] + 1)
        if l + 2 < r and t + 2 < b:
            self.img_buf[pid] = F.interpolate(image[:, t:b, l:r].unsqueeze(0), size=self.patch_size, mode='bilinear', align_corners=True)
        else:
            self.img_buf[pid] = torch.zeros((3, *self.patch_size), device=self.device)

    def get_embeddedings(self, image, boxes, bs=128):
        """Embed every box of one image in batches of ``bs``.

        Args:
            image: normalized image tensor (C, H, W) on self.device.
            boxes: [N, 4] box coordinates.
            bs: batch size (must be <= args.bs_patch, the img_buf capacity).

        Returns:
            CPU float32 tensor of shape (N, *self.embed_shape).
        """
        embs = torch.empty((boxes.shape[0], *self.embed_shape), dtype=torch.float32)

        im_h, im_w = image.shape[1:]

        # Full batches.
        for i in range(len(boxes) // bs):
            p_base = i * bs
            for pid in range(bs):
                self._fill_patch(image, boxes[p_base + pid], pid, im_h, im_w)
            embs[p_base:p_base + bs] = self.model(self.img_buf).cpu()

        # Remainder batch (len(boxes) % bs boxes).
        p_base = (len(boxes) // bs) * bs
        n_rest = len(boxes) % bs
        if n_rest > 0:
            for pid in range(n_rest):
                self._fill_patch(image, boxes[p_base + pid], pid, im_h, im_w)
            embs[p_base:] = self.model(self.img_buf[:n_rest]).cpu()

        return embs

    @torch.no_grad()
    def demo(self):
        """Main loop: embed BB/BBGT boxes for every image in rank_range and
        hand the similarity computation + .npy saving to a thread pool so the
        GPU stays busy while results are written."""
        dataset = ImageBoxInput(self.args.img_root, self.args.bbox_path, self.transform)
        logger.info('data load ok')

        if self.args.rank_range is None:
            self.args.rank_range = [0, len(dataset)]

        # The context manager calls shutdown(wait=True) on exit, guaranteeing
        # every submitted similarity job finishes before demo() returns
        # (previous code relied on interpreter-exit joining / a sleep hack).
        with ThreadPoolExecutor(self.args.n_worker) as pool:
            for imid in range(*self.args.rank_range):
                # NOTE(review): `imid` is a positional dataset index, not the
                # image id from the bbox file — output files are named by this
                # index. Verify downstream consumers expect that.
                image, BB, BBGT = dataset[imid]

                if imid % 10 == 0:
                    logger.info(f'{imid}/{self.args.rank_range[1]}')
                image = image.to(self.device, non_blocking=True)
                BB = BB.to(self.device, non_blocking=True)
                BBGT = BBGT.to(self.device, non_blocking=True)

                embs_BB = self.get_embeddedings(image, BB, self.args.bs_patch)
                embs_BBGT = self.get_embeddedings(image, BBGT, self.args.bs_patch)

                pool.submit(self.rank_similarity_thread, embs_BB.flatten(1), embs_BBGT.flatten(1), imid)


if __name__ == '__main__':
    def str2range(v):
        """Parse a 'start,end' string into [start, end]."""
        ran = v.split(',')
        return [int(ran[0]), int(ran[1])]

    def str2bool(v):
        """Parse truthy/falsy CLI strings. Using ``type=bool`` is a bug:
        bool('False') is True, so any non-empty value enabled the flag."""
        return str(v).lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', type=str, default='weights/coco_capt_cider_step_44275.th')
    parser.add_argument('--config', default='./configs/Captioning.yaml')
    parser.add_argument('--output_dir', default='output/')

    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', action='store_false')

    parser.add_argument('--load_capt_pretrain', action='store_true')
    parser.add_argument('--bs', default=1, type=int)
    parser.add_argument('--bs_patch', default=1, type=int)
    parser.add_argument('--topk', default=1000, type=int)

    parser.add_argument('--rank_range', default=None, type=str2range)
    # str2bool (not bool) so '--coco False' actually disables the flag.
    parser.add_argument('--coco', default=False, type=str2bool)
    parser.add_argument('--n_worker', default=6, type=int)

    # for self-critical sequence training
    parser.add_argument('--img_root', type=str, default='./input', help="to collect eval results among nodes")
    parser.add_argument('--bbox_path', type=str, default='./input', help="to collect eval results among nodes")

    args = parser.parse_args()

    # Context managers so the config handles are closed deterministically.
    with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.Loader)

    hmkdir(args.output_dir)

    with open(os.path.join(args.output_dir, 'config.yaml'), 'w') as f:
        yaml.dump(config, f)

    if args.rank_range is None:
        logger.add("logs/sla_cap_all.log")
    else:
        logger.add(f"logs/sla_cap_{args.rank_range[0]}-{args.rank_range[1]}.log")

    cap = Caption(args, config)
    cap.build_model()
    cap.demo()

    logger.info('ok')