import os

import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

import data
from data import MyDataset
from mymodel import MyModule
from utils import LoggerSingleton
from vocab import deserialize_vocab


def calc_sims_metric(label_pd):
    """Build the 0/1 image-text relevance matrix from an id-pair table.

    Args:
        label_pd: (n_pairs, >=2) array whose first column holds image ids and
            last column holds text ids; each row marks one relevant pair.

    Returns:
        (n_unique_imgs, n_unique_txts) float32 matrix with 1. at every
        (image, text) cell that appears as a row in `label_pd`, 0. elsewhere.
        Rows/columns follow the sorted order of the unique ids (np.unique).
    """
    img_all, txt_all = label_pd[:, 0], label_pd[:, -1]
    # return_inverse maps each raw id to its row/column in one vectorized pass,
    # replacing a per-row np.where lookup (O(n*m) -> O(n log n)).
    imgs, img_idx = np.unique(img_all, return_inverse=True)
    txts, txt_idx = np.unique(txt_all, return_inverse=True)
    sim = np.zeros((imgs.shape[0], txts.shape[0]), dtype=np.float32)
    sim[img_idx, txt_idx] = 1.
    return sim


def recall_at_k(metric, label, k, i2t_t2i='i2t'):
    """Recall@k over a score matrix against a 0/1 relevance matrix.

    A query row counts as a hit when any of its relevant columns (label == 1.)
    appears among its k highest-scoring columns. For 't2i' both matrices are
    transposed first so rows become text queries.

    Returns the hit percentage rounded to 2 decimals.
    """
    if i2t_t2i == 't2i':
        metric, label = metric.T, label.T
    n_queries = label.shape[0]
    hits = 0.0
    for row in range(n_queries):
        # indices of the k largest scores for this query
        top_k = np.argsort(metric[row])[::-1][:k]
        # columns marked relevant for this query
        relevant = np.where(label[row] == 1.)[0]
        if np.intersect1d(top_k, relevant).size > 0:
            hits += 1.0
    return round(100.0 * hits / n_queries, 2)


def i2t(npts, sims, per_captions=1, return_ranks=False):
    """Images->Text (Image Annotation) retrieval metrics.

    Args:
        npts: number of image queries.
        sims: (npts, per_captions * npts) image-to-caption similarity matrix;
            captions per_captions*i .. per_captions*(i+1)-1 belong to image i.
        per_captions: ground-truth captions per image.
        return_ranks: also return per-query ranks and top retrieved indices.

    Returns:
        (r1, r5, r10, medr, meanr) and, when return_ranks is True, a second
        tuple (ranks, top1, top5, retrieved_orders).
    """
    ranks = np.zeros(npts)
    top1 = np.zeros(npts)
    top5 = np.zeros((npts, 5), dtype=int)
    retrieved_orders = []
    for q in range(npts):
        order = np.argsort(sims[q])[::-1]
        retrieved_orders.append(order)
        # best (smallest) position among this image's ground-truth captions;
        # default mirrors the original 1e20 sentinel when per_captions == 0
        gt_ids = range(per_captions * q, per_captions * (q + 1))
        ranks[q] = min((np.where(order == g)[0][0] for g in gt_ids),
                       default=1e20)
        top1[q] = order[0]
        top5[q] = order[:5]

    n = len(ranks)
    r1 = 100.0 * int((ranks < 1).sum()) / n
    r5 = 100.0 * int((ranks < 5).sum()) / n
    r10 = 100.0 * int((ranks < 10).sum()) / n
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    if return_ranks:
        return (r1, r5, r10, medr, meanr), (ranks, top1, top5, retrieved_orders)
    return r1, r5, r10, medr, meanr


def encode_data_u(model, dl, logger=None, writer=None):
    """Encode all images and captions in `dl` and return deduplicated embeddings.

    Each batch's embeddings are written into id-indexed slots, so an item that
    appears in several batches collapses to a single row. Rows that were never
    written (still all zero) are dropped once, after the loop.

    Args:
        model: model exposing forward_emb(img, txt, txt_len, img_len).
        dl: DataLoader whose dataset exposes min/max img/txt ids.
        logger, writer: unused; kept for backward-compatible interface.

    Returns:
        (img_embs_u, cap_embs_u) numpy arrays of the non-zero rows, or
        (None, None) when the loader yields no batches.
    """
    total = len(dl.dataset)
    min_img_id = dl.dataset.min_img_id
    max_img_id = dl.dataset.max_img_id
    min_txt_id = dl.dataset.min_txt_id
    max_txt_id = dl.dataset.max_txt_id

    img_embs = None
    cap_embs = None

    bs = dl.batch_size
    # number of batches (ceil division); was round(total / bs, 1), which put a
    # float like 12.5 in the progress denominator
    iter_num = (total + bs - 1) // bs
    loop = tqdm(dl, desc='Val', ncols=100)
    for i, data_i in enumerate(loop):
        (img, img_len,
         txt, txt_len,
         index, tab_img_id, tab_txt_id) = data_i
        tab_img_id = np.array(tab_img_id)
        tab_txt_id = np.array(tab_txt_id)
        img_emb, cap_emb = model.forward_emb(img, txt, txt_len, img_len)
        # lazily allocate once the embedding dimensionality is known
        if img_embs is None:
            if img_emb.dim() == 3:
                # NOTE(review): the 3-D branch allocates `total` rows but is
                # still indexed by id - min_img_id below — confirm ids are
                # dense in [min_img_id, min_img_id + total) for this path
                img_embs = np.zeros((total, img_emb.size(1), img_emb.size(2)))
            else:
                img_embs = np.zeros((max_img_id - min_img_id + 1, img_emb.size(1)))
            cap_embs = np.zeros((max_txt_id - min_txt_id + 1, cap_emb.size(1)))
        # cache img embeddings into their id slots (first occurrence wins order
        # resolved by sort_with_index)
        index_i, s_i = sort_with_index(tab_img_id, img_emb.data.cpu().numpy().copy())
        img_embs[index_i - min_img_id] = s_i
        # cache txt embeddings
        index_t, s_t = sort_with_index(tab_txt_id, cap_emb.data.cpu().numpy().copy())
        cap_embs[index_t - min_txt_id] = s_t

        del img, txt
        loop.set_description(f'Val:\t[{i + 1}/{iter_num}][{bs}]')

    if img_embs is None:  # empty loader: preserve original (None, None) result
        return None, None
    # Drop never-written (all-zero) rows once, after the loop — previously this
    # full-matrix scan ran inside every iteration with only the last result used.
    img_mask = ~np.all(img_embs.reshape(img_embs.shape[0], -1) == 0, axis=1)
    cap_mask = ~np.all(cap_embs.reshape(cap_embs.shape[0], -1) == 0, axis=1)
    return img_embs[img_mask], cap_embs[cap_mask]


def sort_with_index(index_, emb_):
    """Deduplicate `index_`, keeping one embedding row per unique id.

    Args:
        index_: 1-D array of (possibly repeated) integer ids.
        emb_: array whose row i is the embedding for index_[i].

    Returns:
        (unique_ids, unique_embs): ids in ascending order and the embedding of
        one occurrence of each id (chosen by the sort order of `index_`).
    """
    positions = np.arange(index_.shape[0])
    order = np.argsort(index_)
    sorted_ids = index_[order]
    sorted_pos = positions[order]
    # first slot of each unique id within the sorted sequence
    uniq_ids, first = np.unique(sorted_ids, return_index=True)
    return uniq_ids, emb_[sorted_pos[first]]


def compute_sim_i2t(images, captions):
    """Image-to-text similarity with per-column top-k mean subtracted.

    Computes images @ captions.T, then shifts every column down by
    alpha * (mean of its k largest entries) — a caption-side popularity
    penalty. Returns the (n_img, n_txt) adjusted matrix.
    """
    sim = np.matmul(images, captions.T)  # n_img x n_txt
    k = 5
    alpha = 0.5
    for col in range(sim.shape[1]):
        # k largest scores in this caption's column (unordered)
        top_k = np.partition(sim[:, col], -k)[-k:]
        sim[:, col] -= alpha * top_k.sum() / k
    return sim


def compute_sim_t2i(images, captions):
    """Text-to-image similarity with per-row top-k mean subtracted.

    Computes images @ captions.T, then shifts every row down by
    alpha * (mean of its k largest entries) — an image-side popularity
    penalty. Returns the (n_img, n_txt) adjusted matrix.
    """
    sim = np.matmul(images, captions.T)
    k = 5
    alpha = 0.5
    for row in range(sim.shape[0]):
        # k largest scores in this image's row (unordered)
        top_k = np.partition(sim[row], -k)[-k:]
        sim[row] -= alpha * top_k.sum() / k
    return sim


def test_rank(new_stat, split='val', save_path=None, logger=None):
    """Evaluate a restored checkpoint: encode a split and log recall metrics.

    Args:
        new_stat: dict with 'opt', 'model' (state dict), 'epoch',
            'best_rsum' and 'eiter' from the checkpoint.
        split: dataset split name handed to MyDataset.
        save_path: currently unused; reserved for result dumping.
        logger: logger used for all progress and metric output.
    """
    opt = new_stat['opt']
    state_dict = new_stat['model']
    logger.info(f'trained best rsum: {new_stat["best_rsum"]}')
    logger.info(f'trained epoch num: {new_stat["epoch"]}')
    logger.info(f'eiter: {new_stat["eiter"]}')

    # Rebuild the training-time vocabulary, including the <mask> token.
    vocab = deserialize_vocab(os.path.join(opt.vocab_path, f'{opt.data_name}.json'))
    vocab.add_word('<mask>')
    opt.vocab_size = len(vocab)

    # Restore the model and switch it to evaluation mode.
    model = MyModule(opt, logger)
    model.load_state_dict(state_dict)
    model.val_start()

    logger.info('Loading Dataset...')
    collate = data.Collate()
    testdatas = MyDataset(opt, split, vocab, train=False)
    testloader = DataLoader(dataset=testdatas, batch_size=opt.batch_size,
                            pin_memory=True, shuffle=False, collate_fn=collate)
    logger.info('Computing results...')
    with torch.no_grad():
        # Encode every validation image and caption once.
        img_embs, cap_embs = encode_data_u(model, testloader, logger=logger)

    # Ground-truth 0/1 relevance matrix from the split's id-pair map.
    gt01 = calc_sims_metric(testdatas.l_map)

    sims_i2t = compute_sim_i2t(img_embs, cap_embs)
    sims_t2i = compute_sim_t2i(img_embs, cap_embs)

    r10 = recall_at_k(sims_i2t, gt01, 10, 'i2t')
    r5 = recall_at_k(sims_i2t, gt01, 5, 'i2t')
    r1 = recall_at_k(sims_i2t, gt01, 1, 'i2t')
    r10_ = recall_at_k(sims_t2i, gt01, 10, 't2i')
    r5_ = recall_at_k(sims_t2i, gt01, 5, 't2i')
    r1_ = recall_at_k(sims_t2i, gt01, 1, 't2i')
    rsum = r10 + r10_ + r5_ + r5 + r1 + r1_
    logger.info(f"------------ Test: ------------")
    logger.info(f"i2t R10: {r10}%\tR5: {r5}%\tR1: {r1}%")
    logger.info(f"t2i R10: {r10_}%\tR5: {r5_}%\tR1: {r1_}%\tRSUM: {rsum}")


def main():
    """Load the best checkpoint and run retrieval evaluation on its dataset."""
    data_name = 'f30k'  # dataset to evaluate
    data_path = f'D:/AAExp/Dataset/{data_name}'  # dataset root directory
    vocab_path = 'D:/AAExp/Dataset/vocab'  # vocabulary directory
    save_results = True  # whether to build a result file path
    # Checkpoint directory (a dead './results/models' assignment that was
    # immediately overwritten has been removed).
    weights_bases = '.'
    # NOTE(review): weights_only=False unpickles arbitrary objects from the
    # checkpoint file — only load checkpoints from a trusted source.
    ck = torch.load(os.path.join(weights_bases, 'best.pth.tar'), weights_only=False)

    # Re-point the stored options at the local dataset/vocab paths.
    opt = ck['opt']
    opt.suffix = 'test'
    opt.data_path = data_path
    opt.vocab_path = vocab_path
    opt.data_name = data_name
    # exist_ok avoids the check-then-create race of the previous exists() guard
    os.makedirs(opt.logs_dir, exist_ok=True)
    logger = LoggerSingleton(opt.logs_dir, opt.suffix)
    logger.info(opt)

    new_state = {
        'epoch': ck['epoch'],
        'model': ck['model'],
        'opt': opt,
        'best_rsum': ck['best_rsum'],
        'eiter': ck['eiter'],
    }

    if save_results:
        save_path = os.path.join(weights_bases, f'res_{data_name}.npy')
    else:
        save_path = None
    if data_name == 'f30k':
        test_rank(new_state, split='test', save_path=save_path, logger=logger)


# Script entry point: run the checkpoint evaluation configured in main().
if __name__ == '__main__':
    main()
