import torch
import numpy as np
import io

# Number of negative samples drawn per positive example during training.
number_neg_sample_train = 1


def get_len_of_nonzero(texts):
    """Return, for each row of ``texts``, the index of its last non-zero token.

    Despite the name, this is a last-index, not a count:
    - a row whose first token is 0 yields 0;
    - a row with a non-zero final token yields its last index;
    - otherwise, the position just before the first 0 (assumes zeros pad
      the tail -- TODO confirm against callers).
    """
    last_idx = torch.zeros(len(texts), dtype=torch.long)
    for row, seq in enumerate(texts):
        if seq[0] == 0:
            last_idx[row] = 0
        elif seq[-1] != 0:
            last_idx[row] = seq.shape[0] - 1
        else:
            last_idx[row] = seq.tolist().index(0) - 1
    return last_idx


def computer_train_auc(scores, num_neg=None):
    """Compute training AUC over a batch of scores.

    (Name kept for backward compatibility; "computer" is a typo for
    "compute".)

    Args:
        scores: 1-D tensor laid out as positives first, then ``num_neg``
            negative scores at the end.
        num_neg: number of trailing negative scores. Defaults to the
            module-level ``number_neg_sample_train`` (backward compatible).

    Returns:
        Fraction of (positive, negative) pairs where the positive is
        ranked strictly higher.
    """
    if num_neg is None:
        num_neg = number_neg_sample_train
    scores = scores.cpu()
    num_pos = len(scores) - num_neg
    score_neg = scores[num_pos:]
    num_hit = 0

    for i in range(num_pos):
        # Count negatives scored strictly below this positive.
        num_hit += int((score_neg < scores[i]).sum().item())

    auc = num_hit / (num_pos * num_neg)
    return auc


# -- performance compute -------------------------------------------------------------

def dcg_at_k(r, k, method=1):
    """Discounted cumulative gain of relevance scores ``r`` at rank ``k``.

    Args:
        r: sequence of relevance scores in rank order.
        k: cutoff rank; only the first ``k`` entries are used.
        method: 0 -> no discount on the first position
            (r[0] + sum r[i]/log2(i+1) for i >= 1);
            1 -> standard log2 discount from position 1.

    Returns:
        The DCG as a float; 0.0 for an empty prefix.

    Raises:
        ValueError: if ``method`` is not 0 or 1.
    """
    # np.asfarray was removed in NumPy 2.0; asarray(dtype=float) is the
    # drop-in replacement.
    r = np.asarray(r, dtype=float)[:k]
    if r.size:
        if method == 0:
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            return np.sum(r / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.


def ndcg_at_k(r, k, method=0):
    """Normalized DCG at rank ``k``: DCG of ``r`` over the ideal DCG.

    The ideal DCG is computed on ``r`` sorted in descending order.
    Returns 0.0 when the ideal DCG is zero (e.g. all-zero relevances).
    """
    ideal = dcg_at_k(sorted(r, reverse=True), k, method)
    return dcg_at_k(r, k, method) / ideal if ideal else 0.


def top_match(rec_scores, item_ids, true_indicator, k):
    """Top-k evaluation of a recommendation list with one ground-truth item.

    Args:
        rec_scores: 1-D tensor of scores aligned with ``item_ids``.
        item_ids: candidate item ids (indexable by a rank array).
        true_indicator: the single ground-truth item id.
        k: cutoff rank.

    Returns:
        Tuple ``(p, r, f, ndcg)`` at k. With exactly one ground-truth
        item, precision and recall are both 1.0 on a hit and 0.0 otherwise.
    """
    # get rec item ids
    rec_scores = rec_scores.detach().cpu().numpy()
    rank_list = (-rec_scores).argsort()[:k]  # start from highest
    rec_list = item_ids[rank_list]

    # get true item ids
    true_list = true_indicator
    # compute top-k hits
    hit = 0
    hit_indicator = []
    for item_id in rec_list:
        if item_id.item() == true_list:
            hit = 1
            # BUGFIX: record the hit position in the relevance vector.
            # Previously the loop broke before appending, leaving
            # hit_indicator all zeros, so NDCG was always 0 even on a hit.
            hit_indicator.append(1)
            break
        else:
            hit_indicator.append(0)

    # compute performance
    f = 0.0
    p = 0.0
    r = 0.0
    ndcg = 0.0
    if hit:
        p = float(hit)
        r = float(hit) / 1  # exactly one ground-truth item
        f = 2 * p * r / (p + r)
        ndcg = ndcg_at_k(hit_indicator, k)

    return p, r, f, ndcg


# -- draw negative samples from the dataset for each positive-sample id --------------
def prepare_sample(number_neg_sample_train, index, BATCH_SIZE, image_texts):
    """Build a training batch of positive and negative dataset indices.

    For each positive id in ``index``, draws ``number_neg_sample_train``
    negatives uniformly from ``image_texts`` (excluding that positive) and
    places the positive at a random offset that is shared by every group
    of ``1 + number_neg_sample_train`` slots in the batch.

    Returns:
        Tuple of (sample index tensor, positive batch positions as a list,
        negative batch positions as a list).
    """
    all_ids = list(range(len(image_texts)))
    group = number_neg_sample_train + 1

    # One random offset for the positive, shared across the whole batch.
    offset = np.random.choice(number_neg_sample_train)
    pos_samples_index = np.linspace(offset,
                                    (BATCH_SIZE - 1) * group + offset,
                                    num=BATCH_SIZE, endpoint=True, retstep=False, dtype=int)
    neg_samples_index = np.setdiff1d(np.arange(0, BATCH_SIZE * group), pos_samples_index)

    # Find number_neg_sample_train negatives for every positive example.
    samples_id = []
    for pos_id in index:
        # Exclude the positive itself from the sampling pool.
        neg_pool = np.setdiff1d(all_ids, pos_id)
        negs = np.random.choice(neg_pool, number_neg_sample_train, replace=False)
        # Splice the positive into its offset slot between the negatives.
        combined = np.append(np.append(negs[:offset], pos_id), negs[offset:])
        samples_id += combined.tolist()

    sample_indexes = torch.tensor(samples_id).squeeze()
    return sample_indexes, pos_samples_index.tolist(), neg_samples_index.tolist()


# Repeat entries so shapes line up for matrix operations.
def my_repeat(text, number_neg_sample_train):
    """Repeat each element of ``text`` ``number_neg_sample_train`` times.

    e.g. [a, b] with n=2 -> [a, a, b, b]. Always returns a 1-D CPU
    LongTensor; an empty input yields an empty LongTensor.
    """
    text = text.cpu()
    pieces = [elem.repeat(number_neg_sample_train).long() for elem in text]
    if not pieces:
        return torch.empty(0, dtype=torch.long)
    return torch.cat(pieces)


# -- calculate the auc ------------------------------------------------------------------
def compute_auc(scores, index, number_sample_eval):
    """AUC for the single positive at position ``index`` in ``scores``.

    Args:
        scores: 1-D tensor with one positive (at ``index``) plus
            ``number_sample_eval`` negative scores.
        index: position of the positive score within ``scores``.
        number_sample_eval: number of negative samples.

    Returns:
        Fraction of negatives ranked strictly below the positive.
    """
    scores = scores.cpu()
    num_pos = len(scores) - number_sample_eval
    # Everything except the positive itself counts as a negative.
    score_neg = torch.cat([scores[:index], scores[(index + 1):]])

    # The original loop's body was invariant in the loop variable (it
    # always compared against scores[index]), so compute the count once
    # and multiply — identical arithmetic, without the redundant loop.
    hits = int((score_neg < scores[index]).sum().item())
    num_hit = num_pos * hits

    auc = num_hit / (num_pos * number_sample_eval)
    return auc


# -- val ------------------------------------------------------------------
def sampler(number_sample_eval, index):
    """Build evaluation candidates: ground-truth ids followed by negatives.

    For each ground-truth id in ``index``, samples ``number_sample_eval``
    negative ids without replacement from the test pool, excluding the
    ground-truth item itself.

    NOTE(review): relies on a module-level ``image_texts_test`` defined
    elsewhere in this file — verify it is in scope before calling.

    Returns:
        1-D tensor: ``index`` ids first, then all negative ids.
    """
    gt_item_ids = index
    neg_samples_index = []

    candidate_pool = [i for i in range(len(image_texts_test))]
    for gt in index:
        # Exclude the ground-truth item, then sample without replacement.
        pool = np.setdiff1d(candidate_pool, gt)
        drawn = np.random.choice(pool, number_sample_eval, replace=False)  # shape (number_sample_eval,)
        for val in drawn:
            neg_samples_index.append(val.item())

    # Concatenate ground-truth and negative sample ids.
    gt_tensor = torch.from_numpy(np.array(gt_item_ids))
    neg_tensor = torch.tensor(neg_samples_index).squeeze()

    sample_ids = torch.cat([gt_tensor, neg_tensor])
    return sample_ids
