import numpy as np
import torch
import random

# Py2→Py3 compatibility shim: legacy code in this project can keep calling
# xrange(); under Python 3 it is simply an alias for range.
xrange = range  # Python 3


def top_match(rec_scores, item_ids, k):
    """Return the ids of the k highest-scoring recommended items.

    Args:
        rec_scores: 1-D torch tensor of scores, aligned with item_ids.
        item_ids: sequence of item ids (same length as rec_scores).
        k: number of top items to return.

    Returns:
        1-D torch tensor holding the ids of the k best-scoring items,
        ordered from highest to lowest score.
    """
    scores = rec_scores.detach().cpu().numpy()
    # Negate so that argsort yields positions in descending-score order.
    top_positions = np.argsort(-scores)[:k]
    ids = torch.tensor(item_ids).squeeze()
    return ids[top_positions]


def my_repeat(text, number_neg_sample_train):
    """Repeat each element of `text` number_neg_sample_train times, flattened.

    E.g. [a, b] with n=3 -> [a, a, a, b, b, b].

    Fix: the original concatenated onto the accumulator inside the loop,
    which is quadratic in len(text); collect the pieces and cat once.

    Args:
        text: 1-D torch tensor (moved to CPU before repeating).
        number_neg_sample_train: repeat count per element.

    Returns:
        1-D torch tensor of length len(text) * number_neg_sample_train.
        Integer inputs are promoted to float32, matching the original's
        behavior of seeding the concatenation with an empty float32 tensor.
    """
    text = text.cpu()
    # Leading empty float32 tensor preserves the original dtype promotion.
    parts = [torch.empty(0, dtype=torch.float32)]
    parts.extend(text[i].repeat(number_neg_sample_train) for i in range(len(text)))
    return torch.cat(parts)


# video_num=35000
def generate_samples(number_neg_sample_train, index_pos, BATCH_SIZE, video_num):
    """Build shuffled positive+negative sample groups for a batch.

    For each of the BATCH_SIZE positives in `index_pos`, draw
    number_neg_sample_train distinct negative ids from [0, video_num) and
    shuffle the positive in among them, recording where it landed.

    Args:
        index_pos: tensor of positive sample ids, one per batch row.
        BATCH_SIZE: number of rows to process.
        video_num: size of the id pool to sample negatives from.

    Returns:
        (pos_list, sample_list, pos_samples_index, neg_samples_index):
        flat lists where each row contributes (number_neg_sample_train + 1)
        entries; the index lists give flat positions of the positive /
        negatives within sample_list.
    """
    pos_ids = index_pos.tolist()
    group = number_neg_sample_train + 1
    all_pos, all_samples = [], []
    all_pos_idx, all_neg_idx = [], []

    for row in range(BATCH_SIZE):
        positive = pos_ids[row]
        candidates = [positive]
        # Rejection-sample until we have `group` distinct ids (incl. the positive).
        while len(candidates) < group:
            draw = random.randint(0, video_num - 1)
            if draw not in candidates:
                candidates.append(draw)
        random.shuffle(candidates)

        offset = row * group
        pos_slot = candidates.index(positive)
        all_pos_idx.append(pos_slot + offset)
        all_neg_idx.extend(offset + j for j in range(group) if j != pos_slot)
        all_pos.extend([positive] * group)
        all_samples.extend(candidates)

    return all_pos, all_samples, all_pos_idx, all_neg_idx

# -- Fetch negative samples from the dataset based on each positive-sample id --
def prepare_sample(number_neg_sample_train, index, BATCH_SIZE, image_texts):
    """Draw negatives for each positive id and splice the positive into one
    shared random slot of every (1 + number_neg_sample_train)-sized group.

    Args:
        number_neg_sample_train: negatives drawn per positive.
        index: iterable of positive sample ids, one per batch row.
        BATCH_SIZE: number of rows; should equal len(index) — TODO confirm.
        image_texts: candidate pool; only its length is used here.

    Returns:
        (sample_indexes, pos_samples_index, neg_samples_index):
        sample_indexes is a 1-D LongTensor of all drawn ids; the two lists
        hold the flat positions of positives / negatives within it.
    """
    group = number_neg_sample_train + 1
    pool = np.arange(len(image_texts))

    # Slot of the positive within each group. Fix: sample over all `group`
    # slots — the original used choice(number_neg_sample_train), which could
    # never place the positive in the LAST slot; generate_samples() shuffles
    # uniformly over all slots, so this now matches.
    random_pos_index = np.random.choice(group)
    pos_samples_index = random_pos_index + group * np.arange(BATCH_SIZE)
    neg_samples_index = np.setdiff1d(np.arange(BATCH_SIZE * group), pos_samples_index)

    # For each positive, find number_neg_sample_train negative samples.
    samples_id = []
    for pos_id in index:
        # Negatives: anything in the pool except the positive itself.
        neg_pool = np.setdiff1d(pool, pos_id)
        neg_ids = np.random.choice(neg_pool, number_neg_sample_train, replace=False)
        # Insert the positive at the shared slot chosen above.
        row = np.append(neg_ids[:random_pos_index], pos_id)
        samples_id += np.append(row, neg_ids[random_pos_index:]).tolist()

    sample_indexes = torch.tensor(samples_id).squeeze()
    return sample_indexes, pos_samples_index.tolist(), neg_samples_index.tolist()


def get_len_of_nonzero(texts):
    """Return, per row, the index of the last entry before zero padding.

    Three cases per row: a row starting with 0 maps to 0; a row with no
    trailing zero maps to its last index; otherwise the result is the index
    just before the first 0 (assumes zeros only appear as a trailing pad —
    TODO confirm with callers).

    Args:
        texts: 2-D tensor (or sequence of 1-D tensors) of token rows.

    Returns:
        1-D LongTensor of per-row indices, same length as texts.
    """
    lengths = torch.zeros(len(texts), dtype=torch.long)
    for row, seq in enumerate(texts):
        if seq[0] == 0:
            lengths[row] = 0
        elif seq[-1] != 0:
            lengths[row] = seq.shape[0] - 1
        else:
            lengths[row] = seq.tolist().index(0) - 1
    return lengths