import torch
from tqdm import tqdm
import random

import sys
import copy
import torch
import random
import numpy as np
from collections import defaultdict
from multiprocessing import Process, Queue
import math
import torch.nn.functional as F
from dataset import *

def build_index(dataset_name):
    """Build user->items and item->users adjacency lists from data/<name>.txt.

    The file holds one "user item" pair per line (integer ids starting at 1).
    Returns (user_to_items, item_to_users); index 0 of each list is unused so
    ids can be used directly as indices.
    """
    interactions = np.loadtxt('data/%s.txt' % dataset_name, dtype=np.int32)

    num_users = interactions[:, 0].max()
    num_items = interactions[:, 1].max()

    user_to_items = [[] for _ in range(num_users + 1)]
    item_to_users = [[] for _ in range(num_items + 1)]

    for user, item in interactions:
        user_to_items[user].append(item)
        item_to_users[item].append(user)

    return user_to_items, item_to_users

# sampler for batch generation
def random_neq(l, r, s):
    """Draw a uniform random integer in [l, r) that is not in the set s.

    Rejection-samples until a value outside s is found; s must not cover
    the whole range or this loops forever.
    """
    candidate = np.random.randint(l, r)
    while candidate in s:
        candidate = np.random.randint(l, r)
    return candidate


def sample_function(user_train, usernum, itemnum, batch_size, maxlen, result_queue, SEED):
    """Worker loop: endlessly builds training batches and puts them on result_queue.

    Each sample is (uid, seq, pos, neg): the user's history left-padded to
    maxlen, the next-item targets aligned with seq, and uniformly drawn
    negative items. Runs inside a child process (see WarpSampler) and never
    returns.
    """
    def sample(uid):

        # uid = np.random.randint(1, usernum + 1)
        # Users with <2 interactions cannot form an (input, target) pair; resample.
        while len(user_train[uid]) <= 1: uid = np.random.randint(1, usernum + 1)

        seq = np.zeros([maxlen], dtype=np.int32)
        pos = np.zeros([maxlen], dtype=np.int32)
        neg = np.zeros([maxlen], dtype=np.int32)
        nxt = user_train[uid][-1]
        idx = maxlen - 1

        ts = set(user_train[uid])
        # Walk the history backwards, filling seq/pos/neg from the right so the
        # most recent items end up at the end of the window.
        for i in reversed(user_train[uid][:-1]):
            seq[idx] = i
            pos[idx] = nxt
            # negatives are drawn from items this user has never interacted with
            if nxt != 0: neg[idx] = random_neq(1, itemnum + 1, ts)
            nxt = i
            idx -= 1
            if idx == -1: break

        return (uid, seq, pos, neg)

    np.random.seed(SEED)  # per-worker seed so workers draw different samples
    uids = np.arange(1, usernum+1, dtype=np.int32)
    counter = 0
    while True:
        # reshuffle the user order once per full pass over all users
        if counter % usernum == 0:
            np.random.shuffle(uids)
        one_batch = []
        for i in range(batch_size):
            one_batch.append(sample(uids[counter % usernum]))
            counter += 1
        # NOTE(review): this puts a raw zip object on a multiprocessing.Queue,
        # which requires it to be picklable -- confirm on the target Python
        # version; wrap in list(...) if batches never arrive in the consumer.
        result_queue.put(zip(*one_batch))


class WarpSampler(object):
    """Keeps a queue of training batches filled by background worker processes."""

    def __init__(self, User, usernum, itemnum, batch_size=64, maxlen=10, n_workers=1):
        # Bounded queue: workers block when the trainer falls behind.
        self.result_queue = Queue(maxsize=n_workers * 10)
        self.processors = []
        for _ in range(n_workers):
            worker = Process(
                target=sample_function,
                args=(User, usernum, itemnum, batch_size, maxlen,
                      self.result_queue, np.random.randint(2e9)),
            )
            worker.daemon = True  # don't block interpreter exit
            worker.start()
            self.processors.append(worker)

    def next_batch(self):
        """Block until a batch is available and return it."""
        return self.result_queue.get()

    def close(self):
        """Terminate and reap all worker processes."""
        for worker in self.processors:
            worker.terminate()
            worker.join()


# train/val/test data generation
def data_partition(fname):
    """Leave-one-out split of data/<fname>.txt into train/valid/test.

    The file holds one "user item" pair per line, chronologically ordered per
    user, with ids starting at 1. For each user with >= 3 interactions the
    last item becomes the test item and the second-to-last the validation
    item; shorter histories stay entirely in train.

    Returns [user_train, user_valid, user_test, usernum, itemnum].
    """
    usernum = 0
    itemnum = 0
    User = defaultdict(list)
    user_train = {}
    user_valid = {}
    user_test = {}
    # assume user/item index starting from 1
    # FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open('data/%s.txt' % fname, 'r') as f:
        for line in f:
            u, i = line.rstrip().split(' ')
            u = int(u)
            i = int(i)
            usernum = max(u, usernum)
            itemnum = max(i, itemnum)
            User[u].append(i)

    for user in User:
        nfeedback = len(User[user])
        if nfeedback < 3:
            # too short to hold anything out: keep the whole history in train
            user_train[user] = User[user]
            user_valid[user] = []
            user_test[user] = []
        else:
            user_train[user] = User[user][:-2]
            user_valid[user] = [User[user][-2]]
            user_test[user] = [User[user][-1]]
    return [user_train, user_valid, user_test, usernum, itemnum]

# TODO: merge evaluate functions for test and val set
# evaluate on test set
def evaluate(model, dataset, args):
    """Test-set evaluation with 100 sampled negatives per user.

    For each user, ranks the ground-truth test item against 100 random
    unseen items and scores top-10 hits.
    Returns (NDCG@10, HR@10) averaged over evaluated users.

    FIX: the original also accumulated an MRR value that was never returned
    (dead code); it has been removed.
    """
    [train, valid, test, usernum, itemnum] = copy.deepcopy(dataset)

    NDCG = 0.0
    HT = 0.0
    valid_user = 0.0

    # cap evaluation at 10k randomly chosen users for speed
    if usernum > 10000:
        users = random.sample(range(1, usernum + 1), 10000)
    else:
        users = range(1, usernum + 1)
    for u in users:

        if len(train[u]) < 1 or len(test[u]) < 1: continue

        # Input window: training history right-aligned, with the validation
        # item as the most recent input (it precedes the test item).
        seq = np.zeros([args.maxlen], dtype=np.int32)
        idx = args.maxlen - 1
        seq[idx] = valid[u][0]
        idx -= 1
        for i in reversed(train[u]):
            seq[idx] = i
            idx -= 1
            if idx == -1: break
        rated = set(train[u])
        rated.add(0)
        # candidates: ground truth first, then 100 random unseen negatives
        item_idx = [test[u][0]]
        for _ in range(100):
            t = np.random.randint(1, itemnum + 1)
            while t in rated: t = np.random.randint(1, itemnum + 1)
            item_idx.append(t)

        predictions = -model.predict(*[np.array(l) for l in [[u], [seq], item_idx]])
        predictions = predictions[0]  # negated so argsort ranks descending

        # 0-based rank of the ground-truth item among the candidates
        rank = predictions.argsort().argsort()[0].item()

        valid_user += 1

        if rank < 10:
            NDCG += 1 / np.log2(rank + 2)
            HT += 1
        if valid_user % 100 == 0:
            print('.', end="")
            sys.stdout.flush()

    return NDCG / valid_user, HT / valid_user

def sasrec_evaluate_all(model, dataset, args, k=10):
    """Full-ranking test evaluation: rank the true test item against every
    item the user has not interacted with (validation item also excluded).

    Returns (NDCG@k, HR@k, MRR@k) averaged over users with both train and
    test interactions.
    """
    [train, valid, test, usernum, itemnum] = copy.deepcopy(dataset)

    NDCG = 0.0
    HT = 0.0
    MRR = 0.0
    valid_user = 0.0

    users = range(1, usernum + 1)
    for u in users:

        if len(train[u]) < 1 or len(test[u]) < 1: continue

        # input window: training history with the validation item last
        seq = np.zeros([args.maxlen], dtype=np.int32)
        idx = args.maxlen - 1
        seq[idx] = valid[u][0]
        idx -= 1
        for i in reversed(train[u]):
            seq[idx] = i
            idx -= 1
            if idx == -1: break
        rated = set(train[u])
        rated.add(0)
        # PERF FIX: the original rebuilt list(rated)+[...] and did O(n) list
        # membership for every item (accidental quadratic). One precomputed
        # set gives the identical candidate list in O(itemnum).
        excluded = rated | {test[u][0], valid[u][0]}
        item_idx = [test[u][0]] + [x for x in range(1, itemnum + 1) if x not in excluded]
        predictions = -model.predict(*[np.array(l) for l in [[u], [seq], item_idx]])
        predictions = predictions[0]  # negated so argsort ranks descending

        rank = predictions.argsort().argsort()[0].item()

        valid_user += 1

        if rank < k:
            NDCG += 1 / np.log2(rank + 2)
            HT += 1
            MRR += 1 / (rank + 1)
        if valid_user % 100 == 0:
            print('.', end="")
            sys.stdout.flush()

    return NDCG / valid_user, HT / valid_user, MRR / valid_user

def sasrec_evaluate_all_easy(model, dataset, args, k=10):
    """Batched full-ranking test evaluation via model.predict2.

    Pre-builds every user's input sequence, batches them with a DataLoader,
    scores all items per batch, and measures top-k hits. Returns
    (NDCG@k, HR@k, MRR@k) averaged over users that have both training and
    test interactions.
    """
    [train, valid, test, usernum, itemnum] = copy.deepcopy(dataset)

    ndcg = 0.0
    hr = 0.0
    mrr=0.0
    valid_user = 0.0

    test_data=[]

    users = range(1, usernum + 1)
    for u in tqdm(users):

        if len(train[u]) < 1 or len(test[u]) < 1: continue

        seq = np.zeros([args.maxlen], dtype=np.int32)
        idx = args.maxlen - 1
        valid_user+=1
        # validation item is the most recent input; training history fills the
        # rest of the window from right to left
        seq[idx] = valid[u][0]
        idx -= 1
        for i in reversed(train[u]):
            seq[idx] = i
            idx -= 1
            if idx == -1: break
        # NOTE(review): `rated` is computed but unused in this function
        rated = set(train[u])
        rated.add(0)
        test_data.append([u,seq,test[u][0]])
    print("Data loaded.")
    # collate_fn5 turns each batch into (user tuple, np seq matrix, label tensor)
    ds=DataLoader(test_data,shuffle=False,batch_size=256,collate_fn=collate_fn5)
    for u,seq,labels in tqdm(ds):
        predictions = model.predict2(seq)
        # for i in range(predictions.shape[0]):
        #     for j in range(seq.shape[1]):
        #         if seq[i,j]!=0:
        #             predictions[i,j]=float('-inf')
        #     predictions[i,0]=float('-inf')
        _, indices = torch.topk(predictions, k=k)
        labels=labels.unsqueeze(1).to(args.device)
        hr+=HR(labels,indices)
        ndcg += NDCG(labels, indices)
        mrr += MRR(labels, indices)


    return (ndcg / valid_user).item(), hr / valid_user, (mrr / valid_user).item()

def collate_fn5(batch_samples):
    """Collate (user, sequence, label) triples for sasrec_evaluate_all_easy.

    Returns the users as a tuple, the sequences stacked into one numpy
    array, and the labels as an int32 tensor.
    """
    users, sequences, targets = zip(*batch_samples)
    sequences = np.array(sequences)
    targets = torch.tensor(targets, dtype=torch.int)
    return users, sequences, targets

def evaluate2(model, dataset, args):
    """Test-set evaluation ranking the true item against every unseen item
    (full ranking). Returns (NDCG@10, HR@10) averaged over evaluated users.
    """
    train, valid, test, usernum, itemnum = copy.deepcopy(dataset)

    ndcg_sum = 0.0
    hit_sum = 0.0
    n_evaluated = 0.0

    # cap evaluation at 10k randomly chosen users for speed
    if usernum > 10000:
        users = random.sample(range(1, usernum + 1), 10000)
    else:
        users = range(1, usernum + 1)

    for u in users:
        if len(train[u]) < 1 or len(test[u]) < 1:
            continue

        # input window: training history right-aligned, validation item last
        seq = np.zeros([args.maxlen], dtype=np.int32)
        pos = args.maxlen - 1
        seq[pos] = valid[u][0]
        pos -= 1
        for item in reversed(train[u]):
            seq[pos] = item
            pos -= 1
            if pos == -1:
                break

        rated = set(train[u])
        rated.add(0)
        # candidates: ground truth first, then every item the user has not
        # interacted with (the test item itself is not duplicated)
        item_idx = [test[u][0]]
        item_idx.extend(
            i for i in range(1, itemnum + 1)
            if i not in rated and i != test[u][0]
        )

        predictions = -model.predict(*[np.array(l) for l in [[u], [seq], item_idx]])
        predictions = predictions[0]  # negated so argsort ranks descending

        rank = predictions.argsort().argsort()[0].item()

        n_evaluated += 1
        if rank < 10:
            ndcg_sum += 1 / np.log2(rank + 2)
            hit_sum += 1
        if n_evaluated % 100 == 0:
            print('.', end="")
            sys.stdout.flush()

    return ndcg_sum / n_evaluated, hit_sum / n_evaluated


# evaluate on val set
def evaluate_valid(model, dataset, args):
    """Validation-set evaluation with 100 sampled negatives per user.
    Returns (NDCG@10, HR@10) averaged over evaluated users.
    """
    train, valid, test, usernum, itemnum = copy.deepcopy(dataset)

    ndcg_sum = 0.0
    hit_sum = 0.0
    n_evaluated = 0.0

    # cap evaluation at 10k randomly chosen users for speed
    if usernum > 10000:
        users = random.sample(range(1, usernum + 1), 10000)
    else:
        users = range(1, usernum + 1)

    for u in users:
        if len(train[u]) < 1 or len(valid[u]) < 1:
            continue

        # input is the training history only (the validation item is the target)
        seq = np.zeros([args.maxlen], dtype=np.int32)
        pos = args.maxlen - 1
        for item in reversed(train[u]):
            seq[pos] = item
            pos -= 1
            if pos == -1:
                break

        rated = set(train[u])
        rated.add(0)
        # candidates: ground truth + 100 random unseen negatives
        item_idx = [valid[u][0]]
        for _ in range(100):
            t = np.random.randint(1, itemnum + 1)
            while t in rated:
                t = np.random.randint(1, itemnum + 1)
            item_idx.append(t)

        predictions = -model.predict(*[np.array(l) for l in [[u], [seq], item_idx]])
        predictions = predictions[0]  # negated so argsort ranks descending

        rank = predictions.argsort().argsort()[0].item()

        n_evaluated += 1
        if rank < 10:
            ndcg_sum += 1 / np.log2(rank + 2)
            hit_sum += 1
        if n_evaluated % 100 == 0:
            print('.', end="")
            sys.stdout.flush()

    return ndcg_sum / n_evaluated, hit_sum / n_evaluated

def get_entropy(data):
    """Per-row Shannon entropy of softmax(data).

    Args:
        data: tensor of shape [batch_size, hidden_size].
    Returns:
        Float tensor of shape [batch_size, 1]; each row holds
        H = -sum_j p_j * log2(p_j + 1e-5), with the epsilon guarding log(0).

    BUG FIX: the original accumulated `entropy -= -p * log2(...)`, whose
    double negative summed the NEGATIVE entropy; the sign is corrected so
    the function returns the (non-negative, up to epsilon) entropy its
    name promises.
    """
    res = []
    data = F.softmax(data, dim=1)
    probs = data.tolist()
    for row in probs:
        entropy = 0.0
        for p in row:
            entropy -= p * math.log2(p + 1e-5)
        res.append(entropy)
    return torch.tensor(res).unsqueeze(1)


def HR(labels, indices):
    """Hit count: number of rows whose true label appears in the top-k list.

    labels: [batch, 1] ground-truth item ids; indices: [batch, k] predicted
    top-k item ids. Returns the number of hit rows as a Python float.
    """
    k = indices.size(1)
    expanded = labels.expand(-1, k)
    hit_rows = (expanded == indices).any(dim=1)
    return hit_rows.float().sum().item()


def NDCG(labels, indices):
    """Sum of per-row DCG contributions 1/log2(rank+1) for hits in the top-k.

    labels: [batch, 1] ground-truth item ids; indices: [batch, k] predicted
    top-k item ids. Each hit at 1-based position r contributes 1/log2(r+1).
    Returns 0 (int) when there are no hits, otherwise a scalar tensor.

    FIX: removed the unused local `bs`.
    """
    k = indices.size(1)
    labels = labels.expand(-1, k)
    hits = torch.nonzero(torch.eq(labels, indices))
    ndcg = 0
    for hit in hits:
        rank = hit[1] + 1  # 1-based position of the hit within the top-k
        ndcg += 1 / torch.log2(rank + 1)
    return ndcg


def MRR(labels, indices):
    """Sum of per-row reciprocal ranks 1/r for hits in the top-k.

    labels: [batch, 1] ground-truth item ids; indices: [batch, k] predicted
    top-k item ids. Returns 0 (int) when there are no hits, otherwise a
    scalar tensor.

    FIX: removed the duplicated `k = indices.size(1)` and the unused `bs`.
    """
    k = indices.size(1)
    labels = labels.expand(-1, k)
    hits = torch.nonzero(torch.eq(labels, indices))
    mrr = 0
    for hit in hits:
        rank = hit[1] + 1  # 1-based position of the hit within the top-k
        mrr += 1 / rank
    return mrr


def e4srec_evaluate_all(dataloader, model, k, device):
    """Full-ranking evaluation over a dataloader of (inputs, mask, labels).

    Items already present in a user's input sequence are masked to -inf so
    they cannot be recommended again, then top-k hits feed HR/NDCG/MRR.
    Prints and returns (HR@k, NDCG@k, MRR@k) averaged over the dataset size.

    PERF FIX: the original masked history items with a Python double loop
    over every (row, position); replaced with the vectorized nonzero
    indexing already used by evaluate_100 — identical result.
    """
    model.eval()
    hr = 0
    ndcg = 0
    mrr = 0
    size = len(dataloader.dataset)
    with torch.no_grad():
        for inputs, inputs_mask, labels in tqdm(dataloader, position=0):
            inputs, inputs_mask, labels = inputs.to(
                device), inputs_mask.to(device), labels.to(device)
            pred = model(inputs, inputs_mask)
            # set the scores of items in each user's history to -inf
            mask_indices = torch.nonzero(inputs_mask, as_tuple=True)
            if mask_indices[0].numel() > 0:
                pred[mask_indices[0], inputs[mask_indices]] = float('-inf')
            _, indices = torch.topk(pred, k=k)
            indices = indices.to(device)
            hr += HR(labels, indices)
            ndcg += NDCG(labels, indices)
            mrr += MRR(labels, indices)

    hr /= size
    ndcg /= size
    mrr /= size
    print(f"HR@{k}={hr}\tnDCG@{k}={ndcg}\tMRR@{k}={mrr}")
    return hr, ndcg, mrr


def evaluate_100(dataloader, model, k, device):
    """Evaluate by ranking each true item against 100 random valid negatives.

    For every row, 100 candidates are sampled from items that are neither in
    the user's history nor the padding item; the label is prepended, the
    candidates are ranked by predicted score, and the top-k item ids feed
    HR/NDCG/MRR. Prints and returns (HR@k, NDCG@k, MRR@k) averaged over the
    dataset size.
    """
    model.eval()
    hr = 0
    ndcg = 0
    mrr = 0
    size = len(dataloader.dataset)
    with torch.no_grad():
        for inputs, inputs_mask, labels in tqdm(dataloader, position=0):
            inputs, inputs_mask, labels = inputs.to(
                device), inputs_mask.to(device), labels.to(device)
            bs = labels.shape[0]
            pred = model(inputs, inputs_mask)
            mask_indices = torch.nonzero(inputs_mask, as_tuple=True)
            # set the scores of items already in each user's history to -inf
            if mask_indices[0].numel() > 0:
                pred[mask_indices[0], inputs[mask_indices]] = float('-inf')

            # the padding item (id 0) can never be recommended
            pred[:, 0] = float('-inf')
            pred2 = []
            indices = []
            for i in range(bs):
                # indices of candidates still eligible (score != -inf)
                valid_indices = torch.where(
                    pred[i] != float('-inf'))[0]  # single-dimension index tensor
                valid_indices = valid_indices.tolist()  # convert to a Python list

                # sample 100 negatives; if fewer remain, warn and use them all
                if len(valid_indices) > 100:
                    sampled_indices = random.sample(valid_indices, 100)
                else:
                    print("Evaluate error.<100")
                    sampled_indices = valid_indices  # use every eligible index
                # ground-truth item id for this row
                label_index = labels[i, 0].item()

                # candidate list: label first, then the sampled negatives
                pr = [label_index] + sampled_indices
                # gather the candidates' scores and rank them
                pr2 = pred[i][pr]
                values, indice = torch.topk(pr2, k)
                # map positions within the candidate list back to item ids
                res = [pr[i.item()] for i in indice]

                indices.append(res)
            # pred2=torch.tensor([item.cpu().detach().numpy() for item in pred2]).cuda()
            # _,indices=torch.topk(pred2,k=k)
            # print("indices:",indices)
            indices = torch.tensor(indices).to(device)
            labels = labels.to(device)
            hr += HR(labels, indices)
            ndcg += NDCG(labels, indices)
            mrr += MRR(labels, indices)

    hr /= size
    ndcg /= size
    mrr /= size
    print(f"HR@{k}={hr}\tnDCG@{k}={ndcg}\tMRR@{k}={mrr}")
    return hr, ndcg, mrr


def collate_fn(batch_samples):
    """Left-pad variable-length (sequence, label) pairs into LongTensors.

    Sequences are padded with 0 on the left to the longest length in the
    batch (minimum 2); the mask is 1 on real items and 0 on padding.
    Returns (inputs, inputs_mask, labels) with labels shaped [batch, 1].
    """
    seqs, labels = zip(*batch_samples)
    target_len = max(2, max(len(s) for s in seqs))
    inputs, inputs_mask = [], []
    for s in seqs:
        pad = target_len - len(s)
        inputs.append([0] * pad + list(s))
        inputs_mask.append([0] * pad + [1] * len(s))
    labels = [[y] for y in labels]
    return (torch.LongTensor(inputs),
            torch.LongTensor(inputs_mask),
            torch.LongTensor(labels))


def collate_fn2(batch_samples):
    """Split rows of [label, cand1, cand2, ...] into label and candidate tensors.

    Returns (labels [batch, 1], indices [batch, n_candidates]) as IntTensors.
    """
    labels = torch.IntTensor([[row[0]] for row in batch_samples])
    indices = torch.IntTensor([row[1:] for row in batch_samples])
    return labels, indices


def collate_fn3(batch_samples):
    """Collate (candidate_seq, (seq, _)) pairs into IntTensors.

    labels take the first candidate of each row; indices keep the full
    candidate list; the item sequences are left-padded with 0 (minimum
    length 2) alongside a 0/1 validity mask.
    Returns (inputs, inputs_mask, labels, indices).
    """
    candidate_seqs, wrapped = zip(*batch_samples)
    seqs, _ = zip(*wrapped)

    labels = [[row[0]] for row in candidate_seqs]
    indices = list(candidate_seqs)

    target_len = max(2, max(len(s) for s in seqs))
    inputs = [[0] * (target_len - len(s)) + s for s in seqs]
    inputs_mask = [[0] * (target_len - len(s)) + [1] * len(s) for s in seqs]

    return (torch.IntTensor(inputs), torch.IntTensor(inputs_mask),
            torch.IntTensor(labels), torch.IntTensor(indices))


def collate_class(batch_samples):
    """Left-pad (sequence, label) pairs; the mask flags PADDING positions.

    Unlike collate_fn, the returned boolean mask is True on padding and
    False on real items (key-padding-mask convention).
    Returns (inputs LongTensor, pad_mask BoolTensor, labels [batch, 1]).
    """
    seqs, labels = zip(*batch_samples)
    target_len = max(2, max(len(s) for s in seqs))
    inputs, pad_mask = [], []
    for s in seqs:
        pad = target_len - len(s)
        inputs.append([0] * pad + list(s))
        pad_mask.append([1] * pad + [0] * len(s))
    labels = [[y] for y in labels]
    return (torch.LongTensor(inputs),
            torch.LongTensor(pad_mask).bool(),
            torch.LongTensor(labels))


def collate_enclass(batch_samples):
    """Like collate_class but also batches encoder features and lengths.

    Each sample is (seq, een, sen, length, label). Sequences are left-padded
    with 0 (minimum length 2) and the boolean mask is True on padding.
    Returns (inputs, pad_mask, eens, sens, lengths, labels).
    """
    seqs, eens, sens, lengths, labels = zip(*batch_samples)
    target_len = max(2, max(len(s) for s in seqs))
    inputs = [[0] * (target_len - len(s)) + list(s) for s in seqs]
    pad_mask = [[1] * (target_len - len(s)) + [0] * len(s) for s in seqs]
    labels = [[y] for y in labels]
    return (torch.LongTensor(inputs),
            torch.LongTensor(pad_mask).bool(),
            torch.tensor(eens),
            torch.tensor(sens),
            torch.LongTensor(lengths),
            torch.LongTensor(labels))

def get_teacher_inps(seq):# seq: np.array
    """Build teacher-forcing input matrices from a left-padded sequence.

    Given a 1-D array whose leading zeros are padding, returns (inps, msks),
    both of shape [l, l] where l is the number of non-padding entries. Each
    later row exposes one more trailing item of the sequence, right-aligned,
    and msks is 1 where inps holds a real item.
    NOTE(review): assumes seq has at least one non-zero entry, otherwise
    np.where(...)[0][0] raises IndexError — confirm callers guarantee this.
    """
    s=np.where(seq!=0)[0][0]  # index of the first non-zero (non-padding) position
    i=len(seq)-1
    l=len(seq)-s
    inps=torch.zeros([l,l])
    msks=torch.zeros([l,l])
    # import pdb;pdb.set_trace()
    k=-1
    # walk items from newest (i) to oldest, writing item seq[i] into column k
    # (counted from the right) of every row that should already see it
    while i>=s:
        for j in range(l-(len(seq)-1-i)):
            inps[l-j-1][k]=seq[i]
            msks[l-j-1][k]=1
        i-=1
        k-=1
    return inps,msks

def collate_fn4(batch_samples):
    """Collate for the new SASDataset: transpose the batch into per-field
    tuples (id, u, seq, pos, neg) without any tensor conversion."""
    sample_ids, users, seqs, pos_items, neg_items = zip(*batch_samples)
    return sample_ids, users, seqs, pos_items, neg_items

if __name__=='__main__':
    # Ad-hoc smoke test: construct the dataset variants on Toys_and_Games
    # and print a few sanity values. Not used during training.
    # seq=np.array([0,0,0,4,5,6,7,8,9,10])
    # print(get_teacher_inps(seq))
    maxlen=50
    ds=SeqDataset("./data/Toys_and_Games",maxlen)
    print(ds.item_max)
    ds2=SASDataset("./data/Toys_and_Games",item_max=20000,maxlen=maxlen)
    ds3=e4srecDataset("./data/Toys_and_Games",maxlen=maxlen)
    # import pdb;pdb.set_trace()
    # for i in ds2.train_data:
        # print(i)
    # prints a 20000-row dummy matrix (debug output only)
    print([[0.0]*4]*20000)

    # user_train=[9,10,11,12,4,5,13]
    # seq = np.zeros([maxlen], dtype=np.int32)
    # pos = np.zeros([maxlen], dtype=np.int32)
    # neg = np.zeros([maxlen], dtype=np.int32)
    # nxt = user_train[-1]
    # idx = maxlen - 1

    # ts = set(user_train)
    # for i in reversed(user_train[:-1]):
    #     seq[idx] = i
    #     pos[idx] = nxt
    #     if nxt != 0: neg[idx] = random_neq(1, 20000 + 1, ts)
    #     nxt = i
    #     idx -= 1
    #     if idx == -1: break
    # print(seq,pos)

