import json

import torch
import tqdm
import faiss
import numpy as np

def generate_submission(emb, src_node_List, dst_node_List, dimension=64, topK=5):
    """Write submission_A.json: topK nearest-neighbour candidates per test inviter.

    For every inviter in the preliminary test file, looks up its node
    embedding, retrieves the topK nearest rows of ``emb`` by L2 distance
    (exact faiss search), and maps each result index to a user id string.

    Parameters
    ----------
    emb : np.ndarray, shape (num_nodes, dimension)
        Node embedding matrix indexed by node number.
    src_node_List : sequence
        Unused here; kept for signature parity with ``gen_file``.
    dst_node_List : sequence
        Per-edge destination node numbers.
        NOTE(review): the faiss index is built over rows of ``emb``, so the
        returned indices are node/row numbers of ``emb`` — using them as
        positions into ``dst_node_List`` only makes sense if the two are
        aligned; confirm this mapping is intended.
    dimension : int
        Embedding dimensionality passed to faiss (must match ``emb.shape[1]``).
    topK : int
        Number of candidate voters emitted per inviter.

    Side effects: reads three JSON files, writes ``submission_A.json``.
    """
    with open('./raw/item_share_preliminary_test_info.json', 'r') as f:
        competition_A = json.load(f)

    with open('./datasets/bdsc2023/userid2num.json', 'r') as load_f:
        userid2num = json.load(load_f)

    # Map each test inviter's string user id to its integer node number.
    A_inviters_index = []
    for line in competition_A:
        user_num = int(userid2num[line['inviter_id']])
        A_inviters_index.append(user_num)

    with open('./datasets/bdsc2023/usernum2id.json', 'r') as load_f:
        usernum2id = json.load(load_f)

    inviters_index = np.array(A_inviters_index)
    # faiss requires float32, C-contiguous input; normalise defensively so a
    # float64 embedding matrix does not make IndexFlatL2.add() fail.
    emb32 = np.ascontiguousarray(emb, dtype=np.float32)
    inviters_emb = emb32[inviters_index]

    indexL2 = faiss.IndexFlatL2(dimension)
    indexL2.add(emb32)
    # distance is unused; topKindex[i] holds the topK nearest row indices
    # for inviter i.
    distance, topKindex = indexL2.search(inviters_emb, topK)

    submission_A = []
    for i in range(len(inviters_index)):
        topKlist = list(topKindex[i, :])
        candidate_voter_list = [
            usernum2id[str(dst_node_List[top_voter_index])]
            for top_voter_index in topKlist
        ]
        submission_A.append({'triple_id': str('%06d' % i),
                             'candidate_voter_list': candidate_voter_list})

    with open('submission_A.json', 'w') as f:
        json.dump(submission_A, f)

def gen_file(edge_embeddings,src_node_List,dst_node_List ,dimension=64, topK=5):
    """Write submission_A.json using per-node groups of edge embeddings.

    For each test inviter, collects the embeddings of every edge the inviter
    participates in, scores all edges against them by dot product, and emits
    a candidate voter list derived from the top-scoring rows.

    NOTE(review): ``dimension`` is accepted but never used in this function.

    Side effects: reads three JSON files, writes ``submission_A.json``.
    """
    with open('./raw/item_share_preliminary_test_info.json', 'r') as f:
        competition_A = json.load(f)

    with open('./datasets/bdsc2023/userid2num.json', 'r') as load_f:
        userid2num = json.load(load_f)

    # Map each test inviter's string user id to its integer node number.
    A_inviters_index = []
    for line in competition_A:
        user_num=int(userid2num[line['inviter_id']])
        A_inviters_index.append(user_num)

    with open('./datasets/bdsc2023/usernum2id.json', 'r') as load_f:
        usernum2id = json.load(load_f)

    # Group edge embeddings by node id: each node maps to the list of
    # embeddings of all edges it appears in (as source or destination).
    node_embeddings = {}
    for i in range(len(src_node_List)):
        if src_node_List[i] not in node_embeddings:
            node_embeddings[src_node_List[i]] = []
        if dst_node_List[i] not in node_embeddings:
            node_embeddings[dst_node_List[i]] = []
        node_embeddings[src_node_List[i]].append(edge_embeddings[i])
        node_embeddings[dst_node_List[i]].append(edge_embeddings[i])

    # Score every edge against each inviter's edge embeddings and keep top-k.
    submission_A = []
    for i in range(len(A_inviters_index)):
        # Inviters with no observed edges are silently skipped, so triple_id
        # may have gaps relative to the test file's positions.
        if A_inviters_index[i] in node_embeddings:
            emb = np.array(node_embeddings[A_inviters_index[i]])
            # scores is 2-D: (num_edges, num_inviter_edges).
            scores = np.dot(edge_embeddings,emb.T)
            # NOTE(review): argsort on a 2-D array sorts along the last axis;
            # [:topK] then takes the first topK ROWS (edges 0..topK-1's sorted
            # column orders), not the topK best-scoring edges overall —
            # confirm this is the intended selection.
            topk_indices = np.argsort(-scores)[:topK]

            # NOTE(review): each ``idx`` here is an index ARRAY (one row of
            # topk_indices); dst_node_List[idx][0] therefore relies on fancy
            # indexing (dst_node_List must be a numpy array) and keeps only
            # the first element — verify against the expected output format.
            topk_edges = [dst_node_List[idx][0] for idx in topk_indices]
            submission_A.append({'triple_id': str('%06d' % i), 'candidate_voter_list': topk_edges})

    with open('submission_A.json', 'w') as f:
        json.dump(submission_A, f)

def mrr_at_k1(edge_embeddings,origin_data,val_data,  topK=10):
    """Incomplete MRR@k evaluator built on a faiss IVF index.

    NOTE(review): this is a stub — the search that would populate
    ``rank_list`` is commented out, so ``rank_list`` is always empty and
    ``np.mean([])`` returns nan (with a RuntimeWarning).  ``origin_data``,
    ``val_data`` and ``topK`` are accepted but never read.
    """

    dim, measure = 64, faiss.METRIC_L2
    # 'IVF100, Flat': inverted-file index with 100 k-means cluster centroids.
    param = 'IVF100, Flat'
    index = faiss.index_factory(dim, param, measure)

    # NOTE(review): IVF indexes must be train()ed before add(); calling
    # add() on an untrained index raises — confirm before reviving this code.
    # faiss also requires float32 input; presumably edge_embeddings is —
    # TODO confirm at the caller.
    index.add(edge_embeddings)
    # _, topKindex = index.search(node_embeddings, topK)
    rank_list = []


    # Always nan here because rank_list is never filled (see note above).
    mrr = np.mean(rank_list)
    print(mrr)
    return mrr

def mrr_at_k(embeddings, origin_data, val_data, dim=172, topk=10):
    """Compute (and return) MRR@topk over the validation pairs.

    Builds one embedding per node by averaging the embeddings of every edge
    the node participates in, indexes them with an exact L2 faiss index, and
    for each validation inviter checks the rank of its true voter among the
    ``topk`` nearest nodes.

    Parameters
    ----------
    embeddings : np.ndarray, shape (num_edges, dim)
        Per-edge embeddings, aligned with ``origin_data``'s edge lists.
    origin_data : sequence
        ``origin_data[0]`` / ``origin_data[1]`` are the source / destination
        node-id lists of the training edges (integer ids — they are used as
        row indices into a dense table).
    val_data : sequence
        ``val_data[0]`` are query (inviter) node ids, ``val_data[1]`` the
        matching ground-truth voter node ids.
    dim : int
        Embedding dimensionality (must match ``embeddings.shape[1]``).
    topk : int
        Cut-off for the reciprocal-rank computation.

    Returns
    -------
    float
        Mean reciprocal rank; 0.0 when there are no query nodes.
    """
    src_node_list, dst_node_list = origin_data[0], origin_data[1]

    # Group: node id -> list of embeddings of edges touching that node
    # (an edge contributes to both of its endpoints).
    node_embeddings = {}
    for i in range(len(src_node_list)):
        node_embeddings.setdefault(src_node_list[i], []).append(embeddings[i])
        node_embeddings.setdefault(dst_node_list[i], []).append(embeddings[i])

    node_ids = np.asarray(list(node_embeddings.keys()))
    # Dense lookup table indexed by node id; nodes with no edges keep a zero
    # vector (queried ids outside node_embeddings therefore get zeros).
    new_embeddings = np.zeros((node_ids.max() + 1, dim), dtype=np.float32)
    for node_id, vecs in node_embeddings.items():
        new_embeddings[node_id] = np.mean(np.array(vecs), axis=0)

    # faiss needs float32, C-contiguous data.
    xb = np.ascontiguousarray(new_embeddings, dtype=np.float32)
    index = faiss.IndexFlatL2(dim)
    index.add(xb)

    # Look up the query (inviter) nodes and retrieve their topk neighbours.
    query_nodes = val_data[0]
    xq = np.ascontiguousarray(new_embeddings[query_nodes], dtype=np.float32)
    _, I = index.search(xq, topk)

    # Reciprocal rank of the true voter within each query's topk list
    # (0 when the voter is not retrieved at all).
    mrr_sum = 0.0
    for idx in range(len(query_nodes)):
        pred_voter = list(I[idx])
        voter = val_data[1][idx]
        if voter in pred_voter:
            rank = pred_voter.index(voter)
            mrr_sum += 1.0 / (rank + 1)

    # Guard against an empty validation set instead of dividing by zero.
    mrr = mrr_sum / len(query_nodes) if len(query_nodes) else 0.0

    print('MRR:', mrr)
    return mrr


def test_mrr2(embeds,origin_data,val_data,dim = 172):
    """Debug helper: chunked dot-product retrieval over node embeddings.

    Processes the source embeddings in chunks of 1000 rows, ranks all rows
    by similarity against each chunk, and prints the (node_id, neighbours)
    pairs whose top-5 neighbourhood contains the node itself.  Returns None.

    NOTE(review): ``dim`` and ``test_node`` are never used, and
    ``dst_node_list`` is assigned but never read.
    """
    src_node_list = origin_data[0]
    dst_node_list = origin_data[1]
    src_node_embeds = embeds[0]
    dst_node_embeds = embeds[1]

    # NOTE(review): assigned but unused below.
    test_node = val_data[0]

    #np.matmul(src_node_embeds[:10000, :], dst_node_embeds.T)
    #t= np.matmul(src_node_embeds, dst_node_embeds.T)
    src_node_embeds=torch.Tensor(src_node_embeds)
    dst_node_embeds=torch.Tensor(dst_node_embeds)

    # Chunked loop: rows [1000*(idx-1), 1000*idx) of the source embeddings;
    # the "+2" upper bound makes the last (possibly partial) chunk included.
    for idx in range(1,(len(src_node_embeds)//1000)+2):

        end_indices=1000*idx
        start_indices = end_indices - 1000
        # NOTE(review): named idx_dst_node but sliced from src_node_list —
        # confirm which list was meant.
        idx_dst_node=src_node_list[start_indices:end_indices]
        # NOTE(review): similarity is src x src here, while the commented-out
        # matmul lines above suggest src x dst was intended — verify before
        # trusting this output.
        t = torch.mm(src_node_embeds[start_indices:end_indices, :], src_node_embeds.t())
        sorted_indices = torch.argsort(t, dim=1, descending=True)
        # Top-5 column indices per chunk row, moved to host memory.
        topk = sorted_indices[:, :5].data.cpu().numpy()

        for node_id,topk_ids in zip(idx_dst_node,topk):
            # Fancy indexing with an index array: presumably src_node_list is
            # a numpy array — a plain Python list would raise here.
            dst_nodes=src_node_list[topk_ids]
            if node_id in dst_nodes:
                print(node_id,dst_nodes)

    print()



def main():
    """Entry point: load the cached arrays from disk and run test_mrr2."""
    # Hard-coded experiment artefacts produced by an earlier training run.
    embedding_matrix = np.load("/home/akun/code/dyslp/embe.npy")
    training_edges = np.load("/home/akun/code/dyslp/origin_date.npy")
    validation_pairs = np.load("/home/akun/code/dyslp/val_data.npy")

    test_mrr2(embedding_matrix, training_edges, validation_pairs)
    # generate_submission(embe,src_nodelist,dst_nodelist)


# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()