import pickle as pkl

import dgl
import networkx as nx
import numpy as np
import torch.nn as nn
import scipy.sparse as sp
import torch

from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, precision_recall_fscore_support

from sklearn.preprocessing import normalize

dataset_folder = '../data/'  # root folder for dataset files (e.g. test_negatives_100.txt)
np.random.seed(17)  # fix the NumPy RNG so runs are reproducible
start_node = 4  # labeling starts at the 5th step, so edge/node ids are offset by 4

'''--------------------------------------------------------------------our fun'''

recipe_num = 400  # presumably the number of recipes — unused in this chunk; verify against callers


def load_recipes_contents(path, dim=768):
    """Load per-recipe feature vectors from a tab-separated file.

    Each line is expected to look like ``<id>\\t<v1>\\t...\\t<v_dim>\\t<tail>``:
    the first and last fields are dropped and the middle ``dim`` fields are
    parsed as floats.  Lines with the wrong field count become zero vectors
    so the resulting feature matrix stays rectangular.

    Args:
        path: path to the feature CSV/TSV file.
        dim: expected feature dimensionality (default 768, matching the
            original hard-coded value).

    Returns:
        List of ``dim``-length float lists, one per input line.
    """
    features = []
    with open(path, 'r') as f:
        for line in f:  # iterate lazily instead of materializing readlines()
            fields = line.strip().split('\t')
            values = fields[1:-1]
            if len(values) == dim:
                features.append([float(x) for x in values])
            else:
                # malformed line: substitute a zero vector to keep the matrix rectangular
                features.append([0.] * dim)
    return features


def load_instr_ingre_contents(path, f_type, dim=768, index=0):
    """Load instruction/ingredient feature vectors from ``<path><f_type><index>.csv``.

    Same line format as load_recipes_contents: drop the first and last
    tab-separated fields, parse the middle ``dim`` fields as floats, and
    substitute a zero vector for malformed lines.

    Args:
        path: directory prefix (must already end with a path separator,
            since it is concatenated, not joined).
        f_type: file-name stem, e.g. 'Instruction' or 'Ingredient'.
        dim: expected feature dimensionality (default 768).
        index: numeric file suffix (default 0, matching the original
            hard-coded ``format(0)``).

    Returns:
        List of ``dim``-length float lists, one per input line.
    """
    features = []
    file = path + f_type + '{}.csv'.format(index)
    with open(file, 'r') as f:
        for line in f:
            fields = line.strip().split('\t')
            values = fields[1:-1]
            if len(values) == dim:
                features.append([float(x) for x in values])
            else:
                # malformed line: zero vector keeps the matrix rectangular
                features.append([0.] * dim)
    return features


def load_features():
    """Load the three node-feature matrices (recipe / instruction / ingredient).

    Feature files live at hard-coded Windows paths; each file holds one
    768-dim vector per line.  Returns three float tensors, one per node
    type, each of shape (n_i, 768).
    """
    # recipe features: a single (n1, 768) matrix read from column 2 onwards
    recipe_file = 'F:\\new_VGAE\\data\\recipes\\recipes_feature0.csv'
    # instruction-step / ingredient features live in per-type directories
    instruction_dir = 'F:\\new_VGAE\\data\\instruction_steps\\'
    ingredient_dir = 'F:\\new_VGAE\\data\\ingredients\\'

    recipe_feats = load_recipes_contents(recipe_file)
    instr_feats = load_instr_ingre_contents(instruction_dir, 'Instruction')
    ingre_feats = load_instr_ingre_contents(ingredient_dir, 'Ingredient')

    return (torch.tensor(recipe_feats),
            torch.tensor(instr_feats),
            torch.tensor(ingre_feats))


def norm(input, p=1, dim=1, eps=1e-12):
    """Lp-normalize `input` along `dim`, clamping the norm at `eps` to avoid division by zero."""
    denom = input.norm(p, dim, keepdim=True).clamp(min=eps)
    return input / denom.expand_as(input)


def pro_adj(src_dst_dic, node_dic):
    """Build a dense directed adjacency matrix from a src->dst edge dict.

    Args:
        src_dst_dic: mapping source node id -> destination node id.
        node_dic: mapping node id -> row/column index in the matrix.

    Returns:
        (n, n) float64 numpy array (n = len(node_dic)) with adj[i][j] = 1
        for each edge whose endpoints both appear in node_dic; 0 elsewhere.
    """
    node_num = len(node_dic)
    adj = np.zeros((node_num, node_num), dtype=np.float64)

    # Original looped `for k, v in enumerate(src_dst_dic)` — the enumerate
    # index was unused; iterate the dict's items directly instead.
    for src, dst in src_dst_dic.items():
        # skip edges whose endpoints were filtered out of node_dic
        if src in node_dic and dst in node_dic:
            adj[node_dic[src]][node_dic[dst]] = 1
    return adj


def get_words(adj_orig):
    """Placeholder hook for mapping original edge pairs back to words; currently a no-op returning 0."""
    return 0


def pos_graph_to_edge(node_dic, graph):  # node ids follow the shuffled order, e.g. original [4,5] is now [10,1]
    """Collect positive edges from a DGL subgraph as index pairs into node_dic.

    Recovers original step ids from the subgraph's parent edge IDs ('s-s'
    edge type) plus the module-level `start_node` offset, keeps only edges
    whose endpoints appear in node_dic, and returns the remapped pairs.

    Args:
        node_dic: mapping original node id -> shuffled/compact index.
        graph: DGL heterograph with edge type 's-s' and node type 'step';
            assumed to be a subgraph carrying parent IDs in edge '_ID' and
            node dgl.NID — TODO confirm against the caller.

    Returns:
        (adj, n): list of [src_idx, dst_idx] pairs and its length.
    """
    dst_adj = []
    src_adj = []

    adj = []
    adj_orig = []

    val_EID = graph.edges['s-s'].data['_ID'].tolist()  # edge IDs in the parent graph
    val_node_list = graph.ndata[dgl.NID]['step'].tolist()  # parent-graph ids of the 'step' nodes

    keys = list(node_dic.keys())

    for i in range(len(val_EID)):
        graph_dst = graph.successors(i, etype='s-s').tolist()[0]
        if val_EID[i] + start_node in keys and val_node_list[graph_dst] in keys:
            src_adj.append(node_dic[val_EID[i] + start_node])  # labeling starts at step 5, so edge [4,5] has id 0; node_dic preserves node order
            dst_adj.append(
                node_dic[val_node_list[graph_dst]])  # val_node_list[graph_dst] maps local index graph_dst back to the parent graph (e.g. local 3 -> parent 9)
            # g.successors(v, etype='X') returns destination nodes of edges leaving v with edge type 'X'

    # NOTE(review): this loop indexes val_EID by position within src_adj; if
    # any edge above was skipped by the membership check the pairing drifts.
    # adj_orig is only printed/passed to the no-op get_words — confirm intended.
    for i in range(len(src_adj)):
        adj_orig.append([val_EID[i] + start_node, keys[dst_adj[i]]])
        adj.append([src_adj[i], dst_adj[i]])

    # print('adj:',adj)
    print("orig pos_edge:", adj_orig)
    get_words(adj_orig)
    return adj, len(src_adj)


def neg_graph_to_edge(node_dic, val_graph, edge_num):
    """Collect negative-sampled edges from a DGL subgraph as index pairs.

    Unlike pos_graph_to_edge, negative edges carry no parent edge IDs, so
    source nodes are taken in the order of the subgraph's parent node IDs.

    Args:
        node_dic: mapping original node id -> shuffled/compact index.
        val_graph: DGL heterograph ('s-s' edges, 'step' nodes) produced by a
            negative sampler, with parent node ids under dgl.NID — TODO
            confirm against the caller.
        edge_num: number of negative edges to walk.

    Returns:
        List of remapped [src_idx, dst_idx] pairs.
    """
    dst_adj = []
    src_adj = []

    adj_orig = []
    adj = []

    val_node_list = val_graph.ndata[dgl.NID]['step'].tolist()
    keys = list(node_dic.keys())
    for i in range(edge_num):
        graph_dst = val_graph.successors(i, etype='s-s').tolist()[0]
        if val_node_list[i] in keys and val_node_list[graph_dst] in keys:
            src_adj.append(node_dic[val_node_list[i]])  # negatives have no EID, so sources follow the NID node order
            dst_adj.append(node_dic[val_node_list[graph_dst]])

    # NOTE(review): val_node_list is indexed by position within src_adj here;
    # if an edge above was skipped the pairing drifts. adj_orig is print-only
    # — confirm this is intended.
    for i in range(len(src_adj)):
        adj_orig.append([val_node_list[i], keys[dst_adj[i]]])
        adj.append([src_adj[i], dst_adj[i]])

    # print('adj:',adj)
    print("orig neg_edge:", adj_orig)
    get_words(adj_orig)
    return adj


class test_NegativeSampler(object):
    """Negative sampler serving pre-computed negatives from test_negatives_100.txt.

    Each file line looks like ``(user,...)\\tneg1\\tneg2...``; the per-user
    negative lists are cached at construction, and for every sampled edge
    the first `k` negatives of its source user are returned.
    """

    def __init__(self, g, k):
        # cache: user id -> list of pre-sampled negative item ids
        self.user2negs_100_dict = {}
        filename = dataset_folder + '/test_negatives_100.txt'
        with open(filename, "r") as f:
            # BUG FIX: the original iterated `nn.tqdm(lines)`, but torch.nn
            # has no `tqdm` attribute — that raised AttributeError at runtime.
            # Iterate the file's lines directly instead.
            for line in f.readlines():
                if line is None or line == "":
                    continue
                line = line[:-1]  # remove \n
                # first tab field is "(user,item)": text after '(' up to the first ','
                user = int(line.split('\t')[0].split(',')[0][1:])
                negs = [int(neg) for neg in line.split('\t')[1:]]
                self.user2negs_100_dict[user] = negs

        self.k = k

    def __call__(self, g, eids_dict):
        """Return {etype: (src, dst)} negative pairs, k negatives per positive edge."""
        result_dict = {}
        for etype, eids in eids_dict.items():
            src, _ = g.find_edges(eids, etype=etype)
            dst = []
            for each_src in src:
                dst.extend(self.user2negs_100_dict[int(each_src)][:self.k])
            dst = torch.tensor(dst)
            # repeat each source k times so it lines up with its k negatives
            src = src.repeat_interleave(self.k)
            result_dict[etype] = (src, dst)
        return result_dict


def node_drop(feats, drop_rate, training):
    """Randomly zero whole node rows of `feats` with probability `drop_rate`.

    Inverted dropout over nodes: during training, surviving rows are rescaled
    by 1/(1 - drop_rate); at eval time the features pass through untouched.
    """
    if not training:
        return feats

    num_nodes = feats.shape[0]
    rates = torch.FloatTensor(np.ones(num_nodes) * drop_rate)
    # one Bernoulli keep/drop decision per node, broadcast across features
    row_mask = torch.bernoulli(1. - rates).unsqueeze(1)
    return row_mask.to(feats.device) * feats / (1. - drop_rate)


def get_adj_train(node_dic, eid):  # node order has been shuffled
    """Build a dense adjacency matrix for the training edges.

    Each edge id `e` denotes the consecutive step pair
    (e + start_node, e + start_node + 1); both endpoints are remapped
    through node_dic before being marked in the matrix.
    """
    size = len(node_dic)
    adj = np.zeros((size, size), dtype=np.float64)

    for e in eid:
        row = node_dic[e + start_node]
        col = node_dic[e + start_node + 1]
        adj[row][col] = 1
    return adj


def get_accuracy(labels, preds):
    """Binarize standardized scores at 0.6 and report accuracy / macro P, R, F1.

    Scores are z-normalized (mean-centered only when the std is zero), then
    thresholded at 0.6 before the sklearn metrics are computed.
    """
    scores = preds.copy()
    std = np.std(scores)
    if std != 0:
        scores = (scores - np.mean(scores)) / std
    else:
        # degenerate case: constant predictions, avoid dividing by zero
        scores = scores - np.mean(scores)
    scores[scores > 0.6] = 1
    scores[scores <= 0.6] = 0
    print("labels:", labels)
    print("test:", scores)

    acc = accuracy_score(labels, scores)
    p, r, f1, _ = precision_recall_fscore_support(labels, scores, average='macro')

    return acc, p, r, f1


def my_eval(emb, adj_orig, edges_pos, edges_neg):
    """Score link-prediction quality of embeddings on positive/negative edge sets.

    Reconstructs the adjacency as sigmoid(emb @ emb.T), scores each given
    edge, and reports accuracy / macro P, R, F1 (via get_accuracy), average
    precision and ROC-AUC.

    Args:
        emb: (n, d) node embedding matrix.
        adj_orig: ground-truth adjacency — retained for interface
            compatibility; not used in the metric computation.
        edges_pos: iterable of [i, j] positive edges.
        edges_neg: iterable of [i, j] negative edges.

    Returns:
        (acc, precision, recall, f1, average_precision, auc)
    """
    def sigmoid(x):
        # applied element-wise to each reconstructed edge score
        return 1 / (1 + np.exp(-x))

    # inner-product decoder: <n x d, d x n> -> n x n score matrix
    adj_rec = np.dot(emb, emb.T)

    preds = [sigmoid(adj_rec[e[0], e[1]]) for e in edges_pos]
    preds_neg = [sigmoid(adj_rec[e[0], e[1]]) for e in edges_neg]

    preds_all = np.hstack([preds, preds_neg])
    # BUG FIX: the negative-label count previously used len(preds), which
    # misaligns labels with predictions whenever the positive and negative
    # edge sets differ in size (get_roc_score uses len(preds_neg) correctly).
    labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])

    acc_score, p, r, f1 = get_accuracy(labels_all, preds_all)
    auc_score = roc_auc_score(labels_all, preds_all)
    map_score = average_precision_score(labels_all, preds_all)

    return acc_score, p, r, f1, map_score, auc_score


def get_roc_score(emb, adj_orig, edges_pos, edges_neg):
    """Evaluate link prediction: accuracy, macro P/R/F1, ROC-AUC and average precision."""
    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    # decode edge scores from the embedding inner product
    adj_rec = np.dot(emb, emb.T)

    preds = []
    pos = []
    counter = 0
    for edge in edges_pos:
        preds.append(sigmoid(adj_rec[edge[0], edge[1]]))
        pos.append(adj_orig[edge[0], edge[1]])
        counter += 1

    preds_neg = []
    neg = []
    for edge in edges_neg:
        preds_neg.append(sigmoid(adj_rec[edge[0], edge[1]]))
        neg.append(adj_orig[edge[0], edge[1]])

    preds_all = np.hstack([preds, preds_neg])
    labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])

    acc_score, p, r, f1 = get_accuracy(labels_all, preds_all)
    roc_score = roc_auc_score(labels_all, preds_all)
    map_score = average_precision_score(labels_all, preds_all)

    return acc_score, p, r, f1, roc_score, map_score
# test code
# my_load_data_tfidf_semi()
# _load_wmd_adj()
