
import pandas as pd
import numpy as np
import tensorflow as tf
import time
import argparse
from EGES_model import EGES_Model
from utils import *
import faiss  # make faiss available

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='manual to this script')
    parser.add_argument("--batch_size", type=int, default=2048)
    parser.add_argument("--n_sampled", type=int, default=10)
    parser.add_argument("--epochs", type=int, default=30)
    parser.add_argument("--lr", type=float, default=0.001)
    parser.add_argument("--root_path", type=str, default='./data_cache/')
    parser.add_argument("--num_feat", type=int, default=4)
    parser.add_argument("--embedding_dim", type=int, default=128)
    parser.add_argument("--outputEmbedFile", type=str, default='./embedding/EGES.embed')
    args = parser.parse_args()

    # ---- read train data ----
    print('read features...')
    start_time = time.time()
    # side_info: one row per item; each column is an integer feature id
    # (presumably column 0 is the item id itself — verify against the data pipeline).
    side_info = np.loadtxt(args.root_path + 'api_side_info.csv', dtype=np.int32, delimiter='\t')
    # all_pairs: (center, context) training pairs for skip-gram style training.
    all_pairs = np.loadtxt(args.root_path + 'all_pairs', dtype=np.int32, delimiter=' ')
    print(side_info.shape)
    print(all_pairs.shape)
    # Vocabulary size of every feature column, needed to size the embedding tables.
    feature_lens = [len(set(side_info[:, col])) for col in range(side_info.shape[1])]

    end_time = time.time()
    print('time consumed for read features: %.2f' % (end_time - start_time))
    print(feature_lens)
    print(len(side_info))
    EGES = EGES_Model(len(side_info), args.num_feat, feature_lens, n_sampled=args.n_sampled, embedding_dim=args.embedding_dim,
                      lr=args.lr)

    # ---- init model (TF1 graph/session style) ----
    print('init...')
    start_time = time.time()
    init = tf.global_variables_initializer()
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True  # don't grab all GPU memory up front
    sess = tf.Session(config=config_tf)
    sess.run(init)
    end_time = time.time()
    print('time consumed for init: %.2f' % (end_time - start_time))

    print_every_k_iterations = 100
    loss = 0
    iteration = 0
    start = time.time()

    print(len(all_pairs))

    # Fix: honor --root_path instead of the previously hard-coded './data_cache/'
    # (backward compatible: the default root_path is './data_cache/').
    train_data = pd.read_csv(args.root_path + 'train_data.csv', sep='\t')
    # 'target' is stored as a stringified list like "[1, 2, 3]"; parse it back to ints.
    train_data['target'] = train_data['target'].apply(lambda x: [int(i) for i in x[1:-1].split(',')])

    print(train_data)
    test_data = pd.read_csv(args.root_path + 'test_data.csv', sep='\t')
    test_data['target'] = test_data['target'].apply(lambda x: [int(i) for i in x[1:-1].split(',')])
    print(test_data)

    for epoch in range(args.epochs):
        max_iter = len(all_pairs) // args.batch_size
        # renamed from `iter` which shadowed the builtin
        for batch_idx in range(max_iter):
            iteration += 1
            batch_features, batch_labels = next(graph_context_batch_iter(all_pairs, args.batch_size, side_info,
                                                                         args.num_feat))
            # inputs[:-1] are the per-feature placeholders; inputs[-1] is the label placeholder.
            feed_dict = {input_col: batch_features[:, i] for i, input_col in enumerate(EGES.inputs[:-1])}
            feed_dict[EGES.inputs[-1]] = batch_labels
            _, train_loss = sess.run([EGES.train_op, EGES.cost], feed_dict=feed_dict)

            loss += train_loss

            if iteration % print_every_k_iterations == 0:
                end = time.time()
                # Use the actual loop epoch (1-based) instead of re-deriving it
                # from the iteration count.
                print("Epoch {}/{}".format(epoch + 1, args.epochs),
                      "Iteration: {}".format(iteration),
                      "Avg. Training loss: {:.4f}".format(loss / print_every_k_iterations),
                      "{:.4f} sec/batch".format((end - start) / print_every_k_iterations))
                loss = 0
                start = time.time()

        # Evaluate and dump embeddings every second epoch.
        if epoch % 2 == 0:
            # Feed the full side-info table; the label input is unused for
            # inference, so a zero placeholder value is supplied.
            feed_dict_test = {input_col: list(side_info[:, i]) for i, input_col in enumerate(EGES.inputs[:-1])}
            feed_dict_test[EGES.inputs[-1]] = np.zeros((len(side_info), 1), dtype=np.int32)
            embedding_result = sess.run(EGES.merge_emb, feed_dict=feed_dict_test)
            print('saving embedding result...')
            write_embedding(embedding_result, args.outputEmbedFile)

            def avg_embedding(api_list):
                """Mean of the embeddings of the given item-id list (user profile vector)."""
                embedding_list = embedding_result[api_list, :]
                return np.mean(embedding_list, axis=0)

            train_data['embedding'] = train_data['target'].apply(avg_embedding)
            print(train_data)

            # Join each test row with the matching train row to obtain the
            # profile embedding for the same 'source'.
            tmp_data = pd.merge(test_data, train_data, on='source', how='inner').fillna('null')

            print(tmp_data)
            print(tmp_data.columns)
            print(tmp_data.shape)

            # Exact L2 nearest-neighbour index over all item embeddings.
            index = faiss.IndexFlatL2(args.embedding_dim)  # build the index
            print(index.is_trained)
            index.add(embedding_result)  # add vectors to the index
            print(index.ntotal)

            def find_near_emb(emb):
                """Return, per query embedding, the ids of its k nearest items."""
                emb = np.stack(emb)
                print(emb.shape)
                k = 10  # number of nearest neighbours to retrieve per query
                D, I = index.search(emb, k)  # actual search
                return pd.Series(list(I))

            tmp_data['recommendation'] = find_near_emb(tmp_data['embedding'].values)
            print(tmp_data.columns)
            # target_x = ground-truth items from the test split; a hit is a
            # ground-truth item that appears among the recommendations.
            tmp_data['hit_num'] = tmp_data.apply(
                lambda x: [i for i in x.target_x if i in x.recommendation], axis=1)

            pre_num = tmp_data['target_x'].apply(len).sum()
            hit_num = tmp_data['hit_num'].apply(len).sum()
            # Guard against an empty evaluation set.
            print("hit_ratio: ", hit_num / pre_num if pre_num else 0.0)
            print('visualization...')
            plot_embeddings(embedding_result[:5000, :], side_info[:5000, :], epoch)

    print('optimization finished...')
    saver = tf.train.Saver()
    saver.save(sess, "checkpoints/EGES")

    print(sess.run(EGES.embedding[0]))