import json
import os
import shutil
import struct

import faiss
import numpy as np


def ivecs_read(fname):
    """Read an .ivecs file: each row is stored as <int32 dim><dim int32 values>.

    Assumes every row shares the dimension of the first one.
    Returns (vectors, dim) where vectors has shape (n, dim).
    """
    raw = np.fromfile(fname, dtype='int32')
    dim = raw[0]
    # Each stored row is dim+1 ints; strip the leading per-row dim header.
    vectors = raw.reshape(-1, dim + 1)[:, 1:].copy()
    return vectors, dim


def fvecs_read(fname):
    """Read an .fvecs file (same layout as .ivecs but float32 payload).

    Returns (vectors, dim) where vectors is a float32 array of shape (n, dim).
    """
    data, d = ivecs_read(fname)
    # Reinterpret the int32 payload bytes as float32. The previous
    # `.astype(np.float32)` was a redundant full copy: the view is already
    # float32, and ivecs_read already returned a fresh copy.
    return data.view('float32'), d


def ivecs_write(filename, vecs):
    """Write vectors to `filename` in .ivecs layout: <int32 dim><dim int32s> per row.

    Fixes vs. the original:
    - uses a context manager so the file handle is closed even on error;
    - writes each row's own length as its header (the original always wrote
      len(vecs[0]), producing a corrupt file for ragged input; output is
      byte-identical for rectangular input);
    - an empty `vecs` now produces an empty file instead of an IndexError.
    """
    with open(filename, "wb") as f:
        for row in vecs:
            f.write(struct.pack('i', len(row)))
            f.write(struct.pack('%di' % len(row), *row))


def delete_dir_if_exist(dire):
    """Recursively remove directory `dire` if it exists; no-op otherwise."""
    if os.path.isdir(dire):
        # shutil.rmtree instead of os.system('rm -rf %s'): the shell string
        # was injection-prone for paths with spaces/metacharacters, silently
        # ignored failures, and was not portable off POSIX.
        print('removing directory %s' % dire)
        shutil.rmtree(dire)


def ip_gnd(base, query, k):
    """Exact top-k maximum-inner-product ground truth via a flat faiss index.

    base and query are float32 arrays with matching second dimension.
    Returns (indices, distances), each of shape (len(query), k).
    """
    index = faiss.IndexFlatIP(base.shape[1])
    index.add(base)
    distances, indices = index.search(query, k)
    return indices, distances


def build_data(dataset_name, old_basic_dir, new_basic_dir, topk):
    """Convert one dataset from fvecs to txt and compute its IP ground truth.

    Reads <old>/<name>/<name>_{base,query}.fvecs, rebuilds <new>/<name>/ from
    scratch with base/query txt dumps plus a gnd.ivecs of the top-k
    inner-product neighbors. Returns (n_base, n_query, dim).
    """
    base_path = "%s/%s/%s_base.fvecs" % (old_basic_dir, dataset_name, dataset_name)
    query_path = "%s/%s/%s_query.fvecs" % (old_basic_dir, dataset_name, dataset_name)
    base, dim = fvecs_read(base_path)
    query, dim = fvecs_read(query_path)

    # Rebuild the output directory from a clean slate.
    out_dir = "%s/%s" % (new_basic_dir, dataset_name)
    delete_dir_if_exist(out_dir)
    os.mkdir(out_dir)
    np.savetxt("%s/%s_base.txt" % (out_dir, dataset_name), base)
    np.savetxt("%s/%s_query.txt" % (out_dir, dataset_name), query)
    print("save base query txt %s" % dataset_name)

    gnd_idx, _gnd_dist = ip_gnd(base, query, topk)  # distances are not persisted
    ivecs_write("%s/gnd.ivecs" % out_dir, gnd_idx)
    print("save gnd %s" % dataset_name)

    return len(base), len(query), dim


def get_info(dataset_name, ds_config, topk, efsearch_l):
    """Build the metadata dict and shell commands for one dataset.

    ds_config is the (n_base, n_query, dim) tuple returned by build_data.
    Returns (info_dict, index_build_command, list_of_query_commands), one
    query command per efSearch value in efsearch_l.
    """
    n_base, n_query, dim = ds_config
    info = {
        "vecsize": str(n_base),
        "vecdim": str(dim),
        "qsize": str(n_query),
        "dataset": dataset_name,
    }
    index_cmd = '../../build_graph.sh ../../data/%s/%s_base.txt %d %d' % (
        dataset_name, dataset_name, n_base, dim)
    query_cmd_l = [
        "../../test_query.sh ../../data/%s/%s_query.txt %d %d %d %d" % (
            dataset_name, dataset_name, n_query, dim, ef, topk)
        for ef in efsearch_l
    ]
    print("get info dataset %s" % dataset_name)
    return info, index_cmd, query_cmd_l


'''
Usage: ./build_graph.sh <build_data> <row> <dimension> [mobius_pow]
For example: ./build_graph.sh base.txt 1000 128
With the optional mobius_pow argument: ./build_graph.sh base.txt 1000 128 1.9

Usage: ./test_query.sh <query_data> <built_graph_row> <built_graph_dimension> <search_budget> [display top k]
For example: ./test_query.sh test.txt 1000 128 100
Use display top 5: ./test_query.sh test.txt 1000 128 100 5
'''

if __name__ == '__main__':
    # Source directory holding <name>/<name>_{base,query}.fvecs, and the
    # destination root for the converted txt/gnd outputs.
    # old_basic_dir = '/home/zhengbian/Dataset/inner_product'
    old_basic_dir = '/home/zhengbian/plus-ip-nsw/data'
    new_basic_dir = '/home/zhengbian/mobius/data'
    dataset_name_l = ['text-to-image']
    # dataset_name_l = ['audio', 'imagenet', 'movielens', 'music100', 'netflix', 'normal-64', 'text-to-image-1b',
    #                   'tiny5m', 'word2vec', 'yahoomusic']
    ef_search_l = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 220, 240, 260, 280, 300, 400, 500,
                   600, 700, 800, 900, 2000, 4000, 6000, 8000, 10000, 12000, 14000, 16000, 18000]
    topk = 10

    # Convert every dataset; keep its (n_base, n_query, dim) for the manifests.
    ds_info_l = [build_data(name, old_basic_dir, new_basic_dir, topk)
                 for name in dataset_name_l]

    dataset_info_m = {}
    index_cmd_m = {}
    query_cmd_m = {}
    for name, ds_info in zip(dataset_name_l, ds_info_l):
        info, build_cmd, query_cmds = get_info(name, ds_info, topk, ef_search_l)
        dataset_info_m[name] = info
        index_cmd_m[name] = build_cmd
        query_cmd_m[name] = query_cmds

    # Persist the three manifests consumed by the experiment scripts.
    for out_name, payload in (("dataset_info.json", dataset_info_m),
                              ("index_cmd.json", index_cmd_m),
                              ("query_cmd.json", query_cmd_m)):
        with open(out_name, 'w') as f:
            json.dump(payload, f)
