# -*- coding:utf-8 -*-
import sys
import numpy as np
import collections
import random
from utils.tokenizer import Tokenizer


def generate_neg_data_from_search_result(input_path, output_path):
    """Generate (post, negative-response) training pairs from search results.

    The input is a 4-column TSV, sorted so that rows with the same post
    (column 0) are adjacent; column 3 holds a retrieved response. For each
    post group, up to 100 responses are sampled without replacement from the
    group's last 400 rows and written to *output_path* as
    ``tokenized_post \t tokenized_negative`` lines.

    Args:
        input_path: path to the 4-column TSV search-result file.
        output_path: path of the TSV file to write (post, negative) pairs to.

    Raises:
        AssertionError: if an input line does not have exactly 4 columns.
    """
    g_samples = []
    n_group = 0
    tokenizer = Tokenizer()

    def _flush_group(fw, samples):
        # Sample up to 100 negatives from the last 400 responses of one group.
        # min() guards small groups: replace=False requires size <= population.
        neg_samples = samples[-400:]
        post_sent = " ".join(tokenizer.tokenize(samples[0][0]))
        n_pick = min(100, len(neg_samples))
        neg_idx = np.random.choice(len(neg_samples), size=[n_pick], replace=False)
        for idx in neg_idx:
            neg_sent = " ".join(tokenizer.tokenize(neg_samples[idx][1]))
            fw.write("{}\t{}\n".format(post_sent, neg_sent))

    with open(input_path, 'r', encoding='utf-8') as fr, \
            open(output_path, 'w', encoding='utf-8') as fw:
        for line in fr:
            ss = line.rstrip().split('\t')
            assert len(ss) == 4

            # A change of post (column 0) closes the previous group.
            if g_samples and g_samples[-1][0] != ss[0]:
                n_group += 1
                if n_group % 1000 == 0:
                    print('processing {} groups ...'.format(n_group))
                _flush_group(fw, g_samples)
                g_samples = []
            g_samples.append((ss[0], ss[3]))

        # Flush the trailing group. Bug fix: the original used a hard-coded
        # size=[100] here, crashing whenever the last group had < 100 rows.
        if g_samples:
            n_group += 1
            _flush_group(fw, g_samples)

    print("total groups : {}".format(n_group))


def load_es_neg_data(path):
    """Load ES-retrieved negatives grouped by query.

    Reads a 2-column TSV (query, negative sentence); malformed lines are
    reported and skipped rather than aborting the load.

    Args:
        path: path to the 2-column TSV file.

    Returns:
        defaultdict mapping each query to its list of negative sentences,
        in file order.
    """
    grouped = collections.defaultdict(list)
    with open(path, 'r', encoding='utf-8') as fr:
        for raw in fr:
            fields = raw.rstrip().split('\t')
            if len(fields) == 2:
                query, neg_sent = fields
                grouped[query].append(neg_sent)
            else:
                # Tolerate dirty lines: report and move on.
                print("wrong line: {}".format(raw.rstrip()))
    print("group size: {}".format(len(grouped)))
    return grouped


def load_repos_data(path):
    """Load repository (query, positive response) data grouped by query.

    Reads a strictly 2-column TSV; unlike ``load_es_neg_data`` this loader
    treats a malformed line as a hard error.

    Args:
        path: path to the 2-column TSV file.

    Returns:
        defaultdict mapping each query to its list of responses, in file order.

    Raises:
        AssertionError: if any line does not have exactly 2 columns.
    """
    repos = collections.defaultdict(list)
    with open(path, 'r', encoding='utf-8') as fr:
        for raw in fr:
            parts = raw.rstrip().split('\t')
            assert len(parts) == 2
            query, response = parts
            repos[query].append(response)
    print("group size: {}".format(len(repos)))
    return repos


def generate_train_data(raw_input_path, es_input_path, saved_dir):
    """Build validation and training TSV files for retrieval-model training.

    Splits the shuffled queries into a 10k validation slice and a 50k
    training slice, then writes three files under *saved_dir*:
      - stc2-repos.valid.10k.txt: up to 5 positives + 10 ES negatives/query.
      - stc2-repos.rand.train.50k.txt: one random negative per positive.
      - stc2-repos.gan.train.50k.txt: up to 5 positives, all ES negatives,
        padded with random negatives up to 200 negatives per query.

    Args:
        raw_input_path: 2-column TSV of (query, positive response) pairs.
        es_input_path: 2-column TSV of (query, ES-retrieved negative) pairs.
        saved_dir: output directory; expected to end with a path separator
            and to already exist.
    """
    es_neg_data = load_es_neg_data(es_input_path)
    raw_data = load_repos_data(raw_input_path)

    # Pool of all distinct positive responses, used for random negatives.
    candidates = list({c for cands in raw_data.values() for c in cands})
    candidate_ids = list(range(len(candidates)))

    query_keys = list(raw_data.keys())
    q_ids = list(range(len(query_keys)))
    np.random.shuffle(q_ids)

    # generate valid data: first 10k shuffled queries.
    with open(saved_dir + "stc2-repos.valid.10k.txt", "w", encoding="utf-8") as fw:
        count = 0
        for q_id in q_ids[:10000]:
            query = query_keys[q_id]
            if query not in es_neg_data:
                print("not found query in es neg: {}".format(query))
                continue

            for candidate in raw_data[query][:5]:
                fw.write("1\t{}\t{}\n".format(query, candidate))
            for neg_sent in es_neg_data[query][:10]:
                fw.write("0\t{}\t{}\n".format(query, neg_sent))

            count += 1
            if count % 1000 == 0:
                print("index valid {} ...".format(count))

    # generate train data: next 50k shuffled queries.
    with open(saved_dir + "stc2-repos.gan.train.50k.txt", "w", encoding='utf-8') as fw2, \
            open(saved_dir + "stc2-repos.rand.train.50k.txt", "w", encoding="utf-8") as fw1:
        count = 0
        for q_id in q_ids[10000:60000]:
            query = query_keys[q_id]
            if query not in es_neg_data:
                print("not found query in es neg: {}".format(query))
                continue

            es_neg_sents = es_neg_data[query]
            # Random-negative file: pair each positive with one random negative.
            for candidate in raw_data[query][:5]:
                fw1.write("1\t{}\t{}\n".format(query, candidate))
                rid = random.randint(0, len(candidates) - 1)
                fw1.write("0\t{}\t{}\n".format(query, candidates[rid]))

            # GAN file: positives, then all ES negatives.
            for candidate in raw_data[query][:5]:
                fw2.write("1\t{}\t{}\n".format(query, candidate))
            for neg_sent in es_neg_sents:
                fw2.write("0\t{}\t{}\n".format(query, neg_sent))

            # Pad with random negatives up to 200 per query. Bug fix: the
            # original size [200 - len(es_neg_sents)] went negative (and
            # raised) for queries with > 200 ES negatives; also cap by the
            # pool size since replace=False requires size <= population.
            n_rand = min(max(0, 200 - len(es_neg_sents)), len(candidate_ids))
            if n_rand > 0:
                rand_neg_idx = np.random.choice(candidate_ids, size=[n_rand], replace=False)
                for neg_idx in rand_neg_idx:
                    fw2.write("0\t{}\t{}\n".format(query, candidates[neg_idx]))

            count += 1
            if count % 1000 == 0:
                print("index train {} ...".format(count))
            # NOTE: the original "if count == 100000: break" was unreachable
            # (the loop covers at most 50000 ids) and has been removed.
    print('over')


if __name__ == '__main__':
    # CLI entry point: argv[1] = repos (query, positive) TSV path,
    # argv[2] = ES negatives TSV path. Output files are written under
    # ./data/stc-gan/ — the directory must already exist.
    # generate_neg_data_from_search_result(sys.argv[1], sys.argv[2])
    generate_train_data(sys.argv[1], sys.argv[2], "./data/stc-gan/")
    pass
