import os
import pickle as pkl
import numpy as np
import tensorflow as tf
import pandas as pd
from process_text import process_text
from tokenization import FullTokenizer, BasicTokenizer, WordpieceTokenizer, \
    load_vocab, convert_to_unicode, whitespace_tokenize, printable_text

# Special placeholder tokens — presumably substituted for images, URLs, emails,
# @-mentions and timestamps during text normalization (see process_text).
# NOTE(review): not referenced anywhere in this file; confirm against
# process_text before removing.
replace_list = ['I_M_G', 'H_T_T_P', 'E_M_A_I_L', 'A_T_P_E_R', 'T_I_M_E']


def _truncate_seq_pair(tokens_a, tokens_b, q_max_len, c_max_len):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    def truncate(tokens, max_len):
        while True:
            total_length = len(tokens)
            if total_length <= max_len:
                break
            tokens.pop()

    # print('a:', len(tokens_a), '\tb:', len(tokens_b))
    truncate(tokens_a, q_max_len)
    truncate(tokens_b, c_max_len)


class InputExample(object):
    """A single training/test example for simple sequence classification.

    Attributes mirror the constructor arguments one-to-one.
    """

    def __init__(self, guid, text_a, text_b, q_type, label=None):
        """Construct an InputExample.

        Args:
          guid: Unique id for the example.
          text_a: string. The untokenized text of the first sequence. For
            single-sequence tasks, only this sequence must be specified.
          text_b: (Optional) string. The untokenized text of the second
            sequence. Only must be specified for sequence-pair tasks.
          q_type: question-type tag carried through unchanged.
          label: (Optional) string. The label of the example. Should be
            specified for train and dev examples, but not for test examples.
        """
        # Plain value-object: store everything verbatim.
        self.guid, self.label = guid, label
        self.text_a, self.text_b = text_a, text_b
        self.q_type = q_type


class InputFeatures(object):
    """A single set of features of data.

    Holds the fully-padded BERT inputs for one example:
    token ids, the three position-extraction vectors, the attention
    mask, segment ids, the question type and the label id.
    """

    def __init__(self, input_ids, input1_extract, input2_extract,
                 input3_extract, input_mask, segment_ids, q_type, label_id):
        # Core BERT inputs.
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids = segment_ids
        # Token-position extraction vectors (third one may be None).
        self.input1_extract = input1_extract
        self.input2_extract = input2_extract
        self.input3_extract = input3_extract
        # Target metadata.
        self.q_type, self.label_id = q_type, label_id


def read_examples(data_file):
    """Read a tab-separated WikiQA file into a list of InputExample objects.

    The first line is treated as a header; column positions are resolved
    from it, so the column order in the file does not matter.  The columns
    'SentenceID', 'Question', 'Sentence' and 'Label' must be present.

    Args:
      data_file: path to a WikiQA-style .tsv file.

    Returns:
      list of InputExample (q_type is always None for this dataset;
      label is parsed as int).
    """
    examples = []
    with open(data_file) as fr:
        # Resolve column name -> index from the header line once, instead
        # of carrying a first-row flag through the loop.
        header = fr.readline()
        str2index = {name.strip(): i for i, name in enumerate(header.split('\t'))}
        print(str2index)
        for row in fr:
            # rstrip('\n') removes the line terminator from the final
            # column while keeping empty fields intact.  Previously the
            # newline stayed attached to the last field; that was only
            # harmless because int() tolerates trailing whitespace.
            line = row.rstrip('\n').split('\t')

            guid = line[str2index['SentenceID']]
            text_a = line[str2index['Question']]
            text_b = line[str2index['Sentence']]
            q_type = None
            label = int(line[str2index['Label']])
            examples.append(InputExample(guid, text_a, text_b, q_type, label))
    return examples


def convert_to_features(examples, q_max_len, c_max_len, tokenizer):
    """Convert InputExamples into BERT-style InputFeatures.

    Each example is packed as

        [CLS] question tokens [SEP] sentence tokens [SEP]

    then zero-padded to a fixed length of ``q_max_len + c_max_len + 3``.

    ``input_extract`` tags token roles: 4 = [CLS], 1 = question token,
    6 = middle [SEP], 2 = sentence token, 7 = final [SEP], 0 = padding.
    ``input1_extract`` / ``input2_extract`` hold the positions of the
    question / sentence tokens inside the packed sequence, zero-padded to
    q_max_len / c_max_len; ``input3_extract`` is unused here (None).

    Args:
      examples: list of InputExample.
      q_max_len: maximum number of question (text_a) tokens kept.
      c_max_len: maximum number of sentence (text_b) tokens kept.
      tokenizer: object with tokenize() and convert_tokens_to_ids().

    Returns:
      list of InputFeatures.  Also prints pre-truncation length maxima and
      selected length percentiles to help tune q_max_len/c_max_len.
    """
    features = []
    new_qmaxlen = 0
    new_cmaxlen = 0
    q_len_list = []
    c_len_list = []
    for ex_index, example in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = tokenizer.tokenize(example.text_b)

        # Record pre-truncation lengths for the statistics printed below.
        q_len_list.append(len(tokens_a))
        c_len_list.append(len(tokens_b))

        if len(tokens_a) > new_qmaxlen:
            new_qmaxlen = len(tokens_a)
            # Show which example set a new question-length record.
            print(example.guid)
        if len(tokens_b) > new_cmaxlen:
            new_cmaxlen = len(tokens_b)

        _truncate_seq_pair(tokens_a, tokens_b, q_max_len, c_max_len)

        sent_len_a = len(tokens_a)
        sent_len_b = len(tokens_b)

        # Pack the pair; role codes as documented above (the original
        # built these with per-token append loops and a running counter).
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
        input_extract = [4] + [1] * sent_len_a + [6] + [2] * sent_len_b + [7]
        segment_ids = [0] * (sent_len_a + 2) + [1] * (sent_len_b + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad every per-token list up to the fixed sequence length.
        max_len = q_max_len + c_max_len + 3
        pad_len = max_len - len(input_ids)
        input_ids += [0] * pad_len
        input_extract += [0] * pad_len
        input_mask += [0] * pad_len
        segment_ids += [0] * pad_len

        assert len(input_ids) == max_len
        assert len(input_extract) == max_len
        assert len(input_mask) == max_len
        assert len(segment_ids) == max_len

        q_type = example.q_type
        label_id = example.label
        if ex_index < 5:
            tf.logging.info("*** Example ***")
            tf.logging.info("guid: %s" % example.guid)
            tf.logging.info("tokens: %s" % " ".join(
                [printable_text(x) for x in tokens]))
            tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            tf.logging.info("input_extract: %s" % " ".join([str(x) for x in input_extract]))
            # Fixed log-key typo: was "input_maske".
            tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            tf.logging.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

        # Question tokens start at index 1 (right after [CLS]).
        input1_extract = list(range(1, 1 + sent_len_a)) + \
                         [0] * (q_max_len - sent_len_a)
        # Sentence tokens start after [CLS], the question and the mid [SEP].
        b_start_index = sent_len_a + 2
        input2_extract = list(range(b_start_index, b_start_index + sent_len_b)) + \
                         [0] * (c_max_len - sent_len_b)
        input3_extract = None  # no third segment in this two-sequence task

        features.append(
            InputFeatures(
                input_ids=input_ids,
                input1_extract=input1_extract,
                input2_extract=input2_extract,
                input3_extract=input3_extract,
                input_mask=input_mask,
                segment_ids=segment_ids,
                q_type=q_type,
                label_id=label_id))

    print("new q max length and c max length: ", new_qmaxlen, new_cmaxlen)
    # Print selected length percentiles (guarded so an empty example list no
    # longer raises IndexError; previously these were list comprehensions
    # used purely for their print side effect).
    if q_len_list:
        q_len_list.sort()
        c_len_list.sort()
        view_rate = [0.99, 0.991, 0.995, 0.999]
        print('=================')
        for r in view_rate:
            print(q_len_list[int(len(q_len_list) * r)])
        print('=================')
        for r in view_rate:
            print(c_len_list[int(len(c_len_list) * r)])
        print('=================')

    return features


def main():
    """Read the WikiQA splits, convert them to BERT features and pickle them.

    Writes [train_features, dev_cid, dev_features] to cqa_data.pkl, where
    dev_cid is the list of guids of the dev examples.
    """
    tf.logging.set_verbosity(tf.logging.INFO)

    # bert_dir = '/home/guest/Documents/BERT/uncased_L-12_H-768_A-12/uncased_L-12_H-768_A-12/'
    bert_dir = '/home/ahashi_syuu/Documents/BERT/uncased_L-12_H-768_A-12/uncased_L-12_H-768_A-12/'
    vocab_file = bert_dir + 'vocab.txt'
    key_list = ['train', 'dev', 'test-gold']

    print('reading dataset')
    data_dir = './data/raw'

    # Sequence-length caps (tokens) for question and sentence.
    q_max_len = 27
    c_max_len = 114

    # train+dev go into training; test-gold serves as the dev/eval split here.
    train_list = key_list[0:2]
    dev_list = key_list[2:]
    test_list = []

    print('processing: reading')
    train_examples = [
        example
        for name in train_list
        for example in read_examples(
            os.path.join(data_dir, 'WikiQA-{}.tsv'.format(name)))
    ]

    dev_examples = [
        example
        for name in dev_list
        for example in read_examples(
            os.path.join(data_dir, 'WikiQA-{}.tsv'.format(name)))
    ]
    dev_cid = [example.guid for example in dev_examples]

    tokenizer = FullTokenizer(vocab_file=vocab_file, do_lower_case=True)
    print('processing: train to features')
    train_features = convert_to_features(train_examples, q_max_len, c_max_len, tokenizer)
    print('processing: dev to features')
    dev_features = convert_to_features(dev_examples, q_max_len, c_max_len, tokenizer)

    with open('cqa_data.pkl', 'wb') as fw:
        pkl.dump([train_features, dev_cid, dev_features], fw)


# Run the full preprocessing pipeline when executed as a script.
if __name__ == '__main__':
    main()

