# -*- coding: utf-8 -*-
"""
******* 文档说明 ******

# 当前项目: Project_Classify
# 创建时间: 2020-04-23  17:41
# 开发作者: Vincent
# 版    本: V1.0
"""
import os
import csv
import json
import tokenization
import collections
import tensorflow as tf
from collections import Counter


# Logging helper: routes messages to a logger when provided, else plain print.
def log(print_fun=None, level='DEBUG'):
    """Return a one-argument print function bound to the given log level.

    :param print_fun:  logging object exposing debug/info/warning/error;
                       None means fall back to plain print
    :param level:      log level name, case-insensitive ('info' == 'INFO')
    :return:           callable taking the message string
    """
    # BUG FIX: callers in this file pass lower-case levels (level='info'),
    # which previously failed the 'INFO' comparison and fell through to the
    # debug branch. Normalize once so matching is case-insensitive.
    level = level.upper()

    def _print(string):
        if print_fun is None:
            print('{:8s} {}'.format(level, string))
        elif level == 'INFO':
            print_fun.info(string)
        elif level == 'WARNING':
            print_fun.warning(string)
        elif level == 'ERROR':
            print_fun.error(string)
        else:
            print_fun.debug(string)

    return _print


# class DataProcessor(object):
#     """Base class for data converters for sequence classification data sets."""
#
#     def get_train_examples(self, data_dir):
#         """Gets a collection of `InputExample`s for the train set."""
#         raise NotImplementedError()
#
#     def get_dev_examples(self, data_dir):
#         """Gets a collection of `InputExample`s for the dev set."""
#         raise NotImplementedError()
#
#     def get_test_examples(self, data_dir):
#         """Gets a collection of `InputExample`s for prediction."""
#         raise NotImplementedError()
#
#     def get_labels(self, labels):
#         """Gets the list of labels for this data set."""
#         raise NotImplementedError()
#
#     # added base class function for get_results
#     def get_results(self, data_dir):
#         """Gets a collection of `InputExample`s for prediction."""
#         raise NotImplementedError()
#
#     @classmethod
#     def _read_tsv(cls, input_file, quotechar=None):
#         """Reads a tab separated value file."""
#         with tf.gfile.Open(input_file, "r") as f:
#             reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
#             lines = []
#             for line in reader:
#                 lines.append(line)
#             return lines
#
#     @classmethod
#     def _read_csv(cls, input_file, quotechar=None):
#         """Reads a tab separated value file."""
#         lines = []
#         for line_data in csv.reader(open(input_file, encoding='utf-8', errors='ignore')):
#             lines.append(line_data)
#         return lines


class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()

    def get_labels(self, labels):
        """Gets the list of labels for this data set.

        Restored from the original base-class contract; both subclasses
        in this file implement it.
        """
        raise NotImplementedError()

    # TODO: add handling for more input file types
    @classmethod
    def read_data(cls, input_file):
        """Read a JSON-lines file (one JSON object per line).

        Args:
          input_file: path to a UTF-8 encoded JSON-lines file.
        Returns:
          list of parsed objects, one per non-blank line.
        """
        lines = []
        # BUG FIX: use `with` so the file handle is closed deterministically
        # (it was previously left open until garbage collection).
        with open(input_file, "r", encoding='utf-8') as f:
            for raw_line in f:
                raw_line = raw_line.strip()
                # Tolerate blank/trailing lines instead of crashing in
                # json.loads('').
                if not raw_line:
                    continue
                lines.append(json.loads(raw_line))
        return lines


class SimilarProcessor(DataProcessor):
    """Processor for sentence-pair similarity data stored as JSON lines."""

    # TODO: validate input files before reading
    def get_train_examples(self, data_dir):
        """Build `InputExample`s from <data_dir>/train.json."""
        records = self.read_data(os.path.join(data_dir, "train.json"))
        return self._create_example(records, "train")

    def get_dev_examples(self, data_dir):
        """Build `InputExample`s from <data_dir>/dev.json."""
        records = self.read_data(os.path.join(data_dir, "dev.json"))
        return self._create_example(records, "dev")

    def get_test_examples(self, data_dir):
        """Build `InputExample`s from <data_dir>/test.json."""
        records = self.read_data(os.path.join(data_dir, "test.json"))
        return self._create_example(records, "test")

    def get_labels(self, labels):
        """Return the distinct label values as a set."""
        return set(labels)

    @staticmethod
    def _create_example(lines, set_type):
        """Turn parsed JSON records into (examples, labels).

        Each record must carry 'sentence1', 'sentence2' and 'label' keys.
        """
        examples = []
        labels = []
        for idx, record in enumerate(lines):
            guid = "%s-%s" % (set_type, idx)
            sent_a = tokenization.convert_to_unicode(record['sentence1'])
            sent_b = tokenization.convert_to_unicode(record['sentence2'])
            tag = tokenization.convert_to_unicode(record['label'])
            labels.append(tag)
            examples.append(
                InputExample(guid=guid, text_a=sent_a, text_b=sent_b, label=tag))
        return examples, labels


class ClassificationProcessor(DataProcessor):
    """Processor for the myse data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_csv(os.path.join(data_dir, "Train.csv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_csv(os.path.join(data_dir, "Dev.csv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""

        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "test")

    def get_labels(self, labels):
        """See base class."""
        return set(labels)

    # new function get the result tsv
    def get_results(self, data_dir):
        """See base class."""
        return self._read_tsv(os.path.join(data_dir, "test_results.tsv"))

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        labels = []
        labels_test = []
        for (i, line) in enumerate(lines):
            # if i == 0:
            #     continue
            guid = "%s-%s" % (set_type, i)

            # tokenization is based on vocab file
            text_a = tokenization.convert_to_unicode(line[1])
            label = tokenization.convert_to_unicode(line[0])
            labels.append(label)

            if set_type == "test":
                label = "0"
            labels_test.append(label)
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))

        return examples, labels, labels_test


# Simple container for one piece of text data.
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
          guid: Unique id for the example.
          text_a: string. The untokenized text of the first sequence. For
            single sequence tasks, only this sequence must be specified.
          text_b: (Optional) string. The untokenized text of the second
            sequence. Only must be specified for sequence pair tasks.
          label: (Optional) string. The label of the example. This should
            be specified for train and dev examples, but not for test
            examples.
        """
        self.guid, self.text_a = guid, text_a
        self.text_b, self.label = text_b, label


def file_based_convert_examples_to_features(
        examples, label_list, max_seq_length, tokenizer, output_file, logger):
    """Convert a set of `InputExample`s to a TFRecord file.

    Args:
      examples: list of `InputExample`s to serialize.
      label_list: iterable of label strings; sorted so ids are stable.
      max_seq_length: fixed sequence length for every example.
      tokenizer: tokenizer passed through to `convert_single_example`.
      output_file: destination TFRecord path.
      logger: logging object; None means plain print.
    Returns:
      dict mapping label string -> integer id.
    """
    # Hoisted out of the loop (was re-defined on every iteration).
    def _int_feature(values):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

    # Sorted so label ids are reproducible across runs.
    label_map = {label: i for (i, label) in enumerate(sorted(label_list))}

    # CONSISTENCY FIX: progress messages now go through the shared log
    # helper instead of bypassing the provided logger with a bare print().
    info = log(logger, level='INFO')

    writer = tf.python_io.TFRecordWriter(output_file)
    try:
        for (ex_index, example) in enumerate(examples):
            if ex_index % 10000 == 0:
                info("Writing example %d of %d" % (ex_index, len(examples)))

            # Convert the raw example into fixed-length feature vectors.
            feature = convert_single_example(ex_index, example, label_map,
                                             max_seq_length, tokenizer, logger)

            # Pack the feature vectors as int64 lists for serialization.
            features = collections.OrderedDict()
            features["input_ids"] = _int_feature(feature.input_ids)
            features["input_mask"] = _int_feature(feature.input_mask)
            features["segment_ids"] = _int_feature(feature.segment_ids)
            features["label_ids"] = _int_feature([feature.label_id])

            tf_example = tf.train.Example(features=tf.train.Features(feature=features))
            writer.write(tf_example.SerializeToString())
    finally:
        # BUG FIX: the writer was never closed, so trailing records could
        # remain unflushed to disk.
        writer.close()
    return label_map


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


# Turn one raw example into model-ready input features.
def convert_single_example(ex_index, example, label_map, max_seq_length,
                           tokenizer, logger):
    """Converts a single `InputExample` into a single `InputFeatures`.

    Args:
      ex_index: index of the example; the first 5 are logged for inspection.
      example: `InputExample` with text_a and optional text_b.
      label_map: dict mapping label string -> integer id.
      max_seq_length: fixed output length; inputs are truncated/padded to it.
      tokenizer: object providing tokenize() and convert_tokens_to_ids().
      logger: logging object; None means plain print.
    """
    first = tokenizer.tokenize(example.text_a)
    second = tokenizer.tokenize(example.text_b) if example.text_b else None

    if second:
        # Reserve 3 slots for [CLS], [SEP], [SEP]; trims both lists in place.
        _truncate_seq_pair(first, second, max_seq_length - 3)
    elif len(first) > max_seq_length - 2:
        # Reserve 2 slots for [CLS] and [SEP].
        first = first[:max_seq_length - 2]

    # BERT layout: [CLS] A... [SEP] with type id 0, then the optional
    # B... [SEP] with type id 1. The type ids let the model distinguish
    # the two sequences; the [CLS] position is what classification heads
    # read as the sentence vector.
    tokens = ["[CLS]"] + first + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    if second:
        tokens = tokens + second + ["[SEP]"]
        segment_ids = segment_ids + [1] * (len(second) + 1)

    # Vocabulary ids for each token; the mask is 1 for real tokens and
    # will be 0 for padding so attention ignores the pad positions.
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)

    # Zero-pad every vector out to the fixed sequence length.
    pad = max_seq_length - len(input_ids)
    input_ids = input_ids + [0] * pad
    input_mask = input_mask + [0] * pad
    segment_ids = segment_ids + [0] * pad

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    label_id = label_map[example.label]
    if ex_index < 5:
        debug = log(logger, level='DEBUG')
        debug("*** Example ***")
        debug("guid: %s" % example.guid)
        debug("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        debug("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        debug("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        debug("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        debug("label: %s (id = %d)" % (example.label, label_id))

    return InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        label_id=label_id)


class InputFeatures(object):
    """A single set of features of data.

    Plain holder for the four parallel vectors produced by
    `convert_single_example`.
    """

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        # token ids / attention mask / segment ids, each padded to the
        # model's max sequence length; label_id is the integer class.
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id


# Pull tensors out of a TFRecord file via a one-shot iterator.
def read_tf_record(max_seq_length, num_train_epochs, buffer_size, train_batch_size, tfrecord_file):
    """Return (input_ids, input_mask, segment_ids, label_ids) batch tensors.

    max_seq_length   : sequence length of each stored example
    num_train_epochs : how many times the dataset is repeated
    buffer_size      : shuffle buffer size
    train_batch_size : batch size
    tfrecord_file    : path of the TFRecord file to read
    """

    def _parse(serialized):
        # Map one serialized example back to its fixed-length feature tensors.
        parsed = tf.parse_single_example(
            serialized,
            features={
                "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
                "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
                "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
                "label_ids": tf.FixedLenFeature([], tf.int64),
            }
        )
        return (parsed['input_ids'], parsed['input_mask'],
                parsed['segment_ids'], parsed['label_ids'])

    # Same pipeline order as before: parse -> repeat -> shuffle -> batch.
    # NOTE(review): shuffling after repeat mixes examples across epoch
    # boundaries — confirm that is intended.
    dataset = (tf.data.TFRecordDataset(tfrecord_file)
               .map(_parse)
               .repeat(num_train_epochs)
               .shuffle(buffer_size, seed=0)
               .batch(train_batch_size))

    # Iterator output is symbolic; sess.run() yields the concrete arrays.
    return dataset.make_one_shot_iterator().get_next()


# Clean the training corpus and serialize it into a TFRecord file.
def write_tf_record(train_examples_path, bert_vocab_file, do_lower_case, max_seq_length, tfrecord_file,
                    logger=None):
    """Read, shuffle and serialize the training data.

    train_examples_path : training corpus directory (expects train.json)
    bert_vocab_file     : BERT vocabulary file path
    do_lower_case       : whether the BERT tokenizer lower-cases input
    max_seq_length      : maximum sequence length
    tfrecord_file       : TFRecord output path
    logger              : logging object; None means plain print
    """
    info = log(logger, level='info')

    # Load examples and their labels from train.json.
    processor = SimilarProcessor()
    train_examples, train_labels = processor.get_train_examples(train_examples_path)
    info("训练数据：{}".format(Counter(train_labels)))

    # Deterministic shuffle of the example order.
    import random
    random.seed(0)
    random.shuffle(train_examples)

    # Distinct label values.
    label_list = processor.get_labels(train_labels)

    # Tokenizer built from the BERT vocabulary.
    tokenizer = tokenization.FullTokenizer(vocab_file=bert_vocab_file, do_lower_case=do_lower_case)

    # Serialize all examples; returns the label -> id mapping used.
    label_map_train = file_based_convert_examples_to_features(
        train_examples, label_list, max_seq_length, tokenizer, tfrecord_file, logger)
    info("标签序号字典：{}".format(label_map_train))


# Fetch training batch tensors, creating the TFRecord file on first use.
def get_train_tfrecord_data(tfrecord_file, num_train_epochs=2, buffer_size=1000, train_batch_size=10,
                            train_examples_path=None, bert_vocab_file=None,
                            do_lower_case=None, max_seq_length=None, logger=None):
    """
    :param tfrecord_file:     TFRecord path for the training data
    :param num_train_epochs:  number of training epochs
    :param buffer_size:       shuffle buffer size
    :param train_batch_size:  batch size
    :param train_examples_path:  training corpus path
    :param bert_vocab_file:      BERT vocabulary file path
    :param do_lower_case:        whether BERT tokenization lower-cases
    :param max_seq_length:       maximum sequence length
    :param logger:               logging object
    :return: (input_ids, input_mask, segment_ids, label_ids) batch tensors
    """
    # Convert the raw corpus only once; later runs reuse the TFRecord.
    if not os.path.exists(tfrecord_file):
        os.makedirs(os.path.dirname(tfrecord_file), exist_ok=True)
        write_tf_record(train_examples_path, bert_vocab_file, do_lower_case,
                        max_seq_length, tfrecord_file, logger)

    # Build the batched input pipeline from the TFRecord file.
    return read_tf_record(max_seq_length, num_train_epochs, buffer_size,
                          train_batch_size, tfrecord_file)


# Model validation data
def get_dev_data(dir_data_dev, bert_vocab_file, do_lower_case, max_seq_length, logger=None):
    """Load and featurize the dev split.

    :param dir_data_dev:     dev data directory (expects dev.json)
    :param bert_vocab_file:  BERT vocabulary file path
    :param do_lower_case:    whether BERT tokenization lower-cases
    :param max_seq_length:   maximum sequence length
    :param logger:           logging object; None means plain print
    :return: (dev_examples, dev_input_ids, dev_input_mask, dev_label_id, dev_segment_ids)
    """
    info = log(logger, level='info')

    processor = SimilarProcessor()

    # Examples and labels from dev.json.
    dev_examples, dev_labels = processor.get_dev_examples(dir_data_dev)
    info("验证数据：{}".format(Counter(dev_labels)))

    # Distinct label values, mapped to stable integer ids via sorting.
    label_list = processor.get_labels(dev_labels)

    tokenizer = tokenization.FullTokenizer(
        vocab_file=bert_vocab_file, do_lower_case=do_lower_case)

    label_map = {label: i for i, label in enumerate(sorted(label_list))}

    dev_input_ids = []
    dev_input_mask = []
    dev_label_id = []
    dev_segment_ids = []

    for ex_index, example in enumerate(dev_examples):
        # Convert each example into fixed-length feature vectors.
        feature = convert_single_example(ex_index, example, label_map,
                                         max_seq_length, tokenizer, logger)

        dev_input_ids.append(feature.input_ids)
        dev_input_mask.append(feature.input_mask)
        dev_label_id.append(feature.label_id)
        dev_segment_ids.append(feature.segment_ids)

    info("Dev Data Num:{}".format(len(dev_label_id)))

    return dev_examples, dev_input_ids, dev_input_mask, dev_label_id, dev_segment_ids


# Model test data
def get_test_data(dir_data_test, bert_vocab_file, do_lower_case, max_seq_length, logger=None):
    """Load and featurize the test split.

    :param dir_data_test:    test data directory (expects test.json)
    :param bert_vocab_file:  BERT vocabulary file path
    :param do_lower_case:    whether BERT tokenization lower-cases
    :param max_seq_length:   maximum sequence length
    :param logger:           logging object; None means plain print
    :return: (test_examples, test_input_ids, test_input_mask, test_label_id, test_segment_ids)
    """

    processor = SimilarProcessor()

    # Examples and labels from test.json.
    test_examples, test_labels = processor.get_test_examples(dir_data_test)
    # BUG FIX: the message previously said "验证数据" (dev data), a
    # copy-paste from get_dev_data; this function loads the test split.
    log(logger, level='info')("测试数据：{}".format(Counter(test_labels)))

    # Distinct label values, mapped to stable integer ids via sorting.
    label_list = processor.get_labels(test_labels)

    tokenizer = tokenization.FullTokenizer(
        vocab_file=bert_vocab_file, do_lower_case=do_lower_case)

    label_map = {label: i for (i, label) in enumerate(sorted(label_list))}

    test_input_ids = list()
    test_input_mask = list()
    test_label_id = list()
    test_segment_ids = list()

    for (ex_index, example) in enumerate(test_examples):

        # Convert each example into fixed-length feature vectors.
        feature = convert_single_example(ex_index, example, label_map,
                                         max_seq_length, tokenizer, logger)

        test_input_ids.append(feature.input_ids)
        test_input_mask.append(feature.input_mask)
        test_label_id.append(feature.label_id)
        test_segment_ids.append(feature.segment_ids)

    log(logger, level='info')("Test Data Num:{}".format(len(test_label_id)))

    return test_examples, test_input_ids, test_input_mask, test_label_id, test_segment_ids


if __name__ == '__main__':
    # # Previous experiment paths, kept for reference:
    # _train_examples_path = r'D:\Data\NLP\Classify'
    # _bert_vocab_file = r'D:\Desktop\BERT_Demo\BertModel\BERT_BASE_DIR_CHINESE\vocab.txt'
    # _tfrecord_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
    #                                               '_Classify', '_tf_record', 'Classify_train.tfrecord'))

    # Input paths: training corpus, BERT vocabulary, and TFRecord output.
    # NOTE(review): hard-coded Windows paths — adjust per machine.
    _train_examples_path = r'D:\Data\CCF\train'
    _bert_vocab_file = r'D:\PreTrainedModel\chinese-bert_chinese_wwm_L-12_H-768_A-12\vocab.txt'
    _tfrecord_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '.',
                                                  '_temp', '_tf_record', 'ChnSentiCorp_train.tfrecord'))


    # Tokenization / sequence-length settings.
    _m_do_lower_case = True
    _m_max_seq_length = 256

    # Input-pipeline settings.
    _num_train_epochs = 2
    _buffer_size = 1000
    _train_batch_size = 3

    # Build (or reuse) the TFRecord and get symbolic batch tensors.
    _input_ids, _input_mask, _segment_ids, _label_ids = get_train_tfrecord_data(
        _tfrecord_file, num_train_epochs=_num_train_epochs, buffer_size=_buffer_size,
        train_batch_size=_train_batch_size, train_examples_path=_train_examples_path,
        bert_vocab_file=_bert_vocab_file, do_lower_case=_m_do_lower_case, max_seq_length=_m_max_seq_length, logger=None)

    with tf.Session() as sess:
        j = 0
        try:
            while True:

                # Fetch one training batch; sess.run turns the symbolic
                # tensors into concrete arrays.
                input_ids_i, input_mask_i, segment_ids_i, label_ids_i = sess.run(
                    [_input_ids, _input_mask, _segment_ids, _label_ids])
                # print('input_ids\n{}\n input_mask\n{}\n segment_ids\n{}\n label_ids\n{}'.format(
                #     input_ids_i, input_mask_i, segment_ids_i, label_ids_i))
                print(label_ids_i)
                # Only inspect the first ~21 batches of this smoke test.
                if j > 20:
                    break
                j += 1
        except tf.errors.OutOfRangeError:
            print('Finish Training!')
