# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Create a database of text blocks.

Each input file assumes lines with the following JSON format:
```
{
  "title": "Document Tile",
  "text": "This is a full document."
}
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
from html import parser
import multiprocessing
import os
import random
import tempfile
import time
from nltk.util import pr
from tqdm import tqdm
import sys

sys.path.insert(0, "/home/jimx/codes/orqa/")


from absl import app
from absl import flags
from preprocessing import wiki_preprocessor
from utils import bert_utils
import nltk
import tensorflow.compat.v1 as tf

# https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1
# Download https://storage.googleapis.com/tfhub-modules/google/bert_uncased_L-12_H-768_A-12/1.tar.gz,
# extract it, and point this flag at the extracted directory.
flags.DEFINE_string(
    "bert_hub_module_path",
    "/home/jimx/g/tf_hub/bert_uncased_L-12_H-768_A-12_1",
    "Path to the BERT TF-Hub module.",
)
# Maximum length (in sub-word tokens) of one text block.
flags.DEFINE_integer("max_block_length", 288, "Maximum block length.")
flags.DEFINE_string(
    "input_pattern", "enwiki-20181220/wiki_raw/**/wiki_*", "Path to input data"
)
flags.DEFINE_string(
    "output_dir", "enwiki-20181220/process_out/", "Path to output records."
)
# num_threads > 1 selects the multiprocessing path in main(); 1 runs the
# single-file debug path.
flags.DEFINE_integer("num_threads", 1, "Number of threads.")

FLAGS = flags.FLAGS

# This file lightly processes the raw wiki text: documents are split into text
# blocks based on the lengths of their sub-word-tokenized sentences, then each
# block is sub-word tokenized and assembled into an example containing the
# tokenized title, the tokenized text block, and the length of every sentence.
# The raw (untokenized) title and text block are also kept.


# Pre-downloaded punkt model; originally fetched with
# nltk.download("punkt", download_dir=...).
_DEFAULT_PUNKT_PATH = "/home/jimx/nltk_data/tokenizers/punkt/english.pickle"


def get_sentence_splitter(model_path=_DEFAULT_PUNKT_PATH):
    """Load an NLTK punkt sentence tokenizer.

    Args:
      model_path: Path to a pickled punkt model. Defaults to the local
        pre-downloaded English model, so existing callers are unchanged.

    Returns:
      The tokenizer object produced by `nltk.data.load`.
    """
    return nltk.data.load(model_path)


def create_block_info(input_path, preprocessor, print=False):
    """Parse one wiki dump file into (title, block, serialized example) triples.

    Args:
      input_path: Path to a file of JSON lines ({"title": ..., "text": ...}).
      preprocessor: A wiki_preprocessor.Preprocessor used to split and
        tokenize the document text.
      print: If True, wrap the input file in a tqdm progress bar. NOTE: this
        name shadows the `print` builtin; it is kept for caller compatibility
        (main() passes it by keyword) and aliased to `verbose` below.

    Returns:
      A list of triples: (raw title, raw text block, serialized example).
    """
    verbose = print  # alias so the shadowed builtin name is not used below
    post_num = input_path.split("_")[-1]  # file suffix, e.g. "07" from "wiki_07"
    results = []
    # Shared parser handed to the preprocessor; presumably used to unescape
    # HTML entities in the wiki text -- TODO confirm in wiki_preprocessor.
    html_parser = parser.HTMLParser()
    with tf.io.gfile.GFile(input_path) as input_file:
        if verbose:
            input_file = tqdm(input_file, desc=f"parsing {post_num}")
        for index, line in enumerate(input_file):
            # Each element: raw title, raw block text, serialized example.
            results.extend(
                wiki_preprocessor.example_from_json_line(line, html_parser, preprocessor)
            )
            # NOTE(review): stops after ~102 lines per file -- looks like a
            # debug leftover; confirm before running a full extraction.
            if index > 100:
                break
    return results


def _write_block_infos(block_info_iter, blocks_path, examples_path, titles_path):
    """Write (title, block, serialized example) triples to three TFRecord files.

    Args:
      block_info_iter: Iterable of per-file result lists, where each list
        element is a (raw title, raw text block, serialized example) triple.
      blocks_path: Output TFRecord path for the raw text blocks.
      examples_path: Output TFRecord path for the serialized examples.
      titles_path: Output TFRecord path for the raw titles.

    Returns:
      The total number of blocks written.
    """
    block_count = 0
    with tf.python_io.TFRecordWriter(blocks_path) as blocks_writer, \
         tf.python_io.TFRecordWriter(examples_path) as examples_writer, \
         tf.python_io.TFRecordWriter(titles_path) as titles_writer:
        for block_info in block_info_iter:
            for title, block, examples in block_info:
                blocks_writer.write(block.encode("utf-8"))
                examples_writer.write(examples)
                titles_writer.write(title.encode("utf-8"))
                block_count += 1
                if block_count % 10000 == 0:
                    tf.logging.info("Wrote %d blocks.", block_count)
    return block_count


def main(_):
    """Convert wiki dump files into blocks/examples/titles TFRecord files."""
    input_paths = tf.io.gfile.glob(FLAGS.input_pattern)
    random.shuffle(input_paths)
    tf.logging.info("Processing %d input files.", len(input_paths))

    tokenizer = bert_utils.get_tokenizer(FLAGS.bert_hub_module_path)
    preprocessor = wiki_preprocessor.Preprocessor(
        get_sentence_splitter(), FLAGS.max_block_length, tokenizer
    )
    tf.logging.info("Using hub module %s", FLAGS.bert_hub_module_path)

    tf.io.gfile.makedirs(FLAGS.output_dir)
    blocks_path = os.path.join(FLAGS.output_dir, "blocks.tfr")
    examples_path = os.path.join(FLAGS.output_dir, "examples.tfr")
    titles_path = os.path.join(FLAGS.output_dir, "titles.tfr")

    start_time = time.time()
    if FLAGS.num_threads > 1:
        mapper = functools.partial(create_block_info, preprocessor=preprocessor)
        # Context manager closes and joins the worker pool (the original code
        # leaked the pool processes).
        with multiprocessing.Pool(FLAGS.num_threads) as pool:
            block_count = _write_block_infos(
                pool.imap_unordered(mapper, input_paths),
                blocks_path,
                examples_path,
                titles_path,
            )
    else:
        # Debug path: process only the first input file, with a progress bar.
        block_infos = [
            create_block_info(
                input_path=input_paths[0], preprocessor=preprocessor, print=True
            )
        ]
        block_count = _write_block_infos(
            block_infos, blocks_path, examples_path, titles_path
        )
    tf.logging.info("Wrote %d blocks in total.", block_count)
    end_time = time.time()
    print(f"time usage: {end_time - start_time:.4f}s.")


if __name__ == "__main__":
    app.run(main)
