# -*- coding: utf-8 -*-

'''
@Author  :   Xu

@Software:   PyCharm

@File    :   FAQ.py

@Time    :   2019-10-30 14:26

@Desc    :

'''

import glob
import random
import struct
import csv
from tensorflow.core.example import example_pb2

MARK_PAD = '<PAD>'  # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence
MARK_UNK = '<UNK>'  # This has a vocab id, which is used to represent out-of-vocabulary words
MARK_GO = '<GO>'  # This has a vocab id, which is used at the start of every decoder input sequence
MARK_EOS = '<EOS>'  # This has a vocab id, which is used at the end of untruncated target sequences


class Vocab(object):
    """Vocabulary class for mapping between words and ids (integers)"""

    def __init__(self, vocab_file, max_size):
        """Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file.
        Args:
            vocab_file: path to the vocab file, which is assumed to contain "<word>\\t\\t<frequency>" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though.
            max_size: integer. The maximum size of the resulting Vocabulary.
        Raises:
            Exception: if the vocab file contains a reserved marker or a duplicated word.
        """
        self._word_to_id = {}
        self._id_to_word = {}
        self._count = 0  # keeps track of total number of words in the Vocab

        # <UNK>, <PAD>, <GO> and <EOS> get the ids 0,1,2,3.
        for w in [MARK_UNK, MARK_PAD, MARK_GO, MARK_EOS]:
            self._word_to_id[w] = self._count
            self._id_to_word[self._count] = w
            self._count += 1

        # Read the vocab file and add words up to max_size.
        # BUG FIX: open with an explicit utf-8 encoding so the vocab loads
        # identically regardless of the platform's default locale encoding.
        with open(vocab_file, 'r', encoding='utf-8') as vocab_f:
            for line in vocab_f:
                pieces = line.split('\t\t')
                if len(pieces) != 2:
                    print('Warning: incorrectly formatted line in vocabulary file: %s\n' % line)
                    continue
                w = pieces[0]
                if w in [MARK_UNK, MARK_PAD, MARK_GO, MARK_EOS]:
                    raise Exception(
                        '<UNK>, <PAD>, <GO> and <EOS> shouldn\'t be in the vocab file, but %s is'
                        % w)
                if w in self._word_to_id:
                    raise Exception(
                        'Duplicated word in vocabulary file: %s' % w)

                self._word_to_id[w] = self._count
                self._id_to_word[self._count] = w
                self._count += 1
                if max_size != 0 and self._count >= max_size:
                    print("max_size of vocab was specified as %i; we now have %i words. Stopping reading." % (
                        max_size, self._count))
                    break

        # BUG FIX: don't .encode('utf-8') here -- on Python 3 that prints the
        # bytes repr (b'...') instead of the word itself.
        print("Finished constructing vocabulary of %i total words. Last word added: %s" % (
            self._count, self._id_to_word[self._count - 1]))

    def word2id(self, word):
        """Returns the id (integer) of a word (string). Returns <UNK> id if word is OOV."""
        if word not in self._word_to_id:
            return self._word_to_id[MARK_UNK]

        return self._word_to_id[word]

    def id2word(self, word_id):
        """Returns the word (string) corresponding to an id (integer).

        Raises:
            ValueError: if word_id is not in the vocabulary.
        """
        if word_id not in self._id_to_word:
            raise ValueError('Id not found in vocab: %d' % word_id)

        return self._id_to_word[word_id]

    def size(self):
        """Returns the total size of the vocabulary"""
        return self._count

    def write_metadata(self, fpath):
        """Writes metadata file for Tensorboard word embedding visualizer as described here:
        https://www.tensorflow.org/get_started/embedding_viz

        Args:
        fpath: place to write the metadata file
        """
        print("Writing word embedding metadata file to %s..." % (fpath))
        # BUG FIX: open with newline='' (required by the csv module) and an
        # explicit utf-8 encoding, and write the str directly -- passing
        # .encode('utf-8') bytes to csv on Python 3 would write the bytes
        # repr (b'...') into the metadata file.
        with open(fpath, "w", encoding="utf-8", newline="") as f:
            fieldnames = ['word']
            writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
            for i in range(self.size()):
                writer.writerow({"word": self._id_to_word[i]})


def split_text_with_whitespace(text):
    """Tokenizes text: each CJK character (plus ' ' and 'の') becomes its own
    token, while runs of all other characters are kept together as one token.

    Args:
        text: string to tokenize; leading/trailing whitespace is stripped first.

    Returns:
        A list of tokens (strings); empty list for empty/whitespace-only input.
    """
    # (Removed a dead `if isinstance(text, str): text = text` no-op -- a
    # leftover from a Python-2 unicode-decode branch.)
    res = []
    text = text.strip()

    tmp = ''
    for ch in text:
        # CJK unified ideographs, the space separator, and 'の' each become a
        # standalone token.
        # NOTE(review): 'の' is special-cased -- presumably it occurs in the
        # corpus as a separator-like character; confirm against the data.
        if u'\u4e00' <= ch <= u'\u9fff' or ch == ' ' or ch == 'の':
            if tmp != '':
                res.append(tmp)
                tmp = ''
            res.append(ch)
        else:
            tmp += ch
    if tmp != '':
        res.append(tmp)

    return res


def sentence2id(sentence, vocab, add_eos=False):
    """Converting a sentence (string) to a list of word ids.

    Args:
        sentence: raw sentence string; tokenized with split_text_with_whitespace.
        vocab: Vocabulary object; OOV tokens already map to the <UNK> id
            inside vocab.word2id.
        add_eos: if True, append the <EOS> id to the result.

    Returns:
        A list of word ids (integers).
    """
    # (Removed an unused `unk_id` local -- vocab.word2id handles OOV itself.)
    ids = [
        vocab.word2id(token) for token in split_text_with_whitespace(sentence)
    ]

    if add_eos:
        ids.append(vocab.word2id(MARK_EOS))

    return ids


def context2ids(context_words, vocab):
    """Map the context words to their ids. Also return a list of OOVs in the context.

    Args:
        context_words: list of words (strings)
        vocab: Vocabulary object

    Returns:
        ids: list of word ids (integers); OOVs are represented by temporary
            ids starting at vocab.size() (e.g. with a 50k vocab, the article's
            OOVs become 50000, 50001, 50002, ...).
        oovs: the OOV words (strings) in the article, ordered by their
            temporary ids.
    """
    unk = vocab.word2id(MARK_UNK)
    vocab_size = vocab.size()
    ids = []
    oovs = []
    for word in context_words:
        word_id = vocab.word2id(word)
        if word_id != unk:
            ids.append(word_id)
            continue
        # OOV word: assign (or reuse) a temporary id just past the vocab end.
        if word not in oovs:
            oovs.append(word)
        ids.append(vocab_size + oovs.index(word))
    return ids, oovs

def query2ids(context_words, vocab, oovs):
    """Map the query words to their ids, sharing temporary OOV numbers with
    a previously collected OOV list.

    Args:
        context_words: list of words (strings)
        vocab: Vocabulary object
        oovs: list of OOV words collected so far (e.g. from context2ids).
            NOTE: mutated in place -- newly seen OOVs are appended.

    Returns:
        ids: list of word ids (integers); OOVs are represented by temporary
            ids vocab.size() + (their position in `oovs`).
        oovs: the (possibly extended) OOV list -- the same object passed in.
    """
    unk = vocab.word2id(MARK_UNK)
    base = vocab.size()
    ids = []
    for word in context_words:
        word_id = vocab.word2id(word)
        if word_id == unk:
            # OOV: reuse its existing temporary number, or append a new one.
            if word not in oovs:
                oovs.append(word)
            ids.append(base + oovs.index(word))
        else:
            ids.append(word_id)
    return ids, oovs

def summarization2ids(summarization_words, vocab, context_oovs):
    """Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers.

    Args:
        summarization_words: list of words (strings)
        vocab: Vocabulary object
        context_oovs: list of in-article OOV words (strings), in the order
            corresponding to their temporary article OOV numbers

    Returns:
        ids: list of ids (integers). In-article OOVs map to their temporary
            OOV numbers; out-of-article OOVs map to the <UNK> id.
    """
    unk = vocab.word2id(MARK_UNK)
    base = vocab.size()
    ids = []
    for word in summarization_words:
        word_id = vocab.word2id(word)
        if word_id == unk and word in context_oovs:
            # In-article OOV: map to its temporary article OOV number.
            ids.append(base + context_oovs.index(word))
        else:
            # Either an in-vocab word, or an out-of-article OOV
            # (word_id already equals the <UNK> id in that case).
            ids.append(word_id)
    return ids


def outputids2words(id_list, vocab, article_oovs):
    """
    Maps output ids to words, including mapping in-article OOVs
    from their temporary ids to the original OOV string (applicable in pointer-generator mode).
    Args:
        id_list: list of ids (integers)
        vocab: Vocabulary object
        article_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids
        (that have been assigned in pointer-generator mode), or None (in baseline mode)
    Returns:
        words: list of words (strings)
    Raises:
        ValueError: if an id is outside both the vocab and the article OOV range.
    """
    words = []
    for i in id_list:
        try:
            w = vocab.id2word(i)  # might be <UNK>
        except ValueError:  # w is OOV
            assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
            article_oov_idx = i - vocab.size()
            try:
                w = article_oovs[article_oov_idx]
            # BUG FIX: out-of-range list indexing raises IndexError, not
            # ValueError -- the original `except ValueError` never fired, so
            # a bad id escaped as a raw IndexError without this explanation.
            except IndexError:
                raise ValueError(
                    'Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs'
                    % (i, article_oov_idx, len(article_oovs)))
        words.append(w)
    return words


def show_art_oovs(article, vocab):
    """
    Returns the article string, highlighting the OOVs
    by placing __underscores__ around them
    """
    unk_id = vocab.word2id(MARK_UNK)
    pieces = []
    for token in split_text_with_whitespace(article):
        if vocab.word2id(token) == unk_id:
            pieces.append("__%s__" % token)
        else:
            pieces.append(token)
    # Chinese text, so join without a space separator.
    return ''.join(pieces)


def show_abs_oovs(abstract, vocab, article_oovs):
    """
    Returns the abstract string, highlighting the article OOVs with __underscores__.
    If a list of article_oovs is provided,
    non-article OOVs are differentiated like !!__this__!!.
    Args:
        abstract: string
        vocab: Vocabulary object
        article_oovs: list of words (strings), or None (in baseline mode)
    """
    unk_id = vocab.word2id(MARK_UNK)

    def _mark(token):
        # In-vocab words pass through unchanged.
        if vocab.word2id(token) != unk_id:
            return token
        # Baseline mode (no article OOV list), or an in-article OOV.
        if article_oovs is None or token in article_oovs:
            return "__%s__" % token
        # Pointer-generator mode, OOV that did not appear in the article.
        return "!!__%s__!!" % token

    return ''.join(_mark(token) for token in split_text_with_whitespace(abstract))