import math
import os
import random
from properties import Properties
from tensorflow.python.platform import gfile
import re
# Special tokens used to pad and delimit dialogue sequences
PAD = "__PAD__"
GO = "__GO__"
EOS = "__EOS__"  # end of a dialogue
UNK = "__UNK__"  # marks characters that are not in the vocabulary
START_VOCABULART = [PAD, GO, EOS, UNK]
# NOTE: these ids must stay equal to each token's index in START_VOCABULART,
# because the vocabulary file is written with the special tokens first.
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3


# Regular expressions used for sentence splitting (byte patterns: the
# tokenizer below operates on bytes input)
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")


def _create_vocabulary(input_file, vocabulary_size, output_file):
    """
    Build a frequency-ordered vocabulary file from a tokenized corpus.

    :param input_file: str, corpus file, one whitespace-tokenized sentence
        per line
    :param vocabulary_size: int, maximum vocabulary size (the count includes
        the special tokens PAD/GO/EOS/UNK)
    :param output_file: str, destination file; one word per line, special
        tokens first, then words by descending frequency
    """
    max_size = int(vocabulary_size)
    counts = {}
    with open(input_file, 'r', encoding='utf-8') as f:
        for line in f:
            for word in line.split():
                counts[word] = counts.get(word, 0) + 1
    # Special tokens go first so their line numbers (= token ids) match
    # PAD_ID / GO_ID / EOS_ID / UNK_ID.
    vocabulary_list = START_VOCABULART + sorted(counts, key=counts.get, reverse=True)
    # Keep only the most frequent entries (slicing is a no-op when the
    # list is already within the limit).
    vocabulary_list = vocabulary_list[:max_size]
    with open(output_file, 'w', encoding='utf-8') as out:
        for word in vocabulary_list:
            out.write(word + "\n")


def _convert_to_vector(input_file, vocabulary_file, output_file):
    """
    Convert a tokenized corpus into token-id lines and write them out.

    :param input_file: str, corpus file, one whitespace-tokenized sentence
        per line
    :param vocabulary_file: str, vocabulary file (one word per line; the
        line number is the word's id)
    :param output_file: str, destination file of space-separated id lines,
        one per input sentence; unknown words map to UNK_ID
    """
    # 'rU' mode was deprecated and removed in Python 3.11; universal
    # newlines are the default in text mode, so plain 'r' is equivalent.
    with open(vocabulary_file, 'r', encoding='utf-8') as f:
        words = [line.strip() for line in f]
    vocab = {word: idx for idx, word in enumerate(words)}
    # Both files are managed by `with` so they are closed even on error.
    with open(input_file, 'r', encoding='utf-8') as fin, \
         open(output_file, 'w', encoding='utf-8') as fout:
        for line in fin:
            ids = [str(vocab.get(token, UNK_ID)) for token in line.split()]
            fout.write(" ".join(ids) + "\n")


def prepare_data(props):
    """
    Turn the raw encoder/decoder corpora into trainable token-id files.

    :param props: Properties, configuration object with workspace paths,
        corpus paths and vocabulary sizes
    :return: tuple(str), paths of (enc train ids, dec train ids,
        enc dev ids, dec dev ids, enc vocab, dec vocab)
    """
    workspace = props.getProperties('Path.workspace')
    train_enc = props.getProperties('Path.train.train_enc')
    train_dec = props.getProperties('Path.train.train_dec')
    test_enc = props.getProperties('Path.test.test_enc')
    test_dec = props.getProperties('Path.test.test_dec')
    enc_vocab_size = int(props.getProperties('Attrs.enc_vocab_size'))
    dec_vocab_size = int(props.getProperties('Attrs.dec_vocab_size'))

    # One vocabulary per side (encoder / decoder), built from the training data.
    enc_vocab_path = os.path.join(workspace, 'vocab%d.enc' % enc_vocab_size)
    dec_vocab_path = os.path.join(workspace, 'vocab%d.dec' % dec_vocab_size)
    _create_vocabulary(train_enc, enc_vocab_size, enc_vocab_path)
    _create_vocabulary(train_dec, dec_vocab_size, dec_vocab_path)

    enc_train_ids_path = "%s.ids%d" % (train_enc, enc_vocab_size)
    dec_train_ids_path = "%s.ids%d" % (train_dec, dec_vocab_size)
    enc_dev_ids_path = "%s.ids%d" % (test_enc, enc_vocab_size)
    dec_dev_ids_path = "%s.ids%d" % (test_dec, dec_vocab_size)

    # Vectorize training data first, then the development (test) data.
    conversions = (
        (train_enc, enc_vocab_path, enc_train_ids_path),
        (train_dec, dec_vocab_path, dec_train_ids_path),
        (test_enc, enc_vocab_path, enc_dev_ids_path),
        (test_dec, dec_vocab_path, dec_dev_ids_path),
    )
    for corpus, vocab_path, ids_path in conversions:
        _convert_to_vector(corpus, vocab_path, ids_path)

    return (enc_train_ids_path, dec_train_ids_path, enc_dev_ids_path,
            dec_dev_ids_path, enc_vocab_path, dec_vocab_path)


def basic_tokenizer(sentence):
  """
  Split a sentence into a flat list of tokens for vectorization.

  :param sentence: the input sentence (bytes; _WORD_SPLIT is a byte
      pattern, so str input would fail — behavior unchanged from before)
  :return: list of non-empty tokens with punctuation split off
  """
  tokens = []
  for fragment in sentence.strip().split():
    tokens.extend(_WORD_SPLIT.split(fragment))
  # Drop the empty strings that re.split leaves between adjacent delimiters.
  return [tok for tok in tokens if tok]


def sentence_to_token_ids(sentence, vocabulary, normalize_digits=True):
  """
  Map a sentence to its token-id vector using the given vocabulary.

  :param sentence: the input sentence (bytes tokens after basic_tokenizer)
  :param vocabulary: dict, word -> id mapping
  :param normalize_digits: bool, replace every digit with 0 before lookup
  :return: list of int token ids; unknown words map to UNK_ID
  """
  words = basic_tokenizer(sentence)
  if normalize_digits:
    # Collapse all digits to 0 so numbers share a single vocabulary entry.
    words = [re.sub(_DIGIT_RE, b"0", w) for w in words]
  # Tokens are bytes; decode to str to match the vocabulary keys.
  return [vocabulary.get(str(w, 'utf-8'), UNK_ID) for w in words]



def initialize_vocabulary(vocabulary_path):
  """
  Load a vocabulary file written by _create_vocabulary.

  :param vocabulary_path: str, path to the vocabulary file (one word per line)
  :return: tuple(dict, list) - the word->id mapping and the id->word list
  :raises ValueError: if the vocabulary file does not exist
  """
  # The file is read with the builtin open(), so only local paths work
  # anyway; os.path.exists matches that (no need for TF's gfile here).
  if not os.path.exists(vocabulary_path):
    # Bug fix: the message must be %-formatted explicitly — passing the
    # path as a second argument leaves the "%s" unexpanded.
    raise ValueError("Vocabulary file %s not found." % vocabulary_path)
  with open(vocabulary_path, "r", encoding='utf-8') as f:
    rev_vocab = [line.strip() for line in f]
  vocab = dict((word, idx) for idx, word in enumerate(rev_vocab))
  return vocab, rev_vocab