import collections
import os
import sys
import logging
import itertools
import pickle

from . import cornell

UNKNOWN_TOKEN = '#UNK' # service token for out-of-vocabulary words
BEGIN_TOKEN = "#BEG" # service token marking the start of a sequence
END_TOKEN = "#END" # service token marking the end of a sequence
MAX_TOKENS = 20 # maximum phrase length (in tokens) kept in training pairs
MIN_TOKEN_FEQ = 10 # minimum word frequency to enter the dictionary (name keeps the historical "FEQ" typo)
SHUFFLE_SEED = 5871 # fixed seed for shuffling — not used in this chunk; presumably consumed by callers

EMB_DICT_NAME = "emb_dict.dat" # file name of the pickled word -> token ID dictionary
EMB_NAME = "emb.npy" # file name of saved embeddings — presumably a NumPy array; not written in this chunk

log = logging.getLogger("data") # module-level logger


def save_emb_dict(dir_name, emb_dict):
    """
    Persist the word -> token ID dictionary into ``dir_name``.

    The dictionary is pickled into the file named by ``EMB_DICT_NAME``.

    :param dir_name: target directory (must already exist)
    :param emb_dict: mapping of word -> token ID to save
    """
    target_path = os.path.join(dir_name, EMB_DICT_NAME)
    with open(target_path, "wb") as out_file:
        pickle.dump(emb_dict, out_file)


def load_emb_dict(dir_name):
    """
    Load the word -> token ID dictionary previously written by save_emb_dict.

    NOTE(review): pickle.load is unsafe on files from untrusted sources —
    this assumes the file was produced locally by save_emb_dict.

    :param dir_name: directory containing the ``EMB_DICT_NAME`` file
    :return: the unpickled word -> token ID dictionary
    """
    source_path = os.path.join(dir_name, EMB_DICT_NAME)
    with open(source_path, "rb") as in_file:
        return pickle.load(in_file)


def encode_words(words, emb_dict):
    """
    Convert a list of words into a list of embedding indices, wrapped with
    the BEGIN and END service tokens. Out-of-vocabulary words (after
    lowercasing) are mapped to the unknown token's ID.

    :param words: list of strings
    :param emb_dict: embeddings dictionary (word -> id)
    :return: list of IDs
    """
    unk_id = emb_dict[UNKNOWN_TOKEN]
    body = [emb_dict.get(word.lower(), unk_id) for word in words]
    return [emb_dict[BEGIN_TOKEN]] + body + [emb_dict[END_TOKEN]]


def encode_phrase_pairs(phrase_pairs, emb_dict, filter_unknows=True):
    """
    Convert a list of phrase pairs into training data of token-ID sequences.

    BUGFIX: the original implementation accepted ``filter_unknows`` but
    ignored it and always filtered; the flag is now honored. The default
    (True) preserves the previous behavior.

    :param phrase_pairs: list of (phrase, phrase) tuples, each phrase a list of words
    :param emb_dict: embeddings dictionary (word -> id)
    :param filter_unknows: if True, drop pairs in which either phrase contains
        a word missing from the dictionary (name keeps the historical typo)
    :return: list of tuples ([input_id_seq], [output_id_seq])
    """
    unk_token = emb_dict[UNKNOWN_TOKEN]
    result = []
    for p1, p2 in phrase_pairs:
        p = encode_words(p1, emb_dict), encode_words(p2, emb_dict)
        # Skip pairs containing out-of-vocabulary words, if requested
        if filter_unknows and (unk_token in p[0] or unk_token in p[1]):
            continue
        result.append(p)
    return result


def group_train_data(training_data):
    """
    Group training pairs by their first phrase.

    The same question phrase can appear with several different replies; this
    collects all replies under a single key so they can be sampled together.

    :param training_data: list of (seq1, seq2) pairs
    :return: list of (tuple(seq1), [seq2, ...]) pairs, in first-seen order
    """
    grouped = {}
    for first, second in training_data:
        # Lists are unhashable, so the key is the tuple form of the phrase
        grouped.setdefault(tuple(first), []).append(second)
    return list(grouped.items())


def iterate_batches(data, batch_size):
    """
    Yield consecutive slices of ``data`` containing ``batch_size`` items each.

    Iteration stops at the first slice with fewer than two elements, so a
    trailing remainder of size 0 or 1 is silently dropped.

    :param data: list of training samples
    :param batch_size: number of samples per batch
    """
    assert isinstance(data, list)
    assert isinstance(batch_size, int)

    for start in itertools.count(0, batch_size):
        chunk = data[start:start + batch_size]
        # A batch of one (or zero) samples is not useful for training — stop
        if len(chunk) <= 1:
            return
        yield chunk


def load_data(genre_filter, max_tokens=MAX_TOKENS, min_token_freq=MIN_TOKEN_FEQ):
    """
    Load the Cornell dialogues corpus and prepare it for training.

    :param genre_filter: genre to restrict the corpus to; a falsy value loads
        dialogues from every genre
    :param max_tokens: maximum phrase length (in tokens) kept in training pairs
    :param min_token_freq: words seen fewer than this many times are excluded
        from the dictionary
    :return: tuple (phrase_pairs, phrase_dict) — list of (phrase, phrase)
        pairs and the word -> token ID dictionary
    """
    dialogues = cornell.load_dialogues(genre_filter=genre_filter)
    if not dialogues:
        log.error("No dialogues found, exit!")
        sys.exit()
    log.info("Loaded %d dialogues with %d phrases, generating training pairs",
             len(dialogues), sum(map(len, dialogues)))
    # Break every dialogue into consecutive (question, reply) pairs
    phrase_pairs = dialogues_to_pairs(dialogues, max_tokens=max_tokens)
    log.info("Counting freq of words...")
    # Count how often every word occurs across the whole corpus
    word_counts = collections.Counter(
        word
        for dialogue in dialogues
        for phrase in dialogue
        for word in phrase
    )
    # Keep only the words that occur frequently enough
    freq_set = {word for word, count in word_counts.items()
                if count >= min_token_freq}
    log.info("Data has %d uniq words, %d of them occur more than %d",
             len(word_counts), len(freq_set), min_token_freq)
    # Build the word -> token ID mapping from the words seen in the pairs
    phrase_dict = phrase_pairs_dict(phrase_pairs, freq_set)
    return phrase_pairs, phrase_dict


def phrase_pairs_dict(phrase_pairs, freq_set):
    """
    Build the word -> ID dictionary from the words occurring in the dialogues.

    Service tokens get the fixed IDs 0..2; every new frequent word receives
    the next consecutive ID in order of first appearance.

    :param phrase_pairs: list of (phrase, phrase) pairs
    :param freq_set: set of words frequent enough to be included
    :return: dict mapping word -> token ID
    """
    res = {UNKNOWN_TOKEN: 0, BEGIN_TOKEN: 1, END_TOKEN: 2}
    for first, second in phrase_pairs:
        # Walk both phrases of the pair as one lowercase word stream
        for word in itertools.chain(first, second):
            word = word.lower()
            if word in freq_set and word not in res:
                # len(res) is exactly the next free consecutive ID
                res[word] = len(res)
    return res


def dialogues_to_pairs(dialogues, max_tokens=None):
    """
    Convert dialogues into (question, reply) pairs of consecutive phrases.

    :param dialogues: list of dialogues, each a list of phrases (word lists)
    :param max_tokens: when given, keep only pairs in which both phrases are
        at most this many tokens long
    :return: list of (phrase, phrase) pairs
    """
    def fits(phrase):
        # No limit configured, or the phrase is within the limit
        return max_tokens is None or len(phrase) <= max_tokens

    pairs = []
    for dialogue in dialogues:
        # Pair every phrase with its immediate successor in the dialogue
        for question, reply in zip(dialogue, dialogue[1:]):
            if fits(question) and fits(reply):
                pairs.append((question, reply))
    return pairs


def decode_words(indices, rev_emb_dict):
    """
    Map a sequence of token IDs back into words.

    :param indices: iterable of token IDs
    :param rev_emb_dict: reverse dictionary (id -> word)
    :return: list of words; IDs missing from the dictionary become UNKNOWN_TOKEN
    """
    return [rev_emb_dict.get(token_id, UNKNOWN_TOKEN) for token_id in indices]


def trim_tokens_seq(tokens, end_token):
    """
    Return the prefix of ``tokens`` up to and including the first occurrence
    of ``end_token``; if it never appears, return all the tokens.

    :param tokens: iterable of tokens
    :param end_token: token that terminates the sequence
    :return: new list with the trimmed sequence
    """
    trimmed = []
    for token in tokens:
        trimmed.append(token)
        if token == end_token:
            return trimmed
    return trimmed


def split_train_test(data, train_ratio=0.95):
    """
    Split the data into training and test portions.

    :param data: sequence of samples to split
    :param train_ratio: fraction of samples that goes into the training part
    :return: tuple (train_part, test_part)
    """
    pivot = int(len(data) * train_ratio)
    train_part, test_part = data[:pivot], data[pivot:]
    return train_part, test_part
