"""
@author: 石沙
@date: 2020-10-16
@content：本模块用于特征抽取
"""

import configs.settings as conf
from datasets import load_dic, load_cleaned_data, load_expanded_dic
import torch
from torch.utils.data import Dataset, DataLoader
from site_packages.utils.job import DataOp
from gensim.models.word2vec import Word2Vec
from itertools import zip_longest
import warnings
import random


# Module-level dictionaries shared by all conversion helpers below.
# `dic` maps word<->index for the fixed vocabulary (also exposes the special
# SOS/EOS/PAD indices); `expanded_dic` appears to additionally cover
# out-of-vocabulary words for pointer-generator mode — confirm against the
# datasets module.
dic = load_dic()
expanded_dic = load_expanded_dic()


def indices_from_sentences(sentences):
    """Convert tokenized sentences into index sequences.

    :param sentences: list of list of str, tokenized sentences,
        e.g. [['营销', '文案', '生成'], ['今天', '天气', '不错', '啊']]
    :return: list of list of int, each sentence mapped through the base
        dictionary and wrapped with SOS/EOS markers, e.g. [[2, 4, 67], ...]
    """
    converted = []
    for tokens in sentences:
        indices = [dic.SOS]
        indices.extend(dic[token] for token in tokens)
        indices.append(dic.EOS)
        converted.append(indices)
    return converted


def expanded_indices_from_sentences(sentences):
    """Convert tokenized sentences into index sequences via the expanded dictionary.

    Same contract as indices_from_sentences, but words are looked up in
    `expanded_dic` (used for the pointer-generator copy mechanism). Note that
    the SOS/EOS markers still come from the base dictionary `dic`.

    :param sentences: list of list of str, tokenized sentences
    :return: list of list of int, index sequences wrapped with SOS/EOS
    """
    converted = []
    for tokens in sentences:
        body = [expanded_dic[token] for token in tokens]
        converted.append([dic.SOS] + body + [dic.EOS])
    return converted


def sentences_from_indices(indices_list, pointer=False):
    """Map index sequences back to word sequences.

    :param indices_list: list of list of int, index sequences,
        e.g. [[2, 4, 67], [79, 58, 11, 101]]
    :param pointer: when True, resolve indices through the expanded dictionary
        (pointer-generator mode); otherwise use the base dictionary.
    :return: list of list of str, tokenized sentences
    """
    lookup = expanded_dic if pointer else dic
    return [[lookup[index] for index in sequence] for sequence in indices_list]


def expanded_sentences_from_indices(indices_list):
    """Map index sequences back to words via the expanded dictionary.

    :param indices_list: list of list of int, index sequences
    :return: list of list of str, tokenized sentences
    """
    sentences = []
    for sequence in indices_list:
        sentences.append([expanded_dic[index] for index in sequence])
    return sentences


def zero_padding(indices_list, fillvalue=dic.PAD):
    """Pad a batch of index sequences to the length of its longest member.

    :param indices_list: list of list of int, index sequences
    :param fillvalue: padding index; defaults to the dictionary's PAD token
        (evaluated once at definition time)
    :return: tensor of shape (batch, max_len) with shorter rows padded
    """
    columns = zip_longest(*indices_list, fillvalue=fillvalue)
    return torch.tensor(list(columns)).transpose(1, 0)


def gather_inputs(sentences, pointer=False):
    """Build the encoder-side tensors required by the seq2seq model.

    :param sentences: list of list of str, tokenized input sentences
    :param pointer: when True, additionally return the inputs indexed through
        the expanded dictionary (pointer-generator copy mechanism)
    :return: (inputs, input_lens, mask), plus expanded_inputs when
        pointer=True. `inputs` is the padded (batch, max_len) index tensor,
        `input_lens` the true sequence lengths (incl. SOS/EOS), and `mask` a
        bool tensor marking non-PAD positions.
    """
    indices_list = indices_from_sentences(sentences)
    input_lens = torch.tensor([len(indices) for indices in indices_list])
    inputs = zero_padding(indices_list)
    # Compare against PAD directly to get a bool mask. The previous
    # logical_not(...).short() -> torch.tensor(mask, dtype=bool) round-trip
    # re-wrapped an existing tensor (a copy-construct anti-pattern that
    # raises a UserWarning) and mixed dic['PAD'] with dic.PAD.
    mask = inputs != dic.PAD
    if pointer:
        expanded_indices_list = expanded_indices_from_sentences(sentences)
        expanded_inputs = zero_padding(expanded_indices_list)
        return inputs, input_lens, mask, expanded_inputs
    return inputs, input_lens, mask


def gather_targets(sentences, pointer=False):
    """Build the decoder-side tensors required by the seq2seq model.

    Mirror of gather_inputs for the target/reference sentences.

    :param sentences: list of list of str, tokenized target sentences
    :param pointer: when True, additionally return the targets indexed through
        the expanded dictionary (pointer-generator copy mechanism)
    :return: (targets, target_lens, mask), plus expanded_target when
        pointer=True. `targets` is the padded (batch, max_len) index tensor,
        `target_lens` the true sequence lengths (incl. SOS/EOS), and `mask` a
        bool tensor marking non-PAD positions.
    """
    indices_list = indices_from_sentences(sentences)
    target_lens = torch.tensor([len(indices) for indices in indices_list])
    targets = zero_padding(indices_list)
    # Direct comparison yields the bool mask; removes the short()/torch.tensor
    # re-wrap anti-pattern and the dic['PAD'] / dic.PAD inconsistency.
    mask = targets != dic.PAD
    if pointer:
        expanded_indices_list = expanded_indices_from_sentences(sentences)
        expanded_target = zero_padding(expanded_indices_list)
        return targets, target_lens, mask, expanded_target
    return targets, target_lens, mask


def sort_inputs(input_, input_len, input_mask, expanded_input=None, target=None,
                target_len=None, target_mask=None, expanded_target=None, rec_ids=None, training=True, pointer=False):
    """Sort one batch by descending true input length.

    The encoder's pack_padded_sequence requires each batch ordered by actual
    sequence length; sorting ahead of time keeps this CPU work out of the
    training loop and improves GPU utilisation.

    args:
        input_, input_len, input_mask, target, target_len, target_mask:
            per-batch input/target tensors
        expanded_input, expanded_target: expanded-dictionary variants, only
            consulted when pointer=True
        rec_ids: record ids; required when training=False so the ids stay
            aligned with the sorted tensors
        training: True returns input and target tensors; False (serving or
            evaluation) returns input tensors plus the sorted rec_ids
        pointer: True additionally sorts and returns the expanded tensors
    """
    _, order = torch.sort(input_len, dim=0, descending=True)

    sorted_input = input_.index_select(0, order)
    sorted_len = input_len[order]
    sorted_mask = input_mask.index_select(0, order)
    if pointer:
        expanded_input = expanded_input.index_select(0, order)

    if not training:
        assert rec_ids is not None, '在非训练状态状态下，rec_ids不能为空'
        sorted_ids = torch.tensor(rec_ids)[order]
        if pointer:
            return sorted_input, sorted_len, sorted_mask, expanded_input, sorted_ids
        return sorted_input, sorted_len, sorted_mask, sorted_ids

    sorted_target = target.index_select(0, order)
    sorted_target_mask = target_mask.index_select(0, order)
    sorted_target_len = target_len[order]
    if pointer:
        expanded_target = expanded_target.index_select(0, order)
        return (sorted_input, sorted_len, sorted_mask, expanded_input,
                sorted_target, sorted_target_len, sorted_target_mask, expanded_target)
    return sorted_input, sorted_len, sorted_mask, sorted_target, sorted_target_len, sorted_target_mask


def train_torch_embedding(sentences, dic):
    """torch的Embedding，不再使用"""
    embedding_layer = torch.nn.Embedding(dic.num_words, conf.MODEL_CONF['hidden_size'], padding_idx=dic.PAD)
    embedding_layer(sentences)
    return embedding_layer.weight


def train_gensim_embedding(sentences, dic):
    """
    使用gensim，提前训练好词向量，可以让训练过程更加稳定，
    并且减少利用torch的Embedding训练的步骤，可提升GPU使用率

    args：
        sentences: list of list，元素为已切分过的句子列表，e.g.:[['营销', '文案', '生成'], [['今天', '天气', '不错', '啊']]]
        dic: 字典
    """
    # 训练并保存word2vec模型
    w2v = Word2Vec(
        sentences=sentences,
        iter=15,
        min_count=2,
        size=conf.MODEL_CONF['hidden_size']
    )
    DataOp.save(w2v, 'word2vec', is_model=True)

    def get_gensim_embedding(word):
        try:
            return w2v[word]
        except:
            return [random.random() - 0.5 for i in range(conf.MODEL_CONF['hidden_size'])]

    words = [dic[idx] for idx in range(dic.num_words)]
    embedding = list(map(get_gensim_embedding, words))
    embedding = torch.tensor(embedding)
    DataOp.save(embedding, 'gensim_embedding', is_model=False)


class MarketingData(Dataset):
    """Dataset over the pre-processed tensors iterated during training.

    Loads the padded input/target tensors persisted by the feature-extraction
    step; when pointer=True the expanded-dictionary variants are loaded too
    and included in each item.
    """

    def __init__(self, pointer=False):
        self.pointer = pointer
        self.inputs = DataOp.load_data('inputs')
        self.input_lens = DataOp.load_data('input_lens')
        self.input_mask = DataOp.load_data('input_mask')
        self.targets = DataOp.load_data('targets')
        self.target_lens = DataOp.load_data('target_lens')
        self.target_mask = DataOp.load_data('target_mask')
        if pointer:
            self.expanded_inputs = DataOp.load_data('expanded_inputs')
            self.expanded_targets = DataOp.load_data('expanded_targets')

    def __len__(self):
        # Number of records = first dimension of the padded input tensor.
        return self.inputs.shape[0]

    def __getitem__(self, index):
        item = [
            self.inputs[index],
            self.input_lens[index],
            self.input_mask[index],
            self.targets[index],
            self.target_lens[index],
            self.target_mask[index],
        ]
        if self.pointer:
            # Pointer mode: expanded inputs slot in after input_mask, and
            # expanded targets go last, matching sort_inputs' argument order.
            item.insert(3, self.expanded_inputs[index])
            item.append(self.expanded_targets[index])
        return tuple(item)


def presort_all_inputs(pointer=False):
    """Sort every batch by true input length and re-save all tensors.

    pack_padded_sequence in the encoder needs each batch ordered by actual
    sequence length; doing the sort once here keeps that CPU work out of the
    training loop.

    :param pointer: also sort and save the expanded-dictionary tensors
    Side effects: overwrites the saved 'inputs'/'targets'/... tensors via DataOp.
    """
    dataset = MarketingData(pointer=pointer)
    train_batches = DataLoader(dataset, batch_size=conf.MODEL_CONF['batch_size'], shuffle=False)
    inputs_list = []
    input_lens_list = []
    input_mask_list = []
    targets_list = []
    target_lens_list = []
    target_mask_list = []
    expanded_input_list = []
    expanded_target_list = []
    if not pointer:
        for input_, input_len, input_mask, target, target_len, target_mask in train_batches:
            # BUG FIX: previously target/target_len/target_mask were passed
            # positionally, which bound them to the expanded_input/target/
            # target_len parameters of sort_inputs, leaving target_mask=None
            # and crashing on None.index_select. Bind them by keyword.
            input_, input_len, input_mask, target, target_len, target_mask = sort_inputs(
                input_, input_len, input_mask,
                target=target, target_len=target_len, target_mask=target_mask)
            inputs_list.append(input_)
            input_lens_list.append(input_len)
            input_mask_list.append(input_mask)
            targets_list.append(target)
            target_lens_list.append(target_len)
            target_mask_list.append(target_mask)
    else:
        for (input_, input_len, input_mask, expanded_input,
             target, target_len, target_mask, expanded_target) in train_batches:
            # Keyword binding for consistency with the non-pointer branch.
            (input_, input_len, input_mask, expanded_input,
             target, target_len, target_mask, expanded_target) = sort_inputs(
                input_, input_len, input_mask, expanded_input=expanded_input,
                target=target, target_len=target_len, target_mask=target_mask,
                expanded_target=expanded_target, pointer=pointer)
            inputs_list.append(input_)
            input_lens_list.append(input_len)
            input_mask_list.append(input_mask)
            targets_list.append(target)
            target_lens_list.append(target_len)
            target_mask_list.append(target_mask)
            expanded_input_list.append(expanded_input)
            expanded_target_list.append(expanded_target)

    DataOp.save(torch.cat(inputs_list, dim=0), 'inputs', is_model=False)
    DataOp.save(torch.cat(input_lens_list, dim=0), 'input_lens', is_model=False)
    DataOp.save(torch.cat(input_mask_list, dim=0), 'input_mask', is_model=False)
    DataOp.save(torch.cat(targets_list, dim=0), 'targets', is_model=False)
    DataOp.save(torch.cat(target_lens_list, dim=0), 'target_lens', is_model=False)
    DataOp.save(torch.cat(target_mask_list, dim=0), 'target_mask', is_model=False)

    if pointer:
        DataOp.save(torch.cat(expanded_input_list, dim=0), 'expanded_inputs', is_model=False)
        DataOp.save(torch.cat(expanded_target_list, dim=0), 'expanded_targets', is_model=False)


def trim_by_max_len(df):
    """Truncate over-long input sentences at evaluation time.

    :param df: DataFrame with an 'inputs' column containing token lists
    :return: the same DataFrame with every input capped at
        conf.INPUT_MAX_LEN tokens
    """
    over_input_max_len = (df['inputs'].apply(len) >= conf.INPUT_MAX_LEN).any()

    if over_input_max_len:
        # BUG FIX: the old message hard-coded "25,122" and called .format()
        # on a string with no placeholder, so the configured limit was never
        # interpolated into the warning.
        warnings.warn('模型输入最大长度限制为{max_len}个单词，有输入长度超过限制，'
                      '将按{max_len}个单词执行截断'.format(max_len=conf.INPUT_MAX_LEN))

    df['inputs'] = df['inputs'].apply(lambda line: line[:conf.INPUT_MAX_LEN]).values
    return df


if __name__ == '__main__':
    # Load the cleaned dataset.
    df = load_cleaned_data()

    # Drop sentences exceeding the configured maximum lengths,
    # keeping at most the first 20000 remaining records.
    df['input_len'] = df['inputs'].apply(len)
    df['ref_len'] = df['reference'].apply(len)
    df = df[(df['input_len'] <= conf.INPUT_MAX_LEN) & (df['ref_len'] <= conf.TARGET_MAX_LEN)].head(20000)
    print(df.shape)

    pointer = conf.MODEL_CONF['pointer']
    # Gather and persist the input/target tensors
    # (plus expanded-dictionary variants when pointer mode is on).
    if not pointer:
        inputs, input_lens, input_mask = gather_inputs(df['inputs'].values.tolist())
        targets, target_lens, target_mask = gather_targets(df['reference'].values.tolist())
    else:
        inputs, input_lens, input_mask, expanded_inputs = gather_inputs(df['inputs'].values.tolist(), pointer=pointer)
        targets, target_lens, target_mask, expanded_target = gather_targets(df['reference'].values.tolist(), pointer=pointer)

    DataOp.save(inputs, 'inputs', is_model=False)
    DataOp.save(input_lens, 'input_lens', is_model=False)
    DataOp.save(input_mask, 'input_mask', is_model=False)
    DataOp.save(targets, 'targets', is_model=False)
    DataOp.save(target_lens, 'target_lens', is_model=False)
    DataOp.save(target_mask, 'target_mask', is_model=False)

    if pointer:
        DataOp.save(expanded_inputs, 'expanded_inputs', is_model=False)
        DataOp.save(expanded_target, 'expanded_targets', is_model=False)

    # Pre-sort the batches ahead of time to speed up training.
    presort_all_inputs(pointer=pointer)

    # One-off: train the gensim word2vec embeddings (run manually when needed).
    # df = DataOp.load_data('cleaned_data')
    # inputs = df['inputs'].apply(lambda line: ['SOS'] + line + ['EOS']).values.tolist()
    # targets = df['reference'].apply(lambda line: ['SOS'] + line + ['EOS']).values.tolist()
    # sentences = inputs + targets
    # print(sentences[:2])
    # train_gensim_embedding(sentences, dic)

