from train import build_model
from preprocess import Preprocessor
from datasets import json_to_df
import configs.settings as conf
from features import gather_inputs, trim_by_max_len, sort_inputs, sentences_from_indices
import json
from site_packages.utils.job import DataOp
from torch import nn
from site_packages.ml_libs.nlp import metrics as mc


class Generator:
    """Sentence generator wrapping a trained seq2seq model.

    Args:
        beam_search: when False, greedy search is used to decode the
            model output; when True, beam search with ``top_k=2`` is used.
        pointer: whether the underlying model uses a pointer mechanism
            (changes the shape of the preprocessed inputs).
        is_coverage: whether the model was trained with coverage.

    Methods:
        preprocess: turn raw json records into model-ready tensors.
        cut_by_eos: truncate each sentence at its first 'EOS' token.
    """

    def __init__(self, beam_search=False, pointer=False, is_coverage=False):
        model = build_model(training=False)
        # Put the network into evaluation mode.
        model.eval()
        # The model also flips some internal behaviour for inference.
        model.to_eval()
        self.seq2seq = model.to(conf.DEVICE)
        self.beam_search = beam_search
        self.pointer = pointer
        self.is_coverage = is_coverage

    def preprocess(self, json_dicts):
        """Clean the raw records and build the (sorted) input tensors.

        Returns ``(inputs, input_lens, input_mask, expanded_inputs, rec_ids)``
        when ``self.pointer`` is set, otherwise
        ``(inputs, input_lens, input_mask, rec_ids)``; tensors are already
        moved to ``conf.DEVICE``.
        """
        frame = json_to_df(json_dicts=json_dicts, return_df=True)
        cleaned = Preprocessor(frame, is_training=False).run()
        cleaned = trim_by_max_len(cleaned)
        token_lists = cleaned['inputs'].values.tolist()
        id_list = cleaned['rec_id'].values.tolist()

        if self.pointer:
            inputs, input_lens, input_mask, expanded = gather_inputs(
                token_lists, pointer=self.pointer)
            inputs, input_lens, input_mask, expanded, rec_ids = sort_inputs(
                inputs, input_lens, input_mask, expanded_input=expanded,
                rec_ids=id_list, training=False, pointer=self.pointer)
            # Move everything onto the configured device before returning.
            inputs = inputs.to(conf.DEVICE)
            input_lens = input_lens.to(conf.DEVICE)
            input_mask = input_mask.to(conf.DEVICE)
            expanded = expanded.to(conf.DEVICE)
            return inputs, input_lens, input_mask, expanded, rec_ids

        inputs, input_lens, input_mask = gather_inputs(
            token_lists, pointer=self.pointer)
        inputs, input_lens, input_mask, rec_ids = sort_inputs(
            inputs, input_lens, input_mask,
            rec_ids=id_list, training=False, pointer=self.pointer)
        return (inputs.to(conf.DEVICE), input_lens.to(conf.DEVICE),
                input_mask.to(conf.DEVICE), rec_ids)

    def cut_by_eos(self, sentences):
        """Truncate each sentence just after its first 'EOS' token.

        Sentences without an 'EOS' are returned unchanged; the 'EOS'
        itself is kept in the truncated output.
        """
        truncated = []
        for tokens in sentences:
            try:
                cut = tokens.index('EOS')
            except ValueError:
                cut = len(tokens)
            truncated.append(tokens[:cut + 1])
        return truncated

    def __call__(self, json_dicts):
        """Generate sentences for the given records.

        Returns:
            ``(rec_ids, results, references)`` — the (sorted) record ids,
            the generated sentences truncated at EOS, and the model input
            sentences used as references.
        """
        expanded = None
        if self.pointer:
            inputs, input_lens, input_mask, expanded, rec_ids = self.preprocess(json_dicts)
        else:
            inputs, input_lens, input_mask, rec_ids = self.preprocess(json_dicts)

        if self.beam_search:
            indices = self.seq2seq(inputs, input_lens, input_mask,
                                   beam_search=True, top_k=2,
                                   expanded_inputs=expanded)
        else:
            indices = self.seq2seq(inputs, input_lens, input_mask,
                                   expanded_inputs=expanded)

        decoded = indices.squeeze().cpu().detach().numpy()
        results = self.cut_by_eos(
            sentences_from_indices(decoded, pointer=self.pointer))
        references = sentences_from_indices(inputs.cpu().detach().numpy())
        return rec_ids, results, references


if __name__ == '__main__':
    # Load the raw test samples from disk.
    with open(conf.DATA_PATH + '/服饰数据.json', 'r', encoding='utf-8') as fp:
        all_records = json.load(fp)

    # Pick a handful of records and run sentence generation on them.
    wanted_ids = [1, 2, 3, 4, 5]
    raw_inputs = {}
    for rid in wanted_ids:
        raw_inputs[rid] = all_records[str(rid)]
    generator = Generator(beam_search=True,
                          pointer=conf.MODEL_CONF['pointer'],
                          is_coverage=conf.MODEL_CONF['is_coverage'])
    rec_ids, results, references = generator(raw_inputs)
    print(results)

    # Score the generations against the inputs with several ROUGE variants.
    print(mc.RougeN(n_gram=1)(results, references))
    print(mc.RougeL(beta=1.2)(results, references))
    print(mc.RougeS(n_gram=2, n_skip=2)(results, references))
