#! -*- coding: utf-8 -*-
# Fine-tune the UER Chinese T5 model on a Seq2Seq task (short-title generation).
# Dataset: CSL from https://github.com/CLUEbenchmark/CLGE

from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, seed_everything, ListDataset
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import json
from rouge import Rouge  # pip install rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

# Basic hyper-parameters
max_c_len = 64          # max token length of the encoder input (content)
max_t_len = 12          # max token length of the decoder target (title)
batch_size = 1
epochs = 50
steps_per_epoch = None  # None: iterate the whole dataloader each epoch

# Pretrained model paths (UER t5-base-chinese-cluecorpussmall checkpoint)
# config_path = 'D:/codes/nlp_about/pretrained_model/uer_t5-base-chinese-cluecorpussmall/config.json'
config_path = 'D:/codes/nlp_about/pretrained_model/uer_t5-base-chinese-cluecorpussmall/bert4torch_config.json'
checkpoint_path = 'D:/codes/nlp_about/pretrained_model/uer_t5-base-chinese-cluecorpussmall/pytorch_model.bin'
dict_path = 'D:/codes/nlp_about/pretrained_model/uer_t5-base-chinese-cluecorpussmall/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)  # fix all RNG seeds for reproducibility


class MyDataset(ListDataset):
    @staticmethod
    def load_data(filename):
        """Load a JSON-lines file of samples.

        Each line is a JSON object with 'title' and 'content' keys;
        returns a list of (title, content) tuples.
        """
        samples = []
        with open(filename, encoding='utf-8') as f:
            for line in f:
                record = json.loads(line)
                samples.append((record['title'], record['content']))
        return samples


# Load and simplify the vocabulary, then build the tokenizer.
# simplified=True trims the vocab to commonly used tokens; keep_tokens
# records which original rows were kept (passed to build_transformer_model
# so the embedding matrix is pruned to match).
token_dict, keep_tokens = load_vocab(
    dict_path=dict_path,
    simplified=True,
    startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)


def collate_fn(batch):
    """Collate (title, content) pairs into model-ready tensors.

    Encoder input: content token ids ([CLS]...[SEP]).
    Decoder input: title ids without the final token; training target:
    title ids shifted left by one and flattened (teacher forcing).
    """
    content_ids, title_ids = [], []
    for title, content in batch:
        c_ids, _ = tokenizer.encode(content, maxlen=max_c_len)
        t_ids, _ = tokenizer.encode(title, maxlen=max_t_len)
        content_ids.append(c_ids)
        title_ids.append(t_ids)

    content_ids = torch.tensor(sequence_padding(content_ids), dtype=torch.long, device=device)
    title_ids = torch.tensor(sequence_padding(title_ids), dtype=torch.long, device=device)
    decoder_input = title_ids[:, :-1]
    target = title_ids[:, 1:].flatten()
    return [[content_ids], [decoder_input]], target


# Dataset paths.
# NOTE(review): train_path currently points at test.json (same file as
# test_dataset below), so the model trains on the test split — confirm
# this is a deliberate debugging setup.
# train_path = "D:/codes/nlp_about/sku-short-name-extractor/data/train.json"
train_path = "D:/codes/nlp_about/sku-short-name-extractor/data/test.json"

train_dataloader = DataLoader(MyDataset(train_path), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataset = MyDataset('D:/codes/nlp_about/sku-short-name-extractor/data/valid.json')
test_dataset = MyDataset('D:/codes/nlp_about/sku-short-name-extractor/data/test.json')

# Build the T5 model from the checkpoint and move it to the target device.
model = build_transformer_model(
    config_path,
    checkpoint_path,
    keep_tokens=keep_tokens,  # keep only tokens in keep_tokens, pruning the original vocab
    add_trainer=True
).to(device)


class CrossEntropyLoss(nn.CrossEntropyLoss):
    """Cross-entropy over the model's decoder logits.

    `outputs` is the model's output list whose last element holds the
    logits of shape [batch, seq_len, vocab_size]; they are flattened to
    [batch*seq_len, vocab_size] to match the flattened targets produced
    by `collate_fn`. Accepts the same keyword arguments as
    `nn.CrossEntropyLoss` (e.g. ignore_index).
    """
    # The original defined an __init__ that only forwarded **kwargs to
    # super(); that is pure boilerplate and has been removed — behavior
    # is identical.

    def forward(self, outputs, y_true):
        y_pred = outputs[-1]  # decoder logits are the last model output
        y_pred = y_pred.reshape(-1, y_pred.shape[-1])
        return super().forward(y_pred, y_true)


# Compile with SGD+momentum; the Adam variant was tried and left commented out.
# ignore_index=0 masks [PAD] positions (id 0 per the vocab's startswith order).
# model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1e-4))
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.SGD(model.parameters(), 1e-4, momentum=0.9))


class AutoTitle(AutoRegressiveDecoder):
    """Seq2seq decoder: generates a short title from content via beam search."""

    @AutoRegressiveDecoder.wraps(default_rtype='logits')
    def predict(self, inputs, output_ids, states):
        outputs = model.decoder.predict([output_ids] + inputs)
        logits = outputs[-1] if isinstance(outputs, list) else outputs
        return logits[:, -1, :]  # only the last step's distribution is needed

    def generate(self, text, topk=1):
        token_ids, _ = tokenizer.encode(text, maxlen=max_c_len)
        token_ids = torch.tensor([token_ids], device=device)
        encoder_output = model.encoder.predict([token_ids])
        best_ids = self.beam_search(encoder_output, topk=topk)[0]  # beam search decoding
        return tokenizer.decode(best_ids.cpu().numpy())


# Decoder instance: generation begins at the tokenizer's start token id and
# stops at its end token id, capped at max_t_len tokens.
autotitle = AutoTitle(start_id=tokenizer._token_start_id, end_id=tokenizer._token_end_id, maxlen=max_t_len,
                      device=device)


class Evaluator(Callback):
    """Per-epoch evaluation and checkpointing.

    After each epoch: prints demo generations, computes ROUGE/BLEU on the
    validation and test sets, and saves weights whenever validation BLEU
    improves.
    """

    def __init__(self):
        self.rouge = Rouge()
        self.smooth = SmoothingFunction().method1
        self.best_bleu = 0.  # best validation BLEU seen so far

    def on_epoch_end(self, steps, epoch, logs=None):
        just_show()
        metrics = self.evaluate(valid_dataset.data)  # validation metrics
        metrics_test = self.evaluate(test_dataset.data)  # test metrics
        if metrics['bleu'] > self.best_bleu:
            self.best_bleu = metrics['bleu']
            model.save_weights('./best_model.pt')  # checkpoint on improvement
        metrics['best_bleu'] = self.best_bleu
        print('valid_data:', metrics)
        print('test_data:', metrics_test)

    def evaluate(self, data, topk=1):
        """Average ROUGE-1/2/L F1 and smoothed BLEU over (title, content) pairs.

        Titles are space-joined per character before scoring. Empty
        predictions contribute zero to every metric. An empty dataset
        returns all-zero metrics (fix: the original raised
        ZeroDivisionError here).
        """
        total = 0
        rouge_1, rouge_2, rouge_l, bleu = 0, 0, 0, 0
        for title, content in tqdm(data):
            total += 1
            title = ' '.join(title).lower()
            pred_title = ' '.join(autotitle.generate(content, topk)).lower()
            if pred_title.strip():
                scores = self.rouge.get_scores(hyps=pred_title, refs=title)
                rouge_1 += scores[0]['rouge-1']['f']
                rouge_2 += scores[0]['rouge-2']['f']
                rouge_l += scores[0]['rouge-l']['f']
                bleu += sentence_bleu(references=[title.split(' ')], hypothesis=pred_title.split(' '),
                                      smoothing_function=self.smooth)
        if total == 0:  # guard: empty dataset would otherwise divide by zero
            return {'rouge-1': 0.0, 'rouge-2': 0.0, 'rouge-l': 0.0, 'bleu': 0.0}
        rouge_1, rouge_2, rouge_l, bleu = rouge_1 / total, rouge_2 / total, rouge_l / total, bleu / total
        return {'rouge-1': rouge_1, 'rouge-2': rouge_2, 'rouge-l': rouge_l, 'bleu': bleu}


def just_show():
    """Print generated short names for two fixed demo products."""
    demo_inputs = (
        u'格力（GREE） KS-06S61Dg 6L 遥控式单冷型空调扇',
        u'华为智选HUAWEI KJ400F-C400 除甲醛 家用型空气净化器',
    )
    for demo in demo_inputs:
        print(u'生成简称:', autotitle.generate(demo))


if __name__ == '__main__':
    evaluator = Evaluator()
    # Smoke-test generation before training starts.
    print(u'生成简称:', autotitle.generate('夏普(Sharp) KI-BC608 除PM2.5 家用型空气净化器'))
    model.fit(
        train_dataloader,
        steps_per_epoch=steps_per_epoch,  # None → full pass over the dataloader per epoch (bert4torch convention) — TODO confirm
        epochs=epochs,
        callbacks=[evaluator],  # evaluation + checkpointing each epoch
        bar="tqdm"
    )
