# GenerationMode enum adapted from transformers/generation/configuration_utils.py;
# the rest of this file is a mengzi-t5 summarization fine-tuning script.
import os
import platform
from enum import Enum

import torch
import torch.nn as nn
from datasets import load_from_disk
from rouge import Rouge
from torch.utils.data import DataLoader, Dataset
from transformers import AdamW, T5ForConditionalGeneration, T5Tokenizer

from logging_util import get_logger

# Run on GPU when one is available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Name of the current operating system.
os_name = platform.system()
logger = get_logger(model_name='mengzi-t5-base')

# Per-platform model checkpoint / dataset locations and the startup log line.
_ENV_CONFIG = {
    "Windows": (r'D:\python\models\langboat\meng_zi_t5',
                r'C:\Users\23248\PycharmProjects\stance\StanceDetectionLab\lab1\data',
                "当前执行环境是 Windows..."),
    "Linux": (r'/root/autodl-fs/models/meng_zi_t5',
              r'/root/autodl-fs/data/nlp_ai/nlp_seq2seq/nlpcc_2017',
              "当前执行环境是 Linux..."),
}

if os_name not in _ENV_CONFIG:
    raise ValueError("当前执行环境不是 Windows 也不是 Linux")
model_dir, data_dir, _env_message = _ENV_CONFIG[os_name]
logger.info(_env_message)

class GenerationMode(str, Enum):
    """
    Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method.

    BUG FIX: the original base class ``ExplicitEnum`` is never imported in this
    file, so defining this class raised ``NameError`` at import time.
    ``(str, Enum)`` reproduces ``ExplicitEnum``'s contract: a string-valued enum
    whose members compare equal to their string values.
    """
    # Non-beam methods
    CONTRASTIVE_SEARCH = "contrastive_search"
    GREEDY_SEARCH = "greedy_search"
    SAMPLE = "sample"
    ASSISTED_GENERATION = "assisted_generation"
    # Beam methods
    BEAM_SEARCH = "beam_search"
    BEAM_SAMPLE = "beam_sample"
    CONSTRAINED_BEAM_SEARCH = "constrained_beam_search"
    GROUP_BEAM_SEARCH = "group_beam_search"

class MengZiT5Model(nn.Module):
    """Wraps the pretrained mengzi-t5 model: training mode returns the LM
    loss, inference mode returns decoded summary strings."""

    def __init__(self):
        super().__init__()
        # Load the pretrained checkpoint from the platform-specific model_dir.
        self.model = T5ForConditionalGeneration.from_pretrained(model_dir)

    def forward(self, inputs, labels=None):
        """Run the model.

        Args:
            inputs: tokenizer output with 'input_ids' and 'attention_mask'.
            labels: optional tokenizer output for the target summaries; when
                given the LM loss is returned, otherwise generated text.

        Returns:
            A scalar loss tensor (training) or a list of decoded summary
            strings (inference).
        """
        # 1. Encoder input_ids and attention_mask.
        input_ids = inputs['input_ids']
        attention_mask = inputs['attention_mask']

        if labels is None:
            # Inference: beam-search generation.
            summary_ids = self.model.generate(
                input_ids,
                num_beams=4,              # beam search
                no_repeat_ngram_size=2,   # forbid repeated bigrams
                min_length=10,            # length limits
                max_length=64,
                early_stopping=True,
            )
            # Decode ids to text; summary_ids.shape = [bs, length].
            # NOTE(review): relies on the module-level `tokenizer` created in
            # the __main__ block — confirm it exists before calling inference.
            return tokenizer.batch_decode(summary_ids, skip_special_tokens=True)

        # 2. Decoder labels and their mask.
        train_labels = labels['input_ids'].contiguous()
        train_labels_mask = labels['attention_mask']

        # 3. Build right-shifted decoder inputs from the labels (teacher forcing).
        decoder_input_ids = train_labels.new_zeros(train_labels.shape)
        decoder_input_ids[..., 1:] = train_labels[..., :-1].clone()

        decoder_attention_mask = train_labels_mask.new_zeros(train_labels_mask.shape)
        decoder_attention_mask[..., 1:] = train_labels_mask[..., :-1].clone()
        decoder_attention_mask[..., 0] = 1  # the shifted-in start position is attended

        # 4. Forward pass with labels to get the LM loss.
        outputs = self.model(input_ids=input_ids,
                             attention_mask=attention_mask,
                             decoder_input_ids=decoder_input_ids,
                             decoder_attention_mask=decoder_attention_mask,
                             labels=train_labels)
        # 5. Return the training loss.
        return outputs.loss


# BUG FIX: train(), test() and the __main__ block were indented inside
# forward() after both branches had returned, so they were unreachable dead
# code and the script did nothing. They are module-level code and have been
# dedented accordingly.

def train(epochs, model, loader):
    """Fine-tune `model` on `loader` and save it to ./output/meng_zi_t5_sft.pt."""
    model.to(device)
    lr = 2e-5
    optimizer = AdamW(model.parameters(), lr=lr)

    model.train()
    for epoch in range(epochs):
        for step, (inputs, labels) in enumerate(loader):
            inputs = inputs.to(device)
            labels = labels.to(device)
            # Forward pass returns the LM loss.
            loss = model(inputs, labels)

            # Gradient step.
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if step % 10 == 0:
                print(f'epoch = {epoch}, step = {step}, loss = {loss:.4f}')

    os.makedirs('./output', exist_ok=True)
    torch.save(model, './output/meng_zi_t5_sft.pt')


def test():
    """Evaluate the saved model with ROUGE on at most the first two test
    batches; returns the F-scores of the last evaluated batch (or None when
    the loader is empty)."""
    # 1. Load the fine-tuned model.
    # NOTE(review): torch.load unpickles arbitrary code — only load trusted
    # checkpoints.
    model_load = torch.load('output/meng_zi_t5_sft.pt').to(device)
    model_load.eval()

    rouge = Rouge()

    # 2. Build the test loader.
    # NOTE(review): `Dataset('test')` and `get_collate_fn` are project helpers
    # not visible in this file — confirm they are in scope here.
    test_loader = DataLoader(dataset=Dataset('test'),
                             batch_size=32,
                             collate_fn=get_collate_fn(tokenizer=tokenizer),
                             shuffle=False,
                             drop_last=True)

    r = None
    for step, (inputs, labels) in enumerate(test_loader):
        if step == 2:
            break
        with torch.no_grad():
            decode_preds = model_load(inputs.to(device))
            decode_labels = tokenizer.batch_decode(labels['input_ids'].to(device), skip_special_tokens=True)
            # rouge expects whitespace-separated tokens.
            decode_preds = [" ".join(p) for p in decode_preds]
            decode_labels = [" ".join(l) for l in decode_labels]
            scores = rouge.get_scores(decode_preds, decode_labels, avg=True)
            r = {
                "rouge-1": scores["rouge-1"]["f"],
                "rouge-2": scores["rouge-2"]["f"],
                "rouge-l": scores["rouge-l"]["f"],
            }
            # BUG FIX: the original returned inside the loop after the first
            # batch, making the `step == 2` break unreachable; it also logged
            # 'setp' instead of 'step'.
            logger.info(f'step = {step}, 评估结果：\n{r}')
    return r


if __name__ == '__main__':
    # 1. Load the tokenizer.
    tokenizer = T5Tokenizer.from_pretrained(model_dir, legacy=False)

    # 2. Build the training loader.
    train_loader = DataLoader(dataset=Dataset('train'),
                              batch_size=16,
                              collate_fn=get_collate_fn(tokenizer=tokenizer),
                              shuffle=True,
                              drop_last=True)

    # 3. Create the model.
    model = MengZiT5Model()

    # 4. Train, then evaluate.
    train(epochs=1, model=model, loader=train_loader)

    test()

    # 5. Generate a summary for a sample text.
    text = """摘要生成: \n在经历了那段惊心动魄但又充满人情味的艰难时刻后，32岁的埃里克森时隔1100天再次为国征战欧洲杯，而且奉献了进球。
            丹麦队对垒斯洛文尼亚，这场热度并不算高的小组赛首轮争夺因为一个人的出现得到了外界的关注，他就是埃里克森。
            曼联中场在在第17分钟的进球帮助祖国球队取得了领先，他也在经历上届欧洲杯的心脏骤停的遭遇之后，实现了“王者归来”。
            尽管这次破门遗憾没能帮助丹麦队最终获得胜利，但绰号“爱神”的埃里克森依然得到了全场乃至全世界球迷的祝福。
            """
    inputs = tokenizer(text, return_tensors='pt')
    model_load = torch.load('output/meng_zi_t5_sft.pt')
    model_load.eval()
    print('摘要内容：\n', model_load(inputs))

