import numpy as np
from transformers import AutoTokenizer,AutoModelForSequenceClassification,DataCollatorForSeq2Seq,Seq2SeqTrainer,Seq2SeqTrainingArguments,AutoModelForSeq2SeqLM,T5Tokenizer
from datasets import Dataset
import jieba
import os
from rouge_chinese import Rouge

# SwanLab experiment tracking: project and workspace are configured via env vars
# so `report_to="swanlab"` below picks them up automatically.
os.environ["SWANLAB_PROJECT"] = "Textsummarization"
os.environ["SWANLAB_WORKSPACE"] = "XYLCURRY30"

# NLPCC 2017 summarization dataset, pre-saved to disk with `Dataset.save_to_disk`.
# Expected columns: 'content' (article) and 'title' (reference summary) —
# confirmed by their use in process_fun below.
ds = Dataset.load_from_disk(r"D:\datasets\nlpcc_2017")
# Hold out 10% as the evaluation split (keys become 'train' / 'test').
ds = ds.train_test_split(test_size=0.1)

# Sanity check: print one raw training example.
print(ds['train'][0])

tokenizer = T5Tokenizer(
    vocab_file=r"D:\models\mengzi-t5-base\spiece.model",  # point directly at the SentencePiece model file
    model_max_length=512,
    use_fast=False,  # disable the fast tokenizer to sidestep tiktoken entirely
    legacy=False  # disable legacy behavior to avoid format-compatibility issues
)

def process_fun(examples):
    """Tokenize one batch of examples for seq2seq training.

    Prepends the task prompt to every article in `examples['content']`,
    tokenizes the articles as encoder inputs (truncated to 350 tokens) and
    the titles as generation targets (truncated to 64 tokens), and attaches
    the target token ids under the 'labels' key.
    """
    prompts = ["摘要生成:\n" + article for article in examples['content']]
    model_inputs = tokenizer(prompts, max_length=350, truncation=True)
    targets = tokenizer(text_target=examples['title'], max_length=64, truncation=True)
    model_inputs['labels'] = targets['input_ids']
    return model_inputs

# Apply the preprocessing to both splits in batches; the original raw columns
# are kept alongside the new tokenized ones (no remove_columns is passed).
tokenizer_ds = ds.map(process_fun,batched=True)
# Load the mengzi-t5-base checkpoint as an encoder-decoder LM for generation.
model = AutoModelForSeq2SeqLM.from_pretrained(r"D:\models\mengzi-t5-base")
# ROUGE scorer from rouge_chinese; expects whitespace-separated tokens as input.
rouge = Rouge()

def compute_metric(eval_pred):
    """Compute ROUGE-1/2/L F1 between generated summaries and references.

    Called by the trainer at evaluation time with `(predictions, label_ids)`,
    where predictions are generated token ids (requires
    `predict_with_generate=True` in the training arguments).

    Returns a dict with keys 'rouge-1', 'rouge-2', 'rouge-l', each the
    corpus-averaged F1 score.
    """
    pre, labels = eval_pred
    # Decode the generated token ids back to text, dropping special tokens
    # (pad / eos etc.).
    decode_pre = tokenizer.batch_decode(pre, skip_special_tokens=True)
    # Labels use -100 for ignored (padding) positions; replace them with the
    # pad token id so they can be decoded.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decode_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    # rouge_chinese expects whitespace-separated tokens. The previous
    # `"".join(p)` over an already-decoded string was a no-op, so ROUGE saw
    # each sentence as a single token; segment with jieba instead.
    # Fall back to a single space for empty outputs, since rouge_chinese
    # raises ValueError on empty hypotheses.
    decode_pre = [" ".join(jieba.cut(p)) or " " for p in decode_pre]
    decode_labels = [" ".join(jieba.cut(l)) or " " for l in decode_labels]

    # avg=True yields one dict keyed by 'rouge-1'/'rouge-2'/'rouge-l'
    # (each with 'f'/'p'/'r'). Without it, get_scores returns a per-sample
    # *list*, and the old `scores['rouge']` lookup raised TypeError.
    scores = rouge.get_scores(decode_pre, decode_labels, avg=True)

    return {
        "rouge-1": scores['rouge-1']['f'],
        "rouge-2": scores['rouge-2']['f'],
        "rouge-l": scores['rouge-l']['f'],
    }

# Training configuration. Two fixes over the original:
#  - predict_with_generate=True is required: compute_metric decodes generated
#    token ids; without it the trainer passes raw logits to the metric fn.
#  - load_best_model_at_end=True is required for metric_for_best_model to
#    have any effect (eval/save strategies already match at "epoch").
args = Seq2SeqTrainingArguments(
    output_dir=r"D:\Code\sshcode\HuggingFace\summary",
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    logging_steps=8,
    eval_strategy="epoch",
    save_strategy="epoch",
    metric_for_best_model="rouge-1",
    load_best_model_at_end=True,
    predict_with_generate=True,
    generation_max_length=64,  # matches the label truncation length in process_fun
    num_train_epochs=10,
    report_to="swanlab",
)

trainer = Seq2SeqTrainer(
    model=model,
    args=args,
    train_dataset=tokenizer_ds['train'],
    eval_dataset=tokenizer_ds['test'],
    # Passing model= lets the collator prepare decoder_input_ids from the
    # labels (recommended for T5-style seq2seq training); it also pads labels
    # with -100 so padding is ignored by the loss.
    data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
    compute_metrics=compute_metric,  # evaluation callback (ROUGE)
)

if __name__ == '__main__':
    trainer.train()
