import evaluate
from datasets import load_dataset
from transformers import AutoTokenizer,AutoModelForTokenClassification,TrainingArguments,DataCollatorForTokenClassification,Trainer,EarlyStoppingCallback
import numpy as np
from transformers import pipeline

# Load the dataset (People's Daily NER, from a local copy)
ner_dataset = load_dataset("/root/lanyun-tmp/datasets/peoples-daily-ner")
# print(ner_dataset['train'][0])
# String names of the NER tag classes (index in this list == label id)
label_list = ner_dataset['train'].features['ner_tags'].feature.names

# Data preprocessing
tokenizer = AutoTokenizer.from_pretrained("/root/lanyun-tmp/models/chinese-macbert-base")
# Sanity check: tokenize one pre-split example (return value is discarded)
tokenizer(ner_dataset['train'][0]['tokens'], is_split_into_words=True)
# If a word is split into several tokens,
# every token coming from the same word should get that word's label id.

# Use word_ids to map word-level labels onto the produced tokens
def process_function(examples):
    """Tokenize pre-split examples and align NER labels with the tokens.

    Special tokens ([CLS]/[SEP]) get the label -100, which cross-entropy
    loss ignores; every sub-token inherits the label of its source word.
    """
    tokenized = tokenizer(
        examples["tokens"],
        max_length=128,
        truncation=True,
        is_split_into_words=True,
    )
    all_labels = []
    for idx, word_labels in enumerate(examples["ner_tags"]):
        word_ids = tokenized.word_ids(batch_index=idx)
        # None word_id -> special token -> -100 (skipped by the loss);
        # otherwise copy the label of the originating word.
        aligned = [-100 if wid is None else word_labels[wid] for wid in word_ids]
        all_labels.append(aligned)
    tokenized["labels"] = all_labels
    return tokenized

# Apply preprocessing to every split; batched=True feeds lists of examples
tokenized_datasets = ner_dataset.map(process_function,batched=True)
# print(tokenized_datasets)

# Load the model with a token-classification head sized to the label set
model = AutoModelForTokenClassification.from_pretrained("/root/lanyun-tmp/models/chinese-macbert-base",num_labels=len(label_list))

# Build the evaluation metric (local copy of the seqeval metric script)
seqeval = evaluate.load(r"/root/lanyun-tmp/HuggingFace/transformers/seqeval_metric.py")

def eval_metric(pred):
    """Compute entity-level F1 for a Trainer eval step.

    Args:
        pred: (logits, label_ids) pair; logits has shape
            (batch, seq_len, num_labels) and label_ids uses -100 for
            positions that must be ignored (special tokens).

    Returns:
        dict mapping "f1" to the overall entity-level F1 score.
    """
    predictions, labels = pred
    predictions = np.argmax(predictions, axis=-1)
    # Convert ids back to string labels, dropping ignored (-100) positions;
    # build predictions and references in a single pass per sequence.
    true_predictions, true_labels = [], []
    for prediction, label in zip(predictions, labels):
        pred_tags, ref_tags = [], []
        for p, l in zip(prediction, label):
            if l != -100:
                pred_tags.append(label_list[p])
                ref_tags.append(label_list[l])
        true_predictions.append(pred_tags)
        true_labels.append(ref_tags)

    # BUG FIX: seqeval only honors `scheme` when mode="strict"; without it the
    # scheme='IOB2' argument was silently ignored and the default lenient
    # evaluation ran instead.
    result = seqeval.compute(predictions=true_predictions, references=true_labels,
                             mode='strict', scheme='IOB2')
    return {"f1": result["overall_f1"]}

# Configure training arguments
args = TrainingArguments(
    output_dir=r"/root/lanyun-tmp/HuggingFace/output",  # output directory
    num_train_epochs=10,                                # number of training epochs
    per_device_train_batch_size=256,                    # per-device training batch size
    per_device_eval_batch_size=50,                      # per-device eval batch size
    save_total_limit=3,                                 # keep at most this many checkpoints
    save_strategy="epoch",                              # save a checkpoint every epoch
    eval_strategy="epoch",                              # run evaluation every epoch
    metric_for_best_model="f1",                         # metric used to select the best model
    load_best_model_at_end=True,                        # reload the best checkpoint after training
    fp16=False,                                         # float16 mixed precision disabled
    bf16=True,                                          # bfloat16 mixed precision enabled
)

# Build the trainer
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    tokenizer=tokenizer,
    # Pads inputs and labels dynamically per batch (labels padded with -100)
    data_collator=DataCollatorForTokenClassification(tokenizer=tokenizer),
    compute_metrics=eval_metric,
    # Stop early if the eval metric does not improve for 5 evaluations
    callbacks=[EarlyStoppingCallback(early_stopping_patience=5)]
)

# Model training
# trainer.train()

# Use the trained model for prediction
model_path = r"/root/lanyun-tmp/HuggingFace/output/checkpoint-1476"
new_model = AutoModelForTokenClassification.from_pretrained(model_path)
# Evaluation on the held-out test split
eval_trainer = Trainer(
    model=new_model,  # the fine-tuned model
    compute_metrics=eval_metric,  # metric function
    eval_dataset=tokenized_datasets["test"],  # test split
    data_collator=DataCollatorForTokenClassification(tokenizer=tokenizer),
)
# Run evaluation (prints the test-set f1 among other metrics)
eval_result = eval_trainer.evaluate(eval_dataset=tokenized_datasets['test'])
print(eval_result)

# Predict with a pipeline
# Attach human-readable label names so the pipeline emits entity strings
new_model.config.id2label = {idx:label for idx,label in enumerate(label_list)}
new_model.config.label2id = {label: idx for idx, label in enumerate(label_list)}
ner_pipe = pipeline(task="token-classification",model=new_model,
                    tokenizer=tokenizer,device=0,
                    aggregation_strategy="simple")
# BUG FIX: each example's 'tokens' is a list of single characters; passing a
# list to the pipeline makes it treat every element as a separate input text,
# so aggregated entities could never span more than one character.
# Join the characters into one string before calling the pipeline.
text = "".join(tokenized_datasets['test'][0]['tokens'])
print(text)
res = ner_pipe(text)
print(res)