
# Import the required packages
from transformers import  AutoTokenizer,AutoModelForTokenClassification,TrainingArguments,Trainer,DataCollatorForTokenClassification
import evaluate
from datasets import  load_dataset
import wandb
# 配置 WandB API 密钥
wandb.login(key="f8faa7a67ba54735a3e66f118d466640711b323d")

# 加载数据集,并指定好相关的下载目录集
print("开始加载数据集")
ner_datasets=load_dataset("peoples_daily_ner",cache_dir="data_cache/",trust_remote_code=True)
print("数据集加载成功")

print(ner_datasets["train"][0])
# 加载出来分类
label_list=ner_datasets["train"].features["ner_tags"].feature.names
print(label_list)

print("加载分词器")
# 加载分词器
tokenizer=AutoTokenizer.from_pretrained("hfl/chinese-macbert-base",cache_dir="model_cache/")
print("加载分词器完毕")
# 数据集预处理
def process_function(examples):
    tokenized_examples=tokenizer(examples["tokens"],max_length=128,truncation=True,is_split_into_words=True)
    labels=[]
    for i, label in enumerate(examples["ner_tags"]):
        word_ids=tokenized_examples.word_ids(batch_index=i)
        label_ids=[label[word_id] if word_id is not None else -100 for word_id in word_ids]
        labels.append(label_ids)
    tokenized_examples["labels"]=labels
    return tokenized_examples

# 处理数据
tokenized_datasets=ner_datasets.map(process_function,batched=True)
# 打印一下看一下
print(tokenized_datasets["train"][:3])

#  创建模型
model=AutoModelForTokenClassification.from_pretrained("hfl/chinese-macbert-base",num_labels=len(label_list))
print("创建模型结束")
# 加载评估函数
seqeval=evaluate.load("seqeval")
print("加载评估函数结束")
import numpy as np
def eval_metric(pred):
    predictions,labels=pred
    predictions=np.argmax(predictions,axis=-1)
    true_predictions=[
       [label_list[p] for p,l in zip(predictions,labels) if l!=-100]
    ]
    true_labels = [
        [label_list[l] for p, l in zip(predictions, labels) if l != -100]
    ]

    return seqeval.compute(predictions=true_predictions,references=true_labels,mode="strict",scheme="IOB2")

# 配置训练参数
args=TrainingArguments(
    output_dir="output",
    num_train_epochs=3,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    metric_for_best_model="f1",
    warmup_steps=500,
    weight_decay=0.01,
)

# 创建Trainer
trainer=Trainer(
    model=model,
    args=args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    compute_metrics=eval_metric,
    data_collator=DataCollatorForTokenClassification(tokenizer=tokenizer)
)

print("开始训练")
# 开始训练
trainer.train()

# 开始测试
trainer.evaluate()

# 开始推理
from transformers import pipeline

ner_pipe=pipeline("token-classification",model=model,tokenizer=tokenizer,device=0)


text="我在北京,北京天气很好"
print(ner_pipe(text))