from datasets import load_dataset,load_from_disk
import os
from rich import print
import torch
from transformers import AutoTokenizer,AutoModelForSequenceClassification,AutoModelForTokenClassification,BertForTokenClassification,BertForSequenceClassification,Trainer,TrainingArguments,DataCollatorForTokenClassification,DataCollatorWithPadding
from torch.utils.data import Dataset,DataLoader
import evaluate
from datasets import load_dataset
import numpy as np
import pathlib

# Directory where Trainer checkpoints will be written (next to this script).
save_model_folder = pathlib.Path(__file__).parent.joinpath('models')

# Machine-specific locations: choose dataset/model paths based on the
# current Windows login name.
if os.getlogin() == 'caofei':
    data_folder = r'C:\Users\caofei\Desktop\desktop link\torch1\hgface\token_classification\ner_data'
    model_folder = r'D:\Models\chinese-macbert-base'
else:
    data_folder = r'C:\Users\COLORFUL\Desktop\AI_NLP\hgface\2-token_classification\ner_data\ner_data'
    model_folder = r'C:\Users\COLORFUL\.cache\modelscope\hub\hfl\chinese-macbert-base'

# Load the NER DatasetDict previously written with save_to_disk().
ner_data = load_from_disk(data_folder)
print(ner_data)


# Only the tokenizer is needed up front; the model is created later,
# once the number of labels is known.
tokenizer = AutoTokenizer.from_pretrained(model_folder)


def map_function(data):
    """Tokenize one example and align its word-level NER tags to word pieces.

    Args:
        data: one dataset example with 'tokens' (list of words) and
            'ner_tags' (one integer tag id per word).

    Returns:
        The tokenizer encoding with an added 'labels' list: one tag id per
        word piece, where special tokens ([CLS]/[SEP]) get -100 so the loss
        ignores them, and every piece of a split word inherits that word's tag.
    """
    output = tokenizer(
        data['tokens'],
        max_length=128,
        truncation=True,
        is_split_into_words=True,
    )
    tags = data["ner_tags"]
    # word_ids() maps each token position to its source word index,
    # e.g. [None, 0, 0, 1, None]; None marks special tokens.
    labels = [-100 if word_idx is None else tags[word_idx]
              for word_idx in output.word_ids()]
    # Keep labels as a plain list of ints: torch.Tensor(...) would build a
    # float32 tensor, while the data collator / cross-entropy loss expect
    # integer class ids (and datasets.map stores plain lists cleanly).
    output['labels'] = labels
    return output

# Tokenize every split and drop the raw columns so only model inputs
# (input_ids / token_type_ids / attention_mask / labels) remain.
# column_names on a DatasetDict is keyed by split; 'test' just supplies
# the shared column list.
ner_data_tokiner = ner_data.map(map_function,remove_columns=ner_data.column_names['test'])
# Human-readable tag names (id -> string, e.g. "B-PER") used by eval_metric.
label_list=ner_data['train'].features['ner_tags'].feature.names


# Debug helpers: shrink the splits for a quick smoke run.
# ner_data_tokiner['train'] = ner_data_tokiner['train'].select(range(100))
# ner_data_tokiner['validation'] = ner_data_tokiner['validation'].select(range(100))
# ner_data_tokiner['test'] = ner_data_tokiner['test'].select(range(100))


# model = AutoModelForTokenClassification.from_pretrained(model_folder,num_labels=len(label_list))



# Token-classification head sized to the tag set.
model = AutoModelForTokenClassification.from_pretrained(model_folder,num_labels=len(label_list))

# Manual forward-pass sanity check on a single example:
# d = ner_data_tokiner['train'][:1]
# res = model(input_ids=torch.Tensor(d['input_ids']).long(),token_type_ids=torch.Tensor(d['token_type_ids']).long(),attention_mask=torch.Tensor(d['input_ids']).long(),labels=torch.Tensor(d['labels']).long())
# print(res)
# Training configuration: evaluate and checkpoint once per epoch,
# keeping at most the 3 most recent checkpoints.
args = TrainingArguments(
    output_dir=save_model_folder.resolve().__str__(),
    per_device_train_batch_size = 128,
    per_device_eval_batch_size= 128,
    save_strategy="epoch",
    eval_strategy="epoch", # alternative: "steps" (then set eval_steps)
    # eval_steps=2,
    # NOTE(review): metric_for_best_model has no effect unless
    # load_best_model_at_end=True is also set — confirm intent.
    metric_for_best_model="f1",
    logging_steps=10,
    num_train_epochs=3,
    save_total_limit=3
)



# For convenience, metrics are loaded from a local checkout of the
# `evaluate` repository instead of being downloaded at runtime.
# Resolve the machine-specific metrics root once, rather than repeating
# the login check for every metric.
_metrics_root = (r"C:\Users\caofei\Desktop\evaluate-main\metrics"
                 if os.getlogin() == 'caofei'
                 else r"C:\Users\COLORFUL\Desktop\evaluate-main\metrics")

# Precision (token-level).  NOTE(review): loaded but not used by
# eval_metric below — confirm whether it is still needed.
precision = _metrics_root + r"\precision\precision.py"
metric_precision = evaluate.load(precision)

# Recall (token-level).  NOTE(review): also unused by eval_metric.
recall = _metrics_root + r"\recall\recall.py"
metric_recall = evaluate.load(recall)

# seqeval: entity-level NER metrics (per-type and overall precision/recall/F1).
seqeval_path = _metrics_root + r"\seqeval\seqeval.py"
metric_seqeval = evaluate.load(seqeval_path)
seqeval = metric_seqeval


def eval_metric(pred):
    """Compute entity-level F1 over an evaluation pass.

    Args:
        pred: (predictions, labels) pair from the Trainer, where predictions
            are per-token logits of shape (batch, seq_len, num_labels) and
            labels are per-token tag ids with -100 at ignored positions
            (special tokens / padding).

    Returns:
        dict with the overall seqeval F1 under key "f1" (matches
        TrainingArguments.metric_for_best_model).
    """
    predictions, labels = pred
    # logits -> predicted tag id per token position.
    predictions = np.argmax(predictions, axis=-1)

    # Convert ids back to string tags, skipping ignored (-100) positions;
    # seqeval requires string IOB2 labels.
    true_predictions = [
        [label_list[p] for p, l in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]

    true_labels = [
        [label_list[l] for p, l in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]

    # strict IOB2 matching: an entity counts only when both its type and
    # its full span match the reference.
    result = seqeval.compute(predictions=true_predictions, references=true_labels, mode="strict", scheme="IOB2")

    return {
        "f1": result["overall_f1"]
    }


# Wire model, data, metrics and collator into the Trainer.  The
# token-classification collator dynamically pads input_ids and labels
# (labels are padded with -100 so padding is excluded from the loss).
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=ner_data_tokiner['train'],
    eval_dataset=ner_data_tokiner['validation'],
    # NOTE(review): newer transformers versions deprecate tokenizer= in
    # favor of processing_class= — confirm the installed version.
    tokenizer = tokenizer,
    compute_metrics=eval_metric,
    data_collator=DataCollatorForTokenClassification(tokenizer=tokenizer),
    # metric_for_best_model="f1",
    # load_best_model_at_end=True,
)


# Blocking call: runs the full fine-tuning loop, evaluating and saving
# checkpoints each epoch per TrainingArguments.
trainer.train()




