import collections

from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForTokenClassification, DataCollatorForTokenClassification, \
    TrainingArguments, Trainer
from transformers import BertForTokenClassification
from datasets import DatasetDict
from seqeval.metrics import f1_score
import numpy as np

# Load the pre-processed NER dataset from disk (a DatasetDict with at least a
# "train" split) and the human-readable tag names for its "ner_tags" feature.
ner_datasets = DatasetDict.load_from_disk("/data/datasets/ner_data")
label_list = ner_datasets['train'].features["ner_tags"].feature.names
# Chinese MacBERT tokenizer; the dataset's "tokens" column is already
# word-split, hence is_split_into_words=True below.
tokenizer = AutoTokenizer.from_pretrained("/data/models/huggingface/chinese-macbert-base")
# Smoke-test the tokenizer on the first two examples; `res` is only for
# inspection and is not used anywhere else in this script.
res = tokenizer(ner_datasets['train'][0:2]["tokens"], is_split_into_words=True, padding='max_length', max_length=64,
                truncation=True, return_offsets_mapping=True, return_tensors='pt')


def process_function(example):
    """Tokenize a batch of pre-split token sequences and align NER tags.

    Every sub-token produced for a word inherits that word's tag; special
    tokens (word id ``None``) get -100 so the loss function ignores them.

    Args:
        example: columnar batch with "tokens" (list of word lists) and
            "ner_tags" (list of per-word tag-id lists).

    Returns:
        The tokenizer output with an added "labels" field, one aligned
        tag-id list per example.
    """
    tokenized = tokenizer(example["tokens"], max_length=128, truncation=True, is_split_into_words=True)
    aligned_labels = []
    # One pass per example in the batch.
    for batch_idx, tags in enumerate(example["ner_tags"]):
        # word_ids maps each sub-token position back to its source word index.
        word_ids = tokenized.word_ids(batch_index=batch_idx)
        aligned_labels.append(
            [-100 if word_id is None else tags[word_id] for word_id in word_ids]
        )
    tokenized["labels"] = aligned_labels
    return tokenized


# Align labels for every split; batched=True feeds dict-of-lists batches.
tokenized_datasets = ner_datasets.map(process_function, batched=True)
examples = tokenized_datasets['train'][0:4]
# print(examples)

# Re-shape the columnar batch (dict of lists) into the list-of-dicts format
# that DataCollatorForTokenClassification expects, one dict per example.
# (Replaces identity list-comprehension copies and an index-based loop.)
collator_examples = [
    {
        "input_ids": ids,
        "token_type_ids": type_ids,
        "attention_mask": mask,
        "labels": label_ids,
    }
    for ids, type_ids, mask, label_ids in zip(
        examples['input_ids'],
        examples['token_type_ids'],
        examples['attention_mask'],
        examples['labels'],
    )
]

# Pad the variable-length examples into one rectangular batch. Calling the
# collator directly uses the public __call__ API, which dispatches to
# torch_call with the default return_tensors="pt" — same result as the
# previous private-method call, through the supported entry point.
data_collator = DataCollatorForTokenClassification(tokenizer)
collator_result = data_collator(collator_examples)

# print(collator_examples,"collator_examples")
# print(collator_result,"collator_result")
# model = AutoModelForTokenClassification.from_pretrained(
#     "/data/models/huggingface/chinese-macbert-base",
#     num_labels=len(label_list))
# print(model.bert(collator_result))
# print(tokenized_datasets['train'],"tokenized_datasets['train']")
# Inspect the first four processed training examples (columnar dict of lists).
print(tokenized_datasets['train'][0:4])
#
# model = AutoModelForTokenClassification.from_pretrained(
#     "/data/models/huggingface/chinese-macbert-base",
#     num_labels=len(label_list))
#
#
# def evaluate_metrics(pred):
#     predictions, labels = pred
#     # 128个token 每一个都需要预测一个结果，以7个类别中最大的那个作为预测结果
#     predictions = np.argmax(predictions, axis=-1)
#
#     true_predictions = [
#         [label_list[p] for p, l in zip(prediction, label) if l != -100]
#         for prediction, label in zip(predictions, labels)
#     ]
#     true_labels = [
#         [label_list[l] for p, l in zip(prediction, label) if l != -100]
#         for prediction, label in zip(predictions, labels)
#     ]
#     f1 = f1_score(true_labels, true_predictions)  # seqeval expects (y_true, y_pred)
#     return {
#         "f1": f1
#     }
#
#
# training_args = TrainingArguments(
#     output_dir="/data/logs/2_2/results",  # output directory
#     num_train_epochs=3,  # total number of training epochs
#     per_device_train_batch_size=4,  # batch size per device during training
#     per_device_eval_batch_size=4,  # batch size for evaluation
#     warmup_steps=500,  # number of warmup steps for learning rate scheduler
#     # evaluation_strategy="epoch",
#     save_strategy="steps",
#     save_steps=400,
#     learning_rate=2e-5,
#     weight_decay=0.01,  # strength of weight decay
#     metric_for_best_model="f1",
#     load_best_model_at_end=True,
#     logging_steps=400,
#     eval_strategy="steps",
#     eval_steps=400,
# )
# from transformers import DefaultDataCollator
#
# trainer = Trainer(
#     model=model,
#     args=training_args,
#     train_dataset=tokenized_datasets['train'],
#     eval_dataset=tokenized_datasets['validation'],
#     compute_metrics=evaluate_metrics,
#     # data_collator=DefaultDataCollator(),
#     data_collator=DataCollatorForTokenClassification(tokenizer),
# )
# """
# from transformers import DefaultDataCollator
# trainer = Trainer(
#     model=model,
#     args=training_args,
#     train_dataset=tokenied_datasets['train'],
#     eval_dataset=tokenied_datasets['validation'],
#     data_collator=DefaultDataCollator()
# )
# """
#
# trainer.train()
# """
# # 将一个字典列表处理按照批次处理成更大的字典，字典的每一个key都包含相应数据的列表
# [
# {'input_ids': [101, 704, 1360, 697, 1054, 1469, 697, 1744, 7566, 2193, 782, 5307, 2382, 757, 6393, 8024, 2190, 1352, 6804, 1068, 5143, 4638, 1355, 2245, 6629, 1168, 749, 4916, 3353, 4638, 2972, 1220, 868, 4500, 511, 102],
# 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
# 'labels': [-100, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -100]},
# {'input_ids': [101, 2823, 1168, 7309, 7579, 4638, 4568, 5310, 8024, 2356, 5279, 1999, 1104, 2137, 2190, 1392, 5277, 7566, 2193, 4408, 2094, 1469, 7566, 2193, 2397, 6956, 6822, 6121, 2442, 3124, 3696, 712, 3844, 6397, 511, 102],
# 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
# 'labels': [-100, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -100]},
# {'input_ids': [101, 6821, 3221, 7676, 3949, 1726, 2495, 4862, 1744, 809, 3341, 8024, 7676, 3949, 860, 5509, 807, 6134, 7339, 1762, 1744, 7354, 3683, 6612, 704, 1357, 2533, 1946, 4277, 3297, 1914, 4638, 671, 3613, 511, 102],
# 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
# 'labels': [-100, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -100]},
# {'input_ids': [101, 6821, 819, 7270, 6809, 123, 121, 126, 7552, 8024, 1066, 1146, 128, 702, 6956, 1146, 4638, 6444, 3389, 2600, 5310, 2845, 1440, 2847, 7463, 749, 3805, 1744, 3124, 2424, 1469, 1744, 2157, 7213, 6121, 1728, 1104, 5032, 1927, 6428, 8024, 1220, 4500, 1920, 7030, 1912, 3726, 996, 1906, 7032, 8024, 924, 1310, 3805, 2355, 8024, 2530, 1227, 3187, 1216, 8024, 6863, 2768, 1744, 2157, 7028, 1920, 5307, 3845, 2938, 1927, 4638, 1079, 2391, 511, 102],
# 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
# 'labels': [-100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 6, 0, 0, 0, 3, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -100]},
#
# {'input_ids': tensor([[ 101,  704, 1360,  ...,    0,    0,    0],
#         [ 101, 2823, 1168,  ...,    0,    0,    0],
#         [ 101, 6821, 3221,  ...,    0,    0,    0],
#         ...,
#         [ 101, 6821, 4905,  ...,    0,    0,    0],
#         [ 101, 2190, 2356,  ...,    0,    0,    0],
#         [ 101,  100, 2207,  ...,    0,    0,    0]]),
# 'token_type_ids': tensor([[0, 0, 0,  ..., 0, 0, 0],
#         [0, 0, 0,  ..., 0, 0, 0],
#         [0, 0, 0,  ..., 0, 0, 0],
#         ...,
#         [0, 0, 0,  ..., 0, 0, 0],
#         [0, 0, 0,  ..., 0, 0, 0],
#         [0, 0, 0,  ..., 0, 0, 0]]),
# 'attention_mask': tensor([[1, 1, 1,  ..., 0, 0, 0],
#         [1, 1, 1,  ..., 0, 0, 0],
#         [1, 1, 1,  ..., 0, 0, 0],
#         ...,
#         [1, 1, 1,  ..., 0, 0, 0],
#         [1, 1, 1,  ..., 0, 0, 0],
#         [1, 1, 1,  ..., 0, 0, 0]]),
# 'labels': tensor([[-100,    5,    5,  ..., -100, -100, -100],
#         [-100,    0,    0,  ..., -100, -100, -100],
#         [-100,    0,    0,  ..., -100, -100, -100],
#         ...,
#         [-100,    0,    0,  ..., -100, -100, -100],
#         [-100,    0,    0,  ..., -100, -100, -100],
#         [-100,    0,    0,  ..., -100, -100, -100]])}
# """
