import evaluate
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer, DataCollatorForTokenClassification


from datasets import DatasetDict

# ner_datasets = DatasetDict.load_from_disk("ner_data")
# ner_datasets
from datasets import Dataset
import pandas as pd
import os


# data = pd.read_csv
# dataset = Dataset.from_pandas(data)
# dataset

# from make_ner_data import DataMaker
# data_maker = DataMaker()
# label_list = data_maker.get_labels()

# csv_dirs = ['tempdir/outputs/style_1', 'tempdir/outputs/style_2', 'tempdir/outputs/style_3']
# data_files = []
# for csv_dir_i in csv_dirs:
#     _files = os.listdir(csv_dir_i)
#     _pathes = [os.path.join(csv_dir_i, f) for f in _files]
#     data_files.extend(_pathes)

# Directories holding the generated CSV data, one directory per layout style.
# NOTE: this was previously only defined in a commented-out line, so the loop
# below raised NameError on `csv_dirs`.
csv_dirs = ['tempdir/outputs/style_1', 'tempdir/outputs/style_2', 'tempdir/outputs/style_3']

_train = []
_test = []
_validation = []
for csv_dir_i in csv_dirs:
    # Sort for a deterministic, reproducible split (os.listdir order is
    # platform-dependent and otherwise arbitrary).
    _files = sorted(os.listdir(csv_dir_i))

    # Normalize Windows path separators so the paths work with `datasets`.
    _pathes = [os.path.join(csv_dir_i, f).replace('\\', '/') for f in _files]
    _length = len(_pathes)

    # 70% / 20% / remainder split per directory (integer arithmetic: any
    # leftover files fall into the validation split).
    split_train = _length // 10 * 7
    split_test = _length // 10 * 2

    _train.extend(_pathes[:split_train])
    _test.extend(_pathes[split_train: split_train + split_test])
    _validation.extend(_pathes[split_train + split_test:])

data_files = {"train": _train, "test": _test, 'validation': _validation}

# from bdtime import show_json, show_ls
# show_json(data_files)

# data_files = [
#     "tempdir/outputs/style_1/style_1__n_1000.csv",
#     "tempdir/outputs/style_1/style_1__n_2000.csv",
# ]

# ner_datasets = load_dataset("csv", data_files=data_files, split="train")
# ner_datasets = ner_datasets.train_test_split(test_size=0.1)

# Load all CSV splits into a DatasetDict and drop the pandas index column
# ("Unnamed: 0") that was written into the CSVs.
ner_datasets = load_dataset("csv", data_files=data_files).remove_columns(["Unnamed: 0"])
print(ner_datasets)


# Full BIO tag inventory: the outside tag "O" plus a B_/I_ pair for each
# article field (title / authors / institutions) in Chinese and English.
_fields = [f'{lang}_{part}'
           for lang in ('cn', 'en')
           for part in ('title', 'authors', 'institutions')]
label_list = ['O'] + [f'{prefix}_{field}' for field in _fields for prefix in ('B', 'I')]

print('label_list ---', label_list)

# ner_datasets
#
#
# ner_datasets["train"][0]
# label_i = eval(ner_datasets["train"][0]['label'])
#
# ner_datasets

# Network / model-source configuration. When a previously saved model exists
# locally, resume from it (offline, lower-churn); otherwise pull the base
# checkpoint from the Hub through the local proxy.
_PROXY_URL = "http://127.0.0.1:7890"
proxies = {"http": _PROXY_URL, "https": _PROXY_URL}

tokenizer_name = "hfl/chinese-macbert-base"
model_name = "hfl/chinese-macbert-base"

save_dir = 'tempdir/ner_models/article'
local_files_only = os.path.exists(save_dir)
if local_files_only:
    print('====== save dir is exists!')
    model_name = save_dir
    proxies = None


tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, proxies=proxies, local_files_only=local_files_only)

# Smoke-test the tokenizer on the first training example. The "text" column
# holds raw strings, so is_split_into_words stays False (it must be True only
# for data that is already split into word lists). The original code made
# this identical call twice; once is enough.
tokenizer(ner_datasets["train"][0]["text"], is_split_into_words=False)



# 借助word_ids 实现标签映射
def process_function(examples):
    """Tokenize a batch and align the per-word NER labels with the word pieces.

    Args:
        examples: batch dict with a "text" column (raw strings) and a "label"
            column whose entries are stringified Python lists of label ids
            (as serialized into the CSVs).

    Returns:
        The tokenized batch with an added "labels" key. Positions belonging
        to special tokens ([CLS]/[SEP]/padding, i.e. word_id is None) get
        -100 so the loss function ignores them.
    """
    # Safe replacement for eval(): only parses Python literals, so a
    # malformed/hostile label string cannot execute arbitrary code.
    from ast import literal_eval

    tokenized_examples = tokenizer(examples["text"], max_length=512, padding=True, truncation=True, is_split_into_words=False)
    labels = []
    for i, raw_label in enumerate(examples["label"]):
        label = literal_eval(raw_label)  # "[0, 1, ...]" -> list[int]
        word_ids = tokenized_examples.word_ids(batch_index=i)
        # Each token inherits the label of the word it came from; -100 marks
        # special tokens to be skipped by the loss.
        label_ids = [-100 if word_id is None else label[word_id] for word_id in word_ids]
        labels.append(label_ids)

    tokenized_examples["labels"] = labels
    return tokenized_examples


# In[ ]:


tokenized_datasets = ner_datasets.map(process_function, batched=True)
print(tokenized_datasets)

# Keep only the tokenizer outputs and the aligned "labels"; the raw columns
# are no longer needed for training.
tokenized_datasets = tokenized_datasets.remove_columns(['text', 'label', 'label_span'])
# print(tokenized_datasets["train"][0])


model = AutoModelForTokenClassification.from_pretrained(model_name, num_labels=len(label_list), proxies=proxies, local_files_only=local_files_only)

import torch
# Fall back to CPU when CUDA is unavailable instead of crashing on
# torch.device("cuda") on a GPU-less machine.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# seqeval computes entity-level precision/recall/F1 from BIO tag sequences
# (loaded from a local metric script).
seqeval = evaluate.load("seqeval_metric.py")

import numpy as np


def eval_metric(pred):
    """Compute entity-level F1 for an evaluation batch.

    `pred` is the (logits, label_ids) pair handed over by the Trainer.
    Positions labelled -100 (special tokens / padding) are dropped before
    scoring.
    """
    logits, label_ids = pred
    pred_ids = np.argmax(logits, axis=-1)

    # Map ids back to their string tags, skipping ignored (-100) positions.
    true_predictions = []
    true_labels = []
    for pred_row, label_row in zip(pred_ids, label_ids):
        true_predictions.append(
            [label_list[p] for p, l in zip(pred_row, label_row) if l != -100]
        )
        true_labels.append(
            [label_list[l] for p, l in zip(pred_row, label_row) if l != -100]
        )

    result = seqeval.compute(predictions=true_predictions, references=true_labels, mode="strict", zero_division=1)

    return {
        "f1": result["overall_f1"]
    }


# ---- training hyper-parameters ------------------------------------------
per_device_train_batch_size = 32
num_train_epochs = 12
logging_steps = 10

# Use a lower learning rate when continuing from a previously saved model.
learning_rate = 1e-5 if os.path.exists(save_dir) else 5e-5


args = TrainingArguments(
    output_dir="./checkpoints",

    per_device_train_batch_size=per_device_train_batch_size,
    # Evaluation has no backward pass, so a larger batch fits in memory.
    per_device_eval_batch_size=per_device_train_batch_size * 2,
    logging_steps=logging_steps,
    num_train_epochs=num_train_epochs,

    learning_rate=learning_rate,
    weight_decay=0.01,  # weight decay coefficient

    save_strategy="epoch",
    evaluation_strategy="epoch",
    metric_for_best_model="f1",
    save_total_limit=3,  # keep at most 3 checkpoints on disk
    load_best_model_at_end=True,
)

# Disable Weights & Biases BEFORE constructing the Trainer: the reporting
# integrations are resolved during initialisation, so setting the env var
# afterwards (as the original code did) is not guaranteed to take effect.
# (`os` is already imported at the top of the file.)
os.environ["WANDB_DISABLED"] = "true"

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    compute_metrics=eval_metric,
    # Dynamically pads inputs AND the "labels" column per batch.
    data_collator=DataCollatorForTokenClassification(tokenizer=tokenizer)
)


trainer.train()

trainer.save_model(save_dir)
print('======= end train')
