import logging
import os
import sys
import random
from dataclasses import dataclass, field
from typing import Optional

import datasets
import torch
import numpy as np
from datasets import ClassLabel, load_dataset, load_metric
from datasets import load_from_disk

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorForTokenClassification,
    HfArgumentParser,
    PreTrainedTokenizerFast,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version

# Model
model_name_or_path = "../pretrained_models/bert-base-chinese/"  # local BERT checkpoint dir
config_name = None       # NOTE(review): unused in the visible code
tokenizer_name = None    # NOTE(review): unused in the visible code
cache_dir = None         # NOTE(review): unused in the visible code
model_revision = "main"  # NOTE(review): unused in the visible code
use_auth_token = False   # NOTE(review): unused in the visible code

# Data
batch_size = 8
# Keep the effective batch size at 64 regardless of the per-device batch size.
gradient_accumulation_steps = 64 // batch_size

task_name = "ner"  # NOTE(review): unused in the visible code
dataset_name = "../data/char_ner_dataset.py"  # path to the dataset loading script
dataset_config_name = "train" # "test"
preprocessing_num_workers = 4  # NOTE(review): unused in the visible code
pad_to_max_length = False      # NOTE(review): unused in the visible code
max_train_samples = None       # NOTE(review): unused in the visible code
label_all_tokens = True   # label every sub-token of a word, not just the first
return_entity_level_metrics = True  # NOTE(review): unused in the visible code

# Fix global RNG state (python/numpy/torch) for reproducibility.
set_seed(402)

# BMES-style NER tag set: four positions (B/E/M/S) x four entity types
# (GPE/LOC/ORG/PER) plus the outside tag "O". sorted() reproduces the
# original lexicographic ordering, so label indices are unchanged.
names = sorted(
    [f"{pos}-{ent}" for pos in "BEMS" for ent in ("GPE", "LOC", "ORG", "PER")]
    + ["O"]
)


# 数据
# 数据 (data)
def get_dataset(seed, test_size=0.2):
    """Load the character-level NER dataset and split it into train/test.

    Args:
        seed: RNG seed for the split (e.g. 402, 1218, 1007).
        test_size: fraction of examples held out as the test split.

    Returns:
        A DatasetDict with "train" and "test" splits.
    """
    raw_dataset = load_dataset(dataset_name, name=dataset_config_name)
    # Bug fix: the split previously hard-coded test_size=0.2, silently
    # ignoring the test_size parameter.
    raw_dataset = raw_dataset["train"].train_test_split(test_size=test_size, seed=seed)

    return raw_dataset

# 模型
# 模型 (model)
def get_model(seed):
    """Build the token-classification model/tokenizer and the tokenized data.

    Args:
        seed: seed forwarded to get_dataset for the train/test split.

    Returns:
        (tokenized_datasets, model, tokenizer) where tokenized_datasets is a
        torch-formatted DatasetDict with "train" and "test" splits.
    """
    config = AutoConfig.from_pretrained(model_name_or_path)
    model = AutoModelForTokenClassification.from_pretrained(model_name_or_path, config=config)
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, config=config)
    model.resize_token_embeddings(len(tokenizer))

    splits = get_dataset(seed=seed)

    def encode_batch(examples):
        """Tokenize pre-split characters and align word labels to sub-tokens."""
        encoded = tokenizer(examples["tokens"], is_split_into_words=True,
                            padding=True, truncation=True)

        aligned = []
        for batch_index, word_labels in enumerate(examples["labels"]):
            word_ids = encoded.word_ids(batch_index=batch_index)
            per_token = []
            prev = None
            for wid in word_ids:
                if wid is None:
                    # Special tokens have no word id; -100 is ignored by the loss.
                    per_token.append(-100)
                elif wid != prev:
                    # First sub-token of each word keeps the word's label.
                    per_token.append(word_labels[wid])
                elif label_all_tokens:
                    # Remaining sub-tokens: propagate the word's label...
                    per_token.append(word_labels[wid])
                else:
                    # ...or mask them out, depending on the flag.
                    per_token.append(-100)
                prev = wid
            aligned.append(per_token)

        encoded["labels"] = aligned
        return encoded

    tokenized_datasets = splits.map(encode_batch, batched=True)
    tokenized_datasets.set_format(type="torch", columns=['attention_mask', 'input_ids', 'labels', 'token_type_ids'])

    return tokenized_datasets, model, tokenizer


# 训练
# 训练 (training)
def train_loop(seed):
    """Fine-tune the token-classification model and save the result.

    Args:
        seed: seed for the train/test split (forwarded to get_model).
    """
    metric = load_metric("./seqeval.py")
    tokenized_datasets, model, tokenizer = get_model(seed)
    data_collator = DataCollatorForTokenClassification(tokenizer)

    def compute_metrics(p):
        """seqeval metrics over non-ignored (label != -100) positions."""
        predictions, labels = p
        predictions = np.argmax(predictions, axis=2)

        # Remove ignored index (special tokens).
        true_predictions = [
            [names[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [names[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]

        results = metric.compute(predictions=true_predictions, references=true_labels)
        if return_entity_level_metrics:
            # Bug fix: this flag was defined at module level but never honored.
            # Unpack the per-entity nested dicts into flat "<entity>_<metric>"
            # keys alongside the overall_* scalars.
            final_results = {}
            for key, value in results.items():
                if isinstance(value, dict):
                    for sub_key, sub_value in value.items():
                        final_results[f"{key}_{sub_key}"] = sub_value
                else:
                    final_results[key] = value
            return final_results
        return {
            "precision": results["overall_precision"],
            "recall": results["overall_recall"],
            "f1": results["overall_f1"],
            "accuracy": results["overall_accuracy"],
        }

    args = TrainingArguments(
        "ner-3",
        do_train=True,
        do_eval=True,
        evaluation_strategy="epoch",
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        num_train_epochs=10,
        gradient_accumulation_steps=gradient_accumulation_steps,
        learning_rate=3e-5,
        weight_decay=5e-3,
        max_grad_norm=1.0,
        warmup_ratio=0.2,
        # Bug fix: load_best_model_at_end requires the save strategy to match
        # the evaluation strategy; save_strategy="no" makes TrainingArguments
        # raise at construction time. Save per epoch, keeping only the latest
        # checkpoints on disk.
        save_strategy="epoch",
        save_total_limit=2,
        load_best_model_at_end=True,
        no_cuda=False,
        fp16=True,
        label_smoothing_factor=0.1,
        dataloader_pin_memory=True,
    )

    trainer = Trainer(
        model,
        args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["test"],
        # Bug fix: the token-classification collator was built but never
        # passed, so Trainer fell back to a default collator that does not
        # pad the "labels" column, crashing on ragged batches.
        data_collator=data_collator,
        tokenizer=tokenizer,
        compute_metrics=compute_metrics,
    )

    trainer.train()

    # Bug fix: save_pretrained("") pointed at an empty path; save the
    # (best) fine-tuned model under the run's output directory instead.
    model.save_pretrained("ner-3/best_model")



if __name__ == "__main__":
    pass