import os.path

import numpy as np
from datasets import load_dataset
from sklearn.metrics import f1_score
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BertForMaskedLM,
    DataCollatorForLanguageModeling,
    DataCollatorWithPadding,
    Trainer,
    TrainingArguments,
)

from models.bert import BertForSequenceClassification

# Pipeline switches: optional MLM domain-adaptive pretraining (do_mlm),
# then either fine-tuning (do_train) or inference — never both at once.
do_mlm = True
do_train = False
do_infer = not do_train  # exactly one of train/infer runs per invocation

# Backbone checkpoint; alternatives tried: Langboat/mengzi-bert-base, hfl/chinese-macbert-base
check_point = "Langboat/mengzi-bert-base"
data = None
if do_train:
    # 958 examples with fields: ['id', 'title', 'assignee', 'abstract', 'label_id']
    data = load_dataset("json", data_files="./data/train.json")
if do_infer:
    # Test split carries no labels: ['id', 'title', 'assignee', 'abstract']
    data = load_dataset("json", data_files="./data/testA.json")
tokenizer = AutoTokenizer.from_pretrained(check_point)
max_length = 512

def preprocess_function(examples):
    """Batched `datasets.map` function: build one Chinese prompt per example
    and tokenize the batch.

    `examples` is a batch dict with parallel lists under "title", "assignee"
    and "abstract" (plus "label_id" when training). Returns the tokenizer
    output (input_ids, attention_mask, ...) with an added integer "labels"
    field during training. Relies on module-level `tokenizer`, `max_length`
    and `do_train`.
    """
    # Distinct loop-variable names: the original comprehension shadowed the
    # batch lists ("title", "assignee", "abstract") with its own variables.
    input_string = [
        f"这份专利的标题为：《{t}》，由{a}公司申请，详细说明如下：{ab}"
        for t, a, ab in zip(examples["title"], examples["assignee"], examples["abstract"])
    ]
    model_inputs = tokenizer(input_string, max_length=max_length, truncation=True, padding=True)
    if do_train:
        # Classification target: one integer class id per example.
        model_inputs["labels"] = examples["label_id"]
    return model_inputs


# Raw-text columns are dropped once their content has been tokenized.
remove_columns = ["title", "id", "assignee", "abstract"]
# Tokenize the full dataset in batches.
ds = data["train"].map(
    preprocess_function,
    batched=True,
    remove_columns=remove_columns,
)
if do_train:
    # Hold out 20% for evaluation; the fixed seed keeps the split reproducible.
    ds = ds.train_test_split(test_size=0.2, seed=42)


def compute_metrics(eval_pred):
    """Return the macro-averaged F1 score (as a percentage) for a Trainer
    EvalPrediction of (logits, labels)."""
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    macro_f1 = 100 * f1_score(labels, preds, average='macro')
    return {"f1": macro_f1}


# Total data volume ~15000; lr 2e-5 worked well previously.
# Fine-tuning arguments: evaluate/save every 20 steps and keep the best
# checkpoint by macro-F1 (metric produced by compute_metrics).
args = TrainingArguments(output_dir="./output", gradient_accumulation_steps=1, learning_rate=5e-5,
                         num_train_epochs=30, per_device_train_batch_size=16, weight_decay=0,
                         per_device_eval_batch_size=16, greater_is_better=True,
                         logging_steps=20, eval_steps=20, save_steps=20, label_smoothing_factor=0,
                         evaluation_strategy="steps", save_strategy="steps", warmup_ratio=0.1,
                         save_total_limit=2, load_best_model_at_end=True, metric_for_best_model="f1")

# Dynamic padding to the longest sequence in each batch.
data_collator = DataCollatorWithPadding(tokenizer)
trainer = None
mlm_args = None
mlm_save_path = None
if do_train:
    if do_mlm:
        # Stage 1: domain-adaptive masked-language-model pretraining on the
        # patent texts before the classification fine-tune.
        mlm_args = TrainingArguments(output_dir="./mlm_output", gradient_accumulation_steps=1, learning_rate=5e-5,
                                     num_train_epochs=10, per_device_train_batch_size=16, weight_decay=0,
                                     per_device_eval_batch_size=16, greater_is_better=False,
                                     logging_steps=20, eval_steps=20, save_steps=20, label_smoothing_factor=0,
                                     evaluation_strategy="epoch", save_strategy="epoch", warmup_ratio=0.1,
                                     save_total_limit=2, load_best_model_at_end=True)
        mlm_model = BertForMaskedLM.from_pretrained(check_point)
        # BUG FIX: the classification "labels" column (a single class id per
        # example) must not be fed to an MLM head, and DataCollatorWithPadding
        # never masks tokens, so the previous setup could not learn MLM.
        # Strip the classification labels and use the MLM collator, which
        # generates masked-token labels itself. The classification
        # compute_metrics (argmax over the vocab) is likewise dropped here.
        mlm_train = ds["train"].remove_columns("labels")
        mlm_eval = ds["test"].remove_columns("labels")
        mlm_collator = DataCollatorForLanguageModeling(tokenizer, mlm_probability=0.15)
        mlm_trainer = Trainer(model=mlm_model, args=mlm_args, train_dataset=mlm_train, eval_dataset=mlm_eval,
                              data_collator=mlm_collator)
        mlm_trainer.train()
        mlm_save_path = './best_mlm_ckpt'
        mlm_trainer.save_model(mlm_save_path)

    # Stage 2: classification fine-tune.
    # BUG FIX: initialise the classifier from the MLM checkpoint via
    # from_pretrained — resume_from_checkpoint expects a Trainer checkpoint
    # of *this* run, not a saved MLM model. Also guard against
    # mlm_save_path being None (os.path.exists(None) raises TypeError
    # whenever do_mlm is False).
    init_checkpoint = mlm_save_path if mlm_save_path is not None and os.path.exists(mlm_save_path) else check_point
    model = BertForSequenceClassification.from_pretrained(init_checkpoint, num_labels=36)
    trainer = Trainer(model=model, args=args, train_dataset=ds["train"], eval_dataset=ds["test"],
                      compute_metrics=compute_metrics, data_collator=data_collator)
    trainer.train()
    trainer.save_model('./best_ckpt/4')
if do_infer:
    print("开始预测.....")
    path = './best_ckpt/4'

    # Load the fine-tuned classifier and run batch prediction over the
    # tokenized test split.
    model = BertForSequenceClassification.from_pretrained(path)
    trainer = Trainer(model=model)
    result = trainer.predict(test_dataset=ds)
    predictions = np.argmax(result.predictions, axis=-1)
    # Attach the predicted class ids to the raw test frame and export the
    # two submission columns.
    df = data["train"].to_pandas()
    df["label"] = predictions
    df[['id', 'label']].to_csv('submit.csv', index=False)
