import os
import evaluate
from transformers import (
    Trainer,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    TrainingArguments,
)
import numpy as np
from dataclasses import dataclass
from datasets import load_dataset, load_from_disk, concatenate_datasets
from collections import Counter

from utils import save_obj
from prompt import get_industry_trans_func
from setting import StaticValues


class BertCLS:
    """Wrapper around `transformers.Trainer` for sequence-classification runs.

    Bundles the model, tokenizer, datasets and TrainingArguments, and exposes
    small train / eval / predict helpers on top of the Trainer API.

    NOTE: the original class carried a meaningless `@dataclass` decorator
    (it has a hand-written `__init__` and no annotated fields); removed.
    """

    def __init__(
        self,
        model,
        tokenizer,
        train_dataset=None,
        eval_dataset=None,
        output_dir="output",
        epoch=3,
    ):
        """
        Args:
            model: sequence-classification model accepted by Trainer.
            tokenizer: matching tokenizer; also drives dynamic padding.
            train_dataset: tokenized training split, or None.
            eval_dataset: tokenized eval split; when given, per-epoch
                evaluation and best-model loading are enabled.
            output_dir: directory for checkpoints and the saved best model.
            epoch: default number of training epochs (new, backward-compatible
                parameter — callers were already passing `epoch=`).
        """
        self.model = model
        self.tokenizer = tokenizer
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset

        from transformers import DataCollatorWithPadding

        # Pad dynamically per batch instead of to a global max length.
        self.data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

        self.args = self.get_args(output_dir, epoch=epoch)
        self.trainer = self._build_trainer()

    def _build_trainer(self):
        """Create a Trainer from the current model/args/datasets."""
        return Trainer(
            model=self.model,
            args=self.args,
            train_dataset=self.train_dataset,
            eval_dataset=self.eval_dataset,
            data_collator=self.data_collator,
            # compute_metrics=compute_metrics,
            tokenizer=self.tokenizer,
        )

    def get_args(self, output_dir, epoch=3):
        """Build TrainingArguments.

        Evaluation strategy and best-model loading depend on whether an
        eval dataset was provided; everything else is shared.
        """
        common = dict(
            output_dir=output_dir,
            save_strategy="epoch",
            save_total_limit=3,
            learning_rate=2e-5,
            num_train_epochs=epoch,
            weight_decay=0.01,
            per_device_train_batch_size=16,
            per_device_eval_batch_size=128,
            # logging_steps=16,
            save_safetensors=True,
            overwrite_output_dir=True,
        )
        if self.eval_dataset:
            return TrainingArguments(
                evaluation_strategy="epoch",
                load_best_model_at_end=True,
                **common,
            )
        return TrainingArguments(evaluation_strategy="no", **common)

    def set_args(self, args):
        """Replace the TrainingArguments from outside; the Trainer is rebuilt
        so it picks up the new args."""
        self.args = args
        self.trainer = self._build_trainer()

    def train(self, epoch=3, over_write=False):
        """Train and save the best model to `<output_dir>/best_model`.

        Skips training when that directory already exists and
        over_write is False.
        """
        self.args.num_train_epochs = epoch
        best_model_path = os.path.join(self.args.output_dir, "best_model")

        if over_write or not os.path.exists(best_model_path):
            self.trainer.train()
            self.trainer.save_model(best_model_path)
        else:
            print(
                f"预训练权重 {best_model_path} 已存在，且over_write={over_write}。不启动模型训练！"
            )

    def eval(self, eval_dataset):
        """Predict on eval_dataset and score it with the GLUE/MRPC metric
        (accuracy + F1)."""
        predictions = self.trainer.predict(eval_dataset)
        preds = np.argmax(predictions.predictions, axis=-1)
        metric = evaluate.load("glue", "mrpc")
        return metric.compute(predictions=preds, references=predictions.label_ids)

    def predict(self, pred_dataset):
        """Return argmax class ids for pred_dataset (numpy array)."""
        predictions = self.trainer.predict(pred_dataset)
        preds = np.argmax(predictions.predictions, axis=-1)
        # return pred_dataset.add_column("pred", preds)
        return preds

    # Backward-compatible alias: existing callers use `pred` (see bert_binary).
    pred = predict


def bert_binary(name, epoch=3):
    """Train (or resume) the binary BERT classifier for *name*, then label
    the industry dataset's test split with its predictions.

    Args:
        name: configuration name used to build StaticValues.
        epoch: number of training epochs.
    """
    sv = StaticValues(name=name)
    logger = sv.logger

    best_model = os.path.join(sv.bert_config.output_binary_dir, "best_model")
    model_name = "bert-base-chinese"
    # Resume from a previously saved best checkpoint when one exists.
    if os.path.exists(best_model):
        model = AutoModelForSequenceClassification.from_pretrained(best_model)
    else:
        model = AutoModelForSequenceClassification.from_pretrained(model_name)

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    dataset = load_from_disk(sv.bert_config.bert_binary_train)

    # Reuse the shared tokenize closure instead of re-defining it inline.
    _tokenize_func = bert_tokenize_func(tokenizer)

    tokenized_datasets = dataset.map(
        _tokenize_func,
        batched=True,
    )

    bert_cls = BertCLS(
        model=model,
        tokenizer=tokenizer,
        train_dataset=tokenized_datasets,
        output_dir=sv.bert_config.output_binary_dir,
    )

    # BUGFIX: `epoch` was passed to BertCLS.__init__, which does not accept
    # it (TypeError). Pass it to train() instead, which does.
    bert_cls.train(epoch=epoch)

    ## predict
    industry_dataset = load_from_disk(sv.industry_dataset)
    pred_dataset = industry_dataset["test"].map(
        get_industry_trans_func("industry_info", "{industry_info}")
    )
    pred_tokenized_datasets = pred_dataset.map(
        _tokenize_func,
        batched=True,
    )

    # BUGFIX: the method is named `predict`; `bert_cls.pred` raised
    # AttributeError on the original class.
    pred_label = bert_cls.predict(pred_tokenized_datasets)

    # BUGFIX: Dataset.add_column returns a NEW dataset; the original call
    # discarded it, so the predictions were never saved.
    industry_dataset["test"] = industry_dataset["test"].add_column(
        "label", pred_label
    )
    # NOTE(review): saving over the directory the dataset was loaded from can
    # fail on some `datasets` versions — confirm on the deployed version.
    industry_dataset.save_to_disk(sv.industry_dataset)


def bert_tokenize_func(tokenizer):
    """Build a `Dataset.map`-compatible function that tokenizes the
    ``industry_info`` field.

    The returned callable runs ``item["industry_info"]`` through *tokenizer*,
    truncating to 512 tokens (the "bert-base-chinese" context limit).
    """

    def _tokenize(item):
        return tokenizer(item["industry_info"], max_length=512, truncation=True)

    return _tokenize


"""
def bert_cls_trans(name, bert_multi_dataset):
    # 把多标签分类的数据集转成多类别分类数据集
    import numpy as np

    sv = StaticValues(name=name)
    LABEL_NAME = sv.LABEL_NAME
    # bert_multi_dataset = load_from_disk(sv.bert_config.bert_multi_train)
    tokenizer = AutoTokenizer.from_pretrained(sv.bert_config.model_name)

    def bert_trans_train_dataset(item):
        if "industry_info" not in item.keys():
            item = get_industry_trans_func("industry_info", "{industry_info}")(item)
        label = 0
        for k in LABEL_NAME:
            label += item[k]
        if label == 1:
            for idx, k in enumerate(LABEL_NAME):
                if item[k] == 1:
                    label = idx
        else:
            label = -1
        item["label"] = label

        tokenized_inputs = tokenizer(
            item["industry_info"],
            max_length=512,
            truncation=True,
        )
        return tokenized_inputs

    new_dataset = bert_multi_dataset.map(bert_trans_train_dataset)

    idx = np.array(new_dataset["label"]) >= 0
    data = np.array(new_dataset["label"])[idx]
    min_len = min(Counter(data).values())

    def cut_dataset(_dataset, min_len):
        if len(_dataset) > 2 * min_len:
            return _dataset.train_test_split(train_size=2 * min_len)["train"]
        return _dataset

    dataset_d = {
        k: cut_dataset(new_dataset.filter(lambda item: item["label"] == k), min_len)
        for k in range(len(LABEL_NAME))
    }
    return concatenate_datasets(dataset_d.values())
"""


def bert_cls_trans(name, bert_multi_dataset):
    """Convert a multi-label dataset into a balanced multi-class dataset.

    A row that has exactly one positive label among ``sv.LABEL_NAME`` gets
    that label's index as ``label``; every other row gets -1 and is excluded
    from the class-size statistics. Each class is then down-sampled and the
    classes are concatenated.

    Args:
        name: configuration name used to build StaticValues.
        bert_multi_dataset: dataset with one 0/1 column per label
            (and optionally an ``industry_info`` text column).

    Returns:
        The concatenated, tokenized multi-class dataset.
    """
    # Removed redundant function-local `import numpy as np` and
    # `from collections import Counter` — both already imported at module level.
    sv = StaticValues(name=name)
    logger = sv.logger
    LABEL_NAME = sv.LABEL_NAME
    # bert_multi_dataset = load_from_disk(sv.bert_config.bert_multi_train)
    tokenizer = AutoTokenizer.from_pretrained(sv.bert_config.model_name)

    def bert_trans_train_dataset(item):
        # Derive "industry_info" on the fly when the source row lacks it.
        if "industry_info" not in item.keys():
            item = get_industry_trans_func("industry_info", "{industry_info}")(item)
        # Exactly one positive label -> that label's index; otherwise -1.
        label = 0
        for k in LABEL_NAME:
            label += item[k]
        if label == 1:
            for idx, k in enumerate(LABEL_NAME):
                if item[k] == 1:
                    label = idx
        else:
            label = -1
        item["label"] = label

        tokenized_inputs = tokenizer(
            item["industry_info"],
            max_length=512,
            truncation=True,
        )
        return tokenized_inputs

    new_dataset = bert_multi_dataset.map(bert_trans_train_dataset)

    # Smallest class size among rows that received a real (>= 0) label.
    labels = np.array(new_dataset["label"])
    min_len = min(Counter(labels[labels >= 0]).values())

    def cut_dataset(_dataset, min_len):
        # Cap each class at max(3000, 2 * min_len) rows; train_test_split
        # requires 0 < train_size < len(dataset).
        length = min(max(3000, 2 * min_len), len(_dataset) - 1)
        if length < 1:
            # BUGFIX: a class with <= 1 rows made train_test_split raise;
            # keep such a class as-is instead of crashing.
            return _dataset
        return _dataset.train_test_split(train_size=length)["train"]

    dataset_d = {
        # Bind k as a default so the predicate does not rely on late binding.
        k: cut_dataset(new_dataset.filter(lambda item, k=k: item["label"] == k), min_len)
        for k in range(len(LABEL_NAME))
    }

    res_dataset = concatenate_datasets(dataset_d.values())

    logger.info(f"{Counter(res_dataset['label'])}")
    return res_dataset


if __name__ == "__main__":
    sv = StaticValues(name="machine")
    # logger = sv.logger

    # bert_binary(name="biomedical", epoch=1)
    # nohup python bert_train.py > bert_train.log 2>&1 &

    # industry_dataset = load_from_disk(sv.industry_dataset)
    # print(industry_dataset['test'])
    # print(industry_dataset['test'][0])
    model_name = "bert-base-chinese"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    # def _tokenize_func(item):
    #     tokenized_inputs = tokenizer(
    #         item["industry_info"],
    #         max_length=512,
    #         truncation=True,
    #     )
    #     return tokenized_inputs

    # pred_tokenized_datasets = pred_dataset.map(
    #     _tokenize_func,
    #     batched=True,
    # )

    bert_dataset = load_from_disk(sv.bert_config.bert_binary_train)

    print(bert_dataset)
