import os
import pickle
import logging
import evaluate
import numpy as np
from transformers import (
    Trainer,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    TrainingArguments,
)
import warnings
import functools
from vllm import LLM, SamplingParams
from modelscope import snapshot_download
from datasets import load_dataset
from dataclasses import dataclass
from prompt import get_industry_trans_func

# from utils import deprecated

from setting import StaticValues
import pandas as pd
from datasets import concatenate_datasets, load_from_disk


def save_obj(obj, name):
    """
    Serialize an object to a file with pickle.

    :param obj: object to serialize
    :param name: destination file path
    """
    with open(name, "wb") as fh:
        pickle.dump(obj, fh, protocol=pickle.HIGHEST_PROTOCOL)


def load_obj(name):
    """
    Deserialize an object previously stored with :func:`save_obj`.

    WARNING: unpickling can execute arbitrary code — only load files
    written by this project, never untrusted input.

    :param name: file path to read
    :return: the unpickled object
    """
    with open(name, "rb") as fh:
        obj = pickle.load(fh)
    return obj


def glm4_vllm(prompts, output_dir, temperature=0, max_tokens=1024):
    """
    Run batch inference with GLM-4-9B-Chat through vLLM and pickle the raw
    generation outputs to ``output_dir`` via :func:`save_obj`.

    :param prompts: chat-format conversations passed to
        ``tokenizer.apply_chat_template`` — presumably a list of message
        lists; TODO confirm expected shape against callers
    :param output_dir: file path where the vLLM outputs are pickled
    :param temperature: sampling temperature (0 requests deterministic /
        greedy-style decoding)
    :param max_tokens: generation cap per prompt
    """
    # GLM-4-9B-Chat-1M
    max_model_len, tp_size = 131072, 1
    # Downloads the weights from ModelScope (or reuses the local cache).
    model_dir = snapshot_download("ZhipuAI/glm-4-9b-chat")

    tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
    llm = LLM(
        model=model_dir,
        tensor_parallel_size=tp_size,
        max_model_len=max_model_len,
        trust_remote_code=True,
        # NOTE(review): enforce_eager presumably disables CUDA-graph capture
        # to cut startup/memory cost at some throughput expense — confirm.
        enforce_eager=True,
    )
    # NOTE(review): assumed to be GLM-4's end-of-turn special token ids so
    # generation stops at turn boundaries — verify against the model card.
    stop_token_ids = [151329, 151336, 151338]
    sampling_params = SamplingParams(
        temperature=temperature, max_tokens=max_tokens, stop_token_ids=stop_token_ids
    )

    # Render each conversation into the model's chat prompt string; the
    # rendered text (not token ids) is handed to vLLM for generation.
    inputs = tokenizer.apply_chat_template(
        prompts, tokenize=False, add_generation_prompt=True
    )
    outputs = llm.generate(prompts=inputs, sampling_params=sampling_params)

    # Persist the raw RequestOutput objects for later post-processing.
    save_obj(outputs, output_dir)


def deprecated(func):
    """
    Decorator that emits a ``DeprecationWarning`` every time *func* is
    called, then delegates to the original implementation unchanged.

    :param func: callable to mark as deprecated
    :return: wrapped callable with the original metadata preserved
    """

    @functools.wraps(func)
    def _warn_and_call(*args, **kwargs):
        message = (
            f"{func.__name__} is deprecated and will be removed in future versions."
        )
        # stacklevel=2 points the warning at the caller, not this wrapper.
        warnings.warn(message, category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)

    return _warn_and_call


def tokenize_func(tokenizer):
    """
    Build a ``datasets.map``-compatible callable that tokenizes the
    ``industry_info`` field of an example.

    When an example lacks ``industry_info``, the field is first derived
    with ``get_industry_trans_func`` before tokenization.

    :param tokenizer: HF-style tokenizer callable
    :return: function mapping one example dict to its tokenized inputs
    """

    def _tokenize(example):
        # Derive the field when the upstream pipeline did not provide it.
        if "industry_info" not in example.keys():
            example = get_industry_trans_func("industry_info", "{industry_info}")(
                example
            )
        return tokenizer(
            example["industry_info"],
            max_length=512,
            truncation=True,
        )

    return _tokenize


def export_human_csv(name, output_file):
    """
    Merge model predictions with the source keyword CSV and export a
    human-readable CSV.

    Combines the LLM-inferred "train" split with (when present) BERT
    multi-class predictions converted to one-hot label columns, joins the
    result back onto the keyword CSV by company identity columns, and
    writes the selected columns to ``output_file``.

    :param name: dataset/config name used to resolve paths via StaticValues
    :param output_file: destination CSV path (written without the index)
    """
    sv = StaticValues(name=name)
    kw_df = pd.read_csv(sv.KW_CSV, low_memory=False)
    llm_infer_dataset = load_from_disk(
        # "/home/jie/gitee/pku_industry/general/output/biomedical/bert_multi_train"
        sv.bert_config.bert_multi_train
    )["train"]

    # BERT predictions are optional; only merge them in when the CSV exists.
    if os.path.exists(fd := os.path.join(sv.home_folder, "bert_multi_pred.csv")):
        bert_pred_dataset = load_dataset(
            "csv",
            data_files=fd,
            split="train",
        )

        def trans_to_human(name):
            """
            Convert the BERT multi-class prediction (``pred_label`` index)
            into one-hot, human-readable label columns.
            """
            # NOTE(review): `name` and `sv` here shadow the enclosing
            # function's variables — intentional-looking but worth confirming.
            sv = StaticValues(name=name)
            LABEL_NAME = sv.LABEL_NAME

            def func(item):
                # One-hot encode: 1 for the predicted label, 0 elsewhere.
                for idx, label_name in enumerate(LABEL_NAME):
                    item[label_name] = 0
                    if idx == item["pred_label"]:
                        item[label_name] = 1
                return item

            return func

        # Drop the intermediate model columns; keep only the one-hot labels
        # (plus pred_label) alongside the original identity columns.
        bert_pred_dataset = bert_pred_dataset.map(
            trans_to_human(name=name),
            remove_columns=[
                "reason",
                "label",
                "industry_info",
                "input_ids",
                "token_type_ids",
                "attention_mask",
            ],
        )

        new_dataset = concatenate_datasets([llm_infer_dataset, bert_pred_dataset])
        output_columns = list(kw_df.columns) + sv.LABEL_NAME + ["pred_label"]
    else:
        new_dataset = llm_infer_dataset
        output_columns = list(kw_df.columns) + sv.LABEL_NAME

    new_df = new_dataset.to_pandas()

    # Inner join on the company identity columns (name, business scope,
    # and the three industry classification levels) so only rows present
    # in both frames are exported.
    df = pd.merge(
        new_df,
        kw_df,
        on=["企业名称", "经营范围", "一级行业分类", "二级行业分类", "三级行业分类"],
        how="inner",
    )

    df = df[output_columns]
    df.to_csv(output_file, index=False)


if __name__ == "__main__":
    # get_logger("test", "test.log")
    # Module is currently import-only; no CLI entry point is wired up.
    pass
