import os
import pandas as pd

from datasets import load_dataset, Dataset, load_from_disk, dataset_dict
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
)

from utils import glm4_vllm
from kw_filter import kw_cls
from industry_binary_cls import (
    run_llm_binary_cls,
    build_binary_dataset,
    merge_binary_dataset,
)
from setting import address, StaticValues
from prompt import get_industry_trans_func
from bert_train import BertCLS, bert_cls_trans, bert_tokenize_func
from utils import tokenize_func, export_human_csv
from industry_multi_cls import run_multi_cls, trans_multi_cls_dataset

import wandb

# LLM callable used for every vLLM inference step in this pipeline.
call_vllm = glm4_vllm
# Presumably the directory of per-industry .py config modules — see the
# commented batch loop under __main__; TODO confirm with the repo layout.
folder = "industries"
# Max number of rows sent to the LLM for binary classification (100k);
# rows beyond this are left for the BERT model to predict.
llm_binary_threshold = int(1e5)
# Max number of rows sent to the LLM for multi-class labeling (50k).
llm_multi_threshold = int(5e4)


def run(name):
    """Run the full industry-classification pipeline for industry ``name``.

    Stages (each stage writes an artifact to disk and is skipped when the
    artifact already exists, so the pipeline is resumable):
      1. Keyword pre-filter of the raw company data (``kw_cls``).
      2. LLM binary classification of up to ``llm_binary_threshold`` rows.
      3. BERT binary classifier trained on the LLM labels, used to predict
         the rows the LLM did not see (only when a "test" split exists).
      4. LLM multi-class labeling of up to ``llm_multi_threshold`` positive
         rows, followed by an analogous BERT multi-class model.
      5. Export of a human-reviewable CSV.

    Args:
        name: industry identifier understood by ``StaticValues`` and the
            other project helpers (e.g. ``"intelligent_terminal"``).
    """
    sv = StaticValues(name)
    logger = sv.logger

    # Keyword-based pre-filtering of the raw company data.
    kw_cls(name)

    # industry_dataset layout:
    #   * "train": rows binary-labeled by the LLM;
    #   * "test":  rows not yet classified (present only when the source
    #              data exceeds llm_binary_threshold rows).
    if not os.path.exists(sv.industry_dataset):
        col_name = [
            "企业名称",
            "经营范围",
            "一级行业分类",
            "二级行业分类",
            "三级行业分类",
        ]
        df = pd.read_csv(sv.KW_CSV, usecols=col_name, low_memory=False)
        industry_dataset = Dataset.from_pandas(df)

        # Randomly carve out llm_binary_threshold rows for LLM labeling;
        # smaller datasets are labeled in full and get no "test" split.
        if len(industry_dataset) > llm_binary_threshold:
            industry_dataset = industry_dataset.train_test_split(
                train_size=llm_binary_threshold
            )
        else:
            _industry_dataset = dataset_dict.DatasetDict()
            _industry_dataset["train"] = industry_dataset
            industry_dataset = _industry_dataset

        # Run LLM binary inference on the train split and store the result
        # back into the "train" split.
        industry_dataset["train"] = run_llm_binary_cls(
            name, industry_dataset["train"], call_vllm
        )
        # "test" (if any) keeps the original tabular data.
        industry_dataset.save_to_disk(sv.industry_dataset)
    else:
        logger.info(
            f"{sv.industry_dataset} 已存在，跳过大模型筛选{sv.chinese_name}企业。"
        )
        industry_dataset = load_from_disk(sv.industry_dataset)

    # Build (or load) the BERT binary-classification training set.
    if not os.path.exists(sv.bert_config.bert_binary_train):
        bert_binary_dataset = build_binary_dataset(name, industry_dataset["train"])
        bert_binary_dataset.save_to_disk(sv.bert_config.bert_binary_train)
    else:
        bert_binary_dataset = load_from_disk(sv.bert_config.bert_binary_train)
        logger.info(
            f"{sv.bert_config.bert_binary_train}, bert 企业二分类数据集已存在，不再重新生成"
        )

    # A BERT binary model is needed exactly when a "test" split exists,
    # i.e. the LLM labeled only a sample rather than the whole dataset.
    # BUGFIX: the original guard was
    #     len(industry_dataset) == llm_binary_threshold
    # but industry_dataset is a DatasetDict, so len() counts splits (1 or
    # 2) and the comparison with 1e5 was always False — this whole stage
    # was unreachable dead code.
    if "test" in industry_dataset:
        best_model = os.path.join(sv.bert_config.output_binary_dir, "best_model")
        model_name = "bert-base-chinese"

        # Train the BERT binary classifier unless a checkpoint exists.
        if not os.path.exists(best_model):
            logger.info(f"开始训练 {sv.chinese_name} 二分类模型")
            model = AutoModelForSequenceClassification.from_pretrained(model_name)
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            bert_binary_tokenize_dataset = bert_binary_dataset.map(
                tokenize_func(tokenizer=tokenizer), batched=True
            )

            bert_cls = BertCLS(
                model,
                tokenizer,
                bert_binary_tokenize_dataset,
                output_dir=sv.bert_config.output_binary_dir,
            )
            bert_cls.train(5)
        else:
            logger.info(
                f"{sv.bert_config.output_binary_dir}, 二分类模型已存在，不再重新训练"
            )
            model = AutoModelForSequenceClassification.from_pretrained(best_model)
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            bert_cls = BertCLS(
                model,
                tokenizer,
            )

            logger.info(
                f"{sv.bert_config.output_binary_dir} 已存在，跳过bert二分类训练{sv.chinese_name}企业。"
            )

        # Predict binary labels for the rows the LLM never saw.
        pred_binary_csv = os.path.join(sv.home_folder, "bert_binary_pred.csv")
        if not os.path.exists(pred_binary_csv):
            pred_dataset = load_from_disk(sv.industry_dataset)["test"]
            pred_tokenize_dataset = pred_dataset.map(tokenize_func(tokenizer=tokenizer))
            label_ids = bert_cls.predict(pred_tokenize_dataset)
            pred_dataset = pred_dataset.add_column("binary_label", label_ids)
            pred_dataset.to_csv(pred_binary_csv)
        logger.info(f"{sv.chinese_name} 企业数据二分类已经预测完毕！")

        wandb.finish()

    # LLM multi-class labeling of the positive companies.
    if not os.path.exists(sv.bert_config.bert_multi_train):
        logger.info("开始使用大模型预测多分类")

        # Merge LLM-labeled and BERT-predicted positive samples.
        bert_multi_dataset = merge_binary_dataset(name)
        # TODO: what if there are fewer than llm_multi_threshold rows?
        # Currently a BERT model would still be trained for that case.
        if len(bert_multi_dataset) > llm_multi_threshold:
            bert_multi_dataset = bert_multi_dataset.train_test_split(
                train_size=llm_multi_threshold
            )
        else:
            _bert_multi_dataset = dataset_dict.DatasetDict()
            _bert_multi_dataset["train"] = bert_multi_dataset
            bert_multi_dataset = _bert_multi_dataset

        bert_multi_dataset["train"] = run_multi_cls(
            name, bert_multi_dataset["train"], call_vllm
        )
        bert_multi_dataset["train"] = bert_multi_dataset["train"].map(
            trans_multi_cls_dataset(name)
        )
        bert_multi_dataset.save_to_disk(sv.bert_config.bert_multi_train)
    else:
        bert_multi_dataset = load_from_disk(sv.bert_config.bert_multi_train)

    # BERT multi-class stage: needed exactly when a "test" split exists.
    # BUGFIX: same dead-code guard as above — the original compared
    # len(DatasetDict) (1 or 2) against llm_multi_threshold (5e4).
    if "test" in bert_multi_dataset:
        logger.info("开始进入bert训练多分类")
        new_dataset = bert_cls_trans(name, bert_multi_dataset["train"])

        multi_best_model = os.path.join(sv.bert_config.output_multi_dir, "best_model")
        if not os.path.exists(multi_best_model):
            model = AutoModelForSequenceClassification.from_pretrained(
                sv.bert_config.model_name, num_labels=len(sv.LABEL_NAME)
            )
            tokenizer = AutoTokenizer.from_pretrained(sv.bert_config.model_name)
            bert_cls = BertCLS(
                model=model,
                tokenizer=tokenizer,
                train_dataset=new_dataset,
                output_dir=sv.bert_config.output_multi_dir,
            )
            bert_cls.train(5)
        else:
            model = AutoModelForSequenceClassification.from_pretrained(
                multi_best_model, num_labels=len(sv.LABEL_NAME)
            )
            tokenizer = AutoTokenizer.from_pretrained(sv.bert_config.model_name)
            bert_cls = BertCLS(model=model, tokenizer=tokenizer)

        logger.info("bert 多分类预测开始")
        pred_multi_csv = os.path.join(sv.home_folder, "bert_multi_pred.csv")
        if not os.path.exists(pred_multi_csv):
            pred_dataset = bert_multi_dataset["test"]
            logger.info("get_industry_trans_func...")
            pred_dataset = pred_dataset.map(
                get_industry_trans_func("industry_info", "{industry_info}")
            )
            logger.info("bert_tokenize_func...")
            pred_dataset = pred_dataset.map(bert_tokenize_func(tokenizer), batched=True)
            pred_ids = bert_cls.predict(pred_dataset)
            pred_dataset = pred_dataset.add_column("pred_label", pred_ids)
            pred_dataset.to_csv(pred_multi_csv, index=False)

    # Final human-review export for this industry.
    export_human_csv_file = os.path.join(sv.home_folder, f"{sv.chinese_name}.csv")
    if not os.path.exists(export_human_csv_file):
        export_human_csv(name=name, output_file=export_human_csv_file)


if __name__ == "__main__":
    # Entry point: run the pipeline for one industry by name.
    # Alternative invocations are kept below as commented examples
    # (single industry, or batching over every module in `folder`).
    # run("hydrogen")
    # for file in os.listdir(folder):
    #     if file.endswith(".py"):
    #         name = file.split(".")[0]
    #         run(name)

    # Typical background invocation:
    # nohup python pipeline.py > pipe.log 2>&1 &

    # run("next_generation_display")
    run("intelligent_terminal")
    # nohup python pipeline.py > intelligent_terminal.log 2>&1 &
