# Pipeline notes:
# - optional uuid column handling
# - split out the attribute columns
# - build the LLM classification prompt
# - produce the reason and the label
import os
import time
import pandas as pd
import shutil
from tqdm import tqdm
from datasets import load_dataset, Dataset, load_from_disk
from langchain_openai import ChatOpenAI
from datetime import datetime

from process import process_data


def _load_or_resume_dataset(file, dataset_folder, prompt_reason_template, uuid):
    """Reload a previously saved run, or build a fresh dataset from the CSV."""
    if os.path.exists(dataset_folder):
        # A prior (possibly partial) run exists: resume from it.
        return load_from_disk(dataset_folder)

    col_name = [
        "企业名称",
        "经营范围",
        "一级行业分类",
        "二级行业分类",
        "三级行业分类",
    ]
    if uuid:
        col_name.append("uuid")

    df = pd.read_csv(file, usecols=col_name)
    dataset = Dataset.from_pandas(df)
    # process_data is expected to add the "industry_info" prompt column.
    return process_data(dataset, prompt_reason_template)


def llm_run(file, prompt_reason_template, dataset_folder, uuid=False, max_errors=3):
    """Classify companies in a CSV with an LLM, resumably.

    Builds (or resumes) a HuggingFace dataset from ``file``, sends each
    row's prompt (column ``industry_info``) to the model, stores answers in
    an ``infer`` column, and persists the result to ``dataset_folder``.
    Rows that already have an answer are skipped, so re-running after a
    crash or network failure resumes where the previous run stopped.

    Args:
        file: Path to the input CSV.
        prompt_reason_template: Prompt template forwarded to ``process_data``.
        dataset_folder: Directory where the dataset is persisted; if it
            already exists, work resumes from it instead of re-reading the CSV.
        uuid: If True, also read the ``uuid`` column from the CSV.
        max_errors: Stop calling the API after this many failures (circuit
            breaker); remaining rows keep an empty answer.

    Example::

        prompt_reason_template = '你是氢能企业筛选专家，专注于氢能企业的识别分类。label: ["是", "否"]'
        llm_run("companies.csv", prompt_reason_template, "companies")
        # typically launched as:
        # nohup python pipeline_api.py > pipeline_api.log 2>&1 &
    """
    dataset = _load_or_resume_dataset(file, dataset_folder, prompt_reason_template, uuid)

    # The LLM can drop the connection or error out mid-run, so answers are
    # accumulated in an "infer" column that lets a later run skip finished rows.
    if "infer" not in dataset.column_names:
        dataset = dataset.add_column("infer", ["" for _ in range(len(dataset))])

    # SECURITY NOTE(review): the API key used to be hard-coded here. Prefer the
    # environment so the secret stays out of version control; the literal
    # fallback preserves the original behaviour but the key should be rotated.
    llm = ChatOpenAI(
        model="gpt-4o-mini",
        base_url="https://api.chatfire.cn/v1",
        api_key=os.environ.get(
            "CHATFIRE_API_KEY",
            "sk-uzqOOLE2fCDUwYEv260fE0DdCa91484090D20291DfB11e99",
        ),
    )

    err = 0
    infer_list = []

    for item in tqdm(dataset):
        infer = item["infer"]

        # Skip rows already answered; once max_errors failures have occurred,
        # stop hitting the API and carry the (empty) answers through unchanged.
        if infer or err >= max_errors:
            infer_list.append(infer)
            continue

        try:
            infer = llm.invoke(item["industry_info"]).content
            # time.sleep(0.2)  # optional rate limiting
        except Exception as e:  # one bad row must not kill the whole run
            print(e)
            err += 1
            infer = ""

        infer_list.append(infer)

    # datasets columns cannot be mutated in place: drop and re-add wholesale.
    dataset = dataset.remove_columns("infer")
    dataset = dataset.add_column("infer", infer_list)

    # Save under a unique timestamp first, then swap it into place, so an
    # interruption during save_to_disk cannot corrupt the existing checkpoint.
    tmp_name = datetime.now().strftime("%Y%m%d%H%M%S%f")
    dataset.save_to_disk(tmp_name)
    if os.path.exists(dataset_folder):
        shutil.rmtree(dataset_folder)
    os.rename(tmp_name, dataset_folder)


"""
案例：
    prompt_reason_template = '你是氢能企业筛选专家，专注于氢能企业的识别分类。label: ["是", "否"]'

    # print(prompt_reason_template)

    file = "/home/jie/gitee/pku_industry/hydrogen/data_clean/浙江_氢能企业名单.csv"
    dataset_dir = "/home/jie/gitee/pku_industry/haitao/hydrogen_zhejiang/dataset"

    # llm_run(file, prompt_reason_template, dataset_dir)

    from parse import binary_result_parse

    dataset = load_from_disk(dataset_dir)
    binary_result_parse(
        dataset,
        "/home/jie/gitee/pku_industry/haitao/hydrogen_zhejiang/浙江氢能企业.csv",
        True,
    )

"""

if __name__ == "__main__":
    # kws = ["半导体", "集成电路", "芯片", "电子元器件", "设备和材料", "微电子"]
    # for kw in kws:
    #     file = "/home/jie/github/LLM/API/openai_examples/industry_dataset/data/csvs/"
    #     file = file.format(kw=kw)
    #     dataset_folder = file.split(".")[0]
    #     run(file, dataset_folder)

    pass
