# 导入所需库
import pandas as pd
import os
import logging
from logging.handlers import RotatingFileHandler
from tunning_base_api import MY_MaaS_2025_HTTP_CAN_USE

# 日志目录和文件
log_dir = os.path.join(os.path.dirname(__file__), "log")
log_file = os.path.join(log_dir, "extract_result.log")
# 自动创建日志目录
os.makedirs(log_dir, exist_ok=True)

# 配置日志，单文件最大10MB，最多保留5个历史文件
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = RotatingFileHandler(
    log_file, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8", mode="a"
)
formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
handler.setFormatter(formatter)
logger.handlers = [handler]


# 定义主函数
def main():
    # 允许输入文件为xlsx或csv
    input_file = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "202509问题词筛选后两列数据 - 副本.csv")
    )
    output_file = os.path.join(os.path.dirname(__file__), "result.csv")
    batch_size = 100  # 每批写入条数
    try:
        ext = os.path.splitext(input_file)[1].lower()
        if ext == ".xlsx":
            df = pd.read_excel(input_file, dtype={"id": str, "content": str})
        elif ext == ".csv":
            df = pd.read_csv(input_file, dtype={"id": str, "content": str})
        else:
            msg = f"不支持的文件格式: {ext}"
            print(msg)
            logging.error(msg)
            return
    except Exception as e:
        msg = f"读取输入文件失败: {e}"
        print(msg)
        logging.error(msg)
        return

    # 输入文件有多少id总数
    total_id_count = len(df["id"])
    print(f"输入文件有多少id总数: {total_id_count}")
    logging.info(f"输入文件有多少id总数: {total_id_count}")

    # 实例化API调用类
    maas = MY_MaaS_2025_HTTP_CAN_USE()

    # 结果列表
    results = []
    total = len(df)
    first_write = True  # 首次写入带表头

    # 遍历每一行，调用invoke
    for idx, row in df.iterrows():
        prompt_id = row["id"]
        prompt_content = row["content"]
        log_msg = f"开始处理ID: {prompt_id}"
        print(log_msg)
        logging.info(log_msg)
        logging.info(prompt_content)
        try:
            result = maas.invoke(prompt_content)
            if result:
                (
                    content,
                    prompt_tokens,
                    completion_tokens,
                    total_tokens,
                    api_id,
                ) = result

                result_text = content
                token_info = (
                    f"Prompt tokens: {prompt_tokens}; "
                    f"Completion tokens: {completion_tokens}; "
                    f"Total tokens: {total_tokens}"
                )
                logging.info(f"ID {prompt_id} 处理成功，API_ID: {api_id}")
            else:
                result_text = "API调用失败或无返回结果。"
                token_info = ""
                api_id = ""
                logging.error(f"ID {prompt_id} 处理失败，无返回结果。")
        except Exception as e:
            result_text = "API调用异常。"
            token_info = ""
            api_id = ""
            logging.error(f"ID {prompt_id} 处理异常: {e}")
        # 保存结果
        results.append(
            {
                "id": prompt_id,
                # "prompt": prompt_content,
                "result": result_text,
                "token_info": token_info,
                "api_id": api_id,
            }
        )

        # 每batch_size条写入一次
        if (idx + 1) % batch_size == 0 or (idx + 1) == total:
            result_df = pd.DataFrame(results)
            try:
                result_df.to_csv(
                    output_file,
                    index=False,
                    header=first_write,
                    mode="w" if first_write else "a",
                    encoding="utf-8-sig",
                )
                msg = f"已写入{idx + 1}条结果到 {output_file}"
                print(msg)
                logging.info(msg)
                first_write = False
                results = []  # 清空本批次结果
            except Exception as e:
                msg = f"写入结果失败: {e}"
                print(msg)
                logging.error(msg)


# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
