import requests
import json
from pathlib import Path
import pandas as pd
import random
import time
import os

# --- Global state ---
CONFIG = {}  # populated by load_configuration() before anything else runs

# SECURITY: an API key was hard-coded here. Prefer the DASHSCOPE_API_KEY
# environment variable so the secret stays out of source control; the original
# literal is kept only as a backward-compatible fallback and should be rotated.
API_KEY = os.environ.get('DASHSCOPE_API_KEY', 'sk-c2842e064a3045a2a4130e0366beea9c')


# --- Configuration loading ---
def load_configuration(config_filename="config_annotation.json"):
    """Load a JSON config file located next to this script into global CONFIG.

    Args:
        config_filename: file name of the JSON configuration, resolved
            relative to this script's directory.

    Returns:
        True on success (CONFIG is replaced with the parsed dict),
        False on any failure (an error message is printed).
    """
    global CONFIG
    # Resolve the path BEFORE the try block: previously config_path was
    # assigned inside try, so a failure on that line would leave it unbound
    # and the except handlers below would raise NameError when printing it.
    config_path = Path(__file__).resolve().parent / config_filename
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            CONFIG = json.load(f)
        print(f"✅ 配置文件 '{config_filename}' 加载成功。")
        return True

    except FileNotFoundError:
        print(f"错误：找不到配置文件 {config_path}")
    except json.JSONDecodeError:
        print(f"错误：配置文件 {config_path} 格式有误。")
    except Exception as e:
        print(f"加载配置文件 {config_path} 时出错: {e}")
    return False


# --- Aliyun API call (DashScope-style text generation) ---
def call_aliyun_annotation_api(system_prompt: str, user_prompt: str) -> dict:
    """Send one system/user prompt pair to the DashScope endpoint from CONFIG.

    Reads host, path, model id and default parameters from
    CONFIG['api_settings'] and the timeout from CONFIG['script_parameters'].

    Returns:
        dict with keys:
          "text"       - model output, or an error string on failure
          "usage"      - input/output/total token counts (may be empty on error)
          "request_id" - DashScope request id, or "" on error
        Never raises for network problems: timeouts and request failures are
        reported via the returned "text" field.
    """
    global CONFIG
    api_conf = CONFIG['api_settings']

    # Build the request URL (DashScope style: host + path template)
    url = f"https://{api_conf['api_host']}{api_conf['api_path_template']}"

    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {API_KEY}'
    }

    payload = {
        "model": api_conf['model_id_for_payload'],  # e.g. "qwen-max"
        "input": {
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ]
        },
        "parameters": api_conf.get("default_model_params", {})
    }

    try:
        response = requests.post(
            url,
            headers=headers,
            json=payload,
            timeout=CONFIG['script_parameters'].get('api_timeout_seconds', 120)
        )
        response.raise_for_status()  # raise on HTTP 4xx/5xx status codes
        response_data = response.json()

        # Parse the DashScope response envelope
        text_output = response_data.get("output", {}).get("text", "")
        usage_data = response_data.get("usage", {})

        return {
            "text": text_output,
            "usage": {  # keep a usage structure similar to the reference script
                "input_tokens": usage_data.get("input_tokens"),
                "output_tokens": usage_data.get("output_tokens"),
                "total_tokens": usage_data.get("input_tokens", 0) + usage_data.get("output_tokens", 0)
                # DashScope may not report the total directly, so sum it here
            },
            "request_id": response_data.get("request_id", "")
        }
    except requests.exceptions.Timeout:
        print("    -> 错误：API 请求超时。")
        return {"text": "错误：请求超时", "usage": {}, "request_id": ""}
    except requests.exceptions.RequestException as e:
        error_message = f"错误：API 请求失败: {e}"
        if e.response is not None:
            try:
                error_detail = e.response.json()
                error_message += f" | 响应: {error_detail}"
            except json.JSONDecodeError:
                error_message += f" | 响应文本: {e.response.text}"
        print(f"    -> {error_message}")
        return {"text": f"错误：{str(e)}", "usage": {}, "request_id": ""}


# --- Excel data loading ---
def load_excel_data_for_annotation(config_excel):
    """Load the worksheets named in config_excel["sheets_to_process"] from the
    input workbook (config_excel["input_excel_file"]) next to this script.

    Args:
        config_excel: the "excel_settings" section of the loaded config.

    Returns:
        dict mapping sheet name -> DataFrame on success, or None on failure
        (an error message is printed).
    """
    # Resolve the path BEFORE the try block: previously file_path was assigned
    # inside try, so a KeyError on the config lookup left it unbound and the
    # generic except handler crashed with NameError while printing it.
    file_path = Path(__file__).resolve().parent / config_excel["input_excel_file"]
    try:
        # Read all requested worksheets in a single call
        all_sheets_data = pd.read_excel(file_path, sheet_name=config_excel["sheets_to_process"])
        print(f"✅ 成功从 '{file_path}' 加载工作表: {', '.join(all_sheets_data.keys())}")
        return all_sheets_data
    except FileNotFoundError:
        print(f"错误：找不到Excel文件 {file_path}")
    except Exception as e:
        print(f"加载Excel文件 {file_path} 时出错: {e}")
    return None


# --- Persistence helper ---
def save_data_to_excel(data_list, output_file_config_key, chunk_save=False):
    """Write collected rows to the Excel file named in CONFIG["excel_settings"].

    Args:
        data_list: list of row dicts to persist; nothing happens if empty.
        output_file_config_key: key inside CONFIG["excel_settings"] holding
            the target file name.
        chunk_save: when True and the file already exists, append to it
            (falling back to a full overwrite if the append fails).
    """
    if not data_list:
        print("没有数据需要保存。")
        return

    target_name = CONFIG["excel_settings"][output_file_config_key]
    target_path = Path(__file__).resolve().parent / target_name
    frame = pd.DataFrame(data_list)

    if chunk_save and target_path.exists():
        try:
            previous = pd.read_excel(target_path)
            merged = pd.concat([previous, frame], ignore_index=True)
            merged.to_excel(target_path, index=False, engine='openpyxl')
            print(f"已追加 {len(data_list)} 条数据到 {target_name}")
        except Exception as e:
            # Appending failed (e.g. corrupt file) — fall back to overwriting
            print(f"追加到 Excel 文件 {target_name} 时出错: {e}. 尝试覆盖写入...")
            frame.to_excel(target_path, index=False, engine='openpyxl')
            print(f"已将 {len(data_list)} 条数据覆盖写入到 {target_name}")
        return

    frame.to_excel(target_path, index=False, engine='openpyxl')
    action = "创建并写入" if chunk_save else "成功将"
    print(f"✅ {action} {len(data_list)} 条数据保存到 {target_name}")


if __name__ == "__main__":
    # Entry point: load config, read the input workbook, classify each row's
    # text via the Aliyun API, and save results to Excel in periodic chunks.
    if not load_configuration():  # make sure the filename matches your saved config
        exit()

    # Pull the parameter groups out of the loaded configuration
    excel_config = CONFIG["excel_settings"]
    prompt_config = CONFIG["prompt_settings"]
    script_params = CONFIG["script_parameters"]

    system_prompt = prompt_config["system_prompt_content"]
    user_prompt_template = prompt_config["user_prompt_template_for_annotation"]

    # Load the Excel data (dict of sheet name -> DataFrame)
    sheets_data_map = load_excel_data_for_annotation(excel_config)
    if not sheets_data_map:
        print("错误：无法加载Excel数据，程序退出。")
        exit()

    collected_data_chunk = []
    total_annotated_count = 0
    total_api_requests = 0

    annotated_data_key = excel_config.get("annotated_data_key", "分类结果")

    print("\n--- 开始进行专利分类 (调用阿里云API) ---")

    for sheet_name, df in sheets_data_map.items():
        print(f"\n--- 正在处理工作表: '{sheet_name}' ({len(df)} 行) ---")
        if df.empty:
            print("    工作表为空，跳过。")
            continue

        # Column reference (name or positional index) to annotate, from config
        col_to_annotate_ref = excel_config["column_to_annotate_name_or_index"]
        texts_to_annotate_series = None
        actual_column_name = ""  # resolved name of the column actually annotated

        # Resolve the column to annotate
        if isinstance(col_to_annotate_ref, str):  # a column name was supplied
            if col_to_annotate_ref not in df.columns:
                print(
                    f"    错误：列名 '{col_to_annotate_ref}' 在工作表 '{sheet_name}' 中不存在。可用列: {list(df.columns)}。跳过此工作表。")
                continue
            texts_to_annotate_series = df[col_to_annotate_ref]
            actual_column_name = col_to_annotate_ref
        elif isinstance(col_to_annotate_ref, int):  # a column index was supplied
            if col_to_annotate_ref >= len(df.columns):
                print(
                    f"    错误：列索引 {col_to_annotate_ref} 超出工作表 '{sheet_name}' 的范围 (共 {len(df.columns)} 列)。跳过此工作表。")
                continue
            texts_to_annotate_series = df.iloc[:, col_to_annotate_ref]
            actual_column_name = df.columns[col_to_annotate_ref]
        else:
            print(
                f"    错误：配置文件中的 'column_to_annotate_name_or_index' ('{col_to_annotate_ref}') 无效。跳过此工作表。")
            continue

        print(f"    将对列 '{actual_column_name}' 的混合文本内容进行分类。")

        # Iterate over the series of texts to annotate
        for index, combined_text_data in texts_to_annotate_series.items():
            original_excel_row_num = index + 2  # approximate Excel row (assumes row 1 is the header)
            print(
                f"    [请求 {total_api_requests + 1}] 处理 '{sheet_name}' - Excel行号约 {original_excel_row_num} - 内容: '{str(combined_text_data)[:50]}...'")

            full_user_prompt = ""
            api_response = {}  # initialized so empty inputs that skip the API call still have a value

            if pd.isna(combined_text_data) or str(combined_text_data).strip() == "":
                print("        -> 内容为空，跳过分类。")
                classification_result_text = "错误：输入为空"  # or any other marker you prefer to record
                full_user_prompt = "N/A (输入为空)"
            else:
                full_user_prompt = user_prompt_template.format(patent_text_combined=str(combined_text_data))
                total_api_requests += 1
                api_response = call_aliyun_annotation_api(system_prompt, full_user_prompt)
                classification_result_text = api_response.get("text", "错误：未能获取分类结果")

            usage_info = api_response.get("usage", {})
            request_id_val = api_response.get("request_id", "")

            collected_data_chunk.append({
                "工作表名": sheet_name,
                "Excel近似行号": original_excel_row_num,
                "来源列名": actual_column_name,
                "原始文本（标题+摘要）": str(combined_text_data),
                annotated_data_key: classification_result_text,
                "System提示词": system_prompt,
                "User提示词（含文本）": full_user_prompt,
                "API请求ID": request_id_val,
                "输入Tokens": usage_info.get("input_tokens"),
                "输出Tokens": usage_info.get("output_tokens"),
                "总Tokens": usage_info.get("total_tokens"),
                "分类时间": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
            })
            total_annotated_count += 1  # empty rows still count as processed
            print(
                f"        -> 分类结果: '{classification_result_text[:50]}...'，当前批次 {len(collected_data_chunk)} 条。")

            # Flush the chunk to disk every save_interval rows
            if len(collected_data_chunk) >= script_params["save_interval"]:
                save_data_to_excel(collected_data_chunk, "output_excel_file", chunk_save=True)
                collected_data_chunk = []

            # Random delay between requests to stay under API rate limits
            time.sleep(random.uniform(
                script_params["request_delay_min_seconds"],
                script_params["request_delay_max_seconds"]
            ))
        print(f"    --- 工作表 '{sheet_name}' 处理完毕 ---")

    # Save any remaining rows and print a final summary
    print("\n--- 所有工作表处理完成 ---")
    if collected_data_chunk:
        print(f"正在保存最后剩余的 {len(collected_data_chunk)} 条数据...")
        save_data_to_excel(collected_data_chunk, "output_excel_file", chunk_save=True)
    else:
        print("没有剩余数据需要保存。")

    output_filename = CONFIG['excel_settings']['output_excel_file']
    print(f"\n总计处理了 {total_annotated_count} 行数据，发起了 {total_api_requests} 次API请求。")
    print(f"✅ 所有分类数据已尝试保存到 Excel 文件中 '{output_filename}'。")
