import requests
import json
from pathlib import Path
import pandas as pd
import random
import time
import os

# --- Global state, populated by load_configuration() below ---
CONFIG = {}
API_KEY = None  # resolved from an environment variable once the config is loaded


# --- Configuration loading ---
def load_configuration(config_filename="config.json"):
    """Load a JSON config file from this script's directory into the CONFIG
    global, then resolve API_KEY from the environment variable whose *name*
    is given by the config's api_settings.api_key_env_var entry (so the
    secret itself never lives in the JSON file).

    Exits the process with a nonzero status (SystemExit) on any failure:
    missing file, malformed JSON, missing 'api_key_env_var' entry, or an
    unset environment variable.
    """
    global CONFIG, API_KEY
    # Resolve the path *before* the try block: previously it was assigned
    # inside, so an early failure could leave `config_path` unbound and turn
    # the except handlers' f-strings into a NameError.
    script_dir = Path(__file__).resolve().parent
    config_path = script_dir / config_filename
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            CONFIG = json.load(f)
        print(f"✅ 配置文件 '{config_filename}' 加载成功。")

        # The config names which env var holds the key; read it from there.
        api_key_env_var = CONFIG.get("api_settings", {}).get("api_key_env_var")
        if not api_key_env_var:
            print("错误：配置文件中未指定 'api_key_env_var'。")
            raise SystemExit(1)  # nonzero status so failures are detectable

        API_KEY = os.getenv(api_key_env_var)
        if not API_KEY:
            print(f"错误：找不到 API Key。请设置 '{api_key_env_var}' 环境变量。")
            raise SystemExit(1)

    except FileNotFoundError:
        print(f"错误：找不到配置文件 {config_path}")
        raise SystemExit(1)
    except json.JSONDecodeError:
        print(f"错误：配置文件 {config_path} 格式有误。")
        raise SystemExit(1)
    except Exception as e:
        # SystemExit is not an Exception subclass, so the exits above are
        # not swallowed here.
        print(f"加载配置文件 {config_path} 时出错: {e}")
        raise SystemExit(1)


# --- API call (uses the globally loaded API_KEY and settings from CONFIG) ---
def call_aliyun_ai_search_api(system_prompt: str, user_prompt: str) -> dict:
    """POST one chat-style request to the Aliyun AI-Search text-generation
    endpoint and return a flattened dict with the generated text, web-search
    results, token usage, and request id.

    Raises requests.HTTPError on a non-2xx response (via raise_for_status).
    """
    settings = CONFIG['api_settings']
    endpoint = (
        f"https://{settings['api_host']}/v3/openapi/workspaces/"
        f"{settings['workspace_name']}/text-generation/{settings['service_id']}"
    )
    request_body = {
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        "enable_search": True,
        "stream": False,
        "parameters": settings.get("default_model_params", {}),
    }
    resp = requests.post(
        endpoint,
        headers={
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {API_KEY}',  # set by load_configuration()
        },
        json=request_body,
        timeout=120,
    )
    resp.raise_for_status()
    body = resp.json()
    result = body.get("result", {})
    usage = body.get("usage", {})
    return {
        "text": result.get("text", ""),
        "search_results": result.get("search_results", []),
        "usage": {
            "input_tokens": usage.get("input_tokens"),
            # NOTE(review): falls back to the misspelled "autput_tokens" key —
            # presumably a workaround for a typo in some API responses; confirm
            # against the service's actual payloads before removing.
            "output_tokens": usage.get("output_tokens", usage.get("autput_tokens")),
            "total_tokens": usage.get("total_tokens"),
        },
        "request_id": body.get("request_id", ""),
    }


# --- File loading helpers (paths are resolved relative to this script) ---
def load_keywords(file_path_str):
    """Load a keyword list from a text file (one keyword per line; blank
    lines are skipped) located relative to this script's directory.

    The parameter is a plain string because it comes from the config file.
    Returns the list of keywords, or [] on any error.
    """
    # Resolve the path before the try block: previously `path` was assigned
    # inside it, so a failure before that assignment made the except
    # handlers raise NameError instead of printing their message.
    path = Path(__file__).resolve().parent / file_path_str
    try:
        with open(path, 'r', encoding='utf-8') as f:
            keywords = [line.strip() for line in f if line.strip()]
        print(f"成功加载 {len(keywords)} 个关键词从 {path}")
        return keywords
    except FileNotFoundError:
        print(f"错误：找不到关键词文件 {path}")
        return []
    except Exception as e:
        print(f"加载关键词文件 {path} 时出错: {e}")
        return []


def load_templates(file_path_str):
    """Load a list of prompt templates from a text file (one template per
    line; blank lines are skipped) located relative to this script's
    directory.

    The parameter is a plain string because it comes from the config file.
    Returns the list of templates, or [] on any error.
    """
    # Resolve the path before the try block: previously `path` was assigned
    # inside it, so a failure before that assignment made the except
    # handlers raise NameError instead of printing their message.
    path = Path(__file__).resolve().parent / file_path_str
    try:
        with open(path, 'r', encoding='utf-8') as f:
            templates = [line.strip() for line in f if line.strip()]
        print(f"成功加载 {len(templates)} 个提示模板从 {path}")
        return templates
    except FileNotFoundError:
        print(f"错误：找不到提示模板文件 {path}")
        return []
    except Exception as e:
        print(f"加载提示模板文件 {path} 时出错: {e}")
        return []


# --- Saving (output file name is supplied by the caller, from CONFIG) ---
def save_data_to_excel(data_list, file_name, chunk_save=False):
    """Write `data_list` (a list of row dicts) to an Excel file that lives
    next to this script.

    With chunk_save=True and an existing file, the new rows are appended to
    the file's previous contents; if the append fails, the file is
    overwritten with just the new rows. Otherwise the file is (re)written
    from scratch. An empty `data_list` is a no-op.
    """
    if not data_list:
        print("没有数据需要保存。")
        return

    frame = pd.DataFrame(data_list)
    # Anchor the output path to the script's own directory.
    target = Path(__file__).resolve().parent / file_name

    if not (chunk_save and target.exists()):
        # Fresh write: either not in chunked mode, or no file exists yet.
        frame.to_excel(target, index=False, engine='openpyxl')
        if chunk_save:
            print(f"文件 {file_name} 不存在，已创建并写入 {len(data_list)} 条数据。")
        else:
            print(f"✅ 已成功将 {len(data_list)} 条数据保存到 {file_name}")
        return

    try:
        # Append: re-read the existing sheet and concatenate the new rows.
        merged = pd.concat([pd.read_excel(target), frame], ignore_index=True)
        merged.to_excel(target, index=False, engine='openpyxl')
        print(f"已追加 {len(data_list)} 条数据到 {file_name}")
    except Exception as e:
        # Best-effort fallback: keep at least the current batch.
        print(f"追加到 Excel 文件 {file_name} 时出错: {e}. 尝试覆盖写入...")
        frame.to_excel(target, index=False, engine='openpyxl')
        print(f"已将 {len(data_list)} 条数据覆盖写入到 {file_name}")


# --- Main logic ---
if __name__ == "__main__":
    # Load configuration first: populates the CONFIG and API_KEY globals.
    load_configuration()

    # Pull run parameters out of the loaded CONFIG (a KeyError here means
    # the config file is missing a required section).
    system_prompt_content = CONFIG["prompt_settings"]["system_prompt_content"]
    keyword_files_config = CONFIG["file_settings"]["keyword_files"]
    template_file_config = CONFIG["prompt_settings"]["template_file"]
    output_excel_file_config = CONFIG["file_settings"]["output_excel_file"]
    save_interval_config = CONFIG["script_parameters"]["save_interval"]
    delay_min = CONFIG["script_parameters"]["request_delay_min_seconds"]
    delay_max = CONFIG["script_parameters"]["request_delay_max_seconds"]

    # Load the prompt templates; abort with nonzero status when none load.
    search_templates = load_templates(template_file_config)
    if not search_templates:
        print("错误：无法加载提示模板，程序退出。")
        raise SystemExit(1)

    # Load each category's keyword file; a failed category is skipped, not fatal.
    categories_keywords = {}
    for category, kw_filename in keyword_files_config.items():
        keywords = load_keywords(kw_filename)  # kw_filename comes from the config
        if keywords:
            categories_keywords[category] = keywords
        else:
            print(f"警告：类别 '{category}' 的关键词文件 '{kw_filename}' 加载失败或为空，将跳过此类别。")

    if not categories_keywords:
        print("错误：所有类别的关键词均未成功加载，程序退出。")
        raise SystemExit(1)

    collected_data_chunk = []   # rows buffered until the next chunked save
    total_processed_count = 0   # requests that yielded non-empty text
    total_requests_count = 0    # every API request attempted

    print("\n--- 开始抓取语料 (使用配置文件和直接 API 调用) ---")
    for category, keywords in categories_keywords.items():
        print(f"\n--- 处理类别: {category} ({len(keywords)} 个关键词) ---")
        for keyword in keywords:
            for template in search_templates:
                total_requests_count += 1
                try:
                    # Build the user prompt *inside* the try: a malformed
                    # template (stray or unknown {} placeholders raising
                    # KeyError/IndexError/ValueError from .format) is now
                    # logged and skipped instead of crashing the whole run.
                    user_input_content = template.format(query=f"【{category} - {keyword}】")
                    print(
                        f"[{total_requests_count}] 正在请求: 类别='{category}', 关键词='{keyword}', User Prompt='{user_input_content[:50]}...'")
                    api_response_dict = call_aliyun_ai_search_api(
                        system_prompt=system_prompt_content,
                        user_prompt=user_input_content
                    )
                    response_text = api_response_dict.get("text", "").strip()
                    search_results_list = api_response_dict.get("search_results", [])
                    usage_info = api_response_dict.get("usage", {})
                    if response_text:
                        search_results_str = json.dumps(search_results_list,
                                                        ensure_ascii=False) if search_results_list else ""
                        collected_data_chunk.append({
                            "类别": category,
                            "训练语料": response_text,
                            "关键词": keyword,
                            "系统提示词": system_prompt_content,
                            "提示词": user_input_content,
                            "联网搜索结果": search_results_str,
                            "输入Tokens": usage_info.get("input_tokens"),
                            "输出Tokens": usage_info.get("output_tokens"),
                            "总Tokens": usage_info.get("total_tokens"),
                            "生成时间": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                        })
                        total_processed_count += 1
                        print(f"    -> 成功获取内容，当前批次 {len(collected_data_chunk)} 条。")
                        # Flush the buffer to Excel every save_interval rows
                        # so a crash loses at most one batch.
                        if len(collected_data_chunk) >= save_interval_config:
                            save_data_to_excel(collected_data_chunk, output_excel_file_config, chunk_save=True)
                            collected_data_chunk = []
                    else:
                        print("    -> 警告：API 返回文本内容为空。")
                except Exception as e:
                    print(f"    -> 错误：处理 API 请求时发生异常: {e}")
                    # requests exceptions carry the HTTP response; dump its body.
                    if hasattr(e, 'response') and e.response is not None:
                        try:
                            print(f"       错误响应体: {e.response.json()}")
                        except json.JSONDecodeError:
                            print(f"       错误响应体 (非 JSON): {e.response.text}")
                # Randomized inter-request delay within the configured bounds.
                time.sleep(random.uniform(delay_min, delay_max))

    print("\n--- 数据抓取完成 ---")
    if collected_data_chunk:
        print(f"正在保存最后剩余的 {len(collected_data_chunk)} 条数据...")
        save_data_to_excel(collected_data_chunk, output_excel_file_config, chunk_save=True)
    else:
        print("没有剩余数据需要保存。")

    print(f"\n总计处理了 {total_processed_count} 条有效数据，发起 {total_requests_count} 次请求。")
    print(f"✅ 所有数据已保存到 Excel 文件中 '{output_excel_file_config}'。")