# missing_elements_filler.py
import asyncio
import json
import logging # the logging module should be imported up front
import argparse
import datetime
import time # For sleep

from config import Config
from db import DB
from ai_client import AIClientAsync

# Configure root logging: timestamp, level, logger name, and file:line for every record.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(name)s - %(filename)s:%(lineno)d - %(message)s'
)

# Quiet the noisier third-party loggers unless they need detailed debugging.
logging.getLogger("openai").setLevel(logging.WARNING)
logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("httpcore").setLevel(logging.WARNING)
# logging.getLogger("aiomysql").setLevel(logging.WARNING) # could be left at INFO temporarily to observe aiomysql behavior
# --- MODIFICATION: Set aiomysql to INFO to see its logs for connection/query issues ---
logging.getLogger("aiomysql").setLevel(logging.INFO)


# System prompt for the AI five-elements ("wuxing") analysis assistant.
# The text is Chinese and is part of the runtime request payload, so it is
# left verbatim.  It instructs the model to read a piece of analysis text and
# return ONLY a JSON string array of the elements ('金', '木', '水', '火',
# '土') that the text says should be supplemented / are favorable for the
# subject's eight-characters chart — unique entries, empty array when none,
# and absolutely no surrounding prose.
AI_SYSTEM_MESSAGE = """请你扮演一个五行命理分析助手。你的任务是仔细阅读我提供的文本，并识别出文本中明确指出或强烈暗示一个人八字中 **需要补充、对命局有利、或所喜** 的五行元素。

此外，如果文本在讨论一个具体名字的五行构成（例如，某个字属什么五行），并且语境是关于这个名字是否适合某个人的八字，那么请提取这个名字中明确指出的五行元素，因为这些元素是正在被考虑用于补益八字的。

这些元素通常是'金'、'木'、'水'、'火'、'土'。

请你关注文本的整体语义，理解哪些元素是根据命理分析被建议补充的，或者是一个名字中包含的、旨在配合八字的元素。而不是简单罗列文本中提到的所有五行属性。

*   例如，如果文本说“八字喜木火”，那么需要提取的是“木”和“火”。
*   如果文本说“此命局缺水，宜补水”，则提取“水”。
*   **重要补充逻辑：** 即使文本中包含诸如“需要结合个人八字具体分析是否合适”、“具体还需看八字配合”之类的限定性或条件性语句，但如果上下文中明确给出了一个或多个字的五行属性（如“瑾字五行属火，瑜字五行属金”），并且讨论的是这些字（或由这些字组成的名字）与八字的适用性，那么这些被明确指出的五行属性（此例中为“火”、“金”）就应该被提取出来，因为它们是正在被考虑用于配合八字的元素。

输出要求：
1.  仅返回一个JSON格式的字符串数组，例如 `["木", "火"]`。
2.  数组中只包含被识别为需要补充的、或名字中旨在配合八字的五行元素名称（'金', '木', '水', '火', '土'）。
3.  如果根据语义判断没有明确需要补充或适合的元素，则返回一个空数组 `[]`。
4.  数组中的元素应保持唯一，即使原文中多次提及。
5.  **绝对不要**返回任何数组之外的文字、解释、前导或尾随说明，包括任何形式的确认语。

例子1：
输入文本: "从字的五行属性来看，'李'属木，'逾'属金，'黭'属土。五行搭配较为平衡，对于八字中需要木、金、土的孩子可能较为合适。"
期望输出: `["木", "金", "土"]`

例子2：
输入文本: "姓氏'李'字的五行属性为木，'庭'字五行属火，'柯'字五行属木，木火相生，五行搭配较为和谐，适合八字喜木火的孩子。"
期望输出: `["木", "火"]`

例子3 (针对你的新情况)：
输入文本: "瑾字五行属火，瑜字五行属金，火克金，从五行角度来看，需要结合个人八字具体分析是否合适。"
期望输出: `["火", "金"]`


现在，请分析以下文本：
[这里替换为你要分析的文本]
"""

# Per-record user message; `five_elements_text` is substituted with the text
# extracted from the row's explanation JSON before the request is sent.
AI_USER_PROMPT_TEMPLATE = "现在，请分析以下文本：\n{five_elements_text}"


class MissingElementsFiller:
    """Backfills the `missing_elements` JSON column of the `names` table.

    Rows whose `missing_elements` is an empty JSON array (and whose
    `explanation` is non-trivially populated) are fetched in batches; the
    five-elements analysis text is extracted from each row's `explanation`
    JSON, sent to the AI client, and the extracted element list (or an error
    tag) is written back so the row is not re-selected forever.
    """

    def __init__(self, config: Config, db: DB, ai_client: AIClientAsync):
        # Collaborators are injected; this class owns no connections itself.
        self.config = config
        self.db = db
        self.ai_client = ai_client
        # Caps the number of concurrent AI requests in flight.
        self.semaphore = asyncio.Semaphore(5)
        # Rows fetched from the DB per main-loop iteration.
        self.batch_size = 10
        self.max_records_to_process_for_testing = 20 # You can adjust this value, or set it to None to process all records
        # NOTE(review): reads Semaphore's private `_value` purely for logging.
        logging.info(
            f"MissingElementsFiller initialized with batch_size={self.batch_size}, "
            f"semaphore_limit={self.semaphore._value}, "
            f"max_records_for_testing={self.max_records_to_process_for_testing}"
        )

    async def fill_all_missing_elements(self) -> None:
        """Main driver loop.

        Repeatedly fetches a batch of candidate rows, processes them
        concurrently via `process_name_record`, verifies the updates that
        claimed success, and stops when no rows remain or the optional
        testing cap (`max_records_to_process_for_testing`) is reached.
        """
        logging.info("开始补全 names 表中的 missing_elements 字段...")
        processed_count_total = 0
        loop_count = 0

        while True:
            # Stop once the (optional) testing cap has been reached.
            if self.max_records_to_process_for_testing is not None and \
               processed_count_total >= self.max_records_to_process_for_testing:
                logging.info(f"已达到测试处理上限 ({self.max_records_to_process_for_testing} 条记录)，测试结束。")
                break

            loop_count += 1
            logging.info(f"进入主循环第 {loop_count} 次。已处理 {processed_count_total} 条。")

            # Shrink the batch so the testing cap is never overshot.
            remaining_for_test = self.max_records_to_process_for_testing - processed_count_total \
                                 if self.max_records_to_process_for_testing is not None else self.batch_size
            current_batch_limit = min(self.batch_size, remaining_for_test)

            if current_batch_limit <= 0 and self.max_records_to_process_for_testing is not None:
                 logging.info(f"已达到测试处理上限 ({self.max_records_to_process_for_testing} 条记录)，无需再获取数据，测试结束。")
                 break

            # Candidates: explanation present and not a literal 'null', and
            # missing_elements is an empty JSON array.  Successfully updated
            # rows (or rows tagged with an error string) stop matching.
            query = f"""
                SELECT id, explanation, missing_elements
                FROM names
                WHERE
                    (explanation IS NOT NULL AND explanation <> '' AND explanation <> 'null')
                    AND JSON_TYPE(missing_elements) = 'ARRAY'
                    AND JSON_LENGTH(missing_elements) = 0
                ORDER BY id ASC
                LIMIT %s
            """
            params = (current_batch_limit,)
            logging.debug(f"准备执行数据库查询。SQL:\n{query.strip()}\nParams: {params}")

            if self.db.pool is None:
                logging.error("数据库连接池未初始化！程序中止。")
                break

            try:
                records_to_process = await self.db.fetch_all(query, params)
            except Exception as e:
                # JSON_TYPE/JSON_LENGTH require a JSON-capable MySQL version.
                if "JSON_TYPE" in str(e) or "JSON_LENGTH" in str(e):
                    logging.error(f"数据库查询时发生JSON函数相关错误: {e}. 请确保MySQL版本支持JSON函数。", exc_info=True)
                else:
                    logging.error(f"数据库查询时发生错误: {e}", exc_info=True)
                break

            logging.debug(f"数据库查询返回 {len(records_to_process)} 条记录。")

            if not records_to_process:
                logging.info("数据库查询未返回任何需要处理的记录。")
                if self.max_records_to_process_for_testing is not None and \
                   processed_count_total < self.max_records_to_process_for_testing:
                    logging.info("仍未达到测试上限，但没有符合条件的记录了。程序结束。")
                else:
                    logging.info("程序结束，因为没有更多符合新查询条件的记录（或已达测试上限）。")
                break

            record_ids_in_batch = [r['id'] for r in records_to_process]
            min_id = record_ids_in_batch[0] if record_ids_in_batch else "N/A"
            max_id = record_ids_in_batch[-1] if record_ids_in_batch else "N/A"
            logging.info(f"获取到 {len(records_to_process)} 条记录进行处理。ID范围: 从 {min_id} 到 {max_id}")

            # Fan out: one task per record; concurrency is throttled inside
            # process_name_record by the shared semaphore.
            tasks = []
            for record in records_to_process:
                tasks.append(asyncio.create_task(self.process_name_record(record)))

            successfully_updated_ids_in_batch = []
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # gather preserves task order, so results[i] corresponds to
            # records_to_process[i].
            successful_updates_in_batch = 0
            failed_updates_in_batch = 0
            for i, result in enumerate(results):
                if i < len(records_to_process):
                    record_id_for_log = records_to_process[i]['id']
                    if isinstance(result, Exception):
                        logging.error(f"处理记录 ID {record_id_for_log} 时主任务捕获到严重错误: {result}", exc_info=True)
                        failed_updates_in_batch += 1
                    elif result is True:
                        successful_updates_in_batch += 1
                        successfully_updated_ids_in_batch.append(record_id_for_log)
                    else:
                        # result is an error-tag string from process_name_record.
                        failed_updates_in_batch +=1
                        logging.warning(f"记录 ID {record_id_for_log} 处理未标记为完全成功（AI或数据问题）: {result}")
                else:
                    logging.error(f"结果列表索引 {i} 超出记录列表长度 {len(records_to_process)}。")
                    failed_updates_in_batch += 1

            processed_count_total += len(records_to_process)
            logging.info(
                f"本批次处理完成: {len(records_to_process)} 条记录尝试。 "
                f"初步成功(AI处理+DB命令发送): {successful_updates_in_batch} 条。失败或部分成功: {failed_updates_in_batch} 条。"
                f"累计已处理: {processed_count_total} 条。"
            )

            if successfully_updated_ids_in_batch:
                logging.info(f"开始验证本批次中 {len(successfully_updated_ids_in_batch)} 条声称成功的更新...")
                verification_tasks = []
                for rid in successfully_updated_ids_in_batch:
                    # --- MODIFICATION START: Wrap verify_update_in_db in a try-except for gather ---
                    async def safe_verify_task(record_id_to_verify):
                        try:
                            await self.verify_update_in_db(record_id_to_verify)
                        except Exception as e_verify:
                            logging.error(f"验证任务中捕获到异常 (ID: {record_id_to_verify}): {e_verify}", exc_info=True)
                    verification_tasks.append(asyncio.create_task(safe_verify_task(rid)))
                    # --- MODIFICATION END ---

                if verification_tasks: # Ensure list is not empty before gather
                    await asyncio.gather(*verification_tasks, return_exceptions=True) # return_exceptions for gather too
                logging.info(f"验证任务完成 for {len(successfully_updated_ids_in_batch)} IDs.")

            elif successful_updates_in_batch > 0 :
                 logging.warning("有 successful_updates_in_batch > 0 但 successfully_updated_ids_in_batch 为空，检查逻辑！")

            # Brief pause between batches to avoid hammering DB/AI services.
            await asyncio.sleep(0.5)

        logging.info(f"所有 missing_elements 补全任务完成。总共处理了 {processed_count_total} 条记录。")

    async def process_name_record(self, record: dict):
        """Process one `names` row end to end.

        Parses the row's `explanation` JSON, extracts the five-elements text,
        asks the AI for the favorable element list, and persists the result.

        Returns True on full success, or a descriptive error-tag string when
        something failed; in the failure cases the tag is also written to the
        DB so the row stops matching the empty-array selection query.
        """
        record_id = record['id']
        explanation_str = record['explanation']
        current_missing_elements_str = record.get('missing_elements', "MISSING_COLUMN")

        logging.info(f"记录 ID {record_id}: 开始处理。当前 missing_elements: {current_missing_elements_str}")


        try:
            explanation_json = json.loads(explanation_str)
        except json.JSONDecodeError as e:
            logging.warning(f"记录 ID {record_id}: explanation 字段 JSON 解析失败: {e}.")
            # Tag the row so the selection query no longer matches it.
            await self.update_missing_elements_in_db(record_id, ["error_parsing_explanation"])
            return "error_parsing_explanation"

        if explanation_json is None:
            # json.loads("null") yields None — distinct from a decode error.
            logging.warning(f"记录 ID {record_id}: explanation_str 解析后为 None.")
            await self.update_missing_elements_in_db(record_id, ["error_explanation_parsed_to_none"])
            return "error_explanation_parsed_to_none"

        if not isinstance(explanation_json, dict):
            logging.warning(f"记录 ID {record_id}: explanation_str 解析后不是字典类型 (类型: {type(explanation_json)}).")
            await self.update_missing_elements_in_db(record_id, ["error_explanation_not_a_dict"])
            return "error_explanation_not_a_dict"


        five_elements_analysis = explanation_json.get('five_elements_and_eight_characters_analysis')
        if not five_elements_analysis or not isinstance(five_elements_analysis, dict):
            # No analysis section: record a "checked" tag and treat as handled.
            logging.info(f"记录 ID {record_id}: 未找到 'five_elements_and_eight_characters_analysis' 或其格式不正确.")
            db_update_success = await self.update_missing_elements_in_db(record_id, ["checked_no_five_elements_section"])
            return True if db_update_success else "error_db_update_for_no_section"


        five_elements_text = five_elements_analysis.get('five_elements')
        if not five_elements_text or not isinstance(five_elements_text, str) or not five_elements_text.strip():
            logging.info(f"记录 ID {record_id}: 'five_elements' 文本为空或不存在.")
            db_update_success = await self.update_missing_elements_in_db(record_id, ["checked_empty_five_elements_text"])
            return True if db_update_success else "error_db_update_for_empty_text"

        logging.debug(f"记录 ID {record_id}: 提取到的五行文本: '{five_elements_text[:100]}...'")

        messages = [
            {"role": "system", "content": AI_SYSTEM_MESSAGE},
            {"role": "user", "content": AI_USER_PROMPT_TEMPLATE.format(five_elements_text=five_elements_text)}
        ]

        # The semaphore bounds how many records hit the AI API concurrently.
        async with self.semaphore:
            # NOTE(review): max_retries is 1, so every retry branch below hits
            # its "last attempt" path immediately; raise this to actually retry.
            max_retries = 1
            for attempt in range(max_retries):
                try:
                    ai_response_obj = await self.ai_client.chat(messages)

                    if ai_response_obj is None:
                        logging.warning(f"记录 ID {record_id} (AI尝试 {attempt+1}/{max_retries}): AI 返回 None。")
                        if attempt == max_retries - 1:
                            await self.update_missing_elements_in_db(record_id, ["error_ai_returned_none"]) # Update before returning
                            return "error_ai_returned_none"
                        await asyncio.sleep(0.5)
                        continue

                    if not isinstance(ai_response_obj, list):
                        logging.warning(f"记录 ID {record_id} (AI尝试 {attempt+1}/{max_retries}): AI 未返回预期的列表格式. 类型: {type(ai_response_obj)}, 内容: {str(ai_response_obj)[:100]}...")
                        if attempt == max_retries - 1:
                            await self.update_missing_elements_in_db(record_id, ["error_ai_invalid_format"]) # Update before returning
                            return "error_ai_invalid_format"
                        await asyncio.sleep(0.5)
                        continue

                    # Keep only valid element names, deduplicated while
                    # preserving the AI's order.
                    valid_elements_keywords = ['金', '木', '水', '火', '土']
                    extracted_elements = []
                    for item in ai_response_obj:
                        if isinstance(item, str) and item in valid_elements_keywords:
                            if item not in extracted_elements:
                                extracted_elements.append(item)
                        else:
                            logging.warning(f"记录 ID {record_id}: AI 返回的数组包含无效元素 '{item}'。已忽略。")

                    logging.info(f"记录 ID {record_id}: AI 成功提取 missing_elements 为: {extracted_elements}")
                    db_update_success = await self.update_missing_elements_in_db(record_id, extracted_elements)
                    return True if db_update_success else "error_db_update_after_ai_success"


                except Exception as e:
                    logging.error(f"记录 ID {record_id} (AI调用或后续处理重试 {attempt+1}/{max_retries}): {e}", exc_info=True)
                    if attempt == max_retries - 1:
                        error_tag = f"error_processing_attempt_{type(e).__name__}"
                        await self.update_missing_elements_in_db(record_id, [error_tag]) # Update before returning
                        return error_tag
                    # Linear backoff before the next attempt.
                    await asyncio.sleep(0.5 + attempt * 0.5)
            # Defensive: unreachable while every final attempt returns above.
            return "max_retries_exceeded_for_ai_call"

    async def update_missing_elements_in_db(self, record_id, elements_list: list) -> bool:
        """Write `elements_list` (as a JSON string) into the row's
        `missing_elements` column and refresh `updated_at`.

        Returns True when the UPDATE was sent without raising, False on any
        exception (which is logged, never propagated).
        """
        try:
            # ensure_ascii=False keeps the Chinese element names readable in DB.
            elements_json_str = json.dumps(elements_list, ensure_ascii=False)
            now = datetime.datetime.now()

            sql_update = "UPDATE names SET missing_elements = %s, updated_at = %s WHERE id = %s"
            params_update = (elements_json_str, now, record_id)

            logging.info(f"记录 ID {record_id}: 准备执行更新。SQL: {sql_update}, Params: {params_update}")

            await self.db.execute(sql_update, params_update)
            logging.info(f"记录 ID {record_id}: 数据库更新命令已发送。假定成功（因 autocommit=True 且未抛异常）。")
            return True

        except Exception as e:
            logging.error(f"记录 ID {record_id}: 更新数据库时发生异常: {e}", exc_info=True)
            return False

    async def verify_update_in_db(self, record_id) -> None:
        """Re-read a row after an update and log what the DB now contains.

        Purely diagnostic: logs the stored `missing_elements` and `updated_at`
        values; never raises (exceptions are caught and logged).
        """
        # --- MODIFICATION START: Add entry log and ensure it runs ---
        logging.info(f"VERIFY_TASK: 验证任务开始 for ID {record_id}.")
        if record_id is None:
            logging.warning(f"VERIFY_TASK: record_id is None for ID {record_id}, skipping.")
            return
        await asyncio.sleep(0.1) # Give DB a moment
        # --- MODIFICATION END ---
        try:
            # --- MODIFICATION START: More detailed log for query ---
            logging.info(f"VERIFY_TASK: 准备查询记录 ID {record_id} 的最新数据...")
            # --- MODIFICATION END ---
            query_verify = "SELECT missing_elements, updated_at FROM names WHERE id = %s"
            updated_record = await self.db.fetch_one(query_verify, (record_id,))

            # --- MODIFICATION START: More detailed log for result ---
            if updated_record:
                retrieved_missing_elements = updated_record.get('missing_elements')
                retrieved_updated_at = updated_record.get('updated_at')
                logging.info(
                    f"VERIFY_TASK: 查询到记录 ID {record_id}. "
                    f"DB missing_elements: {retrieved_missing_elements} (Type: {type(retrieved_missing_elements)}), "
                    f"DB updated_at: {retrieved_updated_at}"
                )

                # Attempt to make it more comparable for logging if it's a string
                elements_for_log = retrieved_missing_elements
                if isinstance(retrieved_missing_elements, str):
                    try:
                        elements_for_log = json.loads(retrieved_missing_elements)
                    except json.JSONDecodeError:
                        elements_for_log = f"(Unparsable String in DB): '{retrieved_missing_elements}'"

                logging.info(
                    f"VERIFY_RESULT: ID {record_id} - "
                    f"missing_elements: {elements_for_log}, "
                    f"updated_at: {retrieved_updated_at}"
                )
            else:
                logging.warning(f"VERIFY_TASK: 未能查询到记录 ID {record_id} 的数据（可能已被删除或ID错误）。")
            # --- MODIFICATION END ---
        except Exception as e:
            # --- MODIFICATION START: More detailed log for exception ---
            logging.error(f"VERIFY_TASK: 查询记录 ID {record_id} 时出错: {e}", exc_info=True)
            # --- MODIFICATION END ---

async def main():
    """Entry point: parse CLI args, connect to the database, run the
    MissingElementsFiller, and always release the connection pool on exit."""
    # No options are defined yet; parsing still provides --help for free.
    argparse.ArgumentParser(
        description="Fill missing_elements in the names table using AI."
    ).parse_args()

    logging.info("主程序开始。")
    cfg = Config()
    database = DB(cfg)

    try:
        logging.info("尝试连接数据库...")
        await database.connect()

        pool = database.pool
        if not pool:
            # Without a pool there is nothing useful to do — bail out early.
            logging.error("数据库连接池创建失败或未被赋值给 self.pool！程序中止。")
            return
        logging.info(
            f"数据库连接成功。连接池状态: minsize={pool.minsize}, "
            f"maxsize={pool.maxsize}, "
            f"current_active_or_creating_connections={pool.size}, "
            f"free_connections={pool.freesize}"
        )

        filler = MissingElementsFiller(
            cfg, database, AIClientAsync(cfg, task_type="analyze_elements")
        )
        await filler.fill_all_missing_elements()

    except Exception as e:
        logging.critical(f"主程序发生严重错误: {e}", exc_info=True)
    finally:
        pool = database.pool
        if not pool:
            logging.info("数据库连接池未创建或已显式置为None，无需关闭。")
        elif pool.closed:
            logging.info("数据库连接池已关闭，无需再次关闭。")
        else:
            logging.info("尝试关闭数据库连接...")
            await database.close()
        logging.info("主程序结束。")

if __name__ == "__main__":
    # Script entry point: run the async main() on a fresh event loop.
    asyncio.run(main())