# -*- coding: utf-8 -*-

# Fix Windows console encoding issues (stdout/stderr are reconfigured below)

# Async runtime: coroutines drive all concurrent work in this script
import asyncio
import sys
# Date/time handling, used to record and measure execution time
from datetime import datetime
import time
# Volcengine async Ark client, used for asynchronous LLM API calls
from volcenginesdkarkruntime import AsyncArk
import threading
from typing import Dict, List, Optional
from asyncio import Queue
# Async MySQL driver
import aiomysql
import hashlib
import json
import re
import concurrent.futures
import tqdm
sys.stdout.reconfigure(encoding='utf-8', errors='replace')  # Python 3.7+
sys.stderr.reconfigure(encoding='utf-8', errors='replace')
# 数据库配置（异步连接）
DB_CONFIG = {
    'host': '192.168.200.50',
    'user': 'xu',
    'password': '1q2w3e4r',
    'db': 'bxgsjyxx',
    'charset': 'utf8mb4',
    'autocommit': True
}

# 连接池配置
POOL_CONFIG = {
    'minsize': 10,  # 最小连接数
    'maxsize': 10000,  # 最大连接数
    'pool_recycle': 3600,  # 连接回收时间（秒）
    'echo': False  # 是否打印SQL语句
}

# 并发配置
CALL_STAFF_COUNT = 40   # 调用staff数量（线程）
PARSE_STAFF_COUNT = 40 # 解析staff数量（线程）
WORKER_PER_STAFF = 10   # 每个staff的协程数量
BATCH_SIZE = 50         # 每批母词数量
RESULT_QUEUE_SIZE = 10000 # 结果队列大小，增加以适应更大的并发量

# 定义协程数量和任务数量(协程)
worker_num, task_num = WORKER_PER_STAFF, WORKER_PER_STAFF


# 提示词模板
PROMPT_TEMPLATE = """
任务：请告诉我以下词语的所有独立且不重复的词义和每个词义对应的同义词、等义词、近义词、绝对反义词、相对反义词，
每个词需基于权威词典收录本质含义不同的词义，排除因场景差异导致的衍生性相似义项:
{title_list}

要求： 
1、每个词必须提供尽可能多的词义，将每一个词的词义全都输出，不要有遗漏，不要胡编乱造，有几个词义就输出几个词义。
2、每个词需基于权威词典收录本质含义不同的词义，排除因场景差异导致的衍生性相似义项，需确保不同词义间不存在语义重叠或高度相似的情况。
3、若不同义项的核心语义一致（仅场景不同），需整合为一条词义，并在 "语境" 中分场景说明；若核心语义存在本质差异，则保留为独立词义。
4、对不能理解的词输出 "0"，严禁硬凑或拆分相似词义。
5、每一项不同的词义都要有对应的例句、词性、语境、所属领域，例句需为真实文献中的原句，避免自造。
6、如果没有对应的同义词、等义词、近义词、绝对反义词、相对反义词则回复"0"。
7、title_list里的每个词都要处理，不要跳过或省略。
8、输出的JSON格式必须严格按照以下要求,不要输出多余内容：
{{
    "id":"10",
    "词义": "秤锤",
    "例句（包含来源）": "权，然后知轻重；度，然后知长短。（《孟子·梁惠王上》）",
    "词性": "名词",
    "语境": "在讨论衡量事物的方法时使用",
    "所属类别": "通用",
    "等义词": "秤砣；...",
    "同义词": "衡；锤",
    "近义词": "砝码",
    "绝对反义词": "...；...",
    "相对反义词": "...；..."
}},
{{
    "id":"20", 
    "词义": "表示通过某种方式传递或获取信息（如：打电话、打报告等）",
    "例句（包含来源）": "他及时给领导打了报告，说明了事情的进展情况。（《职场指南》）",
    "词性": "动词",
    "语境": "在信息传递、沟通交流的场景中使用",
    "所属类别": "信息沟通类",
    "等义词": "发；通",
    "同义词": "传；递",
    "近义词": "送；报",
    "绝对反义词": "细；纤细",
    "相对反义词": "接；收"
}}
"""

# 数据库连接池管理类
class DatabasePool:
    def __init__(self, config: Dict, pool_config: Dict):
        self.config = config
        self.pool_config = pool_config
        self.pool = None
        self._lock = threading.Lock()
        
    async def initialize(self):
        """初始化连接池"""
        if self.pool is None:
            with self._lock:
                if self.pool is None:
                    try:
                        self.pool = await aiomysql.create_pool(
                            **self.config,
                            **self.pool_config
                        )
                        print(f"数据库连接池初始化完成，大小: {self.pool_config['maxsize']}")
                    except Exception as e:
                        print(f"数据库连接池初始化失败: {str(e)}")
                        raise e
    
    async def get_connection(self):
        """获取数据库连接"""
        if self.pool is None:
            await self.initialize()
            
        try:
            conn = await self.pool.acquire()
            # 测试连接是否有效
            async with conn.cursor() as cursor:
                await cursor.execute("SELECT 1")
            return conn
        except Exception as e:
            print(f"获取数据库连接失败: {str(e)}")
            # 如果连接池可能有问题，重新初始化
            if self.pool:
                try:
                    self.pool.close()
                    await self.pool.wait_closed()
                except Exception as close_error:
                    print(f"关闭连接池失败: {str(close_error)}")
                self.pool = None
            await self.initialize()
            return await self.pool.acquire()
    
    async def release_connection(self, conn):
        """释放数据库连接"""
        if conn and self.pool:
            await self.pool.release(conn)
    
    async def close(self):
        """关闭连接池"""
        if self.pool:
            self.pool.close()
            await self.pool.wait_closed()
            print("数据库连接池已关闭")
    
    @staticmethod
    async def update_muci_state(conn: aiomysql.Connection, muci_id: int, state: int, staff_id: Optional[int] = None) -> bool:
        """更新母词状态
        
        Args:
            conn: 数据库连接
            muci_id: 母词ID
            state: 状态值 (0: 待处理, 1: 处理中, 2: 成功, 3: 失败)
            staff_id: 操作的staff ID，用于日志记录
            
        Returns:
            bool: 更新是否成功
        """
        try:
            async with conn.cursor() as cursor:
                sql = "UPDATE xaxis_muci_new_xu SET xaxis_state = %s WHERE id = %s"
                await cursor.execute(sql, (state, muci_id))
                await conn.commit()
                
                # 打印日志信息
                state_names = {0: "待处理", 1: "处理中", 2: "成功", 3: "失败"}
                state_name = state_names.get(state, f"未知状态({state})")
                
                if staff_id is not None:
                    print(f"Staff {staff_id} 已将母词 {muci_id} 状态更新为: {state_name}")
                else:
                    print(f"母词 {muci_id} 状态已更新为: {state_name}")
                
                return True
                
        except Exception as e:
            print(f"更新母词状态失败 - 母词ID: {muci_id}, 目标状态: {state}, 错误: {str(e)}")
            return False
    
    @staticmethod
    async def batch_update_muci_state(conn: aiomysql.Connection, muci_ids: list, state: int, staff_id: Optional[int] = None) -> bool:
        """批量更新母词状态
        
        Args:
            conn: 数据库连接
            muci_ids: 母词ID列表
            state: 状态值 (0: 待处理, 1: 处理中, 2: 成功, 3: 失败)
            staff_id: 操作的staff ID，用于日志记录
            
        Returns:
            bool: 更新是否成功
        """
        if not muci_ids:
            return True
            
        try:
            async with conn.cursor() as cursor:
                ids_str = ','.join(map(str, muci_ids))
                sql = f"UPDATE xaxis_muci_new_xu SET xaxis_state = %s WHERE id IN ({ids_str})"
                await cursor.execute(sql, (state,))
                await conn.commit()
                
                # 打印日志信息
                state_names = {0: "待处理", 1: "处理中", 2: "成功", 3: "失败"}
                state_name = state_names.get(state, f"未知状态({state})")
                
                if staff_id is not None:
                    print(f"Staff {staff_id} 已批量更新 {len(muci_ids)} 个母词状态为: {state_name}")
                else:
                    print(f"已批量更新 {len(muci_ids)} 个母词状态为: {state_name}")
                
                return True
                
        except Exception as e:
            print(f"批量更新母词状态失败 - 母词数量: {len(muci_ids)}, 目标状态: {state}, 错误: {str(e)}")
            return False

# 创建两个独立的连接池实例
call_pool = DatabasePool(DB_CONFIG, POOL_CONFIG)
parse_pool = DatabasePool(DB_CONFIG, POOL_CONFIG)

# 结果缓存队列
result_queue = Queue(maxsize=RESULT_QUEUE_SIZE)

async def log_process(conn, muci_id: int, process_type: int, status: int, error_msg: str = None, cost_time: int = None, input_tokens: int = 0, output_tokens: int = 0, token_usage: int = 0):
    """异步记录处理日志"""
    async with conn.cursor() as cursor:
        sql = """
            INSERT INTO xaxis_process_log_new
            (muci_id, process_type, status, cost_time, error_msg, input_tokens, output_tokens, token_usage) 
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
        """
        await cursor.execute(sql, (muci_id, process_type, status, cost_time, error_msg, input_tokens, output_tokens, token_usage))

class ParseStaff:
    def __init__(self, staff_id: int):
        self.staff_id = staff_id
        self.conn = None
        self.pool = None
        
    async def initialize(self):
        """初始化staff，获取数据库连接"""
        try:
            # 初始化数据库连接池
            self.pool = DatabasePool(DB_CONFIG, POOL_CONFIG)
            await self.pool.initialize()
            
            # 获取staff自己的数据库连接
            self.conn = await self.pool.get_connection()
            if not self.conn:
                raise Exception("无法获取数据库连接")
            
            # print(f"ParseStaff {self.staff_id} 初始化完成")
        except Exception as e:
            error_msg = f"ParseStaff {self.staff_id} 初始化失败: {str(e)}"
            print(error_msg)
            # 确保在发生错误时释放所有资源
            await self.close()
            raise e
        
    async def close(self):
        """关闭staff，释放数据库连接"""
        try:
            # 释放staff的连接
            if self.conn:
                try:
                    await self.pool.release_connection(self.conn)
                except Exception as conn_error:
                    print(f"释放Staff连接时发生错误: {str(conn_error)}")
                self.conn = None
            
            # 关闭连接池
            if self.pool:
                try:
                    await self.pool.close()
                except Exception as pool_error:
                    print(f"关闭连接池时发生错误: {str(pool_error)}")
                self.pool = None
            
            print(f"ParseStaff {self.staff_id} 已关闭")
        except Exception as e:
            print(f"ParseStaff {self.staff_id} 关闭时发生错误: {str(e)}")
            
    async def ensure_connection(self):
        """确保数据库连接有效"""
        try:
            # 测试连接是否有效
            async with self.conn.cursor() as cursor:
                await cursor.execute("SELECT 1")
            return True
        except Exception as e:
            print(f"ParseStaff {self.staff_id} 数据库连接已断开，尝试重新连接: {str(e)}")
            try:
                # 如果连接池存在，尝试获取新连接
                if self.pool:
                    self.conn = await self.pool.get_connection()
                    return True
            except Exception as reconnect_error:
                print(f"ParseStaff {self.staff_id} 重新连接失败: {str(reconnect_error)}")
            return False
            
    async def parse_and_save_from_queue(self, worker_id: int, result_queue: asyncio.Queue):
        """从队列中读取结果并解析保存"""
        print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 开始从队列读取结果")
        while True:
            try:
                # 从队列中获取结果
                result = await result_queue.get()
                print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 获取到新结果")
                
                # 记录解析开始时间
                parse_start_time = time.time()
                
                # 解析并保存结果
                await self.parse_and_save(worker_id, result['content'])
                
                # 计算解析耗时
                parse_cost_time = (time.time() - parse_start_time) * 1000  # 转换为毫秒
                
                # 获取API调用耗时
                api_cost_time = result.get('api_cost_time', 0)
                
                # 打印时间统计
                print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 处理完成:")
                print(f"  - API调用耗时: {api_cost_time:.2f}ms")
                print(f"  - 解析耗时: {parse_cost_time:.2f}ms")
                print(f"  - 总耗时: {(api_cost_time + parse_cost_time):.2f}ms")
                
                # 标记任务完成
                result_queue.task_done()
                
            except Exception as e:
                error_msg = f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 处理队列结果时发生错误: {str(e)}"
                print(error_msg)
                await log_process(self.conn, 0, 1, 1, error_msg)

    async def parse_and_save(self, worker_id: int, content: str):
        """解析API响应并保存到数据库"""
        # 确保数据库连接有效
        if not await self.ensure_connection():
            error_msg = f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 无法建立数据库连接"
            print(error_msg)
            await log_process(self.conn, 0, 1, 1, error_msg)
            return
        
        try:
            # 尝试解析JSON数组
            json_start = content.find('[')
            json_end = content.rfind(']') + 1
            if json_start != -1 and json_end != 0:
                json_str = content[json_start:json_end]
                print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 找到JSON数组: {json_str}")
            else:
                # 尝试解析单个JSON对象
                json_start = content.find('{')
                json_end = content.rfind('}') + 1
                if json_start != -1 and json_end != 0:
                    json_str = content[json_start:json_end]
                    # 将单个对象转换为数组
                    json_str = f"[{json_str}]"
                    print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 找到单个JSON对象，转换为数组: {json_str}")
                else:
                    # 尝试从文本中提取数据
                    print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 未找到JSON格式，尝试从文本中提取数据")
                    # 使用正则表达式提取字段
                    data = {}
                    # 提取id
                    id_match = re.search(r'"id"\s*:\s*"(\d+)"', content)
                    if id_match:
                        data['id'] = id_match.group(1)
                    
                    # 提取其他字段
                    fields = {
                        '词义': r'"词义"\s*:\s*"([^"]+)"',
                        '例句（包含来源）': r'"例句（包含来源）"\s*:\s*"([^"]+)"',
                        '词性': r'"词性"\s*:\s*"([^"]+)"',
                        '语境': r'"语境"\s*:\s*"([^"]+)"',
                        '所属类别': r'"所属类别"\s*:\s*"([^"]+)"',
                        '等义词': r'"等义词"\s*:\s*"([^"]+)"',
                        '同义词': r'"同义词"\s*:\s*"([^"]+)"',
                        '近义词': r'"近义词"\s*:\s*"([^"]+)"',
                        '绝对反义词': r'"绝对反义词"\s*:\s*"([^"]+)"',
                        '相对反义词': r'"相对反义词"\s*:\s*"([^"]+)"'
                    }
                    
                    for field, pattern in fields.items():
                        match = re.search(pattern, content)
                        if match:
                            data[field] = match.group(1)
                    
                    if data:
                        json_str = json.dumps([data], ensure_ascii=False)
                        print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 从文本提取的数据: {json_str}")
                    else:
                        error_msg = f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 无法从内容中提取有效数据\n原始内容：\n{content}"
                        print(error_msg)
                        # 更新母词状态为失败
                        await DatabasePool.update_muci_state(self.conn, data.get('id', 0), 3, self.staff_id)
                        await log_process(self.conn, data.get('id', 0), 1, 1, error_msg)
                        return
            
            # 清理和格式化JSON字符串
            json_str = re.sub(r'^```json\s*', '', json_str)
            json_str = re.sub(r'\s*```$', '', json_str)
            json_str = re.sub(r'}\s*{', '},{', json_str)  # 修复对象之间的分隔符
            json_str = re.sub(r'([^\\])"([^"]*?)([^\\])"', r'\1"\2\3"', json_str)
            json_str = re.sub(r'\n\s*', ' ', json_str)
            
            try:
                parsed_data_list = json.loads(json_str)
            except json.JSONDecodeError as json_error:
                error_msg = f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} JSON解析失败: {str(json_error)}\n错误位置: 行 {json_error.lineno}, 列 {json_error.colno}\n错误字符位置: {json_error.pos}\n原始内容：\n{content}"
                print(error_msg)
                # 更新母词状态为失败
                muci_id = parsed_data_list[0].get('id', 0) if parsed_data_list else 0
                await DatabasePool.update_muci_state(self.conn, muci_id, 3, self.staff_id)
                await log_process(self.conn, muci_id, 1, 1, error_msg)
                return
            
            # 处理数组中的每个对象
            success_count = 0  # 添加成功计数器
            for parsed_data in parsed_data_list:
                try:
                    # 验证必要字段
                    required_fields = ["id", "词义", "词性", "所属类别", "例句（包含来源）", "语境"]
                    missing_fields = [field for field in required_fields if field not in parsed_data]
                    if missing_fields:
                        print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 错误：缺少必要字段: {missing_fields}")
                        print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 当前数据: {parsed_data}")
                        # 更新母词状态为失败
                        await DatabasePool.update_muci_state(self.conn, parsed_data.get('id', 0), 3, self.staff_id)
                        continue
                    
                    # 保存到数据库
                    max_retries = 3  # 最大重试次数
                    retry_count = 0
                    
                    while retry_count < max_retries:
                        try:
                            async with self.conn.cursor() as cursor:
                                # 开启事务
                                await self.conn.begin()
                                
                                # 保存词义
                                sql = """
                                    INSERT INTO xaxis_word_meaning_new_xu
                                    (muci_id, meaning, part_of_speech, domain, example_sentence, context) 
                                    VALUES (%s, %s, %s, %s, %s, %s)
                                """
                                values = (
                                    parsed_data.get("id", ""),
                                    parsed_data.get("词义", ""),
                                    parsed_data.get("词性", ""),
                                    parsed_data.get("所属类别", ""),
                                    parsed_data.get("例句（包含来源）", ""),
                                    parsed_data.get("语境", "")
                                )
                                # print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 准备插入词义数据: {values}")
                                
                                await cursor.execute(sql, values)
                                meaning_id = cursor.lastrowid
                                # print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 成功插入词义，获取到meaning_id: {meaning_id}")
                                
                                # 保存词语关系
                                relation_types = {
                                    "同义词": 1,
                                    "等义词": 2,
                                    "近义词": 3,
                                    "绝对反义词": 4,
                                    "相对反义词": 5
                                }
                                
                                for rel_name, rel_type in relation_types.items():
                                    words = parsed_data.get(rel_name, "0")
                                    print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 处理{rel_name}，原始值: {words}")
                                    
                                    if words != "0":
                                        # 分割词语（支持中文分号和英文分号）
                                        word_list = re.split(r'[;；]', words)
                                        print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 分割后的词语列表: {word_list}")
                                        
                                        for word in word_list:
                                            word = word.strip()
                                            if word:
                                                try:
                                                    # 使用INSERT IGNORE避免重复插入
                                                    insert_values = (
                                                        parsed_data.get("id", ""),
                                                        meaning_id,
                                                        rel_type,
                                                        word
                                                    )
                                                    # print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 准备插入关系数据: {insert_values}")
                                                    
                                                    await cursor.execute(
                                                        """
                                                        INSERT IGNORE INTO xaxis_word_relations_new_xu
                                                        (muci_id, meaning_id, related_type, related_word) 
                                                        VALUES (%s, %s, %s, %s)
                                                        """,
                                                        insert_values
                                                    )
                                                    print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 成功插入关系: {rel_name} - {word}")
                                                except Exception as e:
                                                    if "Lock wait timeout exceeded" in str(e):
                                                        print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 遇到锁等待超时，准备重试...")
                                                        raise  # 抛出异常以触发重试
                                                    error_msg = f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 保存词语关系失败 - 词语: {word}, 类型: {rel_name}, 错误: {str(e)}"
                                                    print(error_msg)
                                                    await log_process(self.conn, parsed_data.get("id", ""), 1, 1, error_msg)
                        
                                # 更新母词状态为成功
                                muci_id = parsed_data.get("id", "")
                                await DatabasePool.update_muci_state(self.conn, muci_id, 2, self.staff_id)
                                
                                # 提交事务
                                await self.conn.commit()
                                print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 成功保存解析结果")
                                success_count += 1  # 增加成功计数
                                break  # 成功完成，跳出重试循环
                                
                        except Exception as e:
                            # 回滚事务
                            await self.conn.rollback()
                            retry_count += 1
                            
                            if retry_count < max_retries:
                                wait_time = 1 * retry_count  # 递增等待时间
                                print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 事务失败，第 {retry_count} 次重试，等待 {wait_time} 秒...")
                                await asyncio.sleep(wait_time)
                            else:
                                error_msg = f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 数据库操作失败，已重试 {max_retries} 次: {str(e)}"
                                print(error_msg)
                                # 更新母词状态为失败
                                await DatabasePool.update_muci_state(self.conn, parsed_data.get('id', 0), 3, self.staff_id)
                                await log_process(self.conn, parsed_data.get("id", ""), 1, 1, error_msg)
                                # 继续处理下一个数据，而不是直接返回
                                continue
                
                except Exception as e:
                    error_msg = f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 处理数据时发生错误: {str(e)}"
                    print(error_msg)
                    # 更新母词状态为失败
                    await DatabasePool.update_muci_state(self.conn, parsed_data.get('id', 0), 3, self.staff_id)
                    await log_process(self.conn, parsed_data.get("id", ""), 1, 1, error_msg)
                    # 继续处理下一个数据，而不是直接返回
                    continue
                
            # 打印处理结果统计
            print(f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 处理完成，成功保存 {success_count}/{len(parsed_data_list)} 条数据")
            
        except Exception as e:
            error_msg = f"ParseStaff {self.staff_id} 的 ParseWorker {worker_id} 解析结果失败: {str(e)}"
            print(error_msg)
            await log_process(self.conn, 0, 1, 1, error_msg)

    async def run(self):
        """运行主循环"""
        try:
            await self.initialize()
            while True:
                print(f"ParseStaff {self.staff_id} 等待处理数据...")
                await asyncio.sleep(1)  # 避免过度消耗CPU
        except Exception as e:
            error_msg = f"ParseStaff {self.staff_id} 运行异常: {str(e)}"
            print(error_msg)
            await log_process(self.conn, 0, 1, 1, error_msg)
        finally:
            await self.close()

class CallStaff:
    def __init__(self, staff_id: int):
        self.staff_id = staff_id
        self.conn = None
        self.parse_staff = None  # 只创建一个ParseStaff
        self.pool = None
        self.result_queue = None  # 只创建一个结果队列
        
    async def initialize(self):
        """初始化staff，获取数据库连接"""
        try:
            # 初始化数据库连接池
            self.pool = DatabasePool(DB_CONFIG, POOL_CONFIG)
            await self.pool.initialize()
            
            # 获取staff自己的数据库连接
            self.conn = await self.pool.get_connection()
            if not self.conn:
                raise Exception("无法获取数据库连接")
            
            # 创建一个ParseStaff和一个结果队列
            try:
                # 创建解析staff
                self.parse_staff = ParseStaff(self.staff_id)
                self.parse_staff.pool = self.pool
                
                # 创建结果队列
                self.result_queue = asyncio.Queue()
                
                # print(f"CallStaff {self.staff_id} 创建了对应的 ParseStaff {self.staff_id}，获得独立数据库连接和结果队列")
                
            except Exception as worker_error:
                print(f"ParseStaff初始化失败: {str(worker_error)}")
                raise worker_error
                
            # print(f"调用Staff {self.staff_id} 初始化完成")
        except Exception as e:
            error_msg = f"调用Staff {self.staff_id} 初始化失败: {str(e)}"
            print(error_msg)
            # 确保在发生错误时释放所有资源
            await self.close()
            raise e
        
    async def close(self):
        """关闭staff，释放数据库连接"""
        try:
            # 关闭ParseStaff
            if self.parse_staff:
                try:
                    await self.parse_staff.close()
                except Exception as worker_error:
                    print(f"关闭ParseStaff时发生错误: {str(worker_error)}")
            self.parse_staff = None
            
            # 释放staff的连接
            if self.conn:
                try:
                    await self.pool.release_connection(self.conn)
                except Exception as conn_error:
                    print(f"释放Staff连接时发生错误: {str(conn_error)}")
                self.conn = None
            
            # 关闭连接池
            if self.pool:
                try:
                    await self.pool.close()
                except Exception as pool_error:
                    print(f"关闭连接池时发生错误: {str(pool_error)}")
                self.pool = None
            
            print(f"调用Staff {self.staff_id} 已关闭")
        except Exception as e:
            print(f"调用Staff {self.staff_id} 关闭时发生错误: {str(e)}")
        
    async def fetch_batch(self):
        """获取一批待处理的母词"""
        try:
            async with self.conn.cursor() as cursor:
                # 开启事务
                await self.conn.begin()
                
                try:
                    # 获取task_num * BATCH_SIZE个母词
                    sql = """
                        SELECT id, title FROM xaxis_muci_new_xu
                        WHERE xaxis_state = 0 AND id>8000000 and id<10000000
                        ORDER BY id ASC 
                        LIMIT %s FOR UPDATE
                    """
                    await cursor.execute(sql, (task_num * BATCH_SIZE,))
                    all_records = await cursor.fetchall()
                    
                    if not all_records:
                        print(f"调用Staff {self.staff_id} 没有获取到待处理母词")
                        return []
                    
                    # 获取当前批次的所有ID
                    current_ids = [str(record[0]) for record in all_records]
                    # 更新当前批次的状态
                    update_sql = f"UPDATE xaxis_muci_new_xu SET xaxis_state = 1 WHERE id IN ({','.join(current_ids)})"
                    await cursor.execute(update_sql)
                    
                    # 提交事务
                    await self.conn.commit()
                    
                    print(f"调用Staff {self.staff_id} 获取到 {len(all_records)} 条待处理母词")
                    return all_records
                        
                except Exception as e:
                    # 如果出错，回滚事务
                    await self.conn.rollback()
                    raise e
        except Exception as e:
            await log_process(self.conn, 0, 1, 1, f"调用Staff {self.staff_id} 获取批次失败: {str(e)}")
            print(f"调用Staff {self.staff_id} 获取批次失败: {str(e)}")
            return []

    async def process_batch(self, batch):
        """
        主函数，负责初始化客户端、生成请求、启动协程并监控任务完成情况。

        使用协程来实现并发处理请求，避免了使用线程带来的较大开销。
        多个协程可以在一个线程中并发执行，提高程序的性能。
        """
        if not batch:
            return
        
        start = datetime.now()
        # 创建一个异步队列用于存储请求
        requests = asyncio.Queue()
        results = []  # 创建结果列表
        
        # 初始化客户端
        try:
            client = AsyncArk(
                api_key="b2eb10d1-5d5a-4450-9574-73845fcc8d67",
                timeout=24 * 3600
            )
        except Exception as e:
            error_msg = f"API客户端初始化失败: {type(e).__name__}: {str(e)}"
            print(error_msg)
            await log_process(self.conn, 0, 1, 1, error_msg, 0, 0, 0, 0)
            return

        # 将母词分成BATCH_SIZE个一组
        batch_groups = [batch[i:i + BATCH_SIZE] for i in range(0, len(batch), BATCH_SIZE)]
        
        # 为每个任务创建不同的请求
        for group in batch_groups:
            # 构建包含原始ID的title_list
            group_title_list = "、".join([f"{record[0]}:{record[1]}" for record in group])
            await requests.put({
                "model": "ep-bi-20250523134937-j74rz",
                "messages": [
                    {
                        "role": "system",
                        "content": "你是一个汉语言专家，请保持输出中的id与输入的id完全一致。",
                    },
                    {
                        "role": "user",
                        "content": PROMPT_TEMPLATE.format(title_list=group_title_list)
                    }
                ],
                "max_tokens": 12288
            })
        
        # 创建所有worker的任务
        call_tasks = []
        
        # 创建调用协程任务
        for i in range(WORKER_PER_STAFF):
            task = asyncio.create_task(self.callworker(i, client, requests, results))
            call_tasks.append(task)
            print(f"CallStaff {self.staff_id} 创建了 CallWorker {i} 的任务")
            
        # 创建解析协程任务
        parse_task = asyncio.create_task(self.parse_staff.parse_and_save_from_queue(self.staff_id, self.result_queue))
        print(f"CallStaff {self.staff_id} 创建了 ParseWorker {self.staff_id} 的任务")
        
        # 等待所有请求处理完成
        await requests.join()
        
        # 停止所有协程，取消所有正在运行的任务
        for task in call_tasks:
            task.cancel()
        parse_task.cancel()
        
        # 等待所有协程取消完成，确保所有任务都已停止
        await asyncio.gather(*call_tasks, parse_task, return_exceptions=True)
        
        # 关闭客户端连接，释放资源
        await client.close()
        # 记录程序结束执行的时间
        end = datetime.now()
        # 打印程序总执行时间和处理的总任务数
        total_time = (end - start).total_seconds() * 1000  # 转换为毫秒
        print(f"Total time: {end - start}, Total task: {task_num}")
        # 记录总执行时间和任务数到日志
        await log_process(self.conn, 0, 1, 0, 
            f"批次处理完成，总耗时: {total_time:.2f}ms，处理任务数: {task_num}", 
            int(total_time), 0, 0, 0
        )

    async def callworker(self, worker_id: int, client: AsyncArk, requests: asyncio.Queue, results: List):
        """Worker coroutine: drain requests from the queue, call the Ark batch
        chat API, and fan results out to both the shared result queue and the
        in-memory results list.

        Args:
            worker_id: index of this worker within its CallStaff (log labelling only).
            client: shared AsyncArk client used for the API calls.
            requests: queue of keyword-argument dicts for
                ``client.batch_chat.completions.create``; ``task_done()`` is
                called exactly once per successfully dequeued item so the
                producer's ``requests.join()`` unblocks correctly.
            results: list collecting a copy of every produced result dict.

        Runs until cancelled. API failures are logged (and the muci row is
        marked failed when an id can be extracted) but never kill the worker.
        """
        while True:
            # Dequeue OUTSIDE the try/finally: if we are cancelled while
            # waiting here, no item was taken, so task_done() must NOT run.
            # (The original called task_done() in a finally that also covered
            # a failed/cancelled get(), which corrupts the queue's
            # unfinished-task count and breaks requests.join().)
            request = await requests.get()
            try:
                print(f"\n{'='*50}")
                print(f"CallStaff {self.staff_id} 的 CallWorker {worker_id} 准备发送请求...")

                # Time the API call so the duration can be logged in ms.
                api_start_time = time.time()

                print(f"CallStaff {self.staff_id} 的 CallWorker {worker_id} 开始调用API...")
                completion = await client.batch_chat.completions.create(**request)

                api_cost_time = (time.time() - api_start_time) * 1000  # ms
                print(f"CallStaff {self.staff_id} 的 CallWorker {worker_id} API调用完成，耗时: {api_cost_time:.2f}ms，返回结果数量: {len(completion.choices)}")

                # Read token usage once instead of re-testing hasattr per field.
                usage = getattr(completion, 'usage', None)
                prompt_tokens = usage.prompt_tokens if usage else 0
                completion_tokens = usage.completion_tokens if usage else 0
                total_tokens = usage.total_tokens if usage else 0

                print(f"\nToken使用情况:")
                print(f"  - 输入tokens: {prompt_tokens}")
                print(f"  - 输出tokens: {completion_tokens}")
                print(f"  - 总tokens: {total_tokens}")

                # Persist the successful call (process_type=0) to the log table.
                await log_process(
                    self.conn,
                    0,  # muci_id not tied to a single word here
                    0,  # process_type: 0 = API call
                    0,  # status: 0 = success
                    f"CallStaff {self.staff_id} 的 CallWorker {worker_id} API调用成功",
                    int(api_cost_time),
                    prompt_tokens,
                    completion_tokens,
                    total_tokens
                )

                # Fan every returned choice out to the parse queue and the
                # caller-visible results list.
                for choice in completion.choices:
                    if not (hasattr(choice, 'message') and hasattr(choice.message, 'content')):
                        continue
                    content = choice.message.content
                    print(f"\n{'='*50}")
                    print(f"CallStaff {self.staff_id} 的 CallWorker {worker_id} 正在处理结果")

                    payload = {
                        'content': content,
                        'usage': {
                            'prompt_tokens': prompt_tokens,
                            'completion_tokens': completion_tokens,
                            'total_tokens': total_tokens
                        },
                        'api_cost_time': api_cost_time  # API call duration (ms)
                    }
                    await self.result_queue.put(payload)
                    # Separate dict so queue consumers and the results list
                    # never share (and mutate) the same object.
                    results.append({
                        'content': content,
                        'usage': {
                            'prompt_tokens': prompt_tokens,
                            'completion_tokens': completion_tokens,
                            'total_tokens': total_tokens
                        },
                        'api_cost_time': api_cost_time
                    })
                    print(f"CallStaff {self.staff_id} 的 CallWorker {worker_id} 保存了结果")

            except Exception as e:
                print(f"\n{'='*50}")
                print(f"CallStaff {self.staff_id} 的 CallWorker {worker_id} 发生错误: {str(e)}")
                print(f"{'='*50}\n")

                # Best-effort: pull the first "<id>:" token out of the user
                # message and mark that muci row as failed (state 3).
                try:
                    content = request.get('messages', [{}])[1].get('content', '')
                    id_match = re.search(r'(\d+):', content)
                    if id_match:
                        muci_id = id_match.group(1)
                        await DatabasePool.update_muci_state(self.conn, muci_id, 3, self.staff_id)
                except Exception as extract_error:
                    print(f"提取母词ID失败: {str(extract_error)}")

                # Log the failed call (status=1) with zeroed timings/tokens.
                await log_process(
                    self.conn,
                    0,  # muci_id unknown at this level
                    0,  # process_type: 0 = API call
                    1,  # status: 1 = failure
                    f"CallStaff {self.staff_id} 的 CallWorker {worker_id} 调用失败: {str(e)}",
                    0, 0, 0, 0
                )
            finally:
                # Exactly one task_done() per successful get(), including the
                # cancellation-during-processing path, so requests.join()
                # stays accurate.
                requests.task_done()

    async def run(self):
        """Main loop for this CallStaff: initialize, then repeatedly fetch and
        process batches of mother-words until an unhandled error occurs.

        When no pending data is available, sleeps 10 seconds and retries.
        Any exception is printed and logged, and resources are released via
        ``close()`` on the way out.
        """
        try:
            await self.initialize()
            while True:
                print(f"调用Staff {self.staff_id} 开始获取批次...")
                pending = await self.fetch_batch()
                if pending:
                    print(f"调用Staff {self.staff_id} 开始处理批次，包含 {len(pending)} 条母词")
                    await self.process_batch(pending)
                    print(f"调用Staff {self.staff_id} 完成批次处理")
                else:
                    # Nothing to do right now — back off before polling again.
                    print(f"调用Staff {self.staff_id} 无待处理数据，等待10秒...")
                    await asyncio.sleep(10)
        except Exception as exc:
            message = f"调用Staff {self.staff_id} 运行异常: {str(exc)}"
            print(message)
            await log_process(self.conn, 0, 1, 1, message)
        finally:
            # Always release DB/client resources, even after a crash.
            await self.close()

async def main():
    """Entry coroutine: spin up CALL_STAFF_COUNT CallStaff workers, run them
    concurrently, and make sure every staff is closed on the way out."""
    staffs: list = []
    tasks: list = []
    try:
        # Start each staff slightly staggered to avoid a thundering herd of
        # simultaneous DB/API connections.
        for index in range(CALL_STAFF_COUNT):
            staff = CallStaff(index)
            # NOTE(review): run() also calls initialize(); this pre-init is
            # kept for parity with the original flow (it also creates the
            # paired ParseStaff) — confirm initialize() is idempotent.
            await staff.initialize()
            staffs.append(staff)
            tasks.append(asyncio.create_task(staff.run()))
            await asyncio.sleep(0.1)

        # Block until every staff task finishes (or one raises).
        await asyncio.gather(*tasks)

    except Exception as exc:
        print(f"主程序异常: {str(exc)}")
    finally:
        # Close every staff, tolerating per-staff shutdown failures.
        for staff in staffs:
            try:
                await staff.close()
            except Exception as exc:
                print(f"关闭staff时发生错误: {str(exc)}")

        # Give outstanding connections a moment to shut down cleanly.
        await asyncio.sleep(1)

if __name__ == "__main__":
    # Script entry point: run the async service and report how it exited.
    try:
        print("启动母词处理服务...")
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C: orderly interactive shutdown.
        print("程序被用户中断")
    except Exception as exc:
        print(f"程序异常退出: {str(exc)}")
    finally:
        print("程序结束")