import asyncio
from typing import List, Dict
from concurrent.futures import ThreadPoolExecutor
import time
import json
import os

class RequestProcessor:
    """Process workflow requests from an asyncio queue with bounded concurrency.

    Failed requests are retried up to ``max_retries`` times via a dedicated
    retry worker; requests that exhaust their retries are persisted to disk
    and their futures failed. The caller-facing contract: each queued item is
    ``(inputs, bh, timeout, future)`` and the future is resolved exactly once,
    either with the workflow result or with the final exception.
    """

    def __init__(self, workflow_client, performance_monitor, thread_pool: ThreadPoolExecutor, request_queue: asyncio.Queue, request_semaphore: asyncio.Semaphore, error_callback):
        self.workflow_client = workflow_client
        self.performance_monitor = performance_monitor
        self.thread_pool = thread_pool
        self.request_queue = request_queue
        self.request_semaphore = request_semaphore
        self.error_callback = error_callback
        # Queue of requests that failed and await retry handling.
        self.failed_requests_queue = asyncio.Queue()
        # Retry counters keyed by bh; entries are removed on success and on
        # final failure so the dict cannot grow without bound.
        self.retry_counts = {}
        # Maximum number of retries before a request is abandoned.
        self.max_retries = 3
        # Delay in seconds before a failed request is re-enqueued.
        self.retry_interval = 5
        # Strong references to in-flight delayed-requeue tasks, so they are
        # not garbage-collected before completing.
        self._retry_tasks = set()

    async def process_request_queue(self):
        """Run the background workers that drain the request queue.

        Spawns five queue workers plus one retry worker and waits for all of
        them; each worker exits when it receives a ``None`` sentinel from its
        queue, so this coroutine returns only on shutdown.
        """
        workers = [asyncio.create_task(self.process_single_worker()) for _ in range(5)]
        workers.append(asyncio.create_task(self.process_failed_requests()))
        await asyncio.gather(*workers)

    async def process_single_worker(self):
        """Consume ``request_queue`` items until a ``None`` sentinel.

        On success the caller's future receives the workflow result. On
        failure the request is handed to the retry queue and the future is
        deliberately left pending so a later retry can still resolve it.
        """
        while True:
            try:
                request_data = await self.request_queue.get()
                if request_data is None:  # shutdown sentinel
                    break

                inputs, bh, timeout, future = request_data

                # Count every dequeue. Retried requests are counted again:
                # this metric tracks work performed, not unique requests.
                await self.performance_monitor.increment_processed()

                try:
                    # Bound concurrent workflow executions with the semaphore.
                    async with self.request_semaphore:
                        start_time = time.time()
                        result = await self.workflow_client.execute_workflow_with_retry(inputs, bh, timeout, self.error_callback)
                        duration = time.time() - start_time
                        await self.performance_monitor.add_request_time(duration)
                        await self.performance_monitor.add_queue_size(self.request_queue.qsize())
                        # Success: drop any retry bookkeeping for this bh.
                        self.retry_counts.pop(bh, None)
                        if not future.done():
                            future.set_result(result)
                        await self.performance_monitor.increment_successful()
                except Exception as e:
                    # Hand the request to the retry processor. Do NOT fail the
                    # future here: doing both (as the original code did) makes
                    # the caller observe a failure that may later be retried
                    # successfully, and the retry's set_result() would then
                    # raise InvalidStateError on the already-failed future.
                    # Failure counting also happens only on final failure.
                    await self.failed_requests_queue.put((inputs, bh, timeout, future, str(e)))
                finally:
                    self.request_queue.task_done()

            except Exception as e:
                await self.error_callback("queue_processor", f"队列处理错误: {str(e)}")
                await asyncio.sleep(1)  # brief back-off on unexpected errors

    async def process_failed_requests(self):
        """Consume ``failed_requests_queue`` items until a ``None`` sentinel.

        Requests below the retry limit are re-enqueued onto the main queue
        after ``retry_interval`` seconds; the delay runs in its own task so
        one pending retry does not stall the others. Requests that exhausted
        their retries are persisted to disk and their futures are failed.
        """
        while True:
            try:
                request_data = await self.failed_requests_queue.get()
                if request_data is None:  # shutdown sentinel
                    break

                inputs, bh, timeout, future, error_msg = request_data
                retry_count = self.retry_counts.get(bh, 0)

                if retry_count < self.max_retries:
                    self.retry_counts[bh] = retry_count + 1
                    await self.error_callback(bh, f"请求失败，准备第{retry_count + 1}次重试，错误信息: {error_msg}")
                    # Delay + requeue in a background task so this worker can
                    # keep draining the failed queue in the meantime. Keep a
                    # strong reference until the task completes.
                    task = asyncio.create_task(self._requeue_after_delay(inputs, bh, timeout, future))
                    self._retry_tasks.add(task)
                    task.add_done_callback(self._retry_tasks.discard)
                else:
                    # Retries exhausted: report, persist, and fail the future.
                    await self.error_callback(bh, f"请求失败，已达到最大重试次数({self.max_retries})，最终错误: {error_msg}")
                    await self.save_failed_request(inputs, bh, error_msg)
                    self.retry_counts.pop(bh, None)  # release the counter
                    if not future.done():
                        future.set_exception(Exception(f"请求失败，已达到最大重试次数: {error_msg}"))
                    # Only a final, unrecoverable failure increments the count.
                    await self.performance_monitor.increment_failed()

                self.failed_requests_queue.task_done()

            except Exception as e:
                await self.error_callback("retry_processor", f"重试处理错误: {str(e)}")
                await asyncio.sleep(1)

    async def _requeue_after_delay(self, inputs, bh: str, timeout, future):
        """Wait ``retry_interval`` seconds, then push the request back onto
        the main queue; if even that fails, fail the caller's future."""
        await asyncio.sleep(self.retry_interval)
        try:
            await self.request_queue.put((inputs, bh, timeout, future))
        except Exception as e:
            await self.error_callback(bh, f"重试请求加入队列失败: {str(e)}")
            if not future.done():
                future.set_exception(e)

    async def save_failed_request(self, inputs: Dict, bh: str, error_msg: str):
        """Persist a permanently-failed request as a JSON file under
        ``logs/failed_requests`` for later inspection or replay.

        File writing is blocking, so it is offloaded to the thread pool to
        keep the event loop responsive. Errors are reported via the error
        callback rather than raised (best-effort persistence).
        """
        try:
            failed_request = {
                "bh": bh,
                "inputs": inputs,
                "error": error_msg,
                "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
            }
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(self.thread_pool, self._write_failed_request, failed_request, bh)
        except Exception as e:
            await self.error_callback(bh, f"保存失败请求信息失败: {str(e)}")

    def _write_failed_request(self, failed_request: Dict, bh: str):
        """Blocking helper: write one failed-request record to disk."""
        os.makedirs("logs/failed_requests", exist_ok=True)
        filename = f"logs/failed_requests/failed_request_{bh}_{int(time.time())}.json"
        with open(filename, "w", encoding="utf-8") as f:
            json.dump(failed_request, f, ensure_ascii=False, indent=2)

    async def batch_process_requests(self, requests: List[dict]) -> List[dict]:
        """Enqueue a batch of requests (300-second timeout each) and wait for
        all of their results; raises if any request ultimately fails."""
        loop = asyncio.get_running_loop()
        futures = []
        for request in requests:
            # create_future() ties the future to the running loop (preferred
            # over instantiating asyncio.Future() directly).
            future = loop.create_future()
            await self.request_queue.put((request, request['bh'], 300, future))  # 300-second timeout
            futures.append(future)
        return await asyncio.gather(*futures)