from dataclasses import dataclass
from abc import ABC, abstractmethod
from typing import List, Dict, Optional, Union, Any, Tuple
import os
import asyncio
import time

from anthropic import AsyncAnthropic
from openai import AsyncOpenAI
from together import AsyncTogether

from openai import OpenAI

@dataclass
class LLMResponse:
    """Unified response format across all LLM providers"""
    content: str  # generated text; may embed reasoning as "<think>...</think>" (see providers below)
    model_name: str  # model identifier as reported by the provider's response

class LLMProvider(ABC):
    """Abstract base class for LLM providers"""
    
    @abstractmethod
    async def generate(self, messages: List[Dict[str, str]], **kwargs) -> LLMResponse:
        """Generate a response from the LLM.

        Args:
            messages: OpenAI-style chat messages, each a dict with
                "role" and "content" keys.
            **kwargs: Provider-specific generation parameters
                (e.g. max_tokens, temperature).

        Returns:
            LLMResponse: Unified response with the generated content and
            the model name reported by the provider.
        """
        pass

class OpenAIProvider(LLMProvider):
    """OpenAI API provider implementation.

    Supports the official OpenAI API as well as OpenAI-compatible services
    via a custom ``base_url``. Handles streaming and non-streaming
    responses and wraps any ``reasoning_content`` in <think> tags.
    """
    
    def __init__(self, model_name: str = "gpt-4o", api_key: Optional[str] = None, base_url: Optional[str] = None):
        """
        Args:
            model_name: Model identifier to request.
            api_key: API key; falls back to the OPENAI_API_KEY env var.
            base_url: Optional custom endpoint for OpenAI-compatible services.

        Raises:
            ValueError: If no API key is provided or found in the environment.
        """
        self.model_name = model_name
        self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
        self.base_url = base_url  # custom base_url for OpenAI-compatible endpoints
        
        if not self.api_key:
            raise ValueError("OpenAI API key not provided and not found in environment variables")
        
        if self.base_url:
            self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
        else:
            self.client = AsyncOpenAI(api_key=self.api_key)
    
    async def generate(self, messages: List[Dict[str, str]], **kwargs) -> LLMResponse:
        """Generate a chat completion, retrying transient errors with a fixed delay.

        Standard OpenAI parameters are passed directly; all other kwargs are
        forwarded through ``extra_body``. Content-filter / length failures
        raise ValueError and are NOT retried — they are deterministic, so
        retrying would loop forever.

        Raises:
            ValueError: If the response was truncated or content-filtered.
        """
        # o1-mini does not accept a system message; drop a leading one.
        if "o1-mini" in self.model_name:
            if messages and messages[0].get("role") == "system":
                messages = messages[1:]
        
        # Split kwargs into standard API parameters and extra_body parameters.
        standard_params = {}
        extra_body_params = {}
        
        # Parameters accepted directly by the OpenAI chat-completions API
        # (plus "thinking", used by some OpenAI-compatible backends).
        openai_standard_params = {
            'temperature', 'max_tokens', 'top_p', 'frequency_penalty', 'presence_penalty',
            'stop', 'stream', 'n', 'logprobs', 'top_logprobs', 'response_format',
            'seed', 'tools', 'tool_choice', 'parallel_tool_calls', "thinking"
        }
        
        for key, value in kwargs.items():
            if key in openai_standard_params:
                standard_params[key] = value
            else:
                # Non-standard parameters go through extra_body.
                extra_body_params[key] = value
        
        # Streaming requests need the chunk-accumulation path below.
        is_stream = standard_params.get('stream', False)
        
        api_params = {
            "model": self.model_name,
            "messages": messages,
            **standard_params
        }
        
        if extra_body_params:
            api_params["extra_body"] = extra_body_params
            print(f"[DEBUG] 使用extra_body参数: {extra_body_params}")
        
        retry_delay = 2  # fixed delay between retries (seconds)
        attempt = 0
        while True:  # retry until the request succeeds
            try:
                response = await self.client.chat.completions.create(**api_params)
                
                if is_stream:
                    # Streaming: accumulate deltas into full strings.
                    reasoning_content = ""
                    content = ""
                    model_name = self.model_name
                    finish_reason = None
                    
                    async for chunk in response:
                        if chunk.choices and len(chunk.choices) > 0:
                            delta = chunk.choices[0].delta
                            finish_reason = chunk.choices[0].finish_reason
                            
                            # Collect reasoning ("thinking") tokens, if present.
                            if hasattr(delta, "reasoning_content") and delta.reasoning_content is not None:
                                reasoning_content += delta.reasoning_content
                            
                            # Collect answer tokens.
                            if hasattr(delta, "content") and delta.content:
                                content += delta.content
                        
                        # Prefer the model name reported by the server.
                        if hasattr(chunk, 'model') and chunk.model:
                            model_name = chunk.model
                    
                    if finish_reason in ['length', 'content_filter']:
                        raise ValueError(f"Content filtered or length exceeded: {finish_reason}")
                    
                    # Wrap any reasoning content in <think> tags.
                    if reasoning_content:
                        formatted_content = f"<think>{reasoning_content}</think>{content}"
                    else:
                        formatted_content = content
                    
                    return LLMResponse(
                        content=formatted_content,
                        model_name=model_name
                    )
                
                else:
                    # Non-streaming response.
                    if response.choices[0].finish_reason in ['length', 'content_filter']:
                        raise ValueError("Content filtered or length exceeded")
                    
                    message = response.choices[0].message
                    content = message.content or ""
                    
                    # Some OpenAI-compatible backends expose reasoning_content
                    # as an extra (non-schema) field on the message.
                    reasoning_content = ""
                    if hasattr(message, 'model_extra') and message.model_extra:
                        reasoning_content = message.model_extra.get('reasoning_content', '')
                    
                    if reasoning_content:
                        formatted_content = f"<think>{reasoning_content}</think>{content}"
                    else:
                        formatted_content = content
                    
                    return LLMResponse(
                        content=formatted_content,
                        model_name=response.model
                    )
            except ValueError:
                # Fix: the deliberate content-filter / length ValueError used to
                # be swallowed by the generic handler below and retried forever,
                # even though the failure is deterministic. Re-raise instead so
                # callers (e.g. run_batch) can record the failure.
                raise
            except Exception as e:
                attempt += 1
                print(f"[WARN] OpenAIProvider.generate attempt {attempt} failed: {e}. Retrying in {retry_delay}s...")
                await asyncio.sleep(retry_delay)

class DeepSeekProvider(LLMProvider):
    """DeepSeek API provider implementation.

    Uses DeepSeek's OpenAI-compatible endpoint and wraps any
    ``reasoning_content`` (deepseek-reasoner) in <think>/<answer> tags.
    """
    
    def __init__(self, model_name: str = "deepseek-reasoner", api_key: Optional[str] = None):
        """
        Args:
            model_name: DeepSeek model identifier.
            api_key: API key; falls back to the DEEPSEEK_API_KEY env var.

        Raises:
            ValueError: If no API key is provided or found in the environment.
        """
        self.model_name = model_name
        self.api_key = api_key or os.environ.get("DEEPSEEK_API_KEY")
        if not self.api_key:
            raise ValueError("DeepSeek API key not provided and not found in environment variables")
        
        self.client = AsyncOpenAI(api_key=self.api_key, base_url="https://api.deepseek.com")
    
    async def generate(self, messages: List[Dict[str, str]], **kwargs) -> LLMResponse:
        """Generate a completion, retrying transient errors with a fixed delay.

        Raises:
            ValueError: If the response was truncated or content-filtered
                (not retried — deterministic failure).
        """
        # deepseek-r1 models only accept "user"-type messages:
        # rewrite system messages as user messages.
        filtered_messages: List[Dict[str, str]] = []
        for msg in messages:
            if msg.get("role") == "system":
                filtered_messages.append({"role": "user", "content": f"系统指令：{msg.get('content', '')}"})
            else:
                filtered_messages.append(msg)
        
        retry_delay = 2  # fixed delay between retries (seconds)
        attempt = 0
        while True:
            try:
                response = await self.client.chat.completions.create(
                    model=self.model_name,
                    messages=filtered_messages,
                    **kwargs
                )
                if response.choices[0].finish_reason in ['length', 'content_filter']:
                    raise ValueError("Content filtered or length exceeded")
                
                # Extract reasoning and answer content.
                message = response.choices[0].message
                content = message.content or ""
                
                # reasoning_content arrives as an extra (non-schema) field.
                reasoning_content = ""
                if hasattr(message, 'model_extra') and message.model_extra:
                    reasoning_content = message.model_extra.get('reasoning_content', '')
                
                # If reasoning content is present, format it with explicit tags.
                if reasoning_content:
                    formatted_content = f"<think>\n{reasoning_content}\n</think>\n\n<answer>\n{content}\n</answer>"
                else:
                    formatted_content = content
                
                return LLMResponse(
                    content=formatted_content,
                    model_name=response.model
                )
            except ValueError:
                # Fix: this deliberate failure used to be swallowed by the
                # generic handler below and retried forever, even though
                # content-filter / length results are deterministic.
                raise
            except Exception as e:
                attempt += 1
                print(f"[WARN] DeepSeekProvider.generate attempt {attempt} failed: {e}. Retrying in {retry_delay}s...")
                await asyncio.sleep(retry_delay)

class AnthropicProvider(LLMProvider):
    """Anthropic Claude API provider implementation
    Refer to https://github.com/anthropics/anthropic-sdk-python
    """
    
    def __init__(self, model_name: str = "claude-3.5-sonnet-20240620", api_key: Optional[str] = None):
        """
        Args:
            model_name: Claude model identifier.
            api_key: API key; falls back to the ANTHROPIC_API_KEY env var.

        Raises:
            ValueError: If no API key is provided or found in the environment.
        """
        self.model_name = model_name
        self.api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
        if not self.api_key:
            raise ValueError("Anthropic API key not provided and not found in environment variables")
        
        self.client = AsyncAnthropic(api_key=self.api_key)
    
    async def generate(self, messages: List[Dict[str, str]], **kwargs) -> LLMResponse:
        """Generate a message, retrying transient errors with a fixed delay.

        A system message (if present) is passed via the dedicated ``system``
        parameter, as required by the Anthropic Messages API.

        Raises:
            ValueError: If the response hit the max_tokens limit
                (not retried — retrying with identical parameters would
                hit the same limit again).
        """
        # Extract system message if present
        system_content = ""
        chat_messages = []
        
        for msg in messages:
            if msg["role"] == "system":
                system_content = msg["content"]
            else:
                # Map to Anthropic's format
                chat_messages.append({
                    "role": "assistant" if msg["role"] == "assistant" else "user",
                    "content": msg["content"]
                })
        
        retry_delay = 2  # fixed delay between retries (seconds)
        attempt = 0
        while True:
            try:
                response = await self.client.messages.create(
                    model=self.model_name,
                    system=system_content,
                    messages=chat_messages,
                    **kwargs
                )
                if response.stop_reason == "max_tokens":
                    raise ValueError("Max tokens exceeded")
                return LLMResponse(
                    content=response.content[0].text,
                    model_name=response.model
                )
            except ValueError:
                # Fix: this deliberate, deterministic failure used to be
                # swallowed by the generic handler below and retried forever.
                raise
            except Exception as e:
                attempt += 1
                print(f"[WARN] AnthropicProvider.generate attempt {attempt} failed: {e}. Retrying in {retry_delay}s...")
                await asyncio.sleep(retry_delay)

class TogetherProvider(LLMProvider):
    """LLM provider backed by the Together AI chat-completions API."""

    def __init__(self, model_name: str = "meta-llama/Llama-3-70b-chat-hf", api_key: Optional[str] = None):
        """Create a client, resolving the key from TOGETHER_API_KEY if not given.

        Raises:
            ValueError: If no API key could be resolved.
        """
        self.model_name = model_name
        self.api_key = api_key or os.environ.get("TOGETHER_API_KEY")
        if not self.api_key:
            raise ValueError("Together API key not provided and not found in environment variables")

        self.client = AsyncTogether(api_key=self.api_key)

    async def generate(self, messages: List[Dict[str, str]], **kwargs) -> LLMResponse:
        """Request a chat completion, retrying any failure after a fixed delay."""
        wait_s = 2  # seconds to sleep between attempts
        failures = 0
        while True:
            try:
                completion = await self.client.chat.completions.create(
                    model=self.model_name,
                    messages=messages,
                    **kwargs,
                )
                first_choice = completion.choices[0]
                return LLMResponse(
                    content=first_choice.message.content,
                    model_name=completion.model,
                )
            except Exception as err:
                failures += 1
                print(f"[WARN] TogetherProvider.generate attempt {failures} failed: {err}. Retrying in {wait_s}s...")
                await asyncio.sleep(wait_s)

class ConcurrentLLM:
    """Unified concurrent interface for multiple LLM providers"""
    
    def __init__(self, provider: Union[str, "LLMProvider"], model_name: Optional[str] = None, 
                api_key: Optional[str] = None, base_url: Optional[str] = None, max_concurrency: int = 4):
        """
        Initialize the concurrent LLM client.
        
        Args:
            provider: Either a provider instance or a string
                ('openai', 'deepseek', 'anthropic', 'together')
            model_name: Model name (if provider is a string)
            api_key: API key (if provider is a string)
            base_url: Base URL for API calls (if provider is a string, mainly for openai)
            max_concurrency: Maximum number of concurrent requests
        
        Raises:
            ValueError: If the provider string is not recognized.
        """
        if isinstance(provider, LLMProvider):
            self.provider = provider
        else:
            if provider.lower() == "openai":
                self.provider = OpenAIProvider(model_name or "gpt-4o", api_key, base_url)
            elif provider.lower() == "deepseek":
                self.provider = DeepSeekProvider(model_name or "deepseek-reasoner", api_key)
            elif provider.lower() == "anthropic":
                self.provider = AnthropicProvider(model_name or "claude-3-7-sonnet-20250219", api_key)
            elif provider.lower() == "together":
                self.provider = TogetherProvider(model_name or "meta-llama/Llama-3-70b-chat-hf", api_key)
            else:
                raise ValueError(f"Unknown provider: {provider}")
        
        # Store max_concurrency but don't create the semaphore yet
        self.max_concurrency = max_concurrency
        self._semaphore = None
    
    @property
    def semaphore(self):
        """
        Lazy initialization of the semaphore.
        This ensures the semaphore is created in the event loop where it's used.
        """
        if self._semaphore is None:
            self._semaphore = asyncio.Semaphore(self.max_concurrency)
        return self._semaphore
    
    async def generate(self, messages: List[Dict[str, str]], **kwargs) -> "LLMResponse":
        """Generate a response with concurrency control"""
        async with self.semaphore:
            return await self.provider.generate(messages, **kwargs)
    
    def run_batch(self, 
                messages_list: List[List[Dict[str, str]]], 
                **kwargs) -> Tuple[List[Dict[str, Any]], List[List[Dict[str, str]]]]:
        """Process batches with retries in separate event loops, using id() to track messages.
        
        Args:
            messages_list: One conversation (list of message dicts) per request.
            **kwargs: Generation parameters forwarded to the provider; the
                optional ``max_retries`` key (default 100) is consumed here.
        
        Returns:
            (results, failed): ``results`` has one entry per input in order
            (a result dict on success, None if it never succeeded); ``failed``
            lists the conversations still failing after the last attempt.
        """
        results = [None] * len(messages_list)
        position_map = {id(messages): i for i, messages in enumerate(messages_list)}
        
        # Queue of unfinished or failed conversations
        current_batch = messages_list.copy()
        # Fix: pop (not get) so max_retries is NOT forwarded to the provider
        # API call, where it is not a valid generation parameter.
        max_retries = kwargs.pop("max_retries", 100)
        retry_count = 0
        # Fix: initialize so the return below is defined even when
        # messages_list is empty and the loop body never runs.
        next_batch: List[List[Dict[str, str]]] = []
        
        while current_batch and retry_count < max_retries:
            async def process_batch():
                self._semaphore = None  # Re-create the semaphore inside this event loop
                batch_results = []
                failures = []
                
                tasks_with_messages = [(msg, asyncio.create_task(self.generate(msg, **kwargs))) 
                                    for msg in current_batch]
                for messages, task in tasks_with_messages:
                    try:
                        response = await task
                        position = position_map[id(messages)]
                        batch_results.append((position, {
                            "messages": messages,
                            "response": response.content,
                            "model": response.model_name,
                            "success": True
                        }))
                    except Exception as e:
                        print(f'[DEBUG] error: {e}')
                        failures.append(messages)
                
                return batch_results, failures
            
            # Each attempt runs in a fresh event loop
            batch_results, next_batch = asyncio.run(process_batch())
            
            # Update results with successful responses
            for position, result in batch_results:
                results[position] = result
            
            # Update for next iteration
            if next_batch:
                retry_count += 1
                # Keep only the positions of the still-failing conversations
                position_map = {id(messages): position_map[id(messages)] 
                            for messages in next_batch}
                
                current_batch = next_batch
                time.sleep(5)
                print(f'[DEBUG] {len(next_batch)} failed messages, retry_count: {retry_count}')
            else:
                break

        return results, next_batch



if __name__ == "__main__":
    # llm = ConcurrentLLM(provider="openai", model_name="gpt-4o")
    # llm = ConcurrentLLM(provider="anthropic", model_name="claude-3-5-sonnet-20240620")
    llm = ConcurrentLLM(provider="together", model_name="Qwen/Qwen2.5-7B-Instruct-Turbo")
    # Sixteen single-turn arithmetic prompts: "what is 2+2?" .. "what is 2+17?".
    messages = [
        [{"role": "user", "content": f"what is 2+{addend}?"}]
        for addend in range(2, 18)
    ]
    response = llm.run_batch(messages, max_tokens=100)
    print(f"final response: {response}")
