import requests
import json
import os
from typing import List, Dict, Optional, Union, Any
from dotenv import load_dotenv

class SiliconFlowAPI:
    """A client for interacting with SiliconFlow's LLM chat-completion API."""

    # Root endpoint for all API requests (assigned during initialization).
    BASE_URL: str

    def __init__(self, api_key: Optional[str] = None):
        """
        Initialize the SiliconFlow API client.

        Args:
            api_key: Your SiliconFlow API key. If not provided, will attempt
                to load it from a .env file.
        """
        ...

    @staticmethod
    def response_structure() -> Dict[str, Any]:
        """
        Return an example of the API response structure.

        The SiliconFlow API response has the following shape:
        {
            "id": "response ID",
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": "content generated by the model",
                        "reasoning_content": "reasoning content (only available on some models)",
                        "tool_calls": [
                            {
                                "id": "tool call ID",
                                "type": "function",
                                "function": {
                                    "name": "function name",
                                    "arguments": "function arguments (JSON-formatted string)"
                                }
                            }
                        ]
                    },
                    "finish_reason": "why generation stopped: stop, eos, length or tool_calls"
                }
            ],
            "usage": {
                "prompt_tokens": number of input tokens,
                "completion_tokens": number of output tokens,
                "total_tokens": total token count
            },
            "created": creation timestamp,
            "model": "name of the model used",
            "object": "chat.completion"
        }

        Streaming responses use a similar chunk structure, but arrive in
        multiple pieces, each chunk carrying part of the content.

        Returns:
            A dictionary illustrating the example response structure.
        """
        ...

    def create_chat_completion(
        self,
        model: str = "Qwen/QwQ-32B",
        messages: Optional[List[Dict[str, Any]]] = None,
        stream: bool = False,
        max_tokens: int = 4096,
        stop: Optional[Union[str, List[str]]] = None,
        temperature: float = 0.7,
        top_p: float = 0.7,
        top_k: int = 50,
        frequency_penalty: float = 0.5,
        n: int = 1,
        response_format: Optional[Dict[str, str]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Dict[str, Any]:
        """
        Create a chat completion using SiliconFlow's API.

        Args:
            model: Model name (e.g., "Qwen/QwQ-32B", "deepseek-ai/DeepSeek-V3")
            messages: List of message objects with 'role' and 'content' keys.
                     Role can be 'system', 'user', or 'assistant'
            stream: Whether to stream the response incrementally (bool)
            max_tokens: Maximum tokens to generate in response (1-4096 for most models,
                       up to 8192 for Qwen3 series, up to 16384 for DeepSeek-R1 series).
                       Note: This limits output length only, not total context window
            stop: String or list of strings where API stops generating further tokens
            temperature: Controls randomness (0.0-2.0). Lower = more deterministic,
                        higher = more creative. Recommended: 0.1-0.9
            top_p: Nucleus sampling threshold (0.0-1.0). Consider tokens with cumulative
                  probability up to this value. Recommended: 0.7-0.95
            top_k: Top-k sampling parameter (1-100). Consider only top k most likely tokens
            frequency_penalty: Penalize repeated tokens (-2.0 to 2.0). Positive values
                             discourage repetition
            n: Number of chat completion choices to generate (1-10)
            response_format: Response format specification. Use {"type": "text"} for text
                           or {"type": "json_object"} for JSON responses
            tools: List of function tools the model may call. Each tool should have
                  'type': 'function' and 'function' with name, description, parameters

        Returns:
            Dict containing API response with 'choices', 'usage', 'model', etc.
            For streaming requests, returns the raw response object for iteration

        Raises:
            requests.exceptions.HTTPError: If API request fails
            ValueError: If parameters are invalid
        """
        ...

    def create_image_chat_completion(
        self,
        model: str = "Qwen/Qwen2.5-VL-72B-Instruct",
        messages: Optional[List[Dict[str, Any]]] = None,
        stream: bool = False,
        max_tokens: int = 8192,
        stop: Optional[Union[str, List[str]]] = None,
        temperature: float = 0.7,
        top_p: float = 0.7,
        top_k: int = 50,
        frequency_penalty: float = 0.5,
        n: int = 1,
        response_format: Optional[Dict[str, str]] = None,
    ) -> Dict[str, Any]:
        """
        Create a vision-enabled chat completion that can process images and text.

        This method supports multimodal inputs where messages can contain both text
        and image content. Images can be provided as URLs or base64-encoded data.

        Args:
            model: Vision model name. Recommended models:
                   - "Qwen/Qwen2.5-VL-72B-Instruct" (high quality)
                   - "Qwen/Qwen2.5-VL-32B-Instruct" (balanced)
                   - "deepseek-ai/deepseek-vl2" (alternative)
            messages: List of message objects supporting multimodal content.
                     Each message can contain:
                     - role: 'system', 'user', or 'assistant'
                     - content: String for text OR list for multimodal content with:
                       * {"type": "text", "text": "your text"}
                       * {"type": "image_url", "image_url": {"url": "image_url"}}
            stream: Whether to stream the response incrementally (bool)
            max_tokens: Maximum tokens to generate in response (1-8192 for vision models).
                       Note: This limits output length only, not total context window.
                       Vision models typically need more tokens for detailed descriptions
            stop: String or list of strings where API stops generating further tokens
            temperature: Controls randomness (0.0-2.0). Lower = more deterministic,
                        higher = more creative. Recommended: 0.1-0.7 for vision tasks
            top_p: Nucleus sampling threshold (0.0-1.0). Recommended: 0.7-0.95
            top_k: Top-k sampling parameter (1-100). Consider only top k most likely tokens
            frequency_penalty: Penalize repeated tokens (-2.0 to 2.0). Positive values
                             discourage repetition in image descriptions
            n: Number of chat completion choices to generate (1-10)
            response_format: Response format specification. Use {"type": "text"} for text
                           descriptions or {"type": "json_object"} for structured data

        Returns:
            Dict containing API response with 'choices', 'usage', 'model', etc.
            For streaming requests, returns the raw response object for iteration

        Raises:
            requests.exceptions.HTTPError: If API request fails
            ValueError: If parameters are invalid or image format is unsupported

        Example:
            ```python
            # Analyze an image
            messages = [{
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's in this image?"},
                    {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}}
                ]
            }]
            response = api.create_image_chat_completion(messages=messages)
            ```
        """
        ...

    def handle_stream(self, response):
        """
        Process a streaming response from the API.

        Args:
            response: The streaming response object

        Yields:
            Parsed chunks of the streaming response
        """
        ...

    def extract_stream_usage(self, stream_response) -> Dict[str, int]:
        """
        Extract token-usage information from a streaming response.

        In a streaming response, the token-usage statistics are only returned
        in the final chunk. This method iterates over the stream, collecting
        all of the content, and returns the final usage statistics.

        Args:
            stream_response: The streaming response object

        Returns:
            A dict with token-usage statistics, in the form:
            {
                "prompt_tokens": number of input tokens,
                "completion_tokens": number of output tokens,
                "total_tokens": total token count
            }

        Example:
            ```python
            # Create a streaming response
            stream_response = api.create_chat_completion(
                model="Qwen/QwQ-32B",
                messages=[{"role": "user", "content": "Tell me a story"}],
                stream=True
            )

            # Process the stream and accumulate the generated content
            full_response = ""
            for chunk in api.handle_stream(stream_response):
                if "choices" in chunk and len(chunk["choices"]) > 0:
                    content = chunk["choices"][0].get("delta", {}).get("content", "")
                    if content:
                        full_response += content
                        print(content, end="", flush=True)

            # Retrieve the token-usage statistics
            usage = api.extract_stream_usage(stream_response)
            print(f"Total tokens used: {usage.get('total_tokens', 'unknown')}")
            ```
        """
        ...

    def image_data_extraction_example(self, image_url: str, extraction_prompt: str) -> Dict[str, Any]:
        """
        Multimodal example: extract data from an image.

        This method demonstrates how to use a vision model to extract
        structured data from an image.

        Args:
            image_url: The image's URL or its Base64-encoded data
            extraction_prompt: The extraction prompt specifying which data to
                pull out of the image

        Returns:
            The model's analysis result for the image

        Example:
            ```python
            # Initialize the API client
            api = SiliconFlowAPI(api_key="your_api_key_here")

            # Extract tabular data from an image
            result = api.image_data_extraction_example(
                image_url="https://example.com/table_image.jpg",
                extraction_prompt="Extract the table data from this image and return it as JSON"
            )

            # Or use Base64-encoded image data
            import base64
            with open("receipt.jpg", "rb") as image_file:
                base64_image = base64.b64encode(image_file.read()).decode('utf-8')

            # Extract information from a receipt
            result = api.image_data_extraction_example(
                image_url=f"data:image/jpeg;base64,{base64_image}",
                extraction_prompt="Extract the following from this receipt image: date, store name, purchased items with prices, and the total amount"
            )

            print(result["choices"][0]["message"]["content"])
            ```
        """
        ...
