"""
ConnLLM OpenRouter提供商V2实现
提供OpenRouter API访问和特有功能支持
"""

from typing import Dict, Any, List, Optional, Tuple
import logging

from ..core_v2.provider import BaseProviderV2
from ..core_v2.processors.thinking import ThinkingProcessor

logger = logging.getLogger("connllm.providers.openrouter_v2")

class OpenRouterProviderV2(BaseProviderV2):
	"""OpenRouter API provider, V2 implementation.

	Adds OpenRouter-specific behavior on top of BaseProviderV2:
	app-attribution headers, routing/transform request parameters,
	and a static price table for cost estimation.
	"""

	# Estimated USD cost per token for common models: (input rate, output rate).
	# Hoisted to a class constant so the table is built once instead of on
	# every _calculate_cost call.
	_PRICING: Dict[str, Tuple[float, float]] = {
		"openai/gpt-4": (0.00003, 0.00006),
		"openai/gpt-3.5-turbo": (0.0000015, 0.000002),
		"anthropic/claude-3-opus": (0.000015, 0.000075),
		"anthropic/claude-3-sonnet": (0.000003, 0.000015),
		"anthropic/claude-3-haiku": (0.00000025, 0.00000125),
		"google/gemini-pro": (0.000000375, 0.00000125),
		"meta-llama/llama-3-70b-instruct": (0.00000075, 0.0000045),
	}

	def __init__(self, config: Dict[str, Any]):
		"""
		Initialize the OpenRouter provider.

		Args:
			config: Configuration dictionary. ``provider_type`` and
				``base_url`` are defaulted to OpenRouter values when absent.
		"""
		# Fill in OpenRouter defaults without clobbering caller-supplied values.
		config.setdefault("provider_type", "openrouter")
		config.setdefault("base_url", "https://openrouter.ai/api/v1")

		# Capture attribution settings before the base class initializes,
		# since _get_default_headers (invoked via the base class) reads them.
		self.app_name = config.get("app_name", "ConnLLM")
		self.app_url = config.get("app_url", "https://connllm.local")

		super().__init__(config)

	def _init_capabilities(self) -> None:
		"""Register provider-level capabilities (image input)."""
		try:
			from ..implementations.openrouter.image_input import OpenRouterImageInput

			self.capability_manager.register("image_input", OpenRouterImageInput)
			logger.debug("已注册OpenRouter图像输入能力")
		except ImportError as e:
			# Best-effort: the provider still works without image input.
			logger.warning(f"导入OpenRouter图像输入能力实现失败: {str(e)}")

	def _init_model_capabilities(self) -> None:
		"""Register model-level capabilities (streaming, thinking).

		Falls back to minimal stand-in implementations when the real ones
		cannot be imported, so callers relying on these capabilities keep
		working.
		"""
		try:
			from ..implementations.openrouter.streaming_v2 import OpenRouterStreamingV2
			from ..implementations.openrouter.thinking_v2 import OpenRouterThinkingV2

			self.model_capability_manager.register("streaming", OpenRouterStreamingV2)
			logger.debug("已注册OpenRouter流式输出能力")

			self.model_capability_manager.register("thinking", OpenRouterThinkingV2)
			logger.debug("已注册OpenRouter思考过程能力")
		except ImportError as e:
			logger.warning(f"导入OpenRouter模型级能力实现失败: {str(e)}")
			logger.warning("将创建临时模拟实现以保持功能完整性")

			from ..core_v2.capability import ModelCapability

			class TempStreamingCapability(ModelCapability):
				# Stand-in: emulates streaming by yielding the full
				# non-streaming completion as a single chunk.
				def is_supported_for_model(self, model: str) -> bool:
					return True

				def stream_response(self, messages):
					response = self.provider._complete_internal(messages)
					content = self.provider._extract_text_from_response(response)
					yield content

			class TempThinkingCapability(ModelCapability):
				# Stand-in: claims support but never extracts or strips
				# thinking content, leaving responses untouched.
				def is_supported_for_model(self, model: str) -> bool:
					return True

				def extract_thinking(self, response):
					return None

				def should_remove_thinking(self):
					return False

				def clean_response(self, response):
					return response

			self.model_capability_manager.register("streaming", TempStreamingCapability)
			self.model_capability_manager.register("thinking", TempThinkingCapability)
			logger.debug("已注册OpenRouter临时模型能力")

	def _get_default_headers(self) -> Dict[str, str]:
		"""
		Build the default HTTP headers.

		Returns:
			Header dict with OpenRouter's app-attribution headers
			(HTTP-Referer / X-Title) added on top of the base headers.
		"""
		headers = super()._get_default_headers()

		headers.update({
			"HTTP-Referer": self.app_url,
			"X-Title": self.app_name,
		})

		return headers

	def _build_default_processor_pipeline(self, model: str) -> None:
		"""
		Build the default response-processor pipeline.

		Args:
			model: Model name used to check capability support.
		"""
		# Only attach the thinking processor when the model supports it.
		if self.model_capability_manager.model_supports("thinking", model):
			thinking = self.model_capability_manager.get("thinking")
			self.response_processors.append(ThinkingProcessor(thinking))
			logger.debug("已添加OpenRouter思考过程处理器")

	def _get_completion_endpoint(self) -> str:
		"""
		Return the API endpoint path for completion requests.
		"""
		return "/chat/completions"

	def _prepare_request_params(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
		"""
		Prepare API request parameters.

		Args:
			messages: Chat message list.

		Returns:
			Request parameter dict, including OpenRouter-specific
			``user``/``route``/``transforms`` entries when configured.
		"""
		completion_settings = self.config_manager.get_completion_settings()

		params = {
			"model": self.config_manager.get_model(),
			"messages": messages,
			"max_tokens": completion_settings.get("max_tokens", 4096),
			"temperature": completion_settings.get("temperature", 0.7),
			"top_p": completion_settings.get("top_p", 1.0),
			"stream": False,  # non-streaming mode
		}

		# OpenRouter-specific optional parameters, added only when configured.
		user_id = self.config_manager.get("user_id")
		if user_id:
			params["user"] = user_id

		route = self.config_manager.get("route")
		if route:
			params["route"] = route

		transforms = self.config_manager.get("transforms")
		if transforms:
			params["transforms"] = transforms

		return params

	def _extract_token_usage(self, response: Dict[str, Any]) -> Tuple[int, int]:
		"""
		Extract token usage from an API response.

		Args:
			response: API response dict.

		Returns:
			Tuple of (input tokens, output tokens); (0, 0) when usage
			cannot be determined.
		"""
		try:
			# Standard OpenAI-style location first; some OpenRouter
			# responses nest usage under an "openrouter" envelope instead.
			usage = response.get("usage")
			if usage is None:
				usage = response.get("openrouter", {}).get("usage")
			if usage is not None:
				return usage.get("prompt_tokens", 0), usage.get("completion_tokens", 0)
		except Exception as e:
			logger.warning(f"从响应中提取token使用量失败: {str(e)}")

		return 0, 0

	def _calculate_cost(self, input_tokens: int, output_tokens: int) -> float:
		"""
		Estimate request cost in USD from the static price table.

		Args:
			input_tokens: Number of input tokens.
			output_tokens: Number of output tokens.

		Returns:
			Estimated cost in USD, or 0.0 for unknown models.
		"""
		# NOTE: the response may carry authoritative cost data; this is a
		# fallback estimate only.
		model = self.config_manager.get_model()

		try:
			# Prefix match so versioned/variant model IDs still resolve
			# to a known base rate.
			for model_pattern, (input_rate, output_rate) in self._PRICING.items():
				if model.startswith(model_pattern):
					return (input_tokens * input_rate) + (output_tokens * output_rate)
		except Exception as e:
			logger.warning(f"计算成本失败: {str(e)}")

		return 0.0
