"""
ConnLLM V2 配置管理器
负责管理提供商配置和模型能力配置
"""

from typing import Dict, Any, Optional, List, Set

class ConfigManagerV2:
	"""Configuration manager (V2).

	Wraps a raw provider configuration dict and exposes typed accessors
	for provider/API/completion settings plus per-model capability
	lookups (with prefix-based fallback matching).
	"""

	# Maps public capability names to keys inside a model's capability
	# config. Class-level constant so it is not rebuilt on every call.
	_CAPABILITY_KEYS = {
		"streaming": "supports_streaming",
		"partial_mode": "supports_partial_mode",
		"tool_calls": "supports_tool_calls",
		"web_search": "supports_web_search",
		"thinking": "supports_thinking",
	}

	def __init__(self, config: Dict[str, Any]):
		"""Initialize the configuration manager.

		Args:
			config: Raw configuration dictionary. A shallow copy is
				stored, so later top-level mutations of the caller's
				dict do not affect this manager (nested values are
				still shared).
		"""
		self.raw_config = config.copy()

		# Resolve the per-model capability table once up front.
		self.model_capabilities = self._load_model_capabilities()

	def get(self, key: str, default: Any = None) -> Any:
		"""Return the raw config value for ``key``.

		Args:
			key: Configuration key.
			default: Value returned when ``key`` is absent.

		Returns:
			The configured value, or ``default``.
		"""
		return self.raw_config.get(key, default)

	def get_provider_type(self) -> str:
		"""Return the provider type name (``"unknown"`` if unset)."""
		return self.raw_config.get("provider_type", "unknown")

	def get_model(self) -> str:
		"""Return the currently configured model name (``""`` if unset)."""
		return self.raw_config.get("model", "")

	def get_api_settings(self) -> Dict[str, Any]:
		"""Return API connection settings.

		Returns:
			Dict with ``api_key``, ``base_url``, ``timeout`` (default 60)
			and ``proxy`` (default ``None``).
		"""
		return {
			"api_key": self.raw_config.get("api_key", ""),
			"base_url": self.raw_config.get("base_url", ""),
			"timeout": self.raw_config.get("timeout", 60),
			"proxy": self.raw_config.get("proxy"),
		}

	def get_completion_settings(self) -> Dict[str, Any]:
		"""Return sampling settings for completion requests.

		Returns:
			Dict with ``max_tokens``, ``temperature``, ``top_p``,
			``presence_penalty`` and ``frequency_penalty``, falling back
			to conventional defaults when unset.
		"""
		return {
			"max_tokens": self.raw_config.get("max_tokens", 4096),
			"temperature": self.raw_config.get("temperature", 0.7),
			"top_p": self.raw_config.get("top_p", 1.0),
			"presence_penalty": self.raw_config.get("presence_penalty", 0.0),
			"frequency_penalty": self.raw_config.get("frequency_penalty", 0.0),
		}

	def model_supports_capability(self, model: str, capability: str) -> bool:
		"""Check whether ``model`` supports a named capability.

		Args:
			model: Model name.
			capability: Public capability name (one of the keys of
				``_CAPABILITY_KEYS``, e.g. ``"streaming"``).

		Returns:
			True if the model's capability config enables it; False for
			unknown capabilities or unknown models.
		"""
		model_config = self.get_model_capabilities(model)

		config_key = self._CAPABILITY_KEYS.get(capability)
		if not config_key:
			# Unknown capability names are treated as unsupported.
			return False

		# bool() honours the declared return type even if the config
		# stores a non-bool truthy value.
		return bool(model_config.get(config_key, False))

	def get_model_capabilities(self, model: str) -> Dict[str, Any]:
		"""Return the capability config for ``model`` (``{}`` if unknown).

		Resolution order: exact name match first; otherwise the longest
		configured name that is a prefix of ``model``. Preferring the
		longest prefix makes the result deterministic and picks the most
		specific entry (e.g. ``"gpt-4"`` beats ``"gpt"`` for
		``"gpt-4-turbo"``), independent of dict ordering.

		Args:
			model: Model name to look up.

		Returns:
			The model's capability configuration dict, possibly empty.
		"""
		model_config = self.model_capabilities.get(model, {})

		if not model_config and model:
			# Fall back to prefix matching; prefer the longest (most
			# specific) matching pattern so the result is deterministic.
			best_pattern = ""
			for pattern, config in self.model_capabilities.items():
				if model.startswith(pattern) and len(pattern) > len(best_pattern):
					best_pattern = pattern
					model_config = config

		return model_config

	def _load_model_capabilities(self) -> Dict[str, Dict[str, Any]]:
		"""Load the model-capability table from the raw config.

		Returns:
			The ``models`` section of the config, or provider-specific
			defaults when that section is absent or empty.
		"""
		models_config = self.raw_config.get("models", {})

		if not models_config:
			models_config = self._get_default_model_capabilities()

		return models_config

	def _get_default_model_capabilities(self) -> Dict[str, Dict[str, Any]]:
		"""Return built-in default capabilities for the provider type.

		Returns:
			A capability table for known provider types (``moonshot``,
			``openrouter``); ``{}`` for anything else.
		"""
		provider_type = self.get_provider_type()

		if provider_type == "moonshot":
			return {
				"moonshot-v1-8k": {
					"supports_streaming": True,
					"supports_partial_mode": True,
					"supports_tool_calls": True,
					"supports_web_search": True,
					"context_window": 8192
				},
				"moonshot-v1-32k": {
					"supports_streaming": True,
					"supports_partial_mode": True,
					"supports_tool_calls": True,
					"supports_web_search": True,
					"context_window": 32768
				},
				"moonshot-v1-128k": {
					"supports_streaming": True,
					"supports_partial_mode": True,
					"supports_tool_calls": True,
					"supports_web_search": True,
					"context_window": 131072
				}
			}
		elif provider_type == "openrouter":
			return {
				"claude-3-opus": {
					"supports_streaming": True,
					"supports_thinking": True,
					"context_window": 200000
				},
				"claude-3-sonnet": {
					"supports_streaming": True,
					"supports_thinking": True,
					"context_window": 200000
				},
				"gpt-4": {
					"supports_streaming": True,
					"supports_thinking": True,
					"context_window": 8192
				}
			}

		# Unknown provider: no default capabilities.
		return {}
