"""
Ollama流式输出功能实现
支持Ollama模型的流式文本生成
"""
from typing import Dict, Any, List, Optional, Generator
import json
import logging
import time

from ...capabilities.streaming import StreamingCapability

logger = logging.getLogger("llm.ollama.stream")

class OllamaStreaming(StreamingCapability):
	"""Streaming text generation for Ollama models.

	Prefers the ``ollama`` Python client library when it is installed;
	otherwise falls back to the Ollama REST API (``POST /api/chat`` with
	``stream=True``).
	"""

	def _initialize(self) -> None:
		"""Initialize the streaming capability and probe for the ollama library."""
		self.supported = True
		self.use_library = False
		logger.debug("初始化Ollama流式输出功能")

		# Prefer the native client library when it is importable;
		# fall back to the REST API otherwise.
		try:
			import ollama  # noqa: F401 -- availability probe only
			self.use_library = True
			logger.debug("使用ollama库进行流式输出")
		except ImportError:
			logger.debug("未安装ollama库，将使用REST API进行流式输出")

	def is_supported(self) -> bool:
		"""
		Check whether streaming output is supported.

		Returns:
			True -- Ollama supports streaming for all models.
		"""
		return self.supported

	def stream_response(self, messages: List[Dict[str, Any]], **kwargs) -> Generator[str, None, None]:
		"""
		Generate a streaming response.

		Args:
			messages: Chat message list (role/content dicts).
			**kwargs: Extra provider-specific parameters; these override
				the defaults built from the provider configuration.

		Yields:
			Text fragments as they arrive. On failure an error-marker
			string is yielded instead of raising, so consumers can keep
			iterating safely.
		"""
		logger.debug("开始Ollama流式生成")
		start_time = time.time()
		chunks_count = 0

		try:
			# Request parameters; caller kwargs take precedence over the
			# provider-derived defaults.
			params = {
				"model": self.provider.model,
				"messages": messages,
				"keep_alive": self.provider.keep_alive,
				"stream": True,
				"options": {
					"temperature": self.provider.temperature,
					"num_ctx": self.provider.max_tokens
				},
				**kwargs
			}

			if self.use_library:
				# Native library path: ollama.chat yields chunks directly.
				import ollama
				response = ollama.chat(**params)

				for chunk in response:
					chunk_text = self.process_chunk(chunk)
					if chunk_text:
						chunks_count += 1
						yield chunk_text

					# The final chunk carries token/timing statistics.
					if self._is_final_chunk(chunk):
						self._log_performance_metrics(chunk)
			else:
				# REST API fallback. Use the response as a context manager
				# so the streaming connection is always released.
				import requests

				with requests.post(
					f"{self.provider.host}/api/chat",
					json=params,
					stream=True
				) as response:
					if response.status_code != 200:
						raise Exception(f"API请求失败: {response.status_code} {response.text}")

					for line in response.iter_lines():
						if not line:
							continue
						try:
							chunk = json.loads(line)
						except json.JSONDecodeError:
							# Best-effort: skip malformed lines but leave a trace.
							logger.debug("忽略无法解析的流式数据行")
							continue

						chunk_text = self.process_chunk(chunk)
						if chunk_text:
							chunks_count += 1
							yield chunk_text

						# The final chunk carries token/timing statistics.
						if self._is_final_chunk(chunk):
							self._log_performance_metrics(chunk)

			elapsed = time.time() - start_time
			logger.info(f"Ollama流式请求完成，耗时: {elapsed:.2f}秒，块数: {chunks_count}，模型: {self.provider.model}")
		except Exception as e:
			logger.error(f"流式生成过程中出错: {str(e)}")
			yield f"\n[错误: {str(e)}]"

		logger.debug("Ollama流式生成结束")

	def process_chunk(self, chunk: Dict[str, Any]) -> Optional[str]:
		"""
		Process a single response chunk.

		Args:
			chunk: Raw response chunk.

		Returns:
			The extracted text, or None when the chunk carries no text.
		"""
		try:
			# Ollama chat chunk format: {"message": {"content": "..."}}
			if "message" in chunk and "content" in chunk["message"]:
				content = chunk["message"]["content"]
				if content:
					return content
		except Exception as e:
			logger.error(f"处理流式数据块时出错: {str(e)}")

		return None

	def _is_final_chunk(self, chunk: Dict[str, Any]) -> bool:
		"""
		Check whether this is the final chunk of the stream.

		Args:
			chunk: Response chunk.

		Returns:
			True when the chunk is flagged ``done``.
		"""
		return chunk.get("done", False)

	def _log_performance_metrics(self, chunk: Dict[str, Any]) -> None:
		"""
		Log token counts and throughput from the final chunk.

		Args:
			chunk: Final chunk, expected to carry ``prompt_eval_count``,
				``eval_count``, ``prompt_eval_duration`` and
				``eval_duration`` (durations in nanoseconds).
		"""
		if "prompt_eval_count" not in chunk:
			return

		# Use .get() defaults and zero-duration guards: the original
		# direct indexing raised KeyError when Ollama omitted a field,
		# and ZeroDivisionError when a duration was reported as 0.
		prompt_tokens = chunk.get("prompt_eval_count", 0)
		completion_tokens = chunk.get("eval_count", 0)
		prompt_eval_duration = chunk.get("prompt_eval_duration", 0)
		eval_duration = chunk.get("eval_duration", 0)

		s2ns = 1000000000  # nanoseconds per second
		if prompt_eval_duration:
			logger.info(f"输入: {prompt_tokens}t/{prompt_eval_duration/s2ns:.1f}s {s2ns*prompt_tokens/prompt_eval_duration:.1f}t/s")
		if eval_duration:
			logger.info(f"输出: {completion_tokens}t/{eval_duration/s2ns:.1f}s {s2ns*completion_tokens/eval_duration:.1f}t/s")
