import aiohttp
import asyncio
import json
import os
import time
from typing import Dict, Any, AsyncGenerator, Optional
import logging


class AsyncOllamaClient:
    """Asynchronous client for a locally running Ollama server.

    Wraps the Ollama ``/api/generate`` HTTP endpoint via aiohttp and supports:
      * streaming and non-streaming generation,
      * optional conversation history (flattened into a single prompt),
      * cooperative per-session cancellation via stop flags.

    NOTE(review): user-facing status/error strings are intentionally kept in
    Chinese — they are part of the observable output consumed by callers.
    """

    # System preamble prepended to every history-based prompt.
    _SYSTEM_PROMPT = "你是一个智能助手，请根据对话历史回答用户的问题。\n"
    # Role -> prompt label; entries with unknown roles are skipped.
    _ROLE_LABELS = {"user": "用户", "assistant": "助手", "system": "系统"}
    # Rough character budget before history is truncated.
    _MAX_PROMPT_CHARS = 8000
    # How many most-recent history entries survive truncation.
    _RECENT_HISTORY = 10

    def __init__(self, base_url: Optional[str] = None):
        """Initialize the client.

        Args:
            base_url: Ollama base URL. Falls back to the OLLAMA_BASE_URL
                environment variable, then to http://127.0.0.1:11434.
        """
        self.base_url = base_url or os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434")
        self.model = "qwen3:4b"  # model expected to exist on the host machine
        self._stop_flags: Dict[str, bool] = {}   # session_id -> stop requested
        self._active_sessions: Dict[str, Any] = {}  # session_id -> in-flight response
        self._session = None  # lazily created aiohttp.ClientSession
        # Guards _stop_flags / _active_sessions. Must NEVER be held across a
        # yield/await that hands control back to the consumer (deadlock risk).
        self._lock = asyncio.Lock()
        self.logger = logging.getLogger(__name__)

    async def _get_session(self):
        """Return the shared aiohttp session, creating it if absent/closed."""
        if self._session is None or self._session.closed:
            timeout = aiohttp.ClientTimeout(total=300, connect=30)
            connector = aiohttp.TCPConnector(
                limit=100,
                limit_per_host=30,
                keepalive_timeout=30,
                enable_cleanup_closed=True
            )
            self._session = aiohttp.ClientSession(
                timeout=timeout,
                connector=connector,
                headers={'Content-Type': 'application/json'}
            )
        return self._session

    async def close(self):
        """Close the underlying aiohttp session, if one is open."""
        if self._session and not self._session.closed:
            await self._session.close()
            # Give the connector a moment to finish closing transports.
            await asyncio.sleep(0.1)
        self._session = None

    # ------------------------------------------------------------------
    # Prompt construction
    # ------------------------------------------------------------------

    @classmethod
    def _render_history(cls, history: list) -> str:
        """Flatten a role/content history list into a single prompt string."""
        parts = [cls._SYSTEM_PROMPT]
        for item in history:
            label = cls._ROLE_LABELS.get(item["role"])
            if label is not None:
                parts.append(f"{label}: {item['content']}")
        return "\n".join(parts)

    def _build_history_prompt(self, history: list) -> str:
        """Build the prompt; if over budget, keep only the recent history."""
        prompt = self._render_history(history)
        if len(prompt) > self._MAX_PROMPT_CHARS:
            # Keep the freshest exchanges so the model sees recent context.
            prompt = self._render_history(history[-self._RECENT_HISTORY:])
        return prompt

    # ------------------------------------------------------------------
    # Session bookkeeping (each helper holds the lock only briefly)
    # ------------------------------------------------------------------

    async def _begin_session(self, session_id: str) -> None:
        """Reset the stop flag at the start of a new request."""
        async with self._lock:
            self._stop_flags[session_id] = False

    async def _track_response(self, session_id: str, response) -> None:
        """Remember the in-flight response so stop_generation can close it."""
        async with self._lock:
            self._active_sessions[session_id] = response

    async def _stop_requested(self, session_id: str) -> bool:
        """Check, under the lock, whether a stop was requested."""
        async with self._lock:
            return self._stop_flags.get(session_id, False)

    async def _end_session(self, session_id: str) -> None:
        """Drop all bookkeeping for a finished or aborted session."""
        async with self._lock:
            self._stop_flags.pop(session_id, None)
            self._active_sessions.pop(session_id, None)

    # ------------------------------------------------------------------
    # Core generation (shared by the public chat methods)
    # ------------------------------------------------------------------

    async def _generate_stream(self, prompt: str, session_id: str) -> AsyncGenerator[str, None]:
        """Stream generation chunks for *prompt*, honoring the stop flag.

        Yields model text chunks; on stop/cancel/error yields one
        human-readable status string and terminates.
        """
        try:
            await self._begin_session(session_id)

            payload = {
                "model": self.model,
                "prompt": prompt,
                "stream": True
            }
            session = await self._get_session()

            async with session.post(
                f"{self.base_url}/api/generate",
                json=payload
            ) as response:
                await self._track_response(session_id, response)

                if response.status != 200:
                    error_text = await response.text()
                    yield f"LLM服务错误: {response.status} - {error_text}"
                    return

                async for line in response.content:
                    # Check the flag once per line. The check acquires and
                    # releases the lock BEFORE any yield, so the lock is never
                    # held across a suspension point (original code yielded
                    # inside `async with self._lock`, which could deadlock
                    # against stop_generation).
                    if await self._stop_requested(session_id):
                        self.logger.info(f"检测到停止标志，终止生成: {session_id}")
                        yield "⏹️ 生成已被用户停止"
                        return

                    if not line:
                        continue
                    try:
                        line_str = line.decode('utf-8').strip()
                        if not line_str:
                            continue
                        data = json.loads(line_str)
                    except (json.JSONDecodeError, UnicodeDecodeError):
                        # Skip malformed/partial lines rather than aborting.
                        continue

                    if 'response' in data:
                        # Re-check right before emitting so a stop request
                        # that arrived mid-parse is honored immediately.
                        if await self._stop_requested(session_id):
                            self.logger.info(f"在输出前检测到停止标志: {session_id}")
                            yield "⏹️ 生成已被用户停止"
                            return
                        yield data['response']
                    if data.get('done', False):
                        break

        except asyncio.CancelledError:
            self.logger.info(f"聊天流被取消: {session_id}")
            yield "⏹️ 生成已被取消"
        except Exception as e:
            self.logger.error(f"聊天流异常: {e}")
            yield f"连接LLM服务失败: {str(e)}"
        finally:
            await self._end_session(session_id)

    async def _generate(self, prompt: str, session_id: str) -> str:
        """Run one non-streaming generation; return the full response text."""
        try:
            await self._begin_session(session_id)

            payload = {
                "model": self.model,
                "prompt": prompt,
                "stream": False
            }
            session = await self._get_session()

            async with session.post(
                f"{self.base_url}/api/generate",
                json=payload
            ) as response:
                await self._track_response(session_id, response)

                if response.status == 200:
                    data = await response.json()
                    return data.get('response', '无响应内容')
                error_text = await response.text()
                return f"LLM服务错误: {response.status} - {error_text}"

        except asyncio.CancelledError:
            self.logger.info(f"聊天请求被取消: {session_id}")
            return "⏹️ 请求已被取消"
        except Exception as e:
            self.logger.error(f"聊天请求异常: {e}")
            return f"连接LLM服务失败: {str(e)}"
        finally:
            await self._end_session(session_id)

    # ------------------------------------------------------------------
    # Public chat API (signatures unchanged)
    # ------------------------------------------------------------------

    async def chat_stream(self, message: str, session_id: str = None) -> AsyncGenerator[str, None]:
        """Stream a reply to a single *message* (no history)."""
        session_id = session_id or f"session_{int(time.time())}"
        async for chunk in self._generate_stream(message, session_id):
            yield chunk

    async def chat_stream_with_history(self, history: list, session_id: str = None) -> AsyncGenerator[str, None]:
        """Stream a reply built from a full conversation *history*."""
        session_id = session_id or f"session_{int(time.time())}"
        try:
            prompt = self._build_history_prompt(history)
        except Exception as e:
            # Mirror the original behavior: surface failures as an error
            # string rather than raising out of the generator.
            self.logger.error(f"带历史的聊天流异常: {e}")
            yield f"连接LLM服务失败: {str(e)}"
            return
        async for chunk in self._generate_stream(prompt, session_id):
            yield chunk

    async def chat(self, message: str, session_id: str = None) -> str:
        """Non-streaming chat for a single *message* (no history)."""
        session_id = session_id or f"session_{int(time.time())}"
        return await self._generate(message, session_id)

    async def chat_with_history(self, history: list, session_id: str = None) -> str:
        """Non-streaming chat built from a full conversation *history*."""
        session_id = session_id or f"session_{int(time.time())}"
        try:
            prompt = self._build_history_prompt(history)
        except Exception as e:
            self.logger.error(f"带历史的聊天请求异常: {e}")
            return f"连接LLM服务失败: {str(e)}"
        return await self._generate(prompt, session_id)

    # ------------------------------------------------------------------
    # Service control
    # ------------------------------------------------------------------

    async def is_available(self) -> bool:
        """Return True if the Ollama /api/tags endpoint answers with 200."""
        try:
            session = await self._get_session()
            async with session.get(f"{self.base_url}/api/tags") as response:
                return response.status == 200
        except Exception as e:
            # Was print(); route through the logger like the rest of the class.
            self.logger.error(f"Ollama服务检查失败: {e}")
            return False

    async def stop_generation(self, session_id: str):
        """Request cancellation of *session_id*; close its in-flight response.

        Returns:
            True on success, False if something unexpected failed.
        """
        try:
            async with self._lock:
                self._stop_flags[session_id] = True
                self.logger.info(f"已设置停止标志: {session_id}")
                # Detach the response under the lock; close it outside so we
                # never perform I/O while holding the lock.
                response = self._active_sessions.pop(session_id, None)

            if response is not None and hasattr(response, 'close'):
                try:
                    # ClientResponse.close() is synchronous in aiohttp — the
                    # original `await response.close()` raised TypeError.
                    response.close()
                    self.logger.info(f"已取消活跃请求: {session_id}")
                except Exception as e:
                    self.logger.error(f"取消请求时出错: {e}")
            return True
        except Exception as e:
            self.logger.error(f"停止生成失败: {e}")
            return False

    async def stop_all_generations(self):
        """Stop every active session; returns True on success."""
        try:
            async with self._lock:
                # Flag every known session as stopped.
                for session_id in self._stop_flags:
                    self._stop_flags[session_id] = True
                # Detach all active responses under the lock, close outside.
                active = list(self._active_sessions.items())
                self._active_sessions.clear()

            for session_id, response in active:
                try:
                    if response is not None and hasattr(response, 'close'):
                        response.close()  # sync close; see stop_generation
                    self.logger.info(f"已取消活跃请求: {session_id}")
                except Exception as e:
                    self.logger.error(f"取消请求时出错: {e}")

            self.logger.info("已停止所有生成")
            return True
        except Exception as e:
            self.logger.error(f"停止所有生成时出错: {e}")
            return False

    async def clear_stop_flag(self, session_id: str):
        """Forget the stop flag and any tracked response for *session_id*."""
        async with self._lock:
            self._stop_flags.pop(session_id, None)
            self._active_sessions.pop(session_id, None)
            self.logger.info(f"已清除停止标志: {session_id}")