import logging
import time
import requests
import json
import os
import signal
import threading
import subprocess
from typing import Dict, List, Optional, Union, Generator

from config.settings import (
    OLLAMA_BASE_URL, OLLAMA_MODEL, OLLAMA_TEMPERATURE,
    OLLAMA_CONTEXT_WINDOW, DEBUG
)

# Configure root logging once at import time and expose a module-level logger.
_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)
logger = logging.getLogger(__name__)

class OllamaWrapper:
    """Enhanced Ollama API wrapper with robust error handling and resource management.

    Wraps the local Ollama HTTP API (generate / chat / streaming) and adds:
    a system-resource check before each request, hard request timeouts,
    best-effort automatic service restart when the server hangs, and an
    isolated-subprocess generation path with its own timeout.
    """

    def __init__(self) -> None:
        # Connection / model parameters come from project-level settings.
        self.base_url: str = OLLAMA_BASE_URL
        self.model: str = OLLAMA_MODEL
        self.temperature = OLLAMA_TEMPERATURE
        self.max_tokens = OLLAMA_CONTEXT_WINDOW
        # Handle to an in-flight streaming response (used by cancel_generation).
        self.active_request: Optional[requests.Response] = None
        # PID of a running `ollama run` subprocess (used by cancel_generation).
        self.subprocess_pid: Optional[int] = None

        # Verify the configured model is available on the local server.
        self._check_model()

    def _check_model(self) -> None:
        """Check that the configured model is loaded and available on the server."""
        try:
            response = requests.get(
                f"{self.base_url}/api/tags",
                timeout=5
            )

            if response.status_code == 200:
                models = response.json().get("models", [])
                available_models = [model.get("name") for model in models]

                if self.model in available_models:
                    logger.info(f"模型 {self.model} 已加载并可用")
                else:
                    logger.warning(f"模型 {self.model} 不在可用列表中: {available_models}")
                    logger.warning(f"请使用 'ollama pull {self.model}' 下载此模型")
            else:
                logger.warning(f"检查模型状态失败: {response.status_code} {response.text}")
        except Exception as e:
            # Any failure (connection refused, bad JSON, ...) is treated as
            # "service unreachable": log it and try to bring the server back up.
            logger.error(f"连接Ollama服务失败: {str(e)}")
            logger.info("请确保Ollama服务正在运行，尝试使用'ollama serve'启动服务")
            self._restart_ollama_service()

    def _restart_ollama_service(self) -> None:
        """Best-effort restart of the local Ollama service (kill, then relaunch)."""
        try:
            logger.info("尝试重启Ollama服务...")
            # Kill any existing ollama processes first.
            subprocess.run(["pkill", "ollama"], stderr=subprocess.DEVNULL)
            time.sleep(2)
            # Launch a fresh detached server process, discarding its output.
            subprocess.Popen(["ollama", "serve"],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.DEVNULL)
            logger.info("已启动Ollama服务，等待服务就绪...")
            time.sleep(5)  # give the server a moment to become ready
        except Exception as e:
            logger.error(f"重启Ollama服务失败: {str(e)}")

    def _check_resources(self) -> bool:
        """Return False when system RAM or GPU memory usage exceeds 90%.

        Uses Linux-only probes (/proc/meminfo, nvidia-smi). Any probe failure
        is treated as "resources OK" so the check itself never blocks
        generation.
        """
        try:
            # System memory: parse /proc/meminfo, converting kB entries to bytes.
            mem_info = {}
            with open('/proc/meminfo', 'r') as f:
                for line in f:
                    parts = line.split(':')
                    if len(parts) >= 2:
                        key = parts[0].strip()
                        value = parts[1].strip()
                        if 'kB' in value:
                            value = int(value.replace('kB', '').strip()) * 1024
                        mem_info[key] = value

            total = int(mem_info.get('MemTotal', 0))
            available = int(mem_info.get('MemAvailable', 0))

            if total > 0:
                used_percent = (total - available) / total * 100
                if used_percent > 90:
                    logger.warning(f"系统内存使用率高: {used_percent:.1f}%")
                    return False

            # GPU memory (only when an NVIDIA GPU and nvidia-smi are present).
            try:
                nvidia_smi = subprocess.check_output(
                    ['nvidia-smi', '--query-gpu=memory.used,memory.total',
                     '--format=csv,nounits,noheader'])
                values = nvidia_smi.decode('ascii').strip().split('\n')
                for i, line in enumerate(values):
                    mem_used, mem_total = map(int, line.split(','))
                    mem_percent = mem_used / mem_total * 100
                    if mem_percent > 90:
                        logger.warning(f"GPU {i} 显存使用率高: {mem_percent:.1f}%")
                        return False
            except (OSError, ValueError, subprocess.SubprocessError):
                pass  # no GPU, or nvidia-smi unavailable / unparsable

            return True
        except Exception as e:
            logger.warning(f"资源检查失败: {str(e)}")
            return True  # if the probe itself fails, default to allowing generation

    def generate(self, prompt: str) -> str:
        """Generate a text completion for *prompt* (blocking, non-streaming).

        Args:
            prompt: the prompt text.

        Returns:
            The generated text, or a human-readable error string when the
            request fails, times out, or system resources are exhausted.
        """
        if not self._check_resources():
            return "系统资源不足，请稍后再试或重启服务"

        if DEBUG:
            logger.debug(f"提示词: {prompt[:200]}...")

        start_time = time.time()
        response: Optional[requests.Response] = None

        try:
            payload = {
                "model": self.model,
                "prompt": prompt,
                "temperature": self.temperature,
                "num_predict": self.max_tokens,
                "stream": False
            }

            # Rely on requests' own timeout instead of a watcher thread: the
            # previous thread+Event pattern duplicated this same 180 s limit
            # and leaked a non-daemon thread whenever the wait expired.
            response = requests.post(
                f"{self.base_url}/api/generate",
                json=payload,
                timeout=180  # 3-minute timeout
            )

            if response.status_code != 200:
                error_msg = f"Ollama API错误: {response.status_code} {response.text}"
                logger.error(error_msg)
                return f"生成失败: {error_msg}"

            generated_text = response.json().get("response", "")

            elapsed_time = time.time() - start_time
            tokens_generated = len(generated_text.split())
            logger.info(f"生成完成: {tokens_generated} tokens, 耗时 {elapsed_time:.2f} 秒")

            return generated_text

        except requests.exceptions.Timeout:
            # Hard timeout: restart the service so the next call has a chance.
            logger.error("Ollama API请求超时，可能需要重启服务")
            self._restart_ollama_service()
            return "生成超时，已自动重启服务，请稍后重试"
        except requests.exceptions.RequestException as e:
            logger.error(str(e))
            return f"生成失败: {str(e)}"
        except Exception as e:
            logger.error(f"调用Ollama API时出错: {str(e)}")
            return f"生成出错: {str(e)}"
        finally:
            # Ensure the HTTP connection is released.
            if response is not None:
                try:
                    response.close()
                except Exception:
                    pass

    def generate_stream(self, prompt: str) -> Generator[str, None, None]:
        """Stream the generated response for *prompt*, yielding text chunks.

        An inactivity watchdog restarts the Ollama service when no chunk
        arrives for 5 minutes; error conditions are yielded as human-readable
        strings rather than raised.
        """
        if not self._check_resources():
            yield "系统资源不足，请稍后再试或重启服务"
            return

        if DEBUG:
            logger.debug(f"提示词: {prompt[:200]}...")

        start_time = time.time()
        response: Optional[requests.Response] = None

        # Watchdog flag checked between chunks.
        self.generation_timeout = False

        def timeout_handler() -> None:
            logger.error("Ollama API流式请求超时")
            self.generation_timeout = True
            self._restart_ollama_service()

        timer = threading.Timer(300, timeout_handler)  # 5-minute inactivity limit
        timer.start()

        try:
            payload = {
                "model": self.model,
                "prompt": prompt,
                "temperature": self.temperature,
                "num_predict": self.max_tokens,
                "stream": True  # enable streaming output
            }

            response = requests.post(
                f"{self.base_url}/api/generate",
                json=payload,
                stream=True,
                timeout=360  # 6 minutes
            )
            self.active_request = response

            if response.status_code != 200:
                error_msg = f"Ollama API错误: {response.status_code}"
                logger.error(error_msg)
                yield f"生成失败: {error_msg}"
                return

            full_text = ""
            for line in response.iter_lines():
                if self.generation_timeout:
                    yield "\n\n[生成超时，已重启服务]"
                    break

                if line:
                    try:
                        data = json.loads(line)
                        chunk = data.get("response", "")
                        full_text += chunk
                        yield chunk  # hand this fragment to the caller
                        # A chunk arrived: restart the inactivity watchdog.
                        timer.cancel()
                        timer = threading.Timer(300, timeout_handler)
                        timer.start()
                    except json.JSONDecodeError:
                        logger.warning(f"无法解析响应: {line}")

            elapsed_time = time.time() - start_time
            tokens_generated = len(full_text.split())
            logger.info(f"流式生成完成: {tokens_generated} tokens, 耗时 {elapsed_time:.2f} 秒")

        except requests.exceptions.Timeout:
            logger.error("Ollama API请求超时")
            yield "\n\n生成超时，请尝试减少输入长度或简化请求"
            self._restart_ollama_service()
        except Exception as e:
            logger.error(f"流式生成时出错: {str(e)}")
            yield f"\n\n生成出错: {str(e)}"
            # Connection-level failures usually mean the server is wedged.
            if "connection" in str(e).lower() or "timeout" in str(e).lower():
                self._restart_ollama_service()
        finally:
            # Always stop the watchdog: the previous version leaked it on the
            # error paths, triggering a spurious service restart minutes later.
            timer.cancel()
            self.active_request = None
            if response is not None:
                try:
                    response.close()
                except Exception:
                    pass

    def generate_with_subprocess(self, prompt: str) -> str:
        """Generate text via an `ollama run` subprocess for extra isolation.

        The prompt is fed through stdin using an argument-list Popen (no
        shell), so no quoting/escaping is needed and shell injection is
        impossible. The child runs in its own session so the whole process
        group can be killed on timeout.
        """
        try:
            logger.info(f"通过子进程调用Ollama模型: {self.model}")

            # start_new_session makes the child a process-group leader so
            # os.killpg() below can reach it and any children it spawns.
            process = subprocess.Popen(
                ["ollama", "run", self.model],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                start_new_session=True
            )
            self.subprocess_pid = process.pid

            try:
                # Feed the prompt on stdin and wait up to 5 minutes.
                stdout, stderr = process.communicate(input=prompt, timeout=300)

                if process.returncode == 0:
                    return stdout.strip()
                logger.error(f"Ollama子进程错误: {stderr}")
                return f"生成失败: {stderr}"
            except subprocess.TimeoutExpired:
                logger.error("Ollama子进程超时，终止进程")

                # Terminate the process group (the child and its descendants).
                try:
                    os.killpg(os.getpgid(process.pid), signal.SIGTERM)
                except OSError:
                    # Fallback when the group is already gone.
                    process.terminate()
                    process.kill()

                # Reap the child so it does not linger as a zombie.
                try:
                    process.wait(timeout=5)
                except subprocess.TimeoutExpired:
                    pass

                self._restart_ollama_service()

                return "生成超时，已终止进程并重启服务"

        except Exception as e:
            logger.error(f"子进程生成出错: {str(e)}")
            return f"生成出错: {str(e)}"
        finally:
            self.subprocess_pid = None

    def cancel_generation(self) -> None:
        """Cancel any in-flight streaming request and/or generation subprocess."""
        if self.active_request:
            try:
                self.active_request.close()
                logger.info("已取消当前API请求")
            except Exception:
                pass
            self.active_request = None

        if self.subprocess_pid:
            try:
                os.kill(self.subprocess_pid, signal.SIGTERM)
                logger.info(f"已终止子进程 {self.subprocess_pid}")
            except OSError:
                pass  # process already exited
            self.subprocess_pid = None

    def chat(self, messages: List[Dict[str, str]]) -> str:
        """Generate a response using chat mode.

        Args:
            messages: message list shaped like [{"role": "user", "content": "..."}].

        Returns:
            The generated chat reply, or a human-readable error string.
        """
        if not self._check_resources():
            return "系统资源不足，请稍后再试或重启服务"

        if DEBUG:
            logger.debug(f"聊天消息: {messages}")

        start_time = time.time()
        response: Optional[requests.Response] = None

        try:
            payload = {
                "model": self.model,
                "messages": messages,
                "temperature": self.temperature,
                "stream": False
            }

            # Direct call with requests' own timeout (see generate() for why
            # the thread+Event wrapper was removed).
            response = requests.post(
                f"{self.base_url}/api/chat",
                json=payload,
                timeout=180  # 3-minute timeout
            )

            if response.status_code != 200:
                error_msg = f"Ollama API错误: {response.status_code} {response.text}"
                logger.error(error_msg)
                return f"聊天失败: {error_msg}"

            generated_text = response.json().get("message", {}).get("content", "")

            elapsed_time = time.time() - start_time
            tokens_generated = len(generated_text.split())
            logger.info(f"聊天生成完成: {tokens_generated} tokens, 耗时 {elapsed_time:.2f} 秒")

            return generated_text

        except requests.exceptions.Timeout:
            logger.error("Ollama API聊天请求超时，可能需要重启服务")
            self._restart_ollama_service()
            return "聊天超时，已自动重启服务，请稍后重试"
        except requests.exceptions.RequestException as e:
            logger.error(str(e))
            return f"聊天失败: {str(e)}"
        except Exception as e:
            logger.error(f"调用Ollama API聊天接口时出错: {str(e)}")
            return f"聊天出错: {str(e)}"
        finally:
            # Ensure the HTTP connection is released.
            if response is not None:
                try:
                    response.close()
                except Exception:
                    pass
