import os
import sys
import json
import time
import uuid
import queue
import signal
import logging
import threading
from datetime import datetime
from typing import List, Dict, Any, Optional

# Optional enhancements
try:
    from prompt_toolkit import PromptSession
    from prompt_toolkit.history import FileHistory
    from prompt_toolkit.patch_stdout import patch_stdout
    from prompt_toolkit.key_binding import KeyBindings
    PROMPT_OK = True
except Exception:
    PROMPT_OK = False

try:
    from rich.console import Console
    from rich.panel import Panel
    from rich.table import Table
    from rich.prompt import Confirm
    from rich.markdown import Markdown
    from rich.live import Live
    from rich.text import Text
    from rich.align import Align
    from rich.rule import Rule
    from rich.spinner import Spinner
    from rich.status import Status
    from rich.syntax import Syntax
    RICH_OK = True
except Exception:
    RICH_OK = False

try:
    import colorama
    colorama.init(autoreset=True)
    COLOR_OK = True
except Exception:
    COLOR_OK = False

import requests
import subprocess
import re
import locale

# -------------------------------------------------
# Defaults and paths
# -------------------------------------------------
# Absolute paths anchored to this script's directory so the CLI behaves the
# same regardless of the caller's current working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.join(BASE_DIR, 'config.json')  # persisted user settings
SESSIONS_DIR = os.path.join(BASE_DIR, 'sessions')    # saved conversations
LOGS_DIR = os.path.join(BASE_DIR, 'logs')            # daily log files

# Factory defaults written to config.json on first run.
# SECURITY FIX: a real DeepSeek API key was previously hard-coded here — a
# credential leak in source control. The key is now read from the
# DEEPSEEK_API_KEY environment variable (empty if unset); it can also be set
# at runtime with `/config set apikey <key>`.
DEFAULT_CONFIG = {
    "providers": {
        "local": {
            "type": "ollama",
            "api_url": "http://192.168.1.5:11434",
            "chat_endpoint": "/api/chat",
            "models_endpoint": "/api/tags",
            "model": "qwen7b:latest"
        },
        "deepseek": {
            "type": "openai",
            "api_url": "https://api.deepseek.com",
            "chat_endpoint": "/v1/chat/completions",
            "models_endpoint": "/v1/models",
            "model": "deepseek-chat",
            "api_key": os.environ.get("DEEPSEEK_API_KEY", "")
        }
    },
    "current_provider": "local",
    "temperature": 0.1,
    "top_p": 0.7,
    "max_tokens": 2048,
    "frequency_penalty": 0.0,
    "presence_penalty": 0.0,
    "repeat_penalty": 1.0,
    "stream": True,
    "timeout": 60,
    "proxy": None,
    "safe_mode": True,
    "input_encoding": "auto"
}

# System prompt (Chinese, sent as the first "system" message of every
# conversation): frames the model as a Kylin Server OS operations assistant,
# requires commands in fenced code blocks, forbids fabricated answers, and
# instructs it to say "我不确定" when unsure. This is a runtime string —
# translating or reflowing it would change model behavior.
SYSTEM_PROMPT = (
    "你是一个基于麒麟服务器操作系统（Kylin Server OS）设计的智能运维助手，专注于为系统管理、故障排查、性能优化、安全监控等运维场景提供智能化支持。\n\n"
    "你的核心任务是协助用户执行命令行操作、分析日志信息、监控系统状态、提供配置建议，并能够基于上下文进行推理和辅助决策。\n\n"
    "【技术问题处理原则】\n"
    "当用户提出与命令、配置、操作、排障等运维相关的问题时，你必须：\n"
    "1. ✅ 优先提供准确、清晰、可执行的命令，使用代码块（如 ```bash\\n命令内容\\n```）清晰包裹命令。\n"
    "2. ❌ 禁止编造、猜测、模糊回答，或提及未经验证的信息和软件包。\n"
    "3. 保持回答简洁、专业，优先使用代码块、列表或 Markdown 格式。\n"
    "4. 如果你不确定答案，必须明确回答“我不确定”。\n\n"
    "【非技术问题处理原则】\n"
    "如果用户的问题不属于运维技术范畴，你应友好回应并引导用户提出具体的运维相关问题。\n\n"
    "请始终基于你训练时接收到的知识、官方文档和实际运维实践回答问题，确保内容严谨、可信、有价值。"
)

# -------------------------------------------------
# Utilities
# -------------------------------------------------

def ensure_dirs():
    """Create the sessions/ and logs/ directories if they do not exist yet."""
    for directory in (SESSIONS_DIR, LOGS_DIR):
        os.makedirs(directory, exist_ok=True)


def load_config() -> Dict[str, Any]:
    """Load config.json, creating it with defaults on first run.

    Missing keys are backfilled from DEFAULT_CONFIG via a shallow merge
    (note: a partially-specified nested "providers" dict from the user file
    replaces the default one wholesale — TODO confirm that is intended).

    Returns:
        The effective configuration dict; falls back to a copy of
        DEFAULT_CONFIG when the config file is unreadable or corrupt.
    """
    ensure_dirs()
    if not os.path.exists(CONFIG_PATH):
        with open(CONFIG_PATH, 'w', encoding='utf-8') as f:
            json.dump(DEFAULT_CONFIG, f, ensure_ascii=False, indent=2)
        return DEFAULT_CONFIG.copy()
    try:
        with open(CONFIG_PATH, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except (OSError, ValueError) as e:
        # Previously any failure was silently swallowed; keep the safe
        # fallback but record what went wrong. (json.JSONDecodeError and
        # UnicodeDecodeError are both ValueError subclasses.)
        logging.warning(f"读取配置失败，使用默认配置: {e}")
        return DEFAULT_CONFIG.copy()
    cfg = DEFAULT_CONFIG.copy()
    cfg.update(data or {})  # `data or {}` guards against a literal null file
    return cfg


def save_config(cfg: Dict[str, Any]):
    """Persist *cfg* to config.json (UTF-8, pretty-printed, non-ASCII kept)."""
    serialized = json.dumps(cfg, ensure_ascii=False, indent=2)
    with open(CONFIG_PATH, 'w', encoding='utf-8') as handle:
        handle.write(serialized)


# -------------------------------------------------
# Logging
# -------------------------------------------------

def setup_logging():
    """Configure root logging to a per-day file under logs/ (no console output)."""
    ensure_dirs()
    stamp = datetime.now().strftime('%Y%m%d')
    log_path = os.path.join(LOGS_DIR, f"enhanced_cli_{stamp}.log")
    file_handler = logging.FileHandler(log_path, encoding='utf-8')
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        handlers=[file_handler],
    )


# -------------------------------------------------
# Console wrappers
# -------------------------------------------------
# Shared console object: a real rich Console when available, otherwise a tiny
# shim so the rest of the code can call console.print() unconditionally.
if RICH_OK:
    console = Console()
else:
    class DummyConsole:
        # print() accepts and silently ignores rich-only keyword arguments
        # (style=, markup=, ...); rich markup in strings is printed verbatim.
        def print(self, *args, **kwargs):
            print(*args)
    console = DummyConsole()


# -------------------------------------------------
# Enhanced Markdown for better code highlighting
# -------------------------------------------------
# BUG FIX: this class was previously defined unconditionally, so importing the
# module raised NameError ("Markdown" undefined) whenever rich was not
# installed — defeating the optional-dependency guard at the top of the file.
# It is only ever used inside RICH_OK branches, so a None placeholder is safe.
if RICH_OK:
    class EnhancedMarkdown(Markdown):
        """Markdown renderer defaulting to the "monokai" code theme for
        better syntax highlighting of fenced code blocks."""

        def __init__(self, markup, code_theme="monokai", **kwargs):
            super().__init__(markup, code_theme=code_theme, **kwargs)
else:
    EnhancedMarkdown = None  # never called when rich is unavailable


# -------------------------------------------------
# AI Response Display Utilities
# -------------------------------------------------
def display_ai_response_start():
    """Print the visual divider that marks the beginning of an AI reply."""
    if not RICH_OK:
        bar = "=" * 50
        print(f"\n{bar}\n🤖 AI助手回复\n{bar}")
        return
    console.print(Rule("🤖 AI助手回复", style="bright_blue"))

def display_ai_response_end():
    """Print the visual divider that marks the end of an AI reply."""
    if not RICH_OK:
        print("=" * 50 + "\n")
        return
    console.print(Rule("", style="dim blue"))
    console.print()


def display_ai_response(content: str, is_streaming: bool = False):
    """Render a complete AI reply inside a styled panel.

    Degrades in three steps: markdown panel → plain-text panel → raw print.
    Empty/whitespace-only content is silently skipped.
    """
    body = content.strip()
    if not body:
        return
    if not RICH_OK:
        print(f"\n🤖 AI: {body}\n")
        return
    # Panel styling shared by both rich rendering attempts.
    frame = dict(
        title="🤖 麒麟智能助手",
        title_align="left",
        border_style="bright_blue",
        padding=(1, 2),
        expand=False,
    )
    try:
        console.print(Panel(EnhancedMarkdown(body), **frame))
    except Exception:
        # Markdown rendering failed — retry with plain text, then raw output.
        try:
            console.print(Panel(Text(body), **frame))
        except Exception:
            console.print(f"[bright_blue]🤖 AI:[/bright_blue] {body}")


def create_ai_live_display():
    """Build a rich Live object for streaming output; None when rich is absent."""
    if not RICH_OK:
        return None
    placeholder = Panel(
        Text("⏳ 正在生成回复..."),
        title="🤖 麒麟智能助手 (正在回复...)",
        title_align="left",
        border_style="bright_blue",
        padding=(1, 2),
        expand=False,
    )
    return Live(placeholder, refresh_per_second=10, console=console)


def update_ai_live_display(live_obj, content: str):
    """Refresh the streaming Live panel with the text accumulated so far.

    While *content* is still empty a "generating" placeholder is shown.
    Falls back from markdown rendering to plain text, then to bare Text.
    """
    if not (live_obj and RICH_OK):
        return
    has_text = bool(content.strip())
    try:
        live_obj.update(Panel(
            EnhancedMarkdown(content) if has_text else Text("⏳ 正在生成回复..."),
            title="🤖 麒麟智能助手" if has_text else "🤖 麒麟智能助手 (正在回复...)",
            title_align="left",
            border_style="bright_blue",
            padding=(1, 2),
            expand=False,
        ))
    except Exception:
        try:
            live_obj.update(Panel(
                Text(content) if has_text else Text("⏳ 正在生成回复..."),
                title="🤖 麒麟智能助手",
                title_align="left",
                border_style="bright_blue",
                padding=(1, 2),
                expand=False,
            ))
        except Exception:
            live_obj.update(Text(content))


def show_thinking_status():
    """Briefly show a "thinking" indicator before the AI responds.

    With rich available this renders an animated spinner for half a second;
    otherwise a plain message is printed. Purely cosmetic.
    """
    if RICH_OK:
        # `time` is imported at module level — the previous function-local
        # re-import (and the unused `as status` binding) were redundant.
        with console.status("[bold green]🤔 AI正在思考您的问题...", spinner="dots"):
            time.sleep(0.5)  # keep the spinner visible long enough to register
    else:
        print("🤔 AI正在思考您的问题...")


def show_error_message(error_msg: str):
    """Display *error_msg* in a red error panel (plain print without rich)."""
    if not RICH_OK:
        print(f"\n❌ 错误: {error_msg}\n")
        return
    console.print(Panel(
        f"[red]❌ {error_msg}[/red]",
        title="错误",
        border_style="red",
        padding=(1, 2)
    ))


# -------------------------------------------------
# Multi-provider compatible client
# -------------------------------------------------
class ChatClient:
    """HTTP chat client for the configured provider.

    Supports two provider types behind one interface: native Ollama
    (``/api/chat``) and OpenAI-compatible APIs (``/v1/chat/completions``).
    The active provider is selected by ``cfg['current_provider']``.
    """

    def __init__(self, cfg: Dict[str, Any]):
        self.cfg = cfg
        self.session = requests.Session()
        if cfg.get('proxy'):
            # Same proxy URL applied to both schemes.
            self.session.proxies.update({
                'http': cfg['proxy'],
                'https': cfg['proxy']
            })

        # Resolve the active provider's sub-config once up front.
        self.provider_name = cfg.get('current_provider', 'local')
        self.provider = cfg.get('providers', {}).get(self.provider_name, {})

        # Present only for providers requiring authentication (e.g. deepseek).
        self.api_key = self.provider.get('api_key')

    def _get_current_config(self) -> Dict[str, Any]:
        """Return the active provider's configuration dict."""
        return self.provider

    def _url(self, endpoint: str) -> str:
        """Join the provider base URL with *endpoint*, normalizing slashes."""
        api_url = self.provider.get('api_url', '')
        full_url = (api_url.rstrip('/') + '/' + endpoint.lstrip('/')).strip()
        logging.info(f"Request URL: {full_url}")
        return full_url

    def _get_headers(self) -> Dict[str, str]:
        """Build request headers; adds a Bearer token when an API key is set."""
        headers = {"Content-Type": "application/json"}
        if self.api_key:
            # For OpenAI compatible APIs
            headers["Authorization"] = f"Bearer {self.api_key}"
        # SECURITY FIX: the raw Authorization header (i.e. the API key) was
        # previously written verbatim to the log file. Log a redacted copy;
        # the real headers are still returned for the request itself.
        redacted = {k: ('Bearer ***' if k == 'Authorization' else v)
                    for k, v in headers.items()}
        logging.info(f"Request Headers: {redacted}")
        return headers

    def list_models(self) -> List[str]:
        """Query the provider's model-listing endpoint.

        Handles both the OpenAI shape ({"data": [{"id": ...}]}) and the
        Ollama shape ({"models": [{"name": ...}]}).

        Returns:
            Model identifiers, or [] on any error (logged as a warning).
        """
        url = self._url(self.provider.get('models_endpoint', '/v1/models'))
        try:
            headers = self._get_headers()
            resp = self.session.get(url, headers=headers, timeout=self.cfg['timeout'])
            resp.raise_for_status()
            data = resp.json()
            # OpenAI compatible returns { data: [{id: ...}] }
            models = []
            if isinstance(data, dict) and 'data' in data:
                for m in data['data']:
                    mid = m.get('id') or m.get('model')
                    if mid:
                        models.append(mid)
            # Ollama native may return a different shape; best-effort fallback.
            if not models and isinstance(data, dict) and 'models' in data:
                for m in data['models']:
                    mid = m.get('name') or m.get('id')
                    if mid:
                        models.append(mid)
            return models
        except Exception as e:
            logging.warning(f"/models 列表失败: {e}")
            return []

    def ping(self) -> bool:
        """Check provider reachability.

        First tries the models endpoint; if that yields nothing, falls back
        to a minimal one-token, non-streaming chat completion.
        """
        if self.list_models():
            return True
        try:
            payload = {
                'model': self.provider.get('model'),
                'messages': [{"role": "user", "content": "ping"}],
                'max_tokens': 1,
                'stream': False
            }
            provider_type = (self.provider.get('type', '') or '').lower()
            endpoint = '/api/chat' if provider_type == 'ollama' else self.provider.get('chat_endpoint', '/v1/chat/completions')
            url = self._url(endpoint)
            headers = self._get_headers()
            resp = self.session.post(url, headers=headers,
                                     data=json.dumps(payload), timeout=self.cfg['timeout'])
            resp.raise_for_status()
            return True
        except Exception as e:
            logging.warning(f"/ping 失败: {e}")
            return False

    def chat(self, messages: List[Dict[str, str]], stream: Optional[bool] = None):
        """Send *messages* to the provider and return the raw Response.

        Args:
            messages: Full conversation in {"role", "content"} form.
            stream: Overrides the configured streaming mode when not None.

        Raises:
            requests.HTTPError: on a non-2xx response.

        NOTE(review): OpenAI-style sampling keys (max_tokens, penalties) are
        also sent to Ollama endpoints, which presumably ignore unknown keys —
        confirm against the Ollama API if behavior looks off.
        """
        payload = {
            'model': self.provider.get('model'),
            'messages': messages,
            'temperature': self.cfg['temperature'],
            'top_p': self.cfg['top_p'],
            'max_tokens': self.cfg['max_tokens'],
            'frequency_penalty': self.cfg['frequency_penalty'],
            'presence_penalty': self.cfg['presence_penalty'],
            'stream': self.cfg['stream'] if stream is None else stream,
            'repeat_penalty': self.cfg['repeat_penalty'],
        }
        provider_type = (self.provider.get('type', '') or '').lower()
        endpoint = '/api/chat' if provider_type == 'ollama' else self.provider.get('chat_endpoint', '/v1/chat/completions')
        url = self._url(endpoint)
        headers = self._get_headers()
        resp = self.session.post(url, headers=headers,
                                 data=json.dumps(payload), stream=payload['stream'],
                                 timeout=self.cfg['timeout'])
        resp.raise_for_status()
        logging.info(f"Provider type: {self.provider.get('type', '')}")
        return resp


# -------------------------------------------------
# Session management
# -------------------------------------------------
class SessionManager:
    """Holds the running conversation and handles save/load/export."""

    def __init__(self):
        ensure_dirs()
        # Every conversation starts with the fixed system prompt.
        self.messages: List[Dict[str, str]] = [
            {"role": "system", "content": SYSTEM_PROMPT}
        ]
        stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        self.session_id = f"{stamp}_{uuid.uuid4().hex[:8]}"
        self.last_user_input: Optional[str] = None

    def reset(self):
        """Drop all turns, keeping only the system prompt."""
        self.messages = [{"role": "system", "content": SYSTEM_PROMPT}]

    def add_user(self, content: str):
        """Append a user turn and remember it for /retry."""
        self.last_user_input = content
        self.messages.append({"role": "user", "content": content})

    def add_assistant(self, content: str):
        """Append an assistant turn."""
        self.messages.append({"role": "assistant", "content": content})

    def history(self) -> List[Dict[str, str]]:
        """Return the full message list (including the system prompt)."""
        return self.messages

    def save(self, name: Optional[str] = None) -> str:
        """Write the conversation to sessions/<name>.json; returns the path."""
        path = os.path.join(SESSIONS_DIR, f"{name or self.session_id}.json")
        with open(path, 'w', encoding='utf-8') as handle:
            json.dump(self.messages, handle, ensure_ascii=False, indent=2)
        return path

    def load(self, name: str) -> str:
        """Replace the conversation with one loaded from sessions/.

        Accepts either a bare name or a full "<name>.json" filename.

        Raises:
            FileNotFoundError: when no such session file exists.
        """
        filename = name if name.endswith('.json') else f"{name}.json"
        path = os.path.join(SESSIONS_DIR, filename)
        if not os.path.exists(path):
            raise FileNotFoundError(f"未找到会话文件: {path}")
        with open(path, 'r', encoding='utf-8') as handle:
            self.messages = json.load(handle)
        return path

    def export(self, fmt: str, target: Optional[str] = None) -> str:
        """Export the conversation as markdown or JSON next to the script.

        Raises:
            ValueError: for any format other than 'md' or 'json'.
        """
        fmt = fmt.lower()
        if fmt not in ('md', 'json'):
            raise ValueError('仅支持 md 或 json 格式')
        out_path = os.path.join(BASE_DIR, target or f"export_{self.session_id}.{fmt}")
        if fmt == 'json':
            with open(out_path, 'w', encoding='utf-8') as handle:
                json.dump(self.messages, handle, ensure_ascii=False, indent=2)
        else:
            # One "## <role>" section per message.
            chunks = [f"# 会话导出 - {self.session_id}", '']
            for entry in self.messages:
                chunks.extend([
                    f"## {entry.get('role', 'unknown')}",
                    '',
                    entry.get('content', ''),
                    '',
                ])
            with open(out_path, 'w', encoding='utf-8') as handle:
                handle.write('\n'.join(chunks))
        return out_path


# -------------------------------------------------
# CLI
# -------------------------------------------------
# User-visible help text (Chinese), rendered by print_help(). Keep it in sync
# with the /command handlers in input_loop(). Runtime string — do not reflow
# or translate.
HELP_TEXT = (
    "说明：以 ai: 或 ai： 开头则向助手提问；否则将作为终端命令执行。\n"
    "示例：ai: 如何查看端口占用？\n"
    "/help                       显示帮助\n"
    "/exit | /quit               退出\n"
    "/clear | clear              清屏\n"
    "/reset                      清空上下文\n"
    "/history                    显示对话轮次数量\n"
    "/save [name]                保存会话到 sessions/\n"
    "/load <name>                加载会话\n"
    "/export <md|json> [file]    导出当前会话\n"
    "/ping                       检查服务可用性\n"
    "/models                     列出可用模型\n"
    "/model <name>               切换模型；当 <name> 为提供商名时切换提供商；支持 provider:model 或 provider/model 语法\n"
    "/provider list              列出可用模型提供商\n"
    "/provider switch <name>     切换模型提供商 (local, deepseek)\n"
    "/nonstream on|off           切换流式输出\n"
    "/timeout <sec>              设置超时时间（秒）\n"
    "/set <key> <value>          设置参数(temperature/top_p/max_tokens/repeat_penalty)\n"
    "/config show                显示当前配置\n"
    "/config set url <value>     设置当前提供商 API 地址\n"
    "/config set apikey <value>  设置当前提供商 API 密钥\n"
    "/config set encoding <v>    设置输入编码 utf-8|gbk|auto（默认 auto）\n"
    "/retry                      重发上条用户问题\n"
    "/safe on|off                切换命令执行安全确认模式（默认 on）\n"
)


def banner(cfg: Dict[str, Any]):
    """Show the startup banner and report the current streaming mode."""
    title = "---麒麟OS 智能运维助手--- "
    if RICH_OK:
        hint = "输入 /help 查看帮助 | ai: 提问 | 其他为终端命令"
        console.print(Panel.fit(
            f"{title}\n\n[dim]{hint}[/dim]",
            style="bold blue",
            padding=(1, 2)
        ))
    else:
        separator = "=" * 60
        print(separator)
        print(title)
        print("输入 /help 查看命令 | ai: 提问 | 其他为终端命令")
        print(separator)
    mode = "流式输出" if cfg.get('stream', True) else "非流式输出"
    console.print(f"[cyan]当前模式：{mode}[/cyan]")


def print_help():
    """Render HELP_TEXT, boxed when rich is available."""
    if not RICH_OK:
        print(HELP_TEXT)
        return
    console.print(Panel(HELP_TEXT, title="帮助", border_style="blue"))


def print_config(cfg: Dict[str, Any]):
    """Show the effective configuration.

    API keys are masked (only their length is displayed). Without rich the
    raw config is dumped as JSON — note that this path does echo keys.
    """
    if not RICH_OK:
        print(json.dumps(cfg, ensure_ascii=False, indent=2))
        return
    table = Table(title="当前配置")
    table.add_column("Key", style="bold")
    table.add_column("Value")

    table.add_row("当前提供商", cfg.get('current_provider', 'local'))

    # One group of rows per configured provider.
    for pname, pconf in cfg.get('providers', {}).items():
        table.add_row(f"提供商.{pname}.类型", pconf.get('type', ''))
        table.add_row(f"提供商.{pname}.API地址", pconf.get('api_url', ''))
        table.add_row(f"提供商.{pname}.模型", pconf.get('model', ''))
        secret = pconf.get('api_key', '')
        masked = f"已设置 ({len(secret)} 字符)" if secret else "未设置"
        table.add_row(f"提供商.{pname}.API密钥", masked)

    # Flat, provider-independent options.
    for option in ('temperature', 'top_p', 'max_tokens', 'frequency_penalty',
                   'presence_penalty', 'repeat_penalty', 'stream', 'timeout',
                   'proxy', 'safe_mode', 'input_encoding'):
        if option in cfg:
            table.add_row(option, str(cfg[option]))

    console.print(table)


# -------------------------------------------------
# Shell execution helpers
# -------------------------------------------------

def _is_dangerous_command(cmd: str) -> bool:
    c = cmd.strip().lower()
    patterns = [
        r"\brm\b", r"\bdel\b", r"\brd\b", r"\brmdir\b", r"\bformat\b",
        r"shutdown", r"poweroff", r"reboot", r"mkfs", r"diskpart", r"wmic\s+disk"
    ]
    return any(re.search(p, c) for p in patterns)


def _confirm(prompt: str) -> bool:
    """Ask the user a yes/no question; any failure (EOF, Ctrl-C) counts as no."""
    try:
        if RICH_OK:
            return Confirm.ask(prompt)
        answer = input(f"{prompt} [y/N]: ").strip().lower()
        return answer in ("y", "yes")
    except Exception:
        return False


def _show_bash_history() -> int:
    """Print the last 20 lines of ~/.bash_history; returns 0, or -1 on error.

    A subprocess shell has no history of its own, so `history` is emulated
    by reading the user's bash history file directly.
    """
    try:
        hist_file = os.path.join(os.path.expanduser('~'), '.bash_history')
        if os.path.exists(hist_file):
            with open(hist_file, 'r', encoding='utf-8', errors='replace') as f:
                lines = f.readlines()
            # lines[-20:] already handles files shorter than 20 lines; the
            # previous explicit length check was redundant.
            for i, line in enumerate(lines[-20:], 1):
                console.print(f"{i:4d}  {line.rstrip()}")
        else:
            console.print("[yellow]未找到bash历史文件[/yellow]")
        return 0
    except Exception as e:
        console.print(f"[red]读取历史失败: {e}[/red]")
        return -1


# Commands that take over the terminal; capturing their output would break them.
_INTERACTIVE_COMMANDS = ('top', 'htop', 'vim', 'nano', 'less', 'more', 'man')


def _run_shell(cmd: str) -> int:
    """Execute *cmd* in the system shell and return its exit code.

    `history` is emulated via _show_bash_history(). Interactive commands run
    attached to the terminal; all others have stdout/stderr captured and
    re-printed (stderr in red). Returns -1 when execution itself failed.
    The previous function-local `import os` was redundant (imported at top).
    """
    try:
        first_word = cmd.strip().lower().split()[0] if cmd.strip() else ''

        if first_word == 'history':
            return _show_bash_history()

        if first_word in _INTERACTIVE_COMMANDS:
            # Hand the terminal directly to the child process.
            return subprocess.run(cmd, shell=True).returncode

        completed = subprocess.run(cmd, shell=True, capture_output=True,
                                   text=True, encoding='utf-8', errors='replace')
        if completed.stdout:
            console.print(completed.stdout)
        if completed.stderr:
            console.print(f"[red]{completed.stderr}[/red]")
        return completed.returncode
    except Exception as e:
        console.print(f"[red]命令执行失败:[/red] {e}")
        return -1

# -------------------------------------------------
# Encoding setup
# -------------------------------------------------

def setup_io_encoding(cfg: Dict[str, Any]):
    """Reconfigure stdin/stdout to the configured (or locale-preferred) encoding.

    Failures are swallowed on purpose: some streams (pipes, test harnesses)
    do not support reconfigure(), and the CLI should still start.
    """
    requested = (cfg.get('input_encoding') or 'auto').lower()
    if requested == 'auto':
        encoding = locale.getpreferredencoding(False) or 'utf-8'
    else:
        encoding = requested
    try:
        for stream in (sys.stdin, sys.stdout):
            if hasattr(stream, 'reconfigure'):
                stream.reconfigure(encoding=encoding, errors='replace')
    except Exception:
        pass

# -------------------------------------------------
# Main loop
# -------------------------------------------------

def input_loop():
    cfg = load_config()
    setup_io_encoding(cfg)
    setup_logging()
    client = ChatClient(cfg)
    sm = SessionManager()

    banner(cfg)

    if PROMPT_OK:
        hist_file = os.path.join(BASE_DIR, '.cli_history')
        session = PromptSession(history=FileHistory(hist_file))
        kb = KeyBindings()

        @kb.add('c-c')
        def _(event):
            event.app.exit(exception=KeyboardInterrupt, style='class:aborting')

        prompt_text = lambda: "You> "

    else:
        session = None

    def get_user_input() -> str:
        if PROMPT_OK:
            with patch_stdout():
                return session.prompt(prompt_text(), key_bindings=kb)
        else:
            try:
                return input("You> ")
            except UnicodeDecodeError:
                enc_cfg = (cfg.get('input_encoding') or 'auto').lower()
                enc = locale.getpreferredencoding(False) if enc_cfg == 'auto' else enc_cfg
                try:
                    raw = sys.stdin.buffer.readline()
                    return raw.decode(enc or 'utf-8', errors='replace').rstrip('\r\n')
                except Exception as e:
                    console.print(f"[red]输入解码失败: {e}[/red]")
                    return ''

    while True:
        try:
            text = get_user_input().strip()
            if not text:
                continue

            # commands
            if text.startswith('/'):
                parts = text.split()
                cmd = parts[0].lower()
                args = parts[1:]

                if cmd in ('/exit', '/quit'):
                    console.print("👋 再见！祝您系统稳定，运维顺利！")
                    break
                elif cmd == '/help':
                    print_help()
                elif cmd == '/clear':
                    # 使用多种方法确保清屏功能正常工作
                    try:
                        if RICH_OK:
                            # 使用rich console的clear方法
                            console.clear()
                        else:
                            # 降级方案：优先使用系统命令
                            if os.name == 'nt':  # Windows
                                # Windows下使用cls命令
                                os.system('cls')
                            else:  # Unix/Linux
                                # Unix/Linux下使用clear命令
                                os.system('clear')
                    except Exception:
                        # 最后的备用方案
                        if os.name == 'nt':
                            os.system('cls')
                        else:
                            sys.stdout.write('\033[2J\033[H')
                            sys.stdout.flush()
                    # 清屏命令处理完毕，跳过后续处理
                    continue
                elif cmd == '/reset':
                    sm.reset()
                    console.print("[green]上下文已清空[/green]")
                elif cmd == '/history':
                    console.print(f"当前对话轮次: {len(sm.history()) - 1}")
                elif cmd == '/save':
                    name = args[0] if args else None
                    path = sm.save(name)
                    console.print(f"[green]已保存: {path}[/green]")
                elif cmd == '/load':
                    if not args:
                        console.print("[red]用法: /load <name>[/red]")
                    else:
                        try:
                            path = sm.load(args[0])
                            console.print(f"[green]已加载: {path}[/green]")
                        except Exception as e:
                            console.print(f"[red]{e}[/red]")
                elif cmd == '/export':
                    if not args:
                        console.print("[red]用法: /export <md|json> [file][/red]")
                    else:
                        fmt = args[0]
                        target = args[1] if len(args) > 1 else None
                        try:
                            out = sm.export(fmt, target)
                            console.print(f"[green]已导出: {out}[/green]")
                        except Exception as e:
                            console.print(f"[red]{e}[/red]")
                elif cmd == '/ping':
                    ok = client.ping()
                    console.print("[green]服务可用[/green]" if ok else "[red]服务不可用[/red]")
                elif cmd == '/models':
                    models = client.list_models()
                    if not models:
                        console.print("[yellow]未获取到模型列表[/yellow]")
                    else:
                        if RICH_OK:
                            table = Table(title="可用模型")
                            table.add_column("Model")
                            for m in models:
                                table.add_row(m)
                            console.print(table)
                        else:
                            for m in models:
                                print("-", m)
                elif cmd == '/model':
                    if not args:
                        current_provider = cfg.get('current_provider', 'local')
                        current_model = cfg.get('providers', {}).get(current_provider, {}).get('model', '')
                        console.print(f"当前模型: {current_model} (提供商: {current_provider})")
                    else:
                        arg_str = ' '.join(args).strip()
                        providers = cfg.get('providers', {})
                        # 支持 provider:model 或 provider/model 语法
                        prov, mdl = None, None
                        if ':' in arg_str:
                            parts = arg_str.split(':', 1)
                            prov, mdl = parts[0].strip(), parts[1].strip()
                        elif '/' in arg_str:
                            parts = arg_str.split('/', 1)
                            prov, mdl = parts[0].strip(), parts[1].strip()
                        
                        if arg_str in providers:
                            # 输入为提供商名，执行提供商切换
                            cfg['current_provider'] = arg_str
                            save_config(cfg)
                            client = ChatClient(cfg)
                            console.print(f"[green]已切换到提供商: {arg_str}[/green]")
                        elif prov and prov in providers and mdl:
                            # 同时切换提供商与模型
                            cfg['current_provider'] = prov
                            cfg['providers'][prov]['model'] = mdl
                            save_config(cfg)
                            client = ChatClient(cfg)
                            console.print(f"[green]已切换到提供商: {prov}，模型: {mdl}[/green]")
                        else:
                            # 仅切换当前提供商的模型
                            current_provider = cfg.get('current_provider', 'local')
                            if current_provider in providers:
                                cfg['providers'][current_provider]['model'] = arg_str
                                save_config(cfg)
                                client = ChatClient(cfg)
                                console.print(f"[green]已切换模型为 {arg_str} (提供商: {current_provider})[/green]")
                            else:
                                console.print("[red]配置错误: 无法找到当前提供商[/red]")
                elif cmd == '/nonstream':
                    if not args or args[0] not in ('on', 'off'):
                        console.print("[red]用法: /nonstream on|off[/red]")
                    else:
                        cfg['stream'] = (args[0] == 'off') and False or True
                        # The command is non-intuitive; explain:
                        if args[0] == 'on':
                            cfg['stream'] = False
                        elif args[0] == 'off':
                            cfg['stream'] = True
                        save_config(cfg)
                        console.print(f"[green]流式输出: {'开启' if cfg['stream'] else '关闭'}[/green]")
                elif cmd == '/timeout':
                    if not args:
                        console.print(f"当前超时: {cfg['timeout']} 秒")
                    else:
                        try:
                            cfg['timeout'] = max(5, int(args[0]))
                            save_config(cfg)
                            console.print(f"[green]已设置超时为 {cfg['timeout']} 秒[/green]")
                        except ValueError:
                            console.print("[red]请输入有效的秒数[/red]")
                elif cmd == '/set':
                    if len(args) < 2:
                        console.print("[red]用法: /set <key> <value>[/red]")
                    else:
                        key, val = args[0], ' '.join(args[1:])
                        if key not in ('temperature', 'top_p', 'max_tokens', 'repeat_penalty'):
                            console.print("[red]可设置项: temperature/top_p/max_tokens/repeat_penalty[/red]")
                        else:
                            try:
                                if key == 'max_tokens':
                                    cfg[key] = int(val)
                                else:
                                    cfg[key] = float(val)
                                save_config(cfg)
                                console.print(f"[green]{key} 已更新为 {cfg[key]}[/green]")
                            except ValueError:
                                console.print("[red]值格式不正确[/red]")
                elif cmd == '/provider':
                    # List configured model providers, or switch the active one.
                    if not args:
                        console.print("[yellow]用法: /provider list | /provider switch <name>[/yellow]")
                    elif args[0] == 'list':
                        providers = cfg.get('providers', {})
                        current = cfg.get('current_provider', 'local')
                        if RICH_OK:
                            table = Table(title="模型提供商")
                            table.add_column("名称")
                            table.add_column("类型")
                            table.add_column("当前使用")
                            for name, provider in providers.items():
                                is_current = "✓" if name == current else ""
                                table.add_row(name, provider.get('type', 'unknown'), is_current)
                            console.print(table)
                        else:
                            # Plain-text fallback when rich is unavailable.
                            console.print(f"当前使用提供商: {current}")
                            for name, provider in providers.items():
                                print(f"- {name} ({provider.get('type', 'unknown')})")
                    elif args[0] == 'switch' and len(args) >= 2:
                        provider_name = args[1]
                        if provider_name in cfg.get('providers', {}):
                            cfg['current_provider'] = provider_name
                            save_config(cfg)
                            # Rebind the client so subsequent requests use the
                            # newly selected provider.
                            client = ChatClient(cfg)
                            console.print(f"[green]已切换到提供商: {provider_name}[/green]")
                            # Hint about availability and required configuration.
                            if provider_name.lower() == 'deepseek' and not cfg['providers'][provider_name].get('api_key'):
                                console.print("[yellow]提示：deepseek 需要有效的 API Key，请使用 /config set apikey <key> 设置。[/yellow]")
                            ok = client.ping()
                            console.print("[green]提供商可用[/green]" if ok else "[yellow]无法验证提供商可用性，请检查网络/URL/密钥[/yellow]")
                        else:
                            console.print(f"[red]未知提供商: {provider_name}[/red]")
                    else:
                        console.print("[yellow]用法: /provider list | /provider switch <name>[/yellow]")
                elif cmd == '/config':
                    # Show the config, or set the active provider's URL / API key.
                    if not args:
                        console.print("[yellow]用法: /config show | /config set url <value> | /config set apikey <value>[/yellow]")
                    elif args[0] == 'show':
                        print_config(cfg)
                    elif args[0] == 'set' and len(args) >= 3:
                        if args[1] == 'url':
                            current_provider = cfg.get('current_provider', 'local')
                            cfg['providers'][current_provider]['api_url'] = args[2]
                            save_config(cfg)
                            # Reinitialize the client so the new URL takes effect.
                            client = ChatClient(cfg)
                            console.print(f"[green]API 地址已设为 {args[2]}[/green]")
                        elif args[1] == 'apikey':
                            current_provider = cfg.get('current_provider', 'local')
                            cfg['providers'][current_provider]['api_key'] = args[2]
                            save_config(cfg)
                            # Reinitialize the client so the new key takes effect.
                            client = ChatClient(cfg)
                            console.print(f"[green]API 密钥已更新[/green]")
                        else:
                            console.print("[yellow]用法: /config set url <value> | /config set apikey <value>[/yellow]")
                    else:
                        console.print("[yellow]用法: /config show | /config set url <value> | /config set apikey <value>[/yellow]")
                elif cmd == '/retry':
                    if not sm.last_user_input:
                        console.print("[yellow]暂无可重试的问题[/yellow]")
                    else:
                        text = sm.last_user_input
                        # fall through to ask
                        # no continue here
                elif cmd == '/safe':
                    if not args or args[0] not in ('on','off'):
                        console.print("[yellow]用法: /safe on|off[/yellow]")
                    else:
                        cfg['safe_mode'] = (args[0] == 'on')
                        save_config(cfg)
                        console.print(f"[green]安全模式已{'开启' if cfg['safe_mode'] else '关闭'}[/green]")
                else:
                    console.print("[yellow]未知命令，输入 /help 查看支持的命令[/yellow]")
                    continue
                # command handled
                continue

            # route: ai prefix -> chat; otherwise -> shell command
            lowered = text.lower()
            ai_prefixes = ("ai:", "ai：")
            if any(lowered.startswith(p) for p in ai_prefixes):
                question = text.split(':', 1)[-1].split('：', 1)[-1].strip()
                if not question:
                    console.print("[yellow]提示：在 ai: 后输入您的问题[/yellow]")
                    continue
                sm.add_user(question)
                try:
                    # Show a "thinking" status indicator while waiting.
                    show_thinking_status()

                    resp = client.chat(sm.history())
                    assistant_reply = ""
                    provider_type = (client.provider or {}).get('type', '').lower()
                    if client.cfg['stream']:
                        if RICH_OK:
                            from time import monotonic
                            last_update = 0.0

                            # Print the AI-response start banner.
                            display_ai_response_start()

                            # Incremental rendering via a rich Live view,
                            # throttled to one refresh per 50 ms.
                            with create_ai_live_display() as live:
                                for line in resp.iter_lines():
                                    if not line:
                                        continue
                                    decoded = line.decode('utf-8', errors='replace').strip()
                                    try:
                                        if provider_type == 'openai':
                                            # OpenAI-style SSE: payload lines start
                                            # with "data: " and end with "[DONE]".
                                            if not decoded.startswith('data: '):
                                                continue
                                            payload = decoded[len('data: '):].strip()
                                            if payload == '[DONE]':
                                                break
                                            jd = json.loads(payload)
                                            delta = jd.get('choices', [{}])[0].get('delta', {}).get('content', '')
                                            if delta:
                                                assistant_reply += delta
                                        else:  # ollama and others
                                            jd = json.loads(decoded)
                                            # Ollama stream lines contain: {"message": {"role": "assistant", "content": "..."}, "done": false}
                                            msg = jd.get('message', {})
                                            delta = msg.get('content', '')
                                            if delta:
                                                assistant_reply += delta
                                            if jd.get('done') is True:
                                                break
                                    except Exception:
                                        # Malformed or partial stream lines are
                                        # skipped deliberately (best-effort parse).
                                        continue
                                    now = monotonic()
                                    if now - last_update >= 0.05:
                                        update_ai_live_display(live, assistant_reply)
                                        last_update = now
                                # Final flush of any remaining content.
                                update_ai_live_display(live, assistant_reply)

                            # Print the AI-response end banner.
                            display_ai_response_end()
                        else:
                            # Streaming without rich: print each delta directly.
                            display_ai_response_start()
                            for line in resp.iter_lines():
                                if not line:
                                    continue
                                decoded = line.decode('utf-8', errors='replace').strip()
                                try:
                                    if provider_type == 'openai':
                                        if not decoded.startswith('data: '):
                                            continue
                                        payload = decoded[len('data: '):].strip()
                                        if payload == '[DONE]':
                                            break
                                        jd = json.loads(payload)
                                        delta = jd.get('choices', [{}])[0].get('delta', {}).get('content', '')
                                        if delta:
                                            print(delta, end='', flush=True)
                                            assistant_reply += delta
                                    else:  # ollama and others
                                        jd = json.loads(decoded)
                                        # Prefer chat style
                                        delta = jd.get('message', {}).get('content', '')
                                        # Fallback to older generate style
                                        if not delta:
                                            delta = jd.get('response', '')
                                        if delta:
                                            print(delta, end='', flush=True)
                                            assistant_reply += delta
                                        if jd.get('done') is True:
                                            break
                                except Exception:
                                    continue
                            print()
                            display_ai_response_end()
                    else:
                        # Non-streaming: one JSON body per provider protocol.
                        data = resp.json()
                        if provider_type == 'openai':
                            assistant_reply = data.get('choices', [{}])[0].get('message', {}).get('content', '')
                        else:  # ollama
                            assistant_reply = data.get('message', {}).get('content', '') or data.get('response', '')
                    # Render output (non-streaming or non-rich); the rich
                    # streaming path has already displayed the reply above.
                    if assistant_reply.strip():
                        if (not client.cfg['stream']) or (client.cfg['stream'] and not RICH_OK):
                            display_ai_response(assistant_reply)
                        sm.add_assistant(assistant_reply.strip())
                except requests.exceptions.RequestException as e:
                    show_error_message(f"请求失败: {e}")
                except KeyboardInterrupt:
                    # Ctrl-C mid-request aborts just this request, not the loop.
                    console.print("\n[yellow]⏹️ 已中断当前请求[/yellow]")
                except Exception as e:
                    show_error_message(f"发生错误: {e}")
            else:
                # Not an "ai:" prompt -> treat the input as a shell command.
                if text in ("exit", "quit"):
                    console.print("👋 再见！祝您系统稳定，运维顺利！")
                    break

                # Special-case `clear`: clear the screen locally rather than
                # spawning a shell.
                if text.strip().lower() == "clear":
                    try:
                        if RICH_OK:
                            console.clear()
                        else:
                            if os.name == 'nt':
                                os.system('cls')
                            else:
                                os.system('clear')
                    except Exception:
                        # Fallback: cls on Windows, raw ANSI escape elsewhere.
                        if os.name == 'nt':
                            os.system('cls')
                        else:
                            sys.stdout.write('\033[2J\033[H')
                            sys.stdout.flush()
                    continue

                # NOTE(review): config is re-read from disk here rather than
                # using the in-memory cfg — presumably to pick up external
                # edits; verify this is intentional. Defaults to safe mode on.
                safe_mode = bool(load_config().get('safe_mode', True))
                if safe_mode and _is_dangerous_command(text):
                    if not _confirm(f"检测到潜在危险命令，确认执行? {text}"):
                        console.print("[yellow]已取消执行[/yellow]")
                        continue
                code = _run_shell(text)
                if code != 0:
                    # Surface non-zero exit codes to the user.
                    console.print(f"[cyan]退出码: {code}[/cyan]")

        except KeyboardInterrupt:
            # Ctrl-C at the prompt (outside a request) ends the session.
            console.print("\n👋 会话已中断，再见！")
            break
        except EOFError:
            # Ctrl-D / end of input: exit cleanly.
            console.print("\n👋 再见！")
            break


if __name__ == '__main__':
    # Script entry point: start the interactive REPL.
    input_loop()