"""AI Engine for command generation and intelligent assistance - Ollama Only"""

import json
import logging
from typing import Dict, List, Optional
from datetime import datetime

# AI model integration with Ollama only
try:
    import requests
    OLLAMA_AVAILABLE = True
except ImportError:
    OLLAMA_AVAILABLE = False

logger = logging.getLogger(__name__)

class AICommandGenerator:
    """AI-powered command generator using Ollama as the only backend.

    Converts natural-language queries (English or Chinese) into shell
    commands and produces chat responses by calling a local Ollama HTTP
    server. Failures are returned as human-readable strings rather than
    raised, so callers can surface them directly in a UI.
    """

    # Single source of truth for the Ollama endpoint; previously this URL
    # was hard-coded in four separate request calls.
    OLLAMA_BASE_URL = "http://localhost:11434"

    # Models tried in priority order when several are installed locally.
    PREFERRED_MODELS = ('llama2:7b-chat', 'llama2:7b', 'llama2:13b-chat', 'llama2', 'llama2:13b')

    def __init__(self):
        # Becomes True once _init_ollama finds a reachable server + model.
        self.model_loaded = False
        # Maps "type:query" -> {command, count, last_used}; in-memory only.
        self.learning_database = {}

        # Name of the selected Ollama model (e.g. "llama2:7b-chat").
        self.ai_service = None
        # Backend tag; only "ollama" is supported.
        self.service_type = None

        # Initialize the Ollama service (best-effort; logs on failure).
        self._init_ollama()

    def _init_ollama(self) -> bool:
        """Probe the local Ollama server and select a model.

        Returns:
            True when a model was selected; False otherwise (an actionable
            error is logged and the generator stays in degraded mode).
        """
        if not OLLAMA_AVAILABLE:
            logger.error("Requests library not available. Install with: pip install requests")
            return False

        try:
            # /api/tags lists installed models and doubles as a liveness check.
            response = requests.get(f"{self.OLLAMA_BASE_URL}/api/tags", timeout=5)
            if response.status_code == 200:
                models = response.json().get('models', [])
                if models:
                    selected_model = self._select_model(models)
                    self.ai_service = selected_model
                    self.service_type = "ollama"
                    self.model_loaded = True
                    logger.info(f"✅ Ollama initialized with model: {selected_model}")
                    logger.info(f"Available models: {[m.get('name') for m in models[:5]]}")
                    return True
                logger.error("Ollama running but no models found. Please run: ollama pull llama2:7b-chat")
            else:
                logger.error("Ollama service not responding. Please start with: ollama serve")
        except requests.exceptions.ConnectionError:
            logger.error("Ollama not running. Please start with: ollama serve")
        except Exception as e:
            logger.error(f"Ollama initialization failed: {e}")

        return False

    def _select_model(self, models: List[Dict]) -> str:
        """Pick the best installed model, honoring PREFERRED_MODELS order."""
        names = [m.get('name', '') for m in models]
        for preferred in self.PREFERRED_MODELS:
            for name in names:
                # Exact match, or a tagged variant like "llama2:7b-chat:latest".
                if name == preferred or name.startswith(preferred + ':'):
                    return name
        # No preferred model installed: fall back to the first available one.
        return names[0] or 'llama2:7b-chat'

    def generate_command(self, query: str, command_type: str = "linux", context: Optional[Dict] = None) -> str:
        """Generate a command from a natural-language *query* via Ollama.

        Args:
            query: natural-language request (English or Chinese).
            command_type: target command flavor, e.g. "linux".
            context: optional extra context (currently unused downstream).

        Returns:
            The generated command, or an explanatory error string when the
            Ollama service is unavailable or the request fails.
        """
        if not self.model_loaded or not self.ai_service:
            return f"Ollama not available. Please ensure Ollama is running: ollama serve. Query: {query}"

        return self._generate_with_ollama(query, command_type, context)

    def _generate_with_ollama(self, query: str, command_type: str, context: Optional[Dict] = None) -> str:
        """Generate a command using Ollama with a few-shot prompt."""
        # Specialized system prompt: demand the bare command, no prose.
        system_prompt = f"You are a {command_type} command expert. Generate ONLY the command, no explanation. Support both English and Chinese queries."

        if command_type == "linux":
            examples = "Examples: '列文件' -> 'ls -la', 'list files' -> 'ls -la', '查看磁盘空间' -> 'df -h'"
        else:
            examples = f"Generate {command_type} commands based on natural language descriptions."

        prompt = f"{system_prompt}\n{examples}\n\nQuery: {query}\nCommand:"

        try:
            response = requests.post(
                f"{self.OLLAMA_BASE_URL}/api/generate",
                json={
                    "model": self.ai_service,
                    "prompt": prompt,
                    "stream": False,
                    "options": {
                        # Low temperature + short output: commands should be deterministic.
                        "temperature": 0.1,
                        "num_predict": 50,
                        "stop": ["\n\n", "Query:", "Examples:"]
                    }
                },
                timeout=60  # local models can be slow, especially on first load
            )

            if response.status_code == 200:
                result = response.json().get('response', '').strip()
                command = self._extract_command(result)

                # Remember the pairing for future learning/improvement.
                self._store_learning_data(query, command, command_type)

                return command
            return f"Ollama error: {response.status_code}. Query: {query}"
        except requests.exceptions.ConnectionError:
            return f"Ollama connection failed. Please ensure Ollama is running: ollama serve. Query: {query}"
        except Exception as e:
            return f"Ollama generation failed: {str(e)}. Query: {query}"

    def _extract_command(self, generated_text: str) -> str:
        """Extract a clean, runnable command from raw model output.

        Takes the first non-empty, non-comment line, then strips label and
        shell-prompt prefixes until none remain (the previous single-pass
        loop left residue for stacked or out-of-order prefixes, e.g.
        ">>> ls" became ">> ls" because '>' was checked before '>>>'), and
        finally removes surrounding markdown backticks.
        """
        prefixes = ('Command:', 'command:', '$', '#', '>', '>>>')
        for line in generated_text.strip().split('\n'):
            line = line.strip()
            if not line or line.startswith('#') or line.startswith('//'):
                continue
            # Keep stripping until no prefix matches any more.
            stripped = True
            while stripped:
                stripped = False
                for prefix in prefixes:
                    if line.startswith(prefix):
                        line = line[len(prefix):].strip()
                        stripped = True
            # Models sometimes wrap the command in markdown code backticks.
            return line.strip('`').strip()

        return generated_text.strip()

    def generate_chat_response(self, message: str, context: Optional[Dict] = None) -> str:
        """Generate a conversational response, auto-detecting command requests."""
        # Command-looking messages (Chinese or English) short-circuit to
        # command generation instead of free-form chat.
        if self._is_command_request(message):
            command = self.generate_command(message, "linux", context)
            return f"Generated command: {command}"

        if not self.model_loaded or not self.ai_service:
            return "Ollama not available. Please install and run Ollama: https://ollama.ai/"

        return self._chat_with_ollama(message)

    def _chat_with_ollama(self, message: str) -> str:
        """Run a single non-streaming chat turn against Ollama."""
        try:
            response = requests.post(
                f"{self.OLLAMA_BASE_URL}/api/generate",
                json={
                    "model": self.ai_service,
                    "prompt": f"You are a helpful Linux system administrator assistant. Respond in the same language as the user's question. Support both Chinese and English.\n\nUser: {message}\nAssistant:",
                    "stream": False,
                    "options": {
                        "temperature": 0.7,
                        "num_predict": 200,
                        "stop": ["User:", "\n\nUser:"]
                    }
                },
                timeout=60  # local models can be slow, especially on first load
            )

            if response.status_code == 200:
                return response.json().get('response', '').strip()
            return f"Ollama chat error: HTTP {response.status_code}"
        except requests.exceptions.ConnectionError:
            return "Ollama connection failed. Please ensure Ollama is running: ollama serve"
        except Exception as e:
            return f"Ollama chat error: {str(e)}"

    def generate_chat_response_stream(self, message: str, context: Optional[Dict] = None):
        """Yield chat response chunks from Ollama's streaming API.

        Yields:
            str: partial response fragments; on failure a single
            human-readable error string is yielded instead.
        """
        if not self.model_loaded or not self.ai_service:
            yield "Ollama not available. Please install and run Ollama: https://ollama.ai/"
            return

        try:
            response = requests.post(
                f"{self.OLLAMA_BASE_URL}/api/generate",
                json={
                    "model": self.ai_service,
                    "prompt": f"You are a helpful Linux system administrator assistant. Respond in the same language as the user's question. Support both Chinese and English.\n\nUser: {message}\nAssistant:",
                    "stream": True,  # server pushes incremental JSON lines
                    "options": {
                        "temperature": 0.7,
                        "num_predict": 200,
                        "stop": ["User:", "\n\nUser:"]
                    }
                },
                stream=True,
                timeout=120  # longer timeout: streaming sessions run longer
            )

            if response.status_code == 200:
                # Each line is a standalone JSON object; "done" marks the end.
                for line in response.iter_lines():
                    if not line:
                        continue
                    try:
                        data = json.loads(line)
                    except json.JSONDecodeError:
                        continue  # skip malformed/partial lines
                    if 'response' in data:
                        yield data['response']
                    if data.get('done', False):
                        break
            else:
                yield f"Ollama streaming error: HTTP {response.status_code}"

        except requests.exceptions.ConnectionError:
            yield "Ollama connection failed. Please ensure Ollama is running: ollama serve"
        except Exception as e:
            yield f"Ollama streaming error: {str(e)}"

    def _is_command_request(self, message: str) -> bool:
        """Heuristically decide whether *message* asks for a shell command."""
        message_lower = message.lower()

        # Chinese command patterns
        chinese_patterns = [
            '列文件', '查看文件', '显示文件', '查看目录', '显示目录',
            '磁盘空间', '磁盘使用', '内存使用', '内存情况', '进程列表',
            '查看进程', '系统信息', '查找文件', '搜索文件', '复制文件',
            '移动文件', '删除文件', '创建目录', '删除目录', '修改权限',
            '网络连接', '运行服务', '搜索文本', '统计行数', '排序文件',
            '去重', '编辑文件'
        ]

        # English command patterns
        english_patterns = [
            'list files', 'show files', 'view files', 'list directory',
            'disk space', 'disk usage', 'memory usage', 'process list',
            'system info', 'find file', 'search file', 'copy file',
            'move file', 'delete file', 'create directory', 'remove directory',
            'change permissions', 'network connections', 'running services',
            'search text', 'count lines', 'sort file', 'unique lines',
            'edit file'
        ]

        # Any known pattern appearing anywhere in the message triggers
        # command mode.
        if any(pattern in message_lower for pattern in chinese_patterns + english_patterns):
            return True

        # Imperative opening verbs also suggest a command request.
        command_verbs = ('show', 'list', 'find', 'search', 'check', 'get', 'display', 'view')
        return message_lower.startswith(tuple(verb + ' ' for verb in command_verbs))

    def get_related_commands(self, command: str, command_type: str) -> List[str]:
        """Return commonly-related commands for *command*'s base executable.

        Args:
            command: a full command line, e.g. "ls -la".
            command_type: unused for now; kept for interface stability.

        Returns:
            A list of related command strings; empty when *command* is blank
            or its executable has no curated suggestions.
        """
        command_parts = command.split()
        if not command_parts:
            return []

        related_commands = {
            "ls": ["ls -la", "ls -lh", "ls -R", "find . -name '*'"],
            "ps": ["ps aux", "ps -ef", "top", "htop"],
            "df": ["df -h", "du -sh *", "lsblk", "fdisk -l"],
            "docker": ["docker ps", "docker images", "docker logs", "docker exec"],
            "kubectl": ["kubectl get pods", "kubectl get services", "kubectl describe", "kubectl logs"]
        }

        return related_commands.get(command_parts[0], [])

    def _store_learning_data(self, query: str, command: str, command_type: str) -> None:
        """Record a query->command pairing for future model improvement.

        Repeats of the same (type, query) bump the usage count, refresh the
        timestamp, and overwrite the stored command with the latest one.
        """
        key = f"{command_type}:{query.lower()}"
        self.learning_database[key] = {
            "command": command,
            "count": self.learning_database.get(key, {}).get("count", 0) + 1,
            "last_used": datetime.now().isoformat()
        }

class AIConnectionManager:
    """AI-powered connection management and analytics.

    Provides heuristic analysis of saved connection records and
    context-aware command suggestions. Holds two caches that are reserved
    for future use and are not populated by any method here.
    """

    def __init__(self):
        # Reserved: per-connection usage metrics (not yet populated).
        self.connection_analytics = {}
        # Reserved: per-enterprise learned patterns (not yet populated).
        self.enterprise_patterns = {}

    def analyze_connection_patterns(self, connections: List[Dict]) -> Dict:
        """Group *connections* by their "enterprise" field.

        Args:
            connections: connection records; each may carry an "enterprise"
                key. Records without one are grouped under "Unknown".

        Returns:
            Dict with "enterprise_groups" populated; "frequent_connections",
            "common_ports" and "usage_trends" are placeholders for future
            analytics and stay empty.
        """
        patterns = {
            "frequent_connections": [],
            "enterprise_groups": {},
            "common_ports": {},
            "usage_trends": {}
        }

        groups = patterns["enterprise_groups"]
        for conn in connections:
            # setdefault replaces the manual membership-check-then-append.
            groups.setdefault(conn.get("enterprise", "Unknown"), []).append(conn)

        return patterns

    def get_contextual_suggestions(self, connection_id: str, context: Dict) -> List[Dict]:
        """Build intelligent suggestions from time of day and *context*.

        Args:
            connection_id: identifier of the active connection (currently
                unused; kept for interface stability).
            context: free-form context; a "server_type" of "web" adds
                web-server specific suggestions.

        Returns:
            A list of suggestion dicts with "type", "text" and (for
            commands) a "command" key.
        """
        suggestions = []

        # Time-based suggestions: different defaults during business hours.
        current_hour = datetime.now().hour
        if 9 <= current_hour <= 17:  # Business hours
            suggestions.extend([
                {"type": "command", "text": "Check system status", "command": "systemctl status"},
                {"type": "command", "text": "Monitor logs", "command": "tail -f /var/log/syslog"},
                {"type": "reminder", "text": "Remember to check backup status"}
            ])
        else:  # After hours
            suggestions.extend([
                {"type": "command", "text": "Check for updates", "command": "apt list --upgradable"},
                {"type": "command", "text": "System maintenance", "command": "df -h && free -m"},
                {"type": "reminder", "text": "Good time for system maintenance"}
            ])

        # Context-based suggestions for web servers.
        if context.get("server_type") == "web":
            suggestions.extend([
                {"type": "command", "text": "Check web server", "command": "systemctl status apache2"},
                {"type": "command", "text": "View access logs", "command": "tail -f /var/log/apache2/access.log"}
            ])

        return suggestions

    def get_enterprise_summary(self) -> List[Dict]:
        """Return an enterprise connection summary.

        NOTE: currently static placeholder data; a real implementation
        would integrate with the database.
        """
        return [
            {
                "name": "Production",
                "connection_count": 15,
                "server_types": ["web", "database", "api"],
                "common_commands": ["systemctl status", "df -h", "top"],
                "health_status": "good"
            },
            {
                "name": "Development",
                "connection_count": 8,
                "server_types": ["web", "database"],
                "common_commands": ["git status", "docker ps", "npm start"],
                "health_status": "warning"
            }
        ]