#!/usr/bin/env python3
"""
MCP Server for Qwen3 and DeepSeek Clients

This module implements a Model Context Protocol (MCP) server that exposes
both Qwen3 and DeepSeek AI models through standardized MCP tools.
"""

import asyncio
import json
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional, Dict, Any, List

from mcp.server.fastmcp import FastMCP

from .core import Qwen3Client, Qwen3Config
from .ds_core import DeepSeekClient, DeepSeekConfig
from .oss_core import OSSClient, OSSConfig
from .utils import setup_logging, load_env_file


class QwenMCPServer:
    """MCP server exposing Qwen3, DeepSeek, and OSS (Ollama) models as tools.

    Each tool accepts a question (optionally augmented with the contents of
    one or more files), forwards it to the matching backend client, writes
    the model's response to a required output file, and returns a short
    human-readable summary string to the MCP caller.
    """

    def __init__(self, name: str = "qwen-mcp-server", version: str = "1.0.0"):
        """Create the FastMCP app, load env config, and register tools.

        Args:
            name: Server name advertised to MCP clients.
            version: Server version string (used for logging only).
        """
        self.mcp = FastMCP(name)
        self.version = version
        self.logger = setup_logging("INFO")

        # Clients are created lazily on first tool use so the server starts
        # even when only a subset of API keys is configured.
        self._qwen_client: Optional[Qwen3Client] = None
        self._deepseek_client: Optional[DeepSeekClient] = None
        self._oss_client: Optional[OSSClient] = None

        # Load environment configuration
        self._load_config()

        # Register tools
        self._register_tools()

    def _load_config(self):
        """Load environment configuration and log which models look usable.

        Best-effort: failures are logged as warnings, never raised, so the
        server can still start and report per-tool errors later.
        """
        try:
            # Try to load from default locations
            load_env_file()
            self.logger.info("Environment configuration loaded")

            # QWEN_SKIP_API_VALIDATION=true enables models without real API
            # keys (testing mode). Read once instead of per model.
            skip_validation = os.getenv('QWEN_SKIP_API_VALIDATION', 'false').lower() == 'true'

            # Log available models based on API keys
            available_models = []
            if os.getenv('DASHSCOPE_API_KEY') or skip_validation:
                qwen_model = os.getenv('DASHSCOPE_MODEL', 'qwen3-30b-a3b-instruct-2507')
                available_models.append(f"Qwen3 ({qwen_model})")

            if os.getenv('DEEPSEEK_API_KEY') or skip_validation:
                deepseek_model = os.getenv('DEEPSEEK_MODEL', 'deepseek-chat')
                available_models.append(f"DeepSeek ({deepseek_model})")

            # OSS/Ollama needs no API key; any endpoint or model hint enables it.
            if os.getenv('OSS_BASE_URL') or os.getenv('OLLAMA_BASE_URL') or os.getenv('OSS_MODEL') or os.getenv('OLLAMA_MODEL_ID'):
                oss_model = os.getenv('OSS_MODEL') or os.getenv('OLLAMA_MODEL_ID') or 'gpt-oss:120b'
                available_models.append(f"OSS ({oss_model})")

            if available_models:
                self.logger.info(f"Available models: {', '.join(available_models)}")
            else:
                self.logger.warning("No API keys found in environment. Set DASHSCOPE_API_KEY and/or DEEPSEEK_API_KEY, or enable QWEN_SKIP_API_VALIDATION for testing")

        except Exception as e:
            self.logger.warning(f"Could not load environment config: {e}")

    def _get_workspace_root(self) -> str:
        """Resolve the directory against which relative file paths resolve.

        Resolution order: CLAUDE_CWD, WORKSPACE_ROOT, the first env-provided
        project path containing project markers, the current working
        directory (when it is not $HOME), then the nearest ancestor of cwd
        with project markers. Falls back to the current working directory.
        """
        # When MCP config uses cwd: ".", os.getcwd() is where the MCP client runs from
        current_working_dir = os.getcwd()

        # Use CLAUDE_CWD if provided by MCP server configuration
        claude_cwd_env = os.getenv('CLAUDE_CWD')
        if claude_cwd_env:
            claude_cwd = Path(claude_cwd_env).resolve()
            if claude_cwd.exists():
                return str(claude_cwd)

        # Check explicit workspace root
        workspace_root_env = os.getenv('WORKSPACE_ROOT')
        if workspace_root_env:
            return workspace_root_env

        home_dir = Path.home()

        # Candidate project paths injected by various tools/shells. $HOME
        # itself is excluded because it is rarely the real project root.
        env_paths = [
            os.getenv('INIT_CWD'),
            os.getenv('PWD'),
            os.getenv('CURSOR_PROJECT_ROOT'),
            os.getenv('VSCODE_CWD')
        ]
        env_paths = [p for p in env_paths if p and Path(p) != home_dir and Path(p).exists()]

        for env_path in env_paths:
            if self._has_project_markers(env_path):
                return env_path

        # If we're not in the home directory, use current working directory
        if Path(current_working_dir) != home_dir:
            return current_working_dir

        # Walk up from cwd looking for a directory with project markers.
        current_path = Path(current_working_dir)
        while current_path != current_path.parent:
            if self._has_project_markers(str(current_path)):
                return str(current_path)
            current_path = current_path.parent

        return current_working_dir

    def _has_project_markers(self, dir_path: str) -> bool:
        """Return True if *dir_path* contains any common project-root marker."""
        markers = ['.git', 'package.json', 'pnpm-workspace.yaml', 'yarn.lock',
                  'pyproject.toml', 'setup.py', 'requirements.txt']
        dir_path = Path(dir_path)
        return any((dir_path / marker).exists() for marker in markers)

    def _build_client(self, label: str, config_cls, client_cls):
        """Construct a backend client from its auto-configuration.

        Shared by the three lazy getters below; logs success/failure with
        the given human-readable *label* and re-raises on failure.
        """
        try:
            client = client_cls(config_cls.auto_config(), self.logger)
        except Exception as e:
            self.logger.error(f"Failed to initialize {label} client: {e}")
            raise
        self.logger.info(f"{label} client initialized successfully")
        return client

    def _get_qwen_client(self) -> Qwen3Client:
        """Get or create the cached Qwen3 client instance."""
        if self._qwen_client is None:
            self._qwen_client = self._build_client("Qwen3", Qwen3Config, Qwen3Client)
        return self._qwen_client

    def _get_deepseek_client(self) -> DeepSeekClient:
        """Get or create the cached DeepSeek client instance."""
        if self._deepseek_client is None:
            self._deepseek_client = self._build_client("DeepSeek", DeepSeekConfig, DeepSeekClient)
        return self._deepseek_client

    def _get_oss_client(self) -> OSSClient:
        """Get or create the cached OSS client instance."""
        if self._oss_client is None:
            self._oss_client = self._build_client("OSS", OSSConfig, OSSClient)
        return self._oss_client

    def _read_file_content(self, file_path: str, workspace_root: str) -> str:
        """Read a file as UTF-8 text, resolving relative paths.

        Args:
            file_path: Absolute path, or path relative to *workspace_root*.
            workspace_root: Base directory for relative resolution.

        Raises:
            FileNotFoundError: If the resolved path does not exist.
        """
        if Path(file_path).is_absolute():
            resolved_path = Path(file_path)
        else:
            resolved_path = Path(workspace_root) / file_path

        self.logger.debug(f"File resolution: file_path='{file_path}', workspace_root='{workspace_root}', resolved_path='{resolved_path}'")

        if not resolved_path.exists():
            raise FileNotFoundError(f"File not found: {file_path}. Working directory: {workspace_root}. Tried path: {resolved_path}")

        with open(resolved_path, 'r', encoding='utf-8') as f:
            content = f.read()

        return content

    def _prepare_prompt_with_files(self, question: str, file_paths: Optional[List[str]] = None) -> str:
        """Append the contents of *file_paths* (fenced code blocks) to *question*.

        Unreadable files are reported inline rather than aborting the whole
        prompt. Returns *question* unchanged when no files are given.
        """
        if not file_paths:
            return question

        workspace_root = self._get_workspace_root()
        final_question = question

        for file_path in file_paths:
            try:
                file_content = self._read_file_content(file_path, workspace_root)
                # Use the extension as the fence language hint.
                file_extension = Path(file_path).suffix.lstrip('.')

                final_question += f"\n\nFile: {file_path}\nFile Content:\n```{file_extension}\n{file_content}\n```\n"
            except Exception as e:
                self.logger.warning(f"Could not read file {file_path}: {e}")
                final_question += f"\n\nFile: {file_path}\n[Error reading file: {e}]\n"

        # Add concise response instruction
        final_question += "\n\nIMPORTANT: Please provide a concise, focused answer that directly addresses the question without unnecessary elaboration. Be brief and to the point."

        return final_question

    def _save_response_to_file(self, response: str, output_file: str) -> Dict[str, Any]:
        """Write *response* (UTF-8) to *output_file* and return summary stats.

        Relative paths resolve against the workspace root; parent
        directories are created as needed.

        Returns:
            Dict with 'output_file', 'lines_written', 'characters_written',
            and 'size_bytes'.
        """
        workspace_root = self._get_workspace_root()

        if Path(output_file).is_absolute():
            output_path = Path(output_file)
        else:
            output_path = Path(workspace_root) / output_file

        # Create output directory if needed
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(response)

        return {
            'output_file': str(output_path),
            'lines_written': len(response.split('\n')),
            'characters_written': len(response),
            'size_bytes': output_path.stat().st_size
        }

    def _execute_ask(
        self,
        label: str,
        get_client,
        question: str,
        output_file: str,
        model: Optional[str],
        file_path: Optional[str],
        file_paths: Optional[List[str]],
        system_prompt: Optional[str],
        max_tokens: Optional[int],
        temperature: Optional[float]
    ) -> str:
        """Shared implementation behind ask_qwen / ask_oss / ask_deepseek.

        Builds the prompt (optionally with file contents), calls the client,
        saves the response to *output_file*, and returns a summary string.
        On failure, attempts to save an error report to *output_file* and
        returns an error string instead of raising.
        """
        try:
            client = get_client()

            # Prepare file paths list (file_paths wins over single file_path)
            files_to_process = file_paths or ([] if not file_path else [file_path])

            # Prepare final question with file contents
            final_question = self._prepare_prompt_with_files(question, files_to_process)

            # Per-request generation overrides. 'is not None' so explicit
            # zero values are honored consistently for both parameters.
            kwargs = {}
            if max_tokens is not None:
                kwargs['max_tokens'] = max_tokens
            if temperature is not None:
                kwargs['temperature'] = temperature

            # Temporarily override the configured model; the finally block
            # guarantees restoration even if client.ask() raises.
            original_model = client.config.model_name
            if model:
                client.config.model_name = model
            try:
                self.logger.info(f"Asking {label}: {question[:100]}...")
                response = client.ask(final_question, system_prompt, **kwargs)
            finally:
                client.config.model_name = original_model

            # Save response to file
            summary = self._save_response_to_file(response, output_file)

            result = f"✅ {label} response completed successfully!\n\n"
            result += "📊 Summary:\n"
            result += f"   • Response length: {summary['lines_written']} lines, {summary['characters_written']} characters\n"
            result += f"   • File size: {summary['size_bytes']} bytes\n"
            result += f"   • Output saved to: {Path(summary['output_file']).name}\n"

            if files_to_process:
                result += f"   • Input files processed: {len(files_to_process)}\n"

            return result

        except Exception as e:
            error_msg = f"Error asking {label}: {str(e)}"
            self.logger.error(error_msg)

            # Best-effort: record the error in the output file too.
            try:
                error_content = (
                    f"# {label} API Error\n\n"
                    f"Request failed: {str(e)}\n\n"
                    "## Details\n"
                    f"- Question: {question}\n"
                    f"- Error Time: {datetime.now(timezone.utc).isoformat()}\n"
                )
                self._save_response_to_file(error_content, output_file)
            except Exception:
                pass

            return f"❌ Error: {error_msg}"

    def _register_tools(self):
        """Register MCP tools for Qwen3, DeepSeek, and OSS"""

        @self.mcp.tool()
        async def ask_qwen(
            question: str,
            output_file: str,
            model: Optional[str] = None,
            file_path: Optional[str] = None,
            file_paths: Optional[List[str]] = None,
            system_prompt: Optional[str] = None,
            max_tokens: Optional[int] = None,
            temperature: Optional[float] = None
        ) -> str:
            """
            Ask a question using Qwen3 models via the Qwen3 API.

            Args:
                question: The question to ask the Qwen3 model
                output_file: Required path to output file where results will be saved
                model: Optional model name to use (defaults to configured model)
                file_path: Optional path to a file whose content should be included in the prompt
                file_paths: Optional array of file paths whose contents should be included in the prompt
                system_prompt: Optional system prompt to set context
                max_tokens: Optional maximum tokens for response
                temperature: Optional temperature for response generation

            Returns:
                Summary of the operation and where results were saved
            """
            return self._execute_ask(
                "Qwen3", self._get_qwen_client, question, output_file,
                model, file_path, file_paths, system_prompt, max_tokens, temperature
            )

        @self.mcp.tool()
        async def ask_oss(
            question: str,
            output_file: str,
            model: Optional[str] = None,
            file_path: Optional[str] = None,
            file_paths: Optional[List[str]] = None,
            system_prompt: Optional[str] = None,
            max_tokens: Optional[int] = None,
            temperature: Optional[float] = None,
        ) -> str:
            """
            Ask a question using OSS (GPT-OSS via Ollama) models.

            Args:
                question: The question to ask the OSS model
                output_file: Required path to output file where results will be saved
                model: Optional model name to use (defaults to configured model)
                file_path: Optional path to a file whose content should be included in the prompt
                file_paths: Optional array of file paths whose contents should be included in the prompt
                system_prompt: Optional system prompt to set context
                max_tokens: Optional maximum tokens for response
                temperature: Optional temperature for response generation

            Returns:
                Summary of the operation and where results were saved
            """
            return self._execute_ask(
                "OSS", self._get_oss_client, question, output_file,
                model, file_path, file_paths, system_prompt, max_tokens, temperature
            )

        @self.mcp.tool()
        async def ask_deepseek(
            question: str,
            output_file: str,
            model: Optional[str] = None,
            file_path: Optional[str] = None,
            file_paths: Optional[List[str]] = None,
            system_prompt: Optional[str] = None,
            max_tokens: Optional[int] = None,
            temperature: Optional[float] = None
        ) -> str:
            """
            Ask a question using DeepSeek models via the DeepSeek API.

            Args:
                question: The question to ask the DeepSeek model
                output_file: Required path to output file where results will be saved
                model: Optional model name to use (defaults to configured model)
                file_path: Optional path to a file whose content should be included in the prompt
                file_paths: Optional array of file paths whose contents should be included in the prompt
                system_prompt: Optional system prompt to set context
                max_tokens: Optional maximum tokens for response
                temperature: Optional temperature for response generation

            Returns:
                Summary of the operation and where results were saved
            """
            return self._execute_ask(
                "DeepSeek", self._get_deepseek_client, question, output_file,
                model, file_path, file_paths, system_prompt, max_tokens, temperature
            )

    def run(self):
        """Run the MCP server (blocks until shutdown)."""
        self.logger.info(f"Starting {self.mcp.name} v{self.version}")
        self.mcp.run()


def main():
    """Entry point: build the combined Qwen/DeepSeek MCP server and serve."""
    QwenMCPServer("qwen-deepseek-mcp-server", "1.0.0").run()


# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
