#!/usr/bin/env node

// MCP (Model Context Protocol) server that forwards questions to a
// LiteLLM-compatible HTTP proxy. All connection/model settings are
// environment-driven; see LiteLLMMCPServer.loadConfiguration().

const path = require('path');
const os = require('os');
// Load .env from tool directory, not current working directory
const toolDir = process.env.LITELLM_TOOL_DIR || __dirname;
require('dotenv').config({ path: path.join(toolDir, '.env') });

const { Server } = require('@modelcontextprotocol/sdk/server/index.js');
const { StdioServerTransport } = require('@modelcontextprotocol/sdk/server/stdio.js');
const {
  CallToolRequestSchema,
  ListToolsRequestSchema,
} = require('@modelcontextprotocol/sdk/types.js');
const axios = require('axios');
const fs = require('fs');
// Splits AI responses into codeblock files + remaining prose (project-local)
const { processAIResponse } = require('./src/responseProcessor.js');

class LiteLLMMCPServer {
  /**
   * MCP server exposing one tool, `ask_litellm`, which sends a prompt
   * (optionally augmented with file contents) to a LiteLLM-compatible
   * /chat/completions endpoint and writes the response to an output file.
   */
  constructor() {
    this.config = this.loadConfiguration();
    this.server = new Server(
      {
        name: 'mcp-litellm-js-configurable',
        version: '1.0.0',
      },
      {
        capabilities: {
          tools: {},
        },
      }
    );

    this.setupHandlers();
    this.setupErrorHandlers();
  }

  /**
   * Resolve the workspace root used to resolve relative file paths.
   *
   * Resolution order:
   *   1. CLAUDE_CWD (only when the directory exists on disk)
   *   2. WORKSPACE_ROOT (used verbatim — no existence check)
   *   3. First of INIT_CWD / PWD / CURSOR_PROJECT_ROOT / VSCODE_CWD that
   *      exists, differs from the home directory, and has project markers
   *   4. process.cwd() when it is not the home directory
   *   5. Nearest ancestor of process.cwd() containing project markers
   *   6. process.cwd() as the final fallback
   *
   * @returns {string} directory used to resolve relative paths
   */
  getWorkspaceRoot() {
    // When MCP config uses cwd: ".", process.cwd() is where Claude Code runs from
    // This is exactly what we want for file resolution
    const currentWorkingDir = process.cwd();

    // Use CLAUDE_CWD if provided by MCP server configuration
    if (process.env.CLAUDE_CWD) {
      const claudeCwd = path.resolve(process.env.CLAUDE_CWD);
      if (fs.existsSync(claudeCwd)) {
        return claudeCwd;
      }
    }

    // Check explicit workspace root (trusted verbatim, even if absent on disk)
    if (process.env.WORKSPACE_ROOT) {
      return process.env.WORKSPACE_ROOT;
    }

    const homeDir = os.homedir();

    // Check npm/IDE environment variables; skip the home dir itself because
    // IDEs often report it even when the project lives elsewhere.
    const envPaths = [
      process.env.INIT_CWD,
      process.env.PWD,
      process.env.CURSOR_PROJECT_ROOT,
      process.env.VSCODE_CWD
    ].filter(p => p && p !== homeDir && fs.existsSync(p));

    for (const envPath of envPaths) {
      if (this.hasProjectMarkers(envPath)) {
        return envPath;
      }
    }

    // If we're not in the home directory, use current working directory
    if (currentWorkingDir !== homeDir) {
      return currentWorkingDir;
    }

    // Walk up from the current directory looking for a project root
    let currentPath = currentWorkingDir;
    while (currentPath !== path.dirname(currentPath)) {
      if (this.hasProjectMarkers(currentPath)) {
        return currentPath;
      }
      currentPath = path.dirname(currentPath);
    }

    return currentWorkingDir;
  }

  /**
   * @param {string} dirPath - directory to inspect
   * @returns {boolean} true when the directory contains a common project
   *   marker (.git, package.json, pnpm-workspace.yaml, yarn.lock)
   */
  hasProjectMarkers(dirPath) {
    const markers = ['.git', 'package.json', 'pnpm-workspace.yaml', 'yarn.lock'];
    return markers.some(marker => fs.existsSync(path.join(dirPath, marker)));
  }

  /**
   * Build the configuration from environment variables.
   *
   * Global defaults come from GLOBAL_IP / GLOBAL_PORT / DEFAULT_MODEL /
   * DEFAULT_MAX_TOKENS. Per-model overrides use the pattern
   * MODEL_<NAME>_<KEY>; <NAME> is lower-cased with underscores mapped to
   * dashes, <KEY> is lower-cased (e.g. MODEL_GPT_4_PORT -> models['gpt-4'].port).
   *
   * @returns {{global: {ip: string, port: string, model: string, maxTokens: number},
   *            models: Object<string, object>}}
   */
  loadConfiguration() {
    // Global defaults
    const globalConfig = {
      ip: process.env.GLOBAL_IP || '127.0.0.1',
      port: process.env.GLOBAL_PORT || '4000',
      model: process.env.DEFAULT_MODEL || 'gemini-2.5-flash-lite',
      // Explicit radix; NaN (unset/garbage) falls back to the default
      maxTokens: Number.parseInt(process.env.DEFAULT_MAX_TOKENS, 10) || 48192
    };

    // Per-model configuration
    const models = {};

    // Parse all environment variables for model-specific settings.
    // The first capture group is greedy, so for MODEL_GPT_4_MAXTOKENS the
    // model name is "GPT_4" and the config key is "MAXTOKENS".
    Object.keys(process.env).forEach(key => {
      const match = key.match(/^MODEL_(.+)_(.+)$/);
      if (match) {
        const [, modelName, configKey] = match;
        const normalizedModelName = modelName.toLowerCase().replace(/_/g, '-');
        const normalizedConfigKey = configKey.toLowerCase();

        if (!models[normalizedModelName]) {
          models[normalizedModelName] = {};
        }

        let value = process.env[key];
        if (normalizedConfigKey === 'maxtokens') {
          value = Number.parseInt(value, 10) || globalConfig.maxTokens;
        }

        models[normalizedModelName][normalizedConfigKey] = value;
      }
    });

    return {
      global: globalConfig,
      models: models
    };
  }

  /**
   * Effective connection settings for a model: per-model overrides layered
   * over the global defaults.
   * @param {string} modelName - model identifier as requested by the caller
   * @returns {{ip: string, port: string, maxTokens: number, model: string}}
   */
  getModelConfig(modelName) {
    const normalizedModelName = modelName.toLowerCase().replace(/_/g, '-');
    const modelConfig = this.config.models[normalizedModelName] || {};

    return {
      ip: modelConfig.ip || this.config.global.ip,
      port: modelConfig.port || this.config.global.port,
      maxTokens: modelConfig.maxtokens || this.config.global.maxTokens,
      model: modelName
    };
  }

  // Log MCP transport errors and close the server cleanly on Ctrl-C.
  setupErrorHandlers() {
    this.server.onerror = (error) => console.error('[MCP Error]', error);
    process.on('SIGINT', async () => {
      await this.server.close();
      process.exit(0);
    });
  }

  // Register the tool-list and tool-call request handlers with the MCP server.
  setupHandlers() {
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          {
            name: 'ask_litellm',
            description: 'Ask a question using configurable AI models via LiteLLM API. Can optionally read content from a single file (file_path) or multiple files (file_paths) and include it in the prompt. Results are always written to a specified output file. Model, IP, port, and maxTokens can be configured via .env file.',
            inputSchema: {
              type: 'object',
              properties: {
                question: {
                  type: 'string',
                  description: 'The question to ask the AI model',
                },
                model: {
                  type: 'string',
                  description: 'The AI model to use (optional, defaults to configured DEFAULT_MODEL or gemini-2.5-flash-lite)',
                },
                file_path: {
                  type: 'string',
                  description: 'Optional path to a file whose content should be included in the prompt. Can be absolute or relative path.',
                },
                file_paths: {
                  type: 'array',
                  items: {
                    type: 'string'
                  },
                  description: 'Optional array of file paths whose contents should be included in the prompt. Can be absolute or relative paths. Use this for multiple files instead of file_path.',
                },
                output_file: {
                  type: 'string',
                  description: 'Required path to output file where results will be saved. Results are always written to file.',
                },
              },
              required: ['question', 'output_file'],
            },
          },
        ],
      };
    });

    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      const { name, arguments: args } = request.params;

      try {
        if (name === 'ask_litellm') {
          const result = await this.handleAskLiteLLM(args);
          return result;
        } else {
          throw new Error(`Unknown tool: ${name}`);
        }
      } catch (error) {
        // Tool errors are reported to the client as an isError result, not
        // as a protocol failure.
        console.error('[DEBUG] Error in tool execution:', error);
        return {
          content: [
            {
              type: 'text',
              text: `Error: ${error.message}`,
            },
          ],
          isError: true,
        };
      }
    });
  }

  /**
   * Implementation of the `ask_litellm` tool.
   *
   * Writes a placeholder to `output_file`, optionally inlines the contents
   * of `file_path`/`file_paths` into the prompt, POSTs the prompt to the
   * configured LiteLLM endpoint, and post-processes the response with
   * processAIResponse (which may split codeblocks into separate files).
   *
   * @param {object} args
   * @param {string} args.question - prompt text (required)
   * @param {string} [args.model] - model name; defaults to the configured model
   * @param {string} [args.file_path] - single file to inline into the prompt
   * @param {string[]} [args.file_paths] - multiple files to inline; a
   *   non-empty array takes precedence over file_path
   * @param {string} args.output_file - where results are written (required)
   * @returns {Promise<{content: Array<{type: string, text: string}>}>}
   * @throws {Error} on missing arguments, missing input files, or API failure
   */
  async handleAskLiteLLM(args) {
    const { question, model: requestedModel, file_path, file_paths, output_file } = args;
    const model = requestedModel || this.config.global.model;
    const modelConfig = this.getModelConfig(model);

    if (!question) {
      throw new Error('Question is required');
    }

    if (!output_file) {
      throw new Error('Output file is required');
    }

    // Resolve and create output file
    const outputPath = path.isAbsolute(output_file) ? output_file : path.resolve(this.getWorkspaceRoot(), output_file);
    const outputDir = path.dirname(outputPath);

    if (!fs.existsSync(outputDir)) {
      fs.mkdirSync(outputDir, { recursive: true });
    }

    // Placeholder so the caller sees progress (and a trace if the API fails)
    const testContent = `# Processing Started\nCreated: ${new Date().toISOString()}\nQuestion: ${question}\nProcessing request...\n\n---\n`;
    fs.writeFileSync(outputPath, testContent, 'utf8');

    let finalQuestion = question;

    // Read file content if provided. A non-empty file_paths array wins over
    // file_path; an empty array no longer shadows a provided file_path.
    const filesToProcess = (Array.isArray(file_paths) && file_paths.length > 0)
      ? file_paths
      : (file_path ? [file_path] : []);

    if (filesToProcess.length > 0) {
      const workspaceRoot = this.getWorkspaceRoot();
      let fileContents = '';

      for (const filePath of filesToProcess) {
        const resolvedPath = path.isAbsolute(filePath) ? filePath : path.resolve(workspaceRoot, filePath);

        console.error(`[DEBUG] File resolution: file_path="${filePath}", workspaceRoot="${workspaceRoot}", resolvedPath="${resolvedPath}"`);
        console.error(`[DEBUG] process.cwd()="${process.cwd()}", __dirname="${__dirname}"`);
        console.error(`[DEBUG] Environment variables: PWD="${process.env.PWD}", INIT_CWD="${process.env.INIT_CWD}"`);

        if (!fs.existsSync(resolvedPath)) {
          throw new Error(`File not found: ${filePath}. Working directory: ${workspaceRoot}. Tried paths: ${resolvedPath}, ${path.resolve(process.cwd(), filePath)}, ${path.resolve(__dirname, filePath)}`);
        }

        const fileContent = fs.readFileSync(resolvedPath, 'utf8');
        const fileExtension = path.extname(resolvedPath).replace('.', '');
        fileContents += `\nFile: ${filePath}\nFile Content:\n\`\`\`${fileExtension}\n${fileContent}\n\`\`\`\n`;
      }

      finalQuestion = `${question}${fileContents}`;
    }

    // Append concise response instruction
    finalQuestion += `\n\nIMPORTANT: Please provide a concise, focused answer that directly addresses the question without unnecessary elaboration. Be brief and to the point.`;

    // Make API call to LiteLLM
    try {
      const response = await axios.post(
        `http://${modelConfig.ip}:${modelConfig.port}/chat/completions`,
        {
          model: modelConfig.model,
          messages: [{ role: 'user', content: finalQuestion }],
          max_tokens: modelConfig.maxTokens,
          temperature: 0.1,
        },
        {
          headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${process.env.LITELLM_API_KEY || 'sk-1234'}`,
          },
          timeout: 200000,
        }
      );

      if (response.data && response.data.choices && response.data.choices.length > 0) {
        let aiResponse = response.data.choices[0].message.content;

        const outputExt = path.extname(outputPath);
        const outputDir = path.dirname(outputPath);
        const finalBaseName = path.basename(outputPath, outputExt);

        // May extract codeblocks from the response into separate files
        const results = processAIResponse(aiResponse, finalBaseName, outputDir, outputPath, outputExt);

        let summaryText = '';

        if (results.filesCreated > 0) {
          summaryText += `✓ Processing completed successfully!\n\n`;
          summaryText += `📊 Summary:\n`;
          summaryText += `   • Original content: ${results.originalLines} lines\n`;
          summaryText += `   • Codeblocks identified: ${results.totalCodeblocks}\n`;
          summaryText += `   • Files created: ${results.filesCreated}\n`;
          if (results.totalExtractedLines > 0) {
            summaryText += `   • Code lines extracted: ${results.totalExtractedLines}\n`;
          }
          summaryText += `   • Remaining content: ${results.remainingLines} lines\n\n`;

          summaryText += `📁 Created Files:\n`;
          results.filesCreatedDetails.forEach(file => {
            const fileType = path.extname(file.path) === '.md' ? '📋' : '💾';
            summaryText += `   ${fileType} ${path.basename(file.path)} (${file.lines} lines)\n`;
          });

          if (results.remainingLines > 0) {
            fs.writeFileSync(outputPath, results.remainingContent.trim(), 'utf8');
            // Report the real file name; the output is written to outputPath,
            // not to a fixed llm_output/ directory.
            summaryText += `\n📝 Remaining content saved to: ${path.basename(outputPath)}\n`;
          } else {
            // If we created files and there's no remaining content, we can delete the placeholder file.
            fs.unlinkSync(outputPath);
            summaryText += `\n🗑️  Original file removed as content was fully extracted.\n`;
          }
        } else {
          // No files created, write the full response to the output file
          fs.writeFileSync(outputPath, aiResponse, 'utf8');
          summaryText = `Response saved to: ${path.basename(outputPath)}\n`;
        }

        return {
          content: [
            {
              type: 'text',
              text: summaryText,
            },
          ],
        };
      } else {
        throw new Error('Invalid response format from API');
      }
    } catch (apiError) {
      // Persist the failure details into the output file before rethrowing
      const errorContent = `# API Error\n\nRequest failed: ${apiError.message}\n\n## Details\n- Model: ${modelConfig.model}\n- Error Time: ${new Date().toISOString()}\n- Response: ${apiError.response ? JSON.stringify(apiError.response.data) : 'No response data'}\n`;
      fs.writeFileSync(outputPath, errorContent, 'utf8');
      throw new Error(`API request failed: ${apiError.message}`);
    }
  }

  // Start serving MCP requests over stdio.
  async run() {
    const transport = new StdioServerTransport();
    await this.server.connect(transport);
  }
}

const server = new LiteLLMMCPServer();
// Export for use in other scripts
if (require.main === module) {
  // Running as main script
  server.run().catch(console.error);
} else {
  // Required as module
  module.exports = {
    ask_litellm: async (args) => {
      return await server.handleAskLiteLLM(args);
    }
  }; 
}
