import { Command } from 'commander';
import inquirer from 'inquirer';
import { Config, ConfigError } from './types.js';

// Define a type for the model information we expect from the API
// Define a type for the model information we expect from the API.
// Different OpenAI-compatible providers use different field names for the
// same concepts, so several aliases are declared and checked in order.
interface ModelInfo {
  // Model identifier as reported by the provider's /models endpoint.
  id: string;
  // Context window size; `context_length` is preferred, `context_size` is a
  // provider-specific alias (see autoDetectMaxTokens / promptForModel).
  context_length?: number;
  context_size?: number;
  // Maximum output tokens; `max_output_tokens` is preferred, `max_tokens`
  // is a provider-specific alias.
  max_tokens?: number;
  max_output_tokens?: number;
  // Capability flags; used to filter for 'function-calling' support.
  features?: string[];
  // NOTE(review): declared but not read anywhere in this file — presumably
  // another provider's shape for token limits; confirm before removing.
  limits?: {
    max_tokens?: number;
  };
}

const DEFAULT_MAX_TOKENS = 8192; // Safe fallback when --max-tokens is omitted and auto-detection fails
const FETCH_TIMEOUT = 15000; // Timeout for the /models request, in milliseconds (15 seconds)

export class ConfigManager {
  /**
   * Builds a {@link Config} from CLI arguments and environment variables.
   *
   * Required inputs: an API URL (`-u`/`--url` or `CLAUDE_BRIDGE_API_URL`) and
   * an API key (`-k`/`--key` or `CLAUDE_BRIDGE_API_KEY`). If no model is
   * given, the user is prompted interactively with the provider's model list.
   * If `--max-tokens` is not given, a best-effort auto-detection runs; on
   * failure the default is kept and a warning is printed.
   *
   * @param argv - Argument vector to parse (defaults to `process.argv`).
   * @returns The validated configuration.
   * @throws ConfigError when a required option is missing or a value is invalid.
   */
  static async fromCLI(argv: string[] = process.argv): Promise<Config> {
    const program = new Command();

    program
      .name('claude-bridge')
      .description('A proxy server that enables Claude Code CLI to work with OpenAI-compatible API providers')
      .version('1.0.0')
      .option('-u, --url <string>', 'Target API base URL (e.g., https://api.openai.com/v1)')
      .option('-m, --model <string>', 'Model name (e.g., gpt-4, gpt-4o-mini)')
      .option('-k, --key <string>', 'API key (or set CLAUDE_BRIDGE_API_KEY)')
      .option('-p, --port <number>', 'Server port', (value) => parseInt(value, 10), 8000)
      .option('--host <string>', 'Server host', 'localhost')
      .option('--max-tokens <number>', 'Maximum tokens limit (auto-detected if not set)', (value) => parseInt(value, 10))
      .option('--timeout <number>', 'Request timeout in seconds', (value) => parseInt(value, 10), 30)
      .option('--log-level <level>', 'Log level (debug|info|warn|error)', 'info')
      .parse(argv);

    const options = program.opts();

    // CLI flags take precedence over environment variables.
    const url = options.url || process.env.CLAUDE_BRIDGE_API_URL;
    if (!url) {
      throw new ConfigError('API URL is required. Use -u/--url or set CLAUDE_BRIDGE_API_URL.');
    }

    const apiKey = options.key || process.env.CLAUDE_BRIDGE_API_KEY;
    if (!apiKey) {
      throw new ConfigError('API key is required. Use -k/--key or set CLAUDE_BRIDGE_API_KEY.');
    }

    const config: Config = {
      url,
      model: options.model ? options.model.trim() : undefined, // Sanitize model name
      apiKey,
      port: options.port,
      host: options.host,
      // `||` (not `??`) is deliberate: a failed parseInt yields NaN, which
      // is falsy and must also fall back to the default.
      maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
      timeout: options.timeout,
      logLevel: options.logLevel as Config['logLevel'],
    };

    // If model is not provided, prompt user to select one
    if (!config.model) {
      try {
        config.model = await this.promptForModel(config);
      } catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        throw new ConfigError(`Failed to select a model: ${message}`);
      }
    }

    // Auto-detect maxTokens if not provided by user. Failure is non-fatal:
    // the default (already in config.maxTokens) is kept.
    if (!options.maxTokens) {
      try {
        const detectedConfig = await this.autoDetectMaxTokens(config);
        config.maxTokens = detectedConfig.maxTokens;
      } catch (error) {
        console.warn(`⚠️  Could not auto-detect max tokens: ${error instanceof Error ? error.message : String(error)}. Using default: ${config.maxTokens}`);
      }
    }

    this.validate(config);
    return config;
  }

  /**
   * Fetches the provider's model list from `<url>/models`.
   *
   * Accepts either a bare array or the OpenAI-style `{ data: [...] }` envelope.
   * @throws Error on network failure, non-2xx status, timeout, or an
   *   unrecognized response shape.
   */
  private static async fetchAvailableModels(config: Pick<Config, 'url' | 'apiKey'>): Promise<ModelInfo[]> {
    // Strip a single trailing slash so we never produce '.../models' with '//'.
    const modelsUrl = `${config.url.replace(/\/$/, '')}/models`;
    console.log(`🔎 Fetching available models from ${modelsUrl}...`);

    try {
      const response = await fetch(modelsUrl, {
        headers: { 'Authorization': `Bearer ${config.apiKey}` },
        signal: AbortSignal.timeout(FETCH_TIMEOUT)
      });

      if (!response.ok) {
        throw new Error(`API request failed with status ${response.status}`);
      }

      const modelsResponse = await response.json() as { data?: ModelInfo[] } | ModelInfo[];

      let modelList: ModelInfo[];
      if (Array.isArray(modelsResponse)) {
        modelList = modelsResponse;
      } else if (modelsResponse && Array.isArray(modelsResponse.data)) {
        modelList = modelsResponse.data;
      } else {
        throw new Error('Model list not found in API response. Expected an array or an object with a "data" property.');
      }
      return modelList;
    } catch (error) {
      // AbortSignal.timeout() rejects with a DOMException named 'TimeoutError'
      // (per the WHATWG DOM spec); some older runtimes surface 'AbortError'.
      // Checking only 'AbortError' (the previous behavior) missed real timeouts.
      if (error instanceof Error && (error.name === 'TimeoutError' || error.name === 'AbortError')) {
        throw new Error(`Request to ${modelsUrl} timed out after ${FETCH_TIMEOUT / 1000} seconds.`);
      }
      throw new Error(`Failed to fetch or parse model list: ${error instanceof Error ? error.message : String(error)}`);
    }
  }

  /**
   * Interactively asks the user to pick a model from the provider's list.
   *
   * Models advertising the 'function-calling' feature are preferred; when
   * none do, the full list is shown with a warning.
   * @returns The selected model id.
   * @throws Error when the provider returns an empty model list or the
   *   fetch itself fails.
   */
  private static async promptForModel(config: Pick<Config, 'url' | 'apiKey'>): Promise<string> {
    let models = await this.fetchAvailableModels(config);

    if (models.length === 0) {
      throw new Error('No models available from the API.');
    }

    // Filter for models that support function calling
    const functionCallingModels = models.filter(model =>
      model.features && Array.isArray(model.features) && model.features.includes('function-calling')
    );

    if (functionCallingModels.length > 0) {
      console.log('ℹ️  Showing models that support function calling.');
      models = functionCallingModels;
    } else {
      console.warn('⚠️  Could not find any models that explicitly support function calling. Showing all models.');
    }

    const choices = models.map(model => {
      const context = model.context_length || model.context_size || 'N/A';
      const output = model.max_output_tokens || model.max_tokens || 'N/A';
      return {
        name: `${model.id} (Context: ${context}, Output: ${output})`,
        value: model.id,
      };
    });

    const { selectedModel } = await inquirer.prompt([
      {
        type: 'list',
        name: 'selectedModel',
        message: 'Please select a model to use:',
        choices: choices,
        pageSize: 15,
        loop: false,
      },
    ]);

    // Log the chosen id, not the raw ModelInfo object (which printed as an
    // unreadable object dump).
    console.log(`✅ Selected model: ${selectedModel}`);

    return selectedModel;
  }

  /**
   * Determines a sensible `maxTokens` for `config.model` from the provider's
   * model metadata.
   *
   * Prefers an explicit max-output-token figure; otherwise derives 90% of the
   * context window as a safety margin. Returns the config unchanged (with a
   * warning) when neither is available.
   * @returns A new config with `maxTokens` replaced when detection succeeds.
   * @throws Error when the configured model is absent from the provider's list.
   */
  static async autoDetectMaxTokens(config: Config): Promise<Config> {
    const models = await this.fetchAvailableModels(config);
    const modelInfo = models.find(m => m.id === config.model);

    if (!modelInfo) {
      throw new Error(`Model "${config.model}" not found in the list returned by the API.`);
    }

    const outputTokens = modelInfo.max_output_tokens || modelInfo.max_tokens;
    const contextWindow = modelInfo.context_length || modelInfo.context_size;

    let detectedMax: number | undefined;
    if (outputTokens && typeof outputTokens === 'number') {
        detectedMax = outputTokens;
        console.log(`✅ Detected max output tokens: ${detectedMax}.`);
    } else if (contextWindow && typeof contextWindow === 'number') {
        detectedMax = Math.floor(contextWindow * 0.9);
        console.log(`✅ Detected context window: ${contextWindow}. Setting max_tokens to ${detectedMax} (90% safety margin).`);
    }

    if (detectedMax) {
      return { ...config, maxTokens: detectedMax };
    }

    console.warn('⚠️ Could not determine max tokens from model info. Using default.');
    return config;
  }

  /**
   * Validates a fully-assembled config, throwing on the first problem found.
   * @throws ConfigError describing the invalid field.
   */
  static validate(config: Config): void {
    // Validate URL
    try {
      new URL(config.url);
    } catch {
      throw new ConfigError(`Invalid URL: ${config.url}`);
    }

    // Validate port. Number.isInteger also rejects NaN (from a failed
    // parseInt of --port), which previously slipped past the range checks
    // because every comparison with NaN is false.
    if (!Number.isInteger(config.port) || config.port < 1 || config.port > 65535) {
      throw new ConfigError(`Invalid port: ${config.port}. Must be between 1 and 65535`);
    }

    // Validate model name
    if (!config.model || config.model.trim().length === 0) {
      throw new ConfigError('Model name cannot be empty');
    }

    // REMOVED: maxTokens validation is now handled by auto-detection and user override

    // Validate timeout. Same NaN guard as the port check above.
    if (!Number.isInteger(config.timeout) || config.timeout < 1 || config.timeout > 300) {
      throw new ConfigError(`Invalid timeout: ${config.timeout}. Must be between 1 and 300 seconds`);
    }

    // Validate log level
    const validLogLevels = ['debug', 'info', 'warn', 'error'];
    if (!validLogLevels.includes(config.logLevel)) {
      throw new ConfigError(`Invalid log level: ${config.logLevel}. Must be one of: ${validLogLevels.join(', ')}`);
    }
  }

  /**
   * Renders the post-startup banner with connection details and
   * copy-pasteable instructions for pointing Claude Code CLI at this proxy.
   */
  static getQuickStartInstructions(config: Config): string {
    return `
🌉 Claude Bridge v1.0.0

✅ Server running at: http://${config.host}:${config.port}
🎯 Target API: ${config.url}
🤖 Model: ${config.model}

📖 Usage with Claude Code CLI:
   export ANTHROPIC_BASE_URL=http://${config.host}:${config.port}
   export ANTHROPIC_AUTH_TOKEN="dummy"
   claude

🔗 Health check: http://${config.host}:${config.port}/health
`;
  }
}