/**
 * Transformer that pins requests to DashScope's `qwen-long` model.
 *
 * Responsibilities (as implemented below):
 *  - Estimate the input token count of `request.messages` with a cheap
 *    character-based heuristic and refuse requests above a hard cap.
 *  - Force qwen-long-appropriate request parameters (model name, max_tokens,
 *    stream, temperature, top_p) onto the outgoing OpenAI-format request.
 */
class QwenLongTransformer {
  name = "qwen-long";

  /**
   * @param {object} [options] - Optional configuration overrides.
   * @param {number} [options.max_tokens=32768] - Max completion tokens sent upstream.
   * @param {number} [options.max_input_tokens=31000] - Hard input-token cap; requests above it are rejected.
   * @param {number} [options.long_context_threshold=16000] - Threshold used only for the "large context" log line.
   * @param {boolean} [options.enable_thinking=false] - Pass-through Qwen "thinking" flag.
   * @param {boolean} [options.stream=false] - Default false to prevent SSE parsing issues.
   * @param {number} [options.temperature=0.7]
   * @param {number} [options.top_p=0.9]
   */
  constructor(options = {}) {
    // `??` (not `||`) so explicit falsy-but-valid values like temperature: 0
    // or long_context_threshold: 0 are honored instead of being replaced.
    this.max_tokens = options.max_tokens ?? 32768;
    this.max_input_tokens = options.max_input_tokens ?? 31000; // Hard input cap
    this.long_context_threshold = options.long_context_threshold ?? 16000; // Log-only threshold for large contexts
    this.enable_thinking = options.enable_thinking ?? false;
    this.stream = options.stream ?? false; // Default to false to prevent SSE parsing issues
    this.temperature = options.temperature ?? 0.7;
    this.top_p = options.top_p ?? 0.9;
  }

  /**
   * Estimate the token count of a string (same heuristic as ark-transformer).
   *
   * Heuristic: ~4 chars/token for ASCII-ish text, ~1.5 chars/token for
   * everything else (CJK etc.). Intentionally rough — used only for budgeting.
   *
   * @param {string} text
   * @returns {number} Estimated token count; 0 for non-string/empty input.
   */
  estimateTokenCount(text) {
    if (!text || typeof text !== 'string') return 0;
    // Rough estimation: 1 token ≈ 4 characters for English, 1-2 characters for Chinese
    const englishChars = text.match(/[a-zA-Z0-9\s\.,!?;:\-'"()[\]{}]/g)?.length || 0;
    const otherChars = text.length - englishChars;
    return Math.ceil(englishChars / 4 + otherChars / 1.5);
  }

  /**
   * Sum estimated input tokens across an OpenAI-style messages array.
   *
   * Handles both plain-string content and multimodal content arrays
   * (text parts are estimated; each image part is charged a flat 1000 tokens).
   * Each message with a role also adds a flat 5 tokens of metadata overhead.
   *
   * @param {Array<object>} messages
   * @returns {number} Estimated total input tokens; 0 if not an array.
   */
  calculateInputTokens(messages) {
    if (!Array.isArray(messages)) return 0;

    let totalTokens = 0;
    for (const message of messages) {
      if (message.content) {
        if (typeof message.content === 'string') {
          totalTokens += this.estimateTokenCount(message.content);
        } else if (Array.isArray(message.content)) {
          // Handle multimodal content
          for (const content of message.content) {
            if (content.type === 'text' && content.text) {
              totalTokens += this.estimateTokenCount(content.text);
            } else if (content.type === 'image') {
              totalTokens += 1000; // Conservative estimate for image
            }
          }
        }
      }

      // Add tokens for role and other metadata
      if (message.role) {
        totalTokens += 5;
      }
    }

    return totalTokens;
  }

  /**
   * Validate and rewrite an outgoing request for qwen-long.
   *
   * Mutates and returns `request`: enforces the input-token cap, then forces
   * model/max_tokens/stream/temperature/top_p to this transformer's settings.
   *
   * @param {object} request - OpenAI-format chat request (mutated in place).
   * @param {object} provider - Provider context (unused here).
   * @returns {Promise<object>} The same request object, rewritten.
   * @throws {Error} With `code: 'INPUT_TOKEN_LIMIT_EXCEEDED'` (plus
   *   `inputTokens` / `maxInputTokens` fields) when the estimated input
   *   exceeds `max_input_tokens`.
   */
  async transformRequestIn(request, provider) {
    // Check input token count
    if (request.messages) {
      const inputTokens = this.calculateInputTokens(request.messages);

      // Hard limit: refuse requests whose estimated input exceeds max_input_tokens
      if (inputTokens > this.max_input_tokens) {
        const warningMessage = `⚠️  WARNING: Input token count (${inputTokens}) exceeds maximum limit (${this.max_input_tokens}). Request refused to prevent excessive costs.`;
        console.warn(warningMessage);

        const error = new Error(`Input token count (${inputTokens}) exceeds maximum limit (${this.max_input_tokens}). Please reduce the input length and try again.`);
        error.code = 'INPUT_TOKEN_LIMIT_EXCEEDED';
        error.inputTokens = inputTokens;
        error.maxInputTokens = this.max_input_tokens;
        throw error;
      }

      // Log token usage and routing decision.
      // Env vars are always strings, so only the string 'true' is checked
      // (the old `=== true` comparison against a boolean could never match).
      if (process.env.LOG === 'true') {
        if (inputTokens > this.long_context_threshold) {
          console.log(`[QwenLongTransformer] Large context detected (${inputTokens} tokens > ${this.long_context_threshold}). Using qwen-long model.`);
        }
        console.log(`[QwenLongTransformer] Input tokens: ${inputTokens}/${this.max_input_tokens}`);
      }
    }

    // Set DashScope/Qwen-specific parameters
    request.model = "qwen-long";  // Ensure correct model name
    request.max_tokens = this.max_tokens;
    request.stream = this.stream;
    request.temperature = this.temperature;
    request.top_p = this.top_p;

    // Handle Qwen-specific parameters if needed
    if (this.enable_thinking) {
      request.enable_thinking = this.enable_thinking;
    }

    // Ensure the request follows OpenAI format for DashScope compatibility
    if (!request.messages) {
      request.messages = [];
    }

    // Add stream_options for usage tracking only if streaming is enabled
    if (this.stream) {
      request.stream_options = { "include_usage": true };
    } else {
      // Ensure no streaming-related options when disabled
      delete request.stream_options;
    }

    return request;
  }

  /**
   * Pass-through response hook.
   * DashScope is OpenAI-compatible, so no transformation is applied.
   *
   * @param {object} response
   * @param {object} provider - Provider context (unused here).
   * @returns {Promise<object>} The response, unchanged.
   */
  async transformResponseOut(response, provider) {
    return response;
  }
}

// CommonJS export: consumers require() this class and instantiate it per provider config.
module.exports = QwenLongTransformer;