/**
 * OpenAI model adapter.
 * Supports GPT-4 and GPT-3.5-turbo via the Chat Completions API.
 */

import { BaseModelAdapter, ModelConfig, ModelResponse, StreamResponse } from './base-model-adapter';

export class OpenAIAdapter extends BaseModelAdapter {
  private readonly apiKey: string;
  private readonly baseUrl: string;

  constructor(config: ModelConfig, modelName: string) {
    super(config, modelName);
    this.apiKey = config.apiKey;
    // Fall back to the public endpoint when no override (e.g. a proxy) is configured.
    this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
  }

  /**
   * Builds the fetch options shared by generate() and stream().
   * Per-call options are merged over the adapter's base config.
   */
  private buildRequestInit(
    prompt: string,
    stream: boolean,
    options?: Partial<ModelConfig>
  ): RequestInit {
    const requestConfig = { ...this.config, ...options };
    return {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: this.modelName,
        messages: [{ role: 'user', content: prompt }],
        max_tokens: requestConfig.maxTokens,
        temperature: requestConfig.temperature,
        stream,
      }),
    };
  }

  /**
   * Sends a single (non-streaming) chat completion request.
   *
   * @param prompt  User message sent as the sole chat turn.
   * @param options Per-call overrides merged over the adapter config.
   * @returns Model output plus token usage and an estimated USD cost.
   * @throws Error on a non-OK HTTP status or a payload without choices.
   */
  async generate(prompt: string, options?: Partial<ModelConfig>): Promise<ModelResponse> {
    try {
      const response = await fetch(
        `${this.baseUrl}/chat/completions`,
        this.buildRequestInit(prompt, false, options)
      );

      if (!response.ok) {
        throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
      }

      const data = await response.json();
      const choice = data.choices?.[0];
      if (!choice) {
        // Guard against malformed/empty payloads instead of an opaque TypeError.
        throw new Error('OpenAI API returned no choices');
      }
      // usage can be absent on some payloads; default to zeros rather than crash.
      const usage = data.usage ?? { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };

      return {
        content: choice.message.content,
        usage: {
          promptTokens: usage.prompt_tokens,
          completionTokens: usage.completion_tokens,
          totalTokens: usage.total_tokens
        },
        model: this.modelName,
        finishReason: choice.finish_reason,
        cost: this.estimateCost(usage.prompt_tokens, usage.completion_tokens)
      };
    } catch (error) {
      console.error('OpenAI generation error:', error);
      throw error;
    }
  }

  /**
   * Streams a chat completion, invoking onChunk for each content delta.
   * Emits a final `{ content: '', done: true }` chunk on completion.
   *
   * @param prompt  User message sent as the sole chat turn.
   * @param onChunk Callback receiving each streamed delta.
   * @param options Per-call overrides merged over the adapter config.
   * @throws Error on a non-OK HTTP status or a missing response body.
   */
  async stream(
    prompt: string,
    onChunk: (chunk: StreamResponse) => void,
    options?: Partial<ModelConfig>
  ): Promise<void> {
    try {
      const response = await fetch(
        `${this.baseUrl}/chat/completions`,
        this.buildRequestInit(prompt, true, options)
      );

      if (!response.ok) {
        throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
      }

      const reader = response.body?.getReader();
      if (!reader) {
        throw new Error('Failed to get response reader');
      }

      const decoder = new TextDecoder();
      let buffer = '';

      try {
        while (true) {
          const { done, value } = await reader.read();

          if (done) {
            onChunk({ content: '', done: true });
            break;
          }

          buffer += decoder.decode(value, { stream: true });
          // SSE events are newline-delimited; keep any trailing partial line buffered.
          const lines = buffer.split('\n');
          buffer = lines.pop() || '';

          for (const line of lines) {
            if (!line.startsWith('data: ')) continue;
            const data = line.slice(6);
            if (data === '[DONE]') {
              onChunk({ content: '', done: true });
              return;
            }

            try {
              const parsed = JSON.parse(data);
              const content = parsed.choices[0]?.delta?.content || '';
              onChunk({ content, done: false, usage: parsed.usage });
            } catch {
              // Deliberate best-effort: a malformed/truncated event is dropped, not fatal.
            }
          }
        }
      } finally {
        // Fix: the original leaked the reader lock on the '[DONE]' early return
        // and on any error thrown inside the read loop.
        reader.releaseLock();
      }
    } catch (error) {
      console.error('OpenAI streaming error:', error);
      throw error;
    }
  }

  /**
   * Rough token count using the ~4-characters-per-token heuristic.
   * A production implementation should use tiktoken for exact counts.
   */
  async countTokens(text: string): Promise<number> {
    return Math.ceil(text.length / 4);
  }

  /**
   * Estimates the request cost in USD.
   *
   * Fix: GPT-4 completion tokens are billed at a higher list rate (0.06/1K)
   * than prompt tokens (0.03/1K); the original priced both at the prompt
   * rate and systematically undercounted. GPT-3.5 keeps the flat 0.002/1K.
   */
  estimateCost(promptTokens: number, completionTokens: number): number {
    const isGpt4 = this.modelName.includes('gpt-4');
    const promptRatePer1k = isGpt4 ? 0.03 : 0.002;
    const completionRatePer1k = isGpt4 ? 0.06 : 0.002;
    return (
      (promptTokens / 1000) * promptRatePer1k +
      (completionTokens / 1000) * completionRatePer1k
    );
  }

  /**
   * Lightweight liveness probe: lists models and reports whether the
   * endpoint answered with an OK status. Never throws.
   */
  async healthCheck(): Promise<boolean> {
    try {
      const response = await fetch(`${this.baseUrl}/models`, {
        headers: {
          'Authorization': `Bearer ${this.apiKey}`,
        }
      });
      return response.ok;
    } catch {
      return false;
    }
  }
}
