import { ModelType, ModelParameters } from '@/types/models';
import { PromptTemplate } from '@/types/prompt';

/** Payload for a single (optionally streaming) model generation call. */
export interface GenerationRequest {
  /** Target model identifier. */
  model: ModelType;
  /** Conversation history in chronological order. */
  messages: Array<{
    role: 'user' | 'assistant' | 'system';
    content: string;
  }>;
  /** Sampling parameters (temperature, topP, maxTokens, penalties). */
  parameters: ModelParameters;
  /** When true, the backend streams the response via Server-Sent Events. */
  stream?: boolean;
  promptTemplate?: PromptTemplate;  // Optional prompt template applied before sending
  promptVariables?: Record<string, any>;  // Values substituted into template {placeholders}
  // Custom model configuration (bring-your-own provider / key / endpoint)
  provider?: string;
  apiKey?: string;
  baseUrl?: string;
}

/** Result of a non-streaming generation call; `error` is set on failure. */
export interface GenerationResponse {
  /** Backend-assigned response id (empty string on failure). */
  id: string;
  /** Model that was requested (echoed back even on failure). */
  model: ModelType;
  /** Generated text (empty string on failure). */
  content: string;
  /** Token accounting, when the backend reports it. */
  usage?: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
  /** Wall-clock duration of the call in milliseconds, including retries. */
  responseTime?: number;
  /** Human-readable error description; absent on success. */
  error?: string;
}

/** Cached availability probe result for one model. */
export interface ModelHealthStatus {
  model: ModelType;
  /** True when the health endpoint answered with an OK status. */
  available: boolean;
  /** Health-probe round-trip time in milliseconds (absent if the fetch threw). */
  responseTime?: number;
  /** When the probe ran; used for the one-minute cache window. */
  lastChecked: Date;
  /** Failure description; absent when `available` is true. */
  error?: string;
}

class GenerationService {
  /** Base URL of the AI backend; empty string means same-origin relative paths. */
  private baseUrl: string;
  /** Total attempts per generate() call (first try + retries). */
  private retryAttempts: number = 3;
  /** Base delay in ms for exponential backoff between retries. */
  private retryDelay: number = 1000; // ms
  /** Per-model health results, cached for one minute. */
  private modelHealthCache: Map<ModelType, ModelHealthStatus> = new Map();

  constructor() {
    // Use empty string to use relative path (proxied by Vite in dev mode)
    // This allows mobile devices to access through the same host
    this.baseUrl = import.meta.env.VITE_AI_SERVICE_URL || '';
  }

  /**
   * Build the final message list for a request, applying the optional prompt
   * template (system prompt + user-prompt prefix) with variable substitution.
   *
   * Messages are deep-copied first: the previous implementation spread the
   * array but kept the original message objects, so rewriting the last user
   * message's `content` mutated the caller's data.
   */
  private buildMessages(request: GenerationRequest): GenerationRequest['messages'] {
    // Copy each message object — `content` may be rewritten below.
    const finalMessages = request.messages.map(m => ({ ...m }));

    if (!request.promptTemplate) {
      return finalMessages;
    }

    const processed = this.processPromptTemplate(
      request.promptTemplate,
      request.promptVariables || {}
    );

    // Prepend a system message when the template defines one.
    if (processed.systemPrompt) {
      finalMessages.unshift({
        role: 'system',
        content: processed.systemPrompt
      });
    }

    // Prefix the most recent user message when the template defines a prefix.
    if (processed.userPromptPrefix && finalMessages.length > 0) {
      const lastUserMessage = finalMessages.findLast(m => m.role === 'user');
      if (lastUserMessage) {
        lastUserMessage.content = `${processed.userPromptPrefix}\n${lastUserMessage.content}`;
      }
    }

    return finalMessages;
  }

  /** Common request headers: JSON body plus bearer auth. */
  private buildHeaders(): Record<string, string> {
    return {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${this.getAuthToken()}`
    };
  }

  /**
   * Serialize the JSON body shared by generate() and streamGenerate().
   *
   * `overrides` lets streamGenerate() feed in the snake_case `api_key` /
   * `api_base` extras its legacy callers use; otherwise the camelCase
   * fields on GenerationRequest are sent.
   */
  private buildPayload(
    request: GenerationRequest,
    messages: GenerationRequest['messages'],
    stream: boolean,
    overrides: { apiKey?: string; apiBase?: string } = {}
  ): string {
    return JSON.stringify({
      model: request.model,
      messages,
      parameters: {
        temperature: request.parameters.temperature,
        top_p: request.parameters.topP,
        max_tokens: request.parameters.maxTokens,
        presence_penalty: request.parameters.presencePenalty,
        frequency_penalty: request.parameters.frequencyPenalty
      },
      stream,
      // Prompt id/version let the backend attribute output to a template.
      prompt_id: request.promptTemplate?.id,
      prompt_version: request.promptTemplate?.version,
      // Custom model configuration (bring-your-own provider/key/endpoint).
      provider: request.provider,
      api_key: overrides.apiKey ?? request.apiKey,
      api_base: overrides.apiBase ?? request.baseUrl
    });
  }

  /**
   * Send a generation request to the API.
   *
   * Retries transient failures with exponential backoff; model-specific
   * errors (rate limits, bad keys, quota) short-circuit without retrying.
   * Never rejects — failures are reported via the `error` field.
   */
  async generate(request: GenerationRequest): Promise<GenerationResponse> {
    const startTime = Date.now();
    const finalMessages = this.buildMessages(request);

    for (let attempt = 0; attempt < this.retryAttempts; attempt++) {
      try {
        const response = await fetch(`${this.baseUrl}/ai/generate`, {
          method: 'POST',
          headers: this.buildHeaders(),
          body: this.buildPayload(request, finalMessages, request.stream || false)
        });

        if (!response.ok) {
          // The error body may not be JSON; fall back to the HTTP status
          // instead of letting the parse failure mask it.
          const message: string | undefined = await response
            .json()
            .then((e: any) => e?.message)
            .catch(() => undefined);
          throw new Error(message || `HTTP ${response.status}`);
        }

        const data = await response.json();

        return {
          id: data.id,
          model: request.model,
          content: data.content,
          usage: data.usage,
          responseTime: Date.now() - startTime
        };
      } catch (error) {
        console.error(`Generation attempt ${attempt + 1} failed:`, error);

        // Rate limits, bad API keys etc. won't be fixed by retrying.
        if (this.isModelSpecificError(error)) {
          return this.handleModelSpecificError(request, error, startTime);
        }

        // Retry transient failures with exponential backoff.
        if (attempt < this.retryAttempts - 1) {
          await this.delay(this.retryDelay * Math.pow(2, attempt));
          continue;
        }

        // Final failure after exhausting retries.
        return {
          id: '',
          model: request.model,
          content: '',
          error: error instanceof Error ? error.message : 'Generation failed',
          responseTime: Date.now() - startTime
        };
      }
    }

    // Unreachable in practice (every loop iteration returns or continues),
    // kept so all code paths produce a value.
    return {
      id: '',
      model: request.model,
      content: '',
      error: 'Max retry attempts reached',
      responseTime: Date.now() - startTime
    };
  }

  /**
   * Stream generation output using Server-Sent Events.
   *
   * Yields content chunks as they arrive; returns when the server sends
   * `[DONE]`. Throws on HTTP errors or a missing response body.
   */
  async *streamGenerate(request: GenerationRequest & { provider?: string; api_key?: string; api_base?: string }): AsyncGenerator<string, void, unknown> {
    // Same template handling as generate(), via the shared helper.
    const finalMessages = this.buildMessages(request);

    const response = await fetch(`${this.baseUrl}/ai/generate`, {
      method: 'POST',
      headers: this.buildHeaders(),
      // Snake_case extras (legacy stream callers) take precedence; the
      // camelCase GenerationRequest fields are used as fallback.
      body: this.buildPayload(request, finalMessages, true, {
        apiKey: request.api_key,
        apiBase: request.api_base
      })
    });

    if (!response.ok) {
      throw new Error(`HTTP ${response.status}`);
    }

    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error('No response body');
    }

    const decoder = new TextDecoder();
    // Holds a trailing partial line until the next chunk completes it.
    let buffer = '';

    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (line.startsWith('data: ')) {
            const data = line.slice(6);
            if (data === '[DONE]') {
              return;
            }
            try {
              const json = JSON.parse(data);
              if (json.content) {
                yield json.content;
              }
            } catch (e) {
              // Malformed SSE payloads are logged and skipped, not fatal.
              console.error('Failed to parse SSE data:', e);
            }
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
  }

  /**
   * Check model health and availability.
   *
   * Results are cached for one minute per model; failures are cached too,
   * so a flapping backend is not hammered. Never rejects.
   */
  async checkModelHealth(model: ModelType): Promise<ModelHealthStatus> {
    // Serve from cache while the entry is fresh (< 1 minute old).
    const cached = this.modelHealthCache.get(model);
    if (cached && Date.now() - cached.lastChecked.getTime() < 60000) {
      return cached;
    }

    try {
      const startTime = Date.now();
      const response = await fetch(`${this.baseUrl}/ai/models/${model}/health`, {
        method: 'GET',
        headers: {
          'Authorization': `Bearer ${this.getAuthToken()}`
        }
      });

      const available = response.ok;
      const responseTime = Date.now() - startTime;

      const status: ModelHealthStatus = {
        model,
        available,
        responseTime,
        lastChecked: new Date(),
        error: available ? undefined : `Model ${model} is not available`
      };

      this.modelHealthCache.set(model, status);
      return status;
    } catch (error) {
      const status: ModelHealthStatus = {
        model,
        available: false,
        lastChecked: new Date(),
        error: error instanceof Error ? error.message : 'Health check failed'
      };

      this.modelHealthCache.set(model, status);
      return status;
    }
  }

  /**
   * Check health of multiple models in parallel.
   * Safe with Promise.all because checkModelHealth() never rejects.
   */
  async checkMultipleModelsHealth(models: ModelType[]): Promise<ModelHealthStatus[]> {
    return Promise.all(models.map(model => this.checkModelHealth(model)));
  }

  /**
   * Translate a model-specific error into a user-facing GenerationResponse.
   *
   * @param startTime  When the originating call began; used to report
   *                   responseTime consistently with the other failure paths.
   */
  private handleModelSpecificError(
    request: GenerationRequest,
    error: any,
    startTime?: number
  ): GenerationResponse {
    const errorMessage = error?.message || error?.toString() || 'Unknown error';
    const responseTime = startTime !== undefined ? Date.now() - startTime : undefined;

    // Known provider error keywords mapped to friendly messages.
    const errorHandlers: Record<string, string> = {
      'rate_limit': 'Rate limit exceeded. Please try again later.',
      'context_length': 'Input too long for this model. Try reducing the message length.',
      'api_key': 'Invalid API key for this model.',
      'quota': 'API quota exceeded for this model.',
      'model_not_found': 'Model not available. Please select another model.'
    };

    const lowered = errorMessage.toLowerCase();
    for (const [key, message] of Object.entries(errorHandlers)) {
      if (lowered.includes(key)) {
        return {
          id: '',
          model: request.model,
          content: '',
          error: message,
          responseTime
        };
      }
    }

    return {
      id: '',
      model: request.model,
      content: '',
      error: `Model error: ${errorMessage}`,
      responseTime
    };
  }

  /**
   * True when the error message matches a known non-retryable,
   * model-specific failure keyword.
   */
  private isModelSpecificError(error: any): boolean {
    const modelErrors = ['rate_limit', 'context_length', 'api_key', 'quota', 'model_not_found'];
    const errorMessage = error?.message?.toLowerCase() || '';
    return modelErrors.some(err => errorMessage.includes(err));
  }

  /**
   * Get auth token from storage.
   */
  private getAuthToken(): string {
    // TODO: Implement proper auth token retrieval
    return localStorage.getItem('auth_token') || '';
  }

  /**
   * Delay helper for retry logic.
   */
  private delay(ms: number): Promise<void> {
    return new Promise(resolve => setTimeout(resolve, ms));
  }

  /**
   * Substitute `{key}` placeholders in the template's prompts.
   *
   * Uses split/join rather than a RegExp built from the key: a key
   * containing regex metacharacters would previously corrupt the pattern,
   * and `$`-sequences (e.g. `$&`) in a value would be interpreted as
   * replacement patterns by String.prototype.replace.
   */
  private processPromptTemplate(
    template: PromptTemplate,
    variables: Record<string, any>
  ): { systemPrompt?: string; userPromptPrefix?: string } {
    let systemPrompt = template.systemPrompt || '';
    let userPromptPrefix = template.userPromptPrefix || '';

    for (const [key, value] of Object.entries(variables)) {
      const placeholder = `{${key}}`;
      const replacement = String(value);
      systemPrompt = systemPrompt.split(placeholder).join(replacement);
      userPromptPrefix = userPromptPrefix.split(placeholder).join(replacement);
    }

    return { systemPrompt, userPromptPrefix };
  }

  /**
   * Estimate the dollar cost of `tokens` tokens on `model`.
   * Unknown models fall back to a mid-range default rate.
   */
  calculateCost(model: ModelType, tokens: number): number {
    // USD per token; rough published rates per provider.
    const costPerToken = {
      [ModelType.GPT4]: 0.00003,
      [ModelType.GPT35]: 0.000001,
      [ModelType.CLAUDE]: 0.00002,
      [ModelType.QWEN]: 0.00001,
      [ModelType.ERNIE]: 0.000012,
      [ModelType.SPARK]: 0.000015,
      [ModelType.KIMI]: 0.000012,
      [ModelType.DEEPSEEK]: 0.000008,
      [ModelType.DOUBAO]: 0.00001,
      [ModelType.HUNYUAN]: 0.000011
    };

    return tokens * (costPerToken[model] || 0.00001);
  }
}

// Shared singleton instance used across the app.
export const generationService = new GenerationService();