import { OpenAI } from "openai";
import { IEmbedder, EmbeddingResponse, EmbedderInfo } from "../interfaces/index.js";
import {
  MAX_BATCH_TOKENS,
  MAX_ITEM_TOKENS,
  MAX_BATCH_RETRIES,
  INITIAL_RETRY_DELAY_MS,
} from "../constants/index.js";
import { formatEmbeddingError, withValidationErrorHandling, HttpError } from "../utils/validation-helpers.js";

/**
 * OpenAI implementation of the embedder interface with batching and rate limiting
 */
/**
 * OpenAI implementation of the embedder interface with batching and rate limiting.
 *
 * Texts are truncated to a rough per-item token budget, grouped into batches
 * that respect MAX_BATCH_TOKENS, and sent with exponential-backoff retries on
 * HTTP 429 rate-limit responses.
 */
export class OpenAiEmbedder implements IEmbedder {
  private embeddingsClient: OpenAI;
  private readonly defaultModelId: string;

  /**
   * @param options.apiKey - OpenAI API key (required).
   * @param options.modelId - Embedding model; defaults to "text-embedding-3-small".
   * @throws Error when no API key is provided.
   */
  constructor(options: { apiKey: string; modelId?: string }) {
    if (!options.apiKey) {
      throw new Error("OpenAI API key is required");
    }

    this.embeddingsClient = new OpenAI({ apiKey: options.apiKey });
    this.defaultModelId = options.modelId || "text-embedding-3-small";
  }

  /**
   * Creates embeddings for the given texts with batching and rate limiting.
   *
   * Over-long texts are truncated (token counts are estimated at ~4 chars per
   * token). Input order is preserved: batches are taken as consecutive
   * prefixes, so `embeddings[i]` corresponds to `texts[i]`.
   *
   * @param texts - Texts to embed.
   * @param model - Optional model override; falls back to the default model.
   * @returns Embedding vectors plus aggregated token usage across all batches.
   */
  async createEmbeddings(texts: string[], model?: string): Promise<EmbeddingResponse> {
    const modelToUse = model || this.defaultModelId;

    // Truncate texts that exceed the per-item token budget up front, using the
    // rough heuristic of ~4 characters per token.
    const processedTexts = texts.map((text, index) => {
      const estimatedTokens = Math.ceil(text.length / 4);
      if (estimatedTokens > MAX_ITEM_TOKENS) {
        console.warn(`Text at index ${index} exceeds token limit (${estimatedTokens} > ${MAX_ITEM_TOKENS})`);
        // Truncate text if it's too long
        return text.substring(0, MAX_ITEM_TOKENS * 4);
      }
      return text;
    });

    const allEmbeddings: number[][] = [];
    const usage = { promptTokens: 0, totalTokens: 0 };

    // Greedy prefix batching: each batch takes as many consecutive texts as
    // fit within MAX_BATCH_TOKENS, but always at least one. Forcing the first
    // item in guarantees forward progress even if a single item estimates
    // above the batch budget (the API will then report a clear error instead
    // of this loop spinning forever).
    let start = 0;
    while (start < processedTexts.length) {
      let end = start;
      let currentBatchTokens = 0;

      while (end < processedTexts.length) {
        const itemTokens = Math.ceil(processedTexts[end].length / 4);
        if (end > start && currentBatchTokens + itemTokens > MAX_BATCH_TOKENS) {
          break;
        }
        currentBatchTokens += itemTokens;
        end++;
      }

      const currentBatch = processedTexts.slice(start, end);
      const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse);
      allEmbeddings.push(...batchResult.embeddings);
      usage.promptTokens += batchResult.usage.promptTokens;
      usage.totalTokens += batchResult.usage.totalTokens;

      start = end;
    }

    return { embeddings: allEmbeddings, usage };
  }

  /**
   * Sends one batch to the embeddings endpoint, retrying rate-limit (429)
   * failures with exponential backoff.
   *
   * @param batchTexts - Texts for a single API call.
   * @param model - Model identifier to use for this call.
   * @returns Embeddings in input order plus the call's token usage.
   * @throws A formatted embedding error on non-retryable failures or once
   *         MAX_BATCH_RETRIES is exhausted.
   */
  private async _embedBatchWithRetries(
    batchTexts: string[],
    model: string,
  ): Promise<{ embeddings: number[][]; usage: { promptTokens: number; totalTokens: number } }> {
    for (let attempts = 0; attempts < MAX_BATCH_RETRIES; attempts++) {
      try {
        const response = await this.embeddingsClient.embeddings.create({
          input: batchTexts,
          model: model,
        });

        return {
          embeddings: response.data.map((item) => item.embedding),
          usage: {
            // usage may be absent on some responses; treat missing as zero.
            promptTokens: response.usage?.prompt_tokens || 0,
            totalTokens: response.usage?.total_tokens || 0,
          },
        };
      } catch (error: unknown) {
        const hasMoreAttempts = attempts < MAX_BATCH_RETRIES - 1;

        // Retry only rate-limit responses (HTTP 429), with exponential backoff.
        const status = (error as HttpError)?.status;
        if (status === 429 && hasMoreAttempts) {
          const delayMs = INITIAL_RETRY_DELAY_MS * Math.pow(2, attempts);
          console.warn(
            `Rate limit hit, retrying in ${delayMs}ms (attempt ${attempts + 1}/${MAX_BATCH_RETRIES})`
          );
          await new Promise((resolve) => setTimeout(resolve, delayMs));
          continue;
        }

        console.error(`OpenAI embedder error (attempt ${attempts + 1}/${MAX_BATCH_RETRIES}):`, error);
        throw formatEmbeddingError(error, MAX_BATCH_RETRIES);
      }
    }

    // Unreachable in practice (the final failed attempt throws above), but
    // keeps the compiler satisfied that all paths return or throw.
    throw new Error(`Failed to create embeddings after ${MAX_BATCH_RETRIES} attempts`);
  }

  /**
   * Validates the OpenAI embedder configuration by issuing a minimal
   * embedding request with the configured model.
   *
   * @returns `{ valid: true }` on success, or `{ valid: false, error }` when
   *          the API call fails or returns an empty payload.
   */
  async validateConfiguration(): Promise<{ valid: boolean; error?: string }> {
    return withValidationErrorHandling(async () => {
      // Test with a minimal embedding request; API errors propagate to the
      // shared validation error handler.
      const response = await this.embeddingsClient.embeddings.create({
        input: ["test"],
        model: this.defaultModelId,
      });

      // Check if we got a valid response
      if (!response.data || response.data.length === 0) {
        return {
          valid: false,
          error: "Invalid response format from OpenAI API",
        };
      }

      return { valid: true };
    }, "openai");
  }

  get embedderInfo(): EmbedderInfo {
    return {
      name: "openai",
    };
  }
}
