import { BaseAgentBackend, BackendTypes } from "../base";
import { getTextEmbedding as _getTextEmbedding } from "./embedding";
import { getChatCompletion as _getChatCompletion } from "./chat";
import { modelTokenLimit } from "./config";

/**
 * Agent backend implementation that delegates to the OpenAI helper modules
 * in this directory (embedding, chat, config).
 */
export class OpenAiBackend extends BaseAgentBackend {
    /**
     * Computes a text embedding via the OpenAI embedding helper.
     *
     * @param request - Embedding request; `request.apiKey` is forwarded as the credential.
     * @returns The embedding response from the underlying helper.
     */
    static async getTextEmbedding(
        request: BackendTypes.TextEmbeddingRequest
    ): Promise<BackendTypes.TextEmbeddingResponse> {
        return await _getTextEmbedding(request, request.apiKey);
    }

    /**
     * Runs a chat completion and adapts the raw OpenAI-style response to the
     * backend's normalized shape (single assistant message).
     *
     * @param request - Chat completion request; `request.apiKey` is forwarded as the credential.
     * @param modelConfig - Extra model configuration passed through to the helper.
     *   Typed `unknown`-valued rather than `any` so values must be narrowed before use.
     * @returns Normalized completion with id, timestamp, model, and the first choice's message.
     * @throws Error if the provider returns no choices (e.g. filtered or failed completion),
     *   instead of an opaque TypeError from an undefined index.
     */
    static async getChatCompletion(
        request: BackendTypes.ChatCompletionRequest,
        modelConfig: Record<string, unknown>
    ): Promise<BackendTypes.ChatCompletionResponse> {
        const response = await _getChatCompletion(request, request.apiKey, modelConfig);
        // Guard the array access: an empty `choices` would otherwise crash with
        // "Cannot read properties of undefined" far from the real cause.
        const firstChoice = response.choices[0];
        if (!firstChoice) {
            throw new Error("Chat completion returned no choices");
        }
        return {
            id: response.id,
            created: response.created,
            model: response.model,
            message: { role: "assistant", content: firstChoice.message.content }
        };
    }

    /**
     * Looks up the context-window token limit for a model type.
     *
     * @param modelType - Key into the `modelTokenLimit` table from config.
     * @returns The token limit for the model.
     * @throws Error naming the unrecognized model type, so callers can see
     *   which value was rejected.
     */
    static getTokenLimit(modelType: string): number {
        const result = modelTokenLimit[modelType];
        if (result) {
            return result;
        }
        throw new Error(`Invalid model type: ${modelType}`);
    }
}
