import {BedrockRuntimeClientConfigType, InferenceConfiguration} from '@aws-sdk/client-bedrock-runtime';
import {ChatAdapterBuilder as CoreChatAdapterBuilder, DataTransferMode, StandardChatAdapter} from '@nlux/core';

export interface ChatAdapterBuilder<AiMsg>
    extends CoreChatAdapterBuilder<AiMsg> {
    /**
     * Create a new Bedrock Inference API adapter.
     * Adapter users don't need to call this method directly. It will be called by nlux when the adapter is expected
     * to be created.
     *
     * @returns {StandardChatAdapter}
     */
    create(): StandardChatAdapter<AiMsg>;

    /**
     * The AWS credentials to use when creating the Bedrock Runtime client.
     * Accepts the same value as the `credentials` option of the client configuration
     * (`BedrockRuntimeClientConfigType['credentials']`) from `@aws-sdk/client-bedrock-runtime`.
     * If omitted, the client presumably falls back to the AWS SDK's default credential
     * provider chain — TODO confirm against the adapter implementation.
     *
     * NOTE(review): the method name contains a typo ("Credintial" instead of "Credential").
     * It is kept as-is here because renaming would break existing callers and implementers;
     * consider adding a correctly-spelled alias and deprecating this one in a future release.
     *
     * @optional
     * @param {BedrockRuntimeClientConfigType['credentials']} cred
     * @returns {ChatAdapterBuilder}
     */
    withCredintial(
        cred: BedrockRuntimeClientConfigType['credentials'],
    ): ChatAdapterBuilder<AiMsg>;

    /**
     * Instruct the adapter to connect to API and load data either in streaming mode or in batch mode.
     * The `stream` mode would use protocols such as websockets or server-side events, and nlux will display data as
     * it's being generated by the server. The `batch` mode would use a single request to fetch data, and the response
     * would only be displayed once the entire message is loaded.
     *
     * @optional
     * @default 'stream'
     * @param {DataTransferMode} mode
     * @returns {ChatAdapterBuilder}
     */
    withDataTransferMode(mode: DataTransferMode): ChatAdapterBuilder<AiMsg>;
    /**
     * Inference parameters to pass to the model. <code>Converse</code> supports a base
     * set of inference parameters. If you need to pass additional parameters that the model
     * supports, use the <code>additionalModelRequestFields</code> request field.
     *
     * @optional
     * @param {InferenceConfiguration} inferenceConfig
     * @returns {ChatAdapterBuilder}
     */
    withInferenceConfig(
        inferenceConfig: InferenceConfiguration,
    ): ChatAdapterBuilder<AiMsg>;
    /**
     * The identifier of the model to invoke via the Bedrock <code>Converse</code> API
     * (e.g. a Bedrock model ID — presumably also an inference profile ARN is accepted;
     * verify against the adapter implementation).
     *
     * NOTE(review): the previous doc mentioned choosing between a model and an "endpoint",
     * which appears copy-pasted from another adapter — this interface exposes no endpoint
     * method; only a model ID and a region are configurable here.
     *
     * @param {string} model
     * @returns {ChatAdapterBuilder}
     */
    withModel(model: string): ChatAdapterBuilder<AiMsg>;
    /**
     * The AWS region to use for the Bedrock Inference API (e.g. `'us-east-1'`).
     * Presumably forwarded to the Bedrock Runtime client's `region` configuration —
     * TODO confirm against the adapter implementation.
     *
     * @optional
     * @param {string} region
     * @returns {ChatAdapterBuilder}
     */
    withRegion(region: string): ChatAdapterBuilder<AiMsg>;
}
