/*
Humanloop API

The Humanloop API allows you to interact with Humanloop from your product or service.

You can do this through HTTP requests from any language or via our official Python or TypeScript SDK.

To install the official [Python SDK](https://pypi.org/project/humanloop/), run the following command:

```bash
pip install humanloop
```

To install the official [TypeScript SDK](https://www.npmjs.com/package/humanloop), run the following command:

```bash
npm i humanloop
```

---

Guides and further details about key concepts can be found in [our docs](https://docs.humanloop.com/).

The version of the OpenAPI document: 4.0.1


NOTE: This file is auto generated by Konfig (https://konfigthis.com).
*/
import type * as buffer from "buffer"

import { ChatMessage } from './chat-message';
import { FunctionTool } from './function-tool';
import { ToolCall } from './tool-call';
import { ToolResultResponse } from './tool-result-response';

/**
 * Overwrite DataResponse for chat.
 * @export
 * @interface ChatDataResponse
 */
/**
 * Overwrite DataResponse for chat.
 *
 * One sampled generation returned by the chat endpoint, together with the
 * identifiers needed to reference it later (e.g. when recording feedback).
 */
export interface ChatDataResponse {
    /**
     * Unique ID for the model inputs and output logged to Humanloop.
     * Use this when recording feedback later.
     */
    id: string;
    /**
     * The index for the sampled generation for a given input. The
     * `num_samples` request parameter controls how many samples are generated.
     */
    index: number;
    /**
     * Output text returned from the provider model with leading and trailing
     * whitespaces stripped.
     */
    output: string;
    /** Raw output text returned from the provider model. */
    raw_output: string;
    /** The inputs passed to the chat template. */
    inputs?: object;
    /**
     * Why the generation ended. One of 'stop' (indicating a stop token was
     * encountered), or 'length' (indicating the max tokens limit has been
     * reached), or 'tool_call' (indicating that the model has chosen to call
     * a tool - in which case the tool_call parameter of the response will be
     * populated). It will be set as null for the intermediary responses
     * during a stream, and will only be set as non-null for the final
     * streamed token.
     */
    finish_reason?: string;
    /** The model configuration used to create the generation. */
    model_config_id: string;
    /** Results of any tools run during the generation. */
    tool_results?: Array<ToolResultResponse>;
    /** The messages passed to the provider chat endpoint. */
    messages?: Array<ChatMessage>;
    /**
     * JSON definition of the tool to call and the corresponding argument
     * values. Will be populated when finish_reason='tool_call'.
     *
     * @deprecated Use `tool_calls` instead.
     */
    tool_call?: FunctionTool;
    /**
     * JSON definition of the tools to call and the corresponding argument
     * values. Will be populated when finish_reason='tool_call'.
     */
    tool_calls?: Array<ToolCall>;
    /** The message returned by the provider. */
    output_message: ChatMessage;
}

