package com.gwz.wxtime.openai.com.theokanning.openai.completion;

import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.util.List;

/**
 * A request for OpenAI to generate a chat completion for a list of messages.
 * All fields are nullable: a field left null is omitted from the serialized
 * request so the API applies its server-side default.
 * NOTE(review): null-omission assumes the JSON mapper is configured to skip
 * nulls (e.g. Jackson NON_NULL / Gson default) — confirm against the client.
 * <p>
 * https://platform.openai.com/docs/api-reference/chat/create
 */
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class ChatCompletionRequest {

    /**
     * ID of the model to use, e.g. "gpt-3.5-turbo".
     * Required if specifying a fine tuned model or if using the new v1/completions endpoint.
     */
    String model;

    /**
     * The messages to generate a chat completion for, in conversation order.
     */
    List<ChatRoleChat> messages;

    /**
     * How many completions to generate for each prompt.
     * <p>
     * Because this parameter generates many completions, it can quickly consume your token quota.
     * Use carefully and ensure that you have reasonable settings for
     * {@link ChatCompletionRequest#max_tokens} and {@link ChatCompletionRequest#stop}.
     */
    Integer n;

    /**
     * Whether to stream back partial progress.
     * If set, tokens will be sent as data-only server-sent events as they become available,
     * with the stream terminated by a data: DONE message.
     */
    Boolean stream;

    /**
     * Up to 4 sequences where the API will stop generating further tokens.
     * The returned text will not contain the stop sequence.
     */
    List<String> stop;

    /**
     * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
     */
    String user;

    /**
     * Sampling temperature to use.
     * Boxed ({@code Float}, not {@code float}) so an unset value is null and
     * omitted from the request instead of being sent as 0.0, which would
     * silently force deterministic output.
     */
    Float temperature;

    /**
     * Penalty applied to tokens already present in the text so far.
     * NOTE(review): the OpenAI API defines this as a number between -2.0 and
     * 2.0; the original field was {@code int}. Kept as {@code Integer} for
     * source compatibility — consider widening to {@code Double} in a
     * breaking release.
     */
    Integer presence_penalty;

    /**
     * Penalty applied to tokens based on their frequency in the text so far.
     * NOTE(review): same int-vs-double concern as {@link #presence_penalty}.
     */
    Integer frequency_penalty;

    /**
     * Nucleus sampling parameter.
     * NOTE(review): the API defines top_p as a number in [0, 1]; as an
     * {@code Integer} only 0 or 1 are expressible. Kept for source
     * compatibility — consider widening to {@code Double} in a breaking
     * release.
     */
    Integer top_p;

    /**
     * The maximum number of tokens to generate in the completion.
     */
    Integer max_tokens;

}
