package com.alone.openai.api.core.param;

import com.fasterxml.jackson.annotation.JsonInclude;
import lombok.Data;

import java.util.Map;

/**
 * Request parameters for the OpenAI (legacy) Completions API.
 * <p>
 * Field names intentionally use snake_case so that Jackson serializes them
 * to the exact JSON keys the OpenAI API expects (no {@code @JsonProperty}
 * remapping is used). Defaults mirror the API's documented defaults.
 *
 * @author Alone
 * @date 2023/3/22 3:42 PM
 */
@Data
public class CompletionParam {

    /**
     * Required.
     * <p>
     * ID of the model to use.
     * You can use the List models API to see all of your available models,
     * or see the Model overview for descriptions of them.
     */
    private String model;

    /**
     * The prompt(s) to generate completions for, encoded as a string,
     * array of strings, array of tokens, or array of token arrays.
     * <p>
     * Note that &lt;|endoftext|&gt; is the document separator that the model sees
     * during training, so if a prompt is not specified the model will generate
     * as if from the beginning of a new document.
     */
    private String prompt;

    /**
     * The suffix that comes after a completion of inserted text.
     */
    @JsonInclude(JsonInclude.Include.NON_EMPTY)
    private String suffix;

    /**
     * The maximum number of tokens to generate in the completion.
     * <p>
     * The token count of your prompt plus max_tokens cannot exceed the model's
     * context length. Most models have a context length of 2048 tokens
     * (except for the newest models, which support 4096).
     */
    private int max_tokens = 16;

    /**
     * What sampling temperature to use, between 0 and 2.
     * Higher values like 0.8 will make the output more random,
     * while lower values like 0.2 will make it more focused and deterministic.
     * <p>
     * We generally recommend altering this or top_p but not both.
     */
    private float temperature = 1;

    /**
     * An alternative to sampling with temperature, called nucleus sampling,
     * where the model considers the results of the tokens with top_p
     * probability mass. So 0.1 means only the tokens comprising the top 10%
     * probability mass are considered.
     * <p>
     * We generally recommend altering this or temperature but not both.
     */
    private float top_p = 1;

    /**
     * How many completions to generate for each prompt.
     * <p>
     * Note: Because this parameter generates many completions, it can quickly
     * consume your token quota. Use carefully and ensure that you have
     * reasonable settings for max_tokens and stop.
     */
    private int n = 1;

    /**
     * Whether to stream back partial progress.
     * If set, tokens will be sent as data-only server-sent events as they
     * become available, with the stream terminated by a data: [DONE] message.
     */
    private boolean stream = false;

    /**
     * Include the log probabilities on the logprobs most likely tokens,
     * as well as the chosen tokens. For example, if logprobs is 5, the API
     * will return a list of the 5 most likely tokens. The API will always
     * return the logprob of the sampled token, so there may be up to
     * logprobs+1 elements in the response.
     * <p>
     * The maximum value for logprobs is 5. If you need more than this,
     * please contact OpenAI through the Help center and describe your use case.
     */
    @JsonInclude(JsonInclude.Include.NON_NULL)
    private Integer logprobs = null;

    /**
     * Echo back the prompt in addition to the completion.
     */
    private boolean echo = false;

    /**
     * Up to 4 sequences where the API will stop generating further tokens.
     * The returned text will not contain the stop sequence.
     */
    @JsonInclude(JsonInclude.Include.NON_EMPTY)
    private String stop;

    /**
     * Number between -2.0 and 2.0.
     * Positive values penalize new tokens based on whether they appear in the
     * text so far, increasing the model's likelihood to talk about new topics.
     * <p>
     * https://platform.openai.com/docs/api-reference/parameter-details
     */
    private float presence_penalty = 0;

    /**
     * Number between -2.0 and 2.0.
     * Positive values penalize new tokens based on their existing frequency in
     * the text so far, decreasing the model's likelihood to repeat the same
     * line verbatim.
     * <p>
     * https://platform.openai.com/docs/api-reference/parameter-details
     */
    private float frequency_penalty = 0;

    /**
     * Generates best_of completions server-side and returns the "best"
     * (the one with the highest log probability per token). Results cannot
     * be streamed.
     * <p>
     * When used with n, best_of controls the number of candidate completions
     * and n specifies how many to return — best_of must be greater than n.
     * <p>
     * Note: Because this parameter generates many completions, it can quickly
     * consume your token quota. Use carefully and ensure that you have
     * reasonable settings for max_tokens and stop.
     */
    private int best_of = 1;

    /**
     * Modify the likelihood of specified tokens appearing in the completion.
     * <p>
     * Accepts a json object that maps tokens (specified by their token ID in
     * the GPT tokenizer) to an associated bias value from -100 to 100.
     * You can use the tokenizer tool (which works for both GPT-2 and GPT-3)
     * to convert text to token IDs. Mathematically, the bias is added to the
     * logits generated by the model prior to sampling. The exact effect will
     * vary per model, but values between -1 and 1 should decrease or increase
     * likelihood of selection; values like -100 or 100 should result in a ban
     * or exclusive selection of the relevant token.
     * <p>
     * As an example, you can pass {"50256": -100} to prevent the
     * &lt;|endoftext|&gt; token from being generated.
     */
    @JsonInclude(JsonInclude.Include.NON_NULL)
    private Map<String, Integer> logit_bias = null;

    /**
     * A unique identifier representing your end-user, which can help OpenAI
     * to monitor and detect abuse.
     * <p>
     * https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
     */
    @JsonInclude(JsonInclude.Include.NON_EMPTY)
    private String user = null;

    /**
     * Convenience factory: builds a deterministic 500-token completion request
     * for the given prompt against the default {@code text-davinci-003} model.
     *
     * @param msg the prompt text to complete
     * @return a parameter object with model=text-davinci-003, max_tokens=500,
     *         temperature=0
     */
    public static CompletionParam of(String msg) {
        // Delegate to the general overload; keeps the historical default model.
        return of(msg, "text-davinci-003");
    }

    /**
     * Convenience factory: builds a deterministic 500-token completion request
     * for the given prompt against an arbitrary model.
     *
     * @param msg   the prompt text to complete
     * @param model the ID of the model to use (must be non-empty)
     * @return a parameter object with max_tokens=500 and temperature=0
     * @throws IllegalArgumentException if {@code model} is null or empty
     */
    public static CompletionParam of(String msg, String model) {
        if (model == null || model.isEmpty()) {
            // A null/empty model would be rejected by the API; fail fast here.
            throw new IllegalArgumentException("model must not be null or empty");
        }
        CompletionParam param = new CompletionParam();
        param.setModel(model);
        param.setPrompt(msg);
        param.setMax_tokens(500);
        param.setTemperature(0);
        return param;
    }

}
