package com.yhq.yhqproject.base.config;

import com.yhq.yhqproject.base.modules.chat.entity.GptConfigInfo;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class GptConfigInfoConfig {

    // NOTE(review): all @Value annotations in this class were commented out,
    // which left every field null and produced a fully unconfigured
    // GptConfigInfo bean. Restored them with their original defaults; the url
    // property gets an empty-string default so a missing property does not
    // fail context startup. Confirm this was not intentionally disabled
    // (e.g. config moved to a database) before merging.

    /**
     * openai chat completion endpoint url.
     */
    @Value("${openai.chat.completion.url:}")
    private String url;

    /**
     * openai chat request parameter: model name.
     */
    @Value("${openai.chat.param.model:gpt-3.5-turbo}")
    private String model;

    /**
     * openai chat request parameter: maximum number of tokens in the completion.
     */
    @Value("${openai.chat.param.max.tokens:2000}")
    private Integer maxTks;

    /**
     * openai chat request parameter: sampling temperature.
     */
    @Value("${openai.chat.param.temperature:0.7}")
    private Float temperature;

    /**
     * openai chat request parameter: nucleus-sampling top_p.
     */
    @Value("${openai.chat.param.top_p:1}")
    private Float top;

    /**
     * Retry limit for chat requests.
     */
    @Value("${openai.chat.retry.limit:2}")
    private Integer limit;

    /**
     * Exposes the assembled GPT chat configuration as a singleton bean.
     *
     * @return a {@link GptConfigInfo} populated from the {@code openai.chat.*} properties
     */
    @Bean
    public GptConfigInfo gptConfigInfo() {
        return new GptConfigInfo(url, model, maxTks, temperature, top, limit);
    }
}
