package com.example.demo.controller;

import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.chat.model.ChatResponse;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.ollama.OllamaChatModel;
import org.springframework.ai.ollama.api.OllamaApi;
import org.springframework.ai.ollama.api.OllamaOptions;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import reactor.core.publisher.Flux;


/**
 * REST controller exposing a simple chat endpoint backed by a local Ollama server.
 *
 * <p>The model can alternatively be configured declaratively in {@code application.yaml}
 * instead of being built in code:
 * <pre>
 * spring:
 *   ai:
 *     ollama:
 *       base-url: http://localhost:11434   # Ollama API server address
 *       chat:
 *         enabled: true
 *         model: llama3.1:8b               # model name
 *         options:
 *           temperature: 0.7
 * </pre>
 *
 * @author Administrator
 * @version 1.0
 * @date 2024/8/25 11:53
 */
@RestController
public class AIController {

    /**
     * Chat model built once at construction time. The previous implementation
     * created a new {@code OllamaChatModel} on every request, which wastes work;
     * the model wrapper is stateless per call and safe to reuse.
     */
    private final OllamaChatModel chatModel;

    /**
     * Constructor injection (preferred over field {@code @Autowired}: the
     * dependency is explicit, final, and mockable in tests).
     *
     * @param ollamaApi low-level Ollama HTTP client supplied by Spring
     */
    @Autowired
    public AIController(OllamaApi ollamaApi) {
        this.chatModel = new OllamaChatModel(ollamaApi,
                OllamaOptions.create()
                        .withModel("llama3.1:8b")
                        // Temperature controls output randomness (lower = more
                        // deterministic) — it is NOT "model thinking time" as the
                        // original comment claimed.
                        .withTemperature(0.1f));
    }

    /**
     * Sends the user's message to the model and returns the reply text.
     *
     * <p>NOTE(review): kept {@code @RequestMapping} (all HTTP methods) for
     * backward compatibility; the unused {@code GetMapping} import suggests
     * GET-only may have been intended — confirm and narrow if so.
     *
     * @param msg the user's chat message, passed as the {@code msg} query parameter
     * @return the model's reply content
     */
    @RequestMapping("/ai")
    public String chat(@RequestParam("msg") String msg) {
        ChatResponse response = chatModel.call(new Prompt(new UserMessage(msg)));
        // getContent() yields just the assistant's reply text. The original
        // code called toString() on the message object, which leaks the
        // internal representation (class name, metadata) to the client.
        return response.getResult().getOutput().getContent();
    }
}
