package io.ai.arch.java.ai;

import lombok.RequiredArgsConstructor;
import org.springframework.ai.chat.client.ChatClient;
import org.springframework.ai.chat.messages.AssistantMessage;
import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.chat.model.ChatModel;
import org.springframework.ai.chat.model.ChatResponse;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.openai.OpenAiChatOptions;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import reactor.core.publisher.Flux;

import java.util.Map;

/**
 * https://build.nvidia.com/tokyotech-llm/llama-3-swallow-70b-instruct-v01?snippet_tab=Python
 */
@RestController
@RequestMapping("api/ai/model")
@RequiredArgsConstructor
public class ChatModelApi {

    private final ChatModel chatModel;


    @GetMapping("/modelString")
    public Map chatString(@RequestParam(value = "message", defaultValue = "讲个笑话") String message) {
        return Map.of("generation", this.chatModel.call(message));
    }

    @GetMapping("/modelPrompt")
    public String chatPrompt() {
        ChatResponse chatResponse = chatModel.call(new Prompt("Generate the names of 5 famous pirates.", OpenAiChatOptions.builder().temperature(0.4).build()));
        return chatResponse.getResult().getOutput().getText();
    }


    @GetMapping("/modelMessage")
    public Map chatMessage(@RequestParam(value = "message", defaultValue = "讲个笑话") String message1) {
        UserMessage userMessage = new UserMessage(message1);
        return Map.of("generation", this.chatModel.call(userMessage));
    }


    @GetMapping("/modelMoreMessage")
    public Map chatMoreMessage(@RequestParam(value = "message", defaultValue = "讲个笑话") String message2) {
        AssistantMessage assistantMessage = new AssistantMessage("你是一个幼儿园的幼师");
        UserMessage userMessage = new UserMessage(message2);
        return Map.of("generation", this.chatModel.call(assistantMessage, userMessage));
    }

    @GetMapping("/modelStream")
    public Flux<ChatResponse> chatStream(@RequestParam(value = "message", defaultValue = "讲个笑话") String message3) {
        Prompt prompt = new Prompt(new UserMessage(message3));
        return this.chatModel.stream(prompt);
    }

    @GetMapping("/modelStreamMoreMessage")
    public Flux<ChatResponse> chatStreamMoreMessage(@RequestParam(value = "message", defaultValue = "讲个笑话") String message4) {
        AssistantMessage assistantMessage = new AssistantMessage("你是一个幼儿园的幼师");
        UserMessage userMessage = new UserMessage(message4);
        Prompt prompt = new Prompt(assistantMessage, userMessage);
        return this.chatModel.stream(prompt);
    }

    /**
     * from openai import OpenAI
     *
     * client = OpenAI(
     *   base_url = "https://integrate.api.nvidia.com/v1",
     *   api_key = "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"
     * )
     *
     * completion = client.chat.completions.create(
     *   model="tokyotech-llm/llama-3-swallow-70b-instruct-v0.1",
     *   messages=[{"role":"user","content":""}],
     *   temperature=0.5,
     *   top_p=1,
     *   max_tokens=1024,
     *   stream=True
     * )
     *
     * for chunk in completion:
     *   if chunk.choices[0].delta.content is not None:
     *     print(chunk.choices[0].delta.content, end="")
     */
}
