package dev.langchain4j.model.openai;

import static dev.langchain4j.data.message.UserMessage.userMessage;
import static dev.langchain4j.model.openai.OpenAiChatModelName.GPT_4_O_MINI;
import static dev.langchain4j.model.openai.OpenAiChatModelName.GPT_5_NANO;
import static dev.langchain4j.model.openai.OpenAiChatModelName.O3_MINI;
import static dev.langchain4j.model.output.FinishReason.LENGTH;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.params.provider.EnumSource.Mode.EXCLUDE;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.AudioContent;
import dev.langchain4j.data.message.PdfFileContent;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.TextContent;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.data.pdf.PdfFile;
import dev.langchain4j.http.client.MockHttpClient;
import dev.langchain4j.http.client.MockHttpClientBuilder;
import dev.langchain4j.http.client.SuccessfulHttpResponse;
import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.chat.request.ChatRequest;
import dev.langchain4j.model.chat.request.ToolChoice;
import dev.langchain4j.model.chat.request.json.JsonObjectSchema;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.output.TokenUsage;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Base64;
import java.util.Map;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.junitpioneer.jupiter.RetryingTest;

/**
 * Integration tests for {@link OpenAiChatModel} that exercise the live OpenAI API.
 *
 * <p>Tests only run when the {@code OPENAI_API_KEY} environment variable is set.
 * Optional {@code OPENAI_BASE_URL} and {@code OPENAI_ORGANIZATION_ID} variables are
 * forwarded to every model builder so the suite can target a proxy or a specific org.
 */
@EnabledIfEnvironmentVariable(named = "OPENAI_API_KEY", matches = ".+")
class OpenAiChatModelIT {

    /**
     * Smoke-tests every published {@link OpenAiChatModelName} (minus models the test
     * account cannot use) by asking a trivial factual question.
     */
    @ParameterizedTest
    @EnumSource(
            value = OpenAiChatModelName.class,
            mode = EXCLUDE,
            names = {
                "GPT_4_32K", // don't have access
                "GPT_4_32K_0613", // don't have access
                "O3", // don't have access
                "O3_2025_04_16", // don't have access
                "O1_MINI", // does not support 'system' role with this model
                "O1_MINI_2024_09_12", // does not support 'system' role with this model
            })
    void should_support_all_model_names(OpenAiChatModelName modelName) {

        // given
        OpenAiChatModel model = OpenAiChatModel.builder()
                .baseUrl(System.getenv("OPENAI_BASE_URL"))
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
                .modelName(modelName)
                .logRequests(true)
                .logResponses(true)
                .build();

        ChatRequest chatRequest = ChatRequest.builder()
                .messages(SystemMessage.from("Be concise"), UserMessage.from("What is the capital of Germany?"))
                .build();

        // when
        ChatResponse chatResponse = model.chat(chatRequest);

        // then
        assertThat(chatResponse.aiMessage().text()).containsIgnoringCase("Berlin");
    }

    /**
     * Verifies the deprecated {@code maxTokens} builder option still caps the output:
     * the model must stop after exactly one output token with finish reason LENGTH.
     */
    @Test
    void should_respect_deprecated_maxTokens() {

        // given
        int maxTokens = 1;

        ChatModel model = OpenAiChatModel.builder()
                .baseUrl(System.getenv("OPENAI_BASE_URL"))
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
                .modelName(GPT_4_O_MINI)
                .maxTokens(maxTokens) // deprecated option under test — do not migrate
                .temperature(0.0)
                .logRequests(true)
                .logResponses(true)
                .build();

        UserMessage userMessage = userMessage("Tell me a long story");

        // when
        ChatResponse response = model.chat(userMessage);

        // then
        assertThat(response.aiMessage().text()).isNotBlank();

        TokenUsage tokenUsage = response.tokenUsage();
        assertThat(tokenUsage.outputTokenCount()).isEqualTo(maxTokens);

        assertThat(response.finishReason()).isEqualTo(LENGTH);
    }

    /**
     * Verifies the modern {@code maxCompletionTokens} option caps the output the
     * same way as the deprecated {@code maxTokens} does.
     */
    @Test
    void should_respect_maxCompletionTokens() {

        // given
        int maxCompletionTokens = 1;

        ChatModel model = OpenAiChatModel.builder()
                .baseUrl(System.getenv("OPENAI_BASE_URL"))
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
                .modelName(GPT_4_O_MINI)
                .maxCompletionTokens(maxCompletionTokens)
                .logRequests(true)
                .logResponses(true)
                .build();

        UserMessage userMessage = userMessage("Tell me a long story");

        // when
        ChatResponse response = model.chat(userMessage);

        // then
        assertThat(response.aiMessage().text()).isNotBlank();

        TokenUsage tokenUsage = response.tokenUsage();
        assertThat(tokenUsage.outputTokenCount()).isEqualTo(maxCompletionTokens);

        assertThat(response.finishReason()).isEqualTo(LENGTH);
    }

    /**
     * With {@link ToolChoice#NONE} the model must not request any tool execution,
     * even when the question clearly matches the available tools.
     */
    @Test
    void should_not_execute_any_tool_when_toolChoice_is_none() {
        // given
        UserMessage userMessage = userMessage("What's the weather in SF and NYC, and what time is it there?");
        ToolSpecification getWeather = ToolSpecification.builder()
                .name("get_weather")
                .description("Get the current weather in a given location")
                .parameters(JsonObjectSchema.builder()
                        .addStringProperty("location")
                        .required("location")
                        .build())
                .build();
        ToolSpecification getTime = ToolSpecification.builder()
                .name("get_time")
                .description("Get the current time in a given timezone")
                .parameters(JsonObjectSchema.builder()
                        .addStringProperty("timezone")
                        .required("timezone")
                        .build())
                .build();

        ChatRequest request = ChatRequest.builder()
                .messages(userMessage)
                .toolSpecifications(getTime, getWeather)
                .toolChoice(ToolChoice.NONE)
                .build();

        ChatModel model = OpenAiChatModel.builder()
                .baseUrl(System.getenv("OPENAI_BASE_URL"))
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
                .modelName(GPT_4_O_MINI)
                .temperature(0.0)
                .logRequests(true)
                .logResponses(true)
                .build();

        // when
        ChatResponse response = model.chat(request);
        // then
        AiMessage aiMessage = response.aiMessage();

        assertThat(aiMessage.toolExecutionRequests()).isEmpty();
    }

    /**
     * With the {@code json_object} response format, the model output must be pure,
     * parseable JSON even when the prompt nudges it to add extra prose.
     */
    @Test
    void should_generate_valid_json() throws JsonProcessingException {

        // given
        @JsonIgnoreProperties(ignoreUnknown = true) // to ignore the "joke" field
        record Person(String name, String surname) {}

        String userMessage = "Return JSON with two fields: name and surname of Klaus Heisler. "
                + "Before returning, tell me a joke."; // nudging it to say something additionally to json

        String responseFormat = "json_object";

        ChatModel modelGeneratingJson = OpenAiChatModel.builder()
                .baseUrl(System.getenv("OPENAI_BASE_URL"))
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
                .modelName(GPT_4_O_MINI)
                .responseFormat(responseFormat)
                .temperature(0.0)
                .logRequests(true)
                .logResponses(true)
                .build();

        // when
        String json = modelGeneratingJson.chat(userMessage);

        // then
        Person person = new ObjectMapper().readValue(json, Person.class);
        assertThat(person.name).isEqualTo("Klaus");
        assertThat(person.surname).isEqualTo("Heisler");
    }

    /**
     * Sends a base64-encoded WAV alongside text and checks the audio-capable model
     * transcribes/describes it. Retried because audio transcription is flaky.
     */
    @RetryingTest(3)
    void should_accept_audio_content() throws Exception {

        // given
        OpenAiChatModel model = OpenAiChatModel.builder()
                .baseUrl(System.getenv("OPENAI_BASE_URL"))
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
                .modelName("gpt-4o-audio-preview")
                .temperature(0.0)
                .logRequests(false) // avoid base64 output
                .logResponses(true)
                .build();

        Path file =
                Paths.get(getClass().getClassLoader().getResource("sample.wav").toURI());
        String audioBase64 = Base64.getEncoder().encodeToString(Files.readAllBytes(file));

        UserMessage userMessage = UserMessage.from(
                TextContent.from("What is on the audio?"), AudioContent.from(audioBase64, "audio/wav"));

        // when
        ChatResponse response = model.chat(userMessage);

        // then
        assertThat(response.aiMessage().text()).containsIgnoringCase("hello");
    }

    /**
     * Verifies that the {@code reasoningEffort} parameter is honored: the same
     * question answered with "high" effort should spend more reasoning tokens
     * than with "low" effort.
     */
    @Test
    void should_answer_with_reasoning_effort() {

        // given
        OpenAiChatModel model = OpenAiChatModel.builder()
                .baseUrl(System.getenv("OPENAI_BASE_URL"))
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
                .modelName(O3_MINI)
                .logRequests(true)
                .logResponses(true)
                .build();

        UserMessage userMessage = UserMessage.from("What is the capital of Germany?");

        ChatRequest chatRequestWithLowReasoningEffort = ChatRequest.builder()
                .messages(userMessage)
                .parameters(OpenAiChatRequestParameters.builder()
                        .reasoningEffort("low")
                        .build())
                .build();

        ChatRequest chatRequestWithHighReasoningEffort = ChatRequest.builder()
                .messages(userMessage)
                .parameters(OpenAiChatRequestParameters.builder()
                        .reasoningEffort("high")
                        .build())
                .build();

        // when
        ChatResponse chatResponseWithLowReasoningEffort = model.chat(chatRequestWithLowReasoningEffort);
        ChatResponse chatResponseWithHighReasoningEffort = model.chat(chatRequestWithHighReasoningEffort);

        // then
        Integer lowReasoningTokens = ((OpenAiTokenUsage) chatResponseWithLowReasoningEffort.tokenUsage())
                .outputTokensDetails()
                .reasoningTokens();
        Integer highReasoningTokens = ((OpenAiTokenUsage) chatResponseWithHighReasoningEffort.tokenUsage())
                .outputTokensDetails()
                .reasoningTokens();
        assertThat(lowReasoningTokens).isLessThan(highReasoningTokens);
    }

    /**
     * Sends a base64-encoded PDF alongside text and checks the model extracts
     * the document's content.
     */
    @Test
    void should_accept_pdf_file_content() throws Exception {

        // given
        OpenAiChatModel model = OpenAiChatModel.builder()
                .baseUrl(System.getenv("OPENAI_BASE_URL"))
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .organizationId(System.getenv("OPENAI_ORGANIZATION_ID"))
                .modelName(GPT_5_NANO)
                .logRequests(false) // PDF is huge
                .logResponses(true)
                .build();

        Path file =
                Paths.get(getClass().getClassLoader().getResource("sample.pdf").toURI());
        String pdfBase64 = Base64.getEncoder().encodeToString(Files.readAllBytes(file));
        PdfFile pdfFile = PdfFile.builder()
                .base64Data(pdfBase64)
                .mimeType("application/pdf")
                .build();

        UserMessage userMessage = UserMessage.builder()
                .addContent(
                        TextContent.from("What information is in the attached PDF? Return only the extracted text."))
                .addContent(PdfFileContent.from(pdfFile))
                .build();

        // when
        ChatResponse response = model.chat(userMessage);

        // then
        assertThat(response.aiMessage().text())
                .containsIgnoringCase("Berlin")
                .containsIgnoringCase("capital")
                .containsIgnoringCase("Germany");
    }

    /**
     * Uses a mock HTTP client (no network) to verify that:
     * <ul>
     *   <li>custom request parameters ({@code web_search_options}) are serialized
     *       verbatim into the outgoing request body, and</li>
     *   <li>the raw HTTP response is exposed via {@link OpenAiChatResponseMetadata}.</li>
     * </ul>
     */
    @Test
    void should_set_custom_parameters_and_get_raw_response() throws JsonProcessingException {

        // given
        String city = "Munich";

        record ApproximateLocation(String city) {}
        record UserLocation(String type, ApproximateLocation approximate) {}
        record WebSearchOptions(@JsonProperty("user_location") UserLocation userLocation) {}

        WebSearchOptions webSearchOptions =
                new WebSearchOptions(new UserLocation("approximate", new ApproximateLocation(city)));
        Map<String, Object> customParameters = Map.of("web_search_options", webSearchOptions);

        ChatRequest chatRequest = ChatRequest.builder()
                .messages(UserMessage.from("Where can I buy good coffee?"))
                .parameters(OpenAiChatRequestParameters.builder()
                        .customParameters(customParameters)
                        .build())
                .build();

        SuccessfulHttpResponse httpResponse = SuccessfulHttpResponse.builder()
                .statusCode(200)
                .body(
                        """
                        {
                          "id": "chatcmpl-C9QWFjhlUn7vBERtBTMFbbgoKqTDh",
                          "object": "chat.completion",
                          "created": 1756362927,
                          "model": "gpt-4o-mini-2024-07-18",
                          "choices": [
                            {
                              "index": 0,
                              "message": {
                                "role": "assistant",
                                "content": "Bla bla bla",
                                "refusal": null,
                                "annotations": []
                              },
                              "logprobs": null,
                              "finish_reason": "stop"
                            }
                          ],
                          "usage": {
                            "prompt_tokens": 14,
                            "completion_tokens": 7,
                            "total_tokens": 21,
                            "prompt_tokens_details": {
                              "cached_tokens": 0,
                              "audio_tokens": 0
                            },
                            "completion_tokens_details": {
                              "reasoning_tokens": 0,
                              "audio_tokens": 0,
                              "accepted_prediction_tokens": 0,
                              "rejected_prediction_tokens": 0
                            }
                          },
                          "service_tier": "default",
                          "system_fingerprint": "fp_560af6e559"
                        }
                        """)
                .build();

        MockHttpClient mockHttpClient = MockHttpClient.thatAlwaysResponds(httpResponse);

        ChatModel model = OpenAiChatModel.builder()
                .httpClientBuilder(new MockHttpClientBuilder(mockHttpClient))
                .build();

        // when
        ChatResponse chatResponse = model.chat(chatRequest);

        // then
        assertThat(mockHttpClient.request().body())
                .isEqualToIgnoringWhitespace(
                        """
                {
                  "messages" : [ {
                    "role" : "user",
                    "content" : "Where can I buy good coffee?"
                  } ],
                  "stream" : false,
                  "web_search_options" : {
                    "user_location" : {
                      "type" : "approximate",
                      "approximate" : {
                        "city" : "Munich"
                      }
                    }
                  }
                }
                """);

        SuccessfulHttpResponse rawResponse = ((OpenAiChatResponseMetadata) chatResponse.metadata()).rawHttpResponse();
        assertThat(rawResponse).isEqualTo(httpResponse);
    }
}
