package sundeinfo.ai.controller;


import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.api.GenerateContentResponse;
import com.google.cloud.vertexai.generativeai.ContentMaker;
import com.google.cloud.vertexai.generativeai.GenerativeModel;
import com.google.cloud.vertexai.generativeai.PartMaker;
import com.google.cloud.vertexai.generativeai.ResponseHandler;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.tags.Tag;
import lombok.RequiredArgsConstructor;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import java.io.IOException;
import java.util.Map;

@RestController
@RequestMapping("ai")
@RequiredArgsConstructor
@Tag(name = "AI测试")
public class DomainQAController {

    /**
     * Standalone entry point for manually exercising the Vertex AI
     * multimodal (image + text) generation call.
     *
     * <p>NOTE(review): this is sample code living inside a {@code @RestController}
     * with no request mappings — presumably a scratch/test class; confirm whether
     * it should be promoted to a real endpoint or moved out of the controller layer.
     *
     * @param args unused
     * @throws IOException if the Vertex AI call fails
     */
    public static void main(String[] args) throws IOException {
        // TODO(developer): Replace these variables before running the sample.
        String projectId = "openid-394005";
        String location = "us-central1";
        // NOTE(review): gemini-1.0-pro-vision has been deprecated by Google;
        // consider a current multimodal model (e.g. a Gemini 1.5 variant) —
        // confirm availability in this project/region first.
        String modelName = "gemini-1.0-pro-vision";

        String output = quickstart(projectId, location, modelName);
        System.out.println(output);
    }

    /**
     * Sends a single multimodal prompt (a GCS-hosted image plus a text question)
     * to the given Vertex AI generative model and returns the model's text answer.
     *
     * @param projectId GCP project id hosting the Vertex AI resources
     * @param location  Vertex AI region, e.g. {@code "us-central1"}
     * @param modelName generative model id, e.g. {@code "gemini-1.0-pro-vision"}
     * @return the generated text extracted from the model response
     * @throws IOException if the underlying API call fails
     */
    public static String quickstart(String projectId, String location, String modelName)
            throws IOException {

        // try-with-resources: VertexAI holds gRPC channels that must be closed.
        try (VertexAI vertexAI = new VertexAI(projectId, location)) {
            String imageUri = "gs://generativeai-downloads/images/scones.jpg";

            GenerativeModel model = new GenerativeModel(modelName, vertexAI);
            GenerateContentResponse response = model.generateContent(ContentMaker.fromMultiModalData(
                    // Fix: the registered MIME type is "image/jpeg"; "image/jpg"
                    // is invalid and is rejected by the Gemini API.
                    PartMaker.fromMimeTypeAndData("image/jpeg", imageUri),
                    "What's in this photo"
            ));

            // Return only the generated text rather than the raw protobuf
            // toString() dump of the whole GenerateContentResponse.
            return ResponseHandler.getText(response);
        }
    }
}

