﻿using feiyun0112.SemanticKernel.Connectors.OnnxRuntimeGenAI;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ImageToText;

// Streams a chat completion from a local ONNX Runtime GenAI model (e.g. Phi-3) to the console.
//
// Model directory resolution order:
//   1. first command-line argument,
//   2. ONNX_MODEL_PATH environment variable,
//   3. the original hard-coded default (backward compatible).
string modelPath = args.Length > 0
    ? args[0]
    : Environment.GetEnvironmentVariable("ONNX_MODEL_PATH")
      ?? @"D:\Labs\LLModels\Phi-3-vision-128k-instruct-onnx-cuda\cuda-int4-rtn-block-32";

Kernel kernel = Kernel.CreateBuilder()
    .AddOnnxRuntimeGenAIChatCompletion(modelPath: modelPath)
    .Build();

string prompt = "Write a joke";

// Stream tokens to the console as they are generated instead of waiting for the
// full completion. MaxLength caps the total token budget for the generation.
await foreach (string text in kernel.InvokePromptStreamingAsync<string>(
                   prompt,
                   new KernelArguments(new OnnxRuntimeGenAIPromptExecutionSettings { MaxLength = 2048 })))
{
    Console.Write(text);
}

// Terminate the streamed output so the shell prompt starts on a fresh line.
Console.WriteLine();