﻿// Copyright (c) Microsoft. All rights reserved.

using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.SemanticKernel.ChatCompletion;

namespace Microsoft.SemanticKernel.TextGeneration;

/// <summary>
/// Provides extension methods for the <see cref="ITextGenerationService"/> interface.
/// </summary>
public static class TextGenerationExtensions
{
    /// <summary>
    /// Gets a single text generation result for the prompt and settings.
    /// </summary>
    /// <param name="textGenerationService">Text generation service</param>
    /// <param name="prompt">The standardized prompt input.</param>
    /// <param name="executionSettings">The AI execution settings (optional).</param>
    /// <param name="kernel">The <see cref="Kernel"/> containing services, plugins, and other state for use throughout the operation.</param>
    /// <param name="cancellationToken">The <see cref="CancellationToken"/> to monitor for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
    /// <returns>Single text content generated by the remote model.</returns>
    public static async Task<TextContent> GetTextContentAsync(
        this ITextGenerationService textGenerationService,
        string prompt,
        PromptExecutionSettings? executionSettings = null,
        Kernel? kernel = null,
        CancellationToken cancellationToken = default)
    {
        var results = await textGenerationService.GetTextContentsAsync(prompt, executionSettings, kernel, cancellationToken).ConfigureAwait(false);

        // Single() intentionally throws when the service returns zero or multiple results.
        return results.Single();
    }

    /// <summary>
    /// Gets text generation results for the standardized prompt and settings.
    /// When the service also implements <see cref="IChatCompletionService"/> and the prompt parses
    /// as a chat prompt, the chat path is used and its messages are projected to text contents.
    /// </summary>
    /// <param name="textGenerationService">Text generation service</param>
    /// <param name="prompt">The standardized prompt input.</param>
    /// <param name="executionSettings">The AI execution settings (optional).</param>
    /// <param name="kernel">The <see cref="Kernel"/> containing services, plugins, and other state for use throughout the operation.</param>
    /// <param name="cancellationToken">The <see cref="CancellationToken"/> to monitor for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
    /// <returns>List of different text results generated by the remote model</returns>
    internal static async Task<IReadOnlyList<TextContent>> GetTextContentsWithDefaultParserAsync(
        this ITextGenerationService textGenerationService,
        string prompt,
        PromptExecutionSettings? executionSettings = null,
        Kernel? kernel = null,
        CancellationToken cancellationToken = default)
    {
        // Not a chat-capable service, or the prompt isn't a chat prompt: use the prompt as-is.
        if (textGenerationService is not IChatCompletionService chatService
            || !ChatPromptParser.TryParse(prompt, out var parsedHistory))
        {
            return await textGenerationService.GetTextContentsAsync(prompt, executionSettings, kernel, cancellationToken).ConfigureAwait(false);
        }

        var messages = await chatService.GetChatMessageContentsAsync(parsedHistory, executionSettings, kernel, cancellationToken).ConfigureAwait(false);

        var results = new TextContent[messages.Count];
        for (var i = 0; i < messages.Count; i++)
        {
            var message = messages[i];
            results[i] = new TextContent(message.Content, message.ModelId, message.InnerContent, message.Encoding, message.Metadata);
        }

        return results;
    }

    /// <summary>
    /// Gets streaming results for the standardized prompt using the specified settings.
    /// Each modality may support for different types of streaming contents.
    /// </summary>
    /// <remarks>
    /// Usage of this method with value types may be more efficient if the connector supports it.
    /// </remarks>
    /// <exception cref="NotSupportedException">Throws if the specified type is not the same or fail to cast</exception>
    /// <param name="textGenerationService">Text generation service</param>
    /// <param name="prompt">The standardized prompt to complete.</param>
    /// <param name="executionSettings">The AI execution settings (optional).</param>
    /// <param name="kernel">The <see cref="Kernel"/> containing services, plugins, and other state for use throughout the operation.</param>
    /// <param name="cancellationToken">The <see cref="CancellationToken"/> to monitor for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
    /// <returns>Streaming list of different generation streaming string updates generated by the remote model</returns>
    internal static async IAsyncEnumerable<StreamingTextContent> GetStreamingTextContentsWithDefaultParserAsync(
        this ITextGenerationService textGenerationService,
        string prompt,
        PromptExecutionSettings? executionSettings = null,
        Kernel? kernel = null,
        [EnumeratorCancellation] CancellationToken cancellationToken = default)
    {
        // Not a chat-capable service, or the prompt isn't a chat prompt: stream the prompt as-is.
        if (textGenerationService is not IChatCompletionService chatService
            || !ChatPromptParser.TryParse(prompt, out var parsedHistory))
        {
            await foreach (var chunk in textGenerationService.GetStreamingTextContentsAsync(prompt, executionSettings, kernel, cancellationToken).ConfigureAwait(false))
            {
                yield return chunk;
            }

            yield break;
        }

        // Chat path: project each streaming chat update into a streaming text content.
        await foreach (var message in chatService.GetStreamingChatMessageContentsAsync(parsedHistory, executionSettings, kernel, cancellationToken).ConfigureAwait(false))
        {
            yield return new StreamingTextContent(message.Content, message.ChoiceIndex, message.ModelId, message, message.Encoding, message.Metadata);
        }
    }
}
