﻿using System.ComponentModel;

namespace OpenAiClient.Models
{
    /// <summary>
    /// Known OpenAI model identifiers. Each member carries a
    /// <see cref="DescriptionAttribute"/> whose value is the exact model-name
    /// string expected by the OpenAI API (e.g. "gpt-3.5-turbo"); callers are
    /// expected to read that attribute when building requests.
    /// </summary>
    public enum ChatModel
    {
        /// <summary>
        /// Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of
        /// text-davinci-003. Will be updated with the latest model iteration.
        /// </summary>
        [Description("gpt-3.5-turbo")] ChatGptTurbo = 0,

        /// <summary>
        /// Snapshot of gpt-3.5-turbo from March 1st 2023. Unlike gpt-3.5-turbo,
        /// this model will not receive updates, and will only be supported for a
        /// three month period ending on June 1st 2023.
        /// </summary>
        [Description("gpt-3.5-turbo-0301")] ChatGptTurbo0301,

        /// <summary>
        /// More capable than any GPT-3.5 model, able to do more complex tasks,
        /// and optimized for chat. Will be updated with the latest model iteration.
        /// Currently in limited beta so your OpenAI account needs to be whitelisted
        /// to use this.
        /// </summary>
        [Description("gpt-4")] Gpt4,

        /// <summary>
        /// Same capabilities as the base gpt-4 mode but with 4x the context length.
        /// Will be updated with the latest model iteration.
        /// Currently in limited beta so your OpenAI account needs to be whitelisted
        /// to use this.
        /// </summary>
        [Description("gpt-4-32k")] Gpt4With32KContext,

        /// <summary>
        /// GPT-4 Turbo; maps to the "gpt-4-turbo" API model identifier.
        /// </summary>
        [Description("gpt-4-turbo")] Gpt4Turbo,

        /// <summary>
        /// GPT-4o; maps to the "gpt-4o" API model identifier.
        /// </summary>
        [Description("gpt-4o")] Gpt4o,

        /// <summary>
        /// GPT-4o mini; maps to the "gpt-4o-mini" API model identifier.
        /// </summary>
        [Description("gpt-4o-mini")] Gpt4oMini,

        /// <summary>
        /// Stable text moderation model that may provide lower accuracy compared to TextModerationLatest.
        /// OpenAI states they will provide advanced notice before updating this model.
        /// </summary>
        [Description("text-moderation-stable")]
        TextModerationStable,

        /// <summary>
        /// The latest text moderation model.
        /// This model will be automatically upgraded over time.
        /// </summary>
        [Description("text-moderation-latest")]
        TextModerationLatest,

        /// <summary>
        /// Capable of very simple tasks,
        /// usually the fastest model in the GPT-3 series,
        /// and lowest cost
        /// </summary>
        [Description("text-ada-001")] AdaText,

        /// <summary>
        /// Capable of straightforward tasks, very fast, and lower cost.
        /// </summary>
        [Description("text-babbage-001")] BabbageText,

        /// <summary>
        /// Very capable, but faster and lower cost than Davinci.
        /// </summary>
        [Description("text-curie-001")] CurieText,

        /// <summary>
        /// Most capable GPT-3 model. Can do any task the other models can do,
        /// often with higher quality, longer output and better instruction-following.
        /// Also supports inserting completions within text.
        /// </summary>
        // ReSharper disable once IdentifierTypo
        [Description("text-davinci-003")] DavinciText,

        /// <summary>
        /// Almost as capable as Davinci Codex, but slightly faster.
        /// This speed advantage may make it preferable for real-time applications.
        /// </summary>
        // ReSharper disable once IdentifierTypo
        [Description("code-cushman-001")] CushmanCode,

        /// <summary>
        /// Most capable Codex model. Particularly good at translating natural language
        /// to code. In addition to completing code, also supports inserting completions
        /// within code.
        /// </summary>
        // ReSharper disable once IdentifierTypo
        [Description("code-davinci-002")] DavinciCode,

        /// <summary>
        /// OpenAI offers one second-generation embedding model for use with the
        /// embeddings API endpoint.
        /// </summary>
        [Description("text-embedding-ada-002")]
        AdaTextEmbedding
    }
}
