// Licensed to Elasticsearch B.V under one or more agreements.
// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
// See the LICENSE file in the project root for more information.
//
// ███╗   ██╗ ██████╗ ████████╗██╗ ██████╗███████╗
// ████╗  ██║██╔═══██╗╚══██╔══╝██║██╔════╝██╔════╝
// ██╔██╗ ██║██║   ██║   ██║   ██║██║     █████╗
// ██║╚██╗██║██║   ██║   ██║   ██║██║     ██╔══╝
// ██║ ╚████║╚██████╔╝   ██║   ██║╚██████╗███████╗
// ╚═╝  ╚═══╝ ╚═════╝    ╚═╝   ╚═╝ ╚═════╝╚══════╝
// ------------------------------------------------
//
// This file is automatically generated.
// Please do not edit these files manually.
//
// ------------------------------------------------

#nullable restore

using Elastic.Clients.Elasticsearch.Serverless.Fluent;
using Elastic.Clients.Elasticsearch.Serverless.Requests;
using Elastic.Clients.Elasticsearch.Serverless.Serialization;
using Elastic.Transport;
using Elastic.Transport.Extensions;
using System;
using System.Collections.Generic;
using System.Linq.Expressions;
using System.Text.Json;
using System.Text.Json.Serialization;

namespace Elastic.Clients.Elasticsearch.Serverless.MachineLearning;

public sealed partial class PutTrainedModelRequestParameters : RequestParameters
{
	/// <summary>
	/// <para>
	/// When <c>true</c> and a <c>compressed_definition</c> is supplied, the
	/// request defers decompressing the definition and skips the related
	/// validations.
	/// </para>
	/// </summary>
	public bool? DeferDefinitionDecompression
	{
		get => Q<bool?>("defer_definition_decompression");
		set => Q("defer_definition_decompression", value);
	}

	/// <summary>
	/// <para>
	/// Whether to wait for all child operations (e.g. model download)
	/// to complete.
	/// </para>
	/// </summary>
	public bool? WaitForCompletion
	{
		get => Q<bool?>("wait_for_completion");
		set => Q("wait_for_completion", value);
	}
}

/// <summary>
/// <para>
/// Create a trained model.
/// Enables you to supply a trained model that is not created by data frame analytics.
/// </para>
/// </summary>
public sealed partial class PutTrainedModelRequest : PlainRequest<PutTrainedModelRequestParameters>
{
	/// <param name="modelId">Bound to the required <c>model_id</c> route value.</param>
	public PutTrainedModelRequest(Elastic.Clients.Elasticsearch.Serverless.Id modelId) : base(r => r.Required("model_id", modelId))
	{
	}

	internal override ApiUrls ApiUrls => ApiUrlLookup.MachineLearningPutTrainedModel;

	protected override HttpMethod StaticHttpMethod => HttpMethod.PUT;

	internal override bool SupportsBody => true;

	internal override string OperationName => "ml.put_trained_model";

	/// <summary>
	/// <para>
	/// If set to <c>true</c> and a <c>compressed_definition</c> is provided,
	/// the request defers definition decompression and skips relevant
	/// validations.
	/// </para>
	/// </summary>
	// Sent as a query-string parameter; [JsonIgnore] keeps it out of the JSON body.
	[JsonIgnore]
	public bool? DeferDefinitionDecompression { get => Q<bool?>("defer_definition_decompression"); set => Q("defer_definition_decompression", value); }

	/// <summary>
	/// <para>
	/// Whether to wait for all child operations (e.g. model download)
	/// to complete.
	/// </para>
	/// </summary>
	// Sent as a query-string parameter; [JsonIgnore] keeps it out of the JSON body.
	[JsonIgnore]
	public bool? WaitForCompletion { get => Q<bool?>("wait_for_completion"); set => Q("wait_for_completion", value); }

	/// <summary>
	/// <para>
	/// The compressed (GZipped and Base64 encoded) inference definition of the
	/// model. If compressed_definition is specified, then definition cannot be
	/// specified.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("compressed_definition")]
	public string? CompressedDefinition { get; set; }

	/// <summary>
	/// <para>
	/// The inference definition for the model. If definition is specified, then
	/// compressed_definition cannot be specified.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("definition")]
	public Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Definition? Definition { get; set; }

	/// <summary>
	/// <para>
	/// A human-readable description of the inference trained model.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("description")]
	public string? Description { get; set; }

	/// <summary>
	/// <para>
	/// The default configuration for inference. This can be either a regression
	/// or classification configuration. It must match the underlying
	/// definition.trained_model's target_type. For pre-packaged models such as
	/// ELSER the config is not required.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("inference_config")]
	public Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreate? InferenceConfig { get; set; }

	/// <summary>
	/// <para>
	/// The input field names for the model definition.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("input")]
	public Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Input? Input { get; set; }

	/// <summary>
	/// <para>
	/// An object map that contains metadata about the model.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("metadata")]
	public object? Metadata { get; set; }

	/// <summary>
	/// <para>
	/// The estimated memory usage in bytes to keep the trained model in memory.
	/// This property is supported only if defer_definition_decompression is true
	/// or the model definition is not supplied.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("model_size_bytes")]
	public long? ModelSizeBytes { get; set; }

	/// <summary>
	/// <para>
	/// The model type.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("model_type")]
	public Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelType? ModelType { get; set; }

	/// <summary>
	/// <para>
	/// The platform architecture (if applicable) of the trained model. If the model
	/// only works on one platform, because it is heavily optimized for a particular
	/// processor architecture and OS combination, then this field specifies which.
	/// The format of the string must match the platform identifiers used by Elasticsearch,
	/// so one of, <c>linux-x86_64</c>, <c>linux-aarch64</c>, <c>darwin-x86_64</c>, <c>darwin-aarch64</c>,
	/// or <c>windows-x86_64</c>. For portable models (those that work independent of processor
	/// architecture or OS features), leave this field unset.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("platform_architecture")]
	public string? PlatformArchitecture { get; set; }

	/// <summary>
	/// <para>
	/// Optional prefix strings applied at inference
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("prefix_strings")]
	public Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStrings? PrefixStrings { get; set; }

	/// <summary>
	/// <para>
	/// An array of tags to organize the model.
	/// </para>
	/// </summary>
	[JsonInclude, JsonPropertyName("tags")]
	public ICollection<string>? Tags { get; set; }
}

/// <summary>
/// <para>
/// Create a trained model.
/// Enables you to supply a trained model that is not created by data frame analytics.
/// </para>
/// </summary>
public sealed partial class PutTrainedModelRequestDescriptor<TDocument> : RequestDescriptor<PutTrainedModelRequestDescriptor<TDocument>, PutTrainedModelRequestParameters>
{
	// Invokes the supplied configuration action against this descriptor instance.
	internal PutTrainedModelRequestDescriptor(Action<PutTrainedModelRequestDescriptor<TDocument>> configure) => configure.Invoke(this);

	/// <param name="modelId">Bound to the required <c>model_id</c> route value.</param>
	public PutTrainedModelRequestDescriptor(Elastic.Clients.Elasticsearch.Serverless.Id modelId) : base(r => r.Required("model_id", modelId))
	{
	}

	internal override ApiUrls ApiUrls => ApiUrlLookup.MachineLearningPutTrainedModel;

	protected override HttpMethod StaticHttpMethod => HttpMethod.PUT;

	internal override bool SupportsBody => true;

	internal override string OperationName => "ml.put_trained_model";

	// Fluent setters for the query-string parameters (not part of the JSON body).
	public PutTrainedModelRequestDescriptor<TDocument> DeferDefinitionDecompression(bool? deferDefinitionDecompression = true) => Qs("defer_definition_decompression", deferDefinitionDecompression);
	public PutTrainedModelRequestDescriptor<TDocument> WaitForCompletion(bool? waitForCompletion = true) => Qs("wait_for_completion", waitForCompletion);

	// Sets the required model_id route value.
	public PutTrainedModelRequestDescriptor<TDocument> ModelId(Elastic.Clients.Elasticsearch.Serverless.Id modelId)
	{
		RouteValues.Required("model_id", modelId);
		return Self;
	}

	// Backing state for the fluent setters below. For each complex property, at most
	// one of the Value / Descriptor / DescriptorAction members is non-null at a time;
	// each setter overload clears the other two before assigning its own.
	private string? CompressedDefinitionValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Definition? DefinitionValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor DefinitionDescriptor { get; set; }
	private Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor> DefinitionDescriptorAction { get; set; }
	private string? DescriptionValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreate? InferenceConfigValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor<TDocument> InferenceConfigDescriptor { get; set; }
	private Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor<TDocument>> InferenceConfigDescriptorAction { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Input? InputValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor InputDescriptor { get; set; }
	private Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor> InputDescriptorAction { get; set; }
	private object? MetadataValue { get; set; }
	private long? ModelSizeBytesValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelType? ModelTypeValue { get; set; }
	private string? PlatformArchitectureValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStrings? PrefixStringsValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor PrefixStringsDescriptor { get; set; }
	private Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor> PrefixStringsDescriptorAction { get; set; }
	private ICollection<string>? TagsValue { get; set; }

	/// <summary>
	/// <para>
	/// The compressed (GZipped and Base64 encoded) inference definition of the
	/// model. If compressed_definition is specified, then definition cannot be
	/// specified.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> CompressedDefinition(string? compressedDefinition)
	{
		CompressedDefinitionValue = compressedDefinition;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The inference definition for the model. If definition is specified, then
	/// compressed_definition cannot be specified.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> Definition(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Definition? definition)
	{
		DefinitionDescriptor = null;
		DefinitionDescriptorAction = null;
		DefinitionValue = definition;
		return Self;
	}

	// Overload accepting a pre-built descriptor instead of a concrete value.
	public PutTrainedModelRequestDescriptor<TDocument> Definition(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor descriptor)
	{
		DefinitionValue = null;
		DefinitionDescriptorAction = null;
		DefinitionDescriptor = descriptor;
		return Self;
	}

	// Overload accepting a configuration action; the descriptor is materialized during Serialize.
	public PutTrainedModelRequestDescriptor<TDocument> Definition(Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor> configure)
	{
		DefinitionValue = null;
		DefinitionDescriptor = null;
		DefinitionDescriptorAction = configure;
		return Self;
	}

	/// <summary>
	/// <para>
	/// A human-readable description of the inference trained model.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> Description(string? description)
	{
		DescriptionValue = description;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The default configuration for inference. This can be either a regression
	/// or classification configuration. It must match the underlying
	/// definition.trained_model's target_type. For pre-packaged models such as
	/// ELSER the config is not required.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> InferenceConfig(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreate? inferenceConfig)
	{
		InferenceConfigDescriptor = null;
		InferenceConfigDescriptorAction = null;
		InferenceConfigValue = inferenceConfig;
		return Self;
	}

	// Overload accepting a pre-built descriptor instead of a concrete value.
	public PutTrainedModelRequestDescriptor<TDocument> InferenceConfig(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor<TDocument> descriptor)
	{
		InferenceConfigValue = null;
		InferenceConfigDescriptorAction = null;
		InferenceConfigDescriptor = descriptor;
		return Self;
	}

	// Overload accepting a configuration action; the descriptor is materialized during Serialize.
	public PutTrainedModelRequestDescriptor<TDocument> InferenceConfig(Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor<TDocument>> configure)
	{
		InferenceConfigValue = null;
		InferenceConfigDescriptor = null;
		InferenceConfigDescriptorAction = configure;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The input field names for the model definition.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> Input(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Input? input)
	{
		InputDescriptor = null;
		InputDescriptorAction = null;
		InputValue = input;
		return Self;
	}

	// Overload accepting a pre-built descriptor instead of a concrete value.
	public PutTrainedModelRequestDescriptor<TDocument> Input(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor descriptor)
	{
		InputValue = null;
		InputDescriptorAction = null;
		InputDescriptor = descriptor;
		return Self;
	}

	// Overload accepting a configuration action; the descriptor is materialized during Serialize.
	public PutTrainedModelRequestDescriptor<TDocument> Input(Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor> configure)
	{
		InputValue = null;
		InputDescriptor = null;
		InputDescriptorAction = configure;
		return Self;
	}

	/// <summary>
	/// <para>
	/// An object map that contains metadata about the model.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> Metadata(object? metadata)
	{
		MetadataValue = metadata;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The estimated memory usage in bytes to keep the trained model in memory.
	/// This property is supported only if defer_definition_decompression is true
	/// or the model definition is not supplied.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> ModelSizeBytes(long? modelSizeBytes)
	{
		ModelSizeBytesValue = modelSizeBytes;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The model type.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> ModelType(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelType? modelType)
	{
		ModelTypeValue = modelType;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The platform architecture (if applicable) of the trained model. If the model
	/// only works on one platform, because it is heavily optimized for a particular
	/// processor architecture and OS combination, then this field specifies which.
	/// The format of the string must match the platform identifiers used by Elasticsearch,
	/// so one of, <c>linux-x86_64</c>, <c>linux-aarch64</c>, <c>darwin-x86_64</c>, <c>darwin-aarch64</c>,
	/// or <c>windows-x86_64</c>. For portable models (those that work independent of processor
	/// architecture or OS features), leave this field unset.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> PlatformArchitecture(string? platformArchitecture)
	{
		PlatformArchitectureValue = platformArchitecture;
		return Self;
	}

	/// <summary>
	/// <para>
	/// Optional prefix strings applied at inference
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> PrefixStrings(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStrings? prefixStrings)
	{
		PrefixStringsDescriptor = null;
		PrefixStringsDescriptorAction = null;
		PrefixStringsValue = prefixStrings;
		return Self;
	}

	// Overload accepting a pre-built descriptor instead of a concrete value.
	public PutTrainedModelRequestDescriptor<TDocument> PrefixStrings(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor descriptor)
	{
		PrefixStringsValue = null;
		PrefixStringsDescriptorAction = null;
		PrefixStringsDescriptor = descriptor;
		return Self;
	}

	// Overload accepting a configuration action; the descriptor is materialized during Serialize.
	public PutTrainedModelRequestDescriptor<TDocument> PrefixStrings(Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor> configure)
	{
		PrefixStringsValue = null;
		PrefixStringsDescriptor = null;
		PrefixStringsDescriptorAction = configure;
		return Self;
	}

	/// <summary>
	/// <para>
	/// An array of tags to organize the model.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor<TDocument> Tags(ICollection<string>? tags)
	{
		TagsValue = tags;
		return Self;
	}

	// Writes the JSON request body. For each complex property the precedence is:
	// pre-built descriptor, then descriptor action (materialized here), then plain
	// value. Unset properties — and null/empty strings — are omitted entirely.
	protected override void Serialize(Utf8JsonWriter writer, JsonSerializerOptions options, IElasticsearchClientSettings settings)
	{
		writer.WriteStartObject();
		if (!string.IsNullOrEmpty(CompressedDefinitionValue))
		{
			writer.WritePropertyName("compressed_definition");
			writer.WriteStringValue(CompressedDefinitionValue);
		}

		if (DefinitionDescriptor is not null)
		{
			writer.WritePropertyName("definition");
			JsonSerializer.Serialize(writer, DefinitionDescriptor, options);
		}
		else if (DefinitionDescriptorAction is not null)
		{
			writer.WritePropertyName("definition");
			JsonSerializer.Serialize(writer, new Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor(DefinitionDescriptorAction), options);
		}
		else if (DefinitionValue is not null)
		{
			writer.WritePropertyName("definition");
			JsonSerializer.Serialize(writer, DefinitionValue, options);
		}

		if (!string.IsNullOrEmpty(DescriptionValue))
		{
			writer.WritePropertyName("description");
			writer.WriteStringValue(DescriptionValue);
		}

		if (InferenceConfigDescriptor is not null)
		{
			writer.WritePropertyName("inference_config");
			JsonSerializer.Serialize(writer, InferenceConfigDescriptor, options);
		}
		else if (InferenceConfigDescriptorAction is not null)
		{
			writer.WritePropertyName("inference_config");
			JsonSerializer.Serialize(writer, new Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor<TDocument>(InferenceConfigDescriptorAction), options);
		}
		else if (InferenceConfigValue is not null)
		{
			writer.WritePropertyName("inference_config");
			JsonSerializer.Serialize(writer, InferenceConfigValue, options);
		}

		if (InputDescriptor is not null)
		{
			writer.WritePropertyName("input");
			JsonSerializer.Serialize(writer, InputDescriptor, options);
		}
		else if (InputDescriptorAction is not null)
		{
			writer.WritePropertyName("input");
			JsonSerializer.Serialize(writer, new Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor(InputDescriptorAction), options);
		}
		else if (InputValue is not null)
		{
			writer.WritePropertyName("input");
			JsonSerializer.Serialize(writer, InputValue, options);
		}

		if (MetadataValue is not null)
		{
			writer.WritePropertyName("metadata");
			JsonSerializer.Serialize(writer, MetadataValue, options);
		}

		if (ModelSizeBytesValue.HasValue)
		{
			writer.WritePropertyName("model_size_bytes");
			writer.WriteNumberValue(ModelSizeBytesValue.Value);
		}

		if (ModelTypeValue is not null)
		{
			writer.WritePropertyName("model_type");
			JsonSerializer.Serialize(writer, ModelTypeValue, options);
		}

		if (!string.IsNullOrEmpty(PlatformArchitectureValue))
		{
			writer.WritePropertyName("platform_architecture");
			writer.WriteStringValue(PlatformArchitectureValue);
		}

		if (PrefixStringsDescriptor is not null)
		{
			writer.WritePropertyName("prefix_strings");
			JsonSerializer.Serialize(writer, PrefixStringsDescriptor, options);
		}
		else if (PrefixStringsDescriptorAction is not null)
		{
			writer.WritePropertyName("prefix_strings");
			JsonSerializer.Serialize(writer, new Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor(PrefixStringsDescriptorAction), options);
		}
		else if (PrefixStringsValue is not null)
		{
			writer.WritePropertyName("prefix_strings");
			JsonSerializer.Serialize(writer, PrefixStringsValue, options);
		}

		if (TagsValue is not null)
		{
			writer.WritePropertyName("tags");
			JsonSerializer.Serialize(writer, TagsValue, options);
		}

		writer.WriteEndObject();
	}
}

/// <summary>
/// <para>
/// Create a trained model.
/// Enables you to supply a trained model that is not created by data frame analytics.
/// </para>
/// </summary>
public sealed partial class PutTrainedModelRequestDescriptor : RequestDescriptor<PutTrainedModelRequestDescriptor, PutTrainedModelRequestParameters>
{
	// Invokes the supplied configuration action against this descriptor instance.
	internal PutTrainedModelRequestDescriptor(Action<PutTrainedModelRequestDescriptor> configure) => configure.Invoke(this);

	/// <param name="modelId">Bound to the required <c>model_id</c> route value.</param>
	public PutTrainedModelRequestDescriptor(Elastic.Clients.Elasticsearch.Serverless.Id modelId) : base(r => r.Required("model_id", modelId))
	{
	}

	internal override ApiUrls ApiUrls => ApiUrlLookup.MachineLearningPutTrainedModel;

	protected override HttpMethod StaticHttpMethod => HttpMethod.PUT;

	internal override bool SupportsBody => true;

	internal override string OperationName => "ml.put_trained_model";

	// Fluent setters for the query-string parameters (not part of the JSON body).
	public PutTrainedModelRequestDescriptor DeferDefinitionDecompression(bool? deferDefinitionDecompression = true) => Qs("defer_definition_decompression", deferDefinitionDecompression);
	public PutTrainedModelRequestDescriptor WaitForCompletion(bool? waitForCompletion = true) => Qs("wait_for_completion", waitForCompletion);

	// Sets the required model_id route value.
	public PutTrainedModelRequestDescriptor ModelId(Elastic.Clients.Elasticsearch.Serverless.Id modelId)
	{
		RouteValues.Required("model_id", modelId);
		return Self;
	}

	// Backing state for the fluent setters below. For each complex property, at most
	// one of the Value / Descriptor / DescriptorAction members is non-null at a time;
	// each setter overload clears the other two before assigning its own.
	private string? CompressedDefinitionValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Definition? DefinitionValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor DefinitionDescriptor { get; set; }
	private Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor> DefinitionDescriptorAction { get; set; }
	private string? DescriptionValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreate? InferenceConfigValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor InferenceConfigDescriptor { get; set; }
	private Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor> InferenceConfigDescriptorAction { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Input? InputValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor InputDescriptor { get; set; }
	private Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor> InputDescriptorAction { get; set; }
	private object? MetadataValue { get; set; }
	private long? ModelSizeBytesValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelType? ModelTypeValue { get; set; }
	private string? PlatformArchitectureValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStrings? PrefixStringsValue { get; set; }
	private Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor PrefixStringsDescriptor { get; set; }
	private Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor> PrefixStringsDescriptorAction { get; set; }
	private ICollection<string>? TagsValue { get; set; }

	/// <summary>
	/// <para>
	/// The compressed (GZipped and Base64 encoded) inference definition of the
	/// model. If compressed_definition is specified, then definition cannot be
	/// specified.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor CompressedDefinition(string? compressedDefinition)
	{
		CompressedDefinitionValue = compressedDefinition;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The inference definition for the model. If definition is specified, then
	/// compressed_definition cannot be specified.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor Definition(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Definition? definition)
	{
		DefinitionDescriptor = null;
		DefinitionDescriptorAction = null;
		DefinitionValue = definition;
		return Self;
	}

	// Overload accepting a pre-built descriptor instead of a concrete value.
	public PutTrainedModelRequestDescriptor Definition(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor descriptor)
	{
		DefinitionValue = null;
		DefinitionDescriptorAction = null;
		DefinitionDescriptor = descriptor;
		return Self;
	}

	// Overload accepting a configuration action; the descriptor is materialized during Serialize.
	public PutTrainedModelRequestDescriptor Definition(Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor> configure)
	{
		DefinitionValue = null;
		DefinitionDescriptor = null;
		DefinitionDescriptorAction = configure;
		return Self;
	}

	/// <summary>
	/// <para>
	/// A human-readable description of the inference trained model.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor Description(string? description)
	{
		DescriptionValue = description;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The default configuration for inference. This can be either a regression
	/// or classification configuration. It must match the underlying
	/// definition.trained_model's target_type. For pre-packaged models such as
	/// ELSER the config is not required.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor InferenceConfig(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreate? inferenceConfig)
	{
		InferenceConfigDescriptor = null;
		InferenceConfigDescriptorAction = null;
		InferenceConfigValue = inferenceConfig;
		return Self;
	}

	// Overload accepting a pre-built descriptor instead of a concrete value.
	public PutTrainedModelRequestDescriptor InferenceConfig(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor descriptor)
	{
		InferenceConfigValue = null;
		InferenceConfigDescriptorAction = null;
		InferenceConfigDescriptor = descriptor;
		return Self;
	}

	// Overload accepting a configuration action; the descriptor is materialized during Serialize.
	public PutTrainedModelRequestDescriptor InferenceConfig(Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor> configure)
	{
		InferenceConfigValue = null;
		InferenceConfigDescriptor = null;
		InferenceConfigDescriptorAction = configure;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The input field names for the model definition.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor Input(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.Input? input)
	{
		InputDescriptor = null;
		InputDescriptorAction = null;
		InputValue = input;
		return Self;
	}

	// Overload accepting a pre-built descriptor instead of a concrete value.
	public PutTrainedModelRequestDescriptor Input(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor descriptor)
	{
		InputValue = null;
		InputDescriptorAction = null;
		InputDescriptor = descriptor;
		return Self;
	}

	// Overload accepting a configuration action; the descriptor is materialized during Serialize.
	public PutTrainedModelRequestDescriptor Input(Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor> configure)
	{
		InputValue = null;
		InputDescriptor = null;
		InputDescriptorAction = configure;
		return Self;
	}

	/// <summary>
	/// <para>
	/// An object map that contains metadata about the model.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor Metadata(object? metadata)
	{
		MetadataValue = metadata;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The estimated memory usage in bytes to keep the trained model in memory.
	/// This property is supported only if defer_definition_decompression is true
	/// or the model definition is not supplied.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor ModelSizeBytes(long? modelSizeBytes)
	{
		ModelSizeBytesValue = modelSizeBytes;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The model type.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor ModelType(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelType? modelType)
	{
		ModelTypeValue = modelType;
		return Self;
	}

	/// <summary>
	/// <para>
	/// The platform architecture (if applicable) of the trained model. If the model
	/// only works on one platform, because it is heavily optimized for a particular
	/// processor architecture and OS combination, then this field specifies which.
	/// The format of the string must match the platform identifiers used by Elasticsearch,
	/// so one of, <c>linux-x86_64</c>, <c>linux-aarch64</c>, <c>darwin-x86_64</c>, <c>darwin-aarch64</c>,
	/// or <c>windows-x86_64</c>. For portable models (those that work independent of processor
	/// architecture or OS features), leave this field unset.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor PlatformArchitecture(string? platformArchitecture)
	{
		PlatformArchitectureValue = platformArchitecture;
		return Self;
	}

	/// <summary>
	/// <para>
	/// Optional prefix strings applied at inference
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor PrefixStrings(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStrings? prefixStrings)
	{
		PrefixStringsDescriptor = null;
		PrefixStringsDescriptorAction = null;
		PrefixStringsValue = prefixStrings;
		return Self;
	}

	// Overload accepting a pre-built descriptor instead of a concrete value.
	public PutTrainedModelRequestDescriptor PrefixStrings(Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor descriptor)
	{
		PrefixStringsValue = null;
		PrefixStringsDescriptorAction = null;
		PrefixStringsDescriptor = descriptor;
		return Self;
	}

	// Overload accepting a configuration action; the descriptor is materialized during Serialize.
	public PutTrainedModelRequestDescriptor PrefixStrings(Action<Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor> configure)
	{
		PrefixStringsValue = null;
		PrefixStringsDescriptor = null;
		PrefixStringsDescriptorAction = configure;
		return Self;
	}

	/// <summary>
	/// <para>
	/// An array of tags to organize the model.
	/// </para>
	/// </summary>
	public PutTrainedModelRequestDescriptor Tags(ICollection<string>? tags)
	{
		TagsValue = tags;
		return Self;
	}

	// Writes the JSON request body. For each complex property the precedence is:
	// pre-built descriptor, then descriptor action (materialized here), then plain
	// value. Unset properties — and null/empty strings — are omitted entirely.
	protected override void Serialize(Utf8JsonWriter writer, JsonSerializerOptions options, IElasticsearchClientSettings settings)
	{
		writer.WriteStartObject();
		if (!string.IsNullOrEmpty(CompressedDefinitionValue))
		{
			writer.WritePropertyName("compressed_definition");
			writer.WriteStringValue(CompressedDefinitionValue);
		}

		if (DefinitionDescriptor is not null)
		{
			writer.WritePropertyName("definition");
			JsonSerializer.Serialize(writer, DefinitionDescriptor, options);
		}
		else if (DefinitionDescriptorAction is not null)
		{
			writer.WritePropertyName("definition");
			JsonSerializer.Serialize(writer, new Elastic.Clients.Elasticsearch.Serverless.MachineLearning.DefinitionDescriptor(DefinitionDescriptorAction), options);
		}
		else if (DefinitionValue is not null)
		{
			writer.WritePropertyName("definition");
			JsonSerializer.Serialize(writer, DefinitionValue, options);
		}

		if (!string.IsNullOrEmpty(DescriptionValue))
		{
			writer.WritePropertyName("description");
			writer.WriteStringValue(DescriptionValue);
		}

		if (InferenceConfigDescriptor is not null)
		{
			writer.WritePropertyName("inference_config");
			JsonSerializer.Serialize(writer, InferenceConfigDescriptor, options);
		}
		else if (InferenceConfigDescriptorAction is not null)
		{
			writer.WritePropertyName("inference_config");
			JsonSerializer.Serialize(writer, new Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InferenceConfigCreateDescriptor(InferenceConfigDescriptorAction), options);
		}
		else if (InferenceConfigValue is not null)
		{
			writer.WritePropertyName("inference_config");
			JsonSerializer.Serialize(writer, InferenceConfigValue, options);
		}

		if (InputDescriptor is not null)
		{
			writer.WritePropertyName("input");
			JsonSerializer.Serialize(writer, InputDescriptor, options);
		}
		else if (InputDescriptorAction is not null)
		{
			writer.WritePropertyName("input");
			JsonSerializer.Serialize(writer, new Elastic.Clients.Elasticsearch.Serverless.MachineLearning.InputDescriptor(InputDescriptorAction), options);
		}
		else if (InputValue is not null)
		{
			writer.WritePropertyName("input");
			JsonSerializer.Serialize(writer, InputValue, options);
		}

		if (MetadataValue is not null)
		{
			writer.WritePropertyName("metadata");
			JsonSerializer.Serialize(writer, MetadataValue, options);
		}

		if (ModelSizeBytesValue.HasValue)
		{
			writer.WritePropertyName("model_size_bytes");
			writer.WriteNumberValue(ModelSizeBytesValue.Value);
		}

		if (ModelTypeValue is not null)
		{
			writer.WritePropertyName("model_type");
			JsonSerializer.Serialize(writer, ModelTypeValue, options);
		}

		if (!string.IsNullOrEmpty(PlatformArchitectureValue))
		{
			writer.WritePropertyName("platform_architecture");
			writer.WriteStringValue(PlatformArchitectureValue);
		}

		if (PrefixStringsDescriptor is not null)
		{
			writer.WritePropertyName("prefix_strings");
			JsonSerializer.Serialize(writer, PrefixStringsDescriptor, options);
		}
		else if (PrefixStringsDescriptorAction is not null)
		{
			writer.WritePropertyName("prefix_strings");
			JsonSerializer.Serialize(writer, new Elastic.Clients.Elasticsearch.Serverless.MachineLearning.TrainedModelPrefixStringsDescriptor(PrefixStringsDescriptorAction), options);
		}
		else if (PrefixStringsValue is not null)
		{
			writer.WritePropertyName("prefix_strings");
			JsonSerializer.Serialize(writer, PrefixStringsValue, options);
		}

		if (TagsValue is not null)
		{
			writer.WritePropertyName("tags");
			JsonSerializer.Serialize(writer, TagsValue, options);
		}

		writer.WriteEndObject();
	}
}