// Licensed to Elasticsearch B.V under one or more agreements.
// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
// See the LICENSE file in the project root for more information.
//
// ███╗   ██╗ ██████╗ ████████╗██╗ ██████╗███████╗
// ████╗  ██║██╔═══██╗╚══██╔══╝██║██╔════╝██╔════╝
// ██╔██╗ ██║██║   ██║   ██║   ██║██║     █████╗
// ██║╚██╗██║██║   ██║   ██║   ██║██║     ██╔══╝
// ██║ ╚████║╚██████╔╝   ██║   ██║╚██████╗███████╗
// ╚═╝  ╚═══╝ ╚═════╝    ╚═╝   ╚═╝ ╚═════╝╚══════╝
// ------------------------------------------------
//
// This file is automatically generated.
// Please do not edit these files manually.
//
// ------------------------------------------------

#nullable restore

using Elastic.Clients.Elasticsearch.Fluent;
using Elastic.Clients.Elasticsearch.Serialization;
using System;
using System.Collections.Generic;
using System.Linq.Expressions;
using System.Text.Json;
using System.Text.Json.Serialization;

namespace Elastic.Clients.Elasticsearch.Aggregations;

/// <summary>
/// <para>
/// A multi-bucket aggregation that groups semi-structured text into buckets. Each text
/// field is re-analyzed using a custom analyzer. The resulting tokens are then categorized
/// creating buckets of similarly formatted text values. This aggregation works best with machine
/// generated text like system logs. Only the first 100 analyzed tokens are used to categorize the text.
/// </para>
/// </summary>
public sealed partial class CategorizeTextAggregation
{
	/// <summary>
	/// <para>
	/// Defines how the text is analyzed and tokenized prior to categorization. The syntax closely
	/// mirrors the analyzer definition accepted by the
	/// <a href="https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html">Analyze endpoint</a>.
	/// Mutually exclusive with <c>categorization_filters</c>.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("categorization_analyzer")]
	public Elastic.Clients.Elasticsearch.Aggregations.CategorizeTextAnalyzer? CategorizationAnalyzer { get; set; }

	/// <summary>
	/// <para>
	/// An array of regular expressions used to strip matching sequences from the categorization field
	/// values before categories are defined — for example, to exclude embedded SQL statements from log
	/// lines. Mutually exclusive with <c>categorization_analyzer</c>. For simple pre-tokenization
	/// filtering this property is the easiest option; to also customize the tokenizer or
	/// post-tokenization filtering, use <c>categorization_analyzer</c> instead and express the filters
	/// as <c>pattern_replace</c> character filters.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("categorization_filters")]
	public ICollection<string>? CategorizationFilters { get; set; }

	/// <summary>
	/// <para>
	/// The semi-structured text field to categorize.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("field")]
	public Elastic.Clients.Elasticsearch.Field Field { get; set; }

	/// <summary>
	/// <para>
	/// Maximum number of token positions to match on before attempting to merge categories. Larger
	/// values use more memory and produce narrower categories. Maximum allowed value is 100.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("max_matched_tokens")]
	public int? MaxMatchedTokens { get; set; }

	/// <summary>
	/// <para>
	/// Maximum number of unique tokens at any position up to <c>max_matched_tokens</c>. Must be
	/// larger than 1. Smaller values use less memory and create fewer categories; larger values use
	/// more memory and produce narrower categories. Maximum allowed value is 100.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("max_unique_tokens")]
	public int? MaxUniqueTokens { get; set; }

	/// <summary>
	/// <para>
	/// Minimum number of documents a bucket must contain to be included in the results.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("min_doc_count")]
	public int? MinDocCount { get; set; }

	/// <summary>
	/// <para>
	/// Minimum number of documents a bucket must contain on a shard to be returned before merging.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("shard_min_doc_count")]
	public int? ShardMinDocCount { get; set; }

	/// <summary>
	/// <para>
	/// Number of categorization buckets each shard returns before all results are merged.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("shard_size")]
	public int? ShardSize { get; set; }

	/// <summary>
	/// <para>
	/// Minimum percentage of tokens that must match for text to join a category bucket. Must be
	/// between 1 and 100. Larger values increase memory usage and produce narrower categories.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("similarity_threshold")]
	public int? SimilarityThreshold { get; set; }

	/// <summary>
	/// <para>
	/// Number of buckets to return.
	/// </para>
	/// </summary>
	[JsonInclude]
	[JsonPropertyName("size")]
	public int? Size { get; set; }

	// Allows a CategorizeTextAggregation to be used anywhere an Aggregation container is expected.
	public static implicit operator Elastic.Clients.Elasticsearch.Aggregations.Aggregation(CategorizeTextAggregation aggregation) => Elastic.Clients.Elasticsearch.Aggregations.Aggregation.CategorizeText(aggregation);
}

/// <summary>
/// <para>
/// A multi-bucket aggregation that groups semi-structured text into buckets. Each text
/// field is re-analyzed using a custom analyzer. The resulting tokens are then categorized
/// creating buckets of similarly formatted text values. This aggregation works best with machine
/// generated text like system logs. Only the first 100 analyzed tokens are used to categorize the text.
/// </para>
/// </summary>
public sealed partial class CategorizeTextAggregationDescriptor<TDocument> : SerializableDescriptor<CategorizeTextAggregationDescriptor<TDocument>>
{
	internal CategorizeTextAggregationDescriptor(Action<CategorizeTextAggregationDescriptor<TDocument>> configure) => configure(this);

	public CategorizeTextAggregationDescriptor()
	{
	}

	private Elastic.Clients.Elasticsearch.Aggregations.CategorizeTextAnalyzer? CategorizationAnalyzerValue { get; set; }
	private ICollection<string>? CategorizationFiltersValue { get; set; }
	private Elastic.Clients.Elasticsearch.Field FieldValue { get; set; }
	private int? MaxMatchedTokensValue { get; set; }
	private int? MaxUniqueTokensValue { get; set; }
	private int? MinDocCountValue { get; set; }
	private int? ShardMinDocCountValue { get; set; }
	private int? ShardSizeValue { get; set; }
	private int? SimilarityThresholdValue { get; set; }
	private int? SizeValue { get; set; }

	/// <summary>
	/// <para>
	/// Defines how the text is analyzed and tokenized prior to categorization. The syntax closely
	/// mirrors the analyzer definition accepted by the
	/// <a href="https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html">Analyze endpoint</a>.
	/// Mutually exclusive with <c>categorization_filters</c>.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> CategorizationAnalyzer(Elastic.Clients.Elasticsearch.Aggregations.CategorizeTextAnalyzer? categorizationAnalyzer)
	{
		this.CategorizationAnalyzerValue = categorizationAnalyzer;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// An array of regular expressions used to strip matching sequences from the categorization field
	/// values before categories are defined — for example, to exclude embedded SQL statements from log
	/// lines. Mutually exclusive with <c>categorization_analyzer</c>. For simple pre-tokenization
	/// filtering this property is the easiest option; to also customize the tokenizer or
	/// post-tokenization filtering, use <c>categorization_analyzer</c> instead and express the filters
	/// as <c>pattern_replace</c> character filters.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> CategorizationFilters(ICollection<string>? categorizationFilters)
	{
		this.CategorizationFiltersValue = categorizationFilters;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// The semi-structured text field to categorize.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> Field(Elastic.Clients.Elasticsearch.Field field)
	{
		this.FieldValue = field;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// The semi-structured text field to categorize.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> Field<TValue>(Expression<Func<TDocument, TValue>> field)
	{
		this.FieldValue = field;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// The semi-structured text field to categorize.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> Field(Expression<Func<TDocument, object>> field)
	{
		this.FieldValue = field;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Maximum number of token positions to match on before attempting to merge categories. Larger
	/// values use more memory and produce narrower categories. Maximum allowed value is 100.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> MaxMatchedTokens(int? maxMatchedTokens)
	{
		this.MaxMatchedTokensValue = maxMatchedTokens;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Maximum number of unique tokens at any position up to <c>max_matched_tokens</c>. Must be
	/// larger than 1. Smaller values use less memory and create fewer categories; larger values use
	/// more memory and produce narrower categories. Maximum allowed value is 100.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> MaxUniqueTokens(int? maxUniqueTokens)
	{
		this.MaxUniqueTokensValue = maxUniqueTokens;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Minimum number of documents a bucket must contain to be included in the results.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> MinDocCount(int? minDocCount)
	{
		this.MinDocCountValue = minDocCount;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Minimum number of documents a bucket must contain on a shard to be returned before merging.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> ShardMinDocCount(int? shardMinDocCount)
	{
		this.ShardMinDocCountValue = shardMinDocCount;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Number of categorization buckets each shard returns before all results are merged.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> ShardSize(int? shardSize)
	{
		this.ShardSizeValue = shardSize;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Minimum percentage of tokens that must match for text to join a category bucket. Must be
	/// between 1 and 100. Larger values increase memory usage and produce narrower categories.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> SimilarityThreshold(int? similarityThreshold)
	{
		this.SimilarityThresholdValue = similarityThreshold;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Number of buckets to return.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor<TDocument> Size(int? size)
	{
		this.SizeValue = size;
		return this.Self;
	}

	protected override void Serialize(Utf8JsonWriter writer, JsonSerializerOptions options, IElasticsearchClientSettings settings)
	{
		// Writes an optional integer property, skipping it entirely when no value was assigned.
		void WriteOptionalInt(string name, int? value)
		{
			if (value.HasValue)
			{
				writer.WriteNumber(name, value.Value);
			}
		}

		writer.WriteStartObject();
		if (CategorizationAnalyzerValue is not null)
		{
			writer.WritePropertyName("categorization_analyzer");
			JsonSerializer.Serialize(writer, CategorizationAnalyzerValue, options);
		}

		if (CategorizationFiltersValue is not null)
		{
			writer.WritePropertyName("categorization_filters");
			JsonSerializer.Serialize(writer, CategorizationFiltersValue, options);
		}

		// "field" is required and therefore always emitted.
		writer.WritePropertyName("field");
		JsonSerializer.Serialize(writer, FieldValue, options);
		WriteOptionalInt("max_matched_tokens", MaxMatchedTokensValue);
		WriteOptionalInt("max_unique_tokens", MaxUniqueTokensValue);
		WriteOptionalInt("min_doc_count", MinDocCountValue);
		WriteOptionalInt("shard_min_doc_count", ShardMinDocCountValue);
		WriteOptionalInt("shard_size", ShardSizeValue);
		WriteOptionalInt("similarity_threshold", SimilarityThresholdValue);
		WriteOptionalInt("size", SizeValue);
		writer.WriteEndObject();
	}
}

/// <summary>
/// <para>
/// A multi-bucket aggregation that groups semi-structured text into buckets. Each text
/// field is re-analyzed using a custom analyzer. The resulting tokens are then categorized
/// creating buckets of similarly formatted text values. This aggregation works best with machine
/// generated text like system logs. Only the first 100 analyzed tokens are used to categorize the text.
/// </para>
/// </summary>
public sealed partial class CategorizeTextAggregationDescriptor : SerializableDescriptor<CategorizeTextAggregationDescriptor>
{
	internal CategorizeTextAggregationDescriptor(Action<CategorizeTextAggregationDescriptor> configure) => configure(this);

	public CategorizeTextAggregationDescriptor()
	{
	}

	private Elastic.Clients.Elasticsearch.Aggregations.CategorizeTextAnalyzer? CategorizationAnalyzerValue { get; set; }
	private ICollection<string>? CategorizationFiltersValue { get; set; }
	private Elastic.Clients.Elasticsearch.Field FieldValue { get; set; }
	private int? MaxMatchedTokensValue { get; set; }
	private int? MaxUniqueTokensValue { get; set; }
	private int? MinDocCountValue { get; set; }
	private int? ShardMinDocCountValue { get; set; }
	private int? ShardSizeValue { get; set; }
	private int? SimilarityThresholdValue { get; set; }
	private int? SizeValue { get; set; }

	/// <summary>
	/// <para>
	/// Defines how the text is analyzed and tokenized prior to categorization. The syntax closely
	/// mirrors the analyzer definition accepted by the
	/// <a href="https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html">Analyze endpoint</a>.
	/// Mutually exclusive with <c>categorization_filters</c>.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor CategorizationAnalyzer(Elastic.Clients.Elasticsearch.Aggregations.CategorizeTextAnalyzer? categorizationAnalyzer)
	{
		this.CategorizationAnalyzerValue = categorizationAnalyzer;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// An array of regular expressions used to strip matching sequences from the categorization field
	/// values before categories are defined — for example, to exclude embedded SQL statements from log
	/// lines. Mutually exclusive with <c>categorization_analyzer</c>. For simple pre-tokenization
	/// filtering this property is the easiest option; to also customize the tokenizer or
	/// post-tokenization filtering, use <c>categorization_analyzer</c> instead and express the filters
	/// as <c>pattern_replace</c> character filters.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor CategorizationFilters(ICollection<string>? categorizationFilters)
	{
		this.CategorizationFiltersValue = categorizationFilters;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// The semi-structured text field to categorize.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor Field(Elastic.Clients.Elasticsearch.Field field)
	{
		this.FieldValue = field;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// The semi-structured text field to categorize.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor Field<TDocument, TValue>(Expression<Func<TDocument, TValue>> field)
	{
		this.FieldValue = field;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// The semi-structured text field to categorize.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor Field<TDocument>(Expression<Func<TDocument, object>> field)
	{
		this.FieldValue = field;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Maximum number of token positions to match on before attempting to merge categories. Larger
	/// values use more memory and produce narrower categories. Maximum allowed value is 100.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor MaxMatchedTokens(int? maxMatchedTokens)
	{
		this.MaxMatchedTokensValue = maxMatchedTokens;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Maximum number of unique tokens at any position up to <c>max_matched_tokens</c>. Must be
	/// larger than 1. Smaller values use less memory and create fewer categories; larger values use
	/// more memory and produce narrower categories. Maximum allowed value is 100.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor MaxUniqueTokens(int? maxUniqueTokens)
	{
		this.MaxUniqueTokensValue = maxUniqueTokens;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Minimum number of documents a bucket must contain to be included in the results.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor MinDocCount(int? minDocCount)
	{
		this.MinDocCountValue = minDocCount;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Minimum number of documents a bucket must contain on a shard to be returned before merging.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor ShardMinDocCount(int? shardMinDocCount)
	{
		this.ShardMinDocCountValue = shardMinDocCount;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Number of categorization buckets each shard returns before all results are merged.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor ShardSize(int? shardSize)
	{
		this.ShardSizeValue = shardSize;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Minimum percentage of tokens that must match for text to join a category bucket. Must be
	/// between 1 and 100. Larger values increase memory usage and produce narrower categories.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor SimilarityThreshold(int? similarityThreshold)
	{
		this.SimilarityThresholdValue = similarityThreshold;
		return this.Self;
	}

	/// <summary>
	/// <para>
	/// Number of buckets to return.
	/// </para>
	/// </summary>
	public CategorizeTextAggregationDescriptor Size(int? size)
	{
		this.SizeValue = size;
		return this.Self;
	}

	protected override void Serialize(Utf8JsonWriter writer, JsonSerializerOptions options, IElasticsearchClientSettings settings)
	{
		// Writes an optional integer property, skipping it entirely when no value was assigned.
		void WriteOptionalInt(string name, int? value)
		{
			if (value.HasValue)
			{
				writer.WriteNumber(name, value.Value);
			}
		}

		writer.WriteStartObject();
		if (CategorizationAnalyzerValue is not null)
		{
			writer.WritePropertyName("categorization_analyzer");
			JsonSerializer.Serialize(writer, CategorizationAnalyzerValue, options);
		}

		if (CategorizationFiltersValue is not null)
		{
			writer.WritePropertyName("categorization_filters");
			JsonSerializer.Serialize(writer, CategorizationFiltersValue, options);
		}

		// "field" is required and therefore always emitted.
		writer.WritePropertyName("field");
		JsonSerializer.Serialize(writer, FieldValue, options);
		WriteOptionalInt("max_matched_tokens", MaxMatchedTokensValue);
		WriteOptionalInt("max_unique_tokens", MaxUniqueTokensValue);
		WriteOptionalInt("min_doc_count", MinDocCountValue);
		WriteOptionalInt("shard_min_doc_count", ShardMinDocCountValue);
		WriteOptionalInt("shard_size", ShardSizeValue);
		WriteOptionalInt("similarity_threshold", SimilarityThresholdValue);
		WriteOptionalInt("size", SizeValue);
		writer.WriteEndObject();
	}
}