Dataset schema (each row below lists its fields in this order):

  id           string  (lengths 20 to 153)
  type         string  (1 distinct value)
  granularity  string  (14 distinct values)
  content      string  (lengths 16 to 84.3k)
  metadata     dict
connector-service_snippet_-8421620542795988629_75_15
clm
snippet
// connector-service/backend/connector-integration/src/utils/xml_utils.rs
    // Return JSON as bytes
    Ok(Bytes::from(json_string.into_bytes()))
}

/// Flattens a nested JSON structure, extracting values from "$text" fields
pub fn flatten_json_structure(json_value: Value) -> Value {
    let mut flattened = Map::new();

    // Extract txn object if present
    let txn_obj = if let Some(obj) = json_value.as_object() {
        if let Some(txn) = obj.get("txn") {
            txn.as_object()
        } else {
            Some(obj)
{ "chunk": null, "crate": "connector-integration", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8421620542795988629_75_30
clm
snippet
// connector-service/backend/connector-integration/src/utils/xml_utils.rs
    // Return JSON as bytes
    Ok(Bytes::from(json_string.into_bytes()))
}

/// Flattens a nested JSON structure, extracting values from "$text" fields
pub fn flatten_json_structure(json_value: Value) -> Value {
    let mut flattened = Map::new();

    // Extract txn object if present
    let txn_obj = if let Some(obj) = json_value.as_object() {
        if let Some(txn) = obj.get("txn") {
            txn.as_object()
        } else {
            Some(obj)
        }
    } else {
        None
    };

    // Process the fields
    if let Some(obj) = txn_obj {
        for (key, value) in obj {
            // Handle nested "$text" fields
            if let Some(value_obj) = value.as_object() {
                if let Some(text_value) = value_obj.get("$text") {
                    // Extract the value from "$text" field
                    flattened.insert(key.clone(), text_value.clone());
                } else if value_obj.is_empty() {
                    // Empty object, insert empty string
{ "chunk": null, "crate": "connector-integration", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8421620542795988629_75_50
clm
snippet
// connector-service/backend/connector-integration/src/utils/xml_utils.rs
    // Return JSON as bytes
    Ok(Bytes::from(json_string.into_bytes()))
}

/// Flattens a nested JSON structure, extracting values from "$text" fields
pub fn flatten_json_structure(json_value: Value) -> Value {
    let mut flattened = Map::new();

    // Extract txn object if present
    let txn_obj = if let Some(obj) = json_value.as_object() {
        if let Some(txn) = obj.get("txn") {
            txn.as_object()
        } else {
            Some(obj)
        }
    } else {
        None
    };

    // Process the fields
    if let Some(obj) = txn_obj {
        for (key, value) in obj {
            // Handle nested "$text" fields
            if let Some(value_obj) = value.as_object() {
                if let Some(text_value) = value_obj.get("$text") {
                    // Extract the value from "$text" field
                    flattened.insert(key.clone(), text_value.clone());
                } else if value_obj.is_empty() {
                    // Empty object, insert empty string
                    flattened.insert(key.clone(), Value::String("".to_string()));
                } else {
                    // Use the value as is
                    flattened.insert(key.clone(), value.clone());
                }
            } else {
                // Use the value as is
                flattened.insert(key.clone(), value.clone());
            }
        }
    }

    Value::Object(flattened)
}
{ "chunk": null, "crate": "connector-integration", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 44, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8421620542795988629_100_15
clm
snippet
// connector-service/backend/connector-integration/src/utils/xml_utils.rs
                if let Some(text_value) = value_obj.get("$text") {
                    // Extract the value from "$text" field
                    flattened.insert(key.clone(), text_value.clone());
                } else if value_obj.is_empty() {
                    // Empty object, insert empty string
                    flattened.insert(key.clone(), Value::String("".to_string()));
                } else {
                    // Use the value as is
                    flattened.insert(key.clone(), value.clone());
                }
            } else {
                // Use the value as is
                flattened.insert(key.clone(), value.clone());
            }
        }
{ "chunk": null, "crate": "connector-integration", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8421620542795988629_100_30
clm
snippet
// connector-service/backend/connector-integration/src/utils/xml_utils.rs
                if let Some(text_value) = value_obj.get("$text") {
                    // Extract the value from "$text" field
                    flattened.insert(key.clone(), text_value.clone());
                } else if value_obj.is_empty() {
                    // Empty object, insert empty string
                    flattened.insert(key.clone(), Value::String("".to_string()));
                } else {
                    // Use the value as is
                    flattened.insert(key.clone(), value.clone());
                }
            } else {
                // Use the value as is
                flattened.insert(key.clone(), value.clone());
            }
        }
    }

    Value::Object(flattened)
}
{ "chunk": null, "crate": "connector-integration", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 19, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8421620542795988629_100_50
clm
snippet
// connector-service/backend/connector-integration/src/utils/xml_utils.rs
                if let Some(text_value) = value_obj.get("$text") {
                    // Extract the value from "$text" field
                    flattened.insert(key.clone(), text_value.clone());
                } else if value_obj.is_empty() {
                    // Empty object, insert empty string
                    flattened.insert(key.clone(), Value::String("".to_string()));
                } else {
                    // Use the value as is
                    flattened.insert(key.clone(), value.clone());
                }
            } else {
                // Use the value as is
                flattened.insert(key.clone(), value.clone());
            }
        }
    }

    Value::Object(flattened)
}
{ "chunk": null, "crate": "connector-integration", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 19, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_5501410502799828961_0_15
clm
snippet
// connector-service/backend/tracing-kafka/src/lib.rs
//! A Kafka tracing layer that integrates with the tracing ecosystem.
//!
//! This crate provides a simple way to send tracing logs to Kafka while maintaining
//! consistent JSON formatting through the log_utils infrastructure.
//!
//! # Examples
//! ```no_run
//! use tracing_kafka::KafkaLayer;
//! use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
//!
//! let kafka_layer = KafkaLayer::builder()
//!     .brokers(&["localhost:9092"])
//!     .topic("application-logs")
//!     .build()
//!     .expect("Failed to create Kafka layer");
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_5501410502799828961_0_30
clm
snippet
// connector-service/backend/tracing-kafka/src/lib.rs
//! A Kafka tracing layer that integrates with the tracing ecosystem.
//!
//! This crate provides a simple way to send tracing logs to Kafka while maintaining
//! consistent JSON formatting through the log_utils infrastructure.
//!
//! # Examples
//! ```no_run
//! use tracing_kafka::KafkaLayer;
//! use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
//!
//! let kafka_layer = KafkaLayer::builder()
//!     .brokers(&["localhost:9092"])
//!     .topic("application-logs")
//!     .build()
//!     .expect("Failed to create Kafka layer");
//!
//! tracing_subscriber::registry()
//!     .with(kafka_layer)
//!     .init();
//! ```
//!
//! # Publishing Custom Events
//!
//! In addition to logging, the `KafkaWriter` can be used to publish custom events to Kafka.
//! The `publish_event` method allows you to send a payload to a specific topic with an optional key and headers.
//!
//! ```no_run
//! use tracing_kafka::KafkaWriter;
//! use rdkafka::message::OwnedHeaders;
//!
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_5501410502799828961_0_50
clm
snippet
// connector-service/backend/tracing-kafka/src/lib.rs
//! A Kafka tracing layer that integrates with the tracing ecosystem.
//!
//! This crate provides a simple way to send tracing logs to Kafka while maintaining
//! consistent JSON formatting through the log_utils infrastructure.
//!
//! # Examples
//! ```no_run
//! use tracing_kafka::KafkaLayer;
//! use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
//!
//! let kafka_layer = KafkaLayer::builder()
//!     .brokers(&["localhost:9092"])
//!     .topic("application-logs")
//!     .build()
//!     .expect("Failed to create Kafka layer");
//!
//! tracing_subscriber::registry()
//!     .with(kafka_layer)
//!     .init();
//! ```
//!
//! # Publishing Custom Events
//!
//! In addition to logging, the `KafkaWriter` can be used to publish custom events to Kafka.
//! The `publish_event` method allows you to send a payload to a specific topic with an optional key and headers.
//!
//! ```no_run
//! use tracing_kafka::KafkaWriter;
//! use rdkafka::message::OwnedHeaders;
//!
//! let writer = KafkaWriter::new(
//!     vec!["localhost:9092".to_string()],
//!     "default-topic".to_string(),
//!     None, None, None, None, None, None
//! ).expect("Failed to create KafkaWriter");
//!
//! let headers = OwnedHeaders::new().add("my-header", "my-value");
//!
//! let result = writer.publish_event(
//!     "custom-events",
//!     Some("event-key"),
//!     b"event-payload",
//!     Some(headers),
//! );
//!
//! if let Err(e) = result {
//!     eprintln!("Failed to publish event: {}", e);
//! }
//! ```
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_5501410502799828961_25_15
clm
snippet
// connector-service/backend/tracing-kafka/src/lib.rs
//!
//! ```no_run
//! use tracing_kafka::KafkaWriter;
//! use rdkafka::message::OwnedHeaders;
//!
//! let writer = KafkaWriter::new(
//!     vec!["localhost:9092".to_string()],
//!     "default-topic".to_string(),
//!     None, None, None, None, None, None
//! ).expect("Failed to create KafkaWriter");
//!
//! let headers = OwnedHeaders::new().add("my-header", "my-value");
//!
//! let result = writer.publish_event(
//!     "custom-events",
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_5501410502799828961_25_30
clm
snippet
// connector-service/backend/tracing-kafka/src/lib.rs
//!
//! ```no_run
//! use tracing_kafka::KafkaWriter;
//! use rdkafka::message::OwnedHeaders;
//!
//! let writer = KafkaWriter::new(
//!     vec!["localhost:9092".to_string()],
//!     "default-topic".to_string(),
//!     None, None, None, None, None, None
//! ).expect("Failed to create KafkaWriter");
//!
//! let headers = OwnedHeaders::new().add("my-header", "my-value");
//!
//! let result = writer.publish_event(
//!     "custom-events",
//!     Some("event-key"),
//!     b"event-payload",
//!     Some(headers),
//! );
//!
//! if let Err(e) = result {
//!     eprintln!("Failed to publish event: {}", e);
//! }
//! ```

pub mod builder;
mod layer;
mod writer;

pub use layer::{KafkaLayer, KafkaLayerError};
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_5501410502799828961_25_50
clm
snippet
// connector-service/backend/tracing-kafka/src/lib.rs
//!
//! ```no_run
//! use tracing_kafka::KafkaWriter;
//! use rdkafka::message::OwnedHeaders;
//!
//! let writer = KafkaWriter::new(
//!     vec!["localhost:9092".to_string()],
//!     "default-topic".to_string(),
//!     None, None, None, None, None, None
//! ).expect("Failed to create KafkaWriter");
//!
//! let headers = OwnedHeaders::new().add("my-header", "my-value");
//!
//! let result = writer.publish_event(
//!     "custom-events",
//!     Some("event-key"),
//!     b"event-payload",
//!     Some(headers),
//! );
//!
//! if let Err(e) = result {
//!     eprintln!("Failed to publish event: {}", e);
//! }
//! ```

pub mod builder;
mod layer;
mod writer;

pub use layer::{KafkaLayer, KafkaLayerError};
pub use writer::{KafkaWriter, KafkaWriterError};

#[cfg(feature = "kafka-metrics")]
mod metrics;

/// Initializes the metrics for the tracing-kafka crate.
/// This function should be called once at application startup.
#[cfg(feature = "kafka-metrics")]
pub fn init() {
    metrics::initialize_all_metrics();
}

#[cfg(not(feature = "kafka-metrics"))]
pub fn init() {
    tracing::warn!("Kafka metrics feature is not enabled. Metrics will not be collected.");
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 46, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_5501410502799828961_50_15
clm
snippet
// connector-service/backend/tracing-kafka/src/lib.rs
pub mod builder;
mod layer;
mod writer;

pub use layer::{KafkaLayer, KafkaLayerError};
pub use writer::{KafkaWriter, KafkaWriterError};

#[cfg(feature = "kafka-metrics")]
mod metrics;

/// Initializes the metrics for the tracing-kafka crate.
/// This function should be called once at application startup.
#[cfg(feature = "kafka-metrics")]
pub fn init() {
    metrics::initialize_all_metrics();
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_5501410502799828961_50_30
clm
snippet
// connector-service/backend/tracing-kafka/src/lib.rs
pub mod builder;
mod layer;
mod writer;

pub use layer::{KafkaLayer, KafkaLayerError};
pub use writer::{KafkaWriter, KafkaWriterError};

#[cfg(feature = "kafka-metrics")]
mod metrics;

/// Initializes the metrics for the tracing-kafka crate.
/// This function should be called once at application startup.
#[cfg(feature = "kafka-metrics")]
pub fn init() {
    metrics::initialize_all_metrics();
}

#[cfg(not(feature = "kafka-metrics"))]
pub fn init() {
    tracing::warn!("Kafka metrics feature is not enabled. Metrics will not be collected.");
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 21, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_5501410502799828961_50_50
clm
snippet
// connector-service/backend/tracing-kafka/src/lib.rs
pub mod builder;
mod layer;
mod writer;

pub use layer::{KafkaLayer, KafkaLayerError};
pub use writer::{KafkaWriter, KafkaWriterError};

#[cfg(feature = "kafka-metrics")]
mod metrics;

/// Initializes the metrics for the tracing-kafka crate.
/// This function should be called once at application startup.
#[cfg(feature = "kafka-metrics")]
pub fn init() {
    metrics::initialize_all_metrics();
}

#[cfg(not(feature = "kafka-metrics"))]
pub fn init() {
    tracing::warn!("Kafka metrics feature is not enabled. Metrics will not be collected.");
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 21, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_0_15
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
//! Prometheus metrics for Kafka writer

use std::sync::LazyLock;

use prometheus::{register_int_counter, register_int_gauge, IntCounter, IntGauge};

/// Total number of logs successfully sent to Kafka
#[allow(clippy::expect_used)]
pub static KAFKA_LOGS_SENT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_logs_sent_total",
        "Total number of logs successfully sent to Kafka"
    )
    .expect("Failed to register kafka_logs_sent_total metric")
});
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_0_30
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
//! Prometheus metrics for Kafka writer

use std::sync::LazyLock;

use prometheus::{register_int_counter, register_int_gauge, IntCounter, IntGauge};

/// Total number of logs successfully sent to Kafka
#[allow(clippy::expect_used)]
pub static KAFKA_LOGS_SENT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_logs_sent_total",
        "Total number of logs successfully sent to Kafka"
    )
    .expect("Failed to register kafka_logs_sent_total metric")
});

/// Total number of logs dropped due to Kafka queue full or errors
#[allow(clippy::expect_used)]
pub static KAFKA_LOGS_DROPPED: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_logs_dropped_total",
        "Total number of logs dropped due to Kafka queue full or errors"
    )
    .expect("Failed to register kafka_logs_dropped_total metric")
});

/// Current size of Kafka producer queue
#[allow(clippy::expect_used)]
pub static KAFKA_QUEUE_SIZE: LazyLock<IntGauge> = LazyLock::new(|| {
    register_int_gauge!(
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_0_50
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
//! Prometheus metrics for Kafka writer

use std::sync::LazyLock;

use prometheus::{register_int_counter, register_int_gauge, IntCounter, IntGauge};

/// Total number of logs successfully sent to Kafka
#[allow(clippy::expect_used)]
pub static KAFKA_LOGS_SENT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_logs_sent_total",
        "Total number of logs successfully sent to Kafka"
    )
    .expect("Failed to register kafka_logs_sent_total metric")
});

/// Total number of logs dropped due to Kafka queue full or errors
#[allow(clippy::expect_used)]
pub static KAFKA_LOGS_DROPPED: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_logs_dropped_total",
        "Total number of logs dropped due to Kafka queue full or errors"
    )
    .expect("Failed to register kafka_logs_dropped_total metric")
});

/// Current size of Kafka producer queue
#[allow(clippy::expect_used)]
pub static KAFKA_QUEUE_SIZE: LazyLock<IntGauge> = LazyLock::new(|| {
    register_int_gauge!(
        "kafka_producer_queue_size",
        "Current size of Kafka producer queue"
    )
    .expect("Failed to register kafka_producer_queue_size metric")
});

/// Logs dropped due to queue full
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_QUEUE_FULL: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_queue_full_total",
        "Total number of logs dropped due to Kafka queue being full"
    )
    .expect("Failed to register kafka_drops_queue_full_total metric")
});

/// Logs dropped due to message too large
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_MSG_TOO_LARGE: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_25_15
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// Current size of Kafka producer queue
#[allow(clippy::expect_used)]
pub static KAFKA_QUEUE_SIZE: LazyLock<IntGauge> = LazyLock::new(|| {
    register_int_gauge!(
        "kafka_producer_queue_size",
        "Current size of Kafka producer queue"
    )
    .expect("Failed to register kafka_producer_queue_size metric")
});

/// Logs dropped due to queue full
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_QUEUE_FULL: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_25_30
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// Current size of Kafka producer queue
#[allow(clippy::expect_used)]
pub static KAFKA_QUEUE_SIZE: LazyLock<IntGauge> = LazyLock::new(|| {
    register_int_gauge!(
        "kafka_producer_queue_size",
        "Current size of Kafka producer queue"
    )
    .expect("Failed to register kafka_producer_queue_size metric")
});

/// Logs dropped due to queue full
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_QUEUE_FULL: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_queue_full_total",
        "Total number of logs dropped due to Kafka queue being full"
    )
    .expect("Failed to register kafka_drops_queue_full_total metric")
});

/// Logs dropped due to message too large
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_MSG_TOO_LARGE: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_msg_too_large_total",
        "Total number of logs dropped due to message size exceeding limit"
    )
    .expect("Failed to register kafka_drops_msg_too_large_total metric")
});
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_25_50
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// Current size of Kafka producer queue
#[allow(clippy::expect_used)]
pub static KAFKA_QUEUE_SIZE: LazyLock<IntGauge> = LazyLock::new(|| {
    register_int_gauge!(
        "kafka_producer_queue_size",
        "Current size of Kafka producer queue"
    )
    .expect("Failed to register kafka_producer_queue_size metric")
});

/// Logs dropped due to queue full
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_QUEUE_FULL: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_queue_full_total",
        "Total number of logs dropped due to Kafka queue being full"
    )
    .expect("Failed to register kafka_drops_queue_full_total metric")
});

/// Logs dropped due to message too large
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_MSG_TOO_LARGE: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_msg_too_large_total",
        "Total number of logs dropped due to message size exceeding limit"
    )
    .expect("Failed to register kafka_drops_msg_too_large_total metric")
});

/// Logs dropped due to timeout
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_TIMEOUT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_timeout_total",
        "Total number of logs dropped due to timeout"
    )
    .expect("Failed to register kafka_drops_timeout_total metric")
});

/// Logs dropped due to other errors
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_OTHER: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_other_total",
        "Total number of logs dropped due to other errors"
    )
    .expect("Failed to register kafka_drops_other_total metric")
});
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_50_15
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
        "kafka_drops_msg_too_large_total",
        "Total number of logs dropped due to message size exceeding limit"
    )
    .expect("Failed to register kafka_drops_msg_too_large_total metric")
});

/// Logs dropped due to timeout
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_TIMEOUT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_timeout_total",
        "Total number of logs dropped due to timeout"
    )
    .expect("Failed to register kafka_drops_timeout_total metric")
});
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_50_30
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
        "kafka_drops_msg_too_large_total",
        "Total number of logs dropped due to message size exceeding limit"
    )
    .expect("Failed to register kafka_drops_msg_too_large_total metric")
});

/// Logs dropped due to timeout
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_TIMEOUT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_timeout_total",
        "Total number of logs dropped due to timeout"
    )
    .expect("Failed to register kafka_drops_timeout_total metric")
});

/// Logs dropped due to other errors
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_OTHER: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_other_total",
        "Total number of logs dropped due to other errors"
    )
    .expect("Failed to register kafka_drops_other_total metric")
});

/// Total number of audit events successfully sent to Kafka
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENTS_SENT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_50_50
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
        "kafka_drops_msg_too_large_total",
        "Total number of logs dropped due to message size exceeding limit"
    )
    .expect("Failed to register kafka_drops_msg_too_large_total metric")
});

/// Logs dropped due to timeout
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_TIMEOUT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_timeout_total",
        "Total number of logs dropped due to timeout"
    )
    .expect("Failed to register kafka_drops_timeout_total metric")
});

/// Logs dropped due to other errors
#[allow(clippy::expect_used)]
pub static KAFKA_DROPS_OTHER: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_drops_other_total",
        "Total number of logs dropped due to other errors"
    )
    .expect("Failed to register kafka_drops_other_total metric")
});

/// Total number of audit events successfully sent to Kafka
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENTS_SENT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_events_sent_total",
        "Total number of audit events successfully sent to Kafka"
    )
    .expect("Failed to register kafka_audit_events_sent_total metric")
});

/// Total number of audit events dropped due to Kafka queue full or errors
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENTS_DROPPED: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_events_dropped_total",
        "Total number of audit events dropped due to Kafka queue full or errors"
    )
    .expect("Failed to register kafka_audit_events_dropped_total metric")
});

/// Current size of Kafka audit event producer queue
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENT_QUEUE_SIZE: LazyLock<IntGauge> = LazyLock::new(|| {
    register_int_gauge!(
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_75_15
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// Total number of audit events successfully sent to Kafka
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENTS_SENT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_events_sent_total",
        "Total number of audit events successfully sent to Kafka"
    )
    .expect("Failed to register kafka_audit_events_sent_total metric")
});

/// Total number of audit events dropped due to Kafka queue full or errors
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENTS_DROPPED: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_75_30
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// Total number of audit events successfully sent to Kafka
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENTS_SENT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_events_sent_total",
        "Total number of audit events successfully sent to Kafka"
    )
    .expect("Failed to register kafka_audit_events_sent_total metric")
});

/// Total number of audit events dropped due to Kafka queue full or errors
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENTS_DROPPED: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_events_dropped_total",
        "Total number of audit events dropped due to Kafka queue full or errors"
    )
    .expect("Failed to register kafka_audit_events_dropped_total metric")
});

/// Current size of Kafka audit event producer queue
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENT_QUEUE_SIZE: LazyLock<IntGauge> = LazyLock::new(|| {
    register_int_gauge!(
        "kafka_audit_event_queue_size",
        "Current size of Kafka audit event producer queue"
    )
    .expect("Failed to register kafka_audit_event_queue_size metric")
});
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_75_50
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// Total number of audit events successfully sent to Kafka
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENTS_SENT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_events_sent_total",
        "Total number of audit events successfully sent to Kafka"
    )
    .expect("Failed to register kafka_audit_events_sent_total metric")
});

/// Total number of audit events dropped due to Kafka queue full or errors
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENTS_DROPPED: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_events_dropped_total",
        "Total number of audit events dropped due to Kafka queue full or errors"
    )
    .expect("Failed to register kafka_audit_events_dropped_total metric")
});

/// Current size of Kafka audit event producer queue
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_EVENT_QUEUE_SIZE: LazyLock<IntGauge> = LazyLock::new(|| {
    register_int_gauge!(
        "kafka_audit_event_queue_size",
        "Current size of Kafka audit event producer queue"
    )
    .expect("Failed to register kafka_audit_event_queue_size metric")
});

/// Audit events dropped due to queue full
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_QUEUE_FULL: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_queue_full_total",
        "Total number of audit events dropped due to Kafka queue being full"
    )
    .expect("Failed to register kafka_audit_drops_queue_full_total metric")
});

/// Audit events dropped due to message too large
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_MSG_TOO_LARGE: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_msg_too_large_total",
        "Total number of audit events dropped due to message size exceeding limit"
    )
    .expect("Failed to register kafka_audit_drops_msg_too_large_total metric")
});
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_100_15
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
        "kafka_audit_event_queue_size",
        "Current size of Kafka audit event producer queue"
    )
    .expect("Failed to register kafka_audit_event_queue_size metric")
});

/// Audit events dropped due to queue full
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_QUEUE_FULL: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_queue_full_total",
        "Total number of audit events dropped due to Kafka queue being full"
    )
    .expect("Failed to register kafka_audit_drops_queue_full_total metric")
});
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_100_30
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
        "kafka_audit_event_queue_size",
        "Current size of Kafka audit event producer queue"
    )
    .expect("Failed to register kafka_audit_event_queue_size metric")
});

/// Audit events dropped due to queue full
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_QUEUE_FULL: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_queue_full_total",
        "Total number of audit events dropped due to Kafka queue being full"
    )
    .expect("Failed to register kafka_audit_drops_queue_full_total metric")
});

/// Audit events dropped due to message too large
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_MSG_TOO_LARGE: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_msg_too_large_total",
        "Total number of audit events dropped due to message size exceeding limit"
    )
    .expect("Failed to register kafka_audit_drops_msg_too_large_total metric")
});

/// Audit events dropped due to timeout
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_TIMEOUT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_100_50
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
        "kafka_audit_event_queue_size",
        "Current size of Kafka audit event producer queue"
    )
    .expect("Failed to register kafka_audit_event_queue_size metric")
});

/// Audit events dropped due to queue full
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_QUEUE_FULL: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_queue_full_total",
        "Total number of audit events dropped due to Kafka queue being full"
    )
    .expect("Failed to register kafka_audit_drops_queue_full_total metric")
});

/// Audit events dropped due to message too large
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_MSG_TOO_LARGE: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_msg_too_large_total",
        "Total number of audit events dropped due to message size exceeding limit"
    )
    .expect("Failed to register kafka_audit_drops_msg_too_large_total metric")
});

/// Audit events dropped due to timeout
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_TIMEOUT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_timeout_total",
        "Total number of audit events dropped due to timeout"
    )
    .expect("Failed to register kafka_audit_drops_timeout_total metric")
});

/// Audit events dropped due to other errors
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_OTHER: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_other_total",
        "Total number of audit events dropped due to other errors"
    )
    .expect("Failed to register kafka_audit_drops_other_total metric")
});

/// Forces the initialization of all metrics in this module.
///
/// This function should be called once at application startup to ensure that all metrics
/// are registered upfront. If any metric registration fails (e.g., due to a duplicate
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_125_15
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// Audit events dropped due to timeout
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_TIMEOUT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_timeout_total",
        "Total number of audit events dropped due to timeout"
    )
    .expect("Failed to register kafka_audit_drops_timeout_total metric")
});

/// Audit events dropped due to other errors
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_OTHER: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 125, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_125_30
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// Audit events dropped due to timeout
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_TIMEOUT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_timeout_total",
        "Total number of audit events dropped due to timeout"
    )
    .expect("Failed to register kafka_audit_drops_timeout_total metric")
});

/// Audit events dropped due to other errors
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_OTHER: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_other_total",
        "Total number of audit events dropped due to other errors"
    )
    .expect("Failed to register kafka_audit_drops_other_total metric")
});

/// Forces the initialization of all metrics in this module.
///
/// This function should be called once at application startup to ensure that all metrics
/// are registered upfront. If any metric registration fails (e.g., due to a duplicate
/// metric name), the application will panic immediately.
#[cfg(feature = "kafka-metrics")]
pub fn initialize_all_metrics() {
    // Force evaluation of all lazy metrics to fail fast if registration fails.
    let _ = &*KAFKA_LOGS_SENT;
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 125, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_125_50
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// Audit events dropped due to timeout
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_TIMEOUT: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_timeout_total",
        "Total number of audit events dropped due to timeout"
    )
    .expect("Failed to register kafka_audit_drops_timeout_total metric")
});

/// Audit events dropped due to other errors
#[allow(clippy::expect_used)]
pub static KAFKA_AUDIT_DROPS_OTHER: LazyLock<IntCounter> = LazyLock::new(|| {
    register_int_counter!(
        "kafka_audit_drops_other_total",
        "Total number of audit events dropped due to other errors"
    )
    .expect("Failed to register kafka_audit_drops_other_total metric")
});

/// Forces the initialization of all metrics in this module.
///
/// This function should be called once at application startup to ensure that all metrics
/// are registered upfront. If any metric registration fails (e.g., due to a duplicate
/// metric name), the application will panic immediately.
#[cfg(feature = "kafka-metrics")]
pub fn initialize_all_metrics() {
    // Force evaluation of all lazy metrics to fail fast if registration fails.
    let _ = &*KAFKA_LOGS_SENT;
    let _ = &*KAFKA_LOGS_DROPPED;
    let _ = &*KAFKA_QUEUE_SIZE;
    let _ = &*KAFKA_DROPS_QUEUE_FULL;
    let _ = &*KAFKA_DROPS_MSG_TOO_LARGE;
    let _ = &*KAFKA_DROPS_TIMEOUT;
    let _ = &*KAFKA_DROPS_OTHER;
    let _ = &*KAFKA_AUDIT_EVENTS_SENT;
    let _ = &*KAFKA_AUDIT_EVENTS_DROPPED;
    let _ = &*KAFKA_AUDIT_EVENT_QUEUE_SIZE;
    let _ = &*KAFKA_AUDIT_DROPS_QUEUE_FULL;
    let _ = &*KAFKA_AUDIT_DROPS_MSG_TOO_LARGE;
    let _ = &*KAFKA_AUDIT_DROPS_TIMEOUT;
    let _ = &*KAFKA_AUDIT_DROPS_OTHER;
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 44, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 125, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_150_15
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// metric name), the application will panic immediately.
#[cfg(feature = "kafka-metrics")]
pub fn initialize_all_metrics() {
    // Force evaluation of all lazy metrics to fail fast if registration fails.
    let _ = &*KAFKA_LOGS_SENT;
    let _ = &*KAFKA_LOGS_DROPPED;
    let _ = &*KAFKA_QUEUE_SIZE;
    let _ = &*KAFKA_DROPS_QUEUE_FULL;
    let _ = &*KAFKA_DROPS_MSG_TOO_LARGE;
    let _ = &*KAFKA_DROPS_TIMEOUT;
    let _ = &*KAFKA_DROPS_OTHER;
    let _ = &*KAFKA_AUDIT_EVENTS_SENT;
    let _ = &*KAFKA_AUDIT_EVENTS_DROPPED;
    let _ = &*KAFKA_AUDIT_EVENT_QUEUE_SIZE;
    let _ = &*KAFKA_AUDIT_DROPS_QUEUE_FULL;
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 150, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_150_30
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// metric name), the application will panic immediately.
#[cfg(feature = "kafka-metrics")]
pub fn initialize_all_metrics() {
    // Force evaluation of all lazy metrics to fail fast if registration fails.
    let _ = &*KAFKA_LOGS_SENT;
    let _ = &*KAFKA_LOGS_DROPPED;
    let _ = &*KAFKA_QUEUE_SIZE;
    let _ = &*KAFKA_DROPS_QUEUE_FULL;
    let _ = &*KAFKA_DROPS_MSG_TOO_LARGE;
    let _ = &*KAFKA_DROPS_TIMEOUT;
    let _ = &*KAFKA_DROPS_OTHER;
    let _ = &*KAFKA_AUDIT_EVENTS_SENT;
    let _ = &*KAFKA_AUDIT_EVENTS_DROPPED;
    let _ = &*KAFKA_AUDIT_EVENT_QUEUE_SIZE;
    let _ = &*KAFKA_AUDIT_DROPS_QUEUE_FULL;
    let _ = &*KAFKA_AUDIT_DROPS_MSG_TOO_LARGE;
    let _ = &*KAFKA_AUDIT_DROPS_TIMEOUT;
    let _ = &*KAFKA_AUDIT_DROPS_OTHER;
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 19, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 150, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_863436229918585844_150_50
clm
snippet
// connector-service/backend/tracing-kafka/src/metrics.rs
/// metric name), the application will panic immediately.
#[cfg(feature = "kafka-metrics")]
pub fn initialize_all_metrics() {
    // Force evaluation of all lazy metrics to fail fast if registration fails.
    let _ = &*KAFKA_LOGS_SENT;
    let _ = &*KAFKA_LOGS_DROPPED;
    let _ = &*KAFKA_QUEUE_SIZE;
    let _ = &*KAFKA_DROPS_QUEUE_FULL;
    let _ = &*KAFKA_DROPS_MSG_TOO_LARGE;
    let _ = &*KAFKA_DROPS_TIMEOUT;
    let _ = &*KAFKA_DROPS_OTHER;
    let _ = &*KAFKA_AUDIT_EVENTS_SENT;
    let _ = &*KAFKA_AUDIT_EVENTS_DROPPED;
    let _ = &*KAFKA_AUDIT_EVENT_QUEUE_SIZE;
    let _ = &*KAFKA_AUDIT_DROPS_QUEUE_FULL;
    let _ = &*KAFKA_AUDIT_DROPS_MSG_TOO_LARGE;
    let _ = &*KAFKA_AUDIT_DROPS_TIMEOUT;
    let _ = &*KAFKA_AUDIT_DROPS_OTHER;
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 19, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 150, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_0_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs
//! Kafka writer implementation for sending formatted log messages to Kafka.

use std::{
    io::{self, Write},
    sync::Arc,
    time::Duration,
};

use rdkafka::{
    config::ClientConfig,
    error::{KafkaError, RDKafkaErrorCode},
    message::OwnedHeaders,
    producer::{BaseRecord, DeliveryResult, Producer, ProducerContext, ThreadedProducer},
    ClientContext,
};
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_0_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs
//! Kafka writer implementation for sending formatted log messages to Kafka.

use std::{
    io::{self, Write},
    sync::Arc,
    time::Duration,
};

use rdkafka::{
    config::ClientConfig,
    error::{KafkaError, RDKafkaErrorCode},
    message::OwnedHeaders,
    producer::{BaseRecord, DeliveryResult, Producer, ProducerContext, ThreadedProducer},
    ClientContext,
};

#[cfg(feature = "kafka-metrics")]
use super::metrics::{
    KAFKA_AUDIT_DROPS_MSG_TOO_LARGE, KAFKA_AUDIT_DROPS_OTHER, KAFKA_AUDIT_DROPS_QUEUE_FULL,
    KAFKA_AUDIT_DROPS_TIMEOUT, KAFKA_AUDIT_EVENTS_DROPPED, KAFKA_AUDIT_EVENTS_SENT,
    KAFKA_AUDIT_EVENT_QUEUE_SIZE, KAFKA_DROPS_MSG_TOO_LARGE, KAFKA_DROPS_OTHER,
    KAFKA_DROPS_QUEUE_FULL, KAFKA_DROPS_TIMEOUT, KAFKA_LOGS_DROPPED, KAFKA_LOGS_SENT,
    KAFKA_QUEUE_SIZE,
};

/// A `ProducerContext` that handles delivery callbacks to increment metrics.
#[derive(Clone)]
struct MetricsProducerContext;

impl ClientContext for MetricsProducerContext {}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_0_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs
//! Kafka writer implementation for sending formatted log messages to Kafka.

use std::{
    io::{self, Write},
    sync::Arc,
    time::Duration,
};

use rdkafka::{
    config::ClientConfig,
    error::{KafkaError, RDKafkaErrorCode},
    message::OwnedHeaders,
    producer::{BaseRecord, DeliveryResult, Producer, ProducerContext, ThreadedProducer},
    ClientContext,
};

#[cfg(feature = "kafka-metrics")]
use super::metrics::{
    KAFKA_AUDIT_DROPS_MSG_TOO_LARGE, KAFKA_AUDIT_DROPS_OTHER, KAFKA_AUDIT_DROPS_QUEUE_FULL,
    KAFKA_AUDIT_DROPS_TIMEOUT, KAFKA_AUDIT_EVENTS_DROPPED, KAFKA_AUDIT_EVENTS_SENT,
    KAFKA_AUDIT_EVENT_QUEUE_SIZE, KAFKA_DROPS_MSG_TOO_LARGE, KAFKA_DROPS_OTHER,
    KAFKA_DROPS_QUEUE_FULL, KAFKA_DROPS_TIMEOUT, KAFKA_LOGS_DROPPED, KAFKA_LOGS_SENT,
    KAFKA_QUEUE_SIZE,
};

/// A `ProducerContext` that handles delivery callbacks to increment metrics.
#[derive(Clone)]
struct MetricsProducerContext;

impl ClientContext for MetricsProducerContext {}

impl ProducerContext for MetricsProducerContext {
    type DeliveryOpaque = Box<KafkaMessageType>;

    fn delivery(&self, delivery_result: &DeliveryResult<'_>, opaque: Self::DeliveryOpaque) {
        let message_type = *opaque;
        let is_success = delivery_result.is_ok();

        #[cfg(feature = "kafka-metrics")]
        {
            match (message_type, is_success) {
                (KafkaMessageType::Event, true) => KAFKA_AUDIT_EVENTS_SENT.inc(),
                (KafkaMessageType::Event, false) => KAFKA_AUDIT_EVENTS_DROPPED.inc(),
                (KafkaMessageType::Log, true) => KAFKA_LOGS_SENT.inc(),
                (KafkaMessageType::Log, false) => KAFKA_LOGS_DROPPED.inc(),
            }
        }

        if let Err((kafka_error, _)) = delivery_result {
            #[cfg(feature = "kafka-metrics")]
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_25_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs
/// A `ProducerContext` that handles delivery callbacks to increment metrics.
#[derive(Clone)]
struct MetricsProducerContext;

impl ClientContext for MetricsProducerContext {}

impl ProducerContext for MetricsProducerContext {
    type DeliveryOpaque = Box<KafkaMessageType>;

    fn delivery(&self, delivery_result: &DeliveryResult<'_>, opaque: Self::DeliveryOpaque) {
        let message_type = *opaque;
        let is_success = delivery_result.is_ok();

        #[cfg(feature = "kafka-metrics")]
        {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_25_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs
/// A `ProducerContext` that handles delivery callbacks to increment metrics.
#[derive(Clone)]
struct MetricsProducerContext;

impl ClientContext for MetricsProducerContext {}

impl ProducerContext for MetricsProducerContext {
    type DeliveryOpaque = Box<KafkaMessageType>;

    fn delivery(&self, delivery_result: &DeliveryResult<'_>, opaque: Self::DeliveryOpaque) {
        let message_type = *opaque;
        let is_success = delivery_result.is_ok();

        #[cfg(feature = "kafka-metrics")]
        {
            match (message_type, is_success) {
                (KafkaMessageType::Event, true) => KAFKA_AUDIT_EVENTS_SENT.inc(),
                (KafkaMessageType::Event, false) => KAFKA_AUDIT_EVENTS_DROPPED.inc(),
                (KafkaMessageType::Log, true) => KAFKA_LOGS_SENT.inc(),
                (KafkaMessageType::Log, false) => KAFKA_LOGS_DROPPED.inc(),
            }
        }

        if let Err((kafka_error, _)) = delivery_result {
            #[cfg(feature = "kafka-metrics")]
            match (message_type, &kafka_error) {
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull),
                ) => {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_25_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs
/// A `ProducerContext` that handles delivery callbacks to increment metrics.
#[derive(Clone)]
struct MetricsProducerContext;

impl ClientContext for MetricsProducerContext {}

impl ProducerContext for MetricsProducerContext {
    type DeliveryOpaque = Box<KafkaMessageType>;

    fn delivery(&self, delivery_result: &DeliveryResult<'_>, opaque: Self::DeliveryOpaque) {
        let message_type = *opaque;
        let is_success = delivery_result.is_ok();

        #[cfg(feature = "kafka-metrics")]
        {
            match (message_type, is_success) {
                (KafkaMessageType::Event, true) => KAFKA_AUDIT_EVENTS_SENT.inc(),
                (KafkaMessageType::Event, false) => KAFKA_AUDIT_EVENTS_DROPPED.inc(),
                (KafkaMessageType::Log, true) => KAFKA_LOGS_SENT.inc(),
                (KafkaMessageType::Log, false) => KAFKA_LOGS_DROPPED.inc(),
            }
        }

        if let Err((kafka_error, _)) = delivery_result {
            #[cfg(feature = "kafka-metrics")]
            match (message_type, &kafka_error) {
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull),
                ) => {
                    KAFKA_AUDIT_DROPS_QUEUE_FULL.inc();
                }
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageSizeTooLarge),
                ) => {
                    KAFKA_AUDIT_DROPS_MSG_TOO_LARGE.inc();
                }
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageTimedOut),
                ) => {
                    KAFKA_AUDIT_DROPS_TIMEOUT.inc();
                }
                (KafkaMessageType::Event, _) => {
                    KAFKA_AUDIT_DROPS_OTHER.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull),
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_50_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

            match (message_type, &kafka_error) {
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull),
                ) => {
                    KAFKA_AUDIT_DROPS_QUEUE_FULL.inc();
                }
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageSizeTooLarge),
                ) => {
                    KAFKA_AUDIT_DROPS_MSG_TOO_LARGE.inc();
                }
                (
                    KafkaMessageType::Event,
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_50_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

            match (message_type, &kafka_error) {
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull),
                ) => {
                    KAFKA_AUDIT_DROPS_QUEUE_FULL.inc();
                }
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageSizeTooLarge),
                ) => {
                    KAFKA_AUDIT_DROPS_MSG_TOO_LARGE.inc();
                }
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageTimedOut),
                ) => {
                    KAFKA_AUDIT_DROPS_TIMEOUT.inc();
                }
                (KafkaMessageType::Event, _) => {
                    KAFKA_AUDIT_DROPS_OTHER.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull),
                ) => {
                    KAFKA_DROPS_QUEUE_FULL.inc();
                }
                (
                    KafkaMessageType::Log,
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_50_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

            match (message_type, &kafka_error) {
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull),
                ) => {
                    KAFKA_AUDIT_DROPS_QUEUE_FULL.inc();
                }
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageSizeTooLarge),
                ) => {
                    KAFKA_AUDIT_DROPS_MSG_TOO_LARGE.inc();
                }
                (
                    KafkaMessageType::Event,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageTimedOut),
                ) => {
                    KAFKA_AUDIT_DROPS_TIMEOUT.inc();
                }
                (KafkaMessageType::Event, _) => {
                    KAFKA_AUDIT_DROPS_OTHER.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull),
                ) => {
                    KAFKA_DROPS_QUEUE_FULL.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageSizeTooLarge),
                ) => {
                    KAFKA_DROPS_MSG_TOO_LARGE.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageTimedOut),
                ) => {
                    KAFKA_DROPS_TIMEOUT.inc();
                }
                (KafkaMessageType::Log, _) => {
                    KAFKA_DROPS_OTHER.inc();
                }
            }
        }
    }
}

/// This enum helps the callback distinguish between logs and events.
#[derive(Clone, Copy, Debug)]
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_75_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

                ) => {
                    KAFKA_DROPS_QUEUE_FULL.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageSizeTooLarge),
                ) => {
                    KAFKA_DROPS_MSG_TOO_LARGE.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageTimedOut),
                ) => {
                    KAFKA_DROPS_TIMEOUT.inc();
                }
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_75_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

                ) => {
                    KAFKA_DROPS_QUEUE_FULL.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageSizeTooLarge),
                ) => {
                    KAFKA_DROPS_MSG_TOO_LARGE.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageTimedOut),
                ) => {
                    KAFKA_DROPS_TIMEOUT.inc();
                }
                (KafkaMessageType::Log, _) => {
                    KAFKA_DROPS_OTHER.inc();
                }
            }
        }
    }
}

/// This enum helps the callback distinguish between logs and events.
#[derive(Clone, Copy, Debug)]
enum KafkaMessageType {
    Event,
    Log,
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_75_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

                ) => {
                    KAFKA_DROPS_QUEUE_FULL.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageSizeTooLarge),
                ) => {
                    KAFKA_DROPS_MSG_TOO_LARGE.inc();
                }
                (
                    KafkaMessageType::Log,
                    KafkaError::MessageProduction(RDKafkaErrorCode::MessageTimedOut),
                ) => {
                    KAFKA_DROPS_TIMEOUT.inc();
                }
                (KafkaMessageType::Log, _) => {
                    KAFKA_DROPS_OTHER.inc();
                }
            }
        }
    }
}

/// This enum helps the callback distinguish between logs and events.
#[derive(Clone, Copy, Debug)]
enum KafkaMessageType {
    Event,
    Log,
}

/// Kafka writer that implements std::io::Write for seamless integration with tracing
#[derive(Clone)]
pub struct KafkaWriter {
    producer: Arc<ThreadedProducer<MetricsProducerContext>>,
    topic: String,
}

impl std::fmt::Debug for KafkaWriter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("KafkaWriter")
            .field("topic", &self.topic)
            .finish()
    }
}

impl KafkaWriter {
    /// Creates a new KafkaWriter with the specified brokers and topic.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        brokers: Vec<String>,
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_100_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

enum KafkaMessageType {
    Event,
    Log,
}

/// Kafka writer that implements std::io::Write for seamless integration with tracing
#[derive(Clone)]
pub struct KafkaWriter {
    producer: Arc<ThreadedProducer<MetricsProducerContext>>,
    topic: String,
}

impl std::fmt::Debug for KafkaWriter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("KafkaWriter")
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_100_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

enum KafkaMessageType {
    Event,
    Log,
}

/// Kafka writer that implements std::io::Write for seamless integration with tracing
#[derive(Clone)]
pub struct KafkaWriter {
    producer: Arc<ThreadedProducer<MetricsProducerContext>>,
    topic: String,
}

impl std::fmt::Debug for KafkaWriter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("KafkaWriter")
            .field("topic", &self.topic)
            .finish()
    }
}

impl KafkaWriter {
    /// Creates a new KafkaWriter with the specified brokers and topic.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        brokers: Vec<String>,
        topic: String,
        batch_size: Option<usize>,
        linger_ms: Option<u64>,
        queue_buffering_max_messages: Option<usize>,
        queue_buffering_max_kbytes: Option<usize>,
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_100_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

enum KafkaMessageType {
    Event,
    Log,
}

/// Kafka writer that implements std::io::Write for seamless integration with tracing
#[derive(Clone)]
pub struct KafkaWriter {
    producer: Arc<ThreadedProducer<MetricsProducerContext>>,
    topic: String,
}

impl std::fmt::Debug for KafkaWriter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("KafkaWriter")
            .field("topic", &self.topic)
            .finish()
    }
}

impl KafkaWriter {
    /// Creates a new KafkaWriter with the specified brokers and topic.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        brokers: Vec<String>,
        topic: String,
        batch_size: Option<usize>,
        linger_ms: Option<u64>,
        queue_buffering_max_messages: Option<usize>,
        queue_buffering_max_kbytes: Option<usize>,
        reconnect_backoff_min_ms: Option<u64>,
        reconnect_backoff_max_ms: Option<u64>,
    ) -> Result<Self, KafkaWriterError> {
        let mut config = ClientConfig::new();
        config.set("bootstrap.servers", brokers.join(","));

        if let Some(min_backoff) = reconnect_backoff_min_ms {
            config.set("reconnect.backoff.ms", min_backoff.to_string());
        }
        if let Some(max_backoff) = reconnect_backoff_max_ms {
            config.set("reconnect.backoff.max.ms", max_backoff.to_string());
        }
        if let Some(max_messages) = queue_buffering_max_messages {
            config.set("queue.buffering.max.messages", max_messages.to_string());
        }
        if let Some(max_kbytes) = queue_buffering_max_kbytes {
            config.set("queue.buffering.max.kbytes", max_kbytes.to_string());
        }
        if let Some(size) = batch_size {
            config.set("batch.size", size.to_string());
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_125_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        topic: String,
        batch_size: Option<usize>,
        linger_ms: Option<u64>,
        queue_buffering_max_messages: Option<usize>,
        queue_buffering_max_kbytes: Option<usize>,
        reconnect_backoff_min_ms: Option<u64>,
        reconnect_backoff_max_ms: Option<u64>,
    ) -> Result<Self, KafkaWriterError> {
        let mut config = ClientConfig::new();
        config.set("bootstrap.servers", brokers.join(","));

        if let Some(min_backoff) = reconnect_backoff_min_ms {
            config.set("reconnect.backoff.ms", min_backoff.to_string());
        }
        if let Some(max_backoff) = reconnect_backoff_max_ms {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 125, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_125_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        topic: String,
        batch_size: Option<usize>,
        linger_ms: Option<u64>,
        queue_buffering_max_messages: Option<usize>,
        queue_buffering_max_kbytes: Option<usize>,
        reconnect_backoff_min_ms: Option<u64>,
        reconnect_backoff_max_ms: Option<u64>,
    ) -> Result<Self, KafkaWriterError> {
        let mut config = ClientConfig::new();
        config.set("bootstrap.servers", brokers.join(","));

        if let Some(min_backoff) = reconnect_backoff_min_ms {
            config.set("reconnect.backoff.ms", min_backoff.to_string());
        }
        if let Some(max_backoff) = reconnect_backoff_max_ms {
            config.set("reconnect.backoff.max.ms", max_backoff.to_string());
        }
        if let Some(max_messages) = queue_buffering_max_messages {
            config.set("queue.buffering.max.messages", max_messages.to_string());
        }
        if let Some(max_kbytes) = queue_buffering_max_kbytes {
            config.set("queue.buffering.max.kbytes", max_kbytes.to_string());
        }
        if let Some(size) = batch_size {
            config.set("batch.size", size.to_string());
        }
        if let Some(ms) = linger_ms {
            config.set("linger.ms", ms.to_string());
        }
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 125, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_125_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        topic: String,
        batch_size: Option<usize>,
        linger_ms: Option<u64>,
        queue_buffering_max_messages: Option<usize>,
        queue_buffering_max_kbytes: Option<usize>,
        reconnect_backoff_min_ms: Option<u64>,
        reconnect_backoff_max_ms: Option<u64>,
    ) -> Result<Self, KafkaWriterError> {
        let mut config = ClientConfig::new();
        config.set("bootstrap.servers", brokers.join(","));

        if let Some(min_backoff) = reconnect_backoff_min_ms {
            config.set("reconnect.backoff.ms", min_backoff.to_string());
        }
        if let Some(max_backoff) = reconnect_backoff_max_ms {
            config.set("reconnect.backoff.max.ms", max_backoff.to_string());
        }
        if let Some(max_messages) = queue_buffering_max_messages {
            config.set("queue.buffering.max.messages", max_messages.to_string());
        }
        if let Some(max_kbytes) = queue_buffering_max_kbytes {
            config.set("queue.buffering.max.kbytes", max_kbytes.to_string());
        }
        if let Some(size) = batch_size {
            config.set("batch.size", size.to_string());
        }
        if let Some(ms) = linger_ms {
            config.set("linger.ms", ms.to_string());
        }

        let producer: ThreadedProducer<MetricsProducerContext> = config
            .create_with_context(MetricsProducerContext)
            .map_err(KafkaWriterError::ProducerCreation)?;

        producer
            .client()
            .fetch_metadata(Some(&topic), Duration::from_secs(5))
            .map_err(KafkaWriterError::MetadataFetch)?;

        Ok(Self {
            producer: Arc::new(producer),
            topic,
        })
    }

    /// Publishes a single event to Kafka. This method is non-blocking.
    /// Returns an error if the message cannot be enqueued to the producer's buffer.
    pub fn publish_event(
        &self,
        topic: &str,
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 125, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_150_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        }
        if let Some(ms) = linger_ms {
            config.set("linger.ms", ms.to_string());
        }

        let producer: ThreadedProducer<MetricsProducerContext> = config
            .create_with_context(MetricsProducerContext)
            .map_err(KafkaWriterError::ProducerCreation)?;

        producer
            .client()
            .fetch_metadata(Some(&topic), Duration::from_secs(5))
            .map_err(KafkaWriterError::MetadataFetch)?;

        Ok(Self {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 150, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_150_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        }
        if let Some(ms) = linger_ms {
            config.set("linger.ms", ms.to_string());
        }

        let producer: ThreadedProducer<MetricsProducerContext> = config
            .create_with_context(MetricsProducerContext)
            .map_err(KafkaWriterError::ProducerCreation)?;

        producer
            .client()
            .fetch_metadata(Some(&topic), Duration::from_secs(5))
            .map_err(KafkaWriterError::MetadataFetch)?;

        Ok(Self {
            producer: Arc::new(producer),
            topic,
        })
    }

    /// Publishes a single event to Kafka. This method is non-blocking.
    /// Returns an error if the message cannot be enqueued to the producer's buffer.
    pub fn publish_event(
        &self,
        topic: &str,
        key: Option<&str>,
        payload: &[u8],
        headers: Option<OwnedHeaders>,
    ) -> Result<(), KafkaError> {
        #[cfg(feature = "kafka-metrics")]
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 150, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_150_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        }
        if let Some(ms) = linger_ms {
            config.set("linger.ms", ms.to_string());
        }

        let producer: ThreadedProducer<MetricsProducerContext> = config
            .create_with_context(MetricsProducerContext)
            .map_err(KafkaWriterError::ProducerCreation)?;

        producer
            .client()
            .fetch_metadata(Some(&topic), Duration::from_secs(5))
            .map_err(KafkaWriterError::MetadataFetch)?;

        Ok(Self {
            producer: Arc::new(producer),
            topic,
        })
    }

    /// Publishes a single event to Kafka. This method is non-blocking.
    /// Returns an error if the message cannot be enqueued to the producer's buffer.
    pub fn publish_event(
        &self,
        topic: &str,
        key: Option<&str>,
        payload: &[u8],
        headers: Option<OwnedHeaders>,
    ) -> Result<(), KafkaError> {
        #[cfg(feature = "kafka-metrics")]
        {
            let queue_size = self.producer.in_flight_count();
            KAFKA_AUDIT_EVENT_QUEUE_SIZE.set(queue_size.into());
        }

        let mut record = BaseRecord::with_opaque_to(topic, Box::new(KafkaMessageType::Event))
            .payload(payload)
            .timestamp(
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_millis().try_into().unwrap_or(0))
                    .unwrap_or(0),
            );

        if let Some(k) = key {
            record = record.key(k);
        }
        if let Some(h) = headers {
            record = record.headers(h);
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 150, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_175_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        key: Option<&str>,
        payload: &[u8],
        headers: Option<OwnedHeaders>,
    ) -> Result<(), KafkaError> {
        #[cfg(feature = "kafka-metrics")]
        {
            let queue_size = self.producer.in_flight_count();
            KAFKA_AUDIT_EVENT_QUEUE_SIZE.set(queue_size.into());
        }

        let mut record = BaseRecord::with_opaque_to(topic, Box::new(KafkaMessageType::Event))
            .payload(payload)
            .timestamp(
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 175, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_175_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        key: Option<&str>,
        payload: &[u8],
        headers: Option<OwnedHeaders>,
    ) -> Result<(), KafkaError> {
        #[cfg(feature = "kafka-metrics")]
        {
            let queue_size = self.producer.in_flight_count();
            KAFKA_AUDIT_EVENT_QUEUE_SIZE.set(queue_size.into());
        }

        let mut record = BaseRecord::with_opaque_to(topic, Box::new(KafkaMessageType::Event))
            .payload(payload)
            .timestamp(
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_millis().try_into().unwrap_or(0))
                    .unwrap_or(0),
            );

        if let Some(k) = key {
            record = record.key(k);
        }
        if let Some(h) = headers {
            record = record.headers(h);
        }

        match self.producer.send(record) {
            Ok(_) => Ok(()),
            Err((kafka_error, _)) => {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 175, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_175_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        key: Option<&str>,
        payload: &[u8],
        headers: Option<OwnedHeaders>,
    ) -> Result<(), KafkaError> {
        #[cfg(feature = "kafka-metrics")]
        {
            let queue_size = self.producer.in_flight_count();
            KAFKA_AUDIT_EVENT_QUEUE_SIZE.set(queue_size.into());
        }

        let mut record = BaseRecord::with_opaque_to(topic, Box::new(KafkaMessageType::Event))
            .payload(payload)
            .timestamp(
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_millis().try_into().unwrap_or(0))
                    .unwrap_or(0),
            );

        if let Some(k) = key {
            record = record.key(k);
        }
        if let Some(h) = headers {
            record = record.headers(h);
        }

        match self.producer.send(record) {
            Ok(_) => Ok(()),
            Err((kafka_error, _)) => {
                #[cfg(feature = "kafka-metrics")]
                {
                    KAFKA_AUDIT_EVENTS_DROPPED.inc();
                    // Only QUEUE_FULL can happen during send() - others happen during delivery
                    match &kafka_error {
                        KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => {
                            KAFKA_AUDIT_DROPS_QUEUE_FULL.inc();
                        }
                        _ => {
                            KAFKA_AUDIT_DROPS_OTHER.inc();
                        }
                    }
                }
                Err(kafka_error)
            }
        }
    }

    /// Creates a new builder for constructing a KafkaWriter
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 175, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_200_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        }

        match self.producer.send(record) {
            Ok(_) => Ok(()),
            Err((kafka_error, _)) => {
                #[cfg(feature = "kafka-metrics")]
                {
                    KAFKA_AUDIT_EVENTS_DROPPED.inc();
                    // Only QUEUE_FULL can happen during send() - others happen during delivery
                    match &kafka_error {
                        KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => {
                            KAFKA_AUDIT_DROPS_QUEUE_FULL.inc();
                        }
                        _ => {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 200, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_200_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        }

        match self.producer.send(record) {
            Ok(_) => Ok(()),
            Err((kafka_error, _)) => {
                #[cfg(feature = "kafka-metrics")]
                {
                    KAFKA_AUDIT_EVENTS_DROPPED.inc();
                    // Only QUEUE_FULL can happen during send() - others happen during delivery
                    match &kafka_error {
                        KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => {
                            KAFKA_AUDIT_DROPS_QUEUE_FULL.inc();
                        }
                        _ => {
                            KAFKA_AUDIT_DROPS_OTHER.inc();
                        }
                    }
                }
                Err(kafka_error)
            }
        }
    }

    /// Creates a new builder for constructing a KafkaWriter
    pub fn builder() -> crate::builder::KafkaWriterBuilder {
        crate::builder::KafkaWriterBuilder::new()
    }
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 200, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_200_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

        }

        match self.producer.send(record) {
            Ok(_) => Ok(()),
            Err((kafka_error, _)) => {
                #[cfg(feature = "kafka-metrics")]
                {
                    KAFKA_AUDIT_EVENTS_DROPPED.inc();
                    // Only QUEUE_FULL can happen during send() - others happen during delivery
                    match &kafka_error {
                        KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => {
                            KAFKA_AUDIT_DROPS_QUEUE_FULL.inc();
                        }
                        _ => {
                            KAFKA_AUDIT_DROPS_OTHER.inc();
                        }
                    }
                }
                Err(kafka_error)
            }
        }
    }

    /// Creates a new builder for constructing a KafkaWriter
    pub fn builder() -> crate::builder::KafkaWriterBuilder {
        crate::builder::KafkaWriterBuilder::new()
    }
}

impl Write for KafkaWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        #[cfg(feature = "kafka-metrics")]
        {
            let queue_size = self.producer.in_flight_count();
            KAFKA_QUEUE_SIZE.set(queue_size.into());
        }

        let record = BaseRecord::with_opaque_to(&self.topic, Box::new(KafkaMessageType::Log))
            .payload(buf)
            .timestamp(
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_millis().try_into().unwrap_or(0))
                    .unwrap_or(0),
            );

        if let Err((kafka_error, _)) = self.producer.send::<(), [u8]>(record) {
            #[cfg(feature = "kafka-metrics")]
            {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 200, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_225_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

    pub fn builder() -> crate::builder::KafkaWriterBuilder {
        crate::builder::KafkaWriterBuilder::new()
    }
}

impl Write for KafkaWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        #[cfg(feature = "kafka-metrics")]
        {
            let queue_size = self.producer.in_flight_count();
            KAFKA_QUEUE_SIZE.set(queue_size.into());
        }

        let record = BaseRecord::with_opaque_to(&self.topic, Box::new(KafkaMessageType::Log))
            .payload(buf)
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 225, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_225_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

    pub fn builder() -> crate::builder::KafkaWriterBuilder {
        crate::builder::KafkaWriterBuilder::new()
    }
}

impl Write for KafkaWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        #[cfg(feature = "kafka-metrics")]
        {
            let queue_size = self.producer.in_flight_count();
            KAFKA_QUEUE_SIZE.set(queue_size.into());
        }

        let record = BaseRecord::with_opaque_to(&self.topic, Box::new(KafkaMessageType::Log))
            .payload(buf)
            .timestamp(
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_millis().try_into().unwrap_or(0))
                    .unwrap_or(0),
            );

        if let Err((kafka_error, _)) = self.producer.send::<(), [u8]>(record) {
            #[cfg(feature = "kafka-metrics")]
            {
                KAFKA_LOGS_DROPPED.inc();
                match &kafka_error {
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => {
                        KAFKA_DROPS_QUEUE_FULL.inc();
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 225, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_225_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

    pub fn builder() -> crate::builder::KafkaWriterBuilder {
        crate::builder::KafkaWriterBuilder::new()
    }
}

impl Write for KafkaWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        #[cfg(feature = "kafka-metrics")]
        {
            let queue_size = self.producer.in_flight_count();
            KAFKA_QUEUE_SIZE.set(queue_size.into());
        }

        let record = BaseRecord::with_opaque_to(&self.topic, Box::new(KafkaMessageType::Log))
            .payload(buf)
            .timestamp(
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .map(|d| d.as_millis().try_into().unwrap_or(0))
                    .unwrap_or(0),
            );

        if let Err((kafka_error, _)) = self.producer.send::<(), [u8]>(record) {
            #[cfg(feature = "kafka-metrics")]
            {
                KAFKA_LOGS_DROPPED.inc();
                match &kafka_error {
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => {
                        KAFKA_DROPS_QUEUE_FULL.inc();
                    }
                    _ => {
                        KAFKA_DROPS_OTHER.inc();
                    }
                }
            }
        }

        // Return Ok to not block the application. The actual delivery result
        // is handled by the callback in the background.
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        self.producer
            .flush(rdkafka::util::Timeout::After(Duration::from_secs(5)))
            .map_err(|e: KafkaError| io::Error::other(format!("Kafka flush failed: {e}")))
    }
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 225, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_250_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

                KAFKA_LOGS_DROPPED.inc();
                match &kafka_error {
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => {
                        KAFKA_DROPS_QUEUE_FULL.inc();
                    }
                    _ => {
                        KAFKA_DROPS_OTHER.inc();
                    }
                }
            }
        }

        // Return Ok to not block the application. The actual delivery result
        // is handled by the callback in the background.
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 250, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_250_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

                KAFKA_LOGS_DROPPED.inc();
                match &kafka_error {
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => {
                        KAFKA_DROPS_QUEUE_FULL.inc();
                    }
                    _ => {
                        KAFKA_DROPS_OTHER.inc();
                    }
                }
            }
        }

        // Return Ok to not block the application. The actual delivery result
        // is handled by the callback in the background.
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        self.producer
            .flush(rdkafka::util::Timeout::After(Duration::from_secs(5)))
            .map_err(|e: KafkaError| io::Error::other(format!("Kafka flush failed: {e}")))
    }
}

/// Errors that can occur when creating or using a KafkaWriter.
#[derive(Debug, thiserror::Error)]
pub enum KafkaWriterError {
    #[error("Failed to create Kafka producer: {0}")]
    ProducerCreation(KafkaError),
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 250, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_250_50
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

                KAFKA_LOGS_DROPPED.inc();
                match &kafka_error {
                    KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull) => {
                        KAFKA_DROPS_QUEUE_FULL.inc();
                    }
                    _ => {
                        KAFKA_DROPS_OTHER.inc();
                    }
                }
            }
        }

        // Return Ok to not block the application. The actual delivery result
        // is handled by the callback in the background.
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        self.producer
            .flush(rdkafka::util::Timeout::After(Duration::from_secs(5)))
            .map_err(|e: KafkaError| io::Error::other(format!("Kafka flush failed: {e}")))
    }
}

/// Errors that can occur when creating or using a KafkaWriter.
#[derive(Debug, thiserror::Error)]
pub enum KafkaWriterError {
    #[error("Failed to create Kafka producer: {0}")]
    ProducerCreation(KafkaError),
    #[error("Failed to fetch Kafka metadata: {0}")]
    MetadataFetch(KafkaError),
}

/// Make KafkaWriter compatible with tracing_subscriber's MakeWriter trait.
impl<'a> tracing_subscriber::fmt::MakeWriter<'a> for KafkaWriter {
    type Writer = Self;

    fn make_writer(&'a self) -> Self::Writer {
        self.clone()
    }
}

/// Graceful shutdown - flush pending messages when dropping
impl Drop for KafkaWriter {
    fn drop(&mut self) {
        // Only flush if this is the last reference to the producer
        if Arc::strong_count(&self.producer) == 1 {
            // Try to flush pending messages with a 5 second timeout
            let _ = self
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 250, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_275_15
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

/// Errors that can occur when creating or using a KafkaWriter.
#[derive(Debug, thiserror::Error)]
pub enum KafkaWriterError {
    #[error("Failed to create Kafka producer: {0}")]
    ProducerCreation(KafkaError),
    #[error("Failed to fetch Kafka metadata: {0}")]
    MetadataFetch(KafkaError),
}

/// Make KafkaWriter compatible with tracing_subscriber's MakeWriter trait.
impl<'a> tracing_subscriber::fmt::MakeWriter<'a> for KafkaWriter {
    type Writer = Self;

    fn make_writer(&'a self) -> Self::Writer {
        self.clone()
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 275, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-8840925468162898005_275_30
clm
snippet
// connector-service/backend/tracing-kafka/src/writer.rs

/// Errors that can occur when creating or using a KafkaWriter.
#[derive(Debug, thiserror::Error)]
pub enum KafkaWriterError {
    #[error("Failed to create Kafka producer: {0}")]
    ProducerCreation(KafkaError),
    #[error("Failed to fetch Kafka metadata: {0}")]
    MetadataFetch(KafkaError),
}

/// Make KafkaWriter compatible with tracing_subscriber's MakeWriter trait.
impl<'a> tracing_subscriber::fmt::MakeWriter<'a> for KafkaWriter {
    type Writer = Self;

    fn make_writer(&'a self) -> Self::Writer {
        self.clone()
    }
}

/// Graceful shutdown - flush pending messages when dropping
impl Drop for KafkaWriter {
    fn drop(&mut self) {
        // Only flush if this is the last reference to the producer
        if Arc::strong_count(&self.producer) == 1 {
            // Try to flush pending messages with a 5 second timeout
            let _ = self
                .producer
                .flush(rdkafka::util::Timeout::After(Duration::from_secs(5)));
        }
    }
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 275, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_0_15
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

//! Kafka layer implementation that reuses log_utils formatting.

use std::{
    collections::{HashMap, HashSet},
    time::Duration,
};

use log_utils::{
    AdditionalFieldsPlacement, JsonFormattingLayer, JsonFormattingLayerConfig, LoggerError,
};
use tracing::Subscriber;
use tracing_subscriber::Layer;

use crate::{
    builder::KafkaWriterBuilder,
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_0_30
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

//! Kafka layer implementation that reuses log_utils formatting.

use std::{
    collections::{HashMap, HashSet},
    time::Duration,
};

use log_utils::{
    AdditionalFieldsPlacement, JsonFormattingLayer, JsonFormattingLayerConfig, LoggerError,
};
use tracing::Subscriber;
use tracing_subscriber::Layer;

use crate::{
    builder::KafkaWriterBuilder,
    writer::{KafkaWriter, KafkaWriterError},
};

/// Tracing layer that sends JSON-formatted logs to Kafka
///
/// Wraps log_utils' JsonFormattingLayer
pub struct KafkaLayer {
    inner: JsonFormattingLayer<KafkaWriter, serde_json::ser::CompactFormatter>,
}

impl KafkaLayer {
    /// Creates a new builder for configuring a KafkaLayer.
    pub fn builder() -> KafkaLayerBuilder {
        KafkaLayerBuilder::new()
    }
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_0_50
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

//! Kafka layer implementation that reuses log_utils formatting.

use std::{
    collections::{HashMap, HashSet},
    time::Duration,
};

use log_utils::{
    AdditionalFieldsPlacement, JsonFormattingLayer, JsonFormattingLayerConfig, LoggerError,
};
use tracing::Subscriber;
use tracing_subscriber::Layer;

use crate::{
    builder::KafkaWriterBuilder,
    writer::{KafkaWriter, KafkaWriterError},
};

/// Tracing layer that sends JSON-formatted logs to Kafka
///
/// Wraps log_utils' JsonFormattingLayer
pub struct KafkaLayer {
    inner: JsonFormattingLayer<KafkaWriter, serde_json::ser::CompactFormatter>,
}

impl KafkaLayer {
    /// Creates a new builder for configuring a KafkaLayer.
    pub fn builder() -> KafkaLayerBuilder {
        KafkaLayerBuilder::new()
    }

    /// Creates a new KafkaLayer from a pre-configured KafkaWriter.
    /// This is primarily used internally by the builder.
    pub(crate) fn from_writer(
        kafka_writer: KafkaWriter,
        static_fields: HashMap<String, serde_json::Value>,
    ) -> Result<Self, KafkaLayerError> {
        let config = JsonFormattingLayerConfig {
            static_top_level_fields: static_fields,
            top_level_keys: HashSet::new(),
            log_span_lifecycles: true,
            additional_fields_placement: AdditionalFieldsPlacement::TopLevel,
        };

        let inner: JsonFormattingLayer<KafkaWriter, serde_json::ser::CompactFormatter> =
            JsonFormattingLayer::new(config, kafka_writer, serde_json::ser::CompactFormatter)?;

        Ok(Self { inner })
    }
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_25_15
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

impl KafkaLayer {
    /// Creates a new builder for configuring a KafkaLayer.
    pub fn builder() -> KafkaLayerBuilder {
        KafkaLayerBuilder::new()
    }

    /// Creates a new KafkaLayer from a pre-configured KafkaWriter.
    /// This is primarily used internally by the builder.
    pub(crate) fn from_writer(
        kafka_writer: KafkaWriter,
        static_fields: HashMap<String, serde_json::Value>,
    ) -> Result<Self, KafkaLayerError> {
        let config = JsonFormattingLayerConfig {
            static_top_level_fields: static_fields,
            top_level_keys: HashSet::new(),
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_25_30
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

impl KafkaLayer {
    /// Creates a new builder for configuring a KafkaLayer.
    pub fn builder() -> KafkaLayerBuilder {
        KafkaLayerBuilder::new()
    }

    /// Creates a new KafkaLayer from a pre-configured KafkaWriter.
    /// This is primarily used internally by the builder.
    pub(crate) fn from_writer(
        kafka_writer: KafkaWriter,
        static_fields: HashMap<String, serde_json::Value>,
    ) -> Result<Self, KafkaLayerError> {
        let config = JsonFormattingLayerConfig {
            static_top_level_fields: static_fields,
            top_level_keys: HashSet::new(),
            log_span_lifecycles: true,
            additional_fields_placement: AdditionalFieldsPlacement::TopLevel,
        };

        let inner: JsonFormattingLayer<KafkaWriter, serde_json::ser::CompactFormatter> =
            JsonFormattingLayer::new(config, kafka_writer, serde_json::ser::CompactFormatter)?;

        Ok(Self { inner })
    }
}

impl<S> Layer<S> for KafkaLayer
where
    S: Subscriber + for<'lookup> tracing_subscriber::registry::LookupSpan<'lookup>,
{
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_25_50
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

impl KafkaLayer {
    /// Creates a new builder for configuring a KafkaLayer.
    pub fn builder() -> KafkaLayerBuilder {
        KafkaLayerBuilder::new()
    }

    /// Creates a new KafkaLayer from a pre-configured KafkaWriter.
    /// This is primarily used internally by the builder.
    pub(crate) fn from_writer(
        kafka_writer: KafkaWriter,
        static_fields: HashMap<String, serde_json::Value>,
    ) -> Result<Self, KafkaLayerError> {
        let config = JsonFormattingLayerConfig {
            static_top_level_fields: static_fields,
            top_level_keys: HashSet::new(),
            log_span_lifecycles: true,
            additional_fields_placement: AdditionalFieldsPlacement::TopLevel,
        };

        let inner: JsonFormattingLayer<KafkaWriter, serde_json::ser::CompactFormatter> =
            JsonFormattingLayer::new(config, kafka_writer, serde_json::ser::CompactFormatter)?;

        Ok(Self { inner })
    }
}

impl<S> Layer<S> for KafkaLayer
where
    S: Subscriber + for<'lookup> tracing_subscriber::registry::LookupSpan<'lookup>,
{
    fn on_event(&self, event: &tracing::Event<'_>, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_event(event, ctx);
    }

    fn on_new_span(
        &self,
        attrs: &tracing::span::Attributes<'_>,
        id: &tracing::span::Id,
        ctx: tracing_subscriber::layer::Context<'_, S>,
    ) {
        self.inner.on_new_span(attrs, id, ctx);
    }

    fn on_enter(&self, id: &tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_enter(id, ctx);
    }

    fn on_exit(&self, id: &tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_exit(id, ctx);
    }
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_50_15
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

impl<S> Layer<S> for KafkaLayer
where
    S: Subscriber + for<'lookup> tracing_subscriber::registry::LookupSpan<'lookup>,
{
    fn on_event(&self, event: &tracing::Event<'_>, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_event(event, ctx);
    }

    fn on_new_span(
        &self,
        attrs: &tracing::span::Attributes<'_>,
        id: &tracing::span::Id,
        ctx: tracing_subscriber::layer::Context<'_, S>,
    ) {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_50_30
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

impl<S> Layer<S> for KafkaLayer
where
    S: Subscriber + for<'lookup> tracing_subscriber::registry::LookupSpan<'lookup>,
{
    fn on_event(&self, event: &tracing::Event<'_>, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_event(event, ctx);
    }

    fn on_new_span(
        &self,
        attrs: &tracing::span::Attributes<'_>,
        id: &tracing::span::Id,
        ctx: tracing_subscriber::layer::Context<'_, S>,
    ) {
        self.inner.on_new_span(attrs, id, ctx);
    }

    fn on_enter(&self, id: &tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_enter(id, ctx);
    }

    fn on_exit(&self, id: &tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_exit(id, ctx);
    }

    fn on_close(&self, id: tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_close(id, ctx);
    }
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_50_50
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

impl<S> Layer<S> for KafkaLayer
where
    S: Subscriber + for<'lookup> tracing_subscriber::registry::LookupSpan<'lookup>,
{
    fn on_event(&self, event: &tracing::Event<'_>, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_event(event, ctx);
    }

    fn on_new_span(
        &self,
        attrs: &tracing::span::Attributes<'_>,
        id: &tracing::span::Id,
        ctx: tracing_subscriber::layer::Context<'_, S>,
    ) {
        self.inner.on_new_span(attrs, id, ctx);
    }

    fn on_enter(&self, id: &tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_enter(id, ctx);
    }

    fn on_exit(&self, id: &tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_exit(id, ctx);
    }

    fn on_close(&self, id: tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_close(id, ctx);
    }
}

impl KafkaLayer {
    /// Boxes the layer, making it easier to compose with other layers.
    pub fn boxed<S>(self) -> Box<dyn Layer<S> + Send + Sync + 'static>
    where
        Self: Layer<S> + Sized + Send + Sync + 'static,
        S: Subscriber + for<'span> tracing_subscriber::registry::LookupSpan<'span>,
    {
        Box::new(self)
    }
}

/// Errors that can occur when creating a KafkaLayer.
#[derive(Debug, thiserror::Error)]
pub enum KafkaLayerError {
    #[error("Kafka writer error: {0}")]
    Writer(#[from] KafkaWriterError),
    #[error("Logger configuration error: {0}")]
    Logger(#[from] LoggerError),
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 50, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_75_15
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

    fn on_close(&self, id: tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_close(id, ctx);
    }
}

impl KafkaLayer {
    /// Boxes the layer, making it easier to compose with other layers.
    pub fn boxed<S>(self) -> Box<dyn Layer<S> + Send + Sync + 'static>
    where
        Self: Layer<S> + Sized + Send + Sync + 'static,
        S: Subscriber + for<'span> tracing_subscriber::registry::LookupSpan<'span>,
    {
        Box::new(self)
    }
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_75_30
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

    fn on_close(&self, id: tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_close(id, ctx);
    }
}

impl KafkaLayer {
    /// Boxes the layer, making it easier to compose with other layers.
    pub fn boxed<S>(self) -> Box<dyn Layer<S> + Send + Sync + 'static>
    where
        Self: Layer<S> + Sized + Send + Sync + 'static,
        S: Subscriber + for<'span> tracing_subscriber::registry::LookupSpan<'span>,
    {
        Box::new(self)
    }
}

/// Errors that can occur when creating a KafkaLayer.
#[derive(Debug, thiserror::Error)]
pub enum KafkaLayerError {
    #[error("Kafka writer error: {0}")]
    Writer(#[from] KafkaWriterError),
    #[error("Logger configuration error: {0}")]
    Logger(#[from] LoggerError),
    #[error("Missing brokers configuration")]
    MissingBrokers,
    #[error("Missing topic configuration")]
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_75_50
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

    fn on_close(&self, id: tracing::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) {
        self.inner.on_close(id, ctx);
    }
}

impl KafkaLayer {
    /// Boxes the layer, making it easier to compose with other layers.
    pub fn boxed<S>(self) -> Box<dyn Layer<S> + Send + Sync + 'static>
    where
        Self: Layer<S> + Sized + Send + Sync + 'static,
        S: Subscriber + for<'span> tracing_subscriber::registry::LookupSpan<'span>,
    {
        Box::new(self)
    }
}

/// Errors that can occur when creating a KafkaLayer.
#[derive(Debug, thiserror::Error)]
pub enum KafkaLayerError {
    #[error("Kafka writer error: {0}")]
    Writer(#[from] KafkaWriterError),
    #[error("Logger configuration error: {0}")]
    Logger(#[from] LoggerError),
    #[error("Missing brokers configuration")]
    MissingBrokers,
    #[error("Missing topic configuration")]
    MissingTopic,
}

/// Builder for creating a KafkaLayer with custom configuration.
#[derive(Debug, Clone, Default)]
pub struct KafkaLayerBuilder {
    writer_builder: KafkaWriterBuilder,
    static_fields: HashMap<String, serde_json::Value>,
}

impl KafkaLayerBuilder {
    /// Creates a new builder with default settings.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the Kafka brokers to connect to.
    pub fn brokers(mut self, brokers: &[&str]) -> Self {
        self.writer_builder = self
            .writer_builder
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 75, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_100_15
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

    #[error("Missing brokers configuration")]
    MissingBrokers,
    #[error("Missing topic configuration")]
    MissingTopic,
}

/// Builder for creating a KafkaLayer with custom configuration.
#[derive(Debug, Clone, Default)]
pub struct KafkaLayerBuilder {
    writer_builder: KafkaWriterBuilder,
    static_fields: HashMap<String, serde_json::Value>,
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_100_30
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

    #[error("Missing brokers configuration")]
    MissingBrokers,
    #[error("Missing topic configuration")]
    MissingTopic,
}

/// Builder for creating a KafkaLayer with custom configuration.
#[derive(Debug, Clone, Default)]
pub struct KafkaLayerBuilder {
    writer_builder: KafkaWriterBuilder,
    static_fields: HashMap<String, serde_json::Value>,
}

impl KafkaLayerBuilder {
    /// Creates a new builder with default settings.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the Kafka brokers to connect to.
    pub fn brokers(mut self, brokers: &[&str]) -> Self {
        self.writer_builder = self
            .writer_builder
            .brokers(brokers.iter().map(|s| s.to_string()).collect());
        self
    }

    /// Sets the Kafka topic to send logs to.
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_100_50
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs

    #[error("Missing brokers configuration")]
    MissingBrokers,
    #[error("Missing topic configuration")]
    MissingTopic,
}

/// Builder for creating a KafkaLayer with custom configuration.
#[derive(Debug, Clone, Default)]
pub struct KafkaLayerBuilder {
    writer_builder: KafkaWriterBuilder,
    static_fields: HashMap<String, serde_json::Value>,
}

impl KafkaLayerBuilder {
    /// Creates a new builder with default settings.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the Kafka brokers to connect to.
    pub fn brokers(mut self, brokers: &[&str]) -> Self {
        self.writer_builder = self
            .writer_builder
            .brokers(brokers.iter().map(|s| s.to_string()).collect());
        self
    }

    /// Sets the Kafka topic to send logs to.
    pub fn topic(mut self, topic: impl Into<String>) -> Self {
        self.writer_builder = self.writer_builder.topic(topic);
        self
    }

    /// Sets the batch size for buffering messages before sending.
    pub fn batch_size(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.batch_size(size);
        self
    }

    /// Sets the linger time in milliseconds.
    pub fn linger_ms(mut self, ms: u64) -> Self {
        self.writer_builder = self.writer_builder.linger_ms(ms);
        self
    }

    /// Sets the linger time as a Duration.
    pub fn linger(mut self, duration: Duration) -> Self {
        self.writer_builder = self.writer_builder.linger(duration);
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 100, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_125_15
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs
            .brokers(brokers.iter().map(|s| s.to_string()).collect());
        self
    }

    /// Sets the Kafka topic to send logs to.
    pub fn topic(mut self, topic: impl Into<String>) -> Self {
        self.writer_builder = self.writer_builder.topic(topic);
        self
    }

    /// Sets the batch size for buffering messages before sending.
    pub fn batch_size(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.batch_size(size);
        self
    }
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 125, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_125_30
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs
            .brokers(brokers.iter().map(|s| s.to_string()).collect());
        self
    }

    /// Sets the Kafka topic to send logs to.
    pub fn topic(mut self, topic: impl Into<String>) -> Self {
        self.writer_builder = self.writer_builder.topic(topic);
        self
    }

    /// Sets the batch size for buffering messages before sending.
    pub fn batch_size(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.batch_size(size);
        self
    }

    /// Sets the linger time in milliseconds.
    pub fn linger_ms(mut self, ms: u64) -> Self {
        self.writer_builder = self.writer_builder.linger_ms(ms);
        self
    }

    /// Sets the linger time as a Duration.
    pub fn linger(mut self, duration: Duration) -> Self {
        self.writer_builder = self.writer_builder.linger(duration);
        self
    }

    /// Sets the maximum number of messages to buffer in the producer's queue.
    pub fn queue_buffering_max_messages(mut self, size: usize) -> Self {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 125, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_125_50
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs
            .brokers(brokers.iter().map(|s| s.to_string()).collect());
        self
    }

    /// Sets the Kafka topic to send logs to.
    pub fn topic(mut self, topic: impl Into<String>) -> Self {
        self.writer_builder = self.writer_builder.topic(topic);
        self
    }

    /// Sets the batch size for buffering messages before sending.
    pub fn batch_size(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.batch_size(size);
        self
    }

    /// Sets the linger time in milliseconds.
    pub fn linger_ms(mut self, ms: u64) -> Self {
        self.writer_builder = self.writer_builder.linger_ms(ms);
        self
    }

    /// Sets the linger time as a Duration.
    pub fn linger(mut self, duration: Duration) -> Self {
        self.writer_builder = self.writer_builder.linger(duration);
        self
    }

    /// Sets the maximum number of messages to buffer in the producer's queue.
    pub fn queue_buffering_max_messages(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.queue_buffering_max_messages(size);
        self
    }

    /// Sets the maximum size of the producer's queue in kilobytes.
    pub fn queue_buffering_max_kbytes(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.queue_buffering_max_kbytes(size);
        self
    }

    /// Sets the reconnect backoff times.
    pub fn reconnect_backoff(mut self, min: Duration, max: Duration) -> Self {
        self.writer_builder = self.writer_builder.reconnect_backoff(min, max);
        self
    }

    /// Adds static fields that will be included in every log entry.
    /// These fields are added at the top level of the JSON output.
    pub fn static_fields(mut self, fields: HashMap<String, serde_json::Value>) -> Self {
        self.static_fields = fields;
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 125, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_150_15
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs
        self
    }

    /// Sets the maximum number of messages to buffer in the producer's queue.
    pub fn queue_buffering_max_messages(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.queue_buffering_max_messages(size);
        self
    }

    /// Sets the maximum size of the producer's queue in kilobytes.
    pub fn queue_buffering_max_kbytes(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.queue_buffering_max_kbytes(size);
        self
    }
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 150, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_150_30
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs
        self
    }

    /// Sets the maximum number of messages to buffer in the producer's queue.
    pub fn queue_buffering_max_messages(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.queue_buffering_max_messages(size);
        self
    }

    /// Sets the maximum size of the producer's queue in kilobytes.
    pub fn queue_buffering_max_kbytes(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.queue_buffering_max_kbytes(size);
        self
    }

    /// Sets the reconnect backoff times.
    pub fn reconnect_backoff(mut self, min: Duration, max: Duration) -> Self {
        self.writer_builder = self.writer_builder.reconnect_backoff(min, max);
        self
    }

    /// Adds static fields that will be included in every log entry.
    /// These fields are added at the top level of the JSON output.
    pub fn static_fields(mut self, fields: HashMap<String, serde_json::Value>) -> Self {
        self.static_fields = fields;
        self
    }

    /// Adds a single static field that will be included in every log entry.
    pub fn add_static_field(mut self, key: String, value: serde_json::Value) -> Self {
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 150, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_150_50
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs
        self
    }

    /// Sets the maximum number of messages to buffer in the producer's queue.
    pub fn queue_buffering_max_messages(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.queue_buffering_max_messages(size);
        self
    }

    /// Sets the maximum size of the producer's queue in kilobytes.
    pub fn queue_buffering_max_kbytes(mut self, size: usize) -> Self {
        self.writer_builder = self.writer_builder.queue_buffering_max_kbytes(size);
        self
    }

    /// Sets the reconnect backoff times.
    pub fn reconnect_backoff(mut self, min: Duration, max: Duration) -> Self {
        self.writer_builder = self.writer_builder.reconnect_backoff(min, max);
        self
    }

    /// Adds static fields that will be included in every log entry.
    /// These fields are added at the top level of the JSON output.
    pub fn static_fields(mut self, fields: HashMap<String, serde_json::Value>) -> Self {
        self.static_fields = fields;
        self
    }

    /// Adds a single static field that will be included in every log entry.
    pub fn add_static_field(mut self, key: String, value: serde_json::Value) -> Self {
        self.static_fields.insert(key, value);
        self
    }

    /// Builds the KafkaLayer with the configured settings.
    pub fn build(self) -> Result<KafkaLayer, KafkaLayerError> {
        let kafka_writer = self.writer_builder.build()?;
        KafkaLayer::from_writer(kafka_writer, self.static_fields)
    }
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 40, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 150, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_175_15
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs
        self
    }

    /// Adds a single static field that will be included in every log entry.
    pub fn add_static_field(mut self, key: String, value: serde_json::Value) -> Self {
        self.static_fields.insert(key, value);
        self
    }

    /// Builds the KafkaLayer with the configured settings.
    pub fn build(self) -> Result<KafkaLayer, KafkaLayerError> {
        let kafka_writer = self.writer_builder.build()?;
        KafkaLayer::from_writer(kafka_writer, self.static_fields)
    }
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 175, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_175_30
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs
        self
    }

    /// Adds a single static field that will be included in every log entry.
    pub fn add_static_field(mut self, key: String, value: serde_json::Value) -> Self {
        self.static_fields.insert(key, value);
        self
    }

    /// Builds the KafkaLayer with the configured settings.
    pub fn build(self) -> Result<KafkaLayer, KafkaLayerError> {
        let kafka_writer = self.writer_builder.build()?;
        KafkaLayer::from_writer(kafka_writer, self.static_fields)
    }
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 175, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-6998519293309712771_175_50
clm
snippet
// connector-service/backend/tracing-kafka/src/layer.rs
        self
    }

    /// Adds a single static field that will be included in every log entry.
    pub fn add_static_field(mut self, key: String, value: serde_json::Value) -> Self {
        self.static_fields.insert(key, value);
        self
    }

    /// Builds the KafkaLayer with the configured settings.
    pub fn build(self) -> Result<KafkaLayer, KafkaLayerError> {
        let kafka_writer = self.writer_builder.build()?;
        KafkaLayer::from_writer(kafka_writer, self.static_fields)
    }
}
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 175, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-7694171944301536872_0_15
clm
snippet
// connector-service/backend/tracing-kafka/src/builder.rs
//! Builder pattern implementation for KafkaWriter

use std::time::Duration;

use super::writer::{KafkaWriter, KafkaWriterError};

/// Builder for creating a KafkaWriter with custom configuration
#[derive(Debug, Clone, Default)]
pub struct KafkaWriterBuilder {
    brokers: Option<Vec<String>>,
    topic: Option<String>,
    batch_size: Option<usize>,
    linger_ms: Option<u64>,
    queue_buffering_max_messages: Option<usize>,
    queue_buffering_max_kbytes: Option<usize>,
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-7694171944301536872_0_30
clm
snippet
// connector-service/backend/tracing-kafka/src/builder.rs
//! Builder pattern implementation for KafkaWriter

use std::time::Duration;

use super::writer::{KafkaWriter, KafkaWriterError};

/// Builder for creating a KafkaWriter with custom configuration
#[derive(Debug, Clone, Default)]
pub struct KafkaWriterBuilder {
    brokers: Option<Vec<String>>,
    topic: Option<String>,
    batch_size: Option<usize>,
    linger_ms: Option<u64>,
    queue_buffering_max_messages: Option<usize>,
    queue_buffering_max_kbytes: Option<usize>,
    reconnect_backoff_min_ms: Option<u64>,
    reconnect_backoff_max_ms: Option<u64>,
}

impl KafkaWriterBuilder {
    /// Creates a new builder with default settings
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the Kafka brokers to connect to
    pub fn brokers(mut self, brokers: Vec<String>) -> Self {
        self.brokers = Some(brokers);
        self
    }
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 30, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-7694171944301536872_0_50
clm
snippet
// connector-service/backend/tracing-kafka/src/builder.rs
//! Builder pattern implementation for KafkaWriter

use std::time::Duration;

use super::writer::{KafkaWriter, KafkaWriterError};

/// Builder for creating a KafkaWriter with custom configuration
#[derive(Debug, Clone, Default)]
pub struct KafkaWriterBuilder {
    brokers: Option<Vec<String>>,
    topic: Option<String>,
    batch_size: Option<usize>,
    linger_ms: Option<u64>,
    queue_buffering_max_messages: Option<usize>,
    queue_buffering_max_kbytes: Option<usize>,
    reconnect_backoff_min_ms: Option<u64>,
    reconnect_backoff_max_ms: Option<u64>,
}

impl KafkaWriterBuilder {
    /// Creates a new builder with default settings
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the Kafka brokers to connect to
    pub fn brokers(mut self, brokers: Vec<String>) -> Self {
        self.brokers = Some(brokers);
        self
    }

    /// Sets the Kafka topic to send logs to
    pub fn topic(mut self, topic: impl Into<String>) -> Self {
        self.topic = Some(topic.into());
        self
    }

    /// Sets the batch size for buffering messages before sending
    pub fn batch_size(mut self, size: usize) -> Self {
        self.batch_size = Some(size);
        self
    }

    /// Sets the linger time in milliseconds
    pub fn linger_ms(mut self, ms: u64) -> Self {
        self.linger_ms = Some(ms);
        self
    }

    /// Sets the linger time as a Duration
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 50, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 0, "struct_name": null, "total_crates": null, "trait_name": null }
connector-service_snippet_-7694171944301536872_25_15
clm
snippet
// connector-service/backend/tracing-kafka/src/builder.rs
    /// Sets the Kafka brokers to connect to
    pub fn brokers(mut self, brokers: Vec<String>) -> Self {
        self.brokers = Some(brokers);
        self
    }

    /// Sets the Kafka topic to send logs to
    pub fn topic(mut self, topic: impl Into<String>) -> Self {
        self.topic = Some(topic.into());
        self
    }

    /// Sets the batch size for buffering messages before sending
    pub fn batch_size(mut self, size: usize) -> Self {
        self.batch_size = Some(size);
{ "chunk": null, "crate": "tracing-kafka", "enum_name": null, "file_size": null, "for_type": null, "function_name": null, "is_async": null, "is_pub": null, "lines": 15, "method_name": null, "num_enums": null, "num_items": null, "num_structs": null, "repo": "connector-service", "start_line": 25, "struct_name": null, "total_crates": null, "trait_name": null }