hexsha
stringlengths
40
40
size
int64
2
1.05M
content
stringlengths
2
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
720e03597546e9fd6172431c482bab2de47f9578
3,753
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use anyhow; use argh::FromArgs; use fidl::encoding::Decodable; use fidl::endpoints::create_endpoints; use fidl_fuchsia_media_sessions2::*; use fuchsia_async as fasync; use fuchsia_component::client; use futures::prelude::*; #[derive(FromArgs)] /// A tool for inspecting and controlling registered media sessions. struct Invocation { #[argh(subcommand)] command: Command, } #[derive(FromArgs, PartialEq, Debug)] #[argh(subcommand)] enum Command { Ls(Ls), Info(Info), Control(Control), } #[derive(FromArgs, PartialEq, Debug)] #[argh(subcommand, name = "ls")] /// List registered session states as they arrive. struct Ls {} #[derive(FromArgs, PartialEq, Debug)] #[argh(subcommand, name = "info")] /// Announce info about a session as it arrives. struct Info { #[argh(option, short = 'i')] /// id of a session session_id: u64, } #[derive(FromArgs, PartialEq, Debug)] #[argh(subcommand, name = "control")] /// Issue a control command to a session. struct Control { #[argh(option, short = 'i')] /// id of a session session_id: u64, #[argh(subcommand)] command: ControlCommand, } #[derive(FromArgs, PartialEq, Debug)] #[argh(subcommand)] enum ControlCommand { Play(Play), Pause(Pause), Stop(Stop), } #[derive(FromArgs, PartialEq, Debug)] #[argh(subcommand, name = "play")] /// Initiates playback. struct Play {} #[derive(FromArgs, PartialEq, Debug)] #[argh(subcommand, name = "pause")] /// Pauses playback. struct Pause {} #[derive(FromArgs, PartialEq, Debug)] #[argh(subcommand, name = "stop")] /// Tears down the session. 
struct Stop {} #[fasync::run_singlethreaded] async fn main() -> Result<(), anyhow::Error> { let invocation: Invocation = argh::from_env(); let discovery = client::connect_to_protocol::<DiscoveryMarker>()?; let (watcher_client, watcher_server) = create_endpoints()?; let mut watcher_requests = watcher_server.into_stream()?; match invocation.command { Command::Ls(_) => { discovery.watch_sessions(WatchOptions::new_empty(), watcher_client)?; while let Some((id, delta, responder)) = watcher_requests .try_next() .await? .and_then(SessionsWatcherRequest::into_session_updated) { responder.send()?; println!( "[{}] State: {:?}", id, delta.player_status.and_then(|ps| ps.player_state) ); } } Command::Info(info) => { discovery.watch_sessions(WatchOptions::new_empty(), watcher_client)?; while let Some((id, delta, responder)) = watcher_requests .try_next() .await? .and_then(SessionsWatcherRequest::into_session_updated) { if id == info.session_id { println!("{:#?}", delta); break; } else { responder.send()?; } } } Command::Control(control) => { let (session_client, session_request) = create_endpoints()?; let proxy: SessionControlProxy = session_client.into_proxy()?; match control.command { ControlCommand::Play(_) => proxy.play()?, ControlCommand::Pause(_) => proxy.pause()?, ControlCommand::Stop(_) => proxy.stop()?, } discovery.connect_to_session(control.session_id, session_request)?; } } Ok(()) }
29.093023
81
0.594991
3a91608908f0b1615c08de9e093a8c32ced82d9b
214
extern crate slog_stdlog; extern crate slog_envlogger; #[macro_use] extern crate log; fn main() { let _guard = slog_envlogger::init().unwrap(); error!("error"); info!("info"); trace!("trace"); }
15.285714
49
0.640187
e546e921924144e06a49e91e22e46654f3f0c378
963
// The first three verses (0, 1, and 2 bottles) break the regular pattern,
// so they are spelled out verbatim instead of being generated.
const VERSES: [&'static str; 3] =
    ["No more bottles of beer on the wall, no more bottles of beer.\nGo to the store and buy \
      some more, 99 bottles of beer on the wall.\n",
     "1 bottle of beer on the wall, 1 bottle of beer.\nTake it down and pass it around, no more \
      bottles of beer on the wall.\n",
     "2 bottles of beer on the wall, 2 bottles of beer.\nTake one down and pass it around, 1 \
      bottle of beer on the wall.\n"];

/// Returns verse `i` of "99 Bottles of Beer", ending with a newline.
///
/// Verses 0-2 come from the hard-coded table; every other verse follows
/// the regular "{i} bottles ... {i - 1} bottles" pattern.
pub fn verse(i: usize) -> String {
    if i < VERSES.len() {
        VERSES[i].to_string()
    } else {
        format!("{0} bottles of beer on the wall, {0} bottles of beer.\nTake one down and \
                 pass it around, {1} bottles of beer on the wall.\n",
                i,
                i - 1)
    }
}

/// Sings from verse `start` down through verse `end` inclusive, with a
/// blank line between consecutive verses. An empty range yields "".
pub fn sing(start: usize, end: usize) -> String {
    let mut parts: Vec<String> = Vec::new();
    let mut n = start + 1;
    while n > end {
        n -= 1;
        parts.push(verse(n));
    }
    parts.join("\n")
}
38.52
97
0.563863
14974eb89b1aebd874514d25a043809e81828c2a
2,072
use crate::buf::IoBuf;
use crate::driver::{Op, SharedFd};
use crate::BufResult;
use socket2::SockAddr;
use std::io::IoSlice;
use std::task::{Context, Poll};
use std::{boxed::Box, io, net::SocketAddr};

// In-flight state for a sendmsg-based send-to operation.
//
// `io_slices` and `socket_addr` are never read after construction (hence the
// dead_code allows), but they own the memory that `msghdr` points into, so
// they must be kept alive for as long as the kernel may read through it.
pub(crate) struct SendTo<T> {
    // Held to keep the file descriptor alive while the operation is in flight.
    #[allow(dead_code)]
    fd: SharedFd,
    pub(crate) buf: T,
    #[allow(dead_code)]
    io_slices: Vec<IoSlice<'static>>,
    #[allow(dead_code)]
    socket_addr: Box<SockAddr>,
    pub(crate) msghdr: Box<libc::msghdr>,
}

impl<T: IoBuf> Op<SendTo<T>> {
    // Builds and submits a SendMsg operation targeting `socket_addr`, backed
    // by the initialized bytes of `buf`.
    pub(crate) fn send_to(
        fd: &SharedFd,
        buf: T,
        socket_addr: SocketAddr,
    ) -> io::Result<Op<SendTo<T>>> {
        use io_uring::{opcode, types};

        // One iovec covering buf's initialized bytes. The raw slice stays
        // valid because `buf` is moved into the SendTo state below and kept
        // alive for the duration of the operation.
        let io_slices = vec![IoSlice::new(unsafe {
            std::slice::from_raw_parts(buf.stable_ptr(), buf.bytes_init())
        })];

        let socket_addr = Box::new(SockAddr::from(socket_addr));

        // Zero-init the msghdr, then point it at the iovec and destination
        // address. Both targets live on the heap (Vec buffer / Box), so moving
        // the owning SendTo struct into submit_with does not move the pointed-to
        // memory.
        let mut msghdr: Box<libc::msghdr> = Box::new(unsafe { std::mem::zeroed() });
        msghdr.msg_iov = io_slices.as_ptr() as *mut _;
        msghdr.msg_iovlen = io_slices.len() as _;
        msghdr.msg_name = socket_addr.as_ptr() as *mut libc::c_void;
        msghdr.msg_namelen = socket_addr.len();

        Op::submit_with(
            SendTo {
                fd: fd.clone(),
                buf,
                io_slices,
                socket_addr,
                msghdr,
            },
            |send_to| {
                opcode::SendMsg::new(
                    types::Fd(send_to.fd.raw_fd()),
                    send_to.msghdr.as_ref() as *const _,
                )
                .build()
            },
        )
    }

    // Awaits completion, returning the kernel's result (bytes sent on
    // success) paired with the buffer so the caller can reuse it.
    pub(crate) async fn send(mut self) -> BufResult<usize, T> {
        use crate::future::poll_fn;
        poll_fn(move |cx| self.poll_send(cx)).await
    }

    // Poll-based form of `send`; readies once the completion arrives.
    pub(crate) fn poll_send(&mut self, cx: &mut Context<'_>) -> Poll<BufResult<usize, T>> {
        use std::future::Future;
        use std::pin::Pin;
        let complete = ready!(Pin::new(self).poll(cx));
        Poll::Ready((complete.result.map(|v| v as _), complete.data.buf))
    }
}
28.777778
91
0.539093
e966b4f1ece918c9fa1b99096b41139c2923078e
494,250
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[derive(Debug)] pub(crate) struct Handle< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { pub(crate) client: aws_smithy_client::Client<C, M, R>, pub(crate) conf: crate::Config, } /// Client for AWS WAF Regional /// /// Client for invoking operations on AWS WAF Regional. Each operation on AWS WAF Regional is a method on this /// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service. /// /// # Examples /// **Constructing a client and invoking an operation** /// ```rust,no_run /// # async fn docs() { /// // create a shared configuration. This can be used & shared between multiple service clients. /// let shared_config = aws_config::load_from_env().await; /// let client = aws_sdk_wafregional::Client::new(&shared_config); /// // invoke an operation /// /* let rsp = client /// .<operation_name>(). 
/// .<param>("some value") /// .send().await; */ /// # } /// ``` /// **Constructing a client with custom configuration** /// ```rust,no_run /// use aws_config::RetryConfig; /// # async fn docs() { /// let shared_config = aws_config::load_from_env().await; /// let config = aws_sdk_wafregional::config::Builder::from(&shared_config) /// .retry_config(RetryConfig::disabled()) /// .build(); /// let client = aws_sdk_wafregional::Client::from_conf(config); /// # } #[derive(std::fmt::Debug)] pub struct Client< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<Handle<C, M, R>>, } impl<C, M, R> std::clone::Clone for Client<C, M, R> { fn clone(&self) -> Self { Self { handle: self.handle.clone(), } } } #[doc(inline)] pub use aws_smithy_client::Builder; impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> { fn from(client: aws_smithy_client::Client<C, M, R>) -> Self { Self::with_config(client, crate::Config::builder().build()) } } impl<C, M, R> Client<C, M, R> { /// Creates a client with the given service configuration. pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self { Self { handle: std::sync::Arc::new(Handle { client, conf }), } } /// Returns the client's configuration. pub fn conf(&self) -> &crate::Config { &self.handle.conf } } impl<C, M, R> Client<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Constructs a fluent builder for the `AssociateWebACL` operation. /// /// See [`AssociateWebACL`](crate::client::fluent_builders::AssociateWebACL) for more information about the /// operation and its arguments. 
pub fn associate_web_acl(&self) -> fluent_builders::AssociateWebACL<C, M, R> { fluent_builders::AssociateWebACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateByteMatchSet` operation. /// /// See [`CreateByteMatchSet`](crate::client::fluent_builders::CreateByteMatchSet) for more information about the /// operation and its arguments. pub fn create_byte_match_set(&self) -> fluent_builders::CreateByteMatchSet<C, M, R> { fluent_builders::CreateByteMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateGeoMatchSet` operation. /// /// See [`CreateGeoMatchSet`](crate::client::fluent_builders::CreateGeoMatchSet) for more information about the /// operation and its arguments. pub fn create_geo_match_set(&self) -> fluent_builders::CreateGeoMatchSet<C, M, R> { fluent_builders::CreateGeoMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateIPSet` operation. /// /// See [`CreateIPSet`](crate::client::fluent_builders::CreateIPSet) for more information about the /// operation and its arguments. pub fn create_ip_set(&self) -> fluent_builders::CreateIPSet<C, M, R> { fluent_builders::CreateIPSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateRateBasedRule` operation. /// /// See [`CreateRateBasedRule`](crate::client::fluent_builders::CreateRateBasedRule) for more information about the /// operation and its arguments. pub fn create_rate_based_rule(&self) -> fluent_builders::CreateRateBasedRule<C, M, R> { fluent_builders::CreateRateBasedRule::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateRegexMatchSet` operation. /// /// See [`CreateRegexMatchSet`](crate::client::fluent_builders::CreateRegexMatchSet) for more information about the /// operation and its arguments. 
pub fn create_regex_match_set(&self) -> fluent_builders::CreateRegexMatchSet<C, M, R> { fluent_builders::CreateRegexMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateRegexPatternSet` operation. /// /// See [`CreateRegexPatternSet`](crate::client::fluent_builders::CreateRegexPatternSet) for more information about the /// operation and its arguments. pub fn create_regex_pattern_set(&self) -> fluent_builders::CreateRegexPatternSet<C, M, R> { fluent_builders::CreateRegexPatternSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateRule` operation. /// /// See [`CreateRule`](crate::client::fluent_builders::CreateRule) for more information about the /// operation and its arguments. pub fn create_rule(&self) -> fluent_builders::CreateRule<C, M, R> { fluent_builders::CreateRule::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateRuleGroup` operation. /// /// See [`CreateRuleGroup`](crate::client::fluent_builders::CreateRuleGroup) for more information about the /// operation and its arguments. pub fn create_rule_group(&self) -> fluent_builders::CreateRuleGroup<C, M, R> { fluent_builders::CreateRuleGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateSizeConstraintSet` operation. /// /// See [`CreateSizeConstraintSet`](crate::client::fluent_builders::CreateSizeConstraintSet) for more information about the /// operation and its arguments. pub fn create_size_constraint_set(&self) -> fluent_builders::CreateSizeConstraintSet<C, M, R> { fluent_builders::CreateSizeConstraintSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateSqlInjectionMatchSet` operation. /// /// See [`CreateSqlInjectionMatchSet`](crate::client::fluent_builders::CreateSqlInjectionMatchSet) for more information about the /// operation and its arguments. 
pub fn create_sql_injection_match_set( &self, ) -> fluent_builders::CreateSqlInjectionMatchSet<C, M, R> { fluent_builders::CreateSqlInjectionMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateWebACL` operation. /// /// See [`CreateWebACL`](crate::client::fluent_builders::CreateWebACL) for more information about the /// operation and its arguments. pub fn create_web_acl(&self) -> fluent_builders::CreateWebACL<C, M, R> { fluent_builders::CreateWebACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateWebACLMigrationStack` operation. /// /// See [`CreateWebACLMigrationStack`](crate::client::fluent_builders::CreateWebACLMigrationStack) for more information about the /// operation and its arguments. pub fn create_web_acl_migration_stack( &self, ) -> fluent_builders::CreateWebACLMigrationStack<C, M, R> { fluent_builders::CreateWebACLMigrationStack::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateXssMatchSet` operation. /// /// See [`CreateXssMatchSet`](crate::client::fluent_builders::CreateXssMatchSet) for more information about the /// operation and its arguments. pub fn create_xss_match_set(&self) -> fluent_builders::CreateXssMatchSet<C, M, R> { fluent_builders::CreateXssMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteByteMatchSet` operation. /// /// See [`DeleteByteMatchSet`](crate::client::fluent_builders::DeleteByteMatchSet) for more information about the /// operation and its arguments. pub fn delete_byte_match_set(&self) -> fluent_builders::DeleteByteMatchSet<C, M, R> { fluent_builders::DeleteByteMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteGeoMatchSet` operation. /// /// See [`DeleteGeoMatchSet`](crate::client::fluent_builders::DeleteGeoMatchSet) for more information about the /// operation and its arguments. 
pub fn delete_geo_match_set(&self) -> fluent_builders::DeleteGeoMatchSet<C, M, R> { fluent_builders::DeleteGeoMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteIPSet` operation. /// /// See [`DeleteIPSet`](crate::client::fluent_builders::DeleteIPSet) for more information about the /// operation and its arguments. pub fn delete_ip_set(&self) -> fluent_builders::DeleteIPSet<C, M, R> { fluent_builders::DeleteIPSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteLoggingConfiguration` operation. /// /// See [`DeleteLoggingConfiguration`](crate::client::fluent_builders::DeleteLoggingConfiguration) for more information about the /// operation and its arguments. pub fn delete_logging_configuration( &self, ) -> fluent_builders::DeleteLoggingConfiguration<C, M, R> { fluent_builders::DeleteLoggingConfiguration::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeletePermissionPolicy` operation. /// /// See [`DeletePermissionPolicy`](crate::client::fluent_builders::DeletePermissionPolicy) for more information about the /// operation and its arguments. pub fn delete_permission_policy(&self) -> fluent_builders::DeletePermissionPolicy<C, M, R> { fluent_builders::DeletePermissionPolicy::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteRateBasedRule` operation. /// /// See [`DeleteRateBasedRule`](crate::client::fluent_builders::DeleteRateBasedRule) for more information about the /// operation and its arguments. pub fn delete_rate_based_rule(&self) -> fluent_builders::DeleteRateBasedRule<C, M, R> { fluent_builders::DeleteRateBasedRule::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteRegexMatchSet` operation. /// /// See [`DeleteRegexMatchSet`](crate::client::fluent_builders::DeleteRegexMatchSet) for more information about the /// operation and its arguments. 
pub fn delete_regex_match_set(&self) -> fluent_builders::DeleteRegexMatchSet<C, M, R> { fluent_builders::DeleteRegexMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteRegexPatternSet` operation. /// /// See [`DeleteRegexPatternSet`](crate::client::fluent_builders::DeleteRegexPatternSet) for more information about the /// operation and its arguments. pub fn delete_regex_pattern_set(&self) -> fluent_builders::DeleteRegexPatternSet<C, M, R> { fluent_builders::DeleteRegexPatternSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteRule` operation. /// /// See [`DeleteRule`](crate::client::fluent_builders::DeleteRule) for more information about the /// operation and its arguments. pub fn delete_rule(&self) -> fluent_builders::DeleteRule<C, M, R> { fluent_builders::DeleteRule::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteRuleGroup` operation. /// /// See [`DeleteRuleGroup`](crate::client::fluent_builders::DeleteRuleGroup) for more information about the /// operation and its arguments. pub fn delete_rule_group(&self) -> fluent_builders::DeleteRuleGroup<C, M, R> { fluent_builders::DeleteRuleGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteSizeConstraintSet` operation. /// /// See [`DeleteSizeConstraintSet`](crate::client::fluent_builders::DeleteSizeConstraintSet) for more information about the /// operation and its arguments. pub fn delete_size_constraint_set(&self) -> fluent_builders::DeleteSizeConstraintSet<C, M, R> { fluent_builders::DeleteSizeConstraintSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteSqlInjectionMatchSet` operation. /// /// See [`DeleteSqlInjectionMatchSet`](crate::client::fluent_builders::DeleteSqlInjectionMatchSet) for more information about the /// operation and its arguments. 
pub fn delete_sql_injection_match_set( &self, ) -> fluent_builders::DeleteSqlInjectionMatchSet<C, M, R> { fluent_builders::DeleteSqlInjectionMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteWebACL` operation. /// /// See [`DeleteWebACL`](crate::client::fluent_builders::DeleteWebACL) for more information about the /// operation and its arguments. pub fn delete_web_acl(&self) -> fluent_builders::DeleteWebACL<C, M, R> { fluent_builders::DeleteWebACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteXssMatchSet` operation. /// /// See [`DeleteXssMatchSet`](crate::client::fluent_builders::DeleteXssMatchSet) for more information about the /// operation and its arguments. pub fn delete_xss_match_set(&self) -> fluent_builders::DeleteXssMatchSet<C, M, R> { fluent_builders::DeleteXssMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `DisassociateWebACL` operation. /// /// See [`DisassociateWebACL`](crate::client::fluent_builders::DisassociateWebACL) for more information about the /// operation and its arguments. pub fn disassociate_web_acl(&self) -> fluent_builders::DisassociateWebACL<C, M, R> { fluent_builders::DisassociateWebACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetByteMatchSet` operation. /// /// See [`GetByteMatchSet`](crate::client::fluent_builders::GetByteMatchSet) for more information about the /// operation and its arguments. pub fn get_byte_match_set(&self) -> fluent_builders::GetByteMatchSet<C, M, R> { fluent_builders::GetByteMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetChangeToken` operation. /// /// See [`GetChangeToken`](crate::client::fluent_builders::GetChangeToken) for more information about the /// operation and its arguments. 
pub fn get_change_token(&self) -> fluent_builders::GetChangeToken<C, M, R> { fluent_builders::GetChangeToken::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetChangeTokenStatus` operation. /// /// See [`GetChangeTokenStatus`](crate::client::fluent_builders::GetChangeTokenStatus) for more information about the /// operation and its arguments. pub fn get_change_token_status(&self) -> fluent_builders::GetChangeTokenStatus<C, M, R> { fluent_builders::GetChangeTokenStatus::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetGeoMatchSet` operation. /// /// See [`GetGeoMatchSet`](crate::client::fluent_builders::GetGeoMatchSet) for more information about the /// operation and its arguments. pub fn get_geo_match_set(&self) -> fluent_builders::GetGeoMatchSet<C, M, R> { fluent_builders::GetGeoMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetIPSet` operation. /// /// See [`GetIPSet`](crate::client::fluent_builders::GetIPSet) for more information about the /// operation and its arguments. pub fn get_ip_set(&self) -> fluent_builders::GetIPSet<C, M, R> { fluent_builders::GetIPSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetLoggingConfiguration` operation. /// /// See [`GetLoggingConfiguration`](crate::client::fluent_builders::GetLoggingConfiguration) for more information about the /// operation and its arguments. pub fn get_logging_configuration(&self) -> fluent_builders::GetLoggingConfiguration<C, M, R> { fluent_builders::GetLoggingConfiguration::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetPermissionPolicy` operation. /// /// See [`GetPermissionPolicy`](crate::client::fluent_builders::GetPermissionPolicy) for more information about the /// operation and its arguments. 
pub fn get_permission_policy(&self) -> fluent_builders::GetPermissionPolicy<C, M, R> { fluent_builders::GetPermissionPolicy::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetRateBasedRule` operation. /// /// See [`GetRateBasedRule`](crate::client::fluent_builders::GetRateBasedRule) for more information about the /// operation and its arguments. pub fn get_rate_based_rule(&self) -> fluent_builders::GetRateBasedRule<C, M, R> { fluent_builders::GetRateBasedRule::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetRateBasedRuleManagedKeys` operation. /// /// See [`GetRateBasedRuleManagedKeys`](crate::client::fluent_builders::GetRateBasedRuleManagedKeys) for more information about the /// operation and its arguments. pub fn get_rate_based_rule_managed_keys( &self, ) -> fluent_builders::GetRateBasedRuleManagedKeys<C, M, R> { fluent_builders::GetRateBasedRuleManagedKeys::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetRegexMatchSet` operation. /// /// See [`GetRegexMatchSet`](crate::client::fluent_builders::GetRegexMatchSet) for more information about the /// operation and its arguments. pub fn get_regex_match_set(&self) -> fluent_builders::GetRegexMatchSet<C, M, R> { fluent_builders::GetRegexMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetRegexPatternSet` operation. /// /// See [`GetRegexPatternSet`](crate::client::fluent_builders::GetRegexPatternSet) for more information about the /// operation and its arguments. pub fn get_regex_pattern_set(&self) -> fluent_builders::GetRegexPatternSet<C, M, R> { fluent_builders::GetRegexPatternSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetRule` operation. /// /// See [`GetRule`](crate::client::fluent_builders::GetRule) for more information about the /// operation and its arguments. 
pub fn get_rule(&self) -> fluent_builders::GetRule<C, M, R> { fluent_builders::GetRule::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetRuleGroup` operation. /// /// See [`GetRuleGroup`](crate::client::fluent_builders::GetRuleGroup) for more information about the /// operation and its arguments. pub fn get_rule_group(&self) -> fluent_builders::GetRuleGroup<C, M, R> { fluent_builders::GetRuleGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetSampledRequests` operation. /// /// See [`GetSampledRequests`](crate::client::fluent_builders::GetSampledRequests) for more information about the /// operation and its arguments. pub fn get_sampled_requests(&self) -> fluent_builders::GetSampledRequests<C, M, R> { fluent_builders::GetSampledRequests::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetSizeConstraintSet` operation. /// /// See [`GetSizeConstraintSet`](crate::client::fluent_builders::GetSizeConstraintSet) for more information about the /// operation and its arguments. pub fn get_size_constraint_set(&self) -> fluent_builders::GetSizeConstraintSet<C, M, R> { fluent_builders::GetSizeConstraintSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetSqlInjectionMatchSet` operation. /// /// See [`GetSqlInjectionMatchSet`](crate::client::fluent_builders::GetSqlInjectionMatchSet) for more information about the /// operation and its arguments. pub fn get_sql_injection_match_set(&self) -> fluent_builders::GetSqlInjectionMatchSet<C, M, R> { fluent_builders::GetSqlInjectionMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetWebACL` operation. /// /// See [`GetWebACL`](crate::client::fluent_builders::GetWebACL) for more information about the /// operation and its arguments. 
pub fn get_web_acl(&self) -> fluent_builders::GetWebACL<C, M, R> { fluent_builders::GetWebACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetWebACLForResource` operation. /// /// See [`GetWebACLForResource`](crate::client::fluent_builders::GetWebACLForResource) for more information about the /// operation and its arguments. pub fn get_web_acl_for_resource(&self) -> fluent_builders::GetWebACLForResource<C, M, R> { fluent_builders::GetWebACLForResource::new(self.handle.clone()) } /// Constructs a fluent builder for the `GetXssMatchSet` operation. /// /// See [`GetXssMatchSet`](crate::client::fluent_builders::GetXssMatchSet) for more information about the /// operation and its arguments. pub fn get_xss_match_set(&self) -> fluent_builders::GetXssMatchSet<C, M, R> { fluent_builders::GetXssMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListActivatedRulesInRuleGroup` operation. /// /// See [`ListActivatedRulesInRuleGroup`](crate::client::fluent_builders::ListActivatedRulesInRuleGroup) for more information about the /// operation and its arguments. pub fn list_activated_rules_in_rule_group( &self, ) -> fluent_builders::ListActivatedRulesInRuleGroup<C, M, R> { fluent_builders::ListActivatedRulesInRuleGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListByteMatchSets` operation. /// /// See [`ListByteMatchSets`](crate::client::fluent_builders::ListByteMatchSets) for more information about the /// operation and its arguments. pub fn list_byte_match_sets(&self) -> fluent_builders::ListByteMatchSets<C, M, R> { fluent_builders::ListByteMatchSets::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListGeoMatchSets` operation. /// /// See [`ListGeoMatchSets`](crate::client::fluent_builders::ListGeoMatchSets) for more information about the /// operation and its arguments. 
pub fn list_geo_match_sets(&self) -> fluent_builders::ListGeoMatchSets<C, M, R> { fluent_builders::ListGeoMatchSets::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListIPSets` operation. /// /// See [`ListIPSets`](crate::client::fluent_builders::ListIPSets) for more information about the /// operation and its arguments. pub fn list_ip_sets(&self) -> fluent_builders::ListIPSets<C, M, R> { fluent_builders::ListIPSets::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListLoggingConfigurations` operation. /// /// See [`ListLoggingConfigurations`](crate::client::fluent_builders::ListLoggingConfigurations) for more information about the /// operation and its arguments. pub fn list_logging_configurations( &self, ) -> fluent_builders::ListLoggingConfigurations<C, M, R> { fluent_builders::ListLoggingConfigurations::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListRateBasedRules` operation. /// /// See [`ListRateBasedRules`](crate::client::fluent_builders::ListRateBasedRules) for more information about the /// operation and its arguments. pub fn list_rate_based_rules(&self) -> fluent_builders::ListRateBasedRules<C, M, R> { fluent_builders::ListRateBasedRules::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListRegexMatchSets` operation. /// /// See [`ListRegexMatchSets`](crate::client::fluent_builders::ListRegexMatchSets) for more information about the /// operation and its arguments. pub fn list_regex_match_sets(&self) -> fluent_builders::ListRegexMatchSets<C, M, R> { fluent_builders::ListRegexMatchSets::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListRegexPatternSets` operation. /// /// See [`ListRegexPatternSets`](crate::client::fluent_builders::ListRegexPatternSets) for more information about the /// operation and its arguments. 
pub fn list_regex_pattern_sets(&self) -> fluent_builders::ListRegexPatternSets<C, M, R> { fluent_builders::ListRegexPatternSets::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListResourcesForWebACL` operation. /// /// See [`ListResourcesForWebACL`](crate::client::fluent_builders::ListResourcesForWebACL) for more information about the /// operation and its arguments. pub fn list_resources_for_web_acl(&self) -> fluent_builders::ListResourcesForWebACL<C, M, R> { fluent_builders::ListResourcesForWebACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListRuleGroups` operation. /// /// See [`ListRuleGroups`](crate::client::fluent_builders::ListRuleGroups) for more information about the /// operation and its arguments. pub fn list_rule_groups(&self) -> fluent_builders::ListRuleGroups<C, M, R> { fluent_builders::ListRuleGroups::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListRules` operation. /// /// See [`ListRules`](crate::client::fluent_builders::ListRules) for more information about the /// operation and its arguments. pub fn list_rules(&self) -> fluent_builders::ListRules<C, M, R> { fluent_builders::ListRules::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListSizeConstraintSets` operation. /// /// See [`ListSizeConstraintSets`](crate::client::fluent_builders::ListSizeConstraintSets) for more information about the /// operation and its arguments. pub fn list_size_constraint_sets(&self) -> fluent_builders::ListSizeConstraintSets<C, M, R> { fluent_builders::ListSizeConstraintSets::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListSqlInjectionMatchSets` operation. /// /// See [`ListSqlInjectionMatchSets`](crate::client::fluent_builders::ListSqlInjectionMatchSets) for more information about the /// operation and its arguments. 
pub fn list_sql_injection_match_sets( &self, ) -> fluent_builders::ListSqlInjectionMatchSets<C, M, R> { fluent_builders::ListSqlInjectionMatchSets::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListSubscribedRuleGroups` operation. /// /// See [`ListSubscribedRuleGroups`](crate::client::fluent_builders::ListSubscribedRuleGroups) for more information about the /// operation and its arguments. pub fn list_subscribed_rule_groups( &self, ) -> fluent_builders::ListSubscribedRuleGroups<C, M, R> { fluent_builders::ListSubscribedRuleGroups::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListTagsForResource` operation. /// /// See [`ListTagsForResource`](crate::client::fluent_builders::ListTagsForResource) for more information about the /// operation and its arguments. pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource<C, M, R> { fluent_builders::ListTagsForResource::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListWebACLs` operation. /// /// See [`ListWebACLs`](crate::client::fluent_builders::ListWebACLs) for more information about the /// operation and its arguments. pub fn list_web_ac_ls(&self) -> fluent_builders::ListWebACLs<C, M, R> { fluent_builders::ListWebACLs::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListXssMatchSets` operation. /// /// See [`ListXssMatchSets`](crate::client::fluent_builders::ListXssMatchSets) for more information about the /// operation and its arguments. pub fn list_xss_match_sets(&self) -> fluent_builders::ListXssMatchSets<C, M, R> { fluent_builders::ListXssMatchSets::new(self.handle.clone()) } /// Constructs a fluent builder for the `PutLoggingConfiguration` operation. /// /// See [`PutLoggingConfiguration`](crate::client::fluent_builders::PutLoggingConfiguration) for more information about the /// operation and its arguments. 
pub fn put_logging_configuration(&self) -> fluent_builders::PutLoggingConfiguration<C, M, R> { fluent_builders::PutLoggingConfiguration::new(self.handle.clone()) } /// Constructs a fluent builder for the `PutPermissionPolicy` operation. /// /// See [`PutPermissionPolicy`](crate::client::fluent_builders::PutPermissionPolicy) for more information about the /// operation and its arguments. pub fn put_permission_policy(&self) -> fluent_builders::PutPermissionPolicy<C, M, R> { fluent_builders::PutPermissionPolicy::new(self.handle.clone()) } /// Constructs a fluent builder for the `TagResource` operation. /// /// See [`TagResource`](crate::client::fluent_builders::TagResource) for more information about the /// operation and its arguments. pub fn tag_resource(&self) -> fluent_builders::TagResource<C, M, R> { fluent_builders::TagResource::new(self.handle.clone()) } /// Constructs a fluent builder for the `UntagResource` operation. /// /// See [`UntagResource`](crate::client::fluent_builders::UntagResource) for more information about the /// operation and its arguments. pub fn untag_resource(&self) -> fluent_builders::UntagResource<C, M, R> { fluent_builders::UntagResource::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateByteMatchSet` operation. /// /// See [`UpdateByteMatchSet`](crate::client::fluent_builders::UpdateByteMatchSet) for more information about the /// operation and its arguments. pub fn update_byte_match_set(&self) -> fluent_builders::UpdateByteMatchSet<C, M, R> { fluent_builders::UpdateByteMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateGeoMatchSet` operation. /// /// See [`UpdateGeoMatchSet`](crate::client::fluent_builders::UpdateGeoMatchSet) for more information about the /// operation and its arguments. 
pub fn update_geo_match_set(&self) -> fluent_builders::UpdateGeoMatchSet<C, M, R> { fluent_builders::UpdateGeoMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateIPSet` operation. /// /// See [`UpdateIPSet`](crate::client::fluent_builders::UpdateIPSet) for more information about the /// operation and its arguments. pub fn update_ip_set(&self) -> fluent_builders::UpdateIPSet<C, M, R> { fluent_builders::UpdateIPSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateRateBasedRule` operation. /// /// See [`UpdateRateBasedRule`](crate::client::fluent_builders::UpdateRateBasedRule) for more information about the /// operation and its arguments. pub fn update_rate_based_rule(&self) -> fluent_builders::UpdateRateBasedRule<C, M, R> { fluent_builders::UpdateRateBasedRule::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateRegexMatchSet` operation. /// /// See [`UpdateRegexMatchSet`](crate::client::fluent_builders::UpdateRegexMatchSet) for more information about the /// operation and its arguments. pub fn update_regex_match_set(&self) -> fluent_builders::UpdateRegexMatchSet<C, M, R> { fluent_builders::UpdateRegexMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateRegexPatternSet` operation. /// /// See [`UpdateRegexPatternSet`](crate::client::fluent_builders::UpdateRegexPatternSet) for more information about the /// operation and its arguments. pub fn update_regex_pattern_set(&self) -> fluent_builders::UpdateRegexPatternSet<C, M, R> { fluent_builders::UpdateRegexPatternSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateRule` operation. /// /// See [`UpdateRule`](crate::client::fluent_builders::UpdateRule) for more information about the /// operation and its arguments. 
pub fn update_rule(&self) -> fluent_builders::UpdateRule<C, M, R> { fluent_builders::UpdateRule::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateRuleGroup` operation. /// /// See [`UpdateRuleGroup`](crate::client::fluent_builders::UpdateRuleGroup) for more information about the /// operation and its arguments. pub fn update_rule_group(&self) -> fluent_builders::UpdateRuleGroup<C, M, R> { fluent_builders::UpdateRuleGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateSizeConstraintSet` operation. /// /// See [`UpdateSizeConstraintSet`](crate::client::fluent_builders::UpdateSizeConstraintSet) for more information about the /// operation and its arguments. pub fn update_size_constraint_set(&self) -> fluent_builders::UpdateSizeConstraintSet<C, M, R> { fluent_builders::UpdateSizeConstraintSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateSqlInjectionMatchSet` operation. /// /// See [`UpdateSqlInjectionMatchSet`](crate::client::fluent_builders::UpdateSqlInjectionMatchSet) for more information about the /// operation and its arguments. pub fn update_sql_injection_match_set( &self, ) -> fluent_builders::UpdateSqlInjectionMatchSet<C, M, R> { fluent_builders::UpdateSqlInjectionMatchSet::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateWebACL` operation. /// /// See [`UpdateWebACL`](crate::client::fluent_builders::UpdateWebACL) for more information about the /// operation and its arguments. pub fn update_web_acl(&self) -> fluent_builders::UpdateWebACL<C, M, R> { fluent_builders::UpdateWebACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateXssMatchSet` operation. /// /// See [`UpdateXssMatchSet`](crate::client::fluent_builders::UpdateXssMatchSet) for more information about the /// operation and its arguments. 
pub fn update_xss_match_set(&self) -> fluent_builders::UpdateXssMatchSet<C, M, R> { fluent_builders::UpdateXssMatchSet::new(self.handle.clone()) } } pub mod fluent_builders { //! //! Utilities to ergonomically construct a request to the service. //! //! Fluent builders are created through the [`Client`](crate::client::Client) by calling //! one if its operation methods. After parameters are set using the builder methods, //! the `send` method can be called to initiate the request. //! /// Fluent builder constructing a request to `AssociateWebACL`. /// /// <note> /// <p>This is <b>AWS WAF Classic Regional</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Associates a web ACL with a resource, either an application load balancer or Amazon API Gateway stage.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct AssociateWebACL< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::associate_web_acl_input::Builder, } impl<C, M, R> AssociateWebACL<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `AssociateWebACL`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. 
/// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::AssociateWebAclOutput, aws_smithy_http::result::SdkError<crate::error::AssociateWebACLError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::AssociateWebAclInputOperationOutputAlias, crate::output::AssociateWebAclOutput, crate::error::AssociateWebACLError, crate::input::AssociateWebAclInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>A unique identifier (ID) for the web ACL. </p> pub fn web_acl_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.web_acl_id(input.into()); self } /// <p>A unique identifier (ID) for the web ACL. </p> pub fn set_web_acl_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_web_acl_id(input); self } /// <p>The ARN (Amazon Resource Name) of the resource to be protected, either an application load balancer or Amazon API Gateway stage. 
</p> /// <p>The ARN should be in one of the following formats:</p> /// <ul> /// <li> <p>For an Application Load Balancer: <code>arn:aws:elasticloadbalancing:<i>region</i>:<i>account-id</i>:loadbalancer/app/<i>load-balancer-name</i>/<i>load-balancer-id</i> </code> </p> </li> /// <li> <p>For an Amazon API Gateway stage: <code>arn:aws:apigateway:<i>region</i>::/restapis/<i>api-id</i>/stages/<i>stage-name</i> </code> </p> </li> /// </ul> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The ARN (Amazon Resource Name) of the resource to be protected, either an application load balancer or Amazon API Gateway stage. </p> /// <p>The ARN should be in one of the following formats:</p> /// <ul> /// <li> <p>For an Application Load Balancer: <code>arn:aws:elasticloadbalancing:<i>region</i>:<i>account-id</i>:loadbalancer/app/<i>load-balancer-name</i>/<i>load-balancer-id</i> </code> </p> </li> /// <li> <p>For an Amazon API Gateway stage: <code>arn:aws:apigateway:<i>region</i>::/restapis/<i>api-id</i>/stages/<i>stage-name</i> </code> </p> </li> /// </ul> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } } /// Fluent builder constructing a request to `CreateByteMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Creates a <code>ByteMatchSet</code>. 
You then use <code>UpdateByteMatchSet</code> to identify the part of a web request that you want AWS WAF to inspect, such as the values of the <code>User-Agent</code> header or the query string. For example, you can create a <code>ByteMatchSet</code> that matches any requests with <code>User-Agent</code> headers that contain the string <code>BadBot</code>. You can then configure AWS WAF to reject those requests.</p> /// <p>To create and configure a <code>ByteMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateByteMatchSet</code> request.</p> </li> /// <li> <p>Submit a <code>CreateByteMatchSet</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateByteMatchSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateByteMatchSet</code> request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CreateByteMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_byte_match_set_input::Builder, } impl<C, M, R> CreateByteMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateByteMatchSet`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CreateByteMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::CreateByteMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateByteMatchSetInputOperationOutputAlias, crate::output::CreateByteMatchSetOutput, crate::error::CreateByteMatchSetError, crate::input::CreateByteMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>A friendly name or description of the <code>ByteMatchSet</code>. You can't change <code>Name</code> after you create a <code>ByteMatchSet</code>.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>A friendly name or description of the <code>ByteMatchSet</code>. 
You can't change <code>Name</code> after you create a <code>ByteMatchSet</code>.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `CreateGeoMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Creates an <code>GeoMatchSet</code>, which you use to specify which web requests you want to allow or block based on the country that the requests originate from. For example, if you're receiving a lot of requests from one or more countries and you want to block the requests, you can create an <code>GeoMatchSet</code> that contains those countries and then configure AWS WAF to block the requests. 
</p> /// <p>To create and configure a <code>GeoMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateGeoMatchSet</code> request.</p> </li> /// <li> <p>Submit a <code>CreateGeoMatchSet</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateGeoMatchSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateGeoMatchSetSet</code> request to specify the countries that you want AWS WAF to watch for.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CreateGeoMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_geo_match_set_input::Builder, } impl<C, M, R> CreateGeoMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateGeoMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::CreateGeoMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::CreateGeoMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateGeoMatchSetInputOperationOutputAlias, crate::output::CreateGeoMatchSetOutput, crate::error::CreateGeoMatchSetError, crate::input::CreateGeoMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>A friendly name or description of the <code>GeoMatchSet</code>. You can't change <code>Name</code> after you create the <code>GeoMatchSet</code>.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>A friendly name or description of the <code>GeoMatchSet</code>. You can't change <code>Name</code> after you create the <code>GeoMatchSet</code>.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `CreateIPSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Creates an <code>IPSet</code>, which you use to specify which web requests that you want to allow or block based on the IP addresses that the requests originate from. For example, if you're receiving a lot of requests from one or more individual IP addresses or one or more ranges of IP addresses and you want to block the requests, you can create an <code>IPSet</code> that contains those IP addresses and then configure AWS WAF to block the requests. </p> /// <p>To create and configure an <code>IPSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateIPSet</code> request.</p> </li> /// <li> <p>Submit a <code>CreateIPSet</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateIPSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateIPSet</code> request to specify the IP addresses that you want AWS WAF to watch for.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CreateIPSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: 
std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_ip_set_input::Builder, } impl<C, M, R> CreateIPSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateIPSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CreateIpSetOutput, aws_smithy_http::result::SdkError<crate::error::CreateIPSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateIpSetInputOperationOutputAlias, crate::output::CreateIpSetOutput, crate::error::CreateIPSetError, crate::input::CreateIpSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>A friendly name or description of the <code>IPSet</code>. You can't change <code>Name</code> after you create the <code>IPSet</code>.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>A friendly name or description of the <code>IPSet</code>. 
You can't change <code>Name</code> after you create the <code>IPSet</code>.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `CreateRateBasedRule`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Creates a <code>RateBasedRule</code>. The <code>RateBasedRule</code> contains a <code>RateLimit</code>, which specifies the maximum number of requests that AWS WAF allows from a specified IP address in a five-minute period. The <code>RateBasedRule</code> also contains the <code>IPSet</code> objects, <code>ByteMatchSet</code> objects, and other predicates that identify the requests that you want to count or block if these requests exceed the <code>RateLimit</code>.</p> /// <p>If you add more than one predicate to a <code>RateBasedRule</code>, a request not only must exceed the <code>RateLimit</code>, but it also must match all the conditions to be counted or blocked. 
For example, suppose you add the following to a <code>RateBasedRule</code>:</p> /// <ul> /// <li> <p>An <code>IPSet</code> that matches the IP address <code>192.0.2.44/32</code> </p> </li> /// <li> <p>A <code>ByteMatchSet</code> that matches <code>BadBot</code> in the <code>User-Agent</code> header</p> </li> /// </ul> /// <p>Further, you specify a <code>RateLimit</code> of 1,000.</p> /// <p>You then add the <code>RateBasedRule</code> to a <code>WebACL</code> and specify that you want to block requests that meet the conditions in the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 <i>and</i> the <code>User-Agent</code> header in the request must contain the value <code>BadBot</code>. Further, requests that match these two conditions must be received at a rate of more than 1,000 requests every five minutes. If both conditions are met and the rate is exceeded, AWS WAF blocks the requests. If the rate drops below 1,000 for a five-minute period, AWS WAF no longer blocks the requests.</p> /// <p>As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a <code>RateBasedRule</code>:</p> /// <ul> /// <li> <p>A <code>ByteMatchSet</code> with <code>FieldToMatch</code> of <code>URI</code> </p> </li> /// <li> <p>A <code>PositionalConstraint</code> of <code>STARTS_WITH</code> </p> </li> /// <li> <p>A <code>TargetString</code> of <code>login</code> </p> </li> /// </ul> /// <p>Further, you specify a <code>RateLimit</code> of 1,000.</p> /// <p>By adding this <code>RateBasedRule</code> to a <code>WebACL</code>, you could limit requests to your login page without affecting the rest of your site.</p> /// <p>To create and configure a <code>RateBasedRule</code>, perform the following steps:</p> /// <ol> /// <li> <p>Create and update the predicates that you want to include in the rule. 
For more information, see <code>CreateByteMatchSet</code>, <code>CreateIPSet</code>, and <code>CreateSqlInjectionMatchSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateRule</code> request.</p> </li> /// <li> <p>Submit a <code>CreateRateBasedRule</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateRule</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateRateBasedRule</code> request to specify the predicates that you want to include in the rule.</p> </li> /// <li> <p>Create and update a <code>WebACL</code> that contains the <code>RateBasedRule</code>. For more information, see <code>CreateWebACL</code>.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CreateRateBasedRule< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_rate_based_rule_input::Builder, } impl<C, M, R> CreateRateBasedRule<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateRateBasedRule`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CreateRateBasedRuleOutput, aws_smithy_http::result::SdkError<crate::error::CreateRateBasedRuleError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateRateBasedRuleInputOperationOutputAlias, crate::output::CreateRateBasedRuleOutput, crate::error::CreateRateBasedRuleError, crate::input::CreateRateBasedRuleInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>A friendly name or description of the <code>RateBasedRule</code>. You can't change the name of a <code>RateBasedRule</code> after you create it.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>A friendly name or description of the <code>RateBasedRule</code>. You can't change the name of a <code>RateBasedRule</code> after you create it.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>A friendly name or description for the metrics for this <code>RateBasedRule</code>. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." 
You can't change the name of the metric after you create the <code>RateBasedRule</code>.</p> pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.metric_name(input.into()); self } /// <p>A friendly name or description for the metrics for this <code>RateBasedRule</code>. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change the name of the metric after you create the <code>RateBasedRule</code>.</p> pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_metric_name(input); self } /// <p>The field that AWS WAF uses to determine if requests are likely arriving from a single source and thus subject to rate monitoring. The only valid value for <code>RateKey</code> is <code>IP</code>. <code>IP</code> indicates that requests that arrive from the same IP address are subject to the <code>RateLimit</code> that is specified in the <code>RateBasedRule</code>.</p> pub fn rate_key(mut self, input: crate::model::RateKey) -> Self { self.inner = self.inner.rate_key(input); self } /// <p>The field that AWS WAF uses to determine if requests are likely arriving from a single source and thus subject to rate monitoring. The only valid value for <code>RateKey</code> is <code>IP</code>. <code>IP</code> indicates that requests that arrive from the same IP address are subject to the <code>RateLimit</code> that is specified in the <code>RateBasedRule</code>.</p> pub fn set_rate_key(mut self, input: std::option::Option<crate::model::RateKey>) -> Self { self.inner = self.inner.set_rate_key(input); self } /// <p>The maximum number of requests, which have an identical value in the field that is specified by <code>RateKey</code>, allowed in a five-minute period. 
If the number of requests exceeds the <code>RateLimit</code> and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.</p> pub fn rate_limit(mut self, input: i64) -> Self { self.inner = self.inner.rate_limit(input); self } /// <p>The maximum number of requests, which have an identical value in the field that is specified by <code>RateKey</code>, allowed in a five-minute period. If the number of requests exceeds the <code>RateLimit</code> and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.</p> pub fn set_rate_limit(mut self, input: std::option::Option<i64>) -> Self { self.inner = self.inner.set_rate_limit(input); self } /// <p>The <code>ChangeToken</code> that you used to submit the <code>CreateRateBasedRule</code> request. You can also use this value to query the status of the request. For more information, see <code>GetChangeTokenStatus</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The <code>ChangeToken</code> that you used to submit the <code>CreateRateBasedRule</code> request. You can also use this value to query the status of the request. For more information, see <code>GetChangeTokenStatus</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } /// Appends an item to `Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p></p> pub fn tags(mut self, input: crate::model::Tag) -> Self { self.inner = self.inner.tags(input); self } /// <p></p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `CreateRegexMatchSet`. 
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Creates a <code>RegexMatchSet</code>. You then use <code>UpdateRegexMatchSet</code> to identify the part of a web request that you want AWS WAF to inspect, such as the values of the <code>User-Agent</code> header or the query string. For example, you can create a <code>RegexMatchSet</code> that contains a <code>RegexMatchTuple</code> that looks for any requests with <code>User-Agent</code> headers that match a <code>RegexPatternSet</code> with pattern <code>B[a@]dB[o0]t</code>. You can then configure AWS WAF to reject those requests.</p>
/// <p>To create and configure a <code>RegexMatchSet</code>, perform the following steps:</p>
/// <ol>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateRegexMatchSet</code> request.</p> </li>
/// <li> <p>Submit a <code>CreateRegexMatchSet</code> request.</p> </li>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateRegexMatchSet</code> request.</p> </li>
/// <li> <p>Submit an <code>UpdateRegexMatchSet</code> request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value, using a <code>RegexPatternSet</code>, that you want AWS WAF to watch for.</p> </li>
/// </ol>
/// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p>
// NOTE(review): generated fluent-builder code (smithy-rs style); regenerate from the
// service model rather than hand-editing. Comments added for review only.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateRegexMatchSet<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector, middleware, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set via the fluent methods below.
    inner: crate::input::create_regex_match_set_input::Builder,
}
impl<C, M, R> CreateRegexMatchSet<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateRegexMatchSet`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateRegexMatchSetOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateRegexMatchSetError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateRegexMatchSetInputOperationOutputAlias,
            crate::output::CreateRegexMatchSetOutput,
            crate::error::CreateRegexMatchSetError,
            crate::input::CreateRegexMatchSetInputOperationRetryAlias,
        >,
    {
        // Builder validation and operation-construction failures are both surfaced as
        // SdkError::ConstructionFailure — in those cases the request never left the client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>A friendly name or description of the <code>RegexMatchSet</code>. You can't change <code>Name</code> after you create a <code>RegexMatchSet</code>.</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input.into());
        self
    }
    /// <p>A friendly name or description of the <code>RegexMatchSet</code>. You can't change <code>Name</code> after you create a <code>RegexMatchSet</code>.</p>
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
}
/// Fluent builder constructing a request to `CreateRegexPatternSet`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Creates a <code>RegexPatternSet</code>. You then use <code>UpdateRegexPatternSet</code> to specify the regular expression (regex) pattern that you want AWS WAF to search for, such as <code>B[a@]dB[o0]t</code>. You can then configure AWS WAF to reject those requests.</p>
/// <p>To create and configure a <code>RegexPatternSet</code>, perform the following steps:</p>
/// <ol>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateRegexPatternSet</code> request.</p> </li>
/// <li> <p>Submit a <code>CreateRegexPatternSet</code> request.</p> </li>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateRegexPatternSet</code> request.</p> </li>
/// <li> <p>Submit an <code>UpdateRegexPatternSet</code> request to specify the string that you want AWS WAF to watch for.</p> </li>
/// </ol>
/// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p>
// NOTE(review): generated fluent-builder code (smithy-rs style); regenerate from the
// service model rather than hand-editing. Comments added for review only.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateRegexPatternSet<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector, middleware, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set via the fluent methods below.
    inner: crate::input::create_regex_pattern_set_input::Builder,
}
impl<C, M, R> CreateRegexPatternSet<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateRegexPatternSet`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateRegexPatternSetOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateRegexPatternSetError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateRegexPatternSetInputOperationOutputAlias,
            crate::output::CreateRegexPatternSetOutput,
            crate::error::CreateRegexPatternSetError,
            crate::input::CreateRegexPatternSetInputOperationRetryAlias,
        >,
    {
        // Builder validation and operation-construction failures are both surfaced as
        // SdkError::ConstructionFailure — in those cases the request never left the client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>A friendly name or description of the <code>RegexPatternSet</code>. You can't change <code>Name</code> after you create a <code>RegexPatternSet</code>.</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input.into());
        self
    }
    /// <p>A friendly name or description of the <code>RegexPatternSet</code>. You can't change <code>Name</code> after you create a <code>RegexPatternSet</code>.</p>
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
}
/// Fluent builder constructing a request to `CreateRule`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Creates a <code>Rule</code>, which contains the <code>IPSet</code> objects, <code>ByteMatchSet</code> objects, and other predicates that identify the requests that you want to block. If you add more than one predicate to a <code>Rule</code>, a request must match all of the specifications to be allowed or blocked.
/// For example, suppose that you add the following to a <code>Rule</code>:</p>
/// <ul>
/// <li> <p>An <code>IPSet</code> that matches the IP address <code>192.0.2.44/32</code> </p> </li>
/// <li> <p>A <code>ByteMatchSet</code> that matches <code>BadBot</code> in the <code>User-Agent</code> header</p> </li>
/// </ul>
/// <p>You then add the <code>Rule</code> to a <code>WebACL</code> and specify that you want to block requests that satisfy the <code>Rule</code>. For a request to be blocked, it must come from the IP address 192.0.2.44 <i>and</i> the <code>User-Agent</code> header in the request must contain the value <code>BadBot</code>.</p>
/// <p>To create and configure a <code>Rule</code>, perform the following steps:</p>
/// <ol>
/// <li> <p>Create and update the predicates that you want to include in the <code>Rule</code>. For more information, see <code>CreateByteMatchSet</code>, <code>CreateIPSet</code>, and <code>CreateSqlInjectionMatchSet</code>.</p> </li>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateRule</code> request.</p> </li>
/// <li> <p>Submit a <code>CreateRule</code> request.</p> </li>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateRule</code> request.</p> </li>
/// <li> <p>Submit an <code>UpdateRule</code> request to specify the predicates that you want to include in the <code>Rule</code>.</p> </li>
/// <li> <p>Create and update a <code>WebACL</code> that contains the <code>Rule</code>. For more information, see <code>CreateWebACL</code>.</p> </li>
/// </ol>
/// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p>
// NOTE(review): generated fluent-builder code (smithy-rs style); regenerate from the
// service model rather than hand-editing. Comments added for review only.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateRule<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector, middleware, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set via the fluent methods below.
    inner: crate::input::create_rule_input::Builder,
}
impl<C, M, R> CreateRule<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateRule`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateRuleOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateRuleError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateRuleInputOperationOutputAlias,
            crate::output::CreateRuleOutput,
            crate::error::CreateRuleError,
            crate::input::CreateRuleInputOperationRetryAlias,
        >,
    {
        // Builder validation and operation-construction failures are both surfaced as
        // SdkError::ConstructionFailure — in those cases the request never left the client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>A friendly name or description of the <code>Rule</code>. You can't change the name of a <code>Rule</code> after you create it.</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input.into());
        self
    }
    /// <p>A friendly name or description of the <code>Rule</code>. You can't change the name of a <code>Rule</code> after you create it.</p>
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>A friendly name or description for the metrics for this <code>Rule</code>. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change the name of the metric after you create the <code>Rule</code>.</p>
    pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.metric_name(input.into());
        self
    }
    /// <p>A friendly name or description for the metrics for this <code>Rule</code>. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change the name of the metric after you create the <code>Rule</code>.</p>
    pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_metric_name(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
    /// Appends an item to `Tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p></p>
    pub fn tags(mut self, input: crate::model::Tag) -> Self {
        self.inner = self.inner.tags(input);
        self
    }
    /// <p></p>
    pub fn set_tags(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    ) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
}
/// Fluent builder constructing a request to `CreateRuleGroup`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Creates a <code>RuleGroup</code>. A rule group is a collection of predefined rules that you add to a web ACL. You use <code>UpdateRuleGroup</code> to add rules to the rule group.</p>
/// <p>Rule groups are subject to the following limits:</p>
/// <ul>
/// <li> <p>Three rule groups per account.
/// You can request an increase to this limit by contacting customer support.</p> </li>
/// <li> <p>One rule group per web ACL.</p> </li>
/// <li> <p>Ten rules per rule group.</p> </li>
/// </ul>
/// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p>
// NOTE(review): generated fluent-builder code (smithy-rs style); regenerate from the
// service model rather than hand-editing. Comments added for review only.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateRuleGroup<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector, middleware, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set via the fluent methods below.
    inner: crate::input::create_rule_group_input::Builder,
}
impl<C, M, R> CreateRuleGroup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateRuleGroup`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateRuleGroupOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateRuleGroupError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateRuleGroupInputOperationOutputAlias,
            crate::output::CreateRuleGroupOutput,
            crate::error::CreateRuleGroupError,
            crate::input::CreateRuleGroupInputOperationRetryAlias,
        >,
    {
        // Builder validation and operation-construction failures are both surfaced as
        // SdkError::ConstructionFailure — in those cases the request never left the client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>A friendly name or description of the <code>RuleGroup</code>. You can't change <code>Name</code> after you create a <code>RuleGroup</code>.</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input.into());
        self
    }
    /// <p>A friendly name or description of the <code>RuleGroup</code>. You can't change <code>Name</code> after you create a <code>RuleGroup</code>.</p>
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>A friendly name or description for the metrics for this <code>RuleGroup</code>. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change the name of the metric after you create the <code>RuleGroup</code>.</p>
    pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.metric_name(input.into());
        self
    }
    /// <p>A friendly name or description for the metrics for this <code>RuleGroup</code>. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change the name of the metric after you create the <code>RuleGroup</code>.</p>
    pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_metric_name(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
    /// Appends an item to `Tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p></p>
    pub fn tags(mut self, input: crate::model::Tag) -> Self {
        self.inner = self.inner.tags(input);
        self
    }
    /// <p></p>
    pub fn set_tags(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    ) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
}
/// Fluent builder constructing a request to `CreateSizeConstraintSet`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Creates a <code>SizeConstraintSet</code>. You then use <code>UpdateSizeConstraintSet</code> to identify the part of a web request that you want AWS WAF to check for length, such as the length of the <code>User-Agent</code> header or the length of the query string.
/// For example, you can create a <code>SizeConstraintSet</code> that matches any requests that have a query string that is longer than 100 bytes. You can then configure AWS WAF to reject those requests.</p>
/// <p>To create and configure a <code>SizeConstraintSet</code>, perform the following steps:</p>
/// <ol>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateSizeConstraintSet</code> request.</p> </li>
/// <li> <p>Submit a <code>CreateSizeConstraintSet</code> request.</p> </li>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateSizeConstraintSet</code> request.</p> </li>
/// <li> <p>Submit an <code>UpdateSizeConstraintSet</code> request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.</p> </li>
/// </ol>
/// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p>
// NOTE(review): generated fluent-builder code (smithy-rs style); regenerate from the
// service model rather than hand-editing. Comments added for review only.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateSizeConstraintSet<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state (connector, middleware, retry policy, config).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request fields set via the fluent methods below.
    inner: crate::input::create_size_constraint_set_input::Builder,
}
impl<C, M, R> CreateSizeConstraintSet<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateSizeConstraintSet`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateSizeConstraintSetOutput,
        aws_smithy_http::result::SdkError<crate::error::CreateSizeConstraintSetError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::CreateSizeConstraintSetInputOperationOutputAlias,
            crate::output::CreateSizeConstraintSetOutput,
            crate::error::CreateSizeConstraintSetError,
            crate::input::CreateSizeConstraintSetInputOperationRetryAlias,
        >,
    {
        // Builder validation and operation-construction failures are both surfaced as
        // SdkError::ConstructionFailure — in those cases the request never left the client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>A friendly name or description of the <code>SizeConstraintSet</code>. You can't change <code>Name</code> after you create a <code>SizeConstraintSet</code>.</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input.into());
        self
    }
    /// <p>A friendly name or description of the <code>SizeConstraintSet</code>. You can't change <code>Name</code> after you create a <code>SizeConstraintSet</code>.</p>
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
}
/// Fluent builder constructing a request to `CreateSqlInjectionMatchSet`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Creates a <code>SqlInjectionMatchSet</code>, which you use to allow, block, or count requests that contain snippets of SQL code in a specified part of web requests.
AWS WAF searches for character sequences that are likely to be malicious strings.</p> /// <p>To create and configure a <code>SqlInjectionMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateSqlInjectionMatchSet</code> request.</p> </li> /// <li> <p>Submit a <code>CreateSqlInjectionMatchSet</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateSqlInjectionMatchSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateSqlInjectionMatchSet</code> request to specify the parts of web requests in which you want to allow, block, or count malicious SQL code.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CreateSqlInjectionMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_sql_injection_match_set_input::Builder, } impl<C, M, R> CreateSqlInjectionMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateSqlInjectionMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CreateSqlInjectionMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::CreateSqlInjectionMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateSqlInjectionMatchSetInputOperationOutputAlias, crate::output::CreateSqlInjectionMatchSetOutput, crate::error::CreateSqlInjectionMatchSetError, crate::input::CreateSqlInjectionMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>A friendly name or description for the <code>SqlInjectionMatchSet</code> that you're creating. You can't change <code>Name</code> after you create the <code>SqlInjectionMatchSet</code>.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>A friendly name or description for the <code>SqlInjectionMatchSet</code> that you're creating. 
You can't change <code>Name</code> after you create the <code>SqlInjectionMatchSet</code>.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `CreateWebACL`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Creates a <code>WebACL</code>, which contains the <code>Rules</code> that identify the CloudFront web requests that you want to allow, block, or count. AWS WAF evaluates <code>Rules</code> in order based on the value of <code>Priority</code> for each <code>Rule</code>.</p> /// <p>You also specify a default action, either <code>ALLOW</code> or <code>BLOCK</code>. If a web request doesn't match any of the <code>Rules</code> in a <code>WebACL</code>, AWS WAF responds to the request with the default action. 
</p> /// <p>To create and configure a <code>WebACL</code>, perform the following steps:</p> /// <ol> /// <li> <p>Create and update the <code>ByteMatchSet</code> objects and other predicates that you want to include in <code>Rules</code>. For more information, see <code>CreateByteMatchSet</code>, <code>UpdateByteMatchSet</code>, <code>CreateIPSet</code>, <code>UpdateIPSet</code>, <code>CreateSqlInjectionMatchSet</code>, and <code>UpdateSqlInjectionMatchSet</code>.</p> </li> /// <li> <p>Create and update the <code>Rules</code> that you want to include in the <code>WebACL</code>. For more information, see <code>CreateRule</code> and <code>UpdateRule</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateWebACL</code> request.</p> </li> /// <li> <p>Submit a <code>CreateWebACL</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateWebACL</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateWebACL</code> request to specify the <code>Rules</code> that you want to include in the <code>WebACL</code>, to specify the default action, and to associate the <code>WebACL</code> with a CloudFront distribution.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CreateWebACL< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_web_acl_input::Builder, } impl<C, M, R> CreateWebACL<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: 
aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateWebACL`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CreateWebAclOutput, aws_smithy_http::result::SdkError<crate::error::CreateWebACLError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateWebAclInputOperationOutputAlias, crate::output::CreateWebAclOutput, crate::error::CreateWebACLError, crate::input::CreateWebAclInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>A friendly name or description of the <code>WebACL</code>. You can't change <code>Name</code> after you create the <code>WebACL</code>.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>A friendly name or description of the <code>WebACL</code>. 
You can't change <code>Name</code> after you create the <code>WebACL</code>.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>A friendly name or description for the metrics for this <code>WebACL</code>.The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can't change <code>MetricName</code> after you create the <code>WebACL</code>.</p> pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.metric_name(input.into()); self } /// <p>A friendly name or description for the metrics for this <code>WebACL</code>.The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." 
You can't change <code>MetricName</code> after you create the <code>WebACL</code>.</p> pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_metric_name(input); self } /// <p>The action that you want AWS WAF to take when a request doesn't match the criteria specified in any of the <code>Rule</code> objects that are associated with the <code>WebACL</code>.</p> pub fn default_action(mut self, input: crate::model::WafAction) -> Self { self.inner = self.inner.default_action(input); self } /// <p>The action that you want AWS WAF to take when a request doesn't match the criteria specified in any of the <code>Rule</code> objects that are associated with the <code>WebACL</code>.</p> pub fn set_default_action( mut self, input: std::option::Option<crate::model::WafAction>, ) -> Self { self.inner = self.inner.set_default_action(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } /// Appends an item to `Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p></p> pub fn tags(mut self, input: crate::model::Tag) -> Self { self.inner = self.inner.tags(input); self } /// <p></p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `CreateWebACLMigrationStack`. /// /// <p>Creates an AWS CloudFormation WAFV2 template for the specified web ACL in the specified Amazon S3 bucket. 
Then, in CloudFormation, you create a stack from the template, to create the web ACL and its resources in AWS WAFV2. Use this to migrate your AWS WAF Classic web ACL to the latest version of AWS WAF.</p> /// <p>This is part of a larger migration procedure for web ACLs from AWS WAF Classic to the latest version of AWS WAF. For the full procedure, including caveats and manual steps to complete the migration and switch over to the new web ACL, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-migrating-from-classic.html">Migrating your AWS WAF Classic resources to AWS WAF</a> in the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. </p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CreateWebACLMigrationStack< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_web_acl_migration_stack_input::Builder, } impl<C, M, R> CreateWebACLMigrationStack<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateWebACLMigrationStack`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send(
    self,
) -> std::result::Result<
    crate::output::CreateWebAclMigrationStackOutput,
    aws_smithy_http::result::SdkError<crate::error::CreateWebACLMigrationStackError>,
>
where
    R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
        crate::input::CreateWebAclMigrationStackInputOperationOutputAlias,
        crate::output::CreateWebAclMigrationStackOutput,
        crate::error::CreateWebACLMigrationStackError,
        crate::input::CreateWebAclMigrationStackInputOperationRetryAlias,
    >,
{
    // Build the accumulated input into an operation; any input-validation or
    // middleware-construction error is surfaced as `ConstructionFailure`
    // before any network I/O takes place.
    let op = self
        .inner
        .build()
        .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
        .make_operation(&self.handle.conf)
        .await
        .map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
    // Dispatch through the shared client handle (connector, middleware, retry policy).
    self.handle.client.call(op).await
}
/// <p>The UUID of the WAF Classic web ACL that you want to migrate to WAF v2.</p>
pub fn web_acl_id(mut self, input: impl Into<std::string::String>) -> Self {
    self.inner = self.inner.web_acl_id(input.into());
    self
}
/// <p>The UUID of the WAF Classic web ACL that you want to migrate to WAF v2.</p>
pub fn set_web_acl_id(mut self, input: std::option::Option<std::string::String>) -> Self {
    self.inner = self.inner.set_web_acl_id(input);
    self
}
/// <p>The name of the Amazon S3 bucket to store the CloudFormation template in. The S3 bucket must be configured as follows for the migration: </p>
/// <ul>
/// <li> <p>The bucket name must start with <code>aws-waf-migration-</code>. For example, <code>aws-waf-migration-my-web-acl</code>.</p> </li>
/// <li> <p>The bucket must be in the Region where you are deploying the template. For example, for a web ACL in us-west-2, you must use an Amazon S3 bucket in us-west-2 and you must deploy the template stack to us-west-2. </p> </li>
/// <li> <p>The bucket policies must permit the migration process to write data. For listings of the bucket policies, see the Examples section. </p> </li>
/// </ul>
pub fn s3_bucket_name(mut self, input: impl Into<std::string::String>) -> Self {
    self.inner = self.inner.s3_bucket_name(input.into());
    self
}
/// <p>The name of the Amazon S3 bucket to store the CloudFormation template in. The S3 bucket must be configured as follows for the migration: </p>
/// <ul>
/// <li> <p>The bucket name must start with <code>aws-waf-migration-</code>. For example, <code>aws-waf-migration-my-web-acl</code>.</p> </li>
/// <li> <p>The bucket must be in the Region where you are deploying the template. For example, for a web ACL in us-west-2, you must use an Amazon S3 bucket in us-west-2 and you must deploy the template stack to us-west-2. </p> </li>
/// <li> <p>The bucket policies must permit the migration process to write data. For listings of the bucket policies, see the Examples section. </p> </li>
/// </ul>
pub fn set_s3_bucket_name(
    mut self,
    input: std::option::Option<std::string::String>,
) -> Self {
    self.inner = self.inner.set_s3_bucket_name(input);
    self
}
/// <p>Indicates whether to exclude entities that can't be migrated or to stop the migration. Set this to true to ignore unsupported entities in the web ACL during the migration. Otherwise, if AWS WAF encounters unsupported entities, it stops the process and throws an exception. </p>
pub fn ignore_unsupported_type(mut self, input: bool) -> Self {
    self.inner = self.inner.ignore_unsupported_type(input);
    self
}
/// <p>Indicates whether to exclude entities that can't be migrated or to stop the migration. Set this to true to ignore unsupported entities in the web ACL during the migration. Otherwise, if AWS WAF encounters unsupported entities, it stops the process and throws an exception. </p>
pub fn set_ignore_unsupported_type(mut self, input: std::option::Option<bool>) -> Self {
    self.inner = self.inner.set_ignore_unsupported_type(input);
    self
}
}
/// Fluent builder constructing a request to `CreateXssMatchSet`.
/// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Creates an <code>XssMatchSet</code>, which you use to allow, block, or count requests that contain cross-site scripting attacks in the specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.</p> /// <p>To create and configure an <code>XssMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>CreateXssMatchSet</code> request.</p> </li> /// <li> <p>Submit a <code>CreateXssMatchSet</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateXssMatchSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateXssMatchSet</code> request to specify the parts of web requests in which you want to allow, block, or count cross-site scripting attacks.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CreateXssMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: 
crate::input::create_xss_match_set_input::Builder, } impl<C, M, R> CreateXssMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateXssMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CreateXssMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::CreateXssMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateXssMatchSetInputOperationOutputAlias, crate::output::CreateXssMatchSetOutput, crate::error::CreateXssMatchSetError, crate::input::CreateXssMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>A friendly name or description for the <code>XssMatchSet</code> that you're creating. You can't change <code>Name</code> after you create the <code>XssMatchSet</code>.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>A friendly name or description for the <code>XssMatchSet</code> that you're creating. 
You can't change <code>Name</code> after you create the <code>XssMatchSet</code>.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteByteMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes a <code>ByteMatchSet</code>. You can't delete a <code>ByteMatchSet</code> if it's still used in any <code>Rules</code> or if it still includes any <code>ByteMatchTuple</code> objects (any filters).</p> /// <p>If you just want to remove a <code>ByteMatchSet</code> from a <code>Rule</code>, use <code>UpdateRule</code>.</p> /// <p>To permanently delete a <code>ByteMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>ByteMatchSet</code> to remove filters, if any. 
For more information, see <code>UpdateByteMatchSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteByteMatchSet</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteByteMatchSet</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteByteMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_byte_match_set_input::Builder, } impl<C, M, R> DeleteByteMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteByteMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteByteMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::DeleteByteMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteByteMatchSetInputOperationOutputAlias, crate::output::DeleteByteMatchSetOutput, crate::error::DeleteByteMatchSetError, crate::input::DeleteByteMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>ByteMatchSetId</code> of the <code>ByteMatchSet</code> that you want to delete. <code>ByteMatchSetId</code> is returned by <code>CreateByteMatchSet</code> and by <code>ListByteMatchSets</code>.</p> pub fn byte_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.byte_match_set_id(input.into()); self } /// <p>The <code>ByteMatchSetId</code> of the <code>ByteMatchSet</code> that you want to delete. <code>ByteMatchSetId</code> is returned by <code>CreateByteMatchSet</code> and by <code>ListByteMatchSets</code>.</p> pub fn set_byte_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_byte_match_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteGeoMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Permanently deletes a <code>GeoMatchSet</code>. You can't delete a <code>GeoMatchSet</code> if it's still used in any <code>Rules</code> or if it still includes any countries.</p> /// <p>If you just want to remove a <code>GeoMatchSet</code> from a <code>Rule</code>, use <code>UpdateRule</code>.</p> /// <p>To permanently delete a <code>GeoMatchSet</code> from AWS WAF, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>GeoMatchSet</code> to remove any countries. For more information, see <code>UpdateGeoMatchSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteGeoMatchSet</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteGeoMatchSet</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteGeoMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_geo_match_set_input::Builder, } impl<C, M, R> DeleteGeoMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteGeoMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send(
    self,
) -> std::result::Result<
    crate::output::DeleteGeoMatchSetOutput,
    aws_smithy_http::result::SdkError<crate::error::DeleteGeoMatchSetError>,
>
where
    R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
        crate::input::DeleteGeoMatchSetInputOperationOutputAlias,
        crate::output::DeleteGeoMatchSetOutput,
        crate::error::DeleteGeoMatchSetError,
        crate::input::DeleteGeoMatchSetInputOperationRetryAlias,
    >,
{
    // Any failure while building the input or assembling the operation is
    // reported as `ConstructionFailure` before any request is sent.
    let op = self
        .inner
        .build()
        .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
        .make_operation(&self.handle.conf)
        .await
        .map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
    // Dispatch through the shared client handle (connector, middleware, retry policy).
    self.handle.client.call(op).await
}
/// <p>The <code>GeoMatchSetID</code> of the <code>GeoMatchSet</code> that you want to delete. <code>GeoMatchSetId</code> is returned by <code>CreateGeoMatchSet</code> and by <code>ListGeoMatchSets</code>.</p>
pub fn geo_match_set_id(mut self, input: impl Into<std::string::String>) -> Self {
    self.inner = self.inner.geo_match_set_id(input.into());
    self
}
/// <p>The <code>GeoMatchSetID</code> of the <code>GeoMatchSet</code> that you want to delete. <code>GeoMatchSetId</code> is returned by <code>CreateGeoMatchSet</code> and by <code>ListGeoMatchSets</code>.</p>
pub fn set_geo_match_set_id(
    mut self,
    input: std::option::Option<std::string::String>,
) -> Self {
    self.inner = self.inner.set_geo_match_set_id(input);
    self
}
/// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
    self.inner = self.inner.change_token(input.into());
    self
}
/// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
    self.inner = self.inner.set_change_token(input);
    self
}
}
/// Fluent builder constructing a request to `DeleteIPSet`.
/// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes an <code>IPSet</code>. You can't delete an <code>IPSet</code> if it's still used in any <code>Rules</code> or if it still includes any IP addresses.</p> /// <p>If you just want to remove an <code>IPSet</code> from a <code>Rule</code>, use <code>UpdateRule</code>.</p> /// <p>To permanently delete an <code>IPSet</code> from AWS WAF, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>IPSet</code> to remove IP address ranges, if any. For more information, see <code>UpdateIPSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteIPSet</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteIPSet</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteIPSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_ip_set_input::Builder, } impl<C, M, R> DeleteIPSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteIPSet`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteIpSetOutput, aws_smithy_http::result::SdkError<crate::error::DeleteIPSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteIpSetInputOperationOutputAlias, crate::output::DeleteIpSetOutput, crate::error::DeleteIPSetError, crate::input::DeleteIpSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>IPSetId</code> of the <code>IPSet</code> that you want to delete. <code>IPSetId</code> is returned by <code>CreateIPSet</code> and by <code>ListIPSets</code>.</p> pub fn ip_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.ip_set_id(input.into()); self } /// <p>The <code>IPSetId</code> of the <code>IPSet</code> that you want to delete. 
<code>IPSetId</code> is returned by <code>CreateIPSet</code> and by <code>ListIPSets</code>.</p> pub fn set_ip_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_ip_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteLoggingConfiguration`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes the <code>LoggingConfiguration</code> from the specified web ACL.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteLoggingConfiguration< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_logging_configuration_input::Builder, } impl<C, M, R> DeleteLoggingConfiguration<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteLoggingConfiguration`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteLoggingConfigurationOutput, aws_smithy_http::result::SdkError<crate::error::DeleteLoggingConfigurationError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteLoggingConfigurationInputOperationOutputAlias, crate::output::DeleteLoggingConfigurationOutput, crate::error::DeleteLoggingConfigurationError, crate::input::DeleteLoggingConfigurationInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Amazon Resource Name (ARN) of the web ACL from which you want to delete the <code>LoggingConfiguration</code>.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the web ACL from which you want to delete the <code>LoggingConfiguration</code>.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } } /// Fluent builder constructing a request to `DeletePermissionPolicy`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes an IAM policy from the specified RuleGroup.</p> /// <p>The user making the request must be the owner of the RuleGroup.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeletePermissionPolicy< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_permission_policy_input::Builder, } impl<C, M, R> DeletePermissionPolicy<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeletePermissionPolicy`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DeletePermissionPolicyOutput, aws_smithy_http::result::SdkError<crate::error::DeletePermissionPolicyError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeletePermissionPolicyInputOperationOutputAlias, crate::output::DeletePermissionPolicyOutput, crate::error::DeletePermissionPolicyError, crate::input::DeletePermissionPolicyInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Amazon Resource Name (ARN) of the RuleGroup from which you want to delete the policy.</p> /// <p>The user making the request must be the owner of the RuleGroup.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the RuleGroup from which you want to delete the policy.</p> /// <p>The user making the request must be the owner of the RuleGroup.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } } /// Fluent builder constructing a request to `DeleteRateBasedRule`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Permanently deletes a <code>RateBasedRule</code>. You can't delete a rule if it's still used in any <code>WebACL</code> objects or if it still includes any predicates, such as <code>ByteMatchSet</code> objects.</p> /// <p>If you just want to remove a rule from a <code>WebACL</code>, use <code>UpdateWebACL</code>.</p> /// <p>To permanently delete a <code>RateBasedRule</code> from AWS WAF, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>RateBasedRule</code> to remove predicates, if any. For more information, see <code>UpdateRateBasedRule</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteRateBasedRule</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteRateBasedRule</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteRateBasedRule< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_rate_based_rule_input::Builder, } impl<C, M, R> DeleteRateBasedRule<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteRateBasedRule`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DeleteRateBasedRuleOutput, aws_smithy_http::result::SdkError<crate::error::DeleteRateBasedRuleError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteRateBasedRuleInputOperationOutputAlias, crate::output::DeleteRateBasedRuleOutput, crate::error::DeleteRateBasedRuleError, crate::input::DeleteRateBasedRuleInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RuleId</code> of the <code>RateBasedRule</code> that you want to delete. <code>RuleId</code> is returned by <code>CreateRateBasedRule</code> and by <code>ListRateBasedRules</code>.</p> pub fn rule_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.rule_id(input.into()); self } /// <p>The <code>RuleId</code> of the <code>RateBasedRule</code> that you want to delete. <code>RuleId</code> is returned by <code>CreateRateBasedRule</code> and by <code>ListRateBasedRules</code>.</p> pub fn set_rule_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_rule_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteRegexMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes a <code>RegexMatchSet</code>. You can't delete a <code>RegexMatchSet</code> if it's still used in any <code>Rules</code> or if it still includes any <code>RegexMatchTuples</code> objects (any filters).</p> /// <p>If you just want to remove a <code>RegexMatchSet</code> from a <code>Rule</code>, use <code>UpdateRule</code>.</p> /// <p>To permanently delete a <code>RegexMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>RegexMatchSet</code> to remove filters, if any. For more information, see <code>UpdateRegexMatchSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteRegexMatchSet</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteRegexMatchSet</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteRegexMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_regex_match_set_input::Builder, } impl<C, M, R> DeleteRegexMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteRegexMatchSet`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteRegexMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::DeleteRegexMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteRegexMatchSetInputOperationOutputAlias, crate::output::DeleteRegexMatchSetOutput, crate::error::DeleteRegexMatchSetError, crate::input::DeleteRegexMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RegexMatchSetId</code> of the <code>RegexMatchSet</code> that you want to delete. <code>RegexMatchSetId</code> is returned by <code>CreateRegexMatchSet</code> and by <code>ListRegexMatchSets</code>.</p> pub fn regex_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.regex_match_set_id(input.into()); self } /// <p>The <code>RegexMatchSetId</code> of the <code>RegexMatchSet</code> that you want to delete. 
<code>RegexMatchSetId</code> is returned by <code>CreateRegexMatchSet</code> and by <code>ListRegexMatchSets</code>.</p> pub fn set_regex_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_regex_match_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteRegexPatternSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes a <code>RegexPatternSet</code>. You can't delete a <code>RegexPatternSet</code> if it's still used in any <code>RegexMatchSet</code> or if the <code>RegexPatternSet</code> is not empty. 
</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteRegexPatternSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_regex_pattern_set_input::Builder, } impl<C, M, R> DeleteRegexPatternSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteRegexPatternSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteRegexPatternSetOutput, aws_smithy_http::result::SdkError<crate::error::DeleteRegexPatternSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteRegexPatternSetInputOperationOutputAlias, crate::output::DeleteRegexPatternSetOutput, crate::error::DeleteRegexPatternSetError, crate::input::DeleteRegexPatternSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RegexPatternSetId</code> of the <code>RegexPatternSet</code> that you want to delete. 
<code>RegexPatternSetId</code> is returned by <code>CreateRegexPatternSet</code> and by <code>ListRegexPatternSets</code>.</p> pub fn regex_pattern_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.regex_pattern_set_id(input.into()); self } /// <p>The <code>RegexPatternSetId</code> of the <code>RegexPatternSet</code> that you want to delete. <code>RegexPatternSetId</code> is returned by <code>CreateRegexPatternSet</code> and by <code>ListRegexPatternSets</code>.</p> pub fn set_regex_pattern_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_regex_pattern_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteRule`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes a <code>Rule</code>. 
You can't delete a <code>Rule</code> if it's still used in any <code>WebACL</code> objects or if it still includes any predicates, such as <code>ByteMatchSet</code> objects.</p> /// <p>If you just want to remove a <code>Rule</code> from a <code>WebACL</code>, use <code>UpdateWebACL</code>.</p> /// <p>To permanently delete a <code>Rule</code> from AWS WAF, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>Rule</code> to remove predicates, if any. For more information, see <code>UpdateRule</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteRule</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteRule</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteRule< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_rule_input::Builder, } impl<C, M, R> DeleteRule<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteRule`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DeleteRuleOutput, aws_smithy_http::result::SdkError<crate::error::DeleteRuleError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteRuleInputOperationOutputAlias, crate::output::DeleteRuleOutput, crate::error::DeleteRuleError, crate::input::DeleteRuleInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RuleId</code> of the <code>Rule</code> that you want to delete. <code>RuleId</code> is returned by <code>CreateRule</code> and by <code>ListRules</code>.</p> pub fn rule_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.rule_id(input.into()); self } /// <p>The <code>RuleId</code> of the <code>Rule</code> that you want to delete. <code>RuleId</code> is returned by <code>CreateRule</code> and by <code>ListRules</code>.</p> pub fn set_rule_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_rule_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteRuleGroup`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes a <code>RuleGroup</code>. You can't delete a <code>RuleGroup</code> if it's still used in any <code>WebACL</code> objects or if it still includes any rules.</p> /// <p>If you just want to remove a <code>RuleGroup</code> from a <code>WebACL</code>, use <code>UpdateWebACL</code>.</p> /// <p>To permanently delete a <code>RuleGroup</code> from AWS WAF, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>RuleGroup</code> to remove rules, if any. For more information, see <code>UpdateRuleGroup</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteRuleGroup</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteRuleGroup</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteRuleGroup< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_rule_group_input::Builder, } impl<C, M, R> DeleteRuleGroup<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteRuleGroup`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. 
/// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteRuleGroupOutput, aws_smithy_http::result::SdkError<crate::error::DeleteRuleGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteRuleGroupInputOperationOutputAlias, crate::output::DeleteRuleGroupOutput, crate::error::DeleteRuleGroupError, crate::input::DeleteRuleGroupInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RuleGroupId</code> of the <code>RuleGroup</code> that you want to delete. <code>RuleGroupId</code> is returned by <code>CreateRuleGroup</code> and by <code>ListRuleGroups</code>.</p> pub fn rule_group_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.rule_group_id(input.into()); self } /// <p>The <code>RuleGroupId</code> of the <code>RuleGroup</code> that you want to delete. 
<code>RuleGroupId</code> is returned by <code>CreateRuleGroup</code> and by <code>ListRuleGroups</code>.</p> pub fn set_rule_group_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_rule_group_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteSizeConstraintSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes a <code>SizeConstraintSet</code>. You can't delete a <code>SizeConstraintSet</code> if it's still used in any <code>Rules</code> or if it still includes any <code>SizeConstraint</code> objects (any filters).</p> /// <p>If you just want to remove a <code>SizeConstraintSet</code> from a <code>Rule</code>, use <code>UpdateRule</code>.</p> /// <p>To permanently delete a <code>SizeConstraintSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>SizeConstraintSet</code> to remove filters, if any. 
For more information, see <code>UpdateSizeConstraintSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteSizeConstraintSet</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteSizeConstraintSet</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteSizeConstraintSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_size_constraint_set_input::Builder, } impl<C, M, R> DeleteSizeConstraintSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteSizeConstraintSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteSizeConstraintSetOutput, aws_smithy_http::result::SdkError<crate::error::DeleteSizeConstraintSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteSizeConstraintSetInputOperationOutputAlias, crate::output::DeleteSizeConstraintSetOutput, crate::error::DeleteSizeConstraintSetError, crate::input::DeleteSizeConstraintSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>SizeConstraintSetId</code> of the <code>SizeConstraintSet</code> that you want to delete. <code>SizeConstraintSetId</code> is returned by <code>CreateSizeConstraintSet</code> and by <code>ListSizeConstraintSets</code>.</p> pub fn size_constraint_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.size_constraint_set_id(input.into()); self } /// <p>The <code>SizeConstraintSetId</code> of the <code>SizeConstraintSet</code> that you want to delete. <code>SizeConstraintSetId</code> is returned by <code>CreateSizeConstraintSet</code> and by <code>ListSizeConstraintSets</code>.</p> pub fn set_size_constraint_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_size_constraint_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteSqlInjectionMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. 
With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes a <code>SqlInjectionMatchSet</code>. You can't delete a <code>SqlInjectionMatchSet</code> if it's still used in any <code>Rules</code> or if it still contains any <code>SqlInjectionMatchTuple</code> objects.</p> /// <p>If you just want to remove a <code>SqlInjectionMatchSet</code> from a <code>Rule</code>, use <code>UpdateRule</code>.</p> /// <p>To permanently delete a <code>SqlInjectionMatchSet</code> from AWS WAF, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>SqlInjectionMatchSet</code> to remove filters, if any. For more information, see <code>UpdateSqlInjectionMatchSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteSqlInjectionMatchSet</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteSqlInjectionMatchSet</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteSqlInjectionMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_sql_injection_match_set_input::Builder, } impl<C, M, R> DeleteSqlInjectionMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteSqlInjectionMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteSqlInjectionMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::DeleteSqlInjectionMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteSqlInjectionMatchSetInputOperationOutputAlias, crate::output::DeleteSqlInjectionMatchSetOutput, crate::error::DeleteSqlInjectionMatchSetError, crate::input::DeleteSqlInjectionMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>SqlInjectionMatchSetId</code> of the <code>SqlInjectionMatchSet</code> that you want to delete. <code>SqlInjectionMatchSetId</code> is returned by <code>CreateSqlInjectionMatchSet</code> and by <code>ListSqlInjectionMatchSets</code>.</p> pub fn sql_injection_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.sql_injection_match_set_id(input.into()); self } /// <p>The <code>SqlInjectionMatchSetId</code> of the <code>SqlInjectionMatchSet</code> that you want to delete. 
<code>SqlInjectionMatchSetId</code> is returned by <code>CreateSqlInjectionMatchSet</code> and by <code>ListSqlInjectionMatchSets</code>.</p> pub fn set_sql_injection_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_sql_injection_match_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteWebACL`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes a <code>WebACL</code>. You can't delete a <code>WebACL</code> if it still contains any <code>Rules</code>.</p> /// <p>To delete a <code>WebACL</code>, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>WebACL</code> to remove <code>Rules</code>, if any. 
For more information, see <code>UpdateWebACL</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteWebACL</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteWebACL</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteWebACL< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_web_acl_input::Builder, } impl<C, M, R> DeleteWebACL<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteWebACL`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteWebAclOutput, aws_smithy_http::result::SdkError<crate::error::DeleteWebACLError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteWebAclInputOperationOutputAlias, crate::output::DeleteWebAclOutput, crate::error::DeleteWebACLError, crate::input::DeleteWebAclInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>WebACLId</code> of the <code>WebACL</code> that you want to delete. <code>WebACLId</code> is returned by <code>CreateWebACL</code> and by <code>ListWebACLs</code>.</p> pub fn web_acl_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.web_acl_id(input.into()); self } /// <p>The <code>WebACLId</code> of the <code>WebACL</code> that you want to delete. <code>WebACLId</code> is returned by <code>CreateWebACL</code> and by <code>ListWebACLs</code>.</p> pub fn set_web_acl_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_web_acl_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DeleteXssMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Permanently deletes an <code>XssMatchSet</code>. 
You can't delete an <code>XssMatchSet</code> if it's still used in any <code>Rules</code> or if it still contains any <code>XssMatchTuple</code> objects.</p> /// <p>If you just want to remove an <code>XssMatchSet</code> from a <code>Rule</code>, use <code>UpdateRule</code>.</p> /// <p>To permanently delete an <code>XssMatchSet</code> from AWS WAF, perform the following steps:</p> /// <ol> /// <li> <p>Update the <code>XssMatchSet</code> to remove filters, if any. For more information, see <code>UpdateXssMatchSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of a <code>DeleteXssMatchSet</code> request.</p> </li> /// <li> <p>Submit a <code>DeleteXssMatchSet</code> request.</p> </li> /// </ol> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteXssMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_xss_match_set_input::Builder, } impl<C, M, R> DeleteXssMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteXssMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DeleteXssMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::DeleteXssMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteXssMatchSetInputOperationOutputAlias, crate::output::DeleteXssMatchSetOutput, crate::error::DeleteXssMatchSetError, crate::input::DeleteXssMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>XssMatchSetId</code> of the <code>XssMatchSet</code> that you want to delete. <code>XssMatchSetId</code> is returned by <code>CreateXssMatchSet</code> and by <code>ListXssMatchSets</code>.</p> pub fn xss_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.xss_match_set_id(input.into()); self } /// <p>The <code>XssMatchSetId</code> of the <code>XssMatchSet</code> that you want to delete. <code>XssMatchSetId</code> is returned by <code>CreateXssMatchSet</code> and by <code>ListXssMatchSets</code>.</p> pub fn set_xss_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_xss_match_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `DisassociateWebACL`. 
/// /// <note> /// <p>This is <b>AWS WAF Classic Regional</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Removes a web ACL from the specified resource, either an application load balancer or Amazon API Gateway stage.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DisassociateWebACL< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::disassociate_web_acl_input::Builder, } impl<C, M, R> DisassociateWebACL<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DisassociateWebACL`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DisassociateWebAclOutput, aws_smithy_http::result::SdkError<crate::error::DisassociateWebACLError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DisassociateWebAclInputOperationOutputAlias, crate::output::DisassociateWebAclOutput, crate::error::DisassociateWebACLError, crate::input::DisassociateWebAclInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The ARN (Amazon Resource Name) of the resource from which the web ACL is being removed, either an application load balancer or Amazon API Gateway stage.</p> /// <p>The ARN should be in one of the following formats:</p> /// <ul> /// <li> <p>For an Application Load Balancer: <code>arn:aws:elasticloadbalancing:<i>region</i>:<i>account-id</i>:loadbalancer/app/<i>load-balancer-name</i>/<i>load-balancer-id</i> </code> </p> </li> /// <li> <p>For an Amazon API Gateway stage: <code>arn:aws:apigateway:<i>region</i>::/restapis/<i>api-id</i>/stages/<i>stage-name</i> </code> </p> </li> /// </ul> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The ARN (Amazon Resource Name) of the resource from which the web ACL is being removed, either an application load balancer or Amazon API Gateway stage.</p> /// <p>The ARN should be in one of the following formats:</p> /// <ul> /// <li> <p>For an Application Load Balancer: <code>arn:aws:elasticloadbalancing:<i>region</i>:<i>account-id</i>:loadbalancer/app/<i>load-balancer-name</i>/<i>load-balancer-id</i> </code> </p> </li> /// <li> <p>For an Amazon API Gateway stage: 
<code>arn:aws:apigateway:<i>region</i>::/restapis/<i>api-id</i>/stages/<i>stage-name</i> </code> </p> </li> /// </ul> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } } /// Fluent builder constructing a request to `GetByteMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns the <code>ByteMatchSet</code> specified by <code>ByteMatchSetId</code>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetByteMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_byte_match_set_input::Builder, } impl<C, M, R> GetByteMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetByteMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::GetByteMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::GetByteMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetByteMatchSetInputOperationOutputAlias, crate::output::GetByteMatchSetOutput, crate::error::GetByteMatchSetError, crate::input::GetByteMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>ByteMatchSetId</code> of the <code>ByteMatchSet</code> that you want to get. <code>ByteMatchSetId</code> is returned by <code>CreateByteMatchSet</code> and by <code>ListByteMatchSets</code>.</p> pub fn byte_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.byte_match_set_id(input.into()); self } /// <p>The <code>ByteMatchSetId</code> of the <code>ByteMatchSet</code> that you want to get. <code>ByteMatchSetId</code> is returned by <code>CreateByteMatchSet</code> and by <code>ListByteMatchSets</code>.</p> pub fn set_byte_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_byte_match_set_id(input); self } } /// Fluent builder constructing a request to `GetChangeToken`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. 
With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>When you want to create, update, or delete AWS WAF objects, get a change token and include the change token in the create, update, or delete request. Change tokens ensure that your application doesn't submit conflicting requests to AWS WAF.</p> /// <p>Each create, update, or delete request must use a unique change token. If your application submits a <code>GetChangeToken</code> request and then submits a second <code>GetChangeToken</code> request before submitting a create, update, or delete request, the second <code>GetChangeToken</code> request returns the same value as the first <code>GetChangeToken</code> request.</p> /// <p>When you use a change token in a create, update, or delete request, the status of the change token changes to <code>PENDING</code>, which indicates that AWS WAF is propagating the change to all AWS WAF servers. Use <code>GetChangeTokenStatus</code> to determine the status of your change token.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetChangeToken< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_change_token_input::Builder, } impl<C, M, R> GetChangeToken<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetChangeToken`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::GetChangeTokenOutput, aws_smithy_http::result::SdkError<crate::error::GetChangeTokenError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetChangeTokenInputOperationOutputAlias, crate::output::GetChangeTokenOutput, crate::error::GetChangeTokenError, crate::input::GetChangeTokenInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } } /// Fluent builder constructing a request to `GetChangeTokenStatus`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns the status of a <code>ChangeToken</code> that you got by calling <code>GetChangeToken</code>. 
<code>ChangeTokenStatus</code> is one of the following values:</p> /// <ul> /// <li> <p> <code>PROVISIONED</code>: You requested the change token by calling <code>GetChangeToken</code>, but you haven't used it yet in a call to create, update, or delete an AWS WAF object.</p> </li> /// <li> <p> <code>PENDING</code>: AWS WAF is propagating the create, update, or delete request to all AWS WAF servers.</p> </li> /// <li> <p> <code>INSYNC</code>: Propagation is complete.</p> </li> /// </ul> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetChangeTokenStatus< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_change_token_status_input::Builder, } impl<C, M, R> GetChangeTokenStatus<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetChangeTokenStatus`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::GetChangeTokenStatusOutput, aws_smithy_http::result::SdkError<crate::error::GetChangeTokenStatusError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetChangeTokenStatusInputOperationOutputAlias, crate::output::GetChangeTokenStatusOutput, crate::error::GetChangeTokenStatusError, crate::input::GetChangeTokenStatusInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The change token for which you want to get the status. This change token was previously returned in the <code>GetChangeToken</code> response.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The change token for which you want to get the status. This change token was previously returned in the <code>GetChangeToken</code> response.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `GetGeoMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns the <code>GeoMatchSet</code> that is specified by <code>GeoMatchSetId</code>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetGeoMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_geo_match_set_input::Builder, } impl<C, M, R> GetGeoMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetGeoMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::GetGeoMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::GetGeoMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetGeoMatchSetInputOperationOutputAlias, crate::output::GetGeoMatchSetOutput, crate::error::GetGeoMatchSetError, crate::input::GetGeoMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>GeoMatchSetId</code> of the <code>GeoMatchSet</code> that you want to get. 
<code>GeoMatchSetId</code> is returned by <code>CreateGeoMatchSet</code> and by <code>ListGeoMatchSets</code>.</p> pub fn geo_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.geo_match_set_id(input.into()); self } /// <p>The <code>GeoMatchSetId</code> of the <code>GeoMatchSet</code> that you want to get. <code>GeoMatchSetId</code> is returned by <code>CreateGeoMatchSet</code> and by <code>ListGeoMatchSets</code>.</p> pub fn set_geo_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_geo_match_set_id(input); self } } /// Fluent builder constructing a request to `GetIPSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns the <code>IPSet</code> that is specified by <code>IPSetId</code>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetIPSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_ip_set_input::Builder, } impl<C, M, R> GetIPSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetIPSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. 
/// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::GetIpSetOutput, aws_smithy_http::result::SdkError<crate::error::GetIPSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetIpSetInputOperationOutputAlias, crate::output::GetIpSetOutput, crate::error::GetIPSetError, crate::input::GetIpSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>IPSetId</code> of the <code>IPSet</code> that you want to get. <code>IPSetId</code> is returned by <code>CreateIPSet</code> and by <code>ListIPSets</code>.</p> pub fn ip_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.ip_set_id(input.into()); self } /// <p>The <code>IPSetId</code> of the <code>IPSet</code> that you want to get. <code>IPSetId</code> is returned by <code>CreateIPSet</code> and by <code>ListIPSets</code>.</p> pub fn set_ip_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_ip_set_id(input); self } } /// Fluent builder constructing a request to `GetLoggingConfiguration`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns the <code>LoggingConfiguration</code> for the specified web ACL.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetLoggingConfiguration< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_logging_configuration_input::Builder, } impl<C, M, R> GetLoggingConfiguration<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetLoggingConfiguration`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetLoggingConfigurationOutput,
        aws_smithy_http::result::SdkError<crate::error::GetLoggingConfigurationError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetLoggingConfigurationInputOperationOutputAlias,
            crate::output::GetLoggingConfigurationOutput,
            crate::error::GetLoggingConfigurationError,
            crate::input::GetLoggingConfigurationInputOperationRetryAlias,
        >,
    {
        // Build the operation input, turn it into an operation, then dispatch it.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let operation = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The Amazon Resource Name (ARN) of the web ACL for which you want to get the <code>LoggingConfiguration</code>.</p>
    pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.resource_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the web ACL for which you want to get the <code>LoggingConfiguration</code>.</p>
    pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_resource_arn(input);
        self
    }
}
/// Fluent builder constructing a request to `GetPermissionPolicy`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see
/// <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a>
/// in the developer guide.</p>
/// <p><b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the
/// <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>.
/// With the latest version, AWS WAF has a single set of endpoints for regional and global use.</p>
/// </note>
/// <p>Returns the IAM policy attached to the RuleGroup.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetPermissionPolicy<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::get_permission_policy_input::Builder,
}
impl<C, M, R> GetPermissionPolicy<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetPermissionPolicy` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { handle, inner: Default::default() }
    }

    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against. By default, any retryable failures will be retried twice;
    /// retry behavior is configurable with the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetPermissionPolicyOutput,
        aws_smithy_http::result::SdkError<crate::error::GetPermissionPolicyError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetPermissionPolicyInputOperationOutputAlias,
            crate::output::GetPermissionPolicyOutput,
            crate::error::GetPermissionPolicyError,
            crate::input::GetPermissionPolicyInputOperationRetryAlias,
        >,
    {
        // Build the operation input, turn it into an operation, then dispatch it.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let operation = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The Amazon Resource Name (ARN) of the RuleGroup for which you want to get the policy.</p>
    pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.resource_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the RuleGroup for which you want to get the policy.</p>
    pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_resource_arn(input);
        self
    }
}
/// Fluent builder constructing a request to `GetRateBasedRule`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see
/// <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a>
/// in the developer guide.</p>
/// <p><b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the
/// <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>.
/// With the latest version, AWS WAF has a single set of endpoints for regional and global use.</p>
/// </note>
/// <p>Returns the <code>RateBasedRule</code> that is specified by the <code>RuleId</code> that you included in the <code>GetRateBasedRule</code> request.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetRateBasedRule<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::get_rate_based_rule_input::Builder,
}
impl<C, M, R> GetRateBasedRule<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetRateBasedRule` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { handle, inner: Default::default() }
    }

    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against. By default, any retryable failures will be retried twice;
    /// retry behavior is configurable with the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetRateBasedRuleOutput,
        aws_smithy_http::result::SdkError<crate::error::GetRateBasedRuleError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetRateBasedRuleInputOperationOutputAlias,
            crate::output::GetRateBasedRuleOutput,
            crate::error::GetRateBasedRuleError,
            crate::input::GetRateBasedRuleInputOperationRetryAlias,
        >,
    {
        // Build the operation input, turn it into an operation, then dispatch it.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let operation = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The <code>RuleId</code> of the <code>RateBasedRule</code> that you want to get. <code>RuleId</code> is returned by <code>CreateRateBasedRule</code> and by <code>ListRateBasedRules</code>.</p>
    pub fn rule_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.rule_id(input.into());
        self
    }
    /// <p>The <code>RuleId</code> of the <code>RateBasedRule</code> that you want to get. <code>RuleId</code> is returned by <code>CreateRateBasedRule</code> and by <code>ListRateBasedRules</code>.</p>
    pub fn set_rule_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_rule_id(input);
        self
    }
}
/// Fluent builder constructing a request to `GetRateBasedRuleManagedKeys`.
/// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns an array of IP addresses currently being blocked by the <code>RateBasedRule</code> that is specified by the <code>RuleId</code>. The maximum number of managed keys that will be blocked is 10,000. If more than 10,000 addresses exceed the rate limit, the 10,000 addresses with the highest rates will be blocked.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetRateBasedRuleManagedKeys< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_rate_based_rule_managed_keys_input::Builder, } impl<C, M, R> GetRateBasedRuleManagedKeys<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetRateBasedRuleManagedKeys`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::GetRateBasedRuleManagedKeysOutput, aws_smithy_http::result::SdkError<crate::error::GetRateBasedRuleManagedKeysError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetRateBasedRuleManagedKeysInputOperationOutputAlias, crate::output::GetRateBasedRuleManagedKeysOutput, crate::error::GetRateBasedRuleManagedKeysError, crate::input::GetRateBasedRuleManagedKeysInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RuleId</code> of the <code>RateBasedRule</code> for which you want to get a list of <code>ManagedKeys</code>. <code>RuleId</code> is returned by <code>CreateRateBasedRule</code> and by <code>ListRateBasedRules</code>.</p> pub fn rule_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.rule_id(input.into()); self } /// <p>The <code>RuleId</code> of the <code>RateBasedRule</code> for which you want to get a list of <code>ManagedKeys</code>. <code>RuleId</code> is returned by <code>CreateRateBasedRule</code> and by <code>ListRateBasedRules</code>.</p> pub fn set_rule_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_rule_id(input); self } /// <p>A null value and not currently used. Do not include this in your request.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>A null value and not currently used. 
Do not include this in your request.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } } /// Fluent builder constructing a request to `GetRegexMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns the <code>RegexMatchSet</code> specified by <code>RegexMatchSetId</code>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetRegexMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_regex_match_set_input::Builder, } impl<C, M, R> GetRegexMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetRegexMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::GetRegexMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::GetRegexMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetRegexMatchSetInputOperationOutputAlias, crate::output::GetRegexMatchSetOutput, crate::error::GetRegexMatchSetError, crate::input::GetRegexMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RegexMatchSetId</code> of the <code>RegexMatchSet</code> that you want to get. <code>RegexMatchSetId</code> is returned by <code>CreateRegexMatchSet</code> and by <code>ListRegexMatchSets</code>.</p> pub fn regex_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.regex_match_set_id(input.into()); self } /// <p>The <code>RegexMatchSetId</code> of the <code>RegexMatchSet</code> that you want to get. <code>RegexMatchSetId</code> is returned by <code>CreateRegexMatchSet</code> and by <code>ListRegexMatchSets</code>.</p> pub fn set_regex_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_regex_match_set_id(input); self } } /// Fluent builder constructing a request to `GetRegexPatternSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. 
With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns the <code>RegexPatternSet</code> specified by <code>RegexPatternSetId</code>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetRegexPatternSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_regex_pattern_set_input::Builder, } impl<C, M, R> GetRegexPatternSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetRegexPatternSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::GetRegexPatternSetOutput, aws_smithy_http::result::SdkError<crate::error::GetRegexPatternSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetRegexPatternSetInputOperationOutputAlias, crate::output::GetRegexPatternSetOutput, crate::error::GetRegexPatternSetError, crate::input::GetRegexPatternSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>RegexPatternSetId</code> of the <code>RegexPatternSet</code> that you want to get. <code>RegexPatternSetId</code> is returned by <code>CreateRegexPatternSet</code> and by <code>ListRegexPatternSets</code>.</p>
    pub fn regex_pattern_set_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.regex_pattern_set_id(input.into());
        self
    }
    /// <p>The <code>RegexPatternSetId</code> of the <code>RegexPatternSet</code> that you want to get. <code>RegexPatternSetId</code> is returned by <code>CreateRegexPatternSet</code> and by <code>ListRegexPatternSets</code>.</p>
    pub fn set_regex_pattern_set_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_regex_pattern_set_id(input);
        self
    }
}
/// Fluent builder constructing a request to `GetRule`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see
/// <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a>
/// in the developer guide.</p>
/// <p><b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the
/// <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>.
/// With the latest version, AWS WAF has a single set of endpoints for regional and global use.</p>
/// </note>
/// <p>Returns the <code>Rule</code> that is specified by the <code>RuleId</code> that you included in the <code>GetRule</code> request.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetRule<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::get_rule_input::Builder,
}
impl<C, M, R> GetRule<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetRule` fluent builder.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self { handle, inner: Default::default() }
    }

    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against. By default, any retryable failures will be retried twice;
    /// retry behavior is configurable with the
    /// [RetryConfig](aws_smithy_types::retry::RetryConfig) when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetRuleOutput,
        aws_smithy_http::result::SdkError<crate::error::GetRuleError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetRuleInputOperationOutputAlias,
            crate::output::GetRuleOutput,
            crate::error::GetRuleError,
            crate::input::GetRuleInputOperationRetryAlias,
        >,
    {
        // Build the operation input, turn it into an operation, then dispatch it.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let operation = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The <code>RuleId</code> of the <code>Rule</code> that you want to get. <code>RuleId</code> is returned by <code>CreateRule</code> and by <code>ListRules</code>.</p>
    pub fn rule_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.rule_id(input.into());
        self
    }
    /// <p>The <code>RuleId</code> of the <code>Rule</code> that you want to get. <code>RuleId</code> is returned by <code>CreateRule</code> and by <code>ListRules</code>.</p>
    pub fn set_rule_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_rule_id(input);
        self
    }
}
/// Fluent builder constructing a request to `GetRuleGroup`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see
/// <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a>
/// in the developer guide.</p>
/// <p><b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the
/// <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>.
/// With the latest version, AWS WAF has a single set of endpoints for regional and global use.</p>
/// </note>
/// <p>Returns the <code>RuleGroup</code> that is specified by the <code>RuleGroupId</code> that you included in the <code>GetRuleGroup</code> request.</p>
/// <p>To view the rules in a rule group, use <code>ListActivatedRulesInRuleGroup</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetRuleGroup<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::get_rule_group_input::Builder,
}
impl<C, M, R> GetRuleGroup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetRuleGroup`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::GetRuleGroupOutput, aws_smithy_http::result::SdkError<crate::error::GetRuleGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetRuleGroupInputOperationOutputAlias, crate::output::GetRuleGroupOutput, crate::error::GetRuleGroupError, crate::input::GetRuleGroupInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RuleGroupId</code> of the <code>RuleGroup</code> that you want to get. <code>RuleGroupId</code> is returned by <code>CreateRuleGroup</code> and by <code>ListRuleGroups</code>.</p> pub fn rule_group_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.rule_group_id(input.into()); self } /// <p>The <code>RuleGroupId</code> of the <code>RuleGroup</code> that you want to get. <code>RuleGroupId</code> is returned by <code>CreateRuleGroup</code> and by <code>ListRuleGroups</code>.</p> pub fn set_rule_group_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_rule_group_id(input); self } } /// Fluent builder constructing a request to `GetSampledRequests`. 
/// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.</p> /// <p> <code>GetSampledRequests</code> returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, <code>GetSampledRequests</code> returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetSampledRequests< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_sampled_requests_input::Builder, } impl<C, M, R> GetSampledRequests<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetSampledRequests`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::GetSampledRequestsOutput, aws_smithy_http::result::SdkError<crate::error::GetSampledRequestsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetSampledRequestsInputOperationOutputAlias, crate::output::GetSampledRequestsOutput, crate::error::GetSampledRequestsError, crate::input::GetSampledRequestsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>WebACLId</code> of the <code>WebACL</code> for which you want <code>GetSampledRequests</code> to return a sample of requests.</p> pub fn web_acl_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.web_acl_id(input.into()); self } /// <p>The <code>WebACLId</code> of the <code>WebACL</code> for which you want <code>GetSampledRequests</code> to return a sample of requests.</p> pub fn set_web_acl_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_web_acl_id(input); self } /// <p> <code>RuleId</code> is one of three values:</p> /// <ul> /// <li> <p>The <code>RuleId</code> of the <code>Rule</code> or the <code>RuleGroupId</code> of the <code>RuleGroup</code> for which you want <code>GetSampledRequests</code> to return a sample of requests.</p> </li> /// <li> <p> <code>Default_Action</code>, which causes <code>GetSampledRequests</code> to return a sample of the requests that didn't match any of the rules in the specified <code>WebACL</code>.</p> </li> /// </ul> pub fn rule_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.rule_id(input.into()); self } /// <p> <code>RuleId</code> is one of three values:</p> /// <ul> /// <li> <p>The <code>RuleId</code> of the <code>Rule</code> or the <code>RuleGroupId</code> of the <code>RuleGroup</code> for which you want <code>GetSampledRequests</code> to return a sample of requests.</p> </li> /// <li> <p> <code>Default_Action</code>, which causes <code>GetSampledRequests</code> to return a sample of the requests that didn't match any of the rules in the specified <code>WebACL</code>.</p> </li> /// </ul> pub fn set_rule_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_rule_id(input); self } 
/// <p>The start date and time and the end date and time of the range for which you want <code>GetSampledRequests</code> to return a sample of requests. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, <code>Z</code>. For example, <code>"2016-09-27T14:50Z"</code>. You can specify any time range in the previous three hours.</p> pub fn time_window(mut self, input: crate::model::TimeWindow) -> Self { self.inner = self.inner.time_window(input); self } /// <p>The start date and time and the end date and time of the range for which you want <code>GetSampledRequests</code> to return a sample of requests. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, <code>Z</code>. For example, <code>"2016-09-27T14:50Z"</code>. You can specify any time range in the previous three hours.</p> pub fn set_time_window( mut self, input: std::option::Option<crate::model::TimeWindow>, ) -> Self { self.inner = self.inner.set_time_window(input); self } /// <p>The number of requests that you want AWS WAF to return from among the first 5,000 requests that your AWS resource received during the time range. If your resource received fewer requests than the value of <code>MaxItems</code>, <code>GetSampledRequests</code> returns information about all of them. </p> pub fn max_items(mut self, input: i64) -> Self { self.inner = self.inner.max_items(input); self } /// <p>The number of requests that you want AWS WAF to return from among the first 5,000 requests that your AWS resource received during the time range. If your resource received fewer requests than the value of <code>MaxItems</code>, <code>GetSampledRequests</code> returns information about all of them. </p> pub fn set_max_items(mut self, input: std::option::Option<i64>) -> Self { self.inner = self.inner.set_max_items(input); self } } /// Fluent builder constructing a request to `GetSizeConstraintSet`. 
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Returns the <code>SizeConstraintSet</code> specified by <code>SizeConstraintSetId</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetSizeConstraintSet<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::get_size_constraint_set_input::Builder,
}
impl<C, M, R> GetSizeConstraintSet<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetSizeConstraintSet`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetSizeConstraintSetOutput,
        aws_smithy_http::result::SdkError<crate::error::GetSizeConstraintSetError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetSizeConstraintSetInputOperationOutputAlias,
            crate::output::GetSizeConstraintSetOutput,
            crate::error::GetSizeConstraintSetError,
            crate::input::GetSizeConstraintSetInputOperationRetryAlias,
        >,
    {
        // Build the input, then lower it to an operation; failures in either
        // step are construction errors rather than service errors.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>SizeConstraintSetId</code> of the <code>SizeConstraintSet</code> that you want to get. <code>SizeConstraintSetId</code> is returned by <code>CreateSizeConstraintSet</code> and by <code>ListSizeConstraintSets</code>.</p>
    pub fn size_constraint_set_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.size_constraint_set_id(input.into());
        self
    }
    /// <p>The <code>SizeConstraintSetId</code> of the <code>SizeConstraintSet</code> that you want to get. <code>SizeConstraintSetId</code> is returned by <code>CreateSizeConstraintSet</code> and by <code>ListSizeConstraintSets</code>.</p>
    pub fn set_size_constraint_set_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_size_constraint_set_id(input);
        self
    }
}
/// Fluent builder constructing a request to `GetSqlInjectionMatchSet`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation.
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Returns the <code>SqlInjectionMatchSet</code> that is specified by <code>SqlInjectionMatchSetId</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetSqlInjectionMatchSet<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::get_sql_injection_match_set_input::Builder,
}
impl<C, M, R> GetSqlInjectionMatchSet<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetSqlInjectionMatchSet`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetSqlInjectionMatchSetOutput,
        aws_smithy_http::result::SdkError<crate::error::GetSqlInjectionMatchSetError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetSqlInjectionMatchSetInputOperationOutputAlias,
            crate::output::GetSqlInjectionMatchSetOutput,
            crate::error::GetSqlInjectionMatchSetError,
            crate::input::GetSqlInjectionMatchSetInputOperationRetryAlias,
        >,
    {
        // Build the input, then lower it to an operation; failures in either
        // step are construction errors rather than service errors.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>SqlInjectionMatchSetId</code> of the <code>SqlInjectionMatchSet</code> that you want to get. <code>SqlInjectionMatchSetId</code> is returned by <code>CreateSqlInjectionMatchSet</code> and by <code>ListSqlInjectionMatchSets</code>.</p>
    pub fn sql_injection_match_set_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.sql_injection_match_set_id(input.into());
        self
    }
    /// <p>The <code>SqlInjectionMatchSetId</code> of the <code>SqlInjectionMatchSet</code> that you want to get. <code>SqlInjectionMatchSetId</code> is returned by <code>CreateSqlInjectionMatchSet</code> and by <code>ListSqlInjectionMatchSets</code>.</p>
    pub fn set_sql_injection_match_set_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_sql_injection_match_set_id(input);
        self
    }
}
/// Fluent builder constructing a request to `GetWebACL`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation.
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Returns the <code>WebACL</code> that is specified by <code>WebACLId</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetWebACL<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::get_web_acl_input::Builder,
}
impl<C, M, R> GetWebACL<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetWebACL`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetWebAclOutput,
        aws_smithy_http::result::SdkError<crate::error::GetWebACLError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetWebAclInputOperationOutputAlias,
            crate::output::GetWebAclOutput,
            crate::error::GetWebACLError,
            crate::input::GetWebAclInputOperationRetryAlias,
        >,
    {
        // Build the input, then lower it to an operation; failures in either
        // step are construction errors rather than service errors.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>WebACLId</code> of the <code>WebACL</code> that you want to get. <code>WebACLId</code> is returned by <code>CreateWebACL</code> and by <code>ListWebACLs</code>.</p>
    pub fn web_acl_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.web_acl_id(input.into());
        self
    }
    /// <p>The <code>WebACLId</code> of the <code>WebACL</code> that you want to get. <code>WebACLId</code> is returned by <code>CreateWebACL</code> and by <code>ListWebACLs</code>.</p>
    pub fn set_web_acl_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_web_acl_id(input);
        self
    }
}
/// Fluent builder constructing a request to `GetWebACLForResource`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic Regional</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use.
</p> /// </note> /// <p>Returns the web ACL for the specified resource, either an application load balancer or Amazon API Gateway stage.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetWebACLForResource< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::get_web_acl_for_resource_input::Builder, } impl<C, M, R> GetWebACLForResource<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `GetWebACLForResource`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::GetWebAclForResourceOutput, aws_smithy_http::result::SdkError<crate::error::GetWebACLForResourceError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::GetWebAclForResourceInputOperationOutputAlias, crate::output::GetWebAclForResourceOutput, crate::error::GetWebACLForResourceError, crate::input::GetWebAclForResourceInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The ARN (Amazon Resource Name) of the resource for which to get the web ACL, either an application load balancer or Amazon API Gateway stage.</p> /// <p>The ARN should be in one of the following formats:</p> /// <ul> /// <li> <p>For an Application Load Balancer: <code>arn:aws:elasticloadbalancing:<i>region</i>:<i>account-id</i>:loadbalancer/app/<i>load-balancer-name</i>/<i>load-balancer-id</i> </code> </p> </li> /// <li> <p>For an Amazon API Gateway stage: <code>arn:aws:apigateway:<i>region</i>::/restapis/<i>api-id</i>/stages/<i>stage-name</i> </code> </p> </li> /// </ul> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The ARN (Amazon Resource Name) of the resource for which to get the web ACL, either an application load balancer or Amazon API Gateway stage.</p> /// <p>The ARN should be in one of the following formats:</p> /// <ul> /// <li> <p>For an Application Load Balancer: <code>arn:aws:elasticloadbalancing:<i>region</i>:<i>account-id</i>:loadbalancer/app/<i>load-balancer-name</i>/<i>load-balancer-id</i> </code> </p> </li> /// <li> <p>For an Amazon API Gateway stage: <code>arn:aws:apigateway:<i>region</i>::/restapis/<i>api-id</i>/stages/<i>stage-name</i> </code> </p> </li> /// </ul> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } } /// Fluent builder constructing a request to `GetXssMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Returns the <code>XssMatchSet</code> that is specified by <code>XssMatchSetId</code>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetXssMatchSet<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::get_xss_match_set_input::Builder,
}
impl<C, M, R> GetXssMatchSet<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetXssMatchSet`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetXssMatchSetOutput,
        aws_smithy_http::result::SdkError<crate::error::GetXssMatchSetError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetXssMatchSetInputOperationOutputAlias,
            crate::output::GetXssMatchSetOutput,
            crate::error::GetXssMatchSetError,
            crate::input::GetXssMatchSetInputOperationRetryAlias,
        >,
    {
        // Build the input, then lower it to an operation; failures in either
        // step are construction errors rather than service errors.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>XssMatchSetId</code> of the <code>XssMatchSet</code> that you want to get. <code>XssMatchSetId</code> is returned by <code>CreateXssMatchSet</code> and by <code>ListXssMatchSets</code>.</p>
    pub fn xss_match_set_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.xss_match_set_id(input.into());
        self
    }
    /// <p>The <code>XssMatchSetId</code> of the <code>XssMatchSet</code> that you want to get. <code>XssMatchSetId</code> is returned by <code>CreateXssMatchSet</code> and by <code>ListXssMatchSets</code>.</p>
    pub fn set_xss_match_set_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_xss_match_set_id(input);
        self
    }
}
/// Fluent builder constructing a request to `ListActivatedRulesInRuleGroup`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>.
With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns an array of <code>ActivatedRule</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListActivatedRulesInRuleGroup< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_activated_rules_in_rule_group_input::Builder, } impl<C, M, R> ListActivatedRulesInRuleGroup<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListActivatedRulesInRuleGroup`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListActivatedRulesInRuleGroupOutput, aws_smithy_http::result::SdkError<crate::error::ListActivatedRulesInRuleGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListActivatedRulesInRuleGroupInputOperationOutputAlias, crate::output::ListActivatedRulesInRuleGroupOutput, crate::error::ListActivatedRulesInRuleGroupError, crate::input::ListActivatedRulesInRuleGroupInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RuleGroupId</code> of the <code>RuleGroup</code> for which you want to get a list of <code>ActivatedRule</code> objects.</p> pub fn rule_group_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.rule_group_id(input.into()); self } /// <p>The <code>RuleGroupId</code> of the <code>RuleGroup</code> for which you want to get a list of <code>ActivatedRule</code> objects.</p> pub fn set_rule_group_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_rule_group_id(input); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>ActivatedRules</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>ActivatedRules</code>. For the second and subsequent <code>ListActivatedRulesInRuleGroup</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>ActivatedRules</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>ActivatedRules</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>ActivatedRules</code>. 
For the second and subsequent <code>ListActivatedRulesInRuleGroup</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>ActivatedRules</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>ActivatedRules</code> that you want AWS WAF to return for this request. If you have more <code>ActivatedRules</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>ActivatedRules</code>.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>ActivatedRules</code> that you want AWS WAF to return for this request. If you have more <code>ActivatedRules</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>ActivatedRules</code>.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListByteMatchSets`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of <code>ByteMatchSetSummary</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListByteMatchSets< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_byte_match_sets_input::Builder, } impl<C, M, R> ListByteMatchSets<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListByteMatchSets`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListByteMatchSetsOutput, aws_smithy_http::result::SdkError<crate::error::ListByteMatchSetsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListByteMatchSetsInputOperationOutputAlias, crate::output::ListByteMatchSetsOutput, crate::error::ListByteMatchSetsError, crate::input::ListByteMatchSetsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>ByteMatchSets</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>ByteMatchSets</code>. For the second and subsequent <code>ListByteMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>ByteMatchSets</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>ByteMatchSets</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>ByteMatchSets</code>. For the second and subsequent <code>ListByteMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>ByteMatchSets</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>ByteMatchSet</code> objects that you want AWS WAF to return for this request. If you have more <code>ByteMatchSets</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>ByteMatchSet</code> objects.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>ByteMatchSet</code> objects that you want AWS WAF to return for this request. 
If you have more <code>ByteMatchSets</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>ByteMatchSet</code> objects.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListGeoMatchSets`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns an array of <code>GeoMatchSetSummary</code> objects in the response.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListGeoMatchSets< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_geo_match_sets_input::Builder, } impl<C, M, R> ListGeoMatchSets<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListGeoMatchSets`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListGeoMatchSetsOutput, aws_smithy_http::result::SdkError<crate::error::ListGeoMatchSetsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListGeoMatchSetsInputOperationOutputAlias, crate::output::ListGeoMatchSetsOutput, crate::error::ListGeoMatchSetsError, crate::input::ListGeoMatchSetsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>GeoMatchSet</code>s than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>GeoMatchSet</code> objects. For the second and subsequent <code>ListGeoMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>GeoMatchSet</code> objects.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>GeoMatchSet</code>s than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>GeoMatchSet</code> objects. 
For the second and subsequent <code>ListGeoMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>GeoMatchSet</code> objects.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>GeoMatchSet</code> objects that you want AWS WAF to return for this request. If you have more <code>GeoMatchSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>GeoMatchSet</code> objects.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>GeoMatchSet</code> objects that you want AWS WAF to return for this request. If you have more <code>GeoMatchSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>GeoMatchSet</code> objects.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListIPSets`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of <code>IPSetSummary</code> objects in the response.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListIPSets< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_ip_sets_input::Builder, } impl<C, M, R> ListIPSets<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListIPSets`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListIpSetsOutput, aws_smithy_http::result::SdkError<crate::error::ListIPSetsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListIpSetsInputOperationOutputAlias, crate::output::ListIpSetsOutput, crate::error::ListIPSetsError, crate::input::ListIpSetsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>IPSets</code>. 
For the second and subsequent <code>ListIPSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>IPSets</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>IPSets</code>. For the second and subsequent <code>ListIPSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>IPSets</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>IPSet</code> objects that you want AWS WAF to return for this request. If you have more <code>IPSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>IPSet</code> objects.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>IPSet</code> objects that you want AWS WAF to return for this request. If you have more <code>IPSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>IPSet</code> objects.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListLoggingConfigurations`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns an array of <code>LoggingConfiguration</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListLoggingConfigurations< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_logging_configurations_input::Builder, } impl<C, M, R> ListLoggingConfigurations<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListLoggingConfigurations`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::ListLoggingConfigurationsOutput, aws_smithy_http::result::SdkError<crate::error::ListLoggingConfigurationsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListLoggingConfigurationsInputOperationOutputAlias, crate::output::ListLoggingConfigurationsOutput, crate::error::ListLoggingConfigurationsError, crate::input::ListLoggingConfigurationsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>LoggingConfigurations</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>LoggingConfigurations</code>. For the second and subsequent <code>ListLoggingConfigurations</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>ListLoggingConfigurations</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>LoggingConfigurations</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>LoggingConfigurations</code>. 
For the second and subsequent <code>ListLoggingConfigurations</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>ListLoggingConfigurations</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>LoggingConfigurations</code> that you want AWS WAF to return for this request. If you have more <code>LoggingConfigurations</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>LoggingConfigurations</code>.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>LoggingConfigurations</code> that you want AWS WAF to return for this request. If you have more <code>LoggingConfigurations</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>LoggingConfigurations</code>.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListRateBasedRules`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of <code>RuleSummary</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListRateBasedRules< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_rate_based_rules_input::Builder, } impl<C, M, R> ListRateBasedRules<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListRateBasedRules`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListRateBasedRulesOutput, aws_smithy_http::result::SdkError<crate::error::ListRateBasedRulesError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListRateBasedRulesInputOperationOutputAlias, crate::output::ListRateBasedRulesOutput, crate::error::ListRateBasedRulesError, crate::input::ListRateBasedRulesInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>Rules</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>Rules</code>. For the second and subsequent <code>ListRateBasedRules</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>Rules</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>Rules</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>Rules</code>. For the second and subsequent <code>ListRateBasedRules</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>Rules</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>Rules</code> that you want AWS WAF to return for this request. If you have more <code>Rules</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>Rules</code>.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>Rules</code> that you want AWS WAF to return for this request. 
If you have more <code>Rules</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>Rules</code>.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListRegexMatchSets`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns an array of <code>RegexMatchSetSummary</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListRegexMatchSets< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_regex_match_sets_input::Builder, } impl<C, M, R> ListRegexMatchSets<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListRegexMatchSets`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListRegexMatchSetsOutput, aws_smithy_http::result::SdkError<crate::error::ListRegexMatchSetsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListRegexMatchSetsInputOperationOutputAlias, crate::output::ListRegexMatchSetsOutput, crate::error::ListRegexMatchSetsError, crate::input::ListRegexMatchSetsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>RegexMatchSet</code> objects than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>ByteMatchSets</code>. For the second and subsequent <code>ListRegexMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>RegexMatchSet</code> objects.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>RegexMatchSet</code> objects than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>ByteMatchSets</code>. 
For the second and subsequent <code>ListRegexMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>RegexMatchSet</code> objects.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>RegexMatchSet</code> objects that you want AWS WAF to return for this request. If you have more <code>RegexMatchSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>RegexMatchSet</code> objects.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>RegexMatchSet</code> objects that you want AWS WAF to return for this request. If you have more <code>RegexMatchSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>RegexMatchSet</code> objects.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListRegexPatternSets`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of <code>RegexPatternSetSummary</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListRegexPatternSets< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_regex_pattern_sets_input::Builder, } impl<C, M, R> ListRegexPatternSets<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListRegexPatternSets`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListRegexPatternSetsOutput, aws_smithy_http::result::SdkError<crate::error::ListRegexPatternSetsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListRegexPatternSetsInputOperationOutputAlias, crate::output::ListRegexPatternSetsOutput, crate::error::ListRegexPatternSetsError, crate::input::ListRegexPatternSetsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>RegexPatternSet</code> objects than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>RegexPatternSet</code> objects. For the second and subsequent <code>ListRegexPatternSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>RegexPatternSet</code> objects.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>RegexPatternSet</code> objects than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>RegexPatternSet</code> objects. For the second and subsequent <code>ListRegexPatternSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>RegexPatternSet</code> objects.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>RegexPatternSet</code> objects that you want AWS WAF to return for this request. 
If you have more <code>RegexPatternSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>RegexPatternSet</code> objects.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>RegexPatternSet</code> objects that you want AWS WAF to return for this request. If you have more <code>RegexPatternSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>RegexPatternSet</code> objects.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListResourcesForWebACL`. /// /// <note> /// <p>This is <b>AWS WAF Classic Regional</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of resources associated with the specified web ACL.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListResourcesForWebACL< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_resources_for_web_acl_input::Builder, } impl<C, M, R> ListResourcesForWebACL<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListResourcesForWebACL`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListResourcesForWebAclOutput, aws_smithy_http::result::SdkError<crate::error::ListResourcesForWebACLError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListResourcesForWebAclInputOperationOutputAlias, crate::output::ListResourcesForWebAclOutput, crate::error::ListResourcesForWebACLError, crate::input::ListResourcesForWebAclInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The unique identifier (ID) of the web ACL for which to list the associated resources.</p> pub fn web_acl_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.web_acl_id(input.into()); self } /// <p>The unique identifier (ID) of the web ACL for which to list the associated resources.</p> pub fn set_web_acl_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_web_acl_id(input); self } /// <p>The type of resource to list, either an application load balancer or Amazon API Gateway.</p> pub fn resource_type(mut self, input: crate::model::ResourceType) -> Self { self.inner = self.inner.resource_type(input); self } /// <p>The type of resource to list, either an application load balancer or Amazon API Gateway.</p> pub fn set_resource_type( mut self, input: std::option::Option<crate::model::ResourceType>, ) -> Self { self.inner = self.inner.set_resource_type(input); self } } /// Fluent builder constructing a request to `ListRuleGroups`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of <code>RuleGroup</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListRuleGroups< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_rule_groups_input::Builder, } impl<C, M, R> ListRuleGroups<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListRuleGroups`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListRuleGroupsOutput, aws_smithy_http::result::SdkError<crate::error::ListRuleGroupsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListRuleGroupsInputOperationOutputAlias, crate::output::ListRuleGroupsOutput, crate::error::ListRuleGroupsError, crate::input::ListRuleGroupsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>RuleGroups</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>RuleGroups</code>. For the second and subsequent <code>ListRuleGroups</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>RuleGroups</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>RuleGroups</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>RuleGroups</code>. For the second and subsequent <code>ListRuleGroups</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>RuleGroups</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>RuleGroups</code> that you want AWS WAF to return for this request. If you have more <code>RuleGroups</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>RuleGroups</code>.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>RuleGroups</code> that you want AWS WAF to return for this request. 
If you have more <code>RuleGroups</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>RuleGroups</code>.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListRules`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns an array of <code>RuleSummary</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListRules< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_rules_input::Builder, } impl<C, M, R> ListRules<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListRules`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListRulesOutput, aws_smithy_http::result::SdkError<crate::error::ListRulesError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListRulesInputOperationOutputAlias, crate::output::ListRulesOutput, crate::error::ListRulesError, crate::input::ListRulesInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>Rules</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>Rules</code>. For the second and subsequent <code>ListRules</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>Rules</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>Rules</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>Rules</code>. 
For the second and subsequent <code>ListRules</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>Rules</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>Rules</code> that you want AWS WAF to return for this request. If you have more <code>Rules</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>Rules</code>.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>Rules</code> that you want AWS WAF to return for this request. If you have more <code>Rules</code> than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>Rules</code>.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListSizeConstraintSets`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of <code>SizeConstraintSetSummary</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListSizeConstraintSets< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_size_constraint_sets_input::Builder, } impl<C, M, R> ListSizeConstraintSets<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListSizeConstraintSets`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListSizeConstraintSetsOutput, aws_smithy_http::result::SdkError<crate::error::ListSizeConstraintSetsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListSizeConstraintSetsInputOperationOutputAlias, crate::output::ListSizeConstraintSetsOutput, crate::error::ListSizeConstraintSetsError, crate::input::ListSizeConstraintSetsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>SizeConstraintSets</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>SizeConstraintSets</code>. For the second and subsequent <code>ListSizeConstraintSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>SizeConstraintSets</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>SizeConstraintSets</code> than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>SizeConstraintSets</code>. For the second and subsequent <code>ListSizeConstraintSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>SizeConstraintSets</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>SizeConstraintSet</code> objects that you want AWS WAF to return for this request. 
If you have more <code>SizeConstraintSets</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>SizeConstraintSet</code> objects.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>SizeConstraintSet</code> objects that you want AWS WAF to return for this request. If you have more <code>SizeConstraintSets</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>SizeConstraintSet</code> objects.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListSqlInjectionMatchSets`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of <code>SqlInjectionMatchSet</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListSqlInjectionMatchSets< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_sql_injection_match_sets_input::Builder, } impl<C, M, R> ListSqlInjectionMatchSets<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListSqlInjectionMatchSets`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListSqlInjectionMatchSetsOutput, aws_smithy_http::result::SdkError<crate::error::ListSqlInjectionMatchSetsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListSqlInjectionMatchSetsInputOperationOutputAlias, crate::output::ListSqlInjectionMatchSetsOutput, crate::error::ListSqlInjectionMatchSetsError, crate::input::ListSqlInjectionMatchSetsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>SqlInjectionMatchSet</code> objects than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>SqlInjectionMatchSets</code>. For the second and subsequent <code>ListSqlInjectionMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>SqlInjectionMatchSets</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>SqlInjectionMatchSet</code> objects than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>SqlInjectionMatchSets</code>. For the second and subsequent <code>ListSqlInjectionMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>SqlInjectionMatchSets</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>SqlInjectionMatchSet</code> objects that you want AWS WAF to return for this request. 
If you have more <code>SqlInjectionMatchSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>Rules</code>.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>SqlInjectionMatchSet</code> objects that you want AWS WAF to return for this request. If you have more <code>SqlInjectionMatchSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>Rules</code>.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListSubscribedRuleGroups`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of <code>RuleGroup</code> objects that you are subscribed to.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListSubscribedRuleGroups< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_subscribed_rule_groups_input::Builder, } impl<C, M, R> ListSubscribedRuleGroups<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListSubscribedRuleGroups`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListSubscribedRuleGroupsOutput, aws_smithy_http::result::SdkError<crate::error::ListSubscribedRuleGroupsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListSubscribedRuleGroupsInputOperationOutputAlias, crate::output::ListSubscribedRuleGroupsOutput, crate::error::ListSubscribedRuleGroupsError, crate::input::ListSubscribedRuleGroupsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>ByteMatchSets</code>subscribed rule groups than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of subscribed rule groups. For the second and subsequent <code>ListSubscribedRuleGroupsRequest</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of subscribed rule groups.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>ByteMatchSets</code>subscribed rule groups than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of subscribed rule groups. For the second and subsequent <code>ListSubscribedRuleGroupsRequest</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of subscribed rule groups.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of subscribed rule groups that you want AWS WAF to return for this request. If you have more objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of objects.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of subscribed rule groups that you want AWS WAF to return for this request. 
If you have more objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of objects.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListTagsForResource`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Retrieves the tags associated with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to "customer" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.</p> /// <p>Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules. 
</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListTagsForResource< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_tags_for_resource_input::Builder, } impl<C, M, R> ListTagsForResource<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListTagsForResource`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListTagsForResourceOutput, aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListTagsForResourceInputOperationOutputAlias, crate::output::ListTagsForResourceOutput, crate::error::ListTagsForResourceError, crate::input::ListTagsForResourceInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p></p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p></p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p></p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p></p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } /// <p></p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p></p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } } /// Fluent builder constructing a request to `ListWebACLs`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Returns an array of <code>WebACLSummary</code> objects in the response.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListWebACLs< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_web_ac_ls_input::Builder, } impl<C, M, R> ListWebACLs<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListWebACLs`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListWebAcLsOutput, aws_smithy_http::result::SdkError<crate::error::ListWebACLsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListWebAcLsInputOperationOutputAlias, crate::output::ListWebAcLsOutput, crate::error::ListWebACLsError, crate::input::ListWebAcLsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>WebACL</code> objects than the number that you specify for <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>WebACL</code> objects. For the second and subsequent <code>ListWebACLs</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>WebACL</code> objects.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>WebACL</code> objects than the number that you specify for <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>WebACL</code> objects. For the second and subsequent <code>ListWebACLs</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>WebACL</code> objects.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>WebACL</code> objects that you want AWS WAF to return for this request. If you have more <code>WebACL</code> objects than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>WebACL</code> objects.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>WebACL</code> objects that you want AWS WAF to return for this request. 
If you have more <code>WebACL</code> objects than the number that you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>WebACL</code> objects.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `ListXssMatchSets`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Returns an array of <code>XssMatchSet</code> objects.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListXssMatchSets< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::list_xss_match_sets_input::Builder, } impl<C, M, R> ListXssMatchSets<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `ListXssMatchSets`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListXssMatchSetsOutput, aws_smithy_http::result::SdkError<crate::error::ListXssMatchSetsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::ListXssMatchSetsInputOperationOutputAlias, crate::output::ListXssMatchSetsOutput, crate::error::ListXssMatchSetsError, crate::input::ListXssMatchSetsInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>If you specify a value for <code>Limit</code> and you have more <code>XssMatchSet</code> objects than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>XssMatchSets</code>. For the second and subsequent <code>ListXssMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>XssMatchSets</code>.</p> pub fn next_marker(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_marker(input.into()); self } /// <p>If you specify a value for <code>Limit</code> and you have more <code>XssMatchSet</code> objects than the value of <code>Limit</code>, AWS WAF returns a <code>NextMarker</code> value in the response that allows you to list another group of <code>XssMatchSets</code>. 
For the second and subsequent <code>ListXssMatchSets</code> requests, specify the value of <code>NextMarker</code> from the previous response to get information about another batch of <code>XssMatchSets</code>.</p> pub fn set_next_marker(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_marker(input); self } /// <p>Specifies the number of <code>XssMatchSet</code> objects that you want AWS WAF to return for this request. If you have more <code>XssMatchSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>XssMatchSets</code>.</p> pub fn limit(mut self, input: i32) -> Self { self.inner = self.inner.limit(input); self } /// <p>Specifies the number of <code>XssMatchSet</code> objects that you want AWS WAF to return for this request. If you have more <code>XssMatchSet</code> objects than the number you specify for <code>Limit</code>, the response includes a <code>NextMarker</code> value that you can use to get another batch of <code>XssMatchSets</code>.</p> pub fn set_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_limit(input); self } } /// Fluent builder constructing a request to `PutLoggingConfiguration`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Associates a <code>LoggingConfiguration</code> with a specified web ACL.</p> /// <p>You can access information about all traffic that AWS WAF inspects using the following steps:</p> /// <ol> /// <li> <p>Create an Amazon Kinesis Data Firehose. </p> <p>Create the data firehose with a PUT source and in the region that you are operating. However, if you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia). </p> <note> /// <p>Do not create the data firehose using a <code>Kinesis stream</code> as your source.</p> /// </note> </li> /// <li> <p>Associate that firehose to your web ACL using a <code>PutLoggingConfiguration</code> request.</p> </li> /// </ol> /// <p>When you successfully enable logging using a <code>PutLoggingConfiguration</code> request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/logging.html">Logging Web ACL Traffic Information</a> in the <i>AWS WAF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct PutLoggingConfiguration< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::put_logging_configuration_input::Builder, } impl<C, M, R> PutLoggingConfiguration<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `PutLoggingConfiguration`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. 
/// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::PutLoggingConfigurationOutput, aws_smithy_http::result::SdkError<crate::error::PutLoggingConfigurationError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::PutLoggingConfigurationInputOperationOutputAlias, crate::output::PutLoggingConfigurationOutput, crate::error::PutLoggingConfigurationError, crate::input::PutLoggingConfigurationInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Amazon Kinesis Data Firehose that contains the inspected traffic information, the redacted fields details, and the Amazon Resource Name (ARN) of the web ACL to monitor.</p> <note> /// <p>When specifying <code>Type</code> in <code>RedactedFields</code>, you must use one of the following values: <code>URI</code>, <code>QUERY_STRING</code>, <code>HEADER</code>, or <code>METHOD</code>.</p> /// </note> pub fn logging_configuration(mut self, input: crate::model::LoggingConfiguration) -> Self { self.inner = self.inner.logging_configuration(input); self } /// <p>The Amazon Kinesis Data Firehose that contains the inspected traffic information, the redacted fields details, and the Amazon Resource Name (ARN) of the web ACL to monitor.</p> <note> /// <p>When specifying <code>Type</code> in <code>RedactedFields</code>, you must use one of the following values: <code>URI</code>, <code>QUERY_STRING</code>, <code>HEADER</code>, or <code>METHOD</code>.</p> /// </note> pub fn set_logging_configuration( mut self, input: 
std::option::Option<crate::model::LoggingConfiguration>, ) -> Self { self.inner = self.inner.set_logging_configuration(input); self } } /// Fluent builder constructing a request to `PutPermissionPolicy`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Attaches an IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.</p> /// <p>The <code>PutPermissionPolicy</code> is subject to the following restrictions:</p> /// <ul> /// <li> <p>You can attach only one policy with each <code>PutPermissionPolicy</code> request.</p> </li> /// <li> <p>The policy must include an <code>Effect</code>, <code>Action</code> and <code>Principal</code>. </p> </li> /// <li> <p> <code>Effect</code> must specify <code>Allow</code>.</p> </li> /// <li> <p>The <code>Action</code> in the policy must be <code>waf:UpdateWebACL</code>, <code>waf-regional:UpdateWebACL</code>, <code>waf:GetRuleGroup</code> and <code>waf-regional:GetRuleGroup</code> . 
Any extra or wildcard actions in the policy will be rejected.</p> </li> /// <li> <p>The policy cannot include a <code>Resource</code> parameter.</p> </li> /// <li> <p>The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.</p> </li> /// <li> <p>The user making the request must be the owner of the RuleGroup.</p> </li> /// <li> <p>Your policy must be composed using IAM Policy version 2012-10-17.</p> </li> /// </ul> /// <p>For more information, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html">IAM Policies</a>. </p> /// <p>An example of a valid policy parameter is shown in the Examples section below.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct PutPermissionPolicy< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::put_permission_policy_input::Builder, } impl<C, M, R> PutPermissionPolicy<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `PutPermissionPolicy`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::PutPermissionPolicyOutput, aws_smithy_http::result::SdkError<crate::error::PutPermissionPolicyError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::PutPermissionPolicyInputOperationOutputAlias, crate::output::PutPermissionPolicyOutput, crate::error::PutPermissionPolicyError, crate::input::PutPermissionPolicyInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Amazon Resource Name (ARN) of the RuleGroup to which you want to attach the policy.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the RuleGroup to which you want to attach the policy.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } /// <p>The policy to attach to the specified RuleGroup.</p> pub fn policy(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.policy(input.into()); self } /// <p>The policy to attach to the specified RuleGroup.</p> pub fn set_policy(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_policy(input); self } } /// Fluent builder constructing a request to `TagResource`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Associates tags with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to "customer" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.</p> /// <p>Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can use this action to tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules. </p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct TagResource< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::tag_resource_input::Builder, } impl<C, M, R> TagResource<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `TagResource`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::TagResourceOutput, aws_smithy_http::result::SdkError<crate::error::TagResourceError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::TagResourceInputOperationOutputAlias, crate::output::TagResourceOutput, crate::error::TagResourceError, crate::input::TagResourceInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The ARN (Amazon Resource Name) of the resource to which to add tags.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The ARN (Amazon Resource Name) of the resource to which to add tags.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } /// Appends an item to `Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>The tags (key:value pairs) to associate with the resource.</p> pub fn tags(mut self, input: crate::model::Tag) -> Self { self.inner = self.inner.tags(input); self } /// <p>The tags (key:value pairs) to associate with the resource.</p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `UntagResource`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Removes tags from the specified AWS resource.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UntagResource< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::untag_resource_input::Builder, } impl<C, M, R> UntagResource<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UntagResource`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::UntagResourceOutput, aws_smithy_http::result::SdkError<crate::error::UntagResourceError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UntagResourceInputOperationOutputAlias, crate::output::UntagResourceOutput, crate::error::UntagResourceError, crate::input::UntagResourceInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The ARN (Amazon Resource Name) of the resource from which to remove tags.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The ARN (Amazon Resource Name) of the resource from which to remove tags.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } /// Appends an item to `TagKeys`. /// /// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys). /// /// <p>The keys of the tags to remove from the resource.</p> pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.tag_keys(input.into()); self } /// <p>The keys of the tags to remove from the resource.</p> pub fn set_tag_keys( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_tag_keys(input); self } } /// Fluent builder constructing a request to `UpdateByteMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. 
With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Inserts or deletes <code>ByteMatchTuple</code> objects (filters) in a <code>ByteMatchSet</code>. For each <code>ByteMatchTuple</code> object, you specify the following values: </p> /// <ul> /// <li> <p>Whether to insert or delete the object from the array. If you want to change a <code>ByteMatchSetUpdate</code> object, you delete the existing object and add a new one.</p> </li> /// <li> <p>The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the <code>User-Agent</code> header. </p> </li> /// <li> <p>The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see <code>TargetString</code> in the <code>ByteMatchTuple</code> data type. </p> </li> /// <li> <p>Where to look, such as at the beginning or the end of a query string.</p> </li> /// <li> <p>Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.</p> </li> /// </ul> /// <p>For example, you can add a <code>ByteMatchSetUpdate</code> object that matches web requests in which <code>User-Agent</code> headers contain the string <code>BadBot</code>. 
You can then configure AWS WAF to block those requests.</p> /// <p>To create and configure a <code>ByteMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Create a <code>ByteMatchSet.</code> For more information, see <code>CreateByteMatchSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateByteMatchSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateByteMatchSet</code> request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UpdateByteMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::update_byte_match_set_input::Builder, } impl<C, M, R> UpdateByteMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateByteMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::UpdateByteMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::UpdateByteMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateByteMatchSetInputOperationOutputAlias, crate::output::UpdateByteMatchSetOutput, crate::error::UpdateByteMatchSetError, crate::input::UpdateByteMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>ByteMatchSetId</code> of the <code>ByteMatchSet</code> that you want to update. <code>ByteMatchSetId</code> is returned by <code>CreateByteMatchSet</code> and by <code>ListByteMatchSets</code>.</p> pub fn byte_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.byte_match_set_id(input.into()); self } /// <p>The <code>ByteMatchSetId</code> of the <code>ByteMatchSet</code> that you want to update. <code>ByteMatchSetId</code> is returned by <code>CreateByteMatchSet</code> and by <code>ListByteMatchSets</code>.</p> pub fn set_byte_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_byte_match_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } /// Appends an item to `Updates`. 
/// /// To override the contents of this collection use [`set_updates`](Self::set_updates). /// /// <p>An array of <code>ByteMatchSetUpdate</code> objects that you want to insert into or delete from a <code>ByteMatchSet</code>. For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>ByteMatchSetUpdate</code>: Contains <code>Action</code> and <code>ByteMatchTuple</code> </p> </li> /// <li> <p> <code>ByteMatchTuple</code>: Contains <code>FieldToMatch</code>, <code>PositionalConstraint</code>, <code>TargetString</code>, and <code>TextTransformation</code> </p> </li> /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li> /// </ul> pub fn updates(mut self, input: crate::model::ByteMatchSetUpdate) -> Self { self.inner = self.inner.updates(input); self } /// <p>An array of <code>ByteMatchSetUpdate</code> objects that you want to insert into or delete from a <code>ByteMatchSet</code>. For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>ByteMatchSetUpdate</code>: Contains <code>Action</code> and <code>ByteMatchTuple</code> </p> </li> /// <li> <p> <code>ByteMatchTuple</code>: Contains <code>FieldToMatch</code>, <code>PositionalConstraint</code>, <code>TargetString</code>, and <code>TextTransformation</code> </p> </li> /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li> /// </ul> pub fn set_updates( mut self, input: std::option::Option<std::vec::Vec<crate::model::ByteMatchSetUpdate>>, ) -> Self { self.inner = self.inner.set_updates(input); self } } /// Fluent builder constructing a request to `UpdateGeoMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Inserts or deletes <code>GeoMatchConstraint</code> objects in an <code>GeoMatchSet</code>. For each <code>GeoMatchConstraint</code> object, you specify the following values: </p> /// <ul> /// <li> <p>Whether to insert or delete the object from the array. If you want to change an <code>GeoMatchConstraint</code> object, you delete the existing object and add a new one.</p> </li> /// <li> <p>The <code>Type</code>. The only valid value for <code>Type</code> is <code>Country</code>.</p> </li> /// <li> <p>The <code>Value</code>, which is a two character code for the country to add to the <code>GeoMatchConstraint</code> object. Valid codes are listed in <code>GeoMatchConstraint$Value</code>.</p> </li> /// </ul> /// <p>To create and configure an <code>GeoMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Submit a <code>CreateGeoMatchSet</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateGeoMatchSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateGeoMatchSet</code> request to specify the country that you want AWS WAF to watch for.</p> </li> /// </ol> /// <p>When you update an <code>GeoMatchSet</code>, you specify the country that you want to add and/or the country that you want to delete. 
If you want to change a country, you delete the existing country and add the new one.</p> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UpdateGeoMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::update_geo_match_set_input::Builder, } impl<C, M, R> UpdateGeoMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateGeoMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UpdateGeoMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::UpdateGeoMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateGeoMatchSetInputOperationOutputAlias, crate::output::UpdateGeoMatchSetOutput, crate::error::UpdateGeoMatchSetError, crate::input::UpdateGeoMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>GeoMatchSetId</code> of the <code>GeoMatchSet</code> that you want to update. <code>GeoMatchSetId</code> is returned by <code>CreateGeoMatchSet</code> and by <code>ListGeoMatchSets</code>.</p> pub fn geo_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.geo_match_set_id(input.into()); self } /// <p>The <code>GeoMatchSetId</code> of the <code>GeoMatchSet</code> that you want to update. <code>GeoMatchSetId</code> is returned by <code>CreateGeoMatchSet</code> and by <code>ListGeoMatchSets</code>.</p> pub fn set_geo_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_geo_match_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } /// Appends an item to `Updates`. /// /// To override the contents of this collection use [`set_updates`](Self::set_updates). /// /// <p>An array of <code>GeoMatchSetUpdate</code> objects that you want to insert into or delete from an <code>GeoMatchSet</code>. 
For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>GeoMatchSetUpdate</code>: Contains <code>Action</code> and <code>GeoMatchConstraint</code> </p> </li> /// <li> <p> <code>GeoMatchConstraint</code>: Contains <code>Type</code> and <code>Value</code> </p> <p>You can have only one <code>Type</code> and <code>Value</code> per <code>GeoMatchConstraint</code>. To add multiple countries, include multiple <code>GeoMatchSetUpdate</code> objects in your request.</p> </li> /// </ul> pub fn updates(mut self, input: crate::model::GeoMatchSetUpdate) -> Self { self.inner = self.inner.updates(input); self } /// <p>An array of <code>GeoMatchSetUpdate</code> objects that you want to insert into or delete from an <code>GeoMatchSet</code>. For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>GeoMatchSetUpdate</code>: Contains <code>Action</code> and <code>GeoMatchConstraint</code> </p> </li> /// <li> <p> <code>GeoMatchConstraint</code>: Contains <code>Type</code> and <code>Value</code> </p> <p>You can have only one <code>Type</code> and <code>Value</code> per <code>GeoMatchConstraint</code>. To add multiple countries, include multiple <code>GeoMatchSetUpdate</code> objects in your request.</p> </li> /// </ul> pub fn set_updates( mut self, input: std::option::Option<std::vec::Vec<crate::model::GeoMatchSetUpdate>>, ) -> Self { self.inner = self.inner.set_updates(input); self } } /// Fluent builder constructing a request to `UpdateIPSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. 
With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Inserts or deletes <code>IPSetDescriptor</code> objects in an <code>IPSet</code>. For each <code>IPSetDescriptor</code> object, you specify the following values: </p>
/// <ul>
/// <li> <p>Whether to insert or delete the object from the array. If you want to change an <code>IPSetDescriptor</code> object, you delete the existing object and add a new one.</p> </li>
/// <li> <p>The IP address version, <code>IPv4</code> or <code>IPv6</code>. </p> </li>
/// <li> <p>The IP address in CIDR notation, for example, <code>192.0.2.0/24</code> (for the range of IP addresses from <code>192.0.2.0</code> to <code>192.0.2.255</code>) or <code>192.0.2.44/32</code> (for the individual IP address <code>192.0.2.44</code>). </p> </li>
/// </ul>
/// <p>AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128. For more information about CIDR notation, see the Wikipedia entry <a href="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing">Classless Inter-Domain Routing</a>.</p>
/// <p>IPv6 addresses can be represented using any of the following formats:</p>
/// <ul>
/// <li> <p>1111:0000:0000:0000:0000:0000:0000:0111/128</p> </li>
/// <li> <p>1111:0:0:0:0:0:0:0111/128</p> </li>
/// <li> <p>1111::0111/128</p> </li>
/// <li> <p>1111::111/128</p> </li>
/// </ul>
/// <p>You use an <code>IPSet</code> to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an <code>IPSet</code> that specifies those IP addresses, and then configure AWS WAF to block the requests. </p>
/// <p>To create and configure an <code>IPSet</code>, perform the following steps:</p>
/// <ol>
/// <li> <p>Submit a <code>CreateIPSet</code> request.</p> </li>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateIPSet</code> request.</p> </li>
/// <li> <p>Submit an <code>UpdateIPSet</code> request to specify the IP addresses that you want AWS WAF to watch for.</p> </li>
/// </ol>
/// <p>When you update an <code>IPSet</code>, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.</p>
/// <p>You can insert a maximum of 1000 addresses in a single request.</p>
/// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateIPSet<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client/config handle; cloning the builder is cheap (Arc bump).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the request fields set through the fluent methods below.
    inner: crate::input::update_ip_set_input::Builder,
}
impl<C, M, R> UpdateIPSet<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateIPSet`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateIpSetOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateIPSetError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateIpSetInputOperationOutputAlias,
            crate::output::UpdateIpSetOutput,
            crate::error::UpdateIPSetError,
            crate::input::UpdateIpSetInputOperationRetryAlias,
        >,
    {
        // Two-phase send: build the operation locally (input validation and
        // signing failures surface as ConstructionFailure), then dispatch it
        // through the shared Smithy client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>IPSetId</code> of the <code>IPSet</code> that you want to update. <code>IPSetId</code> is returned by <code>CreateIPSet</code> and by <code>ListIPSets</code>.</p>
    pub fn ip_set_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.ip_set_id(input.into());
        self
    }
    /// <p>The <code>IPSetId</code> of the <code>IPSet</code> that you want to update. <code>IPSetId</code> is returned by <code>CreateIPSet</code> and by <code>ListIPSets</code>.</p>
    pub fn set_ip_set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_ip_set_id(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
    /// Appends an item to `Updates`.
    ///
    /// To override the contents of this collection use [`set_updates`](Self::set_updates).
    ///
    /// <p>An array of <code>IPSetUpdate</code> objects that you want to insert into or delete from an <code>IPSet</code>. For more information, see the applicable data types:</p>
    /// <ul>
    /// <li> <p> <code>IPSetUpdate</code>: Contains <code>Action</code> and <code>IPSetDescriptor</code> </p> </li>
    /// <li> <p> <code>IPSetDescriptor</code>: Contains <code>Type</code> and <code>Value</code> </p> </li>
    /// </ul>
    /// <p>You can insert a maximum of 1000 addresses in a single request.</p>
    pub fn updates(mut self, input: crate::model::IpSetUpdate) -> Self {
        self.inner = self.inner.updates(input);
        self
    }
    /// <p>An array of <code>IPSetUpdate</code> objects that you want to insert into or delete from an <code>IPSet</code>. For more information, see the applicable data types:</p>
    /// <ul>
    /// <li> <p> <code>IPSetUpdate</code>: Contains <code>Action</code> and <code>IPSetDescriptor</code> </p> </li>
    /// <li> <p> <code>IPSetDescriptor</code>: Contains <code>Type</code> and <code>Value</code> </p> </li>
    /// </ul>
    /// <p>You can insert a maximum of 1000 addresses in a single request.</p>
    pub fn set_updates(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::IpSetUpdate>>,
    ) -> Self {
        self.inner = self.inner.set_updates(input);
        self
    }
}
/// Fluent builder constructing a request to `UpdateRateBasedRule`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use.
</p>
/// </note>
/// <p>Inserts or deletes <code>Predicate</code> objects in a rule and updates the <code>RateLimit</code> in the rule. </p>
/// <p>Each <code>Predicate</code> object identifies a predicate, such as a <code>ByteMatchSet</code> or an <code>IPSet</code>, that specifies the web requests that you want to block or count. The <code>RateLimit</code> specifies the number of requests every five minutes that triggers the rule.</p>
/// <p>If you add more than one predicate to a <code>RateBasedRule</code>, a request must match all the predicates and exceed the <code>RateLimit</code> to be counted or blocked. For example, suppose you add the following to a <code>RateBasedRule</code>:</p>
/// <ul>
/// <li> <p>An <code>IPSet</code> that matches the IP address <code>192.0.2.44/32</code> </p> </li>
/// <li> <p>A <code>ByteMatchSet</code> that matches <code>BadBot</code> in the <code>User-Agent</code> header</p> </li>
/// </ul>
/// <p>Further, you specify a <code>RateLimit</code> of 1,000.</p>
/// <p>You then add the <code>RateBasedRule</code> to a <code>WebACL</code> and specify that you want to block requests that satisfy the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 <i>and</i> the <code>User-Agent</code> header in the request must contain the value <code>BadBot</code>. Further, requests that match these two conditions must be received at a rate of more than 1,000 every five minutes. If the rate drops below this limit, AWS WAF no longer blocks the requests.</p>
/// <p>As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a <code>RateBasedRule</code>:</p>
/// <ul>
/// <li> <p>A <code>ByteMatchSet</code> with <code>FieldToMatch</code> of <code>URI</code> </p> </li>
/// <li> <p>A <code>PositionalConstraint</code> of <code>STARTS_WITH</code> </p> </li>
/// <li> <p>A <code>TargetString</code> of <code>login</code> </p> </li>
/// </ul>
/// <p>Further, you specify a <code>RateLimit</code> of 1,000.</p>
/// <p>By adding this <code>RateBasedRule</code> to a <code>WebACL</code>, you could limit requests to your login page without affecting the rest of your site.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateRateBasedRule<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client/config handle; cloning the builder is cheap (Arc bump).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the request fields set through the fluent methods below.
    inner: crate::input::update_rate_based_rule_input::Builder,
}
impl<C, M, R> UpdateRateBasedRule<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateRateBasedRule`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateRateBasedRuleOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateRateBasedRuleError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateRateBasedRuleInputOperationOutputAlias,
            crate::output::UpdateRateBasedRuleOutput,
            crate::error::UpdateRateBasedRuleError,
            crate::input::UpdateRateBasedRuleInputOperationRetryAlias,
        >,
    {
        // Two-phase send: build the operation locally (input validation and
        // signing failures surface as ConstructionFailure), then dispatch it
        // through the shared Smithy client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>RuleId</code> of the <code>RateBasedRule</code> that you want to update. <code>RuleId</code> is returned by <code>CreateRateBasedRule</code> and by <code>ListRateBasedRules</code>.</p>
    pub fn rule_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.rule_id(input.into());
        self
    }
    /// <p>The <code>RuleId</code> of the <code>RateBasedRule</code> that you want to update. <code>RuleId</code> is returned by <code>CreateRateBasedRule</code> and by <code>ListRateBasedRules</code>.</p>
    pub fn set_rule_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_rule_id(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
    /// Appends an item to `Updates`.
    ///
    /// To override the contents of this collection use [`set_updates`](Self::set_updates).
    ///
    /// <p>An array of <code>RuleUpdate</code> objects that you want to insert into or delete from a <code>RateBasedRule</code>. </p>
    pub fn updates(mut self, input: crate::model::RuleUpdate) -> Self {
        self.inner = self.inner.updates(input);
        self
    }
    /// <p>An array of <code>RuleUpdate</code> objects that you want to insert into or delete from a <code>RateBasedRule</code>. </p>
    pub fn set_updates(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::RuleUpdate>>,
    ) -> Self {
        self.inner = self.inner.set_updates(input);
        self
    }
    /// <p>The maximum number of requests, which have an identical value in the field specified by the <code>RateKey</code>, allowed in a five-minute period. If the number of requests exceeds the <code>RateLimit</code> and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.</p>
    pub fn rate_limit(mut self, input: i64) -> Self {
        self.inner = self.inner.rate_limit(input);
        self
    }
    /// <p>The maximum number of requests, which have an identical value in the field specified by the <code>RateKey</code>, allowed in a five-minute period. If the number of requests exceeds the <code>RateLimit</code> and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.</p>
    pub fn set_rate_limit(mut self, input: std::option::Option<i64>) -> Self {
        self.inner = self.inner.set_rate_limit(input);
        self
    }
}
/// Fluent builder constructing a request to `UpdateRegexMatchSet`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>.
With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Inserts or deletes <code>RegexMatchTuple</code> objects (filters) in a <code>RegexMatchSet</code>. For each <code>RegexMatchSetUpdate</code> object, you specify the following values: </p>
/// <ul>
/// <li> <p>Whether to insert or delete the object from the array. If you want to change a <code>RegexMatchSetUpdate</code> object, you delete the existing object and add a new one.</p> </li>
/// <li> <p>The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the <code>User-Agent</code> header. </p> </li>
/// <li> <p>The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see <code>RegexPatternSet</code>. </p> </li>
/// <li> <p>Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.</p> </li>
/// </ul>
/// <p> For example, you can create a <code>RegexPatternSet</code> that matches any requests with <code>User-Agent</code> headers that contain the string <code>B[a@]dB[o0]t</code>. You can then configure AWS WAF to reject those requests.</p>
/// <p>To create and configure a <code>RegexMatchSet</code>, perform the following steps:</p>
/// <ol>
/// <li> <p>Create a <code>RegexMatchSet.</code> For more information, see <code>CreateRegexMatchSet</code>.</p> </li>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateRegexMatchSet</code> request.</p> </li>
/// <li> <p>Submit an <code>UpdateRegexMatchSet</code> request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the identifier of the <code>RegexPatternSet</code> that contains the regular expression patterns you want AWS WAF to watch for.</p> </li>
/// </ol>
/// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateRegexMatchSet<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client/config handle; cloning the builder is cheap (Arc bump).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the request fields set through the fluent methods below.
    inner: crate::input::update_regex_match_set_input::Builder,
}
impl<C, M, R> UpdateRegexMatchSet<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateRegexMatchSet`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateRegexMatchSetOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateRegexMatchSetError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateRegexMatchSetInputOperationOutputAlias,
            crate::output::UpdateRegexMatchSetOutput,
            crate::error::UpdateRegexMatchSetError,
            crate::input::UpdateRegexMatchSetInputOperationRetryAlias,
        >,
    {
        // Two-phase send: build the operation locally (input validation and
        // signing failures surface as ConstructionFailure), then dispatch it
        // through the shared Smithy client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>RegexMatchSetId</code> of the <code>RegexMatchSet</code> that you want to update. <code>RegexMatchSetId</code> is returned by <code>CreateRegexMatchSet</code> and by <code>ListRegexMatchSets</code>.</p>
    pub fn regex_match_set_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.regex_match_set_id(input.into());
        self
    }
    /// <p>The <code>RegexMatchSetId</code> of the <code>RegexMatchSet</code> that you want to update. <code>RegexMatchSetId</code> is returned by <code>CreateRegexMatchSet</code> and by <code>ListRegexMatchSets</code>.</p>
    pub fn set_regex_match_set_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_regex_match_set_id(input);
        self
    }
    /// Appends an item to `Updates`.
    ///
    /// To override the contents of this collection use [`set_updates`](Self::set_updates).
    ///
    /// <p>An array of <code>RegexMatchSetUpdate</code> objects that you want to insert into or delete from a <code>RegexMatchSet</code>. For more information, see <code>RegexMatchTuple</code>.</p>
    pub fn updates(mut self, input: crate::model::RegexMatchSetUpdate) -> Self {
        self.inner = self.inner.updates(input);
        self
    }
    /// <p>An array of <code>RegexMatchSetUpdate</code> objects that you want to insert into or delete from a <code>RegexMatchSet</code>. For more information, see <code>RegexMatchTuple</code>.</p>
    pub fn set_updates(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::RegexMatchSetUpdate>>,
    ) -> Self {
        self.inner = self.inner.set_updates(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
}
/// Fluent builder constructing a request to `UpdateRegexPatternSet`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Inserts or deletes <code>RegexPatternString</code> objects in a <code>RegexPatternSet</code>. For each <code>RegexPatternString</code> object, you specify the following values: </p>
/// <ul>
/// <li> <p>Whether to insert or delete the <code>RegexPatternString</code>.</p> </li>
/// <li> <p>The regular expression pattern that you want to insert or delete.
For more information, see <code>RegexPatternSet</code>. </p> </li>
/// </ul>
/// <p> For example, you can create a <code>RegexPatternString</code> such as <code>B[a@]dB[o0]t</code>. AWS WAF will match this <code>RegexPatternString</code> to:</p>
/// <ul>
/// <li> <p>BadBot</p> </li>
/// <li> <p>BadB0t</p> </li>
/// <li> <p>B@dBot</p> </li>
/// <li> <p>B@dB0t</p> </li>
/// </ul>
/// <p>To create and configure a <code>RegexPatternSet</code>, perform the following steps:</p>
/// <ol>
/// <li> <p>Create a <code>RegexPatternSet.</code> For more information, see <code>CreateRegexPatternSet</code>.</p> </li>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateRegexPatternSet</code> request.</p> </li>
/// <li> <p>Submit an <code>UpdateRegexPatternSet</code> request to specify the regular expression pattern that you want AWS WAF to watch for.</p> </li>
/// </ol>
/// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateRegexPatternSet<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client/config handle; cloning the builder is cheap (Arc bump).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the request fields set through the fluent methods below.
    inner: crate::input::update_regex_pattern_set_input::Builder,
}
impl<C, M, R> UpdateRegexPatternSet<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateRegexPatternSet`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateRegexPatternSetOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateRegexPatternSetError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateRegexPatternSetInputOperationOutputAlias,
            crate::output::UpdateRegexPatternSetOutput,
            crate::error::UpdateRegexPatternSetError,
            crate::input::UpdateRegexPatternSetInputOperationRetryAlias,
        >,
    {
        // Two-phase send: build the operation locally (input validation and
        // signing failures surface as ConstructionFailure), then dispatch it
        // through the shared Smithy client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>RegexPatternSetId</code> of the <code>RegexPatternSet</code> that you want to update. <code>RegexPatternSetId</code> is returned by <code>CreateRegexPatternSet</code> and by <code>ListRegexPatternSets</code>.</p>
    pub fn regex_pattern_set_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.regex_pattern_set_id(input.into());
        self
    }
    /// <p>The <code>RegexPatternSetId</code> of the <code>RegexPatternSet</code> that you want to update. <code>RegexPatternSetId</code> is returned by <code>CreateRegexPatternSet</code> and by <code>ListRegexPatternSets</code>.</p>
    pub fn set_regex_pattern_set_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_regex_pattern_set_id(input);
        self
    }
    /// Appends an item to `Updates`.
    ///
    /// To override the contents of this collection use [`set_updates`](Self::set_updates).
    ///
    /// <p>An array of <code>RegexPatternSetUpdate</code> objects that you want to insert into or delete from a <code>RegexPatternSet</code>.</p>
    pub fn updates(mut self, input: crate::model::RegexPatternSetUpdate) -> Self {
        self.inner = self.inner.updates(input);
        self
    }
    /// <p>An array of <code>RegexPatternSetUpdate</code> objects that you want to insert into or delete from a <code>RegexPatternSet</code>.</p>
    pub fn set_updates(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::RegexPatternSetUpdate>>,
    ) -> Self {
        self.inner = self.inner.set_updates(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
}
/// Fluent builder constructing a request to `UpdateRule`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
/// </note>
/// <p>Inserts or deletes <code>Predicate</code> objects in a <code>Rule</code>. Each <code>Predicate</code> object identifies a predicate, such as a <code>ByteMatchSet</code> or an <code>IPSet</code>, that specifies the web requests that you want to allow, block, or count.
If you add more than one predicate to a <code>Rule</code>, a request must match all of the specifications to be allowed, blocked, or counted. For example, suppose that you add the following to a <code>Rule</code>: </p>
/// <ul>
/// <li> <p>A <code>ByteMatchSet</code> that matches the value <code>BadBot</code> in the <code>User-Agent</code> header</p> </li>
/// <li> <p>An <code>IPSet</code> that matches the IP address <code>192.0.2.44</code> </p> </li>
/// </ul>
/// <p>You then add the <code>Rule</code> to a <code>WebACL</code> and specify that you want to block requests that satisfy the <code>Rule</code>. For a request to be blocked, the <code>User-Agent</code> header in the request must contain the value <code>BadBot</code> <i>and</i> the request must originate from the IP address 192.0.2.44.</p>
/// <p>To create and configure a <code>Rule</code>, perform the following steps:</p>
/// <ol>
/// <li> <p>Create and update the predicates that you want to include in the <code>Rule</code>.</p> </li>
/// <li> <p>Create the <code>Rule</code>. See <code>CreateRule</code>.</p> </li>
/// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateRule</code> request.</p> </li>
/// <li> <p>Submit an <code>UpdateRule</code> request to add predicates to the <code>Rule</code>.</p> </li>
/// <li> <p>Create and update a <code>WebACL</code> that contains the <code>Rule</code>. See <code>CreateWebACL</code>.</p> </li>
/// </ol>
/// <p>If you want to replace one <code>ByteMatchSet</code> or <code>IPSet</code> with another, you delete the existing one and add the new one.</p>
/// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateRule<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client/config handle; cloning the builder is cheap (Arc bump).
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the request fields set through the fluent methods below.
    inner: crate::input::update_rule_input::Builder,
}
impl<C, M, R> UpdateRule<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateRule`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateRuleOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateRuleError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateRuleInputOperationOutputAlias,
            crate::output::UpdateRuleOutput,
            crate::error::UpdateRuleError,
            crate::input::UpdateRuleInputOperationRetryAlias,
        >,
    {
        // Two-phase send: build the operation locally (input validation and
        // signing failures surface as ConstructionFailure), then dispatch it
        // through the shared Smithy client.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The <code>RuleId</code> of the <code>Rule</code> that you want to update. <code>RuleId</code> is returned by <code>CreateRule</code> and by <code>ListRules</code>.</p>
    pub fn rule_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.rule_id(input.into());
        self
    }
    /// <p>The <code>RuleId</code> of the <code>Rule</code> that you want to update. <code>RuleId</code> is returned by <code>CreateRule</code> and by <code>ListRules</code>.</p>
    pub fn set_rule_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_rule_id(input);
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.change_token(input.into());
        self
    }
    /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p>
    pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_change_token(input);
        self
    }
    /// Appends an item to `Updates`.
    ///
    /// To override the contents of this collection use [`set_updates`](Self::set_updates).
    ///
    /// <p>An array of <code>RuleUpdate</code> objects that you want to insert into or delete from a <code>Rule</code>. For more information, see the applicable data types:</p>
    /// <ul>
    /// <li> <p> <code>RuleUpdate</code>: Contains <code>Action</code> and <code>Predicate</code> </p> </li>
    /// <li> <p> <code>Predicate</code>: Contains <code>DataId</code>, <code>Negated</code>, and <code>Type</code> </p> </li>
    /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li>
    /// </ul>
    pub fn updates(mut self, input: crate::model::RuleUpdate) -> Self {
        self.inner = self.inner.updates(input);
        self
    }
    /// <p>An array of <code>RuleUpdate</code> objects that you want to insert into or delete from a <code>Rule</code>. For more information, see the applicable data types:</p>
    /// <ul>
    /// <li> <p> <code>RuleUpdate</code>: Contains <code>Action</code> and <code>Predicate</code> </p> </li>
    /// <li> <p> <code>Predicate</code>: Contains <code>DataId</code>, <code>Negated</code>, and <code>Type</code> </p> </li>
    /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li>
    /// </ul>
    pub fn set_updates(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::RuleUpdate>>,
    ) -> Self {
        self.inner = self.inner.set_updates(input);
        self
    }
}
/// Fluent builder constructing a request to `UpdateRuleGroup`.
///
/// <note>
/// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p>
/// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use.
</p> /// </note> /// <p>Inserts or deletes <code>ActivatedRule</code> objects in a <code>RuleGroup</code>.</p> /// <p>You can only insert <code>REGULAR</code> rules into a rule group.</p> /// <p>You can have a maximum of ten rules per rule group.</p> /// <p>To create and configure a <code>RuleGroup</code>, perform the following steps:</p> /// <ol> /// <li> <p>Create and update the <code>Rules</code> that you want to include in the <code>RuleGroup</code>. See <code>CreateRule</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateRuleGroup</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateRuleGroup</code> request to add <code>Rules</code> to the <code>RuleGroup</code>.</p> </li> /// <li> <p>Create and update a <code>WebACL</code> that contains the <code>RuleGroup</code>. See <code>CreateWebACL</code>.</p> </li> /// </ol> /// <p>If you want to replace one <code>Rule</code> with another, you delete the existing one and add the new one.</p> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UpdateRuleGroup< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::update_rule_group_input::Builder, } impl<C, M, R> UpdateRuleGroup<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateRuleGroup`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. 
/// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UpdateRuleGroupOutput, aws_smithy_http::result::SdkError<crate::error::UpdateRuleGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateRuleGroupInputOperationOutputAlias, crate::output::UpdateRuleGroupOutput, crate::error::UpdateRuleGroupError, crate::input::UpdateRuleGroupInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>RuleGroupId</code> of the <code>RuleGroup</code> that you want to update. <code>RuleGroupId</code> is returned by <code>CreateRuleGroup</code> and by <code>ListRuleGroups</code>.</p> pub fn rule_group_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.rule_group_id(input.into()); self } /// <p>The <code>RuleGroupId</code> of the <code>RuleGroup</code> that you want to update. <code>RuleGroupId</code> is returned by <code>CreateRuleGroup</code> and by <code>ListRuleGroups</code>.</p> pub fn set_rule_group_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_rule_group_id(input); self } /// Appends an item to `Updates`. /// /// To override the contents of this collection use [`set_updates`](Self::set_updates). 
/// /// <p>An array of <code>RuleGroupUpdate</code> objects that you want to insert into or delete from a <code>RuleGroup</code>.</p> /// <p>You can only insert <code>REGULAR</code> rules into a rule group.</p> /// <p> <code>ActivatedRule|OverrideAction</code> applies only when updating or adding a <code>RuleGroup</code> to a <code>WebACL</code>. In this case you do not use <code>ActivatedRule|Action</code>. For all other update requests, <code>ActivatedRule|Action</code> is used instead of <code>ActivatedRule|OverrideAction</code>.</p> pub fn updates(mut self, input: crate::model::RuleGroupUpdate) -> Self { self.inner = self.inner.updates(input); self } /// <p>An array of <code>RuleGroupUpdate</code> objects that you want to insert into or delete from a <code>RuleGroup</code>.</p> /// <p>You can only insert <code>REGULAR</code> rules into a rule group.</p> /// <p> <code>ActivatedRule|OverrideAction</code> applies only when updating or adding a <code>RuleGroup</code> to a <code>WebACL</code>. In this case you do not use <code>ActivatedRule|Action</code>. For all other update requests, <code>ActivatedRule|Action</code> is used instead of <code>ActivatedRule|OverrideAction</code>.</p> pub fn set_updates( mut self, input: std::option::Option<std::vec::Vec<crate::model::RuleGroupUpdate>>, ) -> Self { self.inner = self.inner.set_updates(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } } /// Fluent builder constructing a request to `UpdateSizeConstraintSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. 
For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Inserts or deletes <code>SizeConstraint</code> objects (filters) in a <code>SizeConstraintSet</code>. For each <code>SizeConstraint</code> object, you specify the following values: </p> /// <ul> /// <li> <p>Whether to insert or delete the object from the array. If you want to change a <code>SizeConstraintSetUpdate</code> object, you delete the existing object and add a new one.</p> </li> /// <li> <p>The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the <code>User-Agent</code> header.</p> </li> /// <li> <p>Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first <code>8192</code> bytes of your request to AWS WAF.</p> <p>You can only specify a single type of TextTransformation.</p> </li> /// <li> <p>A <code>ComparisonOperator</code> used for evaluating the selected part of the request against the specified <code>Size</code>, such as equals, greater than, less than, and so on.</p> </li> /// <li> <p>The length, in bytes, that you want AWS WAF to watch for in selected part of the request. The length is computed after applying the transformation.</p> </li> /// </ul> /// <p>For example, you can add a <code>SizeConstraintSetUpdate</code> object that matches web requests in which the length of the <code>User-Agent</code> header is greater than 100 bytes. 
You can then configure AWS WAF to block those requests.</p> /// <p>To create and configure a <code>SizeConstraintSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Create a <code>SizeConstraintSet.</code> For more information, see <code>CreateSizeConstraintSet</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateSizeConstraintSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateSizeConstraintSet</code> request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UpdateSizeConstraintSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::update_size_constraint_set_input::Builder, } impl<C, M, R> UpdateSizeConstraintSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateSizeConstraintSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::UpdateSizeConstraintSetOutput, aws_smithy_http::result::SdkError<crate::error::UpdateSizeConstraintSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateSizeConstraintSetInputOperationOutputAlias, crate::output::UpdateSizeConstraintSetOutput, crate::error::UpdateSizeConstraintSetError, crate::input::UpdateSizeConstraintSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>SizeConstraintSetId</code> of the <code>SizeConstraintSet</code> that you want to update. <code>SizeConstraintSetId</code> is returned by <code>CreateSizeConstraintSet</code> and by <code>ListSizeConstraintSets</code>.</p> pub fn size_constraint_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.size_constraint_set_id(input.into()); self } /// <p>The <code>SizeConstraintSetId</code> of the <code>SizeConstraintSet</code> that you want to update. 
<code>SizeConstraintSetId</code> is returned by <code>CreateSizeConstraintSet</code> and by <code>ListSizeConstraintSets</code>.</p> pub fn set_size_constraint_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_size_constraint_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } /// Appends an item to `Updates`. /// /// To override the contents of this collection use [`set_updates`](Self::set_updates). /// /// <p>An array of <code>SizeConstraintSetUpdate</code> objects that you want to insert into or delete from a <code>SizeConstraintSet</code>. For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>SizeConstraintSetUpdate</code>: Contains <code>Action</code> and <code>SizeConstraint</code> </p> </li> /// <li> <p> <code>SizeConstraint</code>: Contains <code>FieldToMatch</code>, <code>TextTransformation</code>, <code>ComparisonOperator</code>, and <code>Size</code> </p> </li> /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li> /// </ul> pub fn updates(mut self, input: crate::model::SizeConstraintSetUpdate) -> Self { self.inner = self.inner.updates(input); self } /// <p>An array of <code>SizeConstraintSetUpdate</code> objects that you want to insert into or delete from a <code>SizeConstraintSet</code>. 
For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>SizeConstraintSetUpdate</code>: Contains <code>Action</code> and <code>SizeConstraint</code> </p> </li> /// <li> <p> <code>SizeConstraint</code>: Contains <code>FieldToMatch</code>, <code>TextTransformation</code>, <code>ComparisonOperator</code>, and <code>Size</code> </p> </li> /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li> /// </ul> pub fn set_updates( mut self, input: std::option::Option<std::vec::Vec<crate::model::SizeConstraintSetUpdate>>, ) -> Self { self.inner = self.inner.set_updates(input); self } } /// Fluent builder constructing a request to `UpdateSqlInjectionMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Inserts or deletes <code>SqlInjectionMatchTuple</code> objects (filters) in a <code>SqlInjectionMatchSet</code>. For each <code>SqlInjectionMatchTuple</code> object, you specify the following values:</p> /// <ul> /// <li> <p> <code>Action</code>: Whether to insert the object into or delete the object from the array. 
To change a <code>SqlInjectionMatchTuple</code>, you delete the existing object and add a new one.</p> </li> /// <li> <p> <code>FieldToMatch</code>: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.</p> </li> /// <li> <p> <code>TextTransformation</code>: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.</p> <p>You can only specify a single type of TextTransformation.</p> </li> /// </ul> /// <p>You use <code>SqlInjectionMatchSet</code> objects to specify which CloudFront requests that you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a <code>SqlInjectionMatchSet</code> with the applicable settings, and then configure AWS WAF to block the requests. </p> /// <p>To create and configure a <code>SqlInjectionMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Submit a <code>CreateSqlInjectionMatchSet</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateIPSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateSqlInjectionMatchSet</code> request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UpdateSqlInjectionMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, 
M, R>>, inner: crate::input::update_sql_injection_match_set_input::Builder, } impl<C, M, R> UpdateSqlInjectionMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateSqlInjectionMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UpdateSqlInjectionMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::UpdateSqlInjectionMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateSqlInjectionMatchSetInputOperationOutputAlias, crate::output::UpdateSqlInjectionMatchSetOutput, crate::error::UpdateSqlInjectionMatchSetError, crate::input::UpdateSqlInjectionMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>SqlInjectionMatchSetId</code> of the <code>SqlInjectionMatchSet</code> that you want to update. 
<code>SqlInjectionMatchSetId</code> is returned by <code>CreateSqlInjectionMatchSet</code> and by <code>ListSqlInjectionMatchSets</code>.</p> pub fn sql_injection_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.sql_injection_match_set_id(input.into()); self } /// <p>The <code>SqlInjectionMatchSetId</code> of the <code>SqlInjectionMatchSet</code> that you want to update. <code>SqlInjectionMatchSetId</code> is returned by <code>CreateSqlInjectionMatchSet</code> and by <code>ListSqlInjectionMatchSets</code>.</p> pub fn set_sql_injection_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_sql_injection_match_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } /// Appends an item to `Updates`. /// /// To override the contents of this collection use [`set_updates`](Self::set_updates). /// /// <p>An array of <code>SqlInjectionMatchSetUpdate</code> objects that you want to insert into or delete from a <code>SqlInjectionMatchSet</code>. 
For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>SqlInjectionMatchSetUpdate</code>: Contains <code>Action</code> and <code>SqlInjectionMatchTuple</code> </p> </li> /// <li> <p> <code>SqlInjectionMatchTuple</code>: Contains <code>FieldToMatch</code> and <code>TextTransformation</code> </p> </li> /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li> /// </ul> pub fn updates(mut self, input: crate::model::SqlInjectionMatchSetUpdate) -> Self { self.inner = self.inner.updates(input); self } /// <p>An array of <code>SqlInjectionMatchSetUpdate</code> objects that you want to insert into or delete from a <code>SqlInjectionMatchSet</code>. For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>SqlInjectionMatchSetUpdate</code>: Contains <code>Action</code> and <code>SqlInjectionMatchTuple</code> </p> </li> /// <li> <p> <code>SqlInjectionMatchTuple</code>: Contains <code>FieldToMatch</code> and <code>TextTransformation</code> </p> </li> /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li> /// </ul> pub fn set_updates( mut self, input: std::option::Option<std::vec::Vec<crate::model::SqlInjectionMatchSetUpdate>>, ) -> Self { self.inner = self.inner.set_updates(input); self } } /// Fluent builder constructing a request to `UpdateWebACL`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. 
</p> /// </note> /// <p>Inserts or deletes <code>ActivatedRule</code> objects in a <code>WebACL</code>. Each <code>Rule</code> identifies web requests that you want to allow, block, or count. When you update a <code>WebACL</code>, you specify the following values:</p> /// <ul> /// <li> <p>A default action for the <code>WebACL</code>, either <code>ALLOW</code> or <code>BLOCK</code>. AWS WAF performs the default action if a request doesn't match the criteria in any of the <code>Rules</code> in a <code>WebACL</code>.</p> </li> /// <li> <p>The <code>Rules</code> that you want to add or delete. If you want to replace one <code>Rule</code> with another, you delete the existing <code>Rule</code> and add the new one.</p> </li> /// <li> <p>For each <code>Rule</code>, whether you want AWS WAF to allow requests, block requests, or count requests that match the conditions in the <code>Rule</code>.</p> </li> /// <li> <p>The order in which you want AWS WAF to evaluate the <code>Rules</code> in a <code>WebACL</code>. If you add more than one <code>Rule</code> to a <code>WebACL</code>, AWS WAF evaluates each request against the <code>Rules</code> in order based on the value of <code>Priority</code>. (The <code>Rule</code> that has the lowest value for <code>Priority</code> is evaluated first.) When a web request matches all the predicates (such as <code>ByteMatchSets</code> and <code>IPSets</code>) in a <code>Rule</code>, AWS WAF immediately takes the corresponding action, allow or block, and doesn't evaluate the request against the remaining <code>Rules</code> in the <code>WebACL</code>, if any. </p> </li> /// </ul> /// <p>To create and configure a <code>WebACL</code>, perform the following steps:</p> /// <ol> /// <li> <p>Create and update the predicates that you want to include in <code>Rules</code>. 
For more information, see <code>CreateByteMatchSet</code>, <code>UpdateByteMatchSet</code>, <code>CreateIPSet</code>, <code>UpdateIPSet</code>, <code>CreateSqlInjectionMatchSet</code>, and <code>UpdateSqlInjectionMatchSet</code>.</p> </li> /// <li> <p>Create and update the <code>Rules</code> that you want to include in the <code>WebACL</code>. For more information, see <code>CreateRule</code> and <code>UpdateRule</code>.</p> </li> /// <li> <p>Create a <code>WebACL</code>. See <code>CreateWebACL</code>.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateWebACL</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateWebACL</code> request to specify the <code>Rules</code> that you want to include in the <code>WebACL</code>, to specify the default action, and to associate the <code>WebACL</code> with a CloudFront distribution. </p> <p>The <code>ActivatedRule</code> can be a rule group. If you specify a rule group as your <code>ActivatedRule</code> , you can exclude specific rules from that rule group.</p> <p>If you already have a rule group associated with a web ACL and want to submit an <code>UpdateWebACL</code> request to exclude certain rules from that rule group, you must first remove the rule group from the web ACL, the re-insert it again, specifying the excluded rules. For details, see <code>ActivatedRule$ExcludedRules</code> . </p> </li> /// </ol> /// <p>Be aware that if you try to add a RATE_BASED rule to a web ACL without setting the rule type when first creating the rule, the <code>UpdateWebACL</code> request will fail because the request tries to add a REGULAR rule (the default rule type) with the specified ID, which does not exist. 
</p> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UpdateWebACL< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::update_web_acl_input::Builder, } impl<C, M, R> UpdateWebACL<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateWebACL`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UpdateWebAclOutput, aws_smithy_http::result::SdkError<crate::error::UpdateWebACLError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateWebAclInputOperationOutputAlias, crate::output::UpdateWebAclOutput, crate::error::UpdateWebACLError, crate::input::UpdateWebAclInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>WebACLId</code> of the <code>WebACL</code> that you want to update. 
<code>WebACLId</code> is returned by <code>CreateWebACL</code> and by <code>ListWebACLs</code>.</p> pub fn web_acl_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.web_acl_id(input.into()); self } /// <p>The <code>WebACLId</code> of the <code>WebACL</code> that you want to update. <code>WebACLId</code> is returned by <code>CreateWebACL</code> and by <code>ListWebACLs</code>.</p> pub fn set_web_acl_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_web_acl_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } /// Appends an item to `Updates`. /// /// To override the contents of this collection use [`set_updates`](Self::set_updates). /// /// <p>An array of updates to make to the <code>WebACL</code>.</p> /// <p>An array of <code>WebACLUpdate</code> objects that you want to insert into or delete from a <code>WebACL</code>. For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>WebACLUpdate</code>: Contains <code>Action</code> and <code>ActivatedRule</code> </p> </li> /// <li> <p> <code>ActivatedRule</code>: Contains <code>Action</code>, <code>OverrideAction</code>, <code>Priority</code>, <code>RuleId</code>, and <code>Type</code>. <code>ActivatedRule|OverrideAction</code> applies only when updating or adding a <code>RuleGroup</code> to a <code>WebACL</code>. In this case, you do not use <code>ActivatedRule|Action</code>. 
For all other update requests, <code>ActivatedRule|Action</code> is used instead of <code>ActivatedRule|OverrideAction</code>. </p> </li> /// <li> <p> <code>WafAction</code>: Contains <code>Type</code> </p> </li> /// </ul> pub fn updates(mut self, input: crate::model::WebAclUpdate) -> Self { self.inner = self.inner.updates(input); self } /// <p>An array of updates to make to the <code>WebACL</code>.</p> /// <p>An array of <code>WebACLUpdate</code> objects that you want to insert into or delete from a <code>WebACL</code>. For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>WebACLUpdate</code>: Contains <code>Action</code> and <code>ActivatedRule</code> </p> </li> /// <li> <p> <code>ActivatedRule</code>: Contains <code>Action</code>, <code>OverrideAction</code>, <code>Priority</code>, <code>RuleId</code>, and <code>Type</code>. <code>ActivatedRule|OverrideAction</code> applies only when updating or adding a <code>RuleGroup</code> to a <code>WebACL</code>. In this case, you do not use <code>ActivatedRule|Action</code>. For all other update requests, <code>ActivatedRule|Action</code> is used instead of <code>ActivatedRule|OverrideAction</code>. </p> </li> /// <li> <p> <code>WafAction</code>: Contains <code>Type</code> </p> </li> /// </ul> pub fn set_updates( mut self, input: std::option::Option<std::vec::Vec<crate::model::WebAclUpdate>>, ) -> Self { self.inner = self.inner.set_updates(input); self } /// <p>A default action for the web ACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn't match the criteria in any of the rules in a web ACL.</p> pub fn default_action(mut self, input: crate::model::WafAction) -> Self { self.inner = self.inner.default_action(input); self } /// <p>A default action for the web ACL, either ALLOW or BLOCK. 
AWS WAF performs the default action if a request doesn't match the criteria in any of the rules in a web ACL.</p> pub fn set_default_action( mut self, input: std::option::Option<crate::model::WafAction>, ) -> Self { self.inner = self.inner.set_default_action(input); self } } /// Fluent builder constructing a request to `UpdateXssMatchSet`. /// /// <note> /// <p>This is <b>AWS WAF Classic</b> documentation. For more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS WAF Classic</a> in the developer guide.</p> /// <p> <b>For the latest version of AWS WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p> /// </note> /// <p>Inserts or deletes <code>XssMatchTuple</code> objects (filters) in an <code>XssMatchSet</code>. For each <code>XssMatchTuple</code> object, you specify the following values:</p> /// <ul> /// <li> <p> <code>Action</code>: Whether to insert the object into or delete the object from the array. To change an <code>XssMatchTuple</code>, you delete the existing object and add a new one.</p> </li> /// <li> <p> <code>FieldToMatch</code>: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.</p> </li> /// <li> <p> <code>TextTransformation</code>: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.</p> <p>You can only specify a single type of TextTransformation.</p> </li> /// </ul> /// <p>You use <code>XssMatchSet</code> objects to specify which CloudFront requests that you want to allow, block, or count. 
For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an <code>XssMatchSet</code> with the applicable settings, and then configure AWS WAF to block the requests. </p> /// <p>To create and configure an <code>XssMatchSet</code>, perform the following steps:</p> /// <ol> /// <li> <p>Submit a <code>CreateXssMatchSet</code> request.</p> </li> /// <li> <p>Use <code>GetChangeToken</code> to get the change token that you provide in the <code>ChangeToken</code> parameter of an <code>UpdateIPSet</code> request.</p> </li> /// <li> <p>Submit an <code>UpdateXssMatchSet</code> request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.</p> </li> /// </ol> /// <p>For more information about how to use the AWS WAF API to allow or block HTTP requests, see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/">AWS WAF Developer Guide</a>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UpdateXssMatchSet< C = aws_smithy_client::erase::DynConnector, M = crate::middleware::DefaultMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::update_xss_match_set_input::Builder, } impl<C, M, R> UpdateXssMatchSet<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateXssMatchSet`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UpdateXssMatchSetOutput, aws_smithy_http::result::SdkError<crate::error::UpdateXssMatchSetError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateXssMatchSetInputOperationOutputAlias, crate::output::UpdateXssMatchSetOutput, crate::error::UpdateXssMatchSetError, crate::input::UpdateXssMatchSetInputOperationRetryAlias, >, { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>XssMatchSetId</code> of the <code>XssMatchSet</code> that you want to update. <code>XssMatchSetId</code> is returned by <code>CreateXssMatchSet</code> and by <code>ListXssMatchSets</code>.</p> pub fn xss_match_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.xss_match_set_id(input.into()); self } /// <p>The <code>XssMatchSetId</code> of the <code>XssMatchSet</code> that you want to update. 
<code>XssMatchSetId</code> is returned by <code>CreateXssMatchSet</code> and by <code>ListXssMatchSets</code>.</p> pub fn set_xss_match_set_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_xss_match_set_id(input); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn change_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.change_token(input.into()); self } /// <p>The value returned by the most recent call to <code>GetChangeToken</code>.</p> pub fn set_change_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_change_token(input); self } /// Appends an item to `Updates`. /// /// To override the contents of this collection use [`set_updates`](Self::set_updates). /// /// <p>An array of <code>XssMatchSetUpdate</code> objects that you want to insert into or delete from an <code>XssMatchSet</code>. For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>XssMatchSetUpdate</code>: Contains <code>Action</code> and <code>XssMatchTuple</code> </p> </li> /// <li> <p> <code>XssMatchTuple</code>: Contains <code>FieldToMatch</code> and <code>TextTransformation</code> </p> </li> /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li> /// </ul> pub fn updates(mut self, input: crate::model::XssMatchSetUpdate) -> Self { self.inner = self.inner.updates(input); self } /// <p>An array of <code>XssMatchSetUpdate</code> objects that you want to insert into or delete from an <code>XssMatchSet</code>. 
For more information, see the applicable data types:</p> /// <ul> /// <li> <p> <code>XssMatchSetUpdate</code>: Contains <code>Action</code> and <code>XssMatchTuple</code> </p> </li> /// <li> <p> <code>XssMatchTuple</code>: Contains <code>FieldToMatch</code> and <code>TextTransformation</code> </p> </li> /// <li> <p> <code>FieldToMatch</code>: Contains <code>Data</code> and <code>Type</code> </p> </li> /// </ul> pub fn set_updates( mut self, input: std::option::Option<std::vec::Vec<crate::model::XssMatchSetUpdate>>, ) -> Self { self.inner = self.inner.set_updates(input); self } } } impl<C> Client<C, crate::middleware::DefaultMiddleware, aws_smithy_client::retry::Standard> { /// Creates a client with the given service config and connector override. pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self { let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default(); let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default(); let sleep_impl = conf.sleep_impl.clone(); let mut builder = aws_smithy_client::Builder::new() .connector(conn) .middleware(crate::middleware::DefaultMiddleware::new()); builder.set_retry_config(retry_config.into()); builder.set_timeout_config(timeout_config); if let Some(sleep_impl) = sleep_impl { builder.set_sleep_impl(Some(sleep_impl)); } let client = builder.build(); Self { handle: std::sync::Arc::new(Handle { client, conf }), } } } impl Client< aws_smithy_client::erase::DynConnector, crate::middleware::DefaultMiddleware, aws_smithy_client::retry::Standard, > { /// Creates a new client from a shared config. #[cfg(any(feature = "rustls", feature = "native-tls"))] pub fn new(config: &aws_types::config::Config) -> Self { Self::from_conf(config.into()) } /// Creates a new client from the service [`Config`](crate::Config). 
#[cfg(any(feature = "rustls", feature = "native-tls"))] pub fn from_conf(conf: crate::Config) -> Self { let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default(); let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default(); let sleep_impl = conf.sleep_impl.clone(); let mut builder = aws_smithy_client::Builder::dyn_https() .middleware(crate::middleware::DefaultMiddleware::new()); builder.set_retry_config(retry_config.into()); builder.set_timeout_config(timeout_config); // the builder maintains a try-state. To avoid suppressing the warning when sleep is unset, // only set it if we actually have a sleep impl. if let Some(sleep_impl) = sleep_impl { builder.set_sleep_impl(Some(sleep_impl)); } let client = builder.build(); Self { handle: std::sync::Arc::new(Handle { client, conf }), } } }
58.909416
778
0.629685
b945e2b05e8b28ea314712f4ba4302eb4fe9e29c
622
use crate::{NodeCommand, WalletCommand};
use structopt::StructOpt;

// Top-level subcommand dispatch for the `cardano` binary. Each variant wraps
// the argument struct for one component, parsed by structopt.
//
// NOTE: plain `//` comments are used deliberately here — structopt surfaces
// `///` doc comments as `--help` text, which would change the CLI's runtime
// output. The enum-level help is already pinned via the `about` attribute.
#[derive(Debug, StructOpt)]
#[structopt(name = "cardano", about = "Manage cardano components")]
pub enum CardanoCommand {
    // Node subcommand; semantics live in `NodeCommand` (defined elsewhere in the crate).
    Node(NodeCommand),
    // Wallet subcommand; semantics live in `WalletCommand` (defined elsewhere in the crate).
    Wallet(WalletCommand),
    // Planned subcommands, commented out until their argument types exist:
    // Tx(TxCommand),
    // Mint(MintCommand),
    // Address(AddressCommand),
    // Db(DbCommand),
    // Graphql(GraphqlCommand),
    // Ledger(LedgerCommand),
    // Rosetta(RosettaCommand),
    // Plutus(PlutusCommand),
    // Marlowe(MarloweCommand),
    // Explorer(ExplorerCommand),
    // Smash(SmashCommand),
    // Update(UpdateCommand),
    // Config(ConfigCommand),
}
27.043478
67
0.663987
6a3cf0cac9fcbf6f7b8e80c63794fa47afb1378a
2,151
use std::env;
use std::error::Error;
use std::fs;

/// Configuration for one search run, parsed from the command line and
/// environment.
pub struct Config {
    /// The string to search for.
    pub query: String,
    /// Path of the file to search in.
    pub filename: String,
    /// When `false`, matching ignores case.
    pub case_sensitive: bool,
}

impl Config {
    /// Parse a `Config` from the process arguments.
    ///
    /// Skips the program name, then expects `<query> <filename>`. The search
    /// is case-sensitive unless the `CASE_INSENSITIVE` environment variable
    /// is set (to any value).
    ///
    /// # Errors
    ///
    /// Returns a static message when the query or the filename is missing.
    pub fn new(mut args: std::env::Args) -> Result<Config, &'static str> {
        args.next(); // skip the program name

        // `ok_or` + `?` replaces the original match/early-return boilerplate
        // while producing the exact same error messages.
        let query = args.next().ok_or("Didn't get a query string")?;
        let filename = args.next().ok_or("Didn't get a file name")?;

        // `is_err()` means the variable is *unset*: stay case-sensitive.
        let case_sensitive = env::var("CASE_INSENSITIVE").is_err();

        Ok(Config {
            query,
            filename,
            case_sensitive,
        })
    }
}

/// Run the search described by `config`, printing matching lines to stdout.
///
/// # Errors
///
/// Propagates any I/O error from reading `config.filename`.
pub fn run(config: Config) -> Result<(), Box<dyn Error>> {
    let contents = fs::read_to_string(config.filename)?;

    let results = if config.case_sensitive {
        search(&config.query, &contents)
    } else {
        search_case_insensitive(&config.query, &contents)
    };

    for line in results {
        println!("{}", line);
    }

    Ok(())
}

/// Return every line of `contents` that contains `query` (case-sensitive).
pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> {
    contents
        .lines()
        .filter(|line| line.contains(query))
        .collect()
}

/// Return every line of `contents` that contains `query`, ignoring case.
///
/// Rewritten as an iterator chain for consistency with [`search`]; behavior
/// is unchanged.
pub fn search_case_insensitive<'a>(query: &str, contents: &'a str) -> Vec<&'a str> {
    let query = query.to_lowercase();
    contents
        .lines()
        .filter(|line| line.to_lowercase().contains(&query))
        .collect()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn case_sensitive() {
        let query = "duct";
        let contents = "\
Rust:
safe, fast, productive.
Pick three.
Duct tape.";

        assert_eq!(vec!["safe, fast, productive."], search(query, contents));
    }

    #[test]
    fn case_insensitive() {
        let query = "rUsT";
        let contents = "\
Rust:
safe, fast, productive.
Pick three.
Trust me.";

        assert_eq!(
            vec!["Rust:", "Trust me."],
            search_case_insensitive(query, contents)
        );
    }
}
20.485714
84
0.529521
d7aadfda9469a0e336c33fe66e68e2bec3813535
1,075
use std::path::Path;

use config::Config;

use monsterapi::{database, scraping, Result};

/// Download the product index and every product page, persisting each
/// category and product through the database access layer.
fn main() -> Result<()> {
    // Layered configuration: file values first, environment overrides second.
    let settings = Config::builder()
        .add_source(config::File::with_name("config.toml"))
        .add_source(config::Environment::default())
        .build()?;

    let db = database::AccessLayer::new(settings.get_string("database.dsn")?.as_str())?;
    let html_dir = Path::new("./html/");

    let index_file = scraping::download_products_index(html_dir)?;
    for category in &scraping::parse_products_index(&index_file)? {
        db.insert_category(database::models::NewCategory::from(category))?;

        // Fetch and persist every product listed under this category.
        for product in &category.products {
            let page_file = scraping::download_product_page(category, product, html_dir)?;
            let parsed = scraping::parse_product_page(&page_file, product)?;
            db.insert_product(database::models::NewProduct::from(&parsed))?;
        }
    }

    Ok(())
}
32.575758
93
0.650233
fe6616e134656e935b2bdf767f7ae9696a2739cc
33,270
pub extern crate pulldown_cmark; pub extern crate serde_yaml; #[macro_use] extern crate lazy_static; mod context; mod frontmatter; mod references; mod walker; pub use context::Context; pub use frontmatter::{Frontmatter, FrontmatterStrategy}; pub use walker::{vault_contents, WalkOptions}; use frontmatter::{frontmatter_from_str, frontmatter_to_str}; use pathdiff::diff_paths; use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS}; use pulldown_cmark::{CodeBlockKind, CowStr, Event, Options, Parser, Tag}; use pulldown_cmark_to_cmark::cmark_with_options; use rayon::prelude::*; use references::*; use slug::slugify; use snafu::{ResultExt, Snafu}; use std::ffi::OsString; use std::fmt; use std::fs::{self, File}; use std::io::prelude::*; use std::io::ErrorKind; use std::path::{Path, PathBuf}; use std::str; /// A series of markdown [Event]s that are generated while traversing an Obsidian markdown note. pub type MarkdownEvents<'a> = Vec<Event<'a>>; /// A post-processing function that is to be called after an Obsidian note has been fully parsed and /// converted to regular markdown syntax. /// /// Postprocessors are called in the order they've been added through [Exporter::add_postprocessor] /// just before notes are written out to their final destination. /// They may be used to achieve the following: /// /// 1. Modify a note's [Context], for example to change the destination filename or update its [Frontmatter] (see [Context::frontmatter]). /// 2. Change a note's contents by altering [MarkdownEvents]. /// 3. Prevent later postprocessors from running ([PostprocessorResult::StopHere]) or cause a note /// to be skipped entirely ([PostprocessorResult::StopAndSkipNote]). /// /// # Examples /// /// ## Update frontmatter /// /// This example shows how to make changes a note's frontmatter. In this case, the postprocessor is /// defined inline as a closure. 
/// /// ``` /// use obsidian_export::{Context, Exporter, MarkdownEvents, PostprocessorResult}; /// use obsidian_export::pulldown_cmark::{CowStr, Event}; /// use obsidian_export::serde_yaml::Value; /// # use std::path::PathBuf; /// # use tempfile::TempDir; /// /// # let tmp_dir = TempDir::new().expect("failed to make tempdir"); /// # let source = PathBuf::from("tests/testdata/input/postprocessors"); /// # let destination = tmp_dir.path().to_path_buf(); /// let mut exporter = Exporter::new(source, destination); /// /// // add_postprocessor registers a new postprocessor. In this example we use a closure. /// exporter.add_postprocessor(&|mut context, events| { /// // This is the key we'll insert into the frontmatter. In this case, the string "foo". /// let key = Value::String("foo".to_string()); /// // This is the value we'll insert into the frontmatter. In this case, the string "bar". /// let value = Value::String("baz".to_string()); /// /// // Frontmatter can be updated in-place, so we can call insert on it directly. /// context.frontmatter.insert(key, value); /// /// // Postprocessors must return their (modified) context, the markdown events that make /// // up the note and a next action to take. /// (context, events, PostprocessorResult::Continue) /// }); /// /// exporter.run().unwrap(); /// ``` /// /// ## Change note contents /// /// In this example a note's markdown content is changed by iterating over the [MarkdownEvents] and /// changing the text when we encounter a [text element][Event::Text]. /// /// Instead of using a closure like above, this example shows how to use a separate function /// definition. /// ``` /// # use obsidian_export::{Context, Exporter, MarkdownEvents, PostprocessorResult}; /// # use pulldown_cmark::{CowStr, Event}; /// # use std::path::PathBuf; /// # use tempfile::TempDir; /// # /// /// This postprocessor replaces any instance of "foo" with "bar" in the note body. 
/// fn foo_to_bar( /// context: Context, /// events: MarkdownEvents, /// ) -> (Context, MarkdownEvents, PostprocessorResult) { /// let events = events /// .into_iter() /// .map(|event| match event { /// Event::Text(text) => Event::Text(CowStr::from(text.replace("foo", "bar"))), /// event => event, /// }) /// .collect(); /// (context, events, PostprocessorResult::Continue) /// } /// /// # let tmp_dir = TempDir::new().expect("failed to make tempdir"); /// # let source = PathBuf::from("tests/testdata/input/postprocessors"); /// # let destination = tmp_dir.path().to_path_buf(); /// # let mut exporter = Exporter::new(source, destination); /// exporter.add_postprocessor(&foo_to_bar); /// # exporter.run().unwrap(); /// ``` pub type Postprocessor = dyn Fn(Context, MarkdownEvents) -> (Context, MarkdownEvents, PostprocessorResult) + Send + Sync; type Result<T, E = ExportError> = std::result::Result<T, E>; const PERCENTENCODE_CHARS: &AsciiSet = &CONTROLS.add(b' ').add(b'(').add(b')').add(b'%').add(b'?'); const NOTE_RECURSION_LIMIT: usize = 10; #[non_exhaustive] #[derive(Debug, Snafu)] /// ExportError represents all errors which may be returned when using this crate. pub enum ExportError { #[snafu(display("failed to read from '{}'", path.display()))] /// This occurs when a read IO operation fails. ReadError { path: PathBuf, source: std::io::Error, }, #[snafu(display("failed to write to '{}'", path.display()))] /// This occurs when a write IO operation fails. WriteError { path: PathBuf, source: std::io::Error, }, #[snafu(display("Encountered an error while trying to walk '{}'", path.display()))] /// This occurs when an error is encountered while trying to walk a directory. WalkDirError { path: PathBuf, source: ignore::Error, }, #[snafu(display("No such file or directory: {}", path.display()))] /// This occurs when an operation is requested on a file or directory which does not exist. 
PathDoesNotExist { path: PathBuf }, #[snafu(display("Invalid character encoding encountered"))] /// This error may occur when invalid UTF8 is encountered. /// /// Currently, operations which assume UTF8 perform lossy encoding however. CharacterEncodingError { source: str::Utf8Error }, #[snafu(display("Recursion limit exceeded"))] /// This error occurs when embedded notes are too deeply nested or cause an infinite loop. /// /// When this happens, `file_tree` contains a list of all the files which were processed /// leading up to this error. RecursionLimitExceeded { file_tree: Vec<PathBuf> }, #[snafu(display("Failed to export '{}'", path.display()))] /// This occurs when a file fails to export successfully. FileExportError { path: PathBuf, #[snafu(source(from(ExportError, Box::new)))] source: Box<ExportError>, }, #[snafu(display("Failed to decode YAML frontmatter in '{}'", path.display()))] FrontMatterDecodeError { path: PathBuf, #[snafu(source(from(serde_yaml::Error, Box::new)))] source: Box<serde_yaml::Error>, }, #[snafu(display("Failed to encode YAML frontmatter for '{}'", path.display()))] FrontMatterEncodeError { path: PathBuf, #[snafu(source(from(serde_yaml::Error, Box::new)))] source: Box<serde_yaml::Error>, }, } #[derive(Debug, Clone, Copy, PartialEq)] /// Emitted by [Postprocessor]s to signal the next action to take. pub enum PostprocessorResult { /// Continue with the next post-processor (if any). Continue, /// Use this note, but don't run any more post-processors after this one. StopHere, /// Skip this note (don't export it) and don't run any more post-processors. StopAndSkipNote, } #[derive(Clone)] /// Exporter provides the main interface to this library. /// /// Users are expected to create an Exporter using [`Exporter::new`], optionally followed by /// customization using [`Exporter::frontmatter_strategy`] and [`Exporter::walk_options`]. /// /// After that, calling [`Exporter::run`] will start the export process. 
pub struct Exporter<'a> { root: PathBuf, destination: PathBuf, frontmatter_strategy: FrontmatterStrategy, vault_contents: Option<Vec<PathBuf>>, walk_options: WalkOptions<'a>, process_embeds_recursively: bool, postprocessors: Vec<&'a Postprocessor>, } impl<'a> fmt::Debug for Exporter<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("WalkOptions") .field("root", &self.root) .field("destination", &self.destination) .field("frontmatter_strategy", &self.frontmatter_strategy) .field("vault_contents", &self.vault_contents) .field("walk_options", &self.walk_options) .field( "process_embeds_recursively", &self.process_embeds_recursively, ) .field( "postprocessors", &format!("<{} postprocessors active>", self.postprocessors.len()), ) .finish() } } impl<'a> Exporter<'a> { /// Create a new exporter which reads notes from `source` and exports these to /// `destination`. pub fn new(source: PathBuf, destination: PathBuf) -> Exporter<'a> { Exporter { root: source, destination, frontmatter_strategy: FrontmatterStrategy::Auto, walk_options: WalkOptions::default(), process_embeds_recursively: true, vault_contents: None, postprocessors: vec![], } } /// Set the [`WalkOptions`] to be used for this exporter. pub fn walk_options(&mut self, options: WalkOptions<'a>) -> &mut Exporter<'a> { self.walk_options = options; self } /// Set the [`FrontmatterStrategy`] to be used for this exporter. pub fn frontmatter_strategy(&mut self, strategy: FrontmatterStrategy) -> &mut Exporter<'a> { self.frontmatter_strategy = strategy; self } /// Set the behavior when recursive embeds are encountered. /// /// When `recursive` is true (the default), emdeds are always processed recursively. This may /// lead to infinite recursion when note A embeds B, but B also embeds A. /// (When this happens, [ExportError::RecursionLimitExceeded] will be returned by [Exporter::run]). 
/// /// When `recursive` is false, if a note is encountered for a second time while processing the /// original note, instead of embedding it again a link to the note is inserted instead. pub fn process_embeds_recursively(&mut self, recursive: bool) -> &mut Exporter<'a> { self.process_embeds_recursively = recursive; self } /// Append a function to the chain of [postprocessors][Postprocessor] to run on exported Obsidian Markdown notes. pub fn add_postprocessor(&mut self, processor: &'a Postprocessor) -> &mut Exporter<'a> { self.postprocessors.push(processor); self } /// Export notes using the settings configured on this exporter. pub fn run(&mut self) -> Result<()> { if !self.root.exists() { return Err(ExportError::PathDoesNotExist { path: self.root.clone(), }); } // When a single file is specified, we can short-circuit contruction of walk and associated // directory traversal. This also allows us to accept destination as either a file or a // directory name. if self.root.is_file() { self.vault_contents = Some(vec![self.root.clone()]); let source_filename = self .root .file_name() .expect("File without a filename? How is that possible?") .to_string_lossy(); let destination = match self.destination.is_dir() { true => self.destination.join(String::from(source_filename)), false => { let parent = self.destination.parent().unwrap_or(&self.destination); // Avoid recursively creating self.destination through the call to // export_note when the parent directory doesn't exist. 
if !parent.exists() { return Err(ExportError::PathDoesNotExist { path: parent.to_path_buf(), }); } self.destination.clone() } }; return Ok(self.export_note(&self.root, &destination)?); } if !self.destination.exists() { return Err(ExportError::PathDoesNotExist { path: self.destination.clone(), }); } self.vault_contents = Some(vault_contents( self.root.as_path(), self.walk_options.clone(), )?); self.vault_contents .as_ref() .unwrap() .clone() .into_par_iter() .try_for_each(|file| { let relative_path = file .strip_prefix(&self.root.clone()) .expect("file should always be nested under root") .to_path_buf(); let destination = &self.destination.join(&relative_path); self.export_note(&file, destination) })?; Ok(()) } fn export_note(&self, src: &Path, dest: &Path) -> Result<()> { match is_markdown_file(src) { true => self.parse_and_export_obsidian_note(src, dest), false => copy_file(src, dest), } .context(FileExportError { path: src }) } fn parse_and_export_obsidian_note(&self, src: &Path, dest: &Path) -> Result<()> { let mut context = Context::new(src.to_path_buf(), dest.to_path_buf()); let (frontmatter, mut markdown_events) = self.parse_obsidian_note(&src, &context)?; context.frontmatter = frontmatter; for func in &self.postprocessors { let res = func(context, markdown_events); context = res.0; markdown_events = res.1; match res.2 { PostprocessorResult::StopHere => break, PostprocessorResult::StopAndSkipNote => return Ok(()), _ => (), } } let dest = context.destination; let mut outfile = create_file(&dest)?; let write_frontmatter = match self.frontmatter_strategy { FrontmatterStrategy::Always => true, FrontmatterStrategy::Never => false, FrontmatterStrategy::Auto => !context.frontmatter.is_empty(), }; if write_frontmatter { let mut frontmatter_str = frontmatter_to_str(context.frontmatter) .context(FrontMatterEncodeError { path: src })?; frontmatter_str.push('\n'); outfile .write_all(frontmatter_str.as_bytes()) .context(WriteError { path: &dest })?; } outfile 
.write_all(render_mdevents_to_mdtext(markdown_events).as_bytes()) .context(WriteError { path: &dest })?; Ok(()) } fn parse_obsidian_note<'b>( &self, path: &Path, context: &Context, ) -> Result<(Frontmatter, MarkdownEvents<'b>)> { if context.note_depth() > NOTE_RECURSION_LIMIT { return Err(ExportError::RecursionLimitExceeded { file_tree: context.file_tree(), }); } let content = fs::read_to_string(&path).context(ReadError { path })?; let (frontmatter, content) = matter::matter(&content).unwrap_or(("".to_string(), content.to_string())); let frontmatter = frontmatter_from_str(&frontmatter).context(FrontMatterDecodeError { path })?; let mut parser_options = Options::empty(); parser_options.insert(Options::ENABLE_TABLES); parser_options.insert(Options::ENABLE_FOOTNOTES); parser_options.insert(Options::ENABLE_STRIKETHROUGH); parser_options.insert(Options::ENABLE_TASKLISTS); let mut ref_parser = RefParser::new(); let mut events = vec![]; // Most of the time, a reference triggers 5 events: [ or ![, [, <text>, ], ] let mut buffer = Vec::with_capacity(5); for event in Parser::new_ext(&content, parser_options) { if ref_parser.state == RefParserState::Resetting { events.append(&mut buffer); buffer.clear(); ref_parser.reset(); } buffer.push(event.clone()); match ref_parser.state { RefParserState::NoState => { match event { Event::Text(CowStr::Borrowed("![")) => { ref_parser.ref_type = Some(RefType::Embed); ref_parser.transition(RefParserState::ExpectSecondOpenBracket); } Event::Text(CowStr::Borrowed("[")) => { ref_parser.ref_type = Some(RefType::Link); ref_parser.transition(RefParserState::ExpectSecondOpenBracket); } _ => { events.push(event); buffer.clear(); }, }; } RefParserState::ExpectSecondOpenBracket => match event { Event::Text(CowStr::Borrowed("[")) => { ref_parser.transition(RefParserState::ExpectRefText); } _ => { ref_parser.transition(RefParserState::Resetting); } }, RefParserState::ExpectRefText => match event { Event::Text(CowStr::Borrowed("]")) => { 
ref_parser.transition(RefParserState::Resetting); } Event::Text(text) => { ref_parser.ref_text.push_str(&text); ref_parser.transition(RefParserState::ExpectRefTextOrCloseBracket); } _ => { ref_parser.transition(RefParserState::Resetting); } }, RefParserState::ExpectRefTextOrCloseBracket => match event { Event::Text(CowStr::Borrowed("]")) => { ref_parser.transition(RefParserState::ExpectFinalCloseBracket); } Event::Text(text) => { ref_parser.ref_text.push_str(&text); } _ => { ref_parser.transition(RefParserState::Resetting); } }, RefParserState::ExpectFinalCloseBracket => match event { Event::Text(CowStr::Borrowed("]")) => match ref_parser.ref_type { Some(RefType::Link) => { let mut elements = self.make_link_to_file( ObsidianNoteReference::from_str( ref_parser.ref_text.clone().as_ref() ), context, ); events.append(&mut elements); buffer.clear(); ref_parser.transition(RefParserState::Resetting); } Some(RefType::Embed) => { let mut elements = self.embed_file( ref_parser.ref_text.clone().as_ref(), context )?; events.append(&mut elements); buffer.clear(); ref_parser.transition(RefParserState::Resetting); } None => panic!("In state ExpectFinalCloseBracket but ref_type is None"), }, _ => { ref_parser.transition(RefParserState::Resetting); } }, RefParserState::Resetting => panic!("Reached Resetting state, but it should have been handled prior to this match block"), } } if !buffer.is_empty() { events.append(&mut buffer); } Ok(( frontmatter, events.into_iter().map(event_to_owned).collect(), )) } // Generate markdown elements for a file that is embedded within another note. // // - If the file being embedded is a note, it's content is included at the point of embed. // - If the file is an image, an image tag is generated. // - For other types of file, a regular link is created instead. 
    /// Expands an embed (`![[...]]`) into the markdown events of the embedded file.
    ///
    /// * Markdown targets are parsed recursively and, when the reference names a
    ///   section, reduced to just that section's events.
    /// * Image targets are turned into image tags.
    /// * Any other extension degrades to a plain link.
    ///
    /// Returns an empty event list (after printing a warning) when the referenced
    /// file cannot be found in the vault.
    fn embed_file<'b>(
        &self,
        link_text: &'a str,
        context: &'a Context,
    ) -> Result<MarkdownEvents<'b>> {
        let note_ref = ObsidianNoteReference::from_str(link_text);

        let path = match note_ref.file {
            Some(file) => lookup_filename_in_vault(file, &self.vault_contents.as_ref().unwrap()),

            // If we have None file it is either to a section or id within the same file and thus
            // the current embed logic will fail, recursing until it reaches its limit.
            // For now we just bail early and emit a plain link instead.
            None => return Ok(self.make_link_to_file(note_ref, &context)),
        };

        if path.is_none() {
            // TODO: Extract into configurable function.
            eprintln!(
                "Warning: Unable to find embedded note\n\tReference: '{}'\n\tSource: '{}'\n",
                note_ref
                    .file
                    .unwrap_or_else(|| context.current_file().to_str().unwrap()),
                context.current_file().display(),
            );
            // Unresolvable embeds expand to nothing rather than aborting the export.
            return Ok(vec![]);
        }

        let path = path.unwrap();
        let child_context = Context::from_parent(context, path);
        let no_ext = OsString::new();

        // When recursive embedding is disabled and this file is already somewhere in
        // the current embed chain, emit an arrow + link instead of recursing (this
        // also protects against infinite embed cycles).
        if !self.process_embeds_recursively && context.file_tree().contains(path) {
            return Ok([
                vec![Event::Text(CowStr::Borrowed("→ "))],
                self.make_link_to_file(note_ref, &child_context),
            ]
            .concat());
        }

        let events = match path.extension().unwrap_or(&no_ext).to_str() {
            Some("md") => {
                let (_frontmatter, mut events) = self.parse_obsidian_note(&path, &child_context)?;
                if let Some(section) = note_ref.section {
                    events = reduce_to_section(events, section);
                }
                events
            }
            Some("png") | Some("jpg") | Some("jpeg") | Some("gif") | Some("webp") => {
                self.make_link_to_file(note_ref, &child_context)
                    .into_iter()
                    .map(|event| match event {
                        // make_link_to_file returns a link to a file. With this we turn the link
                        // into an image reference instead. Slightly hacky, but avoids needing
                        // to keep another utility function around for this, or introducing an
                        // extra parameter on make_link_to_file.
                        Event::Start(Tag::Link(linktype, cowstr1, cowstr2)) => {
                            Event::Start(Tag::Image(
                                linktype,
                                CowStr::from(cowstr1.into_string()),
                                CowStr::from(cowstr2.into_string()),
                            ))
                        }
                        Event::End(Tag::Link(linktype, cowstr1, cowstr2)) => {
                            Event::End(Tag::Image(
                                linktype,
                                CowStr::from(cowstr1.into_string()),
                                CowStr::from(cowstr2.into_string()),
                            ))
                        }
                        _ => event,
                    })
                    .collect()
            }
            // Unknown extensions (PDFs etc.) fall back to a regular link.
            _ => self.make_link_to_file(note_ref, &child_context),
        };
        Ok(events)
    }

    /// Builds the markdown events for a link to `reference`, resolved against the
    /// vault contents.
    ///
    /// The link target is percent-encoded and made relative to the root note (not
    /// the current note) so that links inside embedded content resolve correctly.
    /// When the referenced file cannot be found, emphasized display text is
    /// emitted instead of a link (after printing a warning).
    fn make_link_to_file<'b, 'c>(
        &self,
        reference: ObsidianNoteReference<'b>,
        context: &Context,
    ) -> MarkdownEvents<'c> {
        // A reference without a file part points at the current file itself
        // (e.g. `[[#Section]]`).
        let target_file = reference
            .file
            .map(|file| lookup_filename_in_vault(file, &self.vault_contents.as_ref().unwrap()))
            .unwrap_or_else(|| Some(context.current_file()));

        if target_file.is_none() {
            // TODO: Extract into configurable function.
            eprintln!(
                "Warning: Unable to find referenced note\n\tReference: '{}'\n\tSource: '{}'\n",
                reference
                    .file
                    .unwrap_or_else(|| context.current_file().to_str().unwrap()),
                context.current_file().display(),
            );
            return vec![
                Event::Start(Tag::Emphasis),
                Event::Text(CowStr::from(reference.display())),
                Event::End(Tag::Emphasis),
            ];
        }
        let target_file = target_file.unwrap();
        // We use root_file() rather than current_file() here to make sure links are always
        // relative to the outer-most note, which is the note which this content is inserted into
        // in case of embedded notes.
        let rel_link = diff_paths(
            target_file,
            &context
                .root_file()
                .parent()
                .expect("obsidian content files should always have a parent"),
        )
        .expect("should be able to build relative path when target file is found in vault");

        let rel_link = rel_link.to_string_lossy();
        let mut link = utf8_percent_encode(&rel_link, PERCENTENCODE_CHARS).to_string();

        // Append a slugified `#section` anchor when the reference targets one.
        if let Some(section) = reference.section {
            link.push('#');
            link.push_str(&slugify(section));
        }

        let link_tag = pulldown_cmark::Tag::Link(
            pulldown_cmark::LinkType::Inline,
            CowStr::from(link),
            CowStr::from(""),
        );

        vec![
            Event::Start(link_tag.clone()),
            Event::Text(CowStr::from(reference.display())),
            Event::End(link_tag.clone()),
        ]
    }
}

/// Finds the vault file a wikilink-style `filename` refers to, if any.
///
/// Returns the first path in `vault_contents` whose trailing components match.
fn lookup_filename_in_vault<'a>(
    filename: &str,
    vault_contents: &'a [PathBuf],
) -> Option<&'a PathBuf> {
    // Markdown files don't have their .md extension added by Obsidian, but other files (images,
    // PDFs, etc) do so we match on both possibilities.
    //
    // References can also refer to notes in a different case (to lowercase text in a
    // sentence even if the note is capitalized for example) so we also try a case-insensitive
    // lookup.
    vault_contents.iter().find(|path| {
        let path_lowered = PathBuf::from(path.to_string_lossy().to_lowercase());
        path.ends_with(&filename)
            || path_lowered.ends_with(&filename.to_lowercase())
            || path.ends_with(format!("{}.md", &filename))
            || path_lowered.ends_with(format!("{}.md", &filename.to_lowercase()))
    })
}

/// Serializes a stream of markdown events back into markdown text, with a
/// trailing newline.
fn render_mdevents_to_mdtext(markdown: MarkdownEvents) -> String {
    let mut buffer = String::new();
    cmark_with_options(
        markdown.iter(),
        &mut buffer,
        None,
        pulldown_cmark_to_cmark::Options::default(),
    )
    .expect("formatting to string not expected to fail");
    buffer.push('\n');
    buffer
}

/// Creates `dest`, creating its parent directories first if they are missing.
fn create_file(dest: &Path) -> Result<File> {
    let file = File::create(&dest)
        .or_else(|err| {
            // First attempt failed; when the cause was a missing parent directory,
            // create the directory tree and retry.
            // NOTE(review): the retry below runs for *any* error kind, not just
            // NotFound — harmless (the retry fails the same way) but wasteful.
            if err.kind() == ErrorKind::NotFound {
                let parent = dest.parent().expect("file should have a parent directory");
                if let Err(err) = std::fs::create_dir_all(&parent) {
                    return Err(err);
                }
            }
            File::create(&dest)
        })
        .context(WriteError { path: dest })?;
    Ok(file)
}

/// Copies `src` to `dest`, creating `dest`'s parent directories first if they
/// are missing.
fn copy_file(src: &Path, dest: &Path) -> Result<()> {
    std::fs::copy(&src, &dest)
        .or_else(|err| {
            // Same retry-after-mkdir pattern as create_file above.
            if err.kind() == ErrorKind::NotFound {
                let parent = dest.parent().expect("file should have a parent directory");
                if let Err(err) = std::fs::create_dir_all(&parent) {
                    return Err(err);
                }
            }
            std::fs::copy(&src, &dest)
        })
        .context(WriteError { path: dest })?;
    Ok(())
}

/// Returns true when `file` has exactly the (case-sensitive) extension `md`.
fn is_markdown_file(file: &Path) -> bool {
    let no_ext = OsString::new();
    let ext = file.extension().unwrap_or(&no_ext).to_string_lossy();
    ext == "md"
}

/// Reduce a given `MarkdownEvents` to just those elements which are children of the given section
/// (heading name).
fn reduce_to_section<'a, 'b>(events: MarkdownEvents<'a>, section: &'b str) -> MarkdownEvents<'a> {
    // Strategy: push every event, then prune.  When the target heading's text is
    // seen (case-insensitively), everything collected before it is discarded and
    // only the heading's own start/text events are kept.  Collection stops at the
    // next heading of the same or shallower level.
    let mut filtered_events = Vec::with_capacity(events.len());
    // Whether we have ever entered the target section.
    let mut target_section_encountered = false;
    // Whether the current event is inside the target section.
    let mut currently_in_target_section = false;
    // Heading level at which the target section started.
    let mut section_level = 0;
    // Level of the most recent heading start event.
    let mut last_level = 0;
    // Whether the previous relevant event was a heading start (so the next Text
    // event is that heading's title).
    let mut last_tag_was_heading = false;

    for event in events.into_iter() {
        filtered_events.push(event.clone());
        match event {
            Event::Start(Tag::Heading(level)) => {
                last_tag_was_heading = true;
                last_level = level;
                // A heading at the same or a shallower level ends the target
                // section; drop the heading start we just pushed.
                if currently_in_target_section && level <= section_level {
                    currently_in_target_section = false;
                    filtered_events.pop();
                }
            }
            Event::Text(cowstr) => {
                if !last_tag_was_heading {
                    // NOTE(review): this assignment is dead — the flag is already
                    // false on this branch.
                    last_tag_was_heading = false;
                    continue;
                }
                last_tag_was_heading = false;
                // Case-insensitive match of the heading title against the
                // requested section name.
                // NOTE(review): only Text events reset the flag, so a non-text
                // event between a heading start and its text leaves the flag
                // set — presumably headings always emit Start then Text
                // back-to-back; confirm against pulldown-cmark's event stream.
                if cowstr.to_string().to_lowercase() == section.to_lowercase() {
                    target_section_encountered = true;
                    currently_in_target_section = true;
                    section_level = last_level;

                    // Discard everything before the target section, keeping only
                    // the heading start event and its title text.
                    let current_event = filtered_events.pop().unwrap();
                    let heading_start_event = filtered_events.pop().unwrap();
                    filtered_events.clear();
                    filtered_events.push(heading_start_event);
                    filtered_events.push(current_event);
                }
            }
            _ => {}
        }
        // Once we've left the target section, nothing further is relevant.
        if target_section_encountered && !currently_in_target_section {
            return filtered_events;
        }
    }
    filtered_events
}

/// Converts a borrowed `Event` into one owning its string data, so it can
/// outlive the buffer it was parsed from.
fn event_to_owned<'a>(event: Event) -> Event<'a> {
    match event {
        Event::Start(tag) => Event::Start(tag_to_owned(tag)),
        Event::End(tag) => Event::End(tag_to_owned(tag)),
        Event::Text(cowstr) => Event::Text(CowStr::from(cowstr.into_string())),
        Event::Code(cowstr) => Event::Code(CowStr::from(cowstr.into_string())),
        Event::Html(cowstr) => Event::Html(CowStr::from(cowstr.into_string())),
        Event::FootnoteReference(cowstr) => {
            Event::FootnoteReference(CowStr::from(cowstr.into_string()))
        }
        Event::SoftBreak => Event::SoftBreak,
        Event::HardBreak => Event::HardBreak,
        Event::Rule => Event::Rule,
        Event::TaskListMarker(checked) => Event::TaskListMarker(checked),
    }
}

/// Converts a borrowed `Tag` into one owning its string data.
fn tag_to_owned<'a>(tag: Tag) -> Tag<'a> {
    match tag {
        Tag::Paragraph => Tag::Paragraph,
        Tag::Heading(level) => Tag::Heading(level),
        Tag::BlockQuote => Tag::BlockQuote,
        Tag::CodeBlock(codeblock_kind) => Tag::CodeBlock(codeblock_kind_to_owned(codeblock_kind)),
        Tag::List(optional) => Tag::List(optional),
        Tag::Item => Tag::Item,
        Tag::FootnoteDefinition(cowstr) => {
            Tag::FootnoteDefinition(CowStr::from(cowstr.into_string()))
        }
        Tag::Table(alignment_vector) => Tag::Table(alignment_vector),
        Tag::TableHead => Tag::TableHead,
        Tag::TableRow => Tag::TableRow,
        Tag::TableCell => Tag::TableCell,
        Tag::Emphasis => Tag::Emphasis,
        Tag::Strong => Tag::Strong,
        Tag::Strikethrough => Tag::Strikethrough,
        Tag::Link(linktype, cowstr1, cowstr2) => Tag::Link(
            linktype,
            CowStr::from(cowstr1.into_string()),
            CowStr::from(cowstr2.into_string()),
        ),
        Tag::Image(linktype, cowstr1, cowstr2) => Tag::Image(
            linktype,
            CowStr::from(cowstr1.into_string()),
            CowStr::from(cowstr2.into_string()),
        ),
    }
}

/// Converts a borrowed `CodeBlockKind` into one owning its string data.
fn codeblock_kind_to_owned<'a>(codeblock_kind: CodeBlockKind) -> CodeBlockKind<'a> {
    match codeblock_kind {
        CodeBlockKind::Indented => CodeBlockKind::Indented,
        CodeBlockKind::Fenced(cowstr) => CodeBlockKind::Fenced(CowStr::from(cowstr.into_string())),
    }
}
40.036101
138
0.566757
d9ec5a5fb00b92293c68ec87063cd08f586aa34d
16,160
//! ACME support.
//!
//! Reference: <https://datatracker.ietf.org/doc/html/rfc8555>
//! Reference: <https://datatracker.ietf.org/doc/html/rfc8737>
//!
//! * HTTP-01
//!
//! # Example
//!
//! ```no_run
//! use salvo_core::listener::{AcmeListener, TcpListener};
//! use salvo_core::prelude::*;
//!
//! #[fn_handler]
//! async fn hello_world() -> &'static str {
//!     "Hello World"
//! }
//!
//! #[tokio::main]
//! async fn main() {
//!     let mut router = Router::new().get(hello_world);
//!     let listener = AcmeListener::builder()
//!         // .directory("letsencrypt", salvo::listener::acme::LETS_ENCRYPT_STAGING)
//!         .cache_path("acme/letsencrypt")
//!         .add_domain("acme-http01.salvo.rs")
//!         .http01_challege(&mut router)
//!         .bind("0.0.0.0:443")
//!         .await;
//!     tracing::info!("Listening on https://0.0.0.0:443");
//!     Server::new(listener.join(TcpListener::bind("0.0.0.0:80")))
//!         .serve(router)
//!         .await;
//! }
//! ```
//!
//! * TLS ALPN-01
//!
//! # Example
//!
//! ```no_run
//! use salvo_core::listener::AcmeListener;
//! use salvo_core::prelude::*;
//!
//! #[fn_handler]
//! async fn hello_world() -> &'static str {
//!     "Hello World"
//! }
//!
//! #[tokio::main]
//! async fn main() {
//!     let router = Router::new().get(hello_world);
//!     let listener = AcmeListener::builder()
//!         // .directory("letsencrypt", salvo::listener::acme::LETS_ENCRYPT_STAGING)
//!         .cache_path("acme/letsencrypt")
//!         .add_domain("acme-tls-alpn01.salvo.rs")
//!         .bind("0.0.0.0:443")
//!         .await;
//!     tracing::info!("Listening on https://0.0.0.0:443");
//!     Server::new(listener).serve(router).await;
//! }
//! ```
pub mod cache;
mod client;
mod config;
mod issuer;
mod jose;
mod key_pair;
mod resolver;

use std::collections::{HashMap, HashSet};
use std::fmt::{self, Display, Formatter};
use std::io::{self, Error as IoError, Result as IoResult};
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::{Arc, Weak};
use std::task::{Context, Poll};
use std::time::Duration;

use async_trait::async_trait;
use client::AcmeClient;
use futures_util::ready;
use futures_util::Future;
use hyper::server::accept::Accept;
use hyper::server::conn::{AddrIncoming, AddrStream};
use parking_lot::RwLock;
use resolver::{ResolveServerCert, ACME_TLS_ALPN_NAME};
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio_rustls::rustls::server::ServerConfig;
use tokio_rustls::rustls::sign::{any_ecdsa_type, CertifiedKey};
use tokio_rustls::rustls::PrivateKey;

use crate::addr::SocketAddr;
use crate::http::StatusError;
use crate::listener::{IntoAddrIncoming, Listener};
use crate::routing::FlowCtrl;
use crate::transport::Transport;
use crate::{Depot, Handler, Request, Response, Router};
use cache::AcmeCache;
pub use config::{AcmeConfig, AcmeConfigBuilder};

/// Letsencrypt production directory url
pub const LETS_ENCRYPT_PRODUCTION: &str = "https://acme-v02.api.letsencrypt.org/directory";
/// Letsencrypt staging directory url
pub const LETS_ENCRYPT_STAGING: &str = "https://acme-staging-v02.api.letsencrypt.org/directory";
/// Well known acme challenge path
pub(crate) const WELL_KNOWN_PATH: &str = "/.well-known/acme-challenge";

/// HTTP-01 challenge
const CHALLENGE_TYPE_HTTP_01: &str = "http-01";
/// TLS-ALPN-01 challenge
const CHALLENGE_TYPE_TLS_ALPN_01: &str = "tls-alpn-01";

/// Challenge type
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum ChallengeType {
    /// HTTP-01 challenge
    ///
    /// Reference: <https://letsencrypt.org/docs/challenge-types/#http-01-challenge>
    Http01,
    /// TLS-ALPN-01
    ///
    /// Reference: <https://letsencrypt.org/docs/challenge-types/#tls-alpn-01>
    TlsAlpn01,
}

impl Display for ChallengeType {
    // Renders the RFC 8555 challenge-type identifier ("http-01" / "tls-alpn-01").
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            ChallengeType::Http01 => f.write_str(CHALLENGE_TYPE_HTTP_01),
            ChallengeType::TlsAlpn01 => f.write_str(CHALLENGE_TYPE_TLS_ALPN_01),
        }
    }
}

/// ACME directory document: the endpoint URLs advertised by the CA
/// (RFC 8555 §7.1.1), deserialized from the directory URL.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Directory {
    pub(crate) new_nonce: String,
    pub(crate) new_account: String,
    pub(crate) new_order: String,
}

/// ACME order identifier (RFC 8555 §7.1.4); `kind` maps to the JSON `type` field.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Identifier {
    #[serde(rename = "type")]
    pub(crate) kind: String,
    pub(crate) value: String,
}

/// ACME problem document; only the human-readable detail is kept.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Problem {
    pub(crate) detail: String,
}

/// A single ACME challenge object (RFC 8555 §8); `kind` maps to the JSON
/// `type` field.
#[derive(Debug, Deserialize)]
pub(crate) struct Challenge {
    #[serde(rename = "type")]
    pub(crate) kind: String,
    pub(crate) url: String,
    pub(crate) token: String,
}

/// Handler for `HTTP-01` challenge.
pub(crate) struct Http01Handler {
    // Map of challenge token -> key authorization, shared with the ACME client
    // that fills it in when an order is being validated.
    pub(crate) keys: Arc<RwLock<HashMap<String, String>>>,
}

#[async_trait]
impl Handler for Http01Handler {
    #[inline]
    async fn handle(&self, req: &mut Request, _depot: &mut Depot, res: &mut Response, _ctrl: &mut FlowCtrl) {
        if let Some(token) = req.params().get("token") {
            let keys = self.keys.read();
            if let Some(value) = keys.get(token) {
                // Serve the key authorization for this token.
                res.render(value);
            } else {
                // NOTE(review): on a miss the raw token is echoed back with a
                // 200 instead of an error status — presumably intentional
                // best-effort behavior; confirm.
                tracing::error!(token = %token, "keys not found for token");
                res.render(token);
            }
        } else {
            res.set_status_error(StatusError::not_found().with_summary("token is not provide"));
        }
    }
}

/// A wrapper around an underlying listener which implements the ACME.
pub struct AcmeListener {
    incoming: AddrIncoming,
    server_config: Arc<ServerConfig>,
}

impl AcmeListener {
    /// Create `AcmeListenerBuilder`
    pub fn builder() -> AcmeListenerBuilder {
        AcmeListenerBuilder::new()
    }
}

/// Builder for [`AcmeListener`]: collects the ACME configuration (directory,
/// domains, contacts, challenge type, cache path) before binding.
pub struct AcmeListenerBuilder {
    config_builder: AcmeConfigBuilder,
    /// Interval between certificate-expiry checks of the background renewal task.
    check_duration: Duration,
}

impl AcmeListenerBuilder {
    #[inline]
    fn new() -> Self {
        let config_builder = AcmeConfig::builder();
        Self {
            config_builder,
            // Check for expiring certificates every ten minutes by default.
            check_duration: Duration::from_secs(10 * 60),
        }
    }

    /// Sets the ACME directory by name and URL.
    ///
    /// Defaults to Let's Encrypt. (The `get_` prefix is kept for backward
    /// compatibility; this is a setter.)
    #[inline]
    pub fn get_directory(self, name: impl Into<String>, url: impl Into<String>) -> Self {
        Self {
            config_builder: self.config_builder.directory(name, url),
            ..self
        }
    }

    /// Set domains.
    #[inline]
    pub fn domains(self, domains: impl Into<HashSet<String>>) -> Self {
        Self {
            config_builder: self.config_builder.domains(domains),
            ..self
        }
    }

    /// Add a domain.
    #[inline]
    pub fn add_domain(self, domain: impl Into<String>) -> Self {
        Self {
            config_builder: self.config_builder.add_domain(domain),
            ..self
        }
    }

    /// Add contact emails for the ACME account.
    #[inline]
    pub fn contacts(self, contacts: impl Into<HashSet<String>>) -> Self {
        Self {
            config_builder: self.config_builder.contacts(contacts.into()),
            ..self
        }
    }

    /// Add a contact email for the ACME account.
    #[inline]
    pub fn add_contact(self, contact: impl Into<String>) -> Self {
        Self {
            config_builder: self.config_builder.add_contact(contact.into()),
            ..self
        }
    }

    /// Selects the `HTTP-01` challenge and mounts the token-serving handler on
    /// `router` at the well-known ACME challenge path.
    ///
    /// # Panics
    ///
    /// Panics if the config builder did not produce the shared key map; this
    /// cannot happen after `http01_challege()` and indicates an internal bug.
    #[inline]
    pub fn http01_challege(self, router: &mut Router) -> Self {
        let config_builder = self.config_builder.http01_challege();
        if let Some(keys_for_http01) = &config_builder.keys_for_http01 {
            // The handler shares the token->key-authorization map with the ACME
            // client, which fills it in during order validation.
            let handler = Http01Handler {
                keys: keys_for_http01.clone(),
            };
            router
                .routers
                .push(Router::with_path(format!("{}/<token>", WELL_KNOWN_PATH)).handle(handler));
        } else {
            // Fixed message: was "`HTTP-01` challage's key should not none".
            panic!("`HTTP-01` challenge's keys should not be none");
        }
        Self {
            config_builder,
            ..self
        }
    }

    /// Selects the `TLS-ALPN-01` challenge (no router changes needed; the
    /// challenge is answered during the TLS handshake).
    #[inline]
    pub fn tls_alpn01_challege(self) -> Self {
        Self {
            config_builder: self.config_builder.tls_alpn01_challege(),
            ..self
        }
    }

    /// Sets the cache path for caching certificates.
    ///
    /// This is not a necessary option. If you do not configure the cache path,
    /// the obtained certificate will be stored in memory and will need to be
    /// obtained again when the server is restarted next time.
    #[inline]
    pub fn cache_path(self, path: impl Into<PathBuf>) -> Self {
        Self {
            config_builder: self.config_builder.cache_path(path),
            ..self
        }
    }

    /// Consumes this builder and returns a [`AcmeListener`] object.
    ///
    /// # Panics
    ///
    /// Panics when binding fails; use [`Self::try_bind`] to handle the error.
    #[inline]
    pub async fn bind(self, incoming: impl IntoAddrIncoming) -> AcmeListener {
        self.try_bind(incoming).await.unwrap()
    }

    /// Consumes this builder and returns a [`Result<AcmeListener, std::IoError>`] object.
    /// Builds the TLS server configuration, loads any cached certificate, spawns
    /// a background task that (re)issues certificates before expiry, and wraps
    /// `incoming` into an [`AcmeListener`].
    pub async fn try_bind(self, incoming: impl IntoAddrIncoming) -> IoResult<AcmeListener> {
        let Self {
            config_builder,
            check_duration,
        } = self;
        let acme_config = config_builder.build()?;

        let mut client = AcmeClient::try_new(
            &acme_config.directory_url,
            acme_config.key_pair.clone(),
            acme_config.contacts.clone(),
        )
        .await?;

        // Try to restore a previously issued key + certificate from the cache so
        // we can serve TLS immediately without waiting for a fresh ACME order.
        let mut cached_pkey = None;
        let mut cached_cert = None;
        if let Some(cache_path) = &acme_config.cache_path {
            let pkey_data = cache_path
                .read_pkey(&acme_config.directory_name, &acme_config.domains)
                .await?;
            if let Some(pkey_data) = pkey_data {
                tracing::debug!("load private key from cache");
                // Parse failures are non-fatal: we just fall back to issuing anew.
                match rustls_pemfile::pkcs8_private_keys(&mut pkey_data.as_slice()) {
                    Ok(pkey) => cached_pkey = pkey.into_iter().next(),
                    Err(err) => {
                        tracing::warn!("failed to parse cached private key: {}", err)
                    }
                };
            }
            let cert_data = cache_path
                .read_cert(&acme_config.directory_name, &acme_config.domains)
                .await?;
            if let Some(cert_data) = cert_data {
                tracing::debug!("load certificate from cache");
                match rustls_pemfile::certs(&mut cert_data.as_slice()) {
                    Ok(cert) => cached_cert = Some(cert),
                    Err(err) => {
                        tracing::warn!("failed to parse cached tls certificates: {}", err)
                    }
                };
            }
        };
        let cert_resolver = Arc::new(ResolveServerCert::default());

        // Only use the cache when *both* the key and the certificate were loaded.
        if let (Some(cached_cert), Some(cached_pkey)) = (cached_cert, cached_pkey) {
            let certs = cached_cert
                .into_iter()
                .map(tokio_rustls::rustls::Certificate)
                .collect::<Vec<_>>();
            tracing::debug!("using cached tls certificates");
            *cert_resolver.cert.write() = Some(Arc::new(CertifiedKey::new(
                certs,
                any_ecdsa_type(&PrivateKey(cached_pkey)).unwrap(),
            )));
        }

        // The renewal task holds only a Weak reference so it shuts down once the
        // listener (and with it the ServerConfig owning the resolver) is dropped.
        let weak_cert_resolver = Arc::downgrade(&cert_resolver);
        let mut server_config = ServerConfig::builder()
            .with_safe_defaults()
            .with_no_client_auth()
            .with_cert_resolver(cert_resolver);

        server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];

        // TLS-ALPN-01 answers the challenge inside the handshake, so the special
        // ACME ALPN protocol must be advertised (RFC 8737).
        if acme_config.challenge_type == ChallengeType::TlsAlpn01 {
            server_config.alpn_protocols.push(ACME_TLS_ALPN_NAME.to_vec());
        }

        let listener = AcmeListener {
            incoming: incoming.into_incoming(),
            server_config: Arc::new(server_config),
        };

        // Background loop: re-issue the certificate when it is close to expiry,
        // checking every `check_duration`. Exits when the resolver is dropped.
        tokio::spawn(async move {
            while let Some(cert_resolver) = Weak::upgrade(&weak_cert_resolver) {
                if cert_resolver.will_expired(acme_config.before_expired) {
                    if let Err(err) = issuer::issue_cert(&mut client, &acme_config, &cert_resolver).await {
                        tracing::error!(error = %err, "failed to issue certificate");
                    }
                }
                tokio::time::sleep(check_duration).await;
            }
        });

        Ok(listener)
    }
}

impl Listener for AcmeListener {}

// NOTE(review): `Accept::poll_accept` is a plain (non-async) trait method, so
// this `#[async_trait]` attribute looks unnecessary — confirm it can be removed.
#[async_trait::async_trait]
impl Accept for AcmeListener {
    type Conn = AcmeStream;
    type Error = IoError;

    #[inline]
    fn poll_accept(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
        let this = self.get_mut();
        // Accept a TCP connection and wrap it into a lazily-handshaking TLS stream.
        match ready!(Pin::new(&mut this.incoming).poll_accept(cx)) {
            Some(Ok(sock)) => Poll::Ready(Some(Ok(AcmeStream::new(sock, this.server_config.clone())))),
            Some(Err(e)) => Poll::Ready(Some(Err(e))),
            None => Poll::Ready(None),
        }
    }
}

// Connection state: TLS handshake still in progress, or established stream.
enum AcmeState {
    Handshaking(tokio_rustls::Accept<AddrStream>),
    Streaming(tokio_rustls::server::TlsStream<AddrStream>),
}

/// tokio_rustls::server::TlsStream doesn't expose constructor methods,
/// so we have to TlsAcceptor::accept and handshake to have access to it
/// AcmeStream implements AsyncRead/AsyncWrite handshaking tokio_rustls::Accept first
pub struct AcmeStream {
    state: AcmeState,
    remote_addr: SocketAddr,
}

impl Transport for AcmeStream {
    #[inline]
    fn remote_addr(&self) -> Option<SocketAddr> {
        Some(self.remote_addr.clone())
    }
}

impl AcmeStream {
    /// Wraps an accepted TCP stream; the TLS handshake is driven lazily by the
    /// first read/write poll.
    #[inline]
    fn new(stream: AddrStream, config: Arc<ServerConfig>) -> Self {
        let remote_addr = stream.remote_addr();
        let accept = tokio_rustls::TlsAcceptor::from(config).accept(stream);
        AcmeStream {
            state: AcmeState::Handshaking(accept),
            remote_addr: remote_addr.into(),
        }
    }
}

impl AsyncRead for AcmeStream {
    #[inline]
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context, buf: &mut ReadBuf) -> Poll<io::Result<()>> {
        let pin = self.get_mut();
        match pin.state {
            // Drive the handshake to completion, then immediately issue the read
            // on the fresh stream and transition to Streaming.
            AcmeState::Handshaking(ref mut accept) => match ready!(Pin::new(accept).poll(cx)) {
                Ok(mut stream) => {
                    let result = Pin::new(&mut stream).poll_read(cx, buf);
                    pin.state = AcmeState::Streaming(stream);
                    result
                }
                Err(err) => Poll::Ready(Err(err)),
            },
            AcmeState::Streaming(ref mut stream) => Pin::new(stream).poll_read(cx, buf),
        }
    }
}

impl AsyncWrite for AcmeStream {
    #[inline]
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
        let pin = self.get_mut();
        match pin.state {
            // Same pattern as poll_read: finish the handshake, write, transition.
            AcmeState::Handshaking(ref mut accept) => match ready!(Pin::new(accept).poll(cx)) {
                Ok(mut stream) => {
                    let result = Pin::new(&mut stream).poll_write(cx, buf);
                    pin.state = AcmeState::Streaming(stream);
                    result
                }
                Err(err) => Poll::Ready(Err(err)),
            },
            AcmeState::Streaming(ref mut stream) => Pin::new(stream).poll_write(cx, buf),
        }
    }

    #[inline]
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        match self.state {
            // Nothing has been written before the handshake completes, so a
            // flush during handshaking is trivially done.
            AcmeState::Handshaking(_) => Poll::Ready(Ok(())),
            AcmeState::Streaming(ref mut stream) => Pin::new(stream).poll_flush(cx),
        }
    }

    #[inline]
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        match self.state {
            AcmeState::Handshaking(_) => Poll::Ready(Ok(())),
            AcmeState::Streaming(ref mut stream) => Pin::new(stream).poll_shutdown(cx),
        }
    }
}
32.646465
113
0.591213
4a01370aefbdcaa7e0896f2538b50449438d824f
129,350
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use crate::models::*; pub mod domains { use crate::models::*; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, domain_name: &str, ) -> std::result::Result<Domain, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}", operation_config.base_path(), subscription_id, resource_group_name, domain_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Domain = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] 
ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, domain_name: &str, domain_info: &Domain, ) -> std::result::Result<Domain, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}", operation_config.base_path(), subscription_id, resource_group_name, domain_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(domain_info).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: Domain = 
serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(create_or_update::Error::DefaultResponse { status_code }), } } pub mod create_or_update { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, domain_name: &str, domain_update_parameters: &DomainUpdateParameters, ) -> std::result::Result<Domain, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}", operation_config.base_path(), subscription_id, resource_group_name, domain_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", 
operation_config.api_version()); let req_body = azure_core::to_json(domain_update_parameters).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: Domain = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(update::Error::DefaultResponse { status_code }), } } pub mod update { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, domain_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}", operation_config.base_path(), subscription_id, resource_group_name, domain_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let 
Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } } pub mod delete { use crate::{models, models::*}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, filter: Option<&str>, top: Option<i64>, ) -> std::result::Result<DomainsListResult, list_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/providers/Microsoft.EventGrid/domains", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DomainsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_by_subscription::Error::DefaultResponse { status_code }), } } pub mod list_by_subscription { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] 
ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, filter: Option<&str>, top: Option<i64>, ) -> std::result::Result<DomainsListResult, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = 
rsp.body();
            let rsp_value: DomainsListResult = serde_json::from_slice(rsp_body)
                .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => Err(list_by_resource_group::Error::DefaultResponse { status_code }),
    }
}
// Error type for domains::list_by_resource_group.
pub mod list_by_resource_group {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse { status_code: http::StatusCode },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
// Retrieves the shared access keys of a domain (POST .../domains/{domain}/listKeys).
pub async fn list_shared_access_keys(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    domain_name: &str,
) -> std::result::Result<DomainSharedAccessKeys, list_shared_access_keys::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}/listKeys",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        domain_name
    );
    let mut url = url::Url::parse(url_str).map_err(list_shared_access_keys::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list_shared_access_keys::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION,
            format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    // Empty POST body: Content-Length must still be set explicitly to 0.
    req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder
        .body(req_body)
        .map_err(list_shared_access_keys::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(list_shared_access_keys::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: DomainSharedAccessKeys = serde_json::from_slice(rsp_body)
                .map_err(|source| list_shared_access_keys::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => Err(list_shared_access_keys::Error::DefaultResponse { status_code }),
    }
}
// Error type for domains::list_shared_access_keys.
pub mod list_shared_access_keys {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse { status_code: http::StatusCode },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
// Regenerates one shared access key of a domain (POST .../domains/{domain}/regenerateKey,
// JSON body names which key to regenerate).
pub async fn regenerate_key(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    domain_name: &str,
    regenerate_key_request: &DomainRegenerateKeyRequest,
) -> std::result::Result<DomainSharedAccessKeys, regenerate_key::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}/regenerateKey",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        domain_name
    );
    let mut url = url::Url::parse(url_str).map_err(regenerate_key::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(regenerate_key::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    // Unlike the listKeys call, this one carries a JSON request body.
    let req_body = azure_core::to_json(regenerate_key_request).map_err(regenerate_key::Error::SerializeError)?;
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(regenerate_key::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(regenerate_key::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: DomainSharedAccessKeys =
                serde_json::from_slice(rsp_body).map_err(|source| regenerate_key::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => Err(regenerate_key::Error::DefaultResponse { status_code }),
    }
}
// Error type for domains::regenerate_key.
pub mod regenerate_key {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse { status_code: http::StatusCode },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
}
// Operations on topics nested under an EventGrid domain.
pub mod domain_topics {
    use crate::models::*;
    // Gets a single domain topic (GET .../domains/{domain}/topics/{topic}).
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        domain_name: &str,
        domain_topic_name: &str,
    ) -> std::result::Result<DomainTopic, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}/topics/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            domain_name,
            domain_topic_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: DomainTopic =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => Err(get::Error::DefaultResponse { status_code }),
        }
    }
    // Error type for domain_topics::get.
    pub mod get {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Creates or updates a domain topic (PUT with an empty request body —
    // a domain topic has no settable properties in this API version).
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        domain_name: &str,
        domain_topic_name: &str,
    ) -> std::result::Result<DomainTopic, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}/topics/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            domain_name,
            domain_topic_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
.map_err(create_or_update::Error::ExecuteRequestError)?;
        // Service replies 201 Created with the DomainTopic body on success.
        match rsp.status() {
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: DomainTopic = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
        }
    }
    // Error type for domain_topics::create_or_update.
    pub mod create_or_update {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Deletes a domain topic. Long-running on the service side:
    // 202 Accepted (deletion in progress) or 204 No Content are both success.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        domain_name: &str,
        domain_topic_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}/topics/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            domain_name,
            domain_topic_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => Err(delete::Error::DefaultResponse { status_code }),
        }
    }
    // Response and error types for domain_topics::delete.
    pub mod delete {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Lists topics under a domain, with optional OData $filter/$top paging parameters.
    pub async fn list_by_domain(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        domain_name: &str,
        filter: Option<&str>,
        top: Option<i64>,
    ) -> std::result::Result<DomainTopicsListResult, list_by_domain::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}/topics",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            domain_name
        );
        let
mut url = url::Url::parse(url_str).map_err(list_by_domain::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_domain::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(top) = top {
            url.query_pairs_mut().append_pair("$top", top.to_string().as_str());
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_by_domain::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_domain::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: DomainTopicsListResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list_by_domain::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => Err(list_by_domain::Error::DefaultResponse { status_code }),
        }
    }
    // Error type for domain_topics::list_by_domain.
    pub mod list_by_domain {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
// Operations on event subscriptions. These are scope-based: `scope` is a full ARM
// resource id (subscription, resource group, or resource) the subscription hangs off.
pub mod event_subscriptions {
    use crate::models::*;
    // Gets an event subscription attached to the given scope.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        scope: &str,
        event_subscription_name: &str,
    ) -> std::result::Result<EventSubscription, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/Microsoft.EventGrid/eventSubscriptions/{}",
            operation_config.base_path(),
            scope,
            event_subscription_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EventSubscription =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => Err(get::Error::DefaultResponse { status_code }),
        }
    }
    // Error type for event_subscriptions::get.
    pub mod get {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Creates or updates an event subscription at the given scope
    // (PUT with the EventSubscription serialized as the JSON body).
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        scope: &str,
        event_subscription_name: &str,
        event_subscription_info: &EventSubscription,
    ) -> std::result::Result<EventSubscription, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/Microsoft.EventGrid/eventSubscriptions/{}",
            operation_config.base_path(),
            scope,
            event_subscription_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(event_subscription_info).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        // 201 Created is the documented success status for this PUT.
        match rsp.status() {
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: EventSubscription =
                    serde_json::from_slice(rsp_body)
                        .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => Err(create_or_update::Error::DefaultResponse { status_code }),
        }
    }
    // Error type for event_subscriptions::create_or_update.
    pub mod create_or_update {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Partially updates an existing event subscription (PATCH with JSON body).
    pub async fn update(
        operation_config: &crate::OperationConfig,
        scope: &str,
        event_subscription_name: &str,
        event_subscription_update_parameters: &EventSubscriptionUpdateParameters,
    ) -> std::result::Result<EventSubscription, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/Microsoft.EventGrid/eventSubscriptions/{}",
            operation_config.base_path(),
            scope,
            event_subscription_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body
= azure_core::to_json(event_subscription_update_parameters).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        // NOTE(review): only 201 Created is accepted as success here — this mirrors the
        // generated spec for this API version; confirm against the service swagger if changed.
        match rsp.status() {
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: EventSubscription =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => Err(update::Error::DefaultResponse { status_code }),
        }
    }
    // Error type for event_subscriptions::update.
    pub mod update {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Deletes an event subscription; 200, 202 (async in progress) and 204 are all success.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        scope: &str,
        event_subscription_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/Microsoft.EventGrid/eventSubscriptions/{}",
            operation_config.base_path(),
            scope,
            event_subscription_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response =
                token_credential
                    .get_token(operation_config.token_credential_resource())
                    .await
                    .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => Err(delete::Error::DefaultResponse { status_code }),
        }
    }
    // Response and error types for event_subscriptions::delete.
    pub mod delete {
        use crate::{models, models::*};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Retrieves the full (including secrets) endpoint URL of an event subscription
    // (POST .../getFullUrl).
    pub async fn get_full_url(
        operation_config: &crate::OperationConfig,
        scope: &str,
        event_subscription_name: &str,
    ) -> std::result::Result<EventSubscriptionFullUrl, get_full_url::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
"{}/{}/providers/Microsoft.EventGrid/eventSubscriptions/{}/getFullUrl",
            operation_config.base_path(),
            scope,
            event_subscription_name
        );
        let mut url = url::Url::parse(url_str).map_err(get_full_url::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_full_url::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        // Empty POST body: Content-Length must still be set explicitly to 0.
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get_full_url::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_full_url::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EventSubscriptionFullUrl =
                    serde_json::from_slice(rsp_body).map_err(|source| get_full_url::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => Err(get_full_url::Error::DefaultResponse { status_code }),
        }
    }
    // Error type for event_subscriptions::get_full_url.
    pub mod get_full_url {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Lists all global event subscriptions under an Azure subscription;
    // optional OData $filter/$top and an EventGrid label filter.
    pub async fn list_global_by_subscription(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        filter: Option<&str>,
        top: Option<i64>,
        label: Option<&str>,
    ) -> std::result::Result<EventSubscriptionsListResult, list_global_by_subscription::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.EventGrid/eventSubscriptions",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list_global_by_subscription::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_global_by_subscription::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(top) = top {
            url.query_pairs_mut().append_pair("$top", top.to_string().as_str());
        }
        if let Some(label) = label {
            url.query_pairs_mut().append_pair("label", label);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_global_by_subscription::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_global_by_subscription::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EventSubscriptionsListResult =
serde_json::from_slice(rsp_body)
                    .map_err(|source| list_global_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => Err(list_global_by_subscription::Error::DefaultResponse { status_code }),
        }
    }
    // Error type for event_subscriptions::list_global_by_subscription.
    pub mod list_global_by_subscription {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Lists global event subscriptions for a specific topic type within a subscription
    // (GET .../providers/Microsoft.EventGrid/topicTypes/{type}/eventSubscriptions).
    pub async fn list_global_by_subscription_for_topic_type(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        topic_type_name: &str,
        filter: Option<&str>,
        top: Option<i64>,
        label: Option<&str>,
    ) -> std::result::Result<EventSubscriptionsListResult, list_global_by_subscription_for_topic_type::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.EventGrid/topicTypes/{}/eventSubscriptions",
            operation_config.base_path(),
            subscription_id,
            topic_type_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_global_by_subscription_for_topic_type::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_global_by_subscription_for_topic_type::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(top) = top {
            url.query_pairs_mut().append_pair("$top", top.to_string().as_str());
        }
        if let Some(label) = label {
            url.query_pairs_mut().append_pair("label", label);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_global_by_subscription_for_topic_type::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_global_by_subscription_for_topic_type::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EventSubscriptionsListResult = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_global_by_subscription_for_topic_type::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => Err(list_global_by_subscription_for_topic_type::Error::DefaultResponse { status_code }),
        }
    }
    // Error type for event_subscriptions::list_global_by_subscription_for_topic_type.
    pub mod list_global_by_subscription_for_topic_type {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // Lists all global event subscriptions under a resource group.
    pub async fn
// `list_global_by_resource_group` (the `pub async fn` introducer sits at the end of
// the previous physical line): GETs
// {base}/subscriptions/{id}/resourceGroups/{rg}/providers/Microsoft.EventGrid/eventSubscriptions,
// then deserializes a 200 OK body into `EventSubscriptionsListResult`. It is followed
// by its generated error module and by `list_global_by_resource_group_for_topic_type`
// (same template, URL scoped to a topic type) with its own error module.
list_global_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, filter: Option<&str>, top: Option<i64>, label: Option<&str>, ) -> std::result::Result<EventSubscriptionsListResult, list_global_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/eventSubscriptions", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_global_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_global_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(label) = label { url.query_pairs_mut().append_pair("label", label); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_global_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_global_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventSubscriptionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_global_by_resource_group::Error::DeserializeError(source,
// Deserialization failures keep a copy of the raw response body for diagnostics.
rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_global_by_resource_group::Error::DefaultResponse { status_code }), } } pub mod list_global_by_resource_group { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_global_by_resource_group_for_topic_type( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, topic_type_name: &str, filter: Option<&str>, top: Option<i64>, label: Option<&str>, ) -> std::result::Result<EventSubscriptionsListResult, list_global_by_resource_group_for_topic_type::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/topicTypes/{}/eventSubscriptions", operation_config.base_path(), subscription_id, resource_group_name, topic_type_name ); let mut url = url::Url::parse(url_str).map_err(list_global_by_resource_group_for_topic_type::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_global_by_resource_group_for_topic_type::Error::GetTokenError)?; req_builder =
// Attach the freshly acquired token as an `Authorization: Bearer <secret>` header.
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(label) = label { url.query_pairs_mut().append_pair("label", label); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_global_by_resource_group_for_topic_type::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_global_by_resource_group_for_topic_type::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventSubscriptionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_global_by_resource_group_for_topic_type::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_global_by_resource_group_for_topic_type::Error::DefaultResponse { status_code }), } } pub mod list_global_by_resource_group_for_topic_type { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn
// Regional (location-scoped) listing variants. `list_regional_by_subscription`
// (introducer `pub async fn` ends the previous physical line) GETs
// {base}/subscriptions/{id}/providers/Microsoft.EventGrid/locations/{loc}/eventSubscriptions;
// it is followed by its error module, by `list_regional_by_resource_group`
// (same template with a resourceGroups segment) and, at the end of this span,
// the opening of `list_regional_by_subscription_for_topic_type`'s parameter list.
list_regional_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, location: &str, filter: Option<&str>, top: Option<i64>, label: Option<&str>, ) -> std::result::Result<EventSubscriptionsListResult, list_regional_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.EventGrid/locations/{}/eventSubscriptions", operation_config.base_path(), subscription_id, location ); let mut url = url::Url::parse(url_str).map_err(list_regional_by_subscription::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_regional_by_subscription::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(label) = label { url.query_pairs_mut().append_pair("label", label); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_regional_by_subscription::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_regional_by_subscription::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventSubscriptionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_regional_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
// 200 OK parsed successfully; any other status falls through to `DefaultResponse` below.
Ok(rsp_value) } status_code => Err(list_regional_by_subscription::Error::DefaultResponse { status_code }), } } pub mod list_regional_by_subscription { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_regional_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, location: &str, filter: Option<&str>, top: Option<i64>, label: Option<&str>, ) -> std::result::Result<EventSubscriptionsListResult, list_regional_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/locations/{}/eventSubscriptions", operation_config.base_path(), subscription_id, resource_group_name, location ); let mut url = url::Url::parse(url_str).map_err(list_regional_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_regional_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
// Append `api-version` plus the optional `$filter`/`$top`/`label` query parameters.
url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(label) = label { url.query_pairs_mut().append_pair("label", label); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_regional_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_regional_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventSubscriptionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_regional_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_regional_by_resource_group::Error::DefaultResponse { status_code }), } } pub mod list_regional_by_resource_group { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_regional_by_subscription_for_topic_type( operation_config: &crate::OperationConfig, subscription_id: &str, location: &str, topic_type_name: &str, filter: Option<&str>, top:
// Continuation of `list_regional_by_subscription_for_topic_type`'s parameter list
// (the `top:` label sits at the end of the previous physical line). The span then
// holds that operation's body and error module, followed by
// `list_regional_by_resource_group_for_topic_type` (same template, additionally
// scoped to a resource group) and its error module.
Option<i64>, label: Option<&str>, ) -> std::result::Result<EventSubscriptionsListResult, list_regional_by_subscription_for_topic_type::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.EventGrid/locations/{}/topicTypes/{}/eventSubscriptions", operation_config.base_path(), subscription_id, location, topic_type_name ); let mut url = url::Url::parse(url_str).map_err(list_regional_by_subscription_for_topic_type::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_regional_by_subscription_for_topic_type::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(label) = label { url.query_pairs_mut().append_pair("label", label); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_regional_by_subscription_for_topic_type::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_regional_by_subscription_for_topic_type::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventSubscriptionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_regional_by_subscription_for_topic_type::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code
// Any non-200 status is reported verbatim via `Error::DefaultResponse` (arm continues below).
=> Err(list_regional_by_subscription_for_topic_type::Error::DefaultResponse { status_code }), } } pub mod list_regional_by_subscription_for_topic_type { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_regional_by_resource_group_for_topic_type( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, location: &str, topic_type_name: &str, filter: Option<&str>, top: Option<i64>, label: Option<&str>, ) -> std::result::Result<EventSubscriptionsListResult, list_regional_by_resource_group_for_topic_type::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/locations/{}/topicTypes/{}/eventSubscriptions", operation_config.base_path(), subscription_id, resource_group_name, location, topic_type_name ); let mut url = url::Url::parse(url_str).map_err(list_regional_by_resource_group_for_topic_type::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_regional_by_resource_group_for_topic_type::Error::GetTokenError)?; req_builder =
// Attach the freshly acquired token as an `Authorization: Bearer <secret>` header.
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(label) = label { url.query_pairs_mut().append_pair("label", label); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_regional_by_resource_group_for_topic_type::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_regional_by_resource_group_for_topic_type::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventSubscriptionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_regional_by_resource_group_for_topic_type::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_regional_by_resource_group_for_topic_type::Error::DefaultResponse { status_code }), } } pub mod list_regional_by_resource_group_for_topic_type { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn
// `list_by_resource` (introducer `pub async fn` ends the previous physical line):
// lists event subscriptions under an arbitrary resource, identified by
// provider namespace / resource type / resource name path segments. It is followed
// by its error module and by `list_by_domain_topic`, whose over-long URL literal was
// emitted by the generator as an unformatted `format !` call (whitespace between
// tokens only — same semantics as the other `format!` URLs).
list_by_resource( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, provider_namespace: &str, resource_type_name: &str, resource_name: &str, filter: Option<&str>, top: Option<i64>, label: Option<&str>, ) -> std::result::Result<EventSubscriptionsListResult, list_by_resource::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/{}/{}/{}/providers/Microsoft.EventGrid/eventSubscriptions", operation_config.base_path(), subscription_id, resource_group_name, provider_namespace, resource_type_name, resource_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(label) = label { url.query_pairs_mut().append_pair("label", label); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_resource::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventSubscriptionsListResult = serde_json::from_slice(rsp_body) .map_err(|source|
// Deserialization failures keep a copy of the raw response body for diagnostics.
list_by_resource::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_by_resource::Error::DefaultResponse { status_code }), } } pub mod list_by_resource { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_domain_topic( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, domain_name: &str, topic_name: &str, filter: Option<&str>, top: Option<i64>, label: Option<&str>, ) -> std::result::Result<EventSubscriptionsListResult, list_by_domain_topic::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/domains/{}/topics/{}/providers/Microsoft.EventGrid/eventSubscriptions" , operation_config .
// (continuation of the unformatted `format !` call building the domain-topic URL)
base_path () , subscription_id , resource_group_name , domain_name , topic_name) ; let mut url = url::Url::parse(url_str).map_err(list_by_domain_topic::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_domain_topic::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(label) = label { url.query_pairs_mut().append_pair("label", label); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_domain_topic::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_domain_topic::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventSubscriptionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_domain_topic::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_by_domain_topic::Error::DefaultResponse { status_code }), } } pub mod list_by_domain_topic { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")]
// Tail of `list_by_domain_topic`'s error module; the extra `}` then closes the
// enclosing operation-group module. `pub mod operations` follows with a single
// `list` operation (GET {base}/providers/Microsoft.EventGrid/operations, no query
// filters besides `api-version`) and its error module; finally `pub mod topics`
// opens with `get` (GET a single Topic by resource group and name).
BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod operations { use crate::models::*; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationsListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.EventGrid/operations", operation_config.base_path(),); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: OperationsListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error
// (generated error enum continues with the same standard variant set as every other operation)
{ #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod topics { use crate::models::*; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, topic_name: &str, ) -> std::result::Result<Topic, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/topics/{}", operation_config.base_path(), subscription_id, resource_group_name, topic_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body
// Tail of `topics::get` (deserializes a 200 OK body into `Topic`), its error module,
// then `create_or_update` (PUT with a serialized `Topic` body) and `update` (PATCH
// with `TopicUpdateParameters`), each with its error module, and the opening of
// `delete`. NOTE(review): both `create_or_update` and `update` accept ONLY
// 201 CREATED as success; if the service can also answer 200 OK for an update of an
// existing topic, that response would be reported as `DefaultResponse` — TODO
// confirm against the Event Grid REST specification this code was generated from.
= rsp.body(); let rsp_value: Topic = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, topic_name: &str, topic_info: &Topic, ) -> std::result::Result<Topic, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/topics/{}", operation_config.base_path(), subscription_id, resource_group_name, topic_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version",
// (argument continues: the configured api-version string)
operation_config.api_version()); let req_body = azure_core::to_json(topic_info).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: Topic = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(create_or_update::Error::DefaultResponse { status_code }), } } pub mod create_or_update { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, topic_name: &str, topic_update_parameters: &TopicUpdateParameters, ) -> std::result::Result<Topic, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/topics/{}", operation_config.base_path(), subscription_id, resource_group_name, topic_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder =
// `update` issues a PATCH whose body is the serialized `TopicUpdateParameters`.
http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(topic_update_parameters).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: Topic = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(update::Error::DefaultResponse { status_code }), } } pub mod update { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, topic_name: &str, ) ->
std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/topics/{}", operation_config.base_path(), subscription_id, resource_group_name, topic_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } } pub mod delete { use crate::{models, models::*}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), 
#[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, filter: Option<&str>, top: Option<i64>, ) -> std::result::Result<TopicsListResult, list_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.EventGrid/topics", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: TopicsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => 
Err(list_by_subscription::Error::DefaultResponse { status_code }), } } pub mod list_by_subscription { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, filter: Option<&str>, top: Option<i64>, ) -> std::result::Result<TopicsListResult, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/topics", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let 
Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: TopicsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_by_resource_group::Error::DefaultResponse { status_code }), } } pub mod list_by_resource_group { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_shared_access_keys( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, topic_name: &str, ) -> std::result::Result<TopicSharedAccessKeys, list_shared_access_keys::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/topics/{}/listKeys", operation_config.base_path(), subscription_id, resource_group_name, topic_name ); let mut url = 
url::Url::parse(url_str).map_err(list_shared_access_keys::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_shared_access_keys::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_shared_access_keys::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_shared_access_keys::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: TopicSharedAccessKeys = serde_json::from_slice(rsp_body) .map_err(|source| list_shared_access_keys::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_shared_access_keys::Error::DefaultResponse { status_code }), } } pub mod list_shared_access_keys { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, 
bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn regenerate_key( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, topic_name: &str, regenerate_key_request: &TopicRegenerateKeyRequest, ) -> std::result::Result<TopicSharedAccessKeys, regenerate_key::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.EventGrid/topics/{}/regenerateKey", operation_config.base_path(), subscription_id, resource_group_name, topic_name ); let mut url = url::Url::parse(url_str).map_err(regenerate_key::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(regenerate_key::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(regenerate_key_request).map_err(regenerate_key::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(regenerate_key::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(regenerate_key::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: TopicSharedAccessKeys = serde_json::from_slice(rsp_body).map_err(|source| regenerate_key::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(regenerate_key::Error::DefaultResponse { status_code }), } } pub mod regenerate_key { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] 
pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_event_types( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, provider_namespace: &str, resource_type_name: &str, resource_name: &str, ) -> std::result::Result<EventTypesListResult, list_event_types::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/{}/{}/{}/providers/Microsoft.EventGrid/eventTypes", operation_config.base_path(), subscription_id, resource_group_name, provider_namespace, resource_type_name, resource_name ); let mut url = url::Url::parse(url_str).map_err(list_event_types::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_event_types::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).map_err(list_event_types::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_event_types::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventTypesListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_event_types::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_event_types::Error::DefaultResponse { status_code }), } } pub mod list_event_types { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod topic_types { use crate::models::*; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<TopicTypesListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.EventGrid/topicTypes", operation_config.base_path(),); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = 
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: TopicTypesListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get(operation_config: &crate::OperationConfig, topic_type_name: &str) -> std::result::Result<TopicTypeInfo, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/providers/Microsoft.EventGrid/topicTypes/{}", operation_config.base_path(), topic_type_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let 
Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: TopicTypeInfo = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_event_types( operation_config: &crate::OperationConfig, topic_type_name: &str, ) -> std::result::Result<EventTypesListResult, list_event_types::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/providers/Microsoft.EventGrid/topicTypes/{}/eventTypes", 
operation_config.base_path(), topic_type_name ); let mut url = url::Url::parse(url_str).map_err(list_event_types::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_event_types::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_event_types::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_event_types::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventTypesListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_event_types::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_event_types::Error::DefaultResponse { status_code }), } } pub mod list_event_types { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] 
GetTokenError(azure_core::Error), } } }
49.578383
277
0.605528
09020080d662b38284b11ec10104575a52681ce0
4,662
use std::convert::{TryFrom, TryInto}; use std::mem; use std::sync::Arc; use std::time::{UNIX_EPOCH, Duration}; use anyhow::{anyhow, Result}; use flate2::{Compression, write::GzEncoder}; use log::{debug, error, warn}; use parking_lot::Mutex; use reqwest::{Client as HttpClient, Method, Request, Url}; use reqwest::header::{CONTENT_TYPE, CONTENT_ENCODING, HeaderMap, HeaderValue}; use serde_json::json; use tokio::time::interval; use crate::data::Record; use super::Args; pub struct NewRelicClient { sender: Arc<Sender>, } struct Sender { client: HttpClient, endpoint: Url, records: Mutex<Vec<Record>>, } impl NewRelicClient { pub fn new(args: Args) -> Result<Self> { let account = args.get("account")?; let key = args.get("key")?; let region = args.opt("region").unwrap_or("US"); let host = match region.to_ascii_uppercase().as_str() { "US" => "insights-collector.newrelic.com", "EU" => "insights-collector.eu01.nr-data.net", _ => return Err(anyhow!("invalid region: {}", region)), }; let endpoint = format!("https://{}/v1/accounts/{}/events", host, account); let endpoint = Url::parse(&endpoint)?; let mut headers = HeaderMap::new(); headers.insert("X-Insert-Key", HeaderValue::from_str(key)?); headers.insert(CONTENT_TYPE, "application/json".try_into()?); headers.insert(CONTENT_ENCODING, "gzip".try_into()?); let client = HttpClient::builder().default_headers(headers).build()?; let sender = Arc::new(Sender::new(client, endpoint)); let sender2 = sender.clone(); tokio::spawn(async move { match sender2.exec().await { Ok(()) => debug!("sender finished"), Err(e) => error!("sender failed: {:?}", e), } }); Ok(Self { sender }) } pub fn send(&self, record: Record) -> Result<()> { self.sender.push(record); Ok(()) } } impl Sender { fn new(client: HttpClient, endpoint: Url) -> Self { let records = Mutex::new(Vec::new()); Self { client, endpoint, records } } fn push(&self, record: Record) { self.records.lock().push(record); } fn drain(&self) -> Vec<Record> { let mut records = self.records.lock(); 
let empty = Vec::with_capacity(records.len()); mem::replace(&mut records, empty) } async fn exec(&self) -> Result<()> { let mut interval = interval(Duration::from_secs(10)); loop { interval.tick().await; let payload = self.drain().iter().map(|record| { let (id, name, image) = record.process.container.as_ref().map(|c| { (c.id.as_str(), c.name.as_str(), c.image.as_str()) }).unwrap_or_default(); let timestamp = record.timestamp.duration_since(UNIX_EPOCH)?; let timestamp = u64::try_from(timestamp.as_millis())?; Ok(json!({ "eventType": "ContainerVisibility", "timestamp": timestamp, "event": &record.event, "source.ip": record.src.ip(), "source.port": record.src.port(), "source.host": &record.hostname, "destination.ip": record.dst.ip(), "destination.port": record.dst.port(), "process.pid": record.process.pid, "process.cmd": &record.process.command.join(" "), "container.id": id, "container.name": name, "container.image": image, "bytes.rx": record.rx, "bytes.tx": record.tx, })) }).collect::<Result<Vec<_>>>()?; debug!("sending {} records", payload.len()); for chunk in payload.chunks(2000) { let mut e = GzEncoder::new(Vec::new(), Compression::default()); serde_json::to_writer(&mut e, chunk)?; let body = e.finish()?; let endpoint = self.endpoint.clone(); let mut req = Request::new(Method::POST, endpoint); *req.body_mut() = Some(body.into()); let res = self.client.execute(req).await?; if !res.status().is_success() { let body = res.text().await?; warn!("send failed: {}", body); } } } } }
34.279412
83
0.516088
6ae2ec5b52e4c7e337df1ebcef6a26cc8d98487c
24,914
// Copyright Materialize, Inc. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. //! Encoding/decoding of messages in pgwire. See "[Frontend/Backend Protocol: //! Message Formats][1]" in the PostgreSQL reference for the specification. //! //! See the [crate docs](crate) for higher level concerns. //! //! [1]: https://www.postgresql.org/docs/11/protocol-message-formats.html use std::collections::HashMap; use std::convert::TryFrom; use std::error::Error; use std::fmt; use std::str; use async_trait::async_trait; use byteorder::{ByteOrder, NetworkEndian}; use bytes::{Buf, BufMut, BytesMut}; use futures::{sink, SinkExt, TryStreamExt}; use lazy_static::lazy_static; use log::trace; use prometheus::{register_uint_counter, UIntCounter}; use tokio::io::{self, AsyncRead, AsyncReadExt, AsyncWrite, Interest, Ready}; use tokio_util::codec::{Decoder, Encoder, Framed}; use ore::cast::CastFrom; use ore::future::OreSinkExt; use ore::netio::{self, AsyncReady}; use crate::message::{ BackendMessage, ErrorResponse, FrontendMessage, FrontendStartupMessage, TransactionStatus, VERSION_CANCEL, VERSION_GSSENC, VERSION_SSL, }; use crate::server::Conn; lazy_static! { static ref BYTES_SENT: UIntCounter = register_uint_counter!( "mz_pg_sent_bytes", "total number of bytes sent to clients from pgwire" ) .unwrap(); } pub const REJECT_ENCRYPTION: u8 = b'N'; pub const ACCEPT_SSL_ENCRYPTION: u8 = b'S'; #[derive(Debug)] enum CodecError { StringNoTerminator, } impl Error for CodecError {} impl fmt::Display for CodecError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(match self { CodecError::StringNoTerminator => "The string does not have a terminator", }) } } /// A connection that manages the encoding and decoding of pgwire frames. 
pub struct FramedConn<A> { conn_id: u32, inner: sink::Buffer<Framed<Conn<A>, Codec>, BackendMessage>, } impl<A> FramedConn<A> where A: AsyncRead + AsyncWrite + Unpin, { /// Constructs a new framed connection. /// /// The underlying connection, `inner`, is expected to be something like a /// TCP stream. Anything that implements [`AsyncRead`] and [`AsyncWrite`] /// will do. /// /// The supplied `conn_id` is used to identify the connection in logging /// messages. pub fn new(conn_id: u32, inner: Conn<A>) -> FramedConn<A> { FramedConn { conn_id, inner: Framed::new(inner, Codec::new()).buffer(32), } } /// Reads and decodes one frontend message from the client. /// /// Blocks until the client sends a complete message. If the client /// terminates the stream, returns `None`. Returns an error if the client /// sends a malformatted message or if the connection underlying is broken. pub async fn recv(&mut self) -> Result<Option<FrontendMessage>, io::Error> { let message = self.inner.try_next().await?; match &message { Some(message) => trace!("cid={} recv={:?}", self.conn_id, message), None => trace!("cid={} recv=<eof>", self.conn_id), } Ok(message) } /// Encodes and sends one backend message to the client. /// /// Note that the connection is not flushed after calling this method. You /// must call [`FramedConn::flush`] explicitly. Returns an error if the /// underlying connection is broken. pub async fn send(&mut self, message: BackendMessage) -> Result<(), io::Error> { trace!("cid={} send={:?}", self.conn_id, message); Ok(self.inner.enqueue(message).await?) } /// Encodes and sends the backend messages in the `messages` iterator to the /// client. /// /// As with [`FramedConn::send`], the connection is not flushed after /// calling this method. You must call [`FramedConn::flush`] explicitly. /// Returns an error if the underlying connection is broken. pub async fn send_all( &mut self, messages: impl IntoIterator<Item = BackendMessage>, ) -> Result<(), io::Error> { // N.B. 
we intentionally don't use `self.conn.send_all` here to avoid // flushing the sink unnecessarily. for m in messages { self.send(m).await?; } Ok(()) } /// Flushes all outstanding messages. pub async fn flush(&mut self) -> Result<(), io::Error> { self.inner.flush().await } /// Injects state that affects how certain backend messages are encoded. /// /// Specifically, the encoding of `BackendMessage::DataRow` depends upon the /// types of the datums in the row. To avoid including the same type /// information in each message, we use this side channel to install the /// type information in the codec before sending any data row messages. This /// violates the abstraction boundary a bit but results in much better /// performance. pub fn set_encode_state(&mut self, encode_state: Vec<(pgrepr::Type, pgrepr::Format)>) { self.inner.get_mut().codec_mut().encode_state = encode_state; } } impl<A> FramedConn<A> where A: AsyncRead + AsyncWrite + Unpin, { pub fn inner(&self) -> &Conn<A> { self.inner.get_ref().get_ref() } } #[async_trait] impl<A> AsyncReady for FramedConn<A> where A: AsyncRead + AsyncWrite + AsyncReady + Send + Sync + Unpin, { async fn ready(&self, interest: Interest) -> io::Result<Ready> { self.inner.get_ref().get_ref().ready(interest).await } } struct Codec { decode_state: DecodeState, encode_state: Vec<(pgrepr::Type, pgrepr::Format)>, } impl Codec { /// Creates a new `Codec`. pub fn new() -> Codec { Codec { decode_state: DecodeState::Head, encode_state: vec![], } } } impl Default for Codec { fn default() -> Codec { Codec::new() } } impl Encoder<BackendMessage> for Codec { type Error = io::Error; fn encode(&mut self, msg: BackendMessage, dst: &mut BytesMut) -> Result<(), io::Error> { // Write type byte. let byte = match &msg { BackendMessage::AuthenticationOk => b'R', BackendMessage::RowDescription(_) => b'T', BackendMessage::DataRow(_) => b'D', BackendMessage::CommandComplete { .. 
} => b'C', BackendMessage::EmptyQueryResponse => b'I', BackendMessage::ReadyForQuery(_) => b'Z', BackendMessage::NoData => b'n', BackendMessage::ParameterStatus(_, _) => b'S', BackendMessage::PortalSuspended => b's', BackendMessage::BackendKeyData { .. } => b'K', BackendMessage::ParameterDescription(_) => b't', BackendMessage::ParseComplete => b'1', BackendMessage::BindComplete => b'2', BackendMessage::CloseComplete => b'3', BackendMessage::ErrorResponse(r) => { if r.severity.is_error() { b'E' } else { b'N' } } BackendMessage::CopyOutResponse { .. } => b'H', BackendMessage::CopyData(_) => b'd', BackendMessage::CopyDone => b'c', }; dst.put_u8(byte); // Write message length placeholder. The true length is filled in later. let base = dst.len(); dst.put_u32(0); // Write message contents. match msg { BackendMessage::CopyOutResponse { overall_format, column_formats, } => { dst.put_format_i8(overall_format); dst.put_length_i16(column_formats.len())?; for format in column_formats { dst.put_format_i16(format); } } BackendMessage::CopyData(data) => { dst.put_slice(&data); } BackendMessage::CopyDone => (), BackendMessage::AuthenticationOk => { dst.put_u32(0); } BackendMessage::RowDescription(fields) => { dst.put_length_i16(fields.len())?; for f in &fields { dst.put_string(&f.name.to_string()); dst.put_u32(f.table_id); dst.put_u16(f.column_id); dst.put_u32(f.type_oid); dst.put_i16(f.type_len); dst.put_i32(f.type_mod); // TODO: make the format correct dst.put_format_i16(f.format); } } BackendMessage::DataRow(fields) => { dst.put_length_i16(fields.len())?; for (f, (ty, format)) in fields.iter().zip(&self.encode_state) { if let Some(f) = f { let base = dst.len(); dst.put_u32(0); f.encode(ty, *format, dst)?; let len = dst.len() - base - 4; let len = i32::try_from(len).map_err(|_| { io::Error::new( io::ErrorKind::Other, "length of encoded data row field does not fit into an i32", ) })?; dst[base..base + 4].copy_from_slice(&len.to_be_bytes()); } else { dst.put_i32(-1); } } } 
BackendMessage::CommandComplete { tag } => { dst.put_string(&tag); } BackendMessage::ParseComplete => (), BackendMessage::BindComplete => (), BackendMessage::CloseComplete => (), BackendMessage::EmptyQueryResponse => (), BackendMessage::ReadyForQuery(status) => { dst.put_u8(match status { TransactionStatus::Idle => b'I', TransactionStatus::InTransaction => b'T', TransactionStatus::Failed => b'E', }); } BackendMessage::ParameterStatus(name, value) => { dst.put_string(name); dst.put_string(&value); } BackendMessage::PortalSuspended => (), BackendMessage::NoData => (), BackendMessage::BackendKeyData { conn_id, secret_key, } => { dst.put_u32(conn_id); dst.put_u32(secret_key); } BackendMessage::ParameterDescription(params) => { dst.put_length_i16(params.len())?; for param in params { dst.put_u32(param.oid()); } } BackendMessage::ErrorResponse(ErrorResponse { severity, code, message, detail, hint, position, }) => { dst.put_u8(b'S'); dst.put_string(severity.as_str()); dst.put_u8(b'C'); dst.put_string(code.code()); dst.put_u8(b'M'); dst.put_string(&message); if let Some(detail) = &detail { dst.put_u8(b'D'); dst.put_string(detail); } if let Some(hint) = &hint { dst.put_u8(b'H'); dst.put_string(hint); } if let Some(position) = &position { dst.put_u8(b'P'); dst.put_string(&position.to_string()); } dst.put_u8(b'\0'); } } let len = dst.len() - base; // TODO: consider finding some way to not do this per-row BYTES_SENT.inc_by(u64::cast_from(dst.len() - base)); // Overwrite length placeholder with true length. 
let len = i32::try_from(len).map_err(|_| { io::Error::new( io::ErrorKind::Other, "length of encoded message does not fit into an i32", ) })?; dst[base..base + 4].copy_from_slice(&len.to_be_bytes()); Ok(()) } } trait Pgbuf: BufMut { fn put_string(&mut self, s: &str); fn put_length_i16(&mut self, len: usize) -> Result<(), io::Error>; fn put_format_i8(&mut self, format: pgrepr::Format); fn put_format_i16(&mut self, format: pgrepr::Format); } impl<B: BufMut> Pgbuf for B { fn put_string(&mut self, s: &str) { self.put(s.as_bytes()); self.put_u8(b'\0'); } fn put_length_i16(&mut self, len: usize) -> Result<(), io::Error> { let len = i16::try_from(len) .map_err(|_| io::Error::new(io::ErrorKind::Other, "length does not fit in an i16"))?; self.put_i16(len); Ok(()) } fn put_format_i8(&mut self, format: pgrepr::Format) { self.put_i8(match format { pgrepr::Format::Text => 0, pgrepr::Format::Binary => 1, }) } fn put_format_i16(&mut self, format: pgrepr::Format) { self.put_i8(0); self.put_format_i8(format); } } pub async fn decode_startup<A>(mut conn: A) -> Result<Option<FrontendStartupMessage>, io::Error> where A: AsyncRead + Unpin, { let mut frame_len = [0; 4]; let nread = netio::read_exact_or_eof(&mut conn, &mut frame_len).await?; match nread { // Complete frame length. Continue. 4 => (), // Connection closed cleanly. Indicate that the startup sequence has // been terminated by the client. 0 => return Ok(None), // Partial frame length. Likely a client bug or network glitch, so // surface the unexpected EOF. 
_ => return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "early eof")), }; let frame_len = parse_frame_len(&frame_len)?; let mut buf = BytesMut::new(); buf.resize(frame_len, b'0'); conn.read_exact(&mut buf).await?; let mut buf = Cursor::new(&buf); let version = buf.read_i32()?; let message = match version { VERSION_CANCEL => FrontendStartupMessage::CancelRequest { conn_id: buf.read_u32()?, secret_key: buf.read_u32()?, }, VERSION_SSL => FrontendStartupMessage::SslRequest, VERSION_GSSENC => FrontendStartupMessage::GssEncRequest, _ => { let mut params = HashMap::new(); while buf.peek_byte()? != 0 { let name = buf.read_cstr()?.to_owned(); let value = buf.read_cstr()?.to_owned(); params.insert(name, value); } FrontendStartupMessage::Startup { version, params } } }; Ok(Some(message)) } #[derive(Debug)] enum DecodeState { Head, Data(u8, usize), } fn parse_frame_len(src: &[u8]) -> Result<usize, io::Error> { let n = usize::cast_from(NetworkEndian::read_u32(src)); if n > netio::MAX_FRAME_SIZE { return Err(io::Error::new( io::ErrorKind::InvalidData, netio::FrameTooBig, )); } else if n < 4 { return Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid frame length", )); } Ok(n - 4) } impl Decoder for Codec { type Item = FrontendMessage; type Error = io::Error; fn decode(&mut self, src: &mut BytesMut) -> Result<Option<FrontendMessage>, io::Error> { loop { match self.decode_state { DecodeState::Head => { if src.len() < 5 { return Ok(None); } let msg_type = src[0]; let frame_len = parse_frame_len(&src[1..])?; src.advance(5); src.reserve(frame_len); self.decode_state = DecodeState::Data(msg_type, frame_len); } DecodeState::Data(msg_type, frame_len) => { if src.len() < frame_len { return Ok(None); } let buf = src.split_to(frame_len).freeze(); let buf = Cursor::new(&buf); let msg = match msg_type { // Simple query flow. b'Q' => decode_query(buf)?, // Extended query flow. 
b'P' => decode_parse(buf)?, b'D' => decode_describe(buf)?, b'B' => decode_bind(buf)?, b'E' => decode_execute(buf)?, b'H' => decode_flush(buf)?, b'S' => decode_sync(buf)?, b'C' => decode_close(buf)?, // Termination. b'X' => decode_terminate(buf)?, // Invalid. _ => { return Err(io::Error::new( io::ErrorKind::InvalidData, format!("unknown message type {}", msg_type), )); } }; src.reserve(5); self.decode_state = DecodeState::Head; return Ok(Some(msg)); } } } } } fn decode_terminate(mut _buf: Cursor) -> Result<FrontendMessage, io::Error> { // Nothing more to decode. Ok(FrontendMessage::Terminate) } fn decode_query(mut buf: Cursor) -> Result<FrontendMessage, io::Error> { Ok(FrontendMessage::Query { sql: buf.read_cstr()?.to_string(), }) } fn decode_parse(mut buf: Cursor) -> Result<FrontendMessage, io::Error> { let name = buf.read_cstr()?; let sql = buf.read_cstr()?; let mut param_types = vec![]; for _ in 0..buf.read_i16()? { param_types.push(buf.read_u32()?); } Ok(FrontendMessage::Parse { name: name.into(), sql: sql.into(), param_types, }) } fn decode_close(mut buf: Cursor) -> Result<FrontendMessage, io::Error> { match buf.read_byte()? { b'S' => Ok(FrontendMessage::CloseStatement { name: buf.read_cstr()?.to_owned(), }), b'P' => Ok(FrontendMessage::ClosePortal { name: buf.read_cstr()?.to_owned(), }), b => Err(input_err(format!( "invalid type byte in close message: {}", b ))), } } fn decode_describe(mut buf: Cursor) -> Result<FrontendMessage, io::Error> { let first_char = buf.read_byte()?; let name = buf.read_cstr()?.to_string(); match first_char { b'S' => Ok(FrontendMessage::DescribeStatement { name }), b'P' => Ok(FrontendMessage::DescribePortal { name }), other => Err(input_err(format!("Invalid describe type: {:#x?}", other))), } } fn decode_bind(mut buf: Cursor) -> Result<FrontendMessage, io::Error> { let portal_name = buf.read_cstr()?.to_string(); let statement_name = buf.read_cstr()?.to_string(); let mut param_formats = Vec::new(); for _ in 0..buf.read_i16()? 
{ param_formats.push(buf.read_format()?); } let mut raw_params = Vec::new(); for _ in 0..buf.read_i16()? { let len = buf.read_i32()?; if len == -1 { raw_params.push(None); // NULL } else { // TODO(benesch): this should use bytes::Bytes to avoid the copy. let mut value = Vec::new(); for _ in 0..len { value.push(buf.read_byte()?); } raw_params.push(Some(value)); } } let mut result_formats = Vec::new(); for _ in 0..buf.read_i16()? { result_formats.push(buf.read_format()?); } Ok(FrontendMessage::Bind { portal_name, statement_name, param_formats, raw_params, result_formats, }) } fn decode_execute(mut buf: Cursor) -> Result<FrontendMessage, io::Error> { let portal_name = buf.read_cstr()?.to_string(); let max_rows = buf.read_i32()?; Ok(FrontendMessage::Execute { portal_name, max_rows, }) } fn decode_flush(mut _buf: Cursor) -> Result<FrontendMessage, io::Error> { // Nothing more to decode. Ok(FrontendMessage::Flush) } fn decode_sync(mut _buf: Cursor) -> Result<FrontendMessage, io::Error> { // Nothing more to decode. Ok(FrontendMessage::Sync) } /// Decodes data within pgwire messages. /// /// The API provided is very similar to [`bytes::Buf`], but operations return /// errors rather than panicking. This is important for safety, as we don't want /// to crash if the user sends us malformatted pgwire messages. /// /// There are also some special-purpose methods, like [`Cursor::read_cstr`], /// that are specific to pgwire messages. #[derive(Debug)] struct Cursor<'a> { buf: &'a [u8], } impl<'a> Cursor<'a> { /// Constructs a new `Cursor` from a byte slice. The cursor will begin /// decoding from the beginning of the slice. fn new(buf: &'a [u8]) -> Cursor { Cursor { buf } } /// Returns the next byte without advancing the cursor. fn peek_byte(&self) -> Result<u8, io::Error> { self.buf .get(0) .copied() .ok_or_else(|| input_err("No byte to read")) } /// Returns the next byte, advancing the cursor by one byte. 
fn read_byte(&mut self) -> Result<u8, io::Error> { let byte = self.peek_byte()?; self.advance(1); Ok(byte) } /// Returns the next null-terminated string. The null character is not /// included the returned string. The cursor is advanced past the null- /// terminated string. /// /// If there is no null byte remaining in the string, returns /// `CodecError::StringNoTerminator`. If the string is not valid UTF-8, /// returns an `io::Error` with an error kind of /// `io::ErrorKind::InvalidInput`. /// /// NOTE(benesch): it is possible that returning a string here is wrong, and /// we should be returning bytes, so that we can support messages that are /// not UTF-8 encoded. At the moment, we've not discovered a need for this, /// though, and using proper strings is convenient. fn read_cstr(&mut self) -> Result<&'a str, io::Error> { if let Some(pos) = self.buf.iter().position(|b| *b == 0) { let val = std::str::from_utf8(&self.buf[..pos]).map_err(input_err)?; self.advance(pos + 1); Ok(val) } else { Err(input_err(CodecError::StringNoTerminator)) } } /// Reads the next 16-bit signed integer, advancing the cursor by two /// bytes. fn read_i16(&mut self) -> Result<i16, io::Error> { if self.buf.len() < 2 { return Err(input_err("not enough buffer for an Int16")); } let val = NetworkEndian::read_i16(self.buf); self.advance(2); Ok(val) } /// Reads the next 32-bit signed integer, advancing the cursor by four /// bytes. fn read_i32(&mut self) -> Result<i32, io::Error> { if self.buf.len() < 4 { return Err(input_err("not enough buffer for an Int32")); } let val = NetworkEndian::read_i32(self.buf); self.advance(4); Ok(val) } /// Reads the next 32-bit unsigned integer, advancing the cursor by four /// bytes. fn read_u32(&mut self) -> Result<u32, io::Error> { if self.buf.len() < 4 { return Err(input_err("not enough buffer for an Int32")); } let val = NetworkEndian::read_u32(self.buf); self.advance(4); Ok(val) } /// Reads the next 16-bit format code, advancing the cursor by two bytes. 
fn read_format(&mut self) -> Result<pgrepr::Format, io::Error> { match self.read_i16()? { 0 => Ok(pgrepr::Format::Text), 1 => Ok(pgrepr::Format::Binary), n => Err(input_err(format!("unknown format code: {}", n))), } } /// Advances the cursor by `n` bytes. fn advance(&mut self, n: usize) { self.buf = &self.buf[n..] } } /// Constructs an error indicating that the client has violated the pgwire /// protocol. fn input_err(source: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> io::Error { io::Error::new(io::ErrorKind::InvalidInput, source.into()) }
33.713126
97
0.546319
0ac7f69d2c5c40cf9fb364af1efa2eed3e58398a
31,496
use crate::{ arena::{Arena, Handle}, BinaryOperator, Constant, ConstantInner, Expression, ScalarKind, ScalarValue, Type, TypeInner, UnaryOperator, }; #[derive(Debug)] pub struct ConstantSolver<'a> { pub types: &'a Arena<Type>, pub expressions: &'a Arena<Expression>, pub constants: &'a mut Arena<Constant>, } #[derive(Clone, Debug, PartialEq, thiserror::Error)] pub enum ConstantSolvingError { #[error("Constants cannot access function arguments")] FunctionArg, #[error("Constants cannot access global variables")] GlobalVariable, #[error("Constants cannot access local variables")] LocalVariable, #[error("Cannot get the array length of a non array type")] InvalidArrayLengthArg, #[error("Constants cannot get the array length of a dynamically sized array")] ArrayLengthDynamic, #[error("Constants cannot call functions")] Call, #[error("Constants don't support atomic functions")] Atomic, #[error("Constants don't support relational functions")] Relational, #[error("Constants don't support derivative functions")] Derivative, #[error("Constants don't support select expressions")] Select, #[error("Constants don't support load expressions")] Load, #[error("Constants don't support image expressions")] ImageExpression, #[error("Cannot access the type")] InvalidAccessBase, #[error("Cannot access at the index")] InvalidAccessIndex, #[error("Cannot access with index of type")] InvalidAccessIndexTy, #[error("Constants don't support bitcasts")] Bitcast, #[error("Cannot cast type")] InvalidCastArg, #[error("Cannot apply the unary op to the argument")] InvalidUnaryOpArg, #[error("Cannot apply the binary op to the arguments")] InvalidBinaryOpArgs, #[error("Cannot apply math function to type")] InvalidMathArg, #[error("Splat/swizzle type is not registered")] DestinationTypeNotFound, #[error("Not implemented: {0}")] NotImplemented(String), } impl<'a> ConstantSolver<'a> { pub fn solve( &mut self, expr: Handle<Expression>, ) -> Result<Handle<Constant>, ConstantSolvingError> { let span = 
self.expressions.get_span(expr).clone(); match self.expressions[expr] { Expression::Constant(constant) => Ok(constant), Expression::AccessIndex { base, index } => self.access(base, index as usize), Expression::Access { base, index } => { let index = self.solve(index)?; self.access(base, self.constant_index(index)?) } Expression::Splat { size, value: splat_value, } => { let value_constant = self.solve(splat_value)?; let ty = match self.constants[value_constant].inner { ConstantInner::Scalar { ref value, width } => { let kind = value.scalar_kind(); self.types .fetch_if(|t| t.inner == crate::TypeInner::Vector { size, kind, width }) } ConstantInner::Composite { .. } => None, }; //TODO: register the new type if needed let ty = ty.ok_or(ConstantSolvingError::DestinationTypeNotFound)?; Ok(self.constants.fetch_or_append( Constant { name: None, specialization: None, inner: ConstantInner::Composite { ty, components: vec![value_constant; size as usize], }, }, span, )) } Expression::Swizzle { size, vector: src_vector, pattern, } => { let src_constant = self.solve(src_vector)?; let (ty, src_components) = match self.constants[src_constant].inner { ConstantInner::Scalar { .. 
} => (None, &[][..]), ConstantInner::Composite { ty, components: ref src_components, } => match self.types[ty].inner { crate::TypeInner::Vector { size: _, kind, width, } => { let dst_ty = self.types.fetch_if(|t| { t.inner == crate::TypeInner::Vector { size, kind, width } }); (dst_ty, &src_components[..]) } _ => (None, &[][..]), }, }; //TODO: register the new type if needed let ty = ty.ok_or(ConstantSolvingError::DestinationTypeNotFound)?; let components = pattern .iter() .map(|&sc| src_components[sc as usize]) .collect(); Ok(self.constants.fetch_or_append( Constant { name: None, specialization: None, inner: ConstantInner::Composite { ty, components }, }, span, )) } Expression::Compose { ty, ref components } => { let components = components .iter() .map(|c| self.solve(*c)) .collect::<Result<_, _>>()?; Ok(self.constants.fetch_or_append( Constant { name: None, specialization: None, inner: ConstantInner::Composite { ty, components }, }, span, )) } Expression::Unary { expr, op } => { let expr_constant = self.solve(expr)?; self.unary_op(op, expr_constant, span) } Expression::Binary { left, right, op } => { let left_constant = self.solve(left)?; let right_constant = self.solve(right)?; self.binary_op(op, left_constant, right_constant, span) } Expression::Math { fun, arg, arg1, .. } => { let arg = self.solve(arg)?; let arg1 = arg1.map(|arg| self.solve(arg)).transpose()?; let const0 = &self.constants[arg].inner; let const1 = arg1.map(|arg| &self.constants[arg].inner); match fun { crate::MathFunction::Pow => { let (value, width) = match (const0, const1.unwrap()) { ( &ConstantInner::Scalar { width, value: value0, }, &ConstantInner::Scalar { value: value1, .. 
}, ) => ( match (value0, value1) { (ScalarValue::Sint(a), ScalarValue::Sint(b)) => { ScalarValue::Sint(a.pow(b as u32)) } (ScalarValue::Uint(a), ScalarValue::Uint(b)) => { ScalarValue::Uint(a.pow(b as u32)) } (ScalarValue::Float(a), ScalarValue::Float(b)) => { ScalarValue::Float(a.powf(b)) } _ => return Err(ConstantSolvingError::InvalidMathArg), }, width, ), _ => return Err(ConstantSolvingError::InvalidMathArg), }; Ok(self.constants.fetch_or_append( Constant { name: None, specialization: None, inner: ConstantInner::Scalar { width, value }, }, span, )) } _ => Err(ConstantSolvingError::NotImplemented(format!("{:?}", fun))), } } Expression::As { convert, expr, kind, } => { let expr_constant = self.solve(expr)?; match convert { Some(width) => self.cast(expr_constant, kind, width, span), None => Err(ConstantSolvingError::Bitcast), } } Expression::ArrayLength(expr) => { let array = self.solve(expr)?; match self.constants[array].inner { ConstantInner::Scalar { .. } => { Err(ConstantSolvingError::InvalidArrayLengthArg) } ConstantInner::Composite { ty, .. } => match self.types[ty].inner { TypeInner::Array { size, .. } => match size { crate::ArraySize::Constant(constant) => Ok(constant), crate::ArraySize::Dynamic => { Err(ConstantSolvingError::ArrayLengthDynamic) } }, _ => Err(ConstantSolvingError::InvalidArrayLengthArg), }, } } Expression::Load { .. } => Err(ConstantSolvingError::Load), Expression::Select { .. } => Err(ConstantSolvingError::Select), Expression::LocalVariable(_) => Err(ConstantSolvingError::LocalVariable), Expression::Derivative { .. } => Err(ConstantSolvingError::Derivative), Expression::Relational { .. } => Err(ConstantSolvingError::Relational), Expression::CallResult { .. } => Err(ConstantSolvingError::Call), Expression::AtomicResult { .. } => Err(ConstantSolvingError::Atomic), Expression::FunctionArgument(_) => Err(ConstantSolvingError::FunctionArg), Expression::GlobalVariable(_) => Err(ConstantSolvingError::GlobalVariable), Expression::ImageSample { .. 
} | Expression::ImageLoad { .. } | Expression::ImageQuery { .. } => Err(ConstantSolvingError::ImageExpression), } } fn access( &mut self, base: Handle<Expression>, index: usize, ) -> Result<Handle<Constant>, ConstantSolvingError> { let base = self.solve(base)?; match self.constants[base].inner { ConstantInner::Scalar { .. } => Err(ConstantSolvingError::InvalidAccessBase), ConstantInner::Composite { ty, ref components } => { match self.types[ty].inner { TypeInner::Vector { .. } | TypeInner::Matrix { .. } | TypeInner::Array { .. } | TypeInner::Struct { .. } => (), _ => return Err(ConstantSolvingError::InvalidAccessBase), } components .get(index) .copied() .ok_or(ConstantSolvingError::InvalidAccessIndex) } } } fn constant_index(&self, constant: Handle<Constant>) -> Result<usize, ConstantSolvingError> { match self.constants[constant].inner { ConstantInner::Scalar { value: ScalarValue::Uint(index), .. } => Ok(index as usize), _ => Err(ConstantSolvingError::InvalidAccessIndexTy), } } fn cast( &mut self, constant: Handle<Constant>, kind: ScalarKind, target_width: crate::Bytes, span: crate::Span, ) -> Result<Handle<Constant>, ConstantSolvingError> { fn inner_cast<A: num_traits::FromPrimitive>(value: ScalarValue) -> A { match value { ScalarValue::Sint(v) => A::from_i64(v), ScalarValue::Uint(v) => A::from_u64(v), ScalarValue::Float(v) => A::from_f64(v), ScalarValue::Bool(v) => A::from_u64(v as u64), } .unwrap() } let mut inner = self.constants[constant].inner.clone(); match inner { ConstantInner::Scalar { ref mut value, ref mut width, } => { *width = target_width; *value = match kind { ScalarKind::Sint => ScalarValue::Sint(inner_cast(*value)), ScalarKind::Uint => ScalarValue::Uint(inner_cast(*value)), ScalarKind::Float => ScalarValue::Float(inner_cast(*value)), ScalarKind::Bool => ScalarValue::Bool(inner_cast::<u64>(*value) != 0), } } ConstantInner::Composite { ty, ref mut components, } => { match self.types[ty].inner { TypeInner::Vector { .. } | TypeInner::Matrix { .. 
} => (), _ => return Err(ConstantSolvingError::InvalidCastArg), } for component in components { *component = self.cast(*component, kind, target_width, span.clone())?; } } } Ok(self.constants.fetch_or_append( Constant { name: None, specialization: None, inner, }, span, )) } fn unary_op( &mut self, op: UnaryOperator, constant: Handle<Constant>, span: crate::Span, ) -> Result<Handle<Constant>, ConstantSolvingError> { let mut inner = self.constants[constant].inner.clone(); match inner { ConstantInner::Scalar { ref mut value, .. } => match op { UnaryOperator::Negate => match *value { ScalarValue::Sint(ref mut v) => *v = -*v, ScalarValue::Float(ref mut v) => *v = -*v, _ => return Err(ConstantSolvingError::InvalidUnaryOpArg), }, UnaryOperator::Not => match *value { ScalarValue::Sint(ref mut v) => *v = !*v, ScalarValue::Uint(ref mut v) => *v = !*v, ScalarValue::Bool(ref mut v) => *v = !*v, _ => return Err(ConstantSolvingError::InvalidUnaryOpArg), }, }, ConstantInner::Composite { ty, ref mut components, } => { match self.types[ty].inner { TypeInner::Vector { .. } | TypeInner::Matrix { .. } => (), _ => return Err(ConstantSolvingError::InvalidCastArg), } for component in components { *component = self.unary_op(op, *component, span.clone())? 
} } } Ok(self.constants.fetch_or_append( Constant { name: None, specialization: None, inner, }, span, )) } fn binary_op( &mut self, op: BinaryOperator, left: Handle<Constant>, right: Handle<Constant>, span: crate::Span, ) -> Result<Handle<Constant>, ConstantSolvingError> { let left_inner = &self.constants[left].inner; let right_inner = &self.constants[right].inner; let inner = match (left_inner, right_inner) { ( &ConstantInner::Scalar { value: left_value, width, }, &ConstantInner::Scalar { value: right_value, width: _, }, ) => { let value = match op { BinaryOperator::Equal => ScalarValue::Bool(left_value == right_value), BinaryOperator::NotEqual => ScalarValue::Bool(left_value != right_value), BinaryOperator::Less => ScalarValue::Bool(left_value < right_value), BinaryOperator::LessEqual => ScalarValue::Bool(left_value <= right_value), BinaryOperator::Greater => ScalarValue::Bool(left_value > right_value), BinaryOperator::GreaterEqual => ScalarValue::Bool(left_value >= right_value), _ => match (left_value, right_value) { (ScalarValue::Sint(a), ScalarValue::Sint(b)) => { ScalarValue::Sint(match op { BinaryOperator::Add => a + b, BinaryOperator::Subtract => a - b, BinaryOperator::Multiply => a * b, BinaryOperator::Divide => a / b, BinaryOperator::Modulo => a % b, BinaryOperator::And => a & b, BinaryOperator::ExclusiveOr => a ^ b, BinaryOperator::InclusiveOr => a | b, BinaryOperator::ShiftLeft => a << b, BinaryOperator::ShiftRight => a >> b, _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs), }) } (ScalarValue::Uint(a), ScalarValue::Uint(b)) => { ScalarValue::Uint(match op { BinaryOperator::Add => a + b, BinaryOperator::Subtract => a - b, BinaryOperator::Multiply => a * b, BinaryOperator::Divide => a / b, BinaryOperator::Modulo => a % b, BinaryOperator::And => a & b, BinaryOperator::ExclusiveOr => a ^ b, BinaryOperator::InclusiveOr => a | b, BinaryOperator::ShiftLeft => a << b, BinaryOperator::ShiftRight => a >> b, _ => return 
Err(ConstantSolvingError::InvalidBinaryOpArgs), }) } (ScalarValue::Float(a), ScalarValue::Float(b)) => { ScalarValue::Float(match op { BinaryOperator::Add => a + b, BinaryOperator::Subtract => a - b, BinaryOperator::Multiply => a * b, BinaryOperator::Divide => a / b, BinaryOperator::Modulo => a % b, _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs), }) } (ScalarValue::Bool(a), ScalarValue::Bool(b)) => { ScalarValue::Bool(match op { BinaryOperator::LogicalAnd => a && b, BinaryOperator::LogicalOr => a || b, _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs), }) } _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs), }, }; ConstantInner::Scalar { value, width } } (&ConstantInner::Composite { ref components, ty }, &ConstantInner::Scalar { .. }) => { let mut components = components.clone(); for comp in components.iter_mut() { *comp = self.binary_op(op, *comp, right, span.clone())?; } ConstantInner::Composite { ty, components } } (&ConstantInner::Scalar { .. }, &ConstantInner::Composite { ref components, ty }) => { let mut components = components.clone(); for comp in components.iter_mut() { *comp = self.binary_op(op, left, *comp, span.clone())?; } ConstantInner::Composite { ty, components } } _ => return Err(ConstantSolvingError::InvalidBinaryOpArgs), }; Ok(self.constants.fetch_or_append( Constant { name: None, specialization: None, inner, }, span, )) } } #[cfg(test)] mod tests { use std::vec; use crate::{ Arena, Constant, ConstantInner, Expression, ScalarKind, ScalarValue, Type, TypeInner, UnaryOperator, VectorSize, }; use super::ConstantSolver; #[test] fn unary_op() { let mut types = Arena::new(); let mut expressions = Arena::new(); let mut constants = Arena::new(); let vec_ty = types.append( Type { name: None, inner: TypeInner::Vector { size: VectorSize::Bi, kind: ScalarKind::Sint, width: 4, }, }, Default::default(), ); let h = constants.append( Constant { name: None, specialization: None, inner: ConstantInner::Scalar { width: 4, value: 
ScalarValue::Sint(4), }, }, Default::default(), ); let h1 = constants.append( Constant { name: None, specialization: None, inner: ConstantInner::Scalar { width: 4, value: ScalarValue::Sint(8), }, }, Default::default(), ); let vec_h = constants.append( Constant { name: None, specialization: None, inner: ConstantInner::Composite { ty: vec_ty, components: vec![h, h1], }, }, Default::default(), ); let expr = expressions.append(Expression::Constant(h), Default::default()); let expr1 = expressions.append(Expression::Constant(vec_h), Default::default()); let root1 = expressions.append( Expression::Unary { op: UnaryOperator::Negate, expr, }, Default::default(), ); let root2 = expressions.append( Expression::Unary { op: UnaryOperator::Not, expr, }, Default::default(), ); let root3 = expressions.append( Expression::Unary { op: UnaryOperator::Not, expr: expr1, }, Default::default(), ); let mut solver = ConstantSolver { types: &types, expressions: &expressions, constants: &mut constants, }; let res1 = solver.solve(root1).unwrap(); let res2 = solver.solve(root2).unwrap(); let res3 = solver.solve(root3).unwrap(); assert_eq!( constants[res1].inner, ConstantInner::Scalar { width: 4, value: ScalarValue::Sint(-4), }, ); assert_eq!( constants[res2].inner, ConstantInner::Scalar { width: 4, value: ScalarValue::Sint(!4), }, ); let res3_inner = &constants[res3].inner; match res3_inner { ConstantInner::Composite { ty, components } => { assert_eq!(*ty, vec_ty); let mut components_iter = components.iter().copied(); assert_eq!( constants[components_iter.next().unwrap()].inner, ConstantInner::Scalar { width: 4, value: ScalarValue::Sint(!4), }, ); assert_eq!( constants[components_iter.next().unwrap()].inner, ConstantInner::Scalar { width: 4, value: ScalarValue::Sint(!8), }, ); assert!(components_iter.next().is_none()); } _ => panic!("Expected vector"), } } #[test] fn cast() { let mut expressions = Arena::new(); let mut constants = Arena::new(); let h = constants.append( Constant { name: None, 
specialization: None, inner: ConstantInner::Scalar { width: 4, value: ScalarValue::Sint(4), }, }, Default::default(), ); let expr = expressions.append(Expression::Constant(h), Default::default()); let root = expressions.append( Expression::As { expr, kind: ScalarKind::Bool, convert: Some(crate::BOOL_WIDTH), }, Default::default(), ); let mut solver = ConstantSolver { types: &Arena::new(), expressions: &expressions, constants: &mut constants, }; let res = solver.solve(root).unwrap(); assert_eq!( constants[res].inner, ConstantInner::Scalar { width: crate::BOOL_WIDTH, value: ScalarValue::Bool(true), }, ); } #[test] fn access() { let mut types = Arena::new(); let mut expressions = Arena::new(); let mut constants = Arena::new(); let matrix_ty = types.append( Type { name: None, inner: TypeInner::Matrix { columns: VectorSize::Bi, rows: VectorSize::Tri, width: 4, }, }, Default::default(), ); let vec_ty = types.append( Type { name: None, inner: TypeInner::Vector { size: VectorSize::Tri, kind: ScalarKind::Float, width: 4, }, }, Default::default(), ); let mut vec1_components = Vec::with_capacity(3); let mut vec2_components = Vec::with_capacity(3); for i in 0..3 { let h = constants.append( Constant { name: None, specialization: None, inner: ConstantInner::Scalar { width: 4, value: ScalarValue::Float(i as f64), }, }, Default::default(), ); vec1_components.push(h) } for i in 3..6 { let h = constants.append( Constant { name: None, specialization: None, inner: ConstantInner::Scalar { width: 4, value: ScalarValue::Float(i as f64), }, }, Default::default(), ); vec2_components.push(h) } let vec1 = constants.append( Constant { name: None, specialization: None, inner: ConstantInner::Composite { ty: vec_ty, components: vec1_components, }, }, Default::default(), ); let vec2 = constants.append( Constant { name: None, specialization: None, inner: ConstantInner::Composite { ty: vec_ty, components: vec2_components, }, }, Default::default(), ); let h = constants.append( Constant { name: None, 
specialization: None, inner: ConstantInner::Composite { ty: matrix_ty, components: vec![vec1, vec2], }, }, Default::default(), ); let base = expressions.append(Expression::Constant(h), Default::default()); let root1 = expressions.append( Expression::AccessIndex { base, index: 1 }, Default::default(), ); let root2 = expressions.append( Expression::AccessIndex { base: root1, index: 2, }, Default::default(), ); let mut solver = ConstantSolver { types: &types, expressions: &expressions, constants: &mut constants, }; let res1 = solver.solve(root1).unwrap(); let res2 = solver.solve(root2).unwrap(); let res1_inner = &constants[res1].inner; match res1_inner { ConstantInner::Composite { ty, components } => { assert_eq!(*ty, vec_ty); let mut components_iter = components.iter().copied(); assert_eq!( constants[components_iter.next().unwrap()].inner, ConstantInner::Scalar { width: 4, value: ScalarValue::Float(3.), }, ); assert_eq!( constants[components_iter.next().unwrap()].inner, ConstantInner::Scalar { width: 4, value: ScalarValue::Float(4.), }, ); assert_eq!( constants[components_iter.next().unwrap()].inner, ConstantInner::Scalar { width: 4, value: ScalarValue::Float(5.), }, ); assert!(components_iter.next().is_none()); } _ => panic!("Expected vector"), } assert_eq!( constants[res2].inner, ConstantInner::Scalar { width: 4, value: ScalarValue::Float(5.), }, ); } }
36.327566
100
0.438246
08e9f77d2325a0df104d8fc0cf4140b4e74c020a
2,516
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. pub mod ap; pub mod client; pub mod clone_utils; pub mod mesh; pub mod phy_selection; mod sink; #[cfg(test)] pub mod test_utils; pub mod timer; use fidl_fuchsia_wlan_common as fidl_common; use fidl_fuchsia_wlan_mlme as fidl_mlme; use futures::channel::mpsc; use crate::client::InfoEvent; use crate::timer::TimedEvent; use wlan_common::mac::MacAddr; pub type Ssid = Vec<u8>; #[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd)] pub struct Config { pub wep_supported: bool, pub wpa1_supported: bool, } impl Config { pub fn with_wep(mut self) -> Self { self.wep_supported = true; self } pub fn with_wpa1(mut self) -> Self { self.wpa1_supported = true; self } } pub struct DeviceInfo { pub addr: [u8; 6], pub bands: Vec<fidl_mlme::BandCapabilities>, pub driver_features: Vec<fidl_common::DriverFeature>, } #[derive(Debug)] pub enum MlmeRequest { Scan(fidl_mlme::ScanRequest), Join(fidl_mlme::JoinRequest), Authenticate(fidl_mlme::AuthenticateRequest), AuthResponse(fidl_mlme::AuthenticateResponse), Associate(fidl_mlme::AssociateRequest), AssocResponse(fidl_mlme::AssociateResponse), Deauthenticate(fidl_mlme::DeauthenticateRequest), Eapol(fidl_mlme::EapolRequest), SetKeys(fidl_mlme::SetKeysRequest), SetCtrlPort(fidl_mlme::SetControlledPortRequest), Start(fidl_mlme::StartRequest), Stop(fidl_mlme::StopRequest), SendMpOpenAction(fidl_mlme::MeshPeeringOpenAction), SendMpConfirmAction(fidl_mlme::MeshPeeringConfirmAction), MeshPeeringEstablished(fidl_mlme::MeshPeeringParams), } pub trait Station { type Event; fn on_mlme_event(&mut self, event: fidl_mlme::MlmeEvent); fn on_timeout(&mut self, timed_event: TimedEvent<Self::Event>); } pub type MlmeStream = mpsc::UnboundedReceiver<MlmeRequest>; pub type InfoStream = mpsc::UnboundedReceiver<InfoEvent>; mod responder { use futures::channel::oneshot; #[derive(Debug)] pub 
struct Responder<T>(oneshot::Sender<T>); impl<T> Responder<T> { pub fn new() -> (Self, oneshot::Receiver<T>) { let (sender, receiver) = oneshot::channel(); (Responder(sender), receiver) } pub fn respond(self, result: T) { self.0.send(result).unwrap_or_else(|_| ()); } } }
26.484211
73
0.697933
ab09bb5e523d1517d1760d5d6b118a76c73df2ae
2,309
use duct::cmd; use std::{ffi::OsString, fmt, io}; /// Creates a [`DisplayCmd`] instance. /// /// Arguments must implement `Into<`[`OsString`]`>`. macro_rules! display_cmd { ($name:expr $(, $arg:expr)* $(,)?) => { $crate::cmd::DisplayCmd::new($name)$(.arg($arg))* }; } /// A representation of a shell command that implements /// [`Display`][fmt::Display]. #[derive(Clone)] pub(crate) struct DisplayCmd { name: OsString, args: Vec<OsString>, } impl DisplayCmd { pub(crate) fn new(name: impl Into<OsString>) -> Self { Self { name: name.into(), args: Vec::new(), } } pub(crate) fn arg(mut self, arg: impl Into<OsString>) -> Self { self.args.push(arg.into()); self } pub(crate) fn args<I, A>(mut self, args: I) -> Self where I: IntoIterator<Item = A>, A: Into<OsString>, { self.args.extend(args.into_iter().map(|arg| arg.into())); self } /// Prepend `name` to this command. /// /// Essentially, this funtion sets the command name to `name`, /// makes the old command name into the first argument, and pushes /// every other argument forward by one. pub(crate) fn prepend(self, name: impl Into<OsString>) -> Self { let args = std::iter::once(self.name) .chain(self.args.into_iter()) .collect(); Self { name: name.into(), args, } } /// Runs this command. /// /// Returns [`io::Error`] if an error occurs when trying to run /// the command or if the command runs, but exits with a non-zero /// exit code. pub(crate) fn run(&self) -> io::Result<()> { cmd(&self.name, &self.args).run().map(|_| ()) } } impl fmt::Display for DisplayCmd { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.name.to_string_lossy().fmt(f)?; self.args .iter() .map(|arg| { let arg = arg.to_string_lossy(); // Quote any arguments that contain spaces if arg.contains(' ') { write!(f, " '{}'", arg) } else { write!(f, " {}", arg) } }) .collect() } }
26.848837
70
0.51191
39b2510af1ab8c1797f5f538eff0d765705e9415
4,496
use std::collections::{HashMap, HashSet};
use std::error::Error;
use std::fmt::{Debug, Display, Formatter};
use std::hash::Hash;

/// Error returned when the dependency graph contains a cycle.
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub struct CircularDependencyError;

impl CircularDependencyError {
    pub fn new() -> Self {
        Self
    }
}

impl Display for CircularDependencyError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        Debug::fmt(&self, f)
    }
}

impl Error for CircularDependencyError {}

/// Sorts a dependency graph into layers: each returned set contains items
/// whose dependencies are all satisfied by earlier sets. Self-dependencies
/// are ignored; items that appear only as dependencies are included with
/// an empty dependency set. Returns [`CircularDependencyError`] if a cycle
/// prevents completing the sort.
pub fn topological_sort<T>(
    data: HashMap<T, HashSet<T>>,
) -> Result<Vec<HashSet<T>>, CircularDependencyError>
where
    T: Eq + Hash + Clone,
{
    if data.is_empty() {
        return Ok(vec![]);
    }

    // Take ownership of the input graph, discarding self-dependencies.
    let mut graph: HashMap<T, HashSet<T>> = data
        .into_iter()
        .map(|(node, deps)| {
            let cleaned: HashSet<T> = deps.into_iter().filter(|d| d != &node).collect();
            (node, cleaned)
        })
        .collect();

    // Items mentioned only as dependencies get an empty dependency set.
    let mentioned: HashSet<T> = graph.values().flat_map(|deps| deps.iter().cloned()).collect();
    let keys: HashSet<T> = graph.keys().cloned().collect();
    let missing: Vec<T> = mentioned.difference(&keys).cloned().collect();
    for node in missing {
        graph.insert(node, HashSet::new());
    }

    let mut layers = Vec::new();
    loop {
        // Everything with no remaining dependencies forms the next layer.
        let ready: HashSet<T> = graph
            .iter()
            .filter_map(|(node, deps)| deps.is_empty().then(|| node.clone()))
            .collect();
        if ready.is_empty() {
            break;
        }
        // Remove the ready nodes and strip them from remaining dependency sets.
        graph = graph
            .into_iter()
            .filter(|(node, _)| !ready.contains(node))
            .map(|(node, deps)| {
                let rest: HashSet<T> = deps.difference(&ready).cloned().collect();
                (node, rest)
            })
            .collect();
        layers.push(ready);
    }

    if graph.is_empty() {
        Ok(layers)
    } else {
        Err(CircularDependencyError::new())
    }
}

#[cfg(test)]
mod tests {
    use super::{topological_sort, CircularDependencyError};
    use std::collections::{HashMap, HashSet};

    #[test]
    fn empty() {
        assert_eq!(topological_sort(HashMap::<&str, _>::new()), Ok(vec![]))
    }

    #[test]
    fn no_dependencies() {
        let data = HashMap::from([("a", HashSet::new())]);
        assert_eq!(topological_sort(data), Ok(vec![HashSet::from(["a"])]))
    }

    #[test]
    fn one_dependencies() {
        let data = HashMap::from([("a", HashSet::from(["b"])), ("b", HashSet::new())]);
        assert_eq!(
            topological_sort(data),
            Ok(vec![HashSet::from(["b"]), HashSet::from(["a"])])
        )
    }

    #[test]
    fn two_dependencies() {
        let data = HashMap::from([
            ("a", HashSet::from(["b", "c"])),
            ("b", HashSet::new()),
            ("c", HashSet::new()),
        ]);
        assert_eq!(
            topological_sort(data),
            Ok(vec![HashSet::from(["b", "c"]), HashSet::from(["a"])])
        )
    }

    #[test]
    fn many_dependencies() {
        let data = HashMap::from([
            (2, HashSet::from([11])),
            (9, HashSet::from([11, 8, 10])),
            (10, HashSet::from([11, 3])),
            (11, HashSet::from([7, 5])),
            (8, HashSet::from([7, 3])),
        ]);
        assert_eq!(
            topological_sort(data),
            Ok(vec![
                HashSet::from([3, 5, 7]),
                HashSet::from([8, 11]),
                HashSet::from([2, 10]),
                HashSet::from([9])
            ])
        );
    }

    #[test]
    fn circular_dependency_each() {
        let data = HashMap::from([("a", HashSet::from(["b"])), ("b", HashSet::from(["a"]))]);
        assert_eq!(topological_sort(data), Err(CircularDependencyError::new()))
    }

    #[test]
    fn circular_dependency_triple() {
        let data = HashMap::from([
            ("a", HashSet::from(["b"])),
            ("b", HashSet::from(["c"])),
            ("c", HashSet::from(["a"])),
        ]);
        assert_eq!(topological_sort(data), Err(CircularDependencyError::new()))
    }
}
26.139535
93
0.487767
e5713b73eb94b4cfc7de2c4181487e808b49826e
5,515
use crate::array::*;
use crate::bitmap::Bitmap;
use crate::datatypes::{DataType, IntervalUnit};
use crate::types::days_ms;

/// Returns the heap size in bytes of the validity bitmap, or 0 when absent.
fn validity_size(validity: &Option<Bitmap>) -> usize {
    validity.as_ref().map(|b| b.bytes().len()).unwrap_or(0)
}

// Size of a primitive array: values buffer plus validity bitmap.
macro_rules! dyn_primitive {
    ($array:expr, $ty:ty) => {{
        let array = $array
            .as_any()
            .downcast_ref::<PrimitiveArray<$ty>>()
            .unwrap();
        array.values().len() * std::mem::size_of::<$ty>() + validity_size(array.validity())
    }};
}

// Size of a variable-length binary/utf8 array: values, offsets, validity.
macro_rules! dyn_binary {
    ($array:expr, $ty:ty, $o:ty) => {{
        let array = $array.as_any().downcast_ref::<$ty>().unwrap();
        array.values().len()
            + array.offsets().len() * std::mem::size_of::<$o>()
            + validity_size(array.validity())
    }};
}

// Size of a dictionary array: keys array plus dictionary values array.
macro_rules! dyn_dict {
    ($array:expr, $ty:ty) => {{
        let array = $array
            .as_any()
            .downcast_ref::<DictionaryArray<$ty>>()
            .unwrap();
        estimated_bytes_size(array.keys()) + estimated_bytes_size(array.values().as_ref())
    }};
}

/// Returns the total (heap) allocated size of the array in bytes.
/// # Implementation
/// This estimation is the sum of the size of its buffers, validity, including nested arrays.
/// Multiple arrays may share buffers and bitmaps. Therefore, the size of 2 arrays is not the
/// sum of the sizes computed from this function. In particular, [`StructArray`]'s size is an upper bound.
///
/// When an array is sliced, its allocated size remains constant because the buffer unchanged.
/// However, this function will yield a smaller number. This is because this function returns
/// the visible size of the buffer, not its total capacity.
///
/// FFI buffers are included in this estimation.
pub fn estimated_bytes_size(array: &dyn Array) -> usize {
    use DataType::*;
    match array.data_type() {
        Null => 0,
        Boolean => {
            let array = array.as_any().downcast_ref::<BooleanArray>().unwrap();
            array.values().bytes().len() + validity_size(array.validity())
        }
        Int8 => dyn_primitive!(array, i8),
        Int16 => dyn_primitive!(array, i16),
        Int32 | Date32 | Time32(_) | Interval(IntervalUnit::YearMonth) => {
            dyn_primitive!(array, i32)
        }
        Int64 | Date64 | Timestamp(_, _) | Time64(_) | Duration(_) => dyn_primitive!(array, i64),
        // Fixed: this previously used `u16`, which makes the downcast to
        // `PrimitiveArray<u16>` fail (panic on unwrap) for UInt8 arrays and
        // would double the per-value size.
        UInt8 => dyn_primitive!(array, u8),
        UInt16 => dyn_primitive!(array, u16),
        UInt32 => dyn_primitive!(array, u32),
        UInt64 => dyn_primitive!(array, u64),
        Float16 => unreachable!(),
        Float32 => dyn_primitive!(array, f32),
        Float64 => dyn_primitive!(array, f64),
        Decimal(_, _) => dyn_primitive!(array, i128),
        Interval(IntervalUnit::DayTime) => dyn_primitive!(array, days_ms),
        Binary => dyn_binary!(array, BinaryArray<i32>, i32),
        FixedSizeBinary(_) => {
            let array = array
                .as_any()
                .downcast_ref::<FixedSizeBinaryArray>()
                .unwrap();
            array.values().len() + validity_size(array.validity())
        }
        LargeBinary => dyn_binary!(array, BinaryArray<i64>, i64),
        Utf8 => dyn_binary!(array, Utf8Array<i32>, i32),
        LargeUtf8 => dyn_binary!(array, Utf8Array<i64>, i64),
        List(_) => {
            let array = array.as_any().downcast_ref::<ListArray<i32>>().unwrap();
            estimated_bytes_size(array.values().as_ref())
                + array.offsets().len() * std::mem::size_of::<i32>()
                + validity_size(array.validity())
        }
        FixedSizeList(_, _) => {
            // Fixed: this previously downcast to `ListArray<i64>`, which fails
            // (panic on unwrap) for fixed-size lists; they are represented by
            // `FixedSizeListArray` and carry no offsets buffer.
            let array = array
                .as_any()
                .downcast_ref::<FixedSizeListArray>()
                .unwrap();
            estimated_bytes_size(array.values().as_ref()) + validity_size(array.validity())
        }
        LargeList(_) => {
            let array = array.as_any().downcast_ref::<ListArray<i64>>().unwrap();
            estimated_bytes_size(array.values().as_ref())
                + array.offsets().len() * std::mem::size_of::<i64>()
                + validity_size(array.validity())
        }
        Struct(_) => {
            let array = array.as_any().downcast_ref::<StructArray>().unwrap();
            // Upper bound: children may share buffers.
            array
                .values()
                .iter()
                .map(|x| x.as_ref())
                .map(estimated_bytes_size)
                .sum::<usize>()
                + validity_size(array.validity())
        }
        Union(_) => unreachable!(),
        Dictionary(keys, _) => match keys.as_ref() {
            Int8 => dyn_dict!(array, i8),
            Int16 => dyn_dict!(array, i16),
            Int32 => dyn_dict!(array, i32),
            Int64 => dyn_dict!(array, i64),
            UInt8 => dyn_dict!(array, u8),
            UInt16 => dyn_dict!(array, u16),
            UInt32 => dyn_dict!(array, u32),
            UInt64 => dyn_dict!(array, u64),
            _ => unreachable!(),
        },
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn primitive() {
        let a = Int32Array::from_slice(&[1, 2, 3, 4, 5]);
        assert_eq!(5 * std::mem::size_of::<i32>(), estimated_bytes_size(&a));
    }

    // Regression test for the UInt8 case: one byte per value, not two.
    #[test]
    fn primitive_u8() {
        let a = UInt8Array::from_slice(&[1, 2, 3]);
        assert_eq!(3, estimated_bytes_size(&a));
    }

    #[test]
    fn boolean() {
        let a = BooleanArray::from_slice(&[true]);
        assert_eq!(1, estimated_bytes_size(&a));
    }

    #[test]
    fn utf8() {
        let a = Utf8Array::<i32>::from_slice(&["aaa"]);
        assert_eq!(3 + 2 * std::mem::size_of::<i32>(), estimated_bytes_size(&a));
    }
}
37.013423
106
0.558477
c1fe95d6e997b9287e1f3c1125617faff8ee58ed
48,994
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use {
    anyhow::Error,
    argh::FromArgs,
    carnelian::{
        make_app_assistant, render::*, AnimationMode, App, AppAssistant, Color, Point,
        RenderOptions, Size, ViewAssistant, ViewAssistantContext, ViewAssistantPtr, ViewKey,
        ViewMode,
    },
    euclid::{Angle, Point2D, Rect, Size2D, Transform2D, Vector2D},
    fidl_fuchsia_hardware_input as hid, fidl_fuchsia_input_report as hid_input_report,
    fuchsia_trace::{self, duration},
    fuchsia_trace_provider,
    fuchsia_zircon::{self as zx, AsHandleRef, ClockId, Event, Signals, Time},
    itertools::izip,
    rand::{thread_rng, Rng},
    std::{collections::BTreeMap, f32, fs, ops::Range},
};

const BACKGROUND_COLOR: Color = Color { r: 255, g: 255, b: 255, a: 255 };

// Stroke constants.
const STROKE_START_RADIUS: f32 = 0.25;
const STROKE_RADIUS_ADJUSTMENT_AMOUNT: f32 = 0.1;
const MAX_STROKES: usize = 1000;

// Toolbar constants.
const TOOL_RADIUS: f32 = 25.0;
const TOOL_PADDING: f32 = 12.5;

// Color palette constants.
const COLORS: [Color; 6] = [
    Color { r: 0, g: 0, b: 0, a: 255 },
    Color { r: 255, g: 255, b: 255, a: 255 },
    Color { r: 187, g: 74, b: 72, a: 205 },
    Color { r: 225, g: 210, b: 92, a: 205 },
    Color { r: 61, g: 133, b: 177, a: 205 },
    Color { r: 36, g: 128, b: 108, a: 205 },
];

// Pencil constants.
const PENCILS: [f32; 3] = [1.5, 3.0, 10.0];

// Delay before starting to draw flowers after clearing the screen.
const FLOWER_DELAY_SECONDS: i64 = 10;

/// Linear interpolation between `p0` and `p1` at parameter `t`.
fn lerp(t: f32, p0: Point, p1: Point) -> Point {
    Point::new(p0.x * (1.0 - t) + p1.x * t, p0.y * (1.0 - t) + p1.y * t)
}

/// Sink for path geometry. The provided `cubic_to` flattens a cubic Bézier
/// into line segments; implementors that can consume cubics directly may
/// override it.
trait InkPathBuilder {
    fn line_to(&mut self, p: Point);

    fn cubic_to(&mut self, p0: Point, p1: Point, p2: Point, p3: Point, offset: Vector2D<f32>) {
        // NOTE(review): this flatness metric reads `p0 + p2 - 3*(p1 + p2)`;
        // conventional flatness tests use the end point (p0 + p3) — confirm
        // the use of p2 here is intentional.
        let dev_x = (p0.x + p2.x - 3.0 * (p1.x + p2.x)).abs();
        let dev_y = (p0.y + p2.y - 3.0 * (p1.y + p2.y)).abs();
        let dev_sq = dev_x * dev_x + dev_y * dev_y;

        // Flat enough: emit a single segment to the end point.
        const PIXEL_ACCURACY: f32 = 0.25;
        if dev_sq < PIXEL_ACCURACY {
            self.line_to(Point::new(p3.x, p3.y) + offset);
            return;
        }

        // Subdivide proportionally to the deviation and evaluate the curve
        // at uniform parameter steps (De Casteljau).
        const TOLERANCE: f32 = 3.0;
        let segments = 1 + (TOLERANCE * dev_sq).sqrt().sqrt().floor() as usize;
        let step = (segments as f32).recip();
        let mut t = 0.0;
        for _ in 0..segments - 1 {
            t += step;
            let p = lerp(
                t,
                lerp(t, lerp(t, p0, p1), lerp(t, p1, p2)),
                lerp(t, lerp(t, p1, p2), lerp(t, p2, p3)),
            );
            self.line_to(Point::new(p.x, p.y) + offset);
        }
        self.line_to(Point::new(p3.x, p3.y) + offset);
    }
}

/// Collects flattened path geometry into a `Vec<Point>`.
struct PointPathBuilder<'a> {
    points: &'a mut Vec<Point>,
}

impl<'a> PointPathBuilder<'a> {
    fn new(points: &'a mut Vec<Point>) -> Self {
        Self { points }
    }
}

impl<'a> InkPathBuilder for PointPathBuilder<'a> {
    fn line_to(&mut self, p: Point) {
        self.points.push(p);
    }
}

/// Forwards geometry to a render `PathBuilder`, keeping cubics as cubics.
struct PathBuilderWrapper<'a> {
    path_builder: &'a mut PathBuilder,
}

impl<'a> PathBuilderWrapper<'a> {
    fn new(path_builder: &'a mut PathBuilder) -> Self {
        Self { path_builder }
    }
}

impl<'a> InkPathBuilder for PathBuilderWrapper<'a> {
    fn line_to(&mut self, p: Point) {
        self.path_builder.line_to(p);
    }

    fn cubic_to(&mut self, _p0: Point, p1: Point, p2: Point, p3: Point, offset: Vector2D<f32>) {
        // The start point is implicit in the underlying builder's current
        // position, so only the control and end points are offset and passed.
        self.path_builder.cubic_to(p1 + offset, p2 + offset, p3 + offset);
    }
}

/// A circle outline approximated by four cubic Béziers, flattened to points.
struct Circle {
    points: Vec<Point>,
}

impl Circle {
    fn new(center: Point, radius: f32) -> Self {
        let offset = center.to_vector();
        // Control-point distance for the standard 4-arc cubic approximation
        // of a circle, scaled by the radius.
        let unit_ctrl = 4.0 / 3.0 * (f32::consts::PI / 8.0).tan();
        let ctrl = unit_ctrl * radius;

        let top = Point::new(0.0, -radius);
        let right = Point::new(radius, 0.0);
        let bottom = Point::new(0.0, radius);
        let left = Point::new(-radius, 0.0);

        let c_up = Point::new(0.0, -ctrl).to_vector();
        let c_right = Point::new(ctrl, 0.0).to_vector();
        let c_down = Point::new(0.0, ctrl).to_vector();
        let c_left = Point::new(-ctrl, 0.0).to_vector();

        let mut points = vec![top + offset];
        let mut builder = PointPathBuilder::new(&mut points);
        builder.cubic_to(top, top + c_right, right + c_up, right, offset);
        builder.cubic_to(right, right + c_down, bottom + c_right, bottom, offset);
        builder.cubic_to(bottom, bottom + c_left, left + c_down, left, offset);
        builder.cubic_to(left, left + c_up, top + c_left, top, offset);

        Self { points }
    }
}

/// A randomly generated flower outline, flattened to points.
struct Flower {
    points: Vec<Point>,
}

impl Flower {
    fn new(width: f32, height: f32) -> Self {
        const FLOWER_SIZE: f32 = 100.0;
        const FLOWER_MIN_PETALS: usize = 3;
        const FLOWER_MAX_PETALS: usize = 8;
        const FLOWER_MIN_R1: f32 = 60.0;
        const FLOWER_MAX_R1: f32 = 95.0;
        const FLOWER_MIN_R2: f32 = 20.0;
        const FLOWER_MAX_R2: f32 = 60.0;

        let mut rng = thread_rng();
        let petal_count: usize = rng.gen_range(FLOWER_MIN_PETALS, FLOWER_MAX_PETALS);
        let r1: f32 = rng.gen_range(FLOWER_MIN_R1, FLOWER_MAX_R1);
        let r2: f32 = rng.gen_range(FLOWER_MIN_R2, FLOWER_MAX_R2);
        // Random location in canvas, inset so the flower fits.
        let offset = Vector2D::new(
            rng.gen_range(FLOWER_SIZE, width - FLOWER_SIZE),
            rng.gen_range(FLOWER_SIZE, height - FLOWER_SIZE),
        );

        let mut points = Vec::new();
        // Random tangent-skew factors for the outer (u) and inner (v) radii.
        let u: f32 = rng.gen_range(10.0, FLOWER_SIZE) / FLOWER_SIZE;
        let v: f32 = rng.gen_range(0.0, FLOWER_SIZE - 10.0) / FLOWER_SIZE;
        let d_theta: f32 = f32::consts::PI / (petal_count as f32);
        let mut theta: f32 = 0.0;
        let mut prev = Point::new(theta.cos() * r1, theta.sin() * r1);
        points.push(prev + offset);

        let mut builder = PointPathBuilder::new(&mut points);
        // Each petal spans 2*d_theta and is built from two cubic segments:
        // outer radius -> inner radius -> outer radius.
        for _ in 0..petal_count {
            let x1 = theta.cos() * r1;
            let y1 = theta.sin() * r1;
            let x2 = (theta + d_theta).cos() * r2;
            let y2 = (theta + d_theta).sin() * r2;
            let x3 = (theta + 2.0 * d_theta).cos() * r1;
            let y3 = (theta + 2.0 * d_theta).sin() * r1;

            let p1 = Point::new(x1 - y1 * u, y1 + x1 * u);
            let p2 = Point::new(x2 + y2 * v, y2 - x2 * v);
            let p3 = Point::new(x2, y2);
            let p4 = Point::new(x2 - y2 * v, y2 + x2 * v);
            let p5 = Point::new(x3 + y3 * u, y3 - x3 * u);
            let p6 = Point::new(x3, y3);

            builder.cubic_to(prev, p1, p2, p3, offset);
            builder.cubic_to(p3, p4, p5, p6, offset);

            prev = p6;
            theta += d_theta * 2.0;
        }

        Self { points }
    }
}

/// Ink.
#[derive(Debug, FromArgs)] #[argh(name = "ink_rs")] struct Args { /// use spinel (GPU rendering back-end) #[argh(switch, short = 's')] use_spinel: bool, } #[derive(Default)] struct InkAppAssistant { use_spinel: bool, } impl AppAssistant for InkAppAssistant { fn setup(&mut self) -> Result<(), Error> { let args: Args = argh::from_env(); self.use_spinel = args.use_spinel; Ok(()) } fn create_view_assistant_render(&mut self, _: ViewKey) -> Result<ViewAssistantPtr, Error> { Ok(Box::new(InkViewAssistant::new())) } fn get_mode(&self) -> ViewMode { ViewMode::Render(RenderOptions { use_spinel: self.use_spinel }) } } struct InkFill { raster: Raster, color: Color, } impl InkFill { fn new(context: &mut Context, color: &Color, points: &Vec<Point>) -> Self { let path = { let mut path_builder = context.path_builder().unwrap(); let mut p0 = Point::zero(); for (i, &p) in points.iter().enumerate() { if i == 0 { path_builder.move_to(p); p0 = p; } else { path_builder.line_to(p); } } path_builder.line_to(p0); path_builder.build() }; let mut raster_builder = context.raster_builder().unwrap(); raster_builder.add(&path, None); let raster = raster_builder.build(); Self { raster, color: *color } } } struct Segment { path: Path, raster: Option<Raster>, } struct StrokePoint { point: Point, normal0: Vector2D<f32>, normal1: Vector2D<f32>, thickness: f32, } struct CurveFitter { first_control_points: Vec<Vector2D<f32>>, second_control_points: Vec<Vector2D<f32>>, end_points: Vec<Vector2D<f32>>, coefficients: Vec<f32>, } impl CurveFitter { fn new() -> Self { Self { first_control_points: Vec::new(), second_control_points: Vec::new(), end_points: Vec::new(), coefficients: Vec::new(), } } // Takes a set of |points| and generates a fitted curve with two control points in between each // point. Returns an iterator to (first control point, second control point, end point) // for |range|. These items are ready to be used to build a path using cubic_to(). 
// // Guided and simplified from // https://ovpwp.wordpress.com/2008/12/17/how-to-draw-a-smooth-curve-through-a-set-of-2d-points-with-bezier-methods/ fn compute_control_points( &mut self, points: impl Iterator<Item = Point>, range: Range<usize>, ) -> impl Iterator<Item = (Point, Point, Point)> + '_ { duration!("gfx", "CurveFitter::compute_control_points"); self.end_points.splice(.., points.map(|p| p.to_vector())); self.first_control_points.clear(); self.second_control_points.clear(); self.coefficients.clear(); let num_control_points = self.end_points.len() - 1; match num_control_points { // Do nothing for a single point. 0 => {} // Calculate average for two points. 1 => { let p0 = self.end_points[0]; let p1 = self.end_points[1]; self.first_control_points.push((p0 * 2.0 + p1) / 3.0); self.second_control_points.push((p0 + p1 * 2.0) / 3.0); } // Run the algorithm to generate two control points. _ => { // Compute first control points. let mut b: f32 = 2.0; let p0 = self.end_points[0]; let p1 = self.end_points[1]; let mut rhs = p0 + p1 * 2.0; self.coefficients.push(0.0); self.first_control_points.push(rhs / b); for i in 1..num_control_points - 1 { self.coefficients.push(1.0 / b); b = 4.0 - self.coefficients[i]; let p = self.end_points[i]; let p_next = self.end_points[i + 1]; rhs = p * 4.0 + p_next * 2.0; self.first_control_points.push((rhs - self.first_control_points[i - 1]) / b); } self.coefficients.push(1.0 / b); let p_prev = self.end_points[num_control_points - 1]; let p_last = self.end_points[num_control_points]; rhs = (p_prev * 8.0 + p_last) / 2.0; b = 3.5 - self.coefficients[num_control_points - 1]; self.first_control_points .push((rhs - self.first_control_points[num_control_points - 2]) / b); // Back substitution. 
for i in 1..num_control_points { let fcp = self.first_control_points[num_control_points - i - 1]; let fcp_next = self.first_control_points[num_control_points - i]; let c = self.coefficients[num_control_points - i]; self.first_control_points[num_control_points - i - 1] = fcp - fcp_next * c; } // Compute second control points. for i in 0..num_control_points - 1 { let p_next = self.end_points[i + 1]; let fcp_next = self.first_control_points[i + 1]; self.second_control_points.push(p_next * 2.0 - fcp_next); } let fcp_last = self.first_control_points[num_control_points - 1]; self.second_control_points.push((p_last + fcp_last) / 2.0); } } izip!( self.first_control_points[range.start..range.end - 1].iter().map(|v| v.to_point()), self.second_control_points[range.start..range.end - 1].iter().map(|v| v.to_point()), self.end_points[range.start + 1..range.end].iter().map(|v| v.to_point()) ) } } struct InkStroke { points: Vec<StrokePoint>, segments: Vec<(usize, Segment)>, color: Color, thickness: f32, transform: Transform2D<f32>, curve_fitter: CurveFitter, } impl InkStroke { fn new(color: Color, thickness: f32, transform: Transform2D<f32>) -> Self { Self { points: Vec::new(), segments: Vec::new(), color, thickness, transform, curve_fitter: CurveFitter::new(), } } fn raster(context: &mut Context, path: &Path, transform: &Transform2D<f32>) -> Raster { let mut raster_builder = context.raster_builder().unwrap(); raster_builder.add(path, Some(transform)); raster_builder.build() } fn push_point(&mut self, p: &Point) { match self.points.len() { // Just add the first point. 0 => self.points.push(StrokePoint { point: *p, normal0: Vector2D::zero(), normal1: Vector2D::zero(), thickness: STROKE_START_RADIUS, }), // Add second point and compute the normal for line between points. 
1 => { let p0 = self.points.pop().unwrap(); let e = p0.point - *p; let n = Vector2D::new(-e.y, e.x).normalize(); self.points.push(StrokePoint { point: p0.point, normal0: n, normal1: n, thickness: p0.thickness, }); self.points.push(StrokePoint { point: *p, normal0: n, normal1: n, thickness: STROKE_START_RADIUS, }); } // Add new point, compute the normal, and the average normal for last // two lines. We also make a limited adjustment to the average normal // distance to maintain the correct line thickness. _ => { let p1 = self.points.pop().unwrap(); let p0 = self.points.pop().unwrap(); let e = p1.point - *p; let n = Vector2D::new(-e.y, e.x).normalize(); let mut t1 = (p1.normal1 + n) / 2.0; let l = t1.square_length().max(0.1); t1 *= 1.0 / l; self.points.push(StrokePoint { point: p0.point, normal0: p0.normal0, normal1: p0.normal1, thickness: p0.thickness, }); self.points.push(StrokePoint { point: p1.point, normal0: p1.normal1, normal1: t1, thickness: p1.thickness, }); self.points.push(StrokePoint { point: *p, normal0: n, normal1: n, thickness: STROKE_START_RADIUS, }); } } } fn push_segment(&mut self, context: &mut Context, i0: usize, i1: usize) { let path = { let mut path_builder = context.path_builder().unwrap(); // // Convert stroke to fill and compute a bounding box. // let mut p_draw_start = Point::zero(); if i1 > i0 { p_draw_start = self.points[i0].point + self.points[i0].normal1 * self.points[i0].thickness; path_builder.move_to(p_draw_start); } for p in self.curve_fitter.compute_control_points( self.points.iter().map(|p| p.point + p.normal1 * p.thickness), i0..i1, ) { path_builder.cubic_to(p.0, p.1, p.2); } let p_first = &self.points.first().unwrap(); let p_last = &self.points.last().unwrap(); macro_rules! 
cap { ( $p:expr, $w:expr ) => { let offset = $p.point.to_vector(); let n = Vector2D::new($p.normal0.y, -$p.normal0.x); let p0 = Point::zero() + $p.normal1 * $w; let p1 = Point::zero() - n * $w; let p2 = Point::zero() - $p.normal1 * $w; let dist = 4.0 / 3.0 * (f32::consts::PI / 8.0).tan(); let control_dist = dist * $w; let c0 = p0 - n * control_dist; let c1 = p1 + $p.normal1 * control_dist; let c2 = p1 - $p.normal1 * control_dist; let c3 = p2 - n * control_dist; let mut wrapper = PathBuilderWrapper::new(&mut path_builder); wrapper.cubic_to(p0, c0, c1, p1, offset); wrapper.cubic_to(p1, c2, c3, p2, offset); }; } // Produce end-cap if at the end of the line and not connected to first point. if i1 == self.points.len() && p_first.point != p_last.point { cap!(p_last, p_last.thickness); } // Walk from point i1 back to i0 and offset by radius at each point. if i1 > i0 { let p = &self.points[i1 - 1]; path_builder.line_to(p.point - p.normal1 * p.thickness); } for p in self.curve_fitter.compute_control_points( self.points.iter().rev().map(|p| p.point - p.normal1 * p.thickness), self.points.len() - i1..self.points.len() - i0, ) { path_builder.cubic_to(p.0, p.1, p.2); } // Produce start-cap if at the beginning of line and not connected to last point. if i0 == 0 && p_first.point != p_last.point { cap!(p_first, -p_first.thickness); } path_builder.line_to(p_draw_start); path_builder.build() }; self.segments.push((i0, Segment { path, raster: None })); } fn update_thickness(&mut self, context: &mut Context) { assert_eq!(self.points.is_empty(), false); // No update needed if last point has correct thickness. This assumes // that last point always needs most adjustment. 
if self.points.last().unwrap().thickness == self.thickness { return; } let adjustment_amount = self.thickness * STROKE_RADIUS_ADJUSTMENT_AMOUNT; for p in self.points.iter_mut().rev() { if p.thickness == self.thickness { break; } p.thickness = if p.thickness > self.thickness { (p.thickness - adjustment_amount).max(self.thickness) } else { (p.thickness + adjustment_amount).min(self.thickness) }; } // Remove and get index of first point in last segment. let mut i0 = self.segments.pop().map_or(0, |v| v.0); // Index of last point with final thickness. let i1 = self.points.iter().rposition(|v| v.thickness == self.thickness).unwrap_or(i0); const SEGMENT_SIZE: usize = 256; // Add segments with final thickness. while (i1 - i0) > SEGMENT_SIZE { let i = i0 + SEGMENT_SIZE; self.push_segment(context, i0, i); i0 = i - 1; } // Add any remaining points to last segment. if (self.points.len() - i0) > 0 { self.push_segment(context, i0, self.points.len()); } } fn update(&mut self, context: &mut Context) -> bool { self.update_thickness(context); let mut changed = false; for (_, segment) in self.segments.iter_mut() { if segment.raster.is_none() { segment.raster = Some(Self::raster(context, &segment.path, &self.transform)); changed = true; } } changed } fn transform(&mut self, transform: &Transform2D<f32>) { self.transform = self.transform.post_mul(transform); // Re-create rasters during next call to update. for (_, segment) in self.segments.iter_mut() { segment.raster = None; } } } struct Scene { tools: Vec<(InkStroke, InkFill, Point)>, strokes: Vec<InkStroke>, } impl Scene { fn new() -> Self { Self { tools: Vec::new(), strokes: Vec::new() } } fn setup(&mut self, context: &mut Context, size: Size, tools: &Vec<(&Color, &f32)>) { const TOOL_SIZE: f32 = (TOOL_RADIUS + TOOL_PADDING) * 2.0; // Layout tools at top-center. 
let mut x = size.width / 2.0 - (tools.len() as f32 * TOOL_SIZE) / 2.0; let y = TOOL_PADDING * 2.0 + TOOL_RADIUS; for (color, size) in tools { let center = Point::new(x, y); let circle = Circle::new(center, TOOL_RADIUS); let mut stroke = InkStroke::new(Color { r: 0, g: 0, b: 0, a: 255 }, 1.0, Transform2D::identity()); while stroke.points.len() < circle.points.len() { let p = &circle.points[stroke.points.len()]; stroke.push_point(p); } let circle = Circle::new(center, **size); let fill = InkFill::new(context, color, &circle.points); self.tools.push((stroke, fill, center)); x += TOOL_SIZE; } } fn hit_test(&mut self, point: Point) -> Option<usize> { for (i, (_, _, center)) in self.tools.iter().enumerate() { if (point - *center).length() < TOOL_RADIUS { return Some(i); } } None } fn select_tools(&mut self, indices: &Vec<usize>) { for (i, (stroke, _, _)) in self.tools.iter_mut().enumerate() { stroke.thickness = if indices.contains(&i) { 2.0 } else { 1.0 }; } } fn push_stroke(&mut self, color: Color, radius: f32, p: &Point) { let mut stroke = InkStroke::new(color, radius, Transform2D::identity()); stroke.push_point(p); self.strokes.push(stroke); } fn last_stroke(&mut self) -> Option<&mut InkStroke> { self.strokes.last_mut() } fn clear_strokes(&mut self) { self.strokes.clear(); } fn update_tools(&mut self, context: &mut Context) -> Option<Range<usize>> { let mut damage: Option<Range<usize>> = None; for (i, (stroke, _, _)) in self.tools.iter_mut().enumerate() { let changed = stroke.update(context); if changed { if let Some(damage) = &mut damage { damage.end = i + 1; } else { damage = Some(Range { start: i, end: i + 1 }); } } } damage } fn update_strokes(&mut self, context: &mut Context) -> Option<Range<usize>> { let mut damage: Option<Range<usize>> = None; for (i, stroke) in self.strokes.iter_mut().enumerate() { let changed = stroke.update(context); if changed { if let Some(value) = damage.take() { damage = Some(Range { start: value.start, end: i + 1 }); } else { damage = 
Some(Range { start: i, end: i + 1 }); } } } damage } fn transform(&mut self, transform: &Transform2D<f32>) { for stroke in self.strokes.iter_mut() { stroke.transform(transform); } } } struct Contents { image: Image, composition: Composition, size: Size, tool_count: usize, tool_damage: Option<Range<usize>>, stroke_count: usize, stroke_damage: Option<Range<usize>>, } impl Contents { fn new(image: Image) -> Self { let composition = Composition::new(BACKGROUND_COLOR); Self { image, composition, size: Size::zero(), tool_count: 0, tool_damage: None, stroke_count: 0, stroke_damage: None, } } fn update(&mut self, context: &mut Context, scene: &Scene, size: &Size) { let clip = Rect::new( Point2D::new(0, 0), Size2D::new(size.width.floor() as u32, size.height.floor() as u32), ); let ext = if self.size != *size { self.size = *size; self.tool_damage = Some(Range { start: 0, end: scene.tools.len() }); self.stroke_damage = Some(Range { start: 0, end: scene.strokes.len() }); RenderExt { pre_clear: Some(PreClear { color: BACKGROUND_COLOR }), ..Default::default() } } else { RenderExt::default() }; // Update damaged tool layers. if let Some(damage) = self.tool_damage.take() { let layers = scene.tools[damage.start..damage.end].iter().flat_map(|(stroke, fill, _)| { std::iter::once(Layer { raster: stroke .segments .iter() .fold(None, |raster_union: Option<Raster>, segment| { if let Some(raster) = &segment.1.raster { if let Some(raster_union) = raster_union { Some(raster_union + raster.clone()) } else { Some(raster.clone()) } } else { raster_union } }) .unwrap(), style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(stroke.color), blend_mode: BlendMode::Over, }, }) .chain(std::iter::once(Layer { raster: fill.raster.clone(), style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(fill.color), blend_mode: BlendMode::Over, }, })) }); let range = (damage.start * 2)..(damage.end * 2); // Add tool layers if needed. 
if self.tool_count < damage.end { self.composition.replace(range.start.., layers); self.tool_count = damage.end; } else { self.composition.replace(range, layers); } } let bottom = self.tool_count * 2 + scene.strokes.len(); // Update damaged stroke layers. if let Some(damage) = self.stroke_damage.take() { let layers = scene.strokes[damage.start..damage.end].iter().rev().map(|stroke| Layer { raster: stroke .segments .iter() .fold(None, |raster_union: Option<Raster>, segment| { if let Some(raster) = &segment.1.raster { if let Some(raster_union) = raster_union { Some(raster_union + raster.clone()) } else { Some(raster.clone()) } } else { raster_union } }) .unwrap(), style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(stroke.color), blend_mode: BlendMode::Over, }, }); // Reverse range. let range = (bottom - damage.end)..(bottom - damage.start); // Add more stroke layers if needed. if self.stroke_count < scene.strokes.len() { let count = scene.strokes.len() - self.stroke_count; self.composition.replace(range.start..(range.end - count), layers); self.stroke_count = scene.strokes.len(); } else { self.composition.replace(range, layers); } } // Remove strokes that are no longer part of the scene. if self.stroke_count > scene.strokes.len() { self.composition.replace(bottom.., std::iter::empty::<Layer>()); self.stroke_count = scene.strokes.len(); } context.render(&self.composition, Some(clip), self.image, &ext); } fn add_tool_damage(&mut self, range: &Range<usize>) { self.tool_damage = Some(if let Some(damage) = self.tool_damage.take() { Range { start: range.start.min(damage.start), end: range.end.max(damage.end) } } else { range.clone() }); } fn add_stroke_damage(&mut self, range: &Range<usize>) { self.stroke_damage = Some(if let Some(damage) = self.stroke_damage.take() { Range { start: range.start.min(damage.start), end: range.end.max(damage.end) } } else { range.clone() }); } fn full_damage(&mut self) { // Empty size will trigger a clear during next update. 
self.size = Size::zero(); } } struct Stylus { _rpt_id: u8, status: u8, x: u16, y: u16, } // TODO: Remove stylus device when supported by carnelian. struct StylusDevice { device: hid::DeviceSynchronousProxy, x_max: u16, y_max: u16, } impl StylusDevice { fn open_input_device(path: &str) -> Result<hid::DeviceSynchronousProxy, Error> { let (client, server) = zx::Channel::create()?; fdio::service_connect(path, server)?; Ok(hid::DeviceSynchronousProxy::new(client)) } fn create() -> Result<StylusDevice, Error> { static INPUT_DEVICES_DIRECTORY: &str = "/dev/class/input"; let path = std::path::Path::new(INPUT_DEVICES_DIRECTORY); let entries = fs::read_dir(path)?; for entry in entries { let entry = entry?; let entry_path = entry.path(); let path = entry_path.to_str().expect("bad path"); let mut device = Self::open_input_device(path)?; if let Ok(hid::DeviceIds { vendor_id: 0x00002d1f, product_id, .. }) = device.get_device_ids(zx::Time::INFINITE) { // Paradise if product_id == 0x00005143 { println!("found stylus at {0}", path); const PARADISE_STYLUS_X_MAX: u16 = 25919; const PARADISE_STYLUS_Y_MAX: u16 = 17279; return Ok(StylusDevice { device, x_max: PARADISE_STYLUS_X_MAX, y_max: PARADISE_STYLUS_Y_MAX, }); } // Slate if product_id == 0x0000486c { println!("found stylus at {0}", path); const SLATE_STYLUS_X_MAX: u16 = 26009; const SLATE_STYLUS_Y_MAX: u16 = 17339; return Ok(StylusDevice { device, x_max: SLATE_STYLUS_X_MAX, y_max: SLATE_STYLUS_Y_MAX, }); } } } Err(std::io::Error::new(std::io::ErrorKind::NotFound, "no touch found").into()) } fn get_events(&mut self) -> Result<Vec<Stylus>, Error> { let mut stylus_events = Vec::<Stylus>::new(); let reports = self.device.read_reports(zx::Time::INFINITE)?; let reports = reports.1; let mut report_index = 0; while report_index < reports.len() { let report = &reports[report_index..]; if report[0] != 6 { report_index += 55; continue; } report_index += 20; stylus_events.push(Stylus { _rpt_id: report[0], status: report[1], x: report[2] as u16 
+ ((report[3] as u16) << 8), y: report[4] as u16 + ((report[5] as u16) << 8), }); } Ok(stylus_events) } } // TODO: Remove touch device when supported by carnelian. struct TouchDevice { device: hid_input_report::InputDeviceSynchronousProxy, x_range: hid_input_report::Range, y_range: hid_input_report::Range, } impl TouchDevice { fn create() -> Result<TouchDevice, Error> { let input_devices_directory = "/dev/class/input-report"; let path = std::path::Path::new(input_devices_directory); let entries = fs::read_dir(path)?; for entry in entries { let entry = entry?; let (client, server) = zx::Channel::create()?; fdio::service_connect(entry.path().to_str().expect("bad path"), server)?; let mut device = hid_input_report::InputDeviceSynchronousProxy::new(client); let descriptor = device.get_descriptor(zx::Time::INFINITE)?; match descriptor.touch { None => continue, Some(touch) => match touch.input { None => continue, Some(input) => { println!("touch device: {0}", entry.path().to_str().unwrap()); let contact_descriptor = &input.contacts.as_ref().unwrap()[0]; let x_range = contact_descriptor.position_x.as_ref().unwrap().range; let y_range = contact_descriptor.position_y.as_ref().unwrap().range; return Ok(TouchDevice { device, x_range, y_range }); } }, } } Err(std::io::Error::new(std::io::ErrorKind::NotFound, "no touch device found").into()) } fn get_events(&mut self) -> Result<Vec<hid_input_report::InputReport>, Error> { Ok(self.device.get_reports(zx::Time::INFINITE)?) 
} } struct Ink { scene: Scene, contents: BTreeMap<u64, Contents>, touch_device: Option<TouchDevice>, touch_points: Vec<Point>, stylus_device: Option<StylusDevice>, last_stylus_x: u16, last_stylus_y: u16, last_stylus_point: Option<Point>, flower: Option<Flower>, flower_start: Time, color: usize, pencil: usize, pan_origin: Vector2D<f32>, scale_distance: f32, rotation_angle: f32, clear_origin: Vector2D<f32>, } impl Ink { pub fn new(context: &mut Context, size: Size) -> Self { let mut scene = Scene::new(); let color_iter = COLORS.iter().map(|color| (color, &TOOL_RADIUS)); let pencil_iter = PENCILS.iter().map(|size| (&Color { r: 0, g: 0, b: 0, a: 255 }, size)); let tools = color_iter.chain(pencil_iter).collect::<Vec<_>>(); scene.setup(context, size, &tools); let color = 0; let pencil = 1; scene.select_tools(&vec![color, COLORS.len() + pencil]); let touch_device = TouchDevice::create().ok(); let stylus_device = StylusDevice::create().ok(); let flower_start = Time::from_nanos( Time::get(ClockId::Monotonic) .into_nanos() .saturating_add(zx::Duration::from_seconds(FLOWER_DELAY_SECONDS).into_nanos()), ); Self { scene, contents: BTreeMap::new(), touch_device, touch_points: Vec::new(), stylus_device, last_stylus_x: std::u16::MAX, last_stylus_y: std::u16::MAX, last_stylus_point: None, flower: None, flower_start, color, pencil, pan_origin: Vector2D::zero(), scale_distance: 0.0, rotation_angle: 0.0, clear_origin: Vector2D::zero(), } } fn update( &mut self, render_context: &mut Context, context: &ViewAssistantContext<'_>, ) -> Result<(), Error> { duration!("gfx", "update"); let time_now = Time::get(ClockId::Monotonic); let size = &context.logical_size; let mut full_damage = false; // Process touch device input. 
if let Some(device) = self.touch_device.as_mut() { let previous_touch_points_count = self.touch_points.len(); let reports = device.get_events()?; for report in &reports { let touch = report.touch.as_ref(); let contacts = touch.unwrap().contacts.as_ref().unwrap(); self.touch_points.clear(); for contact in contacts { let point = Point::new( size.width * contact.position_x.unwrap() as f32 / device.x_range.max as f32, size.height * contact.position_y.unwrap() as f32 / device.y_range.max as f32, ); self.touch_points.push(point); } } let mut transform = Transform2D::identity(); // Pan and select color. match self.touch_points.len() { 1 | 2 => { let mut origin = Vector2D::zero(); for point in &self.touch_points { origin += point.to_vector(); } origin /= self.touch_points.len() as f32; if self.touch_points.len() != previous_touch_points_count { if let Some(index) = self.scene.hit_test(origin.to_point()) { if index < COLORS.len() { self.color = index; } else { self.pencil = index - COLORS.len(); } self.scene.select_tools(&vec![self.color, COLORS.len() + self.pencil]); } self.pan_origin = origin; } let distance = origin - self.pan_origin; transform = transform.post_translate(distance); self.pan_origin = origin; } _ => {} } // Rotation & zoom. if self.touch_points.len() == 2 { let mut iter = self.touch_points.iter(); let point0 = iter.next().unwrap(); let point1 = iter.next().unwrap(); let origin = (point0.to_vector() + point1.to_vector()) / 2.0; transform = transform.post_translate(-origin); // Rotation. let line = *point0 - *point1; let angle = line.x.atan2(line.y); if self.touch_points.len() != previous_touch_points_count { self.rotation_angle = angle; } let rotation_angle = angle - self.rotation_angle; transform = transform.post_rotate(Angle::radians(rotation_angle)); self.rotation_angle = angle; // Pinch to zoom. 
let distance = (*point0 - *point1).length(); if distance != 0.0 { if self.touch_points.len() != previous_touch_points_count { self.scale_distance = distance; } let sxsy = distance / self.scale_distance; transform = transform.post_scale(sxsy, sxsy); self.scale_distance = distance; } transform = transform.post_translate(origin); } // Clear using 3 finger swipe across screen. if self.touch_points.len() >= 3 { let mut origin = Vector2D::zero(); for point in &self.touch_points { origin += point.to_vector(); } origin /= self.touch_points.len() as f32; if self.touch_points.len() != previous_touch_points_count { self.clear_origin = origin; } const MIN_CLEAR_SWIPE_DISTANCE: f32 = 512.0; let distance = (origin - self.clear_origin).length(); if distance >= MIN_CLEAR_SWIPE_DISTANCE { self.flower_start = Time::from_nanos(time_now.into_nanos().saturating_add( zx::Duration::from_seconds(FLOWER_DELAY_SECONDS).into_nanos(), )); self.flower = None; self.scene.clear_strokes(); full_damage = true; } } if transform != Transform2D::identity() { self.scene.transform(&transform); full_damage = true; } } // Process stylus device input. if let Some(device) = self.stylus_device.as_mut() { let reports = device.get_events()?; for report in &reports { const STYLUS_STATUS_TSWITCH: u8 = 0x01; if (report.status & STYLUS_STATUS_TSWITCH) != 0 { if report.x != self.last_stylus_x || report.y != self.last_stylus_y { let point = Point::new( size.width * report.x as f32 / device.x_max as f32, size.height * report.y as f32 / device.y_max as f32, ); // Start new stroke or select color. if self.last_stylus_x == std::u16::MAX || self.last_stylus_y == std::u16::MAX { if let Some(index) = self.scene.hit_test(point) { if index < COLORS.len() { self.color = index; } else { self.pencil = index - COLORS.len(); } self.scene .select_tools(&vec![self.color, COLORS.len() + self.pencil]); } else { // Start stroke if we haven't reached the limit. 
if self.scene.strokes.len() < MAX_STROKES { self.scene.push_stroke( COLORS[self.color], PENCILS[self.pencil], &point, ); self.last_stylus_point = Some(point); } // Disable flower demo. self.flower_start = zx::Time::INFINITE; self.flower = None; } } // Update stroke if distance from last point surpassed radius. if let Some(last_stylus_point) = self.last_stylus_point { if (point - last_stylus_point).length() > PENCILS[self.pencil] { self.scene.last_stroke().unwrap().push_point(&point); self.last_stylus_point = Some(point); } } self.last_stylus_x = report.x; self.last_stylus_y = report.y; } } else { self.last_stylus_x = std::u16::MAX; self.last_stylus_y = std::u16::MAX; self.last_stylus_point = None; } } } // Generate flower when idle after clearing screen. if time_now.into_nanos() > self.flower_start.into_nanos() { let flower = self.flower.take().unwrap_or_else(|| { let flower = Flower::new(size.width, size.height); self.scene.push_stroke(COLORS[self.color], PENCILS[self.pencil], &flower.points[0]); flower }); // Points per second. const SPEED: f32 = 100.0; const SECONDS_PER_NANOSECOND: f32 = 1e-9; let n = ((time_now.into_nanos() - self.flower_start.into_nanos()) as f32 * SECONDS_PER_NANOSECOND * SPEED) as usize; let stroke = self.scene.last_stroke().unwrap(); // Extend set of points for current stroke. while n > stroke.points.len() && stroke.points.len() < flower.points.len() { let p = &flower.points[stroke.points.len()]; stroke.push_point(p); } if stroke.points.len() == flower.points.len() { self.flower_start = if self.scene.strokes.len() < MAX_STROKES { time_now } else { zx::Time::INFINITE }; } else { self.flower = Some(flower); } } // Full damage for changes that require some amount of clearing. if full_damage { for content in self.contents.values_mut() { content.full_damage(); } } // Update tools and add damage to each content. 
if let Some(tool_damage) = self.scene.update_tools(render_context) { for content in self.contents.values_mut() { content.add_tool_damage(&tool_damage); } } // Update strokes and add damage to each content. if let Some(stroke_damage) = self.scene.update_strokes(render_context) { for content in self.contents.values_mut() { content.add_stroke_damage(&stroke_damage); } } let image_id = context.image_id; let image = render_context.get_current_image(context); let content = self.contents.entry(image_id).or_insert_with(|| Contents::new(image)); content.update(render_context, &self.scene, size); Ok(()) } } struct InkViewAssistant { size: Size, ink: Option<Ink>, } impl InkViewAssistant { pub fn new() -> Self { Self { size: Size::zero(), ink: None } } } impl ViewAssistant for InkViewAssistant { fn setup(&mut self, _context: &ViewAssistantContext<'_>) -> Result<(), Error> { Ok(()) } fn update(&mut self, _context: &ViewAssistantContext<'_>) -> Result<(), Error> { Ok(()) } fn render( &mut self, render_context: &mut Context, ready_event: Event, context: &ViewAssistantContext<'_>, ) -> Result<(), Error> { if context.logical_size != self.size || self.ink.is_none() { let ink = Ink::new(render_context, context.logical_size); self.size = context.logical_size; self.ink = Some(ink); } if let Some(ink) = self.ink.as_mut() { ink.update(render_context, context).expect("ink.update"); } ready_event.as_handle_ref().signal(Signals::NONE, Signals::EVENT_SIGNALED)?; Ok(()) } fn initial_animation_mode(&mut self) -> AnimationMode { return AnimationMode::EveryFrame; } } fn main() -> Result<(), Error> { fuchsia_trace_provider::trace_provider_create_with_fdio(); println!("Ink Example"); App::run(make_app_assistant::<InkAppAssistant>()) }
36.078056
120
0.505184
1121a397c6a2a1baf33f3a8e9ac37bf6003fec6d
17,572
#[doc = "Reader of register XTALCTRL"] pub type R = crate::R<u32, super::XTALCTRL>; #[doc = "Writer for register XTALCTRL"] pub type W = crate::W<u32, super::XTALCTRL>; #[doc = "Register XTALCTRL `reset()`'s with value 0x0358"] impl crate::ResetValue for super::XTALCTRL { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0x0358 } } #[doc = "Reader of field `XTALICOMPTRIM`"] pub type XTALICOMPTRIM_R = crate::R<u8, u8>; #[doc = "Write proxy for field `XTALICOMPTRIM`"] pub struct XTALICOMPTRIM_W<'a> { w: &'a mut W, } impl<'a> XTALICOMPTRIM_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 8)) | (((value as u32) & 0x03) << 8); self.w } } #[doc = "Reader of field `XTALIBUFTRIM`"] pub type XTALIBUFTRIM_R = crate::R<u8, u8>; #[doc = "Write proxy for field `XTALIBUFTRIM`"] pub struct XTALIBUFTRIM_W<'a> { w: &'a mut W, } impl<'a> XTALIBUFTRIM_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 6)) | (((value as u32) & 0x03) << 6); self.w } } #[doc = "XTAL Power down on brown out.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PWDBODXTAL_A { #[doc = "0: Power up XTAL on BOD."] PWRUPBOD = 0, #[doc = "1: Power down XTAL on BOD."] PWRDNBOD = 1, } impl From<PWDBODXTAL_A> for bool { #[inline(always)] fn from(variant: PWDBODXTAL_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `PWDBODXTAL`"] pub type PWDBODXTAL_R = crate::R<bool, PWDBODXTAL_A>; impl PWDBODXTAL_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PWDBODXTAL_A { match self.bits { false => PWDBODXTAL_A::PWRUPBOD, true => PWDBODXTAL_A::PWRDNBOD, } } #[doc = "Checks if the value of the field is `PWRUPBOD`"] #[inline(always)] pub fn is_pwrupbod(&self) -> bool { *self == PWDBODXTAL_A::PWRUPBOD } #[doc = "Checks if the 
value of the field is `PWRDNBOD`"] #[inline(always)] pub fn is_pwrdnbod(&self) -> bool { *self == PWDBODXTAL_A::PWRDNBOD } } #[doc = "Write proxy for field `PWDBODXTAL`"] pub struct PWDBODXTAL_W<'a> { w: &'a mut W, } impl<'a> PWDBODXTAL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PWDBODXTAL_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Power up XTAL on BOD."] #[inline(always)] pub fn pwrupbod(self) -> &'a mut W { self.variant(PWDBODXTAL_A::PWRUPBOD) } #[doc = "Power down XTAL on BOD."] #[inline(always)] pub fn pwrdnbod(self) -> &'a mut W { self.variant(PWDBODXTAL_A::PWRDNBOD) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5); self.w } } #[doc = "XTAL Oscillator Power Down Comparator.\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PDNBCMPRXTAL_A { #[doc = "1: Power up XTAL oscillator comparator."] PWRUPCOMP = 1, #[doc = "0: Power down XTAL oscillator comparator."] PWRDNCOMP = 0, } impl From<PDNBCMPRXTAL_A> for bool { #[inline(always)] fn from(variant: PDNBCMPRXTAL_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `PDNBCMPRXTAL`"] pub type PDNBCMPRXTAL_R = crate::R<bool, PDNBCMPRXTAL_A>; impl PDNBCMPRXTAL_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PDNBCMPRXTAL_A { match self.bits { true => PDNBCMPRXTAL_A::PWRUPCOMP, false => PDNBCMPRXTAL_A::PWRDNCOMP, } } #[doc = "Checks if the value of the field is `PWRUPCOMP`"] #[inline(always)] pub fn is_pwrupcomp(&self) -> bool { *self == PDNBCMPRXTAL_A::PWRUPCOMP } #[doc = "Checks if the value of the field is `PWRDNCOMP`"] #[inline(always)] pub fn 
is_pwrdncomp(&self) -> bool { *self == PDNBCMPRXTAL_A::PWRDNCOMP } } #[doc = "Write proxy for field `PDNBCMPRXTAL`"] pub struct PDNBCMPRXTAL_W<'a> { w: &'a mut W, } impl<'a> PDNBCMPRXTAL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PDNBCMPRXTAL_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Power up XTAL oscillator comparator."] #[inline(always)] pub fn pwrupcomp(self) -> &'a mut W { self.variant(PDNBCMPRXTAL_A::PWRUPCOMP) } #[doc = "Power down XTAL oscillator comparator."] #[inline(always)] pub fn pwrdncomp(self) -> &'a mut W { self.variant(PDNBCMPRXTAL_A::PWRDNCOMP) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4); self.w } } #[doc = "XTAL Oscillator Power Down Core.\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PDNBCOREXTAL_A { #[doc = "1: Power up XTAL oscillator core."] PWRUPCORE = 1, #[doc = "0: Power down XTAL oscillator core."] PWRDNCORE = 0, } impl From<PDNBCOREXTAL_A> for bool { #[inline(always)] fn from(variant: PDNBCOREXTAL_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `PDNBCOREXTAL`"] pub type PDNBCOREXTAL_R = crate::R<bool, PDNBCOREXTAL_A>; impl PDNBCOREXTAL_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PDNBCOREXTAL_A { match self.bits { true => PDNBCOREXTAL_A::PWRUPCORE, false => PDNBCOREXTAL_A::PWRDNCORE, } } #[doc = "Checks if the value of the field is `PWRUPCORE`"] #[inline(always)] pub fn is_pwrupcore(&self) -> bool { *self == PDNBCOREXTAL_A::PWRUPCORE } #[doc = "Checks if the value of the field is `PWRDNCORE`"] #[inline(always)] pub fn is_pwrdncore(&self) -> bool 
{ *self == PDNBCOREXTAL_A::PWRDNCORE } } #[doc = "Write proxy for field `PDNBCOREXTAL`"] pub struct PDNBCOREXTAL_W<'a> { w: &'a mut W, } impl<'a> PDNBCOREXTAL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PDNBCOREXTAL_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Power up XTAL oscillator core."] #[inline(always)] pub fn pwrupcore(self) -> &'a mut W { self.variant(PDNBCOREXTAL_A::PWRUPCORE) } #[doc = "Power down XTAL oscillator core."] #[inline(always)] pub fn pwrdncore(self) -> &'a mut W { self.variant(PDNBCOREXTAL_A::PWRDNCORE) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3); self.w } } #[doc = "XTAL Oscillator Bypass Comparator.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BYPCMPRXTAL_A { #[doc = "0: Use the XTAL oscillator comparator."] USECOMP = 0, #[doc = "1: Bypass the XTAL oscillator comparator."] BYPCOMP = 1, } impl From<BYPCMPRXTAL_A> for bool { #[inline(always)] fn from(variant: BYPCMPRXTAL_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `BYPCMPRXTAL`"] pub type BYPCMPRXTAL_R = crate::R<bool, BYPCMPRXTAL_A>; impl BYPCMPRXTAL_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> BYPCMPRXTAL_A { match self.bits { false => BYPCMPRXTAL_A::USECOMP, true => BYPCMPRXTAL_A::BYPCOMP, } } #[doc = "Checks if the value of the field is `USECOMP`"] #[inline(always)] pub fn is_usecomp(&self) -> bool { *self == BYPCMPRXTAL_A::USECOMP } #[doc = "Checks if the value of the field is `BYPCOMP`"] #[inline(always)] pub fn is_bypcomp(&self) -> bool { *self == BYPCMPRXTAL_A::BYPCOMP } } #[doc = "Write 
proxy for field `BYPCMPRXTAL`"] pub struct BYPCMPRXTAL_W<'a> { w: &'a mut W, } impl<'a> BYPCMPRXTAL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: BYPCMPRXTAL_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Use the XTAL oscillator comparator."] #[inline(always)] pub fn usecomp(self) -> &'a mut W { self.variant(BYPCMPRXTAL_A::USECOMP) } #[doc = "Bypass the XTAL oscillator comparator."] #[inline(always)] pub fn bypcomp(self) -> &'a mut W { self.variant(BYPCMPRXTAL_A::BYPCOMP) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "XTAL Oscillator Disable Feedback.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FDBKDSBLXTAL_A { #[doc = "0: Enable XTAL oscillator comparator."] EN = 0, #[doc = "1: Disable XTAL oscillator comparator."] DIS = 1, } impl From<FDBKDSBLXTAL_A> for bool { #[inline(always)] fn from(variant: FDBKDSBLXTAL_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `FDBKDSBLXTAL`"] pub type FDBKDSBLXTAL_R = crate::R<bool, FDBKDSBLXTAL_A>; impl FDBKDSBLXTAL_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FDBKDSBLXTAL_A { match self.bits { false => FDBKDSBLXTAL_A::EN, true => FDBKDSBLXTAL_A::DIS, } } #[doc = "Checks if the value of the field is `EN`"] #[inline(always)] pub fn is_en(&self) -> bool { *self == FDBKDSBLXTAL_A::EN } #[doc = "Checks if the value of the field is `DIS`"] #[inline(always)] pub fn is_dis(&self) -> bool { *self == FDBKDSBLXTAL_A::DIS } } #[doc = "Write proxy for field `FDBKDSBLXTAL`"] pub struct FDBKDSBLXTAL_W<'a> { w: &'a mut W, } impl<'a> 
FDBKDSBLXTAL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FDBKDSBLXTAL_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable XTAL oscillator comparator."] #[inline(always)] pub fn en(self) -> &'a mut W { self.variant(FDBKDSBLXTAL_A::EN) } #[doc = "Disable XTAL oscillator comparator."] #[inline(always)] pub fn dis(self) -> &'a mut W { self.variant(FDBKDSBLXTAL_A::DIS) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w } } #[doc = "XTAL Software Override Enable.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum XTALSWE_A { #[doc = "0: XTAL Software Override Disable."] OVERRIDE_DIS = 0, #[doc = "1: XTAL Software Override Enable."] OVERRIDE_EN = 1, } impl From<XTALSWE_A> for bool { #[inline(always)] fn from(variant: XTALSWE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `XTALSWE`"] pub type XTALSWE_R = crate::R<bool, XTALSWE_A>; impl XTALSWE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> XTALSWE_A { match self.bits { false => XTALSWE_A::OVERRIDE_DIS, true => XTALSWE_A::OVERRIDE_EN, } } #[doc = "Checks if the value of the field is `OVERRIDE_DIS`"] #[inline(always)] pub fn is_override_dis(&self) -> bool { *self == XTALSWE_A::OVERRIDE_DIS } #[doc = "Checks if the value of the field is `OVERRIDE_EN`"] #[inline(always)] pub fn is_override_en(&self) -> bool { *self == XTALSWE_A::OVERRIDE_EN } } #[doc = "Write proxy for field `XTALSWE`"] pub struct XTALSWE_W<'a> { w: &'a mut W, } impl<'a> XTALSWE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: 
XTALSWE_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "XTAL Software Override Disable."] #[inline(always)] pub fn override_dis(self) -> &'a mut W { self.variant(XTALSWE_A::OVERRIDE_DIS) } #[doc = "XTAL Software Override Enable."] #[inline(always)] pub fn override_en(self) -> &'a mut W { self.variant(XTALSWE_A::OVERRIDE_EN) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } impl R { #[doc = "Bits 8:9 - XTAL ICOMP trim"] #[inline(always)] pub fn xtalicomptrim(&self) -> XTALICOMPTRIM_R { XTALICOMPTRIM_R::new(((self.bits >> 8) & 0x03) as u8) } #[doc = "Bits 6:7 - XTAL IBUFF trim"] #[inline(always)] pub fn xtalibuftrim(&self) -> XTALIBUFTRIM_R { XTALIBUFTRIM_R::new(((self.bits >> 6) & 0x03) as u8) } #[doc = "Bit 5 - XTAL Power down on brown out."] #[inline(always)] pub fn pwdbodxtal(&self) -> PWDBODXTAL_R { PWDBODXTAL_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 4 - XTAL Oscillator Power Down Comparator."] #[inline(always)] pub fn pdnbcmprxtal(&self) -> PDNBCMPRXTAL_R { PDNBCMPRXTAL_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 3 - XTAL Oscillator Power Down Core."] #[inline(always)] pub fn pdnbcorextal(&self) -> PDNBCOREXTAL_R { PDNBCOREXTAL_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 2 - XTAL Oscillator Bypass Comparator."] #[inline(always)] pub fn bypcmprxtal(&self) -> BYPCMPRXTAL_R { BYPCMPRXTAL_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 1 - XTAL Oscillator Disable Feedback."] #[inline(always)] pub fn fdbkdsblxtal(&self) -> FDBKDSBLXTAL_R { FDBKDSBLXTAL_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 0 - XTAL Software Override Enable."] #[inline(always)] pub fn xtalswe(&self) -> 
XTALSWE_R { XTALSWE_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bits 8:9 - XTAL ICOMP trim"] #[inline(always)] pub fn xtalicomptrim(&mut self) -> XTALICOMPTRIM_W { XTALICOMPTRIM_W { w: self } } #[doc = "Bits 6:7 - XTAL IBUFF trim"] #[inline(always)] pub fn xtalibuftrim(&mut self) -> XTALIBUFTRIM_W { XTALIBUFTRIM_W { w: self } } #[doc = "Bit 5 - XTAL Power down on brown out."] #[inline(always)] pub fn pwdbodxtal(&mut self) -> PWDBODXTAL_W { PWDBODXTAL_W { w: self } } #[doc = "Bit 4 - XTAL Oscillator Power Down Comparator."] #[inline(always)] pub fn pdnbcmprxtal(&mut self) -> PDNBCMPRXTAL_W { PDNBCMPRXTAL_W { w: self } } #[doc = "Bit 3 - XTAL Oscillator Power Down Core."] #[inline(always)] pub fn pdnbcorextal(&mut self) -> PDNBCOREXTAL_W { PDNBCOREXTAL_W { w: self } } #[doc = "Bit 2 - XTAL Oscillator Bypass Comparator."] #[inline(always)] pub fn bypcmprxtal(&mut self) -> BYPCMPRXTAL_W { BYPCMPRXTAL_W { w: self } } #[doc = "Bit 1 - XTAL Oscillator Disable Feedback."] #[inline(always)] pub fn fdbkdsblxtal(&mut self) -> FDBKDSBLXTAL_W { FDBKDSBLXTAL_W { w: self } } #[doc = "Bit 0 - XTAL Software Override Enable."] #[inline(always)] pub fn xtalswe(&mut self) -> XTALSWE_W { XTALSWE_W { w: self } } }
30.56
84
0.574437
56d40f00897cc029b4d4637c96715043fbc01277
1,111
use crate::fs::{Metadata, MetadataExt};
use posish::fs::{statat, AtFlags};
use std::{
    fs, io,
    sync::atomic::{AtomicBool, Ordering::Relaxed},
};

/// Like `file.metadata()`, but works with `O_PATH` descriptors on old (pre 3.6)
/// versions of Linux too.
pub(super) fn file_metadata(file: &fs::File) -> io::Result<Metadata> {
    // Record whether we've seen an `EBADF` from an `fstat` on an `O_PATH`
    // file descriptor, meaning we're on a Linux that doesn't support it.
    //
    // `Relaxed` is sufficient here: the flag is a one-way latch (only ever
    // flipped from `false` to `true`), and a stale read merely retries the
    // fast path once more.
    static FSTAT_PATH_BADF: AtomicBool = AtomicBool::new(false);

    if !FSTAT_PATH_BADF.load(Relaxed) {
        // Fast path: plain `fstat` via the standard library.
        match file.metadata() {
            Ok(metadata) => return Ok(Metadata::from_std(metadata)),
            Err(e) => match e.raw_os_error() {
                // Before Linux 3.6, `fstat` with `O_PATH` returned `EBADF`.
                // Remember that so future calls skip straight to the fallback.
                Some(libc::EBADF) => FSTAT_PATH_BADF.store(true, Relaxed),
                // Any other error is a genuine failure; propagate it.
                _ => return Err(e),
            },
        }
    }

    // If `fstat` with `O_PATH` isn't supported, use `statat` with `AT_EMPTY_PATH`
    // (an empty relative path resolves to the descriptor itself).
    statat(file, "", AtFlags::EMPTY_PATH).map(MetadataExt::from_libc)
}
38.310345
83
0.612961
d62d64f13a0a441559aa0d505d4d281a71ed09db
3,630
// This file is part of Substrate.

// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Transaction pool Prometheus metrics.

use std::sync::Arc;

use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};

/// Cheaply cloneable, optional handle to the pool metrics.
///
/// Holds `None` when no Prometheus registry was supplied or when
/// registration failed, in which case reporting is a silent no-op.
#[derive(Clone, Default)]
pub struct MetricsLink(Arc<Option<Metrics>>);

impl MetricsLink {
    /// Registers the pool metrics against `registry`, if one is given.
    ///
    /// Registration failures are logged as a warning and swallowed — the
    /// caller keeps running without transaction-pool metrics.
    pub fn new(registry: Option<&Registry>) -> Self {
        Self(Arc::new(registry.and_then(|registry| {
            Metrics::register(registry)
                .map_err(|err| {
                    log::warn!("Failed to register prometheus metrics: {}", err);
                })
                .ok()
        })))
    }

    /// Runs `do_this` with the metrics — only when they were registered.
    pub fn report(&self, do_this: impl FnOnce(&Metrics)) {
        if let Some(metrics) = self.0.as_ref() {
            do_this(metrics);
        }
    }
}

/// Transaction pool Prometheus metrics.
pub struct Metrics {
    /// Transactions submitted to the pool.
    pub submitted_transactions: Counter<U64>,
    /// Transactions removed from the pool as invalid.
    pub validations_invalid: Counter<U64>,
    /// Transactions pruned in response to block events.
    pub block_transactions_pruned: Counter<U64>,
    /// Transactions resubmitted in response to block events.
    pub block_transactions_resubmitted: Counter<U64>,
}

impl Metrics {
    /// Register every pool counter with the given Prometheus registry.
    pub fn register(registry: &Registry) -> Result<Self, PrometheusError> {
        Ok(Self {
            submitted_transactions: register(
                Counter::new(
                    "sub_txpool_submitted_transactions",
                    "Total number of transactions submitted",
                )?,
                registry,
            )?,
            validations_invalid: register(
                Counter::new(
                    "sub_txpool_validations_invalid",
                    "Total number of transactions that were removed from the pool as invalid",
                )?,
                registry,
            )?,
            block_transactions_pruned: register(
                Counter::new(
                    "sub_txpool_block_transactions_pruned",
                    "Total number of transactions that was requested to be pruned by block events",
                )?,
                registry,
            )?,
            block_transactions_resubmitted: register(
                Counter::new(
                    "sub_txpool_block_transactions_resubmitted",
                    "Total number of transactions that was requested to be resubmitted by block events",
                )?,
                registry,
            )?,
        })
    }
}

/// Transaction pool api Prometheus metrics.
pub struct ApiMetrics {
    /// Transaction validations that have been scheduled.
    pub validations_scheduled: Counter<U64>,
    /// Transaction validations that have completed.
    pub validations_finished: Counter<U64>,
}

impl ApiMetrics {
    /// Register the metrics at the given Prometheus registry.
    pub fn register(registry: &Registry) -> Result<Self, PrometheusError> {
        Ok(Self {
            validations_scheduled: register(
                Counter::new(
                    "sub_txpool_validations_scheduled",
                    "Total number of transactions scheduled for validation",
                )?,
                registry,
            )?,
            validations_finished: register(
                Counter::new(
                    "sub_txpool_validations_finished",
                    "Total number of transactions that finished validation",
                )?,
                registry,
            )?,
        })
    }
}

/// An extension trait for [`ApiMetrics`].
pub trait ApiMetricsExt {
    /// Report an event to the metrics.
    fn report(&self, report: impl FnOnce(&ApiMetrics));
}

// Reporting against an absent (`None`) metrics handle is a no-op.
impl ApiMetricsExt for Option<Arc<ApiMetrics>> {
    fn report(&self, report: impl FnOnce(&ApiMetrics)) {
        if let Some(metrics) = self.as_ref() {
            report(metrics)
        }
    }
}
27.923077
89
0.707163
f5cd21f3cb8b268791bd71d4f36b7f3e07d1520b
1,188
/// Algebra - Assign Monoidal Act use crate::algebra::act::*; use crate::algebra::monoid::*; #[derive(Debug, Clone, Copy)] pub enum Assign<X> { Some(X), None, } impl<X> std::ops::Mul for Assign<X> { type Output = Self; fn mul(self, other: Self) -> Self { match (self, &other) { (x, Assign::None) => x, _ => other, } } } impl<X: Copy> Act<X> for Assign<X> { fn act(&self, other: X) -> X { match *self { Assign::None => other, Assign::Some(x) => x, } } } impl<X: Copy> Monoid for Assign<X> { fn unit() -> Self { Assign::None } } #[cfg(test)] mod test_ratio { use crate::algebra::act_assign::*; #[test] fn it_works() { assert_eq!(Assign::Some(1).act(0), 1); assert_eq!(Assign::None.act(0), 0); assert_eq!((Assign::Some(1) * Assign::Some(2)).act(0), 2); assert_eq!((Assign::None * Assign::Some(2)).act(0), 2); assert_eq!((Assign::Some(1) * Assign::None).act(0), 1); assert_eq!((Assign::None * Assign::None).act(0), 0); assert_eq!((Assign::None * Assign::None * Assign::Some(9)).act(0), 9); } }
25.276596
78
0.51431
c17d78646e73df04ae4606d6099e739b63f10e1a
1,333
//! Support for building the types in `pact_matching::models`. This could //! theoretically be moved into `pact_matching::models` at some future date, //! but that's currently undergoing heavy construction. mod interaction_builder; mod http_part_builder; mod pact_builder; mod request_builder; mod response_builder; pub use self::interaction_builder::*; pub use self::http_part_builder::*; pub use self::pact_builder::*; pub use self::request_builder::*; pub use self::response_builder::*; #[test] fn basic_builder_example() { let pact = PactBuilder::new("Consumer", "Provider") .interaction("GET /greeting/hello", |i| { i.given("a greeting named hello"); i.request.method("GET").path("/greeting/hello"); i.response .status(200) .header("Content-Type", "application/json") .json_body(json_pattern!({ "message": "Hello!", })); }) .build(); assert_eq!(pact.consumer().name, "Consumer"); assert_eq!(pact.provider().name, "Provider"); assert_eq!(pact.interactions().len(), 1); let interaction = pact.interactions()[0]; assert_eq!(&interaction.description(), "GET /greeting/hello"); assert_eq!(interaction.provider_states()[0].name, "a greeting named hello"); }
34.179487
80
0.63991
d53f1cddae0fa87ee3cafd934b94f5db0f49f7a4
1,733
use bitcoin::Error as BitcoinError; use hex::FromHexError; use jsonrpc_core_client::RpcError; use jsonrpc_http_server::jsonrpc_core::Error as JsonRpcError; use parity_scale_codec::Error as CodecError; use runtime::{substrate_subxt::Error as XtError, Error as RuntimeError}; use std::net::AddrParseError; use thiserror::Error; #[derive(Error, Debug)] pub enum Error { #[error("Insufficient funds available")] InsufficientFunds, #[error("Open time inconsistent with chain height")] InvalidOpenTime, #[error("Channel unexpectedly closed")] ChannelClosed, #[error("Invalid Bitcoin network")] InvalidBitcoinNetwork, #[error("Expected blocks but got none")] NoIncomingBlocks, #[error("Failed to load or create bitcoin wallet: {0}")] WalletInitializationFailure(BitcoinError), #[error("Transaction contains more than one return-to-self uxto")] TooManyReturnToSelfAddresses, #[error("Mathematical operation caused an overflow")] ArithmeticOverflow, #[error("Mathematical operation caused an underflow")] ArithmeticUnderflow, #[error("Mathematical operation error")] MathError, #[error("RPC error: {0}")] RpcError(#[from] RpcError), #[error("Hex conversion error: {0}")] FromHexError(#[from] FromHexError), #[error("BitcoinError: {0}")] BitcoinError(#[from] BitcoinError), #[error("RuntimeError: {0}")] RuntimeError(#[from] RuntimeError), #[error("SubXtError: {0}")] SubXtError(#[from] XtError), #[error("JsonRpcError: {0}")] JsonRpcError(#[from] JsonRpcError), #[error("CodecError: {0}")] CodecError(#[from] CodecError), #[error("AddrParseError: {0}")] AddrParseError(#[from] AddrParseError), }
34.66
72
0.694749
e2320e53b7eaa9faf20831877e0a63f4e80c9a9c
17,064
use crate::repository::{MessageData, MsgParams as repoMsgParams, Repository, TokenData}; use message::Msg; use std::collections::HashMap; use std::sync::mpsc::{Receiver as mpscReceiver, Sender as mpscSender}; use std::sync::{mpsc, Arc, Mutex}; use std::thread; use std::time::Duration; use ws::{Builder, CloseCode, Handler, Handshake, Message, Result, Sender, Settings}; pub mod message; const DEFAULT_PAGE_SIZE: i64 = 30; const DEFAULT_PAGE_INDEX: i64 = 0; const WS_MAX_CONNECTIONS: usize = 60_000; pub struct Chat { repository: Arc<Mutex<Box<dyn Repository>>>, params: Params, ws_server: Arc<Mutex<Server>>, } struct Server { connections: HashMap<String, HashMap<u32, Client>>, user_names: HashMap<u32, String>, init_pool: HashMap<u32, Client>, } impl Default for Server { fn default() -> Self { let connections = HashMap::new(); let init_pool = HashMap::new(); let user_names = HashMap::new(); Server { connections, init_pool, user_names, } } } struct Client { sender: Sender, addr: String, connection_id: u32, room_name: String, } struct WsHandler { sender: Sender, addr: String, room_name: String, client_tx: mpsc::Sender<Client>, data_tx: mpsc::Sender<message::Data>, id: u32, } impl WsHandler { fn terminate_connection(&self) { let terminate_conn = message::Data::Terminate(message::Terminate { connection_id: self.id, room_name: self.room_name.clone(), }); match self.data_tx.send(terminate_conn) { Ok(_) => {} Err(e) => { error!("sending data by channel error: {}", e); } } } } impl Handler for WsHandler { fn on_shutdown(&mut self) { info!("Handler received WebSocket shutdown request."); self.terminate_connection(); } fn on_open(&mut self, shake: Handshake) -> Result<()> { if let Ok(addr_opt) = shake.remote_addr() { let addr = match addr_opt { Some(a) => a, None => String::from("Unknown"), }; info!("Connection with {} now open", addr); self.addr = addr.clone(); let client = Client { sender: self.sender.clone(), addr, connection_id: self.id, room_name: String::from("Unassigned"), 
}; match self.client_tx.send(client) { Ok(_) => {} Err(e) => { error!("sending data by channel error: {}", e); } }; } Ok(()) } fn on_message(&mut self, msg: Message) -> Result<()> { debug!("Server got message '{}' from client {}. ", msg, self.addr); let ws_data_str = match msg.as_text() { Ok(str) => str, Err(e) => { error!("on_message error: {}", e); return Ok(()); } }; let ws_data: message::WsData = match serde_json::from_str(ws_data_str) { Ok(d) => d, Err(e) => { error!("on_message error: {}", e); return Ok(()); } }; let data: message::Data = match ws_data { message::WsData::Message(m) => message::Data::Message(message::Msg { msg: m.msg, connection_id: self.id, room_name: self.room_name.clone(), }), message::WsData::Login(l) => { self.room_name = l.room_name.clone(); message::Data::Login(message::Login { connection_id: self.id, room_name: l.room_name, token: l.token, name: l.name, }) } }; match self.data_tx.send(data) { Ok(_) => {} Err(e) => { error!("sending data by channel error: {}", e); } } Ok(()) } fn on_close(&mut self, code: ws::CloseCode, reason: &str) { info!("Connection closing due to ({:?}) {}", code, reason); self.terminate_connection(); } } pub struct Params { pub(crate) ws_address: String, } pub fn new(params: Params, repository: Arc<Mutex<Box<dyn Repository>>>) -> Chat { let s = Server::default(); let ws_server = Arc::new(Mutex::new(s)); Chat { ws_server, params, repository, } } impl Chat { pub fn start(&self) { let (client_tx, client_rx): (mpscSender<Client>, mpscReceiver<Client>) = mpsc::channel(); let (msg_tx, msg_rx): (mpscSender<message::Data>, mpscReceiver<message::Data>) = mpsc::channel(); self.listen_ws(client_tx.clone(), msg_tx.clone()); self.handle_ws_client(client_rx); self.handle_ws_data(msg_rx); } fn listen_ws(&self, client_tx: mpscSender<Client>, data_tx: mpscSender<message::Data>) { { let c_tx = client_tx; let d_tx = data_tx; let ws_addr = self.params.ws_address.clone(); thread::spawn(move || { let mut connection_id = 0; let res = 
Builder::new() .with_settings(Settings { max_connections: WS_MAX_CONNECTIONS, ..Settings::default() }) .build(|out: Sender| { connection_id += 1; WsHandler { room_name: String::from("not initiated"), sender: out, client_tx: c_tx.clone(), data_tx: d_tx.clone(), addr: String::new(), id: connection_id, } }) .unwrap() .listen(ws_addr); match res { Ok(_) => {} Err(e) => { error!("error starting websocket service: {}", e); } } }); } } fn handle_ws_client(&self, client_rx: mpscReceiver<Client>) { { let client_rx = client_rx; let ws_server = self.ws_server.clone(); thread::spawn(move || loop { let cl = client_rx.recv(); { match cl { Ok(client) => { let mut server = match ws_server.lock() { Ok(r) => r, Err(e) => { error!("error while getting lock on server: {}", e); continue; } }; info!("Client connected with addr:{}", client.addr); server.init_pool.insert(client.connection_id, client); let count = server.connections.keys().len(); debug!("hashmap size after adding client:{}", count); } Err(e) => { error!("receiving client error: {}", e); } }; } }); } } fn broadcast(server: &Server, room_name: String, user_name: String, message: &Msg) { debug!("getting connections of room: {}", room_name); let connections_res = server.connections.get(&room_name); match connections_res { Some(connections) => { let front_msg = message::WsFrontMsg { user_name, msg: message.msg.clone(), }; let ws_msg_res = serde_json::to_string(&front_msg); let ws_msg_opt = match ws_msg_res { Ok(msg) => Some(msg), Err(e) => { error!("error while inserting message to db: {}", e); None } }; if let Some(ws_msg) = ws_msg_opt { for (id, s) in connections.iter() { if *id != message.connection_id { let send_res = s.sender.send(ws_msg.clone().as_str()); match send_res { Ok(_) => debug!("sent msg to {}", s.addr), Err(e) => error!("error while inserting message to db: {}", e), } } } } } None => {} } } fn handle_message( msg: message::Msg, ws_server: &Arc<Mutex<Server>>, rep_mtx: &Arc<Mutex<Box<dyn Repository>>>, ) { 
debug!("Msg received"); let server = match ws_server.lock() { Ok(r) => r, Err(e) => { error!("error while getting lock on server: {}", e); return; } }; let count = server.connections.keys().len(); debug!("hashmap size:{}", count); if let Some(user_name) = server.user_names.get(&msg.connection_id).clone() { let rep = match rep_mtx.lock() { Ok(r) => r, Err(e) => { error!("error while getting lock on repository: {}", e); return; } }; let message_r = rep.message(); let m_msg = MessageData { message: msg.msg.clone(), user_name: user_name.clone(), room_name: msg.room_name.clone(), }; let insert_res = message_r.insert(m_msg); match insert_res { Ok(_) => {} Err(e) => error!("error while inserting message to db: {}", e), } Chat::broadcast(&server, msg.room_name.clone(), user_name.clone(), &msg); } else { error!("could not get name of user") } } fn handle_login( login: message::Login, ws_server: &Arc<Mutex<Server>>, rep_mtx: &Arc<Mutex<Box<dyn Repository>>>, ) { debug!("Login received"); let repo = match rep_mtx.lock() { Ok(r) => r, Err(e) => { error!("error while getting lock on repository: {}", e); return; } }; let token_r = repo.token(); let mut server = match ws_server.lock() { Ok(r) => r, Err(e) => { error!("error while getting lock on server: {}", e); return; } }; match token_r.get_valid(TokenData { token: login.token.as_str(), room_name: login.room_name.as_str(), }) { Ok(true) => { let client_res = server.init_pool.remove(&login.connection_id); if let Some(mut client) = client_res { client.room_name = login.room_name.clone(); server.user_names.insert(login.connection_id, login.name); let message_r = repo.message(); let params = repoMsgParams { page: DEFAULT_PAGE_INDEX, room_name: String::from(client.room_name.clone()), size: DEFAULT_PAGE_SIZE, }; let messages = message_r.get(params); match messages { Ok(messages) => { for m in messages { let front_msg = message::WsFrontMsg { user_name: m.user_name.clone(), msg: m.message.clone(), }; if let Ok(ws_msg) = 
serde_json::to_string(&front_msg) { debug!("sending: {}", ws_msg); match client.sender.send(ws_msg) { Ok(_) => {} Err(e) => error!("sending to web socket error: {}", e), } thread::sleep(Duration::from_millis(100)); // flutter ws can not handle messages without pause } } } Err(e) => error!("could not get messages from DB: {}", e), } let mut room_res = server.connections.get_mut(client.room_name.as_str()); let room_key = client.room_name.clone(); match room_res.as_mut() { Some(room) => { let count = room.len(); info!( "number of connections for room {} is: {}", client.room_name, count ); room.insert(client.connection_id, client); info!("adding to by room_key: {}", room_key); } None => { let mut room = HashMap::new(); let room_key = client.room_name.clone(); room.insert(client.connection_id, client); info!("inserting by room_key: {}", room_key); server.connections.insert(room_key, room); } } } else { error!("could not get client from map"); } } Ok(false) => { let client_res = server.init_pool.remove(&login.connection_id); match client_res { Some(client) => match client.sender.close(CloseCode::Status) { Ok(_) => {} Err(e) => error!("closing socket error: {}", e), }, None => error!("could not get client from map"), } } Err(e) => error!("login err: {}", e), }; let del_res = token_r.delete(TokenData { token: login.token.as_str(), room_name: login.room_name.as_str(), }); match del_res { Err(e) => { warn!("error while deleting token after login {}", e); } Ok(_) => {} } } fn handle_terminate(terminate: message::Terminate, ws_server: &Arc<Mutex<Server>>) { let mut server = match ws_server.lock() { Ok(r) => r, Err(e) => { error!("error while getting lock on server: {}", e); return; } }; match server.connections.get_mut(terminate.room_name.as_str()) { Some(room_connections) => match room_connections.remove(&terminate.connection_id) { Some(_) => debug!( "successfully removed connection: {} from room {}", terminate.connection_id, terminate.room_name.as_str() ), None => warn!( 
"could not get connections for room: {}", terminate.room_name.as_str() ), }, None => warn!( "could not get connections for room: {}", terminate.room_name.as_str() ), } } fn handle_ws_data(&self, msg_rx: mpscReceiver<message::Data>) { { let msg_rx = msg_rx; let ws_server = self.ws_server.clone(); let rep_mtx = self.repository.clone(); thread::spawn(move || loop { match msg_rx.recv() { Ok(data) => match data { message::Data::Message(msg) => { Chat::handle_message(msg, &ws_server, &rep_mtx); } message::Data::Login(login) => { Chat::handle_login(login, &ws_server, &rep_mtx) } message::Data::Terminate(terminate) => { Chat::handle_terminate(terminate, &ws_server) } }, Err(e) => { println!("receiving data: {}", e); break; } }; }); } } }
33.992032
130
0.43038
91d80ca4aa16886f6b1319a47ddb11f34380ab7c
618
use std::env::args; use pdf::error::PdfError; use pdf::file::File; /// extract and print a PDF's metadata fn main() -> Result<(), PdfError> { let path = args() .nth(1) .expect("Please provide a file path to the PDF you want to explore."); let file = File::<Vec<u8>>::open(&path).unwrap(); if let Some(ref info) = file.trailer.info_dict { info.iter() .filter(|(_, primitive)| primitive.to_string_lossy().is_ok()) .for_each(|(key, value)| { eprintln!("{:>15}: {}", key, value.to_string_lossy().unwrap()); }); } Ok(()) }
26.869565
79
0.545307
794a0c23b0feea01bbbfb4ab2ca74bc86e681685
344
/// Cargo build script: emit a `rustc-link-search` directive for each
/// directory that may hold prebuilt kernel artifacts (per-target and
/// default release layouts).
fn main() {
    const SEARCH_PATHS: [&str; 3] = [
        "../../kernel/target/x86_64/release/deps",
        "../../kernel/target/aarch64/release/deps",
        "../../kernel/target/release/deps",
    ];
    for path in SEARCH_PATHS.iter() {
        // Cargo parses these lines and adds each directory to the
        // linker search path.
        println!("cargo:rustc-link-search=all={}", path);
    }
}
38.222222
58
0.613372
48ecad1c7e7f0f0f8167cbad2a20f2777a085be3
932
use ignite_plugin_utils::{console::log, editor::ignite}; use serde::{Deserialize, Serialize}; use wasm_bindgen::prelude::*; #[derive(Debug, Clone, Serialize, Deserialize)] struct Payload { pub message: String, } #[wasm_bindgen] pub fn query(query: &str, data: JsValue) -> Result<(), JsValue> { match query { "editor-loaded" => { log("EDITOR HAS LOADED"); let payload = Payload { message: "WEWOWEWOWEWO!!!".to_owned(), }; if let Ok(payload) = JsValue::from_serde(&payload) { ignite("~%IGNITE_ID%~", "alert", payload)?; } } "alert" => { if let Ok(payload) = data.into_serde::<Payload>() { log(&payload.message); } ignite("?", "ping", JsValue::UNDEFINED)?; } "ping" => { log("pong"); } _ => {} } Ok(()) }
26.628571
65
0.492489
3ae0749ac75480ebe634f686a2a2cafb277d0428
5,554
<?xml version="1.0" encoding="UTF-8"?> <MobileElementEntity> <description></description> <name>TextView - ADD TO CART</name> <tag></tag> <elementGuidId>00000000-0000-0000-0000-000000000000</elementGuidId> <selectorMethod>BASIC</selectorMethod> <useRalativeImagePath>false</useRalativeImagePath> <webElementProperties> <isSelected>true</isSelected> <matchCondition>equals</matchCondition> <name>class</name> <type>Main</type> <value>android.widget.TextView</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>index</name> <type>Main</type> <value>0</value> </webElementProperties> <webElementProperties> <isSelected>true</isSelected> <matchCondition>equals</matchCondition> <name>text</name> <type>Main</type> <value>ADD TO CART</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>package</name> <type>Main</type> <value>com.swaglabsmobileapp</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>checkable</name> <type>Main</type> <value>false</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>checked</name> <type>Main</type> <value>false</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>clickable</name> <type>Main</type> <value>false</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>enabled</name> <type>Main</type> <value>true</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>focusable</name> <type>Main</type> <value>false</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> 
<matchCondition>equals</matchCondition> <name>focused</name> <type>Main</type> <value>false</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>scrollable</name> <type>Main</type> <value>false</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>long-clickable</name> <type>Main</type> <value>false</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>password</name> <type>Main</type> <value>false</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>selected</name> <type>Main</type> <value>false</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>x</name> <type>Main</type> <value>240</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>y</name> <type>Main</type> <value>1301</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>width</name> <type>Main</type> <value>240</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>height</name> <type>Main</type> <value>47</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>bounds</name> <type>Main</type> <value>[240,1301][480,1348]</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>displayed</name> <type>Main</type> <value>true</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> 
<matchCondition>equals</matchCondition> <name>xpath</name> <type>Main</type> <value>//hierarchy/android.widget.FrameLayout[1]/android.widget.LinearLayout[1]/android.widget.FrameLayout[1]/android.widget.LinearLayout[1]/android.widget.FrameLayout[1]/android.widget.FrameLayout[1]/android.view.ViewGroup[1]/android.view.ViewGroup[1]/android.view.ViewGroup[1]/android.view.ViewGroup[1]/android.view.ViewGroup[1]/android.view.ViewGroup[1]/android.view.ViewGroup[1]/android.view.ViewGroup[1]/android.widget.ScrollView[1]/android.view.ViewGroup[1]/android.view.ViewGroup[1]/android.view.ViewGroup[4]/android.widget.TextView[1]</value> </webElementProperties> <locator>//*[@class = 'android.widget.TextView' and (@text = 'ADD TO CART' or . = 'ADD TO CART')]</locator> <locatorStrategy>ATTRIBUTES</locatorStrategy> </MobileElementEntity>
34.930818
556
0.67951
db2961fcc6e88335ae320310a0c3b591dd114305
2,051
use crate::base_inference::resolve_to_base_inferred::ResolveToBaseInferred;
use crate::base_inference::{BaseInference, BaseInferenceTables};
use crate::results::TypeCheckResults;
use crate::TypeCheckDatabase;
use crate::TypeChecker;
use crate::UniverseBinder;
use generational_arena::Arena;
use lark_collections::{FxIndexMap, IndexVec};
use lark_entity::Entity;
use lark_error::Diagnostic;
use lark_error::WithError;
use lark_ty::base_inferred::BaseInferred;
use lark_ty::map_family::Map;
use lark_unify::UnificationTable;
use std::sync::Arc;

/// Runs "base" type inference over the body of `fn_entity` and returns the
/// fully-resolved type-check results.
///
/// Any inference variables left unresolved after checking produce an
/// `Unresolved variable` diagnostic attached to the span of the function
/// body's root expression.
crate fn base_type_check(
    db: &impl TypeCheckDatabase,
    fn_entity: Entity,
) -> WithError<Arc<TypeCheckResults<BaseInferred>>> {
    let fn_body = db.fn_body(fn_entity).into_value();
    let interners = BaseInferenceTables::default();
    // Fresh checker state for this single function body; the universe
    // binder stack starts with just the root universe.
    let mut base_type_checker: TypeChecker<'_, BaseInference, _> = TypeChecker {
        db,
        fn_entity,
        f_tables: interners.clone(),
        hir: fn_body.clone(),
        ops_arena: Arena::new(),
        ops_blocked: FxIndexMap::default(),
        unify: UnificationTable::new(interners.clone()),
        storage: TypeCheckResults::default(),
        universe_binders: IndexVec::from(vec![UniverseBinder::Root]),
        errors: vec![],
    };
    // `check_fn_body` returns the inference variables it could not resolve.
    let mut unresolved_variables = base_type_checker.check_fn_body();

    // Record the final results. If any unresolved type variables are
    // encountered, report an error.
    let inferred_results = base_type_checker
        .storage
        .map(&mut ResolveToBaseInferred::new(
            &mut base_type_checker.unify,
            db.as_ref(),
            &mut unresolved_variables,
        ));
    let mut errors = base_type_checker.errors;

    // One diagnostic per remaining unresolved variable, all pointing at the
    // root expression (per-variable spans are not available here).
    for _ in unresolved_variables {
        // FIXME: Decent diagnostics for unresolved inference
        // variables.
        errors.push(Diagnostic::new(
            "Unresolved variable".into(),
            fn_body.span(fn_body.root_expression),
        ));
    }

    WithError {
        value: Arc::new(inferred_results),
        errors,
    }
}
32.555556
80
0.680644
61dd994e614224dd3a1267ec52664bdb65638075
67,279
//! The `pubsub` module implements a threaded subscription service on client RPC request use crate::{ optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, rpc::{get_parsed_token_account, get_parsed_token_accounts}, }; use core::hash::Hash; use jsonrpc_core::futures::Future; use jsonrpc_pubsub::{ typed::{Sink, Subscriber}, SubscriptionId, }; use serde::Serialize; use solana_account_decoder::{parse_token::spl_token_id_v2_0, UiAccount, UiAccountEncoding}; use solana_client::{ rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig}, rpc_filter::RpcFilterType, rpc_response::{ ProcessedSignatureResult, ReceivedSignatureResult, Response, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult, SlotInfo, }, }; use solana_measure::measure::Measure; use solana_runtime::{ bank::Bank, bank_forks::BankForks, commitment::{BlockCommitmentCache, CommitmentSlots}, }; use solana_sdk::{ account::Account, clock::{Slot, UnixTimestamp}, commitment_config::{CommitmentConfig, CommitmentLevel}, pubkey::Pubkey, signature::Signature, transaction, }; use solana_vote_program::vote_state::Vote; use std::sync::{ atomic::{AtomicBool, Ordering}, mpsc::{Receiver, RecvTimeoutError, SendError, Sender}, }; use std::thread::{Builder, JoinHandle}; use std::time::Duration; use std::{ collections::{HashMap, HashSet}, iter, sync::{Arc, Mutex, RwLock}, }; // Stuck on tokio 0.1 until the jsonrpc-pubsub crate upgrades to tokio 0.2 use tokio_01::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor}; const RECEIVE_DELAY_MILLIS: u64 = 100; // A more human-friendly version of Vote, with the bank state signature base58 encoded. 
#[derive(Serialize, Deserialize, Debug)] pub struct RpcVote { pub slots: Vec<Slot>, pub hash: String, pub timestamp: Option<UnixTimestamp>, } enum NotificationEntry { Slot(SlotInfo), Vote(Vote), Root(Slot), Bank(CommitmentSlots), Gossip(Slot), SignaturesReceived((Slot, Vec<Signature>)), } impl std::fmt::Debug for NotificationEntry { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { NotificationEntry::Root(root) => write!(f, "Root({})", root), NotificationEntry::Vote(vote) => write!(f, "Vote({:?})", vote), NotificationEntry::Slot(slot_info) => write!(f, "Slot({:?})", slot_info), NotificationEntry::Bank(commitment_slots) => { write!(f, "Bank({{slot: {:?}}})", commitment_slots.slot) } NotificationEntry::SignaturesReceived(slot_signatures) => { write!(f, "SignaturesReceived({:?})", slot_signatures) } NotificationEntry::Gossip(slot) => write!(f, "Gossip({:?})", slot), } } } struct SubscriptionData<S, T> { sink: Sink<S>, commitment: CommitmentConfig, last_notified_slot: RwLock<Slot>, config: Option<T>, } #[derive(Default, Clone)] struct ProgramConfig { filters: Vec<RpcFilterType>, encoding: Option<UiAccountEncoding>, } type RpcAccountSubscriptions = RwLock< HashMap< Pubkey, HashMap<SubscriptionId, SubscriptionData<Response<UiAccount>, UiAccountEncoding>>, >, >; type RpcProgramSubscriptions = RwLock< HashMap< Pubkey, HashMap<SubscriptionId, SubscriptionData<Response<RpcKeyedAccount>, ProgramConfig>>, >, >; type RpcSignatureSubscriptions = RwLock< HashMap< Signature, HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>, bool>>, >, >; type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>; type RpcVoteSubscriptions = RwLock<HashMap<SubscriptionId, Sink<RpcVote>>>; type RpcRootSubscriptions = RwLock<HashMap<SubscriptionId, Sink<Slot>>>; fn add_subscription<K, S, T>( subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S, T>>>, hashmap_key: K, commitment: Option<CommitmentConfig>, sub_id: 
SubscriptionId, subscriber: Subscriber<S>, last_notified_slot: Slot, config: Option<T>, ) where K: Eq + Hash, S: Clone, { let sink = subscriber.assign_id(sub_id.clone()).unwrap(); let commitment = commitment.unwrap_or_else(CommitmentConfig::single); let subscription_data = SubscriptionData { sink, commitment, last_notified_slot: RwLock::new(last_notified_slot), config, }; subscriptions .entry(hashmap_key) .or_default() .insert(sub_id, subscription_data); } fn remove_subscription<K, S, T>( subscriptions: &mut HashMap<K, HashMap<SubscriptionId, SubscriptionData<S, T>>>, sub_id: &SubscriptionId, ) -> bool where K: Eq + Hash, S: Clone, { let mut found = false; subscriptions.retain(|_, v| { v.retain(|k, _| { let retain = k != sub_id; if !retain { found = true; } retain }); !v.is_empty() }); found } #[allow(clippy::type_complexity)] fn check_commitment_and_notify<K, S, B, F, X, T>( subscriptions: &HashMap<K, HashMap<SubscriptionId, SubscriptionData<Response<S>, T>>>, hashmap_key: &K, bank_forks: &Arc<RwLock<BankForks>>, commitment_slots: &CommitmentSlots, bank_method: B, filter_results: F, notifier: &RpcNotifier, ) -> HashSet<SubscriptionId> where K: Eq + Hash + Clone + Copy, S: Clone + Serialize, B: Fn(&Bank, &K) -> X, F: Fn(X, &K, Slot, Option<T>, Arc<Bank>) -> (Box<dyn Iterator<Item = S>>, Slot), X: Clone + Serialize + Default, T: Clone, { let mut notified_set: HashSet<SubscriptionId> = HashSet::new(); if let Some(hashmap) = subscriptions.get(hashmap_key) { for ( sub_id, SubscriptionData { sink, commitment, last_notified_slot, config, }, ) in hashmap.iter() { let slot = match commitment.commitment { CommitmentLevel::Max => commitment_slots.highest_confirmed_root, CommitmentLevel::Recent => commitment_slots.slot, CommitmentLevel::Root => commitment_slots.root, CommitmentLevel::Single | CommitmentLevel::SingleGossip => { commitment_slots.highest_confirmed_slot } }; if let Some(bank) = bank_forks.read().unwrap().get(slot).cloned() { let results = bank_method(&bank, 
hashmap_key); let mut w_last_notified_slot = last_notified_slot.write().unwrap(); let (filter_results, result_slot) = filter_results( results, hashmap_key, *w_last_notified_slot, config.as_ref().cloned(), bank, ); for result in filter_results { notifier.notify( Response { context: RpcResponseContext { slot }, value: result, }, sink, ); notified_set.insert(sub_id.clone()); *w_last_notified_slot = result_slot; } } } } notified_set } struct RpcNotifier(TaskExecutor); impl RpcNotifier { fn notify<T>(&self, value: T, sink: &Sink<T>) where T: serde::Serialize, { self.0 .spawn(sink.notify(Ok(value)).map(|_| ()).map_err(|_| ())); } } fn filter_account_result( result: Option<(Account, Slot)>, pubkey: &Pubkey, last_notified_slot: Slot, encoding: Option<UiAccountEncoding>, bank: Arc<Bank>, ) -> (Box<dyn Iterator<Item = UiAccount>>, Slot) { // If the account is not found, `last_modified_slot` will default to zero and // we will notify clients that the account no longer exists if we haven't already let (account, last_modified_slot) = result.unwrap_or_default(); // If last_modified_slot < last_notified_slot this means that we last notified for a fork // and should notify that the account state has been reverted. 
let results: Box<dyn Iterator<Item = UiAccount>> = if last_modified_slot != last_notified_slot { let encoding = encoding.unwrap_or(UiAccountEncoding::Binary); if account.owner == spl_token_id_v2_0() && encoding == UiAccountEncoding::JsonParsed { Box::new(iter::once(get_parsed_token_account(bank, pubkey, account))) } else { Box::new(iter::once(UiAccount::encode( pubkey, account, encoding, None, None, ))) } } else { Box::new(iter::empty()) }; (results, last_modified_slot) } fn filter_signature_result( result: Option<transaction::Result<()>>, _signature: &Signature, last_notified_slot: Slot, _config: Option<bool>, _bank: Arc<Bank>, ) -> (Box<dyn Iterator<Item = RpcSignatureResult>>, Slot) { ( Box::new(result.into_iter().map(|result| { RpcSignatureResult::ProcessedSignature(ProcessedSignatureResult { err: result.err() }) })), last_notified_slot, ) } fn filter_program_results( accounts: Vec<(Pubkey, Account)>, program_id: &Pubkey, last_notified_slot: Slot, config: Option<ProgramConfig>, bank: Arc<Bank>, ) -> (Box<dyn Iterator<Item = RpcKeyedAccount>>, Slot) { let config = config.unwrap_or_default(); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); let filters = config.filters; let accounts_is_empty = accounts.is_empty(); let keyed_accounts = accounts.into_iter().filter(move |(_, account)| { filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data.len() as u64 == *size, RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data), }) }); let accounts: Box<dyn Iterator<Item = RpcKeyedAccount>> = if program_id == &spl_token_id_v2_0() && encoding == UiAccountEncoding::JsonParsed && !accounts_is_empty { Box::new(get_parsed_token_accounts(bank, keyed_accounts)) } else { Box::new( keyed_accounts.map(move |(pubkey, account)| RpcKeyedAccount { pubkey: pubkey.to_string(), account: UiAccount::encode(&pubkey, account, encoding.clone(), None, None), }), ) }; (accounts, last_notified_slot) } #[derive(Clone)] struct 
Subscriptions { account_subscriptions: Arc<RpcAccountSubscriptions>, program_subscriptions: Arc<RpcProgramSubscriptions>, signature_subscriptions: Arc<RpcSignatureSubscriptions>, gossip_account_subscriptions: Arc<RpcAccountSubscriptions>, gossip_program_subscriptions: Arc<RpcProgramSubscriptions>, gossip_signature_subscriptions: Arc<RpcSignatureSubscriptions>, slot_subscriptions: Arc<RpcSlotSubscriptions>, vote_subscriptions: Arc<RpcVoteSubscriptions>, root_subscriptions: Arc<RpcRootSubscriptions>, } pub struct RpcSubscriptions { subscriptions: Subscriptions, notification_sender: Arc<Mutex<Sender<NotificationEntry>>>, t_cleanup: Option<JoinHandle<()>>, notifier_runtime: Option<Runtime>, bank_forks: Arc<RwLock<BankForks>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>, exit: Arc<AtomicBool>, enable_vote_subscription: bool, } impl Drop for RpcSubscriptions { fn drop(&mut self) { self.shutdown().unwrap_or_else(|err| { warn!("RPC Notification - shutdown error: {:?}", err); }); } } impl RpcSubscriptions { pub fn new( exit: &Arc<AtomicBool>, bank_forks: Arc<RwLock<BankForks>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>, ) -> Self { Self::new_with_vote_subscription( exit, bank_forks, block_commitment_cache, optimistically_confirmed_bank, false, ) } pub fn new_with_vote_subscription( exit: &Arc<AtomicBool>, bank_forks: Arc<RwLock<BankForks>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>, enable_vote_subscription: bool, ) -> Self { let (notification_sender, notification_receiver): ( Sender<NotificationEntry>, Receiver<NotificationEntry>, ) = std::sync::mpsc::channel(); let account_subscriptions = Arc::new(RpcAccountSubscriptions::default()); let program_subscriptions = Arc::new(RpcProgramSubscriptions::default()); let 
signature_subscriptions = Arc::new(RpcSignatureSubscriptions::default()); let gossip_account_subscriptions = Arc::new(RpcAccountSubscriptions::default()); let gossip_program_subscriptions = Arc::new(RpcProgramSubscriptions::default()); let gossip_signature_subscriptions = Arc::new(RpcSignatureSubscriptions::default()); let slot_subscriptions = Arc::new(RpcSlotSubscriptions::default()); let vote_subscriptions = Arc::new(RpcVoteSubscriptions::default()); let root_subscriptions = Arc::new(RpcRootSubscriptions::default()); let notification_sender = Arc::new(Mutex::new(notification_sender)); let _bank_forks = bank_forks.clone(); let _block_commitment_cache = block_commitment_cache.clone(); let exit_clone = exit.clone(); let subscriptions = Subscriptions { account_subscriptions, program_subscriptions, signature_subscriptions, gossip_account_subscriptions, gossip_program_subscriptions, gossip_signature_subscriptions, slot_subscriptions, vote_subscriptions, root_subscriptions, }; let _subscriptions = subscriptions.clone(); let notifier_runtime = RuntimeBuilder::new() .core_threads(1) .name_prefix("solana-rpc-notifier-") .build() .unwrap(); let notifier = RpcNotifier(notifier_runtime.executor()); let t_cleanup = Builder::new() .name("solana-rpc-notifications".to_string()) .spawn(move || { Self::process_notifications( exit_clone, notifier, notification_receiver, _subscriptions, _bank_forks, ); }) .unwrap(); Self { subscriptions, notification_sender, notifier_runtime: Some(notifier_runtime), t_cleanup: Some(t_cleanup), bank_forks, block_commitment_cache, optimistically_confirmed_bank, exit: exit.clone(), enable_vote_subscription, } } // For tests only... 
pub fn default_with_bank_forks(bank_forks: Arc<RwLock<BankForks>>) -> Self { let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); Self::new_with_vote_subscription( &Arc::new(AtomicBool::new(false)), bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::default())), optimistically_confirmed_bank, true, ) } fn check_account( pubkey: &Pubkey, bank_forks: &Arc<RwLock<BankForks>>, account_subscriptions: Arc<RpcAccountSubscriptions>, notifier: &RpcNotifier, commitment_slots: &CommitmentSlots, ) -> HashSet<SubscriptionId> { let subscriptions = account_subscriptions.read().unwrap(); check_commitment_and_notify( &subscriptions, pubkey, bank_forks, commitment_slots, Bank::get_account_modified_slot, filter_account_result, notifier, ) } fn check_program( program_id: &Pubkey, bank_forks: &Arc<RwLock<BankForks>>, program_subscriptions: Arc<RpcProgramSubscriptions>, notifier: &RpcNotifier, commitment_slots: &CommitmentSlots, ) -> HashSet<SubscriptionId> { let subscriptions = program_subscriptions.read().unwrap(); check_commitment_and_notify( &subscriptions, program_id, bank_forks, commitment_slots, Bank::get_program_accounts_modified_since_parent, filter_program_results, notifier, ) } fn check_signature( signature: &Signature, bank_forks: &Arc<RwLock<BankForks>>, signature_subscriptions: Arc<RpcSignatureSubscriptions>, notifier: &RpcNotifier, commitment_slots: &CommitmentSlots, ) -> HashSet<SubscriptionId> { let mut subscriptions = signature_subscriptions.write().unwrap(); let notified_ids = check_commitment_and_notify( &subscriptions, signature, bank_forks, commitment_slots, Bank::get_signature_status_processed_since_parent, filter_signature_result, notifier, ); if let Some(subscription_ids) = subscriptions.get_mut(signature) { subscription_ids.retain(|k, _| !notified_ids.contains(k)); if subscription_ids.is_empty() { subscriptions.remove(&signature); } } notified_ids } pub fn add_account_subscription( &self, pubkey: Pubkey, 
config: Option<RpcAccountInfoConfig>, sub_id: SubscriptionId, subscriber: Subscriber<Response<UiAccount>>, ) { let config = config.unwrap_or_default(); let commitment_level = config .commitment .unwrap_or_else(CommitmentConfig::single) .commitment; let slot = match commitment_level { CommitmentLevel::Max => self .block_commitment_cache .read() .unwrap() .highest_confirmed_root(), CommitmentLevel::Recent => self.block_commitment_cache.read().unwrap().slot(), CommitmentLevel::Root => self.block_commitment_cache.read().unwrap().root(), CommitmentLevel::Single => self .block_commitment_cache .read() .unwrap() .highest_confirmed_slot(), CommitmentLevel::SingleGossip => self .optimistically_confirmed_bank .read() .unwrap() .bank .slot(), }; let last_notified_slot = if let Some((_account, slot)) = self .bank_forks .read() .unwrap() .get(slot) .and_then(|bank| bank.get_account_modified_slot(&pubkey)) { slot } else { 0 }; let mut subscriptions = if commitment_level == CommitmentLevel::SingleGossip { self.subscriptions .gossip_account_subscriptions .write() .unwrap() } else { self.subscriptions.account_subscriptions.write().unwrap() }; add_subscription( &mut subscriptions, pubkey, config.commitment, sub_id, subscriber, last_notified_slot, config.encoding, ); } pub fn remove_account_subscription(&self, id: &SubscriptionId) -> bool { let mut subscriptions = self.subscriptions.account_subscriptions.write().unwrap(); if remove_subscription(&mut subscriptions, id) { true } else { let mut subscriptions = self .subscriptions .gossip_account_subscriptions .write() .unwrap(); remove_subscription(&mut subscriptions, id) } } pub fn add_program_subscription( &self, program_id: Pubkey, config: Option<RpcProgramAccountsConfig>, sub_id: SubscriptionId, subscriber: Subscriber<Response<RpcKeyedAccount>>, ) { let config = config.unwrap_or_default(); let commitment_level = config .account_config .commitment .unwrap_or_else(CommitmentConfig::recent) .commitment; let mut subscriptions = if 
commitment_level == CommitmentLevel::SingleGossip { self.subscriptions .gossip_program_subscriptions .write() .unwrap() } else { self.subscriptions.program_subscriptions.write().unwrap() }; add_subscription( &mut subscriptions, program_id, config.account_config.commitment, sub_id, subscriber, 0, // last_notified_slot is not utilized for program subscriptions Some(ProgramConfig { filters: config.filters.unwrap_or_default(), encoding: config.account_config.encoding, }), ); } pub fn remove_program_subscription(&self, id: &SubscriptionId) -> bool { let mut subscriptions = self.subscriptions.program_subscriptions.write().unwrap(); if remove_subscription(&mut subscriptions, id) { true } else { let mut subscriptions = self .subscriptions .gossip_program_subscriptions .write() .unwrap(); remove_subscription(&mut subscriptions, id) } } pub fn add_signature_subscription( &self, signature: Signature, signature_subscribe_config: Option<RpcSignatureSubscribeConfig>, sub_id: SubscriptionId, subscriber: Subscriber<Response<RpcSignatureResult>>, ) { let (commitment, enable_received_notification) = signature_subscribe_config .map(|config| (config.commitment, config.enable_received_notification)) .unwrap_or_default(); let commitment_level = commitment .unwrap_or_else(CommitmentConfig::recent) .commitment; let mut subscriptions = if commitment_level == CommitmentLevel::SingleGossip { self.subscriptions .gossip_signature_subscriptions .write() .unwrap() } else { self.subscriptions.signature_subscriptions.write().unwrap() }; add_subscription( &mut subscriptions, signature, commitment, sub_id, subscriber, 0, // last_notified_slot is not utilized for signature subscriptions enable_received_notification, ); } pub fn remove_signature_subscription(&self, id: &SubscriptionId) -> bool { let mut subscriptions = self.subscriptions.signature_subscriptions.write().unwrap(); if remove_subscription(&mut subscriptions, id) { true } else { let mut subscriptions = self .subscriptions 
.gossip_signature_subscriptions .write() .unwrap(); remove_subscription(&mut subscriptions, id) } } /// Notify subscribers of changes to any accounts or new signatures since /// the bank's last checkpoint. pub fn notify_subscribers(&self, commitment_slots: CommitmentSlots) { self.enqueue_notification(NotificationEntry::Bank(commitment_slots)); } /// Notify SingleGossip commitment-level subscribers of changes to any accounts or new /// signatures. pub fn notify_gossip_subscribers(&self, slot: Slot) { self.enqueue_notification(NotificationEntry::Gossip(slot)); } pub fn add_slot_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<SlotInfo>) { let sink = subscriber.assign_id(sub_id.clone()).unwrap(); let mut subscriptions = self.subscriptions.slot_subscriptions.write().unwrap(); subscriptions.insert(sub_id, sink); } pub fn remove_slot_subscription(&self, id: &SubscriptionId) -> bool { let mut subscriptions = self.subscriptions.slot_subscriptions.write().unwrap(); subscriptions.remove(id).is_some() } pub fn notify_slot(&self, slot: Slot, parent: Slot, root: Slot) { self.enqueue_notification(NotificationEntry::Slot(SlotInfo { slot, parent, root })); } pub fn notify_signatures_received(&self, slot_signatures: (Slot, Vec<Signature>)) { self.enqueue_notification(NotificationEntry::SignaturesReceived(slot_signatures)); } pub fn add_vote_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<RpcVote>) { if self.enable_vote_subscription { let sink = subscriber.assign_id(sub_id.clone()).unwrap(); let mut subscriptions = self.subscriptions.vote_subscriptions.write().unwrap(); subscriptions.insert(sub_id, sink); } else { let _ = subscriber.reject(jsonrpc_core::Error::new( jsonrpc_core::ErrorCode::MethodNotFound, )); } } pub fn remove_vote_subscription(&self, id: &SubscriptionId) -> bool { let mut subscriptions = self.subscriptions.vote_subscriptions.write().unwrap(); subscriptions.remove(id).is_some() } pub fn notify_vote(&self, vote: &Vote) { 
self.enqueue_notification(NotificationEntry::Vote(vote.clone())); } pub fn add_root_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<Slot>) { let sink = subscriber.assign_id(sub_id.clone()).unwrap(); let mut subscriptions = self.subscriptions.root_subscriptions.write().unwrap(); subscriptions.insert(sub_id, sink); } pub fn remove_root_subscription(&self, id: &SubscriptionId) -> bool { let mut subscriptions = self.subscriptions.root_subscriptions.write().unwrap(); subscriptions.remove(id).is_some() } pub fn notify_roots(&self, mut rooted_slots: Vec<Slot>) { rooted_slots.sort(); rooted_slots.into_iter().for_each(|root| { self.enqueue_notification(NotificationEntry::Root(root)); }); } fn enqueue_notification(&self, notification_entry: NotificationEntry) { match self .notification_sender .lock() .unwrap() .send(notification_entry) { Ok(()) => (), Err(SendError(notification)) => { warn!( "Dropped RPC Notification - receiver disconnected : {:?}", notification ); } } } fn process_notifications( exit: Arc<AtomicBool>, notifier: RpcNotifier, notification_receiver: Receiver<NotificationEntry>, subscriptions: Subscriptions, bank_forks: Arc<RwLock<BankForks>>, ) { loop { if exit.load(Ordering::Relaxed) { break; } match notification_receiver.recv_timeout(Duration::from_millis(RECEIVE_DELAY_MILLIS)) { Ok(notification_entry) => match notification_entry { NotificationEntry::Slot(slot_info) => { debug!("slot notify: {:?}", slot_info); let subscriptions = subscriptions.slot_subscriptions.read().unwrap(); for (_, sink) in subscriptions.iter() { inc_new_counter_info!("rpc-subscription-notify-slot", 1); notifier.notify(slot_info, sink); } } // These notifications are only triggered by votes observed on gossip, // unlike `NotificationEntry::Gossip`, which also accounts for slots seen // in VoteState's from bank states built in ReplayStage. 
NotificationEntry::Vote(ref vote_info) => { debug!("vote notify: {:?}", vote_info); let subscriptions = subscriptions.vote_subscriptions.read().unwrap(); for (_, sink) in subscriptions.iter() { inc_new_counter_info!("rpc-subscription-notify-vote", 1); notifier.notify( RpcVote { slots: vote_info.slots.clone(), hash: bs58::encode(vote_info.hash).into_string(), timestamp: vote_info.timestamp, }, sink, ); } } NotificationEntry::Root(root) => { debug!("root notify: {:?}", root); let subscriptions = subscriptions.root_subscriptions.read().unwrap(); for (_, sink) in subscriptions.iter() { inc_new_counter_info!("rpc-subscription-notify-root", 1); notifier.notify(root, sink); } } NotificationEntry::Bank(commitment_slots) => { RpcSubscriptions::notify_accounts_programs_signatures( &subscriptions.account_subscriptions, &subscriptions.program_subscriptions, &subscriptions.signature_subscriptions, &bank_forks, &commitment_slots, &notifier, "bank", ) } NotificationEntry::Gossip(slot) => { Self::process_gossip_notification( slot, &notifier, &subscriptions, &bank_forks, ); } NotificationEntry::SignaturesReceived(slot_signatures) => { RpcSubscriptions::process_signatures_received( &slot_signatures, &subscriptions.signature_subscriptions, &notifier, ) } }, Err(RecvTimeoutError::Timeout) => { // not a problem - try reading again } Err(RecvTimeoutError::Disconnected) => { warn!("RPC Notification thread - sender disconnected"); break; } } } } fn process_gossip_notification( slot: Slot, notifier: &RpcNotifier, subscriptions: &Subscriptions, bank_forks: &Arc<RwLock<BankForks>>, ) { let commitment_slots = CommitmentSlots { highest_confirmed_slot: slot, ..CommitmentSlots::default() }; RpcSubscriptions::notify_accounts_programs_signatures( &subscriptions.gossip_account_subscriptions, &subscriptions.gossip_program_subscriptions, &subscriptions.gossip_signature_subscriptions, bank_forks, &commitment_slots, &notifier, "gossip", ); } fn notify_accounts_programs_signatures( 
account_subscriptions: &Arc<RpcAccountSubscriptions>, program_subscriptions: &Arc<RpcProgramSubscriptions>, signature_subscriptions: &Arc<RpcSignatureSubscriptions>, bank_forks: &Arc<RwLock<BankForks>>, commitment_slots: &CommitmentSlots, notifier: &RpcNotifier, source: &'static str, ) { let mut accounts_time = Measure::start("accounts"); let pubkeys: Vec<_> = { let subs = account_subscriptions.read().unwrap(); subs.keys().cloned().collect() }; let mut num_pubkeys_notified = 0; for pubkey in &pubkeys { num_pubkeys_notified += Self::check_account( pubkey, bank_forks, account_subscriptions.clone(), &notifier, &commitment_slots, ) .len(); } accounts_time.stop(); let mut programs_time = Measure::start("programs"); let programs: Vec<_> = { let subs = program_subscriptions.read().unwrap(); subs.keys().cloned().collect() }; let mut num_programs_notified = 0; for program_id in &programs { num_programs_notified += Self::check_program( program_id, bank_forks, program_subscriptions.clone(), &notifier, &commitment_slots, ) .len(); } programs_time.stop(); let mut signatures_time = Measure::start("signatures"); let signatures: Vec<_> = { let subs = signature_subscriptions.read().unwrap(); subs.keys().cloned().collect() }; let mut num_signatures_notified = 0; for signature in &signatures { num_signatures_notified += Self::check_signature( signature, bank_forks, signature_subscriptions.clone(), &notifier, &commitment_slots, ) .len(); } signatures_time.stop(); let total_notified = num_pubkeys_notified + num_programs_notified + num_signatures_notified; let total_ms = accounts_time.as_ms() + programs_time.as_ms() + signatures_time.as_ms(); if total_notified > 0 || total_ms > 10 { debug!( "notified({}): accounts: {} / {} ({}) programs: {} / {} ({}) signatures: {} / {} ({})", source, pubkeys.len(), num_pubkeys_notified, accounts_time, programs.len(), num_programs_notified, programs_time, signatures.len(), num_signatures_notified, signatures_time, ); 
inc_new_counter_info!("rpc-subscription-notify-bank-or-gossip", total_notified); datapoint_info!( "rpc_subscriptions", ("source", source.to_string(), String), ("num_account_subscriptions", pubkeys.len(), i64), ("num_account_pubkeys_notified", num_pubkeys_notified, i64), ("accounts_time", accounts_time.as_us() as i64, i64), ("num_program_subscriptions", programs.len(), i64), ("num_programs_notified", num_programs_notified, i64), ("programs_time", programs_time.as_us() as i64, i64), ("num_signature_subscriptions", signatures.len(), i64), ("num_signatures_notified", num_signatures_notified, i64), ("signatures_time", signatures_time.as_us() as i64, i64) ); } } fn process_signatures_received( (received_slot, signatures): &(Slot, Vec<Signature>), signature_subscriptions: &Arc<RpcSignatureSubscriptions>, notifier: &RpcNotifier, ) { for signature in signatures { if let Some(hashmap) = signature_subscriptions.read().unwrap().get(signature) { for ( _, SubscriptionData { sink, config: is_received_notification_enabled, .. 
}, ) in hashmap.iter() { if is_received_notification_enabled.unwrap_or_default() { notifier.notify( Response { context: RpcResponseContext { slot: *received_slot, }, value: RpcSignatureResult::ReceivedSignature( ReceivedSignatureResult::ReceivedSignature, ), }, &sink, ); } } } } } fn shutdown(&mut self) -> std::thread::Result<()> { if let Some(runtime) = self.notifier_runtime.take() { info!("RPC Notifier runtime - shutting down"); let _ = runtime.shutdown_now().wait(); info!("RPC Notifier runtime - shut down"); } if self.t_cleanup.is_some() { info!("RPC Notification thread - shutting down"); self.exit.store(true, Ordering::Relaxed); let x = self.t_cleanup.take().unwrap().join(); info!("RPC Notification thread - shut down."); x } else { warn!("RPC Notification thread - already shut down."); Ok(()) } } } #[cfg(test)] pub(crate) mod tests { use super::*; use crate::optimistically_confirmed_bank_tracker::{ BankNotification, OptimisticallyConfirmedBank, OptimisticallyConfirmedBankTracker, }; use jsonrpc_core::futures::{self, stream::Stream}; use jsonrpc_pubsub::typed::Subscriber; use serial_test_derive::serial; use solana_runtime::{ commitment::BlockCommitment, genesis_utils::{create_genesis_config, GenesisConfigInfo}, }; use solana_sdk::{ message::Message, signature::{Keypair, Signer}, system_instruction, system_program, system_transaction, transaction::Transaction, }; use std::{fmt::Debug, sync::mpsc::channel, time::Instant}; use tokio_01::{prelude::FutureExt, runtime::Runtime, timer::Delay}; pub(crate) fn robust_poll_or_panic<T: Debug + Send + 'static>( receiver: futures::sync::mpsc::Receiver<T>, ) -> (T, futures::sync::mpsc::Receiver<T>) { let (inner_sender, inner_receiver) = channel(); let mut rt = Runtime::new().unwrap(); rt.spawn(futures::lazy(|| { let recv_timeout = receiver .into_future() .timeout(Duration::from_millis(RECEIVE_DELAY_MILLIS)) .map(move |result| match result { (Some(value), receiver) => { inner_sender.send((value, receiver)).expect("send error") 
} (None, _) => panic!("unexpected end of stream"), }) .map_err(|err| panic!("stream error {:?}", err)); const INITIAL_DELAY_MS: u64 = RECEIVE_DELAY_MILLIS * 2; Delay::new(Instant::now() + Duration::from_millis(INITIAL_DELAY_MS)) .and_then(|_| recv_timeout) .map_err(|err| panic!("timer error {:?}", err)) })); inner_receiver.recv().expect("recv error") } #[test] #[serial] fn test_check_account_subscribe() { let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(100); let bank = Bank::new(&genesis_config); let blockhash = bank.last_blockhash(); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); let alice = Keypair::new(); let (create_sub, _id_receiver, create_recv) = Subscriber::new_test("accountNotification"); let (close_sub, _id_receiver, close_recv) = Subscriber::new_test("accountNotification"); let create_sub_id = SubscriptionId::Number(0 as u64); let close_sub_id = SubscriptionId::Number(1 as u64); let exit = Arc::new(AtomicBool::new(false)); let subscriptions = RpcSubscriptions::new( &exit, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_slots( 1, 1, ))), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), ); subscriptions.add_account_subscription( alice.pubkey(), Some(RpcAccountInfoConfig { commitment: Some(CommitmentConfig::recent()), encoding: None, data_slice: None, }), create_sub_id.clone(), create_sub, ); assert!(subscriptions .subscriptions .account_subscriptions .read() .unwrap() .contains_key(&alice.pubkey())); let tx = system_transaction::create_account( &mint_keypair, &alice, blockhash, 1, 0, &system_program::id(), ); bank_forks .write() .unwrap() .get(1) .unwrap() .process_transaction(&tx) .unwrap(); let mut commitment_slots = CommitmentSlots::default(); commitment_slots.slot = 1; 
subscriptions.notify_subscribers(commitment_slots); let (response, _) = robust_poll_or_panic(create_recv); let expected = json!({ "jsonrpc": "2.0", "method": "accountNotification", "params": { "result": { "context": { "slot": 1 }, "value": { "data": "", "executable": false, "lamports": 1, "owner": "11111111111111111111111111111111", "rentEpoch": 0, }, }, "subscription": 0, } }); assert_eq!(serde_json::to_string(&expected).unwrap(), response); subscriptions.remove_account_subscription(&create_sub_id); subscriptions.add_account_subscription( alice.pubkey(), Some(RpcAccountInfoConfig { commitment: Some(CommitmentConfig::recent()), encoding: None, data_slice: None, }), close_sub_id.clone(), close_sub, ); let tx = { let instruction = system_instruction::transfer(&alice.pubkey(), &mint_keypair.pubkey(), 1); let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); Transaction::new(&[&alice, &mint_keypair], message, blockhash) }; bank_forks .write() .unwrap() .get(1) .unwrap() .process_transaction(&tx) .unwrap(); subscriptions.notify_subscribers(commitment_slots); let (response, _) = robust_poll_or_panic(close_recv); let expected = json!({ "jsonrpc": "2.0", "method": "accountNotification", "params": { "result": { "context": { "slot": 1 }, "value": { "data": "", "executable": false, "lamports": 0, "owner": "11111111111111111111111111111111", "rentEpoch": 0, }, }, "subscription": 1, } }); assert_eq!(serde_json::to_string(&expected).unwrap(), response); subscriptions.remove_account_subscription(&close_sub_id); assert!(!subscriptions .subscriptions .account_subscriptions .read() .unwrap() .contains_key(&alice.pubkey())); } #[test] #[serial] fn test_check_program_subscribe() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(100); let bank = Bank::new(&genesis_config); let blockhash = bank.last_blockhash(); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let alice = Keypair::new(); let tx = system_transaction::create_account( &mint_keypair, &alice, blockhash, 1, 16, &solana_stake_program::id(), ); bank_forks .write() .unwrap() .get(0) .unwrap() .process_transaction(&tx) .unwrap(); let (subscriber, _id_receiver, transport_receiver) = Subscriber::new_test("programNotification"); let sub_id = SubscriptionId::Number(0 as u64); let exit = Arc::new(AtomicBool::new(false)); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let subscriptions = RpcSubscriptions::new( &exit, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), optimistically_confirmed_bank, ); subscriptions.add_program_subscription( solana_stake_program::id(), None, sub_id.clone(), subscriber, ); assert!(subscriptions .subscriptions .program_subscriptions .read() .unwrap() .contains_key(&solana_stake_program::id())); subscriptions.notify_subscribers(CommitmentSlots::default()); let (response, _) = robust_poll_or_panic(transport_receiver); let expected = json!({ "jsonrpc": "2.0", "method": "programNotification", "params": { "result": { "context": { "slot": 0 }, "value": { "account": { "data": "1111111111111111", "executable": false, "lamports": 1, "owner": "Stake11111111111111111111111111111111111111", "rentEpoch": 0, }, "pubkey": alice.pubkey().to_string(), }, }, "subscription": 0, } }); assert_eq!(serde_json::to_string(&expected).unwrap(), response); subscriptions.remove_program_subscription(&sub_id); assert!(!subscriptions .subscriptions .program_subscriptions .read() .unwrap() .contains_key(&solana_stake_program::id())); } #[test] #[serial] fn test_check_signature_subscribe() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(100); let bank = Bank::new(&genesis_config); let blockhash = bank.last_blockhash(); let mut bank_forks = BankForks::new(bank); let alice = Keypair::new(); let past_bank_tx = system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, blockhash); let unprocessed_tx = system_transaction::transfer(&mint_keypair, &alice.pubkey(), 2, blockhash); let processed_tx = system_transaction::transfer(&mint_keypair, &alice.pubkey(), 3, blockhash); bank_forks .get(0) .unwrap() .process_transaction(&past_bank_tx) .unwrap(); let next_bank = Bank::new_from_parent( &bank_forks.banks[&0].clone(), &solana_sdk::pubkey::new_rand(), 1, ); bank_forks.insert(next_bank); bank_forks .get(1) .unwrap() .process_transaction(&processed_tx) .unwrap(); let bank1 = bank_forks[1].clone(); let bank_forks = Arc::new(RwLock::new(bank_forks)); let mut cache0 = BlockCommitment::default(); cache0.increase_confirmation_stake(1, 10); let cache1 = BlockCommitment::default(); let mut block_commitment = HashMap::new(); block_commitment.entry(0).or_insert(cache0); block_commitment.entry(1).or_insert(cache1); let block_commitment_cache = BlockCommitmentCache::new( block_commitment, 10, CommitmentSlots { slot: bank1.slot(), ..CommitmentSlots::default() }, ); let exit = Arc::new(AtomicBool::new(false)); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let subscriptions = RpcSubscriptions::new( &exit, bank_forks, Arc::new(RwLock::new(block_commitment_cache)), optimistically_confirmed_bank, ); let (past_bank_sub1, _id_receiver, past_bank_recv1) = Subscriber::new_test("signatureNotification"); let (past_bank_sub2, _id_receiver, past_bank_recv2) = Subscriber::new_test("signatureNotification"); let (processed_sub, _id_receiver, processed_recv) = Subscriber::new_test("signatureNotification"); let (processed_sub3, _id_receiver, processed_recv3) = Subscriber::new_test("signatureNotification"); subscriptions.add_signature_subscription( 
past_bank_tx.signatures[0], Some(RpcSignatureSubscribeConfig { commitment: Some(CommitmentConfig::recent()), enable_received_notification: Some(false), }), SubscriptionId::Number(1 as u64), past_bank_sub1, ); subscriptions.add_signature_subscription( past_bank_tx.signatures[0], Some(RpcSignatureSubscribeConfig { commitment: Some(CommitmentConfig::root()), enable_received_notification: Some(false), }), SubscriptionId::Number(2 as u64), past_bank_sub2, ); subscriptions.add_signature_subscription( processed_tx.signatures[0], Some(RpcSignatureSubscribeConfig { commitment: Some(CommitmentConfig::recent()), enable_received_notification: Some(false), }), SubscriptionId::Number(3 as u64), processed_sub, ); subscriptions.add_signature_subscription( unprocessed_tx.signatures[0], Some(RpcSignatureSubscribeConfig { commitment: Some(CommitmentConfig::recent()), enable_received_notification: Some(false), }), SubscriptionId::Number(4 as u64), Subscriber::new_test("signatureNotification").0, ); // Add a subscription that gets `received` notifications subscriptions.add_signature_subscription( unprocessed_tx.signatures[0], Some(RpcSignatureSubscribeConfig { commitment: Some(CommitmentConfig::recent()), enable_received_notification: Some(true), }), SubscriptionId::Number(5 as u64), processed_sub3, ); { let sig_subs = subscriptions .subscriptions .signature_subscriptions .read() .unwrap(); assert_eq!(sig_subs.get(&past_bank_tx.signatures[0]).unwrap().len(), 2); assert!(sig_subs.contains_key(&unprocessed_tx.signatures[0])); assert!(sig_subs.contains_key(&processed_tx.signatures[0])); } let mut commitment_slots = CommitmentSlots::default(); let received_slot = 1; commitment_slots.slot = received_slot; subscriptions .notify_signatures_received((received_slot, vec![unprocessed_tx.signatures[0]])); subscriptions.notify_subscribers(commitment_slots); let expected_res = RpcSignatureResult::ProcessedSignature(ProcessedSignatureResult { err: None }); let received_expected_res = 
RpcSignatureResult::ReceivedSignature(ReceivedSignatureResult::ReceivedSignature); struct Notification { slot: Slot, id: u64, } let expected_notification = |exp: Notification, expected_res: &RpcSignatureResult| -> String { let json = json!({ "jsonrpc": "2.0", "method": "signatureNotification", "params": { "result": { "context": { "slot": exp.slot }, "value": expected_res, }, "subscription": exp.id, } }); serde_json::to_string(&json).unwrap() }; // Expect to receive a notification from bank 1 because this subscription is // looking for 0 confirmations and so checks the current bank let expected = expected_notification(Notification { slot: 1, id: 1 }, &expected_res); let (response, _) = robust_poll_or_panic(past_bank_recv1); assert_eq!(expected, response); // Expect to receive a notification from bank 0 because this subscription is // looking for 1 confirmation and so checks the past bank let expected = expected_notification(Notification { slot: 0, id: 2 }, &expected_res); let (response, _) = robust_poll_or_panic(past_bank_recv2); assert_eq!(expected, response); let expected = expected_notification(Notification { slot: 1, id: 3 }, &expected_res); let (response, _) = robust_poll_or_panic(processed_recv); assert_eq!(expected, response); // Expect a "received" notification let expected = expected_notification( Notification { slot: received_slot, id: 5, }, &received_expected_res, ); let (response, _) = robust_poll_or_panic(processed_recv3); assert_eq!(expected, response); // Subscription should be automatically removed after notification let sig_subs = subscriptions .subscriptions .signature_subscriptions .read() .unwrap(); assert!(!sig_subs.contains_key(&processed_tx.signatures[0])); assert!(!sig_subs.contains_key(&past_bank_tx.signatures[0])); // Unprocessed signature subscription should not be removed assert_eq!( sig_subs.get(&unprocessed_tx.signatures[0]).unwrap().len(), 2 ); } #[test] #[serial] fn test_check_slot_subscribe() { let (subscriber, _id_receiver, 
transport_receiver) = Subscriber::new_test("slotNotification"); let sub_id = SubscriptionId::Number(0 as u64); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let subscriptions = RpcSubscriptions::new( &exit, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), optimistically_confirmed_bank, ); subscriptions.add_slot_subscription(sub_id.clone(), subscriber); assert!(subscriptions .subscriptions .slot_subscriptions .read() .unwrap() .contains_key(&sub_id)); subscriptions.notify_slot(0, 0, 0); let (response, _) = robust_poll_or_panic(transport_receiver); let expected_res = SlotInfo { parent: 0, slot: 0, root: 0, }; let expected_res_str = serde_json::to_string(&serde_json::to_value(expected_res).unwrap()).unwrap(); let expected = format!( r#"{{"jsonrpc":"2.0","method":"slotNotification","params":{{"result":{},"subscription":0}}}}"#, expected_res_str ); assert_eq!(expected, response); subscriptions.remove_slot_subscription(&sub_id); assert!(!subscriptions .subscriptions .slot_subscriptions .read() .unwrap() .contains_key(&sub_id)); } #[test] #[serial] fn test_check_root_subscribe() { let (subscriber, _id_receiver, mut transport_receiver) = Subscriber::new_test("rootNotification"); let sub_id = SubscriptionId::Number(0 as u64); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let bank = Bank::new(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let subscriptions = RpcSubscriptions::new( &exit, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), optimistically_confirmed_bank, ); subscriptions.add_root_subscription(sub_id.clone(), subscriber); assert!(subscriptions .subscriptions .root_subscriptions .read() .unwrap() .contains_key(&sub_id)); subscriptions.notify_roots(vec![2, 1, 3]); for expected_root in 1..=3 { let (response, receiver) = robust_poll_or_panic(transport_receiver); transport_receiver = receiver; let expected_res_str = serde_json::to_string(&serde_json::to_value(expected_root).unwrap()).unwrap(); let expected = format!( r#"{{"jsonrpc":"2.0","method":"rootNotification","params":{{"result":{},"subscription":0}}}}"#, expected_res_str ); assert_eq!(expected, response); } subscriptions.remove_root_subscription(&sub_id); assert!(!subscriptions .subscriptions .root_subscriptions .read() .unwrap() .contains_key(&sub_id)); } #[test] #[serial] fn test_add_and_remove_subscription() { let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, SubscriptionData<(), ()>>> = HashMap::new(); let num_keys = 5; for key in 0..num_keys { let (subscriber, _id_receiver, _transport_receiver) = Subscriber::new_test("notification"); let sub_id = SubscriptionId::Number(key); add_subscription(&mut subscriptions, key, None, sub_id, subscriber, 0, None); } // Add another subscription to the "0" key let (subscriber, _id_receiver, _transport_receiver) = Subscriber::new_test("notification"); let extra_sub_id = SubscriptionId::Number(num_keys); add_subscription( &mut subscriptions, 0, None, extra_sub_id.clone(), subscriber, 0, None, ); assert_eq!(subscriptions.len(), num_keys as usize); assert_eq!(subscriptions.get(&0).unwrap().len(), 2); 
assert_eq!(subscriptions.get(&1).unwrap().len(), 1); assert_eq!( remove_subscription(&mut subscriptions, &SubscriptionId::Number(0)), true ); assert_eq!(subscriptions.len(), num_keys as usize); assert_eq!(subscriptions.get(&0).unwrap().len(), 1); assert_eq!( remove_subscription(&mut subscriptions, &SubscriptionId::Number(0)), false ); assert_eq!(remove_subscription(&mut subscriptions, &extra_sub_id), true); assert_eq!(subscriptions.len(), (num_keys - 1) as usize); assert!(subscriptions.get(&0).is_none()); } #[test] #[serial] fn test_gossip_separate_account_notifications() { let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(100); let bank = Bank::new(&genesis_config); let blockhash = bank.last_blockhash(); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); let bank2 = Bank::new_from_parent(&bank0, &Pubkey::default(), 2); bank_forks.write().unwrap().insert(bank2); let alice = Keypair::new(); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let mut pending_optimistically_confirmed_banks = HashSet::new(); let (subscriber0, _id_receiver, transport_receiver0) = Subscriber::new_test("accountNotification"); let (subscriber1, _id_receiver, transport_receiver1) = Subscriber::new_test("accountNotification"); let exit = Arc::new(AtomicBool::new(false)); let subscriptions = Arc::new(RpcSubscriptions::new( &exit, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_slots( 1, 1, ))), optimistically_confirmed_bank.clone(), )); let sub_id0 = SubscriptionId::Number(0 as u64); subscriptions.add_account_subscription( alice.pubkey(), Some(RpcAccountInfoConfig { commitment: Some(CommitmentConfig::single_gossip()), encoding: None, data_slice: None, }), sub_id0.clone(), subscriber0, ); 
assert!(subscriptions .subscriptions .gossip_account_subscriptions .read() .unwrap() .contains_key(&alice.pubkey())); let tx = system_transaction::create_account( &mint_keypair, &alice, blockhash, 1, 16, &solana_stake_program::id(), ); // Add the transaction to the 1st bank and then freeze the bank let bank1 = bank_forks.write().unwrap().get(1).cloned().unwrap(); bank1.process_transaction(&tx).unwrap(); bank1.freeze(); // Add the same transaction to the unfrozen 2nd bank bank_forks .write() .unwrap() .get(2) .unwrap() .process_transaction(&tx) .unwrap(); // First, notify the unfrozen bank first to queue pending notification OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(2), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, ); // Now, notify the frozen bank and ensure its notifications are processed OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(1), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, ); let (response, _) = robust_poll_or_panic(transport_receiver0); let expected = json!({ "jsonrpc": "2.0", "method": "accountNotification", "params": { "result": { "context": { "slot": 1 }, "value": { "data": "1111111111111111", "executable": false, "lamports": 1, "owner": "Stake11111111111111111111111111111111111111", "rentEpoch": 0, }, }, "subscription": 0, } }); assert_eq!(serde_json::to_string(&expected).unwrap(), response); subscriptions.remove_account_subscription(&sub_id0); let sub_id1 = SubscriptionId::Number(1 as u64); subscriptions.add_account_subscription( alice.pubkey(), Some(RpcAccountInfoConfig { commitment: Some(CommitmentConfig::single_gossip()), encoding: None, data_slice: None, }), sub_id1.clone(), subscriber1, ); let bank2 = bank_forks.read().unwrap().get(2).unwrap().clone(); OptimisticallyConfirmedBankTracker::process_notification( 
BankNotification::Frozen(bank2), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, ); let (response, _) = robust_poll_or_panic(transport_receiver1); let expected = json!({ "jsonrpc": "2.0", "method": "accountNotification", "params": { "result": { "context": { "slot": 2 }, "value": { "data": "1111111111111111", "executable": false, "lamports": 1, "owner": "Stake11111111111111111111111111111111111111", "rentEpoch": 0, }, }, "subscription": 1, } }); assert_eq!(serde_json::to_string(&expected).unwrap(), response); subscriptions.remove_account_subscription(&sub_id1); assert!(!subscriptions .subscriptions .gossip_account_subscriptions .read() .unwrap() .contains_key(&alice.pubkey())); } }
36.624388
111
0.566284
dd5a1a46d9ba46251dc0228edffdcaa544124381
6,633
// Copyright (c) The Dijets Core Contributors // SPDX-License-Identifier: Apache-2.0 pub mod command_adapter; pub mod local_fs; #[cfg(test)] mod test_util; #[cfg(test)] mod tests; use crate::storage::{ command_adapter::{CommandAdapter, CommandAdapterOpt}, local_fs::{LocalFs, LocalFsOpt}, }; use anyhow::{ensure, Result}; use async_trait::async_trait; use once_cell::sync::Lazy; #[cfg(test)] use proptest::prelude::*; use regex::Regex; #[cfg(test)] use std::convert::TryInto; use std::{convert::TryFrom, ops::Deref, str::FromStr, sync::Arc}; use structopt::StructOpt; use tokio::io::{AsyncRead, AsyncWrite}; /// String returned by a specific storage implementation to identify a backup, probably a folder name /// which is exactly the same with the backup name we pass into `create_backup()` /// This is created and returned by the storage when `create_backup()`, passed back to the storage /// when `create_for_write()` and persisted nowhere (once a backup is created, files are referred to /// by `FileHandle`s). pub type BackupHandle = String; pub type BackupHandleRef = str; /// URI pointing to a file in a backup storage, like "s3:///bucket/path/file". /// These are created by the storage when `create_for_write()`, stored in manifests by the backup /// controller, and passed back to the storage when `open_for_read()` by the restore controller /// to retrieve a file referred to in the manifest. pub type FileHandle = String; pub type FileHandleRef = str; /// Through this, the backup controller promises to the storage the names passed to /// `create_backup()` and `create_for_write()` don't contain funny characters tricky to deal with /// in shell commands. 
/// Specifically, names follow the pattern "\A[a-zA-Z0-9][a-zA-Z0-9._-]{0,126}\z" #[cfg_attr(test, derive(Hash, Eq, PartialEq))] #[derive(Debug)] pub struct ShellSafeName(String); impl ShellSafeName { const PATTERN: &'static str = r"\A[a-zA-Z0-9][a-zA-Z0-9._-]{0,126}\z"; fn sanitize(name: &str) -> Result<()> { static RE: Lazy<Regex> = Lazy::new(|| Regex::new(ShellSafeName::PATTERN).unwrap()); ensure!(RE.is_match(name), "Illegal name: {}", name,); Ok(()) } } impl TryFrom<String> for ShellSafeName { type Error = anyhow::Error; fn try_from(value: String) -> Result<Self> { Self::sanitize(&value).map(|_| Self(value)) } } impl FromStr for ShellSafeName { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self> { Self::sanitize(s).map(|_| Self(s.to_string())) } } impl Deref for ShellSafeName { type Target = String; fn deref(&self) -> &Self::Target { &self.0 } } impl AsRef<str> for ShellSafeName { fn as_ref(&self) -> &str { &self.0 } } #[cfg(test)] impl Arbitrary for ShellSafeName { type Parameters = (); type Strategy = BoxedStrategy<Self>; fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { (&ShellSafeName::PATTERN[2..(ShellSafeName::PATTERN.len() - 2)]) // remove \A and \z .prop_map(|s| s.try_into().unwrap()) .boxed() } } #[cfg_attr(test, derive(Debug, Hash, Eq, Ord, PartialEq, PartialOrd))] pub struct TextLine(String); impl TextLine { pub fn new(value: &str) -> Result<Self> { let newlines: &[_] = &['\n', '\r']; ensure!(value.find(newlines).is_none(), "Newline not allowed."); let mut ret = value.to_string(); ret.push('\n'); Ok(Self(ret)) } } impl AsRef<str> for TextLine { fn as_ref(&self) -> &str { &self.0 } } #[cfg(test)] impl Arbitrary for TextLine { type Parameters = (); type Strategy = BoxedStrategy<Self>; fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { ("[^\r\n]{0,1024}") .prop_map(|s| TextLine::new(&s).unwrap()) .boxed() } } #[async_trait] pub trait BackupStorage: Send + Sync { /// Hint that a bunch of files are gonna be created 
related to a backup identified by `name`, /// which is unique to the content of the backup, i.e. it won't be the same name unless you are /// backing up exactly the same thing. /// Storage can choose to take actions like create a dedicated folder or do nothing. /// Returns a string to identify this operation in potential succeeding file creation requests. async fn create_backup(&self, name: &ShellSafeName) -> Result<BackupHandle>; /// Ask to create a file for write, `backup_handle` was returned by `create_backup` to identify /// the current backup. async fn create_for_write( &self, backup_handle: &BackupHandleRef, name: &ShellSafeName, ) -> Result<(FileHandle, Box<dyn AsyncWrite + Send + Unpin>)>; /// Open file for reading. async fn open_for_read( &self, file_handle: &FileHandleRef, ) -> Result<Box<dyn AsyncRead + Send + Unpin>>; /// Asks to save a metadata entry. A metadata entry is one line of text. /// The backup system doesn't expect a metadata entry to exclusively map to a single file /// handle, or the same file handle when accessed later, so there's no need to return one. This /// also means a local cache must download each metadata file from remote at least once, to /// uncover potential storage glitch sooner. /// Behavior on duplicated names is undefined, overwriting the content upon an existing name /// is straightforward and acceptable. /// See `list_metadata_files`. async fn save_metadata_line(&self, name: &ShellSafeName, content: &TextLine) -> Result<()>; /// The backup system always asks for all metadata files and cache and build index on top of /// the content of them. This means: /// 1. The storage is free to reorganise the metadata files, like combining multiple ones to /// reduce fragmentation. /// 2. But the cache does expect the content stays the same for a file handle, so when /// reorganising metadata files, give them new unique names. 
async fn list_metadata_files(&self) -> Result<Vec<FileHandle>>; } #[derive(StructOpt)] pub enum StorageOpt { #[structopt(about = "Select the LocalFs backup store.")] LocalFs(LocalFsOpt), #[structopt(about = "Select the CommandAdapter backup store.")] CommandAdapter(CommandAdapterOpt), } impl StorageOpt { pub async fn init_storage(self) -> Result<Arc<dyn BackupStorage>> { Ok(match self { StorageOpt::LocalFs(opt) => Arc::new(LocalFs::new_with_opt(opt)), StorageOpt::CommandAdapter(opt) => Arc::new(CommandAdapter::new_with_opt(opt).await?), }) } }
35.281915
101
0.665008
abd1aa29d780d498b6c6a0034f55a063e36472f0
6,794
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. #![warn(missing_docs)] //! [DataFusion-ObjectStore-S3](https://github.com/datafusion-contrib/datafusion-objectstore-s3) //! provides a `TableProvider` interface for using `Datafusion` to query data in S3. This includes AWS S3 //! and services such as MinIO that implement the S3 API. //! //! ## Examples //! Examples for querying AWS and other implementors, such as MinIO, are shown below. //! //! Load credentials from default AWS credential provider (such as environment or ~/.aws/credentials) //! //! ```rust //! # use std::sync::Arc; //! # use datafusion::error::Result; //! # use datafusion_objectstore_s3::object_store::s3::S3FileSystem; //! # #[tokio::main] //! # async fn main() -> Result<()> { //! let s3_file_system = Arc::new(S3FileSystem::default().await); //! # Ok(()) //! # } //! ``` //! //! `S3FileSystem::default()` is a convenience wrapper for `S3FileSystem::new(None, None, None, None, None, None)`. //! //! Connect to implementor of S3 API (MinIO, in this case) using access key and secret. //! //! ```rust //! use datafusion_objectstore_s3::object_store::s3::S3FileSystem; //! //! use aws_types::credentials::SharedCredentialsProvider; //! use aws_types::credentials::Credentials; //! 
use aws_sdk_s3::Endpoint; //! use http::Uri; //! //! # #[tokio::main] //! # async fn main() { //! // Example credentials provided by MinIO //! const MINIO_ACCESS_KEY_ID: &str = "AKIAIOSFODNN7EXAMPLE"; //! const MINIO_SECRET_ACCESS_KEY: &str = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; //! const PROVIDER_NAME: &str = "Static"; //! const MINIO_ENDPOINT: &str = "http://localhost:9000"; //! //! let s3_file_system = S3FileSystem::new( //! Some(SharedCredentialsProvider::new(Credentials::new( //! MINIO_ACCESS_KEY_ID, //! MINIO_SECRET_ACCESS_KEY, //! None, //! None, //! PROVIDER_NAME, //! ))), // SharedCredentialsProvider //! None, //Region //! Some(Endpoint::immutable(Uri::from_static(MINIO_ENDPOINT))), //Endpoint //! None, // RetryConfig //! None, // AsyncSleep //! None, // TimeoutConfig //! ) //! .await; //! # } //! ``` //! //! Using DataFusion's `ListingOtions` and `ListingTable` we register a table into a DataFusion `ExecutionContext` so that it can be queried. //! //! ```rust //! use std::sync::Arc; //! //! use datafusion::datasource::listing::*; //! use datafusion::datasource::TableProvider; //! use datafusion::prelude::ExecutionContext; //! use datafusion::datasource::file_format::parquet::ParquetFormat; //! use datafusion::error::Result; //! //! use datafusion_objectstore_s3::object_store::s3::S3FileSystem; //! //! use aws_types::credentials::SharedCredentialsProvider; //! use aws_types::credentials::Credentials; //! use aws_sdk_s3::Endpoint; //! use http::Uri; //! //! # const MINIO_ACCESS_KEY_ID: &str = "AKIAIOSFODNN7EXAMPLE"; //! # const MINIO_SECRET_ACCESS_KEY: &str = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; //! # const PROVIDER_NAME: &str = "Static"; //! # const MINIO_ENDPOINT: &str = "http://localhost:9000"; //! //! # #[tokio::main] //! # async fn main() -> Result<()> { //! let filename = "data/alltypes_plain.snappy.parquet"; //! //! # let s3_file_system = Arc::new(S3FileSystem::new( //! # Some(SharedCredentialsProvider::new(Credentials::new( //! 
# MINIO_ACCESS_KEY_ID, //! # MINIO_SECRET_ACCESS_KEY, //! # None, //! # None, //! # PROVIDER_NAME, //! # ))), //! # None, //! # Some(Endpoint::immutable(Uri::from_static(MINIO_ENDPOINT))), //! # None, //! # None, //! # None, //! # ) //! # .await); //! //! let config = ListingTableConfig::new(s3_file_system, filename).infer().await?; //! //! let table = ListingTable::try_new(config)?; //! //! let mut ctx = ExecutionContext::new(); //! //! ctx.register_table("tbl", Arc::new(table))?; //! //! let df = ctx.sql("SELECT * FROM tbl").await?; //! df.show(); //! # Ok(()) //! # } //! ``` //! //! We can also register the `S3FileSystem` directly as an `ObjectStore` on an `ExecutionContext`. This provides an idiomatic way of creating `TableProviders` that can be queried. //! //! ```rust //! use std::sync::Arc; //! //! use datafusion::datasource::listing::*; //! use datafusion::datasource::TableProvider; //! use datafusion::prelude::ExecutionContext; //! use datafusion::datasource::file_format::parquet::ParquetFormat; //! use datafusion::error::Result; //! //! use datafusion_objectstore_s3::object_store::s3::S3FileSystem; //! //! use aws_types::credentials::SharedCredentialsProvider; //! use aws_types::credentials::Credentials; //! use aws_sdk_s3::Endpoint; //! use http::Uri; //! //! # const MINIO_ACCESS_KEY_ID: &str = "AKIAIOSFODNN7EXAMPLE"; //! # const MINIO_SECRET_ACCESS_KEY: &str = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; //! # const PROVIDER_NAME: &str = "Static"; //! # const MINIO_ENDPOINT: &str = "http://localhost:9000"; //! //! # #[tokio::main] //! # async fn main() -> Result<()> { //! # let s3_file_system = Arc::new(S3FileSystem::new( //! # Some(SharedCredentialsProvider::new(Credentials::new( //! # MINIO_ACCESS_KEY_ID, //! # MINIO_SECRET_ACCESS_KEY, //! # None, //! # None, //! # PROVIDER_NAME, //! # ))), //! # None, //! # Some(Endpoint::immutable(Uri::from_static(MINIO_ENDPOINT))), //! # None, //! # None, //! # None, //! # ) //! # .await); //! 
let mut ctx = ExecutionContext::new(); //! //! let uri = "s3://data/alltypes_plain.snappy.parquet"; //! let (_, filename) = uri.split_once("://").unwrap(); //! //! ctx.register_object_store("s3", s3_file_system.clone()); //! //! let (object_store, name) = ctx.object_store(uri)?; //! //! let config = ListingTableConfig::new(s3_file_system, filename).infer().await?; //! //! let table = ListingTable::try_new(config)?; //! //! ctx.register_table("tbl", Arc::new(table))?; //! //! let df = ctx.sql("SELECT * FROM tbl").await?; //! df.show(); //! # Ok(()) //! # } //! ``` //! pub mod error; pub mod object_store;
34.140704
179
0.650574
d7efe18690b370e33069909b9e8075d7f7dc5c24
5,129
use std::sync::Arc; use bonsaidb_core::{permissions::PermissionDenied, schema, schema::InsertError, AnyError}; use schema::InvalidNameError; /// An error occurred while interacting with a [`Server`](crate::Server). #[derive(Debug, thiserror::Error)] pub enum Error { /// An error occurred from the QUIC transport layer. #[error("a networking error occurred: '{0}'")] Transport(String), #[cfg(feature = "websockets")] /// An error occurred from the Websocket transport layer. #[error("a websocket error occurred: '{0}'")] WebSocket(#[from] tokio_tungstenite::tungstenite::Error), /// An error occurred from IO #[error("a networking error occurred: '{0}'")] Io(#[from] tokio::io::Error), /// An error occurred while processing a request #[error("an error occurred processing a request: '{0}'")] Request(Arc<dyn AnyError>), /// An error occurred from within the schema. #[error("error from core {0}")] Core(#[from] bonsaidb_core::Error), /// An internal error occurred while waiting for a message. #[error("error while waiting for a message")] InternalCommunication, /// An error occurred while interacting with a local database. #[error("an error occurred interacting with a database: {0}")] Database(#[from] bonsaidb_local::Error), /// An error occurred with a certificate. #[error("a certificate error: {0}")] Certificate(#[from] fabruic::error::Certificate), /// An error occurred parsing a PEM file. #[error("an invalid PEM file: {0}")] #[cfg(feature = "pem")] Pem(#[from] pem::PemError), /// An error occurred requesting an ACME certificate. #[error("an error requesting an ACME certificate: {0}")] #[cfg(feature = "acme")] Acme(#[from] async_acme::acme::AcmeError), /// An error occurred while processing an ACME order. #[error("an error occurred while processing an ACME order: {0}")] #[cfg(feature = "acme")] AcmeOrder(#[from] async_acme::rustls_helper::OrderError), /// An error occurred during tls signing. 
#[error("an error occurred during tls signing")] TlsSigningError, } impl From<Error> for bonsaidb_core::Error { fn from(other: Error) -> Self { // without it, there's no way to get this to_string() easily. match other { Error::Core(core) | Error::Database(bonsaidb_local::Error::Core(core)) => core, Error::Database(storage) => Self::Database(storage.to_string()), Error::Io(io) => Self::Io(io.to_string()), Error::Transport(networking) => Self::Transport(networking), #[cfg(feature = "websockets")] Error::WebSocket(err) => Self::Websocket(err.to_string()), err => Self::Server(err.to_string()), } } } impl From<flume::RecvError> for Error { fn from(_: flume::RecvError) -> Self { Self::InternalCommunication } } impl From<tokio::sync::oneshot::error::RecvError> for Error { fn from(_: tokio::sync::oneshot::error::RecvError) -> Self { Self::InternalCommunication } } impl From<tokio::sync::oneshot::error::TryRecvError> for Error { fn from(_: tokio::sync::oneshot::error::TryRecvError) -> Self { Self::InternalCommunication } } impl From<PermissionDenied> for Error { fn from(err: PermissionDenied) -> Self { Self::Core(bonsaidb_core::Error::PermissionDenied(err)) } } impl From<InvalidNameError> for Error { fn from(err: InvalidNameError) -> Self { Self::Core(bonsaidb_core::Error::InvalidName(err)) } } impl From<rustls::sign::SignError> for Error { fn from(_: rustls::sign::SignError) -> Self { Self::TlsSigningError } } impl<T> From<InsertError<T>> for Error { fn from(error: InsertError<T>) -> Self { Self::from(error.error) } } pub trait ResultExt<R> { fn map_err_to_core(self) -> Result<R, bonsaidb_core::Error> where Self: Sized; } impl<R> ResultExt<R> for Result<R, Error> { fn map_err_to_core(self) -> Result<R, bonsaidb_core::Error> where Self: Sized, { self.map_err(bonsaidb_core::Error::from) } } #[cfg(feature = "websockets")] impl From<bincode::Error> for Error { fn from(other: bincode::Error) -> Self { Self::Core(bonsaidb_core::Error::Websocket(format!( "error deserializing 
message: {:?}", other ))) } } macro_rules! impl_from_fabruic { ($error:ty) => { impl From<$error> for Error { fn from(other: $error) -> Self { Self::Core(bonsaidb_core::Error::Transport(other.to_string())) } } }; } impl_from_fabruic!(fabruic::error::CertificateChain); impl_from_fabruic!(fabruic::error::Receiver); impl_from_fabruic!(fabruic::error::Connecting); impl_from_fabruic!(fabruic::error::PrivateKey); impl_from_fabruic!(fabruic::error::KeyPair); impl_from_fabruic!(fabruic::error::Connection); impl_from_fabruic!(fabruic::error::Incoming); impl_from_fabruic!(fabruic::error::AlreadyClosed); impl_from_fabruic!(fabruic::error::Config);
31.466258
91
0.643205
d70c037d32a5a04e95eed4c28fffcab974528a35
36,526
// ---------------------------------------------------------------------------
// Babelify: conversions from swc's TypeScript AST (`swc_ecma_ast::Ts*`) into
// Babel/estree-compatible nodes (`swc_estree_ast::TS*`). Each impl maps one
// node kind, attaching span info via `ctx.base(..)` and recursing with
// `babelify`. Code below is unchanged; only comments have been added.
// ---------------------------------------------------------------------------
use copyless::BoxHelper; use serde::{Deserialize, Serialize}; use swc_atoms::{js_word, JsWord}; use swc_common::Spanned; use swc_ecma_ast::{ Accessibility, Expr, MemberProp, Pat, TruePlusMinus, TsArrayType, TsAsExpr, TsCallSignatureDecl, TsConditionalType, TsConstAssertion, TsConstructSignatureDecl, TsConstructorType, TsEntityName, TsEnumDecl, TsEnumMember, TsEnumMemberId, TsExportAssignment, TsExprWithTypeArgs, TsExternalModuleRef, TsFnOrConstructorType, TsFnParam, TsFnType, TsImportEqualsDecl, TsImportType, TsIndexSignature, TsIndexedAccessType, TsInferType, TsInterfaceBody, TsInterfaceDecl, TsIntersectionType, TsKeywordType, TsKeywordTypeKind, TsLit, TsLitType, TsMappedType, TsMethodSignature, TsModuleBlock, TsModuleDecl, TsModuleName, TsModuleRef, TsNamespaceBody, TsNamespaceDecl, TsNamespaceExportDecl, TsNonNullExpr, TsOptionalType, TsParamProp, TsParamPropParam, TsParenthesizedType, TsPropertySignature, TsQualifiedName, TsRestType, TsThisType, TsThisTypeOrIdent, TsTplLitType, TsTupleElement, TsTupleType, TsType, TsTypeAliasDecl, TsTypeAnn, TsTypeAssertion, TsTypeElement, TsTypeLit, TsTypeOperator, TsTypeOperatorOp, TsTypeParam, TsTypeParamDecl, TsTypeParamInstantiation, TsTypePredicate, TsTypeQuery, TsTypeQueryExpr, TsTypeRef, TsUnionOrIntersectionType, TsUnionType, }; use swc_estree_ast::{ Access, ArrayPattern, IdOrRest, IdOrString, Identifier, ObjectPattern, RestElement, TSAnyKeyword, TSArrayType, TSAsExpression, TSBigIntKeyword, TSBooleanKeyword, TSCallSignatureDeclaration, TSConditionalType, TSConstructSignatureDeclaration, TSConstructorType, TSEntityName, TSEnumDeclaration, TSEnumMember, TSExportAssignment, TSExpressionWithTypeArguments, TSExternalModuleReference, TSFunctionType, TSImportEqualsDeclModuleRef, TSImportEqualsDeclaration, TSImportType, TSIndexSignature, TSIndexedAccessType, TSInferType, TSInterfaceBody, TSInterfaceDeclaration, TSIntersectionType, TSIntrinsicKeyword, TSLiteralType, TSLiteralTypeLiteral, TSMappedType, TSMethodSignature,
// (swc_estree_ast import list continues.) After the imports: impls for type
// annotations (TsTypeAnn) and function types (TsFnType), plus the
// TsFnParamOutput helper enum and the TsFnParam -> output mapping.
TSModuleBlock, TSModuleDeclBody, TSModuleDeclaration, TSNamedTupleMember, TSNamespaceExportDeclaration, TSNeverKeyword, TSNonNullExpression, TSNullKeyword, TSNumberKeyword, TSObjectKeyword, TSOptionalType, TSParamPropParam, TSParameterProperty, TSParenthesizedType, TSPropertySignature, TSQualifiedName, TSRestType, TSStringKeyword, TSSymbolKeyword, TSThisType, TSTupleType, TSTupleTypeElType, TSType, TSTypeAliasDeclaration, TSTypeAnnotation, TSTypeAssertion, TSTypeElement, TSTypeLiteral, TSTypeOperator, TSTypeParameter, TSTypeParameterDeclaration, TSTypeParameterInstantiation, TSTypePredicate, TSTypePredicateParamName, TSTypeQuery, TSTypeQueryExprName, TSTypeReference, TSUndefinedKeyword, TSUnionType, TSUnknownKeyword, TSVoidKeyword, }; use crate::babelify::{Babelify, Context}; impl Babelify for TsTypeAnn { type Output = TSTypeAnnotation; fn babelify(self, ctx: &Context) -> Self::Output { TSTypeAnnotation { base: ctx.base(self.span), type_annotation: self.type_ann.babelify(ctx), } } } impl Babelify for TsFnType { type Output = TSFunctionType; fn babelify(self, ctx: &Context) -> Self::Output { TSFunctionType { base: ctx.base(self.span), parameters: self .params .into_iter() .map(|p| p.babelify(ctx).into()) .collect(), type_parameters: self.type_params.babelify(ctx), type_annotation: Some(Box::alloc().init(self.type_ann.babelify(ctx))), } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum TsFnParamOutput { Id(Identifier), Array(ArrayPattern), Rest(RestElement), Object(ObjectPattern), } impl Babelify for TsFnParam { type Output = TsFnParamOutput; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsFnParam::Ident(i) => TsFnParamOutput::Id(i.babelify(ctx)), TsFnParam::Array(a) => TsFnParamOutput::Array(a.babelify(ctx)), TsFnParam::Rest(r) => TsFnParamOutput::Rest(r.babelify(ctx)), TsFnParam::Object(o) => TsFnParamOutput::Object(o.babelify(ctx)), } } } impl From<TsFnParamOutput> for IdOrRest { fn from(o: TsFnParamOutput) -> Self { match o {
// From-conversions for TsFnParamOutput (these panic on variants that have no
// Babel counterpart), then type-parameter conversions (TsTypeParamDecl /
// TsTypeParam / TsTypeParamInstantiation) and constructor parameter
// properties (TsParamProp / TsParamPropParam), and the start of
// TsQualifiedName (`a.b` entity names).
TsFnParamOutput::Id(i) => IdOrRest::Id(i), TsFnParamOutput::Rest(r) => IdOrRest::Rest(r), _ => panic!("illegal conversion: Cannot convert {:?} to IdOrRest", &o), } } } impl From<TsFnParamOutput> for Identifier { fn from(o: TsFnParamOutput) -> Self { match o { TsFnParamOutput::Id(i) => i, _ => panic!("illegal conversion: Cannot convert {:?} to Identifier", &o), } } } impl Babelify for TsTypeParamDecl { type Output = TSTypeParameterDeclaration; fn babelify(self, ctx: &Context) -> Self::Output { TSTypeParameterDeclaration { base: ctx.base(self.span), params: self.params.babelify(ctx), } } } impl Babelify for TsTypeParam { type Output = TSTypeParameter; fn babelify(self, ctx: &Context) -> Self::Output { TSTypeParameter { base: ctx.base(self.span), name: self.name.sym, is_in: self.is_in, is_out: self.is_out, constraint: self.constraint.map(|c| Box::alloc().init(c.babelify(ctx))), default: self.default.map(|d| Box::alloc().init(d.babelify(ctx))), } } } impl Babelify for TsTypeParamInstantiation { type Output = TSTypeParameterInstantiation; fn babelify(self, ctx: &Context) -> Self::Output { TSTypeParameterInstantiation { base: ctx.base(self.span), params: self.params.into_iter().map(|v| v.babelify(ctx)).collect(), } } } impl Babelify for TsParamProp { type Output = TSParameterProperty; fn babelify(self, ctx: &Context) -> Self::Output { TSParameterProperty { base: ctx.base(self.span), parameter: self.param.babelify(ctx), accessibility: self.accessibility.map(|access| access.babelify(ctx)), readonly: Some(self.readonly), } } } impl Babelify for TsParamPropParam { type Output = TSParamPropParam; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsParamPropParam::Ident(i) => TSParamPropParam::Id(i.babelify(ctx)), TsParamPropParam::Assign(a) => TSParamPropParam::Assignment(a.babelify(ctx)), } } } impl Babelify for TsQualifiedName { type Output = TSQualifiedName; fn babelify(self, ctx: &Context) -> Self::Output { TSQualifiedName { base: ctx.base(self.span()),
// Entity names and interface/type-literal member dispatch; note getter and
// setter signatures are not implemented and panic. Then call/construct
// signature declarations and the start of TsPropertySignature.
left: Box::alloc().init(self.left.babelify(ctx)), right: self.right.babelify(ctx), } } } impl Babelify for TsEntityName { type Output = TSEntityName; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsEntityName::TsQualifiedName(n) => TSEntityName::Qualified(n.babelify(ctx)), TsEntityName::Ident(i) => TSEntityName::Id(i.babelify(ctx)), } } } impl Babelify for TsTypeElement { type Output = TSTypeElement; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsTypeElement::TsCallSignatureDecl(t) => { TSTypeElement::CallSignatureDecl(t.babelify(ctx)) } TsTypeElement::TsConstructSignatureDecl(t) => { TSTypeElement::ConstructSignatureDecl(t.babelify(ctx)) } TsTypeElement::TsPropertySignature(t) => TSTypeElement::PropSignature(t.babelify(ctx)), TsTypeElement::TsMethodSignature(t) => TSTypeElement::MethodSignature(t.babelify(ctx)), TsTypeElement::TsIndexSignature(t) => TSTypeElement::IndexSignature(t.babelify(ctx)), TsTypeElement::TsGetterSignature(_) => panic!("unimplemented"), TsTypeElement::TsSetterSignature(_) => panic!("unimplemented"), } } } impl Babelify for TsCallSignatureDecl { type Output = TSCallSignatureDeclaration; fn babelify(self, ctx: &Context) -> Self::Output { TSCallSignatureDeclaration { base: ctx.base(self.span), type_parameters: self.type_params.map(|t| t.babelify(ctx)), parameters: self .params .into_iter() .map(|param| param.babelify(ctx).into()) .collect(), type_annotation: self .type_ann .map(|ann| Box::alloc().init(ann.babelify(ctx))), } } } impl Babelify for TsConstructSignatureDecl { type Output = TSConstructSignatureDeclaration; fn babelify(self, ctx: &Context) -> Self::Output { TSConstructSignatureDeclaration { base: ctx.base(self.span), type_parameters: self.type_params.map(|t| t.babelify(ctx)), parameters: self .params .into_iter() .map(|param| param.babelify(ctx).into()) .collect(), type_annotation: self .type_ann .map(|ann| Box::alloc().init(ann.babelify(ctx))), } } } impl Babelify for TsPropertySignature { type
// Property, method, and index signatures, then the start of the central
// TsType -> TSType dispatch (keyword-type arms first).
Output = TSPropertySignature; fn babelify(self, ctx: &Context) -> Self::Output { TSPropertySignature { base: ctx.base(self.span), key: Box::alloc().init(self.key.babelify(ctx).into()), type_annotation: self .type_ann .map(|ann| Box::alloc().init(ann.babelify(ctx))), initializer: self.init.map(|i| Box::alloc().init(i.babelify(ctx).into())), computed: Some(self.computed), optional: Some(self.optional), readonly: Some(self.readonly), } } } impl Babelify for TsMethodSignature { type Output = TSMethodSignature; fn babelify(self, ctx: &Context) -> Self::Output { TSMethodSignature { base: ctx.base(self.span), key: Box::alloc().init(self.key.babelify(ctx).into()), type_parameters: self.type_params.map(|t| t.babelify(ctx)), parameters: self .params .into_iter() .map(|param| param.babelify(ctx).into()) .collect(), type_annotation: self .type_ann .map(|ann| Box::alloc().init(ann.babelify(ctx))), computed: Some(self.computed), optional: Some(self.optional), } } } impl Babelify for TsIndexSignature { type Output = TSIndexSignature; fn babelify(self, ctx: &Context) -> Self::Output { TSIndexSignature { base: ctx.base(self.span), parameters: self .params .into_iter() .map(|param| param.babelify(ctx).into()) .collect(), type_annotation: self .type_ann .map(|ann| Box::alloc().init(ann.babelify(ctx))), readonly: Some(self.readonly), } } } impl Babelify for TsType { type Output = TSType; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsType::TsKeywordType(t) => match t.babelify(ctx) { TsKeywordTypeOutput::Any(a) => TSType::AnyKeyword(a), TsKeywordTypeOutput::Unknown(u) => TSType::UnknownKeyword(u), TsKeywordTypeOutput::Number(n) => TSType::NumberKeyword(n), TsKeywordTypeOutput::Object(o) => TSType::ObjectKeyword(o), TsKeywordTypeOutput::Boolean(b) => TSType::BooleanKeyword(b), TsKeywordTypeOutput::BigInt(i) => TSType::BigIntKeyword(i), TsKeywordTypeOutput::String(s) => TSType::StringKeyword(s), TsKeywordTypeOutput::Symbol(s) => TSType::SymbolKeyword(s),
// Remaining TsType arms (this/function/reference/query/literal/array/tuple/
// optional/rest/union/conditional/infer/parenthesized/operator/indexed/
// mapped/literal/predicate/import), then the helper enum distinguishing
// function vs constructor types.
TsKeywordTypeOutput::Void(v) => TSType::VoidKeyword(v), TsKeywordTypeOutput::Undefined(u) => TSType::UndefinedKeyword(u), TsKeywordTypeOutput::Null(n) => TSType::NullKeyword(n), TsKeywordTypeOutput::Never(n) => TSType::NeverKeyword(n), TsKeywordTypeOutput::Intrinsic(i) => TSType::IntrinsicKeyword(i), }, TsType::TsThisType(t) => TSType::This(t.babelify(ctx)), TsType::TsFnOrConstructorType(t) => match t.babelify(ctx) { TsFnOrConstructorTypeOutput::Func(f) => TSType::Function(f), TsFnOrConstructorTypeOutput::Constructor(c) => TSType::Constructor(c), }, TsType::TsTypeRef(r) => TSType::TypeRef(r.babelify(ctx)), TsType::TsTypeQuery(q) => TSType::TypeQuery(q.babelify(ctx)), TsType::TsTypeLit(l) => TSType::TypeLiteral(l.babelify(ctx)), TsType::TsArrayType(a) => TSType::Array(a.babelify(ctx)), TsType::TsTupleType(t) => TSType::Tuple(t.babelify(ctx)), TsType::TsOptionalType(o) => TSType::Optional(o.babelify(ctx)), TsType::TsRestType(r) => TSType::Rest(r.babelify(ctx)), TsType::TsUnionOrIntersectionType(t) => match t.babelify(ctx) { TsUnionOrIntersectionTypeOutput::Union(u) => TSType::Union(u), TsUnionOrIntersectionTypeOutput::Intersection(i) => TSType::Intersection(i), }, TsType::TsConditionalType(c) => TSType::Conditional(c.babelify(ctx)), TsType::TsInferType(i) => TSType::Infer(i.babelify(ctx)), TsType::TsParenthesizedType(p) => TSType::Parenthesized(p.babelify(ctx)), TsType::TsTypeOperator(o) => TSType::TypeOp(o.babelify(ctx)), TsType::TsIndexedAccessType(a) => TSType::IndexedAccess(a.babelify(ctx)), TsType::TsMappedType(m) => TSType::Mapped(m.babelify(ctx)), TsType::TsLitType(l) => TSType::Literal(l.babelify(ctx)), TsType::TsTypePredicate(p) => TSType::TypePredicate(p.babelify(ctx)), TsType::TsImportType(i) => TSType::Import(i.babelify(ctx)), } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum TsFnOrConstructorTypeOutput { Func(TSFunctionType), Constructor(TSConstructorType), } impl Babelify for TsFnOrConstructorType { type Output =
// Keyword types (`any`, `string`, `number`, ...) each map to a dedicated
// Babel node via the TsKeywordTypeOutput helper enum.
TsFnOrConstructorTypeOutput; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsFnOrConstructorType::TsFnType(t) => { TsFnOrConstructorTypeOutput::Func(t.babelify(ctx)) } TsFnOrConstructorType::TsConstructorType(t) => { TsFnOrConstructorTypeOutput::Constructor(t.babelify(ctx)) } } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum TsKeywordTypeOutput { Any(TSAnyKeyword), Unknown(TSUnknownKeyword), Number(TSNumberKeyword), Object(TSObjectKeyword), Boolean(TSBooleanKeyword), BigInt(TSBigIntKeyword), String(TSStringKeyword), Symbol(TSSymbolKeyword), Void(TSVoidKeyword), Undefined(TSUndefinedKeyword), Null(TSNullKeyword), Never(TSNeverKeyword), Intrinsic(TSIntrinsicKeyword), } impl Babelify for TsKeywordType { type Output = TsKeywordTypeOutput; fn babelify(self, ctx: &Context) -> Self::Output { match self.kind { TsKeywordTypeKind::TsAnyKeyword => TsKeywordTypeOutput::Any(TSAnyKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsUnknownKeyword => TsKeywordTypeOutput::Unknown(TSUnknownKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsNumberKeyword => TsKeywordTypeOutput::Number(TSNumberKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsObjectKeyword => TsKeywordTypeOutput::Object(TSObjectKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsBooleanKeyword => TsKeywordTypeOutput::Boolean(TSBooleanKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsBigIntKeyword => TsKeywordTypeOutput::BigInt(TSBigIntKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsStringKeyword => TsKeywordTypeOutput::String(TSStringKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsSymbolKeyword => TsKeywordTypeOutput::Symbol(TSSymbolKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsVoidKeyword => TsKeywordTypeOutput::Void(TSVoidKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsUndefinedKeyword => { TsKeywordTypeOutput::Undefined(TSUndefinedKeyword { base: ctx.base(self.span), }) }
// Final keyword arms (null/never/intrinsic), then `this`, constructor,
// type-reference, and type-predicate conversions, and the start of
// TsTypeQuery (`typeof x`).
TsKeywordTypeKind::TsNullKeyword => TsKeywordTypeOutput::Null(TSNullKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsNeverKeyword => TsKeywordTypeOutput::Never(TSNeverKeyword { base: ctx.base(self.span), }), TsKeywordTypeKind::TsIntrinsicKeyword => { TsKeywordTypeOutput::Intrinsic(TSIntrinsicKeyword { base: ctx.base(self.span), }) } } } } impl Babelify for TsThisType { type Output = TSThisType; fn babelify(self, ctx: &Context) -> Self::Output { TSThisType { base: ctx.base(self.span), } } } impl Babelify for TsConstructorType { type Output = TSConstructorType; fn babelify(self, ctx: &Context) -> Self::Output { TSConstructorType { base: ctx.base(self.span), parameters: self .params .into_iter() .map(|param| param.babelify(ctx).into()) .collect(), type_parameters: self.type_params.map(|decl| decl.babelify(ctx)), type_annotation: Some(Box::alloc().init(self.type_ann.babelify(ctx))), is_abstract: Some(self.is_abstract), } } } impl Babelify for TsTypeRef { type Output = TSTypeReference; fn babelify(self, ctx: &Context) -> Self::Output { TSTypeReference { base: ctx.base(self.span), type_name: self.type_name.babelify(ctx), type_parameters: self.type_params.map(|t| t.babelify(ctx)), } } } impl Babelify for TsTypePredicate { type Output = TSTypePredicate; fn babelify(self, ctx: &Context) -> Self::Output { TSTypePredicate { base: ctx.base(self.span), parameter_name: self.param_name.babelify(ctx), type_annotation: self .type_ann .map(|ann| Box::alloc().init(ann.babelify(ctx))), asserts: Some(self.asserts), } } } impl Babelify for TsThisTypeOrIdent { type Output = TSTypePredicateParamName; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsThisTypeOrIdent::Ident(i) => TSTypePredicateParamName::Id(i.babelify(ctx)), TsThisTypeOrIdent::TsThisType(t) => TSTypePredicateParamName::This(t.babelify(ctx)), } } } impl Babelify for TsTypeQuery { type Output = TSTypeQuery; fn babelify(self, ctx: &Context) -> Self::Output { TSTypeQuery { base:
// typeof-queries, import types, type literals, and array/tuple types; tuple
// element labels must be identifiers (or rest-of-identifier) or the
// conversion panics. Then the start of TsOptionalType.
ctx.base(self.span), expr_name: self.expr_name.babelify(ctx), } } } impl Babelify for TsTypeQueryExpr { type Output = TSTypeQueryExprName; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsTypeQueryExpr::TsEntityName(n) => TSTypeQueryExprName::EntityName(n.babelify(ctx)), TsTypeQueryExpr::Import(i) => TSTypeQueryExprName::ImportType(i.babelify(ctx)), } } } impl Babelify for TsImportType { type Output = TSImportType; fn babelify(self, ctx: &Context) -> Self::Output { TSImportType { base: ctx.base(self.span), argument: self.arg.babelify(ctx), qualifier: self.qualifier.map(|qual| qual.babelify(ctx)), type_parameters: self.type_args.map(|param| param.babelify(ctx)), } } } impl Babelify for TsTypeLit { type Output = TSTypeLiteral; fn babelify(self, ctx: &Context) -> Self::Output { TSTypeLiteral { base: ctx.base(self.span), members: self.members.babelify(ctx), } } } impl Babelify for TsArrayType { type Output = TSArrayType; fn babelify(self, ctx: &Context) -> Self::Output { TSArrayType { base: ctx.base(self.span), element_type: Box::alloc().init(self.elem_type.babelify(ctx)), } } } impl Babelify for TsTupleType { type Output = TSTupleType; fn babelify(self, ctx: &Context) -> Self::Output { TSTupleType { base: ctx.base(self.span), element_types: self.elem_types.babelify(ctx), } } } impl Babelify for TsTupleElement { type Output = TSTupleTypeElType; fn babelify(self, ctx: &Context) -> Self::Output { match self.label { None => TSTupleTypeElType::TSType(self.ty.babelify(ctx)), Some(pat) => TSTupleTypeElType::Member(TSNamedTupleMember { base: ctx.base(self.span), label: match pat { Pat::Ident(id) => id.babelify(ctx), Pat::Rest(rest) => match *rest.arg { Pat::Ident(id) => id.babelify(ctx), _ => panic!( "illegal conversion: Cannot convert {:?} to Identifier", &rest.arg ), }, _ => panic!( "illegal conversion: Cannot convert {:?} to Identifier", &pat ), }, element_type: self.ty.babelify(ctx), optional: Default::default(), }), } } } impl Babelify for TsOptionalType
// Optional/rest tuple element types, the union/intersection helper enum and
// impls, conditional types, and the start of TsInferType.
{ type Output = TSOptionalType; fn babelify(self, ctx: &Context) -> Self::Output { TSOptionalType { base: ctx.base(self.span), type_annotation: Box::alloc().init(self.type_ann.babelify(ctx)), } } } impl Babelify for TsRestType { type Output = TSRestType; fn babelify(self, ctx: &Context) -> Self::Output { TSRestType { base: ctx.base(self.span), type_annotation: Box::alloc().init(self.type_ann.babelify(ctx)), } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum TsUnionOrIntersectionTypeOutput { Union(TSUnionType), Intersection(TSIntersectionType), } impl Babelify for TsUnionOrIntersectionType { type Output = TsUnionOrIntersectionTypeOutput; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsUnionOrIntersectionType::TsUnionType(u) => { TsUnionOrIntersectionTypeOutput::Union(u.babelify(ctx)) } TsUnionOrIntersectionType::TsIntersectionType(i) => { TsUnionOrIntersectionTypeOutput::Intersection(i.babelify(ctx)) } } } } impl Babelify for TsUnionType { type Output = TSUnionType; fn babelify(self, ctx: &Context) -> Self::Output { TSUnionType { base: ctx.base(self.span), types: self.types.into_iter().map(|t| t.babelify(ctx)).collect(), } } } impl Babelify for TsIntersectionType { type Output = TSIntersectionType; fn babelify(self, ctx: &Context) -> Self::Output { TSIntersectionType { base: ctx.base(self.span), types: self.types.into_iter().map(|t| t.babelify(ctx)).collect(), } } } impl Babelify for TsConditionalType { type Output = TSConditionalType; fn babelify(self, ctx: &Context) -> Self::Output { TSConditionalType { base: ctx.base(self.span), check_type: Box::alloc().init(self.check_type.babelify(ctx)), extends_type: Box::alloc().init(self.extends_type.babelify(ctx)), true_type: Box::alloc().init(self.true_type.babelify(ctx)), false_type: Box::alloc().init(self.false_type.babelify(ctx)), } } } impl Babelify for TsInferType { type Output = TSInferType; fn babelify(self, ctx: &Context) -> Self::Output { TSInferType { base: ctx.base(self.span),
// infer / parenthesized / type-operator / indexed-access conversions;
// `TsTypeOperatorOp` maps each operator to its keyword as a `JsWord`.
type_parameter: Box::alloc().init(self.type_param.babelify(ctx)), } } } impl Babelify for TsParenthesizedType { type Output = TSParenthesizedType; fn babelify(self, ctx: &Context) -> Self::Output { TSParenthesizedType { base: ctx.base(self.span), type_annotation: Box::alloc().init(self.type_ann.babelify(ctx)), } } } impl Babelify for TsTypeOperator { type Output = TSTypeOperator; fn babelify(self, ctx: &Context) -> Self::Output { TSTypeOperator { base: ctx.base(self.span), type_annotation: Box::alloc().init(self.type_ann.babelify(ctx)), operator: self.op.babelify(ctx), } } } impl Babelify for TsTypeOperatorOp { type Output = JsWord; fn babelify(self, _ctx: &Context) -> Self::Output { match self { TsTypeOperatorOp::KeyOf => js_word!("keyof"), TsTypeOperatorOp::Unique => js_word!("unique"), TsTypeOperatorOp::ReadOnly => js_word!("readonly"), } } } impl Babelify for TsIndexedAccessType { type Output = TSIndexedAccessType; fn babelify(self, ctx: &Context) -> Self::Output { TSIndexedAccessType { base: ctx.base(self.span), object_type: Box::alloc().init(self.obj_type.babelify(ctx)), index_type: Box::alloc().init(self.index_type.babelify(ctx)), } } } // TODO(dwoznicki): I don't understand how Babel handles the +/- symbol, so this // conversion will not work properly yet.
impl Babelify for TsMappedType { type Output = TSMappedType; fn babelify(self, ctx: &Context) -> Self::Output { TSMappedType { base: ctx.base(self.span), type_parameter: Box::alloc().init(self.type_param.babelify(ctx)), type_annotation: self .type_ann .map(|ann| Box::alloc().init(ann.babelify(ctx))), name_type: self.name_type.map(|t| Box::alloc().init(t.babelify(ctx))), optional: self.optional.map(|val| val == TruePlusMinus::True), readonly: self.readonly.map(|val| val == TruePlusMinus::True), } } } impl Babelify for TsLitType { type Output = TSLiteralType; fn babelify(self, ctx: &Context) -> Self::Output { TSLiteralType { base: ctx.base(self.span), literal: self.lit.babelify(ctx), } } } impl Babelify for TsLit { type Output = TSLiteralTypeLiteral; fn babelify(self, ctx: &Context) -> Self::Output { match self { TsLit::Number(n) => TSLiteralTypeLiteral::Numeric(n.babelify(ctx)), TsLit::Str(s) => TSLiteralTypeLiteral::String(s.babelify(ctx)), TsLit::Bool(b) => TSLiteralTypeLiteral::Boolean(b.babelify(ctx)), TsLit::BigInt(i) => TSLiteralTypeLiteral::BigInt(i.babelify(ctx)), _ => panic!( "illegal conversion: Cannot convert {:?} to TSLiteralTypeLiteral", &self ), } } } // TODO(dwoznicki): Babel does not appear to have a corresponding template // literal TS node. 
// NOTE(review): Babel has no template-literal TS type node (see comment
// above), so this conversion is intentionally unimplemented and panics.
impl Babelify for TsTplLitType {
    type Output = String;

    fn babelify(self, _ctx: &Context) -> Self::Output {
        panic!("unimplemented");
    }
}

/// Converts a swc `TsInterfaceDecl` into Babel's `TSInterfaceDeclaration`.
impl Babelify for TsInterfaceDecl {
    type Output = TSInterfaceDeclaration;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSInterfaceDeclaration {
            base: ctx.base(self.span),
            id: self.id.babelify(ctx),
            type_parameters: self.type_params.map(|t| t.babelify(ctx)),
            // NOTE(review): only the first `extends` clause is converted and
            // any further clauses are silently dropped — confirm intended.
            extends: self.extends.into_iter().next().babelify(ctx),
            body: self.body.babelify(ctx),
            declare: Some(self.declare),
        }
    }
}

/// Converts an interface body (its list of members).
impl Babelify for TsInterfaceBody {
    type Output = TSInterfaceBody;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSInterfaceBody {
            base: ctx.base(self.span),
            body: self.body.babelify(ctx),
        }
    }
}

/// Converts an `extends`/`implements` clause expression plus its type args.
impl Babelify for TsExprWithTypeArgs {
    type Output = TSExpressionWithTypeArguments;

    fn babelify(self, ctx: &Context) -> Self::Output {
        // Recursively lowers an expression into a TS entity name: a plain
        // identifier, or a dotted path built from member expressions. Only
        // identifiers and member expressions with identifier properties can
        // legally appear in this position, hence the `unreachable!` arms.
        fn babelify_expr(expr: Expr, ctx: &Context) -> TSEntityName {
            match expr {
                Expr::Ident(id) => TSEntityName::Id(id.babelify(ctx)),
                Expr::Member(e) => TSEntityName::Qualified(TSQualifiedName {
                    base: ctx.base(e.span),
                    left: Box::new(babelify_expr(*e.obj, ctx)),
                    right: match e.prop {
                        MemberProp::Ident(id) => id.babelify(ctx),
                        _ => unreachable!(),
                    },
                }),
                _ => unreachable!(),
            }
        }

        TSExpressionWithTypeArguments {
            base: ctx.base(self.span),
            expression: babelify_expr(*self.expr, ctx),
            type_parameters: self.type_args.map(|arg| arg.babelify(ctx)),
        }
    }
}

/// Converts `type X<T> = …` declarations.
impl Babelify for TsTypeAliasDecl {
    type Output = TSTypeAliasDeclaration;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSTypeAliasDeclaration {
            base: ctx.base(self.span),
            id: self.id.babelify(ctx),
            type_parameters: self.type_params.map(|t| t.babelify(ctx)),
            type_annotation: self.type_ann.babelify(ctx),
            declare: Some(self.declare),
        }
    }
}

/// Converts `enum`/`const enum` declarations.
impl Babelify for TsEnumDecl {
    type Output = TSEnumDeclaration;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSEnumDeclaration {
            base: ctx.base(self.span),
            id: self.id.babelify(ctx),
            members: self.members.babelify(ctx),
            is_const: Some(self.is_const),
            declare: Some(self.declare),
            // swc has no equivalent for Babel's enum-level initializer.
            initializer: Default::default(),
        }
    }
}

/// Converts one enum member (name plus optional initializer expression).
impl Babelify for TsEnumMember {
    type Output = TSEnumMember;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSEnumMember {
            base: ctx.base(self.span),
            id: self.id.babelify(ctx),
            initializer: self.init.map(|i| Box::alloc().init(i.babelify(ctx).into())),
        }
    }
}

/// Enum member names may be identifiers or string literals.
impl Babelify for TsEnumMemberId {
    type Output = IdOrString;

    fn babelify(self, ctx: &Context) -> Self::Output {
        match self {
            TsEnumMemberId::Ident(i) => IdOrString::Id(i.babelify(ctx)),
            TsEnumMemberId::Str(s) => IdOrString::String(s.babelify(ctx)),
        }
    }
}

/// Converts `module`/`namespace` declarations.
impl Babelify for TsModuleDecl {
    type Output = TSModuleDeclaration;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSModuleDeclaration {
            base: ctx.base(self.span),
            id: self.id.babelify(ctx),
            // NOTE(review): panics on a bodiless module declaration
            // (e.g. `declare module "m";`) — confirm callers never hit this.
            body: Box::alloc().init(self.body.unwrap().babelify(ctx)),
            declare: Some(self.declare),
            global: Some(self.global),
        }
    }
}

/// A namespace body is either a block of statements or a nested declaration.
impl Babelify for TsNamespaceBody {
    type Output = TSModuleDeclBody;

    fn babelify(self, ctx: &Context) -> Self::Output {
        match self {
            TsNamespaceBody::TsModuleBlock(b) => TSModuleDeclBody::Block(b.babelify(ctx)),
            TsNamespaceBody::TsNamespaceDecl(d) => TSModuleDeclBody::Decl(d.babelify(ctx)),
        }
    }
}

/// Converts a module block's statement list.
impl Babelify for TsModuleBlock {
    type Output = TSModuleBlock;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSModuleBlock {
            base: ctx.base(self.span),
            body: self
                .body
                .into_iter()
                .map(|m| m.babelify(ctx).into())
                .collect(),
        }
    }
}

/// Converts a nested namespace declaration; its name is always an identifier.
impl Babelify for TsNamespaceDecl {
    type Output = TSModuleDeclaration;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSModuleDeclaration {
            base: ctx.base(self.span),
            id: IdOrString::Id(self.id.babelify(ctx)),
            body: Box::alloc().init(self.body.babelify(ctx)),
            declare: Some(self.declare),
            global: Some(self.global),
        }
    }
}

/// Module names may be identifiers or string literals (`module "m" {}`).
impl Babelify for TsModuleName {
    type Output = IdOrString;

    fn babelify(self, ctx: &Context) -> Self::Output {
        match self {
            TsModuleName::Ident(i) => IdOrString::Id(i.babelify(ctx)),
            TsModuleName::Str(s) => IdOrString::String(s.babelify(ctx)),
        }
    }
}

/// Converts `import x = require(…)` / `import x = A.B` declarations.
impl Babelify for TsImportEqualsDecl {
    type Output = TSImportEqualsDeclaration;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSImportEqualsDeclaration {
            base: ctx.base(self.span),
            id: self.id.babelify(ctx),
            module_reference: self.module_ref.babelify(ctx),
            is_export: self.is_export,
        }
    }
}

/// The right-hand side of an import-equals: an entity name or `require(…)`.
impl Babelify for TsModuleRef {
    type Output = TSImportEqualsDeclModuleRef;

    fn babelify(self, ctx: &Context) -> Self::Output {
        match self {
            TsModuleRef::TsEntityName(n) => TSImportEqualsDeclModuleRef::Name(n.babelify(ctx)),
            TsModuleRef::TsExternalModuleRef(e) => {
                TSImportEqualsDeclModuleRef::External(e.babelify(ctx))
            }
        }
    }
}

/// Converts the `require("…")` form of an external module reference.
impl Babelify for TsExternalModuleRef {
    type Output = TSExternalModuleReference;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSExternalModuleReference {
            base: ctx.base(self.span),
            expression: self.expr.babelify(ctx),
        }
    }
}

/// Converts `export = expr` assignments.
impl Babelify for TsExportAssignment {
    type Output = TSExportAssignment;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSExportAssignment {
            base: ctx.base(self.span),
            expression: Box::alloc().init(self.expr.babelify(ctx).into()),
        }
    }
}

/// Converts `export as namespace X` declarations.
impl Babelify for TsNamespaceExportDecl {
    type Output = TSNamespaceExportDeclaration;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSNamespaceExportDeclaration {
            base: ctx.base(self.span),
            id: self.id.babelify(ctx),
        }
    }
}

/// Converts `expr as T` expressions.
impl Babelify for TsAsExpr {
    type Output = TSAsExpression;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSAsExpression {
            base: ctx.base(self.span),
            expression: Box::alloc().init(self.expr.babelify(ctx).into()),
            type_annotation: self.type_ann.babelify(ctx),
        }
    }
}

/// Converts angle-bracket type assertions (`<T>expr`).
impl Babelify for TsTypeAssertion {
    type Output = TSTypeAssertion;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSTypeAssertion {
            base: ctx.base(self.span),
            expression: Box::alloc().init(self.expr.babelify(ctx).into()),
            type_annotation: self.type_ann.babelify(ctx),
        }
    }
}

/// Converts non-null assertions (`expr!`).
impl Babelify for TsNonNullExpr {
    type Output = TSNonNullExpression;

    fn babelify(self, ctx: &Context) -> Self::Output {
        TSNonNullExpression {
            base: ctx.base(self.span),
            expression: Box::alloc().init(self.expr.babelify(ctx).into()),
        }
    }
}

/// Maps swc accessibility modifiers onto Babel's `Access` enum one-to-one.
impl Babelify for Accessibility {
    type Output = Access;

    fn babelify(self, _ctx: &Context) -> Self::Output {
        match self {
            Accessibility::Public => Access::Public,
            Accessibility::Protected => Access::Protected,
            Accessibility::Private => Access::Private,
        }
    }
}

// TODO(dwoznicki): There does not appear to be a corresponding Babel node for
// this.
impl Babelify for TsConstAssertion {
    type Output = String;

    fn babelify(self, _ctx: &Context) -> Self::Output {
        panic!("unimplemented");
    }
}
33.448718
99
0.600887
fe5042f51a1c3c0c2040fcb3448c634204f790e9
36,366
use std::{
    borrow::Cow,
    convert::{TryFrom, TryInto},
    fmt, vec,
};

use serde::de::{
    self, Deserialize, DeserializeSeed, Deserializer as _, EnumAccess, Error, MapAccess, SeqAccess,
    Unexpected, VariantAccess, Visitor,
};
use serde_bytes::ByteBuf;

use crate::{
    bson::{Binary, Bson, DbPointer, JavaScriptCodeWithScope, Regex, Timestamp},
    datetime::DateTime,
    document::{Document, IntoIter},
    oid::ObjectId,
    raw::RawBsonRef,
    spec::BinarySubtype,
    uuid::UUID_NEWTYPE_NAME,
    Decimal128,
};

use super::raw::Decimal128Access;

/// Visitor that builds a [`Bson`] value from any self-describing serde input,
/// recognizing extended-JSON encodings (`$oid`, `$date`, `$numberLong`, ...)
/// while visiting maps.
pub(crate) struct BsonVisitor;

/// Visitor that accepts an `ObjectId` as a 24-character hex string, as 12 raw
/// bytes, or as an extended-JSON `{ "$oid": ... }` map.
struct ObjectIdVisitor;

impl<'de> Visitor<'de> for ObjectIdVisitor {
    type Value = ObjectId;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("expecting an ObjectId")
    }

    #[inline]
    fn visit_str<E>(self, value: &str) -> std::result::Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        ObjectId::parse_str(value).map_err(|_| {
            E::invalid_value(
                Unexpected::Str(value),
                &"24-character, big-endian hex string",
            )
        })
    }

    #[inline]
    fn visit_bytes<E>(self, v: &[u8]) -> std::result::Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        // An ObjectId is exactly 12 bytes; anything else is a length error.
        let bytes: [u8; 12] = v
            .try_into()
            .map_err(|_| E::invalid_length(v.len(), &"12 bytes"))?;
        Ok(ObjectId::from_bytes(bytes))
    }

    #[inline]
    fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
    where
        V: MapAccess<'de>,
    {
        // Delegate to BsonVisitor so the extended-JSON `$oid` form is handled
        // in one place, then insist the result really was an ObjectId.
        match BsonVisitor.visit_map(&mut visitor)? {
            Bson::ObjectId(oid) => Ok(oid),
            bson => {
                let err = format!(
                    "expected map containing extended-JSON formatted ObjectId, instead found {}",
                    bson
                );
                Err(de::Error::custom(err))
            }
        }
    }
}

impl<'de> Deserialize<'de> for ObjectId {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Binary formats carry the raw 12 bytes; human-readable formats use
        // the hex-string / extended-JSON representations.
        if !deserializer.is_human_readable() {
            deserializer.deserialize_bytes(ObjectIdVisitor)
        } else {
            deserializer.deserialize_any(ObjectIdVisitor)
        }
    }
}

impl<'de> Deserialize<'de> for Document {
    /// Deserialize this value given this `Deserializer`.
    ///
    /// Fails if the input map turns out to be an extended-JSON scalar (e.g.
    /// `{ "$numberInt": ... }`) rather than a plain document.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        deserializer.deserialize_map(BsonVisitor).and_then(|bson| {
            if let Bson::Document(doc) = bson {
                Ok(doc)
            } else {
                let err = format!("expected document, found extended JSON data type: {}", bson);
                Err(de::Error::invalid_type(Unexpected::Map, &&err[..]))
            }
        })
    }
}

impl<'de> Deserialize<'de> for Bson {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Bson, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        deserializer.deserialize_any(BsonVisitor)
    }
}

impl<'de> Visitor<'de> for BsonVisitor {
    type Value = Bson;

    fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("a Bson")
    }

    #[inline]
    fn visit_bool<E>(self, value: bool) -> Result<Bson, E>
    where
        E: Error,
    {
        Ok(Bson::Boolean(value))
    }

    // Signed integers that fit in 32 bits become Int32; i64 becomes Int64.
    // Unsigned integers are funneled through convert_unsigned_to_signed,
    // which picks the smallest signed BSON integer that can hold the value.

    #[inline]
    fn visit_i8<E>(self, value: i8) -> Result<Bson, E>
    where
        E: Error,
    {
        Ok(Bson::Int32(value as i32))
    }

    #[inline]
    fn visit_u8<E>(self, value: u8) -> Result<Bson, E>
    where
        E: Error,
    {
        convert_unsigned_to_signed(value as u64)
    }

    #[inline]
    fn visit_i16<E>(self, value: i16) -> Result<Bson, E>
    where
        E: Error,
    {
        Ok(Bson::Int32(value as i32))
    }

    #[inline]
    fn visit_u16<E>(self, value: u16) -> Result<Bson, E>
    where
        E: Error,
    {
        convert_unsigned_to_signed(value as u64)
    }

    #[inline]
    fn visit_i32<E>(self, value: i32) -> Result<Bson, E>
    where
        E: Error,
    {
        Ok(Bson::Int32(value))
    }

    #[inline]
    fn visit_u32<E>(self, value: u32) -> Result<Bson, E>
    where
        E: Error,
    {
        convert_unsigned_to_signed(value as u64)
    }

    #[inline]
    fn visit_i64<E>(self, value: i64) -> Result<Bson, E>
    where
        E: Error,
    {
        Ok(Bson::Int64(value))
    }

    #[inline]
    fn visit_u64<E>(self, value: u64) -> Result<Bson, E>
    where
        E: Error,
    {
        convert_unsigned_to_signed(value)
    }

    #[inline]
    fn visit_f64<E>(self, value: f64) -> Result<Bson, E> {
        Ok(Bson::Double(value))
    }

    #[inline]
    fn visit_str<E>(self, value: &str) -> Result<Bson, E>
    where
        E: de::Error,
    {
        self.visit_string(String::from(value))
    }

    #[inline]
    fn visit_string<E>(self, value: String) -> Result<Bson, E> {
        Ok(Bson::String(value))
    }

    #[inline]
    fn visit_none<E>(self) -> Result<Bson, E> {
        Ok(Bson::Null)
    }

    #[inline]
    fn visit_some<D>(self, deserializer: D) -> Result<Bson, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        deserializer.deserialize_any(self)
    }

    #[inline]
    fn visit_unit<E>(self) -> Result<Bson, E> {
        Ok(Bson::Null)
    }

    #[inline]
    fn visit_seq<V>(self, mut visitor: V) -> Result<Bson, V::Error>
    where
        V: SeqAccess<'de>,
    {
        let mut values = Vec::new();

        while let Some(elem) = visitor.next_element()? {
            values.push(elem);
        }

        Ok(Bson::Array(values))
    }

    /// Visits a map, interpreting extended-JSON `$`-prefixed keys as their
    /// corresponding BSON types. A recognized `$` key short-circuits with a
    /// `return`; anything else accumulates into a plain `Document`.
    fn visit_map<V>(self, mut visitor: V) -> Result<Bson, V::Error>
    where
        V: MapAccess<'de>,
    {
        use crate::extjson;

        let mut doc = Document::new();

        while let Some(k) = visitor.next_key::<String>()? {
            match k.as_str() {
                "$oid" => {
                    // The value may be a borrowed/owned hex string (extended
                    // JSON) or the raw 12 bytes (non-human-readable formats).
                    enum BytesOrHex<'a> {
                        Bytes([u8; 12]),
                        Hex(Cow<'a, str>),
                    }

                    impl<'a, 'de: 'a> Deserialize<'de> for BytesOrHex<'a> {
                        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
                        where
                            D: serde::Deserializer<'de>,
                        {
                            struct BytesOrHexVisitor;

                            impl<'de> Visitor<'de> for BytesOrHexVisitor {
                                type Value = BytesOrHex<'de>;

                                fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                                    write!(formatter, "hexstring or byte array")
                                }

                                fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
                                where
                                    E: Error,
                                {
                                    Ok(BytesOrHex::Hex(Cow::Owned(v.to_string())))
                                }

                                fn visit_borrowed_str<E>(
                                    self,
                                    v: &'de str,
                                ) -> Result<Self::Value, E>
                                where
                                    E: Error,
                                {
                                    Ok(BytesOrHex::Hex(Cow::Borrowed(v)))
                                }

                                fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
                                where
                                    E: Error,
                                {
                                    Ok(BytesOrHex::Bytes(v.try_into().map_err(Error::custom)?))
                                }
                            }

                            deserializer.deserialize_any(BytesOrHexVisitor)
                        }
                    }

                    let bytes_or_hex: BytesOrHex = visitor.next_value()?;
                    match bytes_or_hex {
                        BytesOrHex::Bytes(b) => return Ok(Bson::ObjectId(ObjectId::from_bytes(b))),
                        BytesOrHex::Hex(hex) => {
                            return Ok(Bson::ObjectId(ObjectId::parse_str(&hex).map_err(
                                |_| {
                                    V::Error::invalid_value(
                                        Unexpected::Str(&hex),
                                        &"24-character, big-endian hex string",
                                    )
                                },
                            )?));
                        }
                    }
                }
                "$symbol" => {
                    let string: String = visitor.next_value()?;
                    return Ok(Bson::Symbol(string));
                }

                "$numberInt" => {
                    let string: String = visitor.next_value()?;
                    return Ok(Bson::Int32(string.parse().map_err(|_| {
                        V::Error::invalid_value(
                            Unexpected::Str(&string),
                            &"32-bit signed integer as a string",
                        )
                    })?));
                }

                "$numberLong" => {
                    let string: String = visitor.next_value()?;
                    return Ok(Bson::Int64(string.parse().map_err(|_| {
                        V::Error::invalid_value(
                            Unexpected::Str(&string),
                            &"64-bit signed integer as a string",
                        )
                    })?));
                }

                "$numberDouble" => {
                    let string: String = visitor.next_value()?;
                    let val = match string.as_str() {
                        "Infinity" => Bson::Double(std::f64::INFINITY),
                        "-Infinity" => Bson::Double(std::f64::NEG_INFINITY),
                        "NaN" => Bson::Double(std::f64::NAN),
                        // BUG FIX: finite values must parse as f64 and yield
                        // a Bson::Double. Previously this arm parsed the
                        // string as i64 into Bson::Int64, which rejected any
                        // non-integral value (e.g. "3.5") and mislabeled the
                        // result type even when parsing succeeded.
                        _ => Bson::Double(string.parse().map_err(|_| {
                            V::Error::invalid_value(
                                Unexpected::Str(&string),
                                &"64-bit floating point number as a string",
                            )
                        })?),
                    };
                    return Ok(val);
                }

                "$binary" => {
                    let v = visitor.next_value::<extjson::models::BinaryBody>()?;
                    return Ok(Bson::Binary(
                        extjson::models::Binary { body: v }
                            .parse()
                            .map_err(Error::custom)?,
                    ));
                }

                "$code" => {
                    // `$code` may be followed by an optional `$scope`.
                    let code = visitor.next_value::<String>()?;
                    if let Some(key) = visitor.next_key::<String>()? {
                        if key.as_str() == "$scope" {
                            let scope = visitor.next_value::<Document>()?;
                            return Ok(Bson::JavaScriptCodeWithScope(JavaScriptCodeWithScope {
                                code,
                                scope,
                            }));
                        } else {
                            return Err(Error::unknown_field(key.as_str(), &["$scope"]));
                        }
                    } else {
                        return Ok(Bson::JavaScriptCode(code));
                    }
                }

                "$scope" => {
                    // `$scope` appearing first requires a following `$code`.
                    let scope = visitor.next_value::<Document>()?;
                    if let Some(key) = visitor.next_key::<String>()? {
                        if key.as_str() == "$code" {
                            let code = visitor.next_value::<String>()?;
                            return Ok(Bson::JavaScriptCodeWithScope(JavaScriptCodeWithScope {
                                code,
                                scope,
                            }));
                        } else {
                            return Err(Error::unknown_field(key.as_str(), &["$code"]));
                        }
                    } else {
                        return Err(Error::missing_field("$code"));
                    }
                }

                "$timestamp" => {
                    let ts = visitor.next_value::<extjson::models::TimestampBody>()?;
                    return Ok(Bson::Timestamp(Timestamp {
                        time: ts.t,
                        increment: ts.i,
                    }));
                }

                "$regularExpression" => {
                    let re = visitor.next_value::<extjson::models::RegexBody>()?;
                    return Ok(Bson::RegularExpression(Regex::new(re.pattern, re.options)));
                }

                "$dbPointer" => {
                    let dbp = visitor.next_value::<extjson::models::DbPointerBody>()?;
                    return Ok(Bson::DbPointer(DbPointer {
                        id: dbp.id.parse().map_err(Error::custom)?,
                        namespace: dbp.ref_ns,
                    }));
                }

                "$date" => {
                    let dt = visitor.next_value::<extjson::models::DateTimeBody>()?;
                    return Ok(Bson::DateTime(
                        extjson::models::DateTime { body: dt }
                            .parse()
                            .map_err(Error::custom)?,
                    ));
                }

                "$maxKey" => {
                    let i = visitor.next_value::<u8>()?;
                    return extjson::models::MaxKey { value: i }
                        .parse()
                        .map_err(Error::custom);
                }

                "$minKey" => {
                    let i = visitor.next_value::<u8>()?;
                    return extjson::models::MinKey { value: i }
                        .parse()
                        .map_err(Error::custom);
                }

                "$undefined" => {
                    let b = visitor.next_value::<bool>()?;
                    return extjson::models::Undefined { value: b }
                        .parse()
                        .map_err(Error::custom);
                }

                "$numberDecimal" => {
                    return Err(Error::custom(
                        "deserializing decimal128 values from strings is not currently supported"
                            .to_string(),
                    ));
                }

                "$numberDecimalBytes" => {
                    let bytes = visitor.next_value::<ByteBuf>()?;
                    return Ok(Bson::Decimal128(Decimal128::deserialize_from_slice(
                        &bytes,
                    )?));
                }

                k => {
                    // Not an extended-JSON marker: treat as a regular field.
                    let v = visitor.next_value::<Bson>()?;
                    doc.insert(k, v);
                }
            }
        }

        Ok(Bson::Document(doc))
    }

    #[inline]
    fn visit_bytes<E>(self, v: &[u8]) -> Result<Bson, E>
    where
        E: Error,
    {
        Ok(Bson::Binary(Binary {
            subtype: BinarySubtype::Generic,
            bytes: v.to_vec(),
        }))
    }

    #[inline]
    fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Bson, E>
    where
        E: Error,
    {
        Ok(Bson::Binary(Binary {
            subtype: BinarySubtype::Generic,
            bytes: v,
        }))
    }

    #[inline]
    fn visit_newtype_struct<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserializer.deserialize_any(self)
    }
}

/// Result of narrowing a u64 to the smallest signed BSON integer type.
enum BsonInteger {
    Int32(i32),
    Int64(i64),
}

/// Narrows `value` to i32 if it fits, else i64, else errors — values above
/// `i64::MAX` cannot be represented in BSON's signed integer types.
fn _convert_unsigned<E: Error>(value: u64) -> Result<BsonInteger, E> {
    if let Ok(int32) = i32::try_from(value) {
        Ok(BsonInteger::Int32(int32))
    } else if let Ok(int64) = i64::try_from(value) {
        Ok(BsonInteger::Int64(int64))
    } else {
        Err(Error::custom(format!(
            "cannot represent {} as a signed number",
            value
        )))
    }
}

/// Converts an unsigned integer to `Bson::Int32`/`Bson::Int64`.
fn convert_unsigned_to_signed<E>(value: u64) -> Result<Bson, E>
where
    E: Error,
{
    let bi = _convert_unsigned(value)?;
    match bi {
        BsonInteger::Int32(i) => Ok(Bson::Int32(i)),
        BsonInteger::Int64(i) => Ok(Bson::Int64(i)),
    }
}

/// Same narrowing as [`convert_unsigned_to_signed`], but for the raw API.
pub(crate) fn convert_unsigned_to_signed_raw<'a, E>(value: u64) -> Result<RawBsonRef<'a>, E>
where
    E: Error,
{
    let bi = _convert_unsigned(value)?;
    match bi {
        BsonInteger::Int32(i) => Ok(RawBsonRef::Int32(i)),
        BsonInteger::Int64(i) => Ok(RawBsonRef::Int64(i)),
    }
}

/// Serde Deserializer that walks an owned [`Bson`] tree. The value is stored
/// as an `Option` so each `deserialize_*` entry point can `take()` it once.
pub struct Deserializer {
    value: Option<Bson>,
    options: DeserializerOptions,
}

/// Options used to configure a [`Deserializer`]. These can also be passed into
/// [`crate::from_bson_with_options`] and [`crate::from_document_with_options`].
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct DeserializerOptions {
    /// Whether the [`Deserializer`] should present itself as human readable or not.
    /// The default is true.
    pub human_readable: Option<bool>,
}

impl DeserializerOptions {
    /// Create a builder struct used to construct a [`DeserializerOptions`].
    pub fn builder() -> DeserializerOptionsBuilder {
        DeserializerOptionsBuilder {
            options: Default::default(),
        }
    }
}

/// Builder used to construct a [`DeserializerOptions`].
pub struct DeserializerOptionsBuilder {
    options: DeserializerOptions,
}

impl DeserializerOptionsBuilder {
    /// Set the value for [`DeserializerOptions::human_readable`].
    pub fn human_readable(mut self, val: impl Into<Option<bool>>) -> Self {
        self.options.human_readable = val.into();
        self
    }

    /// Consume this builder and produce a [`DeserializerOptions`].
    pub fn build(self) -> DeserializerOptions {
        self.options
    }
}

impl Deserializer {
    /// Construct a new [`Deserializer`] using the default options.
    pub fn new(value: Bson) -> Deserializer {
        Deserializer::new_with_options(value, Default::default())
    }

    /// Create a new [`Deserializer`] using the provided options.
    pub fn new_with_options(value: Bson, options: DeserializerOptions) -> Self {
        Deserializer {
            value: Some(value),
            options,
        }
    }
}

// Generates `deserialize_*` trait methods that forward to `deserialize_any`,
// except `deserialize_enum`, which is rejected outright with a custom error.
macro_rules! forward_to_deserialize {
    ($(
        $name:ident ( $( $arg:ident : $ty:ty ),* );
    )*) => {
        $(
            forward_to_deserialize!{
                func: $name ( $( $arg: $ty ),* );
            }
        )*
    };

    (func: deserialize_enum ( $( $arg:ident : $ty:ty ),* );) => {
        fn deserialize_enum<V>(
            self,
            $(_: $ty,)*
            _visitor: V,
        ) -> ::std::result::Result<V::Value, Self::Error>
            where V: ::serde::de::Visitor<'de>
        {
            Err(::serde::de::Error::custom("unexpected Enum"))
        }
    };

    (func: $name:ident ( $( $arg:ident : $ty:ty ),* );) => {
        #[inline]
        fn $name<V>(
            self,
            $(_: $ty,)*
            visitor: V,
        ) -> ::std::result::Result<V::Value, Self::Error>
            where V: ::serde::de::Visitor<'de>
        {
            self.deserialize_any(visitor)
        }
    };
}

impl<'de> de::Deserializer<'de> for Deserializer {
    type Error = crate::de::Error;

    fn is_human_readable(&self) -> bool {
        self.options.human_readable.unwrap_or(true)
    }

    /// Dispatches on the stored [`Bson`] variant, calling the matching
    /// `visit_*` method. Variants without a native serde representation are
    /// expanded to their extended-JSON document form and visited as maps.
    #[inline]
    fn deserialize_any<V>(mut self, visitor: V) -> crate::de::Result<V::Value>
    where
        V: Visitor<'de>,
    {
        // `take()` consumes the single stored value; a second call would see
        // `None` and report EndOfStream.
        let value = match self.value.take() {
            Some(value) => value,
            None => return Err(crate::de::Error::EndOfStream),
        };

        match value {
            Bson::Double(v) => visitor.visit_f64(v),
            Bson::String(v) => visitor.visit_string(v),
            Bson::Array(v) => {
                let len = v.len();
                visitor.visit_seq(SeqDeserializer {
                    iter: v.into_iter(),
                    options: self.options,
                    len,
                })
            }
            Bson::Document(v) => {
                let len = v.len();
                visitor.visit_map(MapDeserializer {
                    iter: v.into_iter(),
                    value: None,
                    len,
                    options: self.options,
                })
            }
            Bson::Boolean(v) => visitor.visit_bool(v),
            Bson::Null => visitor.visit_unit(),
            Bson::Int32(v) => visitor.visit_i32(v),
            Bson::Int64(v) => visitor.visit_i64(v),
            // Generic binary is handed over as raw bytes; any other subtype
            // goes through the extended-JSON map form below.
            Bson::Binary(Binary {
                subtype: BinarySubtype::Generic,
                bytes,
            }) => visitor.visit_byte_buf(bytes),
            binary @ Bson::Binary(..) => visitor.visit_map(MapDeserializer {
                iter: binary.into_extended_document().into_iter(),
                value: None,
                len: 2,
                options: self.options,
            }),
            Bson::Decimal128(d) => visitor.visit_map(Decimal128Access::new(d)),
            _ => {
                let doc = value.into_extended_document();
                let len = doc.len();
                visitor.visit_map(MapDeserializer {
                    iter: doc.into_iter(),
                    value: None,
                    len,
                    options: self.options,
                })
            }
        }
    }

    #[inline]
    fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.value {
            // In binary mode an ObjectId is surfaced as its raw 12 bytes.
            Some(Bson::ObjectId(oid)) if !self.is_human_readable() => {
                visitor.visit_bytes(&oid.bytes())
            }
            _ => self.deserialize_any(visitor),
        }
    }

    #[inline]
    fn deserialize_option<V>(self, visitor: V) -> crate::de::Result<V::Value>
    where
        V: Visitor<'de>,
    {
        match self.value {
            Some(Bson::Null) => visitor.visit_none(),
            Some(_) => visitor.visit_some(self),
            None => Err(crate::de::Error::EndOfStream),
        }
    }

    /// Enums are represented either as a bare string (unit variant) or a
    /// single-key document `{ variant: value }`.
    #[inline]
    fn deserialize_enum<V>(
        mut self,
        _name: &str,
        _variants: &'static [&'static str],
        visitor: V,
    ) -> crate::de::Result<V::Value>
    where
        V: Visitor<'de>,
    {
        let value = match self.value.take() {
            Some(Bson::Document(value)) => value,
            Some(Bson::String(variant)) => {
                return visitor.visit_enum(EnumDeserializer {
                    val: Bson::String(variant),
                    deserializer: VariantDeserializer {
                        val: None,
                        options: self.options,
                    },
                });
            }
            Some(v) => {
                return Err(crate::de::Error::invalid_type(
                    v.as_unexpected(),
                    &"expected an enum",
                ));
            }
            None => {
                return Err(crate::de::Error::EndOfStream);
            }
        };

        let mut iter = value.into_iter();

        let (variant, value) = match iter.next() {
            Some(v) => v,
            None => {
                return Err(crate::de::Error::invalid_value(
                    Unexpected::Other("empty document"),
                    &"variant name",
                ))
            }
        };

        // enums are encoded in json as maps with a single key:value pair
        match iter.next() {
            Some((k, _)) => Err(crate::de::Error::invalid_value(
                Unexpected::Map,
                &format!("expected map with a single key, got extra key \"{}\"", k).as_str(),
            )),
            None => visitor.visit_enum(EnumDeserializer {
                val: Bson::String(variant),
                deserializer: VariantDeserializer {
                    val: Some(value),
                    options: self.options,
                },
            }),
        }
    }

    #[inline]
    fn deserialize_newtype_struct<V>(
        self,
        name: &'static str,
        visitor: V,
    ) -> crate::de::Result<V::Value>
    where
        V: Visitor<'de>,
    {
        // if this is a UUID, ensure that value is a subtype 4 binary
        if name == UUID_NEWTYPE_NAME {
            match self.value {
                Some(Bson::Binary(ref b)) if b.subtype == BinarySubtype::Uuid => {
                    self.deserialize_any(visitor)
                }
                b => Err(Error::custom(format!(
                    "expected Binary with subtype 4, instead got {:?}",
                    b
                ))),
            }
        } else {
            visitor.visit_newtype_struct(self)
        }
    }

    forward_to_deserialize! {
        deserialize_bool();
        deserialize_u8();
        deserialize_u16();
        deserialize_u32();
        deserialize_u64();
        deserialize_i8();
        deserialize_i16();
        deserialize_i32();
        deserialize_i64();
        deserialize_f32();
        deserialize_f64();
        deserialize_char();
        deserialize_str();
        deserialize_string();
        deserialize_unit();
        deserialize_seq();
        deserialize_map();
        deserialize_unit_struct(name: &'static str);
        deserialize_tuple_struct(name: &'static str, len: usize);
        deserialize_struct(name: &'static str, fields: &'static [&'static str]);
        deserialize_tuple(len: usize);
        deserialize_identifier();
        deserialize_ignored_any();
        deserialize_byte_buf();
    }
}

// Yields the variant name, then hands the variant payload off to the
// contained VariantDeserializer.
struct EnumDeserializer {
    val: Bson,
    deserializer: VariantDeserializer,
}

impl<'de> EnumAccess<'de> for EnumDeserializer {
    type Error = crate::de::Error;
    type Variant = VariantDeserializer;

    fn variant_seed<V>(self, seed: V) -> crate::de::Result<(V::Value, Self::Variant)>
    where
        V: DeserializeSeed<'de>,
    {
        let dec = Deserializer::new_with_options(self.val, self.deserializer.options.clone());
        let value = seed.deserialize(dec)?;
        Ok((value, self.deserializer))
    }
}

// Deserializes the payload of one enum variant; `val` is `None` for unit
// variants encoded as bare strings.
struct VariantDeserializer {
    val: Option<Bson>,
    options: DeserializerOptions,
}

impl<'de> VariantAccess<'de> for VariantDeserializer {
    type Error = crate::de::Error;

    fn unit_variant(mut self) -> crate::de::Result<()> {
        match self.val.take() {
            None => Ok(()),
            // A payload on a unit variant is drained (and validated) but
            // its value is discarded.
            Some(val) => {
                Bson::deserialize(Deserializer::new_with_options(val, self.options)).map(|_| ())
            }
        }
    }

    fn newtype_variant_seed<T>(mut self, seed: T) -> crate::de::Result<T::Value>
    where
        T: DeserializeSeed<'de>,
    {
        let dec = Deserializer::new_with_options(
            self.val.take().ok_or(crate::de::Error::EndOfStream)?,
            self.options,
        );
        seed.deserialize(dec)
    }

    fn tuple_variant<V>(mut self, _len: usize, visitor: V) -> crate::de::Result<V::Value>
    where
        V: Visitor<'de>,
    {
        match self.val.take().ok_or(crate::de::Error::EndOfStream)? {
            Bson::Array(fields) => {
                let de = SeqDeserializer {
                    len: fields.len(),
                    iter: fields.into_iter(),
                    options: self.options,
                };
                de.deserialize_any(visitor)
            }
            other => Err(crate::de::Error::invalid_type(
                other.as_unexpected(),
                &"expected a tuple",
            )),
        }
    }

    fn struct_variant<V>(
        mut self,
        _fields: &'static [&'static str],
        visitor: V,
    ) -> crate::de::Result<V::Value>
    where
        V: Visitor<'de>,
    {
        match self.val.take().ok_or(crate::de::Error::EndOfStream)? {
            Bson::Document(fields) => {
                let de = MapDeserializer {
                    len: fields.len(),
                    iter: fields.into_iter(),
                    value: None,
                    options: self.options,
                };
                de.deserialize_any(visitor)
            }
            ref other => Err(crate::de::Error::invalid_type(
                other.as_unexpected(),
                &"expected a struct",
            )),
        }
    }
}

// Walks the elements of a Bson::Array; `len` tracks remaining elements so
// size_hint stays accurate as items are consumed.
struct SeqDeserializer {
    iter: vec::IntoIter<Bson>,
    len: usize,
    options: DeserializerOptions,
}

impl<'de> de::Deserializer<'de> for SeqDeserializer {
    type Error = crate::de::Error;

    #[inline]
    fn deserialize_any<V>(self, visitor: V) -> crate::de::Result<V::Value>
    where
        V: Visitor<'de>,
    {
        // An empty array is presented as unit so `()`-shaped targets accept it.
        if self.len == 0 {
            visitor.visit_unit()
        } else {
            visitor.visit_seq(self)
        }
    }

    forward_to_deserialize! {
        deserialize_bool();
        deserialize_u8();
        deserialize_u16();
        deserialize_u32();
        deserialize_u64();
        deserialize_i8();
        deserialize_i16();
        deserialize_i32();
        deserialize_i64();
        deserialize_f32();
        deserialize_f64();
        deserialize_char();
        deserialize_str();
        deserialize_string();
        deserialize_unit();
        deserialize_option();
        deserialize_seq();
        deserialize_bytes();
        deserialize_map();
        deserialize_unit_struct(name: &'static str);
        deserialize_newtype_struct(name: &'static str);
        deserialize_tuple_struct(name: &'static str, len: usize);
        deserialize_struct(name: &'static str, fields: &'static [&'static str]);
        deserialize_tuple(len: usize);
        deserialize_enum(name: &'static str, variants: &'static [&'static str]);
        deserialize_identifier();
        deserialize_ignored_any();
        deserialize_byte_buf();
    }
}

impl<'de> SeqAccess<'de> for SeqDeserializer {
    type Error = crate::de::Error;

    fn next_element_seed<T>(&mut self, seed: T) -> crate::de::Result<Option<T::Value>>
    where
        T: DeserializeSeed<'de>,
    {
        match self.iter.next() {
            None => Ok(None),
            Some(value) => {
                self.len -= 1;
                let de = Deserializer::new_with_options(value, self.options.clone());
                match seed.deserialize(de) {
                    Ok(value) => Ok(Some(value)),
                    Err(err) => Err(err),
                }
            }
        }
    }

    fn size_hint(&self) -> Option<usize> {
        Some(self.len)
    }
}

// Walks the entries of a Document; `value` buffers the pending value between
// next_key_seed and next_value_seed per the MapAccess protocol.
pub(crate) struct MapDeserializer {
    pub(crate) iter: IntoIter,
    pub(crate) value: Option<Bson>,
    pub(crate) len: usize,
    pub(crate) options: DeserializerOptions,
}

impl MapDeserializer {
    pub(crate) fn new(doc: Document) -> Self {
        let len = doc.len();
        MapDeserializer {
            iter: doc.into_iter(),
            len,
            value: None,
            options: Default::default(),
        }
    }
}

impl<'de> MapAccess<'de> for MapDeserializer {
    type Error = crate::de::Error;

    fn next_key_seed<K>(&mut self, seed: K) -> crate::de::Result<Option<K::Value>>
    where
        K: DeserializeSeed<'de>,
    {
        match self.iter.next() {
            Some((key, value)) => {
                self.len -= 1;
                // Stash the value for the next_value_seed call that follows.
                self.value = Some(value);
                let de = Deserializer::new_with_options(Bson::String(key), self.options.clone());
                match seed.deserialize(de) {
                    Ok(val) => Ok(Some(val)),
                    Err(e) => Err(e),
                }
            }
            None => Ok(None),
        }
    }

    fn next_value_seed<V>(&mut self, seed: V) -> crate::de::Result<V::Value>
    where
        V: DeserializeSeed<'de>,
    {
        let value = self.value.take().ok_or(crate::de::Error::EndOfStream)?;
        let de = Deserializer::new_with_options(value, self.options.clone());
        seed.deserialize(de)
    }

    fn size_hint(&self) -> Option<usize> {
        Some(self.len)
    }
}

impl<'de> de::Deserializer<'de> for MapDeserializer {
    type Error = crate::de::Error;

    #[inline]
    fn deserialize_any<V>(self, visitor: V) -> crate::de::Result<V::Value>
    where
        V: Visitor<'de>,
    {
        visitor.visit_map(self)
    }

    forward_to_deserialize! {
        deserialize_bool();
        deserialize_u8();
        deserialize_u16();
        deserialize_u32();
        deserialize_u64();
        deserialize_i8();
        deserialize_i16();
        deserialize_i32();
        deserialize_i64();
        deserialize_f32();
        deserialize_f64();
        deserialize_char();
        deserialize_str();
        deserialize_string();
        deserialize_unit();
        deserialize_option();
        deserialize_seq();
        deserialize_bytes();
        deserialize_map();
        deserialize_unit_struct(name: &'static str);
        deserialize_newtype_struct(name: &'static str);
        deserialize_tuple_struct(name: &'static str, len: usize);
        deserialize_struct(name: &'static str, fields: &'static [&'static str]);
        deserialize_tuple(len: usize);
        deserialize_enum(name: &'static str, variants: &'static [&'static str]);
        deserialize_identifier();
        deserialize_ignored_any();
        deserialize_byte_buf();
    }
}

// The impls below all follow the same pattern: deserialize a full Bson value
// and accept only the one matching variant, erroring otherwise.

impl<'de> Deserialize<'de> for Timestamp {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        match Bson::deserialize(deserializer)? {
            Bson::Timestamp(timestamp) => Ok(timestamp),
            _ => Err(D::Error::custom("expecting Timestamp")),
        }
    }
}

impl<'de> Deserialize<'de> for Regex {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        match Bson::deserialize(deserializer)? {
            Bson::RegularExpression(regex) => Ok(regex),
            _ => Err(D::Error::custom("expecting Regex")),
        }
    }
}

impl<'de> Deserialize<'de> for JavaScriptCodeWithScope {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        match Bson::deserialize(deserializer)? {
            Bson::JavaScriptCodeWithScope(code_with_scope) => Ok(code_with_scope),
            _ => Err(D::Error::custom("expecting JavaScriptCodeWithScope")),
        }
    }
}

impl<'de> Deserialize<'de> for Binary {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        match Bson::deserialize(deserializer)? {
            Bson::Binary(binary) => Ok(binary),
            d => Err(D::Error::custom(format!(
                "expecting Binary but got {:?} instead",
                d
            ))),
        }
    }
}

impl<'de> Deserialize<'de> for Decimal128 {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        match Bson::deserialize(deserializer)? {
            Bson::Decimal128(d128) => Ok(d128),
            o => Err(D::Error::custom(format!(
                "expecting Decimal128, got {:?}",
                o
            ))),
        }
    }
}

impl<'de> Deserialize<'de> for DateTime {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        match Bson::deserialize(deserializer)? {
            Bson::DateTime(dt) => Ok(dt),
            _ => Err(D::Error::custom("expecting DateTime")),
        }
    }
}

impl<'de> Deserialize<'de> for DbPointer {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        match Bson::deserialize(deserializer)? {
            Bson::DbPointer(db_pointer) => Ok(db_pointer),
            _ => Err(D::Error::custom("expecting DbPointer")),
        }
    }
}
29.881676
100
0.478139
d5e1c061bf0bab92f8c1c9b8ca551a27e33d329a
109,411
//! Error Reporting Code for the inference engine //! //! Because of the way inference, and in particular region inference, //! works, it often happens that errors are not detected until far after //! the relevant line of code has been type-checked. Therefore, there is //! an elaborate system to track why a particular constraint in the //! inference graph arose so that we can explain to the user what gave //! rise to a particular error. //! //! The basis of the system are the "origin" types. An "origin" is the //! reason that a constraint or inference variable arose. There are //! different "origin" enums for different kinds of constraints/variables //! (e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has //! a span, but also more information so that we can generate a meaningful //! error message. //! //! Having a catalog of all the different reasons an error can arise is //! also useful for other reasons, like cross-referencing FAQs etc, though //! we are not really taking advantage of this yet. //! //! # Region Inference //! //! Region inference is particularly tricky because it always succeeds "in //! the moment" and simply registers a constraint. Then, at the end, we //! can compute the full graph and report errors, so we need to be able to //! store and later report what gave rise to the conflicting constraints. //! //! # Subtype Trace //! //! Determining whether `T1 <: T2` often involves a number of subtypes and //! subconstraints along the way. A "TypeTrace" is an extended version //! of an origin that traces the types and other values that were being //! compared. It is not necessarily comprehensive (in fact, at the time of //! this writing it only tracks the root values being compared) but I'd //! like to extend it to include significant "waypoints". For example, if //! you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2 //! <: T4` fails, I'd like the trace to include enough information to say //! 
"in the 2nd element of the tuple". Similarly, failures when comparing //! arguments or return types in fn types should be able to cite the //! specific position, etc. //! //! # Reality vs plan //! //! Of course, there is still a LOT of code in typeck that has yet to be //! ported to this system, and which relies on string concatenation at the //! time of error detection. use super::lexical_region_resolve::RegionResolutionError; use super::region_constraints::GenericKind; use super::{InferCtxt, RegionVariableOrigin, SubregionOrigin, TypeTrace, ValuePairs}; use crate::infer; use crate::infer::error_reporting::nice_region_error::find_anon_type::find_anon_type; use crate::traits::error_reporting::report_object_safety_error; use crate::traits::{ IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode, StatementAsExpression, }; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_errors::{pluralize, struct_span_err}; use rustc_errors::{Applicability, DiagnosticBuilder, DiagnosticStyledString}; use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_hir::lang_items::LangItem; use rustc_hir::{Item, ItemKind, Node}; use rustc_middle::dep_graph::DepContext; use rustc_middle::ty::error::TypeError; use rustc_middle::ty::{ self, subst::{GenericArgKind, Subst, SubstsRef}, Region, Ty, TyCtxt, TypeFoldable, }; use rustc_span::{sym, BytePos, DesugaringKind, MultiSpan, Pos, Span}; use rustc_target::spec::abi; use std::ops::ControlFlow; use std::{cmp, fmt, iter}; mod note; mod need_type_info; pub use need_type_info::TypeAnnotationNeeded; pub mod nice_region_error; pub(super) fn note_and_explain_region( tcx: TyCtxt<'tcx>, err: &mut DiagnosticBuilder<'_>, prefix: &str, region: ty::Region<'tcx>, suffix: &str, alt_span: Option<Span>, ) { let (description, span) = match *region { ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReStatic => { msg_span_from_free_region(tcx, region, alt_span) } ty::ReEmpty(ty::UniverseIndex::ROOT) => ("the empty 
lifetime".to_owned(), alt_span), // uh oh, hope no user ever sees THIS
        ty::ReEmpty(ui) => (format!("the empty lifetime in universe {:?}", ui), alt_span),
        ty::RePlaceholder(_) => return,

        // FIXME(#13998) RePlaceholder should probably print like
        // ReFree rather than dumping Debug output on the user.
        //
        // We shouldn't really be having unification failures with ReVar
        // and ReLateBound though.
        ty::ReVar(_) | ty::ReLateBound(..) | ty::ReErased => {
            (format!("lifetime {:?}", region), alt_span)
        }
    };

    emit_msg_span(err, prefix, description, span, suffix);
}

/// Like `note_and_explain_region`, but restricted to free regions: computes the
/// description/span pair and emits it via `emit_msg_span`.
pub(super) fn note_and_explain_free_region(
    tcx: TyCtxt<'tcx>,
    err: &mut DiagnosticBuilder<'_>,
    prefix: &str,
    region: ty::Region<'tcx>,
    suffix: &str,
) {
    let (description, span) = msg_span_from_free_region(tcx, region, None);
    emit_msg_span(err, prefix, description, span, suffix);
}

/// Maps a free-ish region to a human-readable description plus an optional
/// span to attach the note to. Early-bound/free regions get a span derived
/// from their binding scope; `'static` and empty regions fall back to
/// `alt_span`. Anything else is a compiler bug here (`bug!`).
fn msg_span_from_free_region(
    tcx: TyCtxt<'tcx>,
    region: ty::Region<'tcx>,
    alt_span: Option<Span>,
) -> (String, Option<Span>) {
    match *region {
        ty::ReEarlyBound(_) | ty::ReFree(_) => {
            msg_span_from_early_bound_and_free_regions(tcx, region)
        }
        ty::ReStatic => ("the static lifetime".to_owned(), alt_span),
        ty::ReEmpty(ty::UniverseIndex::ROOT) => ("an empty lifetime".to_owned(), alt_span),
        ty::ReEmpty(ui) => (format!("an empty lifetime in universe {:?}", ui), alt_span),
        _ => bug!("{:?}", region),
    }
}

/// Builds a "the lifetime `'x` as defined on <scope>" style message for an
/// early-bound or free region, pointing at the lifetime parameter's own span
/// when the generics declare it by name.
fn msg_span_from_early_bound_and_free_regions(
    tcx: TyCtxt<'tcx>,
    region: ty::Region<'tcx>,
) -> (String, Option<Span>) {
    let sm = tcx.sess.source_map();

    let scope = region.free_region_binding_scope(tcx);
    let node = tcx.hir().local_def_id_to_hir_id(scope.expect_local());
    // Classify the binding scope so the message can say "function body",
    // "impl", etc. (tags come from the *_scope_tag helpers below).
    let tag = match tcx.hir().find(node) {
        Some(Node::Block(_) | Node::Expr(_)) => "body",
        Some(Node::Item(it)) => item_scope_tag(&it),
        Some(Node::TraitItem(it)) => trait_item_scope_tag(&it),
        Some(Node::ImplItem(it)) => impl_item_scope_tag(&it),
        Some(Node::ForeignItem(it)) => foreign_item_scope_tag(&it),
        _ => unreachable!(),
    };
    let (prefix, span) = match *region {
        ty::ReEarlyBound(ref
br) => { let mut sp = sm.guess_head_span(tcx.hir().span(node)); if let Some(param) = tcx.hir().get_generics(scope).and_then(|generics| generics.get_named(br.name)) { sp = param.span; } (format!("the lifetime `{}` as defined on", br.name), sp) } ty::ReFree(ty::FreeRegion { bound_region: ty::BoundRegionKind::BrNamed(_, name), .. }) => { let mut sp = sm.guess_head_span(tcx.hir().span(node)); if let Some(param) = tcx.hir().get_generics(scope).and_then(|generics| generics.get_named(name)) { sp = param.span; } (format!("the lifetime `{}` as defined on", name), sp) } ty::ReFree(ref fr) => match fr.bound_region { ty::BrAnon(idx) => { if let Some((ty, _)) = find_anon_type(tcx, region, &fr.bound_region) { ("the anonymous lifetime defined on".to_string(), ty.span) } else { ( format!("the anonymous lifetime #{} defined on", idx + 1), tcx.hir().span(node), ) } } _ => ( format!("the lifetime `{}` as defined on", region), sm.guess_head_span(tcx.hir().span(node)), ), }, _ => bug!(), }; let (msg, opt_span) = explain_span(tcx, tag, span); (format!("{} {}", prefix, msg), opt_span) } fn emit_msg_span( err: &mut DiagnosticBuilder<'_>, prefix: &str, description: String, span: Option<Span>, suffix: &str, ) { let message = format!("{}{}{}", prefix, description, suffix); if let Some(span) = span { err.span_note(span, &message); } else { err.note(&message); } } fn item_scope_tag(item: &hir::Item<'_>) -> &'static str { match item.kind { hir::ItemKind::Impl { .. } => "impl", hir::ItemKind::Struct(..) => "struct", hir::ItemKind::Union(..) => "union", hir::ItemKind::Enum(..) => "enum", hir::ItemKind::Trait(..) => "trait", hir::ItemKind::Fn(..) => "function body", _ => "item", } } fn trait_item_scope_tag(item: &hir::TraitItem<'_>) -> &'static str { match item.kind { hir::TraitItemKind::Fn(..) => "method body", hir::TraitItemKind::Const(..) | hir::TraitItemKind::Type(..) 
=> "associated item",
    }
}

/// Human-readable tag for the kind of impl item a region's binding scope is.
fn impl_item_scope_tag(item: &hir::ImplItem<'_>) -> &'static str {
    match item.kind {
        hir::ImplItemKind::Fn(..) => "method body",
        hir::ImplItemKind::Const(..) | hir::ImplItemKind::TyAlias(..) => "associated item",
    }
}

/// Human-readable tag for the kind of foreign (`extern` block) item.
fn foreign_item_scope_tag(item: &hir::ForeignItem<'_>) -> &'static str {
    match item.kind {
        hir::ForeignItemKind::Fn(..) => "method body",
        hir::ForeignItemKind::Static(..) | hir::ForeignItemKind::Type => "associated item",
    }
}

/// Formats "the <heading> at <line>:<col>" for a span, returning the span
/// alongside so the caller can attach the note to it. Column is 1-based for
/// display (hence `+ 1`).
fn explain_span(tcx: TyCtxt<'tcx>, heading: &str, span: Span) -> (String, Option<Span>) {
    let lo = tcx.sess.source_map().lookup_char_pos(span.lo());
    (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize() + 1), Some(span))
}

/// Builds the E0700 diagnostic: an `impl Trait` hidden type captures a
/// lifetime that does not appear in the `impl Trait`'s bounds. The match on
/// `hidden_region` chooses how much detail the explanation can honestly give.
pub fn unexpected_hidden_region_diagnostic(
    tcx: TyCtxt<'tcx>,
    span: Span,
    hidden_ty: Ty<'tcx>,
    hidden_region: ty::Region<'tcx>,
) -> DiagnosticBuilder<'tcx> {
    let mut err = struct_span_err!(
        tcx.sess,
        span,
        E0700,
        "hidden type for `impl Trait` captures lifetime that does not appear in bounds",
    );

    // Explain the region we are capturing.
    match hidden_region {
        ty::ReEmpty(ty::UniverseIndex::ROOT) => {
            // All lifetimes shorter than the function body are `empty` in
            // lexical region resolution. The default explanation of "an empty
            // lifetime" isn't really accurate here.
            let message = format!(
                "hidden type `{}` captures lifetime smaller than the function body",
                hidden_ty
            );
            err.span_note(span, &message);
        }
        ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReStatic | ty::ReEmpty(_) => {
            // Assuming regionck succeeded (*), we ought to always be
            // capturing *some* region from the fn header, and hence it
            // ought to be free. So under normal circumstances, we will go
            // down this path which gives a decent human readable
            // explanation.
            //
            // (*) if not, the `tainted_by_errors` field would be set to
            // `Some(ErrorReported)` in any case, so we wouldn't be here at all.
note_and_explain_free_region( tcx, &mut err, &format!("hidden type `{}` captures ", hidden_ty), hidden_region, "", ); } _ => { // Ugh. This is a painful case: the hidden region is not one // that we can easily summarize or explain. This can happen // in a case like // `src/test/ui/multiple-lifetimes/ordinary-bounds-unsuited.rs`: // // ``` // fn upper_bounds<'a, 'b>(a: Ordinary<'a>, b: Ordinary<'b>) -> impl Trait<'a, 'b> { // if condition() { a } else { b } // } // ``` // // Here the captured lifetime is the intersection of `'a` and // `'b`, which we can't quite express. // We can at least report a really cryptic error for now. note_and_explain_region( tcx, &mut err, &format!("hidden type `{}` captures ", hidden_ty), hidden_region, "", None, ); } } err } impl<'a, 'tcx> InferCtxt<'a, 'tcx> { pub fn report_region_errors(&self, errors: &Vec<RegionResolutionError<'tcx>>) { debug!("report_region_errors(): {} errors to start", errors.len()); // try to pre-process the errors, which will group some of them // together into a `ProcessedErrors` group: let errors = self.process_errors(errors); debug!("report_region_errors: {} errors after preprocessing", errors.len()); for error in errors { debug!("report_region_errors: error = {:?}", error); if !self.try_report_nice_region_error(&error) { match error.clone() { // These errors could indicate all manner of different // problems with many different solutions. Rather // than generate a "one size fits all" error, what we // attempt to do is go through a number of specific // scenarios and try to find the best way to present // the error. 
If all of these fails, we fall back to a rather // general bit of code that displays the error information RegionResolutionError::ConcreteFailure(origin, sub, sup) => { if sub.is_placeholder() || sup.is_placeholder() { self.report_placeholder_failure(origin, sub, sup).emit(); } else { self.report_concrete_failure(origin, sub, sup).emit(); } } RegionResolutionError::GenericBoundFailure(origin, param_ty, sub) => { self.report_generic_bound_failure( origin.span(), Some(origin), param_ty, sub, ); } RegionResolutionError::SubSupConflict( _, var_origin, sub_origin, sub_r, sup_origin, sup_r, ) => { if sub_r.is_placeholder() { self.report_placeholder_failure(sub_origin, sub_r, sup_r).emit(); } else if sup_r.is_placeholder() { self.report_placeholder_failure(sup_origin, sub_r, sup_r).emit(); } else { self.report_sub_sup_conflict( var_origin, sub_origin, sub_r, sup_origin, sup_r, ); } } RegionResolutionError::UpperBoundUniverseConflict( _, _, var_universe, sup_origin, sup_r, ) => { assert!(sup_r.is_placeholder()); // Make a dummy value for the "sub region" -- // this is the initial value of the // placeholder. In practice, we expect more // tailored errors that don't really use this // value. let sub_r = self.tcx.mk_region(ty::ReEmpty(var_universe)); self.report_placeholder_failure(sup_origin, sub_r, sup_r).emit(); } RegionResolutionError::MemberConstraintFailure { hidden_ty, member_region, span, } => { let hidden_ty = self.resolve_vars_if_possible(hidden_ty); unexpected_hidden_region_diagnostic( self.tcx, span, hidden_ty, member_region, ) .emit(); } } } } } // This method goes through all the errors and try to group certain types // of error together, for the purpose of suggesting explicit lifetime // parameters to the user. This is done so that we can have a more // complete view of what lifetimes should be the same. // If the return value is an empty vector, it means that processing // failed (so the return value of this method should not be used). 
// // The method also attempts to weed out messages that seem like // duplicates that will be unhelpful to the end-user. But // obviously it never weeds out ALL errors. fn process_errors( &self, errors: &[RegionResolutionError<'tcx>], ) -> Vec<RegionResolutionError<'tcx>> { debug!("process_errors()"); // We want to avoid reporting generic-bound failures if we can // avoid it: these have a very high rate of being unhelpful in // practice. This is because they are basically secondary // checks that test the state of the region graph after the // rest of inference is done, and the other kinds of errors // indicate that the region constraint graph is internally // inconsistent, so these test results are likely to be // meaningless. // // Therefore, we filter them out of the list unless they are // the only thing in the list. let is_bound_failure = |e: &RegionResolutionError<'tcx>| match *e { RegionResolutionError::GenericBoundFailure(..) => true, RegionResolutionError::ConcreteFailure(..) | RegionResolutionError::SubSupConflict(..) | RegionResolutionError::UpperBoundUniverseConflict(..) | RegionResolutionError::MemberConstraintFailure { .. } => false, }; let mut errors = if errors.iter().all(|e| is_bound_failure(e)) { errors.to_owned() } else { errors.iter().filter(|&e| !is_bound_failure(e)).cloned().collect() }; // sort the errors by span, for better error message stability. errors.sort_by_key(|u| match *u { RegionResolutionError::ConcreteFailure(ref sro, _, _) => sro.span(), RegionResolutionError::GenericBoundFailure(ref sro, _, _) => sro.span(), RegionResolutionError::SubSupConflict(_, ref rvo, _, _, _, _) => rvo.span(), RegionResolutionError::UpperBoundUniverseConflict(_, ref rvo, _, _, _) => rvo.span(), RegionResolutionError::MemberConstraintFailure { span, .. 
} => span, }); errors } /// Adds a note if the types come from similarly named crates fn check_and_note_conflicting_crates( &self, err: &mut DiagnosticBuilder<'_>, terr: &TypeError<'tcx>, ) { use hir::def_id::CrateNum; use rustc_hir::definitions::DisambiguatedDefPathData; use ty::print::Printer; use ty::subst::GenericArg; struct AbsolutePathPrinter<'tcx> { tcx: TyCtxt<'tcx>, } struct NonTrivialPath; impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> { type Error = NonTrivialPath; type Path = Vec<String>; type Region = !; type Type = !; type DynExistential = !; type Const = !; fn tcx<'a>(&'a self) -> TyCtxt<'tcx> { self.tcx } fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> { Err(NonTrivialPath) } fn print_type(self, _ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> { Err(NonTrivialPath) } fn print_dyn_existential( self, _predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>, ) -> Result<Self::DynExistential, Self::Error> { Err(NonTrivialPath) } fn print_const(self, _ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error> { Err(NonTrivialPath) } fn path_crate(self, cnum: CrateNum) -> Result<Self::Path, Self::Error> { Ok(vec![self.tcx.crate_name(cnum).to_string()]) } fn path_qualified( self, _self_ty: Ty<'tcx>, _trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<Self::Path, Self::Error> { Err(NonTrivialPath) } fn path_append_impl( self, _print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>, _disambiguated_data: &DisambiguatedDefPathData, _self_ty: Ty<'tcx>, _trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<Self::Path, Self::Error> { Err(NonTrivialPath) } fn path_append( self, print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>, disambiguated_data: &DisambiguatedDefPathData, ) -> Result<Self::Path, Self::Error> { let mut path = print_prefix(self)?; path.push(disambiguated_data.to_string()); Ok(path) } fn path_generic_args( self, print_prefix: impl FnOnce(Self) -> 
Result<Self::Path, Self::Error>, _args: &[GenericArg<'tcx>], ) -> Result<Self::Path, Self::Error> { print_prefix(self) } } let report_path_match = |err: &mut DiagnosticBuilder<'_>, did1: DefId, did2: DefId| { // Only external crates, if either is from a local // module we could have false positives if !(did1.is_local() || did2.is_local()) && did1.krate != did2.krate { let abs_path = |def_id| AbsolutePathPrinter { tcx: self.tcx }.print_def_path(def_id, &[]); // We compare strings because DefPath can be different // for imported and non-imported crates let same_path = || -> Result<_, NonTrivialPath> { Ok(self.tcx.def_path_str(did1) == self.tcx.def_path_str(did2) || abs_path(did1)? == abs_path(did2)?) }; if same_path().unwrap_or(false) { let crate_name = self.tcx.crate_name(did1.krate); err.note(&format!( "perhaps two different versions of crate `{}` are being used?", crate_name )); } } }; match *terr { TypeError::Sorts(ref exp_found) => { // if they are both "path types", there's a chance of ambiguity // due to different versions of the same crate if let (&ty::Adt(exp_adt, _), &ty::Adt(found_adt, _)) = (exp_found.expected.kind(), exp_found.found.kind()) { report_path_match(err, exp_adt.did, found_adt.did); } } TypeError::Traits(ref exp_found) => { report_path_match(err, exp_found.expected, exp_found.found); } _ => (), // FIXME(#22750) handle traits and stuff } } fn note_error_origin( &self, err: &mut DiagnosticBuilder<'tcx>, cause: &ObligationCause<'tcx>, exp_found: Option<ty::error::ExpectedFound<Ty<'tcx>>>, ) { match cause.code { ObligationCauseCode::Pattern { origin_expr: true, span: Some(span), root_ty } => { let ty = self.resolve_vars_if_possible(root_ty); if ty.is_suggestable() { // don't show type `_` err.span_label(span, format!("this expression has type `{}`", ty)); } if let Some(ty::error::ExpectedFound { found, .. 
}) = exp_found { if ty.is_box() && ty.boxed_ty() == found { if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) { err.span_suggestion( span, "consider dereferencing the boxed value", format!("*{}", snippet), Applicability::MachineApplicable, ); } } } } ObligationCauseCode::Pattern { origin_expr: false, span: Some(span), .. } => { err.span_label(span, "expected due to this"); } ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause { semi_span, source, ref prior_arms, last_ty, scrut_hir_id, opt_suggest_box_span, arm_span, scrut_span, .. }) => match source { hir::MatchSource::IfLetDesugar { .. } => { let msg = "`if let` arms have incompatible types"; err.span_label(cause.span, msg); if let Some(ret_sp) = opt_suggest_box_span { self.suggest_boxing_for_return_impl_trait( err, ret_sp, prior_arms.iter().chain(std::iter::once(&arm_span)).map(|s| *s), ); } } hir::MatchSource::TryDesugar => { if let Some(ty::error::ExpectedFound { expected, .. }) = exp_found { let scrut_expr = self.tcx.hir().expect_expr(scrut_hir_id); let scrut_ty = if let hir::ExprKind::Call(_, args) = &scrut_expr.kind { let arg_expr = args.first().expect("try desugaring call w/out arg"); self.in_progress_typeck_results.and_then(|typeck_results| { typeck_results.borrow().expr_ty_opt(arg_expr) }) } else { bug!("try desugaring w/out call expr as scrutinee"); }; match scrut_ty { Some(ty) if expected == ty => { let source_map = self.tcx.sess.source_map(); err.span_suggestion( source_map.end_point(cause.span), "try removing this `?`", "".to_string(), Applicability::MachineApplicable, ); } _ => {} } } } _ => { // `last_ty` can be `!`, `expected` will have better info when present. let t = self.resolve_vars_if_possible(match exp_found { Some(ty::error::ExpectedFound { expected, .. 
}) => expected, _ => last_ty, }); let source_map = self.tcx.sess.source_map(); let mut any_multiline_arm = source_map.is_multiline(arm_span); if prior_arms.len() <= 4 { for sp in prior_arms { any_multiline_arm |= source_map.is_multiline(*sp); err.span_label(*sp, format!("this is found to be of type `{}`", t)); } } else if let Some(sp) = prior_arms.last() { any_multiline_arm |= source_map.is_multiline(*sp); err.span_label( *sp, format!("this and all prior arms are found to be of type `{}`", t), ); } let outer_error_span = if any_multiline_arm { // Cover just `match` and the scrutinee expression, not // the entire match body, to reduce diagram noise. cause.span.shrink_to_lo().to(scrut_span) } else { cause.span }; let msg = "`match` arms have incompatible types"; err.span_label(outer_error_span, msg); if let Some((sp, boxed)) = semi_span { if let (StatementAsExpression::NeedsBoxing, [.., prior_arm]) = (boxed, &prior_arms[..]) { err.multipart_suggestion( "consider removing this semicolon and boxing the expressions", vec![ (prior_arm.shrink_to_lo(), "Box::new(".to_string()), (prior_arm.shrink_to_hi(), ")".to_string()), (arm_span.shrink_to_lo(), "Box::new(".to_string()), (arm_span.shrink_to_hi(), ")".to_string()), (sp, String::new()), ], Applicability::HasPlaceholders, ); } else if matches!(boxed, StatementAsExpression::NeedsBoxing) { err.span_suggestion_short( sp, "consider removing this semicolon and boxing the expressions", String::new(), Applicability::MachineApplicable, ); } else { err.span_suggestion_short( sp, "consider removing this semicolon", String::new(), Applicability::MachineApplicable, ); } } if let Some(ret_sp) = opt_suggest_box_span { // Get return type span and point to it. 
self.suggest_boxing_for_return_impl_trait( err, ret_sp, prior_arms.iter().chain(std::iter::once(&arm_span)).map(|s| *s), ); } } }, ObligationCauseCode::IfExpression(box IfExpressionCause { then, else_sp, outer, semicolon, opt_suggest_box_span, }) => { err.span_label(then, "expected because of this"); if let Some(sp) = outer { err.span_label(sp, "`if` and `else` have incompatible types"); } if let Some((sp, boxed)) = semicolon { if matches!(boxed, StatementAsExpression::NeedsBoxing) { err.multipart_suggestion( "consider removing this semicolon and boxing the expression", vec![ (then.shrink_to_lo(), "Box::new(".to_string()), (then.shrink_to_hi(), ")".to_string()), (else_sp.shrink_to_lo(), "Box::new(".to_string()), (else_sp.shrink_to_hi(), ")".to_string()), (sp, String::new()), ], Applicability::MachineApplicable, ); } else { err.span_suggestion_short( sp, "consider removing this semicolon", String::new(), Applicability::MachineApplicable, ); } } if let Some(ret_sp) = opt_suggest_box_span { self.suggest_boxing_for_return_impl_trait( err, ret_sp, vec![then, else_sp].into_iter(), ); } } _ => (), } } fn suggest_boxing_for_return_impl_trait( &self, err: &mut DiagnosticBuilder<'tcx>, return_sp: Span, arm_spans: impl Iterator<Item = Span>, ) { err.multipart_suggestion( "you could change the return type to be a boxed trait object", vec![ (return_sp.with_hi(return_sp.lo() + BytePos(4)), "Box<dyn".to_string()), (return_sp.shrink_to_hi(), ">".to_string()), ], Applicability::MaybeIncorrect, ); let sugg = arm_spans .flat_map(|sp| { vec![ (sp.shrink_to_lo(), "Box::new(".to_string()), (sp.shrink_to_hi(), ")".to_string()), ] .into_iter() }) .collect::<Vec<_>>(); err.multipart_suggestion( "if you change the return type to expect trait objects, box the returned expressions", sugg, Applicability::MaybeIncorrect, ); } /// Given that `other_ty` is the same as a type argument for `name` in `sub`, populate `value` /// highlighting `name` and every type argument that isn't at `pos` (which 
is `other_ty`), and /// populate `other_value` with `other_ty`. /// /// ```text /// Foo<Bar<Qux>> /// ^^^^--------^ this is highlighted /// | | /// | this type argument is exactly the same as the other type, not highlighted /// this is highlighted /// Bar<Qux> /// -------- this type is the same as a type argument in the other type, not highlighted /// ``` fn highlight_outer( &self, value: &mut DiagnosticStyledString, other_value: &mut DiagnosticStyledString, name: String, sub: ty::subst::SubstsRef<'tcx>, pos: usize, other_ty: Ty<'tcx>, ) { // `value` and `other_value` hold two incomplete type representation for display. // `name` is the path of both types being compared. `sub` value.push_highlighted(name); let len = sub.len(); if len > 0 { value.push_highlighted("<"); } // Output the lifetimes for the first type let lifetimes = sub .regions() .map(|lifetime| { let s = lifetime.to_string(); if s.is_empty() { "'_".to_string() } else { s } }) .collect::<Vec<_>>() .join(", "); if !lifetimes.is_empty() { if sub.regions().count() < len { value.push_normal(lifetimes + ", "); } else { value.push_normal(lifetimes); } } // Highlight all the type arguments that aren't at `pos` and compare the type argument at // `pos` and `other_ty`. for (i, type_arg) in sub.types().enumerate() { if i == pos { let values = self.cmp(type_arg, other_ty); value.0.extend((values.0).0); other_value.0.extend((values.1).0); } else { value.push_highlighted(type_arg.to_string()); } if len > 0 && i != len - 1 { value.push_normal(", "); } } if len > 0 { value.push_highlighted(">"); } } /// If `other_ty` is the same as a type argument present in `sub`, highlight `path` in `t1_out`, /// as that is the difference to the other type. 
/// /// For the following code: /// /// ```no_run /// let x: Foo<Bar<Qux>> = foo::<Bar<Qux>>(); /// ``` /// /// The type error output will behave in the following way: /// /// ```text /// Foo<Bar<Qux>> /// ^^^^--------^ this is highlighted /// | | /// | this type argument is exactly the same as the other type, not highlighted /// this is highlighted /// Bar<Qux> /// -------- this type is the same as a type argument in the other type, not highlighted /// ``` fn cmp_type_arg( &self, mut t1_out: &mut DiagnosticStyledString, mut t2_out: &mut DiagnosticStyledString, path: String, sub: ty::subst::SubstsRef<'tcx>, other_path: String, other_ty: Ty<'tcx>, ) -> Option<()> { for (i, ta) in sub.types().enumerate() { if ta == other_ty { self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty); return Some(()); } if let ty::Adt(def, _) = ta.kind() { let path_ = self.tcx.def_path_str(def.did); if path_ == other_path { self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty); return Some(()); } } } None } /// Adds a `,` to the type representation only if it is appropriate. fn push_comma( &self, value: &mut DiagnosticStyledString, other_value: &mut DiagnosticStyledString, len: usize, pos: usize, ) { if len > 0 && pos != len - 1 { value.push_normal(", "); other_value.push_normal(", "); } } /// For generic types with parameters with defaults, remove the parameters corresponding to /// the defaults. This repeats a lot of the logic found in `ty::print::pretty`. fn strip_generic_default_params( &self, def_id: DefId, substs: ty::subst::SubstsRef<'tcx>, ) -> SubstsRef<'tcx> { let generics = self.tcx.generics_of(def_id); let mut num_supplied_defaults = 0; let default_params = generics.params.iter().rev().filter_map(|param| match param.kind { ty::GenericParamDefKind::Type { has_default: true, .. 
} => Some(param.def_id), ty::GenericParamDefKind::Const { has_default: true } => Some(param.def_id), _ => None, }); for (def_id, actual) in iter::zip(default_params, substs.iter().rev()) { match actual.unpack() { GenericArgKind::Const(c) => { if self.tcx.const_param_default(def_id).subst(self.tcx, substs) != c { break; } } GenericArgKind::Type(ty) => { if self.tcx.type_of(def_id).subst(self.tcx, substs) != ty { break; } } _ => break, } num_supplied_defaults += 1; } let len = generics.params.len(); let mut generics = generics.clone(); generics.params.truncate(len - num_supplied_defaults); substs.truncate_to(self.tcx, &generics) } /// Given two `fn` signatures highlight only sub-parts that are different. fn cmp_fn_sig( &self, sig1: &ty::PolyFnSig<'tcx>, sig2: &ty::PolyFnSig<'tcx>, ) -> (DiagnosticStyledString, DiagnosticStyledString) { let get_lifetimes = |sig| { use rustc_hir::def::Namespace; let mut s = String::new(); let (_, sig, reg) = ty::print::FmtPrinter::new(self.tcx, &mut s, Namespace::TypeNS) .name_all_regions(sig) .unwrap(); let lts: Vec<String> = reg.into_iter().map(|(_, kind)| kind.to_string()).collect(); (if lts.is_empty() { String::new() } else { format!("for<{}> ", lts.join(", ")) }, sig) }; let (lt1, sig1) = get_lifetimes(sig1); let (lt2, sig2) = get_lifetimes(sig2); // unsafe extern "C" for<'a> fn(&'a T) -> &'a T let mut values = ( DiagnosticStyledString::normal("".to_string()), DiagnosticStyledString::normal("".to_string()), ); // unsafe extern "C" for<'a> fn(&'a T) -> &'a T // ^^^^^^ values.0.push(sig1.unsafety.prefix_str(), sig1.unsafety != sig2.unsafety); values.1.push(sig2.unsafety.prefix_str(), sig1.unsafety != sig2.unsafety); // unsafe extern "C" for<'a> fn(&'a T) -> &'a T // ^^^^^^^^^^ if sig1.abi != abi::Abi::Rust { values.0.push(format!("extern {} ", sig1.abi), sig1.abi != sig2.abi); } if sig2.abi != abi::Abi::Rust { values.1.push(format!("extern {} ", sig2.abi), sig1.abi != sig2.abi); } // unsafe extern "C" for<'a> fn(&'a T) -> &'a T // 
^^^^^^^^ let lifetime_diff = lt1 != lt2; values.0.push(lt1, lifetime_diff); values.1.push(lt2, lifetime_diff); // unsafe extern "C" for<'a> fn(&'a T) -> &'a T // ^^^ values.0.push_normal("fn("); values.1.push_normal("fn("); // unsafe extern "C" for<'a> fn(&'a T) -> &'a T // ^^^^^ let len1 = sig1.inputs().len(); let len2 = sig2.inputs().len(); if len1 == len2 { for (i, (l, r)) in iter::zip(sig1.inputs(), sig2.inputs()).enumerate() { let (x1, x2) = self.cmp(l, r); (values.0).0.extend(x1.0); (values.1).0.extend(x2.0); self.push_comma(&mut values.0, &mut values.1, len1, i); } } else { for (i, l) in sig1.inputs().iter().enumerate() { values.0.push_highlighted(l.to_string()); if i != len1 - 1 { values.0.push_highlighted(", "); } } for (i, r) in sig2.inputs().iter().enumerate() { values.1.push_highlighted(r.to_string()); if i != len2 - 1 { values.1.push_highlighted(", "); } } } if sig1.c_variadic { if len1 > 0 { values.0.push_normal(", "); } values.0.push("...", !sig2.c_variadic); } if sig2.c_variadic { if len2 > 0 { values.1.push_normal(", "); } values.1.push("...", !sig1.c_variadic); } // unsafe extern "C" for<'a> fn(&'a T) -> &'a T // ^ values.0.push_normal(")"); values.1.push_normal(")"); // unsafe extern "C" for<'a> fn(&'a T) -> &'a T // ^^^^^^^^ let output1 = sig1.output(); let output2 = sig2.output(); let (x1, x2) = self.cmp(output1, output2); if !output1.is_unit() { values.0.push_normal(" -> "); (values.0).0.extend(x1.0); } if !output2.is_unit() { values.1.push_normal(" -> "); (values.1).0.extend(x2.0); } values } /// Compares two given types, eliding parts that are the same between them and highlighting /// relevant differences, and return two representation of those types for highlighted printing. 
fn cmp(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> (DiagnosticStyledString, DiagnosticStyledString) {
    debug!("cmp(t1={}, t1.kind={:?}, t2={}, t2.kind={:?})", t1, t1.kind(), t2, t2.kind());

    // helper functions

    // Loose equality used only for deciding whether to elide: treats an
    // unresolved int/float inference variable as equal to any concrete
    // int/float (and to another variable of the same class), since the
    // difference would not be meaningful to show to the user.
    fn equals<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
        match (a.kind(), b.kind()) {
            (a, b) if *a == *b => true,
            (&ty::Int(_), &ty::Infer(ty::InferTy::IntVar(_)))
            | (
                &ty::Infer(ty::InferTy::IntVar(_)),
                &ty::Int(_) | &ty::Infer(ty::InferTy::IntVar(_)),
            )
            | (&ty::Float(_), &ty::Infer(ty::InferTy::FloatVar(_)))
            | (
                &ty::Infer(ty::InferTy::FloatVar(_)),
                &ty::Float(_) | &ty::Infer(ty::InferTy::FloatVar(_)),
            ) => true,
            _ => false,
        }
    }

    // Renders `&'r mut ty` into `s`, highlighting only the `&`, lifetime and
    // mutability prefix; the referent type is printed unhighlighted.
    fn push_ty_ref<'tcx>(
        region: &ty::Region<'tcx>,
        ty: Ty<'tcx>,
        mutbl: hir::Mutability,
        s: &mut DiagnosticStyledString,
    ) {
        let mut r = region.to_string();
        if r == "'_" {
            // Elide the anonymous lifetime entirely rather than printing `&'_ `.
            r.clear();
        } else {
            r.push(' ');
        }
        s.push_highlighted(format!("&{}{}", r, mutbl.prefix_str()));
        s.push_normal(ty.to_string());
    }

    // process starts here
    match (t1.kind(), t2.kind()) {
        (&ty::Adt(def1, sub1), &ty::Adt(def2, sub2)) => {
            let sub_no_defaults_1 = self.strip_generic_default_params(def1.did, sub1);
            let sub_no_defaults_2 = self.strip_generic_default_params(def2.did, sub2);
            let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
            let path1 = self.tcx.def_path_str(def1.did);
            let path2 = self.tcx.def_path_str(def2.did);
            if def1.did == def2.did {
                // Easy case. Replace same types with `_` to shorten the output and highlight
                // the differing ones.
                //     let x: Foo<Bar, Qux> = y::<Foo<Quz, Qux>>();
                //     Foo<Bar, _>
                //     Foo<Quz, _>
                //         ---  ^ type argument elided
                //         |
                //         highlighted in output
                values.0.push_normal(path1);
                values.1.push_normal(path2);

                // Avoid printing out default generic parameters that are common to both
                // types.
                let len1 = sub_no_defaults_1.len();
                let len2 = sub_no_defaults_2.len();
                let common_len = cmp::min(len1, len2);
                let remainder1: Vec<_> = sub1.types().skip(common_len).collect();
                let remainder2: Vec<_> = sub2.types().skip(common_len).collect();
                // Count trailing, identical defaulted arguments; these can be
                // dropped from both sides without losing information.
                let common_default_params =
                    iter::zip(remainder1.iter().rev(), remainder2.iter().rev())
                        .filter(|(a, b)| a == b)
                        .count();
                let len = sub1.len() - common_default_params;
                // Index at which const generic arguments start (regions and
                // types come first in the substs ordering).
                let consts_offset = len - sub1.consts().count();

                // Only draw `<...>` if there're lifetime/type arguments.
                if len > 0 {
                    values.0.push_normal("<");
                    values.1.push_normal("<");
                }

                fn lifetime_display(lifetime: Region<'_>) -> String {
                    let s = lifetime.to_string();
                    if s.is_empty() { "'_".to_string() } else { s }
                }
                // At one point we'd like to elide all lifetimes here, they are irrelevant for
                // all diagnostics that use this output
                //
                //     Foo<'x, '_, Bar>
                //     Foo<'y, '_, Qux>
                //         ^^  ^^  --- type arguments are not elided
                //         |   |
                //         |   elided as they were the same
                //         not elided, they were different, but irrelevant
                let lifetimes = sub1.regions().zip(sub2.regions());
                for (i, lifetimes) in lifetimes.enumerate() {
                    let l1 = lifetime_display(lifetimes.0);
                    let l2 = lifetime_display(lifetimes.1);
                    if lifetimes.0 == lifetimes.1 {
                        values.0.push_normal("'_");
                        values.1.push_normal("'_");
                    } else {
                        values.0.push_highlighted(l1);
                        values.1.push_highlighted(l2);
                    }
                    self.push_comma(&mut values.0, &mut values.1, len, i);
                }

                // We're comparing two types with the same path, so we compare the type
                // arguments for both. If they are the same, do not highlight and elide from the
                // output.
                //     Foo<_, Bar>
                //     Foo<_, Qux>
                //         ^ elided type as this type argument was the same in both sides
                let type_arguments = sub1.types().zip(sub2.types());
                let regions_len = sub1.regions().count();
                let num_display_types = consts_offset - regions_len;
                for (i, (ta1, ta2)) in type_arguments.take(num_display_types).enumerate() {
                    // `i` is offset so `push_comma` sees the argument's overall
                    // position among all printed generic arguments.
                    let i = i + regions_len;
                    if ta1 == ta2 {
                        values.0.push_normal("_");
                        values.1.push_normal("_");
                    } else {
                        // Recurse so nested differences are highlighted too.
                        let (x1, x2) = self.cmp(ta1, ta2);
                        (values.0).0.extend(x1.0);
                        (values.1).0.extend(x2.0);
                    }
                    self.push_comma(&mut values.0, &mut values.1, len, i);
                }

                // Do the same for const arguments, if they are equal, do not highlight and
                // elide them from the output.
                let const_arguments = sub1.consts().zip(sub2.consts());
                for (i, (ca1, ca2)) in const_arguments.enumerate() {
                    let i = i + consts_offset;
                    if ca1 == ca2 {
                        values.0.push_normal("_");
                        values.1.push_normal("_");
                    } else {
                        values.0.push_highlighted(ca1.to_string());
                        values.1.push_highlighted(ca2.to_string());
                    }
                    self.push_comma(&mut values.0, &mut values.1, len, i);
                }

                // Close the type argument bracket.
                // Only draw `<...>` if there're lifetime/type arguments.
                if len > 0 {
                    values.0.push_normal(">");
                    values.1.push_normal(">");
                }
                values
            } else {
                // Check for case:
                //     let x: Foo<Bar<Qux> = foo::<Bar<Qux>>();
                //     Foo<Bar<Qux>
                //         ------- this type argument is exactly the same as the other type
                //     Bar<Qux>
                if self
                    .cmp_type_arg(
                        &mut values.0,
                        &mut values.1,
                        path1.clone(),
                        sub_no_defaults_1,
                        path2.clone(),
                        &t2,
                    )
                    .is_some()
                {
                    return values;
                }
                // Check for case:
                //     let x: Bar<Qux> = y:<Foo<Bar<Qux>>>();
                //     Bar<Qux>
                //     Foo<Bar<Qux>>
                //         ------- this type argument is exactly the same as the other type
                if self
                    .cmp_type_arg(
                        &mut values.1,
                        &mut values.0,
                        path2,
                        sub_no_defaults_2,
                        path1,
                        &t1,
                    )
                    .is_some()
                {
                    return values;
                }

                // We can't find anything in common, highlight relevant part of type path.
                //     let x: foo::bar::Baz<Qux> = y:<foo::bar::Bar<Zar>>();
                //     foo::bar::Baz<Qux>
                //     foo::bar::Bar<Zar>
                //               -------- this part of the path is different

                let t1_str = t1.to_string();
                let t2_str = t2.to_string();
                let min_len = t1_str.len().min(t2_str.len());

                const SEPARATOR: &str = "::";
                let separator_len = SEPARATOR.len();
                // Byte index of the first `::`-segment at which the two
                // rendered paths diverge; everything before it is shared.
                let split_idx: usize =
                    iter::zip(t1_str.split(SEPARATOR), t2_str.split(SEPARATOR))
                        .take_while(|(mod1_str, mod2_str)| mod1_str == mod2_str)
                        .map(|(mod_str, _)| mod_str.len() + separator_len)
                        .sum();

                debug!(
                    "cmp: separator_len={}, split_idx={}, min_len={}",
                    separator_len, split_idx, min_len
                );

                if split_idx >= min_len {
                    // paths are identical, highlight everything
                    (
                        DiagnosticStyledString::highlighted(t1_str),
                        DiagnosticStyledString::highlighted(t2_str),
                    )
                } else {
                    let (common, uniq1) = t1_str.split_at(split_idx);
                    let (_, uniq2) = t2_str.split_at(split_idx);
                    debug!("cmp: common={}, uniq1={}, uniq2={}", common, uniq1, uniq2);

                    values.0.push_normal(common);
                    values.0.push_highlighted(uniq1);
                    values.1.push_normal(common);
                    values.1.push_highlighted(uniq2);

                    values
                }
            }
        }

        // When finding T != &T, highlight only the borrow
        (&ty::Ref(r1, ref_ty1, mutbl1), _) if equals(&ref_ty1, &t2) => {
            let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
            push_ty_ref(&r1, ref_ty1, mutbl1, &mut values.0);
            values.1.push_normal(t2.to_string());
            values
        }
        (_, &ty::Ref(r2, ref_ty2, mutbl2)) if equals(&t1, &ref_ty2) => {
            let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
            values.0.push_normal(t1.to_string());
            push_ty_ref(&r2, ref_ty2, mutbl2, &mut values.1);
            values
        }

        // When encountering &T != &mut T, highlight only the borrow
        (&ty::Ref(r1, ref_ty1, mutbl1), &ty::Ref(r2, ref_ty2, mutbl2))
            if equals(&ref_ty1, &ref_ty2) =>
        {
            let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
            push_ty_ref(&r1, ref_ty1, mutbl1, &mut values.0);
            push_ty_ref(&r2, ref_ty2, mutbl2, &mut values.1);
            values
        }

        // When encountering tuples of the same size, highlight only the differing types
        (&ty::Tuple(substs1), &ty::Tuple(substs2)) if substs1.len() == substs2.len() => {
            let mut values =
                (DiagnosticStyledString::normal("("), DiagnosticStyledString::normal("("));
            let len = substs1.len();
            for (i, (left, right)) in substs1.types().zip(substs2.types()).enumerate() {
                let (x1, x2) = self.cmp(left, right);
                (values.0).0.extend(x1.0);
                (values.1).0.extend(x2.0);
                self.push_comma(&mut values.0, &mut values.1, len, i);
            }
            if len == 1 {
                // Keep the output for single element tuples as `(ty,)`.
                values.0.push_normal(",");
                values.1.push_normal(",");
            }
            values.0.push_normal(")");
            values.1.push_normal(")");
            values
        }

        (ty::FnDef(did1, substs1), ty::FnDef(did2, substs2)) => {
            let sig1 = self.tcx.fn_sig(*did1).subst(self.tcx, substs1);
            let sig2 = self.tcx.fn_sig(*did2).subst(self.tcx, substs2);
            let mut values = self.cmp_fn_sig(&sig1, &sig2);
            let path1 = format!(" {{{}}}", self.tcx.def_path_str_with_substs(*did1, substs1));
            let path2 = format!(" {{{}}}", self.tcx.def_path_str_with_substs(*did2, substs2));
            // Highlight the item paths only when they differ; the signature
            // diff is handled by `cmp_fn_sig` above.
            let same_path = path1 == path2;
            values.0.push(path1, !same_path);
            values.1.push(path2, !same_path);
            values
        }

        (ty::FnDef(did1, substs1), ty::FnPtr(sig2)) => {
            let sig1 = self.tcx.fn_sig(*did1).subst(self.tcx, substs1);
            let mut values = self.cmp_fn_sig(&sig1, sig2);
            values.0.push_highlighted(format!(
                " {{{}}}",
                self.tcx.def_path_str_with_substs(*did1, substs1)
            ));
            values
        }

        (ty::FnPtr(sig1), ty::FnDef(did2, substs2)) => {
            let sig2 = self.tcx.fn_sig(*did2).subst(self.tcx, substs2);
            let mut values = self.cmp_fn_sig(sig1, &sig2);
            values.1.push_normal(format!(
                " {{{}}}",
                self.tcx.def_path_str_with_substs(*did2, substs2)
            ));
            values
        }

        (ty::FnPtr(sig1), ty::FnPtr(sig2)) => self.cmp_fn_sig(sig1, sig2),

        _ => {
            if t1 == t2 {
                // The two types are the same, elide and don't highlight.
                (DiagnosticStyledString::normal("_"), DiagnosticStyledString::normal("_"))
            } else {
                // We couldn't find anything in common, highlight everything.
                (
                    DiagnosticStyledString::highlighted(t1.to_string()),
                    DiagnosticStyledString::highlighted(t2.to_string()),
                )
            }
        }
    }
}

/// Adds expected/found labels, notes and structured suggestions for a type
/// error to an already-created diagnostic.
///
/// - `secondary_span`: an optional extra labeled span (e.g. the definition
///   site responsible for the expectation).
/// - `values`: the expected/found pair; ignored (set to `None`) for
///   `TypeError::CyclicTy`, where expected-vs-found makes no sense.
/// - `terr`: the concrete type error being reported.
///
/// If the expected/found values cannot be rendered (derived error), the
/// diagnostic is cancelled and nothing is emitted.
pub fn note_type_err(
    &self,
    diag: &mut DiagnosticBuilder<'tcx>,
    cause: &ObligationCause<'tcx>,
    secondary_span: Option<(Span, String)>,
    mut values: Option<ValuePairs<'tcx>>,
    terr: &TypeError<'tcx>,
) {
    let span = cause.span(self.tcx);
    debug!("note_type_err cause={:?} values={:?}, terr={:?}", cause, values, terr);

    // For some types of errors, expected-found does not make
    // sense, so just ignore the values we were given.
    if let TypeError::CyclicTy(_) = terr {
        values = None;
    }

    // Collects the definition spans of opaque/foreign/closure-like types
    // (per `TyCategory`) appearing in the expected and found types, so each
    // one can get its own "this is the expected/found <kind>" label.
    struct OpaqueTypesVisitor<'tcx> {
        types: FxHashMap<TyCategory, FxHashSet<Span>>,
        expected: FxHashMap<TyCategory, FxHashSet<Span>>,
        found: FxHashMap<TyCategory, FxHashSet<Span>>,
        // Spans overlapping this one are skipped to avoid double-labeling
        // the primary error location.
        ignore_span: Span,
        tcx: TyCtxt<'tcx>,
    }

    impl<'tcx> OpaqueTypesVisitor<'tcx> {
        // Walks `expected` and `found` separately and files the collected
        // spans under the corresponding map.
        fn visit_expected_found(
            tcx: TyCtxt<'tcx>,
            expected: Ty<'tcx>,
            found: Ty<'tcx>,
            ignore_span: Span,
        ) -> Self {
            let mut types_visitor = OpaqueTypesVisitor {
                types: Default::default(),
                expected: Default::default(),
                found: Default::default(),
                ignore_span,
                tcx,
            };
            // The visitor puts all the relevant encountered types in `self.types`, but in
            // here we want to visit two separate types with no relation to each other, so we
            // move the results from `types` to `expected` or `found` as appropriate.
            expected.visit_with(&mut types_visitor);
            std::mem::swap(&mut types_visitor.expected, &mut types_visitor.types);
            found.visit_with(&mut types_visitor);
            std::mem::swap(&mut types_visitor.found, &mut types_visitor.types);
            types_visitor
        }

        // Emits all collected labels onto `err`.
        fn report(&self, err: &mut DiagnosticBuilder<'_>) {
            self.add_labels_for_types(err, "expected", &self.expected);
            self.add_labels_for_types(err, "found", &self.found);
        }

        // Labels every collected span; spans produced by `async fn`
        // desugaring get a dedicated note about checking the `Output` type
        // (emitted at most once per type category).
        fn add_labels_for_types(
            &self,
            err: &mut DiagnosticBuilder<'_>,
            target: &str,
            types: &FxHashMap<TyCategory, FxHashSet<Span>>,
        ) {
            for (key, values) in types.iter() {
                let count = values.len();
                let kind = key.descr();
                let mut returned_async_output_error = false;
                for &sp in values {
                    if sp.is_desugaring(DesugaringKind::Async) && !returned_async_output_error {
                        if &[sp] != err.span.primary_spans() {
                            let mut span: MultiSpan = sp.into();
                            span.push_span_label(
                                sp,
                                format!(
                                    "checked the `Output` of this `async fn`, {}{} {}{}",
                                    if count > 1 { "one of the " } else { "" },
                                    target,
                                    kind,
                                    pluralize!(count),
                                ),
                            );
                            err.span_note(
                                span,
                                "while checking the return type of the `async fn`",
                            );
                        } else {
                            err.span_label(
                                sp,
                                format!(
                                    "checked the `Output` of this `async fn`, {}{} {}{}",
                                    if count > 1 { "one of the " } else { "" },
                                    target,
                                    kind,
                                    pluralize!(count),
                                ),
                            );
                            err.note("while checking the return type of the `async fn`");
                        }
                        returned_async_output_error = true;
                    } else {
                        err.span_label(
                            sp,
                            format!(
                                "{}{} {}{}",
                                if count == 1 { "the " } else { "one of the " },
                                target,
                                kind,
                                pluralize!(count),
                            ),
                        );
                    }
                }
            }
        }
    }

    impl<'tcx> ty::fold::TypeVisitor<'tcx> for OpaqueTypesVisitor<'tcx> {
        fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
            if let Some((kind, def_id)) = TyCategory::from_ty(self.tcx, t) {
                let span = self.tcx.def_span(def_id);
                // Avoid cluttering the output when the "found" and error span overlap:
                //
                // error[E0308]: mismatched types
                //   --> $DIR/issue-20862.rs:2:5
                //    |
                // LL |     |y| x + y
                //    |     ^^^^^^^^^
                //    |     |
                //    |     the found closure
                //    |     expected `()`, found closure
                //    |
                //    = note: expected unit type `()`
                //                 found closure `[closure@$DIR/issue-20862.rs:2:5: 2:14 x:_]`
                if !self.ignore_span.overlaps(span) {
                    self.types.entry(kind).or_default().insert(span);
                }
            }
            t.super_visit_with(self)
        }
    }

    debug!("note_type_err(diag={:?})", diag);
    // Either a concrete expected/found pair, or a fixed noun ("type"/"trait")
    // when no usable pair is available.
    enum Mismatch<'a> {
        Variable(ty::error::ExpectedFound<Ty<'a>>),
        Fixed(&'static str),
    }
    let (expected_found, exp_found, is_simple_error) = match values {
        None => (None, Mismatch::Fixed("type"), false),
        Some(values) => {
            let (is_simple_error, exp_found) = match values {
                ValuePairs::Types(exp_found) => {
                    let is_simple_err =
                        exp_found.expected.is_simple_text() && exp_found.found.is_simple_text();
                    OpaqueTypesVisitor::visit_expected_found(
                        self.tcx,
                        exp_found.expected,
                        exp_found.found,
                        span,
                    )
                    .report(diag);

                    (is_simple_err, Mismatch::Variable(exp_found))
                }
                ValuePairs::TraitRefs(_) => (false, Mismatch::Fixed("trait")),
                _ => (false, Mismatch::Fixed("type")),
            };
            let vals = match self.values_str(values) {
                Some((expected, found)) => Some((expected, found)),
                None => {
                    // Derived error. Cancel the emitter.
                    diag.cancel();
                    return;
                }
            };
            (vals, exp_found, is_simple_error)
        }
    };

    // Ignore msg for object safe coercion
    // since E0038 message will be printed
    match terr {
        TypeError::ObjectUnsafeCoercion(_) => {}
        _ => {
            diag.span_label(span, terr.to_string());
            if let Some((sp, msg)) = secondary_span {
                diag.span_label(sp, msg);
            }
        }
    };
    if let Some((expected, found)) = expected_found {
        let (expected_label, found_label, exp_found) = match exp_found {
            Mismatch::Variable(ef) => (
                ef.expected.prefix_string(self.tcx),
                ef.found.prefix_string(self.tcx),
                Some(ef),
            ),
            Mismatch::Fixed(s) => (s.into(), s.into(), None),
        };
        match (&terr, expected == found) {
            (TypeError::Sorts(values), extra) => {
                // When the rendered strings are identical (`extra == true`),
                // disambiguate with extra detail (e.g. opaque type location).
                let sort_string = |ty: Ty<'tcx>| match (extra, ty.kind()) {
                    (true, ty::Opaque(def_id, _)) => {
                        let pos = self
                            .tcx
                            .sess
                            .source_map()
                            .lookup_char_pos(self.tcx.def_span(*def_id).lo());
                        format!(
                            " (opaque type at <{}:{}:{}>)",
                            pos.file.name.prefer_local(),
                            pos.line,
                            pos.col.to_usize() + 1,
                        )
                    }
                    (true, _) => format!(" ({})", ty.sort_string(self.tcx)),
                    (false, _) => "".to_string(),
                };
                if !(values.expected.is_simple_text() && values.found.is_simple_text())
                    || (exp_found.map_or(false, |ef| {
                        // This happens when the type error is a subset of the expectation,
                        // like when you have two references but one is `usize` and the other
                        // is `f32`. In those cases we still want to show the `note`. If the
                        // value from `ef` is `Infer(_)`, then we ignore it.
                        if !ef.expected.is_ty_infer() {
                            ef.expected != values.expected
                        } else if !ef.found.is_ty_infer() {
                            ef.found != values.found
                        } else {
                            false
                        }
                    }))
                {
                    diag.note_expected_found_extra(
                        &expected_label,
                        expected,
                        &found_label,
                        found,
                        &sort_string(values.expected),
                        &sort_string(values.found),
                    );
                }
            }
            (TypeError::ObjectUnsafeCoercion(_), _) => {
                diag.note_unsuccessful_coercion(found, expected);
            }
            (_, _) => {
                debug!(
                    "note_type_err: exp_found={:?}, expected={:?} found={:?}",
                    exp_found, expected, found
                );
                if !is_simple_error || terr.must_include_note() {
                    diag.note_expected_found(&expected_label, expected, &found_label, found);
                }
            }
        }
    }
    let exp_found = match exp_found {
        Mismatch::Variable(exp_found) => Some(exp_found),
        Mismatch::Fixed(_) => None,
    };
    let exp_found = match terr {
        // `terr` has more accurate type information than `exp_found` in match expressions.
        ty::error::TypeError::Sorts(terr)
            if exp_found.map_or(false, |ef| terr.found == ef.found) =>
        {
            Some(*terr)
        }
        _ => exp_found,
    };
    debug!("exp_found {:?} terr {:?}", exp_found, terr);
    if let Some(exp_found) = exp_found {
        self.suggest_as_ref_where_appropriate(span, &exp_found, diag);
        self.suggest_accessing_field_where_appropriate(cause, &exp_found, diag);
        self.suggest_await_on_expect_found(cause, span, &exp_found, diag);
    }

    // In some (most?) cases cause.body_id points to actual body, but in some cases
    // it's a actual definition. According to the comments (e.g. in
    // librustc_typeck/check/compare_method.rs:compare_predicate_entailment) the latter
    // is relied upon by some other code. This might (or might not) need cleanup.
    let body_owner_def_id = self.tcx.hir().opt_local_def_id(cause.body_id).unwrap_or_else(|| {
        self.tcx.hir().body_owner_def_id(hir::BodyId { hir_id: cause.body_id })
    });
    self.check_and_note_conflicting_crates(diag, terr);
    self.tcx.note_and_explain_type_err(diag, terr, cause, span, body_owner_def_id.to_def_id());

    if let Some(ValuePairs::PolyTraitRefs(exp_found)) = values {
        if let ty::Closure(def_id, _) = exp_found.expected.skip_binder().self_ty().kind() {
            if let Some(def_id) = def_id.as_local() {
                let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
                let span = self.tcx.hir().span(hir_id);
                diag.span_note(span, "this closure does not fulfill the lifetime requirements");
            }
        }
    }

    // It reads better to have the error origin as the final
    // thing.
    self.note_error_origin(diag, cause, exp_found);
}

/// If `ty` is an opaque type whose item bounds include a
/// `Future<Output = T>` projection, returns that output type `T`;
/// otherwise returns `None`.
pub fn get_impl_future_output_ty(&self, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
    if let ty::Opaque(def_id, substs) = ty.kind() {
        let future_trait = self.tcx.require_lang_item(LangItem::Future, None);
        // Future::Output
        // NOTE(review): relies on `Output` being the first associated item of
        // `Future` in definition order.
        let item_def_id = self
            .tcx
            .associated_items(future_trait)
            .in_definition_order()
            .next()
            .unwrap()
            .def_id;

        let bounds = self.tcx.explicit_item_bounds(*def_id);
        for (predicate, _) in bounds {
            let predicate = predicate.subst(self.tcx, substs);
            if let ty::PredicateKind::Projection(projection_predicate) =
                predicate.kind().skip_binder()
            {
                if projection_predicate.projection_ty.item_def_id == item_def_id {
                    // We don't account for multiple `Future::Output = Ty` contraints.
                    return Some(projection_predicate.ty);
                }
            }
        }
    }
    None
}

/// A possible error is to forget to add `.await` when using futures:
///
/// ```
/// async fn make_u32() -> u32 {
///     22
/// }
///
/// fn take_u32(x: u32) {}
///
/// async fn foo() {
///     let x = make_u32();
///     take_u32(x);
/// }
/// ```
///
/// This routine checks if the found type `T` implements `Future<Output=U>` where `U` is the
/// expected type. If this is the case, and we are inside of an async body, it suggests adding
/// `.await` to the tail of the expression.
fn suggest_await_on_expect_found(
    &self,
    cause: &ObligationCause<'tcx>,
    exp_span: Span,
    exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
    diag: &mut DiagnosticBuilder<'tcx>,
) {
    debug!(
        "suggest_await_on_expect_found: exp_span={:?}, expected_ty={:?}, found_ty={:?}",
        exp_span, exp_found.expected, exp_found.found,
    );

    // Don't suggest `.await` when checking an impl method against its trait
    // declaration; the mismatch is in the signature, not a missing await.
    if let ObligationCauseCode::CompareImplMethodObligation { .. } = &cause.code {
        return;
    }

    match (
        self.get_impl_future_output_ty(exp_found.expected),
        self.get_impl_future_output_ty(exp_found.found),
    ) {
        // Both sides are futures with the same output: the branches of an
        // `if`/`match` disagree only in "awaitedness" — suggest awaiting both.
        (Some(exp), Some(found)) if ty::TyS::same_type(exp, found) => match &cause.code {
            ObligationCauseCode::IfExpression(box IfExpressionCause { then, .. }) => {
                diag.multipart_suggestion(
                    "consider `await`ing on both `Future`s",
                    vec![
                        (then.shrink_to_hi(), ".await".to_string()),
                        (exp_span.shrink_to_hi(), ".await".to_string()),
                    ],
                    Applicability::MaybeIncorrect,
                );
            }
            ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
                prior_arms,
                ..
            }) => {
                if let [.., arm_span] = &prior_arms[..] {
                    diag.multipart_suggestion(
                        "consider `await`ing on both `Future`s",
                        vec![
                            (arm_span.shrink_to_hi(), ".await".to_string()),
                            (exp_span.shrink_to_hi(), ".await".to_string()),
                        ],
                        Applicability::MaybeIncorrect,
                    );
                } else {
                    // No prior arm span available to attach the second edit to.
                    diag.help("consider `await`ing on both `Future`s");
                }
            }
            _ => {
                diag.help("consider `await`ing on both `Future`s");
            }
        },
        // Found a future whose output is the expected type: awaiting the
        // found expression would fix the mismatch.
        (_, Some(ty)) if ty::TyS::same_type(exp_found.expected, ty) => {
            let span = match cause.code {
                // scrutinee's span
                ObligationCauseCode::Pattern { span: Some(span), .. } => span,
                _ => exp_span,
            };
            diag.span_suggestion_verbose(
                span.shrink_to_hi(),
                "consider `await`ing on the `Future`",
                ".await".to_string(),
                Applicability::MaybeIncorrect,
            );
        }
        // Expected a future whose output is the found type: the expected
        // value (e.g. a match scrutinee) is the one missing `.await`.
        (Some(ty), _) if ty::TyS::same_type(ty, exp_found.found) => {
            let span = match cause.code {
                // scrutinee's span
                ObligationCauseCode::Pattern { span: Some(span), .. } => span,
                _ => exp_span,
            };
            diag.span_suggestion_verbose(
                span.shrink_to_hi(),
                "consider `await`ing on the `Future`",
                ".await".to_string(),
                Applicability::MaybeIncorrect,
            );
        }
        _ => {}
    }
}

/// When a pattern (or similar) expects a struct/union but the found type
/// matches the type of one of its accessible fields, suggests accessing that
/// field instead (wrapping in `unsafe { .. }` for unions). Enums are skipped.
fn suggest_accessing_field_where_appropriate(
    &self,
    cause: &ObligationCause<'tcx>,
    exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
    diag: &mut DiagnosticBuilder<'tcx>,
) {
    debug!(
        "suggest_accessing_field_where_appropriate(cause={:?}, exp_found={:?})",
        cause, exp_found
    );
    if let ty::Adt(expected_def, expected_substs) = exp_found.expected.kind() {
        if expected_def.is_enum() {
            return;
        }

        if let Some((name, ty)) = expected_def
            .non_enum_variant()
            .fields
            .iter()
            // Only suggest fields the user can actually access from here.
            .filter(|field| field.vis.is_accessible_from(field.did, self.tcx))
            .map(|field| (field.ident.name, field.ty(self.tcx, expected_substs)))
            .find(|(_, ty)| ty::TyS::same_type(ty, exp_found.found))
        {
            if let ObligationCauseCode::Pattern { span: Some(span), .. } = cause.code {
                if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
                    let suggestion = if expected_def.is_struct() {
                        format!("{}.{}", snippet, name)
                    } else if expected_def.is_union() {
                        // Union field reads require `unsafe`.
                        format!("unsafe {{ {}.{} }}", snippet, name)
                    } else {
                        return;
                    };
                    diag.span_suggestion(
                        span,
                        &format!(
                            "you might have meant to use field `{}` whose type is `{}`",
                            name, ty
                        ),
                        suggestion,
                        Applicability::MaybeIncorrect,
                    );
                }
            }
        }
    }
}

/// When encountering a case where `.as_ref()` on a `Result` or `Option` would be appropriate,
/// suggests it.
fn suggest_as_ref_where_appropriate(
    &self,
    span: Span,
    exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
    diag: &mut DiagnosticBuilder<'tcx>,
) {
    // Fires when expecting `Option<T>`/`Result<T, E>` but finding a
    // `&Option<..>`/`&Result<..>` of the same ADT: `.as_ref()` converts the
    // outer reference into references on the payload.
    if let (ty::Adt(exp_def, exp_substs), ty::Ref(_, found_ty, _)) =
        (exp_found.expected.kind(), exp_found.found.kind())
    {
        if let ty::Adt(found_def, found_substs) = *found_ty.kind() {
            let path_str = format!("{:?}", exp_def);
            if exp_def == &found_def {
                let opt_msg = "you can convert from `&Option<T>` to `Option<&T>` using \
                    `.as_ref()`";
                let result_msg = "you can convert from `&Result<T, E>` to \
                    `Result<&T, &E>` using `.as_ref()`";
                let have_as_ref = &[
                    ("std::option::Option", opt_msg),
                    ("core::option::Option", opt_msg),
                    ("std::result::Result", result_msg),
                    ("core::result::Result", result_msg),
                ];
                if let Some(msg) = have_as_ref
                    .iter()
                    .find_map(|(path, msg)| (&path_str == path).then_some(msg))
                {
                    // Verify that each expected type argument is a reference to
                    // the corresponding found argument (or involves params/infer
                    // vars); otherwise `.as_ref()` would not type-check.
                    let mut show_suggestion = true;
                    for (exp_ty, found_ty) in
                        iter::zip(exp_substs.types(), found_substs.types())
                    {
                        match *exp_ty.kind() {
                            ty::Ref(_, exp_ty, _) => {
                                match (exp_ty.kind(), found_ty.kind()) {
                                    (_, ty::Param(_))
                                    | (_, ty::Infer(_))
                                    | (ty::Param(_), _)
                                    | (ty::Infer(_), _) => {}
                                    _ if ty::TyS::same_type(exp_ty, found_ty) => {}
                                    _ => show_suggestion = false,
                                };
                            }
                            ty::Param(_) | ty::Infer(_) => {}
                            _ => show_suggestion = false,
                        }
                    }
                    if let (Ok(snippet), true) =
                        (self.tcx.sess.source_map().span_to_snippet(span), show_suggestion)
                    {
                        diag.span_suggestion(
                            span,
                            msg,
                            format!("{}.as_ref()", snippet),
                            Applicability::MachineApplicable,
                        );
                    }
                }
            }
        }
    }
}

/// Builds (but does not emit) the full diagnostic for a type error, picking
/// the error code from the obligation cause (`E0038`, `E0308`, `E0317`,
/// `E0580`, `E0644`) and attaching expected/found notes via
/// [`Self::note_type_err`].
pub fn report_and_explain_type_error(
    &self,
    trace: TypeTrace<'tcx>,
    terr: &TypeError<'tcx>,
) -> DiagnosticBuilder<'tcx> {
    debug!("report_and_explain_type_error(trace={:?}, terr={:?})", trace, terr);

    let span = trace.cause.span(self.tcx);
    let failure_code = trace.cause.as_failure_code(terr);
    let mut diag = match failure_code {
        FailureCode::Error0038(did) => {
            let violations = self.tcx.object_safety_violations(did);
            report_object_safety_error(self.tcx, span, did, violations)
        }
        FailureCode::Error0317(failure_str) => {
            struct_span_err!(self.tcx.sess, span, E0317, "{}", failure_str)
        }
        FailureCode::Error0580(failure_str) => {
            struct_span_err!(self.tcx.sess, span, E0580, "{}", failure_str)
        }
        FailureCode::Error0308(failure_str) => {
            let mut err = struct_span_err!(self.tcx.sess, span, E0308, "{}", failure_str);
            if let ValuePairs::Types(ty::error::ExpectedFound { expected, found }) =
                trace.values
            {
                // If a tuple of length one was expected and the found expression has
                // parentheses around it, perhaps the user meant to write `(expr,)` to
                // build a tuple (issue #86100)
                match (expected.kind(), found.kind()) {
                    (ty::Tuple(_), ty::Tuple(_)) => {}
                    (ty::Tuple(_), _) if expected.tuple_fields().count() == 1 => {
                        if let Ok(code) = self.tcx.sess().source_map().span_to_snippet(span) {
                            if let Some(code) =
                                code.strip_prefix('(').and_then(|s| s.strip_suffix(')'))
                            {
                                err.span_suggestion(
                                    span,
                                    "use a trailing comma to create a tuple with one element",
                                    format!("({},)", code),
                                    Applicability::MaybeIncorrect,
                                );
                            }
                        }
                    }
                    _ => {}
                }
            }
            err
        }
        FailureCode::Error0644(failure_str) => {
            struct_span_err!(self.tcx.sess, span, E0644, "{}", failure_str)
        }
    };
    self.note_type_err(&mut diag, &trace.cause, None, Some(trace.values), terr);
    diag
}

/// Renders an expected/found `ValuePairs` into a pair of styled strings for
/// display; trait refs are printed with only their trait path. Returns `None`
/// when the values reference an earlier error (derived error).
fn values_str(
    &self,
    values: ValuePairs<'tcx>,
) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> {
    match values {
        infer::Types(exp_found) => self.expected_found_str_ty(exp_found),
        infer::Regions(exp_found) => self.expected_found_str(exp_found),
        infer::Consts(exp_found) => self.expected_found_str(exp_found),
        infer::TraitRefs(exp_found) => {
            let pretty_exp_found = ty::error::ExpectedFound {
                expected: exp_found.expected.print_only_trait_path(),
                found: exp_found.found.print_only_trait_path(),
            };
            self.expected_found_str(pretty_exp_found)
        }
        infer::PolyTraitRefs(exp_found) => {
            let pretty_exp_found = ty::error::ExpectedFound {
                expected: exp_found.expected.print_only_trait_path(),
                found: exp_found.found.print_only_trait_path(),
            };
            self.expected_found_str(pretty_exp_found)
        }
    }
}

/// Like [`Self::expected_found_str`], but for types: resolves inference
/// variables first and then uses `cmp` to elide common parts and highlight
/// the differences.
fn expected_found_str_ty(
    &self,
    exp_found: ty::error::ExpectedFound<Ty<'tcx>>,
) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> {
    let exp_found = self.resolve_vars_if_possible(exp_found);
    if exp_found.references_error() {
        // Derived from a prior error; don't render anything.
        return None;
    }

    Some(self.cmp(exp_found.expected, exp_found.found))
}

/// Returns a string of the form "expected `{}`, found `{}`".
fn expected_found_str<T: fmt::Display + TypeFoldable<'tcx>>(
    &self,
    exp_found: ty::error::ExpectedFound<T>,
) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> {
    let exp_found = self.resolve_vars_if_possible(exp_found);
    if exp_found.references_error() {
        return None;
    }

    Some((
        DiagnosticStyledString::highlighted(exp_found.expected.to_string()),
        DiagnosticStyledString::highlighted(exp_found.found.to_string()),
    ))
}

/// Convenience wrapper: builds the "may not live long enough" diagnostic via
/// [`Self::construct_generic_bound_failure`] and emits it.
pub fn report_generic_bound_failure(
    &self,
    span: Span,
    origin: Option<SubregionOrigin<'tcx>>,
    bound_kind: GenericKind<'tcx>,
    sub: Region<'tcx>,
) {
    self.construct_generic_bound_failure(span, origin, bound_kind, sub).emit();
}

/// Builds (without emitting) the diagnostic for a failed outlives bound
/// `bound_kind: sub` (E0309/E0310/E0311 depending on the region `sub`),
/// including a suggestion to add an explicit lifetime bound where possible.
pub fn construct_generic_bound_failure(
    &self,
    span: Span,
    origin: Option<SubregionOrigin<'tcx>>,
    bound_kind: GenericKind<'tcx>,
    sub: Region<'tcx>,
) -> DiagnosticBuilder<'a> {
    let hir = &self.tcx.hir();
    // Attempt to obtain the span of the parameter so we can
    // suggest adding an explicit lifetime bound to it.
    let generics = self
        .in_progress_typeck_results
        .map(|typeck_results| typeck_results.borrow().hir_owner)
        .map(|owner| {
            let hir_id = hir.local_def_id_to_hir_id(owner);
            let parent_id = hir.get_parent_item(hir_id);
            (
                // Parent item could be a `mod`, so we check the HIR before calling:
                if let Some(Node::Item(Item {
                    kind: ItemKind::Trait(..) | ItemKind::Impl { .. },
                    ..
                })) = hir.find(parent_id)
                {
                    Some(self.tcx.generics_of(hir.local_def_id(parent_id).to_def_id()))
                } else {
                    None
                },
                self.tcx.generics_of(owner.to_def_id()),
            )
        });
    let type_param_span = match (generics, bound_kind) {
        (Some((_, ref generics)), GenericKind::Param(ref param)) => {
            // Account for the case where `param` corresponds to `Self`,
            // which doesn't have the expected type argument.
            if !(generics.has_self && param.index == 0) {
                let type_param = generics.type_param(param, self.tcx);
                type_param.def_id.as_local().map(|def_id| {
                    // Get the `hir::Param` to verify whether it already has any bounds.
                    // We do this to avoid suggesting code that ends up as `T: 'a'b`,
                    // instead we suggest `T: 'a + 'b` in that case.
                    let id = hir.local_def_id_to_hir_id(def_id);
                    let mut has_bounds = false;
                    if let Node::GenericParam(param) = hir.get(id) {
                        has_bounds = !param.bounds.is_empty();
                    }
                    let sp = hir.span(id);
                    // `sp` only covers `T`, change it so that it covers
                    // `T:` when appropriate
                    let is_impl_trait = bound_kind.to_string().starts_with("impl ");
                    let sp = if has_bounds && !is_impl_trait {
                        sp.to(self
                            .tcx
                            .sess
                            .source_map()
                            .next_point(self.tcx.sess.source_map().next_point(sp)))
                    } else {
                        sp
                    };
                    (sp, has_bounds, is_impl_trait)
                })
            } else {
                None
            }
        }
        _ => None,
    };
    // Pick the first short lifetime name ('a..'z) not already used by the
    // item or its parent; falls back to `'lt`.
    let new_lt = generics
        .as_ref()
        .and_then(|(parent_g, g)| {
            let mut possible = (b'a'..=b'z').map(|c| format!("'{}", c as char));
            let mut lts_names = g
                .params
                .iter()
                .filter(|p| matches!(p.kind, ty::GenericParamDefKind::Lifetime))
                .map(|p| p.name.as_str())
                .collect::<Vec<_>>();
            if let Some(g) = parent_g {
                lts_names.extend(
                    g.params
                        .iter()
                        .filter(|p| matches!(p.kind, ty::GenericParamDefKind::Lifetime))
                        .map(|p| p.name.as_str()),
                );
            }
            let lts = lts_names.iter().map(|s| -> &str { &*s }).collect::<Vec<_>>();
            possible.find(|candidate| !lts.contains(&candidate.as_str()))
        })
        .unwrap_or("'lt".to_string());
    // Suggested edit that declares the fresh lifetime at the front of the
    // generic parameter list (e.g. `<'a, ...>`).
    let add_lt_sugg = generics
        .as_ref()
        .and_then(|(_, g)| g.params.first())
        .and_then(|param| param.def_id.as_local())
        .map(|def_id| {
            (
                hir.span(hir.local_def_id_to_hir_id(def_id)).shrink_to_lo(),
                format!("{}, ", new_lt),
            )
        });

    let labeled_user_string = match bound_kind {
        GenericKind::Param(ref p) => format!("the parameter type `{}`", p),
        GenericKind::Projection(ref p) => format!("the associated type `{}`", p),
    };

    if let Some(SubregionOrigin::CompareImplMethodObligation {
        span,
        item_name,
        impl_item_def_id,
        trait_item_def_id,
    }) = origin
    {
        // Impl-vs-trait signature mismatch has a dedicated report.
        return self.report_extra_impl_obligation(
            span,
            item_name,
            impl_item_def_id,
            trait_item_def_id,
            &format!("`{}: {}`", bound_kind, sub),
        );
    }

    // Suggests `bound_kind: sub` (or `bound_kind + sub` for `impl Trait`) at
    // the parameter's span when available, otherwise as a plain help message.
    fn binding_suggestion<'tcx, S: fmt::Display>(
        err: &mut DiagnosticBuilder<'tcx>,
        type_param_span: Option<(Span, bool, bool)>,
        bound_kind: GenericKind<'tcx>,
        sub: S,
    ) {
        let msg = "consider adding an explicit lifetime bound";
        if let Some((sp, has_lifetimes, is_impl_trait)) = type_param_span {
            let suggestion = if is_impl_trait {
                format!("{} + {}", bound_kind, sub)
            } else {
                let tail = if has_lifetimes { " + " } else { "" };
                format!("{}: {}{}", bound_kind, sub, tail)
            };
            err.span_suggestion(
                sp,
                &format!("{}...", msg),
                suggestion,
                Applicability::MaybeIncorrect, // Issue #41966
            );
        } else {
            let consider = format!(
                "{} {}...",
                msg,
                if type_param_span.map_or(false, |(_, _, is_impl_trait)| is_impl_trait) {
                    format!(" `{}` to `{}`", sub, bound_kind)
                } else {
                    format!("`{}: {}`", bound_kind, sub)
                },
            );
            err.help(&consider);
        }
    }

    // Like `binding_suggestion`, but introduces the fresh lifetime `new_lt`
    // both on the parameter and on the return-position `impl Trait`/closure.
    let new_binding_suggestion =
        |err: &mut DiagnosticBuilder<'tcx>,
         type_param_span: Option<(Span, bool, bool)>,
         bound_kind: GenericKind<'tcx>| {
            let msg = "consider introducing an explicit lifetime bound";
            if let Some((sp, has_lifetimes, is_impl_trait)) = type_param_span {
                let suggestion = if is_impl_trait {
                    (sp.shrink_to_hi(), format!(" + {}", new_lt))
                } else {
                    let tail = if has_lifetimes { " +" } else { "" };
                    (sp, format!("{}: {}{}", bound_kind, new_lt, tail))
                };
                let mut sugg =
                    vec![suggestion, (span.shrink_to_hi(), format!(" + {}", new_lt))];
                if let Some(lt) = add_lt_sugg {
                    sugg.push(lt);
                    // Keep the lifetime declaration edit first in the list.
                    sugg.rotate_right(1);
                }
                // `MaybeIncorrect` due to issue #41966.
                err.multipart_suggestion(msg, sugg, Applicability::MaybeIncorrect);
            }
        };

    let mut err = match *sub {
        ty::ReEarlyBound(ty::EarlyBoundRegion { name, .. })
        | ty::ReFree(ty::FreeRegion { bound_region: ty::BrNamed(_, name), .. }) => {
            // Does the required lifetime have a nice name we can print?
            let mut err = struct_span_err!(
                self.tcx.sess,
                span,
                E0309,
                "{} may not live long enough",
                labeled_user_string
            );
            // Explicitly use the name instead of `sub`'s `Display` impl. The `Display` impl
            // for the bound is not suitable for suggestions when `-Zverbose` is set because it
            // uses `Debug` output, so we handle it specially here so that suggestions are
            // always correct.
            binding_suggestion(&mut err, type_param_span, bound_kind, name);
            err
        }

        ty::ReStatic => {
            // Does the required lifetime have a nice name we can print?
            let mut err = struct_span_err!(
                self.tcx.sess,
                span,
                E0310,
                "{} may not live long enough",
                labeled_user_string
            );
            binding_suggestion(&mut err, type_param_span, bound_kind, "'static");
            err
        }

        _ => {
            // If not, be less specific.
            let mut err = struct_span_err!(
                self.tcx.sess,
                span,
                E0311,
                "{} may not live long enough",
                labeled_user_string
            );
            note_and_explain_region(
                self.tcx,
                &mut err,
                &format!("{} must be valid for ", labeled_user_string),
                sub,
                "...",
                None,
            );
            if let Some(infer::RelateParamBound(_, t, _)) = origin {
                let return_impl_trait = self
                    .in_progress_typeck_results
                    .map(|typeck_results| typeck_results.borrow().hir_owner)
                    .and_then(|owner| self.tcx.return_type_impl_trait(owner))
                    .is_some();
                let t = self.resolve_vars_if_possible(t);
                match t.kind() {
                    // We've got:
                    // fn get_later<G, T>(g: G, dest: &mut T) -> impl FnOnce() + '_
                    // suggest:
                    // fn get_later<'a, G: 'a, T>(g: G, dest: &mut T) -> impl FnOnce() + '_ + 'a
                    ty::Closure(_, _substs) | ty::Opaque(_, _substs) if return_impl_trait => {
                        new_binding_suggestion(&mut err, type_param_span, bound_kind);
                    }
                    _ => {
                        binding_suggestion(&mut err, type_param_span, bound_kind, new_lt);
                    }
                }
            }
            err
        }
    };

    if let Some(origin) = origin {
        self.note_region_origin(&mut err, &origin);
    }
    err
}

/// Reports a region inference variable that is constrained both from below
/// (`sub_region`) and above (`sup_region`) in incompatible ways. When both
/// constraints come from subtyping the same expected/found values, merges the
/// two explanations into a single combined note.
fn report_sub_sup_conflict(
    &self,
    var_origin: RegionVariableOrigin,
    sub_origin: SubregionOrigin<'tcx>,
    sub_region: Region<'tcx>,
    sup_origin: SubregionOrigin<'tcx>,
    sup_region: Region<'tcx>,
) {
    let mut err = self.report_inference_failure(var_origin);

    note_and_explain_region(
        self.tcx,
        &mut err,
        "first, the lifetime cannot outlive ",
        sup_region,
        "...",
        None,
    );

    debug!("report_sub_sup_conflict: var_origin={:?}", var_origin);
    debug!("report_sub_sup_conflict: sub_region={:?}", sub_region);
    debug!("report_sub_sup_conflict: sub_origin={:?}", sub_origin);
    debug!("report_sub_sup_conflict: sup_region={:?}", sup_region);
    debug!("report_sub_sup_conflict: sup_origin={:?}", sup_origin);

    if let (&infer::Subtype(ref sup_trace), &infer::Subtype(ref sub_trace)) =
        (&sup_origin, &sub_origin)
    {
        debug!("report_sub_sup_conflict: sup_trace={:?}", sup_trace);
        debug!("report_sub_sup_conflict: sub_trace={:?}", sub_trace);
        debug!("report_sub_sup_conflict: sup_trace.values={:?}", sup_trace.values);
        debug!("report_sub_sup_conflict: sub_trace.values={:?}", sub_trace.values);

        if let (Some((sup_expected, sup_found)), Some((sub_expected, sub_found))) =
            (self.values_str(sup_trace.values), self.values_str(sub_trace.values))
        {
            // Both constraints arise from the same expected/found pair, so a
            // single merged explanation is clearer than two separate notes.
            if sub_expected == sup_expected && sub_found == sup_found {
                note_and_explain_region(
                    self.tcx,
                    &mut err,
                    "...but the lifetime must also be valid for ",
                    sub_region,
                    "...",
                    None,
                );
                err.span_note(
                    sup_trace.cause.span,
                    &format!("...so that the {}", sup_trace.cause.as_requirement_str()),
                );

                err.note_expected_found(&"", sup_expected, &"", sup_found);
                err.emit();
                return;
            }
        }
    }

    self.note_region_origin(&mut err, &sup_origin);

    note_and_explain_region(
        self.tcx,
        &mut err,
        "but, the lifetime must be valid for ",
        sub_region,
        "...",
        None,
    );

    self.note_region_origin(&mut err, &sub_origin);
    err.emit();
}

/// Determine whether an error associated with the given span and definition
/// should be treated as being caused by the implicit `From` conversion
/// within `?` desugaring.
pub fn is_try_conversion(&self, span: Span, trait_def_id: DefId) -> bool { span.is_desugaring(DesugaringKind::QuestionMark) && self.tcx.is_diagnostic_item(sym::from_trait, trait_def_id) } } impl<'a, 'tcx> InferCtxt<'a, 'tcx> { fn report_inference_failure( &self, var_origin: RegionVariableOrigin, ) -> DiagnosticBuilder<'tcx> { let br_string = |br: ty::BoundRegionKind| { let mut s = match br { ty::BrNamed(_, name) => name.to_string(), _ => String::new(), }; if !s.is_empty() { s.push(' '); } s }; let var_description = match var_origin { infer::MiscVariable(_) => String::new(), infer::PatternRegion(_) => " for pattern".to_string(), infer::AddrOfRegion(_) => " for borrow expression".to_string(), infer::Autoref(_, _) => " for autoref".to_string(), infer::Coercion(_) => " for automatic coercion".to_string(), infer::LateBoundRegion(_, br, infer::FnCall) => { format!(" for lifetime parameter {}in function call", br_string(br)) } infer::LateBoundRegion(_, br, infer::HigherRankedType) => { format!(" for lifetime parameter {}in generic type", br_string(br)) } infer::LateBoundRegion(_, br, infer::AssocTypeProjection(def_id)) => format!( " for lifetime parameter {}in trait containing associated type `{}`", br_string(br), self.tcx.associated_item(def_id).ident ), infer::EarlyBoundRegion(_, name) => format!(" for lifetime parameter `{}`", name), infer::UpvarRegion(ref upvar_id, _) => { let var_name = self.tcx.hir().name(upvar_id.var_path.hir_id); format!(" for capture of `{}` by closure", var_name) } infer::Nll(..) 
=> bug!("NLL variable found in lexical phase"), }; struct_span_err!( self.tcx.sess, var_origin.span(), E0495, "cannot infer an appropriate lifetime{} due to conflicting requirements", var_description ) } } enum FailureCode { Error0038(DefId), Error0317(&'static str), Error0580(&'static str), Error0308(&'static str), Error0644(&'static str), } trait ObligationCauseExt<'tcx> { fn as_failure_code(&self, terr: &TypeError<'tcx>) -> FailureCode; fn as_requirement_str(&self) -> &'static str; } impl<'tcx> ObligationCauseExt<'tcx> for ObligationCause<'tcx> { fn as_failure_code(&self, terr: &TypeError<'tcx>) -> FailureCode { use self::FailureCode::*; use crate::traits::ObligationCauseCode::*; match self.code { CompareImplMethodObligation { .. } => Error0308("method not compatible with trait"), CompareImplTypeObligation { .. } => Error0308("type not compatible with trait"), MatchExpressionArm(box MatchExpressionArmCause { source, .. }) => { Error0308(match source { hir::MatchSource::IfLetDesugar { .. } => { "`if let` arms have incompatible types" } hir::MatchSource::TryDesugar => { "try expression alternatives have incompatible types" } _ => "`match` arms have incompatible types", }) } IfExpression { .. } => Error0308("`if` and `else` have incompatible types"), IfExpressionWithNoElse => Error0317("`if` may be missing an `else` clause"), MainFunctionType => Error0580("`main` function has wrong type"), StartFunctionType => Error0308("`#[start]` function has wrong type"), IntrinsicType => Error0308("intrinsic has wrong type"), MethodReceiver => Error0308("mismatched `self` parameter type"), // In the case where we have no more specific thing to // say, also take a look at the error code, maybe we can // tailor to that. 
_ => match terr { TypeError::CyclicTy(ty) if ty.is_closure() || ty.is_generator() => { Error0644("closure/generator type that references itself") } TypeError::IntrinsicCast => { Error0308("cannot coerce intrinsics to function pointers") } TypeError::ObjectUnsafeCoercion(did) => Error0038(*did), _ => Error0308("mismatched types"), }, } } fn as_requirement_str(&self) -> &'static str { use crate::traits::ObligationCauseCode::*; match self.code { CompareImplMethodObligation { .. } => "method type is compatible with trait", CompareImplTypeObligation { .. } => "associated type is compatible with trait", ExprAssignable => "expression is assignable", MatchExpressionArm(box MatchExpressionArmCause { source, .. }) => match source { hir::MatchSource::IfLetDesugar { .. } => "`if let` arms have compatible types", _ => "`match` arms have compatible types", }, IfExpression { .. } => "`if` and `else` have incompatible types", IfExpressionWithNoElse => "`if` missing an `else` returns `()`", MainFunctionType => "`main` function has the correct type", StartFunctionType => "`#[start]` function has the correct type", IntrinsicType => "intrinsic has the correct type", MethodReceiver => "method receiver has the correct type", _ => "types are compatible", } } } /// This is a bare signal of what kind of type we're dealing with. `ty::TyKind` tracks /// extra information about each type, but we only care about the category. #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum TyCategory { Closure, Opaque, Generator(hir::GeneratorKind), Foreign, } impl TyCategory { fn descr(&self) -> &'static str { match self { Self::Closure => "closure", Self::Opaque => "opaque type", Self::Generator(gk) => gk.descr(), Self::Foreign => "foreign type", } } pub fn from_ty(tcx: TyCtxt<'_>, ty: Ty<'_>) -> Option<(Self, DefId)> { match *ty.kind() { ty::Closure(def_id, _) => Some((Self::Closure, def_id)), ty::Opaque(def_id, _) => Some((Self::Opaque, def_id)), ty::Generator(def_id, ..) 
=> { Some((Self::Generator(tcx.generator_kind(def_id).unwrap()), def_id)) } ty::Foreign(def_id) => Some((Self::Foreign, def_id)), _ => None, } } }
42.423808
100
0.467147
7a51897b4791d17df53a7d31a01a658c2f785d96
77
extern crate glutin; pub use input::glutin::{ElementState, VirtualKeyCode};
19.25
54
0.779221
8a21c368e1e4afe64f3a8c4f825e3692781171c2
2,563
use swc_ecma_ast::*; use swc_ecma_utils::is_valid_ident; use swc_ecma_visit::{noop_fold_type, Fold, FoldWith}; use swc_trace_macro::swc_trace; /// babel: `transform-member-expression-literals` /// /// # Input /// ```js /// obj["foo"] = "isValid"; /// /// obj.const = "isKeyword"; /// obj["var"] = "isKeyword"; /// ``` /// /// # Output /// ```js /// obj.foo = "isValid"; /// /// obj["const"] = "isKeyword"; /// obj["var"] = "isKeyword"; /// ``` #[tracing::instrument(level = "info", skip_all)] pub fn member_expression_literals() -> impl Fold { MemberExprLit } #[derive(Default, Clone, Copy)] struct MemberExprLit; #[swc_trace] impl Fold for MemberExprLit { noop_fold_type!(); fn fold_member_expr(&mut self, e: MemberExpr) -> MemberExpr { let e = e.fold_children_with(self); if let MemberProp::Ident(i) = e.prop { if i.sym.is_reserved() || i.sym.is_reserved_in_strict_mode(true) || i.sym.is_reserved_in_es3() // it's not bind, so you could use eval || !is_valid_ident(&i.sym) { return MemberExpr { prop: MemberProp::Computed(ComputedPropName { span: i.span, expr: Box::new(Expr::Lit(Lit::Str(Str { span: i.span, raw: None, value: i.sym, }))), }), ..e }; } else { return MemberExpr { prop: MemberProp::Ident(swc_ecma_utils::quote_ident!(i.span, i.sym)), ..e }; } }; e } } #[cfg(test)] mod tests { use swc_ecma_transforms_testing::test; use super::*; test!( ::swc_ecma_parser::Syntax::default(), |_| MemberExprLit, basic, r#"obj["foo"] = "isValid"; obj.const = "isKeyword"; obj["var"] = "isKeyword";"#, r#"obj["foo"] = "isValid"; obj["const"] = "isKeyword"; obj["var"] = "isKeyword";"#, ok_if_code_eq ); test!( ::swc_ecma_parser::Syntax::default(), |_| MemberExprLit, issue_206, "const number = foo[bar1][baz1]", "const number = foo[bar1][baz1]" ); test!( ::swc_ecma_parser::Syntax::default(), |_| MemberExprLit, issue_211, "_query[idx]=$this.attr('data-ref');", "_query[idx]=$this.attr('data-ref');" ); }
24.883495
89
0.489661
71b840569aa6bee023d0eb2a15ca95e92f9890e8
10,290
use crossterm::{cursor, terminal, RawScreen}; use crossterm::{InputEvent, KeyEvent}; use nu_errors::ShellError; use nu_protocol::{ outln, serve_plugin, CallInfo, Plugin, Primitive, Signature, UntaggedValue, Value, }; use nu_source::AnchorLocation; use syntect::easy::HighlightLines; use syntect::highlighting::{Style, ThemeSet}; use syntect::parsing::SyntaxSet; use std::io::Write; use std::path::Path; enum DrawCommand { DrawString(Style, String), NextLine, } struct TextView; impl TextView { fn new() -> TextView { TextView } } impl Plugin for TextView { fn config(&mut self) -> Result<Signature, ShellError> { Ok(Signature::build("textview").desc("Autoview of text data.")) } fn sink(&mut self, _call_info: CallInfo, input: Vec<Value>) { if !input.is_empty() { view_text_value(&input[0]); } } } fn paint_textview( draw_commands: &Vec<DrawCommand>, starting_row: usize, use_color_buffer: bool, ) -> usize { let terminal = terminal(); let cursor = cursor(); let size = terminal.terminal_size(); // render let mut pos = 0; let width = size.0 as usize; let height = size.1 as usize - 1; let mut frame_buffer = vec![]; for command in draw_commands { match command { DrawCommand::DrawString(style, string) => { for chr in string.chars() { if chr == '\t' { for _ in 0..8 { frame_buffer.push(( ' ', style.foreground.r, style.foreground.g, style.foreground.b, )); } pos += 8; } else { frame_buffer.push(( chr, style.foreground.r, style.foreground.g, style.foreground.b, )); pos += 1; } } } DrawCommand::NextLine => { for _ in 0..(width - pos % width) { frame_buffer.push((' ', 0, 0, 0)); } pos += width - pos % width; } } } let num_frame_buffer_rows = frame_buffer.len() / width; let buffer_needs_scrolling = num_frame_buffer_rows > height; // display let mut ansi_strings = vec![]; let mut normal_chars = vec![]; for c in &frame_buffer[starting_row * width..std::cmp::min(pos, (starting_row + height) * width)] { if use_color_buffer { ansi_strings.push(ansi_term::Colour::RGB(c.1, c.2, 
c.3).paint(format!("{}", c.0))); } else { normal_chars.push(c.0); } } if buffer_needs_scrolling { let _ = cursor.goto(0, 0); } if use_color_buffer { print!("{}", ansi_term::ANSIStrings(&ansi_strings)); } else { let s: String = normal_chars.into_iter().collect(); print!("{}", s); } if buffer_needs_scrolling { let _ = cursor.goto(0, size.1); print!( "{}", ansi_term::Colour::Blue.paint("[ESC to quit, arrow keys to move]") ); } let _ = std::io::stdout().flush(); num_frame_buffer_rows } fn scroll_view_lines_if_needed(draw_commands: Vec<DrawCommand>, use_color_buffer: bool) { let mut starting_row = 0; if let Ok(_raw) = RawScreen::into_raw_mode() { let terminal = terminal(); let mut size = terminal.terminal_size(); let height = size.1 as usize - 1; let mut max_bottom_line = paint_textview(&draw_commands, starting_row, use_color_buffer); // Only scroll if needed if max_bottom_line > height as usize { let cursor = cursor(); let _ = cursor.hide(); let input = crossterm::input(); let mut sync_stdin = input.read_sync(); loop { if let Some(ev) = sync_stdin.next() { match ev { InputEvent::Keyboard(k) => match k { KeyEvent::Esc => { break; } KeyEvent::Up | KeyEvent::Char('k') => { if starting_row > 0 { starting_row -= 1; max_bottom_line = paint_textview( &draw_commands, starting_row, use_color_buffer, ); } } KeyEvent::Down | KeyEvent::Char('j') => { if starting_row < (max_bottom_line - height) { starting_row += 1; } max_bottom_line = paint_textview(&draw_commands, starting_row, use_color_buffer); } KeyEvent::PageUp | KeyEvent::Ctrl('b') => { starting_row -= std::cmp::min(height, starting_row); max_bottom_line = paint_textview(&draw_commands, starting_row, use_color_buffer); } KeyEvent::PageDown | KeyEvent::Ctrl('f') | KeyEvent::Char(' ') => { if starting_row < (max_bottom_line - height) { starting_row += height; if starting_row > (max_bottom_line - height) { starting_row = max_bottom_line - height; } } max_bottom_line = paint_textview(&draw_commands, starting_row, 
use_color_buffer); } _ => {} }, _ => {} } } let new_size = terminal.terminal_size(); if size != new_size { size = new_size; let _ = terminal.clear(crossterm::ClearType::All); max_bottom_line = paint_textview(&draw_commands, starting_row, use_color_buffer); } } let _ = cursor.show(); let _ = RawScreen::disable_raw_mode(); } } outln!(""); } fn scroll_view(s: &str) { let mut v = vec![]; for line in s.lines() { v.push(DrawCommand::DrawString(Style::default(), line.to_string())); v.push(DrawCommand::NextLine); } scroll_view_lines_if_needed(v, false); } fn view_text_value(value: &Value) { let value_anchor = value.anchor(); match &value.value { UntaggedValue::Primitive(Primitive::String(ref s)) => { if let Some(source) = value_anchor { let extension: Option<String> = match source { AnchorLocation::File(file) => { let path = Path::new(&file); path.extension().map(|x| x.to_string_lossy().to_string()) } AnchorLocation::Url(url) => { let url = url::Url::parse(&url); if let Ok(url) = url { let url = url.clone(); if let Some(mut segments) = url.path_segments() { if let Some(file) = segments.next_back() { let path = Path::new(file); path.extension().map(|x| x.to_string_lossy().to_string()) } else { None } } else { None } } else { None } } //FIXME: this probably isn't correct AnchorLocation::Source(_source) => None, }; match extension { Some(extension) => { // Load these once at the start of your program let ps: SyntaxSet = syntect::dumps::from_binary(include_bytes!( "../../assets/syntaxes.bin" )); if let Some(syntax) = ps.find_syntax_by_extension(&extension) { let ts: ThemeSet = syntect::dumps::from_binary(include_bytes!( "../../assets/themes.bin" )); let mut h = HighlightLines::new(syntax, &ts.themes["OneHalfDark"]); let mut v = vec![]; for line in s.lines() { let ranges: Vec<(Style, &str)> = h.highlight(line, &ps); for range in ranges { v.push(DrawCommand::DrawString(range.0, range.1.to_string())); } v.push(DrawCommand::NextLine); } scroll_view_lines_if_needed(v, true); } else 
{ scroll_view(s); } } _ => { scroll_view(s); } } } else { scroll_view(s); } } _ => {} } } fn main() { serve_plugin(&mut TextView::new()); }
34.646465
99
0.416132
defb9bc47e12259345bedfedf1c5ccfec0d258e9
4,764
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::net::{set_nonblock, EventedFd}, futures::{ future::Future, task::{Poll, Waker}, try_ready, }, std::{ io, marker::Unpin, net::{self, SocketAddr}, ops::Deref, os::unix::io::AsRawFd, pin::Pin, }, }; /// An I/O object representing a UDP socket. pub struct UdpSocket(EventedFd<net::UdpSocket>); impl Deref for UdpSocket { type Target = EventedFd<net::UdpSocket>; fn deref(&self) -> &Self::Target { &self.0 } } impl UdpSocket { pub fn bind(addr: &SocketAddr) -> io::Result<UdpSocket> { let socket = net::UdpSocket::bind(addr)?; UdpSocket::from_socket(socket) } pub fn from_socket(socket: net::UdpSocket) -> io::Result<UdpSocket> { set_nonblock(socket.as_raw_fd())?; unsafe { Ok(UdpSocket(EventedFd::new(socket)?)) } } pub fn recv_from<'a>(&'a self, buf: &'a mut [u8]) -> RecvFrom<'a> { RecvFrom { socket: self, buf } } pub fn async_recv_from( &self, buf: &mut [u8], lw: &Waker, ) -> Poll<io::Result<(usize, SocketAddr)>> { try_ready!(EventedFd::poll_readable(&self.0, lw)); match self.0.as_ref().recv_from(buf) { Err(e) => { if e.kind() == io::ErrorKind::WouldBlock { self.0.need_read(lw); Poll::Pending } else { Poll::Ready(Err(e)) } } Ok((size, addr)) => Poll::Ready(Ok((size, addr))), } } pub fn send_to<'a>(&'a self, buf: &'a [u8], addr: SocketAddr) -> SendTo<'a> { SendTo { socket: self, buf, addr } } pub fn async_send_to(&self, buf: &[u8], addr: SocketAddr, lw: &Waker) -> Poll<io::Result<()>> { try_ready!(EventedFd::poll_writable(&self.0, lw)); match self.0.as_ref().send_to(buf, addr) { Err(e) => { if e.kind() == io::ErrorKind::WouldBlock { self.0.need_write(lw); Poll::Pending } else { Poll::Ready(Err(e)) } } Ok(_) => Poll::Ready(Ok(())), } } pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> { self.0.as_ref().set_broadcast(broadcast) } pub fn broadcast(&self) -> io::Result<bool> { 
self.0.as_ref().broadcast() } } pub struct RecvFrom<'a> { socket: &'a UdpSocket, buf: &'a mut [u8], } impl<'a> Unpin for RecvFrom<'a> {} impl<'a> Future for RecvFrom<'a> { type Output = io::Result<(usize, SocketAddr)>; fn poll(mut self: Pin<&mut Self>, lw: &Waker) -> Poll<Self::Output> { let this = &mut *self; let (received, addr) = try_ready!(this.socket.async_recv_from(this.buf, lw)); Poll::Ready(Ok((received, addr))) } } pub struct SendTo<'a> { socket: &'a UdpSocket, buf: &'a [u8], addr: SocketAddr, } impl<'a> Unpin for SendTo<'a> {} impl<'a> Future for SendTo<'a> { type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, lw: &Waker) -> Poll<Self::Output> { self.socket.async_send_to(self.buf, self.addr, lw) } } #[cfg(test)] mod tests { use super::UdpSocket; use crate::Executor; use std::io::Error; #[test] fn send_recv() { let mut exec = Executor::new().expect("could not create executor"); let addr = "127.0.0.1:29995".parse().unwrap(); let buf = b"hello world"; let socket = UdpSocket::bind(&addr).expect("could not create socket"); let fut = async move { await!(socket.send_to(buf, addr))?; let mut recvbuf = vec![0; 11]; let (received, sender) = await!(socket.recv_from(&mut *recvbuf))?; assert_eq!(addr, sender); assert_eq!(received, buf.len()); assert_eq!(&*buf, &*recvbuf); Ok::<(), Error>(()) }; exec.run_singlethreaded(fut).expect("failed to run udp socket test"); } #[test] fn broadcast() { let mut _exec = Executor::new().expect("could not create executor"); let addr = "127.0.0.1:12345".parse().unwrap(); let socket = UdpSocket::bind(&addr).expect("could not create socket"); let initial = socket.broadcast().expect("could not get broadcast"); assert!(!initial); socket.set_broadcast(true).expect("could not set broadcast"); let set = socket.broadcast().expect("could not get broadcast"); assert!(set); } }
28.357143
99
0.536734
873837ac269e73c51e00599fbcb68c7b2ff24b9c
3,350
#[doc = "Reader of register EN"] pub type R = crate::R<u32, super::EN>; #[doc = "Writer for register EN"] pub type W = crate::W<u32, super::EN>; #[doc = "Register EN `reset()`'s with value 0"] impl crate::ResetValue for super::EN { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `CHIDX`"] pub type CHIDX_R = crate::R<u8, u8>; #[doc = "Write proxy for field `CHIDX`"] pub struct CHIDX_W<'a> { w: &'a mut W, } impl<'a> CHIDX_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f); self.w } } #[doc = "\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum EN_A { #[doc = "0: Disable subscription"] DISABLED = 0, #[doc = "1: Enable subscription"] ENABLED = 1, } impl From<EN_A> for bool { #[inline(always)] fn from(variant: EN_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `EN`"] pub type EN_R = crate::R<bool, EN_A>; impl EN_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> EN_A { match self.bits { false => EN_A::DISABLED, true => EN_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == EN_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == EN_A::ENABLED } } #[doc = "Write proxy for field `EN`"] pub struct EN_W<'a> { w: &'a mut W, } impl<'a> EN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EN_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Disable subscription"] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(EN_A::DISABLED) } #[doc = "Enable subscription"] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(EN_A::ENABLED) } #[doc = r"Sets the field bit"] #[inline(always)] pub 
fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31); self.w } } impl R { #[doc = "Bits 0:3 - Channel that task CHG\\[n\\].EN will subscribe to"] #[inline(always)] pub fn chidx(&self) -> CHIDX_R { CHIDX_R::new((self.bits & 0x0f) as u8) } #[doc = "Bit 31"] #[inline(always)] pub fn en(&self) -> EN_R { EN_R::new(((self.bits >> 31) & 0x01) != 0) } } impl W { #[doc = "Bits 0:3 - Channel that task CHG\\[n\\].EN will subscribe to"] #[inline(always)] pub fn chidx(&mut self) -> CHIDX_W { CHIDX_W { w: self } } #[doc = "Bit 31"] #[inline(always)] pub fn en(&mut self) -> EN_W { EN_W { w: self } } }
26.587302
86
0.53194
f74ecbc902999c3745dd3a80955611b9d83af5ba
3,221
extern crate simple_logger; #[macro_use] extern crate log; extern crate base64; extern crate bincode; extern crate image; extern crate qrcode; extern crate reqwest; extern crate whatsappweb; use std::fs::{remove_file, File, OpenOptions}; use std::io::Read; use std::sync::Arc; use image::Luma; use whatsappweb::connection; use whatsappweb::connection::{ DisconnectReason, PersistentSession, State, UserData, WhatsappWebConnection, WhatsappWebHandler, }; use whatsappweb::media; use whatsappweb::models::{ChatMessage, ChatMessageContent}; use whatsappweb::models::Jid; use whatsappweb::MediaType; const SESSION_FILENAME: &str = "session.bin"; struct Handler {} impl WhatsappWebHandler for Handler { fn on_state_changed(&self, connection: &WhatsappWebConnection<Handler>, state: State) { info!("new state: {:?}", state); if state == State::Connected { let mut file = Vec::new(); File::open("path/to/image.jpg") .unwrap() .read_to_end(&mut file) .unwrap(); let connection0 = connection.clone(); let (thumbnail, size) = media::generate_thumbnail_and_get_size(&file); let thumbnail = Arc::new(thumbnail); media::upload_file( &file, MediaType::Image, &connection, Box::new(move |file_info| { let jid = Jid::from_phonenumber("+49123456789".to_string()).unwrap(); connection0.send_message( ChatMessageContent::Image(file_info.unwrap(), size, thumbnail.to_vec()), jid, ); }), ); } } fn on_persistent_session_data_changed(&self, persistent_session: PersistentSession) { bincode::serialize_into( OpenOptions::new() .create(true) .write(true) .open(SESSION_FILENAME) .unwrap(), &persistent_session, ) .unwrap(); } fn on_user_data_changed(&self, _: &WhatsappWebConnection<Handler>, _: UserData) {} fn on_disconnect(&self, reason: DisconnectReason) { info!("disconnected"); match reason { DisconnectReason::Removed => { remove_file(SESSION_FILENAME).unwrap(); } _ => {} } } fn on_message(&self, _: &WhatsappWebConnection<Handler>, _: bool, _: Box<ChatMessage>) {} } fn main() { 
simple_logger::init_with_level(log::Level::Debug).unwrap(); let handler = Handler {}; if let Ok(file) = File::open(SESSION_FILENAME) { let (_, join_handle) = connection::with_persistent_session(bincode::deserialize_from(file).unwrap(), handler); join_handle.join().unwrap(); } else { let (_, join_handle) = connection::new( |qr| { qr.render::<Luma<u8>>() .module_dimensions(10, 10) .build() .save("login_qr.png") .unwrap(); }, handler, ); join_handle.join().unwrap(); } }
30.102804
100
0.561006
712935d0e4f36bc2c6ebaf1e4e42cac0d5855217
2,676
//! Encoders use std::fmt; use std::io; use log::LogRecord; pub mod pattern; pub mod writer; /// A trait implemented by types that can serialize a `LogRecord` into a /// `Write`r. /// /// `Encode`rs are commonly used by `Append`ers to format a log record for /// output. pub trait Encode: fmt::Debug + Send + Sync + 'static { /// Encodes the `LogRecord` into bytes and writes them. fn encode(&self, w: &mut Write, record: &LogRecord) -> io::Result<()>; } /// A text or background color. #[derive(Copy, Clone, Debug)] #[allow(missing_docs)] pub enum Color { Black, Red, Green, Yellow, Blue, Magenta, Cyan, White, } /// The style applied to text output. /// /// Any fields set to `None` will be set to their default format, as defined /// by the `Write`r. #[derive(Clone, Default)] pub struct Style { /// The text (or foreground) color. pub text: Option<Color>, /// The background color. pub background: Option<Color>, /// True if the text should have increased intensity. pub intense: Option<bool>, _p: (), } impl fmt::Debug for Style { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("Style") .field("text", &self.text) .field("background", &self.background) .field("intense", &self.intense) .finish() } } impl Style { /// Returns a `Style` with all fields set to their defaults. pub fn new() -> Style { Style::default() } /// Sets the text color. pub fn text(&mut self, text: Color) -> &mut Style { self.text = Some(text); self } /// Sets the background color. pub fn background(&mut self, background: Color) -> &mut Style { self.background = Some(background); self } /// Sets the text intensity. pub fn intense(&mut self, intense: bool) -> &mut Style { self.intense = Some(intense); self } } /// A trait for types that an `Encode`r will write to. /// /// It extends `std::io::Write` and adds some extra functionality. pub trait Write: io::Write { /// Sets the output text style, if supported. /// /// `Write`rs should ignore any parts of the `Style` they do not support. 
/// /// The default implementation returns `Ok(())`. Implementations that do /// not support styling should do this as well. #[allow(unused_variables)] fn set_style(&mut self, style: &Style) -> io::Result<()> { Ok(()) } } impl<'a, W: Write + ?Sized> Write for &'a mut W { fn set_style(&mut self, style: &Style) -> io::Result<()> { <W as Write>::set_style(*self, style) } }
25.485714
77
0.599028
0e3a46b781a781ed843b444111198ebb17ffce2b
2,309
// Copyright 2018-2020 Cargill Incorporated // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #[cfg(feature = "circuit-read")] mod circuit_read; use std::error::Error as StdError; use std::sync::{Arc, RwLock}; use crate::rest_api::{Resource, RestResourceProvider}; use super::SplinterState; #[derive(Debug)] pub enum CircuitRouteError { NotFound(String), PoisonedLock, } impl StdError for CircuitRouteError { fn source(&self) -> Option<&(dyn StdError + 'static)> { match self { CircuitRouteError::NotFound(_) => None, CircuitRouteError::PoisonedLock => None, } } } impl std::fmt::Display for CircuitRouteError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { CircuitRouteError::NotFound(msg) => write!(f, "Circuit not found: {}", msg), CircuitRouteError::PoisonedLock => write!(f, "Splinter State lock was poisoned"), } } } #[derive(Clone)] pub struct CircuitResourceProvider { node_id: String, state: Arc<RwLock<SplinterState>>, } impl CircuitResourceProvider { pub fn new(node_id: String, state: Arc<RwLock<SplinterState>>) -> Self { Self { node_id, state } } } impl RestResourceProvider for CircuitResourceProvider { fn resources(&self) -> Vec<Resource> { // Allowing unused_mut because resources must be mutable if feature circuit-read is enabled #[allow(unused_mut)] let mut resources = Vec::new(); #[cfg(feature = "circuit-read")] { resources.append(&mut vec![ circuit_read::make_fetch_circuit_resource(self.state.clone()), 
circuit_read::make_list_circuits_resource(self.state.clone()), ]) } resources } }
30.381579
99
0.65916
677dc08c67f13c5e801788ba74192798bdf98f3c
5,621
use hacspec_lib::*; pub enum Error { InvalidAddition, } const BITS: usize = 256; public_nat_mod!( type_name: P256FieldElement, type_of_canvas: FieldCanvas, bit_size_of_field: 256, // XXX: Unfortunately we can't use constants here. modulo_value: "ffffffff00000001000000000000000000000000ffffffffffffffffffffffff" ); public_nat_mod!( type_name: P256Scalar, type_of_canvas: ScalarCanvas, bit_size_of_field: 256, // XXX: Unfortunately we can't use constants here. modulo_value: "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551" ); pub type Affine = (P256FieldElement, P256FieldElement); pub type AffineResult = Result<Affine, Error>; type P256Jacobian = (P256FieldElement, P256FieldElement, P256FieldElement); type JacobianResult = Result<P256Jacobian, Error>; bytes!(Element, 32); fn jacobian_to_affine(p: P256Jacobian) -> Affine { let (x, y, z) = p; let z2 = z.exp(2u32); let z2i = z2.inv(); let z3 = z * z2; let z3i = z3.inv(); let x = x * z2i; let y = y * z3i; (x, y) } fn affine_to_jacobian(p: Affine) -> P256Jacobian { let (x, y) = p; (x, y, P256FieldElement::from_literal(1u128)) } fn point_double(p: P256Jacobian) -> P256Jacobian { let (x1, y1, z1) = p; let delta = z1.exp(2u32); let gamma = y1.exp(2u32); let beta = x1 * gamma; let alpha_1 = x1 - delta; let alpha_2 = x1 + delta; let alpha = P256FieldElement::from_literal(3u128) * (alpha_1 * alpha_2); let x3 = alpha.exp(2u32) - (P256FieldElement::from_literal(8u128) * beta); let z3_ = (y1 + z1).exp(2u32); let z3 = z3_ - (gamma + delta); let y3_1 = (P256FieldElement::from_literal(4u128) * beta) - x3; let y3_2 = P256FieldElement::from_literal(8u128) * (gamma * gamma); let y3 = (alpha * y3_1) - y3_2; (x3, y3, z3) } fn is_point_at_infinity(p: P256Jacobian) -> bool { let (_x, _y, z) = p; z.equal(P256FieldElement::from_literal(0u128)) } fn s1_equal_s2(s1: P256FieldElement, s2: P256FieldElement) -> JacobianResult { if s1.equal(s2) { JacobianResult::Err(Error::InvalidAddition) } else { JacobianResult::Ok(( 
P256FieldElement::from_literal(0u128), P256FieldElement::from_literal(1u128), P256FieldElement::from_literal(0u128), )) } } fn point_add_jacob(p: P256Jacobian, q: P256Jacobian) -> JacobianResult { let mut result = JacobianResult::Ok(q); if !is_point_at_infinity(p) { if is_point_at_infinity(q) { result = JacobianResult::Ok(p); } else { let (x1, y1, z1) = p; let (x2, y2, z2) = q; let z1z1 = z1.exp(2u32); let z2z2 = z2.exp(2u32); let u1 = x1 * z2z2; let u2 = x2 * z1z1; let s1 = (y1 * z2) * z2z2; let s2 = (y2 * z1) * z1z1; if u1.equal(u2) { result = s1_equal_s2(s1, s2); } else { let h = u2 - u1; let i = (P256FieldElement::from_literal(2u128) * h).exp(2u32); let j = h * i; let r = P256FieldElement::from_literal(2u128) * (s2 - s1); let v = u1 * i; let x3_1 = P256FieldElement::from_literal(2u128) * v; let x3_2 = r.exp(2u32) - j; let x3 = x3_2 - x3_1; let y3_1 = (P256FieldElement::from_literal(2u128) * s1) * j; let y3_2 = r * (v - x3); let y3 = y3_2 - y3_1; let z3_ = (z1 + z2).exp(2u32); let z3 = (z3_ - (z1z1 + z2z2)) * h; result = JacobianResult::Ok((x3, y3, z3)); } } }; result } fn ltr_mul(k: P256Scalar, p: P256Jacobian) -> JacobianResult { let mut q = ( P256FieldElement::from_literal(0u128), P256FieldElement::from_literal(1u128), P256FieldElement::from_literal(0u128), ); for i in 0..BITS { q = point_double(q); if k.get_bit(BITS - 1 - i).equal(P256Scalar::ONE()) { q = point_add_jacob(q, p)?; } } JacobianResult::Ok(q) } pub fn p256_point_mul(k: P256Scalar, p: Affine) -> AffineResult { let jac = ltr_mul(k, affine_to_jacobian(p))?; AffineResult::Ok(jacobian_to_affine(jac)) } pub fn p256_point_mul_base(k: P256Scalar) -> AffineResult { let base_point = ( P256FieldElement::from_byte_seq_be(&Element(secret_bytes!([ 0x6Bu8, 0x17u8, 0xD1u8, 0xF2u8, 0xE1u8, 0x2Cu8, 0x42u8, 0x47u8, 0xF8u8, 0xBCu8, 0xE6u8, 0xE5u8, 0x63u8, 0xA4u8, 0x40u8, 0xF2u8, 0x77u8, 0x03u8, 0x7Du8, 0x81u8, 0x2Du8, 0xEBu8, 0x33u8, 0xA0u8, 0xF4u8, 0xA1u8, 0x39u8, 0x45u8, 0xD8u8, 0x98u8, 0xC2u8, 0x96u8 ]))), 
P256FieldElement::from_byte_seq_be(&Element(secret_bytes!([ 0x4Fu8, 0xE3u8, 0x42u8, 0xE2u8, 0xFEu8, 0x1Au8, 0x7Fu8, 0x9Bu8, 0x8Eu8, 0xE7u8, 0xEBu8, 0x4Au8, 0x7Cu8, 0x0Fu8, 0x9Eu8, 0x16u8, 0x2Bu8, 0xCEu8, 0x33u8, 0x57u8, 0x6Bu8, 0x31u8, 0x5Eu8, 0xCEu8, 0xCBu8, 0xB6u8, 0x40u8, 0x68u8, 0x37u8, 0xBFu8, 0x51u8, 0xF5u8 ]))), ); p256_point_mul(k, base_point) } fn point_add_distinct(p: Affine, q: Affine) -> AffineResult { let r = point_add_jacob(affine_to_jacobian(p), affine_to_jacobian(q))?; AffineResult::Ok(jacobian_to_affine(r)) } #[allow(unused_assignments)] pub fn point_add(p: Affine, q: Affine) -> AffineResult { if p != q { point_add_distinct(p, q) } else { AffineResult::Ok(jacobian_to_affine(point_double(affine_to_jacobian(p)))) } }
32.12
99
0.601316
64e23e4ad60be0e401a0bf58626262da314d3145
3,810
//! Availability Impact (A) use crate::{Error, Metric, MetricType, Result}; use alloc::borrow::ToOwned; use core::{fmt, str::FromStr}; /// Availability Impact (A) - CVSS v3.1 Base Metric Group /// /// Described in CVSS v3.1 Specification: Section 2.3.3: /// <https://www.first.org/cvss/specification-document#t6> /// /// > This metric measures the impact to the availability of the impacted /// > component resulting from a successfully exploited vulnerability. /// > While the Confidentiality and Integrity impact metrics apply to the /// > loss of confidentiality or integrity of data (e.g., information, /// > files) used by the impacted component, this metric refers to the loss /// > of availability of the impacted component itself, such as a networked /// > service (e.g., web, database, email). Since availability refers to the /// > accessibility of information resources, attacks that consume network /// > bandwidth, processor cycles, or disk space all impact the availability /// > of an impacted component. The Base Score is greatest when the /// > consequence to the impacted component is highest. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)] pub enum Availability { /// None (N) /// /// > There is no impact to availability within the impacted component. None, /// Low (L) /// /// > Performance is reduced or there are interruptions in resource /// > availability. Even if repeated exploitation of the vulnerability /// > is possible, the attacker does not have the ability to completely /// > deny service to legitimate users. The resources in the impacted /// > component are either partially available all of the time, or fully /// > available only some of the time, but overall there is no direct, /// > serious consequence to the impacted component. 
Low, /// High (H) /// /// > There is a total loss of availability, resulting in the attacker /// > being able to fully deny access to resources in the impacted /// > component; this loss is either sustained (while the attacker /// > continues to deliver the attack) or persistent (the condition /// > persists even after the attack has completed). Alternatively, /// > the attacker has the ability to deny some availability, but /// > the loss of availability presents a direct, serious consequence /// > to the impacted component (e.g., the attacker cannot disrupt /// > existing connections, but can prevent new connections; the /// > attacker can repeatedly exploit a vulnerability that, in each /// > instance of a successful attack, leaks a only small amount of /// > memory, but after repeated exploitation causes a service to become /// > completely unavailable). High, } impl Metric for Availability { const TYPE: MetricType = MetricType::A; fn score(self) -> f64 { match self { Availability::None => 0.0, Availability::Low => 0.22, Availability::High => 0.56, } } fn as_str(self) -> &'static str { match self { Availability::None => "N", Availability::Low => "L", Availability::High => "H", } } } impl fmt::Display for Availability { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}:{}", Self::name(), self.as_str()) } } impl FromStr for Availability { type Err = Error; fn from_str(s: &str) -> Result<Self> { match s { "N" => Ok(Availability::None), "L" => Ok(Availability::Low), "H" => Ok(Availability::High), _ => Err(Error::InvalidMetric { metric_type: Self::TYPE, value: s.to_owned(), }), } } }
38.1
76
0.64042
1105bda0e75197863457c2f242897aeeb43f8f7a
7,526
use super::ViewPage; use seed::prelude::*; use crate::{ entity::{ form::settings::{Field, Form, Problem}, Viewer, }, loading, request, route::{self, Route}, GMsg, Session, }; // ------ ------ // Model // ------ ------ // ------ Model ------ #[derive(Default)] pub struct Model { session: Session, problems: Vec<Problem>, status: Status, } impl Model { pub const fn session(&self) -> &Session { &self.session } } impl From<Model> for Session { fn from(model: Model) -> Self { model.session } } // ------ Status ------ enum Status { Loading, LoadingSlowly, Loaded(Form), Failed, } impl Default for Status { fn default() -> Self { Self::Loading } } // ------ ------ // Init // ------ ------ pub fn init(session: Session, orders: &mut impl Orders<Msg, GMsg>) -> Model { orders .perform_cmd(loading::notify_on_slow_load(Msg::SlowLoadThresholdPassed)) .perform_cmd(request::settings::load( session.viewer(), Msg::FormLoadCompleted, )); Model { session, ..Model::default() } } // ------ ------ // Sink // ------ ------ pub fn sink(g_msg: GMsg, model: &mut Model, orders: &mut impl Orders<Msg, GMsg>) { match g_msg { GMsg::SessionChanged(session) => { model.session = session; route::go_to(Route::Home, orders); } _ => (), } } // ------ ------ // Update // ------ ------ pub enum Msg { FormSubmitted, FieldChanged(Field), FormLoadCompleted(Result<Form, Vec<Problem>>), SaveCompleted(Result<Viewer, Vec<Problem>>), SlowLoadThresholdPassed, } pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders<Msg, GMsg>) { match msg { Msg::FormSubmitted => { if let Status::Loaded(form) = &model.status { match form.trim_fields().validate() { Ok(valid_form) => { model.problems.clear(); orders.perform_cmd(request::settings::update( model.session.viewer(), &valid_form, Msg::SaveCompleted, )); } Err(problems) => { model.problems = problems; } } } } Msg::FieldChanged(field) => { if let Status::Loaded(form) = &mut model.status { form.upsert_field(field); } } Msg::FormLoadCompleted(Ok(form)) => { 
model.status = Status::Loaded(form); } Msg::FormLoadCompleted(Err(problems)) => { model.problems = problems; model.status = Status::Failed; } Msg::SaveCompleted(Ok(viewer)) => { viewer.store(); orders.send_g_msg(GMsg::SessionChanged(Session::LoggedIn(viewer))); } Msg::SaveCompleted(Err(problems)) => { model.problems = problems; } Msg::SlowLoadThresholdPassed => { if let Status::Loading = model.status { model.status = Status::LoadingSlowly } } } } // ------ ------ // View // ------ ------ pub fn view<'a>(model: &Model) -> ViewPage<'a, Msg> { ViewPage::new("Settings", view_content(model)) } // ====== PRIVATE ====== fn view_content(model: &Model) -> Node<Msg> { div![ class!["auth-page"], div![ class!["container", "page"], div![ class!["row"], div![ class!["col-md-6", "offset-md-3", "col-x32-12"], h1![class!["text-xs-center"], "Your Settings"], if model.session.viewer().is_some() { vec![ ul![ class!["error-messages"], model.problems.iter().map(|problem| li![problem.message()]) ], view_form(model), ] } else { vec![div!["Sign in to view your settings."]] } ] ] ] ] } // ------ view form ------ fn view_form(model: &Model) -> Node<Msg> { match &model.status { Status::Loading => empty![], Status::LoadingSlowly => loading::view_icon(), Status::Loaded(form) => form![ raw_ev(Ev::Submit, |event| { event.prevent_default(); Msg::FormSubmitted }), form.iter_fields().map(view_fieldset), button![ class!["btn", "btn-lg", "btn-primary", "pull-xs-right"], "Update Settings" ] ], Status::Failed => loading::view_error("page"), } } fn view_fieldset(field: &Field) -> Node<Msg> { match field { Field::Avatar(value) => fieldset![ class!["form-group"], input![ class!["form-control"], attrs! { At::Type => "text", At::Placeholder => "URL of profile picture", At::Value => value }, input_ev(Ev::Input, |new_value| Msg::FieldChanged(Field::Avatar( new_value ))), ] ], Field::Username(value) => fieldset![ class!["form-group"], input![ class!["form-control", "form-control-lg"], attrs! 
{ At::Type => "text", At::Placeholder => "Your Name", At::Value => value }, input_ev(Ev::Input, |new_value| Msg::FieldChanged(Field::Username( new_value ))), ] ], Field::Bio(value) => fieldset![ class!["form-group"], textarea![ class!["form-control", "form-control-lg"], attrs! { At::Rows => 8, At::Placeholder => "Short bio about you", }, value, input_ev(Ev::Input, |new_value| Msg::FieldChanged(Field::Bio( new_value ))), ] ], Field::Email(value) => fieldset![ class!["form-group"], input![ class!["form-control", "form-control-lg"], attrs! { At::Type => "text", At::Placeholder => "Email", At::Value => value }, input_ev(Ev::Input, |new_value| Msg::FieldChanged(Field::Email( new_value ))), ] ], Field::Password(value) => fieldset![ class!["form-group"], input![ class!["form-control", "form-control-lg"], attrs! { At::Type => "password", At::Placeholder => "Password", At::Value => value }, input_ev(Ev::Input, |new_value| Msg::FieldChanged(Field::Password( new_value ))), ] ], } }
27.367273
91
0.425193
e82a4ba0a474c2f4e513e937eeced0e11a1758bf
62,335
use std::cell::{RefCell, RefMut}; use std::collections::HashMap; use std::convert::TryFrom; use std::rc::Rc; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Instant; use anyhow::anyhow; use anyhow::Error; use never::Never; use semver::Version; use wasmtime::{Memory, Trap}; use graph::blockchain::{Blockchain, HostFnCtx, TriggerWithHandler}; use graph::data::store; use graph::prelude::*; use graph::runtime::gas::{self, Gas, GasCounter, SaturatingInto}; use graph::runtime::HostExportError; use graph::runtime::{AscHeap, IndexForAscTypeId}; use graph::{components::subgraph::MappingError, runtime::AscPtr}; use graph::{ data::subgraph::schema::SubgraphError, runtime::{asc_get, asc_new, try_asc_get, DeterministicHostError}, }; pub use into_wasm_ret::IntoWasmRet; pub use stopwatch::TimeoutStopwatch; use crate::asc_abi::class::*; use crate::error::DeterminismLevel; use crate::gas_rules::{GAS_COST_LOAD, GAS_COST_STORE}; pub use crate::host_exports; use crate::host_exports::HostExports; use crate::mapping::MappingContext; use crate::mapping::ValidModule; mod into_wasm_ret; pub mod stopwatch; pub const TRAP_TIMEOUT: &str = "trap: interrupt"; pub trait IntoTrap { fn determinism_level(&self) -> DeterminismLevel; fn into_trap(self) -> Trap; } /// Handle to a WASM instance, which is terminated if and only if this is dropped. pub struct WasmInstance<C: Blockchain> { pub instance: wasmtime::Instance, // This is the only reference to `WasmInstanceContext` that's not within the instance itself, so // we can always borrow the `RefCell` with no concern for race conditions. // // Also this is the only strong reference, so the instance will be dropped once this is dropped. // The weak references are circulary held by instance itself through host exports. pub instance_ctx: Rc<RefCell<Option<WasmInstanceContext<C>>>>, // A reference to the gas counter used for reporting the gas used. 
pub gas: GasCounter, } impl<C: Blockchain> Drop for WasmInstance<C> { fn drop(&mut self) { // Assert that the instance will be dropped. assert_eq!(Rc::strong_count(&self.instance_ctx), 1); } } /// Proxies to the WasmInstanceContext. impl<C: Blockchain> AscHeap for WasmInstance<C> { fn raw_new(&mut self, bytes: &[u8], gas: &GasCounter) -> Result<u32, DeterministicHostError> { let mut ctx = RefMut::map(self.instance_ctx.borrow_mut(), |i| i.as_mut().unwrap()); ctx.raw_new(bytes, gas) } fn get( &self, offset: u32, size: u32, gas: &GasCounter, ) -> Result<Vec<u8>, DeterministicHostError> { self.instance_ctx().get(offset, size, gas) } fn api_version(&self) -> Version { self.instance_ctx().api_version() } fn asc_type_id( &mut self, type_id_index: IndexForAscTypeId, ) -> Result<u32, DeterministicHostError> { self.instance_ctx_mut().asc_type_id(type_id_index) } } impl<C: Blockchain> WasmInstance<C> { pub(crate) fn handle_json_callback( mut self, handler_name: &str, value: &serde_json::Value, user_data: &store::Value, ) -> Result<BlockState<C>, anyhow::Error> { let gas = GasCounter::default(); let value = asc_new(&mut self, value, &gas)?; let user_data = asc_new(&mut self, user_data, &gas)?; self.instance_ctx_mut().ctx.state.enter_handler(); // Invoke the callback self.instance .get_func(handler_name) .with_context(|| format!("function {} not found", handler_name))? .typed()? 
.call((value.wasm_ptr(), user_data.wasm_ptr())) .with_context(|| format!("Failed to handle callback '{}'", handler_name))?; self.instance_ctx_mut().ctx.state.exit_handler(); Ok(self.take_ctx().ctx.state) } pub(crate) fn handle_trigger( mut self, trigger: TriggerWithHandler<C>, ) -> Result<(BlockState<C>, Gas), MappingError> { let handler_name = trigger.handler_name().to_owned(); let gas = self.gas.clone(); let asc_trigger = trigger.to_asc_ptr(&mut self, &gas)?; self.invoke_handler(&handler_name, asc_trigger) } pub fn take_ctx(&mut self) -> WasmInstanceContext<C> { self.instance_ctx.borrow_mut().take().unwrap() } pub(crate) fn instance_ctx(&self) -> std::cell::Ref<'_, WasmInstanceContext<C>> { std::cell::Ref::map(self.instance_ctx.borrow(), |i| i.as_ref().unwrap()) } pub fn instance_ctx_mut(&self) -> std::cell::RefMut<'_, WasmInstanceContext<C>> { std::cell::RefMut::map(self.instance_ctx.borrow_mut(), |i| i.as_mut().unwrap()) } #[cfg(debug_assertions)] pub fn get_func(&self, func_name: &str) -> wasmtime::Func { self.instance.get_func(func_name).unwrap() } #[cfg(debug_assertions)] pub fn gas_used(&self) -> u64 { self.gas.get().value() } fn invoke_handler<T>( &mut self, handler: &str, arg: AscPtr<T>, ) -> Result<(BlockState<C>, Gas), MappingError> { let func = self .instance .get_func(handler) .with_context(|| format!("function {} not found", handler))?; // Caution: Make sure all exit paths from this function call `exit_handler`. self.instance_ctx_mut().ctx.state.enter_handler(); // This `match` will return early if there was a non-deterministic trap. 
let deterministic_error: Option<Error> = match func.typed()?.call(arg.wasm_ptr()) { Ok(()) => None, Err(trap) if self.instance_ctx().possible_reorg => { self.instance_ctx_mut().ctx.state.exit_handler(); return Err(MappingError::PossibleReorg(trap.into())); } Err(trap) if trap.to_string().contains(TRAP_TIMEOUT) => { self.instance_ctx_mut().ctx.state.exit_handler(); return Err(MappingError::Unknown(Error::from(trap).context(format!( "Handler '{}' hit the timeout of '{}' seconds", handler, self.instance_ctx().timeout.unwrap().as_secs() )))); } Err(trap) => { use wasmtime::TrapCode::*; let trap_code = trap.trap_code(); let e = Error::from(trap); match trap_code { Some(MemoryOutOfBounds) | Some(HeapMisaligned) | Some(TableOutOfBounds) | Some(IndirectCallToNull) | Some(BadSignature) | Some(IntegerOverflow) | Some(IntegerDivisionByZero) | Some(BadConversionToInteger) | Some(UnreachableCodeReached) => Some(e), _ if self.instance_ctx().deterministic_host_trap => Some(e), _ => { self.instance_ctx_mut().ctx.state.exit_handler(); return Err(MappingError::Unknown(e)); } } } }; if let Some(deterministic_error) = deterministic_error { let message = format!("{:#}", deterministic_error).replace("\n", "\t"); // Log the error and restore the updates snapshot, effectively reverting the handler. 
error!(&self.instance_ctx().ctx.logger, "Handler skipped due to execution failure"; "handler" => handler, "error" => &message, ); let subgraph_error = SubgraphError { subgraph_id: self.instance_ctx().ctx.host_exports.subgraph_id.clone(), message, block_ptr: Some(self.instance_ctx().ctx.block_ptr.cheap_clone()), handler: Some(handler.to_string()), deterministic: true, }; self.instance_ctx_mut() .ctx .state .exit_handler_and_discard_changes_due_to_error(subgraph_error); } else { self.instance_ctx_mut().ctx.state.exit_handler(); } let gas = self.gas.get(); Ok((self.take_ctx().ctx.state, gas)) } } #[derive(Copy, Clone)] pub struct ExperimentalFeatures { pub allow_non_deterministic_ipfs: bool, } pub struct WasmInstanceContext<C: Blockchain> { // In the future there may be multiple memories, but currently there is only one memory per // module. And at least AS calls it "memory". There is no uninitialized memory in Wasm, memory // is zeroed when initialized or grown. memory: Memory, // Function exported by the wasm module that will allocate the request number of bytes and // return a pointer to the first byte of allocated space. memory_allocate: wasmtime::TypedFunc<i32, i32>, // Function wrapper for `idof<T>` from AssemblyScript id_of_type: Option<wasmtime::TypedFunc<u32, u32>>, pub ctx: MappingContext<C>, pub valid_module: Arc<ValidModule>, pub host_metrics: Arc<HostMetrics>, pub(crate) timeout: Option<Duration>, // Used by ipfs.map. pub(crate) timeout_stopwatch: Arc<std::sync::Mutex<TimeoutStopwatch>>, // First free byte in the current arena. Set on the first call to `raw_new`. arena_start_ptr: i32, // Number of free bytes starting from `arena_start_ptr`. arena_free_size: i32, // A trap ocurred due to a possible reorg detection. pub possible_reorg: bool, // A host export trap ocurred for a deterministic reason. 
pub deterministic_host_trap: bool, pub(crate) experimental_features: ExperimentalFeatures, } impl<C: Blockchain> WasmInstance<C> { /// Instantiates the module and sets it to be interrupted after `timeout`. pub fn from_valid_module_with_ctx( valid_module: Arc<ValidModule>, ctx: MappingContext<C>, host_metrics: Arc<HostMetrics>, timeout: Option<Duration>, experimental_features: ExperimentalFeatures, ) -> Result<WasmInstance<C>, anyhow::Error> { let mut linker = wasmtime::Linker::new(&wasmtime::Store::new(valid_module.module.engine())); let host_fns = ctx.host_fns.cheap_clone(); let api_version = ctx.host_exports.api_version.clone(); // Used by exports to access the instance context. There are two ways this can be set: // - After instantiation, if no host export is called in the start function. // - During the start function, if it calls a host export. // Either way, after instantiation this will have been set. let shared_ctx: Rc<RefCell<Option<WasmInstanceContext<C>>>> = Rc::new(RefCell::new(None)); // We will move the ctx only once, to init `shared_ctx`. But we don't statically know where // it will be moved so we need this ugly thing. let ctx: Rc<RefCell<Option<MappingContext<C>>>> = Rc::new(RefCell::new(Some(ctx))); // Start the timeout watchdog task. let timeout_stopwatch = Arc::new(std::sync::Mutex::new(TimeoutStopwatch::start_new())); if let Some(timeout) = timeout { // This task is likely to outlive the instance, which is fine. let interrupt_handle = linker.store().interrupt_handle().unwrap(); let timeout_stopwatch = timeout_stopwatch.clone(); graph::spawn_allow_panic(async move { let minimum_wait = Duration::from_secs(1); loop { let time_left = timeout.checked_sub(timeout_stopwatch.lock().unwrap().elapsed()); match time_left { None => break interrupt_handle.interrupt(), // Timed out. 
Some(time) if time < minimum_wait => break interrupt_handle.interrupt(), Some(time) => tokio::time::sleep(time).await, } } }); } // Because `gas` and `deterministic_host_trap` need to be accessed from the gas // host fn, they need to be separate from the rest of the context. let gas = GasCounter::default(); let deterministic_host_trap = Rc::new(AtomicBool::new(false)); macro_rules! link { ($wasm_name:expr, $rust_name:ident, $($param:ident),*) => { link!($wasm_name, $rust_name, "host_export_other", $($param),*) }; ($wasm_name:expr, $rust_name:ident, $section:expr, $($param:ident),*) => { let modules = valid_module .import_name_to_modules .get($wasm_name) .into_iter() .flatten(); // link an import with all the modules that require it. for module in modules { let func_shared_ctx = Rc::downgrade(&shared_ctx); let valid_module = valid_module.cheap_clone(); let host_metrics = host_metrics.cheap_clone(); let timeout_stopwatch = timeout_stopwatch.cheap_clone(); let ctx = ctx.cheap_clone(); let gas = gas.cheap_clone(); linker.func( module, $wasm_name, move |caller: wasmtime::Caller, $($param: u32),*| { let instance = func_shared_ctx.upgrade().unwrap(); let mut instance = instance.borrow_mut(); // Happens when calling a host fn in Wasm start. 
if instance.is_none() { *instance = Some(WasmInstanceContext::from_caller( caller, ctx.borrow_mut().take().unwrap(), valid_module.cheap_clone(), host_metrics.cheap_clone(), timeout, timeout_stopwatch.cheap_clone(), experimental_features.clone() ).unwrap()) } let instance = instance.as_mut().unwrap(); let _section = instance.host_metrics.stopwatch.start_section($section); let result = instance.$rust_name( &gas, $($param.into()),* ); match result { Ok(result) => Ok(result.into_wasm_ret()), Err(e) => { match IntoTrap::determinism_level(&e) { DeterminismLevel::Deterministic => { instance.deterministic_host_trap = true; }, DeterminismLevel::PossibleReorg => { instance.possible_reorg = true; }, DeterminismLevel::Unimplemented | DeterminismLevel::NonDeterministic => {}, } Err(IntoTrap::into_trap(e)) } } } )?; } }; } // Link chain-specifc host fns. for host_fn in host_fns.iter() { let modules = valid_module .import_name_to_modules .get(host_fn.name) .into_iter() .flatten(); for module in modules { let func_shared_ctx = Rc::downgrade(&shared_ctx); let host_fn = host_fn.cheap_clone(); let gas = gas.cheap_clone(); linker.func(module, host_fn.name, move |call_ptr: u32| { let start = Instant::now(); let instance = func_shared_ctx.upgrade().unwrap(); let mut instance = instance.borrow_mut(); let instance = match &mut *instance { Some(instance) => instance, // Happens when calling a host fn in Wasm start. 
None => { return Err(anyhow!( "{} is not allowed in global variables", host_fn.name ) .into()); } }; let name_for_metrics = host_fn.name.replace('.', "_"); let stopwatch = &instance.host_metrics.stopwatch; let _section = stopwatch.start_section(&format!("host_export_{}", name_for_metrics)); let ctx = HostFnCtx { logger: instance.ctx.logger.cheap_clone(), block_ptr: instance.ctx.block_ptr.cheap_clone(), heap: instance, gas: gas.cheap_clone(), }; let ret = (host_fn.func)(ctx, call_ptr).map_err(|e| match e { HostExportError::Deterministic(e) => { instance.deterministic_host_trap = true; e } HostExportError::PossibleReorg(e) => { instance.possible_reorg = true; e } HostExportError::Unknown(e) => e, })?; instance.host_metrics.observe_host_fn_execution_time( start.elapsed().as_secs_f64(), &name_for_metrics, ); Ok(ret) })?; } } link!("ethereum.encode", ethereum_encode, params_ptr); link!("ethereum.decode", ethereum_decode, params_ptr, data_ptr); link!("abort", abort, message_ptr, file_name_ptr, line, column); link!("store.get", store_get, "host_export_store_get", entity, id); link!( "store.set", store_set, "host_export_store_set", entity, id, data ); // All IPFS-related functions exported by the host WASM runtime should be listed in the // graph::data::subgraph::features::IPFS_ON_ETHEREUM_CONTRACTS_FUNCTION_NAMES array for // automatic feature detection to work. 
// // For reference, search this codebase for: ff652476-e6ad-40e4-85b8-e815d6c6e5e2 link!("ipfs.cat", ipfs_cat, "host_export_ipfs_cat", hash_ptr); link!( "ipfs.map", ipfs_map, "host_export_ipfs_map", link_ptr, callback, user_data, flags ); // The previous ipfs-related functions are unconditionally linked for backward compatibility if experimental_features.allow_non_deterministic_ipfs { link!( "ipfs.getBlock", ipfs_get_block, "host_export_ipfs_get_block", hash_ptr ); } link!("store.remove", store_remove, entity_ptr, id_ptr); link!("typeConversion.bytesToString", bytes_to_string, ptr); link!("typeConversion.bytesToHex", bytes_to_hex, ptr); link!("typeConversion.bigIntToString", big_int_to_string, ptr); link!("typeConversion.bigIntToHex", big_int_to_hex, ptr); link!("typeConversion.stringToH160", string_to_h160, ptr); link!("typeConversion.bytesToBase58", bytes_to_base58, ptr); link!("json.fromBytes", json_from_bytes, ptr); link!("json.try_fromBytes", json_try_from_bytes, ptr); link!("json.toI64", json_to_i64, ptr); link!("json.toU64", json_to_u64, ptr); link!("json.toF64", json_to_f64, ptr); link!("json.toBigInt", json_to_big_int, ptr); link!("crypto.keccak256", crypto_keccak_256, ptr); link!("bigInt.plus", big_int_plus, x_ptr, y_ptr); link!("bigInt.minus", big_int_minus, x_ptr, y_ptr); link!("bigInt.times", big_int_times, x_ptr, y_ptr); link!("bigInt.dividedBy", big_int_divided_by, x_ptr, y_ptr); link!("bigInt.dividedByDecimal", big_int_divided_by_decimal, x, y); link!("bigInt.mod", big_int_mod, x_ptr, y_ptr); link!("bigInt.pow", big_int_pow, x_ptr, exp); link!("bigInt.fromString", big_int_from_string, ptr); link!("bigInt.bitOr", big_int_bit_or, x_ptr, y_ptr); link!("bigInt.bitAnd", big_int_bit_and, x_ptr, y_ptr); link!("bigInt.leftShift", big_int_left_shift, x_ptr, bits); link!("bigInt.rightShift", big_int_right_shift, x_ptr, bits); link!("bigDecimal.toString", big_decimal_to_string, ptr); link!("bigDecimal.fromString", big_decimal_from_string, ptr); 
link!("bigDecimal.plus", big_decimal_plus, x_ptr, y_ptr); link!("bigDecimal.minus", big_decimal_minus, x_ptr, y_ptr); link!("bigDecimal.times", big_decimal_times, x_ptr, y_ptr); link!("bigDecimal.dividedBy", big_decimal_divided_by, x, y); link!("bigDecimal.equals", big_decimal_equals, x_ptr, y_ptr); link!("dataSource.create", data_source_create, name, params); link!( "dataSource.createWithContext", data_source_create_with_context, name, params, context ); link!("dataSource.address", data_source_address,); link!("dataSource.network", data_source_network,); link!("dataSource.context", data_source_context,); link!("ens.nameByHash", ens_name_by_hash, ptr); link!("log.log", log_log, level, msg_ptr); // `arweave and `box` functionality was removed, but apiVersion <= 0.0.4 must link it. if api_version <= Version::new(0, 0, 4) { link!("arweave.transactionData", arweave_transaction_data, ptr); link!("box.profile", box_profile, ptr); } // link the `gas` function // See also e3f03e62-40e4-4f8c-b4a1-d0375cca0b76 { let gas = gas.cheap_clone(); linker.func("gas", "gas", move |gas_used: u32| -> Result<(), Trap> { // Gas metering has a relevant execution cost cost, being called tens of thousands // of times per handler, but it's not worth having a stopwatch section here because // the cost of measuring would be greater than the cost of `consume_host_fn`. Last // time this was benchmarked it took < 100ns to run. if let Err(e) = gas.consume_host_fn(gas_used.saturating_into()) { deterministic_host_trap.store(true, Ordering::SeqCst); return Err(e.into_trap()); } Ok(()) })?; } let instance = linker.instantiate(&valid_module.module)?; // Usually `shared_ctx` is still `None` because no host fns were called during start. 
if shared_ctx.borrow().is_none() { *shared_ctx.borrow_mut() = Some(WasmInstanceContext::from_instance( &instance, ctx.borrow_mut().take().unwrap(), valid_module, host_metrics, timeout, timeout_stopwatch, experimental_features, )?); } match api_version { version if version <= Version::new(0, 0, 4) => {} _ => { instance .get_func("_start") .context("`_start` function not found")? .typed::<(), ()>()? .call(())?; } } Ok(WasmInstance { instance, instance_ctx: shared_ctx, gas, }) } } impl<C: Blockchain> AscHeap for WasmInstanceContext<C> { fn raw_new(&mut self, bytes: &[u8], gas: &GasCounter) -> Result<u32, DeterministicHostError> { // The cost of writing to wasm memory from the host is the same as of writing from wasm // using load instructions. gas.consume_host_fn(Gas::new(GAS_COST_STORE as u64 * bytes.len() as u64))?; // We request large chunks from the AssemblyScript allocator to use as arenas that we // manage directly. static MIN_ARENA_SIZE: i32 = 10_000; let size = i32::try_from(bytes.len()).unwrap(); if size > self.arena_free_size { // Allocate a new arena. Any free space left in the previous arena is left unused. This // causes at most half of memory to be wasted, which is acceptable. let arena_size = size.max(MIN_ARENA_SIZE); // Unwrap: This may panic if more memory needs to be requested from the OS and that // fails. This error is not deterministic since it depends on the operating conditions // of the node. self.arena_start_ptr = self.memory_allocate.call(arena_size).unwrap(); self.arena_free_size = arena_size; match &self.ctx.host_exports.api_version { version if *version <= Version::new(0, 0, 4) => {} _ => { // This arithmetic is done because when you call AssemblyScripts's `__alloc` // function, it isn't typed and it just returns `mmInfo` on it's header, // differently from allocating on regular types (`__new` for example). 
// `mmInfo` has size of 4, and everything allocated on AssemblyScript memory // should have alignment of 16, this means we need to do a 12 offset on these // big chunks of untyped allocation. self.arena_start_ptr += 12; self.arena_free_size -= 12; } }; }; let ptr = self.arena_start_ptr as usize; // Unwrap: We have just allocated enough space for `bytes`. self.memory.write(ptr, bytes).unwrap(); self.arena_start_ptr += size; self.arena_free_size -= size; Ok(ptr as u32) } fn get( &self, offset: u32, size: u32, gas: &GasCounter, ) -> Result<Vec<u8>, DeterministicHostError> { // The cost of reading wasm memory from the host is the same as of reading from wasm using // load instructions. gas.consume_host_fn(Gas::new(GAS_COST_LOAD as u64 * size as u64))?; let offset = offset as usize; let size = size as usize; let mut data = vec![0; size]; self.memory.read(offset, &mut data).map_err(|_| { DeterministicHostError::from(anyhow!( "Heap access out of bounds. Offset: {} Size: {}", offset, size )) })?; Ok(data) } fn api_version(&self) -> Version { self.ctx.host_exports.api_version.clone() } fn asc_type_id( &mut self, type_id_index: IndexForAscTypeId, ) -> Result<u32, DeterministicHostError> { let type_id = self .id_of_type .as_ref() .unwrap() // Unwrap ok because it's only called on correct apiVersion, look for AscPtr::generate_header .call(type_id_index as u32) .with_context(|| format!("Failed to call 'asc_type_id' with '{:?}'", type_id_index)) .map_err(DeterministicHostError::from)?; Ok(type_id) } } impl<C: Blockchain> WasmInstanceContext<C> { pub fn from_instance( instance: &wasmtime::Instance, ctx: MappingContext<C>, valid_module: Arc<ValidModule>, host_metrics: Arc<HostMetrics>, timeout: Option<Duration>, timeout_stopwatch: Arc<std::sync::Mutex<TimeoutStopwatch>>, experimental_features: ExperimentalFeatures, ) -> Result<Self, anyhow::Error> { // Provide access to the WASM runtime linear memory let memory = instance .get_memory("memory") .context("Failed to find memory 
export in the WASM module")?; let memory_allocate = match &ctx.host_exports.api_version { version if *version <= Version::new(0, 0, 4) => instance .get_func("memory.allocate") .context("`memory.allocate` function not found"), _ => instance .get_func("allocate") .context("`allocate` function not found"), }? .typed()? .clone(); let id_of_type = match &ctx.host_exports.api_version { version if *version <= Version::new(0, 0, 4) => None, _ => Some( instance .get_func("id_of_type") .context("`id_of_type` function not found")? .typed()? .clone(), ), }; Ok(WasmInstanceContext { memory_allocate, id_of_type, memory, ctx, valid_module, host_metrics, timeout, timeout_stopwatch, arena_free_size: 0, arena_start_ptr: 0, possible_reorg: false, deterministic_host_trap: false, experimental_features, }) } pub fn from_caller( caller: wasmtime::Caller, ctx: MappingContext<C>, valid_module: Arc<ValidModule>, host_metrics: Arc<HostMetrics>, timeout: Option<Duration>, timeout_stopwatch: Arc<std::sync::Mutex<TimeoutStopwatch>>, experimental_features: ExperimentalFeatures, ) -> Result<Self, anyhow::Error> { let memory = caller .get_export("memory") .and_then(|e| e.into_memory()) .context("Failed to find memory export in the WASM module")?; let memory_allocate = match &ctx.host_exports.api_version { version if *version <= Version::new(0, 0, 4) => caller .get_export("memory.allocate") .and_then(|e| e.into_func()) .context("`memory.allocate` function not found"), _ => caller .get_export("allocate") .and_then(|e| e.into_func()) .context("`allocate` function not found"), }? .typed()? .clone(); let id_of_type = match &ctx.host_exports.api_version { version if *version <= Version::new(0, 0, 4) => None, _ => Some( caller .get_export("id_of_type") .and_then(|e| e.into_func()) .context("`id_of_type` function not found")? .typed()? 
.clone(), ), }; Ok(WasmInstanceContext { id_of_type, memory_allocate, memory, ctx, valid_module, host_metrics, timeout, timeout_stopwatch, arena_free_size: 0, arena_start_ptr: 0, possible_reorg: false, deterministic_host_trap: false, experimental_features, }) } } // Implementation of externals. impl<C: Blockchain> WasmInstanceContext<C> { /// function abort(message?: string | null, fileName?: string | null, lineNumber?: u32, columnNumber?: u32): void /// Always returns a trap. pub fn abort( &mut self, gas: &GasCounter, message_ptr: AscPtr<AscString>, file_name_ptr: AscPtr<AscString>, line_number: u32, column_number: u32, ) -> Result<Never, DeterministicHostError> { let message = match message_ptr.is_null() { false => Some(asc_get(self, message_ptr, gas)?), true => None, }; let file_name = match file_name_ptr.is_null() { false => Some(asc_get(self, file_name_ptr, gas)?), true => None, }; let line_number = match line_number { 0 => None, _ => Some(line_number), }; let column_number = match column_number { 0 => None, _ => Some(column_number), }; self.ctx .host_exports .abort(message, file_name, line_number, column_number, gas) } /// function store.set(entity: string, id: string, data: Entity): void pub fn store_set( &mut self, gas: &GasCounter, entity_ptr: AscPtr<AscString>, id_ptr: AscPtr<AscString>, data_ptr: AscPtr<AscEntity>, ) -> Result<(), HostExportError> { let stopwatch = &self.host_metrics.stopwatch; stopwatch.start_section("host_export_store_set__wasm_instance_context_store_set"); let entity = asc_get(self, entity_ptr, gas)?; let id = asc_get(self, id_ptr, gas)?; let data = try_asc_get(self, data_ptr, gas)?; self.ctx.host_exports.store_set( &self.ctx.logger, &mut self.ctx.state, &self.ctx.proof_of_indexing, entity, id, data, stopwatch, gas, )?; Ok(()) } /// function store.remove(entity: string, id: string): void pub fn store_remove( &mut self, gas: &GasCounter, entity_ptr: AscPtr<AscString>, id_ptr: AscPtr<AscString>, ) -> Result<(), HostExportError> { let 
entity = asc_get(self, entity_ptr, gas)?; let id = asc_get(self, id_ptr, gas)?; self.ctx.host_exports.store_remove( &self.ctx.logger, &mut self.ctx.state, &self.ctx.proof_of_indexing, entity, id, gas, ) } /// function store.get(entity: string, id: string): Entity | null pub fn store_get( &mut self, gas: &GasCounter, entity_ptr: AscPtr<AscString>, id_ptr: AscPtr<AscString>, ) -> Result<AscPtr<AscEntity>, HostExportError> { let _timer = self .host_metrics .cheap_clone() .time_host_fn_execution_region("store_get"); let entity_type: String = asc_get(self, entity_ptr, gas)?; let id: String = asc_get(self, id_ptr, gas)?; let entity_option = self.ctx.host_exports.store_get( &mut self.ctx.state, entity_type.clone(), id.clone(), gas, )?; let ret = match entity_option { Some(entity) => { let _section = self .host_metrics .stopwatch .start_section("store_get_asc_new"); asc_new(self, &entity.sorted(), gas)? } None => match &self.ctx.debug_fork { Some(fork) => { let entity_option = fork.fetch(entity_type, id).map_err(|e| { HostExportError::Unknown(anyhow!( "store_get: failed to fetch entity from the debug fork: {}", e )) })?; match entity_option { Some(entity) => { let _section = self .host_metrics .stopwatch .start_section("store_get_asc_new"); let entity = asc_new(self, &entity.sorted(), gas)?; self.store_set(gas, entity_ptr, id_ptr, entity)?; entity } None => AscPtr::null(), } } None => AscPtr::null(), }, }; Ok(ret) } /// function typeConversion.bytesToString(bytes: Bytes): string pub fn bytes_to_string( &mut self, gas: &GasCounter, bytes_ptr: AscPtr<Uint8Array>, ) -> Result<AscPtr<AscString>, DeterministicHostError> { let string = self.ctx.host_exports.bytes_to_string( &self.ctx.logger, asc_get(self, bytes_ptr, gas)?, gas, )?; asc_new(self, &string, gas) } /// Converts bytes to a hex string. 
/// function typeConversion.bytesToHex(bytes: Bytes): string /// References: /// https://godoc.org/github.com/ethereum/go-ethereum/common/hexutil#hdr-Encoding_Rules /// https://github.com/ethereum/web3.js/blob/f98fe1462625a6c865125fecc9cb6b414f0a5e83/packages/web3-utils/src/utils.js#L283 pub fn bytes_to_hex( &mut self, gas: &GasCounter, bytes_ptr: AscPtr<Uint8Array>, ) -> Result<AscPtr<AscString>, DeterministicHostError> { let bytes: Vec<u8> = asc_get(self, bytes_ptr, gas)?; gas.consume_host_fn(gas::DEFAULT_GAS_OP.with_args(gas::complexity::Size, &bytes))?; // Even an empty string must be prefixed with `0x`. // Encodes each byte as a two hex digits. let hex = format!("0x{}", hex::encode(bytes)); asc_new(self, &hex, gas) } /// function typeConversion.bigIntToString(n: Uint8Array): string pub fn big_int_to_string( &mut self, gas: &GasCounter, big_int_ptr: AscPtr<AscBigInt>, ) -> Result<AscPtr<AscString>, DeterministicHostError> { let n: BigInt = asc_get(self, big_int_ptr, gas)?; gas.consume_host_fn(gas::DEFAULT_GAS_OP.with_args(gas::complexity::Size, &n))?; asc_new(self, &n.to_string(), gas) } /// function bigInt.fromString(x: string): BigInt pub fn big_int_from_string( &mut self, gas: &GasCounter, string_ptr: AscPtr<AscString>, ) -> Result<AscPtr<AscBigInt>, DeterministicHostError> { let result = self .ctx .host_exports .big_int_from_string(asc_get(self, string_ptr, gas)?, gas)?; asc_new(self, &result, gas) } /// function typeConversion.bigIntToHex(n: Uint8Array): string pub fn big_int_to_hex( &mut self, gas: &GasCounter, big_int_ptr: AscPtr<AscBigInt>, ) -> Result<AscPtr<AscString>, DeterministicHostError> { let n: BigInt = asc_get(self, big_int_ptr, gas)?; let hex = self.ctx.host_exports.big_int_to_hex(n, gas)?; asc_new(self, &hex, gas) } /// function typeConversion.stringToH160(s: String): H160 pub fn string_to_h160( &mut self, gas: &GasCounter, str_ptr: AscPtr<AscString>, ) -> Result<AscPtr<AscH160>, DeterministicHostError> { let s: String = asc_get(self, 
str_ptr, gas)?; let h160 = self.ctx.host_exports.string_to_h160(&s, gas)?; asc_new(self, &h160, gas) } /// function json.fromBytes(bytes: Bytes): JSONValue pub fn json_from_bytes( &mut self, gas: &GasCounter, bytes_ptr: AscPtr<Uint8Array>, ) -> Result<AscPtr<AscEnum<JsonValueKind>>, DeterministicHostError> { let bytes: Vec<u8> = asc_get(self, bytes_ptr, gas)?; let result = self .ctx .host_exports .json_from_bytes(&bytes, gas) .with_context(|| { format!( "Failed to parse JSON from byte array. Bytes (truncated to 1024 chars): `{:?}`", &bytes[..bytes.len().min(1024)], ) }) .map_err(DeterministicHostError::from)?; asc_new(self, &result, gas) } /// function json.try_fromBytes(bytes: Bytes): Result<JSONValue, boolean> pub fn json_try_from_bytes( &mut self, gas: &GasCounter, bytes_ptr: AscPtr<Uint8Array>, ) -> Result<AscPtr<AscResult<AscPtr<AscEnum<JsonValueKind>>, bool>>, DeterministicHostError> { let bytes: Vec<u8> = asc_get(self, bytes_ptr, gas)?; let result = self .ctx .host_exports .json_from_bytes(&bytes, gas) .map_err(|e| { warn!( &self.ctx.logger, "Failed to parse JSON from byte array"; "bytes" => format!("{:?}", bytes), "error" => format!("{}", e) ); // Map JSON errors to boolean to match the `Result<JSONValue, boolean>` // result type expected by mappings true }); asc_new(self, &result, gas) } /// function ipfs.cat(link: String): Bytes pub fn ipfs_cat( &mut self, gas: &GasCounter, link_ptr: AscPtr<AscString>, ) -> Result<AscPtr<Uint8Array>, HostExportError> { // Note on gas: There is no gas costing for the ipfs call itself, // since it's not enabled on the network. if !self.experimental_features.allow_non_deterministic_ipfs { return Err(HostExportError::Deterministic(anyhow!( "`ipfs.cat` is deprecated. 
Improved support for IPFS will be added in the future" ))); } let link = asc_get(self, link_ptr, gas)?; let ipfs_res = self.ctx.host_exports.ipfs_cat(&self.ctx.logger, link); match ipfs_res { Ok(bytes) => asc_new(self, &*bytes, gas).map_err(Into::into), // Return null in case of error. Err(e) => { info!(&self.ctx.logger, "Failed ipfs.cat, returning `null`"; "link" => asc_get::<String, _, _>(self, link_ptr, gas)?, "error" => e.to_string()); Ok(AscPtr::null()) } } } /// function ipfs.getBlock(link: String): Bytes pub fn ipfs_get_block( &mut self, gas: &GasCounter, link_ptr: AscPtr<AscString>, ) -> Result<AscPtr<Uint8Array>, HostExportError> { // Note on gas: There is no gas costing for the ipfs call itself, // since it's not enabled on the network. if !self.experimental_features.allow_non_deterministic_ipfs { return Err(HostExportError::Deterministic(anyhow!( "`ipfs.getBlock` is deprecated. Improved support for IPFS will be added in the future" ))); } let link = asc_get(self, link_ptr, gas)?; let ipfs_res = self.ctx.host_exports.ipfs_get_block(&self.ctx.logger, link); match ipfs_res { Ok(bytes) => asc_new(self, &*bytes, gas).map_err(Into::into), // Return null in case of error. Err(e) => { info!(&self.ctx.logger, "Failed ipfs.getBlock, returning `null`"; "link" => asc_get::<String, _, _>(self, link_ptr, gas)?, "error" => e.to_string()); Ok(AscPtr::null()) } } } /// function ipfs.map(link: String, callback: String, flags: String[]): void pub fn ipfs_map( &mut self, gas: &GasCounter, link_ptr: AscPtr<AscString>, callback: AscPtr<AscString>, user_data: AscPtr<AscEnum<StoreValueKind>>, flags: AscPtr<Array<AscPtr<AscString>>>, ) -> Result<(), HostExportError> { // Note on gas: // Ideally we would consume gas the same as ipfs_cat and then share // gas across the spawned modules for callbacks. if !self.experimental_features.allow_non_deterministic_ipfs { return Err(HostExportError::Deterministic(anyhow!( "`ipfs.map` is deprecated. 
Improved support for IPFS will be added in the future" ))); } let link: String = asc_get(self, link_ptr, gas)?; let callback: String = asc_get(self, callback, gas)?; let user_data: store::Value = try_asc_get(self, user_data, gas)?; let flags = asc_get(self, flags, gas)?; // Pause the timeout while running ipfs_map, ensure it will be restarted by using a guard. self.timeout_stopwatch.lock().unwrap().stop(); let defer_stopwatch = self.timeout_stopwatch.clone(); let _stopwatch_guard = defer::defer(|| defer_stopwatch.lock().unwrap().start()); let start_time = Instant::now(); let output_states = HostExports::ipfs_map( &self.ctx.host_exports.link_resolver.clone(), self, link.clone(), &*callback, user_data, flags, )?; debug!( &self.ctx.logger, "Successfully processed file with ipfs.map"; "link" => &link, "callback" => &*callback, "n_calls" => output_states.len(), "time" => format!("{}ms", start_time.elapsed().as_millis()) ); for output_state in output_states { self.ctx.state.extend(output_state); } Ok(()) } /// Expects a decimal string. /// function json.toI64(json: String): i64 pub fn json_to_i64( &mut self, gas: &GasCounter, json_ptr: AscPtr<AscString>, ) -> Result<i64, DeterministicHostError> { self.ctx .host_exports .json_to_i64(asc_get(self, json_ptr, gas)?, gas) } /// Expects a decimal string. /// function json.toU64(json: String): u64 pub fn json_to_u64( &mut self, gas: &GasCounter, json_ptr: AscPtr<AscString>, ) -> Result<u64, DeterministicHostError> { self.ctx .host_exports .json_to_u64(asc_get(self, json_ptr, gas)?, gas) } /// Expects a decimal string. /// function json.toF64(json: String): f64 pub fn json_to_f64( &mut self, gas: &GasCounter, json_ptr: AscPtr<AscString>, ) -> Result<f64, DeterministicHostError> { self.ctx .host_exports .json_to_f64(asc_get(self, json_ptr, gas)?, gas) } /// Expects a decimal string. 
/// function json.toBigInt(json: String): BigInt
// Parses a decimal string from wasm memory into a BigInt. Gas accounting is
// delegated to the host export — the `gas` counter is passed straight through.
pub fn json_to_big_int(
    &mut self,
    gas: &GasCounter,
    json_ptr: AscPtr<AscString>,
) -> Result<AscPtr<AscBigInt>, DeterministicHostError> {
    let big_int = self
        .ctx
        .host_exports
        .json_to_big_int(asc_get(self, json_ptr, gas)?, gas)?;
    // Write the result back into wasm memory and return its pointer.
    asc_new(self, &*big_int, gas)
}

/// function crypto.keccak256(input: Bytes): Bytes
// Deterministic hash over the input bytes; computed by the host export.
pub fn crypto_keccak_256(
    &mut self,
    gas: &GasCounter,
    input_ptr: AscPtr<Uint8Array>,
) -> Result<AscPtr<Uint8Array>, DeterministicHostError> {
    let input = self
        .ctx
        .host_exports
        .crypto_keccak_256(asc_get(self, input_ptr, gas)?, gas)?;
    asc_new(self, input.as_ref(), gas)
}

// The bigInt.* arithmetic wrappers below all follow the same shape:
// decode both operands from wasm memory, call the corresponding host export
// (which receives the gas counter), and allocate the result back in wasm.

/// function bigInt.plus(x: BigInt, y: BigInt): BigInt
pub fn big_int_plus(
    &mut self,
    gas: &GasCounter,
    x_ptr: AscPtr<AscBigInt>,
    y_ptr: AscPtr<AscBigInt>,
) -> Result<AscPtr<AscBigInt>, DeterministicHostError> {
    let result = self.ctx.host_exports.big_int_plus(
        asc_get(self, x_ptr, gas)?,
        asc_get(self, y_ptr, gas)?,
        gas,
    )?;
    asc_new(self, &result, gas)
}

/// function bigInt.minus(x: BigInt, y: BigInt): BigInt
pub fn big_int_minus(
    &mut self,
    gas: &GasCounter,
    x_ptr: AscPtr<AscBigInt>,
    y_ptr: AscPtr<AscBigInt>,
) -> Result<AscPtr<AscBigInt>, DeterministicHostError> {
    let result = self.ctx.host_exports.big_int_minus(
        asc_get(self, x_ptr, gas)?,
        asc_get(self, y_ptr, gas)?,
        gas,
    )?;
    asc_new(self, &result, gas)
}

/// function bigInt.times(x: BigInt, y: BigInt): BigInt
pub fn big_int_times(
    &mut self,
    gas: &GasCounter,
    x_ptr: AscPtr<AscBigInt>,
    y_ptr: AscPtr<AscBigInt>,
) -> Result<AscPtr<AscBigInt>, DeterministicHostError> {
    let result = self.ctx.host_exports.big_int_times(
        asc_get(self, x_ptr, gas)?,
        asc_get(self, y_ptr, gas)?,
        gas,
    )?;
    asc_new(self, &result, gas)
}

/// function bigInt.dividedBy(x: BigInt, y: BigInt): BigInt
pub fn big_int_divided_by(
    &mut self,
    gas: &GasCounter,
    x_ptr: AscPtr<AscBigInt>,
    y_ptr: AscPtr<AscBigInt>,
) -> Result<AscPtr<AscBigInt>, DeterministicHostError> {
    let result = self.ctx.host_exports.big_int_divided_by(
asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, gas, )?; asc_new(self, &result, gas) } /// function bigInt.dividedByDecimal(x: BigInt, y: BigDecimal): BigDecimal pub fn big_int_divided_by_decimal( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigInt>, y_ptr: AscPtr<AscBigDecimal>, ) -> Result<AscPtr<AscBigDecimal>, DeterministicHostError> { let x = BigDecimal::new(asc_get(self, x_ptr, gas)?, 0); let result = self.ctx .host_exports .big_decimal_divided_by(x, try_asc_get(self, y_ptr, gas)?, gas)?; asc_new(self, &result, gas) } /// function bigInt.mod(x: BigInt, y: BigInt): BigInt pub fn big_int_mod( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigInt>, y_ptr: AscPtr<AscBigInt>, ) -> Result<AscPtr<AscBigInt>, DeterministicHostError> { let result = self.ctx.host_exports.big_int_mod( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, gas, )?; asc_new(self, &result, gas) } /// function bigInt.pow(x: BigInt, exp: u8): BigInt pub fn big_int_pow( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigInt>, exp: u32, ) -> Result<AscPtr<AscBigInt>, DeterministicHostError> { let exp = u8::try_from(exp).map_err(|e| DeterministicHostError::from(Error::from(e)))?; let result = self .ctx .host_exports .big_int_pow(asc_get(self, x_ptr, gas)?, exp, gas)?; asc_new(self, &result, gas) } /// function bigInt.bitOr(x: BigInt, y: BigInt): BigInt pub fn big_int_bit_or( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigInt>, y_ptr: AscPtr<AscBigInt>, ) -> Result<AscPtr<AscBigInt>, DeterministicHostError> { let result = self.ctx.host_exports.big_int_bit_or( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, gas, )?; asc_new(self, &result, gas) } /// function bigInt.bitAnd(x: BigInt, y: BigInt): BigInt pub fn big_int_bit_and( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigInt>, y_ptr: AscPtr<AscBigInt>, ) -> Result<AscPtr<AscBigInt>, DeterministicHostError> { let result = self.ctx.host_exports.big_int_bit_and( asc_get(self, x_ptr, gas)?, asc_get(self, y_ptr, gas)?, gas, )?; 
asc_new(self, &result, gas) } /// function bigInt.leftShift(x: BigInt, bits: u8): BigInt pub fn big_int_left_shift( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigInt>, bits: u32, ) -> Result<AscPtr<AscBigInt>, DeterministicHostError> { let bits = u8::try_from(bits).map_err(|e| DeterministicHostError::from(Error::from(e)))?; let result = self.ctx .host_exports .big_int_left_shift(asc_get(self, x_ptr, gas)?, bits, gas)?; asc_new(self, &result, gas) } /// function bigInt.rightShift(x: BigInt, bits: u8): BigInt pub fn big_int_right_shift( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigInt>, bits: u32, ) -> Result<AscPtr<AscBigInt>, DeterministicHostError> { let bits = u8::try_from(bits).map_err(|e| DeterministicHostError::from(Error::from(e)))?; let result = self.ctx .host_exports .big_int_right_shift(asc_get(self, x_ptr, gas)?, bits, gas)?; asc_new(self, &result, gas) } /// function typeConversion.bytesToBase58(bytes: Bytes): string pub fn bytes_to_base58( &mut self, gas: &GasCounter, bytes_ptr: AscPtr<Uint8Array>, ) -> Result<AscPtr<AscString>, DeterministicHostError> { let result = self .ctx .host_exports .bytes_to_base58(asc_get(self, bytes_ptr, gas)?, gas)?; asc_new(self, &result, gas) } /// function bigDecimal.toString(x: BigDecimal): string pub fn big_decimal_to_string( &mut self, gas: &GasCounter, big_decimal_ptr: AscPtr<AscBigDecimal>, ) -> Result<AscPtr<AscString>, DeterministicHostError> { let result = self .ctx .host_exports .big_decimal_to_string(try_asc_get(self, big_decimal_ptr, gas)?, gas)?; asc_new(self, &result, gas) } /// function bigDecimal.fromString(x: string): BigDecimal pub fn big_decimal_from_string( &mut self, gas: &GasCounter, string_ptr: AscPtr<AscString>, ) -> Result<AscPtr<AscBigDecimal>, DeterministicHostError> { let result = self .ctx .host_exports .big_decimal_from_string(asc_get(self, string_ptr, gas)?, gas)?; asc_new(self, &result, gas) } /// function bigDecimal.plus(x: BigDecimal, y: BigDecimal): BigDecimal pub fn 
big_decimal_plus( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigDecimal>, y_ptr: AscPtr<AscBigDecimal>, ) -> Result<AscPtr<AscBigDecimal>, DeterministicHostError> { let result = self.ctx.host_exports.big_decimal_plus( try_asc_get(self, x_ptr, gas)?, try_asc_get(self, y_ptr, gas)?, gas, )?; asc_new(self, &result, gas) } /// function bigDecimal.minus(x: BigDecimal, y: BigDecimal): BigDecimal pub fn big_decimal_minus( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigDecimal>, y_ptr: AscPtr<AscBigDecimal>, ) -> Result<AscPtr<AscBigDecimal>, DeterministicHostError> { let result = self.ctx.host_exports.big_decimal_minus( try_asc_get(self, x_ptr, gas)?, try_asc_get(self, y_ptr, gas)?, gas, )?; asc_new(self, &result, gas) } /// function bigDecimal.times(x: BigDecimal, y: BigDecimal): BigDecimal pub fn big_decimal_times( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigDecimal>, y_ptr: AscPtr<AscBigDecimal>, ) -> Result<AscPtr<AscBigDecimal>, DeterministicHostError> { let result = self.ctx.host_exports.big_decimal_times( try_asc_get(self, x_ptr, gas)?, try_asc_get(self, y_ptr, gas)?, gas, )?; asc_new(self, &result, gas) } /// function bigDecimal.dividedBy(x: BigDecimal, y: BigDecimal): BigDecimal pub fn big_decimal_divided_by( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigDecimal>, y_ptr: AscPtr<AscBigDecimal>, ) -> Result<AscPtr<AscBigDecimal>, DeterministicHostError> { let result = self.ctx.host_exports.big_decimal_divided_by( try_asc_get(self, x_ptr, gas)?, try_asc_get(self, y_ptr, gas)?, gas, )?; asc_new(self, &result, gas) } /// function bigDecimal.equals(x: BigDecimal, y: BigDecimal): bool pub fn big_decimal_equals( &mut self, gas: &GasCounter, x_ptr: AscPtr<AscBigDecimal>, y_ptr: AscPtr<AscBigDecimal>, ) -> Result<bool, DeterministicHostError> { self.ctx.host_exports.big_decimal_equals( try_asc_get(self, x_ptr, gas)?, try_asc_get(self, y_ptr, gas)?, gas, ) } /// function dataSource.create(name: string, params: Array<string>): void pub fn data_source_create( 
&mut self,
    gas: &GasCounter,
    name_ptr: AscPtr<AscString>,
    params_ptr: AscPtr<Array<AscPtr<AscString>>>,
) -> Result<(), HostExportError> {
    // Decode the template name and its string parameters from wasm memory,
    // then ask the host to instantiate the dynamic data source. Passing
    // `None` for the context distinguishes this from `createWithContext`.
    let name: String = asc_get(self, name_ptr, gas)?;
    let params: Vec<String> = asc_get(self, params_ptr, gas)?;
    self.ctx.host_exports.data_source_create(
        &self.ctx.logger,
        &mut self.ctx.state,
        name,
        params,
        None,
        self.ctx.block_ptr.number,
        gas,
    )
}

/// function createWithContext(name: string, params: Array<string>, context: DataSourceContext): void
// Same as `data_source_create`, but additionally decodes a user-supplied
// context entity and forwards it as `Some(..)`.
pub fn data_source_create_with_context(
    &mut self,
    gas: &GasCounter,
    name_ptr: AscPtr<AscString>,
    params_ptr: AscPtr<Array<AscPtr<AscString>>>,
    context_ptr: AscPtr<AscEntity>,
) -> Result<(), HostExportError> {
    let name: String = asc_get(self, name_ptr, gas)?;
    let params: Vec<String> = asc_get(self, params_ptr, gas)?;
    // The context arrives as a key/value map and is converted into the
    // host-side context type via `Into`.
    let context: HashMap<_, _> = try_asc_get(self, context_ptr, gas)?;
    self.ctx.host_exports.data_source_create(
        &self.ctx.logger,
        &mut self.ctx.state,
        name,
        params,
        Some(context.into()),
        self.ctx.block_ptr.number,
        gas,
    )
}

/// function dataSource.address(): Bytes
// Returns the address of the data source currently being indexed.
pub fn data_source_address(
    &mut self,
    gas: &GasCounter,
) -> Result<AscPtr<Uint8Array>, DeterministicHostError> {
    asc_new(
        self,
        self.ctx.host_exports.data_source_address(gas)?.as_slice(),
        gas,
    )
}

/// function dataSource.network(): String
pub fn data_source_network(
    &mut self,
    gas: &GasCounter,
) -> Result<AscPtr<AscString>, DeterministicHostError> {
    asc_new(self, &self.ctx.host_exports.data_source_network(gas)?, gas)
}

/// function dataSource.context(): DataSourceContext
// NOTE(review): the context entity is `.sorted()` before being written back
// to wasm memory — presumably to make the field order deterministic; confirm
// against `HostExports::data_source_context`.
pub fn data_source_context(
    &mut self,
    gas: &GasCounter,
) -> Result<AscPtr<AscEntity>, DeterministicHostError> {
    asc_new(
        self,
        &self.ctx.host_exports.data_source_context(gas)?.sorted(),
        gas,
    )
}

// Looks up an ENS name by its hash via the host export; deprecated and only
// available when non-deterministic features are enabled (checked below).
pub fn ens_name_by_hash(
    &mut self,
    gas: &GasCounter,
    hash_ptr: AscPtr<AscString>,
) -> Result<AscPtr<AscString>, HostExportError> {
    // Not enabled on the network, no gas consumed.
drop(gas); // This is unrelated to IPFS, but piggyback on the config to disallow it on the network. if !self.experimental_features.allow_non_deterministic_ipfs { return Err(HostExportError::Deterministic(anyhow!( "`ens_name_by_hash` is deprecated" ))); } let hash: String = asc_get(self, hash_ptr, gas)?; let name = self.ctx.host_exports.ens_name_by_hash(&*hash)?; // map `None` to `null`, and `Some(s)` to a runtime string name.map(|name| asc_new(self, &*name, gas).map_err(Into::into)) .unwrap_or(Ok(AscPtr::null())) } pub fn log_log( &mut self, gas: &GasCounter, level: u32, msg: AscPtr<AscString>, ) -> Result<(), DeterministicHostError> { let level = LogLevel::from(level).into(); let msg: String = asc_get(self, msg, gas)?; self.ctx .host_exports .log_log(&self.ctx.logger, level, msg, gas) } /// function encode(token: ethereum.Value): Bytes | null pub fn ethereum_encode( &mut self, gas: &GasCounter, token_ptr: AscPtr<AscEnum<EthereumValueKind>>, ) -> Result<AscPtr<Uint8Array>, DeterministicHostError> { let data = self .ctx .host_exports .ethereum_encode(asc_get(self, token_ptr, gas)?, gas); // return `null` if it fails data.map(|bytes| asc_new(self, &*bytes, gas)) .unwrap_or(Ok(AscPtr::null())) } /// function decode(types: String, data: Bytes): ethereum.Value | null pub fn ethereum_decode( &mut self, gas: &GasCounter, types_ptr: AscPtr<AscString>, data_ptr: AscPtr<Uint8Array>, ) -> Result<AscPtr<AscEnum<EthereumValueKind>>, DeterministicHostError> { let result = self.ctx.host_exports.ethereum_decode( asc_get(self, types_ptr, gas)?, asc_get(self, data_ptr, gas)?, gas, ); // return `null` if it fails result .map(|param| asc_new(self, &param, gas)) .unwrap_or(Ok(AscPtr::null())) } /// function arweave.transactionData(txId: string): Bytes | null pub fn arweave_transaction_data( &mut self, _gas: &GasCounter, _tx_id: AscPtr<AscString>, ) -> Result<AscPtr<Uint8Array>, HostExportError> { Err(HostExportError::Deterministic(anyhow!( "`arweave.transactionData` has been removed." 
))) } /// function box.profile(address: string): JSONValue | null pub fn box_profile( &mut self, _gas: &GasCounter, _address: AscPtr<AscString>, ) -> Result<AscPtr<AscJson>, HostExportError> { Err(HostExportError::Deterministic(anyhow!( "`box.profile` has been removed." ))) } }
36.667647
127
0.556349
fc83b13aa10da3008207f13ea78f454ae1c686c4
5,163
//! Tests for `Bits::lut`, `Bits::lut_set`, `Bits::funnel`, and the
//! `awint_internals` widening multiply-add, which need dedicated coverage
//! because of their unusual bitwidth requirements.

use awint::prelude::*;
use rand_xoshiro::{rand_core::SeedableRng, Xoshiro128StarStar};

/// [Bits::lut] needs its own test because of its special requirements
// Cross-checks `lut` against an equivalent `field` extraction: selecting
// entry `i` from the lookup table must equal copying the bit range
// `[i * out.bw(), (i + 1) * out.bw())` out of the table.
#[test]
fn lut_and_field() {
    let mut rng = Xoshiro128StarStar::seed_from_u64(0);
    // Smaller bounds under miri so the interpreted run stays fast.
    #[cfg(not(miri))]
    let (out_bw_max, pow_max) = (258, 8);
    #[cfg(miri)]
    let (out_bw_max, pow_max) = (68, 3);
    // Scratch space sized for the largest table used below.
    let mut awi_tmp0 = ExtAwi::zero(bw(out_bw_max * (1 << pow_max)));
    let mut awi_tmp1 = ExtAwi::zero(bw(out_bw_max * (1 << pow_max)));
    let tmp0 = awi_tmp0.const_as_mut();
    let tmp1 = awi_tmp1.const_as_mut();
    for out_bw in 1..out_bw_max {
        let mut awi_out = ExtAwi::zero(bw(out_bw));
        for pow in 1..pow_max {
            // A table with `2^pow` entries of `out_bw` bits each.
            let mul = 1 << pow;
            let mut awi_lut = ExtAwi::zero(bw(out_bw * mul));
            awi_lut[..].rand_assign_using(&mut rng).unwrap();
            let mut awi_inx = ExtAwi::zero(bw(pow));
            let out = awi_out.const_as_mut();
            let lut = awi_lut.const_as_ref();
            let inx = awi_inx.const_as_mut();
            for i in 0..mul {
                inx.usize_assign(i);
                out.lut(lut, inx).unwrap();
                // Reference computation: zero-extend `out` and field the same
                // entry out of the table, then compare.
                tmp0.zero_resize_assign(out);
                tmp1.field(0, lut, i * out.bw(), out.bw()).unwrap();
                assert_eq!(tmp0, tmp1);
            }
        }
    }
}

/// Test [Bits::lut] and [Bits::lut_set]
// Round-trip test: writing an entry with `lut_set` and reading it back with
// `lut` must return the written value, and restoring the old entry must leave
// the table bit-for-bit unchanged.
#[test]
fn lut_and_lut_set() {
    let mut rng = Xoshiro128StarStar::seed_from_u64(0);
    #[cfg(not(miri))]
    let (entry_bw_max, pow_max) = (258, 8);
    #[cfg(miri)]
    let (entry_bw_max, pow_max) = (68, 3);
    for entry_bw in 1..entry_bw_max {
        let mut awi_entry = ExtAwi::zero(bw(entry_bw));
        let entry = awi_entry.const_as_mut();
        let mut awi_entry_copy = ExtAwi::zero(entry.nzbw());
        let entry_copy = awi_entry_copy.const_as_mut();
        let mut awi_entry_old = ExtAwi::zero(entry.nzbw());
        let entry_old = awi_entry_old.const_as_mut();
        for pow in 1..pow_max {
            let mul = 1 << pow;
            let mut awi_lut = ExtAwi::zero(bw(entry_bw * mul));
            let lut = awi_lut.const_as_mut();
            lut.rand_assign_using(&mut rng).unwrap();
            // Pristine copy of the table for the final comparison.
            let mut awi_lut_copy = ExtAwi::zero(lut.nzbw());
            let lut_copy = awi_lut_copy.const_as_mut();
            lut_copy.copy_assign(lut).unwrap();
            let mut awi_inx = ExtAwi::zero(bw(pow));
            let inx = awi_inx.const_as_mut();
            for _ in 0..mul {
                inx.rand_assign_using(&mut rng).unwrap();
                entry.rand_assign_using(&mut rng).unwrap();
                entry_copy.copy_assign(entry).unwrap();
                // before `lut_set`, copy the old entry
                entry_old.lut(lut, inx).unwrap();
                // set new value
                lut.lut_set(entry, inx).unwrap();
                // get the value that was set
                entry.lut(lut, inx).unwrap();
                assert_eq!(entry, entry_copy);
                // restore to original state and make sure nothing else was overwritten
                lut.lut_set(entry_old, inx).unwrap();
                assert_eq!(lut, lut_copy);
            }
        }
    }
}

// Checks `funnel` against a reference implementation: a funnel shift by `s`
// must equal logically shifting the double-width `rhs` right by `s` and
// truncating to the output width.
#[test]
fn funnel() {
    let mut rng = Xoshiro128StarStar::seed_from_u64(0);
    #[cfg(miri)]
    let max_pow = 7;
    #[cfg(not(miri))]
    let max_pow = 10;
    for pow in 1..max_pow {
        // `funnel` requires: shift is `pow` bits, lhs is `2^pow` bits, and
        // rhs is exactly twice the lhs width.
        let mut awi_shift = ExtAwi::zero(bw(pow));
        let mut awi_lhs = ExtAwi::zero(bw(1 << pow));
        let mut awi_rhs = ExtAwi::zero(bw(2 * awi_lhs.bw()));
        let mut awi_alt0 = ExtAwi::zero(bw(awi_rhs.bw()));
        let mut awi_alt1 = ExtAwi::zero(bw(awi_lhs.bw()));
        awi_rhs[..].rand_assign_using(&mut rng).unwrap();
        let shift = awi_shift.const_as_mut();
        let lhs = awi_lhs.const_as_mut();
        let rhs = awi_rhs.const_as_mut();
        let alt0 = awi_alt0.const_as_mut();
        let alt1 = awi_alt1.const_as_mut();
        for s in 0..lhs.bw() {
            // Reference: logical shift right then truncate.
            alt0.copy_assign(rhs).unwrap();
            alt0.lshr_assign(s).unwrap();
            alt1.zero_resize_assign(alt0);
            shift.usize_assign(s);
            lhs.funnel(rhs, shift).unwrap();
            assert_eq!(lhs, alt1);
        }
    }
}

// Verifies `awint_internals::widening_mul_add_u128` against an independent
// 256-bit `mul_add_assign` computation on zero-extended operands.
#[test]
fn awint_internals_test() {
    let mut rng = &mut Xoshiro128StarStar::seed_from_u64(0);
    let mut lhs = inlawi_zero!(128);
    let mut rhs = inlawi_zero!(128);
    let mut add = inlawi_zero!(128);
    lhs[..].rand_assign_using(&mut rng).unwrap();
    rhs[..].rand_assign_using(&mut rng).unwrap();
    add[..].rand_assign_using(&mut rng).unwrap();
    // (lo, hi) = lhs * rhs + add, as two 128-bit halves.
    let (lo, hi) = awint_internals::widening_mul_add_u128(
        lhs[..].to_u128(),
        rhs[..].to_u128(),
        add[..].to_u128(),
    );
    let mut tmp0 = extawi!(0u128);
    let mut tmp1 = extawi!(0u128);
    tmp0[..].u128_assign(lo);
    tmp1[..].u128_assign(hi);
    // Zero-extend the operands to 256 bits (presumably what the `..256`
    // concatenation form does) and redo the computation at full width.
    let lhs = inlawi_zero!(..,lhs;..256).unwrap();
    let rhs = inlawi_zero!(..,rhs;..256).unwrap();
    let mut add = inlawi_zero!(..,add;..256).unwrap();
    add[..].mul_add_assign(&lhs[..], &rhs[..]).unwrap();
    // The concatenated (hi, lo) halves must equal the 256-bit result.
    assert_eq!(&extawi!(tmp1, tmp0)[..], &add[..]);
}
38.244444
87
0.561883
fbdff89781f9e9e2323d9d10b35cf2e3381c5435
2,016
// WASI: // mapdir: .:test_fs/hamlet // this program is used in the pause/resume test use std::fs; #[cfg(target_os = "wasi")] use std::os::wasi::prelude::AsRawFd; use std::path::PathBuf; #[cfg(target_os = "wasi")] #[repr(C)] struct WasiIovec { pub buf: u32, pub buf_len: u32, } #[cfg(target_os = "wasi")] #[link(wasm_import_module = "wasi_unstable")] extern "C" { fn fd_read(fd: u32, iovs: u32, iovs_len: u32, nread: u32) -> u16; } #[cfg(target_os = "wasi")] fn read(fd: u32, iovs: &[&mut [u8]]) -> u32 { let mut nread = 0; let mut processed_iovs = vec![]; for iov in iovs { processed_iovs.push(WasiIovec { buf: iov.as_ptr() as usize as u32, buf_len: iov.len() as u32, }) } unsafe { fd_read( fd, processed_iovs.as_ptr() as usize as u32, processed_iovs.len() as u32, &mut nread as *mut u32 as usize as u32, ); } nread } fn main() { #[cfg(not(target_os = "wasi"))] let mut base = PathBuf::from("test_fs/hamlet"); #[cfg(target_os = "wasi")] let mut base = PathBuf::from("."); base.push("act3/scene4.txt"); let mut file = fs::File::open(&base).expect("Could not open file"); let mut buffer = [0u8; 64]; #[cfg(target_os = "wasi")] { let raw_fd = file.as_raw_fd(); assert_eq!(read(raw_fd, &[&mut buffer]), 64); let str_val = std::str::from_utf8(&buffer[..]).unwrap().to_string(); println!("{}", &str_val); } // leak the file handle so that we can use it later std::mem::forget(file); #[cfg(not(target_os = "wasi"))] { // eh, just print the output directly println!( "SCENE IV. The Queen's closet. Enter QUEEN GERTRUDE and POLO" ); } } #[cfg(target_os = "wasi")] #[no_mangle] fn second_entry() -> bool { let raw_fd = 5; let mut buffer = [0u8; 8]; let result = read(raw_fd, &[&mut buffer]); &buffer == b"NIUS \n\nL" }
23.172414
76
0.554067
bf631dc797a9637659fda1daac5dcfce4708b94b
10,165
use { clap::{crate_description, crate_name, App, Arg, ArgMatches}, safecoin_faucet::faucet::FAUCET_PORT, safecoin_sdk::{ fee_calculator::FeeRateGovernor, pubkey::Pubkey, signature::{read_keypair_file, Keypair}, }, std::{net::SocketAddr, process::exit, time::Duration}, }; const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = safecoin_sdk::native_token::LAMPORTS_PER_SAFE; /// Holds the configuration for a single run of the benchmark pub struct Config { pub entrypoint_addr: SocketAddr, pub faucet_addr: SocketAddr, pub id: Keypair, pub threads: usize, pub num_nodes: usize, pub duration: Duration, pub tx_count: usize, pub keypair_multiplier: usize, pub thread_batch_sleep_ms: usize, pub sustained: bool, pub client_ids_and_stake_file: String, pub write_to_client_file: bool, pub read_from_client_file: bool, pub target_lamports_per_signature: u64, pub multi_client: bool, pub num_lamports_per_account: u64, pub target_slots_per_epoch: u64, pub target_node: Option<Pubkey>, } impl Default for Config { fn default() -> Config { Config { entrypoint_addr: SocketAddr::from(([127, 0, 0, 1], 10015)), faucet_addr: SocketAddr::from(([127, 0, 0, 1], FAUCET_PORT)), id: Keypair::new(), threads: 4, num_nodes: 1, duration: Duration::new(std::u64::MAX, 0), tx_count: 50_000, keypair_multiplier: 8, thread_batch_sleep_ms: 1000, sustained: false, client_ids_and_stake_file: String::new(), write_to_client_file: false, read_from_client_file: false, target_lamports_per_signature: FeeRateGovernor::default().target_lamports_per_signature, multi_client: true, num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT, target_slots_per_epoch: 0, target_node: None, } } } /// Defines and builds the CLI args for a run of the benchmark pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> { App::new(crate_name!()).about(crate_description!()) .version(version) .arg( Arg::with_name("entrypoint") .short("n") .long("entrypoint") .value_name("HOST:PORT") .takes_value(true) .help("Rendezvous with the cluster at this 
entry point; defaults to 127.0.0.1:10015"), ) .arg( Arg::with_name("faucet") .short("d") .long("faucet") .value_name("HOST:PORT") .takes_value(true) .help("Location of the faucet; defaults to entrypoint:FAUCET_PORT"), ) .arg( Arg::with_name("identity") .short("i") .long("identity") .value_name("PATH") .takes_value(true) .help("File containing a client identity (keypair)"), ) .arg( Arg::with_name("num-nodes") .short("N") .long("num-nodes") .value_name("NUM") .takes_value(true) .help("Wait for NUM nodes to converge"), ) .arg( Arg::with_name("threads") .short("t") .long("threads") .value_name("NUM") .takes_value(true) .help("Number of threads"), ) .arg( Arg::with_name("duration") .long("duration") .value_name("SECS") .takes_value(true) .help("Seconds to run benchmark, then exit; default is forever"), ) .arg( Arg::with_name("sustained") .long("sustained") .help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."), ) .arg( Arg::with_name("no-multi-client") .long("no-multi-client") .help("Disable multi-client support, only transact with the entrypoint."), ) .arg( Arg::with_name("target_node") .long("target-node") .requires("no-multi-client") .takes_value(true) .value_name("PUBKEY") .help("Specify an exact node to send transactions to."), ) .arg( Arg::with_name("tx_count") .long("tx_count") .value_name("NUM") .takes_value(true) .help("Number of transactions to send per batch") ) .arg( Arg::with_name("keypair_multiplier") .long("keypair-multiplier") .value_name("NUM") .takes_value(true) .help("Multiply by transaction count to determine number of keypairs to create") ) .arg( Arg::with_name("thread-batch-sleep-ms") .short("z") .long("thread-batch-sleep-ms") .value_name("NUM") .takes_value(true) .help("Per-thread-per-iteration sleep in ms"), ) .arg( Arg::with_name("write-client-keys") .long("write-client-keys") .value_name("FILENAME") .takes_value(true) .help("Generate client keys and stakes and write the list to YAML file"), ) 
.arg( Arg::with_name("read-client-keys") .long("read-client-keys") .value_name("FILENAME") .takes_value(true) .help("Read client keys and stakes from the YAML file"), ) .arg( Arg::with_name("target_lamports_per_signature") .long("target-lamports-per-signature") .value_name("LAMPORTS") .takes_value(true) .help( "The cost in lamports that the cluster will charge for signature \ verification when the cluster is operating at target-signatures-per-slot", ), ) .arg( Arg::with_name("num_lamports_per_account") .long("num-lamports-per-account") .value_name("LAMPORTS") .takes_value(true) .help( "Number of lamports per account.", ), ) .arg( Arg::with_name("target_slots_per_epoch") .long("target-slots-per-epoch") .value_name("SLOTS") .takes_value(true) .help( "Wait until epochs are this many slots long.", ), ) } /// Parses a clap `ArgMatches` structure into a `Config` /// # Arguments /// * `matches` - command line arguments parsed by clap /// # Panics /// Panics if there is trouble parsing any of the arguments pub fn extract_args(matches: &ArgMatches) -> Config { let mut args = Config::default(); if let Some(addr) = matches.value_of("entrypoint") { args.entrypoint_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| { eprintln!("failed to parse entrypoint address: {}", e); exit(1) }); } if let Some(addr) = matches.value_of("faucet") { args.faucet_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| { eprintln!("failed to parse faucet address: {}", e); exit(1) }); } if matches.is_present("identity") { args.id = read_keypair_file(matches.value_of("identity").unwrap()) .expect("can't read client identity"); } if let Some(t) = matches.value_of("threads") { args.threads = t.to_string().parse().expect("can't parse threads"); } if let Some(n) = matches.value_of("num-nodes") { args.num_nodes = n.to_string().parse().expect("can't parse num-nodes"); } if let Some(duration) = matches.value_of("duration") { args.duration = Duration::new( 
duration.to_string().parse().expect("can't parse duration"), 0, ); } if let Some(s) = matches.value_of("tx_count") { args.tx_count = s.to_string().parse().expect("can't parse tx_count"); } if let Some(s) = matches.value_of("keypair_multiplier") { args.keypair_multiplier = s .to_string() .parse() .expect("can't parse keypair-multiplier"); assert!(args.keypair_multiplier >= 2); } if let Some(t) = matches.value_of("thread-batch-sleep-ms") { args.thread_batch_sleep_ms = t .to_string() .parse() .expect("can't parse thread-batch-sleep-ms"); } args.sustained = matches.is_present("sustained"); if let Some(s) = matches.value_of("write-client-keys") { args.write_to_client_file = true; args.client_ids_and_stake_file = s.to_string(); } if let Some(s) = matches.value_of("read-client-keys") { assert!(!args.write_to_client_file); args.read_from_client_file = true; args.client_ids_and_stake_file = s.to_string(); } if let Some(v) = matches.value_of("target_lamports_per_signature") { args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports"); } args.multi_client = !matches.is_present("no-multi-client"); args.target_node = matches .value_of("target_node") .map(|target_str| target_str.parse().unwrap()); if let Some(v) = matches.value_of("num_lamports_per_account") { args.num_lamports_per_account = v.to_string().parse().expect("can't parse lamports"); } if let Some(t) = matches.value_of("target_slots_per_epoch") { args.target_slots_per_epoch = t .to_string() .parse() .expect("can't parse target slots per epoch"); } args }
34.811644
119
0.547664
ddae8fa824843d8c4111944d8841120e1f1979c6
1,057
//! Echo characters sent back to the serial port. //! //! Note: This example is for the STM32F745/STM32F746 #![deny(unsafe_code)] #![deny(warnings)] #![no_main] #![no_std] extern crate panic_halt; use nb::block; use cortex_m_rt::entry; use stm32f7xx_hal::{ pac, prelude::*, serial::{self, Serial}, }; #[entry] fn main() -> ! { let p = pac::Peripherals::take().unwrap(); let rcc = p.RCC.constrain(); let clocks = rcc.cfgr.sysclk(216_000_000.Hz()).freeze(); let gpioa = p.GPIOA.split(); let gpiob = p.GPIOB.split(); let tx = gpioa.pa9.into_alternate(); let rx = gpiob.pb7.into_alternate(); let serial = Serial::new( p.USART1, (tx, rx), clocks, serial::Config { baud_rate: 115_200.Bps(), oversampling: serial::Oversampling::By16, character_match: None, }, ); let (mut tx, mut rx) = serial.split(); loop { let received = block!(rx.read()).unwrap_or('E' as u8); block!(tx.write(received)).ok(); } }
20.326923
62
0.573321
d50209fbecbb27116747c64ee315db64e3cddf79
1,789
cfg_sync!( mod sync_impl; pub use sync_impl::{MmapFileReader, MmapFileReaderExt}; ); cfg_async! { macro_rules! declare_and_impl_basic_reader { () => { pin_project! { /// AsyncMmapFileReader helps read data from mmap file /// like a normal file. pub struct AsyncMmapFileReader<'a> { #[pin] r: Cursor<&'a [u8]>, offset: usize, len: usize, } } impl<'a> AsyncMmapFileReader<'a> { pub(crate) fn new(r: Cursor<&'a [u8]>, offset: usize, len: usize) -> Self { Self { r, offset, len } } /// Returns the start offset(related to the mmap) of the reader #[inline] pub fn offset(&self) -> usize { self.offset } /// Returns the length of the reader #[inline] pub fn len(&self) -> usize { self.len } } impl Debug for AsyncMmapFileReader<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("AsyncMmapFileReader") .field("offset", &self.offset) .field("len", &self.len) .field("reader", &self.r) .finish() } } }; } } cfg_async_std!( pub(crate) mod async_std_impl; ); cfg_smol!( pub(crate) mod smol_impl; ); cfg_tokio!( pub(crate) mod tokio_impl; );
26.701493
91
0.405254
c16d6fabbaf8850111f1cdb222ada7f0d70b8d1f
9,014
use raui_core::prelude::*; use serde::{Deserialize, Serialize}; use std::collections::HashMap; #[cfg(not(feature = "scalar64"))] use std::f32::consts::PI; #[cfg(feature = "scalar64")] use std::f64::consts::PI; #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum ThemeColor { Default, Primary, Secondary, } impl Default for ThemeColor { fn default() -> Self { Self::Default } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum ThemeVariant { ContentOnly, Filled, Outline, } impl Default for ThemeVariant { fn default() -> Self { Self::Filled } } #[derive(PropsData, Debug, Default, Clone, Serialize, Deserialize)] #[props_data(raui_core::props::PropsData)] #[prefab(raui_core::Prefab)] pub struct ThemedWidgetProps { #[serde(default)] pub color: ThemeColor, #[serde(default)] pub variant: ThemeVariant, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct ThemeColorSet { #[serde(default)] pub main: Color, #[serde(default)] pub light: Color, #[serde(default)] pub dark: Color, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct ThemeColors { #[serde(default)] pub default: ThemeColorSet, #[serde(default)] pub primary: ThemeColorSet, #[serde(default)] pub secondary: ThemeColorSet, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct ThemeColorsBundle { #[serde(default)] pub main: ThemeColors, #[serde(default)] pub contrast: ThemeColors, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum ThemedImageMaterial { Color, Image(ImageBoxImage), Procedural(ImageBoxProcedural), } impl Default for ThemedImageMaterial { fn default() -> Self { Self::Color } } #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct ThemedTextMaterial { #[serde(default)] pub horizontal_align: TextBoxHorizontalAlign, #[serde(default)] pub vertical_align: TextBoxVerticalAlign, #[serde(default)] pub direction: TextBoxDirection, #[serde(default)] pub font: TextBoxFont, } #[derive(Debug, 
Default, Clone, Serialize, Deserialize)] pub struct ThemedButtonMaterial { #[serde(default)] pub default: ThemedImageMaterial, #[serde(default)] pub selected: ThemedImageMaterial, #[serde(default)] pub trigger: ThemedImageMaterial, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct ThemedSwitchMaterial { #[serde(default)] pub on: ThemedImageMaterial, #[serde(default)] pub off: ThemedImageMaterial, } #[derive(PropsData, Debug, Default, Clone, Serialize, Deserialize)] #[props_data(raui_core::props::PropsData)] #[prefab(raui_core::Prefab)] pub struct ThemeProps { #[serde(default)] pub active_colors: ThemeColorsBundle, #[serde(default)] pub background_colors: ThemeColorsBundle, #[serde(default)] #[serde(skip_serializing_if = "HashMap::is_empty")] pub content_backgrounds: HashMap<String, ThemedImageMaterial>, #[serde(default)] #[serde(skip_serializing_if = "HashMap::is_empty")] pub button_backgrounds: HashMap<String, ThemedButtonMaterial>, #[serde(default)] #[serde(skip_serializing_if = "Vec::is_empty")] pub icons_level_sizes: Vec<Scalar>, #[serde(default)] #[serde(skip_serializing_if = "HashMap::is_empty")] pub text_variants: HashMap<String, ThemedTextMaterial>, #[serde(default)] #[serde(skip_serializing_if = "HashMap::is_empty")] pub switch_variants: HashMap<String, ThemedSwitchMaterial>, #[serde(default)] #[serde(skip_serializing_if = "HashMap::is_empty")] pub modal_shadow_variants: HashMap<String, Color>, } pub fn new_light_theme() -> ThemeProps { make_default_theme( color_from_rgba(241, 250, 238, 1.0), color_from_rgba(29, 53, 87, 1.0), color_from_rgba(230, 57, 70, 1.0), color_from_rgba(255, 255, 255, 1.0), ) } pub fn new_dark_theme() -> ThemeProps { make_default_theme( color_from_rgba(64, 64, 64, 1.0), color_from_rgba(255, 98, 86, 1.0), color_from_rgba(0, 196, 228, 1.0), color_from_rgba(32, 32, 32, 1.0), ) } pub fn new_all_white_theme() -> ThemeProps { make_default_theme( color_from_rgba(255, 255, 255, 1.0), color_from_rgba(255, 255, 255, 1.0), 
color_from_rgba(255, 255, 255, 1.0), color_from_rgba(255, 255, 255, 1.0), ) } pub fn make_default_theme( default: Color, primary: Color, secondary: Color, background: Color, ) -> ThemeProps { let background_primary = color_lerp(background, primary, 0.05); let background_secondary = color_lerp(background, secondary, 0.05); let mut background_modal = fluid_polarize_color(background); background_modal.a = 0.75; let mut content_backgrounds = HashMap::with_capacity(1); content_backgrounds.insert(String::new(), Default::default()); let mut button_backgrounds = HashMap::with_capacity(1); button_backgrounds.insert(String::new(), Default::default()); let mut text_variants = HashMap::with_capacity(1); text_variants.insert( String::new(), ThemedTextMaterial { font: TextBoxFont { size: 18.0, ..Default::default() }, ..Default::default() }, ); let mut switch_variants = HashMap::with_capacity(4); switch_variants.insert(String::new(), ThemedSwitchMaterial::default()); switch_variants.insert("checkbox".to_owned(), ThemedSwitchMaterial::default()); switch_variants.insert("toggle".to_owned(), ThemedSwitchMaterial::default()); switch_variants.insert("radio".to_owned(), ThemedSwitchMaterial::default()); let mut modal_shadow_variants = HashMap::with_capacity(1); modal_shadow_variants.insert(String::new(), background_modal); ThemeProps { active_colors: make_colors_bundle( make_color_set(default, 0.1, 0.2), make_color_set(primary, 0.1, 0.2), make_color_set(secondary, 0.1, 0.2), ), background_colors: make_colors_bundle( make_color_set(background, 0.1, 0.2), make_color_set(background_primary, 0.1, 0.2), make_color_set(background_secondary, 0.1, 0.2), ), content_backgrounds, button_backgrounds, icons_level_sizes: vec![18.0, 24.0, 32.0, 48.0, 64.0, 128.0, 256.0, 512.0, 1024.0], text_variants, switch_variants, modal_shadow_variants, } } pub fn color_from_rgba(r: u8, g: u8, b: u8, a: Scalar) -> Color { Color { r: r as Scalar / 255.0, g: g as Scalar / 255.0, b: b as Scalar / 255.0, a, } } pub fn 
make_colors_bundle( default: ThemeColorSet, primary: ThemeColorSet, secondary: ThemeColorSet, ) -> ThemeColorsBundle { let contrast = ThemeColors { default: ThemeColorSet { main: contrast_color(default.main), light: contrast_color(default.light), dark: contrast_color(default.dark), }, primary: ThemeColorSet { main: contrast_color(primary.main), light: contrast_color(primary.light), dark: contrast_color(primary.dark), }, secondary: ThemeColorSet { main: contrast_color(secondary.main), light: contrast_color(secondary.light), dark: contrast_color(secondary.dark), }, }; let main = ThemeColors { default, primary, secondary, }; ThemeColorsBundle { main, contrast } } pub fn contrast_color(base_color: Color) -> Color { Color { r: 1.0 - base_color.r, g: 1.0 - base_color.g, b: 1.0 - base_color.b, a: base_color.a, } } pub fn fluid_polarize(v: Scalar) -> Scalar { (v - 0.5 * PI).sin() * 0.5 + 0.5 } pub fn fluid_polarize_color(color: Color) -> Color { Color { r: fluid_polarize(color.r), g: fluid_polarize(color.g), b: fluid_polarize(color.b), a: color.a, } } pub fn make_color_set(base_color: Color, lighter: Scalar, darker: Scalar) -> ThemeColorSet { let main = base_color; let light = Color { r: lerp_clamped(main.r, 1.0, lighter), g: lerp_clamped(main.g, 1.0, lighter), b: lerp_clamped(main.b, 1.0, lighter), a: main.a, }; let dark = Color { r: lerp_clamped(main.r, 0.0, darker), g: lerp_clamped(main.g, 0.0, darker), b: lerp_clamped(main.b, 0.0, darker), a: main.a, }; ThemeColorSet { main, light, dark } } pub fn color_lerp(from: Color, to: Color, factor: Scalar) -> Color { Color { r: lerp_clamped(from.r, to.r, factor), g: lerp_clamped(from.g, to.g, factor), b: lerp_clamped(from.b, to.b, factor), a: lerp_clamped(from.a, to.a, factor), } }
29.171521
92
0.639893
4b782a0d16eb1621f0cd30ae1347a82ae7b7e9c3
15,659
//! # Serde JSON //! //! JSON is a ubiquitous open-standard format that uses human-readable text to //! transmit data objects consisting of key-value pairs. //! //! ```json //! { //! "name": "John Doe", //! "age": 43, //! "address": { //! "street": "10 Downing Street", //! "city": "London" //! }, //! "phones": [ //! "+44 1234567", //! "+44 2345678" //! ] //! } //! ``` //! //! There are three common ways that you might find yourself needing to work //! with JSON data in Rust. //! //! - **As text data.** An unprocessed string of JSON data that you receive on //! an HTTP endpoint, read from a file, or prepare to send to a remote //! server. //! - **As an untyped or loosely typed representation.** Maybe you want to //! check that some JSON data is valid before passing it on, but without //! knowing the structure of what it contains. Or you want to do very basic //! manipulations like insert a key in a particular spot. //! - **As a strongly typed Rust data structure.** When you expect all or most //! of your data to conform to a particular structure and want to get real //! work done without JSON's loosey-goosey nature tripping you up. //! //! Serde JSON provides efficient, flexible, safe ways of converting data //! between each of these representations. //! //! # Operating on untyped JSON values //! //! Any valid JSON data can be manipulated in the following recursive enum //! representation. This data structure is [`serde_json::Value`][value]. //! //! ``` //! # use serde_json::{Number, Map}; //! # //! # #[allow(dead_code)] //! enum Value { //! Null, //! Bool(bool), //! Number(Number), //! String(String), //! Array(Vec<Value>), //! Object(Map<String, Value>), //! } //! ``` //! //! A string of JSON data can be parsed into a `serde_json::Value` by the //! [`serde_json::from_str`][from_str] function. There is also //! [`from_slice`][from_slice] for parsing from a byte slice &[u8] and //! [`from_reader`][from_reader] for parsing from any `io::Read` like a File or //! 
a TCP stream. //! //! ``` //! use serde_json::{Result, Value}; //! //! fn untyped_example() -> Result<()> { //! // Some JSON input data as a &str. Maybe this comes from the user. //! let data = r#" //! { //! "name": "John Doe", //! "age": 43, //! "phones": [ //! "+44 1234567", //! "+44 2345678" //! ] //! }"#; //! //! // Parse the string of data into serde_json::Value. //! let v: Value = serde_json::from_str(data)?; //! //! // Access parts of the data by indexing with square brackets. //! println!("Please call {} at the number {}", v["name"], v["phones"][0]); //! //! Ok(()) //! } //! # //! # fn main() { //! # untyped_example().unwrap(); //! # } //! ``` //! //! The result of square bracket indexing like `v["name"]` is a borrow of the //! data at that index, so the type is `&Value`. A JSON map can be indexed with //! string keys, while a JSON array can be indexed with integer keys. If the //! type of the data is not right for the type with which it is being indexed, //! or if a map does not contain the key being indexed, or if the index into a //! vector is out of bounds, the returned element is `Value::Null`. //! //! When a `Value` is printed, it is printed as a JSON string. So in the code //! above, the output looks like `Please call "John Doe" at the number "+44 //! 1234567"`. The quotation marks appear because `v["name"]` is a `&Value` //! containing a JSON string and its JSON representation is `"John Doe"`. //! Printing as a plain string without quotation marks involves converting from //! a JSON string to a Rust string with [`as_str()`] or avoiding the use of //! `Value` as described in the following section. //! //! [`as_str()`]: https://docs.serde.rs/serde_json/enum.Value.html#method.as_str //! //! The `Value` representation is sufficient for very basic tasks but can be //! tedious to work with for anything more significant. Error handling is //! verbose to implement correctly, for example imagine trying to detect the //! 
presence of unrecognized fields in the input data. The compiler is powerless //! to help you when you make a mistake, for example imagine typoing `v["name"]` //! as `v["nmae"]` in one of the dozens of places it is used in your code. //! //! # Parsing JSON as strongly typed data structures //! //! Serde provides a powerful way of mapping JSON data into Rust data structures //! largely automatically. //! //! ``` //! use serde::{Deserialize, Serialize}; //! use serde_json::Result; //! //! #[derive(Serialize, Deserialize)] //! struct Person { //! name: String, //! age: u8, //! phones: Vec<String>, //! } //! //! fn typed_example() -> Result<()> { //! // Some JSON input data as a &str. Maybe this comes from the user. //! let data = r#" //! { //! "name": "John Doe", //! "age": 43, //! "phones": [ //! "+44 1234567", //! "+44 2345678" //! ] //! }"#; //! //! // Parse the string of data into a Person object. This is exactly the //! // same function as the one that produced serde_json::Value above, but //! // now we are asking it for a Person as output. //! let p: Person = serde_json::from_str(data)?; //! //! // Do things just like with any other Rust data structure. //! println!("Please call {} at the number {}", p.name, p.phones[0]); //! //! Ok(()) //! } //! # //! # fn main() { //! # typed_example().unwrap(); //! # } //! ``` //! //! This is the same `serde_json::from_str` function as before, but this time we //! assign the return value to a variable of type `Person` so Serde will //! automatically interpret the input data as a `Person` and produce informative //! error messages if the layout does not conform to what a `Person` is expected //! to look like. //! //! Any type that implements Serde's `Deserialize` trait can be deserialized //! this way. This includes built-in Rust standard library types like `Vec<T>` //! and `HashMap<K, V>`, as well as any structs or enums annotated with //! `#[derive(Deserialize)]`. //! //! 
Once we have `p` of type `Person`, our IDE and the Rust compiler can help us //! use it correctly like they do for any other Rust code. The IDE can //! autocomplete field names to prevent typos, which was impossible in the //! `serde_json::Value` representation. And the Rust compiler can check that //! when we write `p.phones[0]`, then `p.phones` is guaranteed to be a //! `Vec<String>` so indexing into it makes sense and produces a `String`. //! //! # Constructing JSON values //! //! Serde JSON provides a [`json!` macro][macro] to build `serde_json::Value` //! objects with very natural JSON syntax. //! //! ``` //! use serde_json::json; //! //! fn main() { //! // The type of `john` is `serde_json::Value` //! let john = json!({ //! "name": "John Doe", //! "age": 43, //! "phones": [ //! "+44 1234567", //! "+44 2345678" //! ] //! }); //! //! println!("first phone number: {}", john["phones"][0]); //! //! // Convert to a string of JSON and print it out //! println!("{}", john.to_string()); //! } //! ``` //! //! The `Value::to_string()` function converts a `serde_json::Value` into a //! `String` of JSON text. //! //! One neat thing about the `json!` macro is that variables and expressions can //! be interpolated directly into the JSON value as you are building it. Serde //! will check at compile time that the value you are interpolating is able to //! be represented as JSON. //! //! ``` //! # use serde_json::json; //! # //! # fn random_phone() -> u16 { 0 } //! # //! let full_name = "John Doe"; //! let age_last_year = 42; //! //! // The type of `john` is `serde_json::Value` //! let john = json!({ //! "name": full_name, //! "age": age_last_year + 1, //! "phones": [ //! format!("+44 {}", random_phone()) //! ] //! }); //! ``` //! //! This is amazingly convenient but we have the problem we had before with //! `Value` which is that the IDE and Rust compiler cannot help us if we get it //! wrong. Serde JSON provides a better way of serializing strongly-typed data //! 
structures into JSON text. //! //! # Creating JSON by serializing data structures //! //! A data structure can be converted to a JSON string by //! [`serde_json::to_string`][to_string]. There is also //! [`serde_json::to_vec`][to_vec] which serializes to a `Vec<u8>` and //! [`serde_json::to_writer`][to_writer] which serializes to any `io::Write` //! such as a File or a TCP stream. //! //! ``` //! use serde::{Deserialize, Serialize}; //! use serde_json::Result; //! //! #[derive(Serialize, Deserialize)] //! struct Address { //! street: String, //! city: String, //! } //! //! fn print_an_address() -> Result<()> { //! // Some data structure. //! let address = Address { //! street: "10 Downing Street".to_owned(), //! city: "London".to_owned(), //! }; //! //! // Serialize it to a JSON string. //! let j = serde_json::to_string(&address)?; //! //! // Print, write to a file, or send to an HTTP server. //! println!("{}", j); //! //! Ok(()) //! } //! # //! # fn main() { //! # print_an_address().unwrap(); //! # } //! ``` //! //! Any type that implements Serde's `Serialize` trait can be serialized this //! way. This includes built-in Rust standard library types like `Vec<T>` and //! `HashMap<K, V>`, as well as any structs or enums annotated with //! `#[derive(Serialize)]`. //! //! # No-std support //! //! As long as there is a memory allocator, it is possible to use serde_json //! without the rest of the Rust standard library. This is supported on Rust //! 1.36+. Disable the default "std" feature and enable the "alloc" feature: //! //! ```toml //! [dependencies] //! serde_json = { version = "1.0", default-features = false, features = ["alloc"] } //! ``` //! //! For JSON support in Serde without a memory allocator, please see the //! [`serde-json-core`] crate. //! //! [value]: https://docs.serde.rs/serde_json/value/enum.Value.html //! [from_str]: https://docs.serde.rs/serde_json/de/fn.from_str.html //! [from_slice]: https://docs.serde.rs/serde_json/de/fn.from_slice.html //! 
[from_reader]: https://docs.serde.rs/serde_json/de/fn.from_reader.html //! [to_string]: https://docs.serde.rs/serde_json/ser/fn.to_string.html //! [to_vec]: https://docs.serde.rs/serde_json/ser/fn.to_vec.html //! [to_writer]: https://docs.serde.rs/serde_json/ser/fn.to_writer.html //! [macro]: https://docs.serde.rs/serde_json/macro.json.html //! [`serde-json-core`]: https://japaric.github.io/serde-json-core/serde_json_core/ #![doc(html_root_url = "https://docs.rs/serde_json/1.0.59")] #![deny(clippy::all, clippy::pedantic)] // Ignored clippy lints #![allow( clippy::comparison_chain, clippy::deprecated_cfg_attr, clippy::doc_markdown, clippy::excessive_precision, clippy::float_cmp, clippy::match_like_matches_macro, clippy::match_single_binding, clippy::needless_doctest_main, clippy::transmute_ptr_to_ptr, // clippy bug: https://github.com/rust-lang/rust-clippy/issues/5704 clippy::unnested_or_patterns, )] // Ignored clippy_pedantic lints #![allow( // Deserializer::from_str, into_iter clippy::should_implement_trait, // integer and float ser/de requires these sorts of casts clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, clippy::cast_sign_loss, // correctly used clippy::enum_glob_use, clippy::if_not_else, clippy::integer_division, clippy::map_err_ignore, clippy::match_same_arms, clippy::similar_names, clippy::unused_self, clippy::wildcard_imports, // things are often more readable this way clippy::cast_lossless, clippy::module_name_repetitions, clippy::shadow_unrelated, clippy::single_match_else, clippy::too_many_lines, clippy::unreadable_literal, clippy::unseparated_literal_suffix, clippy::use_self, clippy::zero_prefixed_literal, // we support older compilers clippy::checked_conversions, clippy::mem_replace_with_default, // noisy clippy::missing_errors_doc, clippy::must_use_candidate, )] #![allow(non_upper_case_globals)] #![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] 
////////////////////////////////////////////////////////////////////////////////

// When building without `std`, heap types (String, Vec, Box, BTreeMap) come
// from the `alloc` crate instead.
#[cfg(not(feature = "std"))]
extern crate alloc;

/// A facade around all the types we need from the `std`, `core`, and `alloc`
/// crates. This avoids elaborate import wrangling having to happen in every
/// module.
mod lib {
    // Resolve `core` items through `std` when available so that both builds
    // share a single import path (`crate::lib::...`).
    mod core {
        #[cfg(not(feature = "std"))]
        pub use core::*;
        #[cfg(feature = "std")]
        pub use std::*;
    }

    pub use self::core::cell::{Cell, RefCell};
    pub use self::core::clone::{self, Clone};
    pub use self::core::convert::{self, From, Into};
    pub use self::core::default::{self, Default};
    pub use self::core::fmt::{self, Debug, Display};
    pub use self::core::hash::{self, Hash};
    pub use self::core::iter::FusedIterator;
    pub use self::core::marker::{self, PhantomData};
    pub use self::core::ops::{Bound, RangeBounds};
    pub use self::core::result::{self, Result};
    pub use self::core::{borrow, char, cmp, iter, mem, num, ops, slice, str};

    // Heap-allocated types: `alloc` in no-std builds, `std` otherwise.
    #[cfg(not(feature = "std"))]
    pub use alloc::borrow::{Cow, ToOwned};
    #[cfg(feature = "std")]
    pub use std::borrow::{Cow, ToOwned};

    #[cfg(not(feature = "std"))]
    pub use alloc::string::{String, ToString};
    #[cfg(feature = "std")]
    pub use std::string::{String, ToString};

    #[cfg(not(feature = "std"))]
    pub use alloc::vec::{self, Vec};
    #[cfg(feature = "std")]
    pub use std::vec::{self, Vec};

    #[cfg(not(feature = "std"))]
    pub use alloc::boxed::Box;
    #[cfg(feature = "std")]
    pub use std::boxed::Box;

    #[cfg(not(feature = "std"))]
    pub use alloc::collections::{btree_map, BTreeMap};
    #[cfg(feature = "std")]
    pub use std::collections::{btree_map, BTreeMap};

    // `std::error::Error` has no core/alloc equivalent on this toolchain, so it
    // is only re-exported for std builds.
    #[cfg(feature = "std")]
    pub use std::error;
}

////////////////////////////////////////////////////////////////////////////////

// Public API surface: inline the main entry points into the crate root docs.
// `from_reader`/`to_writer`/`Serializer` need `io::Read`/`io::Write`, hence std-only.
#[cfg(feature = "std")]
#[doc(inline)]
pub use crate::de::from_reader;
#[doc(inline)]
pub use crate::de::{from_slice, from_str, Deserializer, StreamDeserializer};
#[doc(inline)]
pub use crate::error::{Error, Result};
#[doc(inline)]
pub use crate::ser::{to_string, to_string_pretty, to_vec, to_vec_pretty};
#[cfg(feature = "std")]
#[doc(inline)]
pub use crate::ser::{to_writer, to_writer_pretty, Serializer};
#[doc(inline)]
pub use crate::value::{from_value, to_value, Map, Number, Value};

// We only use our own error type; no need for From conversions provided by the
// standard library's try! macro. This reduces lines of LLVM IR by 4%.
macro_rules! tri {
    ($e:expr) => {
        match $e {
            crate::lib::Result::Ok(val) => val,
            crate::lib::Result::Err(err) => return crate::lib::Result::Err(err),
        }
    };
    // Accept an optional trailing comma.
    ($e:expr,) => {
        tri!($e)
    };
}

#[macro_use]
mod macros;

pub mod de;
pub mod error;
pub mod map;
// `ser` stays private in no-std builds (its public API needs `io::Write`).
#[cfg(feature = "std")]
pub mod ser;
#[cfg(not(feature = "std"))]
mod ser;
pub mod value;

mod features_check;

mod io;
#[cfg(feature = "std")]
mod iter;
#[cfg(feature = "float_roundtrip")]
mod lexical;
mod number;
mod read;

#[cfg(feature = "raw_value")]
mod raw;
33.317021
84
0.617472
e657a457c7f6e4350974c6b47341f018b6665785
1,485
#[doc = "Register `TASKS_RESUME` writer"] pub struct W(crate::W<TASKS_RESUME_SPEC>); impl core::ops::Deref for W { type Target = crate::W<TASKS_RESUME_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<TASKS_RESUME_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<TASKS_RESUME_SPEC>) -> Self { W(writer) } } impl W { #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Resume SPI transaction\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_resume](index.html) module"] pub struct TASKS_RESUME_SPEC; impl crate::RegisterSpec for TASKS_RESUME_SPEC { type Ux = u32; } #[doc = "`write(|w| ..)` method takes [tasks_resume::W](W) writer structure"] impl crate::Writable for TASKS_RESUME_SPEC { type Writer = W; } #[doc = "`reset()` method sets TASKS_RESUME to value 0"] impl crate::Resettable for TASKS_RESUME_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
32.282609
337
0.638384
8f92eb43149d989c12bf995dd417a9776e857e95
3,707
use druid::widget::{Align, Button, Flex, Label, Padding, TextBox}; use druid::{AppLauncher, Data, Lens, LocalizedString, Widget, WidgetExt, WindowDesc}; const VERTICAL_WIDGET_SPACING: f64 = 20.0; const HORIZTONAL_WIDGET_SPACING: f64 = 8.0; const WINDOW_TITLE: LocalizedString<HelloState> = LocalizedString::new("Rakaly"); #[derive(Clone, Data, Lens)] struct HelloState { steam_name: String, api_key: String, } pub fn run() { // describe the main window let main_window = WindowDesc::new(build_root_widget) .title(WINDOW_TITLE) .window_size((400.0, 400.0)); // create the initial app state let initial_state = HelloState { steam_name: "".into(), api_key: "".into(), }; // start the application AppLauncher::with_window(main_window) .launch(initial_state) .expect("Failed to launch application"); } fn build_root_widget() -> impl Widget<HelloState> { let watch_dir = if let Some(user_dirs) = directories::UserDirs::new() { user_dirs .document_dir() .map(|x| { x.join("Paradox Interactive") .join("Europa Universalis IV") .join("save games") }) .map(|x| x.display().to_string()) .unwrap_or_else(|| String::from("unknown")) } else { String::from("unknown") }; let intro_text_1 = Label::new("When the \"Start\" button is pressed, Rakaly will"); let intro_text_2 = Label::new("automatically start watching the following directory"); let intro_text_3 = Label::new("for any changes:"); let intro_text_4 = Label::new(watch_dir); let intro_text_5 = Label::new("Rakaly will upload the new files to the server."); let intro_text_6 = Label::new("To start the uploader on boot, click \"Enable on Startup\""); let steam_name_box = TextBox::new().expand_width().lens(HelloState::steam_name); let steam_name_row = Flex::row() .must_fill_main_axis(true) .with_child(Label::new("Steam Name:")) .with_spacer(HORIZTONAL_WIDGET_SPACING) .with_flex_child(steam_name_box, 1.0); let api_key_box = TextBox::new().expand_width().lens(HelloState::api_key); let api_key_row = Flex::row() .must_fill_main_axis(true) 
.with_child(Label::new("API Key:")) .with_spacer(HORIZTONAL_WIDGET_SPACING) .with_flex_child(api_key_box, 1.0); let immediate_btn_layout = Flex::row() .with_child(Button::new("Start").on_click(|_ctx, _data: &mut HelloState, _env| {})) .with_spacer(HORIZTONAL_WIDGET_SPACING) .with_child(Button::new("Stop").on_click(|_ctx, _data: &mut HelloState, _env| {})); let service_btn_layout = Flex::row() .with_child( Button::new("Enable on Startup").on_click(|_ctx, _data: &mut HelloState, _env| {}), ) .with_spacer(HORIZTONAL_WIDGET_SPACING) .with_child( Button::new("Disable on Startup").on_click(|_ctx, _data: &mut HelloState, _env| {}), ); let layout = Flex::column() .with_child(intro_text_1) .with_child(intro_text_2) .with_child(intro_text_3) .with_child(intro_text_4) .with_child(intro_text_5) .with_child(intro_text_6) .with_spacer(VERTICAL_WIDGET_SPACING) .with_child(steam_name_row) .with_spacer(VERTICAL_WIDGET_SPACING) .with_child(api_key_row) .with_spacer(VERTICAL_WIDGET_SPACING) .with_child(immediate_btn_layout) .with_spacer(VERTICAL_WIDGET_SPACING) .with_child(service_btn_layout) .expand_width(); Padding::new(10.0, Align::left(layout)) }
35.990291
96
0.641759
39f07c6ab106382a34958738c6fdcad1f4157cbc
7,592
#![allow(unused_imports, non_camel_case_types)] use crate::models::r5::ConceptMap_Element::ConceptMap_Element; use crate::models::r5::ConceptMap_Unmapped::ConceptMap_Unmapped; use crate::models::r5::Extension::Extension; use serde_json::json; use serde_json::value::Value; use std::borrow::Cow; /// A statement of relationships from one set of concepts to one or more other /// concepts - either concepts in code systems, or data element/data element concepts, /// or classes in class models. #[derive(Debug)] pub struct ConceptMap_Group<'a> { pub(crate) value: Cow<'a, Value>, } impl ConceptMap_Group<'_> { pub fn new(value: &Value) -> ConceptMap_Group { ConceptMap_Group { value: Cow::Borrowed(value), } } pub fn to_json(&self) -> Value { (*self.value).clone() } /// Mappings for an individual concept in the source to one or more concepts in the /// target. pub fn element(&self) -> Vec<ConceptMap_Element> { self.value .get("element") .unwrap() .as_array() .unwrap() .into_iter() .map(|e| ConceptMap_Element { value: Cow::Borrowed(e), }) .collect::<Vec<_>>() } /// May be used to represent additional information that is not part of the basic /// definition of the element. To make the use of extensions safe and manageable, /// there is a strict set of governance applied to the definition and use of /// extensions. Though any implementer can define an extension, there is a set of /// requirements that SHALL be met as part of the definition of the extension. pub fn extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("extension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// Unique id for the element within a resource (for internal references). This may be /// any string value that does not contain spaces. 
pub fn id(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("id") { return Some(string); } return None; } /// May be used to represent additional information that is not part of the basic /// definition of the element and that modifies the understanding of the element /// in which it is contained and/or the understanding of the containing element's /// descendants. Usually modifier elements provide negation or qualification. To make /// the use of extensions safe and manageable, there is a strict set of governance /// applied to the definition and use of extensions. Though any implementer can define /// an extension, there is a set of requirements that SHALL be met as part of the /// definition of the extension. Applications processing a resource are required to /// check for modifier extensions. Modifier extensions SHALL NOT change the meaning /// of any elements on Resource or DomainResource (including cannot change the meaning /// of modifierExtension itself). pub fn modifier_extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("modifierExtension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// An absolute URI that identifies the source system where the concepts to be mapped /// are defined. pub fn source(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("source") { return Some(string); } return None; } /// An absolute URI that identifies the target system that the concepts will be mapped /// to. pub fn target(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("target") { return Some(string); } return None; } /// What to do when there is no mapping to a target concept from the source concept. /// This provides the "default" to be applied when there is no target concept mapping /// specified. 
The 'unmapped' element is ignored if a code is specified to have /// relationship = not-related-to. pub fn unmapped(&self) -> Option<ConceptMap_Unmapped> { if let Some(val) = self.value.get("unmapped") { return Some(ConceptMap_Unmapped { value: Cow::Borrowed(val), }); } return None; } pub fn validate(&self) -> bool { if !self .element() .into_iter() .map(|e| e.validate()) .all(|x| x == true) { return false; } if let Some(_val) = self.extension() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.id() {} if let Some(_val) = self.modifier_extension() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.source() {} if let Some(_val) = self.target() {} if let Some(_val) = self.unmapped() { if !_val.validate() { return false; } } return true; } } #[derive(Debug)] pub struct ConceptMap_GroupBuilder { pub(crate) value: Value, } impl ConceptMap_GroupBuilder { pub fn build(&self) -> ConceptMap_Group { ConceptMap_Group { value: Cow::Owned(self.value.clone()), } } pub fn with(existing: ConceptMap_Group) -> ConceptMap_GroupBuilder { ConceptMap_GroupBuilder { value: (*existing.value).clone(), } } pub fn new(element: Vec<ConceptMap_Element>) -> ConceptMap_GroupBuilder { let mut __value: Value = json!({}); __value["element"] = json!(element.into_iter().map(|e| e.value).collect::<Vec<_>>()); return ConceptMap_GroupBuilder { value: __value }; } pub fn extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut ConceptMap_GroupBuilder { self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn id<'a>(&'a mut self, val: &str) -> &'a mut ConceptMap_GroupBuilder { self.value["id"] = json!(val); return self; } pub fn modifier_extension<'a>( &'a mut self, val: Vec<Extension>, ) -> &'a mut ConceptMap_GroupBuilder { self.value["modifierExtension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub 
fn source<'a>(&'a mut self, val: &str) -> &'a mut ConceptMap_GroupBuilder { self.value["source"] = json!(val); return self; } pub fn target<'a>(&'a mut self, val: &str) -> &'a mut ConceptMap_GroupBuilder { self.value["target"] = json!(val); return self; } pub fn unmapped<'a>(&'a mut self, val: ConceptMap_Unmapped) -> &'a mut ConceptMap_GroupBuilder { self.value["unmapped"] = json!(val.value); return self; } }
35.148148
100
0.579557
14022978f48124dfdb7509826c36c8b87db3b22c
1,160
#![allow(non_snake_case)] struct Person { age: i32, } impl Person { fn new(initialAge: i32) -> Person { // Add some more code to run some checks on initialAge if initialAge < 0 { println!("Age is not valid, setting age to 0."); return Person { age: 0 }; } return Person { age: initialAge }; } fn amIOld(&self) { let out = match self.age { age if age < 13 => "You are young.", age if age < 18 => "You are a teenager.", _ => "You are old.", }; println!("{}", out); } fn yearPasses(&mut self) { self.age += 1; } } fn main() { let T: i32 = read_line().trim().parse().unwrap(); for _ in 0..T { let age = read_line().trim().parse().unwrap(); let mut p = Person::new(age); p.amIOld(); for _ in 0..3 { p.yearPasses(); } p.amIOld(); println!(""); } } fn read_line() -> String { let mut input = String::new(); std::io::stdin() .read_line(&mut input) .expect("Could not read stdin!"); return input; }
20.714286
62
0.478448
fca86218cf4bbc312cc516a067a671c35443fda3
953
use protobuf::Message; use std::fs::File; use tract_onnx::pb::{ModelProto, ValueInfoProto}; fn main() { let input = std::env::args().nth(1).unwrap(); let output = std::env::args().nth(2).unwrap(); let mut model = protobuf::parse_from_reader::<ModelProto>(&mut File::open(input).unwrap()).unwrap(); let mut graph = model.take_graph(); let all_outputs: Vec<tract_onnx::pb::ValueInfoProto> = graph .get_node() .iter() .flat_map(|n| { n.get_output().iter().map(|s| { let mut vip = ValueInfoProto::new(); vip.set_name(s.to_string()); vip }) }) .collect(); graph.set_output(all_outputs.into()); model.set_graph(graph); let mut f = File::create(output).unwrap(); let mut stream = protobuf::stream::CodedOutputStream::new(&mut f); model.write_to(&mut stream).unwrap(); stream.flush().unwrap(); }
32.862069
92
0.577125
26c7280f81fece9e28867cd662a703ce6c3f24f4
7,157
#[cfg(test)] #[path = "../../tests/unit/validation/vehicles_test.rs"] mod vehicles_test; use super::*; use crate::validation::common::get_time_windows; use std::ops::Deref; use vrp_core::models::common::TimeWindow; /// Checks that fleet has no vehicle with duplicate type ids. fn check_e1300_no_vehicle_types_with_duplicate_type_ids(ctx: &ValidationContext) -> Result<(), FormatError> { get_duplicates(ctx.vehicles().map(|vehicle| &vehicle.type_id)).map_or(Ok(()), |ids| { Err(FormatError::new( "E1300".to_string(), "duplicated vehicle type ids".to_string(), format!("remove duplicated vehicle type ids: {}", ids.join(", ")), )) }) } /// Checks that fleet has no vehicle with duplicate ids. fn check_e1301_no_vehicle_types_with_duplicate_ids(ctx: &ValidationContext) -> Result<(), FormatError> { get_duplicates(ctx.vehicles().flat_map(|vehicle| vehicle.vehicle_ids.iter())).map_or(Ok(()), |ids| { Err(FormatError::new( "E1301".to_string(), "duplicated vehicle ids".to_string(), format!("remove duplicated vehicle ids: {}", ids.join(", ")), )) }) } /// Checks that vehicle shift time is correct. fn check_e1302_vehicle_shift_time(ctx: &ValidationContext) -> Result<(), FormatError> { let type_ids = ctx .vehicles() .filter_map(|vehicle| { let tws = vehicle .shifts .iter() .map(|shift| { vec![ shift.start.time.clone(), shift.end.as_ref().map_or_else(|| shift.start.time.clone(), |end| end.time.clone()), ] }) .collect::<Vec<_>>(); if check_raw_time_windows(&tws, false) { None } else { Some(vehicle.type_id.to_string()) } }) .collect::<Vec<_>>(); if type_ids.is_empty() { Ok(()) } else { Err(FormatError::new( "E1302".to_string(), "invalid start or end times in vehicle shift".to_string(), format!( "ensure that start and end time conform shift time rules, vehicle type ids: {}", type_ids.join(", ") ), )) } } /// Checks that break time window is correct. 
fn check_e1303_vehicle_breaks_time_is_correct(ctx: &ValidationContext) -> Result<(), FormatError> { let type_ids = get_invalid_type_ids( ctx, Box::new(|shift, shift_time| { shift .breaks .as_ref() .map(|breaks| { let tws = breaks .iter() .filter_map(|b| match &b.time { VehicleBreakTime::TimeWindow(tw) => Some(get_time_window_from_vec(tw)), _ => None, }) .collect::<Vec<_>>(); check_shift_time_windows(shift_time, tws, false) }) .unwrap_or(true) }), ); if type_ids.is_empty() { Ok(()) } else { Err(FormatError::new( "E1303".to_string(), "invalid break time windows in vehicle shift".to_string(), format!("ensure that break conform rules, vehicle type ids: '{}'", type_ids.join(", ")), )) } } /// Checks that reload time windows are correct. fn check_e1304_vehicle_reload_time_is_correct(ctx: &ValidationContext) -> Result<(), FormatError> { let type_ids = get_invalid_type_ids( ctx, Box::new(|shift, shift_time| { shift .reloads .as_ref() .map(|reloads| { let tws = reloads .iter() .filter_map(|reload| reload.times.as_ref()) .map(|tws| get_time_windows(tws)) .flatten() .collect::<Vec<_>>(); check_shift_time_windows(shift_time, tws, true) }) .unwrap_or(true) }), ); if type_ids.is_empty() { Ok(()) } else { Err(FormatError::new( "E1304".to_string(), "invalid reload time windows in vehicle shift".to_string(), format!("ensure that reload conform rules, vehicle type ids: '{}'", type_ids.join(", ")), )) } } /// Checks that vehicle area restrictions are valid. 
fn check_e1305_vehicle_limit_area_is_correct(ctx: &ValidationContext) -> Result<(), FormatError> { let type_ids = ctx .vehicles() .filter(|vehicle| { vehicle .limits .as_ref() .and_then(|l| l.allowed_areas.as_ref()) .map_or(false, |areas| areas.is_empty() || areas.iter().any(|area| area.len() < 3)) }) .map(|vehicle| vehicle.type_id.to_string()) .collect::<Vec<_>>(); if type_ids.is_empty() { Ok(()) } else { Err(FormatError::new( "E1305".to_string(), "invalid allowed area definition in vehicle limits".to_string(), format!( "ensure that areas list is not empty and each area has at least three coordinates, \ vehicle type ids: '{}'", type_ids.join(", ") ), )) } } fn get_invalid_type_ids( ctx: &ValidationContext, check_shift: Box<dyn Fn(&VehicleShift, Option<TimeWindow>) -> bool>, ) -> Vec<String> { ctx.vehicles() .filter_map(|vehicle| { let all_correct = vehicle.shifts.iter().all(|shift| check_shift.deref()(shift, get_shift_time_window(shift))); if all_correct { None } else { Some(vehicle.type_id.clone()) } }) .collect::<Vec<_>>() } fn check_shift_time_windows( shift_time: Option<TimeWindow>, tws: Vec<Option<TimeWindow>>, skip_intersection_check: bool, ) -> bool { tws.is_empty() || (check_time_windows(&tws, skip_intersection_check) && shift_time .as_ref() .map_or(true, |shift_time| tws.into_iter().map(|tw| tw.unwrap()).all(|tw| tw.intersects(shift_time)))) } fn get_shift_time_window(shift: &VehicleShift) -> Option<TimeWindow> { get_time_window( &shift.start.time, &shift.end.clone().map_or_else(|| "2200-07-04T00:00:00Z".to_string(), |end| end.time), ) } /// Validates vehicles from the fleet. 
pub fn validate_vehicles(ctx: &ValidationContext) -> Result<(), Vec<FormatError>> { combine_error_results(&[ check_e1300_no_vehicle_types_with_duplicate_type_ids(ctx), check_e1301_no_vehicle_types_with_duplicate_ids(ctx), check_e1302_vehicle_shift_time(ctx), check_e1303_vehicle_breaks_time_is_correct(ctx), check_e1304_vehicle_reload_time_is_correct(ctx), check_e1305_vehicle_limit_area_is_correct(ctx), ]) }
33.600939
118
0.54059
7abb439d752a153138259cf6afe06bf94f6b0857
7,083
//! Field descriptors used in the schema-based interface. use std::borrow::Borrow; use std::marker::PhantomData; use ekiden_common::serializer::{Deserializable, Serializable}; use super::super::{Database, DatabaseHandle}; /// Descriptor for scalar fields. pub struct ScalarDescriptor<T> { namespace: &'static str, name: &'static str, value_type: PhantomData<T>, } /// Descriptor for map fields. pub struct MapDescriptor<K, V> { namespace: &'static str, name: &'static str, key_type: PhantomData<K>, value_type: PhantomData<V>, } impl<T> ScalarDescriptor<T> where T: Serializable + Deserializable, { /// Create new scalar descriptor. pub fn new(namespace: &'static str, name: &'static str) -> Self { Self { namespace: &namespace, name: &name, value_type: PhantomData, } } /// Derive the key for storing this field in the underlying database. fn get_key(&self) -> Vec<u8> { let mut key = vec![]; self.namespace.write_to(&mut key).unwrap(); self.name.write_to(&mut key).unwrap(); key } /// Insert a value for this field. /// /// If the database did not have this key present, [`None`] is returned. /// /// If the database did have this key present, the value is updated, and the old value is /// returned. /// /// The value may be any borrowed form of the descriptor's value type, but [`Serializable`] /// on the borrowed form must match those for the value type. /// /// [`None`]: std::option::Option /// [`Serializable`]: ekiden_common::serializer::Serializable pub fn insert<Q>(&self, value: &Q) -> Option<T> where T: Borrow<Q>, Q: ?Sized + Serializable, { let mut db = DatabaseHandle::instance(); let value = Serializable::write(value.borrow()).expect("Failed to serialize state"); match db.insert(&self.get_key(), &value) { Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), None => None, } } /// Fetch a value for this field. 
pub fn get(&self) -> Option<T> { let db = DatabaseHandle::instance(); match db.get(&self.get_key()) { Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), None => None, } } /// Remove a value for this field, returning the value at the key if the key was previously /// in the database. pub fn remove(&self) -> Option<T> { let mut db = DatabaseHandle::instance(); match db.remove(&self.get_key()) { Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), None => None, } } /// Check if a field is present in the underlying database. pub fn is_present(&self) -> bool { let db = DatabaseHandle::instance(); db.contains_key(&self.get_key()) } } impl<K, V> MapDescriptor<K, V> where K: Serializable, V: Serializable + Deserializable, { /// Create new map descriptor. pub fn new(namespace: &'static str, name: &'static str) -> Self { Self { namespace: &namespace, name: &name, key_type: PhantomData, value_type: PhantomData, } } /// Derive the key for storing this field in the underlying database. /// /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] /// on the borrowed form must match those for the key type. /// /// [`Serializable`]: ekiden_common::serializer::Serializable fn get_key_for_subkey<Q>(&self, subkey: &Q) -> Vec<u8> where K: Borrow<Q>, Q: ?Sized + Serializable, { let mut key = vec![]; self.namespace.write_to(&mut key).unwrap(); self.name.write_to(&mut key).unwrap(); subkey.write_to(&mut key).unwrap(); key } /// Insert a value for this field. /// /// If the database did not have this key present, [`None`] is returned. /// /// If the database did have this key present, the value is updated, and the old value is /// returned. /// /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] /// on the borrowed form must match those for the key type. 
/// /// The value may be any borrowed form of the descriptor's value type, but [`Serializable`] /// on the borrowed form must match those for the value type. /// /// [`None`]: std::option::Option /// [`Serializable`]: ekiden_common::serializer::Serializable pub fn insert<Q, P>(&self, key: &Q, value: &P) -> Option<V> where K: Borrow<Q>, V: Borrow<P>, Q: ?Sized + Serializable, P: ?Sized + Serializable, { let mut db = DatabaseHandle::instance(); let value = Serializable::write(value.borrow()).expect("Failed to serialize value"); match db.insert(&self.get_key_for_subkey(key), &value) { Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), None => None, } } /// Fetch a value for this field. /// /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] /// on the borrowed form must match those for the key type. /// /// [`Serializable`]: ekiden_common::serializer::Serializable pub fn get<Q>(&self, key: &Q) -> Option<V> where K: Borrow<Q>, Q: ?Sized + Serializable, { let db = DatabaseHandle::instance(); match db.get(&self.get_key_for_subkey(key)) { Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), None => None, } } /// Remove a value for this field, returning the value at the key if the key was previously /// in the database. /// /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] /// on the borrowed form must match those for the key type. /// /// [`Serializable`]: ekiden_common::serializer::Serializable pub fn remove<Q>(&self, key: &Q) -> Option<V> where K: Borrow<Q>, Q: ?Sized + Serializable, { let mut db = DatabaseHandle::instance(); match db.remove(&self.get_key_for_subkey(key)) { Some(value) => Some(Deserializable::read(&value).expect("Corrupted state")), None => None, } } /// Check if a field is present in the underlying database. 
/// /// The key may be any borrowed form of the descriptor's key type, but [`Serializable`] /// on the borrowed form must match those for the key type. /// /// [`Serializable`]: ekiden_common::serializer::Serializable pub fn contains_key<Q>(&self, key: &Q) -> bool where K: Borrow<Q>, Q: ?Sized + Serializable, { let db = DatabaseHandle::instance(); db.contains_key(&self.get_key_for_subkey(key)) } }
33.253521
95
0.595087
33dae34365285503f5ff55fb21ae5824e87d1e55
525
impl Solution { pub fn second_highest(s: String) -> i32 { let mut highest = -1; let mut second_highest = -1; for c in s.chars() { if let Some(n) = c.to_digit(10) { let n = n as i32; if highest < n { second_highest = highest; highest = n; } else if second_highest < n && n < highest { second_highest = n; } } } second_highest } }
27.631579
61
0.411429
5dad93da8b851f7c6346f5343c65083ef3a58fba
1,110
use rumqtt::{ConnectionMethod, MqttClient, MqttOptions, QoS}; use std::{thread, time::Duration}; fn main() { pretty_env_logger::init(); let client_id = "tls-test".to_owned(); let ca = include_bytes!("tlsfiles/ca-chain.cert.pem").to_vec(); let client_cert = include_bytes!("tlsfiles/bike1.cert.pem").to_vec(); let client_key = include_bytes!("tlsfiles/bike1.key.pem").to_vec(); let connection_method = ConnectionMethod::Tls(ca, Some((client_cert, client_key))); let mqtt_options = MqttOptions::new(client_id, "localhost", 8883) .set_keep_alive(10) .set_connection_method(connection_method); let (mut mqtt_client, notifications) = MqttClient::start(mqtt_options).unwrap(); let topic = "hello/world"; thread::spawn(move || { for i in 0..100 { let payload = format!("publish {}", i); thread::sleep(Duration::from_secs(1)); mqtt_client.publish(topic.clone(), QoS::AtLeastOnce, false, payload).unwrap(); } }); for notification in notifications { println!("{:?}", notification) } }
33.636364
90
0.645045
1e0e4d15cab0d75296b701b4a511cb783b69d7e8
2,626
use std::time::Duration;
use std::thread;
use std::path::Path;

use sdl2::event::Event;
use sdl2::init;
use sdl2::ttf;

use utils::vec2i::Vec2i;
use gui::core::gui_application::GUIApplication;
use gui::core::mouse::{MouseClickEvent, MouseDragEvent, MouseMoveEvent};
use gui::core::mouse::MouseButton as APIMouseButton;
use super::api_bridge::{api_mouse_button_of};
use super::sdl2_graphics::SDL2Graphics;

/// Opens an SDL2 window for `app` and runs its event/render loop at roughly
/// 60 iterations per second until the window is closed.
///
/// Mouse events are translated into the GUI framework's own event types and
/// forwarded to `app`; after processing events, each iteration renders the app
/// and presents the frame. Panics if SDL2/TTF/video initialization or window
/// creation fails.
pub fn run_gui_app(app: &mut GUIApplication) {
    // Initialize SDL2
    let context = init().expect("Could not initialize SDL2 context");
    let ttf = ttf::init().expect("Could not initialize TTF context");
    let video = context.video().expect("Could not initialize video context");

    // Initialize font
    // NOTE(review): the font path is resolved relative to the working
    // directory — the file must exist at runtime.
    let font_path = Path::new("resources/Arial.ttf");

    // Create a window and a canvas
    let window = video.window(app.title().as_str(), app.width(), app.height()).position_centered().build().expect("Error while creating window");
    let canvas = window.into_canvas().build().expect("Error while creating canvas");
    let mut graphics = SDL2Graphics::from(canvas, ttf, font_path);

    // Initialize event loop
    let mut event_pump = context.event_pump().expect("Error while fetching event pump");
    let iterations_per_second = 60;
    let sleep_per_iteration = Duration::new(0, 1_000_000_000u32 / iterations_per_second);

    // Mouse state carried across loop iterations so motion events can be
    // classified as drags (button held) or plain moves.
    let mut last_mouse_pos: Option<Vec2i> = None;
    let mut mouse_pressed = false;
    let mut mouse_button: Option<APIMouseButton> = None;

    'mainloop: loop {
        // Drain all pending SDL events before rendering this frame.
        for event in event_pump.poll_iter() {
            match event {
                Event::Quit {..} => break 'mainloop,
                Event::MouseButtonDown {x, y, mouse_btn, ..} => {
                    let pos = Vec2i::of(x, y);
                    let button = api_mouse_button_of(mouse_btn);
                    app.on_mouse_down(MouseClickEvent::at(pos, button));
                    mouse_button = Some(button);
                    mouse_pressed = true;
                    last_mouse_pos = Some(pos);
                },
                Event::MouseButtonUp {x, y, mouse_btn, ..} => {
                    let pos = Vec2i::of(x, y);
                    let button = api_mouse_button_of(mouse_btn);
                    app.on_mouse_up(MouseClickEvent::at(pos, button));
                    mouse_pressed = false;
                    mouse_button = None;
                    last_mouse_pos = Some(pos);
                },
                Event::MouseMotion {x, y, ..} => {
                    let pos = Vec2i::of(x, y);
                    // Motion is only reported once a previous position exists;
                    // `mouse_button.unwrap()` is safe here because
                    // `mouse_pressed` is only true while a button is stored.
                    if let Some(last_pos) = last_mouse_pos {
                        if mouse_pressed {
                            app.on_mouse_drag(MouseDragEvent::between(last_pos, pos, mouse_button.unwrap()));
                        } else {
                            app.on_mouse_move(MouseMoveEvent::between(last_pos, pos));
                        }
                    }
                    last_mouse_pos = Some(pos);
                },
                _ => {}
            }
        }

        // Render the frame, present it, then sleep to pace the loop at ~60 Hz.
        app.render(&mut graphics);
        graphics.show();
        thread::sleep(sleep_per_iteration);
    }
}
34.103896
142
0.683168
b94e74ada9cefdc9904067da299e433f0ae86423
32,577
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(deprecated)] //! Thread-local reference-counted boxes (the `Rc<T>` type). //! //! The `Rc<T>` type provides shared ownership of an immutable value. //! Destruction is deterministic, and will occur as soon as the last owner is //! gone. It is marked as non-sendable because it avoids the overhead of atomic //! reference counting. //! //! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer //! to the box. A `Weak<T>` pointer can be upgraded to an `Rc<T>` pointer, but //! will return `None` if the value has already been dropped. //! //! For example, a tree with parent pointers can be represented by putting the //! nodes behind strong `Rc<T>` pointers, and then storing the parent pointers //! as `Weak<T>` pointers. //! //! # Examples //! //! Consider a scenario where a set of `Gadget`s are owned by a given `Owner`. //! We want to have our `Gadget`s point to their `Owner`. We can't do this with //! unique ownership, because more than one gadget may belong to the same //! `Owner`. `Rc<T>` allows us to share an `Owner` between multiple `Gadget`s, //! and have the `Owner` remain allocated as long as any `Gadget` points at it. //! //! ```rust //! use std::rc::Rc; //! //! struct Owner { //! name: String //! // ...other fields //! } //! //! struct Gadget { //! id: i32, //! owner: Rc<Owner> //! // ...other fields //! } //! //! fn main() { //! // Create a reference counted Owner. //! let gadget_owner : Rc<Owner> = Rc::new( //! Owner { name: String::from("Gadget Man") } //! ); //! //! 
// Create Gadgets belonging to gadget_owner. To increment the reference //! // count we clone the `Rc<T>` object. //! let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() }; //! let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() }; //! //! drop(gadget_owner); //! //! // Despite dropping gadget_owner, we're still able to print out the name //! // of the Owner of the Gadgets. This is because we've only dropped the //! // reference count object, not the Owner it wraps. As long as there are //! // other `Rc<T>` objects pointing at the same Owner, it will remain //! // allocated. Notice that the `Rc<T>` wrapper around Gadget.owner gets //! // automatically dereferenced for us. //! println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name); //! println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name); //! //! // At the end of the method, gadget1 and gadget2 get destroyed, and with //! // them the last counted references to our Owner. Gadget Man now gets //! // destroyed as well. //! } //! ``` //! //! If our requirements change, and we also need to be able to traverse from //! Owner → Gadget, we will run into problems: an `Rc<T>` pointer from Owner //! → Gadget introduces a cycle between the objects. This means that their //! reference counts can never reach 0, and the objects will remain allocated: a //! memory leak. In order to get around this, we can use `Weak<T>` pointers. //! These pointers don't contribute to the total count. //! //! Rust actually makes it somewhat difficult to produce this loop in the first //! place: in order to end up with two objects that point at each other, one of //! them needs to be mutable. This is problematic because `Rc<T>` enforces //! memory safety by only giving out shared references to the object it wraps, //! and these don't allow direct mutation. We need to wrap the part of the //! object we wish to mutate in a `RefCell`, which provides *interior //! 
mutability*: a method to achieve mutability through a shared reference. //! `RefCell` enforces Rust's borrowing rules at runtime. Read the `Cell` //! documentation for more details on interior mutability. //! //! ```rust //! use std::rc::Rc; //! use std::rc::Weak; //! use std::cell::RefCell; //! //! struct Owner { //! name: String, //! gadgets: RefCell<Vec<Weak<Gadget>>>, //! // ...other fields //! } //! //! struct Gadget { //! id: i32, //! owner: Rc<Owner>, //! // ...other fields //! } //! //! fn main() { //! // Create a reference counted Owner. Note the fact that we've put the //! // Owner's vector of Gadgets inside a RefCell so that we can mutate it //! // through a shared reference. //! let gadget_owner : Rc<Owner> = Rc::new( //! Owner { //! name: "Gadget Man".to_string(), //! gadgets: RefCell::new(Vec::new()), //! } //! ); //! //! // Create Gadgets belonging to gadget_owner as before. //! let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()}); //! let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()}); //! //! // Add the Gadgets to their Owner. To do this we mutably borrow from //! // the RefCell holding the Owner's Gadgets. //! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget1)); //! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget2)); //! //! // Iterate over our Gadgets, printing their details out //! for gadget_opt in gadget_owner.gadgets.borrow().iter() { //! //! // gadget_opt is a Weak<Gadget>. Since weak pointers can't guarantee //! // that their object is still allocated, we need to call upgrade() //! // on them to turn them into a strong reference. This returns an //! // Option, which contains a reference to our object if it still //! // exists. //! let gadget = gadget_opt.upgrade().unwrap(); //! println!("Gadget {} owned by {}", gadget.id, gadget.owner.name); //! } //! //! // At the end of the method, gadget_owner, gadget1 and gadget2 get //! // destroyed. 
There are now no strong (`Rc<T>`) references to the gadgets. //! // Once they get destroyed, the Gadgets get destroyed. This zeroes the //! // reference count on Gadget Man, they get destroyed as well. //! } //! ``` #![stable(feature = "rust1", since = "1.0.0")] #[cfg(not(test))] use boxed::Box; #[cfg(test)] use std::boxed::Box; use core::borrow; use core::cell::Cell; use core::cmp::Ordering; use core::fmt; use core::hash::{Hasher, Hash}; use core::intrinsics::{assume, abort}; use core::marker; #[cfg(not(stage0))] use core::marker::Unsize; use core::mem::{self, align_of_val, size_of_val, forget}; use core::ops::Deref; #[cfg(not(stage0))] use core::ops::CoerceUnsized; use core::ptr::{self, Shared}; use heap::deallocate; struct RcBox<T: ?Sized> { strong: Cell<usize>, weak: Cell<usize>, value: T, } /// A reference-counted pointer type over an immutable value. /// /// See the [module level documentation](./index.html) for more details. #[unsafe_no_drop_flag] #[stable(feature = "rust1", since = "1.0.0")] pub struct Rc<T: ?Sized> { // FIXME #12808: strange names to try to avoid interfering with field // accesses of the contained type via Deref _ptr: Shared<RcBox<T>>, } impl<T: ?Sized> !marker::Send for Rc<T> {} impl<T: ?Sized> !marker::Sync for Rc<T> {} #[cfg(not(stage0))] // remove cfg after new snapshot impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {} impl<T> Rc<T> { /// Constructs a new `Rc<T>`. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn new(value: T) -> Rc<T> { unsafe { Rc { // there is an implicit weak pointer owned by all the strong // pointers, which ensures that the weak destructor never frees // the allocation while the strong destructor is running, even // if the weak pointer is stored inside the strong one. 
_ptr: Shared::new(Box::into_raw(box RcBox { strong: Cell::new(1), weak: Cell::new(1), value: value, })), } } } /// Unwraps the contained value if the `Rc<T>` has only one strong reference. /// This will succeed even if there are outstanding weak references. /// /// Otherwise, an `Err` is returned with the same `Rc<T>`. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let x = Rc::new(3); /// assert_eq!(Rc::try_unwrap(x), Ok(3)); /// /// let x = Rc::new(4); /// let _y = x.clone(); /// assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4))); /// ``` #[inline] #[stable(feature = "rc_unique", since = "1.4.0")] pub fn try_unwrap(this: Self) -> Result<T, Self> { if Rc::would_unwrap(&this) { unsafe { let val = ptr::read(&*this); // copy the contained object // Indicate to Weaks that they can't be promoted by decrememting // the strong count, and then remove the implicit "strong weak" // pointer while also handling drop logic by just crafting a // fake Weak. this.dec_strong(); let _weak = Weak { _ptr: this._ptr }; forget(this); Ok(val) } } else { Err(this) } } /// Checks if `Rc::try_unwrap` would return `Ok`. #[unstable(feature = "rc_would_unwrap", reason = "just added for niche usecase", issue = "28356")] pub fn would_unwrap(this: &Self) -> bool { Rc::strong_count(&this) == 1 } } impl<T: ?Sized> Rc<T> { /// Downgrades the `Rc<T>` to a `Weak<T>` reference. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// let weak_five = Rc::downgrade(&five); /// ``` #[stable(feature = "rc_weak", since = "1.4.0")] pub fn downgrade(this: &Self) -> Weak<T> { this.inc_weak(); Weak { _ptr: this._ptr } } /// Get the number of weak references to this value. #[inline] #[unstable(feature = "rc_counts", reason = "not clearly useful", issue = "28356")] pub fn weak_count(this: &Self) -> usize { this.weak() - 1 } /// Get the number of strong references to this value. 
#[inline] #[unstable(feature = "rc_counts", reason = "not clearly useful", issue = "28356")] pub fn strong_count(this: &Self) -> usize { this.strong() } /// Returns true if there are no other `Rc` or `Weak<T>` values that share /// the same inner value. /// /// # Examples /// /// ``` /// #![feature(rc_counts)] /// /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// assert!(Rc::is_unique(&five)); /// ``` #[inline] #[unstable(feature = "rc_counts", reason = "uniqueness has unclear meaning", issue = "28356")] pub fn is_unique(this: &Self) -> bool { Rc::weak_count(this) == 0 && Rc::strong_count(this) == 1 } /// Returns a mutable reference to the contained value if the `Rc<T>` has /// one strong reference and no weak references. /// /// Returns `None` if the `Rc<T>` is not unique. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let mut x = Rc::new(3); /// *Rc::get_mut(&mut x).unwrap() = 4; /// assert_eq!(*x, 4); /// /// let _y = x.clone(); /// assert!(Rc::get_mut(&mut x).is_none()); /// ``` #[inline] #[stable(feature = "rc_unique", since = "1.4.0")] pub fn get_mut(this: &mut Self) -> Option<&mut T> { if Rc::is_unique(this) { let inner = unsafe { &mut **this._ptr }; Some(&mut inner.value) } else { None } } } impl<T: Clone> Rc<T> { #[inline] #[unstable(feature = "rc_make_unique", reason = "renamed to Rc::make_mut", issue = "27718")] #[deprecated(since = "1.4.0", reason = "renamed to Rc::make_mut")] pub fn make_unique(&mut self) -> &mut T { Rc::make_mut(self) } /// Make a mutable reference into the given `Rc<T>` by cloning the inner /// data if the `Rc<T>` doesn't have one strong reference and no weak /// references. /// /// This is also referred to as a copy-on-write. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let mut data = Rc::new(5); /// /// *Rc::make_mut(&mut data) += 1; // Won't clone anything /// let mut other_data = data.clone(); // Won't clone inner data /// *Rc::make_mut(&mut data) += 1; // Clones inner data /// *Rc::make_mut(&mut data) += 1; // Won't clone anything /// *Rc::make_mut(&mut other_data) *= 2; // Won't clone anything /// /// // Note: data and other_data now point to different numbers /// assert_eq!(*data, 8); /// assert_eq!(*other_data, 12); /// /// ``` #[inline] #[stable(feature = "rc_unique", since = "1.4.0")] pub fn make_mut(this: &mut Self) -> &mut T { if Rc::strong_count(this) != 1 { // Gotta clone the data, there are other Rcs *this = Rc::new((**this).clone()) } else if Rc::weak_count(this) != 0 { // Can just steal the data, all that's left is Weaks unsafe { let mut swap = Rc::new(ptr::read(&(**this._ptr).value)); mem::swap(this, &mut swap); swap.dec_strong(); // Remove implicit strong-weak ref (no need to craft a fake // Weak here -- we know other Weaks can clean up for us) swap.dec_weak(); forget(swap); } } // This unsafety is ok because we're guaranteed that the pointer // returned is the *only* pointer that will ever be returned to T. Our // reference count is guaranteed to be 1 at this point, and we required // the `Rc<T>` itself to be `mut`, so we're returning the only possible // reference to the inner value. let inner = unsafe { &mut **this._ptr }; &mut inner.value } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Deref for Rc<T> { type Target = T; #[inline(always)] fn deref(&self) -> &T { &self.inner().value } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Drop for Rc<T> { /// Drops the `Rc<T>`. /// /// This will decrement the strong reference count. If the strong reference /// count becomes zero and the only other references are `Weak<T>` ones, /// `drop`s the inner value. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// { /// let five = Rc::new(5); /// /// // stuff /// /// drop(five); // explicit drop /// } /// { /// let five = Rc::new(5); /// /// // stuff /// /// } // implicit drop /// ``` #[unsafe_destructor_blind_to_params] fn drop(&mut self) { unsafe { let ptr = *self._ptr; if !(*(&ptr as *const _ as *const *const ())).is_null() && ptr as *const () as usize != mem::POST_DROP_USIZE { self.dec_strong(); if self.strong() == 0 { // destroy the contained object ptr::drop_in_place(&mut (*ptr).value); // remove the implicit "strong weak" pointer now that we've // destroyed the contents. self.dec_weak(); if self.weak() == 0 { deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } } } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Clone for Rc<T> { /// Makes a clone of the `Rc<T>`. /// /// When you clone an `Rc<T>`, it will create another pointer to the data and /// increase the strong reference counter. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// five.clone(); /// ``` #[inline] fn clone(&self) -> Rc<T> { self.inc_strong(); Rc { _ptr: self._ptr } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Default> Default for Rc<T> { /// Creates a new `Rc<T>`, with the `Default` value for `T`. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let x: Rc<i32> = Default::default(); /// ``` #[inline] fn default() -> Rc<T> { Rc::new(Default::default()) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized + PartialEq> PartialEq for Rc<T> { /// Equality for two `Rc<T>`s. /// /// Two `Rc<T>`s are equal if their inner value are equal. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// five == Rc::new(5); /// ``` #[inline(always)] fn eq(&self, other: &Rc<T>) -> bool { **self == **other } /// Inequality for two `Rc<T>`s. /// /// Two `Rc<T>`s are unequal if their inner value are unequal. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// five != Rc::new(5); /// ``` #[inline(always)] fn ne(&self, other: &Rc<T>) -> bool { **self != **other } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized + Eq> Eq for Rc<T> {} #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> { /// Partial comparison for two `Rc<T>`s. /// /// The two are compared by calling `partial_cmp()` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// five.partial_cmp(&Rc::new(5)); /// ``` #[inline(always)] fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> { (**self).partial_cmp(&**other) } /// Less-than comparison for two `Rc<T>`s. /// /// The two are compared by calling `<` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// five < Rc::new(5); /// ``` #[inline(always)] fn lt(&self, other: &Rc<T>) -> bool { **self < **other } /// 'Less-than or equal to' comparison for two `Rc<T>`s. /// /// The two are compared by calling `<=` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// five <= Rc::new(5); /// ``` #[inline(always)] fn le(&self, other: &Rc<T>) -> bool { **self <= **other } /// Greater-than comparison for two `Rc<T>`s. /// /// The two are compared by calling `>` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// five > Rc::new(5); /// ``` #[inline(always)] fn gt(&self, other: &Rc<T>) -> bool { **self > **other } /// 'Greater-than or equal to' comparison for two `Rc<T>`s. /// /// The two are compared by calling `>=` on their inner values. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// five >= Rc::new(5); /// ``` #[inline(always)] fn ge(&self, other: &Rc<T>) -> bool { **self >= **other } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized + Ord> Ord for Rc<T> { /// Comparison for two `Rc<T>`s. /// /// The two are compared by calling `cmp()` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// five.partial_cmp(&Rc::new(5)); /// ``` #[inline] fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized+Hash> Hash for Rc<T> { fn hash<H: Hasher>(&self, state: &mut H) { (**self).hash(state); } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized+fmt::Display> fmt::Display for Rc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized+fmt::Debug> fmt::Debug for Rc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> fmt::Pointer for Rc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&*self._ptr, f) } } /// A weak version of `Rc<T>`. /// /// Weak references do not count when determining if the inner value should be /// dropped. /// /// See the [module level documentation](./index.html) for more. 
#[unsafe_no_drop_flag] #[stable(feature = "rc_weak", since = "1.4.0")] pub struct Weak<T: ?Sized> { // FIXME #12808: strange names to try to avoid interfering with // field accesses of the contained type via Deref _ptr: Shared<RcBox<T>>, } impl<T: ?Sized> !marker::Send for Weak<T> {} impl<T: ?Sized> !marker::Sync for Weak<T> {} #[cfg(not(stage0))] // remove cfg after new snapshot impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {} impl<T: ?Sized> Weak<T> { /// Upgrades a weak reference to a strong reference. /// /// Upgrades the `Weak<T>` reference to an `Rc<T>`, if possible. /// /// Returns `None` if there were no strong references and the data was /// destroyed. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5); /// /// let weak_five = Rc::downgrade(&five); /// /// let strong_five: Option<Rc<_>> = weak_five.upgrade(); /// ``` #[stable(feature = "rc_weak", since = "1.4.0")] pub fn upgrade(&self) -> Option<Rc<T>> { if self.strong() == 0 { None } else { self.inc_strong(); Some(Rc { _ptr: self._ptr }) } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Drop for Weak<T> { /// Drops the `Weak<T>`. /// /// This will decrement the weak reference count. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// { /// let five = Rc::new(5); /// let weak_five = Rc::downgrade(&five); /// /// // stuff /// /// drop(weak_five); // explicit drop /// } /// { /// let five = Rc::new(5); /// let weak_five = Rc::downgrade(&five); /// /// // stuff /// /// } // implicit drop /// ``` fn drop(&mut self) { unsafe { let ptr = *self._ptr; if !(*(&ptr as *const _ as *const *const ())).is_null() && ptr as *const () as usize != mem::POST_DROP_USIZE { self.dec_weak(); // the weak count starts at 1, and will only go to zero if all // the strong pointers have disappeared. 
if self.weak() == 0 { deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } } } } #[stable(feature = "rc_weak", since = "1.4.0")] impl<T: ?Sized> Clone for Weak<T> { /// Makes a clone of the `Weak<T>`. /// /// This increases the weak reference count. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let weak_five = Rc::downgrade(&Rc::new(5)); /// /// weak_five.clone(); /// ``` #[inline] fn clone(&self) -> Weak<T> { self.inc_weak(); Weak { _ptr: self._ptr } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized+fmt::Debug> fmt::Debug for Weak<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "(Weak)") } } // NOTE: We checked_add here to deal with mem::forget safety. In particular // if you mem::forget Rcs (or Weaks), the ref-count can overflow, and then // you can free the allocation while outstanding Rcs (or Weaks) exist. // We abort because this is such a degenerate scenario that we don't care about // what happens -- no real program should ever experience this. // // This should have negligible overhead since you don't actually need to // clone these much in Rust thanks to ownership and move-semantics. #[doc(hidden)] trait RcBoxPtr<T: ?Sized> { fn inner(&self) -> &RcBox<T>; #[inline] fn strong(&self) -> usize { self.inner().strong.get() } #[inline] fn inc_strong(&self) { self.inner().strong.set(self.strong().checked_add(1).unwrap_or_else(|| unsafe { abort() })); } #[inline] fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); } #[inline] fn weak(&self) -> usize { self.inner().weak.get() } #[inline] fn inc_weak(&self) { self.inner().weak.set(self.weak().checked_add(1).unwrap_or_else(|| unsafe { abort() })); } #[inline] fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); } } impl<T: ?Sized> RcBoxPtr<T> for Rc<T> { #[inline(always)] fn inner(&self) -> &RcBox<T> { unsafe { // Safe to assume this here, as if it weren't true, we'd be breaking // the contract anyway. 
// This allows the null check to be elided in the destructor if we // manipulated the reference count in the same function. assume(!(*(&self._ptr as *const _ as *const *const ())).is_null()); &(**self._ptr) } } } impl<T: ?Sized> RcBoxPtr<T> for Weak<T> { #[inline(always)] fn inner(&self) -> &RcBox<T> { unsafe { // Safe to assume this here, as if it weren't true, we'd be breaking // the contract anyway. // This allows the null check to be elided in the destructor if we // manipulated the reference count in the same function. assume(!(*(&self._ptr as *const _ as *const *const ())).is_null()); &(**self._ptr) } } } #[cfg(test)] mod tests { use super::{Rc, Weak}; use std::boxed::Box; use std::cell::RefCell; use std::option::Option; use std::option::Option::{Some, None}; use std::result::Result::{Err, Ok}; use std::mem::drop; use std::clone::Clone; #[test] fn test_clone() { let x = Rc::new(RefCell::new(5)); let y = x.clone(); *x.borrow_mut() = 20; assert_eq!(*y.borrow(), 20); } #[test] fn test_simple() { let x = Rc::new(5); assert_eq!(*x, 5); } #[test] fn test_simple_clone() { let x = Rc::new(5); let y = x.clone(); assert_eq!(*x, 5); assert_eq!(*y, 5); } #[test] fn test_destructor() { let x: Rc<Box<_>> = Rc::new(box 5); assert_eq!(**x, 5); } #[test] fn test_live() { let x = Rc::new(5); let y = Rc::downgrade(&x); assert!(y.upgrade().is_some()); } #[test] fn test_dead() { let x = Rc::new(5); let y = Rc::downgrade(&x); drop(x); assert!(y.upgrade().is_none()); } #[test] fn weak_self_cyclic() { struct Cycle { x: RefCell<Option<Weak<Cycle>>>, } let a = Rc::new(Cycle { x: RefCell::new(None) }); let b = Rc::downgrade(&a.clone()); *a.x.borrow_mut() = Some(b); // hopefully we don't double-free (or leak)... 
} #[test] fn is_unique() { let x = Rc::new(3); assert!(Rc::is_unique(&x)); let y = x.clone(); assert!(!Rc::is_unique(&x)); drop(y); assert!(Rc::is_unique(&x)); let w = Rc::downgrade(&x); assert!(!Rc::is_unique(&x)); drop(w); assert!(Rc::is_unique(&x)); } #[test] fn test_strong_count() { let a = Rc::new(0u32); assert!(Rc::strong_count(&a) == 1); let w = Rc::downgrade(&a); assert!(Rc::strong_count(&a) == 1); let b = w.upgrade().expect("upgrade of live rc failed"); assert!(Rc::strong_count(&b) == 2); assert!(Rc::strong_count(&a) == 2); drop(w); drop(a); assert!(Rc::strong_count(&b) == 1); let c = b.clone(); assert!(Rc::strong_count(&b) == 2); assert!(Rc::strong_count(&c) == 2); } #[test] fn test_weak_count() { let a = Rc::new(0u32); assert!(Rc::strong_count(&a) == 1); assert!(Rc::weak_count(&a) == 0); let w = Rc::downgrade(&a); assert!(Rc::strong_count(&a) == 1); assert!(Rc::weak_count(&a) == 1); drop(w); assert!(Rc::strong_count(&a) == 1); assert!(Rc::weak_count(&a) == 0); let c = a.clone(); assert!(Rc::strong_count(&a) == 2); assert!(Rc::weak_count(&a) == 0); drop(c); } #[test] fn try_unwrap() { let x = Rc::new(3); assert_eq!(Rc::try_unwrap(x), Ok(3)); let x = Rc::new(4); let _y = x.clone(); assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4))); let x = Rc::new(5); let _w = Rc::downgrade(&x); assert_eq!(Rc::try_unwrap(x), Ok(5)); } #[test] fn get_mut() { let mut x = Rc::new(3); *Rc::get_mut(&mut x).unwrap() = 4; assert_eq!(*x, 4); let y = x.clone(); assert!(Rc::get_mut(&mut x).is_none()); drop(y); assert!(Rc::get_mut(&mut x).is_some()); let _w = Rc::downgrade(&x); assert!(Rc::get_mut(&mut x).is_none()); } #[test] fn test_cowrc_clone_make_unique() { let mut cow0 = Rc::new(75); let mut cow1 = cow0.clone(); let mut cow2 = cow1.clone(); assert!(75 == *Rc::make_mut(&mut cow0)); assert!(75 == *Rc::make_mut(&mut cow1)); assert!(75 == *Rc::make_mut(&mut cow2)); *Rc::make_mut(&mut cow0) += 1; *Rc::make_mut(&mut cow1) += 2; *Rc::make_mut(&mut cow2) += 3; assert!(76 == *cow0); 
assert!(77 == *cow1); assert!(78 == *cow2); // none should point to the same backing memory assert!(*cow0 != *cow1); assert!(*cow0 != *cow2); assert!(*cow1 != *cow2); } #[test] fn test_cowrc_clone_unique2() { let mut cow0 = Rc::new(75); let cow1 = cow0.clone(); let cow2 = cow1.clone(); assert!(75 == *cow0); assert!(75 == *cow1); assert!(75 == *cow2); *Rc::make_mut(&mut cow0) += 1; assert!(76 == *cow0); assert!(75 == *cow1); assert!(75 == *cow2); // cow1 and cow2 should share the same contents // cow0 should have a unique reference assert!(*cow0 != *cow1); assert!(*cow0 != *cow2); assert!(*cow1 == *cow2); } #[test] fn test_cowrc_clone_weak() { let mut cow0 = Rc::new(75); let cow1_weak = Rc::downgrade(&cow0); assert!(75 == *cow0); assert!(75 == *cow1_weak.upgrade().unwrap()); *Rc::make_mut(&mut cow0) += 1; assert!(76 == *cow0); assert!(cow1_weak.upgrade().is_none()); } #[test] fn test_show() { let foo = Rc::new(75); assert_eq!(format!("{:?}", foo), "75"); } #[test] fn test_unsized() { let foo: Rc<[i32]> = Rc::new([1, 2, 3]); assert_eq!(foo, foo.clone()); } } impl<T: ?Sized> borrow::Borrow<T> for Rc<T> { fn borrow(&self) -> &T { &**self } } #[stable(since = "1.5.0", feature = "smart_ptr_as_ref")] impl<T: ?Sized> AsRef<T> for Rc<T> { fn as_ref(&self) -> &T { &**self } }
28.957333
100
0.52313
339971e4d8e1af2d876ca0760a2f2b12cc04d136
1,496
/// Problem 35 - Project Euler /// http://projecteuler.net/index.php?section=problems&id=35 extern crate project_euler; use project_euler::prime::{is_prime, primes}; use std::collections::HashSet; fn rotate1(x: u32) -> u32 { if x / 10 == 0 { return x; } let mut xs = x.to_string().chars().collect::<Vec<char>>(); let top = xs.remove(0); xs.push(top); xs.iter().collect::<String>().parse::<u32>().unwrap_or(0) } fn rotate(x: u32, n: u32) -> u32 { if n == 0 { return x; } let mut ans = x; for _ in 1..n + 1 { ans = rotate1(ans); } ans } fn main() { let ps = primes(1_000_000); let mut hs = HashSet::new(); for p in ps { if p.to_string().chars().any(|c| c == '0') { continue; } let digits_count = p.to_string().chars().count() as u32; if (0..digits_count).map(|i| rotate(p, i)).all(|x| is_prime(x)) { hs.insert(p); } } println!("{}", hs.len()); } #[test] fn test_rotate1() { assert_eq!(rotate1(2), 2); assert_eq!(rotate1(3), 3); assert_eq!(rotate1(5), 5); assert_eq!(rotate1(7), 7); assert_eq!(rotate1(11), 11); assert_eq!(rotate1(13), 31); assert_eq!(rotate1(13432), 34321); assert_eq!(rotate1(10), 1); assert_eq!(rotate1(10000), 1); } #[test] fn test_rotate() { assert_eq!(rotate(2, 1), 2); assert_eq!(rotate(3, 1), 3); assert_eq!(rotate(5, 1), 5); assert_eq!(rotate(7, 1), 7); }
22.328358
73
0.542781
28ea13c469453f0ec41f186ccefd8c58d00b8cbb
409
#![allow(unused_imports)] use wasm_bindgen::prelude::*; #[wasm_bindgen] #[doc = "The `AttestationConveyancePreference` enum."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `AttestationConveyancePreference`*"] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum AttestationConveyancePreference { None = "none", Indirect = "indirect", Direct = "direct", }
31.461538
110
0.706601
ff57cf7d748c6b582e45bd3aa9c681ace63124cb
1,433
use crate::core::{Cell, InternalVM, RuntimeError, VMInstruction, VirtualMachine}; use algebra::Field; use r1cs_core::ConstraintSystem; use zinc_bytecode::instructions::Or; impl<F, CS> VMInstruction<F, CS> for Or where F: Field, CS: ConstraintSystem<F>, { fn execute(&self, vm: &mut VirtualMachine<F, CS>) -> Result<(), RuntimeError> { let right = vm.pop()?.value()?; let left = vm.pop()?.value()?; let or = vm.operations().or(left, right)?; vm.push(Cell::Value(or)) } } #[cfg(test)] mod tests { use super::*; use crate::instructions::testing_utils::{TestingError, VMTestRunner}; use zinc_bytecode::scalar::ScalarType; use zinc_bytecode::*; #[test] fn test_or() -> Result<(), TestingError> { VMTestRunner::new() .add(PushConst::new(0.into(), ScalarType::Boolean)) .add(PushConst::new(0.into(), ScalarType::Boolean)) .add(Or) .add(PushConst::new(0.into(), ScalarType::Boolean)) .add(PushConst::new(1.into(), ScalarType::Boolean)) .add(Or) .add(PushConst::new(1.into(), ScalarType::Boolean)) .add(PushConst::new(0.into(), ScalarType::Boolean)) .add(Or) .add(PushConst::new(1.into(), ScalarType::Boolean)) .add(PushConst::new(1.into(), ScalarType::Boolean)) .add(Or) .test(&[1, 1, 1, 0]) } }
31.152174
83
0.577111
8a1849c01aa2e36e086a36fa582e86f9042e45dd
11,342
#![feature(box_syntax)] #![feature(test)] #![feature(box_patterns)] #![feature(specialization)] extern crate test; use std::{ env, fs::{read_dir, File}, io::{self, Read, Write}, path::Path, sync::{Arc, RwLock}, }; use swc_common::{Fold, FoldWith}; use swc_ecma_ast::*; use swc_ecma_codegen::{self, Emitter}; use swc_ecma_parser::{lexer::Lexer, Parser, Session, SourceFileInput, Syntax}; use swc_ecma_transforms::fixer; use test::{ test_main, DynTestFn, Options, ShouldPanic::No, TestDesc, TestDescAndFn, TestName, TestType, }; const IGNORED_PASS_TESTS: &[&str] = &[ // TODO: uningnore "5654d4106d7025c2.js", "431ecef8c85d4d24.js", // Generated code is better than it from `pass` "0da4b57d03d33129.js", "aec65a9745669870.js", "1c055d256ec34f17.js", "d57a361bc638f38c.js", "95520bedf0fdd4c9.js", "5f1e0eff7ac775ee.js", "90ad0135b905a622.js", "7da12349ac9f51f2.js", "46173461e93df4c2.js", "446ffc8afda7e47f.js", "3b5d1fb0e093dab8.js", "0140c25a4177e5f7.module.js", "e877f5e6753dc7e4.js", "aac70baa56299267.js", // Wrong tests (normalized expected.js is wrong) "50c6ab935ccb020a.module.js", "9949a2e1a6844836.module.js", "1efde9ddd9d6e6ce.module.js", // Wrong tests (variable name or value is different) "8386fbff927a9e0e.js", "0339fa95c78c11bd.js", "0426f15dac46e92d.js", "0b4d61559ccce0f9.js", "0f88c334715d2489.js", "1093d98f5fc0758d.js", "15d9592709b947a0.js", "2179895ec5cc6276.js", "247a3a57e8176ebd.js", "441a92357939904a.js", "47f974d6fc52e3e4.js", "4e1a0da46ca45afe.js", "5829d742ab805866.js", "589dc8ad3b9aa28f.js", "598a5cedba92154d.js", "72d79750e81ef03d.js", "7788d3c1e1247da9.js", "7b72d7b43bedc895.js", "7dab6e55461806c9.js", "82c827ccaecbe22b.js", "87a9b0d1d80812cc.js", "8c80f7ee04352eba.js", "96f5d93be9a54573.js", "988e362ed9ddcac5.js", "9bcae7c7f00b4e3c.js", "a8a03a88237c4e8f.js", "ad06370e34811a6a.js", "b0fdc038ee292aba.js", "b62c6dd890bef675.js", "cb211fadccb029c7.js", "ce968fcdf3a1987c.js", "db3c01738aaf0b92.js", "e1387fe892984e2b.js", "e71c1d5f0b6b833c.js", 
"e8ea384458526db0.js", // We don't implement Annex B fully. "1c1e2a43fe5515b6.js", "3dabeca76119d501.js", "52aeec7b8da212a2.js", "59ae0289778b80cd.js", "a4d62a651f69d815.js", "c06df922631aeabc.js", ]; fn add_test<F: FnOnce() + Send + 'static>( tests: &mut Vec<TestDescAndFn>, name: String, ignore: bool, f: F, ) { tests.push(TestDescAndFn { desc: TestDesc { test_type: TestType::UnitTest, name: TestName::DynTestName(name), ignore, should_panic: No, allow_fail: false, }, testfn: DynTestFn(box f), }); } struct MyHandlers; impl swc_ecma_codegen::Handlers for MyHandlers {} fn error_tests(tests: &mut Vec<TestDescAndFn>) -> Result<(), io::Error> { let dir = Path::new(env!("CARGO_MANIFEST_DIR")) .parent() .unwrap() .join("parser") .join("tests") .join("test262-parser"); eprintln!("Loading tests from {}", dir.display()); let normal = dir.join("pass"); let explicit = dir.join("pass-explicit"); for entry in read_dir(&explicit).expect("failed to read directory") { let entry = entry?; let file_name = entry .path() .strip_prefix(&explicit) .expect("failed to strip prefix") .to_str() .expect("to_str() failed") .to_string(); let input = { let mut buf = String::new(); File::open(entry.path())?.read_to_string(&mut buf)?; buf }; let ignore = IGNORED_PASS_TESTS.contains(&&*file_name); let module = file_name.contains("module"); let name = format!("fixer::{}", file_name); add_test(tests, name, ignore, { let normal = normal.clone(); move || { eprintln!( "\n\n========== Running fixer test {}\nSource:\n{}\n", file_name, input ); let mut wr = Buf(Arc::new(RwLock::new(vec![]))); let mut wr2 = Buf(Arc::new(RwLock::new(vec![]))); ::testing::run_test(false, |cm, handler| { let src = cm.load_file(&entry.path()).expect("failed to load file"); let expected = cm .load_file(&normal.join(file_name)) .expect("failed to load reference file"); { let handlers = box MyHandlers; let handlers2 = box MyHandlers; let mut parser: Parser<'_, Lexer<'_, SourceFileInput<'_>>> = Parser::new( Session { handler: 
&handler }, Syntax::default(), (&*src).into(), None, ); let mut emitter = Emitter { cfg: swc_ecma_codegen::Config { minify: false }, cm: cm.clone(), wr: box swc_ecma_codegen::text_writer::JsWriter::new( cm.clone(), "\n", &mut wr, None, ), comments: None, handlers, }; let mut expected_emitter = Emitter { cfg: swc_ecma_codegen::Config { minify: false }, cm: cm.clone(), wr: box swc_ecma_codegen::text_writer::JsWriter::new( cm, "\n", &mut wr2, None, ), comments: None, handlers: handlers2, }; // Parse source let mut e_parser: Parser<'_, Lexer<'_, SourceFileInput<'_>>> = Parser::new( Session { handler: &handler }, Syntax::default(), (&*expected).into(), None, ); if module { let module = parser .parse_module() .map(normalize) .map(|p| p.fold_with(&mut fixer())) .map_err(|mut e| { e.emit(); })?; let module2 = e_parser .parse_module() .map(normalize) .map_err(|mut e| { e.emit(); }) .expect("failed to parse reference file"); if module == module2 { return Ok(()); } emitter.emit_module(&module).unwrap(); expected_emitter.emit_module(&module2).unwrap(); } else { let script = parser .parse_script() .map(normalize) .map(|p| p.fold_with(&mut fixer())) .map_err(|mut e| { e.emit(); })?; let script2 = e_parser .parse_script() .map(normalize) .map(|p| p.fold_with(&mut fixer())) .map_err(|mut e| { e.emit(); })?; if script == script2 { return Ok(()); } emitter.emit_script(&script).unwrap(); expected_emitter.emit_script(&script2).unwrap(); } } let output = String::from_utf8_lossy(&*wr.0.read().unwrap()).to_string(); let expected = String::from_utf8_lossy(&*wr2.0.read().unwrap()).to_string(); if output == expected { return Ok(()); } eprintln!("Wrong output:\n{}\n-----\n{}", output, expected); Err(()) }) .expect("failed to run test"); } }); } Ok(()) } #[test] fn identity() { let args: Vec<_> = env::args().collect(); let mut tests = Vec::new(); error_tests(&mut tests).expect("failed to load testss"); test_main(&args, tests, Some(Options::new())); } #[derive(Debug, Clone)] struct 
Buf(Arc<RwLock<Vec<u8>>>); impl Write for Buf { fn write(&mut self, data: &[u8]) -> io::Result<usize> { self.0.write().unwrap().write(data) } fn flush(&mut self) -> io::Result<()> { self.0.write().unwrap().flush() } } struct Normalizer; impl Fold<Stmt> for Normalizer { fn fold(&mut self, stmt: Stmt) -> Stmt { let stmt = stmt.fold_children(self); match stmt { Stmt::Expr(ExprStmt { span, expr: box Expr::Paren(ParenExpr { expr, .. }), }) => Stmt::Expr(ExprStmt { span, expr }), _ => stmt, } } } impl Fold<PropName> for Normalizer { fn fold(&mut self, name: PropName) -> PropName { let name = name.fold_children(self); match name { PropName::Ident(i) => PropName::Str(Str { value: i.sym, span: i.span, has_escape: false, }), PropName::Num(n) => { let s = if n.value.is_infinite() { if n.value.is_sign_positive() { "Infinity".into() } else { "-Infinity".into() } } else { format!("{}", n.value) }; PropName::Str(Str { value: s.into(), span: n.span, has_escape: false, }) } _ => name, } } } impl Fold<NewExpr> for Normalizer { fn fold(&mut self, expr: NewExpr) -> NewExpr { let mut expr = expr.fold_children(self); expr.args = match expr.args { Some(..) => expr.args, None => Some(vec![]), }; expr } } fn normalize<T>(node: T) -> T where T: FoldWith<Normalizer> + FoldWith<::testing::DropSpan>, { node.fold_with(&mut Normalizer) .fold_with(&mut ::testing::DropSpan) }
32.039548
99
0.449921
56f3f5fcf45c3ad78e26d99ad6d593bb8af52c3e
2,759
use wow_srp::client::SrpClientUser; use wow_srp::normalized_string::NormalizedString; use wow_srp::server::SrpVerifier; use wow_srp::{PublicKey, GENERATOR, LARGE_SAFE_PRIME_LITTLE_ENDIAN}; #[test] fn authenticate_with_self() { let username: NormalizedString = NormalizedString::new("A").unwrap(); let password: NormalizedString = NormalizedString::new("A").unwrap(); let client = SrpClientUser::new(username, password); let username: NormalizedString = NormalizedString::new("A").unwrap(); let password: NormalizedString = NormalizedString::new("A").unwrap(); let verifier = SrpVerifier::from_username_and_password(username, password); let password_verifier = hex::encode(&verifier.password_verifier()); let client_salt = hex::encode(&verifier.salt()); let server = verifier.into_proof(); let server_salt = hex::encode(&server.salt()); let server_public_key = hex::encode(&server.server_public_key()); let client = client.into_challenge( GENERATOR, LARGE_SAFE_PRIME_LITTLE_ENDIAN, PublicKey::from_le_bytes(server.server_public_key()).unwrap(), *server.salt(), ); let client_public_key = *client.client_public_key(); let (mut server, server_proof) = match server.into_server( PublicKey::from_le_bytes(&client_public_key).unwrap(), *client.client_proof(), ) { Ok(s) => s, Err(e) => { panic!( "'{}'\ \nverifier: {}\ \nclient_salt: {}\ \nserver_salt: {}\ \nserver_public_key: {}\ \nclient_public_key: {}", e, password_verifier, client_salt, server_salt, server_public_key, hex::encode(client_public_key), ) } }; let e = client.verify_server_proof(server_proof); let client = match e { Ok(s) => s, Err(e) => { panic!( "'{}'\ \nverifier: {}\ \nclient_salt: {}\ \nserver_salt: {}\ \nserver_public_key: {}\ \nclient_public_key: {}", e, password_verifier, client_salt, server_salt, server_public_key, hex::encode(client_public_key), ) } }; assert_eq!(*server.session_key(), client.session_key()); let reconnection_data = client.calculate_reconnect_values(*server.reconnect_challenge_data()); let verified = 
server .verify_reconnection_attempt(reconnection_data.challenge_data, reconnection_data.proof); assert!(verified); }
32.458824
98
0.582095
3a67f9172105614f78ea3e29899970bbce06f844
8,606
/* * Copyright 2019 OysterPack Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! common nng errors use oysterpack_errors::IsError; use std::fmt; /// Failed to create socket #[derive(Debug)] pub struct SocketCreateError(nng::Error); impl SocketCreateError { /// Error Id pub const ERROR_ID: oysterpack_errors::Id = oysterpack_errors::Id(1870511279758140964159435436428736321); /// Level::Alert pub const ERROR_LEVEL: oysterpack_errors::Level = oysterpack_errors::Level::Alert; } impl IsError for SocketCreateError { fn error_id(&self) -> oysterpack_errors::Id { Self::ERROR_ID } fn error_level(&self) -> oysterpack_errors::Level { Self::ERROR_LEVEL } } impl fmt::Display for SocketCreateError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Failed to create nng socket: {}", self.0) } } impl From<nng::Error> for SocketCreateError { fn from(err: nng::Error) -> SocketCreateError { SocketCreateError(err) } } /// An error occurred when setting a socket option. 
#[derive(Debug)] pub struct SocketSetOptError(nng::Error); impl SocketSetOptError { /// Error Id pub const ERROR_ID: oysterpack_errors::Id = oysterpack_errors::Id(1870511354278148346409496152407634279); /// Level::Alert pub const ERROR_LEVEL: oysterpack_errors::Level = oysterpack_errors::Level::Alert; } impl IsError for SocketSetOptError { fn error_id(&self) -> oysterpack_errors::Id { Self::ERROR_ID } fn error_level(&self) -> oysterpack_errors::Level { Self::ERROR_LEVEL } } impl fmt::Display for SocketSetOptError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Failed to set nng socket option: {}", self.0) } } impl From<nng::Error> for SocketSetOptError { fn from(err: nng::Error) -> SocketSetOptError { SocketSetOptError(err) } } /// Failed to send a message on the socket #[derive(Debug)] pub struct SocketSendError(nng::Error); impl SocketSendError { /// Error Id pub const ERROR_ID: oysterpack_errors::Id = oysterpack_errors::Id(1870691045390492837758317571713575234); /// Level::Alert pub const ERROR_LEVEL: oysterpack_errors::Level = oysterpack_errors::Level::Alert; } impl IsError for SocketSendError { fn error_id(&self) -> oysterpack_errors::Id { Self::ERROR_ID } fn error_level(&self) -> oysterpack_errors::Level { Self::ERROR_LEVEL } } impl fmt::Display for SocketSendError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Failed to send nng message on socket: {}", self.0) } } impl From<(nng::Message, nng::Error)> for SocketSendError { fn from(err: (nng::Message, nng::Error)) -> SocketSendError { SocketSendError(err.1) } } /// Failed to receive a message on the socket #[derive(Debug)] pub struct SocketRecvError(nng::Error); impl SocketRecvError { /// Error Id pub const ERROR_ID: oysterpack_errors::Id = oysterpack_errors::Id(1870691257326561948476799832627658814); /// Level::Alert pub const ERROR_LEVEL: oysterpack_errors::Level = oysterpack_errors::Level::Alert; } impl IsError for SocketRecvError { fn error_id(&self) -> 
oysterpack_errors::Id { Self::ERROR_ID } fn error_level(&self) -> oysterpack_errors::Level { Self::ERROR_LEVEL } } impl fmt::Display for SocketRecvError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Failed to receive nng message on socket: {}", self.0) } } impl From<nng::Error> for SocketRecvError { fn from(err: nng::Error) -> SocketRecvError { SocketRecvError(err) } } /// Failed to create new socket context #[derive(Debug)] pub struct AioContextCreateError(nng::Error); impl AioContextCreateError { /// Error Id pub const ERROR_ID: oysterpack_errors::Id = oysterpack_errors::Id(1870374278155759380545373361718947172); /// Level::Error pub const ERROR_LEVEL: oysterpack_errors::Level = oysterpack_errors::Level::Error; } impl IsError for AioContextCreateError { fn error_id(&self) -> oysterpack_errors::Id { Self::ERROR_ID } fn error_level(&self) -> oysterpack_errors::Level { Self::ERROR_LEVEL } } impl fmt::Display for AioContextCreateError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Failed to create new nng socket context: {}", self.0) } } impl From<nng::Error> for AioContextCreateError { fn from(err: nng::Error) -> AioContextCreateError { AioContextCreateError(err) } } /// Failed to create new asynchronous I/O handle #[derive(Debug)] pub struct AioCreateError(nng::Error); impl AioCreateError { /// Error Id pub const ERROR_ID: oysterpack_errors::Id = oysterpack_errors::Id(1870510443603468311033495279443790945); /// Level::Alert pub const ERROR_LEVEL: oysterpack_errors::Level = oysterpack_errors::Level::Alert; } impl IsError for AioCreateError { fn error_id(&self) -> oysterpack_errors::Id { Self::ERROR_ID } fn error_level(&self) -> oysterpack_errors::Level { Self::ERROR_LEVEL } } impl fmt::Display for AioCreateError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Failed to create new nng::aio handle: {}", self.0) } } impl From<nng::Error> for AioCreateError { fn from(err: nng::Error) -> AioCreateError { 
AioCreateError(err) } } /// Aio receive operation failed #[derive(Debug)] pub struct AioReceiveError(nng::Error); impl AioReceiveError { /// Error Id pub const ERROR_ID: oysterpack_errors::Id = oysterpack_errors::Id(1870374078796088086815067802169113773); /// Level::Error pub const ERROR_LEVEL: oysterpack_errors::Level = oysterpack_errors::Level::Error; } impl IsError for AioReceiveError { fn error_id(&self) -> oysterpack_errors::Id { Self::ERROR_ID } fn error_level(&self) -> oysterpack_errors::Level { Self::ERROR_LEVEL } } impl fmt::Display for AioReceiveError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "nng::aio receive operation failed: {}", self.0) } } impl From<nng::Error> for AioReceiveError { fn from(err: nng::Error) -> AioReceiveError { AioReceiveError(err) } } /// Aio send operation failed #[derive(Debug)] pub struct AioSendError(nng::Error); impl AioSendError { /// Error Id pub const ERROR_ID: oysterpack_errors::Id = oysterpack_errors::Id(1870731804758238792469857071507712508); /// Level::Error pub const ERROR_LEVEL: oysterpack_errors::Level = oysterpack_errors::Level::Error; } impl IsError for AioSendError { fn error_id(&self) -> oysterpack_errors::Id { Self::ERROR_ID } fn error_level(&self) -> oysterpack_errors::Level { Self::ERROR_LEVEL } } impl fmt::Display for AioSendError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "nng::aio send operation failed: {}", self.0) } } impl From<nng::Error> for AioSendError { fn from(err: nng::Error) -> AioSendError { AioSendError(err) } } /// Aio error #[derive(Debug)] pub struct AioError(nng::Error); impl AioError { /// Error Id pub const ERROR_ID: oysterpack_errors::Id = oysterpack_errors::Id(1870909251735477333990432689425320576); /// Level::Error pub const ERROR_LEVEL: oysterpack_errors::Level = oysterpack_errors::Level::Error; } impl IsError for AioError { fn error_id(&self) -> oysterpack_errors::Id { Self::ERROR_ID } fn error_level(&self) -> oysterpack_errors::Level { 
Self::ERROR_LEVEL } } impl fmt::Display for AioError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "nng::aio error: {}", self.0) } } impl From<nng::Error> for AioError { fn from(err: nng::Error) -> AioError { AioError(err) } }
26.318043
86
0.666279
6afaa25c60c25634a9c5fceee3e505b7e6918fb5
1,631
use super::*; use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_system::RawOrigin; benchmarks! { create { let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller)) breed { let caller = whitelisted_caller(); let mut kitty = Kitty(Default::default()); let kitty_id = orml_nft::Pallet::<T>::mint(&caller, Pallet::<T>::class_id(), Vec::new(), kitty.clone())?; kitty.0[0] = 1; let kitty_id2 = orml_nft::Pallet::<T>::mint(&caller, Pallet::<T>::class_id(), Vec::new(), kitty)?; }: _(RawOrigin::Signed(caller), kitty_id, kitty_id2) transfer { let caller = whitelisted_caller(); let to = account("to", 0, 0); let kitty_id = orml_nft::Pallet::<T>::mint(&caller, Pallet::<T>::class_id(), Vec::new(), Kitty(Default::default()))?; }: _(RawOrigin::Signed(caller), to, kitty_id) set_price { let caller = whitelisted_caller(); let kitty_id = orml_nft::Pallet::<T>::mint(&caller, Pallet::<T>::class_id(), Vec::new(), Kitty(Default::default()))?; }: _(RawOrigin::Signed(caller), kitty_id, Some(100u32.into())) buy { let caller = whitelisted_caller(); let seller = account("seller", 0, 0); let _ = T::Currency::make_free_balance_be(&caller, 1000u32.into()); let kitty_id = orml_nft::Pallet::<T>::mint(&seller, Pallet::<T>::class_id(), Vec::new(), Kitty(Default::default()))?; Pallet::<T>::set_price(RawOrigin::Signed(seller.clone()).into(), kitty_id, Some(500u32.into()))?; }: _(RawOrigin::Signed(caller), seller, kitty_id, 500u32.into()) } impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test,);
32.62
119
0.670141
4acd046d18ce09bef0637bdecbe827c387fc03d0
1,398
//! Cheesy way to easily wrap text in console colors. //! Example: //! ``` //! use phd::color; //! println!("{}Error: {}{}", color::Red, "Something broke.", color::Reset); //! ``` use std::{ fmt, sync::atomic::{AtomicBool, Ordering as AtomicOrdering}, }; /// Whether to show colors or not. /// Defaults to true. static SHOW_COLORS: AtomicBool = AtomicBool::new(true); /// Hide colors. pub fn hide_colors() { SHOW_COLORS.swap(false, AtomicOrdering::Relaxed); } /// Are we showing colors are not? pub fn showing_colors() -> bool { SHOW_COLORS.load(AtomicOrdering::Relaxed) } macro_rules! color { ($t:ident, $code:expr) => { #[allow(missing_docs)] pub struct $t; impl fmt::Display for $t { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if showing_colors() { write!(f, "\x1b[{}m", $code) } else { write!(f, "") } } } }; } color!(Black, 90); color!(Red, 91); color!(Green, 92); color!(Yellow, 93); color!(Blue, 94); color!(Magenta, 95); color!(Cyan, 96); color!(White, 97); color!(DarkBlack, 30); color!(DarkRed, 31); color!(DarkGreen, 32); color!(DarkYellow, 33); color!(DarkBlue, 34); color!(DarkMagenta, 35); color!(DarkCyan, 36); color!(DarkWhite, 37); color!(Reset, 0); color!(Bold, 1); color!(Underline, 4);
21.84375
76
0.56867
e8b68224f1fdca07130c01212fe80956a05dc4f8
10,261
// Copyright (c) 2018-2021 Brendan Molloy <brendan@bbqsrc.net> // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::{ any::{Any, TypeId}, collections::HashMap, path::Path, rc::Rc, }; use std::{pin::Pin, time::Duration}; use futures::{Future, StreamExt}; use gherkin::ParseFileError; use regex::Regex; use crate::{criteria::Criteria, steps::Steps}; use crate::{EventHandler, World}; pub(crate) type LifecycleFuture = Pin<Box<dyn Future<Output = ()>>>; #[derive(Clone)] pub struct LifecycleContext { pub(crate) context: Rc<Context>, pub feature: Rc<gherkin::Feature>, pub rule: Option<Rc<gherkin::Rule>>, pub scenario: Option<Rc<gherkin::Scenario>>, } impl LifecycleContext { #[inline] pub fn get<T: Any>(&self) -> Option<&T> { self.context.get() } } pub type LifecycleFn = fn(LifecycleContext) -> LifecycleFuture; pub struct Cucumber<W: World> { context: Context, steps: Steps<W>, features: Vec<gherkin::Feature>, event_handler: Box<dyn EventHandler>, /// If `Some`, enforce an upper bound on the amount /// of time a step is allowed to execute. /// If `Some`, also avoid indefinite locks during /// step clean-up handling (i.e. to recover panic info) step_timeout: Option<Duration>, /// If true, capture stdout and stderr content /// during tests. 
enable_capture: bool, /// If given, filters the scenario which are run scenario_filter: Option<Regex>, language: Option<String>, debug: bool, before: Vec<(Criteria, LifecycleFn)>, after: Vec<(Criteria, LifecycleFn)>, } pub struct StepContext { context: Rc<Context>, pub step: Rc<gherkin::Step>, pub matches: Vec<String>, } impl StepContext { #[inline] pub(crate) fn new(context: Rc<Context>, step: Rc<gherkin::Step>, matches: Vec<String>) -> Self { Self { context, step, matches, } } #[inline] pub fn get<T: Any>(&self) -> Option<&T> { self.context.get() } } #[derive(Default)] pub struct Context { data: HashMap<TypeId, Box<dyn Any>>, } impl Context { pub fn new() -> Self { Default::default() } pub fn get<T: Any>(&self) -> Option<&T> { self.data .get(&TypeId::of::<T>()) .and_then(|x| x.downcast_ref::<T>()) } pub fn insert<T: Any>(&mut self, value: T) { self.data.insert(TypeId::of::<T>(), Box::new(value)); } pub fn add<T: Any>(mut self, value: T) -> Self { self.insert(value); self } } impl<W: World> Default for Cucumber<W> { fn default() -> Self { Cucumber { context: Default::default(), steps: Default::default(), features: Default::default(), event_handler: Box::new(crate::output::BasicOutput::new(false)), step_timeout: None, enable_capture: true, debug: false, scenario_filter: None, language: None, before: vec![], after: vec![], } } } impl<W: World> Cucumber<W> { /// Construct a default `Cucumber` instance. /// /// Comes with the default `EventHandler` implementation responsible for /// printing test execution progress. pub fn new() -> Cucumber<W> { Default::default() } /// Construct a `Cucumber` instance with a custom `EventHandler`. 
pub fn with_handler<O: EventHandler>(event_handler: O) -> Self { Cucumber { context: Default::default(), steps: Default::default(), features: Default::default(), event_handler: Box::new(event_handler), step_timeout: None, enable_capture: true, debug: false, scenario_filter: None, language: None, before: vec![], after: vec![], } } /// Add some steps to the Cucumber instance. /// /// Does *not* replace any previously added steps. pub fn steps(mut self, steps: Steps<W>) -> Self { self.steps.append(steps); self } /// A collection of directory paths that will be walked to /// find ".feature" files. /// /// Removes any previously-supplied features. pub fn features<P: AsRef<Path>>(mut self, features: impl IntoIterator<Item = P>) -> Self { let features = features .into_iter() .map(|path| match path.as_ref().canonicalize() { Ok(p) if p.ends_with(".feature") => { let env = match self.language.as_ref() { Some(lang) => gherkin::GherkinEnv::new(lang).unwrap(), None => Default::default(), }; vec![gherkin::Feature::parse_path(&p, env)] } Ok(p) => { let walker = globwalk::GlobWalkerBuilder::new(p, "*.feature") .case_insensitive(true) .build() .expect("feature path is invalid"); walker .filter_map(Result::ok) .map(|entry| { let env = match self.language.as_ref() { Some(lang) => gherkin::GherkinEnv::new(lang).unwrap(), None => Default::default(), }; gherkin::Feature::parse_path(entry.path(), env) }) .collect::<Vec<_>>() } Err(e) => { eprintln!("{}", e); eprintln!("There was an error parsing {:?}; aborting.", path.as_ref()); std::process::exit(1); } }) .flatten() .collect::<Result<Vec<_>, _>>(); let mut features = features.unwrap_or_else(|e| match e { ParseFileError::Reading { path, source } => { eprintln!("Error reading '{}':", path.display()); eprintln!("{:?}", source); std::process::exit(1); } ParseFileError::Parsing { path, error, source, } => { eprintln!("Error parsing '{}':", path.display()); if let Some(error) = error { eprintln!("{}", error); } eprintln!("{:?}", source); 
std::process::exit(1); } }); features.sort(); self.features = features; self } /// If `Some`, enforce an upper bound on the amount /// of time a step is allowed to execute. /// If `Some`, also avoid indefinite locks during /// step clean-up handling (i.e. to recover panic info) pub fn step_timeout(mut self, step_timeout: Duration) -> Self { self.step_timeout = Some(step_timeout); self } /// If true, capture stdout and stderr content /// during tests. pub fn enable_capture(mut self, enable_capture: bool) -> Self { self.enable_capture = enable_capture; self } pub fn scenario_regex(mut self, regex: &str) -> Self { let regex = Regex::new(regex).expect("Error compiling scenario regex"); self.scenario_filter = Some(regex); self } /// Call this to incorporate command line options into the configuration. pub fn cli(self) -> Self { let opts = crate::cli::make_app(); let mut s = self; if let Some(re) = opts.scenario_filter { s = s.scenario_regex(&re); } if opts.nocapture { s = s.enable_capture(false); } if opts.debug { s = s.debug(true); } s } /// Set the default language to assume for each .feature file. pub fn language(mut self, language: &str) -> Self { if gherkin::is_language_supported(language) { self.language = Some(language.to_string()); } else { eprintln!( "ERROR: Provided language '{}' not supported; ignoring.", language ); } self } pub fn before(mut self, criteria: Criteria, handler: LifecycleFn) -> Self { self.before.push((criteria, handler)); self } pub fn after(mut self, criteria: Criteria, handler: LifecycleFn) -> Self { self.after.push((criteria, handler)); self } /// Enable printing stdout and stderr for every step, regardless of error state. 
pub fn debug(mut self, value: bool) -> Self { self.event_handler = Box::new(crate::output::BasicOutput::new(value)); self.debug = value; self } pub fn context(mut self, context: Context) -> Self { self.context = context; self } /// Run and report number of errors if any pub async fn run(mut self) -> crate::runner::RunResult { let runner = crate::runner::Runner::new( Rc::new(self.context), self.steps.steps, Rc::new(self.features), self.step_timeout, self.enable_capture, self.scenario_filter, self.before, self.after, ); let mut stream = runner.run(); while let Some(event) = stream.next().await { self.event_handler.handle_event(&event); if let crate::event::CucumberEvent::Finished(result) = event { return result; } } unreachable!("CucumberEvent::Finished must be fired") } /// Convenience function to run all tests and exit with error code 1 on failure. pub async fn run_and_exit(self) { let code = if self.run().await.failed() { 1 } else { 0 }; std::process::exit(code); } }
29.656069
100
0.532794
876bbc02d264bd54e07aca754315c615920dc28f
53
fn main() { let i = ~100; assert *i == 100; }
13.25
21
0.433962
6af3cd035bae12cf9af6be5170b0f81cba89b9a4
2,874
extern crate physics2d; mod testbed; use physics2d::*; use physics2d::debug::DebugCollision; struct CollisionsTestbed { world: World, } impl CollisionsTestbed { pub fn new(config: &testbed::Config) -> CollisionsTestbed { let window_width = config.window_width as f32 / config.pixels_per_unit; let window_height = config.window_height as f32 / config.pixels_per_unit; let ground_width = window_width / 2.0; let ground_height = window_height / 10.0; let ground_vertices = box_vertices(ground_width, ground_height); let ground_poly = shapes::Polygon::new(ground_vertices); let mut ground = Body::new(ground_poly.into_shape(), 10.0, Material::new(1.2, 0.2)); ground.transform.position.y = -window_height / 2.0 + ground_height / 2.0 + 0.1; ground.set_static(); let mut world = World::default(); let obs_circle = shapes::Circle::new(5.0); let mut obstacle = Body::new(obs_circle.into_shape(), 0.0, Material::new(0.8, 0.8)); obstacle.set_static(); world.add_body(ground); world.add_body(obstacle); CollisionsTestbed { world, } } } impl testbed::Testbed for CollisionsTestbed { fn sfml_loop(&mut self, input: &testbed::Input, dt: f32) { if input.left_mouse_released { let vertices = box_vertices(5.0, 5.0); let polygon = shapes::Polygon::new(vertices); let mut body = Body::new(polygon.into_shape(), 10.0, Material::new(0.3, 0.3)); body.transform.position = input.mouse_position; body.transform.set_rotation(0.2); self.world.add_body(body); } self.world.update(dt); } fn sfml_draw(&mut self, canvas: &mut testbed::Canvas, dt: f32) { let bodies = self.world.bodies_iter(); let body_count = self.world.body_count(); for body in bodies { canvas.draw_body(body); } canvas.draw_text(format!("FPS: {}", 1.0 / dt), 16); canvas.draw_text(format!("Body count: {}", body_count), 16); for contact in self.world.contacts() { canvas.draw_point(contact.position); canvas.draw_line(contact.position, contact.position + contact.normal * contact.penetration) } } } fn box_vertices(w: f32, h: f32) -> Vec<Vec2> { 
vec![Vec2::ZERO, Vec2::RIGHT * w, Vec2::new(w, h), Vec2::UP * h] } fn main() { let config = testbed::Config { title: "Collisions".to_string(), window_width: 800, window_height: 600, pixels_per_unit: 10.0, }; let testbed = CollisionsTestbed::new(&config); testbed::run(testbed, config); }
29.9375
103
0.580724
1ebeda4276084050d99f6b719acc6495cafdd18f
696
/** * [258] Add Digits * * Given a non-negative integer num, repeatedly add all its digits until the result has only one digit. * * Example: * * * Input: 38 * Output: 2 * Explanation: The process is like: 3 + 8 = 11, 1 + 1 = 2. * Since 2 has only one digit, return it. * * * Follow up:<br /> * Could you do it without any loop/recursion in O(1) runtime? */ pub struct Solution {} // submission codes start here impl Solution { pub fn add_digits(num: i32) -> i32 { 1 + ((num - 1) % 9) } } // submission codes end #[cfg(test)] mod tests { use super::*; #[test] fn test_258() { assert_eq!(Solution::add_digits(1234), 1); } }
17.846154
103
0.579023
381f11ff1efcc0265fef35d44600c28a503e2918
1,097
// ignore-windows // // compile-flags: -g -C no-prepopulate-passes --remap-path-prefix={{cwd}}=/the/cwd --remap-path-prefix={{src-base}}=/the/src // aux-build:remap_path_prefix_aux.rs extern crate remap_path_prefix_aux; // Here we check that submodules and include files are found using the path without // remapping. This test requires that rustc is called with an absolute path. mod aux_mod; include!("aux_mod.rs"); // Here we check that the expansion of the file!() macro is mapped. // CHECK: @alloc2 = private unnamed_addr constant <{ [34 x i8] }> <{ [34 x i8] c"/the/src/remap_path_prefix/main.rs" }>, align 1 pub static FILE_PATH: &'static str = file!(); fn main() { remap_path_prefix_aux::some_aux_function(); aux_mod::some_aux_mod_function(); some_aux_mod_function(); } // Here we check that local debuginfo is mapped correctly. // CHECK: !DIFile(filename: "/the/src/remap_path_prefix/main.rs", directory: "" // And here that debuginfo from other crates are expanded to absolute paths. // CHECK: !DIFile(filename: "/the/aux-src/remap_path_prefix_aux.rs", directory: ""
37.827586
128
0.724704
28b3732ced161d1cee55f2659a89608a54c90143
8,066
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::EventController; use crate::Gesture; use crate::GestureSingle; use crate::PropagationPhase; use crate::Widget; use glib::object::Cast; use glib::object::IsA; use glib::object::ObjectType as ObjectType_; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use glib::ToValue; use std::boxed::Box as Box_; use std::fmt; use std::mem::transmute; glib::wrapper! { #[doc(alias = "GtkGestureMultiPress")] pub struct GestureMultiPress(Object<ffi::GtkGestureMultiPress, ffi::GtkGestureMultiPressClass>) @extends GestureSingle, Gesture, EventController; match fn { type_ => || ffi::gtk_gesture_multi_press_get_type(), } } impl GestureMultiPress { #[doc(alias = "gtk_gesture_multi_press_new")] pub fn new(widget: &impl IsA<Widget>) -> GestureMultiPress { skip_assert_initialized!(); unsafe { Gesture::from_glib_full(ffi::gtk_gesture_multi_press_new( widget.as_ref().to_glib_none().0, )) .unsafe_cast() } } // rustdoc-stripper-ignore-next /// Creates a new builder-pattern struct instance to construct [`GestureMultiPress`] objects. /// /// This method returns an instance of [`GestureMultiPressBuilder`] which can be used to create [`GestureMultiPress`] objects. 
pub fn builder() -> GestureMultiPressBuilder { GestureMultiPressBuilder::default() } #[doc(alias = "gtk_gesture_multi_press_get_area")] #[doc(alias = "get_area")] pub fn area(&self) -> Option<gdk::Rectangle> { unsafe { let mut rect = gdk::Rectangle::uninitialized(); let ret = from_glib(ffi::gtk_gesture_multi_press_get_area( self.to_glib_none().0, rect.to_glib_none_mut().0, )); if ret { Some(rect) } else { None } } } #[doc(alias = "gtk_gesture_multi_press_set_area")] pub fn set_area(&self, rect: Option<&gdk::Rectangle>) { unsafe { ffi::gtk_gesture_multi_press_set_area(self.to_glib_none().0, rect.to_glib_none().0); } } #[doc(alias = "pressed")] pub fn connect_pressed<F: Fn(&Self, i32, f64, f64) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn pressed_trampoline< F: Fn(&GestureMultiPress, i32, f64, f64) + 'static, >( this: *mut ffi::GtkGestureMultiPress, n_press: libc::c_int, x: libc::c_double, y: libc::c_double, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this), n_press, x, y) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"pressed\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( pressed_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } #[doc(alias = "released")] pub fn connect_released<F: Fn(&Self, i32, f64, f64) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn released_trampoline< F: Fn(&GestureMultiPress, i32, f64, f64) + 'static, >( this: *mut ffi::GtkGestureMultiPress, n_press: libc::c_int, x: libc::c_double, y: libc::c_double, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this), n_press, x, y) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"released\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( released_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } #[doc(alias = "stopped")] pub fn connect_stopped<F: Fn(&Self) + 'static>(&self, 
f: F) -> SignalHandlerId { unsafe extern "C" fn stopped_trampoline<F: Fn(&GestureMultiPress) + 'static>( this: *mut ffi::GtkGestureMultiPress, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"stopped\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( stopped_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } } impl Default for GestureMultiPress { fn default() -> Self { glib::object::Object::new::<Self>(&[]) .expect("Can't construct GestureMultiPress object with default parameters") } } #[derive(Clone, Default)] // rustdoc-stripper-ignore-next /// A [builder-pattern] type to construct [`GestureMultiPress`] objects. /// /// [builder-pattern]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html pub struct GestureMultiPressBuilder { button: Option<u32>, exclusive: Option<bool>, touch_only: Option<bool>, n_points: Option<u32>, window: Option<gdk::Window>, propagation_phase: Option<PropagationPhase>, widget: Option<Widget>, } impl GestureMultiPressBuilder { // rustdoc-stripper-ignore-next /// Create a new [`GestureMultiPressBuilder`]. pub fn new() -> Self { Self::default() } // rustdoc-stripper-ignore-next /// Build the [`GestureMultiPress`]. 
pub fn build(self) -> GestureMultiPress { let mut properties: Vec<(&str, &dyn ToValue)> = vec![]; if let Some(ref button) = self.button { properties.push(("button", button)); } if let Some(ref exclusive) = self.exclusive { properties.push(("exclusive", exclusive)); } if let Some(ref touch_only) = self.touch_only { properties.push(("touch-only", touch_only)); } if let Some(ref n_points) = self.n_points { properties.push(("n-points", n_points)); } if let Some(ref window) = self.window { properties.push(("window", window)); } if let Some(ref propagation_phase) = self.propagation_phase { properties.push(("propagation-phase", propagation_phase)); } if let Some(ref widget) = self.widget { properties.push(("widget", widget)); } glib::Object::new::<GestureMultiPress>(&properties) .expect("Failed to create an instance of GestureMultiPress") } pub fn button(mut self, button: u32) -> Self { self.button = Some(button); self } pub fn exclusive(mut self, exclusive: bool) -> Self { self.exclusive = Some(exclusive); self } pub fn touch_only(mut self, touch_only: bool) -> Self { self.touch_only = Some(touch_only); self } pub fn n_points(mut self, n_points: u32) -> Self { self.n_points = Some(n_points); self } pub fn window(mut self, window: &gdk::Window) -> Self { self.window = Some(window.clone()); self } pub fn propagation_phase(mut self, propagation_phase: PropagationPhase) -> Self { self.propagation_phase = Some(propagation_phase); self } pub fn widget(mut self, widget: &impl IsA<Widget>) -> Self { self.widget = Some(widget.clone().upcast()); self } } impl fmt::Display for GestureMultiPress { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("GestureMultiPress") } }
32.135458
149
0.561741
38e6113f46f04f365d298e08d997d98b53b70da8
2,554
use crate::request::prelude::*; use std::{ error::Error, fmt::{Display, Formatter, Result as FmtResult}, }; use twilight_model::{ guild::GuildPrune, id::{GuildId, RoleId}, }; /// The error created when the guild prune count can not be requested as configured. #[derive(Clone, Debug)] pub enum GetGuildPruneCountError { /// The number of days is 0. DaysInvalid, } impl Display for GetGuildPruneCountError { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { match self { Self::DaysInvalid => f.write_str("the number of days is invalid"), } } } impl Error for GetGuildPruneCountError {} #[derive(Default)] struct GetGuildPruneCountFields { days: Option<u64>, include_roles: Vec<u64>, } /// Get the counts of guild members to be pruned. pub struct GetGuildPruneCount<'a> { fields: GetGuildPruneCountFields, fut: Option<Pending<'a, GuildPrune>>, guild_id: GuildId, http: &'a Client, } impl<'a> GetGuildPruneCount<'a> { pub(crate) fn new(http: &'a Client, guild_id: GuildId) -> Self { Self { fields: GetGuildPruneCountFields::default(), fut: None, guild_id, http, } } /// Set the number of days that a user must be inactive before being /// able to be pruned. /// /// The number of days must be greater than 0. /// /// # Errors /// /// Returns [`GetGuildPruneCountError::DaysInvalid`] if the number of days /// is 0. 
/// /// [`GetGuildPruneCountError::DaysInvalid`]: enum.GetGuildPruneCountError.html#variant.DaysInvalid pub fn days(mut self, days: u64) -> Result<Self, GetGuildPruneCountError> { if validate::guild_prune_days(days) { return Err(GetGuildPruneCountError::DaysInvalid); } self.fields.days.replace(days); Ok(self) } /// List of roles to include when calculating prune count pub fn include_roles(mut self, roles: impl Iterator<Item = RoleId>) -> Self { let roles = roles.map(|e| e.0).collect::<Vec<_>>(); self.fields.include_roles = roles; self } fn start(&mut self) -> Result<()> { self.fut.replace(Box::pin(self.http.request(Request::from( Route::GetGuildPruneCount { days: self.fields.days, guild_id: self.guild_id.0, include_roles: self.fields.include_roles.clone(), }, )))); Ok(()) } } poll_req!(GetGuildPruneCount<'_>, GuildPrune);
26.604167
103
0.608066
215c2948389ff6644751de05b8236e33c0fe3b0e
31,180
//! A library for working with [Apache Avro](https://avro.apache.org/) in Rust. //! //! Please check our [documentation](https://docs.rs/avro-rs) for examples, tutorials and API reference. //! //! **[Apache Avro](https://avro.apache.org/)** is a data serialization system which provides rich //! data structures and a compact, fast, binary data format. //! //! All data in Avro is schematized, as in the following example: //! //! ```text //! { //! "type": "record", //! "name": "test", //! "fields": [ //! {"name": "a", "type": "long", "default": 42}, //! {"name": "b", "type": "string"} //! ] //! } //! ``` //! //! There are basically two ways of handling Avro data in Rust: //! //! * **as Avro-specialized data types** based on an Avro schema; //! * **as generic Rust serde-compatible types** implementing/deriving `Serialize` and //! `Deserialize`; //! //! **avro-rs** provides a way to read and write both these data representations easily and //! efficiently. //! //! # Installing the library //! //! //! Add to your `Cargo.toml`: //! //! ```toml //! [dependencies] //! avro-rs = "x.y" //! ``` //! //! Or in case you want to leverage the **Snappy** codec: //! //! ```toml //! [dependencies.avro-rs] //! version = "x.y" //! features = ["snappy"] //! ``` //! //! # Upgrading to a newer minor version //! //! The library is still in beta, so there might be backward-incompatible changes between minor //! versions. If you have troubles upgrading, check the [version upgrade guide](migration_guide.md). //! //! # Defining a schema //! //! An Avro data cannot exist without an Avro schema. Schemas **must** be used while writing and //! **can** be used while reading and they carry the information regarding the type of data we are //! handling. Avro schemas are used for both schema validation and resolution of Avro data. //! //! Avro schemas are defined in **JSON** format and can just be parsed out of a raw string: //! //! ``` //! use avro_rs::Schema; //! //! let raw_schema = r#" //! { //! 
"type": "record", //! "name": "test", //! "fields": [ //! {"name": "a", "type": "long", "default": 42}, //! {"name": "b", "type": "string"} //! ] //! } //! "#; //! //! // if the schema is not valid, this function will return an error //! let schema = Schema::parse_str(raw_schema).unwrap(); //! //! // schemas can be printed for debugging //! println!("{:?}", schema); //! ``` //! //! The library provides also a programmatic interface to define schemas without encoding them in //! JSON (for advanced use), but we highly recommend the JSON interface. Please read the API //! reference in case you are interested. //! //! For more information about schemas and what kind of information you can encapsulate in them, //! please refer to the appropriate section of the //! [Avro Specification](https://avro.apache.org/docs/current/spec.html#schemas). //! //! # Writing data //! //! Once we have defined a schema, we are ready to serialize data in Avro, validating them against //! the provided schema in the process. As mentioned before, there are two ways of handling Avro //! data in Rust. //! //! **NOTE:** The library also provides a low-level interface for encoding a single datum in Avro //! bytecode without generating markers and headers (for advanced use), but we highly recommend the //! `Writer` interface to be totally Avro-compatible. Please read the API reference in case you are //! interested. //! //! ## The avro way //! //! Given that the schema we defined above is that of an Avro *Record*, we are going to use the //! associated type provided by the library to specify the data we want to serialize: //! //! ``` //! # use avro_rs::Schema; //! use avro_rs::types::Record; //! use avro_rs::Writer; //! # //! # let raw_schema = r#" //! # { //! # "type": "record", //! # "name": "test", //! # "fields": [ //! # {"name": "a", "type": "long", "default": 42}, //! # {"name": "b", "type": "string"} //! # ] //! # } //! # "#; //! # let schema = Schema::parse_str(raw_schema).unwrap(); //! 
// a writer needs a schema and something to write to //! let mut writer = Writer::new(&schema, Vec::new()); //! //! // the Record type models our Record schema //! let mut record = Record::new(writer.schema()).unwrap(); //! record.put("a", 27i64); //! record.put("b", "foo"); //! //! // schema validation happens here //! writer.append(record).unwrap(); //! //! // this is how to get back the resulting avro bytecode //! // this performs a flush operation to make sure data has been written, so it can fail //! // you can also call `writer.flush()` yourself without consuming the writer //! let encoded = writer.into_inner().unwrap(); //! ``` //! //! The vast majority of the times, schemas tend to define a record as a top-level container //! encapsulating all the values to convert as fields and providing documentation for them, but in //! case we want to directly define an Avro value, the library offers that capability via the //! `Value` interface. //! //! ``` //! use avro_rs::types::Value; //! //! let mut value = Value::String("foo".to_string()); //! ``` //! //! ## The serde way //! //! Given that the schema we defined above is an Avro *Record*, we can directly use a Rust struct //! deriving `Serialize` to model our data: //! //! ``` //! # use avro_rs::Schema; //! # use serde::Serialize; //! use avro_rs::Writer; //! //! #[derive(Debug, Serialize)] //! struct Test { //! a: i64, //! b: String, //! } //! //! # let raw_schema = r#" //! # { //! # "type": "record", //! # "name": "test", //! # "fields": [ //! # {"name": "a", "type": "long", "default": 42}, //! # {"name": "b", "type": "string"} //! # ] //! # } //! # "#; //! # let schema = Schema::parse_str(raw_schema).unwrap(); //! // a writer needs a schema and something to write to //! let mut writer = Writer::new(&schema, Vec::new()); //! //! // the structure models our Record schema //! let test = Test { //! a: 27, //! b: "foo".to_owned(), //! }; //! //! // schema validation happens here //! 
writer.append_ser(test).unwrap(); //! //! // this is how to get back the resulting avro bytecode //! // this performs a flush operation to make sure data is written, so it can fail //! // you can also call `writer.flush()` yourself without consuming the writer //! let encoded = writer.into_inner(); //! ``` //! //! The vast majority of the times, schemas tend to define a record as a top-level container //! encapsulating all the values to convert as fields and providing documentation for them, but in //! case we want to directly define an Avro value, any type implementing `Serialize` should work. //! //! ``` //! let mut value = "foo".to_string(); //! ``` //! //! ## Using codecs to compress data //! //! Avro supports three different compression codecs when encoding data: //! //! * **Null**: leaves data uncompressed; //! * **Deflate**: writes the data block using the deflate algorithm as specified in RFC 1951, and //! typically implemented using the zlib library. Note that this format (unlike the "zlib format" in //! RFC 1950) does not have a checksum. //! * **Snappy**: uses Google's [Snappy](http://google.github.io/snappy/) compression library. Each //! compressed block is followed by the 4-byte, big-endianCRC32 checksum of the uncompressed data in //! the block. You must enable the `snappy` feature to use this codec. //! //! To specify a codec to use to compress data, just specify it while creating a `Writer`: //! ``` //! # use avro_rs::Schema; //! use avro_rs::Writer; //! use avro_rs::Codec; //! # //! # let raw_schema = r#" //! # { //! # "type": "record", //! # "name": "test", //! # "fields": [ //! # {"name": "a", "type": "long", "default": 42}, //! # {"name": "b", "type": "string"} //! # ] //! # } //! # "#; //! # let schema = Schema::parse_str(raw_schema).unwrap(); //! let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Deflate); //! ``` //! //! # Reading data //! //! 
As far as reading Avro encoded data goes, we can just use the schema encoded with the data to //! read them. The library will do it automatically for us, as it already does for the compression //! codec: //! //! ``` //! use avro_rs::Reader; //! # use avro_rs::Schema; //! # use avro_rs::types::Record; //! # use avro_rs::Writer; //! # //! # let raw_schema = r#" //! # { //! # "type": "record", //! # "name": "test", //! # "fields": [ //! # {"name": "a", "type": "long", "default": 42}, //! # {"name": "b", "type": "string"} //! # ] //! # } //! # "#; //! # let schema = Schema::parse_str(raw_schema).unwrap(); //! # let mut writer = Writer::new(&schema, Vec::new()); //! # let mut record = Record::new(writer.schema()).unwrap(); //! # record.put("a", 27i64); //! # record.put("b", "foo"); //! # writer.append(record).unwrap(); //! # let input = writer.into_inner().unwrap(); //! // reader creation can fail in case the input to read from is not Avro-compatible or malformed //! let reader = Reader::new(&input[..]).unwrap(); //! ``` //! //! In case, instead, we want to specify a different (but compatible) reader schema from the schema //! the data has been written with, we can just do as the following: //! ``` //! use avro_rs::Schema; //! use avro_rs::Reader; //! # use avro_rs::types::Record; //! # use avro_rs::Writer; //! # //! # let writer_raw_schema = r#" //! # { //! # "type": "record", //! # "name": "test", //! # "fields": [ //! # {"name": "a", "type": "long", "default": 42}, //! # {"name": "b", "type": "string"} //! # ] //! # } //! # "#; //! # let writer_schema = Schema::parse_str(writer_raw_schema).unwrap(); //! # let mut writer = Writer::new(&writer_schema, Vec::new()); //! # let mut record = Record::new(writer.schema()).unwrap(); //! # record.put("a", 27i64); //! # record.put("b", "foo"); //! # writer.append(record).unwrap(); //! # let input = writer.into_inner().unwrap(); //! //! let reader_raw_schema = r#" //! { //! "type": "record", //! "name": "test", //! 
"fields": [ //! {"name": "a", "type": "long", "default": 42}, //! {"name": "b", "type": "string"}, //! {"name": "c", "type": "long", "default": 43} //! ] //! } //! "#; //! //! let reader_schema = Schema::parse_str(reader_raw_schema).unwrap(); //! //! // reader creation can fail in case the input to read from is not Avro-compatible or malformed //! let reader = Reader::with_schema(&reader_schema, &input[..]).unwrap(); //! ``` //! //! The library will also automatically perform schema resolution while reading the data. //! //! For more information about schema compatibility and resolution, please refer to the //! [Avro Specification](https://avro.apache.org/docs/current/spec.html#schemas). //! //! As usual, there are two ways to handle Avro data in Rust, as you can see below. //! //! **NOTE:** The library also provides a low-level interface for decoding a single datum in Avro //! bytecode without markers and header (for advanced use), but we highly recommend the `Reader` //! interface to leverage all Avro features. Please read the API reference in case you are //! interested. //! //! //! ## The avro way //! //! We can just read directly instances of `Value` out of the `Reader` iterator: //! //! ``` //! # use avro_rs::Schema; //! # use avro_rs::types::Record; //! # use avro_rs::Writer; //! use avro_rs::Reader; //! # //! # let raw_schema = r#" //! # { //! # "type": "record", //! # "name": "test", //! # "fields": [ //! # {"name": "a", "type": "long", "default": 42}, //! # {"name": "b", "type": "string"} //! # ] //! # } //! # "#; //! # let schema = Schema::parse_str(raw_schema).unwrap(); //! # let schema = Schema::parse_str(raw_schema).unwrap(); //! # let mut writer = Writer::new(&schema, Vec::new()); //! # let mut record = Record::new(writer.schema()).unwrap(); //! # record.put("a", 27i64); //! # record.put("b", "foo"); //! # writer.append(record).unwrap(); //! # let input = writer.into_inner().unwrap(); //! let reader = Reader::new(&input[..]).unwrap(); //! //! 
// value is a Result of an Avro Value in case the read operation fails //! for value in reader { //! println!("{:?}", value.unwrap()); //! } //! //! ``` //! //! ## The serde way //! //! Alternatively, we can use a Rust type implementing `Deserialize` and representing our schema to //! read the data into: //! //! ``` //! # use avro_rs::Schema; //! # use avro_rs::Writer; //! # use serde::{Deserialize, Serialize}; //! use avro_rs::Reader; //! use avro_rs::from_value; //! //! # #[derive(Serialize)] //! #[derive(Debug, Deserialize)] //! struct Test { //! a: i64, //! b: String, //! } //! //! # let raw_schema = r#" //! # { //! # "type": "record", //! # "name": "test", //! # "fields": [ //! # {"name": "a", "type": "long", "default": 42}, //! # {"name": "b", "type": "string"} //! # ] //! # } //! # "#; //! # let schema = Schema::parse_str(raw_schema).unwrap(); //! # let mut writer = Writer::new(&schema, Vec::new()); //! # let test = Test { //! # a: 27, //! # b: "foo".to_owned(), //! # }; //! # writer.append_ser(test).unwrap(); //! # let input = writer.into_inner().unwrap(); //! let reader = Reader::new(&input[..]).unwrap(); //! //! // value is a Result in case the read operation fails //! for value in reader { //! println!("{:?}", from_value::<Test>(&value.unwrap())); //! } //! ``` //! //! # Putting everything together //! //! The following is an example of how to combine everything showed so far and it is meant to be a //! quick reference of the library interface: //! //! ``` //! use avro_rs::{Codec, Reader, Schema, Writer, from_value, types::Record, Error}; //! use serde::{Deserialize, Serialize}; //! //! #[derive(Debug, Deserialize, Serialize)] //! struct Test { //! a: i64, //! b: String, //! } //! //! fn main() -> Result<(), Error> { //! let raw_schema = r#" //! { //! "type": "record", //! "name": "test", //! "fields": [ //! {"name": "a", "type": "long", "default": 42}, //! {"name": "b", "type": "string"} //! ] //! } //! "#; //! //! 
let schema = Schema::parse_str(raw_schema)?; //! //! println!("{:?}", schema); //! //! let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Deflate); //! //! let mut record = Record::new(writer.schema()).unwrap(); //! record.put("a", 27i64); //! record.put("b", "foo"); //! //! writer.append(record)?; //! //! let test = Test { //! a: 27, //! b: "foo".to_owned(), //! }; //! //! writer.append_ser(test)?; //! //! let input = writer.into_inner()?; //! let reader = Reader::with_schema(&schema, &input[..])?; //! //! for record in reader { //! println!("{:?}", from_value::<Test>(&record?)); //! } //! Ok(()) //! } //! ``` //! //! `avro-rs` also supports the logical types listed in the [Avro specification](https://avro.apache.org/docs/current/spec.html#Logical+Types): //! //! 1. `Decimal` using the [`num_bigint`](https://docs.rs/num-bigint/0.2.6/num_bigint) crate //! 1. UUID using the [`uuid`](https://docs.rs/uuid/0.8.1/uuid) crate //! 1. Date, Time (milli) as `i32` and Time (micro) as `i64` //! 1. Timestamp (milli and micro) as `i64` //! 1. Duration as a custom type with `months`, `days` and `millis` accessor methods each of which returns an `i32` //! //! Note that the on-disk representation is identical to the underlying primitive/complex type. //! //! ### Read and write logical types //! //! ```rust //! use avro_rs::{ //! types::Record, types::Value, Codec, Days, Decimal, Duration, Millis, Months, Reader, Schema, //! Writer, Error, //! }; //! use num_bigint::ToBigInt; //! //! fn main() -> Result<(), Error> { //! let raw_schema = r#" //! { //! "type": "record", //! "name": "test", //! "fields": [ //! { //! "name": "decimal_fixed", //! "type": { //! "type": "fixed", //! "size": 2, //! "name": "decimal" //! }, //! "logicalType": "decimal", //! "precision": 4, //! "scale": 2 //! }, //! { //! "name": "decimal_var", //! "type": "bytes", //! "logicalType": "decimal", //! "precision": 10, //! "scale": 3 //! }, //! { //! "name": "uuid", //! "type": "string", //! 
"logicalType": "uuid" //! }, //! { //! "name": "date", //! "type": "int", //! "logicalType": "date" //! }, //! { //! "name": "time_millis", //! "type": "int", //! "logicalType": "time-millis" //! }, //! { //! "name": "time_micros", //! "type": "long", //! "logicalType": "time-micros" //! }, //! { //! "name": "timestamp_millis", //! "type": "long", //! "logicalType": "timestamp-millis" //! }, //! { //! "name": "timestamp_micros", //! "type": "long", //! "logicalType": "timestamp-micros" //! }, //! { //! "name": "duration", //! "type": { //! "type": "fixed", //! "size": 12, //! "name": "duration" //! }, //! "logicalType": "duration" //! } //! ] //! } //! "#; //! //! let schema = Schema::parse_str(raw_schema)?; //! //! println!("{:?}", schema); //! //! let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Deflate); //! //! let mut record = Record::new(writer.schema()).unwrap(); //! record.put("decimal_fixed", Decimal::from(9936.to_bigint().unwrap().to_signed_bytes_be())); //! record.put("decimal_var", Decimal::from((-32442.to_bigint().unwrap()).to_signed_bytes_be())); //! record.put("uuid", uuid::Uuid::new_v4()); //! record.put("date", Value::Date(1)); //! record.put("time_millis", Value::TimeMillis(2)); //! record.put("time_micros", Value::TimeMicros(3)); //! record.put("timestamp_millis", Value::TimestampMillis(4)); //! record.put("timestamp_micros", Value::TimestampMicros(5)); //! record.put("duration", Duration::new(Months::new(6), Days::new(7), Millis::new(8))); //! //! writer.append(record)?; //! //! let input = writer.into_inner()?; //! let reader = Reader::with_schema(&schema, &input[..])?; //! //! for record in reader { //! println!("{:?}", record?); //! } //! Ok(()) //! } //! ``` //! //! ## Calculate Avro schema fingerprint //! //! This library supports calculating the following fingerprints: //! //! - SHA-256 //! - MD5 //! - Rabin //! //! An example of fingerprinting for the supported fingerprints: //! //! ```rust //! 
use avro_rs::rabin::Rabin; //! use avro_rs::{Schema, Error}; //! use md5::Md5; //! use sha2::Sha256; //! //! fn main() -> Result<(), Error> { //! let raw_schema = r#" //! { //! "type": "record", //! "name": "test", //! "fields": [ //! {"name": "a", "type": "long", "default": 42}, //! {"name": "b", "type": "string"} //! ] //! } //! "#; //! let schema = Schema::parse_str(raw_schema)?; //! println!("{}", schema.fingerprint::<Sha256>()); //! println!("{}", schema.fingerprint::<Md5>()); //! println!("{}", schema.fingerprint::<Rabin>()); //! Ok(()) //! } //! ``` //! //! ## Ill-formed data //! //! In order to ease decoding, the Binary Encoding specification of Avro data //! requires some fields to have their length encoded alongside the data. //! //! If encoded data passed to a `Reader` has been ill-formed, it can happen that //! the bytes meant to contain the length of data are bogus and could result //! in extravagant memory allocation. //! //! To shield users from ill-formed data, `avro-rs` sets a limit (default: 512MB) //! to any allocation it will perform when decoding data. //! //! If you expect some of your data fields to be larger than this limit, be sure //! to make use of the `max_allocation_bytes` function before reading **any** data //! (we leverage Rust's [`std::sync::Once`](https://doc.rust-lang.org/std/sync/struct.Once.html) //! mechanism to initialize this value, if //! any call to decode is made before a call to `max_allocation_bytes`, the limit //! will be 512MB throughout the lifetime of the program). //! //! //! ```rust //! use avro_rs::max_allocation_bytes; //! //! max_allocation_bytes(2 * 1024 * 1024 * 1024); // 2GB //! //! // ... happily decode large data //! //! ``` //! //! ## Check schemas compatibility //! //! This library supports checking for schemas compatibility. //! //! Note: It does not yet support named schemas (more on //! https://github.com/flavray/avro-rs/pull/76). //! //! Examples of checking for compatibility: //! //! 1. 
Compatible schemas //! //! Explanation: an int array schema can be read by a long array schema- an int //! (32bit signed integer) fits into a long (64bit signed integer) //! //! ```rust //! use avro_rs::{Schema, schema_compatibility::SchemaCompatibility}; //! //! let writers_schema = Schema::parse_str(r#"{"type": "array", "items":"int"}"#).unwrap(); //! let readers_schema = Schema::parse_str(r#"{"type": "array", "items":"long"}"#).unwrap(); //! assert_eq!(true, SchemaCompatibility::can_read(&writers_schema, &readers_schema)); //! ``` //! //! 2. Incompatible schemas (a long array schema cannot be read by an int array schema) //! //! Explanation: a long array schema cannot be read by an int array schema- a //! long (64bit signed integer) does not fit into an int (32bit signed integer) //! //! ```rust //! use avro_rs::{Schema, schema_compatibility::SchemaCompatibility}; //! //! let writers_schema = Schema::parse_str(r#"{"type": "array", "items":"long"}"#).unwrap(); //! let readers_schema = Schema::parse_str(r#"{"type": "array", "items":"int"}"#).unwrap(); //! assert_eq!(false, SchemaCompatibility::can_read(&writers_schema, &readers_schema)); //! ``` mod codec; mod de; mod decimal; mod decode; mod duration; mod encode; mod error; mod reader; mod ser; mod util; mod writer; pub mod rabin; pub mod schema; pub mod schema_compatibility; pub mod types; pub use codec::Codec; pub use de::from_value; pub use decimal::Decimal; pub use duration::{Days, Duration, Millis, Months}; pub use error::{Error, Error as DeError, Error as SerError}; pub use reader::{from_avro_datum, Reader}; pub use schema::Schema; pub use ser::to_value; pub use util::max_allocation_bytes; pub use writer::{to_avro_datum, Writer}; /// A convenience type alias for `Result`s with `Error`s. 
pub type AvroResult<T> = Result<T, Error>; #[cfg(test)] mod tests { use crate::{ from_avro_datum, types::{Record, Value}, Codec, Reader, Schema, Writer, }; //TODO: move where it fits better #[test] fn test_enum_default() { let writer_raw_schema = r#" { "type": "record", "name": "test", "fields": [ {"name": "a", "type": "long", "default": 42}, {"name": "b", "type": "string"} ] } "#; let reader_raw_schema = r#" { "type": "record", "name": "test", "fields": [ {"name": "a", "type": "long", "default": 42}, {"name": "b", "type": "string"}, { "name": "c", "type": { "type": "enum", "name": "suit", "symbols": ["diamonds", "spades", "clubs", "hearts"] }, "default": "spades" } ] } "#; let writer_schema = Schema::parse_str(writer_raw_schema).unwrap(); let reader_schema = Schema::parse_str(reader_raw_schema).unwrap(); let mut writer = Writer::with_codec(&writer_schema, Vec::new(), Codec::Null); let mut record = Record::new(writer.schema()).unwrap(); record.put("a", 27i64); record.put("b", "foo"); writer.append(record).unwrap(); let input = writer.into_inner().unwrap(); let mut reader = Reader::with_schema(&reader_schema, &input[..]).unwrap(); assert_eq!( reader.next().unwrap().unwrap(), Value::Record(vec![ ("a".to_string(), Value::Long(27)), ("b".to_string(), Value::String("foo".to_string())), ("c".to_string(), Value::Enum(1, "spades".to_string())), ]) ); assert!(reader.next().is_none()); } //TODO: move where it fits better #[test] fn test_enum_string_value() { let raw_schema = r#" { "type": "record", "name": "test", "fields": [ {"name": "a", "type": "long", "default": 42}, {"name": "b", "type": "string"}, { "name": "c", "type": { "type": "enum", "name": "suit", "symbols": ["diamonds", "spades", "clubs", "hearts"] }, "default": "spades" } ] } "#; let schema = Schema::parse_str(raw_schema).unwrap(); let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Null); let mut record = Record::new(writer.schema()).unwrap(); record.put("a", 27i64); record.put("b", "foo"); 
record.put("c", "clubs"); writer.append(record).unwrap(); let input = writer.into_inner().unwrap(); let mut reader = Reader::with_schema(&schema, &input[..]).unwrap(); assert_eq!( reader.next().unwrap().unwrap(), Value::Record(vec![ ("a".to_string(), Value::Long(27)), ("b".to_string(), Value::String("foo".to_string())), ("c".to_string(), Value::Enum(2, "clubs".to_string())), ]) ); assert!(reader.next().is_none()); } //TODO: move where it fits better #[test] fn test_enum_resolution() { let writer_raw_schema = r#" { "type": "record", "name": "test", "fields": [ {"name": "a", "type": "long", "default": 42}, {"name": "b", "type": "string"}, { "name": "c", "type": { "type": "enum", "name": "suit", "symbols": ["diamonds", "spades", "clubs", "hearts"] }, "default": "spades" } ] } "#; let reader_raw_schema = r#" { "type": "record", "name": "test", "fields": [ {"name": "a", "type": "long", "default": 42}, {"name": "b", "type": "string"}, { "name": "c", "type": { "type": "enum", "name": "suit", "symbols": ["diamonds", "spades", "ninja", "hearts"] }, "default": "spades" } ] } "#; let writer_schema = Schema::parse_str(writer_raw_schema).unwrap(); let reader_schema = Schema::parse_str(reader_raw_schema).unwrap(); let mut writer = Writer::with_codec(&writer_schema, Vec::new(), Codec::Null); let mut record = Record::new(writer.schema()).unwrap(); record.put("a", 27i64); record.put("b", "foo"); record.put("c", "clubs"); writer.append(record).unwrap(); let input = writer.into_inner().unwrap(); let mut reader = Reader::with_schema(&reader_schema, &input[..]).unwrap(); assert!(reader.next().unwrap().is_err()); assert!(reader.next().is_none()); } //TODO: move where it fits better #[test] fn test_enum_no_reader_schema() { let writer_raw_schema = r#" { "type": "record", "name": "test", "fields": [ {"name": "a", "type": "long", "default": 42}, {"name": "b", "type": "string"}, { "name": "c", "type": { "type": "enum", "name": "suit", "symbols": ["diamonds", "spades", "clubs", "hearts"] }, 
"default": "spades" } ] } "#; let writer_schema = Schema::parse_str(writer_raw_schema).unwrap(); let mut writer = Writer::with_codec(&writer_schema, Vec::new(), Codec::Null); let mut record = Record::new(writer.schema()).unwrap(); record.put("a", 27i64); record.put("b", "foo"); record.put("c", "clubs"); writer.append(record).unwrap(); let input = writer.into_inner().unwrap(); let mut reader = Reader::new(&input[..]).unwrap(); assert_eq!( reader.next().unwrap().unwrap(), Value::Record(vec![ ("a".to_string(), Value::Long(27)), ("b".to_string(), Value::String("foo".to_string())), ("c".to_string(), Value::Enum(2, "clubs".to_string())), ]) ); } #[test] fn test_illformed_length() { let raw_schema = r#" { "type": "record", "name": "test", "fields": [ {"name": "a", "type": "long", "default": 42}, {"name": "b", "type": "string"} ] } "#; let schema = Schema::parse_str(raw_schema).unwrap(); // Would allocated 18446744073709551605 bytes let illformed: &[u8] = &[0x3e, 0x15, 0xff, 0x1f, 0x15, 0xff]; let value = from_avro_datum(&schema, &mut &illformed[..], None); assert!(value.is_err()); } }
33.599138
143
0.522963
9cd146b1930f38dc3468f33a1a162f4544cb4883
41,115
// Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).

extern crate bazel_protos;
extern crate tempdir;

use std::error::Error;
use std::collections::{BTreeMap, HashSet};
use std::fmt;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};

use futures::future::{self, Future};
use ordermap::OrderMap;
use tempdir::TempDir;

use boxfuture::{Boxable, BoxFuture};
use context::Context;
use core::{Failure, Key, Noop, TypeConstraint, Value, Variants, throw};
use externs;
use fs::{self, Dir, File, FileContent, Link, PathGlobs, PathStat, StoreFileByDigest, VFS};
use process_execution as process_executor;
use hashing;
use rule_graph;
use selectors::{self, Selector};
use tasks;

// The future type produced by every Node in the graph: a boxed futures-0.1
// future whose error channel carries the engine's `Failure` type.
pub type NodeFuture<T> = BoxFuture<T, Failure>;

// Shorthand for an immediately-ready successful NodeFuture.
fn ok<O: Send + 'static>(value: O) -> NodeFuture<O> {
  future::ok(value).to_boxed()
}

// Shorthand for an immediately-ready failed NodeFuture.
fn err<O: Send + 'static>(failure: Failure) -> NodeFuture<O> {
  future::err(failure).to_boxed()
}

///
/// A helper to indicate that the value represented by the Failure was required, and thus
/// fatal if not present.
///
/// Converts a (normally non-fatal) `Noop` into a thrown error; all other Failures pass through.
///
fn was_required(failure: Failure) -> Failure {
  match failure {
    Failure::Noop(noop) => throw(&format!("No source of required dependency: {:?}", noop)),
    f => f,
  }
}

// Anything that can request the (memoized) result of running a Node.
// Implemented by `Context` (elsewhere); `get` returns the Node's output future.
pub trait GetNode {
  fn get<N: Node>(&self, node: N) -> NodeFuture<N::Output>;
}

// Adapts the engine's graph to the `fs` crate's virtual-filesystem trait:
// filesystem reads are routed through Nodes (ReadLink / Scandir) so they are
// memoized and dependency-tracked like any other graph request.
impl VFS<Failure> for Context {
  fn read_link(&self, link: Link) -> NodeFuture<PathBuf> {
    self.get(ReadLink(link)).map(|res| res.0).to_boxed()
  }

  fn scandir(&self, dir: Dir) -> NodeFuture<Vec<fs::Stat>> {
    self.get(Scandir(dir)).map(|res| res.0).to_boxed()
  }

  fn is_ignored(&self, stat: &fs::Stat) -> bool {
    self.core.vfs.is_ignored(stat)
  }

  fn mk_error(msg: &str) -> Failure {
    Failure::Throw(
      externs::create_exception(msg),
      "<pants native internals>".to_string(),
    )
  }
}

// File digesting is likewise routed through the graph (DigestFile Node) so
// that digests are computed at most once per file.
impl StoreFileByDigest<Failure> for Context {
  fn store_by_digest(&self, file: &File) -> BoxFuture<hashing::Digest, Failure> {
    self.get(DigestFile(file.clone()))
  }
}

///
/// Defines executing a cacheable/memoizable step for the given context.
///
/// The Output type of a Node is bounded to values that can be stored and retrieved from
/// the NodeResult enum. Due to the semantics of memoization, retrieving the typed result
/// stored inside the NodeResult requires an implementation of TryFrom<NodeResult>. But the
/// combination of bounds at usage sites should mean that a failure to unwrap the result is
/// exceedingly rare.
///
pub trait Node: Into<NodeKey> {
  type Output: Clone + fmt::Debug + Into<NodeResult> + TryFrom<NodeResult> + Send + 'static;

  fn run(self, context: Context) -> NodeFuture<Self::Output>;
}

///
/// A Node that selects a product for a subject.
///
/// A Select can be satisfied by multiple sources, but fails if multiple sources produce a value.
/// The 'variants' field represents variant configuration that is propagated to dependencies. When
/// a task needs to consume a product as configured by the variants map, it can pass variant_key,
/// which matches a 'variant' value to restrict the names of values selected by a SelectNode.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct Select {
  pub subject: Key,
  pub variants: Variants,
  pub selector: selectors::Select,
  // Rule-graph entries that might be able to produce this product for this subject.
  entries: rule_graph::Entries,
}

impl Select {
  // Constructs a Select for `product` with no variant key, using all entries the
  // rule graph has for that selector.
  pub fn new(
    product: TypeConstraint,
    subject: Key,
    variants: Variants,
    edges: &rule_graph::RuleEdges,
  ) -> Select {
    let selector = selectors::Select::without_variant(product);
    let select_key = rule_graph::SelectKey::JustSelect(selector.clone());
    Select {
      selector: selector,
      subject: subject,
      variants: variants,
      entries: edges.entries_for(&select_key),
    }
  }

  // Like `new`, but takes a fully-specified selector and additionally filters the
  // entries down to those matching the subject's type.
  pub fn new_with_selector(
    selector: selectors::Select,
    subject: Key,
    variants: Variants,
    edges: &rule_graph::RuleEdges,
  ) -> Select {
    let select_key = rule_graph::SelectKey::JustSelect(selector.clone());
    Select {
      selector: selector,
      subject: subject,
      variants: variants,
      entries: edges
        .entries_for(&select_key)
        .into_iter()
        .filter(|e| e.matches_subject_type(subject.type_id().clone()))
        .collect(),
    }
  }

  // The product type this Select is requesting.
  fn product(&self) -> &TypeConstraint {
    &self.selector.product
  }

  // True if `candidate` satisfies the requested product type and, when a variant
  // value is required, its "name" attribute matches it.
  fn select_literal_single<'a>(
    &self,
    candidate: &'a Value,
    variant_value: &Option<String>,
  ) -> bool {
    if !externs::satisfied_by(&self.selector.product, candidate) {
      return false;
    }
    return match variant_value {
      &Some(ref vv) if externs::project_str(candidate, "name") != *vv =>
        // There is a variant value, and it doesn't match.
        false,
      _ => true,
    };
  }

  ///
  /// Looks for has-a or is-a relationships between the given value and the requested product.
  ///
  /// Returns the resulting product value, or None if no match was made.
  ///
  fn select_literal(
    &self,
    context: &Context,
    candidate: Value,
    variant_value: &Option<String>,
  ) -> Option<Value> {
    // Check whether the subject is-a instance of the product.
    if self.select_literal_single(&candidate, variant_value) {
      return Some(candidate);
    }

    // Else, check whether it has-a instance of the product.
    // TODO: returning only the first literal configuration of a given type/variant. Need to
    // define mergeability for products.
    if externs::satisfied_by(&context.core.types.has_products, &candidate) {
      for child in externs::project_multi(&candidate, "products") {
        if self.select_literal_single(&child, variant_value) {
          return Some(child);
        }
      }
    }
    None
  }

  ///
  /// Given the results of configured Task nodes, select a single successful value, or fail.
  ///
  /// Noop failures are non-fatal and only the highest-priority one is propagated (and only
  /// when no task produced a value); Invalidated/Throw failures abort immediately.
  ///
  fn choose_task_result(
    &self,
    context: Context,
    results: Vec<Result<Value, Failure>>,
    variant_value: &Option<String>,
  ) -> Result<Value, Failure> {
    let mut matches = Vec::new();
    let mut max_noop = Noop::NoTask;
    for result in results {
      match result {
        Ok(value) => {
          if let Some(v) = self.select_literal(&context, value, variant_value) {
            matches.push(v);
          }
        }
        Err(err) => {
          match err {
            Failure::Noop(noop) => {
              // Record the highest priority Noop value.
              if noop > max_noop {
                max_noop = noop;
              }
              continue;
            }
            i @ Failure::Invalidated => return Err(i),
            f @ Failure::Throw(..) => return Err(f),
          }
        }
      }
    }

    if matches.len() > 1 {
      // TODO: Multiple successful tasks are not currently supported. We could allow for this
      // by adding support for "mergeable" products. see:
      //   https://github.com/pantsbuild/pants/issues/2526
      return Err(throw("Conflicting values produced for subject and type."));
    }

    match matches.pop() {
      Some(matched) =>
        // Exactly one value was available.
        Ok(matched),
      None =>
        // Propagate the highest priority Noop value.
        Err(Failure::Noop(max_noop)),
    }
  }

  ///
  /// Gets a Snapshot for the current subject.
  ///
  /// Panics unless exactly one rule entry is available for this Select.
  ///
  fn get_snapshot(&self, context: &Context) -> NodeFuture<fs::Snapshot> {
    // TODO: Hacky... should have an intermediate Node to Select PathGlobs for the subject
    // before executing, and then treat this as an intrinsic. Otherwise, Snapshots for
    // different subjects but identical PathGlobs will cause redundant work.
    if self.entries.len() > 1 {
      // TODO do something better than this.
      panic!("we're supposed to get a snapshot, but there's more than one entry!");
    } else if self.entries.is_empty() {
      panic!("we're supposed to get a snapshot, but there are no matching rule entries!");
    }

    context.get(Snapshot {
      subject: self.subject.clone(),
      product: self.product().clone(),
      variants: self.variants.clone(),
      entry: self.entries[0].clone(),
    })
  }

  ///
  /// Return Futures for each Task/Node that might be able to compute the given product for the
  /// given subject and variants.
  ///
  /// Dispatches on the requested product type: Snapshot and FilesContent are handled as
  /// intrinsics, process execution runs inline (see TODOs), singletons return their cached
  /// value, and everything else fans out to the matching Task nodes.
  ///
  fn gen_nodes(&self, context: &Context) -> Vec<NodeFuture<Value>> {
    // TODO: These `product==` hooks are hacky.
    if self.product() == &context.core.types.snapshot {
      // If the requested product is a Snapshot, execute a Snapshot Node and then lower to a Value
      // for this caller.
      let context = context.clone();
      vec![
        self
          .get_snapshot(&context)
          .map(move |snapshot| {
            Snapshot::store_snapshot(&context, &snapshot)
          })
          .to_boxed(),
      ]
    } else if self.product() == &context.core.types.files_content {
      // If the requested product is FilesContent, request a Snapshot and lower it as FilesContent.
      let context = context.clone();
      vec![
        self
          .get_snapshot(&context)
          .and_then(
            move |snapshot|
            // Request the file contents of the Snapshot, and then store them.
            snapshot.contents(context.core.store.clone()).map_err(|e| throw(&e))
              .map(move |files_content| Snapshot::store_files_content(&context, &files_content))
          )
          .to_boxed(),
      ]
    } else if self.product() == &context.core.types.process_result {
      let value = externs::val_for_id(self.subject.id());

      let mut env: BTreeMap<String, String> = BTreeMap::new();
      // "env" is a flat list of alternating key/value strings.
      let env_var_parts = externs::project_multi_strs(&value, "env");
      // TODO: Error if env_var_parts.len() % 2 != 0
      for i in 0..(env_var_parts.len() / 2) {
        env.insert(
          env_var_parts[2 * i].clone(),
          env_var_parts[2 * i + 1].clone(),
        );
      }

      // TODO: Make this much less unwrap-happy with https://github.com/pantsbuild/pants/issues/5502
      let fingerprint = externs::project_str(&value, "input_files_digest");
      let digest_length = externs::project_str(&value, "digest_length");
      let digest_length_as_usize = digest_length.parse::<usize>().unwrap();
      let digest = hashing::Digest(
        hashing::Fingerprint::from_hex_string(&fingerprint).unwrap(),
        digest_length_as_usize,
      );

      let request = process_executor::ExecuteProcessRequest {
        argv: externs::project_multi_strs(&value, "argv"),
        env: env,
        input_files: digest,
      };

      // Materialize the input files into a temp dir, then run the process there.
      let tmpdir = TempDir::new("process-execution").unwrap();

      context
        .core
        .store
        .materialize_directory(tmpdir.path().to_owned(), digest)
        .wait()
        .unwrap();

      // TODO: this should run off-thread, and asynchronously
      // TODO: request the Node that invokes the process, rather than invoke directly
      let result = process_executor::local::run_command_locally(request, tmpdir.path()).unwrap();
      vec![
        future::ok(externs::unsafe_call(
          &context.core.types.construct_process_result,
          &vec![
            externs::store_bytes(&result.stdout),
            externs::store_bytes(&result.stderr),
            externs::store_i32(result.exit_code),
          ],
        )).to_boxed(),
      ]
    } else if let Some(&(_, ref value)) = context.core.tasks.gen_singleton(self.product()) {
      // Singleton products are precomputed: return the cached value directly.
      vec![future::ok(value.clone()).to_boxed()]
    } else {
      // General case: one Task node per matching rule-graph entry.
      self
        .entries
        .iter()
        .map(|entry| {
          let task = context.core.rule_graph.task_for_inner(entry);
          context.get(Task {
            subject: self.subject.clone(),
            product: self.product().clone(),
            variants: self.variants.clone(),
            task: task,
            entry: entry.clone(),
          })
        })
        .collect::<Vec<NodeFuture<Value>>>()
    }
  }
}

// TODO: This is a Node only because it is used as a root in the graph, but it should never be
// requested using context.get
impl Node for Select {
  type Output = Value;

  fn run(self, context: Context) -> NodeFuture<Value> {
    // TODO add back support for variants https://github.com/pantsbuild/pants/issues/4020
    // If there is a variant_key, see whether it has been configured; if not, no match.
    let variant_value: Option<String> = match self.selector.variant_key {
      Some(ref variant_key) => {
        let variant_value = self.variants.find(variant_key);
        if variant_value.is_none() {
          return err(Failure::Noop(Noop::NoVariant));
        }
        variant_value.map(|v| v.to_string())
      }
      None => None,
    };

    // If the Subject "is a" or "has a" Product, then we're done.
    if let Some(literal_value) =
      self.select_literal(&context, externs::val_for(&self.subject), &variant_value)
    {
      return ok(literal_value);
    }

    // Else, attempt to use the configured tasks to compute the value.
    let deps_future = future::join_all(
      self
        .gen_nodes(&context)
        .into_iter()
        .map(|node_future| {
          // Don't fail the join if one fails.
          node_future.then(|r| future::ok(r))
        })
        .collect::<Vec<_>>(),
    );

    let variant_value = variant_value.map(|s| s.to_string());
    deps_future
      .and_then(move |dep_results| {
        future::result(self.choose_task_result(
          context,
          dep_results,
          &variant_value,
        ))
      })
      .to_boxed()
  }
}

impl From<Select> for NodeKey {
  fn from(n: Select) -> Self {
    NodeKey::Select(n)
  }
}

///
/// A Node that selects the given Product for each of the items in `field` on `dep_product`.
///
/// Begins by selecting the `dep_product` for the subject, and then selects a product for each
/// member of a collection named `field` on the dep_product.
///
/// The value produced by this Node guarantees that the order of the provided values matches the
/// order of declaration in the list `field` of the `dep_product`.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct SelectDependencies {
  pub subject: Key,
  pub variants: Variants,
  pub selector: selectors::SelectDependencies,
  // Entries able to produce the dep_product for the subject.
  pub dep_product_entries: rule_graph::Entries,
  // Entries able to produce the product for each projected dependency subject.
  pub product_entries: rule_graph::Entries,
}

impl SelectDependencies {
  pub fn new(
    selector: selectors::SelectDependencies,
    subject: Key,
    variants: Variants,
    edges: &rule_graph::RuleEdges,
  ) -> SelectDependencies {
    // filters entries by whether the subject type is the right subject type
    let dep_p_entries = edges.entries_for(&rule_graph::SelectKey::NestedSelect(
      Selector::SelectDependencies(selector.clone()),
      selectors::Select::without_variant(
        selector.clone().dep_product,
      ),
    ));
    let p_entries = edges.entries_for(&rule_graph::SelectKey::ProjectedMultipleNestedSelect(
      Selector::SelectDependencies(selector.clone()),
      selector.field_types.clone(),
      selectors::Select::without_variant(
        selector.product.clone(),
      ),
    ));
    SelectDependencies {
      subject: subject,
      variants: variants,
      selector: selector.clone(),
      dep_product_entries: dep_p_entries,
      product_entries: p_entries,
    }
  }

  // Runs a nested Select for the product, with `dep_subject` as the new subject.
  fn get_dep(&self, context: &Context, dep_subject: &Value) -> NodeFuture<Value> {
    // TODO: This method needs to consider whether the `dep_subject` is an Address,
    // and if so, attempt to parse Variants there. See:
    //   https://github.com/pantsbuild/pants/issues/4020

    let dep_subject_key = externs::key_for(dep_subject);
    Select {
      selector: selectors::Select::without_variant(self.selector.product),
      subject: dep_subject_key,
      variants: self.variants.clone(),
      // NB: We're filtering out all of the entries for field types other than
      // dep_subject's since none of them will match.
      entries: self
        .product_entries
        .clone()
        .into_iter()
        .filter(|e| {
          e.matches_subject_type(dep_subject_key.type_id().clone())
        })
        .collect(),
    }.run(context.clone())
  }
}

impl SelectDependencies {
  // Selects the dep_product, projects `field` from it, then selects the product for
  // each projected member, preserving their declared order in the stored list.
  fn run(self, context: Context) -> NodeFuture<Value> {
    // Select the product holding the dependency list.
    Select {
      selector: selectors::Select::without_variant(self.selector.dep_product),
      subject: self.subject.clone(),
      variants: self.variants.clone(),
      entries: self.dep_product_entries.clone(),
    }.run(context.clone())
      .then(move |dep_product_res| {
        match dep_product_res {
          Ok(dep_product) => {
            // The product and its dependency list are available: project them.
            let deps = future::join_all(
              externs::project_multi(&dep_product, &self.selector.field)
                .iter()
                .map(|dep_subject| self.get_dep(&context, &dep_subject))
                .collect::<Vec<_>>(),
            );
            deps
              .then(move |dep_values_res| {
                // Finally, store the resulting values.
                match dep_values_res {
                  Ok(dep_values) => Ok(externs::store_list(dep_values.iter().collect(), false)),
                  // Every dependency is required: a Noop here becomes a thrown error.
                  Err(failure) => Err(was_required(failure)),
                }
              })
              .to_boxed()
          }
          Err(failure) => err(failure),
        }
      })
      .to_boxed()
  }
}

///
/// A node that selects for the dep_product type, then recursively selects for the product type of
/// the result. Both the product and the dep_product must have the same "field" and the types of
/// products in that field must match the field type.
///
/// A node that recursively selects the dependencies of requested type and merge them.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct SelectTransitive {
  pub subject: Key,
  pub variants: Variants,
  pub selector: selectors::SelectTransitive,
  dep_product_entries: rule_graph::Entries,
  product_entries: rule_graph::Entries,
}

impl SelectTransitive {
  fn new(
    selector: selectors::SelectTransitive,
    subject: Key,
    variants: Variants,
    edges: &rule_graph::RuleEdges,
  ) -> SelectTransitive {
    let dep_p_entries = edges.entries_for(&rule_graph::SelectKey::NestedSelect(
      Selector::SelectTransitive(selector.clone()),
      selectors::Select::without_variant(
        selector.clone().dep_product,
      ),
    ));
    let p_entries = edges.entries_for(&rule_graph::SelectKey::ProjectedMultipleNestedSelect(
      Selector::SelectTransitive(selector.clone()),
      selector.field_types.clone(),
      selectors::Select::without_variant(
        selector.clone().product,
      ),
    ));
    SelectTransitive {
      subject: subject,
      variants: variants,
      selector: selector.clone(),
      dep_product_entries: dep_p_entries,
      product_entries: p_entries,
    }
  }

  ///
  /// Process single subject.
  ///
  /// Return tuple of:
  /// (processed subject_key, product output, dependencies to be processed in future iterations).
  ///
  fn expand_transitive(
    &self,
    context: &Context,
    subject_key: Key,
  ) -> NodeFuture<(Key, Value, Vec<Value>)> {
    let field_name = self.selector.field.to_owned();
    Select {
      selector: selectors::Select::without_variant(self.selector.product),
      subject: subject_key,
      variants: self.variants.clone(),
      // NB: We're filtering out all of the entries for field types other than
      // subject_key's since none of them will match.
      entries: self
        .product_entries
        .clone()
        .into_iter()
        .filter(|e| e.matches_subject_type(subject_key.type_id().clone()))
        .collect(),
    }.run(context.clone())
      .map(move |product| {
        // Project the same `field` off the computed product to find the next wave
        // of dependency subjects.
        let deps = externs::project_multi(&product, &field_name);
        (subject_key, product, deps)
      })
      .to_boxed()
  }
}

///
/// Track states when processing `SelectTransitive` iteratively.
///
#[derive(Debug)]
struct TransitiveExpansion {
  // Subjects to be processed.
  todo: HashSet<Key>,

  // Mapping from processed subject `Key` to its product.
  // Products will be collected at the end of iterations.
  outputs: OrderMap<Key, Value>,
}

impl SelectTransitive {
  // Breadth-first transitive expansion: seed from the dep_product's `field`, then
  // loop (future::loop_fn), expanding each round of subjects in parallel until no
  // unvisited subjects remain. Returns all collected products as a stored list.
  fn run(self, context: Context) -> NodeFuture<Value> {
    // Select the product holding the dependency list.
    Select {
      selector: selectors::Select::without_variant(self.selector.dep_product),
      subject: self.subject.clone(),
      variants: self.variants.clone(),
      entries: self.dep_product_entries.clone(),
    }.run(context.clone())
      .then(move |dep_product_res| {
        match dep_product_res {
          Ok(dep_product) => {
            let subject_keys = externs::project_multi(&dep_product, &self.selector.field)
              .iter()
              .map(|subject| externs::key_for(&subject))
              .collect();

            let init = TransitiveExpansion {
              todo: subject_keys,
              outputs: OrderMap::default(),
            };

            future::loop_fn(init, move |mut expansion| {
              // Expand every pending subject concurrently in this round.
              let round = future::join_all({
                expansion
                  .todo
                  .drain()
                  .map(|subject_key| self.expand_transitive(&context, subject_key))
                  .collect::<Vec<_>>()
              });

              round.map(move |finished_items| {
                let mut todo_candidates = Vec::new();
                for (subject_key, product, more_deps) in finished_items.into_iter() {
                  expansion.outputs.insert(subject_key, product);
                  todo_candidates.extend(more_deps);
                }

                // NB enclose with {} to limit the borrowing scope.
                {
                  let outputs = &expansion.outputs;
                  // Only schedule subjects we haven't already produced output for.
                  expansion.todo.extend(
                    todo_candidates
                      .into_iter()
                      .map(|dep| externs::key_for(&dep))
                      .filter(|dep_key| !outputs.contains_key(dep_key))
                      .collect::<Vec<_>>(),
                  );
                }

                if expansion.todo.is_empty() {
                  future::Loop::Break(expansion)
                } else {
                  future::Loop::Continue(expansion)
                }
              })
            }).map(|expansion| {
              externs::store_list(expansion.outputs.values().collect::<Vec<_>>(), false)
            })
              .to_boxed()
          }
          Err(failure) => err(failure),
        }
      })
      .to_boxed()
  }
}

///
/// A Node that selects an input product, projects a typed field out of it, and then selects
/// the requested product with the projected value as the new subject.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct SelectProjection {
  subject: Key,
  variants: Variants,
  selector: selectors::SelectProjection,
  input_product_entries: rule_graph::Entries,
  projected_entries: rule_graph::Entries,
}

impl SelectProjection {
  fn new(
    selector: selectors::SelectProjection,
    subject: Key,
    variants: Variants,
    edges: &rule_graph::RuleEdges,
  ) -> SelectProjection {
    let dep_p_entries = edges.entries_for(&rule_graph::SelectKey::NestedSelect(
      Selector::SelectProjection(selector.clone()),
      selectors::Select::without_variant(
        selector.clone().input_product,
      ),
    ));
    let p_entries = edges.entries_for(&rule_graph::SelectKey::ProjectedNestedSelect(
      Selector::SelectProjection(selector.clone()),
      selector.projected_subject.clone(),
      selectors::Select::without_variant(
        selector.clone().product,
      ),
    ));
    SelectProjection {
      subject: subject,
      variants: variants,
      selector: selector.clone(),
      input_product_entries: dep_p_entries,
      projected_entries: p_entries,
    }
  }
}

impl SelectProjection {
  fn run(self, context: Context) -> NodeFuture<Value> {
    // Request the product we need to compute the subject.
    Select {
      selector: selectors::Select {
        product: self.selector.input_product,
        variant_key: None,
      },
      subject: self.subject.clone(),
      variants: self.variants.clone(),
      entries: self.input_product_entries.clone(),
    }.run(context.clone())
      .then(move |dep_product_res| {
        match dep_product_res {
          Ok(dep_product) => {
            // And then project the relevant field.
            let projected_subject = externs::project(
              &dep_product,
              &self.selector.field,
              &self.selector.projected_subject,
            );
            // Select the requested product using the projected value as the subject.
            Select {
              selector: selectors::Select::without_variant(self.selector.product),
              subject: externs::key_for(&projected_subject),
              variants: self.variants.clone(),
              // NB: Unlike SelectDependencies and SelectTransitive, we don't need to filter by
              // subject here, because there is only one projected type.
              entries: self.projected_entries.clone(),
            }.run(context.clone())
              .then(move |output_res| {
                // If the output product is available, return it.
                match output_res {
                  Ok(output) => Ok(output),
                  // The projected product is required: a Noop becomes a thrown error.
                  Err(failure) => Err(was_required(failure)),
                }
              })
              .to_boxed()
          }
          Err(failure) => err(failure),
        }
      })
      .to_boxed()
  }
}

///
/// A Node that represents executing a process.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct ExecuteProcess(process_executor::ExecuteProcessRequest);

#[derive(Clone, Debug)]
pub struct ProcessResult(process_executor::ExecuteProcessResult);

impl Node for ExecuteProcess {
  type Output = ProcessResult;

  // Materializes the request's input files into a temp dir and runs the command
  // there, blocking the current thread (see TODOs below).
  fn run(self, context: Context) -> NodeFuture<ProcessResult> {
    let request = self.0.clone();

    // TODO: Make this much less unwrap-happy with https://github.com/pantsbuild/pants/issues/5502
    let tmpdir = TempDir::new("process-execution").unwrap();

    context
      .core
      .store
      .materialize_directory(tmpdir.path().to_owned(), request.input_files)
      .wait()
      .unwrap();

    // TODO: this should run off-thread, and asynchronously
    future::ok(ProcessResult(
      process_executor::local::run_command_locally(
        request,
        tmpdir.path(),
      ).unwrap(),
    )).to_boxed()
  }
}

impl From<ExecuteProcess> for NodeKey {
  fn from(n: ExecuteProcess) -> Self {
    NodeKey::ExecuteProcess(n)
  }
}

///
/// A Node that represents reading the destination of a symlink (non-recursively).
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct ReadLink(Link);

#[derive(Clone, Debug)]
pub struct LinkDest(PathBuf);

impl Node for ReadLink {
  type Output = LinkDest;

  fn run(self, context: Context) -> NodeFuture<LinkDest> {
    // Cloned so the error closure can still name the link after `self.0` is borrowed.
    let link = self.0.clone();
    context
      .core
      .vfs
      .read_link(&self.0)
      .map(|dest_path| LinkDest(dest_path))
      .map_err(move |e| {
        throw(&format!("Failed to read_link for {:?}: {:?}", link, e))
      })
      .to_boxed()
  }
}

impl From<ReadLink> for NodeKey {
  fn from(n: ReadLink) -> Self {
    NodeKey::ReadLink(n)
  }
}

///
/// A Node that represents reading a file and fingerprinting its contents.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct DigestFile(pub File);

impl Node for DigestFile {
  type Output = hashing::Digest;

  // Reads the file via the VFS, stores its bytes in the Store, and yields the digest.
  fn run(self, context: Context) -> NodeFuture<hashing::Digest> {
    let file = self.0.clone();
    context
      .core
      .vfs
      .read_file(&self.0)
      .map_err(move |e| {
        throw(&format!(
          "Error reading file {:?}: {}",
          file,
          e.description()
        ))
      })
      .and_then(move |c| {
        context
          .core
          .store
          .store_file_bytes(c.content, true)
          .map_err(|e| throw(&e))
      })
      .to_boxed()
  }
}

impl From<DigestFile> for NodeKey {
  fn from(n: DigestFile) -> Self {
    NodeKey::DigestFile(n)
  }
}

///
/// A Node that represents executing a directory listing that returns a Stat per directory
/// entry (generally in one syscall). No symlinks are expanded.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct Scandir(Dir);

#[derive(Clone, Debug)]
pub struct DirectoryListing(Vec<fs::Stat>);

impl Node for Scandir {
  type Output = DirectoryListing;

  fn run(self, context: Context) -> NodeFuture<DirectoryListing> {
    let dir = self.0.clone();
    context
      .core
      .vfs
      .scandir(&self.0)
      .then(move |listing_res| match listing_res {
        Ok(listing) => Ok(DirectoryListing(listing)),
        Err(e) => Err(throw(&format!("Failed to scandir for {:?}: {:?}", dir, e))),
      })
      .to_boxed()
  }
}

impl From<Scandir> for NodeKey {
  fn from(n: Scandir) -> Self {
    NodeKey::Scandir(n)
  }
}

///
/// A Node that captures an fs::Snapshot for the given subject.
///
/// Begins by selecting PathGlobs for the subject, and then computes a Snapshot for the
/// PathStats matched by the PathGlobs.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct Snapshot {
  subject: Key,
  product: TypeConstraint,
  variants: Variants,
  entry: rule_graph::Entry,
}

impl Snapshot {
  fn create(context: Context, path_globs: PathGlobs) -> NodeFuture<fs::Snapshot> {
    // Recursively expand PathGlobs into PathStats.
    // We rely on Context::expand tracking dependencies for scandirs,
    // and fs::Snapshot::from_path_stats tracking dependencies for file digests.
context .expand(path_globs) .map_err(|e| format!("PlatGlobs expansion failed: {:?}", e)) .and_then(move |path_stats| { fs::Snapshot::from_path_stats(context.core.store.clone(), context.clone(), path_stats) .map_err(move |e| format!("Snapshot failed: {}", e)) }) .map_err(|e| throw(&e)) .to_boxed() } fn lift_path_globs(item: &Value) -> Result<PathGlobs, String> { let include = externs::project_multi_strs(item, "include"); let exclude = externs::project_multi_strs(item, "exclude"); PathGlobs::create(&include, &exclude).map_err(|e| { format!( "Failed to parse PathGlobs for include({:?}), exclude({:?}): {}", include, exclude, e ) }) } fn store_snapshot(context: &Context, item: &fs::Snapshot) -> Value { let path_stats: Vec<_> = item .path_stats .iter() .map(|ps| Self::store_path_stat(context, ps)) .collect(); externs::unsafe_call( &context.core.types.construct_snapshot, &vec![ externs::store_bytes(&(item.digest.0).to_hex().as_bytes()), externs::store_i32((item.digest.1 as i32)), externs::store_list(path_stats.iter().collect(), false), ], ) } fn store_path(item: &Path) -> Value { externs::store_bytes(item.as_os_str().as_bytes()) } fn store_dir(context: &Context, item: &Dir) -> Value { let args = vec![Self::store_path(item.0.as_path())]; externs::unsafe_call(&context.core.types.construct_dir, &args) } fn store_file(context: &Context, item: &File) -> Value { let args = vec![Self::store_path(item.path.as_path())]; externs::unsafe_call(&context.core.types.construct_file, &args) } fn store_path_stat(context: &Context, item: &PathStat) -> Value { let args = match item { &PathStat::Dir { ref path, ref stat } => { vec![Self::store_path(path), Self::store_dir(context, stat)] } &PathStat::File { ref path, ref stat } => { vec![Self::store_path(path), Self::store_file(context, stat)] } }; externs::unsafe_call(&context.core.types.construct_path_stat, &args) } fn store_file_content(context: &Context, item: &FileContent) -> Value { externs::unsafe_call( 
&context.core.types.construct_file_content, &vec![ Self::store_path(&item.path), externs::store_bytes(&item.content), ], ) } fn store_files_content(context: &Context, item: &Vec<FileContent>) -> Value { let entries: Vec<_> = item .iter() .map(|e| Self::store_file_content(context, e)) .collect(); externs::unsafe_call( &context.core.types.construct_files_content, &vec![externs::store_list(entries.iter().collect(), false)], ) } } impl Node for Snapshot { type Output = fs::Snapshot; fn run(self, context: Context) -> NodeFuture<fs::Snapshot> { let ref edges = context .core .rule_graph .edges_for_inner(&self.entry) .expect("edges for snapshot exist."); // Compute and parse PathGlobs for the subject. Select::new( context.core.types.path_globs.clone(), self.subject.clone(), self.variants.clone(), edges, ).run(context.clone()) .then(move |path_globs_res| match path_globs_res { Ok(path_globs_val) => { match Self::lift_path_globs(&path_globs_val) { Ok(pgs) => Snapshot::create(context, pgs), Err(e) => err(throw(&format!("Failed to parse PathGlobs: {}", e))), } } Err(failure) => err(failure), }) .to_boxed() } } impl From<Snapshot> for NodeKey { fn from(n: Snapshot) -> Self { NodeKey::Snapshot(n) } } #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct Task { subject: Key, product: TypeConstraint, variants: Variants, task: tasks::Task, entry: rule_graph::Entry, } impl Task { fn get(&self, context: &Context, selector: Selector) -> NodeFuture<Value> { let ref edges = context .core .rule_graph .edges_for_inner(&self.entry) .expect("edges for task exist."); match selector { Selector::Select(s) => { Select::new_with_selector(s, self.subject.clone(), self.variants.clone(), edges) .run(context.clone()) } Selector::SelectDependencies(s) => { SelectDependencies::new(s, self.subject.clone(), self.variants.clone(), edges) .run(context.clone()) } Selector::SelectTransitive(s) => { SelectTransitive::new(s, self.subject.clone(), self.variants.clone(), edges) .run(context.clone()) } 
Selector::SelectProjection(s) => { SelectProjection::new(s, self.subject.clone(), self.variants.clone(), edges) .run(context.clone()) } } } } impl Node for Task { type Output = Value; fn run(self, context: Context) -> NodeFuture<Value> { let deps = future::join_all( self .task .clause .iter() .map(|selector| self.get(&context, selector.clone())) .collect::<Vec<_>>(), ); let task = self.task.clone(); deps .then(move |deps_result| match deps_result { Ok(deps) => externs::call(&externs::val_for_id(task.func.0), &deps), Err(err) => Err(err), }) .to_boxed() } } impl From<Task> for NodeKey { fn from(n: Task) -> Self { NodeKey::Task(n) } } #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub enum NodeKey { DigestFile(DigestFile), ExecuteProcess(ExecuteProcess), ReadLink(ReadLink), Scandir(Scandir), Select(Select), Snapshot(Snapshot), Task(Task), } impl NodeKey { pub fn format(&self) -> String { fn keystr(key: &Key) -> String { externs::id_to_str(key.id()) } fn typstr(tc: &TypeConstraint) -> String { externs::id_to_str(tc.0) } match self { &NodeKey::DigestFile(ref s) => format!("DigestFile({:?})", s.0), &NodeKey::ExecuteProcess(ref s) => format!("ExecuteProcess({:?}", s.0), &NodeKey::ReadLink(ref s) => format!("ReadLink({:?})", s.0), &NodeKey::Scandir(ref s) => format!("Scandir({:?})", s.0), &NodeKey::Select(ref s) => { format!( "Select({}, {})", keystr(&s.subject), typstr(&s.selector.product) ) } &NodeKey::Task(ref s) => { format!( "Task({}, {}, {})", externs::id_to_str(s.task.func.0), keystr(&s.subject), typstr(&s.product) ) } &NodeKey::Snapshot(ref s) => format!("Snapshot({})", keystr(&s.subject)), } } pub fn product_str(&self) -> String { fn typstr(tc: &TypeConstraint) -> String { externs::id_to_str(tc.0) } match self { &NodeKey::ExecuteProcess(..) => "ProcessResult".to_string(), &NodeKey::Select(ref s) => typstr(&s.selector.product), &NodeKey::Task(ref s) => typstr(&s.product), &NodeKey::Snapshot(..) => "Snapshot".to_string(), &NodeKey::DigestFile(..) 
=> "DigestFile".to_string(), &NodeKey::ReadLink(..) => "LinkDest".to_string(), &NodeKey::Scandir(..) => "DirectoryListing".to_string(), } } /// /// If this NodeKey represents an FS operation, returns its Path. /// pub fn fs_subject(&self) -> Option<&Path> { match self { &NodeKey::DigestFile(ref s) => Some(s.0.path.as_path()), &NodeKey::ReadLink(ref s) => Some((s.0).0.as_path()), &NodeKey::Scandir(ref s) => Some((s.0).0.as_path()), // Not FS operations: // Explicitly listed so that if people add new NodeKeys they need to consider whether their // NodeKey represents an FS operation, and accordingly whether they need to add it to the // above list or the below list. &NodeKey::ExecuteProcess { .. } | &NodeKey::Select { .. } | &NodeKey::Snapshot { .. } | &NodeKey::Task { .. } => None, } } } impl Node for NodeKey { type Output = NodeResult; fn run(self, context: Context) -> NodeFuture<NodeResult> { match self { NodeKey::DigestFile(n) => n.run(context).map(|v| v.into()).to_boxed(), NodeKey::ExecuteProcess(n) => n.run(context).map(|v| v.into()).to_boxed(), NodeKey::ReadLink(n) => n.run(context).map(|v| v.into()).to_boxed(), NodeKey::Scandir(n) => n.run(context).map(|v| v.into()).to_boxed(), NodeKey::Select(n) => n.run(context).map(|v| v.into()).to_boxed(), NodeKey::Snapshot(n) => n.run(context).map(|v| v.into()).to_boxed(), NodeKey::Task(n) => n.run(context).map(|v| v.into()).to_boxed(), } } } #[derive(Clone, Debug)] pub enum NodeResult { Unit, Digest(hashing::Digest), DirectoryListing(DirectoryListing), LinkDest(LinkDest), ProcessResult(ProcessResult), Snapshot(fs::Snapshot), Value(Value), } impl From<()> for NodeResult { fn from(_: ()) -> Self { NodeResult::Unit } } impl From<Value> for NodeResult { fn from(v: Value) -> Self { NodeResult::Value(v) } } impl From<fs::Snapshot> for NodeResult { fn from(v: fs::Snapshot) -> Self { NodeResult::Snapshot(v) } } impl From<hashing::Digest> for NodeResult { fn from(v: hashing::Digest) -> Self { NodeResult::Digest(v) } } impl 
From<ProcessResult> for NodeResult { fn from(v: ProcessResult) -> Self { NodeResult::ProcessResult(v) } } impl From<LinkDest> for NodeResult { fn from(v: LinkDest) -> Self { NodeResult::LinkDest(v) } } impl From<DirectoryListing> for NodeResult { fn from(v: DirectoryListing) -> Self { NodeResult::DirectoryListing(v) } } // TODO: These traits exist in the stdlib, but are marked unstable. // see https://github.com/rust-lang/rust/issues/33417 pub trait TryFrom<T>: Sized { type Err; fn try_from(T) -> Result<Self, Self::Err>; } pub trait TryInto<T>: Sized { type Err; fn try_into(self) -> Result<T, Self::Err>; } impl<T, U> TryInto<U> for T where U: TryFrom<T>, { type Err = U::Err; fn try_into(self) -> Result<U, U::Err> { U::try_from(self) } } impl TryFrom<NodeResult> for NodeResult { type Err = (); fn try_from(nr: NodeResult) -> Result<Self, ()> { Ok(nr) } } impl TryFrom<NodeResult> for () { type Err = (); fn try_from(nr: NodeResult) -> Result<Self, ()> { match nr { NodeResult::Unit => Ok(()), _ => Err(()), } } } impl TryFrom<NodeResult> for Value { type Err = (); fn try_from(nr: NodeResult) -> Result<Self, ()> { match nr { NodeResult::Value(v) => Ok(v), _ => Err(()), } } } impl TryFrom<NodeResult> for fs::Snapshot { type Err = (); fn try_from(nr: NodeResult) -> Result<Self, ()> { match nr { NodeResult::Snapshot(v) => Ok(v), _ => Err(()), } } } impl TryFrom<NodeResult> for hashing::Digest { type Err = (); fn try_from(nr: NodeResult) -> Result<Self, ()> { match nr { NodeResult::Digest(v) => Ok(v), _ => Err(()), } } } impl TryFrom<NodeResult> for ProcessResult { type Err = (); fn try_from(nr: NodeResult) -> Result<Self, ()> { match nr { NodeResult::ProcessResult(v) => Ok(v), _ => Err(()), } } } impl TryFrom<NodeResult> for LinkDest { type Err = (); fn try_from(nr: NodeResult) -> Result<Self, ()> { match nr { NodeResult::LinkDest(v) => Ok(v), _ => Err(()), } } } impl TryFrom<NodeResult> for DirectoryListing { type Err = (); fn try_from(nr: NodeResult) -> Result<Self, ()> { 
match nr { NodeResult::DirectoryListing(v) => Ok(v), _ => Err(()), } } }
29.579137
100
0.611188
e4bbda0a159e037350a39c1eb59cde6127c0749b
5,429
use std::io::BufWriter; use std::io::Write; use xml::reader::XmlEvent as ReadXmlEvent; use xml::writer; use xml::writer::XmlEvent as WriteXmlEvent; use xml::EmitterConfig; use xml::ParserConfig; use crate::android_string::AndroidString; use crate::constants; use crate::error::InnerError; pub fn write<S: Write>( sink: &mut S, android_strings: Vec<AndroidString>, ) -> Result<(), InnerError> { let mut writer = EmitterConfig::new() .perform_indent(true) .indent_string(" ") // 4 spaces .write_document_declaration(true) .create_writer(BufWriter::new(sink)); // Start resources element writer.write(WriteXmlEvent::start_element(constants::elements::RESOURCES))?; // Write all string elements for android_string in android_strings { // String tag with name attribute let mut string_element = WriteXmlEvent::start_element(constants::elements::STRING) .attr(constants::attributes::NAME, android_string.name()); // Include `localizable` attribute if required if !android_string.is_localizable() { string_element = string_element.attr(constants::attributes::LOCALIZABLE, constants::flags::FALSE); } writer.write(string_element)?; write_string(&mut writer, android_string.value())?; writer.write(WriteXmlEvent::end_element())?; } // Ending resources writer.write(WriteXmlEvent::end_element())?; Ok(()) } fn write_string<W: Write>( writer: &mut writer::EventWriter<W>, value: &str, ) -> Result<(), InnerError> { // Right now, to write CDATA sections in strings properly out to the file, // we are creating a reader & then piping the required read events to the // writer. This feels wasteful! 
There has got to a better, more efficient // way to do this // Artificially inject tags to create valid XML out of the passed in string let value = format!("<a>{}</a>", value); let reader = ParserConfig::new().create_reader(value.as_bytes()); for element_or_error in reader { match element_or_error { Err(error) => return Err::<_, InnerError>(From::from(error)), Ok(ref element) => match element { ReadXmlEvent::Characters(_) => { writer.write(element.as_writer_event().ok_or_else(|| { InnerError::from(format!("Can't build writer event from {}", &value)) })?) } ReadXmlEvent::CData(_) => { writer.write(element.as_writer_event().ok_or_else(|| { InnerError::from(format!("Can't build writer event from {}", &value)) })?) } _ => Ok(()), // No op for other events }, }?; } Ok(()) } #[cfg(test)] mod tests { use test_utilities; use crate::android_string::AndroidString; #[test] fn writes_strings_to_file() { let android_strings = vec![ AndroidString::localizable("localizable_string", "localizable string value"), AndroidString::unlocalizable("non_localizable_string", "non localizable string value"), ]; // Write strings to a vector & split o/p into lines let mut sink: Vec<u8> = vec![]; super::write(&mut sink, android_strings).unwrap(); let written_content = String::from_utf8(sink).unwrap(); let written_lines = written_content.lines(); test_utilities::list::assert_strict_list_eq( written_lines.collect::<Vec<&str>>(), vec![ r##"<?xml version="1.0" encoding="utf-8"?>"##, r##"<resources>"##, r##" <string name="localizable_string">localizable string value</string>"##, r##" <string name="non_localizable_string" translatable="false">non localizable string value</string>"##, r##"</resources>"##, ], ) } #[test] fn writes_string_with_one_cdata_event() { test_cdata_handling("<![CDATA[this is a test]]>") } #[test] fn writes_string_with_character_followed_by_cdata_event() { test_cdata_handling("character event <![CDATA[cdata event]]>") } #[test] fn 
writes_string_with_cdata_followed_by_character_event() { test_cdata_handling("<![CDATA[cdata event]]> character event") } #[test] fn writes_string_with_multiple_character_and_cdata_events() { test_cdata_handling("character event 1 <![CDATA[cdata event 1]]> character event 2 <![CDATA[cdata event 2]]> <![CDATA[cdata event 3]]> character event 3") } fn test_cdata_handling(value: &str) { // Write string to a vector & split o/p into lines let mut sink: Vec<u8> = vec![]; super::write(&mut sink, vec![AndroidString::localizable("s1", value)]).unwrap(); let written_content = String::from_utf8(sink).unwrap(); let written_lines = written_content.lines(); test_utilities::list::assert_strict_list_eq( written_lines.collect::<Vec<&str>>(), vec![ r##"<?xml version="1.0" encoding="utf-8"?>"##, r##"<resources>"##, &format!(" <string name=\"s1\">{}</string>", value), r##"</resources>"##, ], ) } }
34.801282
162
0.596427
1ab5c770e5d57c079744ec04dc79ea50ff46eb10
8,830
// Copyright © 2020 Starcat LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use log::{info, warn}; use std::time::Duration; use clap::{App, AppSettings, Arg}; use serialport::prelude::SerialPortSettings; use common::constants::ads1299; use hackeeg::client::commands::responses::Status; use hackeeg::common::constants::NUM_CHANNELS; use hackeeg::{client::modes::Mode, client::HackEEGClient, common}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; const MAIN_TAG: &str = "main"; const DEFAULT_STREAM_NAME: &str = "HackEEG"; fn main() -> Result<(), Box<dyn std::error::Error>> { let matches = App::new("HackEEG Streamer") .about("Reads data from a serial port and echoes it to stdout") .setting(AppSettings::DisableVersion) .arg( Arg::with_name("verbosity") .short("v") .multiple(true) .help("Sets the level of verbosity"), ) .arg( Arg::with_name("port") .help("The device path to a serial port") .required(true), ) .arg( Arg::with_name("baud") .help("The baud rate to connect at") .default_value("115200") .required(true), ) .arg( Arg::with_name("sps") .short("s") .long("--sps") .help("Samples per second") .default_value("500"), ) .arg( Arg::with_name("lsl") .short("L") .long("lsl") .help("Send samples to an LSL stream instead of terminal"), ) .arg( Arg::with_name("lsl_stream_name") .short("N") .long("lsl-stream-name") .help("Name of LSL stream to create") .default_value(DEFAULT_STREAM_NAME), ) .arg( Arg::with_name("quiet") .short("q") .long("quiet") .help("Quiet 
mode: do not print sample data (used for performance testing)"), ) .arg( Arg::with_name("samples") .short("S") .long("samples") .help("How many samples to capture") .takes_value(true), ) .arg( Arg::with_name("messagepack") .short("M") .long("messagepack") .help("MessagePack mode- use MessagePack format to send sample data to the host, rather than JSON Lines") ) .arg( Arg::with_name("channel_test") .short("T") .long("channel-test") .help("Set the channels to internal test settings for software testing") ) .arg( Arg::with_name("gain") .short("g") .long("gain") .help("ADS1299 gain setting for all channels") .default_value("1") .takes_value(true) ) .get_matches(); let log_level = match matches.occurrences_of("verbosity") { 0 => log::LevelFilter::Info, 1 => log::LevelFilter::Debug, _ => log::LevelFilter::Trace, }; let port_name = matches.value_of("port").unwrap(); let baud_rate = matches.value_of("baud").unwrap().parse::<u32>()?; let sps = matches.value_of("sps").unwrap().parse::<u32>()?; common::log::setup_logger(log_level, None)?; let mut settings = SerialPortSettings::default(); settings.baud_rate = baud_rate; settings.timeout = Duration::from_millis(10); let mut client = HackEEGClient::new(port_name, &settings)?; client.blink_board_led()?; let sample_mode = ads1299::Speed::from(sps) as u8 | ads1299::CONFIG1_const; client .wreg::<Status>(ads1299::GlobalSettings::CONFIG1 as u8, sample_mode)? .assert()?; info!(target: MAIN_TAG, "Disabling all channels"); client.disable_all_channels()?; if matches.is_present("channel_test") { info!(target: MAIN_TAG, "Enabling channel config test"); client.channel_config_test()?; } else { let gain: ads1299::Gain = matches .value_of("gain") .expect("Expected gain") .parse::<u32>()? 
.into(); info!(target: MAIN_TAG, "Configuring channels with gain {}", gain); client.enable_all_channels(Some(gain))?; } // Route reference electrode to SRB1: JP8:1-2, JP7:NC (not connected) // use this with humans to reduce noise info!(target: MAIN_TAG, "Enabling reference electrode SRB1"); client .wreg::<Status>(ads1299::MISC1, ads1299::SRB1 | ads1299::MISC1_const)? .assert()?; // Single-ended mode - setting SRB1 bit sends mid-supply voltage to the N inputs // use this with a signal generator // client.wreg(ads1299::MISC1, ads1299::SRB1)?; // Dual-ended mode info!(target: MAIN_TAG, "Setting dual-ended mode"); client .wreg::<Status>(ads1299::MISC1, ads1299::MISC1_const)? .assert()?; if matches.is_present("messagepack") { client.ensure_mode(Mode::MsgPack)?; } else { client.ensure_mode(Mode::JsonLines)?; } client.start()?; client.rdatac()?; let mut maybe_outlet: Option<lsl_sys::Outlet<i32>> = None; if matches.is_present("lsl") { let stream_name = matches.value_of("lsl_stream_name").unwrap(); let stream_type = "EEG"; // derive our uuid from name-type-num_channels let uuid_name = format!("{}-{}-{}", stream_name, stream_type, NUM_CHANNELS); let stream_id = uuid::Uuid::new_v5(&uuid::Uuid::NAMESPACE_OID, uuid_name.as_bytes()) .to_simple() .to_string(); let stream_info = lsl_sys::StreamInfo::<i32>::new( stream_name, stream_type, NUM_CHANNELS as i32, sps as f64, &stream_id, )?; maybe_outlet = Some(lsl_sys::Outlet::new(stream_info, 0, 360)?); } let quiet = matches.is_present("quiet"); let sigint = Arc::new(AtomicBool::new(false)); signal_hook::flag::register(signal_hook::SIGINT, Arc::clone(&sigint))?; let start = std::time::Instant::now(); let mut counter: u64 = 0; let mut errors: u64 = 0; let max_samples = match matches.value_of("samples") { Some(samples_str) => samples_str.parse::<u64>()?, None => 0, }; loop { if sigint.load(Ordering::Relaxed) { info!(target: MAIN_TAG, "Got SIGINT, breaking read loop"); break; } let resp = client.read_rdatac_response(); match resp { Err(e) 
=> { errors += 1; warn!(target: MAIN_TAG, "Error getting response: {:?}", e); continue; } Ok(sample) => { let ch = sample.channels; if !quiet { println!( "{} @ {}: [{}, {}, {}, {}, {}, {}, {}, {}]", sample.sample_number, sample.timestamp, ch[0].sample, ch[1].sample, ch[2].sample, ch[3].sample, ch[4].sample, ch[5].sample, ch[6].sample, ch[7].sample ); } if let Some(ref outlet) = maybe_outlet { outlet.push_chunk(sample.as_lsl_data().as_slice(), sample.timestamp as f64); } counter += 1; if max_samples > 0 && counter >= max_samples { info!( target: MAIN_TAG, "Reached {} samples, breaking", max_samples ); break; } } } } let elapsed = start.elapsed(); info!( target: MAIN_TAG, "{} samples ({} errors) in {} seconds, or {}/s", counter, errors, elapsed.as_secs_f32(), counter as f32 / elapsed.as_secs_f32() ); Ok(()) }
33.195489
121
0.525934
725b4f0457baa87e69274a161169eb3a8260ecad
30,154
use std::borrow::Cow; use std::collections::{HashMap, HashSet, VecDeque}; use heck::{CamelCase, SnakeCase}; use indexmap::IndexMap; use lazy_static::*; use pest::Parser as _; use pest_derive::*; use regex::Regex; use thiserror::Error; use crate::templates::motif; /// Describes errors that occured while parsing protocol specs. #[derive(Error, Debug)] pub enum ParserError { #[error("Invalid data: {}", .0)] Invalid(String), #[error("Grammar error: {}", .0)] Grammar(pest::error::Error<Rule>), } impl From<std::num::ParseIntError> for ParserError { fn from(source: std::num::ParseIntError) -> Self { Self::Invalid(source.to_string()) } } impl From<pest::error::Error<Rule>> for ParserError { fn from(source: pest::error::Error<Rule>) -> Self { Self::Grammar(source) } } #[derive(Parser)] #[grammar = "protocol.pest"] pub struct ProtocolParser; /// Holds the result of parsing the Kafka protocol html file. /// Exposes it in an easily templatable form (template::motif types). pub struct SpecParser<'a> { pub err_code_rows: motif::ErrorCodeRows, pub api_key_rows: motif::ApiKeyRows, req_resp_specs: IndexMap<String, VersionedSpecs<'a>>, } /// Vector of (version, spec, fields_doc = {f_name -> doc_string}) type VersionedSpecs<'a> = Vec<(i16, Spec<'a>, HashMap<Cow<'a, str>, String>)>; /// Represents a req/resp spec, reflects the form of its recusive BNF definition. #[derive(Debug, Clone, PartialEq)] pub enum Spec<'a> { Value(Primitive), Array(Box<Spec<'a>>), Struct(Vec<(Cow<'a, str>, Spec<'a>)>), } impl<'a> SpecParser<'a> { /// Parses raw file content and return an initialized SpecParser. pub fn new(raw: &'a str) -> Result<Self, ParserError> { let parsed_file = ProtocolParser::parse(Rule::file, &raw)? 
.next() // there is exactly one { file } .expect("Unreachable file rule"); let mut err_code_rows = vec![]; let mut api_key_rows = vec![]; let mut req_resp_specs = IndexMap::new(); for target in parsed_file.into_inner() { match target.as_rule() { // Parses the error codes table into a templatable enum motif Rule::error_codes => { err_code_rows = target .into_inner() // inner { table } .next() // there is exactly one { table } .expect("Unreachable error_codes table rule") .into_inner() // inner { tr } .into_iter() .map(|tr| { let row = tr .into_inner() // inner { td } .into_iter() .map(|td| td.into_inner().as_str()) // inner { content } .collect::<Vec<_>>(); ( // Enum variant name of the error code String::from(row[0]).to_camel_case(), // Enum value (i16) of the error code String::from(row[1]), // Error's description capped_comment( &format!("{} Retriable: {}.", row[3], yes_no(row[2])), 4, ), ) }) .collect::<Vec<_>>(); } // Parses the API keys table into a templatable enum motif Rule::api_keys => { api_key_rows = target .into_inner() // inner { table } .next() // there is exactly one { table } .expect("Unreachable api_keys table rule") .into_inner() // inner { tr } .into_iter() .map(|tr| { let row = tr .into_inner() // inner { td } .into_iter() .map(|td| { td.into_inner() // inner { a } .next() // there is exactly one { a } .expect("Unreachable api_keys a rule") .into_inner() // inner { content } .as_str() }) .collect::<Vec<_>>(); ( // Enum variant name of the API key String::from(row[0]), // Enum value (i16) of the API key String::from(row[1]), ) }) .collect::<Vec<_>>(); } // Parses all req/resp BNF definitions and description tables // into templatable versioned enum/mod/strut motifs Rule::req_resp => { let parsed_spec = ProtocolParser::parse(Rule::spec, target.as_str())? 
.next() // there is exactly one { spec } .expect("Unreachable spec rule"); let mut curr_name = None; let mut curr_version = None; let mut curr_spec = None; for section in parsed_spec.into_inner() { match section.as_rule() { Rule::content => { let (name, version, spec) = parse_struct_spec(section.as_str())?; curr_name = Some(name); curr_version = Some(version); curr_spec = Some(spec); } Rule::table => { let fields_doc = section .into_inner() // inner { td } .map(|tr| { let row = tr .into_inner() // inner { td } .into_iter() .map(|td| td.into_inner().as_str()) // inner { content } .collect::<Vec<_>>(); (clean_name(row[0]), String::from(row[1])) }) .collect::<HashMap<_, _>>(); let name = curr_name.take().expect("unreachable no name parsed"); let version = ( curr_version.take().expect("unreachable no version parsed"), curr_spec.take().expect("unreachable no spec parsed"), fields_doc, ); match req_resp_specs.get_mut(&name) { None => { req_resp_specs.insert(name, vec![version]); } Some(versions) => { versions.push(version); } }; } _ => unreachable!("No other rules"), } } } _ => (), } } Ok(SpecParser { err_code_rows, api_key_rows, req_resp_specs, }) } pub fn iter_req_resp(&self) -> impl Iterator<Item = (&String, &VersionedSpecs)> { let mut i = 0; std::iter::from_fn(move || { let req_resp = self.req_resp_specs.get_index(i); i += 1; req_resp }) } } /// Defines methods required to template a versioned req/resp's enum/mod/strucs. 
pub trait ReqRespMotif { fn enum_name(&self) -> String; fn enum_vfields(&self) -> motif::EnumVfields; fn mod_name(&self) -> String; fn mod_vstructs(&self) -> motif::ModVstructs; } impl<'a> ReqRespMotif for (&'a String, &'a VersionedSpecs<'a>) { fn enum_name(&self) -> String { self.0.clone() } fn enum_vfields(&self) -> motif::EnumVfields { /// Returns Rust type (as a String) for a given enum struct variant field fn rust_type_for( field_name: &str, field_spec: &Spec, enum_name: &str, version: &i16, ) -> String { match field_spec { Spec::Value(primitive) => primitive.rust_type(), Spec::Array(inner) => format!( "Vec<{}>", rust_type_for(field_name, &*inner, enum_name, version) ), Spec::Struct(_) => format!( "{}::v{}::{}", enum_name.to_snake_case(), version, field_name.to_camel_case() ), } } self.1 .iter() .map(|(version, spec, docs)| { let fields: motif::Fields = if let Spec::Struct(fields) = spec { fields .iter() .map(|(field_name, field_spec)| { ( field_name.to_string(), rust_type_for(field_name, field_spec, self.0, version), docs.get(field_name).map_or_else( || String::default(), |doc| capped_comment(doc, 8), ), ) }) .collect::<Vec<_>>() } else { unreachable!("All specs are Spec::Struct(_)"); }; fields }) .collect::<Vec<_>>() } fn mod_name(&self) -> String { self.0.to_snake_case() } fn mod_vstructs(&self) -> motif::ModVstructs { /// Returns Rust type (as a String) for a given struct field fn rust_type_for(field_name: &str, field_spec: &Spec) -> String { match field_spec { Spec::Value(primitive) => primitive.rust_type(), Spec::Array(inner) => format!("Vec<{}>", rust_type_for(field_name, &*inner)), Spec::Struct(_) => field_name.to_camel_case(), } } /// Returns a stack of the inner structs of a given spec. 
fn spec_deps<'a>(spec: &'a Spec<'_>) -> Vec<(String, &'a Spec<'a>)> { let mut deps = Vec::new(); let mut q = VecDeque::new(); // Initializes the specs exploration queue if let Spec::Struct(fields) = spec { for (f_name, f_spec) in fields { match f_spec { Spec::Value(_) => (), Spec::Array(inner) => q.push_back((f_name.to_camel_case(), &**inner)), Spec::Struct(_) => q.push_back((f_name.to_camel_case(), f_spec)), } } } else { unreachable!("All specs are Spec::Struct(_)"); } // Builds the specs dependencies stack while let Some((ref f_name, ref f_spec)) = q.pop_front() { match (f_name, f_spec) { (_, Spec::Value(_)) => (), (_, Spec::Array(inner)) => q.push_back((f_name.to_camel_case(), &**inner)), (_, Spec::Struct(fields)) => { for (f_name, f_spec) in fields { match f_spec { Spec::Value(_) => (), Spec::Array(inner) => { q.push_back((f_name.to_camel_case(), &**inner)) } Spec::Struct(_) => q.push_back((f_name.to_camel_case(), f_spec)), } } deps.push((f_name.to_camel_case(), &**f_spec)); } } } deps } self.1 .iter() .map(|(_version, spec, docs)| { let structs = spec_deps(spec); structs .iter() .map(|(struct_name, struct_spec)| { let struct_fields: motif::Fields = if let Spec::Struct(fields) = struct_spec { fields .iter() .map(|(field_name, field_spec)| { ( field_name.to_string(), rust_type_for(field_name, field_spec), docs.get(field_name).map_or_else( || String::default(), |doc| capped_comment(doc, 12), ), ) }) .collect::<Vec<_>>() } else { unreachable!("All specs are Spec::Struct(_)"); }; (struct_name.clone(), struct_fields) }) .collect::<Vec<_>>() }) .filter(|versions| versions.len() > 0) .collect::<Vec<_>>() } } fn yes_no(s: &str) -> String { match s { "True" => "Yes".to_string(), "False" => "No".to_string(), _ => panic!("Invalid True/False: {}", s), } } fn clean_name(s: &str) -> Cow<'_, str> { if s.contains('\'') { s.replace('\'', "").into() } else { s.into() } } fn capped_comment(text: &str, nb_indent: usize) -> String { lazy_static! 
{ static ref RE: Regex = Regex::new(r"\b.{1,70}\b\W?").expect("Invalid regex"); } let comment = if nb_indent > 0 { format!("{}///", " ".repeat(nb_indent)) } else { String::from("///") }; RE.captures_iter(text) .into_iter() .filter_map(|c| c.get(0)) .map(|c| format!("{} {}", comment, c.as_str())) .collect::<Vec<_>>() .as_slice() .join("\n") } fn parse_struct_spec<'a>(raw: &'a str) -> Result<(String, i16, Spec<'a>), ParserError> { #[derive(Debug, Clone)] enum Field<'a> { Simple(Cow<'a, str>), Array(Cow<'a, str>), } impl<'a> Field<'a> { fn new(name: &str) -> Field { if name.chars().nth(0).expect("no first char") == '[' && name.chars().last().expect("no last char") == ']' { Field::Array(clean_name(&name[1..name.len() - 1])) } else { Field::Simple(clean_name(name)) } } } #[derive(Debug, Clone)] enum Kind<'a> { Value(Primitive), Array(Primitive), Struct(Vec<Field<'a>>), } impl<'a> Kind<'a> { fn for_root(raw: &str) -> Kind { let fields = raw .split(' ') .filter(|s| *s != "") .collect::<Vec<_>>() .iter() .map(|name| Field::new(name)) .collect::<Vec<_>>(); Kind::Struct(fields) } fn for_field(raw: &str) -> Kind { lazy_static! 
{ static ref ARRAY: Regex = Regex::new(r"ARRAY\((.+?)\)").expect("Invalid regex"); } let kind = raw.split(' ').filter(|s| *s != "").collect::<Vec<_>>(); if kind.len() == 1 { let field = kind[0]; if Primitive::is_valid(field) { Kind::Value(Primitive::from(field)) } else if ARRAY.is_match(field) { let inner = ARRAY .captures(field) .expect("unreachable field kind parsing") .get(1) .expect("unreachable field kind capture") .as_str(); if Primitive::is_valid(inner) { Kind::Array(Primitive::from(inner)) } else { Kind::Struct(vec![Field::new(inner)]) } } else { Kind::Struct(vec![Field::new(field)]) } } else { let fields = kind.iter().map(|name| Field::new(name)).collect::<Vec<_>>(); Kind::Struct(fields) } } } #[derive(Debug, Clone)] struct Line<'a> { name: Cow<'a, str>, kind: Kind<'a>, } fn insert_spec<'a>( mut specs: HashMap<Cow<'a, str>, Spec<'a>>, line: Line<'a>, ) -> Result<HashMap<Cow<'a, str>, Spec<'a>>, ParserError> { match line { Line { kind: Kind::Value(primitive), name, .. } => { specs.insert(name.into(), Spec::Value(primitive)); } Line { kind: Kind::Array(primitive), name, .. } => { specs.insert(name.into(), Spec::Array(Box::new(Spec::Value(primitive)))); } Line { kind: Kind::Struct(fields), name, .. } => { let mut inner_specs = vec![]; for field in fields { match field { Field::Simple(ref name) => { let spec = specs.get(name).ok_or_else(|| { ParserError::Invalid(format!("Missing spec for field: {}", name)) })?; inner_specs.push((name.clone(), spec.clone())); } Field::Array(ref name) => { let spec = specs.get(name).ok_or_else(|| { ParserError::Invalid(format!("Missing spec for field: {}", name)) })?; inner_specs.push((name.clone(), Spec::Array(Box::new(spec.clone())))); } } } specs.insert(name.into(), Spec::Struct(inner_specs)); } }; Ok(specs) } lazy_static! 
{ static ref HEADER: Regex = Regex::new(r"(\w+) (\w+) \(Version: (\d+)\) =>(.*)").expect("Invalid regex"); } let raw_lines = raw.split('\n').collect::<Vec<_>>(); let (first, rest) = raw_lines.split_first().expect("Unreachable split fail"); let header = HEADER.captures(first).ok_or_else(|| { ParserError::Invalid(format!("First line didn't match: {:?} {}", *HEADER, first)) })?; let (name, version) = match (header.get(1), header.get(2), header.get(3)) { (Some(name), Some(genre), Some(version)) => { let version: i16 = version.as_str().parse()?; let name = format!("{}{}", name.as_str(), genre.as_str()); (name, version) } _ => return Err(ParserError::Invalid(format!("Invalid name match: {:?}", header)).into()), }; let root = Kind::for_root(header.get(4).map_or("", |m| m.as_str().trim())); let mut lines = rest .to_vec() .iter() .filter(|s| **s != "") .map(|s| { let parts = s.split(" =>").collect::<Vec<_>>(); let name = clean_name(parts.get(0).expect(&format!("Invalid line: {}", s)).trim()); let kind = Kind::for_field(parts.get(1).expect(&format!("Invalid line: {}", s))); Line { name, kind } }) .collect::<Vec<_>>(); let mut fields_spec = HashMap::new(); lines.reverse(); for line in lines { fields_spec = insert_spec(fields_spec, line.clone())?; } let mut specs = vec![]; if let Kind::Struct(fields) = root { for field in fields { match field { Field::Simple(ref name) => { let field_spec = fields_spec.get(name).ok_or_else(|| { ParserError::Invalid(format!("Missing spec for root field: {}", name)) })?; specs.push((name.clone(), field_spec.clone())); } Field::Array(ref name) => { let field_spec = fields_spec.get(name).ok_or_else(|| { ParserError::Invalid(format!("Missing spec for root field: {}", name)) })?; specs.push((name.clone(), Spec::Array(Box::new(field_spec.clone())))); } } } } Ok((name, version, Spec::Struct(specs))) } #[derive(Debug, Clone, Copy, PartialEq)] pub enum Primitive { /// Represents a boolean value in a byte. 
Values 0 and 1 are used to /// represent false and true respectively. When reading a boolean value, /// any non-zero value is considered true. Boolean, /// Represents an integer between -2^7 and 2^7-1 inclusive. Int8, /// Represents an integer between -2^15 and 2^15-1 inclusive. /// The values are encoded using two bytes in network byte order (big-endian). Int16, /// Represents an integer between -2^31 and 2^31-1 inclusive. /// The values are encoded using four bytes in network byte order (big-endian). Int32, /// Represents an integer between -2^63 and 2^63-1 inclusive. /// The values are encoded using eight bytes in network byte order (big-endian). Int64, /// Represents an integer between 0 and 2^32-1 inclusive. /// The values are encoded using four bytes in network byte order (big-endian). Uint32, /// Represents an integer between -2^31 and 2^31-1 inclusive. /// Encoding follows the variable-length zig-zag encoding from Google Protocol Buffers. Varint, /// Represents an integer between -2^63 and 2^63-1 inclusive. /// Encoding follows the variable-length zig-zag encoding from Google Protocol Buffers. Varlong, /// Represents a sequence of characters. First the length N is given as an INT16. /// Then N bytes follow which are the UTF-8 encoding of the character sequence. /// Length must not be negative. String, /// Represents a sequence of characters or null. For non-null strings, /// first the length N is given as an INT16. Then N bytes follow which are /// the UTF-8 encoding of the character sequence. A null value is encoded with /// length of -1 and there are no following bytes. NullableString, /// Represents a raw sequence of bytes. First the length N is given as an INT32. /// Then N bytes follow. Bytes, /// Represents a raw sequence of bytes or null. For non-null values, /// first the length N is given as an INT32. Then N bytes follow. /// A null value is encoded with length of -1 and there are no following bytes. 
NullableBytes, /// Represents a sequence of Kafka records as NULLABLE_BYTES. Records, } impl Primitive { fn from(raw: &str) -> Primitive { match raw { "BOOLEAN" => Primitive::Boolean, "INT8" => Primitive::Int8, "INT16" => Primitive::Int16, "INT32" => Primitive::Int32, "INT64" => Primitive::Int64, "UINT32" => Primitive::Uint32, "VARINT" => Primitive::Varint, "VARLONG" => Primitive::Varlong, "STRING" => Primitive::String, "NULLABLE_STRING" => Primitive::NullableString, "BYTES" => Primitive::Bytes, "NULLABLE_BYTES" => Primitive::NullableBytes, "RECORDS" => Primitive::Records, _ => unreachable!("Unknown primitive: {}", raw), } } fn is_valid(raw: &str) -> bool { lazy_static! { static ref VALIDS: HashSet<String> = { let s: HashSet<_> = vec![ "BOOLEAN", "INT8", "INT16", "INT32", "INT64", "UINT32", "VARINT", "VARLONG", "STRING", "NULLABLE_STRING", "BYTES", "NULLABLE_BYTES", "RECORDS", ] .iter() .map(|s| s.to_string()) .collect(); s }; } VALIDS.contains(raw) } fn rust_type(&self) -> String { match *self { Primitive::Boolean => "bool".to_string(), Primitive::Int8 => "i8".to_string(), Primitive::Int16 => "i16".to_string(), Primitive::Int32 => "i32".to_string(), Primitive::Int64 => "i64".to_string(), Primitive::Uint32 => "u32".to_string(), Primitive::Varint => "crate::types::Varint".to_string(), Primitive::Varlong => "crate::types::Varlong".to_string(), Primitive::String => "String".to_string(), Primitive::NullableString => "crate::types::NullableString".to_string(), Primitive::Bytes => "crate::types::Bytes".to_string(), Primitive::NullableBytes => "crate::types::NullableBytes".to_string(), Primitive::Records => "crate::types::NullableBytes".to_string(), } } } #[cfg(test)] mod tests { use super::*; #[test] #[ignore] fn parse_error_codes() { let raw = include_str!("protocol.html"); let parser = SpecParser::new(raw).unwrap(); for row in parser.err_code_rows { println!("{:?}", row); } } #[test] #[ignore] fn parse_api_keys() { let raw = include_str!("protocol.html"); let parser = 
SpecParser::new(raw).unwrap(); for row in parser.api_key_rows { println!("{:?}", row); } } #[test] #[ignore] fn parse_req_resp() { let raw = include_str!("protocol.html"); let parser = SpecParser::new(raw).unwrap(); println!("{:?}", parser.req_resp_specs.get_index(0)); println!( "{:?}", parser .req_resp_specs .get_index(parser.req_resp_specs.len() - 1) ); } #[test] #[ignore] fn parse_enum_vfields() { let raw = include_str!("protocol.html"); let parser = SpecParser::new(raw).unwrap(); let mut it = parser.iter_req_resp(); let req_resp = it.next().unwrap(); let vfields = req_resp.enum_vfields(); println!("{:?}", req_resp.0); println!("{:?}", req_resp.1.get(0).unwrap()); println!("{:?}", vfields); } #[test] #[ignore] fn parse_mod_vstructs() { let raw = include_str!("protocol.html"); let parser = SpecParser::new(raw).unwrap(); let mut it = parser.iter_req_resp(); let req_resp = it.next().unwrap(); let vstructs = req_resp.mod_vstructs(); println!("{:?}", req_resp.0.to_snake_case()); println!("{:?}", req_resp.1.get(0).unwrap()); println!("{:?}", vstructs); } #[test] fn parse_spec() { use super::Spec::*; let raw = "CreateTopics Request (Version: 0) => [create_topic_requests] timeout create_topic_requests => topic num_partitions replication_factor [replica_assignment] [config_entries] topic => STRING num_partitions => INT32 replication_factor => INT16 replica_assignment => partition [replicas] partition => INT32 replicas => INT32 config_entries => config_name config_value config_name => STRING config_value => NULLABLE_STRING timeout => INT32"; let (name, version, spec) = parse_struct_spec(raw).unwrap(); assert_eq!("CreateTopicsRequest", name); assert_eq!(0, version); assert_eq!( Struct(vec![ ( "create_topic_requests".into(), Array(Box::new(Struct(vec![ ("topic".into(), Value(Primitive::String)), ("num_partitions".into(), Value(Primitive::Int32)), ("replication_factor".into(), Value(Primitive::Int16)), ( "replica_assignment".into(), Array(Box::new(Struct(vec![ 
("partition".into(), Value(Primitive::Int32)), ("replicas".into(), Array(Box::new(Value(Primitive::Int32)))) ]))) ), ( "config_entries".into(), Array(Box::new(Struct(vec![ ("config_name".into(), Value(Primitive::String)), ("config_value".into(), Value(Primitive::NullableString)) ]))) ) ]))) ), ("timeout".into(), Value(Primitive::Int32)) ]), spec ); } }
37.22716
104
0.443059
1c76c31e1a7ff9131311928e181e41bbef1679b0
39,936
use crate::expand::{self, AstFragment, Invocation}; use crate::module::DirectoryOwnership; use rustc_ast::ptr::P; use rustc_ast::token; use rustc_ast::tokenstream::TokenStream; use rustc_ast::visit::{AssocCtxt, Visitor}; use rustc_ast::{self as ast, Attribute, NodeId, PatKind}; use rustc_attr::{self as attr, Deprecation, HasAttrs, Stability}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::{self, Lrc}; use rustc_errors::{DiagnosticBuilder, ErrorReported}; use rustc_parse::{self, nt_to_tokenstream, parser, MACRO_ARGUMENTS}; use rustc_session::{parse::ParseSess, Limit, Session}; use rustc_span::def_id::{DefId, LOCAL_CRATE}; use rustc_span::edition::Edition; use rustc_span::hygiene::{AstPass, ExpnData, ExpnId, ExpnKind}; use rustc_span::source_map::SourceMap; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{FileName, MultiSpan, Span, DUMMY_SP}; use smallvec::{smallvec, SmallVec}; use std::default::Default; use std::iter; use std::path::PathBuf; use std::rc::Rc; crate use rustc_span::hygiene::MacroKind; #[derive(Debug, Clone)] pub enum Annotatable { Item(P<ast::Item>), TraitItem(P<ast::AssocItem>), ImplItem(P<ast::AssocItem>), ForeignItem(P<ast::ForeignItem>), Stmt(P<ast::Stmt>), Expr(P<ast::Expr>), Arm(ast::Arm), Field(ast::Field), FieldPat(ast::FieldPat), GenericParam(ast::GenericParam), Param(ast::Param), StructField(ast::StructField), Variant(ast::Variant), } impl HasAttrs for Annotatable { fn attrs(&self) -> &[Attribute] { match *self { Annotatable::Item(ref item) => &item.attrs, Annotatable::TraitItem(ref trait_item) => &trait_item.attrs, Annotatable::ImplItem(ref impl_item) => &impl_item.attrs, Annotatable::ForeignItem(ref foreign_item) => &foreign_item.attrs, Annotatable::Stmt(ref stmt) => stmt.attrs(), Annotatable::Expr(ref expr) => &expr.attrs, Annotatable::Arm(ref arm) => &arm.attrs, Annotatable::Field(ref field) => &field.attrs, Annotatable::FieldPat(ref fp) => &fp.attrs, Annotatable::GenericParam(ref gp) => 
&gp.attrs, Annotatable::Param(ref p) => &p.attrs, Annotatable::StructField(ref sf) => &sf.attrs, Annotatable::Variant(ref v) => &v.attrs(), } } fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) { match self { Annotatable::Item(item) => item.visit_attrs(f), Annotatable::TraitItem(trait_item) => trait_item.visit_attrs(f), Annotatable::ImplItem(impl_item) => impl_item.visit_attrs(f), Annotatable::ForeignItem(foreign_item) => foreign_item.visit_attrs(f), Annotatable::Stmt(stmt) => stmt.visit_attrs(f), Annotatable::Expr(expr) => expr.visit_attrs(f), Annotatable::Arm(arm) => arm.visit_attrs(f), Annotatable::Field(field) => field.visit_attrs(f), Annotatable::FieldPat(fp) => fp.visit_attrs(f), Annotatable::GenericParam(gp) => gp.visit_attrs(f), Annotatable::Param(p) => p.visit_attrs(f), Annotatable::StructField(sf) => sf.visit_attrs(f), Annotatable::Variant(v) => v.visit_attrs(f), } } } impl Annotatable { pub fn span(&self) -> Span { match *self { Annotatable::Item(ref item) => item.span, Annotatable::TraitItem(ref trait_item) => trait_item.span, Annotatable::ImplItem(ref impl_item) => impl_item.span, Annotatable::ForeignItem(ref foreign_item) => foreign_item.span, Annotatable::Stmt(ref stmt) => stmt.span, Annotatable::Expr(ref expr) => expr.span, Annotatable::Arm(ref arm) => arm.span, Annotatable::Field(ref field) => field.span, Annotatable::FieldPat(ref fp) => fp.pat.span, Annotatable::GenericParam(ref gp) => gp.ident.span, Annotatable::Param(ref p) => p.span, Annotatable::StructField(ref sf) => sf.span, Annotatable::Variant(ref v) => v.span, } } pub fn visit_with<'a, V: Visitor<'a>>(&'a self, visitor: &mut V) { match self { Annotatable::Item(item) => visitor.visit_item(item), Annotatable::TraitItem(item) => visitor.visit_assoc_item(item, AssocCtxt::Trait), Annotatable::ImplItem(item) => visitor.visit_assoc_item(item, AssocCtxt::Impl), Annotatable::ForeignItem(foreign_item) => visitor.visit_foreign_item(foreign_item), Annotatable::Stmt(stmt) => 
visitor.visit_stmt(stmt), Annotatable::Expr(expr) => visitor.visit_expr(expr), Annotatable::Arm(arm) => visitor.visit_arm(arm), Annotatable::Field(field) => visitor.visit_field(field), Annotatable::FieldPat(fp) => visitor.visit_field_pattern(fp), Annotatable::GenericParam(gp) => visitor.visit_generic_param(gp), Annotatable::Param(p) => visitor.visit_param(p), Annotatable::StructField(sf) => visitor.visit_struct_field(sf), Annotatable::Variant(v) => visitor.visit_variant(v), } } crate fn into_tokens(self, sess: &ParseSess) -> TokenStream { let nt = match self { Annotatable::Item(item) => token::NtItem(item), Annotatable::TraitItem(item) | Annotatable::ImplItem(item) => { token::NtItem(P(item.and_then(ast::AssocItem::into_item))) } Annotatable::ForeignItem(item) => { token::NtItem(P(item.and_then(ast::ForeignItem::into_item))) } Annotatable::Stmt(stmt) => token::NtStmt(stmt.into_inner()), Annotatable::Expr(expr) => token::NtExpr(expr), Annotatable::Arm(..) | Annotatable::Field(..) | Annotatable::FieldPat(..) | Annotatable::GenericParam(..) | Annotatable::Param(..) | Annotatable::StructField(..) | Annotatable::Variant(..) 
=> panic!("unexpected annotatable"), }; nt_to_tokenstream(&nt, sess, DUMMY_SP) } pub fn expect_item(self) -> P<ast::Item> { match self { Annotatable::Item(i) => i, _ => panic!("expected Item"), } } pub fn expect_trait_item(self) -> P<ast::AssocItem> { match self { Annotatable::TraitItem(i) => i, _ => panic!("expected Item"), } } pub fn expect_impl_item(self) -> P<ast::AssocItem> { match self { Annotatable::ImplItem(i) => i, _ => panic!("expected Item"), } } pub fn expect_foreign_item(self) -> P<ast::ForeignItem> { match self { Annotatable::ForeignItem(i) => i, _ => panic!("expected foreign item"), } } pub fn expect_stmt(self) -> ast::Stmt { match self { Annotatable::Stmt(stmt) => stmt.into_inner(), _ => panic!("expected statement"), } } pub fn expect_expr(self) -> P<ast::Expr> { match self { Annotatable::Expr(expr) => expr, _ => panic!("expected expression"), } } pub fn expect_arm(self) -> ast::Arm { match self { Annotatable::Arm(arm) => arm, _ => panic!("expected match arm"), } } pub fn expect_field(self) -> ast::Field { match self { Annotatable::Field(field) => field, _ => panic!("expected field"), } } pub fn expect_field_pattern(self) -> ast::FieldPat { match self { Annotatable::FieldPat(fp) => fp, _ => panic!("expected field pattern"), } } pub fn expect_generic_param(self) -> ast::GenericParam { match self { Annotatable::GenericParam(gp) => gp, _ => panic!("expected generic parameter"), } } pub fn expect_param(self) -> ast::Param { match self { Annotatable::Param(param) => param, _ => panic!("expected parameter"), } } pub fn expect_struct_field(self) -> ast::StructField { match self { Annotatable::StructField(sf) => sf, _ => panic!("expected struct field"), } } pub fn expect_variant(self) -> ast::Variant { match self { Annotatable::Variant(v) => v, _ => panic!("expected variant"), } } pub fn derive_allowed(&self) -> bool { match *self { Annotatable::Item(ref item) => match item.kind { ast::ItemKind::Struct(..) | ast::ItemKind::Enum(..) 
| ast::ItemKind::Union(..) => { true } _ => false, }, _ => false, } } } /// Result of an expansion that may need to be retried. /// Consider using this for non-`MultiItemModifier` expanders as well. pub enum ExpandResult<T, U> { /// Expansion produced a result (possibly dummy). Ready(T), /// Expansion could not produce a result and needs to be retried. Retry(U), } // `meta_item` is the attribute, and `item` is the item being modified. pub trait MultiItemModifier { fn expand( &self, ecx: &mut ExtCtxt<'_>, span: Span, meta_item: &ast::MetaItem, item: Annotatable, ) -> ExpandResult<Vec<Annotatable>, Annotatable>; } impl<F> MultiItemModifier for F where F: Fn(&mut ExtCtxt<'_>, Span, &ast::MetaItem, Annotatable) -> Vec<Annotatable>, { fn expand( &self, ecx: &mut ExtCtxt<'_>, span: Span, meta_item: &ast::MetaItem, item: Annotatable, ) -> ExpandResult<Vec<Annotatable>, Annotatable> { ExpandResult::Ready(self(ecx, span, meta_item, item)) } } pub trait ProcMacro { fn expand<'cx>( &self, ecx: &'cx mut ExtCtxt<'_>, span: Span, ts: TokenStream, ) -> Result<TokenStream, ErrorReported>; } impl<F> ProcMacro for F where F: Fn(TokenStream) -> TokenStream, { fn expand<'cx>( &self, _ecx: &'cx mut ExtCtxt<'_>, _span: Span, ts: TokenStream, ) -> Result<TokenStream, ErrorReported> { // FIXME setup implicit context in TLS before calling self. Ok(self(ts)) } } pub trait AttrProcMacro { fn expand<'cx>( &self, ecx: &'cx mut ExtCtxt<'_>, span: Span, annotation: TokenStream, annotated: TokenStream, ) -> Result<TokenStream, ErrorReported>; } impl<F> AttrProcMacro for F where F: Fn(TokenStream, TokenStream) -> TokenStream, { fn expand<'cx>( &self, _ecx: &'cx mut ExtCtxt<'_>, _span: Span, annotation: TokenStream, annotated: TokenStream, ) -> Result<TokenStream, ErrorReported> { // FIXME setup implicit context in TLS before calling self. 
Ok(self(annotation, annotated)) } } /// Represents a thing that maps token trees to Macro Results pub trait TTMacroExpander { fn expand<'cx>( &self, ecx: &'cx mut ExtCtxt<'_>, span: Span, input: TokenStream, ) -> Box<dyn MacResult + 'cx>; } pub type MacroExpanderFn = for<'cx> fn(&'cx mut ExtCtxt<'_>, Span, TokenStream) -> Box<dyn MacResult + 'cx>; impl<F> TTMacroExpander for F where F: for<'cx> Fn(&'cx mut ExtCtxt<'_>, Span, TokenStream) -> Box<dyn MacResult + 'cx>, { fn expand<'cx>( &self, ecx: &'cx mut ExtCtxt<'_>, span: Span, input: TokenStream, ) -> Box<dyn MacResult + 'cx> { self(ecx, span, input) } } // Use a macro because forwarding to a simple function has type system issues macro_rules! make_stmts_default { ($me:expr) => { $me.make_expr().map(|e| { smallvec![ast::Stmt { id: ast::DUMMY_NODE_ID, span: e.span, kind: ast::StmtKind::Expr(e), tokens: None }] }) }; } /// The result of a macro expansion. The return values of the various /// methods are spliced into the AST at the callsite of the macro. pub trait MacResult { /// Creates an expression. fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> { None } /// Creates zero or more items. fn make_items(self: Box<Self>) -> Option<SmallVec<[P<ast::Item>; 1]>> { None } /// Creates zero or more impl items. fn make_impl_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { None } /// Creates zero or more trait items. fn make_trait_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { None } /// Creates zero or more items in an `extern {}` block fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[P<ast::ForeignItem>; 1]>> { None } /// Creates a pattern. fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> { None } /// Creates zero or more statements. /// /// By default this attempts to create an expression statement, /// returning None if that fails. 
fn make_stmts(self: Box<Self>) -> Option<SmallVec<[ast::Stmt; 1]>> { make_stmts_default!(self) } fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> { None } fn make_arms(self: Box<Self>) -> Option<SmallVec<[ast::Arm; 1]>> { None } fn make_fields(self: Box<Self>) -> Option<SmallVec<[ast::Field; 1]>> { None } fn make_field_patterns(self: Box<Self>) -> Option<SmallVec<[ast::FieldPat; 1]>> { None } fn make_generic_params(self: Box<Self>) -> Option<SmallVec<[ast::GenericParam; 1]>> { None } fn make_params(self: Box<Self>) -> Option<SmallVec<[ast::Param; 1]>> { None } fn make_struct_fields(self: Box<Self>) -> Option<SmallVec<[ast::StructField; 1]>> { None } fn make_variants(self: Box<Self>) -> Option<SmallVec<[ast::Variant; 1]>> { None } } macro_rules! make_MacEager { ( $( $fld:ident: $t:ty, )* ) => { /// `MacResult` implementation for the common case where you've already /// built each form of AST that you might return. #[derive(Default)] pub struct MacEager { $( pub $fld: Option<$t>, )* } impl MacEager { $( pub fn $fld(v: $t) -> Box<dyn MacResult> { Box::new(MacEager { $fld: Some(v), ..Default::default() }) } )* } } } make_MacEager! 
{ expr: P<ast::Expr>, pat: P<ast::Pat>, items: SmallVec<[P<ast::Item>; 1]>, impl_items: SmallVec<[P<ast::AssocItem>; 1]>, trait_items: SmallVec<[P<ast::AssocItem>; 1]>, foreign_items: SmallVec<[P<ast::ForeignItem>; 1]>, stmts: SmallVec<[ast::Stmt; 1]>, ty: P<ast::Ty>, } impl MacResult for MacEager { fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> { self.expr } fn make_items(self: Box<Self>) -> Option<SmallVec<[P<ast::Item>; 1]>> { self.items } fn make_impl_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { self.impl_items } fn make_trait_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { self.trait_items } fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[P<ast::ForeignItem>; 1]>> { self.foreign_items } fn make_stmts(self: Box<Self>) -> Option<SmallVec<[ast::Stmt; 1]>> { match self.stmts.as_ref().map_or(0, |s| s.len()) { 0 => make_stmts_default!(self), _ => self.stmts, } } fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> { if let Some(p) = self.pat { return Some(p); } if let Some(e) = self.expr { if let ast::ExprKind::Lit(_) = e.kind { return Some(P(ast::Pat { id: ast::DUMMY_NODE_ID, span: e.span, kind: PatKind::Lit(e), tokens: None, })); } } None } fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> { self.ty } } /// Fill-in macro expansion result, to allow compilation to continue /// after hitting errors. #[derive(Copy, Clone)] pub struct DummyResult { is_error: bool, span: Span, } impl DummyResult { /// Creates a default MacResult that can be anything. /// /// Use this as a return value after hitting any errors and /// calling `span_err`. pub fn any(span: Span) -> Box<dyn MacResult + 'static> { Box::new(DummyResult { is_error: true, span }) } /// Same as `any`, but must be a valid fragment, not error. pub fn any_valid(span: Span) -> Box<dyn MacResult + 'static> { Box::new(DummyResult { is_error: false, span }) } /// A plain dummy expression. 
pub fn raw_expr(sp: Span, is_error: bool) -> P<ast::Expr> { P(ast::Expr { id: ast::DUMMY_NODE_ID, kind: if is_error { ast::ExprKind::Err } else { ast::ExprKind::Tup(Vec::new()) }, span: sp, attrs: ast::AttrVec::new(), tokens: None, }) } /// A plain dummy pattern. pub fn raw_pat(sp: Span) -> ast::Pat { ast::Pat { id: ast::DUMMY_NODE_ID, kind: PatKind::Wild, span: sp, tokens: None } } /// A plain dummy type. pub fn raw_ty(sp: Span, is_error: bool) -> P<ast::Ty> { P(ast::Ty { id: ast::DUMMY_NODE_ID, kind: if is_error { ast::TyKind::Err } else { ast::TyKind::Tup(Vec::new()) }, span: sp, tokens: None, }) } } impl MacResult for DummyResult { fn make_expr(self: Box<DummyResult>) -> Option<P<ast::Expr>> { Some(DummyResult::raw_expr(self.span, self.is_error)) } fn make_pat(self: Box<DummyResult>) -> Option<P<ast::Pat>> { Some(P(DummyResult::raw_pat(self.span))) } fn make_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::Item>; 1]>> { Some(SmallVec::new()) } fn make_impl_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { Some(SmallVec::new()) } fn make_trait_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { Some(SmallVec::new()) } fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[P<ast::ForeignItem>; 1]>> { Some(SmallVec::new()) } fn make_stmts(self: Box<DummyResult>) -> Option<SmallVec<[ast::Stmt; 1]>> { Some(smallvec![ast::Stmt { id: ast::DUMMY_NODE_ID, kind: ast::StmtKind::Expr(DummyResult::raw_expr(self.span, self.is_error)), span: self.span, tokens: None }]) } fn make_ty(self: Box<DummyResult>) -> Option<P<ast::Ty>> { Some(DummyResult::raw_ty(self.span, self.is_error)) } fn make_arms(self: Box<DummyResult>) -> Option<SmallVec<[ast::Arm; 1]>> { Some(SmallVec::new()) } fn make_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::Field; 1]>> { Some(SmallVec::new()) } fn make_field_patterns(self: Box<DummyResult>) -> Option<SmallVec<[ast::FieldPat; 1]>> { Some(SmallVec::new()) } fn 
make_generic_params(self: Box<DummyResult>) -> Option<SmallVec<[ast::GenericParam; 1]>> { Some(SmallVec::new()) } fn make_params(self: Box<DummyResult>) -> Option<SmallVec<[ast::Param; 1]>> { Some(SmallVec::new()) } fn make_struct_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::StructField; 1]>> { Some(SmallVec::new()) } fn make_variants(self: Box<DummyResult>) -> Option<SmallVec<[ast::Variant; 1]>> { Some(SmallVec::new()) } } /// A syntax extension kind. pub enum SyntaxExtensionKind { /// A token-based function-like macro. Bang( /// An expander with signature TokenStream -> TokenStream. Box<dyn ProcMacro + sync::Sync + sync::Send>, ), /// An AST-based function-like macro. LegacyBang( /// An expander with signature TokenStream -> AST. Box<dyn TTMacroExpander + sync::Sync + sync::Send>, ), /// A token-based attribute macro. Attr( /// An expander with signature (TokenStream, TokenStream) -> TokenStream. /// The first TokenSteam is the attribute itself, the second is the annotated item. /// The produced TokenSteam replaces the input TokenSteam. Box<dyn AttrProcMacro + sync::Sync + sync::Send>, ), /// An AST-based attribute macro. LegacyAttr( /// An expander with signature (AST, AST) -> AST. /// The first AST fragment is the attribute itself, the second is the annotated item. /// The produced AST fragment replaces the input AST fragment. Box<dyn MultiItemModifier + sync::Sync + sync::Send>, ), /// A trivial attribute "macro" that does nothing, /// only keeps the attribute and marks it as inert, /// thus making it ineligible for further expansion. NonMacroAttr { /// Suppresses the `unused_attributes` lint for this attribute. mark_used: bool, }, /// A token-based derive macro. Derive( /// An expander with signature TokenStream -> TokenStream (not yet). /// The produced TokenSteam is appended to the input TokenSteam. Box<dyn MultiItemModifier + sync::Sync + sync::Send>, ), /// An AST-based derive macro. LegacyDerive( /// An expander with signature AST -> AST. 
/// The produced AST fragment is appended to the input AST fragment. Box<dyn MultiItemModifier + sync::Sync + sync::Send>, ), } /// A struct representing a macro definition in "lowered" form ready for expansion. pub struct SyntaxExtension { /// A syntax extension kind. pub kind: SyntaxExtensionKind, /// Span of the macro definition. pub span: Span, /// List of unstable features that are treated as stable inside this macro. pub allow_internal_unstable: Option<Lrc<[Symbol]>>, /// Suppresses the `unsafe_code` lint for code produced by this macro. pub allow_internal_unsafe: bool, /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`) for this macro. pub local_inner_macros: bool, /// The macro's stability info. pub stability: Option<Stability>, /// The macro's deprecation info. pub deprecation: Option<Deprecation>, /// Names of helper attributes registered by this macro. pub helper_attrs: Vec<Symbol>, /// Edition of the crate in which this macro is defined. pub edition: Edition, /// Built-in macros have a couple of special properties like availability /// in `#[no_implicit_prelude]` modules, so we have to keep this flag. pub is_builtin: bool, /// We have to identify macros providing a `Copy` impl early for compatibility reasons. pub is_derive_copy: bool, } impl SyntaxExtension { /// Returns which kind of macro calls this syntax extension. pub fn macro_kind(&self) -> MacroKind { match self.kind { SyntaxExtensionKind::Bang(..) | SyntaxExtensionKind::LegacyBang(..) => MacroKind::Bang, SyntaxExtensionKind::Attr(..) | SyntaxExtensionKind::LegacyAttr(..) | SyntaxExtensionKind::NonMacroAttr { .. } => MacroKind::Attr, SyntaxExtensionKind::Derive(..) | SyntaxExtensionKind::LegacyDerive(..) => { MacroKind::Derive } } } /// Constructs a syntax extension with default properties. 
pub fn default(kind: SyntaxExtensionKind, edition: Edition) -> SyntaxExtension { SyntaxExtension { span: DUMMY_SP, allow_internal_unstable: None, allow_internal_unsafe: false, local_inner_macros: false, stability: None, deprecation: None, helper_attrs: Vec::new(), edition, is_builtin: false, is_derive_copy: false, kind, } } /// Constructs a syntax extension with the given properties /// and other properties converted from attributes. pub fn new( sess: &Session, kind: SyntaxExtensionKind, span: Span, helper_attrs: Vec<Symbol>, edition: Edition, name: Symbol, attrs: &[ast::Attribute], ) -> SyntaxExtension { let allow_internal_unstable = attr::allow_internal_unstable(sess, &attrs) .map(|features| features.collect::<Vec<Symbol>>().into()); let mut local_inner_macros = false; if let Some(macro_export) = sess.find_by_name(attrs, sym::macro_export) { if let Some(l) = macro_export.meta_item_list() { local_inner_macros = attr::list_contains_name(&l, sym::local_inner_macros); } } let is_builtin = sess.contains_name(attrs, sym::rustc_builtin_macro); let (stability, const_stability) = attr::find_stability(&sess, attrs, span); if const_stability.is_some() { sess.parse_sess .span_diagnostic .span_err(span, "macros cannot have const stability attributes"); } SyntaxExtension { kind, span, allow_internal_unstable, allow_internal_unsafe: sess.contains_name(attrs, sym::allow_internal_unsafe), local_inner_macros, stability, deprecation: attr::find_deprecation(&sess, attrs).map(|(d, _)| d), helper_attrs, edition, is_builtin, is_derive_copy: is_builtin && name == sym::Copy, } } pub fn dummy_bang(edition: Edition) -> SyntaxExtension { fn expander<'cx>( _: &'cx mut ExtCtxt<'_>, span: Span, _: TokenStream, ) -> Box<dyn MacResult + 'cx> { DummyResult::any(span) } SyntaxExtension::default(SyntaxExtensionKind::LegacyBang(Box::new(expander)), edition) } pub fn dummy_derive(edition: Edition) -> SyntaxExtension { fn expander( _: &mut ExtCtxt<'_>, _: Span, _: &ast::MetaItem, _: Annotatable, ) -> 
Vec<Annotatable> { Vec::new() } SyntaxExtension::default(SyntaxExtensionKind::Derive(Box::new(expander)), edition) } pub fn non_macro_attr(mark_used: bool, edition: Edition) -> SyntaxExtension { SyntaxExtension::default(SyntaxExtensionKind::NonMacroAttr { mark_used }, edition) } pub fn expn_data( &self, parent: ExpnId, call_site: Span, descr: Symbol, macro_def_id: Option<DefId>, ) -> ExpnData { ExpnData { kind: ExpnKind::Macro(self.macro_kind(), descr), parent, call_site, def_site: self.span, allow_internal_unstable: self.allow_internal_unstable.clone(), allow_internal_unsafe: self.allow_internal_unsafe, local_inner_macros: self.local_inner_macros, edition: self.edition, macro_def_id, krate: LOCAL_CRATE, orig_id: None, } } } /// Result of resolving a macro invocation. pub enum InvocationRes { Single(Lrc<SyntaxExtension>), DeriveContainer(Vec<Lrc<SyntaxExtension>>), } /// Error type that denotes indeterminacy. pub struct Indeterminate; pub trait ResolverExpand { fn next_node_id(&mut self) -> NodeId; fn resolve_dollar_crates(&mut self); fn visit_ast_fragment_with_placeholders(&mut self, expn_id: ExpnId, fragment: &AstFragment); fn register_builtin_macro(&mut self, ident: Ident, ext: SyntaxExtension); fn expansion_for_ast_pass( &mut self, call_site: Span, pass: AstPass, features: &[Symbol], parent_module_id: Option<NodeId>, ) -> ExpnId; fn resolve_imports(&mut self); fn resolve_macro_invocation( &mut self, invoc: &Invocation, eager_expansion_root: ExpnId, force: bool, ) -> Result<InvocationRes, Indeterminate>; fn check_unused_macros(&mut self); /// Some parent node that is close enough to the given macro call. fn lint_node_id(&mut self, expn_id: ExpnId) -> NodeId; // Resolver interfaces for specific built-in macros. /// Does `#[derive(...)]` attribute with the given `ExpnId` have built-in `Copy` inside it? fn has_derive_copy(&self, expn_id: ExpnId) -> bool; /// Path resolution logic for `#[cfg_accessible(path)]`. 
fn cfg_accessible(&mut self, expn_id: ExpnId, path: &ast::Path) -> Result<bool, Indeterminate>; } #[derive(Clone)] pub struct ModuleData { pub mod_path: Vec<Ident>, pub directory: PathBuf, } #[derive(Clone)] pub struct ExpansionData { pub id: ExpnId, pub depth: usize, pub module: Rc<ModuleData>, pub directory_ownership: DirectoryOwnership, pub prior_type_ascription: Option<(Span, bool)>, } /// One of these is made during expansion and incrementally updated as we go; /// when a macro expansion occurs, the resulting nodes have the `backtrace() /// -> expn_data` of their expansion context stored into their span. pub struct ExtCtxt<'a> { pub sess: &'a Session, pub ecfg: expand::ExpansionConfig<'a>, pub reduced_recursion_limit: Option<Limit>, pub root_path: PathBuf, pub resolver: &'a mut dyn ResolverExpand, pub current_expansion: ExpansionData, /// Error recovery mode entered when expansion is stuck /// (or during eager expansion, but that's a hack). pub force_mode: bool, pub expansions: FxHashMap<Span, Vec<String>>, /// Called directly after having parsed an external `mod foo;` in expansion. pub(super) extern_mod_loaded: Option<&'a dyn Fn(&ast::Crate)>, } impl<'a> ExtCtxt<'a> { pub fn new( sess: &'a Session, ecfg: expand::ExpansionConfig<'a>, resolver: &'a mut dyn ResolverExpand, extern_mod_loaded: Option<&'a dyn Fn(&ast::Crate)>, ) -> ExtCtxt<'a> { ExtCtxt { sess, ecfg, reduced_recursion_limit: None, resolver, extern_mod_loaded, root_path: PathBuf::new(), current_expansion: ExpansionData { id: ExpnId::root(), depth: 0, module: Rc::new(ModuleData { mod_path: Vec::new(), directory: PathBuf::new() }), directory_ownership: DirectoryOwnership::Owned { relative: None }, prior_type_ascription: None, }, force_mode: false, expansions: FxHashMap::default(), } } /// Returns a `Folder` for deeply expanding all macros in an AST node. 
pub fn expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> { expand::MacroExpander::new(self, false) } /// Returns a `Folder` that deeply expands all macros and assigns all `NodeId`s in an AST node. /// Once `NodeId`s are assigned, the node may not be expanded, removed, or otherwise modified. pub fn monotonic_expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> { expand::MacroExpander::new(self, true) } pub fn new_parser_from_tts(&self, stream: TokenStream) -> parser::Parser<'a> { rustc_parse::stream_to_parser(&self.sess.parse_sess, stream, MACRO_ARGUMENTS) } pub fn source_map(&self) -> &'a SourceMap { self.sess.parse_sess.source_map() } pub fn parse_sess(&self) -> &'a ParseSess { &self.sess.parse_sess } pub fn call_site(&self) -> Span { self.current_expansion.id.expn_data().call_site } /// Equivalent of `Span::def_site` from the proc macro API, /// except that the location is taken from the span passed as an argument. pub fn with_def_site_ctxt(&self, span: Span) -> Span { span.with_def_site_ctxt(self.current_expansion.id) } /// Equivalent of `Span::call_site` from the proc macro API, /// except that the location is taken from the span passed as an argument. pub fn with_call_site_ctxt(&self, span: Span) -> Span { span.with_call_site_ctxt(self.current_expansion.id) } /// Equivalent of `Span::mixed_site` from the proc macro API, /// except that the location is taken from the span passed as an argument. pub fn with_mixed_site_ctxt(&self, span: Span) -> Span { span.with_mixed_site_ctxt(self.current_expansion.id) } /// Returns span for the macro which originally caused the current expansion to happen. /// /// Stops backtracing at include! boundary. 
pub fn expansion_cause(&self) -> Option<Span> { self.current_expansion.id.expansion_cause() } pub fn struct_span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> DiagnosticBuilder<'a> { self.sess.parse_sess.span_diagnostic.struct_span_err(sp, msg) } /// Emit `msg` attached to `sp`, without immediately stopping /// compilation. /// /// Compilation will be stopped in the near future (at the end of /// the macro expansion phase). pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) { self.sess.parse_sess.span_diagnostic.span_err(sp, msg); } pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) { self.sess.parse_sess.span_diagnostic.span_warn(sp, msg); } pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! { self.sess.parse_sess.span_diagnostic.span_bug(sp, msg); } pub fn trace_macros_diag(&mut self) { for (sp, notes) in self.expansions.iter() { let mut db = self.sess.parse_sess.span_diagnostic.span_note_diag(*sp, "trace_macro"); for note in notes { db.note(note); } db.emit(); } // Fixme: does this result in errors? self.expansions.clear(); } pub fn bug(&self, msg: &str) -> ! { self.sess.parse_sess.span_diagnostic.bug(msg); } pub fn trace_macros(&self) -> bool { self.ecfg.trace_mac } pub fn set_trace_macros(&mut self, x: bool) { self.ecfg.trace_mac = x } pub fn std_path(&self, components: &[Symbol]) -> Vec<Ident> { let def_site = self.with_def_site_ctxt(DUMMY_SP); iter::once(Ident::new(kw::DollarCrate, def_site)) .chain(components.iter().map(|&s| Ident::with_dummy_span(s))) .collect() } pub fn check_unused_macros(&mut self) { self.resolver.check_unused_macros(); } /// Resolves a path mentioned inside Rust code. /// /// This unifies the logic used for resolving `include_X!`, and `#[doc(include)]` file paths. /// /// Returns an absolute path to the file that `path` refers to. 
pub fn resolve_path( &self, path: impl Into<PathBuf>, span: Span, ) -> Result<PathBuf, DiagnosticBuilder<'a>> { let path = path.into(); // Relative paths are resolved relative to the file in which they are found // after macro expansion (that is, they are unhygienic). if !path.is_absolute() { let callsite = span.source_callsite(); let mut result = match self.source_map().span_to_unmapped_path(callsite) { FileName::Real(name) => name.into_local_path(), FileName::DocTest(path, _) => path, other => { return Err(self.struct_span_err( span, &format!("cannot resolve relative path in non-file source `{}`", other), )); } }; result.pop(); result.push(path); Ok(result) } else { Ok(path) } } } /// Extracts a string literal from the macro expanded version of `expr`, /// emitting `err_msg` if `expr` is not a string literal. This does not stop /// compilation on error, merely emits a non-fatal error and returns `None`. pub fn expr_to_spanned_string<'a>( cx: &'a mut ExtCtxt<'_>, expr: P<ast::Expr>, err_msg: &str, ) -> Result<(Symbol, ast::StrStyle, Span), Option<DiagnosticBuilder<'a>>> { // Perform eager expansion on the expression. // We want to be able to handle e.g., `concat!("foo", "bar")`. let expr = cx.expander().fully_expand_fragment(AstFragment::Expr(expr)).make_expr(); Err(match expr.kind { ast::ExprKind::Lit(ref l) => match l.kind { ast::LitKind::Str(s, style) => return Ok((s, style, expr.span)), ast::LitKind::Err(_) => None, _ => Some(cx.struct_span_err(l.span, err_msg)), }, ast::ExprKind::Err => None, _ => Some(cx.struct_span_err(expr.span, err_msg)), }) } pub fn expr_to_string( cx: &mut ExtCtxt<'_>, expr: P<ast::Expr>, err_msg: &str, ) -> Option<(Symbol, ast::StrStyle)> { expr_to_spanned_string(cx, expr, err_msg) .map_err(|err| { err.map(|mut err| { err.emit(); }) }) .ok() .map(|(symbol, style, _)| (symbol, style)) } /// Non-fatally assert that `tts` is empty. 
Note that this function /// returns even when `tts` is non-empty, macros that *need* to stop /// compilation should call /// `cx.parse_sess.span_diagnostic.abort_if_errors()` (this should be /// done as rarely as possible). pub fn check_zero_tts(cx: &ExtCtxt<'_>, sp: Span, tts: TokenStream, name: &str) { if !tts.is_empty() { cx.span_err(sp, &format!("{} takes no arguments", name)); } } /// Parse an expression. On error, emit it, advancing to `Eof`, and return `None`. pub fn parse_expr(p: &mut parser::Parser<'_>) -> Option<P<ast::Expr>> { match p.parse_expr() { Ok(e) => return Some(e), Err(mut err) => err.emit(), } while p.token != token::Eof { p.bump(); } None } /// Interpreting `tts` as a comma-separated sequence of expressions, /// expect exactly one string literal, or emit an error and return `None`. pub fn get_single_str_from_tts( cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream, name: &str, ) -> Option<String> { let mut p = cx.new_parser_from_tts(tts); if p.token == token::Eof { cx.span_err(sp, &format!("{} takes 1 argument", name)); return None; } let ret = parse_expr(&mut p)?; let _ = p.eat(&token::Comma); if p.token != token::Eof { cx.span_err(sp, &format!("{} takes 1 argument", name)); } expr_to_string(cx, ret, "argument must be a string literal").map(|(s, _)| s.to_string()) } /// Extracts comma-separated expressions from `tts`. /// On error, emit it, and return `None`. pub fn get_exprs_from_tts( cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream, ) -> Option<Vec<P<ast::Expr>>> { let mut p = cx.new_parser_from_tts(tts); let mut es = Vec::new(); while p.token != token::Eof { let expr = parse_expr(&mut p)?; // Perform eager expansion on the expression. // We want to be able to handle e.g., `concat!("foo", "bar")`. let expr = cx.expander().fully_expand_fragment(AstFragment::Expr(expr)).make_expr(); es.push(expr); if p.eat(&token::Comma) { continue; } if p.token != token::Eof { cx.span_err(sp, "expected token: `,`"); return None; } } Some(es) }
33.307756
99
0.58749
11c6b88eddc75f86975459db45aa6d780de73699
1,412
//! //! The semantic analyzer structure type element error. //! use zinc_lexical::Location; /// /// The semantic analyzer structure type element error. /// #[derive(Debug, PartialEq)] pub enum Error { /// A field with the same name occurs for the second time. DuplicateField { /// The duplicate field location. location: Location, /// The structure type name. type_identifier: String, /// The duplicate field name. field_name: String, }, /// The structure expected generics, but got none. ExpectedGenerics { /// The type initializer location. location: Location, /// The structure type name. type_identifier: String, /// The number of expected generics. expected: usize, }, /// The structure did not expect generics, but got some. UnexpectedGenerics { /// The type initializer location. location: Location, /// The structure type name. type_identifier: String, }, /// The structure expected different number of generic arguments. InvalidGenericsNumber { /// The type initializer location. location: Location, /// The structure type name. type_identifier: String, /// The number of expected generics. expected: usize, /// The number of found generics. found: usize, }, }
28.816327
69
0.616147
2f15cca666bb2f5571c7a093caa2a0a412a2e242
13,034
use std::str::FromStr; #[derive(Debug, Clone, Copy, PartialEq)] pub enum OpeningQuotes { AsciiDouble, // " AsciiSingle, // ' EnglishDouble, // “ EnglishSingle, // ‘ GermanDouble, // „ GermanSingle, // ‚ SwedishDouble, // ” SwedishSingle, // ’ GuillementsDouble, // « GuillementsSingle, // ‹ FrenchDouble, // « with space FrenchSingle, // ‹ with space DanishDouble, // » DanishSingle, // › JapaneseFilled, // 「 JapaneseEmpty, // 『 TibetanDouble, // 《 TibetanSingle, //〈 } impl OpeningQuotes { pub fn to_str(self) -> &'static str { use OpeningQuotes::*; match self { AsciiDouble => "\"", AsciiSingle => "'", EnglishDouble => "“", EnglishSingle => "‘", GermanDouble => "„", GermanSingle => "‚", SwedishDouble => "”", SwedishSingle => "’", GuillementsDouble => "«", GuillementsSingle => "‹", FrenchDouble => "«\u{A0}", FrenchSingle => "‹\u{A0}", DanishDouble => "»", DanishSingle => "›", JapaneseFilled => "「", JapaneseEmpty => "『", TibetanDouble => "《", TibetanSingle => "〈", } } } #[derive(Debug, Clone, Copy, PartialEq)] pub enum ClosingQuotes { AsciiDouble, // " AsciiSingle, // ' EnglishDouble, // ” EnglishSingle, // ’ GermanDouble, // “ GermanSingle, // ‘ GuillementsDouble, // » GuillementsSingle, // › FrenchDouble, // » with space FrenchSingle, // › with space DanishDouble, // « DanishSingle, // ‹ AlbanianAltDouble, // „ AlbanianAltSingle, // ‚ JapaneseFilled, // 」 JapaneseEmpty, // 』 TibetanDouble, // 》 TibetanSingle, // 〉 } impl ClosingQuotes { pub fn to_str(self) -> &'static str { use ClosingQuotes::*; match self { AsciiDouble => "\"", AsciiSingle => "'", EnglishDouble => "”", EnglishSingle => "’", GermanDouble => "“", GermanSingle => "‘", GuillementsDouble => "»", GuillementsSingle => "›", FrenchDouble => "\u{A0}»", FrenchSingle => "\u{A0}›", DanishDouble => "«", DanishSingle => "‹", AlbanianAltDouble => "„", AlbanianAltSingle => "‚", JapaneseFilled => "」", JapaneseEmpty => "』", TibetanDouble => "》", TibetanSingle => "〉", } } } #[derive(Debug, Clone, Copy, PartialEq)] pub 
struct QuoteStyle { pub double_start: OpeningQuotes, pub double_end: ClosingQuotes, pub single_start: OpeningQuotes, pub single_end: ClosingQuotes, } impl QuoteStyle { pub fn is_english(&self) -> bool { matches!( self, QuoteStyle { double_start: OpeningQuotes::EnglishDouble, double_end: ClosingQuotes::EnglishDouble, single_start: OpeningQuotes::EnglishSingle, single_end: ClosingQuotes::EnglishSingle, } ) } pub fn ascii() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::AsciiDouble, double_end: ClosingQuotes::AsciiDouble, single_start: OpeningQuotes::AsciiSingle, single_end: ClosingQuotes::AsciiSingle, } } pub fn english() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::EnglishDouble, double_end: ClosingQuotes::EnglishDouble, single_start: OpeningQuotes::EnglishSingle, single_end: ClosingQuotes::EnglishSingle, } } pub fn german() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GermanDouble, double_end: ClosingQuotes::GermanDouble, single_start: OpeningQuotes::GermanSingle, single_end: ClosingQuotes::GermanSingle, } } pub fn guillements() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GuillementsDouble, double_end: ClosingQuotes::GuillementsDouble, single_start: OpeningQuotes::GuillementsSingle, single_end: ClosingQuotes::GuillementsSingle, } } // ////////////////// Special cases ////////////////// // fn albanian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GermanDouble, double_end: ClosingQuotes::GermanDouble, single_start: OpeningQuotes::EnglishSingle, single_end: ClosingQuotes::EnglishSingle, } } fn arabic_armenian_kazakh_khmer_pashto_persian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GuillementsDouble, double_end: ClosingQuotes::GuillementsDouble, single_start: OpeningQuotes::AsciiSingle, single_end: ClosingQuotes::AsciiSingle, } } fn azerbaijani_belarusian_mongolian_russian_uzbek() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GuillementsDouble, double_end: 
ClosingQuotes::GuillementsDouble, single_start: OpeningQuotes::GermanDouble, single_end: ClosingQuotes::GermanDouble, } } fn guillements_and_english() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GuillementsDouble, double_end: ClosingQuotes::GuillementsDouble, single_start: OpeningQuotes::EnglishDouble, single_end: ClosingQuotes::EnglishDouble, } } fn bosnian_finnish_hebrew_swedish() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::SwedishDouble, double_end: ClosingQuotes::EnglishDouble, single_start: OpeningQuotes::SwedishSingle, single_end: ClosingQuotes::EnglishSingle, } } fn bulgarian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GermanDouble, double_end: ClosingQuotes::GermanDouble, single_start: OpeningQuotes::SwedishSingle, single_end: ClosingQuotes::EnglishSingle, } } fn croatian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GermanDouble, double_end: ClosingQuotes::EnglishDouble, single_start: OpeningQuotes::EnglishSingle, single_end: ClosingQuotes::EnglishSingle, } } fn danish() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::DanishDouble, double_end: ClosingQuotes::DanishDouble, single_start: OpeningQuotes::DanishSingle, single_end: ClosingQuotes::DanishSingle, } } fn estonian_georgian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GermanDouble, double_end: ClosingQuotes::GermanDouble, single_start: OpeningQuotes::AsciiSingle, single_end: ClosingQuotes::AsciiSingle, } } fn french() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::FrenchDouble, double_end: ClosingQuotes::FrenchDouble, single_start: OpeningQuotes::EnglishDouble, single_end: ClosingQuotes::EnglishDouble, } } fn hungarian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GermanDouble, double_end: ClosingQuotes::EnglishDouble, single_start: OpeningQuotes::DanishDouble, single_end: ClosingQuotes::DanishDouble, } } fn ido() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::FrenchDouble, 
double_end: ClosingQuotes::FrenchDouble, single_start: OpeningQuotes::EnglishSingle, single_end: ClosingQuotes::EnglishSingle, } } fn japanese_taiwanese_traditional_chinese_new_tai_lue() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::JapaneseFilled, double_end: ClosingQuotes::JapaneseFilled, single_start: OpeningQuotes::JapaneseEmpty, single_end: ClosingQuotes::JapaneseEmpty, } } fn north_korean() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::TibetanDouble, double_end: ClosingQuotes::TibetanDouble, single_start: OpeningQuotes::TibetanSingle, single_end: ClosingQuotes::TibetanSingle, } } fn lao_latvian_vietnamese() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::EnglishDouble, double_end: ClosingQuotes::EnglishDouble, single_start: OpeningQuotes::AsciiSingle, single_end: ClosingQuotes::AsciiSingle, } } fn macedonian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GermanDouble, double_end: ClosingQuotes::GermanDouble, single_start: OpeningQuotes::SwedishSingle, single_end: ClosingQuotes::GermanSingle, } } fn norwegian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GuillementsDouble, double_end: ClosingQuotes::GuillementsDouble, single_start: OpeningQuotes::EnglishSingle, single_end: ClosingQuotes::EnglishSingle, } } fn polish_romanian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GermanDouble, double_end: ClosingQuotes::EnglishDouble, single_start: OpeningQuotes::GuillementsDouble, single_end: ClosingQuotes::GuillementsDouble, } } fn serbian() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::GermanDouble, double_end: ClosingQuotes::EnglishDouble, single_start: OpeningQuotes::SwedishSingle, single_end: ClosingQuotes::EnglishSingle, } } fn tai_le_tibetan() -> QuoteStyle { QuoteStyle { double_start: OpeningQuotes::TibetanDouble, double_end: ClosingQuotes::TibetanDouble, single_start: OpeningQuotes::TibetanSingle, single_end: ClosingQuotes::TibetanSingle, } } } impl Default for QuoteStyle { 
fn default() -> Self { QuoteStyle::english() } } impl FromStr for QuoteStyle { type Err = (); fn from_str(value: &str) -> Result<Self, Self::Err> { Ok(match value { "afrikaans" | "chinese" | "dutch" | "english" | "esperanto" | "filipino" | "hindi" | "indonesian" | "interlingua" | "irish" | "scottish gaelic" | "south korean" | "maltese" | "brazilian" | "tamil" | "thai" | "turkish" | "urdu" | "welsh" => { QuoteStyle::english() } "czech" | "german" | "icelandic" | "lithuanian" | "slovak" | "slovene" | "sorbian" => { QuoteStyle::german() } "amharic" | "swiss" | "romansh" | "tigrinya" | "uyghur" => QuoteStyle::guillements(), "basque" | "catalan" | "galician" | "greek" | "italian" | "occitan" | "portuguese" | "spanish" | "ukrainian" => QuoteStyle::guillements_and_english(), "albanian" => QuoteStyle::albanian(), "arabic" | "armenian" | "kazakh" | "khmer" | "pashto" | "persian" => { QuoteStyle::arabic_armenian_kazakh_khmer_pashto_persian() } "azerbaijani" | "belarusian" | "mongolian" | "russian" | "uzbek" => { QuoteStyle::azerbaijani_belarusian_mongolian_russian_uzbek() } "bosnian" | "finnish" | "hebrew" | "swedish" => { QuoteStyle::bosnian_finnish_hebrew_swedish() } "bulgarian" => QuoteStyle::bulgarian(), "croatian" => QuoteStyle::croatian(), "danish" => QuoteStyle::danish(), "estonian" | "georgian" => QuoteStyle::estonian_georgian(), "french" => QuoteStyle::french(), "hungarian" => QuoteStyle::hungarian(), "ido" => QuoteStyle::ido(), "japanese" | "taiwanese" | "traditional chinese" | "new tai lue" => { QuoteStyle::japanese_taiwanese_traditional_chinese_new_tai_lue() } "north korean" => QuoteStyle::north_korean(), "lao" | "latvian" | "vietnamese" => QuoteStyle::lao_latvian_vietnamese(), "macedonian" => QuoteStyle::macedonian(), "norwegian" => QuoteStyle::norwegian(), "polish" | "romanian" => QuoteStyle::polish_romanian(), "serbian" => QuoteStyle::serbian(), "tai le" | "tibetan" => QuoteStyle::tai_le_tibetan(), _ => return Err(()), }) } }
31.559322
99
0.569741
9cceb68b1bc22f939f815314aa579044044c948c
9,121
use env_logger; use log::{debug, warn}; use fnv::FnvHashMap; use std::{ collections::HashMap, net::{IpAddr, Ipv4Addr, SocketAddr}, str, time::{Duration, Instant}, }; use futures::{ prelude::*, sync::mpsc::{channel, Sender}, }; use tokio::codec::length_delimited::LengthDelimitedCodec; use tokio::timer::{Error, Interval}; use p2p::{ builder::ServiceBuilder, service::{Message, ProtocolHandle, ServiceContext, ServiceEvent, ServiceHandle, ServiceTask}, session::{ProtocolId, ProtocolMeta, SessionId}, SessionType, }; use secio::PublicKey; use discovery::{AddressManager, Direction, Discovery, DiscoveryHandle, RawAddr, Substream}; fn main() { env_logger::init(); if std::env::args().nth(1) == Some("server".to_string()) { debug!("Starting server ......"); let (discovery, _) = create_discovery(1); let protocol = DiscoveryProtocol::new(0, "server", discovery); let mut service = ServiceBuilder::default() .insert_protocol(protocol) .forever(true) .build(SHandle {}); let _ = service.listen("127.0.0.1:1337".parse().unwrap()); tokio::run(service.for_each(|_| Ok(()))) } else { debug!("Starting client ......"); let (discovery, _) = create_discovery(5000); let protocol = DiscoveryProtocol::new(0, "client", discovery); let mut service = ServiceBuilder::default() .insert_protocol(protocol) .forever(true) .build(SHandle {}) .dial("127.0.0.1:1337".parse().unwrap()); let _ = service.listen("127.0.0.1:1337".parse().unwrap()); tokio::run(service.for_each(|_| Ok(()))) } } fn create_discovery(start: u16) -> (Discovery<SimpleAddressManager>, DiscoveryHandle) { let addrs: FnvHashMap<RawAddr, i32> = (start..start + 3333) .map(|port| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port)) .map(|addr| (RawAddr::from(addr), 100)) .collect(); let addr_mgr = SimpleAddressManager { addrs }; let discovery = Discovery::new(addr_mgr); let handle = discovery.handle(); (discovery, handle) } struct DiscoveryProtocol { id: usize, ty: &'static str, notify_counter: u32, discovery: 
Option<Discovery<SimpleAddressManager>>, discovery_handle: DiscoveryHandle, discovery_senders: FnvHashMap<SessionId, Sender<Vec<u8>>>, sessions: HashMap<SessionId, SessionData>, } impl DiscoveryProtocol { fn new( id: usize, ty: &'static str, discovery: Discovery<SimpleAddressManager>, ) -> DiscoveryProtocol { let discovery_handle = discovery.handle(); DiscoveryProtocol { id, ty, notify_counter: 0, discovery: Some(discovery), discovery_handle, discovery_senders: FnvHashMap::default(), sessions: HashMap::default(), } } } impl ProtocolMeta<LengthDelimitedCodec> for DiscoveryProtocol { fn id(&self) -> ProtocolId { self.id } fn codec(&self) -> LengthDelimitedCodec { LengthDelimitedCodec::new() } fn handle(&self) -> Option<Box<dyn ProtocolHandle + Send + 'static>> { let discovery = self .discovery .as_ref() .map(|discovery| Discovery::new(discovery.addr_mgr().clone())); let discovery_handle = discovery .as_ref() .map(|discovery| discovery.handle()) .unwrap(); Some(Box::new(DiscoveryProtocol { id: self.id, ty: self.ty, notify_counter: 0, discovery, discovery_handle, discovery_senders: FnvHashMap::default(), sessions: HashMap::default(), })) } } impl ProtocolHandle for DiscoveryProtocol { fn init(&mut self, control: &mut ServiceContext) { debug!("protocol [discovery({})]: init", self.id); let mut interval_sender = control.sender().clone(); let proto_id = self.id(); let interval_seconds = 5; debug!("Setup interval {} seconds", interval_seconds); let interval_task = Interval::new(Instant::now(), Duration::from_secs(interval_seconds)) .for_each(move |_| { interval_sender .try_send(ServiceTask::ProtocolNotify { proto_id, token: 3 }) .map_err(|err| { warn!("interval error: {:?}", err); Error::shutdown() }) }) .map_err(|err| warn!("{}", err)); let discovery_task = self .discovery .take() .map(|discovery| { debug!("Start discovery future_task"); discovery .for_each(|()| { debug!("discovery.for_each()"); Ok(()) }) .map_err(|err| { warn!("discovery stream error: {:?}", err); () }) 
.then(|_| { warn!("End of discovery"); Ok(()) }) }) .unwrap(); control.future_task(interval_task); control.future_task(discovery_task); } fn connected( &mut self, control: &mut ServiceContext, session_id: SessionId, address: SocketAddr, ty: SessionType, _: &Option<PublicKey>, _: &str, ) { self.sessions .entry(session_id) .or_insert(SessionData::new(address, ty)); debug!( "protocol [discovery] open on session [{}], address: [{}], type: [{:?}]", session_id, address, ty ); let direction = if ty == SessionType::Server { Direction::Inbound } else { Direction::Outbound }; let (sender, receiver) = channel(8); self.discovery_senders.insert(session_id, sender); let substream = Substream::new( address, direction, self.id, session_id, receiver, control.sender().clone(), ); match self.discovery_handle.substream_sender.try_send(substream) { Ok(_) => { debug!("Send substream success"); } Err(err) => { warn!("Send substream failed : {:?}", err); } } } fn disconnected(&mut self, _control: &mut ServiceContext, session_id: SessionId) { self.sessions.remove(&session_id); self.discovery_senders.remove(&session_id); debug!("protocol [discovery] close on session [{}]", session_id); } fn received(&mut self, _env: &mut ServiceContext, data: Message) { debug!("[received message]: length={}", data.data.len()); self.sessions .get_mut(&data.id) .unwrap() .push_data(data.data.clone()); if let Some(ref mut sender) = self.discovery_senders.get_mut(&data.id) { if let Err(err) = sender.try_send(data.data) { if err.is_full() { warn!("channel is full"); } else if err.is_disconnected() { warn!("channel is disconnected"); } else { warn!("other channel error: {:?}", err); } } } } fn notify(&mut self, _control: &mut ServiceContext, token: u64) { debug!("protocol [discovery] received notify token: {}", token); self.notify_counter += 1; } } struct SHandle {} impl ServiceHandle for SHandle { fn handle_error(&mut self, _env: &mut ServiceContext, error: ServiceEvent) { debug!("service error: {:?}", error); } 
fn handle_event(&mut self, _env: &mut ServiceContext, event: ServiceEvent) { debug!("service event: {:?}", event); } } #[derive(Clone)] struct SessionData { ty: SessionType, address: SocketAddr, data: Vec<Vec<u8>>, } impl SessionData { fn new(address: SocketAddr, ty: SessionType) -> Self { SessionData { address, ty, data: Vec::new(), } } fn push_data(&mut self, data: Vec<u8>) { self.data.push(data); } } #[derive(Default, Clone, Debug)] pub struct SimpleAddressManager { pub addrs: FnvHashMap<RawAddr, i32>, } impl AddressManager for SimpleAddressManager { fn add_new(&mut self, addr: SocketAddr) { self.addrs.entry(RawAddr::from(addr)).or_insert(100); } fn misbehave(&mut self, addr: SocketAddr, _ty: u64) -> i32 { let value = self.addrs.entry(RawAddr::from(addr)).or_insert(100); *value -= 20; *value } fn get_random(&mut self, n: usize) -> Vec<SocketAddr> { self.addrs .keys() .take(n) .map(|addr| addr.socket_addr()) .collect() } }
30.814189
97
0.541169
fc877509f3c7beb9d49edbd18bebafbd3226fcee
20,958
//! HTTP response types. //! //! This module contains structs related to HTTP responses, notably the //! `Response` type itself as well as a builder to create responses. Typically //! you'll import the `http::Response` type rather than reaching into this //! module itself. //! //! # Examples //! //! Creating a `Response` to return //! //! ``` //! use http::{Request, Response, StatusCode}; //! //! fn respond_to(req: Request<()>) -> http::Result<Response<()>> { //! let mut response = Response::builder(); //! response.header("Foo", "Bar") //! .status(StatusCode::OK); //! //! if req.headers().contains_key("Another-Header") { //! response.header("Another-Header", "Ack"); //! } //! //! response.body(()) //! } //! ``` //! //! A simple 404 handler //! //! ``` //! use http::{Request, Response, StatusCode}; //! //! fn not_found(_req: Request<()>) -> http::Result<Response<()>> { //! Response::builder() //! .status(StatusCode::NOT_FOUND) //! .body(()) //! } //! ``` //! //! Or otherwise inspecting the result of a request: //! //! ```no_run //! use http::{Request, Response}; //! //! fn get(url: &str) -> http::Result<Response<()>> { //! // ... //! # panic!() //! } //! //! let response = get("https://www.rust-lang.org/").unwrap(); //! //! if !response.status().is_success() { //! panic!("failed to get a successful response status!"); //! } //! //! if let Some(date) = response.headers().get("Date") { //! // we've got a `Date` header! //! } //! //! let body = response.body(); //! // ... //! ``` use std::any::Any; use std::fmt; use {Error, Result, HttpTryFrom, Extensions}; use header::{HeaderMap, HeaderName, HeaderValue}; use status::StatusCode; use version::Version; /// Represents an HTTP response /// /// An HTTP response consists of a head and a potentially optional body. The body /// component is generic, enabling arbitrary types to represent the HTTP body. /// For example, the body could be `Vec<u8>`, a `Stream` of byte chunks, or a /// value that has been deserialized. 
/// /// Typically you'll work with responses on the client side as the result of /// sending a `Request` and on the server you'll be generating a `Request` to /// send back to the client. /// /// # Examples /// /// Creating a `Response` to return /// /// ``` /// use http::{Request, Response, StatusCode}; /// /// fn respond_to(req: Request<()>) -> http::Result<Response<()>> { /// let mut response = Response::builder(); /// response.header("Foo", "Bar") /// .status(StatusCode::OK); /// /// if req.headers().contains_key("Another-Header") { /// response.header("Another-Header", "Ack"); /// } /// /// response.body(()) /// } /// ``` /// /// A simple 404 handler /// /// ``` /// use http::{Request, Response, StatusCode}; /// /// fn not_found(_req: Request<()>) -> http::Result<Response<()>> { /// Response::builder() /// .status(StatusCode::NOT_FOUND) /// .body(()) /// } /// ``` /// /// Or otherwise inspecting the result of a request: /// /// ```no_run /// use http::{Request, Response}; /// /// fn get(url: &str) -> http::Result<Response<()>> { /// // ... /// # panic!() /// } /// /// let response = get("https://www.rust-lang.org/").unwrap(); /// /// if !response.status().is_success() { /// panic!("failed to get a successful response status!"); /// } /// /// if let Some(date) = response.headers().get("Date") { /// // we've got a `Date` header! /// } /// /// let body = response.body(); /// // ... 
/// ``` /// /// Deserialize a response of bytes via json: /// /// ``` /// # extern crate serde; /// # extern crate serde_json; /// # extern crate http; /// use http::Response; /// use serde::de; /// /// fn deserialize<T>(req: Response<Vec<u8>>) -> serde_json::Result<Response<T>> /// where for<'de> T: de::Deserialize<'de>, /// { /// let (parts, body) = req.into_parts(); /// let body = serde_json::from_slice(&body)?; /// Ok(Response::from_parts(parts, body)) /// } /// # /// # fn main() {} /// ``` /// /// Or alternatively, serialize the body of a response to json /// /// ``` /// # extern crate serde; /// # extern crate serde_json; /// # extern crate http; /// use http::Response; /// use serde::ser; /// /// fn serialize<T>(req: Response<T>) -> serde_json::Result<Response<Vec<u8>>> /// where T: ser::Serialize, /// { /// let (parts, body) = req.into_parts(); /// let body = serde_json::to_vec(&body)?; /// Ok(Response::from_parts(parts, body)) /// } /// # /// # fn main() {} /// ``` pub struct Response<T> { head: Parts, body: T, } /// Component parts of an HTTP `Response` /// /// The HTTP response head consists of a status, version, and a set of /// header fields. pub struct Parts { /// The response's status pub status: StatusCode, /// The response's version pub version: Version, /// The response's headers pub headers: HeaderMap<HeaderValue>, /// The response's extensions pub extensions: Extensions, _priv: (), } /// An HTTP response builder /// /// This type can be used to construct an instance of `Response` through a /// builder-like pattern. #[derive(Debug)] pub struct Builder { head: Option<Parts>, err: Option<Error>, } impl Response<()> { /// Creates a new builder-style object to manufacture a `Response` /// /// This method returns an instance of `Builder` which can be used to /// create a `Response`. 
/// /// # Examples /// /// ``` /// # use http::*; /// let response = Response::builder() /// .status(200) /// .header("X-Custom-Foo", "Bar") /// .body(()) /// .unwrap(); /// ``` #[inline] pub fn builder() -> Builder { Builder::new() } } impl<T> Response<T> { /// Creates a new blank `Response` with the body /// /// The component ports of this response will be set to their default, e.g. /// the ok status, no headers, etc. /// /// # Examples /// /// ``` /// # use http::*; /// let response = Response::new("hello world"); /// /// assert_eq!(response.status(), StatusCode::OK); /// assert_eq!(*response.body(), "hello world"); /// ``` #[inline] pub fn new(body: T) -> Response<T> { Response { head: Parts::new(), body: body, } } /// Creates a new `Response` with the given head and body /// /// # Examples /// /// ``` /// # use http::*; /// let response = Response::new("hello world"); /// let (mut parts, body) = response.into_parts(); /// /// parts.status = StatusCode::BAD_REQUEST; /// let response = Response::from_parts(parts, body); /// /// assert_eq!(response.status(), StatusCode::BAD_REQUEST); /// assert_eq!(*response.body(), "hello world"); /// ``` #[inline] pub fn from_parts(parts: Parts, body: T) -> Response<T> { Response { head: parts, body: body, } } /// Returns the `StatusCode`. /// /// # Examples /// /// ``` /// # use http::*; /// let response: Response<()> = Response::default(); /// assert_eq!(response.status(), StatusCode::OK); /// ``` #[inline] pub fn status(&self) -> StatusCode { self.head.status } /// Returns a mutable reference to the associated `StatusCode`. /// /// # Examples /// /// ``` /// # use http::*; /// let mut response: Response<()> = Response::default(); /// *response.status_mut() = StatusCode::CREATED; /// assert_eq!(response.status(), StatusCode::CREATED); /// ``` #[inline] pub fn status_mut(&mut self) -> &mut StatusCode { &mut self.head.status } /// Returns a reference to the associated version. 
/// /// # Examples /// /// ``` /// # use http::*; /// let response: Response<()> = Response::default(); /// assert_eq!(response.version(), Version::HTTP_11); /// ``` #[inline] pub fn version(&self) -> Version { self.head.version } /// Returns a mutable reference to the associated version. /// /// # Examples /// /// ``` /// # use http::*; /// let mut response: Response<()> = Response::default(); /// *response.version_mut() = Version::HTTP_2; /// assert_eq!(response.version(), Version::HTTP_2); /// ``` #[inline] pub fn version_mut(&mut self) -> &mut Version { &mut self.head.version } /// Returns a reference to the associated header field map. /// /// # Examples /// /// ``` /// # use http::*; /// let response: Response<()> = Response::default(); /// assert!(response.headers().is_empty()); /// ``` #[inline] pub fn headers(&self) -> &HeaderMap<HeaderValue> { &self.head.headers } /// Returns a mutable reference to the associated header field map. /// /// # Examples /// /// ``` /// # use http::*; /// # use http::header::*; /// let mut response: Response<()> = Response::default(); /// response.headers_mut().insert(HOST, HeaderValue::from_static("world")); /// assert!(!response.headers().is_empty()); /// ``` #[inline] pub fn headers_mut(&mut self) -> &mut HeaderMap<HeaderValue> { &mut self.head.headers } /// Returns a reference to the associated extensions. /// /// # Examples /// /// ``` /// # use http::*; /// let response: Response<()> = Response::default(); /// assert!(response.extensions().get::<i32>().is_none()); /// ``` #[inline] pub fn extensions(&self) -> &Extensions { &self.head.extensions } /// Returns a mutable reference to the associated extensions. 
/// /// # Examples /// /// ``` /// # use http::*; /// # use http::header::*; /// let mut response: Response<()> = Response::default(); /// response.extensions_mut().insert("hello"); /// assert_eq!(response.extensions().get(), Some(&"hello")); /// ``` #[inline] pub fn extensions_mut(&mut self) -> &mut Extensions { &mut self.head.extensions } /// Returns a reference to the associated HTTP body. /// /// # Examples /// /// ``` /// # use http::*; /// let response: Response<String> = Response::default(); /// assert!(response.body().is_empty()); /// ``` #[inline] pub fn body(&self) -> &T { &self.body } /// Returns a mutable reference to the associated HTTP body. /// /// # Examples /// /// ``` /// # use http::*; /// let mut response: Response<String> = Response::default(); /// response.body_mut().push_str("hello world"); /// assert!(!response.body().is_empty()); /// ``` #[inline] pub fn body_mut(&mut self) -> &mut T { &mut self.body } /// Consumes the response, returning just the body. /// /// # Examples /// /// ``` /// # use http::Response; /// let response = Response::new(10); /// let body = response.into_body(); /// assert_eq!(body, 10); /// ``` #[inline] pub fn into_body(self) -> T { self.body } /// Consumes the response returning the head and body parts. /// /// # Examples /// /// ``` /// # use http::*; /// let response: Response<()> = Response::default(); /// let (parts, body) = response.into_parts(); /// assert_eq!(parts.status, StatusCode::OK); /// ``` #[inline] pub fn into_parts(self) -> (Parts, T) { (self.head, self.body) } /// Consumes the response returning a new response with body mapped to the /// return type of the passed in function. 
/// /// # Examples /// /// ``` /// # use http::*; /// let response = Response::builder().body("some string").unwrap(); /// let mapped_response: Response<&[u8]> = response.map(|b| { /// assert_eq!(b, "some string"); /// b.as_bytes() /// }); /// assert_eq!(mapped_response.body(), &"some string".as_bytes()); /// ``` #[inline] pub fn map<F, U>(self, f: F) -> Response<U> where F: FnOnce(T) -> U { Response { body: f(self.body), head: self.head } } } impl<T: Default> Default for Response<T> { #[inline] fn default() -> Response<T> { Response::new(T::default()) } } impl<T: fmt::Debug> fmt::Debug for Response<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Response") .field("status", &self.status()) .field("version", &self.version()) .field("headers", self.headers()) // omits Extensions because not useful .field("body", self.body()) .finish() } } impl Parts { /// Creates a new default instance of `Parts` fn new() -> Parts { Parts{ status: StatusCode::default(), version: Version::default(), headers: HeaderMap::default(), extensions: Extensions::default(), _priv: (), } } } impl fmt::Debug for Parts { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Parts") .field("status", &self.status) .field("version", &self.version) .field("headers", &self.headers) // omits Extensions because not useful // omits _priv because not useful .finish() } } impl Builder { /// Creates a new default instance of `Builder` to construct either a /// `Head` or a `Response`. /// /// # Examples /// /// ``` /// # use http::*; /// /// let response = response::Builder::new() /// .status(200) /// .body(()) /// .unwrap(); /// ``` #[inline] pub fn new() -> Builder { Builder::default() } /// Set the HTTP status for this response. /// /// This function will configure the HTTP status code of the `Response` that /// will be returned from `Builder::build`. /// /// By default this is `200`. 
/// /// # Examples /// /// ``` /// # use http::*; /// /// let response = Response::builder() /// .status(200) /// .body(()) /// .unwrap(); /// ``` pub fn status<T>(&mut self, status: T) -> &mut Builder where StatusCode: HttpTryFrom<T>, { if let Some(head) = head(&mut self.head, &self.err) { match HttpTryFrom::try_from(status) { Ok(s) => head.status = s, Err(e) => self.err = Some(e.into()), } } self } /// Set the HTTP version for this response. /// /// This function will configure the HTTP version of the `Response` that /// will be returned from `Builder::build`. /// /// By default this is HTTP/1.1 /// /// # Examples /// /// ``` /// # use http::*; /// /// let response = Response::builder() /// .version(Version::HTTP_2) /// .body(()) /// .unwrap(); /// ``` pub fn version(&mut self, version: Version) -> &mut Builder { if let Some(head) = head(&mut self.head, &self.err) { head.version = version; } self } /// Appends a header to this response builder. /// /// This function will append the provided key/value as a header to the /// internal `HeaderMap` being constructed. Essentially this is equivalent /// to calling `HeaderMap::append`. /// /// # Examples /// /// ``` /// # use http::*; /// # use http::header::HeaderValue; /// /// let response = Response::builder() /// .header("Content-Type", "text/html") /// .header("X-Custom-Foo", "bar") /// .header("content-length", 0) /// .body(()) /// .unwrap(); /// ``` pub fn header<K, V>(&mut self, key: K, value: V) -> &mut Builder where HeaderName: HttpTryFrom<K>, HeaderValue: HttpTryFrom<V> { if let Some(head) = head(&mut self.head, &self.err) { match <HeaderName as HttpTryFrom<K>>::try_from(key) { Ok(key) => { match <HeaderValue as HttpTryFrom<V>>::try_from(value) { Ok(value) => { head.headers.append(key, value); } Err(e) => self.err = Some(e.into()), } }, Err(e) => self.err = Some(e.into()), }; } self } /// Get header on this response builder. 
/// when builder has error returns None /// /// # Example /// /// ``` /// # use http::*; /// # use http::header::HeaderValue; /// # use http::response::Builder; /// let mut res = Response::builder(); /// res.header("Accept", "text/html") /// .header("X-Custom-Foo", "bar"); /// let headers = res.headers_ref().unwrap(); /// assert_eq!( headers["Accept"], "text/html" ); /// assert_eq!( headers["X-Custom-Foo"], "bar" ); /// ``` pub fn headers_ref(&self) -> Option<&HeaderMap<HeaderValue>> { if self.err.is_some() { return None; } match self.head { Some(ref head) => Some(&head.headers), None => None } } /// Get header on this response builder. /// when builder has error returns None /// /// # Example /// /// ``` /// # use http::*; /// # use http::header::HeaderValue; /// # use http::response::Builder; /// let mut res = Response::builder(); /// { /// let headers = res.headers_mut().unwrap(); /// headers.insert("Accept", HeaderValue::from_static("text/html")); /// headers.insert("X-Custom-Foo", HeaderValue::from_static("bar")); /// } /// let headers = res.headers_ref().unwrap(); /// assert_eq!( headers["Accept"], "text/html" ); /// assert_eq!( headers["X-Custom-Foo"], "bar" ); /// ``` pub fn headers_mut(&mut self) -> Option<&mut HeaderMap<HeaderValue>> { if self.err.is_some() { return None; } match self.head { Some(ref mut head) => Some(&mut head.headers), None => None } } /// Adds an extension to this builder /// /// # Examples /// /// ``` /// # use http::*; /// /// let response = Response::builder() /// .extension("My Extension") /// .body(()) /// .unwrap(); /// /// assert_eq!(response.extensions().get::<&'static str>(), /// Some(&"My Extension")); /// ``` pub fn extension<T>(&mut self, extension: T) -> &mut Builder where T: Any + Send + Sync + 'static, { if let Some(head) = head(&mut self.head, &self.err) { head.extensions.insert(extension); } self } fn take_parts(&mut self) -> Result<Parts> { let ret = self.head.take().expect("cannot reuse response builder"); if let 
Some(e) = self.err.take() { return Err(e) } Ok(ret) } /// "Consumes" this builder, using the provided `body` to return a /// constructed `Response`. /// /// # Errors /// /// This function may return an error if any previously configured argument /// failed to parse or get converted to the internal representation. For /// example if an invalid `head` was specified via `header("Foo", /// "Bar\r\n")` the error will be returned when this function is called /// rather than when `header` was called. /// /// # Panics /// /// This method will panic if the builder is reused. The `body` function can /// only be called once. /// /// # Examples /// /// ``` /// # use http::*; /// /// let response = Response::builder() /// .body(()) /// .unwrap(); /// ``` pub fn body<T>(&mut self, body: T) -> Result<Response<T>> { Ok(Response { head: self.take_parts()?, body: body, }) } } fn head<'a>(head: &'a mut Option<Parts>, err: &Option<Error>) -> Option<&'a mut Parts> { if err.is_some() { return None } head.as_mut() } impl Default for Builder { #[inline] fn default() -> Builder { Builder { head: Some(Parts::new()), err: None, } } } #[cfg(test)] mod tests { use super::*; #[test] fn it_can_map_a_body_from_one_type_to_another() { let response = Response::builder().body("some string").unwrap(); let mapped_response = response.map(|s| { assert_eq!(s, "some string"); 123u32 }); assert_eq!(mapped_response.body(), &123u32); } }
26.596447
81
0.521281
33d62f4674855aa30544d80d11ea44f877c09671
2,440
use anyhow::Result; use deep_thought::optimizer::Optimizer; use deep_thought::prelude::*; use deep_thought_derive::neural_network; use ndarray::prelude::*; use num_traits::Float; // Network size must be known at compile-time // const _NUM_PARAMETERS: usize = 19; fn main() -> Result<()> { neural_network!( let x: usize = NeuralNetwork::new().add_layer(Layer::new(3, 2)).add_layer(Layer::new(3, 1).activation(Activation::default())).add_layer(Layer::new(3, 1)); ); println!("there are {} parameters", _NUM_PARAMETERS); // Build the input and label arrays // let inputs = array![[0., 0.], [0., 1.], [1., 0.], [1., 1.]]; // let labels = array![[0.], [1.], [1.], [0.]]; // let dataset = Dataset::raw(inputs, labels, 1., BatchSize::One)?; // let loss_fn = Loss::MSE; // // Build the neural net // let mut net = NeuralNetwork::<f32, _NUM_PARAMETERS>::new() // .add_layer(Layer::new(2, 3).activation(Activation::Sigmoid)) // .add_layer(Layer::new(3, 3).activation(Activation::Sigmoid)) // .add_layer(Layer::new(3, 1).activation(Activation::Sigmoid)); // let mut optim = optimizer::SGD::new(&net).learning_rate(0.3).momentum(0.); // train the network // for epoch in 0..11000 { // let mut epoch_loss = 0.; // for (samples, labels) in dataset.iter_train() { // let out = net.forward(&samples); // epoch_loss += &loss_fn.compute(&out, &labels).mean().unwrap(); // // optim.step(&mut net, &out); // } // if epoch % 100 == 0 { // println!("training epoch {}", epoch); // println!("Mean Loss: {}\n", epoch_loss / dataset.length() as f64); // println!("data len {}", dataset.length()); // } // } // // evaluate the net // let mut total_loss: f64 = 0.; // // should ofc be iter_test but this dataset is kinda minimalistic // let test_iter = dataset.iter_train(); // let num_test_samples = test_iter.num_batches * test_iter.batch_size; // for (sample, label) in test_iter { // let out = net.forward(&sample); // total_loss += loss_fn.compute(&out, &label).sum(); // println!("{} == {}", out.map(|&x| x.round()), 
label); // } // println!( // "Mean loss over {} test samples: {:.2}", // num_test_samples, // total_loss / num_test_samples as f64 // ); Ok(()) }
36.969697
162
0.570902
f5641e6f12180ae8be68726f3acf7e3a46b1f2ed
102,985
///! This module contains the gory details of using Diesel to query ///! a database schema that is not known at compile time. The code in this ///! module is mostly concerned with constructing SQL queries and some ///! helpers for serializing and deserializing entities. ///! ///! Code in this module works very hard to minimize the number of allocations ///! that it performs use diesel::pg::{Pg, PgConnection}; use diesel::query_builder::{AstPass, QueryFragment, QueryId}; use diesel::query_dsl::{LoadQuery, RunQueryDsl}; use diesel::result::{Error as DieselError, QueryResult}; use diesel::sql_types::{Array, Binary, Bool, Integer, Jsonb, Range, Text}; use diesel::Connection; use lazy_static::lazy_static; use std::collections::{BTreeMap, HashSet}; use std::convert::TryFrom; use std::env; use std::fmt::{self, Display}; use std::iter::FromIterator; use std::str::FromStr; use graph::data::{schema::FulltextAlgorithm, store::scalar}; use graph::prelude::{ anyhow, q, serde_json, Attribute, BlockNumber, ChildMultiplicity, Entity, EntityCollection, EntityFilter, EntityKey, EntityLink, EntityOrder, EntityRange, EntityWindow, ParentLink, QueryExecutionError, StoreError, Value, }; use crate::entities::STRING_PREFIX_SIZE; use crate::relational::{Column, ColumnType, IdType, Layout, SqlName, Table, PRIMARY_KEY_COLUMN}; use crate::sql_value::SqlValue; use crate::{ block_range::{ BlockRange, BlockRangeContainsClause, BLOCK_RANGE_COLUMN, BLOCK_RANGE_CURRENT, BLOCK_UNVERSIONED, }, primary::Namespace, }; lazy_static! { /// Use a variant of the query for child_type_a when we are looking up /// fewer than this many entities. 
This variable is only here temporarily /// until we can settle on the right batch size through experimentation /// and should then just become an ordinary constant static ref TYPEA_BATCH_SIZE: usize = { env::var("TYPEA_BATCH_SIZE") .ok() .map(|s| { usize::from_str(&s) .unwrap_or_else(|_| panic!("TYPE_BATCH_SIZE must be a number, but is `{}`", s)) }) .unwrap_or(150) }; /// When we add `order by id` to a query should we add instead /// `order by id, block_range` static ref ORDER_BY_BLOCK_RANGE: bool = { env::var("ORDER_BY_BLOCK_RANGE") .ok() .map(|s| { s == "1" }) .unwrap_or(false) }; } #[derive(Debug)] pub(crate) struct UnsupportedFilter { pub filter: String, pub value: Value, } impl Display for UnsupportedFilter { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!( f, "unsupported filter `{}` for value `{}`", self.filter, self.value ) } } impl std::error::Error for UnsupportedFilter {} impl From<UnsupportedFilter> for diesel::result::Error { fn from(error: UnsupportedFilter) -> Self { diesel::result::Error::QueryBuilderError(Box::new(error)) } } fn str_as_bytes(id: &str) -> QueryResult<scalar::Bytes> { scalar::Bytes::from_str(&id).map_err(|e| DieselError::SerializationError(Box::new(e))) } /// Convert Postgres string representation of bytes "\xdeadbeef" /// to ours of just "deadbeef". fn bytes_as_str(id: &str) -> String { id.trim_start_matches("\\x").to_owned() } /// Conveniences for handling foreign keys depending on whether we are using /// `IdType::Bytes` or `IdType::String` as the primary key /// /// This trait adds some capabilities to `Column` that are very specific to /// how we generate SQL queries. 
Using a method like `bind_ids` from this /// trait on a given column means "send these values to the database in a form /// that can later be used for comparisons with that column" trait ForeignKeyClauses { /// The type of the column fn column_type(&self) -> &ColumnType; /// The name of the column fn name(&self) -> &str; /// Add `id` as a bind variable to `out`, using the right SQL type fn bind_id(&self, id: &str, out: &mut AstPass<Pg>) -> QueryResult<()> { match self.column_type().id_type() { IdType::String => out.push_bind_param::<Text, _>(&id)?, IdType::Bytes => out.push_bind_param::<Binary, _>(&str_as_bytes(&id)?.as_slice())?, } // Generate '::text' or '::bytea' out.push_sql("::"); out.push_sql(self.column_type().sql_type()); Ok(()) } /// Add `ids` as a bind variable to `out`, using the right SQL type fn bind_ids<S: AsRef<str> + diesel::serialize::ToSql<Text, Pg>>( &self, ids: &[S], out: &mut AstPass<Pg>, ) -> QueryResult<()> { match self.column_type().id_type() { IdType::String => out.push_bind_param::<Array<Text>, _>(&ids)?, IdType::Bytes => { let ids = ids .into_iter() .map(|id| str_as_bytes(id.as_ref())) .collect::<Result<Vec<scalar::Bytes>, _>>()?; let id_slices = ids.iter().map(|id| id.as_slice()).collect::<Vec<_>>(); out.push_bind_param::<Array<Binary>, _>(&id_slices)?; } } // Generate '::text[]' or '::bytea[]' out.push_sql("::"); out.push_sql(self.column_type().sql_type()); out.push_sql("[]"); Ok(()) } /// Generate a clause `{name()} = $id` using the right types to bind `$id` /// into `out` fn eq(&self, id: &str, out: &mut AstPass<Pg>) -> QueryResult<()> { out.push_sql(self.name()); out.push_sql(" = "); self.bind_id(id, out) } /// Generate a clause /// `exists (select 1 from unnest($ids) as p(g$id) where id = p.g$id)` /// using the right types to bind `$ids` into `out` fn is_in(&self, ids: &Vec<&str>, out: &mut AstPass<Pg>) -> QueryResult<()> { out.push_sql("exists (select 1 from unnest("); self.bind_ids(ids, out)?; out.push_sql(") as p(g$id) where id = 
p.g$id)"); Ok(()) } /// Generate an array of arrays as literal SQL. The `ids` must form a /// valid matrix, i.e. the same numbe of entries in each row. This can /// be achieved by padding them with `None` values. Diesel does not support /// arrays of arrays as bind variables, nor arrays containing nulls, so /// we have to manually serialize the `ids` as literal SQL. fn push_matrix( &self, matrix: &Vec<Vec<Option<SafeString>>>, out: &mut AstPass<Pg>, ) -> QueryResult<()> { out.push_sql("array["); if matrix.is_empty() { // If there are no ids, make sure we are producing an // empty array of arrays out.push_sql("array[null]"); } else { for (i, ids) in matrix.iter().enumerate() { if i > 0 { out.push_sql(", "); } out.push_sql("array["); for (j, id) in ids.iter().enumerate() { if j > 0 { out.push_sql(", "); } match id { None => out.push_sql("null"), Some(id) => match self.column_type().id_type() { IdType::String => { out.push_sql("'"); out.push_sql(&id.0); out.push_sql("'"); } IdType::Bytes => { out.push_sql("'\\x"); out.push_sql(&id.0.trim_start_matches("0x")); out.push_sql("'"); } }, } } out.push_sql("]"); } } // Generate '::text[][]' or '::bytea[][]' out.push_sql("]::"); out.push_sql(self.column_type().sql_type()); out.push_sql("[][]"); Ok(()) } } impl ForeignKeyClauses for Column { fn column_type(&self) -> &ColumnType { &self.column_type } fn name(&self) -> &str { self.name.as_str() } } pub trait FromEntityData: Default + From<Entity> { type Value: FromColumnValue; fn insert_entity_data(&mut self, key: String, v: Self::Value); } impl FromEntityData for Entity { type Value = graph::prelude::Value; fn insert_entity_data(&mut self, key: String, v: Self::Value) { self.insert(key, v); } } impl FromEntityData for BTreeMap<String, q::Value> { type Value = q::Value; fn insert_entity_data(&mut self, key: String, v: Self::Value) { self.insert(key, v); } } pub trait FromColumnValue: Sized { fn is_null(&self) -> bool; fn null() -> Self; fn from_string(s: String) -> Self; fn 
from_bool(b: bool) -> Self; fn from_i32(i: i32) -> Self; fn from_big_decimal(d: scalar::BigDecimal) -> Self; fn from_big_int(i: serde_json::Number) -> Result<Self, StoreError>; // The string returned by the DB, without the leading '\x' fn from_bytes(i: &str) -> Result<Self, StoreError>; fn from_vec(v: Vec<Self>) -> Self; fn from_column_value( column_type: &ColumnType, json: serde_json::Value, ) -> Result<Self, StoreError> { use serde_json::Value as j; // Many possible conversion errors are already caught by how // we define the schema; for example, we can only get a NULL for // a column that is actually nullable match (json, column_type) { (j::Null, _) => Ok(Self::null()), (j::Bool(b), _) => Ok(Self::from_bool(b)), (j::Number(number), ColumnType::Int) => match number.as_i64() { Some(i) => i32::try_from(i).map(Self::from_i32).map_err(|e| { StoreError::Unknown(anyhow!("failed to convert {} to Int: {}", number, e)) }), None => Err(StoreError::Unknown(anyhow!( "failed to convert {} to Int", number ))), }, (j::Number(number), ColumnType::BigDecimal) => { let s = number.to_string(); scalar::BigDecimal::from_str(s.as_str()) .map(Self::from_big_decimal) .map_err(|e| { StoreError::Unknown(anyhow!( "failed to convert {} to BigDecimal: {}", number, e )) }) } (j::Number(number), ColumnType::BigInt) => Self::from_big_int(number), (j::Number(number), column_type) => Err(StoreError::Unknown(anyhow!( "can not convert number {} to {:?}", number, column_type ))), (j::String(s), ColumnType::String) | (j::String(s), ColumnType::Enum(_)) => { Ok(Self::from_string(s)) } (j::String(s), ColumnType::Bytes) => Self::from_bytes(s.trim_start_matches("\\x")), (j::String(s), ColumnType::BytesId) => Ok(Self::from_string(bytes_as_str(&s))), (j::String(s), column_type) => Err(StoreError::Unknown(anyhow!( "can not convert string {} to {:?}", s, column_type ))), (j::Array(values), _) => Ok(Self::from_vec( values .into_iter() .map(|v| Self::from_column_value(column_type, v)) .collect::<Result<Vec<_>, 
_>>()?, )), (j::Object(_), _) => { unimplemented!("objects as entity attributes are not needed/supported") } } } } impl FromColumnValue for q::Value { fn is_null(&self) -> bool { self == &q::Value::Null } fn null() -> Self { Self::Null } fn from_string(s: String) -> Self { q::Value::String(s) } fn from_bool(b: bool) -> Self { q::Value::Boolean(b) } fn from_i32(i: i32) -> Self { q::Value::Int(i.into()) } fn from_big_decimal(d: scalar::BigDecimal) -> Self { q::Value::String(d.to_string()) } fn from_big_int(i: serde_json::Number) -> Result<Self, StoreError> { Ok(q::Value::String(i.to_string())) } fn from_bytes(b: &str) -> Result<Self, StoreError> { Ok(q::Value::String(format!("0x{}", b))) } fn from_vec(v: Vec<Self>) -> Self { q::Value::List(v) } } impl FromColumnValue for graph::prelude::Value { fn is_null(&self) -> bool { self == &Value::Null } fn null() -> Self { Self::Null } fn from_string(s: String) -> Self { graph::prelude::Value::String(s) } fn from_bool(b: bool) -> Self { graph::prelude::Value::Bool(b) } fn from_i32(i: i32) -> Self { graph::prelude::Value::Int(i) } fn from_big_decimal(d: scalar::BigDecimal) -> Self { graph::prelude::Value::BigDecimal(d) } fn from_big_int(i: serde_json::Number) -> Result<Self, StoreError> { scalar::BigInt::from_str(&i.to_string()) .map(graph::prelude::Value::BigInt) .map_err(|e| StoreError::Unknown(anyhow!("failed to convert {} to BigInt: {}", i, e))) } fn from_bytes(b: &str) -> Result<Self, StoreError> { scalar::Bytes::from_str(b) .map(graph::prelude::Value::Bytes) .map_err(|e| StoreError::Unknown(anyhow!("failed to convert {} to Bytes: {}", b, e))) } fn from_vec(v: Vec<Self>) -> Self { graph::prelude::Value::List(v) } } /// Helper struct for retrieving entities from the database. With diesel, we /// can only run queries that return columns whose number and type are known /// at compile time. 
/// at compile time. Because of that, we retrieve the actual data for an
/// entity as Jsonb by converting the row containing the entity using the
/// `to_jsonb` function.
#[derive(QueryableByName)]
pub struct EntityData {
    #[sql_type = "Text"]
    entity: String,
    #[sql_type = "Jsonb"]
    data: serde_json::Value,
}

impl EntityData {
    /// The name of the entity type this row belongs to
    pub fn entity_type(&self) -> String {
        self.entity.clone()
    }

    /// Map the `EntityData` using the schema information in `Layout`
    pub fn deserialize_with_layout<T: FromEntityData>(
        self,
        layout: &Layout,
    ) -> Result<T, StoreError> {
        let table = layout.table_for_entity(&self.entity)?;
        use serde_json::Value as j;
        match self.data {
            j::Object(map) => {
                let mut out = T::default();
                out.insert_entity_data("__typename".to_owned(), T::Value::from_string(self.entity));
                for (key, json) in map {
                    // Simply ignore keys that do not have an underlying table
                    // column; those will be things like the block_range that
                    // is used internally for versioning
                    if key == "g$parent_id" {
                        let value = T::Value::from_column_value(&ColumnType::String, json)?;
                        out.insert_entity_data("g$parent_id".to_owned(), value);
                    } else if let Some(column) = table.column(&SqlName::verbatim(key)) {
                        let value = T::Value::from_column_value(&column.column_type, json)?;
                        if !value.is_null() {
                            out.insert_entity_data(column.field.clone(), value);
                        }
                    }
                }
                Ok(out)
            }
            _ => unreachable!(
                "we use `to_json` in our queries, and will therefore always get an object back"
            ),
        }
    }
}

/// A `QueryValue` makes it possible to bind a `Value` into a SQL query
/// using the metadata from Column
struct QueryValue<'a>(&'a Value, &'a ColumnType);

impl<'a> QueryFragment<Pg> for QueryValue<'a> {
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.unsafe_to_cache_prepared();
        let column_type = self.1;
        match self.0 {
            Value::String(s) => match &column_type {
                ColumnType::String => out.push_bind_param::<Text, _>(s),
                // Enum values are bound as text and cast to the enum type
                ColumnType::Enum(enum_type) => {
                    out.push_bind_param::<Text, _>(s)?;
                    out.push_sql("::");
                    out.push_sql(enum_type.name.as_str());
                    Ok(())
                }
                // Fulltext query values go through to_tsquery
                ColumnType::TSVector(_) => {
                    out.push_sql("to_tsquery(");
                    out.push_bind_param::<Text, _>(s)?;
                    out.push_sql(")");
                    Ok(())
                }
                ColumnType::Bytes | ColumnType::BytesId => {
                    let bytes = scalar::Bytes::from_str(&s)
                        .map_err(|e| DieselError::SerializationError(Box::new(e)))?;
                    out.push_bind_param::<Binary, _>(&bytes.as_slice())
                }
                _ => unreachable!(
                    "only string, enum and tsvector columns have values of type string"
                ),
            },
            Value::Int(i) => out.push_bind_param::<Integer, _>(i),
            // Numeric values are bound as text and cast to numeric
            Value::BigDecimal(d) => {
                out.push_bind_param::<Text, _>(&d.to_string())?;
                out.push_sql("::numeric");
                Ok(())
            }
            Value::Bool(b) => out.push_bind_param::<Bool, _>(b),
            Value::List(values) => {
                let sql_values = SqlValue::new_array(values.clone());
                match &column_type {
                    ColumnType::BigDecimal | ColumnType::BigInt => {
                        let text_values: Vec<_> = values.iter().map(|v| v.to_string()).collect();
                        out.push_bind_param::<Array<Text>, _>(&text_values)?;
                        out.push_sql("::numeric[]");
                        Ok(())
                    }
                    ColumnType::Boolean => out.push_bind_param::<Array<Bool>, _>(&sql_values),
                    ColumnType::Bytes => out.push_bind_param::<Array<Binary>, _>(&sql_values),
                    ColumnType::Int => out.push_bind_param::<Array<Integer>, _>(&sql_values),
                    ColumnType::String => out.push_bind_param::<Array<Text>, _>(&sql_values),
                    ColumnType::Enum(enum_type) => {
                        out.push_bind_param::<Array<Text>, _>(&sql_values)?;
                        out.push_sql("::");
                        out.push_sql(enum_type.name.as_str());
                        out.push_sql("[]");
                        Ok(())
                    }
                    // TSVector will only be in a Value::List() for inserts so "to_tsvector" can always be used here
                    ColumnType::TSVector(config) => {
                        if sql_values.is_empty() {
                            out.push_sql("''::tsvector");
                        } else {
                            // Concatenate the to_tsvector of every list element
                            out.push_sql("(");
                            for (i, value) in sql_values.iter().enumerate() {
                                if i > 0 {
                                    out.push_sql(") || ");
                                }
                                out.push_sql("to_tsvector(");
                                out.push_bind_param::<Text, _>(
                                    &config.language.as_str().to_string(),
                                )?;
                                out.push_sql("::regconfig, ");
                                out.push_bind_param::<Text, _>(&value)?;
                            }
                            out.push_sql("))");
                        }
                        Ok(())
                    }
                    ColumnType::BytesId => out.push_bind_param::<Array<Binary>, _>(&sql_values),
                }
            }
            Value::Null => {
                out.push_sql("null");
                Ok(())
            }
            Value::Bytes(b) => out.push_bind_param::<Binary, _>(&b.as_slice()),
            // BigInt is also bound as text and cast to numeric
            Value::BigInt(i) => {
                out.push_bind_param::<Text, _>(&i.to_string())?;
                out.push_sql("::numeric");
                Ok(())
            }
        }
    }
}

/// The SQL comparison operators this module supports
#[derive(Copy, Clone, PartialEq)]
enum Comparison {
    Less,
    LessOrEqual,
    Equal,
    NotEqual,
    GreaterOrEqual,
    Greater,
    Match,
}

impl Comparison {
    /// The operator as SQL text, with surrounding spaces
    fn as_str(&self) -> &str {
        use Comparison::*;
        match self {
            Less => " < ",
            LessOrEqual => " <= ",
            Equal => " = ",
            NotEqual => " != ",
            GreaterOrEqual => " >= ",
            Greater => " > ",
            Match => " @@ ",
        }
    }
}

/// Produce a comparison between the string column `column` and the string
/// value `text` that makes it obvious to Postgres' optimizer that it can
/// first consult the partial index on `left(column, STRING_PREFIX_SIZE)`
/// instead of going straight to a sequential scan of the underlying table.
/// We do this by writing the comparison `column op text` in a way that
/// involves `left(column, STRING_PREFIX_SIZE)`
#[derive(Constructor)]
struct PrefixComparison<'a> {
    op: Comparison,
    column: &'a Column,
    text: &'a Value,
}

impl<'a> PrefixComparison<'a> {
    /// Emit `left(column, STRING_PREFIX_SIZE)`
    fn push_column_prefix(column: &Column, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.push_sql("left(");
        out.push_identifier(column.name.as_str())?;
        out.push_sql(", ");
        out.push_sql(&STRING_PREFIX_SIZE.to_string());
        out.push_sql(")");
        Ok(())
    }

    /// Emit `left({text}, STRING_PREFIX_SIZE)` with `text` as a bind param
    fn push_value_prefix(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.push_sql("left(");
        QueryValue(self.text, &self.column.column_type).walk_ast(out.reborrow())?;
        out.push_sql(", ");
        out.push_sql(&STRING_PREFIX_SIZE.to_string());
        out.push_sql(")");
        Ok(())
    }

    /// Emit a comparison between the column prefix and the value prefix
    fn push_prefix_cmp(&self, op: Comparison, mut out: AstPass<Pg>) -> QueryResult<()> {
        Self::push_column_prefix(self.column, out.reborrow())?;
        out.push_sql(op.as_str());
        self.push_value_prefix(out.reborrow())
    }

    /// Emit a comparison between the full column and the full value
    fn push_full_cmp(&self, op: Comparison, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.push_identifier(self.column.name.as_str())?;
        out.push_sql(op.as_str());
        QueryValue(self.text, &self.column.column_type).walk_ast(out)
    }
}

impl<'a> QueryFragment<Pg> for PrefixComparison<'a> {
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        use Comparison::*;

        // For the various comparison operators, we want to write the condition
        // `column op text` in a way that lets Postgres use the index on
        // `left(column, STRING_PREFIX_SIZE)`. If at all possible, we also want
        // the condition in a form that only uses the index, or if that's not
        // possible, in a form where Postgres can first reduce the number of
        // rows where a full comparison between `column` and `text` is needed
        // by consulting the index.
        //
        // To ease notation, let `N = STRING_PREFIX_SIZE` and write a string stored in
        // `column` as `uv` where `len(u) <= N`; that means that `v` is only
        // nonempty if `len(uv) > N`. We similarly split `text` into `st` where
        // `len(s) <= N`. In other words, `u = left(column, N)` and
        // `s = left(text, N)`
        //
        // In all these comparisons, if `len(st) <= N - 1`, we can reduce
        // checking `uv op st` to `u op s`, since in that case `t` is the empty
        // string, we have `uv op s`. If `len(uv) <= N - 1`, then `v` will
        // also be the empty string. If `len(uv) >= N`, then `len(u) = N`,
        // and since `u` will be one character longer than `s`, that character
        // will decide the outcome of `u op s`, even if `u` and `s` agree on
        // the first `N-1` characters.
        //
        // For equality, we can expand `uv = st` into `u = s && uv = st` which
        // lets Postgres use the index for the first comparison, and `uv = st`
        // only needs to be checked for rows that pass the check on the index.
        //
        // For inequality, we can write `uv != st` as `u != s || uv != st`,
        // but that doesn't buy us much since Postgres always needs to check
        // `uv != st`, for which the index is of little help.
        //
        // For `op` either `<` or `>`, we have
        //   uv op st <=> u op s || u = s && uv op st
        //
        // For `op` either `<=` or `>=`, we can write (using '<=' as an example)
        //   uv <= st <=> u < s || u = s && uv <= st
        let large = if let Value::String(s) = self.text {
            // We need to check the entire string
            s.len() > STRING_PREFIX_SIZE - 1
        } else {
            unreachable!("text columns are only ever compared to strings");
        };

        match self.op {
            Equal => {
                if large {
                    // u = s and uv = st
                    out.push_sql("(");
                    self.push_prefix_cmp(self.op, out.reborrow())?;
                    out.push_sql(" and ");
                    self.push_full_cmp(self.op, out.reborrow())?;
                    out.push_sql(")");
                } else {
                    self.push_prefix_cmp(self.op, out.reborrow())?;
                }
            }
            Match => {
                self.push_full_cmp(self.op, out.reborrow())?;
            }
            NotEqual => {
                if large {
                    self.push_full_cmp(self.op, out.reborrow())?;
                } else {
                    self.push_prefix_cmp(self.op, out.reborrow())?;
                }
            }
            LessOrEqual | Less | GreaterOrEqual | Greater => {
                // The strict form of the operator for the prefix comparison
                let prefix_op = match self.op {
                    LessOrEqual => Less,
                    GreaterOrEqual => Greater,
                    op => op,
                };
                if large {
                    // u op s or (u = s and uv op st)
                    out.push_sql("(");
                    self.push_prefix_cmp(prefix_op, out.reborrow())?;
                    out.push_sql(" or (");
                    self.push_prefix_cmp(Equal, out.reborrow())?;
                    out.push_sql(" and ");
                    self.push_full_cmp(self.op, out.reborrow())?;
                    out.push_sql("))");
                } else {
                    self.push_prefix_cmp(self.op, out.reborrow())?;
                }
            }
        }
        Ok(())
    }
}

/// A `QueryFilter` adds the conditions represented by the `filter` to
/// the `where` clause of a SQL query.
/// The attributes mentioned in
/// the `filter` must all come from the given `table`, which is used to
/// map GraphQL names to column names, and to determine the type of the
/// column an attribute refers to
#[derive(Debug, Clone)]
pub struct QueryFilter<'a> {
    filter: &'a EntityFilter,
    table: &'a Table,
}

impl<'a> QueryFilter<'a> {
    pub fn new(filter: &'a EntityFilter, table: &'a Table) -> Result<Self, StoreError> {
        Self::valid_attributes(filter, table)?;
        Ok(QueryFilter { filter, table })
    }

    /// Check that every attribute the filter mentions maps to a column
    /// of `table`
    fn valid_attributes(filter: &'a EntityFilter, table: &'a Table) -> Result<(), StoreError> {
        use EntityFilter::*;
        match filter {
            And(filters) | Or(filters) => {
                for filter in filters {
                    Self::valid_attributes(filter, table)?;
                }
            }

            Contains(attr, _)
            | NotContains(attr, _)
            | Equal(attr, _)
            | Not(attr, _)
            | GreaterThan(attr, _)
            | LessThan(attr, _)
            | GreaterOrEqual(attr, _)
            | LessOrEqual(attr, _)
            | In(attr, _)
            | NotIn(attr, _)
            | StartsWith(attr, _)
            | NotStartsWith(attr, _)
            | EndsWith(attr, _)
            | NotEndsWith(attr, _) => {
                table.column_for_field(attr)?;
            }
        }
        Ok(())
    }

    /// A copy of `self` for the same table but with a different filter
    fn with(&self, filter: &'a EntityFilter) -> Self {
        QueryFilter {
            filter,
            table: self.table,
        }
    }

    /// The column for `attribute`; panics if there is none, which the
    /// constructor has already ruled out
    fn column(&self, attribute: &Attribute) -> &'a Column {
        self.table
            .column_for_field(attribute)
            .expect("the constructor already checked that all attribute names are valid")
    }

    /// Emit `(f1 op f2 op ...)` for all `filters`, or `on_empty` if there
    /// are none
    fn binary_op(
        &self,
        filters: &Vec<EntityFilter>,
        op: &str,
        on_empty: &str,
        mut out: AstPass<Pg>,
    ) -> QueryResult<()> {
        if !filters.is_empty() {
            out.push_sql("(");
            for (i, filter) in filters.iter().enumerate() {
                if i > 0 {
                    out.push_sql(op);
                }
                self.with(&filter).walk_ast(out.reborrow())?;
            }
            out.push_sql(")");
        } else {
            out.push_sql(on_empty);
        }
        Ok(())
    }

    /// Emit a (possibly negated) containment check: `like` for strings,
    /// `position(..)` for bytes, and `@>`/`&&` for lists
    fn contains(
        &self,
        attribute: &Attribute,
        value: &Value,
        negated: bool,
        mut out: AstPass<Pg>,
    ) -> QueryResult<()> {
        let column = self.column(attribute);
        match value {
            Value::String(s) => {
                out.push_identifier(column.name.as_str())?;
                if negated {
                    out.push_sql(" not like ");
                } else {
                    out.push_sql(" like ")
                };
                // Wrap the value in '%..%' unless the caller already put
                // a wildcard at either end
                if s.starts_with('%') || s.ends_with('%') {
                    out.push_bind_param::<Text, _>(s)?;
                } else {
                    let s = format!("%{}%", s);
                    out.push_bind_param::<Text, _>(&s)?;
                }
            }
            Value::Bytes(b) => {
                out.push_sql("position(");
                out.push_bind_param::<Binary, _>(&b.as_slice())?;
                out.push_sql(" in ");
                out.push_identifier(column.name.as_str())?;
                if negated {
                    out.push_sql(") = 0")
                } else {
                    out.push_sql(") > 0");
                }
            }
            Value::List(_) => {
                if negated {
                    out.push_sql(" not ");
                    out.push_identifier(column.name.as_str())?;
                    out.push_sql(" && ");
                } else {
                    out.push_identifier(column.name.as_str())?;
                    out.push_sql(" @> ");
                }
                QueryValue(value, &column.column_type).walk_ast(out)?;
            }
            Value::Null
            | Value::BigDecimal(_)
            | Value::Int(_)
            | Value::Bool(_)
            | Value::BigInt(_) => {
                let filter = match negated {
                    false => "contains",
                    true => "not_contains",
                };
                return Err(UnsupportedFilter {
                    filter: filter.to_owned(),
                    value: value.clone(),
                }
                .into());
            }
        }
        Ok(())
    }

    /// Emit an equality (or inequality) check; `op` must be `=` or `!=`
    fn equals(
        &self,
        attribute: &Attribute,
        value: &Value,
        op: Comparison,
        mut out: AstPass<Pg>,
    ) -> QueryResult<()> {
        let column = self.column(attribute);

        if column.is_text() && value.is_string() {
            PrefixComparison::new(op, column, value).walk_ast(out.reborrow())?;
        } else if column.is_fulltext() {
            out.push_identifier(column.name.as_str())?;
            out.push_sql(Comparison::Match.as_str());
            QueryValue(value, &column.column_type).walk_ast(out)?;
        } else {
            out.push_identifier(column.name.as_str())?;

            match value {
                Value::String(_)
                | Value::BigInt(_)
                | Value::Bool(_)
                | Value::Bytes(_)
                | Value::BigDecimal(_)
                | Value::Int(_)
                | Value::List(_) => {
                    out.push_sql(op.as_str());
                    QueryValue(value, &column.column_type).walk_ast(out)?;
                }
                // Null needs 'is (not) null' rather than a comparison operator
                Value::Null => {
                    use Comparison as c;
                    match op {
                        c::Equal => out.push_sql(" is null"),
                        c::NotEqual => out.push_sql(" is not null"),
                        _ => unreachable!("we only call equals with '=' or '!='"),
                    }
                }
            }
        }
        Ok(())
    }

    /// Emit an ordering comparison; only numeric and string values support it
    fn compare(
        &self,
        attribute: &Attribute,
        value: &Value,
        op: Comparison,
        mut out: AstPass<Pg>,
    ) -> QueryResult<()> {
        let column = self.column(attribute);

        if column.is_text() && value.is_string() {
            PrefixComparison::new(op, column, value).walk_ast(out.reborrow())?;
        } else {
            out.push_identifier(column.name.as_str())?;
            out.push_sql(op.as_str());
            match value {
                Value::BigInt(_) | Value::BigDecimal(_) | Value::Int(_) | Value::String(_) => {
                    QueryValue(value, &column.column_type).walk_ast(out)?
                }
                Value::Bool(_) | Value::Bytes(_) | Value::List(_) | Value::Null => {
                    return Err(UnsupportedFilter {
                        filter: op.as_str().to_owned(),
                        value: value.clone(),
                    }
                    .into());
                }
            }
        }
        Ok(())
    }

    /// Emit an `in`/`not in` check over `values`
    fn in_array(
        &self,
        attribute: &Attribute,
        values: &Vec<Value>,
        negated: bool,
        mut out: AstPass<Pg>,
    ) -> QueryResult<()> {
        let column = self.column(attribute);

        if values.is_empty() {
            out.push_sql("false");
            return Ok(());
        }

        // NULLs in SQL are very special creatures, and we need to treat
        // them special. For non-NULL values, we generate
        //   attribute {in|not in} (value1, value2, ...)
        // and for NULL values we generate
        //   attribute {is|is not} null
        // If we have both NULL and non-NULL values we join these
        // two clauses with OR.
        //
        // Note that when we have no non-NULL values at all, we must
        // not generate `attribute {in|not in} ()` since the empty `()`
        // is a syntax error
        //
        // Because we checked above, one of these two will be true
        let have_nulls = values.iter().any(|value| value == &Value::Null);
        let have_non_nulls = values.iter().any(|value| value != &Value::Null);

        if have_nulls && have_non_nulls {
            out.push_sql("(");
        }

        if have_nulls {
            out.push_identifier(column.name.as_str())?;
            if negated {
                out.push_sql(" is not null");
            } else {
                out.push_sql(" is null")
            }
        }

        if have_nulls && have_non_nulls {
            out.push_sql(" or ");
        }

        if have_non_nulls {
            if column.is_text()
                && values.iter().all(|v| match v {
                    Value::String(s) => s.len() <= STRING_PREFIX_SIZE - 1,
                    _ => false,
                })
            {
                // If all values are shorter than STRING_PREFIX_SIZE - 1,
                // only check the prefix of the column; that's a fairly common
                // case and we present it in the best possible way for
                // Postgres' query optimizer
                // See PrefixComparison for a more detailed discussion of what
                // is happening here
                PrefixComparison::push_column_prefix(&column, out.reborrow())?;
            } else {
                out.push_identifier(column.name.as_str())?;
            }
            if negated {
                out.push_sql(" not in (");
            } else {
                out.push_sql(" in (");
            }
            for (i, value) in values
                .iter()
                .filter(|value| value != &&Value::Null)
                .enumerate()
            {
                if i > 0 {
                    out.push_sql(", ");
                }
                QueryValue(&value, &column.column_type).walk_ast(out.reborrow())?;
            }
            out.push_sql(")");
        }

        if have_nulls && have_non_nulls {
            out.push_sql(")");
        }
        Ok(())
    }

    /// Emit a `like`/`not like` check anchored at the start or the end of
    /// the string
    fn starts_or_ends_with(
        &self,
        attribute: &Attribute,
        value: &Value,
        op: &str,
        starts_with: bool,
        mut out: AstPass<Pg>,
    ) -> QueryResult<()> {
        let column = self.column(attribute);

        out.push_identifier(column.name.as_str())?;
        out.push_sql(op);
        match value {
            Value::String(s) => {
                let s = if starts_with {
                    format!("{}%", s)
                } else {
                    format!("%{}", s)
                };
                out.push_bind_param::<Text, _>(&s)?
            }
            Value::Bool(_)
            | Value::BigInt(_)
            | Value::Bytes(_)
            | Value::BigDecimal(_)
            | Value::Int(_)
            | Value::List(_)
            | Value::Null => {
                return Err(UnsupportedFilter {
                    filter: op.to_owned(),
                    value: value.clone(),
                }
                .into());
            }
        }
        Ok(())
    }
}

impl<'a> QueryFragment<Pg> for QueryFilter<'a> {
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.unsafe_to_cache_prepared();

        use Comparison as c;
        use EntityFilter::*;
        match &self.filter {
            And(filters) => self.binary_op(filters, " and ", " true ", out)?,
            Or(filters) => self.binary_op(filters, " or ", " false ", out)?,

            Contains(attr, value) => self.contains(attr, value, false, out)?,
            NotContains(attr, value) => self.contains(attr, value, true, out)?,

            Equal(attr, value) => self.equals(attr, value, c::Equal, out)?,
            Not(attr, value) => self.equals(attr, value, c::NotEqual, out)?,

            GreaterThan(attr, value) => self.compare(attr, value, c::Greater, out)?,
            LessThan(attr, value) => self.compare(attr, value, c::Less, out)?,
            GreaterOrEqual(attr, value) => self.compare(attr, value, c::GreaterOrEqual, out)?,
            LessOrEqual(attr, value) => self.compare(attr, value, c::LessOrEqual, out)?,

            In(attr, values) => self.in_array(attr, values, false, out)?,
            NotIn(attr, values) => self.in_array(attr, values, true, out)?,

            StartsWith(attr, value) => {
                self.starts_or_ends_with(attr, value, " like ", true, out)?
            }
            NotStartsWith(attr, value) => {
                self.starts_or_ends_with(attr, value, " not like ", true, out)?
            }
            EndsWith(attr, value) => self.starts_or_ends_with(attr, value, " like ", false, out)?,
            NotEndsWith(attr, value) => {
                self.starts_or_ends_with(attr, value, " not like ", false, out)?
            }
        }
        Ok(())
    }
}

/// A query that loads a single entity version by id, valid at `block`
#[derive(Debug, Clone, Constructor)]
pub struct FindQuery<'a> {
    table: &'a Table,
    id: &'a str,
    block: BlockNumber,
}

impl<'a> QueryFragment<Pg> for FindQuery<'a> {
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.unsafe_to_cache_prepared();

        // Generate
        //    select '..'
        //        as entity, to_jsonb(e.*) as data
        //      from schema.table e where id = $1
        out.push_sql("select ");
        out.push_bind_param::<Text, _>(&self.table.object)?;
        out.push_sql(" as entity, to_jsonb(e.*) as data\n");
        out.push_sql(" from ");
        out.push_sql(self.table.qualified_name.as_str());
        out.push_sql(" e\n where ");
        self.table.primary_key().eq(&self.id, &mut out)?;
        out.push_sql(" and ");
        BlockRangeContainsClause::new(&self.table, "e.", self.block).walk_ast(out)
    }
}

impl<'a> QueryId for FindQuery<'a> {
    type QueryId = ();

    const HAS_STATIC_QUERY_ID: bool = false;
}

impl<'a> LoadQuery<PgConnection, EntityData> for FindQuery<'a> {
    fn internal_load(self, conn: &PgConnection) -> QueryResult<Vec<EntityData>> {
        conn.query_by_name(&self)
    }
}

impl<'a, Conn> RunQueryDsl<Conn> for FindQuery<'a> {}

/// A query that loads many entities, possibly of different types, by id,
/// all valid at `block`
#[derive(Debug, Clone, Constructor)]
pub struct FindManyQuery<'a> {
    pub(crate) namespace: &'a Namespace,
    pub(crate) tables: Vec<&'a Table>,

    // Maps object name to ids.
    pub(crate) ids_for_type: BTreeMap<&'a str, &'a Vec<&'a str>>,
    pub(crate) block: BlockNumber,
}

impl<'a> QueryFragment<Pg> for FindManyQuery<'a> {
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.unsafe_to_cache_prepared();

        // Generate
        //    select $object0 as entity, to_jsonb(e.*) as data
        //      from schema.<table0> e where {id.is_in($ids0)}
        //    union all
        //    select $object1 as entity, to_jsonb(e.*) as data
        //      from schema.<table1> e where {id.is_in($ids1))
        //    union all
        //    ...
        for (i, table) in self.tables.iter().enumerate() {
            if i > 0 {
                out.push_sql("\nunion all\n");
            }
            out.push_sql("select ");
            out.push_bind_param::<Text, _>(&table.object)?;
            out.push_sql(" as entity, to_jsonb(e.*) as data\n");
            out.push_sql(" from ");
            out.push_sql(table.qualified_name.as_str());
            out.push_sql(" e\n where ");
            table
                .primary_key()
                .is_in(&self.ids_for_type[table.object.as_str()], &mut out)?;
            out.push_sql(" and ");
            BlockRangeContainsClause::new(&table, "e.", self.block).walk_ast(out.reborrow())?;
        }
        Ok(())
    }
}

impl<'a> QueryId for FindManyQuery<'a> {
    type QueryId = ();

    const HAS_STATIC_QUERY_ID: bool = false;
}

impl<'a> LoadQuery<PgConnection, EntityData> for FindManyQuery<'a> {
    fn internal_load(self, conn: &PgConnection) -> QueryResult<Vec<EntityData>> {
        conn.query_by_name(&self)
    }
}

impl<'a, Conn> RunQueryDsl<Conn> for FindManyQuery<'a> {}

/// A query that inserts a new version of `entity`, valid from `block` on
#[derive(Debug, Clone)]
pub struct InsertQuery<'a> {
    table: &'a Table,
    key: &'a EntityKey,
    entity: Entity,
    block: BlockNumber,
}

impl<'a> InsertQuery<'a> {
    pub fn new(
        table: &'a Table,
        key: &'a EntityKey,
        entity: Entity,
        block: BlockNumber,
    ) -> Result<InsertQuery<'a>, StoreError> {
        let mut entity = entity;
        for column in table.columns.iter() {
            // Combine the values of all source fields of a fulltext column
            // into a list stored under the column's own field name
            match column.fulltext_fields.as_ref() {
                Some(fields) => {
                    let fulltext_field_values = fields
                        .iter()
                        .filter_map(|field| entity.get(field))
                        .cloned()
                        .collect::<Vec<Value>>();
                    if !fulltext_field_values.is_empty() {
                        entity.insert(column.field.to_string(), Value::List(fulltext_field_values));
                    }
                }
                None => (),
            }
            // Every non-nullable column must have a value
            if !column.is_nullable() && !entity.contains_key(&column.field) {
                return Err(StoreError::QueryExecutionError(format!(
                    "can not insert entity {}[{}] since value for non-nullable attribute {} is missing. \
                     To fix this, mark the attribute as nullable in the GraphQL schema or change the \
                     mapping code to always set this attribute.",
                    key.entity_type, key.entity_id, column.field
                )));
            }
        }
        Ok(InsertQuery {
            table,
            key,
            entity,
            block,
        })
    }
}

impl<'a> QueryFragment<Pg> for InsertQuery<'a> {
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.unsafe_to_cache_prepared();

        // Construct a query
        //   insert into schema.table(column, ...)
        //   values ($1, ...)
        // and convert and bind the entity's values into it
        out.push_sql("insert into ");
        out.push_sql(self.table.qualified_name.as_str());
        out.push_sql("(");
        for column in self.table.columns.iter() {
            if self.entity.contains_key(&column.field) {
                out.push_identifier(column.name.as_str())?;
                out.push_sql(", ");
            }
        }
        out.push_identifier(BLOCK_RANGE_COLUMN)?;
        out.push_sql(")\nvalues(");
        for column in self.table.columns.iter() {
            if let Some(value) = self.entity.get(&column.field) {
                QueryValue(value, &column.column_type).walk_ast(out.reborrow())?;
                out.push_sql(", ");
            }
        }
        // The new version is valid from `block` onwards (open-ended range)
        let block_range: BlockRange = (self.block..).into();
        out.push_bind_param::<Range<Integer>, _>(&block_range)?;
        out.push_sql(")");
        Ok(())
    }
}

impl<'a> QueryId for InsertQuery<'a> {
    type QueryId = ();

    const HAS_STATIC_QUERY_ID: bool = false;
}

impl<'a, Conn> RunQueryDsl<Conn> for InsertQuery<'a> {}

/// Update an existing entity in place. The `entity` only needs to contain
/// the attributes that should be changed, and not the entire entity. In
/// particular, it might not have an `id` attribute. If the entity has
/// attributes that are not in the table, they are silently ignored.
#[derive(Debug, Clone)]
pub struct UpdateQuery<'a> {
    table: &'a Table,
    key: &'a EntityKey,
    entity: &'a Entity,
}

impl<'a> UpdateQuery<'a> {
    pub fn new(
        table: &'a Table,
        key: &'a EntityKey,
        entity: &'a Entity,
    ) -> Result<Self, StoreError> {
        // The id of an entity is immutable; reject updates that try to
        // change it
        if let Some(Value::String(id)) = entity.get("id") {
            if id != &key.entity_id {
                return Err(StoreError::QueryExecutionError(format!(
                    "changing the id of an \
                     entity is not allowed, and the attempt to change the id of \
                     entity {}[{}] to {} has failed",
                    key.entity_type, key.entity_id, id
                )));
            }
        }

        Ok(UpdateQuery { table, key, entity })
    }
}

impl<'a> QueryFragment<Pg> for UpdateQuery<'a> {
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.unsafe_to_cache_prepared();

        // A column can be updated unless it is the primary key
        let updateable = { |column: &Column| !column.is_primary_key() };

        // Construct a query
        //   update schema.table1
        //      set col = $1
        //          ...
        //    where id = $n
        //      and guard
        out.push_sql("update ");
        out.push_sql(self.table.qualified_name.as_str());
        out.push_sql("\n set ");
        if !self.table.columns.iter().any(updateable) {
            // If we do not have any columns to update, we still need
            // to produce a syntactically correct update statement. This
            // is syntactically correct, even though it won't change anything
            out.push_identifier(PRIMARY_KEY_COLUMN)?;
            out.push_sql(" = null where false");
            return Ok(());
        }

        // Change only the attributes mentioned in `entity`, but never the
        // primary key
        for (i, (column, value)) in self
            .table
            .columns
            .iter()
            .filter(|col| updateable(*col))
            .filter_map(|col| self.entity.get(&col.field).map(|value| (col, value)))
            .enumerate()
        {
            if i > 0 {
                out.push_sql(",\n ");
            }
            out.push_identifier(column.name.as_str())?;
            out.push_sql(" = ");
            QueryValue(value, &column.column_type).walk_ast(out.reborrow())?;
        }
        out.push_sql("\n where ");
        self.table.primary_key().eq(&self.key.entity_id, &mut out)?;
        Ok(())
    }
}

impl<'a> QueryId for UpdateQuery<'a> {
    type QueryId = ();

    const HAS_STATIC_QUERY_ID: bool = false;
}

impl<'a, Conn> RunQueryDsl<Conn> for UpdateQuery<'a> {}

/// A query that deletes all versions of an entity
#[derive(Debug, Clone, Constructor)]
pub struct DeleteQuery<'a> {
    table: &'a Table,
    key: &'a EntityKey,
}

impl<'a> QueryFragment<Pg> for DeleteQuery<'a> {
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.unsafe_to_cache_prepared();

        // Construct a query
        //   delete from table
        //    where id = $key.entity_id
        out.push_sql("delete from ");
        out.push_sql(self.table.qualified_name.as_str());
        out.push_sql("\n where ");
        self.table.primary_key().eq(&self.key.entity_id, &mut out)
    }
}

impl<'a> QueryId for DeleteQuery<'a> {
    type QueryId = ();

    const HAS_STATIC_QUERY_ID: bool = false;
}

impl<'a, Conn> RunQueryDsl<Conn> for DeleteQuery<'a> {}

/// A query that checks which of the given entity types already contain an
/// entity with id `entity_id`
#[derive(Debug, Clone)]
pub struct ConflictingEntityQuery<'a> {
    layout: &'a Layout,
    tables: Vec<&'a Table>,
    entity_id: &'a String,
}

impl<'a> ConflictingEntityQuery<'a> {
    pub fn new(
        layout: &'a Layout,
        entities: Vec<&'a String>,
        entity_id: &'a String,
    ) -> Result<Self, StoreError> {
        let tables = entities
            .iter()
            .map(|entity| layout.table_for_entity(entity).map(|table| table.as_ref()))
            .collect::<Result<Vec<_>, _>>()?;
        Ok(ConflictingEntityQuery {
            layout,
            tables,
            entity_id,
        })
    }
}

impl<'a> QueryFragment<Pg> for ConflictingEntityQuery<'a> {
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        out.unsafe_to_cache_prepared();

        // Construct a query
        //   select 'Type1' as entity from schema.table1 where id = $1
        //   union all
        //   select 'Type2' as entity from schema.table2 where id = $1
        //   union all
        //   ...
        for (i, table) in self.tables.iter().enumerate() {
            if i > 0 {
                out.push_sql("\nunion all\n");
            }
            out.push_sql("select ");
            out.push_bind_param::<Text, _>(&table.object)?;
            out.push_sql(" as entity from ");
            out.push_sql(table.qualified_name.as_str());
            out.push_sql(" where id = ");
            out.push_bind_param::<Text, _>(self.entity_id)?;
        }
        Ok(())
    }
}

impl<'a> QueryId for ConflictingEntityQuery<'a> {
    type QueryId = ();

    const HAS_STATIC_QUERY_ID: bool = false;
}

/// A row returned by `ConflictingEntityQuery`: just the entity type name
#[derive(QueryableByName)]
pub struct ConflictingEntityData {
    #[sql_type = "Text"]
    pub entity: String,
}

impl<'a> LoadQuery<PgConnection, ConflictingEntityData> for ConflictingEntityQuery<'a> {
    fn internal_load(self, conn: &PgConnection) -> QueryResult<Vec<ConflictingEntityData>> {
        conn.query_by_name(&self)
    }
}

impl<'a, Conn> RunQueryDsl<Conn> for ConflictingEntityQuery<'a> {}

/// A string where we have checked that it is safe to embed it literally
/// in a string in a SQL query. In particular, we have escaped any use
/// of the string delimiter `'`.
///
/// This is only needed for `ParentIds::List` since we can't send those to
/// the database as a bind variable, and therefore need to embed them in
/// the query literally
#[derive(Debug, Clone)]
struct SafeString(String);

/// A `ParentLink` where we've made sure for the `List` variant that each
/// `Vec<Option<String>>` has the same length
/// Use the provided constructors to make sure this invariant holds
#[derive(Debug, Clone)]
enum ParentIds {
    List(Vec<Vec<Option<SafeString>>>),
    Scalar(Vec<String>),
}

impl ParentIds {
    fn new(link: ParentLink) -> Self {
        match link {
            ParentLink::Scalar(child_ids) => ParentIds::Scalar(child_ids),
            ParentLink::List(child_ids) => {
                // Postgres will only accept child_ids, which is a Vec<Vec<String>>
                // if all Vec<String> are the same length. We therefore pad
                // shorter ones with None, which become nulls in the database
                let maxlen = child_ids.iter().map(|ids| ids.len()).max().unwrap_or(0);
                let child_ids = child_ids
                    .into_iter()
                    .map(|ids| {
                        let mut ids: Vec<_> = ids
                            .into_iter()
                            .map(|s| {
                                // Escape the SQL string delimiter so the id
                                // can be embedded literally in a query
                                if s.contains('\'') {
                                    SafeString(s.replace('\'', "''"))
                                } else {
                                    SafeString(s)
                                }
                            })
                            .map(Some)
                            .collect();
                        ids.resize_with(maxlen, || None);
                        ids
                    })
                    .collect();
                ParentIds::List(child_ids)
            }
        }
    }
}

/// An `EntityLink` where we've resolved the entity type and attribute to the
/// corresponding table and column
#[derive(Debug, Clone)]
enum TableLink<'a> {
    Direct(&'a Column, ChildMultiplicity),
    Parent(ParentIds),
}

impl<'a> TableLink<'a> {
    fn new(child_table: &'a Table, link: EntityLink) -> Result<Self, QueryExecutionError> {
        match link {
            EntityLink::Direct(attribute, multiplicity) => {
                let column = child_table.column_for_field(attribute.name())?;
                Ok(TableLink::Direct(column, multiplicity))
            }
            EntityLink::Parent(parent_link) => Ok(TableLink::Parent(ParentIds::new(parent_link))),
        }
    }
}

/// When we expand the parents for a specific query for children, we
/// sometimes (aka interfaces) need to restrict them to a specific
/// parent `q.id` that an outer
/// query has already set up. In all other
/// cases, we restrict the children to the top n by ordering by a specific
/// sort key and limiting
#[derive(Copy, Clone)]
enum ParentLimit<'a> {
    /// Limit children to a specific parent
    Outer,
    /// Limit children by sorting and picking top n
    Ranked(&'a SortKey<'a>, &'a FilterRange),
}

impl<'a> ParentLimit<'a> {
    /// Tie the child query to the enclosing parent query when needed
    fn filter(&self, out: &mut AstPass<Pg>) {
        match self {
            ParentLimit::Outer => out.push_sql(" and q.id = p.id"),
            ParentLimit::Ranked(_, _) => (),
        }
    }

    /// Append 'order by .. limit ..' for ranked child queries
    fn restrict(&self, out: &mut AstPass<Pg>) -> QueryResult<()> {
        if let ParentLimit::Ranked(sort_key, range) = self {
            out.push_sql(" ");
            sort_key.order_by(out)?;
            range.walk_ast(out.reborrow())?;
        }
        Ok(())
    }

    /// Include a 'limit {num_parents}+1' clause for single-object queries
    /// if that is needed
    fn single_limit(&self, num_parents: usize, out: &mut AstPass<Pg>) {
        match self {
            ParentLimit::Ranked(_, _) => {
                out.push_sql(" limit ");
                out.push_sql(&(num_parents + 1).to_string());
            }
            ParentLimit::Outer => {
                // limiting is taken care of in a wrapper around
                // the query we are currently building
            }
        }
    }
}

/// This is the parallel to `EntityWindow`, with names translated to
/// the relational layout, and checked against it
#[derive(Debug, Clone)]
pub struct FilterWindow<'a> {
    /// The table from which we take entities
    table: &'a Table,
    /// The overall filter for the entire query
    query_filter: Option<QueryFilter<'a>>,
    /// The parent ids we are interested in. The type in the database
    /// for these is determined by the `IdType` of the parent table.
    /// Since
    /// we always compare these ids with a column in `table`, and that
    /// column must have the same type as the primary key of the parent
    /// table, we can deduce the correct `IdType` that way
    ids: Vec<String>,
    /// How to filter by a set of parents
    link: TableLink<'a>,
}

impl<'a> FilterWindow<'a> {
    fn new(
        layout: &'a Layout,
        window: EntityWindow,
        query_filter: Option<&'a EntityFilter>,
    ) -> Result<Self, QueryExecutionError> {
        let EntityWindow {
            child_type,
            ids,
            link,
        } = window;
        let table = layout.table_for_entity(&child_type).map(|rc| rc.as_ref())?;

        // Check that the filter mentions only attributes of the child table
        let query_filter = query_filter
            .map(|filter| QueryFilter::new(filter, table))
            .transpose()?;
        let link = TableLink::new(table, link)?;
        Ok(FilterWindow {
            table,
            query_filter,
            ids,
            link,
        })
    }

    /// Append the overall query filter, if there is one
    fn and_filter(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        if let Some(filter) = &self.query_filter {
            out.push_sql("\n and ");
            filter.walk_ast(out)?
        }
        Ok(())
    }

    /// Ranked children query for a list-valued parent field
    fn children_type_a(
        &self,
        column: &Column,
        limit: ParentLimit<'_>,
        block: BlockNumber,
        out: &mut AstPass<Pg>,
    ) -> QueryResult<()> {
        assert!(column.is_list());

        // Generate
        //    from unnest({parent_ids}) as p(id)
        //         cross join lateral
        //         (select *
        //            from children c
        //           where p.id = any(c.{parent_field})
        //             and .. other conditions on c ..
        //           order by c.{sort_key}
        //           limit {first} offset {skip}) c
        //         order by c.{sort_key}
        out.push_sql("\n/* children_type_a */ from unnest(");
        column.bind_ids(&self.ids, out)?;
        out.push_sql(") as p(id) cross join lateral (select * from ");
        out.push_sql(self.table.qualified_name.as_str());
        out.push_sql(" c where ");
        BlockRangeContainsClause::new(&self.table, "c.", block).walk_ast(out.reborrow())?;
        limit.filter(out);
        out.push_sql(" and p.id = any(c.");
        out.push_identifier(column.name.as_str())?;
        out.push_sql(")");
        self.and_filter(out.reborrow())?;
        limit.restrict(out)?;
        out.push_sql(") c");
        Ok(())
    }

    /// Single-object children query for a list-valued parent field
    fn child_type_a(
        &self,
        column: &Column,
        limit: ParentLimit<'_>,
        block: BlockNumber,
        out: &mut AstPass<Pg>,
    ) -> QueryResult<()> {
        assert!(column.is_list());

        // Generate
        //    from unnest({parent_ids}) as p(id),
        //         children c
        //   where c.{parent_field} @> array[p.id]
        //     and c.{parent_field} && {parent_ids}
        //     and .. other conditions on c ..
        //   limit {parent_ids.len} + 1
        //
        // The redundant `&&` clause is only added when we have fewer than
        // TYPEA_BATCH_SIZE children and helps Postgres to narrow down the
        // rows it needs to pick from `children` to join with `p(id)`
        out.push_sql("\n/* child_type_a */ from unnest(");
        column.bind_ids(&self.ids, out)?;
        out.push_sql(") as p(id), ");
        out.push_sql(self.table.qualified_name.as_str());
        out.push_sql(" c where ");
        BlockRangeContainsClause::new(&self.table, "c.", block).walk_ast(out.reborrow())?;
        limit.filter(out);
        out.push_sql(" and c.");
        out.push_identifier(column.name.as_str())?;
        out.push_sql(" @> array[p.id]");
        if self.ids.len() < *TYPEA_BATCH_SIZE {
            out.push_sql(" and c.");
            out.push_identifier(column.name.as_str())?;
            out.push_sql(" && ");
            column.bind_ids(&self.ids, out)?;
        }
        self.and_filter(out.reborrow())?;
        limit.single_limit(self.ids.len(), out);
        Ok(())
    }

    /// Ranked children query for a scalar parent field
    fn children_type_b(
        &self,
        column: &Column,
        limit: ParentLimit<'_>,
        block: BlockNumber,
        out: &mut AstPass<Pg>,
    ) -> QueryResult<()> {
        assert!(!column.is_list());

        // Generate
        //    from unnest({parent_ids}) as p(id)
        //         cross join lateral
        //         (select *
        //            from children c
        //           where p.id = c.{parent_field}
        //             and .. other conditions on c ..
        //           order by c.{sort_key}
        //           limit {first} offset {skip}) c
        //         order by c.{sort_key}
        out.push_sql("\n/* children_type_b */ from unnest(");
        column.bind_ids(&self.ids, out)?;
        out.push_sql(") as p(id) cross join lateral (select * from ");
        out.push_sql(self.table.qualified_name.as_str());
        out.push_sql(" c where ");
        BlockRangeContainsClause::new(&self.table, "c.", block).walk_ast(out.reborrow())?;
        limit.filter(out);
        out.push_sql(" and p.id = c.");
        out.push_identifier(column.name.as_str())?;
        self.and_filter(out.reborrow())?;
        limit.restrict(out)?;
        out.push_sql(") c");
        Ok(())
    }

    /// Single-object children query for a scalar parent field
    fn child_type_b(
        &self,
        column: &Column,
        limit: ParentLimit<'_>,
        block: BlockNumber,
        out: &mut AstPass<Pg>,
    ) -> QueryResult<()> {
        assert!(!column.is_list());

        // Generate
        //    from unnest({parent_ids}) as p(id), children c
        //   where c.{parent_field} = p.id
        //     and .. other conditions on c ..
        //   limit {parent_ids.len} + 1
        out.push_sql("\n/* child_type_b */ from unnest(");
        column.bind_ids(&self.ids, out)?;
        out.push_sql(") as p(id), ");
        out.push_sql(self.table.qualified_name.as_str());
        out.push_sql(" c where ");
        BlockRangeContainsClause::new(&self.table, "c.", block).walk_ast(out.reborrow())?;
        limit.filter(out);
        out.push_sql(" and p.id = c.");
        out.push_identifier(column.name.as_str())?;
        self.and_filter(out.reborrow())?;
        limit.single_limit(self.ids.len(), out);
        Ok(())
    }

    /// Children query where the parent side carries a list of child ids
    fn children_type_c(
        &self,
        child_ids: &Vec<Vec<Option<SafeString>>>,
        limit: ParentLimit<'_>,
        block: BlockNumber,
        out: &mut AstPass<Pg>,
    ) -> QueryResult<()> {
        // Generate
        //    from rows from (unnest({parent_ids}), reduce_dim({child_id_matrix}))
        //         as p(id, child_ids)
        //         cross join lateral
        //         (select *
        //            from children c
        //           where c.id = any(p.child_ids)
        //             and .. other conditions on c ..
// order by c.{sort_key} // limit {first} offset {skip}) c // order by c.{sort_key} out.push_sql("\n/* children_type_c */ from "); out.push_sql("rows from (unnest("); out.push_bind_param::<Array<Text>, _>(&self.ids)?; out.push_sql("), reduce_dim("); self.table.primary_key().push_matrix(&child_ids, out)?; out.push_sql(")) as p(id, child_ids)"); out.push_sql(" cross join lateral (select * from "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" c where "); BlockRangeContainsClause::new(&self.table, "c.", block).walk_ast(out.reborrow())?; limit.filter(out); out.push_sql(" and c.id = any(p.child_ids)"); self.and_filter(out.reborrow())?; limit.restrict(out)?; out.push_sql(") c"); Ok(()) } fn child_type_d( &self, child_ids: &Vec<String>, limit: ParentLimit<'_>, block: BlockNumber, out: &mut AstPass<Pg>, ) -> QueryResult<()> { // Generate // from rows from (unnest({parent_ids}), unnest({child_ids})) as p(id, child_id), // children c // where c.id = p.child_id // and .. other conditions on c .. 
out.push_sql("\n/* child_type_d */ from rows from (unnest("); out.push_bind_param::<Array<Text>, _>(&self.ids)?; out.push_sql("), unnest("); self.table.primary_key().bind_ids(&child_ids, out)?; out.push_sql(")) as p(id, child_id), "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" c where "); BlockRangeContainsClause::new(&self.table, "c.", block).walk_ast(out.reborrow())?; limit.filter(out); out.push_sql(" and "); out.push_sql("c.id = p.child_id"); self.and_filter(out.reborrow())?; limit.single_limit(self.ids.len(), out); Ok(()) } fn children( &self, limit: ParentLimit<'_>, block: BlockNumber, mut out: AstPass<Pg>, ) -> QueryResult<()> { match &self.link { TableLink::Direct(column, multiplicity) => { use ChildMultiplicity::*; if column.is_list() { match multiplicity { Many => self.children_type_a(column, limit, block, &mut out), Single => self.child_type_a(column, limit, block, &mut out), } } else { match multiplicity { Many => self.children_type_b(column, limit, block, &mut out), Single => self.child_type_b(column, limit, block, &mut out), } } } TableLink::Parent(ParentIds::List(child_ids)) => { self.children_type_c(child_ids, limit, block, &mut out) } TableLink::Parent(ParentIds::Scalar(child_ids)) => { self.child_type_d(child_ids, limit, block, &mut out) } } } /// Select a basic subset of columns from the child table for use in /// the `matches` CTE of queries that need to retrieve entities of /// different types or entities that link differently to their parents fn children_uniform( &self, sort_key: &SortKey, block: BlockNumber, mut out: AstPass<Pg>, ) -> QueryResult<()> { out.push_sql("select '"); out.push_sql(self.table.object.as_str()); out.push_sql("' as entity, c.id, c.vid, p.id::text as g$parent_id"); sort_key.select(&mut out)?; self.children(ParentLimit::Outer, block, out) } /// Collect all the parent id's from all windows fn collect_parents(windows: &Vec<FilterWindow>) -> Vec<String> { let parent_ids: HashSet<String> = 
HashSet::from_iter( windows .iter() .map(|window| window.ids.iter().cloned()) .flatten(), ); parent_ids.into_iter().collect() } } /// This is a parallel to `EntityCollection`, but with entity type names /// and filters translated in a form ready for SQL generation #[derive(Debug, Clone)] pub enum FilterCollection<'a> { /// Collection made from all entities in a table; each entry is the table /// and the filter to apply to it, checked and bound to that table All(Vec<(&'a Table, Option<QueryFilter<'a>>)>), /// Collection made from windows of the same or different entity types SingleWindow(FilterWindow<'a>), MultiWindow(Vec<FilterWindow<'a>>, Vec<String>), } impl<'a> FilterCollection<'a> { pub fn new( layout: &'a Layout, collection: EntityCollection, filter: Option<&'a EntityFilter>, ) -> Result<Self, QueryExecutionError> { match collection { EntityCollection::All(entities) => { // This is a little ugly since we need to propagate errors // from the inner closures. We turn each entity type name // into the corresponding table, and check and bind the filter // to it let entities = entities .iter() .map(|entity| { layout .table_for_entity(&entity) .map(|rc| rc.as_ref()) .and_then(|table| { filter .map(|filter| QueryFilter::new(filter, table)) .transpose() .map(|filter| (table, filter)) }) }) .collect::<Result<Vec<_>, _>>()?; Ok(FilterCollection::All(entities)) } EntityCollection::Window(windows) => { let windows = windows .into_iter() .map(|window| FilterWindow::new(layout, window, filter)) .collect::<Result<Vec<_>, _>>()?; let collection = if windows.len() == 1 { let mut windows = windows; FilterCollection::SingleWindow( windows.pop().expect("we just checked there is an element"), ) } else { let parent_ids = FilterWindow::collect_parents(&windows); FilterCollection::MultiWindow(windows, parent_ids) }; Ok(collection) } } } fn first_table(&self) -> Option<&Table> { match self { FilterCollection::All(entities) => entities.first().map(|pair| pair.0), 
FilterCollection::SingleWindow(window) => Some(window.table), FilterCollection::MultiWindow(windows, _) => windows.first().map(|window| window.table), } } fn is_empty(&self) -> bool { match self { FilterCollection::All(entities) => entities.is_empty(), FilterCollection::SingleWindow(_) => false, FilterCollection::MultiWindow(windows, _) => windows.is_empty(), } } } /// Convenience to pass the name of the column to order by around. If `name` /// is `None`, the sort key should be ignored #[derive(Debug, Clone, Copy)] pub enum SortKey<'a> { None, Id, Key { column: &'a Column, value: Option<&'a str>, direction: &'static str, }, } impl<'a> SortKey<'a> { fn new( order: EntityOrder, table: &'a Table, filter: Option<&'a EntityFilter>, ) -> Result<Self, QueryExecutionError> { const ASC: &str = "asc"; const DESC: &str = "desc"; fn with_key<'a>( table: &'a Table, attribute: String, filter: Option<&'a EntityFilter>, direction: &'static str, ) -> Result<SortKey<'a>, QueryExecutionError> { let column = table.column_for_field(&attribute)?; if column.is_fulltext() { match filter { Some(entity_filter) => match entity_filter { EntityFilter::Equal(_, value) => { let sort_value = value.as_str(); Ok(SortKey::Key { column, value: sort_value, direction, }) } _ => unreachable!(), }, None => unreachable!(), } } else { Ok(SortKey::Key { column, value: None, direction, }) } } match order { EntityOrder::Ascending(attr, _) => with_key(table, attr, filter, ASC), EntityOrder::Descending(attr, _) => with_key(table, attr, filter, DESC), EntityOrder::Default => Ok(SortKey::Id), EntityOrder::Unordered => Ok(SortKey::None), } } /// Generate selecting the sort key if it is needed fn select(&self, out: &mut AstPass<Pg>) -> QueryResult<()> { match self { SortKey::None => Ok(()), SortKey::Id => { if *ORDER_BY_BLOCK_RANGE { out.push_sql(", c."); out.push_sql(BLOCK_RANGE_COLUMN); } Ok(()) } SortKey::Key { column, value: _, direction: _, } => { let name = column.name.as_str(); if !column.is_primary_key() { 
out.push_sql(", c."); out.push_identifier(name)?; } Ok(()) } } } /// Generate /// order by [name direction], id fn order_by(&self, out: &mut AstPass<Pg>) -> QueryResult<()> { match self { SortKey::None => Ok(()), SortKey::Id => { out.push_sql("order by "); out.push_identifier(PRIMARY_KEY_COLUMN)?; if *ORDER_BY_BLOCK_RANGE { out.push_sql(", "); out.push_sql(BLOCK_RANGE_COLUMN); } Ok(()) } SortKey::Key { column, value, direction, } => { out.push_sql("order by "); SortKey::sort_expr(column, value, direction, out) } } } /// Generate /// order by g$parent_id, [name direction], id fn order_by_parent(&self, out: &mut AstPass<Pg>) -> QueryResult<()> { match self { SortKey::None => Ok(()), SortKey::Id => { out.push_sql("order by g$parent_id, "); out.push_identifier(PRIMARY_KEY_COLUMN) } SortKey::Key { column, value, direction, } => { out.push_sql("order by g$parent_id, "); SortKey::sort_expr(column, value, direction, out) } } } /// Generate /// [name direction,] id fn sort_expr( column: &Column, value: &Option<&str>, direction: &str, out: &mut AstPass<Pg>, ) -> QueryResult<()> { match &column.column_type { ColumnType::TSVector(config) => { let algorithm = match config.algorithm { FulltextAlgorithm::Rank => "ts_rank(", FulltextAlgorithm::ProximityRank => "ts_rank_cd(", }; out.push_sql(algorithm); let name = column.name.as_str(); out.push_identifier(name)?; out.push_sql(", to_tsquery("); out.push_bind_param::<Text, _>(&String::from(value.unwrap()))?; out.push_sql(")) "); out.push_sql(direction); out.push_sql(" nulls last"); if name != PRIMARY_KEY_COLUMN { out.push_sql(", "); out.push_identifier(PRIMARY_KEY_COLUMN)?; } Ok(()) } _ => { let name = column.name.as_str(); out.push_identifier(name)?; out.push_sql(" "); out.push_sql(direction); out.push_sql(" nulls last"); if name != PRIMARY_KEY_COLUMN { out.push_sql(", "); out.push_identifier(PRIMARY_KEY_COLUMN)?; } Ok(()) } } } } /// Generate `[limit {first}] [offset {skip}] #[derive(Debug, Clone)] pub struct 
FilterRange(EntityRange); impl QueryFragment<Pg> for FilterRange { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { let range = &self.0; if let Some(first) = &range.first { out.push_sql("\n limit "); out.push_sql(&first.to_string()); } if range.skip > 0 { out.push_sql("\noffset "); out.push_sql(&range.skip.to_string()); } Ok(()) } } /// The parallel to `EntityQuery`. /// /// Details of how query generation for `FilterQuery` works can be found /// `https://github.com/graphprotocol/rfcs/blob/master/engineering-plans/0001-graphql-query-prefetching.md` #[derive(Debug, Clone)] pub struct FilterQuery<'a> { collection: &'a FilterCollection<'a>, sort_key: SortKey<'a>, range: FilterRange, block: BlockNumber, query_id: Option<String>, } impl<'a> FilterQuery<'a> { pub fn new( collection: &'a FilterCollection, filter: Option<&'a EntityFilter>, order: EntityOrder, range: EntityRange, block: BlockNumber, query_id: Option<String>, ) -> Result<Self, QueryExecutionError> { // Get the name of the column we order by; if there is more than one // table, we are querying an interface, and the order is on an attribute // in that interface so that all tables have a column for that. It is // therefore enough to just look at the first table to get the name let first_table = collection .first_table() .expect("an entity query always contains at least one entity type/table"); let sort_key = SortKey::new(order, first_table, filter)?; Ok(FilterQuery { collection, sort_key, range: FilterRange(range), block, query_id, }) } /// Generate /// from schema.table c /// where block_range @> $block /// and query_filter /// Only used when the query is against a `FilterCollection::All`, i.e. 
/// when we do not need to window fn filtered_rows( &self, table: &Table, table_filter: &Option<QueryFilter<'a>>, mut out: AstPass<Pg>, ) -> QueryResult<()> { out.push_sql("\n from "); out.push_sql(table.qualified_name.as_str()); out.push_sql(" c"); out.push_sql("\n where "); BlockRangeContainsClause::new(&table, "c.", self.block).walk_ast(out.reborrow())?; if let Some(filter) = table_filter { out.push_sql(" and "); filter.walk_ast(out.reborrow())?; } out.push_sql("\n"); Ok(()) } fn select_entity_and_data(table: &Table, out: &mut AstPass<Pg>) { out.push_sql("select '"); out.push_sql(&table.object); out.push_sql("' as entity, to_jsonb(c.*) as data"); } /// Only one table/filter pair, and no window /// /// The generated query makes sure we only convert the rows we actually /// want to retrieve to JSONB /// /// select '..' as entity, to_jsonb(e.*) as data /// from /// (select * /// from table c /// where block_range @> $block /// and filter /// order by .. limit .. skip ..) c fn query_no_window_one_entity( &self, table: &Table, filter: &Option<QueryFilter>, mut out: AstPass<Pg>, ) -> QueryResult<()> { Self::select_entity_and_data(table, &mut out); out.push_sql(" from (select * "); self.filtered_rows(table, filter, out.reborrow())?; out.push_sql("\n "); self.sort_key.order_by(&mut out)?; self.range.walk_ast(out.reborrow())?; out.push_sql(") c"); Ok(()) } /// Only one table/filter pair, and a window /// /// Generate a query /// select '..' 
as entity, to_jsonb(e.*) as data /// from (select c.*, p.id as g$parent_id from {window.children(...)}) c /// order by c.g$parent_id, {sort_key} /// limit {first} offset {skip} fn query_window_one_entity( &self, window: &FilterWindow, mut out: AstPass<Pg>, ) -> QueryResult<()> { Self::select_entity_and_data(&window.table, &mut out); out.push_sql(" from (\n"); out.push_sql("select c.*, p.id::text as g$parent_id"); window.children( ParentLimit::Ranked(&self.sort_key, &self.range), self.block, out.reborrow(), )?; out.push_sql(") c"); out.push_sql("\n "); self.sort_key.order_by_parent(&mut out) } /// No windowing, but multiple entity types fn query_no_window( &self, entities: &Vec<(&Table, Option<QueryFilter>)>, mut out: AstPass<Pg>, ) -> QueryResult<()> { // We have multiple tables which might have different schemas since // the entity_types come from implementing the same interface. We // need to do the query in two steps: first we build a CTE with the // id's of entities matching the filter and order/limit. As a second // step, we get matching rows from the underlying tables and convert // them to JSONB. // // Overall, we generate a query // // with matches as ( // select '...' as entity, id, vid, {sort_key} // from {table} c // where {query_filter} // union all // ... // order by {sort_key} // limit n offset m) // select m.entity, to_jsonb(c.*) as data, c.id, c.{sort_key} // from {table} c, matches m // where c.vid = m.vid and m.entity = '...' // union all // ... // order by c.{sort_key} // Step 1: build matches CTE out.push_sql("with matches as ("); for (i, (table, filter)) in entities.iter().enumerate() { if i > 0 { out.push_sql("\nunion all\n"); } // select '..' 
as entity, // c.id, // c.vid, // c.${sort_key} out.push_sql("select '"); out.push_sql(&table.object); out.push_sql("' as entity, c.id, c.vid"); self.sort_key.select(&mut out)?; self.filtered_rows(table, filter, out.reborrow())?; } out.push_sql("\n "); self.sort_key.order_by(&mut out)?; self.range.walk_ast(out.reborrow())?; out.push_sql(")\n"); // Step 2: convert to JSONB for (i, (table, _)) in entities.iter().enumerate() { if i > 0 { out.push_sql("\nunion all\n"); } out.push_sql("select m.entity, to_jsonb(c.*) as data, c.id"); self.sort_key.select(&mut out)?; out.push_sql("\n from "); out.push_sql(table.qualified_name.as_str()); out.push_sql(" c,"); out.push_sql(" matches m"); out.push_sql("\n where c.vid = m.vid and m.entity = "); out.push_bind_param::<Text, _>(&table.object)?; } out.push_sql("\n "); self.sort_key.order_by(&mut out)?; Ok(()) } /// Multiple windows fn query_window( &self, windows: &Vec<FilterWindow>, parent_ids: &Vec<String>, mut out: AstPass<Pg>, ) -> QueryResult<()> { // Note that a CTE is an optimization fence, and since we use // `matches` multiple times, we actually want to materialize it first // before we fill in JSON data in the main query. As a consequence, we // restrict the matches results per window in the `matches` CTE to // avoid a possibly gigantic materialized `matches` view rather than // leave that to the main query // // Overall, we generate a query // // with matches as ( // select c.* // from (select id from unnest({all_parent_ids}) as q(id)) q // cross join lateral // ({window.children_uniform("q")} // union all // ... range over all windows ... // order by c.{sort_key} // limit $first skip $skip) c) // select m.entity, to_jsonb(c.*) as data, m.parent_id // from matches m, {window.child_table} c // where c.vid = m.vid and m.entity = '{window.child_type}' // union all // ... 
range over all windows // order by parent_id, c.{sort_key} // Step 1: build matches CTE out.push_sql("with matches as ("); out.push_sql("select c.* from "); out.push_sql("unnest("); out.push_bind_param::<Array<Text>, _>(parent_ids)?; out.push_sql("::text[]) as q(id)\n"); out.push_sql(" cross join lateral ("); for (i, window) in windows.iter().enumerate() { if i > 0 { out.push_sql("\nunion all\n"); } window.children_uniform(&self.sort_key, self.block, out.reborrow())?; } out.push_sql("\n"); self.sort_key.order_by(&mut out)?; self.range.walk_ast(out.reborrow())?; out.push_sql(") c)\n"); // Step 2: convert to JSONB // If the parent is an interface, each implementation might store its // relationship to the children in different ways, leading to multiple // windows that use the same table for the children. We need to make // sure each table only appears once in the 'union all' otherwise we'll // duplicate entities in the result // We only use a table's qualified name and object to save ourselves // the hassle of making `Table` hashable let unique_child_tables = windows .iter() .map(|window| (&window.table.qualified_name, &window.table.object)) .collect::<HashSet<_>>(); for (i, (table_name, object)) in unique_child_tables.into_iter().enumerate() { if i > 0 { out.push_sql("\nunion all\n"); } out.push_sql( "select m.*, \ to_jsonb(c.*) || jsonb_build_object('g$parent_id', m.g$parent_id) as data", ); out.push_sql("\n from "); out.push_sql(table_name.as_str()); out.push_sql(" c, matches m\n where c.vid = m.vid and m.entity = '"); out.push_sql(object); out.push_sql("'"); } out.push_sql("\n "); self.sort_key.order_by_parent(&mut out) } } impl<'a> QueryFragment<Pg> for FilterQuery<'a> { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); if self.collection.is_empty() { return Ok(()); } if let Some(qid) = &self.query_id { out.push_sql("/* qid: "); out.push_sql(qid); out.push_sql(" */\n"); } // We generate four different kinds of queries, 
depending on whether // we need to window and whether we query just one or multiple entity // types/windows; the most complex situation is windowing with multiple // entity types. The other cases let us simplify the generated SQL // considerably and produces faster queries // // Details of how all this works can be found in // `https://github.com/graphprotocol/rfcs/blob/master/engineering-plans/0001-graphql-query-prefetching.md` match &self.collection { FilterCollection::All(entities) => { if entities.len() == 1 { let (table, filter) = entities .first() .expect("a query always uses at least one table"); self.query_no_window_one_entity(table, filter, out) } else { self.query_no_window(entities, out) } } FilterCollection::SingleWindow(window) => self.query_window_one_entity(window, out), FilterCollection::MultiWindow(windows, parent_ids) => { self.query_window(windows, parent_ids, out) } } } } impl<'a> QueryId for FilterQuery<'a> { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } impl<'a> LoadQuery<PgConnection, EntityData> for FilterQuery<'a> { fn internal_load(self, conn: &PgConnection) -> QueryResult<Vec<EntityData>> { conn.query_by_name(&self) } } impl<'a, Conn> RunQueryDsl<Conn> for FilterQuery<'a> {} /// Reduce the upper bound of the current entry's block range to `block` as /// long as that does not result in an empty block range #[derive(Debug, Clone, Constructor)] pub struct ClampRangeQuery<'a> { table: &'a Table, key: &'a EntityKey, block: BlockNumber, } impl<'a> QueryFragment<Pg> for ClampRangeQuery<'a> { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { // update table // set block_range = int4range(lower(block_range), $block) // where id = $id // and block_range @> INTMAX out.unsafe_to_cache_prepared(); out.push_sql("update "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql("\n set "); out.push_identifier(BLOCK_RANGE_COLUMN)?; out.push_sql(" = int4range(lower("); out.push_identifier(BLOCK_RANGE_COLUMN)?; 
out.push_sql("), "); out.push_bind_param::<Integer, _>(&self.block)?; out.push_sql(")\n where "); self.table.primary_key().eq(&self.key.entity_id, &mut out)?; out.push_sql(" and ("); out.push_sql(BLOCK_RANGE_CURRENT); out.push_sql(")"); Ok(()) } } impl<'a> QueryId for ClampRangeQuery<'a> { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } impl<'a, Conn> RunQueryDsl<Conn> for ClampRangeQuery<'a> {} /// Helper struct for returning the id's touched by the RevertRemove and /// RevertExtend queries #[derive(QueryableByName, PartialEq, Eq, Hash)] pub struct RevertEntityData { #[sql_type = "Text"] pub id: String, } impl RevertEntityData { /// Convert primary key ids from Postgres' internal form to the format we /// use by stripping `\\x` off the front of bytes strings fn bytes_as_str(table: &Table, mut data: Vec<RevertEntityData>) -> Vec<RevertEntityData> { match table.primary_key().column_type.id_type() { IdType::String => data, IdType::Bytes => { for entry in data.iter_mut() { entry.id = bytes_as_str(&entry.id); } data } } } } /// A query that removes all versions whose block range lies entirely /// beyond `block`. 
#[derive(Debug, Clone, Constructor)] pub struct RevertRemoveQuery<'a> { table: &'a Table, block: BlockNumber, } impl<'a> QueryFragment<Pg> for RevertRemoveQuery<'a> { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); // Construct a query // delete from table // where lower(block_range) >= $block // returning id out.push_sql("delete from "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql("\n where lower("); out.push_identifier(BLOCK_RANGE_COLUMN)?; out.push_sql(") >= "); out.push_bind_param::<Integer, _>(&self.block)?; out.push_sql("\nreturning "); out.push_sql(PRIMARY_KEY_COLUMN); out.push_sql("::text"); Ok(()) } } impl<'a> QueryId for RevertRemoveQuery<'a> { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } impl<'a> LoadQuery<PgConnection, RevertEntityData> for RevertRemoveQuery<'a> { fn internal_load(self, conn: &PgConnection) -> QueryResult<Vec<RevertEntityData>> { conn.query_by_name(&self) .map(|data| RevertEntityData::bytes_as_str(&self.table, data)) } } impl<'a, Conn> RunQueryDsl<Conn> for RevertRemoveQuery<'a> {} /// A query that unclamps the block range of all versions that contain /// `block` by setting the upper bound of the block range to infinity. #[derive(Debug, Clone, Constructor)] pub struct RevertClampQuery<'a> { table: &'a Table, block: BlockNumber, } impl<'a> QueryFragment<Pg> for RevertClampQuery<'a> { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); // Construct a query // update table // set block_range = int4range(lower(block_range), null) // where block_range @> $block // and not block_range @> INTMAX // and lower(block_range) <= $block // and coalesce(upper(block_range), INTMAX) > $block // and coalesce(upper(block_range), INTMAX) < INTMAX // returning id // // The query states the same thing twice, once in terms of ranges // and once in terms of the range bounds. 
That makes it possible // for Postgres to use either the exclusion index on the table // or the BRIN index out.push_sql("update "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql("\n set "); out.push_identifier(BLOCK_RANGE_COLUMN)?; out.push_sql(" = int4range(lower("); out.push_identifier(BLOCK_RANGE_COLUMN)?; out.push_sql("), null)\n where "); out.push_identifier(BLOCK_RANGE_COLUMN)?; out.push_sql(" @> "); out.push_bind_param::<Integer, _>(&self.block)?; out.push_sql(" and not "); out.push_sql(BLOCK_RANGE_CURRENT); out.push_sql(" and lower("); out.push_sql(BLOCK_RANGE_COLUMN); out.push_sql(") <= "); out.push_bind_param::<Integer, _>(&self.block)?; out.push_sql(" and coalesce(upper("); out.push_sql(BLOCK_RANGE_COLUMN); out.push_sql("), 2147483647) > "); out.push_bind_param::<Integer, _>(&self.block)?; out.push_sql(" and coalesce(upper("); out.push_sql(BLOCK_RANGE_COLUMN); out.push_sql("), 2147483647) < 2147483647"); out.push_sql("\nreturning "); out.push_sql(PRIMARY_KEY_COLUMN); out.push_sql("::text"); Ok(()) } } impl<'a> QueryId for RevertClampQuery<'a> { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } impl<'a> LoadQuery<PgConnection, RevertEntityData> for RevertClampQuery<'a> { fn internal_load(self, conn: &PgConnection) -> QueryResult<Vec<RevertEntityData>> { conn.query_by_name(&self) .map(|data| RevertEntityData::bytes_as_str(&self.table, data)) } } impl<'a, Conn> RunQueryDsl<Conn> for RevertClampQuery<'a> {} #[test] fn block_number_max_is_i32_max() { // The code in RevertClampQuery::walk_ast embeds i32::MAX // aka BLOCK_NUMBER_MAX in strings for efficiency. This assertion // makes sure that BLOCK_NUMBER_MAX still is what we think it is assert_eq!(2147483647, graph::prelude::BLOCK_NUMBER_MAX); } /// A query that removes all dynamic data sources for a given subgraph /// whose block range lies entirely beyond `block`. 
The query only deletes /// the data sources but not any related objects #[derive(Debug, Clone, Constructor)] pub struct DeleteDynamicDataSourcesQuery<'a> { subgraph: &'a str, block: BlockNumber, } impl<'a> QueryFragment<Pg> for DeleteDynamicDataSourcesQuery<'a> { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); // Construct a query // delete from subgraphs.dynamic_ethereum_contract_data_source // where lower(block_range) >= $block // and deployment = $subgraph // returning id out.push_sql("delete from subgraphs.dynamic_ethereum_contract_data_source\n"); out.push_sql(" where"); if self.block != BLOCK_UNVERSIONED { out.push_sql(" lower("); out.push_identifier(BLOCK_RANGE_COLUMN)?; out.push_sql(") >= "); out.push_bind_param::<Integer, _>(&self.block)?; out.push_sql(" and"); } out.push_sql(" deployment = "); out.push_bind_param::<Text, _>(&self.subgraph)?; out.push_sql("\nreturning "); out.push_identifier(PRIMARY_KEY_COLUMN) } } impl<'a> QueryId for DeleteDynamicDataSourcesQuery<'a> { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } impl<'a> LoadQuery<PgConnection, RevertEntityData> for DeleteDynamicDataSourcesQuery<'a> { fn internal_load(self, conn: &PgConnection) -> QueryResult<Vec<RevertEntityData>> { conn.query_by_name(&self) } } impl<'a, Conn> RunQueryDsl<Conn> for DeleteDynamicDataSourcesQuery<'a> {} /// Remove all entities from the given table whose id has a prefix that /// matches one of the given prefixes. 
This query is mostly useful to /// delete subgraph metadata that belongs to a certain dynamic data source #[derive(Debug, Clone, Constructor)] pub struct DeleteByPrefixQuery<'a> { table: &'a Table, prefixes: &'a Vec<String>, prefix_len: i32, } impl<'a> QueryFragment<Pg> for DeleteByPrefixQuery<'a> { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); // Construct a query // delete from {table} // where left(id, {prefix_len}) = any({prefixes}) // returning id out.push_sql("delete from "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql("\n where left("); out.push_sql(PRIMARY_KEY_COLUMN); out.push_sql(","); out.push_bind_param::<Integer, _>(&self.prefix_len)?; out.push_sql(") = any("); out.push_bind_param::<Array<Text>, _>(&self.prefixes)?; out.push_sql(")\nreturning "); out.push_sql(PRIMARY_KEY_COLUMN); out.push_sql("::text"); Ok(()) } } impl<'a> QueryId for DeleteByPrefixQuery<'a> { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } impl<'a> LoadQuery<PgConnection, RevertEntityData> for DeleteByPrefixQuery<'a> { fn internal_load(self, conn: &PgConnection) -> QueryResult<Vec<RevertEntityData>> { conn.query_by_name(&self) .map(|data| RevertEntityData::bytes_as_str(&self.table, data)) } } impl<'a, Conn> RunQueryDsl<Conn> for DeleteByPrefixQuery<'a> {} /// Copy the data of one table to another table #[derive(Debug, Clone)] pub struct CopyEntityDataQuery<'a> { src: &'a Table, dst: &'a Table, // A list of columns common between src and dst that // need to be copied columns: Vec<&'a Column>, } impl<'a> CopyEntityDataQuery<'a> { pub fn new(dst: &'a Table, src: &'a Table) -> Result<Self, StoreError> { let mut columns = Vec::new(); for dcol in &dst.columns { if let Some(scol) = src.column(&dcol.name) { if let Some(msg) = dcol.is_assignable_from(scol, &src.object) { return Err(anyhow!("{}", msg).into()); } else { columns.push(dcol); } } else if !dcol.is_nullable() { return Err(anyhow!( "The attribute {}.{} is 
non-nullable, \ but there is no such attribute in the source", dst.object, dcol.field ) .into()); } else { columns.push(dcol); } } Ok(Self { src, dst, columns }) } } impl<'a> QueryFragment<Pg> for CopyEntityDataQuery<'a> { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); // Construct a query // insert into {dst}({columns}) // select {columns} from {src} out.push_sql("insert into "); out.push_sql(self.dst.qualified_name.as_str()); out.push_sql("("); for column in &self.columns { out.push_identifier(column.name.as_str())?; out.push_sql(", "); } out.push_sql("block_range)"); out.push_sql("\nselect "); for column in &self.columns { out.push_identifier(column.name.as_str())?; if let ColumnType::Enum(enum_type) = &column.column_type { // Have Postgres convert to the right enum type out.push_sql("::text::"); out.push_sql(enum_type.name.as_str()); } out.push_sql(", "); } out.push_sql("block_range from "); out.push_sql(self.src.qualified_name.as_str()); Ok(()) } } impl<'a> QueryId for CopyEntityDataQuery<'a> { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } impl<'a, Conn> RunQueryDsl<Conn> for CopyEntityDataQuery<'a> {} /// A query that removes all dynamic data sources for a given subgraph /// whose block range lies entirely beyond `block`. 
The query only deletes /// the data sources but not any related objects #[derive(Debug, Clone, Constructor)] pub struct CopyDynamicDataSourceQuery<'a> { dds: &'a Vec<String>, new_dds: &'a Vec<String>, subgraph: &'a str, } impl<'a> QueryFragment<Pg> for CopyDynamicDataSourceQuery<'a> { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { // The query in this file was generated with // cargo run -p graph-store-postgres --example layout -- \ // -g copy-dds store/postgres/src/subgraphs.graphql \ // > store/postgres/src/copy_dds.sql // Background on how the query works can be found in layout.rs // in the function `print_copy_dds` // See also: ed42d219c6704a4aab57ce1ea66698e7 // The query must be regenerated when the GraphQL schema changes const QUERY: &str = include_str!("copy_dds.sql"); out.unsafe_to_cache_prepared(); out.push_sql(QUERY); out.push_bind_param_value_only::<Array<Text>, _>(self.dds)?; out.push_bind_param_value_only::<Array<Text>, _>(self.new_dds)?; out.push_bind_param_value_only::<Text, _>(&self.subgraph) } } impl<'a> QueryId for CopyDynamicDataSourceQuery<'a> { type QueryId = (); const HAS_STATIC_QUERY_ID: bool = false; } impl<'a, Conn> RunQueryDsl<Conn> for CopyDynamicDataSourceQuery<'a> {}
35.353587
116
0.525717
d597be03c4e49d586215de5f3a44118db86b05bc
7,392
/// Implementing a custom async UDP socket since tokio::net::UdpSocket has the limitation that /// on windows, due to sends being a 'start, wait for completion' two-step process, /// but tokio/mio do not expose a way to poll the completion step, any failing sends are /// reported as successes. /// /// The implementation just spawns two threads for send/recv and uses the blocking /// std::net::UdpSocket, which is relatively fine as we only need a single UDP socket for /// rally-point. The more scalable solution would be to create improved version of /// mio::net::UdpSocket, but I'm concerned that it would end up being a maintenance burden to /// keep up to date when mio/miow/tokio change, as it would end up being comparatively complex /// and hard to follow. use std::io; use std::mem; use std::net::{SocketAddr, SocketAddrV6, UdpSocket}; use std::os::windows::io::FromRawSocket; use std::pin::Pin; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::task::{self, Poll}; use bytes::Bytes; use futures::prelude::*; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver}; use winapi::shared::ws2def::{AF_INET6, IPPROTO_IPV6, SOCK_DGRAM}; use winapi::shared::ws2ipdef::{IPV6_V6ONLY, SOCKADDR_IN6_LH}; use winapi::um::winsock2::{ bind, setsockopt, socket, WSAGetLastError, WSAStartup, INVALID_SOCKET, WSADATA, }; pub struct UdpSend { thread_sender: std::sync::mpsc::Sender<(Bytes, SocketAddrV6)>, results: UnboundedReceiver<Result<(), io::Error>>, pending_results: usize, } pub struct UdpRecv { thread_receiver: UnboundedReceiver<Result<(Bytes, SocketAddrV6), io::Error>>, closed: Arc<AtomicBool>, } fn to_ipv6_addr(addr: &SocketAddr) -> SocketAddrV6 { match addr { SocketAddr::V6(v6) => *v6, SocketAddr::V4(v4) => SocketAddrV6::new(v4.ip().to_ipv6_mapped(), v4.port(), 0, 0), } } fn bind_udp_ipv6_ipv4_socket(local_addr: &SocketAddrV6) -> Result<UdpSocket, io::Error> { unsafe { let mut data: WSADATA = mem::zeroed(); let err = WSAStartup(0x202, &mut data); if 
err != 0 { return Err(io::Error::from_raw_os_error(WSAGetLastError())); } // Call winsock functions as setsockopt has to be called before bind() let socket = socket(AF_INET6, SOCK_DGRAM, 0); if socket == INVALID_SOCKET { return Err(io::Error::from_raw_os_error(WSAGetLastError())); } // Enable using this socket for both IPv4 and IPv6 addresses. // The send thread needs to convert any actual V4 sockaddr to V6 though. let zero = 0u32; let err = setsockopt( socket, IPPROTO_IPV6 as i32, IPV6_V6ONLY, &zero as *const u32 as *const i8, 4, ); if err != 0 { return Err(io::Error::from_raw_os_error(WSAGetLastError())); } let mut raw_addr = SOCKADDR_IN6_LH { sin6_family: AF_INET6 as u16, sin6_port: local_addr.port().to_be(), sin6_flowinfo: 0, ..mem::zeroed() }; *raw_addr.sin6_addr.u.Word_mut() = local_addr.ip().segments(); let err = bind( socket, &raw_addr as *const SOCKADDR_IN6_LH as *const _, mem::size_of::<SOCKADDR_IN6_LH>() as i32, ); if err != 0 { return Err(io::Error::from_raw_os_error(WSAGetLastError())); } Ok(UdpSocket::from_raw_socket(socket as u32)) } } pub fn udp_socket(local_addr: &SocketAddr) -> Result<(UdpSend, UdpRecv), io::Error> { let socket = bind_udp_ipv6_ipv4_socket(&to_ipv6_addr(local_addr))?; debug!("UDP socket bound to {:?}", socket.local_addr()); let socket2 = socket.try_clone()?; let (send, recv) = std::sync::mpsc::channel(); let (send_result, recv_result) = unbounded_channel(); std::thread::spawn(move || { while let Ok((val, addr)) = recv.recv() { let val: Bytes = val; let result = match socket.send_to(&val, addr) { Ok(len) => { if val.len() == len { Ok(()) } else { Err(io::Error::new( io::ErrorKind::Other, "Failed to send all of the data", )) } } Err(e) => Err(e), }; if let Err(_) = send_result.send(result) { break; } } debug!("UDP send thread end"); }); let udp_send = UdpSend { thread_sender: send, results: recv_result, pending_results: 0, }; let closed = Arc::new(AtomicBool::new(false)); let closed2 = closed.clone(); let (send, recv) = 
unbounded_channel(); std::thread::spawn(move || { let mut buf = vec![0; 2048]; loop { if closed2.load(Ordering::Relaxed) == true { break; } let result = match socket2.recv_from(&mut buf) { Ok((n, addr)) => { let bytes = Bytes::copy_from_slice(&buf[..n]); Ok((bytes, to_ipv6_addr(&addr))) } Err(e) => Err(e), }; if let Err(_) = send.send(result) { break; } } debug!("UDP recv thread end"); }); let udp_recv = UdpRecv { thread_receiver: recv, closed, }; Ok((udp_send, udp_recv)) } impl Drop for UdpRecv { fn drop(&mut self) { self.closed.store(true, Ordering::Relaxed); } } impl Stream for UdpRecv { type Item = Result<(Bytes, SocketAddrV6), io::Error>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Option<Self::Item>> { self.thread_receiver.poll_recv(cx) } } impl Sink<(Bytes, SocketAddr)> for UdpSend { type Error = io::Error; fn poll_ready(self: Pin<&mut Self>, _cx: &mut task::Context) -> Poll<Result<(), io::Error>> { Poll::Ready(Ok(())) } fn start_send(mut self: Pin<&mut Self>, data: (Bytes, SocketAddr)) -> Result<(), io::Error> { match self.thread_sender.send((data.0, to_ipv6_addr(&data.1))) { Ok(()) => { self.pending_results += 1; Ok(()) } Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)), } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Result<(), io::Error>> { while self.pending_results != 0 { match self.results.poll_recv(cx) { Poll::Pending => return Poll::Pending, Poll::Ready(Some(Ok(()))) => { self.pending_results -= 1; } Poll::Ready(Some(Err(e))) => { self.pending_results -= 1; return Poll::Ready(Err(e)); } Poll::Ready(None) => { let err = io::Error::new(io::ErrorKind::Other, "Child thread has closed"); return Poll::Ready(Err(err)); } } } Poll::Ready(Ok(())) } fn poll_close(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Result<(), io::Error>> { self.poll_flush(cx) } }
35.368421
100
0.552895
4adcc394638a2e9c2c833885bcee5ec04dac4388
4,723
use std::collections::HashMap; #[cfg(feature = "mysql")] use std::sync::Arc; #[cfg(feature = "mysql")] use diesel::mysql::MysqlConnection; #[cfg(feature = "mysql")] use diesel::prelude::*; #[cfg(feature = "mysql")] use failure::ResultExt; #[cfg(feature = "mysql")] use r2d2::Pool; #[cfg(feature = "mysql")] use r2d2_diesel::ConnectionManager; use chrono::NaiveDateTime; use super::error::*; #[cfg_attr(feature = "mysql", derive(Queryable))] #[derive(Clone, Debug)] pub struct Factoid { pub name: String, pub idx: i32, pub content: String, pub author: String, pub created: NaiveDateTime, } #[cfg_attr(feature = "mysql", derive(Insertable))] #[cfg_attr(feature = "mysql", table_name = "factoids")] pub struct NewFactoid<'a> { pub name: &'a str, pub idx: i32, pub content: &'a str, pub author: &'a str, pub created: NaiveDateTime, } pub trait Database: Send + Sync { fn insert_factoid(&mut self, factoid: &NewFactoid) -> Result<(), FactoidError>; fn get_factoid(&self, name: &str, idx: i32) -> Result<Factoid, FactoidError>; fn delete_factoid(&mut self, name: &str, idx: i32) -> Result<(), FactoidError>; fn count_factoids(&self, name: &str) -> Result<i32, FactoidError>; } // HashMap impl<S: ::std::hash::BuildHasher + Send + Sync> Database for HashMap<(String, i32), Factoid, S> { fn insert_factoid(&mut self, factoid: &NewFactoid) -> Result<(), FactoidError> { let factoid = Factoid { name: factoid.name.to_owned(), idx: factoid.idx, content: factoid.content.to_owned(), author: factoid.author.to_owned(), created: factoid.created, }; let name = factoid.name.clone(); match self.insert((name, factoid.idx), factoid) { None => Ok(()), Some(_) => Err(ErrorKind::Duplicate)?, } } fn get_factoid(&self, name: &str, idx: i32) -> Result<Factoid, FactoidError> { Ok(self .get(&(name.to_owned(), idx)) .cloned() .ok_or(ErrorKind::NotFound)?) 
} fn delete_factoid(&mut self, name: &str, idx: i32) -> Result<(), FactoidError> { match self.remove(&(name.to_owned(), idx)) { Some(_) => Ok(()), None => Err(ErrorKind::NotFound)?, } } fn count_factoids(&self, name: &str) -> Result<i32, FactoidError> { Ok(self.iter().filter(|&(&(ref n, _), _)| n == name).count() as i32) } } // Diesel automatically defines the factoids module as public. // We create a schema module to keep it private. #[cfg(feature = "mysql")] mod schema { table! { factoids (name, idx) { name -> Varchar, idx -> Integer, content -> Text, author -> Varchar, created -> Timestamp, } } } #[cfg(feature = "mysql")] use self::schema::factoids; #[cfg(feature = "mysql")] impl Database for Arc<Pool<ConnectionManager<MysqlConnection>>> { fn insert_factoid(&mut self, factoid: &NewFactoid) -> Result<(), FactoidError> { let conn = &*self.get().context(ErrorKind::NoConnection)?; diesel::insert_into(factoids::table) .values(factoid) .execute(conn) .context(ErrorKind::MysqlError)?; Ok(()) } fn get_factoid(&self, name: &str, idx: i32) -> Result<Factoid, FactoidError> { let conn = &*self.get().context(ErrorKind::NoConnection)?; Ok(factoids::table .find((name, idx)) .first(conn) .context(ErrorKind::MysqlError)?) } fn delete_factoid(&mut self, name: &str, idx: i32) -> Result<(), FactoidError> { use self::factoids::columns; let conn = &*self.get().context(ErrorKind::NoConnection)?; match diesel::delete( factoids::table .filter(columns::name.eq(name)) .filter(columns::idx.eq(idx)), ) .execute(conn) { Ok(v) => { if v > 0 { Ok(()) } else { Err(ErrorKind::NotFound)? 
} } Err(e) => Err(e).context(ErrorKind::MysqlError)?, } } fn count_factoids(&self, name: &str) -> Result<i32, FactoidError> { let conn = &*self.get().context(ErrorKind::NoConnection)?; let count: Result<i64, _> = factoids::table .filter(factoids::columns::name.eq(name)) .count() .get_result(conn); match count { Ok(c) => Ok(c as i32), Err(diesel::NotFound) => Ok(0), Err(e) => Err(e).context(ErrorKind::MysqlError)?, } } }
29.51875
97
0.556003
ff262da797d2468152a3a1dc253ddf4a6b94e0cb
3,031
//! Manual definition of the stack probe. //! //! Rust currently fails to reexport symbols in dynamic libraries. This means that the old way of //! including an assembly stack probe in the runtime does not work when embedding in C. //! //! There is an [issue](https://github.com/rust-lang/rust/issues/36342) tracking this, but until //! it's closed we are taking the approach of including the stack probe in every Lucet module, and //! adding custom entries for it into the trap table, so that stack overflows in the probe will be //! treated like any other guest trap. use crate::decls::ModuleDecls; use crate::error::Error; use crate::module::UniqueFuncIndex; use cranelift_codegen::{ ir::{self, types, AbiParam, Signature}, isa::CallConv, }; use cranelift_module::{Backend as ClifBackend, Linkage, Module as ClifModule, TrapSite}; use cranelift_wasm::{WasmFuncType, WasmType}; /// Stack probe symbol name pub const STACK_PROBE_SYM: &str = "lucet_probestack"; /// The binary of the stack probe. pub(crate) const STACK_PROBE_BINARY: &[u8] = &[ // 49 89 c3 mov %rax,%r11 // 48 81 ec 00 10 00 00 sub $0x1000,%rsp // 48 85 64 24 08 test %rsp,0x8(%rsp) // 49 81 eb 00 10 00 00 sub $0x1000,%r11 // 49 81 fb 00 10 00 00 cmp $0x1000,%r11 // 77 e4 ja 4dfd3 <lucet_probestack+0x3> // 4c 29 dc sub %r11,%rsp // 48 85 64 24 08 test %rsp,0x8(%rsp) // 48 01 c4 add %rax,%rsp // c3 retq 0x49, 0x89, 0xc3, 0x48, 0x81, 0xec, 0x00, 0x10, 0x00, 0x00, 0x48, 0x85, 0x64, 0x24, 0x08, 0x49, 0x81, 0xeb, 0x00, 0x10, 0x00, 0x00, 0x49, 0x81, 0xfb, 0x00, 0x10, 0x00, 0x00, 0x77, 0xe4, 0x4c, 0x29, 0xdc, 0x48, 0x85, 0x64, 0x24, 0x08, 0x48, 0x01, 0xc4, 0xc3, ]; pub fn trap_sites() -> Vec<TrapSite> { vec![ TrapSite { offset: 10, /* test %rsp,0x8(%rsp) */ srcloc: ir::SourceLoc::default(), code: ir::TrapCode::StackOverflow, }, TrapSite { offset: 34, /* test %rsp,0x8(%rsp) */ srcloc: ir::SourceLoc::default(), code: ir::TrapCode::StackOverflow, }, ] } pub fn declare<'a, B: ClifBackend>( decls: &mut ModuleDecls<'a>, 
clif_module: &mut ClifModule<B>, ) -> Result<UniqueFuncIndex, Error> { Ok(decls .declare_new_function( clif_module, STACK_PROBE_SYM.to_string(), Linkage::Local, WasmFuncType { params: vec![].into_boxed_slice(), returns: vec![WasmType::I32].into_boxed_slice(), }, Signature { params: vec![], returns: vec![AbiParam::new(types::I32)], call_conv: CallConv::SystemV, // the stack probe function is very specific to x86_64, and possibly to SystemV ABI platforms? }, ) .unwrap()) }
39.363636
140
0.576377
4a323bffaee1fd17bf9ef27883794466b72a5a70
30,519
use crate::builtins::pystr::{PyStr, PyStrRef}; use crate::builtins::pytype::{PyType, PyTypeRef}; use crate::builtins::singletons::{PyNone, PyNoneRef}; use crate::builtins::traceback::PyTracebackRef; use crate::builtins::tuple::{PyTuple, PyTupleRef}; use crate::common::lock::PyRwLock; use crate::function::FuncArgs; use crate::py_io::{self, Write}; use crate::pyobject::StaticType; use crate::pyobject::{ BorrowValue, IntoPyObject, PyClassImpl, PyContext, PyIterable, PyObjectRef, PyRef, PyResult, PyValue, TryFromObject, TypeProtocol, }; use crate::types::create_type_with_slots; use crate::VirtualMachine; use crate::{py_serde, sysmodule}; use crossbeam_utils::atomic::AtomicCell; use itertools::Itertools; use std::fmt; use std::fs::File; use std::io::{self, BufRead, BufReader}; #[pyclass(module = false, name = "BaseException")] pub struct PyBaseException { traceback: PyRwLock<Option<PyTracebackRef>>, cause: PyRwLock<Option<PyBaseExceptionRef>>, context: PyRwLock<Option<PyBaseExceptionRef>>, suppress_context: AtomicCell<bool>, args: PyRwLock<PyTupleRef>, } impl fmt::Debug for PyBaseException { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // TODO: implement more detailed, non-recursive Debug formatter f.write_str("PyBaseException") } } pub type PyBaseExceptionRef = PyRef<PyBaseException>; pub trait IntoPyException { fn into_pyexception(self, vm: &VirtualMachine) -> PyBaseExceptionRef; } impl PyValue for PyBaseException { fn class(vm: &VirtualMachine) -> &PyTypeRef { &vm.ctx.exceptions.base_exception_type } } #[pyimpl(flags(BASETYPE, HAS_DICT))] impl PyBaseException { pub(crate) fn new(args: Vec<PyObjectRef>, vm: &VirtualMachine) -> PyBaseException { PyBaseException { traceback: PyRwLock::new(None), cause: PyRwLock::new(None), context: PyRwLock::new(None), suppress_context: AtomicCell::new(false), args: PyRwLock::new(PyTupleRef::with_elements(args, &vm.ctx)), } } #[pyslot] fn tp_new(cls: PyTypeRef, args: FuncArgs, vm: &VirtualMachine) -> PyResult<PyRef<Self>> { 
PyBaseException::new(args.args, vm).into_ref_with_type(vm, cls) } #[pymethod(name = "__init__")] fn init(&self, args: FuncArgs, vm: &VirtualMachine) -> PyResult<()> { *self.args.write() = PyTupleRef::with_elements(args.args, &vm.ctx); Ok(()) } pub fn get_arg(&self, idx: usize) -> Option<PyObjectRef> { self.args.read().borrow_value().get(idx).cloned() } #[pyproperty] pub fn args(&self) -> PyTupleRef { self.args.read().clone() } #[pyproperty(setter)] fn set_args(&self, args: PyIterable, vm: &VirtualMachine) -> PyResult<()> { let args = args.iter(vm)?.collect::<PyResult<Vec<_>>>()?; *self.args.write() = PyTupleRef::with_elements(args, &vm.ctx); Ok(()) } #[pyproperty(name = "__traceback__")] pub fn traceback(&self) -> Option<PyTracebackRef> { self.traceback.read().clone() } #[pyproperty(name = "__traceback__", setter)] pub fn set_traceback(&self, traceback: Option<PyTracebackRef>) { *self.traceback.write() = traceback; } #[pyproperty(name = "__cause__")] pub fn cause(&self) -> Option<PyBaseExceptionRef> { self.cause.read().clone() } #[pyproperty(name = "__cause__", setter)] pub fn set_cause(&self, cause: Option<PyBaseExceptionRef>) { let mut c = self.cause.write(); self.set_suppress_context(true); *c = cause; } #[pyproperty(name = "__context__")] pub fn context(&self) -> Option<PyBaseExceptionRef> { self.context.read().clone() } #[pyproperty(name = "__context__", setter)] pub fn set_context(&self, context: Option<PyBaseExceptionRef>) { *self.context.write() = context; } #[pyproperty(name = "__suppress_context__")] fn get_suppress_context(&self) -> bool { self.suppress_context.load() } #[pyproperty(name = "__suppress_context__", setter)] fn set_suppress_context(&self, suppress_context: bool) { self.suppress_context.store(suppress_context); } #[pymethod] fn with_traceback(zelf: PyRef<Self>, tb: Option<PyTracebackRef>) -> PyResult { *zelf.traceback.write() = tb; Ok(zelf.as_object().clone()) } #[pymethod(name = "__str__")] fn str(&self, vm: &VirtualMachine) -> PyStrRef { 
let str_args = exception_args_as_string(vm, self.args(), true); match str_args.into_iter().exactly_one() { Err(i) if i.len() == 0 => PyStr::from("").into_ref(vm), Ok(s) => s, Err(i) => PyStr::from(format!("({})", i.format(", "))).into_ref(vm), } } #[pymethod(name = "__repr__")] fn repr(zelf: PyRef<Self>, vm: &VirtualMachine) -> String { let repr_args = exception_args_as_string(vm, zelf.args(), false); let cls = zelf.class(); format!("{}({})", cls.name, repr_args.iter().format(", ")) } } pub fn chain<T>(e1: PyResult<()>, e2: PyResult<T>) -> PyResult<T> { match (e1, e2) { (Err(e1), Err(e)) => { e.set_context(Some(e1)); Err(e) } (Err(e), Ok(_)) | (Ok(()), Err(e)) => Err(e), (Ok(()), Ok(close_res)) => Ok(close_res), } } /// Print exception chain by calling sys.excepthook pub fn print_exception(vm: &VirtualMachine, exc: PyBaseExceptionRef) { let write_fallback = |exc, errstr| { if let Ok(stderr) = sysmodule::get_stderr(vm) { let mut stderr = py_io::PyWriter(stderr, vm); // if this fails stderr might be closed -- ignore it let _ = writeln!(stderr, "{}", errstr); let _ = write_exception(&mut stderr, vm, exc); } else { eprintln!("{}\nlost sys.stderr", errstr); let _ = write_exception(&mut io::stderr(), vm, exc); } }; if let Ok(excepthook) = vm.get_attribute(vm.sys_module.clone(), "excepthook") { let (exc_type, exc_val, exc_tb) = split(exc.clone(), vm); if let Err(eh_exc) = vm.invoke(&excepthook, vec![exc_type, exc_val, exc_tb]) { write_fallback(&eh_exc, "Error in sys.excepthook:"); write_fallback(&exc, "Original exception was:"); } } else { write_fallback(&exc, "missing sys.excepthook"); } } pub fn write_exception<W: Write>( output: &mut W, vm: &VirtualMachine, exc: &PyBaseExceptionRef, ) -> Result<(), W::Error> { if let Some(cause) = exc.cause() { write_exception(output, vm, &cause)?; writeln!( output, "\nThe above exception was the direct cause of the following exception:\n" )?; } else if let Some(context) = exc.context() { write_exception(output, vm, &context)?; 
writeln!( output, "\nDuring handling of the above exception, another exception occurred:\n" )?; } write_exception_inner(output, vm, exc) } fn print_source_line<W: Write>( output: &mut W, filename: &str, lineno: usize, ) -> Result<(), W::Error> { // TODO: use io.open() method instead, when available, according to https://github.com/python/cpython/blob/master/Python/traceback.c#L393 // TODO: support different encodings let file = match File::open(filename) { Ok(file) => file, Err(_) => return Ok(()), }; let file = BufReader::new(file); for (i, line) in file.lines().enumerate() { if i + 1 == lineno { if let Ok(line) = line { // Indented with 4 spaces writeln!(output, " {}", line.trim_start())?; } return Ok(()); } } Ok(()) } /// Print exception occurrence location from traceback element fn write_traceback_entry<W: Write>( output: &mut W, tb_entry: &PyTracebackRef, ) -> Result<(), W::Error> { let filename = tb_entry.frame.code.source_path.borrow_value(); writeln!( output, r##" File "{}", line {}, in {}"##, filename, tb_entry.lineno, tb_entry.frame.code.obj_name )?; print_source_line(output, filename, tb_entry.lineno)?; Ok(()) } /// Print exception with traceback pub fn write_exception_inner<W: Write>( output: &mut W, vm: &VirtualMachine, exc: &PyBaseExceptionRef, ) -> Result<(), W::Error> { if let Some(tb) = exc.traceback.read().clone() { writeln!(output, "Traceback (most recent call last):")?; for tb in tb.iter() { write_traceback_entry(output, &tb)?; } } let varargs = exc.args(); let args_repr = exception_args_as_string(vm, varargs, true); let exc_name = exc.class().name.clone(); match args_repr.len() { 0 => writeln!(output, "{}", exc_name), 1 => writeln!(output, "{}: {}", exc_name, args_repr[0]), _ => writeln!( output, "{}: ({})", exc_name, args_repr.into_iter().format(", ") ), } } fn exception_args_as_string( vm: &VirtualMachine, varargs: PyTupleRef, str_single: bool, ) -> Vec<PyStrRef> { let varargs = varargs.borrow_value(); match varargs.len() { 0 => vec![], 1 => 
{ let args0_repr = if str_single { vm.to_str(&varargs[0]) .unwrap_or_else(|_| PyStr::from("<element str() failed>").into_ref(vm)) } else { vm.to_repr(&varargs[0]) .unwrap_or_else(|_| PyStr::from("<element repr() failed>").into_ref(vm)) }; vec![args0_repr] } _ => varargs .iter() .map(|vararg| { vm.to_repr(vararg) .unwrap_or_else(|_| PyStr::from("<element repr() failed>").into_ref(vm)) }) .collect(), } } #[derive(Clone)] pub enum ExceptionCtor { Class(PyTypeRef), Instance(PyBaseExceptionRef), } impl TryFromObject for ExceptionCtor { fn try_from_object(vm: &VirtualMachine, obj: PyObjectRef) -> PyResult<Self> { obj.downcast::<PyType>() .and_then(|cls| { if cls.issubclass(&vm.ctx.exceptions.base_exception_type) { Ok(Self::Class(cls)) } else { Err(cls.into_object()) } }) .or_else(|obj| obj.downcast::<PyBaseException>().map(Self::Instance)) .map_err(|obj| { vm.new_type_error(format!( "exceptions must be classes or instances deriving from BaseException, not {}", obj.class().name )) }) } } pub fn invoke( cls: PyTypeRef, args: Vec<PyObjectRef>, vm: &VirtualMachine, ) -> PyResult<PyBaseExceptionRef> { // TODO: fast-path built-in exceptions by directly instantiating them? Is that really worth it? let res = vm.invoke(cls.as_object(), args)?; PyBaseExceptionRef::try_from_object(vm, res) } impl ExceptionCtor { pub fn instantiate(self, vm: &VirtualMachine) -> PyResult<PyBaseExceptionRef> { match self { Self::Class(cls) => invoke(cls, vec![], vm), Self::Instance(exc) => Ok(exc), } } pub fn instantiate_value( self, value: PyObjectRef, vm: &VirtualMachine, ) -> PyResult<PyBaseExceptionRef> { let exc_inst = value.clone().downcast::<PyBaseException>().ok(); match (self, exc_inst) { // both are instances; which would we choose? 
(Self::Instance(_exc_a), Some(_exc_b)) => { Err(vm .new_type_error("instance exception may not have a separate value".to_owned())) } // if the "type" is an instance and the value isn't, use the "type" (Self::Instance(exc), None) => Ok(exc), // if the value is an instance of the type, use the instance value (Self::Class(cls), Some(exc)) if exc.isinstance(&cls) => Ok(exc), // otherwise; construct an exception of the type using the value as args (Self::Class(cls), _) => { let args = match_class!(match value { PyNone => vec![], tup @ PyTuple => tup.borrow_value().to_vec(), exc @ PyBaseException => exc.args().borrow_value().to_vec(), obj => vec![obj], }); invoke(cls, args, vm) } } } } pub fn split( exc: PyBaseExceptionRef, vm: &VirtualMachine, ) -> (PyObjectRef, PyObjectRef, PyObjectRef) { let tb = exc.traceback().into_pyobject(vm); (exc.clone_class().into_object(), exc.into_object(), tb) } /// Similar to PyErr_NormalizeException in CPython pub fn normalize( exc_type: PyObjectRef, exc_val: PyObjectRef, exc_tb: PyObjectRef, vm: &VirtualMachine, ) -> PyResult<PyBaseExceptionRef> { let ctor = ExceptionCtor::try_from_object(vm, exc_type)?; let exc = ctor.instantiate_value(exc_val, vm)?; if let Some(tb) = Option::<PyTracebackRef>::try_from_object(vm, exc_tb)? 
{ exc.set_traceback(Some(tb)); } Ok(exc) } #[derive(Debug, Clone)] pub struct ExceptionZoo { pub base_exception_type: PyTypeRef, pub system_exit: PyTypeRef, pub keyboard_interrupt: PyTypeRef, pub generator_exit: PyTypeRef, pub exception_type: PyTypeRef, pub stop_iteration: PyTypeRef, pub stop_async_iteration: PyTypeRef, pub arithmetic_error: PyTypeRef, pub floating_point_error: PyTypeRef, pub overflow_error: PyTypeRef, pub zero_division_error: PyTypeRef, pub assertion_error: PyTypeRef, pub attribute_error: PyTypeRef, pub buffer_error: PyTypeRef, pub eof_error: PyTypeRef, pub import_error: PyTypeRef, pub module_not_found_error: PyTypeRef, pub lookup_error: PyTypeRef, pub index_error: PyTypeRef, pub key_error: PyTypeRef, pub memory_error: PyTypeRef, pub name_error: PyTypeRef, pub unbound_local_error: PyTypeRef, pub os_error: PyTypeRef, pub blocking_io_error: PyTypeRef, pub child_process_error: PyTypeRef, pub connection_error: PyTypeRef, pub broken_pipe_error: PyTypeRef, pub connection_aborted_error: PyTypeRef, pub connection_refused_error: PyTypeRef, pub connection_reset_error: PyTypeRef, pub file_exists_error: PyTypeRef, pub file_not_found_error: PyTypeRef, pub interrupted_error: PyTypeRef, pub is_a_directory_error: PyTypeRef, pub not_a_directory_error: PyTypeRef, pub permission_error: PyTypeRef, pub process_lookup_error: PyTypeRef, pub timeout_error: PyTypeRef, pub reference_error: PyTypeRef, pub runtime_error: PyTypeRef, pub not_implemented_error: PyTypeRef, pub recursion_error: PyTypeRef, pub syntax_error: PyTypeRef, pub target_scope_error: PyTypeRef, pub indentation_error: PyTypeRef, pub tab_error: PyTypeRef, pub system_error: PyTypeRef, pub type_error: PyTypeRef, pub value_error: PyTypeRef, pub unicode_error: PyTypeRef, pub unicode_decode_error: PyTypeRef, pub unicode_encode_error: PyTypeRef, pub unicode_translate_error: PyTypeRef, #[cfg(feature = "jit")] pub jit_error: PyTypeRef, pub warning: PyTypeRef, pub deprecation_warning: PyTypeRef, pub 
pending_deprecation_warning: PyTypeRef, pub runtime_warning: PyTypeRef, pub syntax_warning: PyTypeRef, pub user_warning: PyTypeRef, pub future_warning: PyTypeRef, pub import_warning: PyTypeRef, pub unicode_warning: PyTypeRef, pub bytes_warning: PyTypeRef, pub resource_warning: PyTypeRef, } impl ExceptionZoo { pub(crate) fn init() -> Self { let base_exception_type = PyBaseException::init_bare_type().clone(); let create_exception_type = |name: &str, base: &PyTypeRef| { create_type_with_slots( name, PyType::static_type(), base, PyBaseException::make_slots(), ) }; // Sorted By Hierarchy then alphabetized. let system_exit = create_exception_type("SystemExit", &base_exception_type); let keyboard_interrupt = create_exception_type("KeyboardInterrupt", &base_exception_type); let generator_exit = create_exception_type("GeneratorExit", &base_exception_type); let exception_type = create_exception_type("Exception", &base_exception_type); let stop_iteration = create_exception_type("StopIteration", &exception_type); let stop_async_iteration = create_exception_type("StopAsyncIteration", &exception_type); let arithmetic_error = create_exception_type("ArithmeticError", &exception_type); let floating_point_error = create_exception_type("FloatingPointError", &arithmetic_error); let overflow_error = create_exception_type("OverflowError", &arithmetic_error); let zero_division_error = create_exception_type("ZeroDivisionError", &arithmetic_error); let assertion_error = create_exception_type("AssertionError", &exception_type); let attribute_error = create_exception_type("AttributeError", &exception_type); let buffer_error = create_exception_type("BufferError", &exception_type); let eof_error = create_exception_type("EOFError", &exception_type); let import_error = create_exception_type("ImportError", &exception_type); let module_not_found_error = create_exception_type("ModuleNotFoundError", &import_error); let lookup_error = create_exception_type("LookupError", &exception_type); let 
index_error = create_exception_type("IndexError", &lookup_error); let key_error = create_exception_type("KeyError", &lookup_error); let memory_error = create_exception_type("MemoryError", &exception_type); let name_error = create_exception_type("NameError", &exception_type); let unbound_local_error = create_exception_type("UnboundLocalError", &name_error); // os errors let os_error = create_exception_type("OSError", &exception_type); let blocking_io_error = create_exception_type("BlockingIOError", &os_error); let child_process_error = create_exception_type("ChildProcessError", &os_error); let connection_error = create_exception_type("ConnectionError", &os_error); let connection_aborted_error = create_exception_type("ConnectionAbortedError", &connection_error); let connection_refused_error = create_exception_type("ConnectionRefusedError", &connection_error); let connection_reset_error = create_exception_type("ConnectionResetError", &connection_error); let file_exists_error = create_exception_type("FileExistsError", &os_error); let file_not_found_error = create_exception_type("FileNotFoundError", &os_error); let interrupted_error = create_exception_type("InterruptedError", &os_error); let is_a_directory_error = create_exception_type("IsADirectoryError", &os_error); let not_a_directory_error = create_exception_type("NotADirectoryError", &os_error); let broken_pipe_error = create_exception_type("BrokenPipeError", &connection_error); let permission_error = create_exception_type("PermissionError", &os_error); let process_lookup_error = create_exception_type("ProcessLookupError", &os_error); let timeout_error = create_exception_type("TimeoutError", &os_error); let reference_error = create_exception_type("ReferenceError", &exception_type); let runtime_error = create_exception_type("RuntimeError", &exception_type); let not_implemented_error = create_exception_type("NotImplementedError", &runtime_error); let recursion_error = create_exception_type("RecursionError", 
&runtime_error); let syntax_error = create_exception_type("SyntaxError", &exception_type); let indentation_error = create_exception_type("IndentationError", &syntax_error); let tab_error = create_exception_type("TabError", &indentation_error); let target_scope_error = create_exception_type("TargetScopeError", &syntax_error); let system_error = create_exception_type("SystemError", &exception_type); let type_error = create_exception_type("TypeError", &exception_type); let value_error = create_exception_type("ValueError", &exception_type); let unicode_error = create_exception_type("UnicodeError", &value_error); let unicode_decode_error = create_exception_type("UnicodeDecodeError", &unicode_error); let unicode_encode_error = create_exception_type("UnicodeEncodeError", &unicode_error); let unicode_translate_error = create_exception_type("UnicodeTranslateError", &unicode_error); #[cfg(feature = "jit")] let jit_error = create_exception_type("JitError", &exception_type); let warning = create_exception_type("Warning", &exception_type); let deprecation_warning = create_exception_type("DeprecationWarning", &warning); let pending_deprecation_warning = create_exception_type("PendingDeprecationWarning", &warning); let runtime_warning = create_exception_type("RuntimeWarning", &warning); let syntax_warning = create_exception_type("SyntaxWarning", &warning); let user_warning = create_exception_type("UserWarning", &warning); let future_warning = create_exception_type("FutureWarning", &warning); let import_warning = create_exception_type("ImportWarning", &warning); let unicode_warning = create_exception_type("UnicodeWarning", &warning); let bytes_warning = create_exception_type("BytesWarning", &warning); let resource_warning = create_exception_type("ResourceWarning", &warning); Self { base_exception_type, system_exit, keyboard_interrupt, generator_exit, exception_type, stop_iteration, stop_async_iteration, arithmetic_error, floating_point_error, overflow_error, zero_division_error, 
assertion_error, attribute_error, buffer_error, eof_error, import_error, module_not_found_error, lookup_error, index_error, key_error, memory_error, name_error, unbound_local_error, os_error, blocking_io_error, child_process_error, connection_error, broken_pipe_error, connection_aborted_error, connection_refused_error, connection_reset_error, file_exists_error, file_not_found_error, interrupted_error, is_a_directory_error, not_a_directory_error, permission_error, process_lookup_error, timeout_error, reference_error, runtime_error, not_implemented_error, recursion_error, syntax_error, target_scope_error, indentation_error, tab_error, system_error, type_error, value_error, unicode_error, unicode_decode_error, unicode_encode_error, unicode_translate_error, #[cfg(feature = "jit")] jit_error, warning, deprecation_warning, pending_deprecation_warning, runtime_warning, syntax_warning, user_warning, future_warning, import_warning, unicode_warning, bytes_warning, resource_warning, } } pub fn extend(ctx: &PyContext) { let excs = &ctx.exceptions; PyBaseException::extend_class(ctx, &excs.base_exception_type); extend_class!(ctx, &excs.syntax_error, { "msg" => ctx.new_readonly_getset("msg", make_arg_getter(0)), // TODO: members "filename" => ctx.none(), "lineno" => ctx.none(), "offset" => ctx.none(), "text" => ctx.none(), }); extend_class!(ctx, &excs.system_exit, { "code" => ctx.new_readonly_getset("code", system_exit_code), }); extend_class!(ctx, &excs.import_error, { "__init__" => ctx.new_method("__init__", import_error_init), "msg" => ctx.new_readonly_getset("msg", make_arg_getter(0)), }); extend_class!(ctx, &excs.stop_iteration, { "value" => ctx.new_readonly_getset("value", make_arg_getter(0)), }); extend_class!(ctx, &excs.key_error, { "__str__" => ctx.new_method("__str__", key_error_str), }); extend_class!(ctx, &excs.os_error, { "errno" => ctx.new_readonly_getset("errno", |exc: PyBaseExceptionRef| { let args = exc.args(); let args = args.borrow_value(); 
args.get(0).filter(|_| args.len() > 1).cloned() }), }); extend_class!(ctx, &excs.unicode_decode_error, { "encoding" => ctx.new_readonly_getset("encoding", make_arg_getter(0)), "object" => ctx.new_readonly_getset("object", make_arg_getter(1)), "start" => ctx.new_readonly_getset("start", make_arg_getter(2)), "end" => ctx.new_readonly_getset("end", make_arg_getter(3)), "reason" => ctx.new_readonly_getset("reason", make_arg_getter(4)), }); extend_class!(ctx, &excs.unicode_encode_error, { "encoding" => ctx.new_readonly_getset("encoding", make_arg_getter(0)), "object" => ctx.new_readonly_getset("object", make_arg_getter(1)), "start" => ctx.new_readonly_getset("start", make_arg_getter(2)), "end" => ctx.new_readonly_getset("end", make_arg_getter(3)), "reason" => ctx.new_readonly_getset("reason", make_arg_getter(4)), }); extend_class!(ctx, &excs.unicode_translate_error, { "encoding" => ctx.new_readonly_getset("encoding", none_getter), "object" => ctx.new_readonly_getset("object", make_arg_getter(0)), "start" => ctx.new_readonly_getset("start", make_arg_getter(1)), "end" => ctx.new_readonly_getset("end", make_arg_getter(2)), "reason" => ctx.new_readonly_getset("reason", make_arg_getter(3)), }); } } fn import_error_init(exc_self: PyObjectRef, args: FuncArgs, vm: &VirtualMachine) -> PyResult<()> { vm.set_attr( &exc_self, "name", vm.unwrap_or_none(args.kwargs.get("name").cloned()), )?; vm.set_attr( &exc_self, "path", vm.unwrap_or_none(args.kwargs.get("path").cloned()), )?; Ok(()) } fn none_getter(_obj: PyObjectRef, vm: &VirtualMachine) -> PyNoneRef { vm.ctx.none.clone() } fn make_arg_getter(idx: usize) -> impl Fn(PyBaseExceptionRef) -> Option<PyObjectRef> { move |exc| exc.get_arg(idx) } fn key_error_str(exc: PyBaseExceptionRef, vm: &VirtualMachine) -> PyStrRef { let args = exc.args(); if args.borrow_value().len() == 1 { exception_args_as_string(vm, args, false) .into_iter() .exactly_one() .unwrap() } else { exc.str(vm) } } fn system_exit_code(exc: PyBaseExceptionRef) -> 
Option<PyObjectRef> { exc.args.read().borrow_value().first().map(|code| { match_class!(match code { ref tup @ PyTuple => match tup.borrow_value() { [x] => x.clone(), _ => code.clone(), }, other => other.clone(), }) }) } pub struct SerializeException<'s> { vm: &'s VirtualMachine, exc: &'s PyBaseExceptionRef, } impl<'s> SerializeException<'s> { pub fn new(vm: &'s VirtualMachine, exc: &'s PyBaseExceptionRef) -> Self { SerializeException { vm, exc } } } impl serde::Serialize for SerializeException<'_> { fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { use serde::ser::*; let mut struc = s.serialize_struct("PyBaseException", 7)?; struc.serialize_field("exc_type", &self.exc.class().name)?; let tbs = { struct Tracebacks(PyTracebackRef); impl serde::Serialize for Tracebacks { fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { let mut s = s.serialize_seq(None)?; for tb in self.0.iter() { s.serialize_element(&*tb)?; } s.end() } } self.exc.traceback().map(Tracebacks) }; struc.serialize_field("traceback", &tbs)?; struc.serialize_field( "cause", &self.exc.cause().as_ref().map(|e| Self::new(self.vm, e)), )?; struc.serialize_field( "context", &self.exc.context().as_ref().map(|e| Self::new(self.vm, e)), )?; struc.serialize_field("suppress_context", &self.exc.suppress_context.load())?; let args = { struct Args<'vm>(&'vm VirtualMachine, PyTupleRef); impl serde::Serialize for Args<'_> { fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { s.collect_seq( self.1 .borrow_value() .iter() .map(|arg| py_serde::PyObjectSerializer::new(self.0, arg)), ) } } Args(self.vm, self.exc.args()) }; struc.serialize_field("args", &args)?; let rendered = { let mut rendered = Vec::<u8>::new(); write_exception(&mut rendered, self.vm, &self.exc).map_err(S::Error::custom)?; String::from_utf8(rendered).map_err(S::Error::custom)? }; struc.serialize_field("rendered", &rendered)?; struc.end() } }
37.037621
141
0.614044
c1a7690d5e88e1cc510c3750bc1bf2578d6beb11
114,776
#![doc = "generated by AutoRust"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::models; #[derive(Clone)] pub struct Client { endpoint: String, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, pipeline: azure_core::Pipeline, } #[derive(Clone)] pub struct ClientBuilder { credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, endpoint: Option<String>, scopes: Option<Vec<String>>, } pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD; impl ClientBuilder { pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self { Self { credential, endpoint: None, scopes: None, } } pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self { self.endpoint = Some(endpoint.into()); self } pub fn scopes(mut self, scopes: &[&str]) -> Self { self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect()); self } pub fn build(self) -> Client { let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned()); let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]); Client::new(endpoint, self.credential, scopes) } } impl Client { pub(crate) fn endpoint(&self) -> &str { self.endpoint.as_str() } pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential { self.credential.as_ref() } pub(crate) fn scopes(&self) -> Vec<&str> { self.scopes.iter().map(String::as_str).collect() } pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> { let mut context = azure_core::Context::default(); let mut request = request.into(); self.pipeline.send(&mut context, &mut request).await } pub fn new( endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, ) -> Self { let endpoint = endpoint.into(); let pipeline = azure_core::Pipeline::new( 
option_env!("CARGO_PKG_NAME"), option_env!("CARGO_PKG_VERSION"), azure_core::ClientOptions::default(), Vec::new(), Vec::new(), ); Self { endpoint, credential, scopes, pipeline, } } pub fn operations(&self) -> operations::Client { operations::Client(self.clone()) } pub fn private_endpoint_connections(&self) -> private_endpoint_connections::Client { private_endpoint_connections::Client(self.clone()) } pub fn private_link_resources(&self) -> private_link_resources::Client { private_link_resources::Client(self.clone()) } pub fn secrets(&self) -> secrets::Client { secrets::Client(self.clone()) } pub fn vaults(&self) -> vaults::Client { vaults::Client(self.clone()) } } #[non_exhaustive] #[derive(Debug, thiserror :: Error)] #[allow(non_camel_case_types)] pub enum Error { #[error(transparent)] Vaults_Get(#[from] vaults::get::Error), #[error(transparent)] Vaults_CreateOrUpdate(#[from] vaults::create_or_update::Error), #[error(transparent)] Vaults_Update(#[from] vaults::update::Error), #[error(transparent)] Vaults_Delete(#[from] vaults::delete::Error), #[error(transparent)] Vaults_UpdateAccessPolicy(#[from] vaults::update_access_policy::Error), #[error(transparent)] Vaults_ListByResourceGroup(#[from] vaults::list_by_resource_group::Error), #[error(transparent)] Vaults_ListBySubscription(#[from] vaults::list_by_subscription::Error), #[error(transparent)] Vaults_ListDeleted(#[from] vaults::list_deleted::Error), #[error(transparent)] Vaults_GetDeleted(#[from] vaults::get_deleted::Error), #[error(transparent)] Vaults_PurgeDeleted(#[from] vaults::purge_deleted::Error), #[error(transparent)] Vaults_List(#[from] vaults::list::Error), #[error(transparent)] Vaults_CheckNameAvailability(#[from] vaults::check_name_availability::Error), #[error(transparent)] PrivateEndpointConnections_Get(#[from] private_endpoint_connections::get::Error), #[error(transparent)] PrivateEndpointConnections_Put(#[from] private_endpoint_connections::put::Error), #[error(transparent)] 
PrivateEndpointConnections_Delete(#[from] private_endpoint_connections::delete::Error), #[error(transparent)] PrivateLinkResources_ListByVault(#[from] private_link_resources::list_by_vault::Error), #[error(transparent)] Operations_List(#[from] operations::list::Error), #[error(transparent)] Secrets_Get(#[from] secrets::get::Error), #[error(transparent)] Secrets_CreateOrUpdate(#[from] secrets::create_or_update::Error), #[error(transparent)] Secrets_Update(#[from] secrets::update::Error), #[error(transparent)] Secrets_List(#[from] secrets::list::Error), } pub mod vaults { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn get( &self, resource_group_name: impl Into<String>, vault_name: impl Into<String>, subscription_id: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), subscription_id: subscription_id.into(), } } pub fn create_or_update( &self, resource_group_name: impl Into<String>, vault_name: impl Into<String>, parameters: impl Into<models::VaultCreateOrUpdateParameters>, subscription_id: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), } } pub fn update( &self, resource_group_name: impl Into<String>, vault_name: impl Into<String>, parameters: impl Into<models::VaultPatchParameters>, subscription_id: impl Into<String>, ) -> update::Builder { update::Builder { client: self.0.clone(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), } } pub fn delete( &self, resource_group_name: impl Into<String>, vault_name: impl Into<String>, subscription_id: impl Into<String>, ) -> delete::Builder { delete::Builder { client: 
self.0.clone(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), subscription_id: subscription_id.into(), } } pub fn update_access_policy( &self, resource_group_name: impl Into<String>, vault_name: impl Into<String>, operation_kind: impl Into<String>, parameters: impl Into<models::VaultAccessPolicyParameters>, subscription_id: impl Into<String>, ) -> update_access_policy::Builder { update_access_policy::Builder { client: self.0.clone(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), operation_kind: operation_kind.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), } } pub fn list_by_resource_group( &self, resource_group_name: impl Into<String>, subscription_id: impl Into<String>, ) -> list_by_resource_group::Builder { list_by_resource_group::Builder { client: self.0.clone(), resource_group_name: resource_group_name.into(), subscription_id: subscription_id.into(), top: None, } } pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder { list_by_subscription::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), top: None, } } pub fn list_deleted(&self, subscription_id: impl Into<String>) -> list_deleted::Builder { list_deleted::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), } } pub fn get_deleted( &self, vault_name: impl Into<String>, location: impl Into<String>, subscription_id: impl Into<String>, ) -> get_deleted::Builder { get_deleted::Builder { client: self.0.clone(), vault_name: vault_name.into(), location: location.into(), subscription_id: subscription_id.into(), } } pub fn purge_deleted( &self, vault_name: impl Into<String>, location: impl Into<String>, subscription_id: impl Into<String>, ) -> purge_deleted::Builder { purge_deleted::Builder { client: self.0.clone(), vault_name: vault_name.into(), location: location.into(), subscription_id: subscription_id.into(), } } pub fn 
list(&self, filter: impl Into<String>, subscription_id: impl Into<String>) -> list::Builder { list::Builder { client: self.0.clone(), filter: filter.into(), subscription_id: subscription_id.into(), top: None, } } pub fn check_name_availability( &self, vault_name: impl Into<models::VaultCheckNameAvailabilityParameters>, subscription_id: impl Into<String>, ) -> check_name_availability::Builder { check_name_availability::Builder { client: self.0.clone(), vault_name: vault_name.into(), subscription_id: subscription_id.into(), } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Vault, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); 
req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Created201(models::Vault), Ok200(models::Vault), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Error response #response_type")] BadRequest400 { value: models::CloudError }, #[error("Error response #response_type")] Conflict409 { value: models::CloudError }, #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] 
serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) parameters: models::VaultCreateOrUpdateParameters, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::CREATED => { let rsp_body = 
azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Created201(rsp_value)) } http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::BAD_REQUEST => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::BadRequest400 { value: rsp_value }) } http::StatusCode::CONFLICT => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::Conflict409 { value: rsp_value }) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod update { use super::models; #[derive(Debug)] pub enum Response { Created201(models::Vault), Ok200(models::Vault), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Error response #response_type")] BadRequest400 { value: models::CloudError }, #[error("Error response #response_type")] Conflict409 { value: models::CloudError }, #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse 
request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) parameters: models::VaultPatchParameters, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
// Map the PATCH response by status: 200/201 deserialize models::Vault, 400/409 deserialize
// models::CloudError into typed error variants, and any other status becomes DefaultResponse.
// NOTE(review): rsp_headers is bound but never read (generated code) — would warn as unused.
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Created201(rsp_value)) } http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Vault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::BAD_REQUEST => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::BadRequest400 { value: rsp_value }) } http::StatusCode::CONFLICT => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::Conflict409 { value: rsp_value }) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Error response #response_type")] BadRequest400 { value: models::CloudError }, #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, },
// NOTE(review): the literal "#response_type" in the status-variant messages looks like an
// unexpanded code-generator placeholder — confirm against the generator template before fixing.
// Below: shared transport/serialization error variants, then the builder for
// DELETE {endpoint}/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.KeyVault/vaults/{vault} (api-version 2019-09-01).
#[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200),
// DELETE carries no success body: both 200 and 204 map to body-less success variants;
// 400 and the fallback arm deserialize a CloudError payload.
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), http::StatusCode::BAD_REQUEST => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::BadRequest400 { value: rsp_value }) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod update_access_policy { use super::models; #[derive(Debug)] pub enum Response { Created201(models::VaultAccessPolicyParameters), Ok200(models::VaultAccessPolicyParameters), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Error response #response_type")] BadRequest400 { value: models::CloudError }, #[error("Error response #response_type")] NotFound404 { value: models::CloudError }, #[error("Error response #response_type")] Conflict409 { value: models::CloudError }, #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client,
// Remaining builder state and request logic for update_access_policy:
// PUT {endpoint}/.../vaults/{vault}/accessPolicies/{operationKind} with a JSON
// VaultAccessPolicyParameters body (api-version 2019-09-01).
pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) operation_kind: String, pub(crate) parameters: models::VaultAccessPolicyParameters, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}/accessPolicies/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.operation_kind ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultAccessPolicyParameters = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Created201(rsp_value)) } http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value:
models::VaultAccessPolicyParameters = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::BAD_REQUEST => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::BadRequest400 { value: rsp_value }) } http::StatusCode::NOT_FOUND => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::NotFound404 { value: rsp_value }) } http::StatusCode::CONFLICT => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::Conflict409 { value: rsp_value }) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod list_by_resource_group { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error),
// Remaining list_by_resource_group error variants, then its builder:
// GET {endpoint}/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.KeyVault/vaults,
// returning models::VaultListResult; the $top query parameter is appended only when set.
#[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) resource_group_name: String, pub(crate) subscription_id: String, pub(crate) top: Option<i32>, } impl Builder { pub fn top(mut self, top: i32) -> Self { self.top = Some(top); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VaultListResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults", self.client.endpoint(), &self.subscription_id, &self.resource_group_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); if let Some(top) = &self.top { url.query_pairs_mut().append_pair("$top", &top.to_string()); } let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultListResult = serde_json::from_slice(&rsp_body).map_err(|source|
Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod list_by_subscription { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) top: Option<i32>, } impl Builder { pub fn top(mut self, top: i32) -> Self { self.top = Some(top); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::VaultListResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.KeyVault/vaults", self.client.endpoint(), &self.subscription_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential
// Continuation of list_by_subscription's token acquisition: the client's scopes are joined
// into one space-separated string for get_token, and the result becomes the Bearer header.
.get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); if let Some(top) = &self.top { url.query_pairs_mut().append_pair("$top", &top.to_string()); } let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::VaultListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod list_deleted { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize 
response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DeletedVaultListResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.KeyVault/deletedVaults", self.client.endpoint(), &self.subscription_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::DeletedVaultListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get_deleted { use super::models; #[derive(Debug, thiserror 
:: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) vault_name: String, pub(crate) location: String, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DeletedVault, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.KeyVault/locations/{}/deletedVaults/{}", self.client.endpoint(), &self.subscription_id, &self.location, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp =
self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::DeletedVault = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod purge_deleted { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Error response #response_type")] BadRequest400 { value: models::CloudError }, #[error("Error response #response_type")] NotFound404 { value: models::CloudError }, #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) vault_name: String, pub(crate) location: String, pub(crate) subscription_id: String, } impl Builder { pub fn
// purge_deleted: POST {endpoint}/.../locations/{location}/deletedVaults/{vault}/purge with an
// empty body and an explicit Content-Length: 0 header; 200 and 202 are body-less successes.
into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.KeyVault/locations/{}/deletedVaults/{}/purge", self.client.endpoint(), &self.subscription_id, &self.location, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::BAD_REQUEST => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::BadRequest400 { value: rsp_value }) } http::StatusCode::NOT_FOUND => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::NotFound404 { value: rsp_value }) } status_code => { let rsp_body =
azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod list { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) filter: String, pub(crate) subscription_id: String, pub(crate) top: Option<i32>, } impl Builder { pub fn top(mut self, top: i32) -> Self { self.top = Some(top); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ResourceListResult, Error>> { Box::pin(async move { let url_str = &format!("{}/subscriptions/{}/resources", self.client.endpoint(), &self.subscription_id); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder =
// Continuation of the generic resources `list` operation: auth header, api-version,
// a mandatory $filter query pair, and an optional $top pair appended only when set.
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let filter = &self.filter; url.query_pairs_mut().append_pair("$filter", filter); if let Some(top) = &self.top { url.query_pairs_mut().append_pair("$top", &top.to_string()); } let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::ResourceListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod check_name_availability { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize 
response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) vault_name: models::VaultCheckNameAvailabilityParameters, pub(crate) subscription_id: String, } impl Builder { pub fn into_future( self, ) -> futures::future::BoxFuture<'static, std::result::Result<models::CheckNameAvailabilityResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.KeyVault/checkNameAvailability", self.client.endpoint(), &self.subscription_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.vault_name).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CheckNameAvailabilityResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError =
// check_name_availability fallback arm: any non-200 body deserializes into CloudError for
// DefaultResponse. Afterwards the vaults operation group closes and the
// private_endpoint_connections group begins: a Client wrapper whose get/put/delete methods
// return per-operation Builders over .../vaults/{vault}/privateEndpointConnections/{name}.
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod private_endpoint_connections { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn get( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, private_endpoint_connection_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), private_endpoint_connection_name: private_endpoint_connection_name.into(), } } pub fn put( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, private_endpoint_connection_name: impl Into<String>, properties: impl Into<models::PrivateEndpointConnection>, ) -> put::Builder { put::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), private_endpoint_connection_name: private_endpoint_connection_name.into(), properties: properties.into(), } } pub fn delete( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, private_endpoint_connection_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), private_endpoint_connection_name: private_endpoint_connection_name.into(), } } } pub mod get { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::PrivateEndpointConnection), NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value:
models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) private_endpoint_connection_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}/privateEndpointConnections/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.private_endpoint_connection_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp =
self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::PrivateEndpointConnection = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod put { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) private_endpoint_connection_name: String, pub(crate) properties: models::PrivateEndpointConnection, } impl Builder { pub fn into_future(self) ->
// PUT sends the PrivateEndpointConnection properties as JSON and, on 200, returns the
// updated PrivateEndpointConnection from the response body.
futures::future::BoxFuture<'static, std::result::Result<models::PrivateEndpointConnection, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}/privateEndpointConnections/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.private_endpoint_connection_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.properties).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::PrivateEndpointConnection = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum
Response { Ok200(models::PrivateEndpointConnection), Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) private_endpoint_connection_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}/privateEndpointConnections/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.private_endpoint_connection_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); 
url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::PrivateEndpointConnection = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod private_link_resources { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_vault( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, vault_name: impl Into<String>, ) -> list_by_vault::Builder { list_by_vault::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), } } } pub mod list_by_vault { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] 
Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) vault_name: String, } impl Builder { pub fn into_future( self, ) -> futures::future::BoxFuture<'static, std::result::Result<models::PrivateLinkResourceListResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}/privateLinkResources", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::PrivateLinkResourceListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } }
// ---------------------------------------------------------------------------
// Provider operations: list every REST operation exposed by the
// Microsoft.KeyVault resource provider (subscription-independent URL).
// Generated code — see the note at `private_endpoint_connections`.
pub mod operations { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list(&self) -> list::Builder { list::Builder { client: self.0.clone() } } } pub mod list { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationListResult, Error>> { Box::pin(async move { let url_str = &format!("{}/providers/Microsoft.KeyVault/operations", self.client.endpoint(),); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let
token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::OperationListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } }
// ---------------------------------------------------------------------------
// Management-plane operations on vault secrets: get / create_or_update /
// update / list. Generated code — see the note at
// `private_endpoint_connections`.
pub mod secrets { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn get( &self, resource_group_name: impl Into<String>, vault_name: impl Into<String>, secret_name: impl Into<String>, subscription_id: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), secret_name: secret_name.into(), subscription_id: subscription_id.into(), } } pub fn create_or_update( &self, resource_group_name: impl Into<String>, vault_name: impl Into<String>, secret_name: impl Into<String>, parameters: impl Into<models::SecretCreateOrUpdateParameters>, subscription_id: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(),
resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), secret_name: secret_name.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), } } pub fn update( &self, resource_group_name: impl Into<String>, vault_name: impl Into<String>, secret_name: impl Into<String>, parameters: impl Into<models::SecretPatchParameters>, subscription_id: impl Into<String>, ) -> update::Builder { update::Builder { client: self.0.clone(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), secret_name: secret_name.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), } } pub fn list( &self, resource_group_name: impl Into<String>, vault_name: impl Into<String>, subscription_id: impl Into<String>, ) -> list::Builder { list::Builder { client: self.0.clone(), resource_group_name: resource_group_name.into(), vault_name: vault_name.into(), subscription_id: subscription_id.into(), top: None, } } }
// GET .../vaults/{vault}/secrets/{secret} — 200 with the secret resource.
pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) secret_name: String, pub(crate)
subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Secret, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}/secrets/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.secret_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Secret = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } }
// PUT — create or update a secret; 201 (created) or 200 (updated), both
// carrying the resulting secret resource.
pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Created201(models::Secret), Ok200(models::Secret), } #[derive(Debug, thiserror :: Error)] pub enum
Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) secret_name: String, pub(crate) parameters: models::SecretCreateOrUpdateParameters, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}/secrets/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.secret_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body =
azure_core::to_json(&self.parameters).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Secret = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Created201(rsp_value)) } http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Secret = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } }
// PATCH — partial update via SecretPatchParameters; 201 or 200 with the
// resulting secret resource.
pub mod update { use super::models; #[derive(Debug)] pub enum Response { Created201(models::Secret), Ok200(models::Secret), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) secret_name: String, pub(crate) parameters: models::SecretPatchParameters, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}/secrets/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name, &self.secret_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Secret = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value)) } http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Secret = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } }
// GET — list secrets in a vault, with an optional `$top` page-size query
// parameter. NOTE(review): unlike the other operations in this file, non-200
// responses here surface as `UnexpectedResponse { status_code, body }` with
// the raw bytes rather than a parsed `CloudError` — callers matching on
// errors must handle this asymmetry.
pub mod list { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) resource_group_name: String, pub(crate) vault_name: String, pub(crate) subscription_id: String, pub(crate) top: Option<i32>, } impl Builder { pub fn top(mut self, top: i32) -> Self { self.top = Some(top); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::SecretListResult, Error>> { Box::pin(async move { let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}/secrets", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.vault_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-09-01"); if let Some(top) = &self.top { url.query_pairs_mut().append_pair("$top", &top.to_string()); } let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::SecretListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } }
// NOTE(review): trailing non-code residue removed — the values
// "51.262171", "140", "0.51563" are dataset/extraction metadata
// (avg_line_length / max_line_length / alphanum_fraction), not Rust source,
// and would not parse at the top level of this file.