Dataset schema:

  hexsha             stringlengths   40 – 40
  size               int64           2 – 1.05M
  content            stringlengths   2 – 1.05M
  avg_line_length    float64         1.33 – 100
  max_line_length    int64           1 – 1k
  alphanum_fraction  float64         0.25 – 1
hexsha: fe711453e5de0906f307c1df4e550e1d5f6f946a | size: 13,760
//! Core traits for rule definitions and rule context.
//! As well as an internal prelude to make imports for rules easier.

#![allow(unused_variables, unused_imports)]

use crate::autofix::Fixer;
use crate::Diagnostic;
use dyn_clone::DynClone;
use rslint_errors::Severity;
use rslint_parser::{SyntaxNode, SyntaxNodeExt, SyntaxToken};
use rslint_text_edit::apply_indels;
use serde::{Deserialize, Serialize};
use std::borrow::Borrow;
use std::fmt::Debug;
use std::marker::{Send, Sync};
use std::ops::{Deref, DerefMut, Drop};
use std::rc::Rc;
use std::sync::Arc;

/// The main type of rule run by the runner. The rule takes individual
/// nodes inside of a Concrete Syntax Tree and checks them.
/// It may also take individual syntax tokens.
/// Rules must all be [`Send`] + [`Sync`], because rules are run in parallel.
///
/// # Rule Level Configuration
/// Rules do not know about the lint level they were configured for; the runner
/// runs the rules, then maps any error/warning diagnostics to their appropriate severity.
/// This saves on boilerplate code for getting the appropriate diagnostic builder type and config.
///
/// # Guidelines
/// This is a list of guidelines and tips you should generally follow when implementing a rule:
/// - Do not use text-based equality, it is inaccurate, instead use [`lexical_eq`](SyntaxNodeExt::lexical_eq).
/// - Avoid using `text_range` on nodes, it is inaccurate because it may include whitespace, instead use [`trimmed_range`](SyntaxNodeExt::trimmed_range).
/// - Avoid using `text` on nodes for the same reason as the previous, use [`trimmed_text`](SyntaxNodeExt::trimmed_text).
/// - If you can offer better diagnostics and more context around a rule error, __always__ do it! It is a central goal
///   of the project to offer very helpful diagnostics.
/// - Do not be afraid to clone syntax nodes, ast nodes, and syntax tokens. They are all backed by an [`Rc`](std::rc::Rc) around Node data,
///   therefore they can be cheaply cloned (but if you can, have your functions take a reference since Rc cloning is not zero cost).
/// - Do not try to rely on the result of other rules, it is impossible because rules are run at the same time.
/// - Do not rely on file data of different files. There is a separate rule type for this.
/// - Do not unwrap pieces of an AST node (sometimes it is ok because they are guaranteed to be there), since that will cause panics
///   with error recovery.
/// - Do not use node or string coloring outside of diagnostic notes, it messes with termcolor and ends up looking horrible.
#[typetag::serde]
pub trait CstRule: Rule {
    /// Check an individual node in the syntax tree.
    /// You can use the `match_ast` macro to make matching a node to an ast node easier.
    /// The reason this uses nodes and not a visitor is because nodes are more flexible,
    /// converting them to an AST node has zero cost and you can easily traverse surrounding nodes.
    /// Defaults to doing nothing.
    ///
    /// The return type is `Option<()>` to allow usage of `?` on the properties of AST nodes which are all optional.
    #[inline]
    fn check_node(&self, node: &SyntaxNode, ctx: &mut RuleCtx) -> Option<()> {
        None
    }

    /// Check an individual token in the syntax tree.
    /// Defaults to doing nothing.
    #[inline]
    fn check_token(&self, token: &SyntaxToken, ctx: &mut RuleCtx) -> Option<()> {
        None
    }

    /// Check the root of the tree one time.
    /// This method is guaranteed to only be called once.
    /// The root's kind will be either `SCRIPT` or `MODULE`.
    /// Defaults to doing nothing.
    #[inline]
    fn check_root(&self, root: &SyntaxNode, ctx: &mut RuleCtx) -> Option<()> {
        None
    }
}

/// A generic trait which describes things common to a rule regardless of what they run on.
///
/// Each rule should have a `new` function for easy instantiation. We however do not require this,
/// to allow more complex rules to instantiate themselves in a different way.
/// However the rules must be easily instantiated because of rule groups.
pub trait Rule: Debug + DynClone + Send + Sync {
    /// A unique, kebab-case name for the rule.
    fn name(&self) -> &'static str;
    /// The name of the group this rule belongs to.
    fn group(&self) -> &'static str;
    /// Optional docs for the rule, an empty string by default.
    fn docs(&self) -> &'static str {
        ""
    }
}

dyn_clone::clone_trait_object!(Rule);
dyn_clone::clone_trait_object!(CstRule);

/// A trait describing rules whose configuration can be automatically deduced (inferred) using
/// parsed syntax trees.
#[typetag::serde]
pub trait Inferable: CstRule {
    /// Infer the options for the rule from multiple nodes (which may be from different trees) and change them
    fn infer(&mut self, nodes: &[SyntaxNode]);
}

/// The level configured for a rule.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum RuleLevel {
    Warning,
    Error,
}

/// Context given to a rule when running it.
// This is passed by reference and not by Arc, which is very important,
// Arcs are very expensive to copy, and for 50 rules running on 50 files we will have a total of
// 2500 copies, which is not ideal.
#[derive(Debug, Clone)]
pub struct RuleCtx {
    /// The file id of the file being linted.
    pub file_id: usize,
    /// Whether the linter is run with the `--verbose` option,
    /// which dictates whether the linter should include more (potentially spammy) context in diagnostics.
    pub verbose: bool,
    /// An empty vector of diagnostics which the rule adds to.
    pub diagnostics: Vec<Diagnostic>,
    pub fixer: Option<Fixer>,
    pub src: Arc<String>,
}

impl RuleCtx {
    /// Make a new diagnostic builder.
    pub fn err(&mut self, code: impl Into<String>, message: impl Into<String>) -> Diagnostic {
        Diagnostic::error(self.file_id, code.into(), message.into())
    }

    pub fn add_err(&mut self, diagnostic: Diagnostic) {
        self.diagnostics.push(diagnostic)
    }

    /// Make a new fixer for this context and return a mutable reference to it.
    pub fn fix(&mut self) -> &mut Fixer {
        let fixer = Fixer::new(self.src.clone());
        self.fixer = Some(fixer);
        self.fixer.as_mut().unwrap()
    }

    /// Create a context which is used to simply run a rule without needing to know about
    /// the resulting fixer, therefore the ctx's source is not a valid source.
    pub(crate) fn dummy_ctx() -> Self {
        Self {
            file_id: 0,
            verbose: false,
            diagnostics: vec![],
            fixer: None,
            src: Arc::new(String::new()),
        }
    }
}

/// The result of running a single rule on a syntax tree.
#[derive(Debug, Clone)]
pub struct RuleResult {
    pub diagnostics: Vec<Diagnostic>,
    pub fixer: Option<Fixer>,
}

impl RuleResult {
    /// Make a new rule result with diagnostics and an optional fixer.
    pub fn new(diagnostics: Vec<Diagnostic>, fixer: impl Into<Option<Fixer>>) -> Self {
        Self {
            diagnostics,
            fixer: fixer.into(),
        }
    }

    /// Get the result of running this rule.
    pub fn outcome(&self) -> Outcome {
        Outcome::from(&self.diagnostics)
    }

    /// Merge two results, this will join `self` and `other`'s diagnostics and take
    /// `self`'s fixer if available or otherwise take `other`'s fixer.
    pub fn merge(self, other: RuleResult) -> RuleResult {
        RuleResult {
            diagnostics: [self.diagnostics, other.diagnostics].concat(),
            fixer: self.fixer.or(other.fixer),
        }
    }

    /// Attempt to fix the issue if the rule can be autofixed.
    pub fn fix(&self) -> Option<String> {
        self.fixer.as_ref().map(|x| x.apply())
    }
}

/// The overall result of running a single rule or linting a file.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Outcome {
    /// Running the rule resulted in one or more errors.
    /// The rule result may have also included warnings or notes.
    Failure,
    /// Running the rule resulted in one or more warnings.
    /// May also include notes.
    Warning,
    /// Running the rule resulted in no errors or warnings.
    /// May include note diagnostics (which are very rare).
    Success,
}

impl<T> From<T> for Outcome
where
    T: IntoIterator,
    T::Item: Borrow<Diagnostic>,
{
    fn from(diagnostics: T) -> Self {
        let mut outcome = Outcome::Success;
        for diagnostic in diagnostics {
            match diagnostic.borrow().severity {
                Severity::Error => outcome = Outcome::Failure,
                Severity::Warning if outcome != Outcome::Failure => outcome = Outcome::Warning,
                _ => {}
            }
        }
        outcome
    }
}

impl Outcome {
    pub fn merge(outcomes: impl IntoIterator<Item = impl Borrow<Outcome>>) -> Outcome {
        let mut overall = Outcome::Success;
        for outcome in outcomes {
            match outcome.borrow() {
                Outcome::Failure => overall = Outcome::Failure,
                Outcome::Warning if overall != Outcome::Failure => overall = Outcome::Warning,
                _ => {}
            }
        }
        overall
    }
}

#[macro_export]
#[doc(hidden)]
macro_rules! __pre_parse_docs_from_meta {
    (
        @$cb:tt
        @[docs $($docs:tt)*]
        @$other:tt
        #[doc = $doc:expr]
        $($rest:tt)*
    ) => (
        $crate::__pre_parse_docs_from_meta! {
            @$cb
            @[docs $($docs)* $doc]
            @$other
            $($rest)*
        }
    );
    (
        @$cb:tt
        @$docs:tt
        @[others $($others:tt)*]
        #[$other:meta]
        $($rest:tt)*
    ) => (
        $crate::__pre_parse_docs_from_meta! {
            @$cb
            @$docs
            @[others $($others)* $other]
            $($rest)*
        }
    );
    (
        @[cb $($cb:tt)*]
        @[docs $($docs:tt)*]
        @[others $($others:tt)*]
        $($rest:tt)*
    ) => (
        $($cb)* ! {
            #[doc = concat!($(indoc::indoc!($docs), "\n"),*)]
            $(
                #[$others]
            )*
            $($rest)*
        }
    );
    (
        $(:: $(@ $colon:tt)?)? $($cb:ident)::+ ! {
            $($input:tt)*
        }
    ) => (
        $crate::__pre_parse_docs_from_meta! {
            @[cb $(:: $($colon)?)? $($cb)::+]
            @[docs ]
            @[others ]
            $($input)*
        }
    );
}

#[macro_export]
#[doc(hidden)]
macro_rules! __declare_lint_inner {
    (
        #[doc = $doc:expr]
        $(#[$outer:meta])*
        // The rule struct name
        $name:ident,
        $group:ident,
        // A unique kebab-case name for the rule
        $code:expr
        $(,
            // Any fields for the rule
            $(
                $(#[$inner:meta])*
                $visibility:vis $key:ident : $val:ty
            ),* $(,)?
        )?
    ) => {
        use $crate::Rule;
        use serde::{Deserialize, Serialize};

        #[doc = $doc]
        #[serde(rename_all = "camelCase")]
        $(#[$outer])*
        #[derive(Debug, Clone, Deserialize, Serialize)]
        pub struct $name {
            $(
                $(
                    $(#[$inner])*
                    pub $key: $val
                ),*
            )?
        }

        impl $name {
            pub fn new() -> Self {
                Self::default()
            }
        }

        impl Rule for $name {
            fn name(&self) -> &'static str {
                $code
            }

            fn group(&self) -> &'static str {
                stringify!($group)
            }

            fn docs(&self) -> &'static str {
                $doc
            }
        }
    };
}

/// A macro to easily generate rule boilerplate code.
///
/// ```ignore
/// declare_lint! {
///     /// A description of the rule here
///     /// This will be used as the doc for the rule struct
///     RuleName,
///     // The name of the group this rule belongs to.
///     groupname,
///     // Make sure this is kebab-case and unique.
///     "rule-name",
///     /// A description of the attribute here, used for config docs.
///     pub config_attr: u8,
///     pub another_attr: String
/// }
/// ```
///
/// # Rule name and docs
///
/// The macro's first argument is an identifier for the rule structure.
/// This should always be a PascalCase name. You will have to either derive Default for the struct
/// or implement it manually.
///
/// The macro also accepts any doc comments for the rule name. These comments
/// are then used by an xtask script to generate markdown files for user facing docs.
/// Each rule doc should include an `Incorrect Code Examples` header. It may also optionally
/// include a `Correct Code Examples` header. Do not include a `Config` header, it is autogenerated
/// from config field docs.
///
/// # Config
///
/// After the rule code, the macro accepts fields for the struct. Any field which is
/// public will be used for config, you can however disable this by using `#[serde(skip)]`.
/// Every public (config) field should have a doc comment, the doc comments will be used for
/// user facing documentation. Therefore try to be non-technical and non-Rust-specific with the doc comments.
/// **All config fields will be renamed to camelCase.**
///
/// This will generate a rule struct named `RuleName`,
/// and use the optional config attributes defined for the config of the rule.
/// You must make sure each config field is deserializable.
#[macro_export]
macro_rules! declare_lint {
    ($($input:tt)*) => {
        $crate::__pre_parse_docs_from_meta! {
            $crate::__declare_lint_inner! { $($input)* }
        }
    };
}
avg_line_length: 33.891626 | max_line_length: 153 | alphanum_fraction: 0.605596
hexsha: 1cfef2eee17feaab71f83fdc40ff3b1f44ca95f4 | size: 3,201
#![allow(clippy::module_inception)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::wrong_self_convention)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::blacklisted_name)]
#![allow(clippy::vec_init_then_push)]
#![allow(rustdoc::bare_urls)]
#![warn(missing_docs)]
//! <p>Amazon Managed Blockchain is a fully managed service for creating and managing blockchain networks using open-source frameworks. Blockchain allows you to build applications where multiple parties can securely and transparently run transactions and share data without the need for a trusted, central authority.</p>
//! <p>Managed Blockchain supports the Hyperledger Fabric and Ethereum open-source frameworks. Because of fundamental differences between the frameworks, some API actions or data types may only apply in the context of one framework and not the other. For example, actions related to Hyperledger Fabric network members such as <code>CreateMember</code> and <code>DeleteMember</code> do not apply to Ethereum.</p>
//! <p>The description for each action indicates the framework or frameworks to which it applies. Data types and properties that apply only in the context of a particular framework are similarly indicated.</p>
//!
//! # Crate Organization
//!
//! The entry point for most customers will be [`Client`]. [`Client`] exposes one method for each API offered
//! by the service.
//!
//! Some APIs require complex or nested arguments. These exist in [`model`].
//!
//! Lastly, errors that can be returned by the service are contained within [`error`]. [`Error`] defines a meta
//! error encompassing all possible errors that can be returned by the service.
//!
//! The other modules within this crate are not required for normal usage.
//!
//! # Examples

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use error_meta::Error;

pub use config::Config;

mod aws_endpoint;
/// Client and fluent builders for calling the service.
#[cfg(feature = "client")]
pub mod client;
/// Configuration for the service.
pub mod config;
/// Errors that can occur when calling the service.
pub mod error;
mod error_meta;
mod idempotency_token;
/// Input structures for operations.
pub mod input;
mod json_deser;
mod json_errors;
mod json_ser;
/// Data structures used by operation inputs/outputs.
pub mod model;
mod no_credentials;
/// All operations that this crate can perform.
pub mod operation;
mod operation_deser;
mod operation_ser;
/// Output structures for operations.
pub mod output;
/// Crate version number.
pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
pub use aws_smithy_http::byte_stream::ByteStream;
pub use aws_smithy_http::result::SdkError;
pub use aws_smithy_types::Blob;
pub use aws_smithy_types::DateTime;
static API_METADATA: aws_http::user_agent::ApiMetadata =
    aws_http::user_agent::ApiMetadata::new("managedblockchain", PKG_VERSION);
pub use aws_smithy_http::endpoint::Endpoint;
pub use aws_smithy_types::retry::RetryConfig;
pub use aws_types::app_name::AppName;
pub use aws_types::region::Region;
pub use aws_types::Credentials;
#[cfg(feature = "client")]
pub use client::Client;
avg_line_length: 43.849315 | max_line_length: 411 | alphanum_fraction: 0.768822
hexsha: ed68b198b84f7e44051fee6fa59cc53e8140da5f | size: 18,688
extern crate serde; extern crate rltk; use rltk::{Console, GameState, Rltk, Point}; extern crate specs; use specs::prelude::*; use specs::saveload::{SimpleMarker, SimpleMarkerAllocator}; #[macro_use] extern crate specs_derive; mod components; pub use components::*; mod map; pub use map::*; mod player; use player::*; mod rect; pub use rect::Rect; mod visibility_system; use visibility_system::VisibilitySystem; mod monster_ai_system; use monster_ai_system::MonsterAI; mod map_indexing_system; use map_indexing_system::MapIndexingSystem; mod melee_combat_system; use melee_combat_system::MeleeCombatSystem; mod damage_system; use damage_system::DamageSystem; mod gui; mod gamelog; mod spawner; mod inventory_system; use inventory_system::{ ItemCollectionSystem, ItemUseSystem, ItemDropSystem, ItemRemoveSystem }; pub mod saveload_system; pub mod random_table; pub mod particle_system; pub mod hunger_system; pub mod rex_assets; pub mod trigger_system; pub mod map_builders; const SHOW_MAPGEN_VISUALIZER : bool = false; #[derive(PartialEq, Copy, Clone)] pub enum RunState { AwaitingInput, PreRun, PlayerTurn, MonsterTurn, ShowInventory, ShowDropItem, ShowTargeting { range : i32, item : Entity}, MainMenu { menu_selection : gui::MainMenuSelection }, SaveGame, NextLevel, ShowRemoveItem, GameOver, MagicMapReveal { row : i32 }, MapGeneration } pub struct State { pub ecs: World, mapgen_next_state : Option<RunState>, mapgen_history : Vec<Map>, mapgen_index : usize, mapgen_timer : f32 } impl State { fn run_systems(&mut self) { let mut vis = VisibilitySystem{}; vis.run_now(&self.ecs); let mut mob = MonsterAI{}; mob.run_now(&self.ecs); let mut mapindex = MapIndexingSystem{}; mapindex.run_now(&self.ecs); let mut triggers = trigger_system::TriggerSystem{}; triggers.run_now(&self.ecs); let mut melee = MeleeCombatSystem{}; melee.run_now(&self.ecs); let mut damage = DamageSystem{}; damage.run_now(&self.ecs); let mut pickup = ItemCollectionSystem{}; pickup.run_now(&self.ecs); let mut itemuse = ItemUseSystem{}; itemuse.run_now(&self.ecs); let mut drop_items = ItemDropSystem{}; drop_items.run_now(&self.ecs); let mut item_remove = ItemRemoveSystem{}; item_remove.run_now(&self.ecs); let mut hunger = hunger_system::HungerSystem{}; hunger.run_now(&self.ecs); let mut particles = particle_system::ParticleSpawnSystem{}; particles.run_now(&self.ecs); self.ecs.maintain(); } } impl GameState for State { fn tick(&mut self, ctx : &mut Rltk) { let mut newrunstate; { let runstate = self.ecs.fetch::<RunState>(); newrunstate = *runstate; } ctx.cls(); particle_system::cull_dead_particles(&mut self.ecs, ctx); match newrunstate { RunState::MainMenu{..} => {} RunState::GameOver{..} => {} _ => { draw_map(&self.ecs.fetch::<Map>(), ctx); let positions = self.ecs.read_storage::<Position>(); let renderables = self.ecs.read_storage::<Renderable>(); let hidden = self.ecs.read_storage::<Hidden>(); let map = self.ecs.fetch::<Map>(); let mut data = (&positions, &renderables, !&hidden).join().collect::<Vec<_>>(); data.sort_by(|&a, &b| b.1.render_order.cmp(&a.1.render_order) ); for (pos, render, _hidden) in data.iter() { let idx = map.xy_idx(pos.x, pos.y); if map.visible_tiles[idx] { ctx.set(pos.x, pos.y, render.fg, render.bg, render.glyph) } } gui::draw_ui(&self.ecs, ctx); } } match newrunstate { RunState::MapGeneration => { if !SHOW_MAPGEN_VISUALIZER { newrunstate = self.mapgen_next_state.unwrap(); } else { ctx.cls(); draw_map(&self.mapgen_history[self.mapgen_index], ctx); self.mapgen_timer += ctx.frame_time_ms; if self.mapgen_timer > 200.0 { 
self.mapgen_timer = 0.0; self.mapgen_index += 1; if self.mapgen_index >= self.mapgen_history.len() { //self.mapgen_index -= 1; newrunstate = self.mapgen_next_state.unwrap(); } } } } RunState::PreRun => { self.run_systems(); self.ecs.maintain(); newrunstate = RunState::AwaitingInput; } RunState::AwaitingInput => { newrunstate = player_input(self, ctx); } RunState::PlayerTurn => { self.run_systems(); self.ecs.maintain(); match *self.ecs.fetch::<RunState>() { RunState::MagicMapReveal{ .. } => newrunstate = RunState::MagicMapReveal{ row: 0 }, _ => newrunstate = RunState::MonsterTurn } } RunState::MonsterTurn => { self.run_systems(); self.ecs.maintain(); newrunstate = RunState::AwaitingInput; } RunState::ShowInventory => { let result = gui::show_inventory(self, ctx); match result.0 { gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput, gui::ItemMenuResult::NoResponse => {} gui::ItemMenuResult::Selected => { let item_entity = result.1.unwrap(); let is_ranged = self.ecs.read_storage::<Ranged>(); let is_item_ranged = is_ranged.get(item_entity); if let Some(is_item_ranged) = is_item_ranged { newrunstate = RunState::ShowTargeting{ range: is_item_ranged.range, item: item_entity }; } else { let mut intent = self.ecs.write_storage::<WantsToUseItem>(); intent.insert(*self.ecs.fetch::<Entity>(), WantsToUseItem{ item: item_entity, target: None }).expect("Unable to insert intent"); newrunstate = RunState::PlayerTurn; } } } } RunState::ShowDropItem => { let result = gui::drop_item_menu(self, ctx); match result.0 { gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput, gui::ItemMenuResult::NoResponse => {} gui::ItemMenuResult::Selected => { let item_entity = result.1.unwrap(); let mut intent = self.ecs.write_storage::<WantsToDropItem>(); intent.insert(*self.ecs.fetch::<Entity>(), WantsToDropItem{ item: item_entity }).expect("Unable to insert intent"); newrunstate = RunState::PlayerTurn; } } } RunState::ShowRemoveItem => { let result = gui::remove_item_menu(self, ctx); match result.0 { gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput, gui::ItemMenuResult::NoResponse => {} gui::ItemMenuResult::Selected => { let item_entity = result.1.unwrap(); let mut intent = self.ecs.write_storage::<WantsToRemoveItem>(); intent.insert(*self.ecs.fetch::<Entity>(), WantsToRemoveItem{ item: item_entity }).expect("Unable to insert intent"); newrunstate = RunState::PlayerTurn; } } } RunState::ShowTargeting{range, item} => { let result = gui::ranged_target(self, ctx, range); match result.0 { gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput, gui::ItemMenuResult::NoResponse => {} gui::ItemMenuResult::Selected => { let mut intent = self.ecs.write_storage::<WantsToUseItem>(); intent.insert(*self.ecs.fetch::<Entity>(), WantsToUseItem{ item, target: result.1 }).expect("Unable to insert intent"); newrunstate = RunState::PlayerTurn; } } } RunState::MainMenu{ .. 
} => { let result = gui::main_menu(self, ctx); match result { gui::MainMenuResult::NoSelection{ selected } => newrunstate = RunState::MainMenu{ menu_selection: selected }, gui::MainMenuResult::Selected{ selected } => { match selected { gui::MainMenuSelection::NewGame => newrunstate = RunState::PreRun, gui::MainMenuSelection::LoadGame => { saveload_system::load_game(&mut self.ecs); newrunstate = RunState::AwaitingInput; saveload_system::delete_save(); } gui::MainMenuSelection::Quit => { ::std::process::exit(0); } } } } } RunState::GameOver => { let result = gui::game_over(ctx); match result { gui::GameOverResult::NoSelection => {} gui::GameOverResult::QuitToMenu => { self.game_over_cleanup(); newrunstate = RunState::MapGeneration; self.mapgen_next_state = Some(RunState::MainMenu{ menu_selection: gui::MainMenuSelection::NewGame }); } } } RunState::SaveGame => { saveload_system::save_game(&mut self.ecs); newrunstate = RunState::MainMenu{ menu_selection : gui::MainMenuSelection::LoadGame }; } RunState::NextLevel => { self.goto_next_level(); self.mapgen_next_state = Some(RunState::PreRun); newrunstate = RunState::MapGeneration; } RunState::MagicMapReveal{row} => { let mut map = self.ecs.fetch_mut::<Map>(); for x in 0..MAPWIDTH { let idx = map.xy_idx(x as i32,row); map.revealed_tiles[idx] = true; } if row as usize == MAPHEIGHT-1 { newrunstate = RunState::MonsterTurn; } else { newrunstate = RunState::MagicMapReveal{ row: row+1 }; } } } { let mut runwriter = self.ecs.write_resource::<RunState>(); *runwriter = newrunstate; } damage_system::delete_the_dead(&mut self.ecs); } } impl State { fn entities_to_remove_on_level_change(&mut self) -> Vec<Entity> { let entities = self.ecs.entities(); let player = self.ecs.read_storage::<Player>(); let backpack = self.ecs.read_storage::<InBackpack>(); let player_entity = self.ecs.fetch::<Entity>(); let equipped = self.ecs.read_storage::<Equipped>(); let mut to_delete : Vec<Entity> = Vec::new(); for entity in entities.join() { let mut should_delete = true; // Don't delete the player let p = player.get(entity); if let Some(_p) = p { should_delete = false; } // Don't delete the player's equipment let bp = backpack.get(entity); if let Some(bp) = bp { if bp.owner == *player_entity { should_delete = false; } } let eq = equipped.get(entity); if let Some(eq) = eq { if eq.owner == *player_entity { should_delete = false; } } if should_delete { to_delete.push(entity); } } to_delete } fn goto_next_level(&mut self) { // Delete entities that aren't the player or his/her equipment let to_delete = self.entities_to_remove_on_level_change(); for target in to_delete { self.ecs.delete_entity(target).expect("Unable to delete entity"); } // Build a new map and place the player let current_depth; { let worldmap_resource = self.ecs.fetch::<Map>(); current_depth = worldmap_resource.depth; } self.generate_world_map(current_depth + 1); // Notify the player and give them some health let player_entity = self.ecs.fetch::<Entity>(); let mut gamelog = self.ecs.fetch_mut::<gamelog::GameLog>(); gamelog.entries.push("You descend to the next level, and take a moment to heal.".to_string()); let mut player_health_store = self.ecs.write_storage::<CombatStats>(); let player_health = player_health_store.get_mut(*player_entity); if let Some(player_health) = player_health { player_health.hp = i32::max(player_health.hp, player_health.max_hp / 2); } } fn game_over_cleanup(&mut self) { // Delete everything let mut to_delete = Vec::new(); for e in self.ecs.entities().join() { to_delete.push(e); } for del in 
to_delete.iter() { self.ecs.delete_entity(*del).expect("Deletion failed"); } // Spawn a new player { let player_entity = spawner::player(&mut self.ecs, 0, 0); let mut player_entity_writer = self.ecs.write_resource::<Entity>(); *player_entity_writer = player_entity; } // Build a new map and place the player self.generate_world_map(1); } fn generate_world_map(&mut self, new_depth : i32) { self.mapgen_index = 0; self.mapgen_timer = 0.0; self.mapgen_history.clear(); let mut rng = self.ecs.write_resource::<rltk::RandomNumberGenerator>(); let mut builder = map_builders::random_builder(new_depth, &mut rng); builder.build_map(&mut rng); self.mapgen_history = builder.build_data.history.clone(); let player_start; { let mut worldmap_resource = self.ecs.write_resource::<Map>(); *worldmap_resource = builder.build_data.map.clone(); player_start = builder.build_data.starting_position.as_mut().unwrap().clone(); } // Spawn bad guys std::mem::drop(rng); builder.spawn_entities(&mut self.ecs); // Place the player and update resources let (player_x, player_y) = (player_start.x, player_start.y); let mut player_position = self.ecs.write_resource::<Point>(); *player_position = Point::new(player_x, player_y); let mut position_components = self.ecs.write_storage::<Position>(); let player_entity = self.ecs.fetch::<Entity>(); let player_pos_comp = position_components.get_mut(*player_entity); if let Some(player_pos_comp) = player_pos_comp { player_pos_comp.x = player_x; player_pos_comp.y = player_y; } // Mark the player's visibility as dirty let mut viewshed_components = self.ecs.write_storage::<Viewshed>(); let vs = viewshed_components.get_mut(*player_entity); if let Some(vs) = vs { vs.dirty = true; } } } fn main() { use rltk::RltkBuilder; let mut context = RltkBuilder::simple80x50() .with_title("Roguelike Tutorial") .build(); context.with_post_scanlines(true); let mut gs = State { ecs: World::new(), mapgen_next_state : Some(RunState::MainMenu{ menu_selection: gui::MainMenuSelection::NewGame }), mapgen_index : 0, mapgen_history: Vec::new(), mapgen_timer: 0.0 }; gs.ecs.register::<Position>(); gs.ecs.register::<Renderable>(); gs.ecs.register::<Player>(); gs.ecs.register::<Viewshed>(); gs.ecs.register::<Monster>(); gs.ecs.register::<Name>(); gs.ecs.register::<BlocksTile>(); gs.ecs.register::<CombatStats>(); gs.ecs.register::<WantsToMelee>(); gs.ecs.register::<SufferDamage>(); gs.ecs.register::<Item>(); gs.ecs.register::<ProvidesHealing>(); gs.ecs.register::<InflictsDamage>(); gs.ecs.register::<AreaOfEffect>(); gs.ecs.register::<Consumable>(); gs.ecs.register::<Ranged>(); gs.ecs.register::<InBackpack>(); gs.ecs.register::<WantsToPickupItem>(); gs.ecs.register::<WantsToUseItem>(); gs.ecs.register::<WantsToDropItem>(); gs.ecs.register::<Confusion>(); gs.ecs.register::<SimpleMarker<SerializeMe>>(); gs.ecs.register::<SerializationHelper>(); gs.ecs.register::<Equippable>(); gs.ecs.register::<Equipped>(); gs.ecs.register::<MeleePowerBonus>(); gs.ecs.register::<DefenseBonus>(); gs.ecs.register::<WantsToRemoveItem>(); gs.ecs.register::<ParticleLifetime>(); gs.ecs.register::<HungerClock>(); gs.ecs.register::<ProvidesFood>(); gs.ecs.register::<MagicMapper>(); gs.ecs.register::<Hidden>(); gs.ecs.register::<EntryTrigger>(); gs.ecs.register::<EntityMoved>(); gs.ecs.register::<SingleActivation>(); gs.ecs.register::<BlocksVisibility>(); gs.ecs.register::<Door>(); gs.ecs.insert(SimpleMarkerAllocator::<SerializeMe>::new()); gs.ecs.insert(Map::new(1)); gs.ecs.insert(Point::new(0, 0)); gs.ecs.insert(rltk::RandomNumberGenerator::new()); let 
player_entity = spawner::player(&mut gs.ecs, 0, 0); gs.ecs.insert(player_entity); gs.ecs.insert(RunState::MapGeneration{} ); gs.ecs.insert(gamelog::GameLog{ entries : vec!["Welcome to Rusty Roguelike".to_string()] }); gs.ecs.insert(particle_system::ParticleBuilder::new()); gs.ecs.insert(rex_assets::RexAssets::new()); gs.generate_world_map(1); rltk::main_loop(context, gs); }
avg_line_length: 38.691511 | max_line_length: 156 | alphanum_fraction: 0.552012
hexsha: 38687c7900268eaf889979a4f49c0a3669d6b03a | size: 950
use super::{identification, producer::Control, protocol, scope::AnonymousScope, Context};
use crate::stat::Alias;
use crate::test::samples;
use clibri::server;
use std::str::FromStr;
use uuid::Uuid;

type BroadcastStructA = (Vec<Uuid>, protocol::StructA);
type BroadcastStructB = (Vec<Uuid>, protocol::StructB);

#[allow(unused_variables)]
pub async fn emit<E: server::Error, C: server::Control<E>>(
    event: protocol::Events::EventA,
    scope: &mut AnonymousScope<'_, E, C>,
) -> Result<(BroadcastStructA, BroadcastStructB), String> {
    let uuid = match Uuid::from_str(&event.uuid) {
        Ok(uuid) => uuid,
        Err(err) => {
            return Err(format!("Failed to parse uuid {}: {:?}", event.uuid, err));
        }
    };
    scope.context.inc_stat(uuid, Alias::StructA);
    scope.context.inc_stat(uuid, Alias::StructB);
    Ok((
        (vec![uuid], samples::struct_a::get()),
        (vec![uuid], samples::struct_b::get()),
    ))
}
avg_line_length: 32.758621 | max_line_length: 89 | alphanum_fraction: 0.635789
hexsha: ac383dfb4f4993c83f5f985e214a8f4cb3aa4258 | size: 411
pub mod activity;
pub mod alert;
pub mod app;
pub mod completion;
pub mod editor;
pub mod explorer;
pub mod find;
pub mod hover;
pub mod keymap;
mod logging;
pub mod palette;
pub mod panel;
pub mod picker;
pub mod plugin;
pub mod problem;
pub mod scroll;
pub mod search;
pub mod settings;
pub mod source_control;
pub mod split;
pub mod status;
mod svg;
mod tab;
pub mod terminal;
pub mod title;
pub mod window;
avg_line_length: 15.222222 | max_line_length: 23 | alphanum_fraction: 0.754258
hexsha: ebe3fd86bd134d594f5ef6f9211f7d0361234ac0 | size: 2,138
use num_derive::{FromPrimitive, ToPrimitive};
use snafu::Snafu;
use solana_sdk::decode_error::DecodeError;

/// Reasons the evm execution can fail.
#[derive(Debug, Clone, PartialEq, FromPrimitive, ToPrimitive, Snafu)]
pub enum EvmError {
    #[snafu(display("Cross-Program EVM execution disabled."))]
    CrossExecutionNotEnabled,

    #[snafu(display("InvokeContext didn't provide evm executor."))]
    EvmExecutorNotFound,

    #[snafu(display("Recursive cross-program EVM execution disabled."))]
    RecursiveCrossExecution,

    #[snafu(display("Internal executor error."))]
    InternalExecutorError,

    #[snafu(display("Internal transaction error."))]
    InternalTransactionError,

    #[snafu(display("Instruction expects an additional account as an argument."))]
    MissingAccount,

    #[snafu(display("Instruction expects some account to be a signer."))]
    MissingRequiredSignature,

    #[snafu(display("Authorized transaction EVM address should be calculated from sender address using evm_address_for_program."))]
    AuthorizedTransactionIncorrectAddress,

    #[snafu(display("Wrong AuthorizedTx account owner."))]
    AuthorizedTransactionIncorrectOwner,

    #[snafu(display("Cannot free ownership of an account that EVM didn't own."))]
    FreeNotEvmAccount,

    #[snafu(display("Cannot process swap, sender does not have enough tokens."))]
    SwapInsufficient,

    #[snafu(display("Internal error: Cannot borrow some of the accounts."))]
    BorrowingFailed,

    #[snafu(display("Failed to allocate space in storage account."))]
    AllocateStorageFailed,

    #[snafu(display("Failed to write data into storage account."))]
    WriteStorageFailed,

    #[snafu(display("Failed to deserialize data from account."))]
    DeserializationError,

    #[snafu(display("EVM transaction was reverted."))]
    RevertTransaction,

    #[snafu(display("This instruction is not supported yet."))]
    InstructionNotSupportedYet,

    #[snafu(display("This instruction causes an overflow in fee refund calculation."))]
    OverflowInRefund,
}

impl<E> DecodeError<E> for EvmError {
    fn type_of() -> &'static str {
        "EvmError"
    }
}
avg_line_length: 30.985507 | max_line_length: 131 | alphanum_fraction: 0.718896
hexsha: 5d96357610c9de4567981909e67b18fe7942884d | size: 11,966
use rand::Rng; use rust_hdl::core::prelude::*; use rust_hdl::hls::prelude::*; use rust_hdl::widgets::prelude::*; #[derive(LogicBlock, Default)] struct ControllerTest { to_cpu: FIFOReadController<Bits<16>>, from_cpu: FIFOWriteController<Bits<16>>, to_cpu_fifo: SyncFIFO<Bits<16>, 6, 7, 1>, from_cpu_fifo: SyncFIFO<Bits<16>, 6, 7, 1>, controller: BaseController<2>, bridge: Bridge<16, 2, 2>, port: MOSIPort<16>, iport: MISOPort<16>, clock: Signal<In, Clock>, } impl Logic for ControllerTest { #[hdl_gen] fn update(&mut self) { // Connect the clocks self.to_cpu_fifo.clock.next = self.clock.val(); self.from_cpu_fifo.clock.next = self.clock.val(); // Connect the test interfaces self.from_cpu.join(&mut self.from_cpu_fifo.bus_write); self.from_cpu_fifo .bus_read .join(&mut self.controller.from_cpu); self.to_cpu.join(&mut self.to_cpu_fifo.bus_read); self.to_cpu_fifo.bus_write.join(&mut self.controller.to_cpu); self.controller.clock.next = self.clock.val(); // Connect the controller to the bridge self.controller.bus.join(&mut self.bridge.upstream); // Connect the MOSI port to node 0 of the bridge self.bridge.nodes[0].join(&mut self.port.bus); self.bridge.nodes[1].join(&mut self.iport.bus); self.port.ready.next = true; } } #[cfg(test)] fn make_controller_test() -> ControllerTest { let mut uut = ControllerTest::default(); uut.clock.connect(); uut.from_cpu.data.connect(); uut.from_cpu.write.connect(); uut.to_cpu.read.connect(); uut.iport.port_in.connect(); uut.iport.ready_in.connect(); uut.connect_all(); uut } #[test] fn test_controller_test_synthesizes() { let uut = make_controller_test(); let vlog = generate_verilog(&uut); yosys_validate("controller", &vlog).unwrap(); } #[test] fn test_ping_works() { let uut = make_controller_test(); let mut sim = Simulation::new(); sim.add_clock(5, |x: &mut Box<ControllerTest>| { x.clock.next = !x.clock.val() }); sim.add_testbench(move |mut sim: Sim<ControllerTest>| { let mut x = sim.init()?; // Send a PING command wait_clock_true!(sim, clock, x); for iter in 0..10 { wait_clock_cycles!(sim, clock, x, 5); // A ping is 0x01XX, where XX is the code returned by the controller x.from_cpu.data.next = (0x0167_u16 + iter).into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; wait_clock_cycles!(sim, clock, x, 5); // Insert a NOOP x.from_cpu.data.next = 0_u16.into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; wait_clock_cycles!(sim, clock, x, 5); } sim.done(x) }); sim.add_testbench(move |mut sim: Sim<ControllerTest>| { let mut x = sim.init()?; wait_clock_true!(sim, clock, x); for iter in 0..10 { x = sim.watch(|x| !x.to_cpu.empty.val(), x)?; sim_assert!(sim, x.to_cpu.data.val() == (0x0167_u16 + iter), x); x.to_cpu.read.next = true; wait_clock_cycle!(sim, clock, x); x.to_cpu.read.next = false; } sim.done(x) }); sim.run_traced( Box::new(uut), 5000, std::fs::File::create(vcd_path!("controller_ping.vcd")).unwrap(), ) .unwrap(); } #[test] fn test_write_command_works() { let uut = make_controller_test(); let mut sim = Simulation::new(); sim.add_clock(5, |x: &mut Box<ControllerTest>| { x.clock.next = !x.clock.val() }); sim.add_testbench(move |mut sim: Sim<ControllerTest>| { let mut x = sim.init()?; // Send a PING command wait_clock_true!(sim, clock, x); for iter in 0..10 { wait_clock_cycles!(sim, clock, x, 5); // A write command looks like 0x03XXYYYY, where XX is the address, YYYY is the count // followed by count data elements. 
// Write the command x = sim.watch(|x| !x.from_cpu.full.val(), x)?; x.from_cpu.data.next = 0x0300_u16.into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; // Then the count x = sim.watch(|x| !x.from_cpu.full.val(), x)?; x.from_cpu.data.next = (iter + 1).into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; // Then the data elements for ndx in 0..(iter + 1) { x = sim.watch(|x| !x.from_cpu.full.val(), x)?; x.from_cpu.data.next = (0x7870_u16 + ndx).into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; } // Insert a NOOPd x = sim.watch(|x| !x.from_cpu.full.val(), x)?; x.from_cpu.data.next = 0_u16.into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; wait_clock_cycles!(sim, clock, x, 5); } sim.done(x) }); sim.add_testbench(move |mut sim: Sim<ControllerTest>| { let mut x = sim.init()?; wait_clock_true!(sim, clock, x); for iter in 0..10 { for ndx in 0..(iter + 1) { x = sim.watch(|x| x.port.strobe_out.val(), x)?; sim_assert!(sim, x.port.port_out.val() == (0x7870_u32 + ndx), x); wait_clock_cycle!(sim, clock, x); } } sim.done(x) }); sim.run_traced( Box::new(uut), 5000, std::fs::File::create(vcd_path!("controller_write.vcd")).unwrap(), ) .unwrap(); } #[test] fn test_read_command_works() { let uut = make_controller_test(); let mut sim = Simulation::new(); sim.add_clock(5, |x: &mut Box<ControllerTest>| { x.clock.next = !x.clock.val() }); sim.add_testbench(move |mut sim: Sim<ControllerTest>| { let mut x = sim.init()?; // Send a PING command wait_clock_true!(sim, clock, x); for iter in 0..10 { wait_clock_cycles!(sim, clock, x, 5); // A read command looks like 0x02XXYYYY, where XX is the address, YYYY is the count // Write the command x = sim.watch(|x| !x.from_cpu.full.val(), x)?; x.from_cpu.data.next = 0x0201_u16.into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; // Then the count x = sim.watch(|x| !x.from_cpu.full.val(), x)?; x.from_cpu.data.next = (iter + 1).into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; // Then wait for the data elements to come back to the CPU for ndx in 0..(iter + 1) { x = sim.watch(|x| !x.to_cpu.empty.val(), x)?; sim_assert!(sim, x.to_cpu.data.val() == 0xBEE0_u16 + ndx, x); x.to_cpu.read.next = true; wait_clock_cycle!(sim, clock, x); x.to_cpu.read.next = false; } // Wait 1 clock cycle, and then issue a POLL command wait_clock_cycle!(sim, clock, x); x.from_cpu.data.next = 0x0401_u16.into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; // Read the result of the poll back x = sim.watch(|x| !x.to_cpu.empty.val(), x)?; // Port should always be ready sim_assert!(sim, x.to_cpu.data.val() == 0xFF01_u16, x); x.to_cpu.read.next = true; wait_clock_cycle!(sim, clock, x); x.to_cpu.read.next = false; wait_clock_cycles!(sim, clock, x, 5); } sim.done(x) }); sim.add_testbench(move |mut sim: Sim<ControllerTest>| { let mut x = sim.init()?; wait_clock_true!(sim, clock, x); for iter in 0..10 { wait_clock_cycles!(sim, clock, x, 10); for ndx in 0..(iter + 1) { x.iport.port_in.next = (0xBEE0_u16 + ndx).into(); x.iport.ready_in.next = true; x = sim.watch(|x| x.iport.strobe_out.val(), x)?; wait_clock_cycle!(sim, clock, x); } } sim.done(x) }); sim.run_traced( Box::new(uut), 20000, std::fs::File::create(vcd_path!("controller_read.vcd")).unwrap(), ) .unwrap(); } 
#[test] fn test_stream_command_works() { let uut = make_controller_test(); let mut sim = Simulation::new(); sim.add_clock(5, |x: &mut Box<ControllerTest>| { x.clock.next = !x.clock.val() }); sim.add_testbench(move |mut sim: Sim<ControllerTest>| { let mut x = sim.init()?; // Send a PING command wait_clock_true!(sim, clock, x); wait_clock_cycles!(sim, clock, x, 5); // A stream command looks like 0x05XX, where XX is the address to stream from // Write the command x = sim.watch(|x| !x.from_cpu.full.val(), x)?; x.from_cpu.data.next = 0x0501_u16.into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; // Wait until we have collected 100 items for iter in 0..100 { x = sim.watch(|x| !x.to_cpu.empty.val(), x)?; sim_assert!(sim, x.to_cpu.data.val() == 0xBAB0_u16 + iter, x); x.to_cpu.read.next = true; wait_clock_cycle!(sim, clock, x); x.to_cpu.read.next = false; } // Send a stop command (anything non-zero) x = sim.watch(|x| !x.from_cpu.full.val(), x)?; x.from_cpu.data.next = 0x0501_u16.into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; // There may be extra data that comes, so discard data until the // CPU fifo is empty... while !x.to_cpu.empty.val() { x.to_cpu.read.next = true; wait_clock_cycle!(sim, clock, x); x.to_cpu.read.next = false; } // Send a ping x = sim.watch(|x| !x.from_cpu.full.val(), x)?; x.from_cpu.data.next = 0x01FF_u16.into(); x.from_cpu.write.next = true; wait_clock_cycle!(sim, clock, x); x.from_cpu.write.next = false; // Wait for it to return x = sim.watch(|x| !x.to_cpu.empty.val(), x)?; sim_assert!(sim, x.to_cpu.data.val() == 0x01FF_u16, x); wait_clock_cycles!(sim, clock, x, 10); sim.done(x) }); sim.add_testbench(move |mut sim: Sim<ControllerTest>| { let mut x = sim.init()?; wait_clock_true!(sim, clock, x); for ndx in 0..100 { x.iport.port_in.next = (0xBAB0_u16 + ndx).into(); x.iport.ready_in.next = true; x = sim.watch(|x| x.iport.strobe_out.val(), x)?; wait_clock_cycle!(sim, clock, x); x.iport.ready_in.next = false; if rand::thread_rng().gen::<f64>() < 0.3 { for _ in 0..(rand::thread_rng().gen::<u8>() % 40) { wait_clock_cycle!(sim, clock, x); } } } sim.done(x) }); sim.run_traced( Box::new(uut), 50000, std::fs::File::create(vcd_path!("controller_stream.vcd")).unwrap(), ) .unwrap(); }
avg_line_length: 36.932099 | max_line_length: 96 | alphanum_fraction: 0.544209
hexsha: 1a397e3540733e6be43879128d695f2775e83620 | size: 3,198
//
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

//! gRPC server for Oak Functions.

use crate::{
    attestation::AttestationServer,
    logger::Logger,
    lookup::LookupData,
    proto::streaming_session_server::StreamingSessionServer,
    server::{apply_policy, BoxedExtensionFactory, WasmHandler},
};
use anyhow::Context;
use log::Level;
use oak_functions_abi::proto::{ConfigurationInfo, Request, ServerPolicy};
use prost::Message;
use std::{future::Future, net::SocketAddr, sync::Arc};

async fn handle_request(
    wasm_handler: WasmHandler,
    policy: ServerPolicy,
    request: Request,
) -> anyhow::Result<Vec<u8>> {
    let function = async move || wasm_handler.clone().handle_invoke(request).await;
    let policy = policy.clone();
    let response = apply_policy(policy, function)
        .await
        .context("internal error")?;
    let mut bytes = vec![];
    response
        .encode(&mut bytes)
        .context("couldn't encode response")?;
    Ok(bytes)
}

/// Creates a [`WasmHandler`] with the given Wasm module, lookup data, metrics aggregator, and
/// extensions.
pub fn create_wasm_handler(
    wasm_module_bytes: &[u8],
    lookup_data: Arc<LookupData>,
    extensions: Vec<BoxedExtensionFactory>,
    logger: Logger,
) -> anyhow::Result<WasmHandler> {
    let wasm_handler = WasmHandler::create(wasm_module_bytes, lookup_data, extensions, logger)?;
    Ok(wasm_handler)
}

/// Starts a gRPC server on the given address, serving the `main` function from the given
/// [`WasmHandler`].
#[allow(clippy::too_many_arguments)]
pub async fn create_and_start_grpc_server<F: Future<Output = ()>>(
    address: &SocketAddr,
    wasm_handler: WasmHandler,
    tee_certificate: Vec<u8>,
    policy: ServerPolicy,
    config_info: ConfigurationInfo,
    terminate: F,
    logger: Logger,
) -> anyhow::Result<()> {
    logger.log_public(
        Level::Info,
        &format!(
            "{:?}: Starting gRPC server on {:?}",
            std::thread::current().id(),
            &address
        ),
    );

    let request_handler =
        async move |request| handle_request(wasm_handler, policy.clone(), request).await;

    // A `Service` is needed for every connection. Here we create a service using the
    // `wasm_handler`.
    tonic::transport::Server::builder()
        .add_service(StreamingSessionServer::new(
            AttestationServer::create(tee_certificate, request_handler, config_info, logger)
                .context("Couldn't create remote attestation server")?,
        ))
        .serve_with_shutdown(*address, terminate)
        .await
        .context("Couldn't start server")?;

    Ok(())
}
avg_line_length: 32.30303 | max_line_length: 96 | alphanum_fraction: 0.678549
hexsha: fed4829e76a2c6f014b06fb8078419d1d527e89b | size: 2,535
use rand::prelude::*;

use crate::math::Point;

/// Perlin noise generator.
pub struct Perlin {
    rand_f: [f64; Self::POINT_COUNT],
    perm_x: [usize; Self::POINT_COUNT],
    perm_y: [usize; Self::POINT_COUNT],
    perm_z: [usize; Self::POINT_COUNT],
}

impl Perlin {
    const POINT_COUNT: usize = 256;

    pub fn new() -> Self {
        let rng = &mut rand::thread_rng();

        let rand_f = {
            let mut rand_f = [0.0; Self::POINT_COUNT];
            for f in rand_f.iter_mut() {
                *f = rng.gen();
            }
            rand_f
        };

        let perm_x = Self::gen_perm(rng);
        let perm_y = Self::gen_perm(rng);
        let perm_z = Self::gen_perm(rng);

        Self {
            rand_f,
            perm_x,
            perm_y,
            perm_z,
        }
    }

    pub fn noise(&self, p: &Point) -> f64 {
        let u = p.x - p.x.floor();
        let v = p.y - p.y.floor();
        let w = p.z - p.z.floor();

        let i = p.x.floor() as usize;
        let j = p.y.floor() as usize;
        let k = p.z.floor() as usize;

        let mut c = [[[0.0; 2]; 2]; 2];
        #[allow(clippy::needless_range_loop)]
        for di in 0..2 {
            for dj in 0..2 {
                for dk in 0..2 {
                    c[di][dj][dk] = self.rand_f[self.perm_x[(i + di) & 255]
                        ^ self.perm_y[(j + dj) & 255]
                        ^ self.perm_z[(k + dk) & 255]];
                }
            }
        }

        Self::interpolate(c, u, v, w)
    }

    fn gen_perm(rng: &mut ThreadRng) -> [usize; Self::POINT_COUNT] {
        let mut p = [0; Self::POINT_COUNT];
        for (i, n) in p.iter_mut().enumerate() {
            *n = i;
        }
        for i in (1..p.len()).rev() {
            let target = rng.gen_range(0..i);
            p.swap(i, target);
        }
        p
    }

    fn interpolate(c: [[[f64; 2]; 2]; 2], u: f64, v: f64, w: f64) -> f64 {
        let mut acc = 0.0;
        #[allow(clippy::needless_range_loop)]
        for i in 0..2 {
            for j in 0..2 {
                for k in 0..2 {
                    acc += (i as f64).mul_add(u, (1.0 - i as f64) * (1.0 - u))
                        * (j as f64).mul_add(v, (1.0 - j as f64) * (1.0 - v))
                        * (k as f64).mul_add(w, (1.0 - k as f64) * (1.0 - w))
                        * c[i][j][k];
                }
            }
        }
        acc
    }
}

impl Default for Perlin {
    fn default() -> Self {
        Self::new()
    }
}
avg_line_length: 24.852941 | max_line_length: 89 | alphanum_fraction: 0.413018
hexsha: 8f156bd8d4b4fef589ea307a817946182b6468f9 | size: 2,101
#[doc = "Register `FsinEXTCFG` reader"]
pub struct R(crate::R<FSINEXTCFG_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<FSINEXTCFG_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<crate::R<FSINEXTCFG_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<FSINEXTCFG_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `FsinEXTCFG` writer"]
pub struct W(crate::W<FSINEXTCFG_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<FSINEXTCFG_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl From<crate::W<FSINEXTCFG_SPEC>> for W {
    #[inline(always)]
    fn from(writer: crate::W<FSINEXTCFG_SPEC>) -> Self {
        W(writer)
    }
}
impl W {
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "ASRC Input Sample Pulse Extend Configuration Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [fsin_extcfg](index.html) module"]
pub struct FSINEXTCFG_SPEC;
impl crate::RegisterSpec for FSINEXTCFG_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [fsin_extcfg::R](R) reader structure"]
impl crate::Readable for FSINEXTCFG_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [fsin_extcfg::W](W) writer structure"]
impl crate::Writable for FSINEXTCFG_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets FsinEXTCFG to value 0"]
impl crate::Resettable for FSINEXTCFG_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
avg_line_length: 32.323077 | max_line_length: 445 | alphanum_fraction: 0.63446
hexsha: f7e3fb1ab02362068baa0c69d530e87a99ecaff4 | size: 4,487
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::IFLAG2 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `BUFHI`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BUFHIR { #[doc = "The corresponding buffer has no occurrence of successfully completed transmission or reception."] _0, #[doc = "The corresponding buffer has successfully completed transmission or reception."] _1, #[doc = r" Reserved"] _Reserved(u32), } impl BUFHIR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u32 { match *self { BUFHIR::_0 => 0, BUFHIR::_1 => 1, BUFHIR::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u32) -> BUFHIR { match value { 0 => BUFHIR::_0, 1 => BUFHIR::_1, i => BUFHIR::_Reserved(i), } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == BUFHIR::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == BUFHIR::_1 } } #[doc = "Values that can be written to the field `BUFHI`"] pub enum BUFHIW { #[doc = "The corresponding buffer has no occurrence of successfully completed transmission or reception."] _0, #[doc = "The corresponding buffer has successfully completed transmission or reception."] _1, } impl BUFHIW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u32 { match *self { BUFHIW::_0 => 0, BUFHIW::_1 => 1, } } } #[doc = r" Proxy"] pub struct _BUFHIW<'a> { w: &'a mut W, } impl<'a> _BUFHIW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: BUFHIW) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = "The corresponding buffer has no occurrence of successfully completed transmission or reception."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(BUFHIW::_0) } #[doc = "The corresponding buffer has successfully completed transmission or reception."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(BUFHIW::_1) } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:31 - Buffer MBi Interrupt"] #[inline] pub fn bufhi(&self) -> BUFHIR { BUFHIR::_from({ const MASK: u32 = 4294967295; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u32 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { 
self.bits = bits; self } #[doc = "Bits 0:31 - Buffer MBi Interrupt"] #[inline] pub fn bufhi(&mut self) -> _BUFHIW { _BUFHIW { w: self } } }
avg_line_length: 26.708333 | max_line_length: 110 | alphanum_fraction: 0.530198
hexsha: 896cd301b9b6f5955f0e1e4e5e3e280995d8c0f1 | size: 12,471
//! A typed high-level pipeline interface. use std::borrow::Borrow; use std::mem; use std::marker::PhantomData; use {hal, format, handle}; use hal::image::Layout; use hal::pass::{AttachmentOps, AttachmentLoadOp, AttachmentStoreOp}; use {Backend, Device, Primitive, Supports, Transfer, Graphics, Encoder}; pub use hal::pso::{ ColorValue, DepthValue, StencilValue, Rect, Viewport, DescriptorBinding, DescriptorArrayIndex, Rasterizer, CreationError, InstanceRate, }; #[derive(Debug)] pub struct RawDescriptorSet<B: Backend> { pub(crate) resource: B::DescriptorSet, pub(crate) pool: handle::raw::DescriptorPool<B>, } impl<B: Backend> RawDescriptorSet<B> { pub fn resource(&self) -> &B::DescriptorSet { &self.resource } } pub trait Descriptors<B: Backend>: Sized { type Data: Sized; fn from_raw(handle::raw::DescriptorSetLayout<B>, RawDescriptorSet<B>) -> (Self, Self::Data); fn layout_bindings() -> Vec<hal::pso::DescriptorSetLayoutBinding>; fn layout(&self) -> &B::DescriptorSetLayout; fn set(&self) -> &B::DescriptorSet; } pub trait BindDesc { const TYPE: hal::pso::DescriptorType; const COUNT: usize; } pub trait Bind<B: Backend>: BindDesc { type Handle: 'static + Clone; fn write<'a, I>(views: I) -> Vec<hal::pso::Descriptor<'a, B>> where I: IntoIterator, I::Item: Borrow<&'a Self::Handle>; fn require<'a>( &'a Self::Handle, &mut Vec<(&'a handle::raw::Buffer<B>, hal::buffer::State)>, &mut Vec<(&'a handle::raw::Image<B>, hal::image::Subresource, hal::image::State)>, &mut handle::Bag<B>, ); } macro_rules! define_descriptors { ([$( $array_len:expr ),*] $( $name:ident, )*) => { $( impl<T: BindDesc> BindDesc for [T; $array_len] { const TYPE: hal::pso::DescriptorType = T::TYPE; const COUNT: usize = $array_len * T::COUNT; } impl<B, T> Bind<B> for [T; $array_len] where B: Backend, T: Bind<B> { type Handle = T::Handle; fn write<'a, I>(handles: I) -> Vec<hal::pso::Descriptor<'a, B>> where I: IntoIterator, I::Item: Borrow<&'a Self::Handle> { T::write(handles) } fn require<'a>( handle: &'a Self::Handle, buffers: &mut Vec<(&'a handle::raw::Buffer<B>, hal::buffer::State)>, images: &mut Vec<(&'a handle::raw::Image<B>, hal::image::Subresource, hal::image::State)>, others: &mut handle::Bag<B> ) { T::require(handle, buffers, images, others) } } )* $( pub struct $name; impl BindDesc for $name { const TYPE: hal::pso::DescriptorType = hal::pso::DescriptorType::$name; const COUNT: usize = 1; } )* } } // TODO: type-safe formats define_descriptors! 
{ [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ] SampledImage, Sampler, } impl<B: Backend> Bind<B> for SampledImage { type Handle = handle::raw::ImageView<B>; fn write<'a, I>(_views: I) -> Vec<hal::pso::Descriptor<'a, B>> where I: IntoIterator, I::Item: Borrow<&'a Self::Handle>, { Vec::new() /* views .into_iter() .map(|view| { let layout = Layout::ShaderReadOnlyOptimal; (view.borrow().resource(), layout) }).collect())*/ } fn require<'a>( view: &'a Self::Handle, _: &mut Vec<(&'a handle::raw::Buffer<B>, hal::buffer::State)>, images: &mut Vec<(&'a handle::raw::Image<B>, hal::image::Subresource, hal::image::State)>, _: &mut handle::Bag<B>, ) { let img = view.info(); let levels = img.info().mip_levels; let layers = img.info().kind.num_layers(); let state = (hal::image::Access::SHADER_READ, Layout::ShaderReadOnlyOptimal); for level in 0..levels { for layer in 0..layers { let subresource = hal::image::Subresource { aspects: img.info().aspects, level, layer }; images.push((img, subresource, state)); } } } } impl<B: Backend> Bind<B> for Sampler { type Handle = handle::raw::Sampler<B>; fn write<'a, I>(_samplers: I) -> Vec<hal::pso::Descriptor<'a, B>> where I: IntoIterator, I::Item: Borrow<&'a Self::Handle>, { Vec::new() /* samplers .into_iter() .map(|sampler| sampler.borrow().resource()) .collect())*/ } fn require<'a>( sampler: &'a Self::Handle, _: &mut Vec<(&'a handle::raw::Buffer<B>, hal::buffer::State)>, _: &mut Vec<(&'a handle::raw::Image<B>, hal::image::Subresource, hal::image::State)>, others: &mut handle::Bag<B>, ) { others.add(sampler.clone()); } } pub struct DescriptorSetBindRef<'a, 'b, B: Backend, T: Bind<B>> { pub set: &'a B::DescriptorSet, pub binding: DescriptorBinding, pub handles: &'b mut [Option<T::Handle>], } pub struct DescriptorSetsUpdate<'a, B: Backend> { device: &'a mut Device<B>, writes: Vec<hal::pso::DescriptorSetWrite<'a, B, Vec<hal::pso::Descriptor<'a, B>>>>, } impl<'a, B: Backend> DescriptorSetsUpdate<'a, B> { pub(crate) fn new(device: &'a mut Device<B>) -> Self { DescriptorSetsUpdate { device, writes: Vec::new() } } pub fn write<'b, T: Bind<B>, I>( mut self, bind_ref: DescriptorSetBindRef<'a, 'b, B, T>, array_offset: usize, handles: I, ) -> Self where I: IntoIterator, I::Item: Borrow<&'a T::Handle>, { let handles: Vec<_> = handles.into_iter().map(|handle| *handle.borrow()).collect(); for (slot, &handle) in bind_ref.handles[array_offset..].iter_mut().zip(handles.iter()) { *slot = Some(handle.clone()); } self.writes.push(hal::pso::DescriptorSetWrite { set: bind_ref.set, binding: bind_ref.binding, array_offset: 0, descriptors: T::write(handles), }); self } pub fn finish(self) { use hal::Device; self.device.raw.write_descriptor_sets(self.writes); } } pub trait GraphicsPipelineInit<B: Backend> { type Pipeline; fn create<'a>( self, &mut Device<B>, hal::pso::GraphicsShaderSet<'a, B>, Primitive, Rasterizer ) -> Result<Self::Pipeline, CreationError>; } pub trait GraphicsPipelineMeta<B: Backend> { fn layout(&self) -> &B::PipelineLayout; fn render_pass(&self) -> &B::RenderPass; } pub trait GraphicsPipelineData<B: Backend> { type Pipeline; fn begin_renderpass<'a, 'b, C>( self, encoder: &'a mut Encoder<'b, B, C>, pipeline: &'a Self::Pipeline ) -> hal::command::RenderPassInlineEncoder<'a, B, hal::command::Primary> where Self: 'a, 'b: 'a, C: Supports<Transfer> + Supports<Graphics>; } pub trait Component<'a, B: Backend> { type Init: 'a; type Data: 'a; fn descriptor_layout<'b>(&'b Self::Init) -> Option<&'b B::DescriptorSetLayout> where 'a: 'b { None } fn attachment(&Self::Init) -> 
Option<Attachment> { None } fn append_desc( Self::Init, &mut hal::pso::GraphicsPipelineDesc<B>, ) {} fn require<'b>( &'b Self::Data, &mut Vec<(&'b handle::raw::Buffer<B>, hal::buffer::State)>, &mut Vec<(&'b handle::raw::Image<B>, hal::image::Subresource, hal::image::State)>, &mut handle::Bag<B>, ) where 'a: 'b {} fn vertex_buffer<'b>(&'b Self::Data) -> Option<(&'b B::Buffer, hal::buffer::Offset)> where 'a: 'b { None } fn descriptor_set<'b>(&'b Self::Data) -> Option<&'b B::DescriptorSet> where 'a: 'b { None } } pub struct Attachment { pub format: format::Format, pub ops: AttachmentOps, pub stencil_ops: AttachmentOps, pub required_layout: Layout, } pub struct RenderTarget<F: format::AsFormat>(PhantomData<F>); impl<'a, B, F> Component<'a, B> for RenderTarget<F> where B: Backend, F: 'a + format::AsFormat, { type Init = hal::pso::ColorBlendDesc; type Data = &'a handle::ImageView<B, F>; fn attachment(_: &Self::Init) -> Option<Attachment> { Some(Attachment { format: F::SELF, // TODO: AttachmentLoadOp::Clear ops: AttachmentOps::new(AttachmentLoadOp::Load, AttachmentStoreOp::Store), stencil_ops: AttachmentOps::DONT_CARE, required_layout: Layout::ColorAttachmentOptimal, }) } fn append_desc( init: Self::Init, pipeline_desc: &mut hal::pso::GraphicsPipelineDesc<B>, ) { pipeline_desc.blender.targets.push(init); } fn require<'b>( data: &'b Self::Data, _: &mut Vec<(&'b handle::raw::Buffer<B>, hal::buffer::State)>, images: &mut Vec<(&'b handle::raw::Image<B>, hal::image::Subresource, hal::image::State)>, _: &mut handle::Bag<B>, ) where 'a: 'b { let img = data.as_ref().info(); let levels = img.info().mip_levels; let layers = img.info().kind.num_layers(); // TODO: READ not always necessary let state = (hal::image::Access::COLOR_ATTACHMENT_READ | hal::image::Access::COLOR_ATTACHMENT_WRITE, Layout::ColorAttachmentOptimal); for level in 0..levels { for layer in 0..layers { let subresource = hal::image::Subresource { aspects: img.info().aspects, level, layer }; images.push((img, subresource, state)); } } } } pub trait Structure: Sized { fn elements() -> Vec<hal::pso::Element<format::Format>>; } /// Helper trait to support variable instance rate. pub trait ToInstanceRate { /// The associated init type for PSO component. type Init; /// Get an actual instance rate value from the init. fn get_rate(init: &Self::Init) -> InstanceRate; } /// Helper phantom type for per-vertex attributes. pub enum NonInstanced {} /// Helper phantom type for per-instance attributes. 
pub enum Instanced {} impl ToInstanceRate for InstanceRate { type Init = InstanceRate; fn get_rate(init: &Self::Init) -> InstanceRate { *init } } impl ToInstanceRate for Instanced { type Init = (); fn get_rate(_: &Self::Init) -> InstanceRate { 1 } } impl ToInstanceRate for NonInstanced { type Init = (); fn get_rate(_: &Self::Init) -> InstanceRate { 0 } } pub struct VertexBuffer<T: Structure, I=NonInstanced>(PhantomData<(T, I)>); impl<'a, B, T, I> Component<'a, B> for VertexBuffer<T, I> where B: Backend, T: 'a + Structure, I: ToInstanceRate, I::Init: 'a { type Init = I::Init; type Data = &'a handle::Buffer<B, T>; fn append_desc( init: Self::Init, pipeline_desc: &mut hal::pso::GraphicsPipelineDesc<B>, ) { let binding = pipeline_desc.vertex_buffers.len() as u32; pipeline_desc.vertex_buffers.push(hal::pso::VertexBufferDesc { stride: mem::size_of::<T>() as u32, rate: I::get_rate(&init), }); let mut location = 0; for element in T::elements() { pipeline_desc.attributes.push(hal::pso::AttributeDesc { location, binding, element, }); location += 1; } } fn require<'b>( data: &'b Self::Data, buffers: &mut Vec<(&'b handle::raw::Buffer<B>, hal::buffer::State)>, _: &mut Vec<(&'b handle::raw::Image<B>, hal::image::Subresource, hal::image::State)>, _: &mut handle::Bag<B>, ) where 'a: 'b { buffers.push((data.as_ref(), hal::buffer::Access::VERTEX_BUFFER_READ)); } fn vertex_buffer<'b>(data: &'b Self::Data) -> Option<(&'b B::Buffer, hal::buffer::Offset)> where 'a: 'b { // TODO: offset Some((data.as_ref().resource(), 0)) } } pub type InstanceBuffer<T> = VertexBuffer<T, Instanced>;
29.622328
110
0.553043
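The typed-PSO module above leans on phantom type parameters (`VertexBuffer<T, I>` with the `NonInstanced`/`Instanced` markers and `ToInstanceRate`) to pick the vertex stepping rate at compile time. A minimal, self-contained sketch of that pattern, with illustrative names that are not part of the crate:

use std::marker::PhantomData;

/// 0 = step per vertex, 1 = step per instance.
type Rate = u8;

/// Uninhabited markers: they only exist at the type level.
enum NonInstanced {}
enum Instanced {}

trait ToRate {
    fn rate() -> Rate;
}

impl ToRate for NonInstanced { fn rate() -> Rate { 0 } }
impl ToRate for Instanced { fn rate() -> Rate { 1 } }

/// A buffer whose stepping mode is fixed by its type parameter.
struct VertexBuffer<T, I = NonInstanced>(Vec<T>, PhantomData<I>);

impl<T, I: ToRate> VertexBuffer<T, I> {
    fn new(data: Vec<T>) -> Self { VertexBuffer(data, PhantomData) }
    fn rate(&self) -> Rate { I::rate() }
}

fn main() {
    let per_vertex: VertexBuffer<[f32; 3]> = VertexBuffer::new(vec![[0.0; 3]]);
    let per_instance: VertexBuffer<[f32; 3], Instanced> = VertexBuffer::new(vec![[1.0; 3]]);
    assert_eq!(per_vertex.rate(), 0);
    assert_eq!(per_instance.rate(), 1);
}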
8a3bdcbd3d5ed4df4d8c4bf1b0557a09569d2c90
2,421
use libc; pub type __uint32_t = libc::c_uint; pub type uint32_t = __uint32_t; pub type size_t = libc::c_ulong; // https://www.geeksforgeeks.org/move-zeroes-end-array/ pub unsafe fn push_zeroes_to_end(mut arr: *mut uint32_t, mut n: size_t) -> size_t { let mut count: size_t = 0i32 as size_t; let mut i: size_t = 0i32 as size_t; while i < n { if *arr.offset(i as isize) != 0i32 as libc::c_uint { let fresh0 = count; count = count.wrapping_add(1); *arr.offset(fresh0 as isize) = *arr.offset(i as isize) } i = i.wrapping_add(1) } let mut ret: size_t = count; while count < n { let fresh1 = count; count = count.wrapping_add(1); *arr.offset(fresh1 as isize) = 0i32 as uint32_t } return ret; } /* * * Add `target` to `values` if it doesn't exist * "set"s should only be modified with set_* functions * Values MUST be greater than 0 */ pub unsafe fn set_add(mut values: *mut uint32_t, mut len: *mut size_t, mut cap: size_t, mut target: uint32_t) -> bool { if *len == cap { return 0i32 != 0 } let mut i: uint32_t = 0i32 as uint32_t; while (i as libc::c_ulong) < *len { if *values.offset(i as isize) == target { return 0i32 != 0 } i = i.wrapping_add(1) } let fresh2 = *len; *len = (*len).wrapping_add(1); *values.offset(fresh2 as isize) = target; /* the element was actually inserted, so report success */ return 1i32 != 0; } /* * * Remove `target` from `values` if it exists * "set"s should only be modified with set_* functions * Values MUST be greater than 0 */ pub unsafe fn set_remove(mut values: *mut uint32_t, mut len: *mut size_t, mut cap: size_t, mut target: uint32_t) -> bool { let mut i: uint32_t = 0i32 as uint32_t; while (i as libc::c_ulong) < *len { if *values.offset(i as isize) == target { // Set to 0 and swap with the end element so that // zeroes exist only after all the values. *len = (*len).wrapping_sub(1); let mut last_elem_pos: size_t = *len; *values.offset(i as isize) = *values.offset(last_elem_pos as isize); *values.offset(last_elem_pos as isize) = 0i32 as uint32_t; return 1i32 != 0 } i = i.wrapping_add(1) } return 0i32 != 0; }
35.086957
70
0.570838
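A hedged usage sketch for the transpiled set functions above, assuming `set_add`, `set_remove`, and the `size_t` alias are in scope and that `libc::c_ulong` is 64-bit on the target; the caller owns the backing array and the `len`/`cap` bookkeeping:

fn main() {
    // Backing storage with capacity 8; per the contract above, stored values must be non-zero.
    let mut values: Vec<u32> = vec![0; 8];
    let mut len: size_t = 0;
    unsafe {
        set_add(values.as_mut_ptr(), &mut len, 8, 42);
        set_add(values.as_mut_ptr(), &mut len, 8, 7);
        set_add(values.as_mut_ptr(), &mut len, 8, 42); // duplicate: rejected
        assert_eq!(len, 2);
        assert!(set_remove(values.as_mut_ptr(), &mut len, 8, 42));
        // The removed slot was back-filled from the end, so 7 is now first.
        assert_eq!(len, 1);
        assert_eq!(values[0], 7);
    }
}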
395f56e110f052ff76b6383796269220e5f911b2
2,635
use crate::flows; use crate::flows::core::Variant; use crate::structures::config::Command::{Alfred, Best, Fn, Preview, Query, Repo, Search, Widget}; use crate::structures::config::{AlfredCommand, Config, RepoCommand}; use anyhow::Context; use anyhow::Error; pub fn handle_config(config: Config) -> Result<(), Error> { match config.cmd.as_ref() { None => flows::core::main(Variant::Core, config, true), Some(c) => match c { Preview { line } => flows::preview::main(&line[..]), Query { query } => { let query_clone = query.clone(); flows::query::main(query.clone(), config) .with_context(|| format!("Failed to filter cheatsheets for {}", query_clone)) } Best { query, args } => { let query_clone = query.clone(); flows::best::main(query.clone(), args.to_vec(), config).with_context(|| { format!("Failed to execute snippet similar to {}", query_clone) }) } Search { query } => flows::search::main(query.clone(), config) .context("Failed to search for online cheatsheets"), Widget { shell } => { flows::shell::main(&shell).context("Failed to print shell widget code") } Fn { func, args } => flows::func::main(func.clone(), args.to_vec()) .with_context(|| format!("Failed to execute function `{}`", func)), Repo { cmd } => match cmd { RepoCommand::Add { uri } => flows::repo::add(uri.clone(), &config.finder) .with_context(|| format!("Failed to import cheatsheets from `{}`", uri)), RepoCommand::Browse => flows::repo::browse(&config.finder) .context("Failed to browse featured cheatsheets"), }, Alfred { cmd } => { match cmd { AlfredCommand::Start => flows::alfred::main(config) .context("Failed to call Alfred starting function"), AlfredCommand::Suggestions => flows::alfred::suggestions(config, false) .context("Failed to call Alfred suggestion function"), AlfredCommand::Check => flows::alfred::suggestions(config, true) .context("Failed to call Alfred check function"), AlfredCommand::Transform => flows::alfred::transform() .context("Failed to call Alfred transform function"), } } }, } }
43.916667
97
0.528273
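Each arm above attaches a lazily built message via `anyhow`'s `with_context`, so the formatting cost is only paid on failure. A standalone sketch of the same pattern, with a made-up config file path:

use anyhow::{Context, Result};

fn read_port(path: &str) -> Result<u16> {
    let text = std::fs::read_to_string(path)
        .with_context(|| format!("Failed to read config file `{}`", path))?;
    text.trim()
        .parse::<u16>()
        .with_context(|| format!("`{}` does not contain a valid port number", path))
}

fn main() {
    // `{:#}` prints the context message followed by the underlying cause.
    if let Err(err) = read_port("/etc/example/port.conf") {
        eprintln!("{:#}", err);
    }
}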
ed4e36cc3dd031a3da7a2f910c976cde6b8042e5
2,821
use std::os::raw::{c_float, c_int}; use skia_safe::{AlphaType, ColorType, ImageInfo, IPoint, ISize, IVector, Rect}; use crate::common::context::Context; use crate::common::context::pixel_manipulation::image_data::ImageData; pub mod image_data; impl Context { pub fn create_image_data(width: c_int, height: c_int) -> ImageData { ImageData::new(width, height) } pub fn get_image_data( &mut self, sx: c_float, sy: c_float, sw: c_float, sh: c_float, ) -> ImageData { let info = ImageInfo::new( ISize::new(sw as i32, sh as i32), ColorType::RGBA8888, AlphaType::Unpremul, None, ); let row_bytes = info.width() * 4; let mut slice = vec![255u8; (row_bytes * info.height()) as usize]; let _ = self.surface.canvas().read_pixels( &info, slice.as_mut_slice(), row_bytes as usize, IPoint::new(sx as i32, sy as i32), ); let mut image_data = ImageData::new(info.width(), info.height()); image_data.set_data(slice); image_data } pub fn put_image_data( &mut self, data: &ImageData, dx: c_float, dy: c_float, sx: c_float, sy: c_float, sw: c_float, sh: c_float, ) { let mut dx = dx; let mut dy = dy; let mut sx = sx; let mut sy = sy; let mut sw = sw; let mut sh = sh; let srect: Rect = Rect::from_xywh(sx, sy, sw, sh); let info: ImageInfo; let row_bytes: usize; if srect.is_empty() { info = ImageInfo::new( ISize::new(data.width(), data.height()), ColorType::RGBA8888, AlphaType::Unpremul, None, ); row_bytes = (data.width() * 4) as usize; } else { /* a negative source width or height flips the origin along that axis */ if sw < 0.0 { sx += sw; sw = -sw; } if sh < 0.0 { sy += sh; sh = -sh; } if sx + sw > data.width() as f32 { sw = data.width() as f32 - sx; } if sy + sh > data.height() as f32 { sh = data.height() as f32 - sy; } dx += sx; dy += sy; info = ImageInfo::new( ISize::new(sw as i32, sh as i32), ColorType::RGBA8888, AlphaType::Unpremul, None, ); row_bytes = (sw * 4.0) as usize; } let _ = self.surface.canvas().write_pixels( &info, &data.data(), row_bytes, IVector::new(dx as i32, dy as i32), ); } }
26.613208
79
0.458703
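`put_image_data` above normalizes a negative source width or height by shifting the origin, mirroring canvas `putImageData` semantics. A tiny self-contained sketch of that per-axis normalization:

/// Normalize one axis of a rectangle: a negative extent moves the
/// origin back and makes the extent positive.
fn normalize_axis(mut origin: f32, mut extent: f32) -> (f32, f32) {
    if extent < 0.0 {
        origin += extent;
        extent = -extent;
    }
    (origin, extent)
}

fn main() {
    assert_eq!(normalize_axis(10.0, -4.0), (6.0, 4.0)); // x and a negative width
    assert_eq!(normalize_axis(0.0, 5.0), (0.0, 5.0));   // already normalized
}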
26ce8bd7b74c4efa7dc34da105763405419eb463
1,678
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors // SPDX-License-Identifier: MIT use crate::protocol_runner::init::context::{ ProtocolRunnerInitContextErrorAction, ProtocolRunnerInitContextState, }; use crate::protocol_runner::init::ProtocolRunnerInitState; use crate::protocol_runner::ProtocolRunnerState; use crate::service::ProtocolRunnerService; use crate::{Action, ActionWithMeta, Service, Store}; use super::ProtocolRunnerInitContextPendingAction; pub fn protocol_runner_init_context_effects<S>(store: &mut Store<S>, action: &ActionWithMeta) where S: Service, { if let Action::ProtocolRunnerInitContext(_) = &action.action { let state = store.state.get(); let apply_genesis = match &state.protocol_runner { ProtocolRunnerState::Init(ProtocolRunnerInitState::Context( ProtocolRunnerInitContextState::Init { apply_genesis }, )) => *apply_genesis, _ => return, }; let res = store.service.protocol_runner().init_context( state.config.protocol_runner.storage.clone(), &state.config.protocol_runner.environment, apply_genesis, state.config.protocol_runner.enable_testchain, false, state.config.init_storage_data.patch_context.clone(), state.config.init_storage_data.context_stats_db_path.clone(), ); match res { Ok(token) => store.dispatch(ProtocolRunnerInitContextPendingAction { token }), Err(error) => { store.dispatch(ProtocolRunnerInitContextErrorAction { token: None, error }) } }; } }
39.023256
93
0.678188
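The effect above is redux-style: read the state, return early unless it is in the expected variant, run the side effect, then dispatch a success or error action. A stripped-down, self-contained sketch of that control flow, with all names hypothetical:

#[derive(Debug)]
enum State {
    Init { apply_genesis: bool },
    Pending { token: u32 },
}

#[derive(Debug)]
enum Action {
    Pending { token: u32 },
    Error { message: String },
}

struct Store {
    state: State,
}

impl Store {
    fn dispatch(&mut self, action: Action) {
        if let Action::Pending { token } = action {
            self.state = State::Pending { token };
        }
        println!("after {:?}: {:?}", action, self.state);
    }
}

/// Stands in for the `init_context` call on the service.
fn init_context(apply_genesis: bool) -> Result<u32, String> {
    if apply_genesis { Ok(7) } else { Err("nothing to do".into()) }
}

fn effect(store: &mut Store) {
    // Bail out unless the state machine is in the variant this effect handles.
    let apply_genesis = match store.state {
        State::Init { apply_genesis } => apply_genesis,
        _ => return,
    };
    match init_context(apply_genesis) {
        Ok(token) => store.dispatch(Action::Pending { token }),
        Err(message) => store.dispatch(Action::Error { message }),
    }
}

fn main() {
    let mut store = Store { state: State::Init { apply_genesis: true } };
    effect(&mut store);
}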
71ad86def4f874b069ec8a48dba8f4456491dd50
1,710
#![forbid(unsafe_code)] // #![forbid(rust_2018_idioms)] #![allow(dead_code)] use std::ops::Range; /// A span is a range into a set of bytes - see it as a selection into a Git config file. /// /// Similar to [`std::ops::RangeInclusive`], but tailor-made to work for us. /// There are various issues with std ranges, which we don't have to opt into for the simple Range-like item we need. #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy)] struct Span { pub start: usize, pub end_inclusive: usize, } impl From<Span> for Range<usize> { fn from(Span { start, end_inclusive }: Span) -> Self { Range { start, end: end_inclusive + 1, } } } impl Span { fn to_range(&self) -> Range<usize> { self.clone().into() } } pub mod file; pub use file::File; /// A module with specialized value types as they exist within git config files. pub mod value; /// Spanned items refer to their content using [`Span`]s, thus they act like a pointer into a byte buffer representing the config file. /// /// These are inherently read-only, as they do not own any data but rather point to a buffer they don't even know. mod spanned; /// Owned versions of what can be found in `spanned`, which allows these items to be altered. /// /// All of these *may* remember their originating `span` as `Some(…)`, which is the entire region in the config file they point to. This is important /// in case of updates. New owned items thus don't have a `span`, represented by `None`. mod owned; /// Borrowed items are nothing more than a fancy 'handle' to an item stored in a file, which can be made editable to make updates. pub mod borrowed; mod decode;
32.264151
154
0.681871
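`Span` stores an inclusive end, so the `From` impl above adds one to produce a half-open `Range`. A small standalone demonstration of slicing a buffer through that conversion (the struct is repeated so the snippet compiles on its own):

use std::ops::Range;

#[derive(Clone, Copy)]
struct Span {
    start: usize,
    end_inclusive: usize,
}

impl From<Span> for Range<usize> {
    fn from(Span { start, end_inclusive }: Span) -> Self {
        // The half-open end is one past the inclusive end.
        start..end_inclusive + 1
    }
}

fn main() {
    let buf = b"[core]\n\tbare = false";
    let name = Span { start: 1, end_inclusive: 4 }; // selects "core"
    let range: Range<usize> = name.into();
    assert_eq!(&buf[range], &b"core"[..]);
}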
acbf73bf62a244f544190caa265074fe96f4381d
28,443
pub mod color_format; pub mod error; mod iter; pub mod options; mod utils; use crate::geometry::{ color::Color, mesh::{Face, Mesh, Vertex}, position::Position, }; use self::{ error::{Error, Kind}, iter::OffLines, options::Options, utils::{ConvertVec, StrParts}, }; pub type Result<T = ()> = std::result::Result<T, Error>; /// Parses a [`crate::geometry::mesh::Mesh`] from an `off` string. #[derive(Debug, Clone)] pub struct Parser<'a> { lines: OffLines<'a>, prev_line_index: usize, vertex_count: usize, face_count: usize, edge_count: usize, document: Mesh, options: Options, } impl<'a> Parser<'a> { /// Creates a new [`Parser`] from an `off` string. pub fn new<S: AsRef<str>>(s: &'a S, options: Options) -> Self { let lines = OffLines::new(s.as_ref()); Parser { lines, prev_line_index: 0, vertex_count: 0, face_count: 0, edge_count: 0, document: Mesh::new(), options, } } /// Parses the `off` string and returns a [`Result`] containing the [`crate::geometry::mesh::Mesh`] or an [`Error`]. /// /// # Errors /// /// Will return `Error` if an error occurs while parsing the `off` data. pub fn parse(mut self) -> crate::Result { self.parse_header()?; self.parse_counts()?; self.parse_vertices()?; self.parse_faces()?; Ok(self.finalize()) } /// Progress to the next line. fn next_line(&mut self) -> Option<(usize, &'a str)> { let (line_index, line) = self.lines.next()?; self.prev_line_index = line_index; Some((line_index, line)) } /// Parses the header of the `off` string. fn parse_header(&mut self) -> Result { let (line_index, line) = self .next_line() .ok_or_else(|| Error::without_message(Kind::Empty, 0))?; if line != "OFF" { return Err(Error::with_message( Kind::InvalidHeader, line_index, "First non-comment line should be `OFF`", )); } Ok(()) } /// Parses the counts of the `off` string. fn parse_counts(&mut self) -> Result { let (line_index, line) = self.next_line().ok_or_else(|| { Error::with_message(Kind::Missing, self.prev_line_index + 1, "No counts present") })?; let counts: Vec<&str> = line.split_line(); let num: Vec<usize> = counts.parse_string_to().map_err(|err| { Error::with_message( Kind::InvalidCounts, line_index, format!("Failed to parse count as number ({})", err), ) })?; match num[..] { [vertex_count, face_count, edge_count] => { self.vertex_count = vertex_count; self.face_count = face_count; self.edge_count = edge_count; } [vertex_count, face_count] => { self.vertex_count = vertex_count; self.face_count = face_count; } _ => { return Err(Error::with_message( Kind::InvalidCounts, line_index, format!( "Invalid number of counts present (expected: 2-3, actual: {})", num.len() ), )); } } // Check for limits if self.vertex_count > self.options.limits.vertex_count { return Err(Error::with_message( Kind::LimitExceeded, line_index, format!( "Vertex count exceeds limit (limit: {}, actual: {})", self.options.limits.vertex_count, self.vertex_count ), )); } if self.face_count > self.options.limits.face_count { return Err(Error::with_message( Kind::LimitExceeded, line_index, format!( "Face count exceeds limit (limit: {}, actual: {})", self.options.limits.face_count, self.face_count ), )); } Ok(()) } /// Parses the vertices of the `off` string. fn parse_vertices(&mut self) -> Result { for _ in 0..self.vertex_count { let (line_index, line) = self.next_line().ok_or_else(|| { Error::with_message( Kind::Missing, self.prev_line_index + 1, "Expected vertex definition", ) })?; let parts = line.split_line(); let vertex = self.parse_vertex(line_index, &parts)?; self.document.vertices.push(vertex); } Ok(()) } /// Parses a vertex from an `off` string. fn parse_vertex(&mut self, line_index: usize, parts: &[&str]) -> Result<Vertex> { if parts.len() < 3 { return Err(Error::with_message( Kind::InvalidVertexPosition, line_index, format!( "Not enough parts for position (expected: >= 3, actual: {})", parts.len() ), )); } let position = Parser::parse_position(line_index, &parts[0..=2])?; let color = if parts.len() > 3 { Some(self.parse_color(line_index, &parts[3..])?) } else { None }; Ok(Vertex { position, color }) } /// Parses a position from an `off` string. fn parse_position(line_index: usize, parts: &[&str]) -> Result<Position> { if parts.len() != 3 { return Err(Error::with_message( Kind::InvalidVertexPosition, line_index, format!( "Invalid number of coordinates given (expected: 3, actual: {})", parts.len() ), )); } let position_parts: Vec<f32> = parts .iter() .map(|s| { s.parse().map_err(|err| { Error::with_message( Kind::InvalidVertexPosition, line_index, format!("Failed to parse coordinate as number: ({})", err), ) }) }) .collect::<Result<Vec<f32>>>()?; Position::try_from(position_parts).map_err(|err| { Error::with_message( Kind::InvalidVertexPosition, line_index, format!("Failed to parse position: ({})", err), ) }) } /// Parses a color from an `off` string. fn parse_color(&mut self, line_index: usize, parts: &[&str]) -> Result<Color> { if parts.len() != self.options.color_format.channel_count() { return Err(Error::with_message( Kind::InvalidColor, line_index, format!( "Invalid number of color elements given (expected: {}, actual: {})", self.options.color_format.channel_count(), parts.len() ), )); } if self.options.color_format.is_float() { // parse as f32 let color_parts = parts .iter() .map(|s| { s.parse::<f32>().map_err(|err| { Error::with_message( Kind::InvalidColor, line_index, format!("Failed to parse color as float: {}", err), ) }) }) .collect::<Result<Vec<f32>>>()?; Color::try_from(color_parts).map_err(|err| { Error::with_message( Kind::InvalidColor, line_index, format!("Failed to parse color: {}", err), ) }) } else { // parse as u8 let color_parts = parts .iter() .map(|s| { s.parse::<u8>().map_err(|err| { Error::with_message( Kind::InvalidColor, line_index, format!("Failed to parse color as u8: {}", err), ) }) }) .collect::<Result<Vec<u8>>>()?; Color::try_from(color_parts).map_err(|err| { Error::with_message( Kind::InvalidColor, line_index, format!("Failed to parse color: {}", err), ) }) } } /// Parses the faces of the `off` string. fn parse_faces(&mut self) -> Result { for _ in 0..self.face_count { let (line_index, line) = self.next_line().ok_or_else(|| { Error::with_message( Kind::Missing, self.prev_line_index + 1, "Expected face definition", ) })?; let parts: Vec<&str> = line.split_line(); let face = self.parse_face(line_index, &parts)?; self.document.faces.push(face); } Ok(()) } /// Parses a face from an `off` string. fn parse_face(&mut self, line_index: usize, mut parts: &[&str]) -> Result<Face> { if parts.len() < 4 { return Err(Error::with_message( Kind::InvalidFace, line_index, format!("Not enough arguments. At least three vertex indices required (e.g. `3 1 2 3`).
{} arguments given", parts.len()), )); } let vertex_count: usize = parts[0].parse().map_err(|err| { Error::with_message( Kind::InvalidFace, line_index, format!("Failed to parse vertex count for face definition: {}", err), ) })?; if vertex_count < 3 { return Err(Error::with_message( Kind::InvalidFace, line_index, format!( "Vertex count should be at least 3 (actual: {})", vertex_count ), )); } if vertex_count > self.options.limits.face_vertex_count { return Err(Error::with_message( Kind::LimitExceeded, line_index, format!( "Vertex count of face exceeds limit (limit: {}, actual: {})", self.options.limits.face_vertex_count, vertex_count ), )); } // "Consume" vertex_count parts = &parts[1..]; // faces are polygons and might have to be triangulated later. Therefore we require at least three vertices if parts.len() < 3 { return Err(Error::with_message( Kind::InvalidFace, line_index, format!( "Not enough vertices for face (expected: >= 3, actual: {})", parts.len() ), )); } // sanity check: at least vertex_count parts if parts.len() < vertex_count { return Err(Error::with_message( Kind::InvalidFace, line_index, format!( "Not enough vertex indices given for face definition (expected: {}, actual: {})", vertex_count, parts.len() ), )); } let vertices = Parser::parse_face_indices(line_index, vertex_count, parts)?; // "Consume" vertex indexes parts = &parts[vertex_count..]; let color = if parts.is_empty() { None } else { Some(self.parse_color(line_index, parts)?) }; Ok(Face { vertices, color }) } /// Parses the face vertex indices from a line. fn parse_face_indices( line_index: usize, vertex_count: usize, parts: &[&str], ) -> Result<Vec<usize>> { let vertices: Vec<usize> = parts .iter() .take(vertex_count) .map(|s| { s.parse().map_err(|err| { Error::with_message( Kind::InvalidFaceIndex, line_index, format!("Failed to parse vertex index as number: ({})", err), ) }) }) .collect::<Result<Vec<usize>>>()?; if vertices.len() != vertex_count { return Err(Error::with_message( Kind::InvalidFaceIndex, line_index, format!( "Invalid number of face indexes given (expected: {}, actual: {})", vertex_count, vertices.len() ), )); } Ok(vertices) } /// Finalizes the parsing by returning the [`Mesh`]. fn finalize(self) -> Mesh { self.document } } #[cfg(test)] #[allow(unused)] mod tests { use crate::parser::color_format::ColorFormat; use super::*; #[test] fn parse_header() { let mut parser = Parser::new(&"OFF", Options::default()); assert!(parser.parse_header().is_ok()); } #[test] fn parse_header_missing() { let mut parser = Parser::new(&"", Options::default()); let header = parser.parse_header(); assert!(header.is_err()); assert!(matches!( header.unwrap_err(), Error { kind: Kind::Empty, .. } )); } #[test] fn parse_header_invalid() { let mut parser = Parser::new(&"COFF", Options::default()); let header = parser.parse_header(); assert!(header.is_err()); assert!(matches!( header.unwrap_err(), Error { kind: Kind::InvalidHeader, .. } )); } #[test] fn parse_counts() { let mut parser = Parser::new(&"8 6 12", Options::default()); assert!(parser.parse_counts().is_ok()); assert_eq!(parser.vertex_count, 8); assert_eq!(parser.face_count, 6); assert_eq!(parser.edge_count, 12); } #[test] fn parse_counts_missing() { let mut parser = Parser::new(&"", Options::default()); let counts = parser.parse_counts(); assert!(counts.is_err()); assert!(matches!( counts.unwrap_err(), Error { kind: Kind::Missing, .. 
} )); } #[test] fn parse_counts_too_many() { let mut parser = Parser::new(&"8 6 12 16", Options::default()); let counts = parser.parse_counts(); assert!(counts.is_err()); assert!(matches!( counts.unwrap_err(), Error { kind: Kind::InvalidCounts, .. } )); } #[test] fn parse_counts_limits() { let mut parser = Parser::new(&"999999999999 888888888888 777777777", Options::default()); let counts = parser.parse_counts(); assert!(counts.is_err()); assert!(matches!( counts.unwrap_err(), Error { kind: Kind::LimitExceeded, .. } )); } #[test] fn parse_vertices() { let mut parser = Parser::new( &"3.0 1.0 2.0 0.1 0.2 0.3 1.0\n1.0 2.0 3.0 0.1 0.2 0.3 1.0", Options::default(), ); parser.vertex_count = 2; let result = parser.parse_vertices(); assert!(result.is_ok()); assert!(parser.next_line().is_none()); assert!(parser.document.vertices.len() == 2); assert!( parser.document.vertices[0] == Vertex::new( Position::new(3.0, 1.0, 2.0), Some(Color::new(0.1, 0.2, 0.3, 1.0).unwrap()), ) ); assert!( parser.document.vertices[1] == Vertex::new( Position::new(1.0, 2.0, 3.0), Some(Color::new(0.1, 0.2, 0.3, 1.0).unwrap()), ) ); } #[test] fn parse_vertex() { let mut parser = Parser::new(&"", Options::default()); let vertex = parser.parse_vertex(0, &["1.0", "2.0", "3.0"]); assert!(vertex.is_ok()); assert_eq!( vertex.unwrap(), Vertex::new(Position::new(1.0, 2.0, 3.0), None) ); } #[test] fn parse_vertex_too_few_parts() { let mut parser = Parser::new(&"", Options::default()); let vertex = parser.parse_vertex(0, &["1.0", "2.0"]); assert!(vertex.is_err()); assert!(matches!( vertex.unwrap_err(), Error { kind: Kind::InvalidVertexPosition, .. } )); } #[test] fn parse_position() { let position = Parser::parse_position(0, &["1", "2", "3"]); assert!(position.is_ok()); assert_eq!( position.unwrap(), Position { x: 1.0, y: 2.0, z: 3.0 } ); } #[test] fn parse_position_no_number() { let position = Parser::parse_position(0, &["1", "2", "a"]); assert!(position.is_err()); assert!(matches!( position.unwrap_err(), Error { kind: Kind::InvalidVertexPosition, .. } )); } #[test] fn parse_position_too_few_parts() { let position = Parser::parse_position(0, &["1", "2"]); assert!(position.is_err()); assert!(matches!( position.unwrap_err(), Error { kind: Kind::InvalidVertexPosition, .. } )); } #[test] fn parse_position_too_many_parts() { let position = Parser::parse_position(0, &["1", "2", "3", "5"]); assert!(position.is_err()); assert!(matches!( position.unwrap_err(), Error { kind: Kind::InvalidVertexPosition, .. 
} )); } #[test] fn parse_color_rgbfloat() { let mut parser = Parser::new( &"", Options { color_format: ColorFormat::RGBFloat, ..Options::default() }, ); let color = parser.parse_color(0, &["1.0", "0.5", "0.3"]); assert!(color.is_ok()); assert_eq!( color.unwrap(), Color { red: 1.0, green: 0.5, blue: 0.3, alpha: 1.0, } ); } #[test] fn parse_color_rgbafloat() { let mut parser = Parser::new( &"", Options { color_format: ColorFormat::RGBAFloat, ..Options::default() }, ); let color = parser.parse_color(0, &["1.0", "0.5", "0.3", "0.5"]); assert!(color.is_ok()); assert_eq!( color.unwrap(), Color { red: 1.0, green: 0.5, blue: 0.3, alpha: 0.5, } ); } #[test] fn parse_color_rgbinterger() { let mut parser = Parser::new( &"", Options { color_format: ColorFormat::RGBInteger, ..Options::default() }, ); let color = parser.parse_color(0, &["255", "128", "0"]); assert!(color.is_ok()); assert_eq!( color.unwrap(), Color { red: 1.0, green: 0.501_960_8, blue: 0.0, alpha: 1.0, } ); } #[test] fn parse_color_rgbinterger_fail() { let mut parser = Parser::new( &"", Options { color_format: ColorFormat::RGBInteger, ..Options::default() }, ); let color = parser.parse_color(0, &["255", "128.0", "0"]); assert!(color.is_err()); assert!(matches!( color.unwrap_err(), Error { kind: Kind::InvalidColor, .. } )); } #[test] fn parse_color_rgbainterger() { let mut parser = Parser::new( &"", Options { color_format: ColorFormat::RGBAInteger, ..Options::default() }, ); let color = parser.parse_color(0, &["255", "128", "0", "255"]); assert!(color.is_ok()); assert_eq!( color.unwrap(), Color { red: 1.0, green: 0.501_960_8, blue: 0.0, alpha: 1.0, } ); } #[test] fn parse_color_element_count() { let mut parser = Parser::new( &"", Options { color_format: ColorFormat::RGBFloat, ..Options::default() }, ); let color = parser.parse_color(0, &["1.0", "0.5", "0.3", "0.4"]); assert!(color.is_err()); assert!(matches!( color.unwrap_err(), Error { kind: Kind::InvalidColor, .. } )); } #[test] fn parse_faces() { let mut parser = Parser::new( &"3 1 2 3 0.1 0.2 0.3 1.0\n3 3 2 1 0.2 0.3 0.4 1.0", Options::default(), ); parser.face_count = 2; let result = parser.parse_faces(); assert!(result.is_ok()); assert!(parser.next_line().is_none()); assert!(parser.document.faces.len() == 2); assert!(parser.document.faces[0].vertices == vec![1, 2, 3]); assert!( parser.document.faces[0].color == Some(Color { red: 0.1, green: 0.2, blue: 0.3, alpha: 1.0, }) ); assert!(parser.document.faces[1].vertices == vec![3, 2, 1]); assert!( parser.document.faces[1].color == Some(Color { red: 0.2, green: 0.3, blue: 0.4, alpha: 1.0, }) ); } #[test] fn parse_face() { let mut parser = Parser::new(&"", Options::default()); let result = parser.parse_face(0, &["3", "1", "2", "3"]); assert!(result.is_ok()); assert_eq!( result.unwrap(), Face { vertices: vec![1, 2, 3], color: None } ); } #[test] fn parse_face_more() { let mut parser = Parser::new(&"", Options::default()); let result = parser.parse_face(0, &["4", "2", "3", "1", "1337"]); assert!(result.is_ok()); assert_eq!( result.unwrap(), Face { vertices: vec![2, 3, 1, 1337], color: None } ); } #[test] fn parse_face_too_little_parts() { let mut parser = Parser::new(&"", Options::default()); let result = parser.parse_face(0, &["6", "1", "2", "3"]); assert!(result.is_err()); assert!(matches!( result.unwrap_err(), Error { kind: Kind::InvalidFace, .. 
} )); } #[test] fn parse_face_too_many_parts() { let mut parser = Parser::new(&"", Options::default()); let result = parser.parse_face(0, &["3", "2", "3", "2", "3"]); assert!(result.is_err()); assert!(matches!( result.unwrap_err(), Error { kind: Kind::InvalidColor, .. } )); } #[test] fn parse_face_no_number() { let mut parser = Parser::new(&"", Options::default()); let result = parser.parse_face(0, &["3", "1", "asdf", "3"]); assert!(result.is_err()); println!("{:?}", result); assert!(matches!( result.unwrap_err(), Error { kind: Kind::InvalidFaceIndex, .. } )); } #[test] fn parse_face_color() { let mut parser = Parser::new(&"", Options::default()); let result = parser.parse_face(0, &["3", "1", "2", "3", "0.1", "0.2", "0.3", "0.4"]); assert!(result.is_ok()); assert_eq!( result.unwrap(), Face { vertices: vec![1, 2, 3], color: Some(Color { red: 0.1, green: 0.2, blue: 0.3, alpha: 0.4 }) } ); } #[test] fn parse_face_color_fail() { let mut parser = Parser::new(&"", Options::default()); let result = parser.parse_face(0, &["3", "1", "2", "3", "0.1", "0.2"]); assert!(result.is_err()); assert!(matches!( result.unwrap_err(), Error { kind: Kind::InvalidColor, .. } )); } #[test] fn parse_face_color_fail_no_alpha() { let mut parser = Parser::new( &"", Options { color_format: ColorFormat::RGBFloat, ..Options::default() }, ); let result = parser.parse_face(0, &["3", "1", "2", "3", "0.1", "0.2", "0.3"]); assert!(result.is_ok()); assert_eq!( result.unwrap(), Face { vertices: vec![1, 2, 3], color: Some(Color { red: 0.1, green: 0.2, blue: 0.3, alpha: 1.0 }) } ); } #[test] fn parse_face_color_fail_no_alpha_fail() { let mut parser = Parser::new(&"", Options::default()); let result = parser.parse_face(0, &["3", "1", "2", "3", "0.1", "0.2", "0.3"]); assert!(result.is_err()); assert!(matches!( result.unwrap_err(), Error { kind: Kind::InvalidColor, .. } )); } #[test] fn parse_face_index() { let result = Parser::parse_face_indices(0, 3, &["1", "2", "3"]); assert!(result.is_ok()); assert_eq!(result.unwrap(), vec![1, 2, 3]); } #[test] fn parse_face_index_more() { let result = Parser::parse_face_indices(0, 5, &["1", "2", "3", "1", "1337"]); assert!(result.is_ok()); assert_eq!(result.unwrap(), vec![1, 2, 3, 1, 1337]); } #[test] fn parse_face_index_too_little_parts() { let result = Parser::parse_face_indices(0, 5, &["1", "2", "3"]); assert!(result.is_err()); assert!(matches!( result.unwrap_err(), Error { kind: Kind::InvalidFaceIndex, .. } )); } #[test] fn parse_face_index_too_many_parts() { let result = Parser::parse_face_indices(0, 3, &["1", "2", "3", "2", "3"]); assert!(result.is_ok()); assert_eq!(result.unwrap(), vec![1, 2, 3]); } #[test] fn parse_face_index_no_number() { let result = Parser::parse_face_indices(0, 3, &["1", "asdf", "3"]); assert!(result.is_err()); assert!(matches!( result.unwrap_err(), Error { kind: Kind::InvalidFaceIndex, .. } )); } }
28.846856
139
0.440741
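A hedged end-to-end sketch of driving the parser above on a one-triangle OFF document, written in the same style as the module's own tests; it assumes `Mesh` exposes its `vertices` and `faces` collections:

#[test]
fn parse_one_triangle() {
    // Header, counts (3 vertices, 1 face, 0 edges), three positions, one face.
    let source = "OFF\n3 1 0\n0.0 0.0 0.0\n1.0 0.0 0.0\n0.0 1.0 0.0\n3 0 1 2\n";
    let mesh = Parser::new(&source, Options::default())
        .parse()
        .expect("one triangle should parse");
    assert_eq!(mesh.vertices.len(), 3);
    assert_eq!(mesh.faces.len(), 1);
}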
1460635557fe0c572e41cc835ccdb97bb1f10c67
1,458
#[cfg(test)] mod process_token_tests { use super::jwt::JwtToken; use super::process_token; use actix_web::test::TestRequest; #[test] fn no_token_process_token() { let mock_request = TestRequest::with_header("test", "test").to_srv_request(); match process_token(&mock_request) { Err(message) => assert_eq!("there is no token", message), _ => panic!("No token in request header should fail"), } } #[test] fn incorrect_token() { let mock_request = TestRequest::with_header("user-token", "test").to_srv_request(); match process_token(&mock_request) { Err(message) => assert_eq!("Could not decode", message), _ => panic!("Incorrect token should error"), } } #[test] fn correct_token() { let encoded_token: String = JwtToken::encode(32); let mock_request = TestRequest::with_header("user-token", encoded_token).to_srv_request(); match process_token(&mock_request) { Ok(token) => assert_eq!("passed", token), _ => panic!("encoded token should pass"), } } } pub mod jwt; mod processes; use actix_web::dev::ServiceRequest; pub fn process_token(request: &ServiceRequest) -> Result<String, &'static str> { match processes::extract_header_token(request) { Ok(token) => processes::check_password(token), Err(message) => Err(message), } }
29.16
98
0.615912
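`processes::extract_header_token` is not shown above; judging from the error strings the tests assert on, one plausible shape for it might be the following hypothetical sketch (not the crate's actual code):

use actix_web::dev::ServiceRequest;

/// Hypothetical: pull the raw token out of the `user-token` header.
fn extract_header_token(request: &ServiceRequest) -> Result<String, &'static str> {
    match request.headers().get("user-token") {
        Some(value) => match value.to_str() {
            Ok(token) => Ok(token.to_string()),
            Err(_) => Err("token cannot be read as a string"),
        },
        None => Err("there is no token"),
    }
}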
e223aa7bc765621233d07465ab0865da9a849f4c
36,877
//! Formatters for logging `tracing` events. use super::time::{self, FormatTime, SystemTime}; use crate::{ field::{MakeOutput, MakeVisitor, RecordFields, VisitFmt, VisitOutput}, fmt::fmt_subscriber::FmtContext, fmt::fmt_subscriber::FormattedFields, registry::LookupSpan, }; use std::{ fmt::{self, Write}, iter, marker::PhantomData, }; use tracing_core::{ field::{self, Field, Visit}, span, Collect, Event, Level, }; #[cfg(feature = "tracing-log")] use tracing_log::NormalizeEvent; #[cfg(feature = "ansi")] use ansi_term::{Colour, Style}; #[cfg(feature = "json")] mod json; #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub use json::*; #[cfg(feature = "ansi")] mod pretty; #[cfg(feature = "ansi")] #[cfg_attr(docsrs, doc(cfg(feature = "ansi")))] pub use pretty::*; use fmt::{Debug, Display}; /// A type that can format a tracing `Event` for a `fmt::Write`. /// /// `FormatEvent` is primarily used in the context of [`fmt::Collector`] or [`fmt::Subscriber`]. Each time an event is /// dispatched to [`fmt::Collector`] or [`fmt::Subscriber`], the subscriber or layer forwards it to /// its associated `FormatEvent` to emit a log message. /// /// This trait is already implemented for function pointers with the same /// signature as `format_event`. /// /// [`fmt::Collector`]: super::Collector /// [`fmt::Subscriber`]: super::Subscriber pub trait FormatEvent<S, N> where S: Collect + for<'a> LookupSpan<'a>, N: for<'a> FormatFields<'a> + 'static, { /// Write a log message for `Event` in `Context` to the given `Write`. fn format_event( &self, ctx: &FmtContext<'_, S, N>, writer: &mut dyn fmt::Write, event: &Event<'_>, ) -> fmt::Result; } impl<S, N> FormatEvent<S, N> for fn(ctx: &FmtContext<'_, S, N>, &mut dyn fmt::Write, &Event<'_>) -> fmt::Result where S: Collect + for<'a> LookupSpan<'a>, N: for<'a> FormatFields<'a> + 'static, { fn format_event( &self, ctx: &FmtContext<'_, S, N>, writer: &mut dyn fmt::Write, event: &Event<'_>, ) -> fmt::Result { (*self)(ctx, writer, event) } } /// A type that can format a [set of fields] to a `fmt::Write`. /// /// `FormatFields` is primarily used in the context of [`fmt::Subscriber`]. Each /// time a span or event with fields is recorded, the subscriber will format /// those fields with its associated `FormatFields` implementation. /// /// [set of fields]: RecordFields /// [`fmt::Subscriber`]: super::Subscriber pub trait FormatFields<'writer> { /// Format the provided `fields` to the provided `writer`, returning a result. fn format_fields<R: RecordFields>( &self, writer: &'writer mut dyn fmt::Write, fields: R, ) -> fmt::Result; /// Record additional field(s) on an existing span. /// /// By default, this appends a space to the current set of fields if it is /// non-empty, and then calls `self.format_fields`. If different behavior is /// required, the default implementation of this method can be overridden. fn add_fields(&self, current: &'writer mut String, fields: &span::Record<'_>) -> fmt::Result { if !current.is_empty() { current.push(' '); } self.format_fields(current, fields) } } /// Returns the default configuration for an [event formatter]. /// /// Methods on the returned event formatter can be used for further /// configuration. For example: /// /// ```rust /// let format = tracing_subscriber::fmt::format() /// .without_time() // Don't include timestamps /// .with_target(false) // Don't include event targets. /// .with_level(false) // Don't include event levels. /// .compact(); // Use a more compact, abbreviated format. 
/// /// // Use the configured formatter when building a new subscriber. /// tracing_subscriber::fmt() /// .event_format(format) /// .init(); /// ``` pub fn format() -> Format { Format::default() } /// Returns the default configuration for a JSON [event formatter]. #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub fn json() -> Format<Json> { format().json() } /// Returns a [`FormatFields`] implementation that formats fields using the /// provided function or closure. /// pub fn debug_fn<F>(f: F) -> FieldFn<F> where F: Fn(&mut dyn fmt::Write, &Field, &dyn fmt::Debug) -> fmt::Result + Clone, { FieldFn(f) } /// A [`FormatFields`] implementation that formats fields by calling a function /// or closure. /// #[derive(Debug, Clone)] pub struct FieldFn<F>(F); /// The [visitor] produced by [`FieldFn`]'s [`MakeVisitor`] implementation. /// /// [visitor]: super::super::field::Visit /// [`MakeVisitor`]: super::super::field::MakeVisitor pub struct FieldFnVisitor<'a, F> { f: F, writer: &'a mut dyn fmt::Write, result: fmt::Result, } /// Marker for `Format` that indicates that the compact log format should be used. /// /// The compact format only includes the fields from the most recently entered span. #[derive(Default, Debug, Copy, Clone, Eq, PartialEq)] pub struct Compact; /// Marker for `Format` that indicates that the verbose log format should be used. /// /// The full format includes fields from all entered spans. #[derive(Default, Debug, Copy, Clone, Eq, PartialEq)] pub struct Full; /// A pre-configured event formatter. /// /// You will usually want to use this as the `FormatEvent` for a `FmtSubscriber`. /// /// The default logging format, [`Full`] includes all fields in each event and its containing /// spans. The [`Compact`] logging format includes only the fields from the most-recently-entered /// span. #[derive(Debug, Clone)] pub struct Format<F = Full, T = SystemTime> { format: F, pub(crate) timer: T, pub(crate) ansi: bool, pub(crate) display_target: bool, pub(crate) display_level: bool, pub(crate) display_thread_id: bool, pub(crate) display_thread_name: bool, } impl Default for Format<Full, SystemTime> { fn default() -> Self { Format { format: Full, timer: SystemTime, ansi: true, display_target: true, display_level: true, display_thread_id: false, display_thread_name: false, } } } impl<F, T> Format<F, T> { /// Use a less verbose output format. /// /// See [`Compact`]. pub fn compact(self) -> Format<Compact, T> { Format { format: Compact, timer: self.timer, ansi: self.ansi, display_target: false, display_level: self.display_level, display_thread_id: self.display_thread_id, display_thread_name: self.display_thread_name, } } /// Use an excessively pretty, human-readable output format. /// /// See [`Pretty`]. /// /// Note that this requires the "ansi" feature to be enabled. #[cfg(feature = "ansi")] #[cfg_attr(docsrs, doc(cfg(feature = "ansi")))] pub fn pretty(self) -> Format<Pretty, T> { Format { format: Pretty::default(), timer: self.timer, ansi: self.ansi, display_target: self.display_target, display_level: self.display_level, display_thread_id: self.display_thread_id, display_thread_name: self.display_thread_name, } } /// Use the full JSON format. /// /// The full format includes fields from all entered spans. 
/// /// # Example Output /// /// ```ignore,json /// {"timestamp":"Feb 20 11:28:15.096","level":"INFO","target":"mycrate","fields":{"message":"some message", "key": "value"}} /// ``` /// /// # Options /// /// - [`Format::flatten_event`] can be used to enable flattening event fields into the root /// object. /// #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub fn json(self) -> Format<Json, T> { Format { format: Json::default(), timer: self.timer, ansi: self.ansi, display_target: self.display_target, display_level: self.display_level, display_thread_id: self.display_thread_id, display_thread_name: self.display_thread_name, } } /// Use the given [`timer`] for log message timestamps. /// /// See [`time`] for the provided timer implementations. /// /// Note that using the `chrono` feature flag enables the /// additional time formatters [`ChronoUtc`] and [`ChronoLocal`]. /// /// [`timer`]: time::FormatTime /// [`ChronoUtc`]: time::ChronoUtc /// [`ChronoLocal`]: time::ChronoLocal pub fn with_timer<T2>(self, timer: T2) -> Format<F, T2> { Format { format: self.format, timer, ansi: self.ansi, display_target: self.display_target, display_level: self.display_level, display_thread_id: self.display_thread_id, display_thread_name: self.display_thread_name, } } /// Do not emit timestamps with log messages. pub fn without_time(self) -> Format<F, ()> { Format { format: self.format, timer: (), ansi: self.ansi, display_target: self.display_target, display_level: self.display_level, display_thread_id: self.display_thread_id, display_thread_name: self.display_thread_name, } } /// Enable ANSI terminal colors for formatted output. pub fn with_ansi(self, ansi: bool) -> Format<F, T> { Format { ansi, ..self } } /// Sets whether or not an event's target is displayed. pub fn with_target(self, display_target: bool) -> Format<F, T> { Format { display_target, ..self } } /// Sets whether or not an event's level is displayed. pub fn with_level(self, display_level: bool) -> Format<F, T> { Format { display_level, ..self } } /// Sets whether or not the [thread ID] of the current thread is displayed /// when formatting events /// /// [thread ID]: std::thread::ThreadId pub fn with_thread_ids(self, display_thread_id: bool) -> Format<F, T> { Format { display_thread_id, ..self } } /// Sets whether or not the [name] of the current thread is displayed /// when formatting events /// /// [name]: std::thread#naming-threads pub fn with_thread_names(self, display_thread_name: bool) -> Format<F, T> { Format { display_thread_name, ..self } } fn format_level(&self, level: Level, writer: &mut dyn fmt::Write) -> fmt::Result where F: LevelNames, { if self.display_level { let fmt_level = { #[cfg(feature = "ansi")] { F::format_level(level, self.ansi) } #[cfg(not(feature = "ansi"))] { F::format_level(level) } }; return write!(writer, "{} ", fmt_level); } Ok(()) } } #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] impl<T> Format<Json, T> { /// Use the full JSON format with the event's event fields flattened. /// /// # Example Output /// /// ```ignore,json /// {"timestamp":"Feb 20 11:28:15.096","level":"INFO","target":"mycrate", "message":"some message", "key": "value"} /// ``` /// See [`Json`] #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub fn flatten_event(mut self, flatten_event: bool) -> Format<Json, T> { self.format.flatten_event(flatten_event); self } /// Sets whether or not the formatter will include the current span in /// formatted events. 
/// /// See [`Json`] #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub fn with_current_span(mut self, display_current_span: bool) -> Format<Json, T> { self.format.with_current_span(display_current_span); self } /// Sets whether or not the formatter will include a list (from root to /// leaf) of all currently entered spans in formatted events. /// /// See [`Json`] #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] pub fn with_span_list(mut self, display_span_list: bool) -> Format<Json, T> { self.format.with_span_list(display_span_list); self } } impl<S, N, T> FormatEvent<S, N> for Format<Full, T> where S: Collect + for<'a> LookupSpan<'a>, N: for<'a> FormatFields<'a> + 'static, T: FormatTime, { fn format_event( &self, ctx: &FmtContext<'_, S, N>, writer: &mut dyn fmt::Write, event: &Event<'_>, ) -> fmt::Result { #[cfg(feature = "tracing-log")] let normalized_meta = event.normalized_metadata(); #[cfg(feature = "tracing-log")] let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata()); #[cfg(not(feature = "tracing-log"))] let meta = event.metadata(); #[cfg(feature = "ansi")] time::write(&self.timer, writer, self.ansi)?; #[cfg(not(feature = "ansi"))] time::write(&self.timer, writer)?; self.format_level(*meta.level(), writer)?; if self.display_thread_name { let current_thread = std::thread::current(); match current_thread.name() { Some(name) => { write!(writer, "{} ", FmtThreadName::new(name))?; } // fall-back to thread id when name is absent and ids are not enabled None if !self.display_thread_id => { write!(writer, "{:0>2?} ", current_thread.id())?; } _ => {} } } if self.display_thread_id { write!(writer, "{:0>2?} ", std::thread::current().id())?; } let full_ctx = { #[cfg(feature = "ansi")] { FullCtx::new(ctx, event.parent(), self.ansi) } #[cfg(not(feature = "ansi"))] { FullCtx::new(ctx, event.parent()) } }; write!(writer, "{}", full_ctx)?; if self.display_target { write!(writer, "{}: ", meta.target())?; } ctx.format_fields(writer, event)?; writeln!(writer) } } impl<S, N, T> FormatEvent<S, N> for Format<Compact, T> where S: Collect + for<'a> LookupSpan<'a>, N: for<'a> FormatFields<'a> + 'static, T: FormatTime, { fn format_event( &self, ctx: &FmtContext<'_, S, N>, writer: &mut dyn fmt::Write, event: &Event<'_>, ) -> fmt::Result { #[cfg(feature = "tracing-log")] let normalized_meta = event.normalized_metadata(); #[cfg(feature = "tracing-log")] let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata()); #[cfg(not(feature = "tracing-log"))] let meta = event.metadata(); #[cfg(feature = "ansi")] time::write(&self.timer, writer, self.ansi)?; #[cfg(not(feature = "ansi"))] time::write(&self.timer, writer)?; self.format_level(*meta.level(), writer)?; if self.display_thread_name { let current_thread = std::thread::current(); match current_thread.name() { Some(name) => { write!(writer, "{} ", FmtThreadName::new(name))?; } // fall-back to thread id when name is absent and ids are not enabled None if !self.display_thread_id => { write!(writer, "{:0>2?} ", current_thread.id())?; } _ => {} } } if self.display_thread_id { write!(writer, "{:0>2?} ", std::thread::current().id())?; } if self.display_target { let target = meta.target(); #[cfg(feature = "ansi")] let target = if self.ansi { Style::new().bold().paint(target) } else { Style::new().paint(target) }; write!(writer, "{}:", target)?; } ctx.format_fields(writer, event)?; let span = event .parent() .and_then(|id| ctx.ctx.span(&id)) .or_else(|| ctx.ctx.lookup_current()); let scope = 
span.into_iter().flat_map(|span| { let parents = span.parents(); iter::once(span).chain(parents) }); #[cfg(feature = "ansi")] let dimmed = if self.ansi { Style::new().dimmed() } else { Style::new() }; for span in scope { let exts = span.extensions(); if let Some(fields) = exts.get::<FormattedFields<N>>() { if !fields.is_empty() { #[cfg(feature = "ansi")] let fields = dimmed.paint(fields.as_str()); write!(writer, " {}", fields)?; } } } writeln!(writer) } } // === impl FormatFields === impl<'writer, M> FormatFields<'writer> for M where M: MakeOutput<&'writer mut dyn fmt::Write, fmt::Result>, M::Visitor: VisitFmt + VisitOutput<fmt::Result>, { fn format_fields<R: RecordFields>( &self, writer: &'writer mut dyn fmt::Write, fields: R, ) -> fmt::Result { let mut v = self.make_visitor(writer); fields.record(&mut v); v.finish() } } /// The default [`FormatFields`] implementation. /// #[derive(Debug)] pub struct DefaultFields { // reserve the ability to add fields to this without causing a breaking // change in the future. _private: (), } /// The [visitor] produced by [`DefaultFields`]'s [`MakeVisitor`] implementation. /// /// [visitor]: super::super::field::Visit /// [`MakeVisitor`]: super::super::field::MakeVisitor pub struct DefaultVisitor<'a> { writer: &'a mut dyn Write, is_empty: bool, result: fmt::Result, } impl DefaultFields { /// Returns a new default [`FormatFields`] implementation. /// pub fn new() -> Self { Self { _private: () } } } impl Default for DefaultFields { fn default() -> Self { Self::new() } } impl<'a> MakeVisitor<&'a mut dyn Write> for DefaultFields { type Visitor = DefaultVisitor<'a>; #[inline] fn make_visitor(&self, target: &'a mut dyn Write) -> Self::Visitor { DefaultVisitor::new(target, true) } } // === impl DefaultVisitor === impl<'a> DefaultVisitor<'a> { /// Returns a new default visitor that formats to the provided `writer`. /// /// # Arguments /// - `writer`: the writer to format to. /// - `is_empty`: whether or not any fields have been previously written to /// that writer. 
pub fn new(writer: &'a mut dyn Write, is_empty: bool) -> Self { Self { writer, is_empty, result: Ok(()), } } fn maybe_pad(&mut self) { if self.is_empty { self.is_empty = false; } else { self.result = write!(self.writer, " "); } } } impl<'a> field::Visit for DefaultVisitor<'a> { fn record_str(&mut self, field: &Field, value: &str) { if self.result.is_err() { return; } if field.name() == "message" { self.record_debug(field, &format_args!("{}", value)) } else { self.record_debug(field, &value) } } fn record_error(&mut self, field: &Field, value: &(dyn std::error::Error + 'static)) { if let Some(source) = value.source() { self.record_debug(field, &format_args!("{}, {}: {}", value, field, source)) } else { self.record_debug(field, &format_args!("{}", value)) } } fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { if self.result.is_err() { return; } self.maybe_pad(); self.result = match field.name() { "message" => write!(self.writer, "{:?}", value), // Skip fields that are actually log metadata that have already been handled #[cfg(feature = "tracing-log")] name if name.starts_with("log.") => Ok(()), name if name.starts_with("r#") => write!(self.writer, "{}={:?}", &name[2..], value), name => write!(self.writer, "{}={:?}", name, value), }; } } impl<'a> crate::field::VisitOutput<fmt::Result> for DefaultVisitor<'a> { fn finish(self) -> fmt::Result { self.result } } impl<'a> crate::field::VisitFmt for DefaultVisitor<'a> { fn writer(&mut self) -> &mut dyn fmt::Write { self.writer } } impl<'a> fmt::Debug for DefaultVisitor<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("DefaultVisitor") .field("writer", &format_args!("<dyn fmt::Write>")) .field("is_empty", &self.is_empty) .field("result", &self.result) .finish() } } struct FullCtx<'a, S, N> where S: Collect + for<'lookup> LookupSpan<'lookup>, N: for<'writer> FormatFields<'writer> + 'static, { ctx: &'a FmtContext<'a, S, N>, span: Option<&'a span::Id>, #[cfg(feature = "ansi")] ansi: bool, } impl<'a, S, N: 'a> FullCtx<'a, S, N> where S: Collect + for<'lookup> LookupSpan<'lookup>, N: for<'writer> FormatFields<'writer> + 'static, { #[cfg(feature = "ansi")] pub(crate) fn new( ctx: &'a FmtContext<'a, S, N>, span: Option<&'a span::Id>, ansi: bool, ) -> Self { Self { ctx, span, ansi } } #[cfg(not(feature = "ansi"))] pub(crate) fn new(ctx: &'a FmtContext<'a, S, N>, span: Option<&'a span::Id>) -> Self { Self { ctx, span } } fn bold(&self) -> Style { #[cfg(feature = "ansi")] { if self.ansi { return Style::new().bold(); } } Style::new() } } impl<'a, S, N> fmt::Display for FullCtx<'a, S, N> where S: Collect + for<'lookup> LookupSpan<'lookup>, N: for<'writer> FormatFields<'writer> + 'static, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let bold = self.bold(); let mut seen = false; let span = self .span .and_then(|id| self.ctx.ctx.span(&id)) .or_else(|| self.ctx.ctx.lookup_current()); let scope = span .into_iter() .flat_map(|span| span.from_root().chain(iter::once(span))); for span in scope { write!(f, "{}", bold.paint(span.metadata().name()))?; seen = true; let ext = span.extensions(); let fields = &ext .get::<FormattedFields<N>>() .expect("Unable to find FormattedFields in extensions; this is a bug"); if !fields.is_empty() { write!(f, "{}{}{}", bold.paint("{"), fields, bold.paint("}"))?; } f.write_char(':')?; } if seen { f.write_char(' ')?; } Ok(()) } } #[cfg(not(feature = "ansi"))] struct Style; #[cfg(not(feature = "ansi"))] impl Style { fn new() -> Self { Style } fn paint(&self, d: impl fmt::Display) 
-> impl fmt::Display { d } } struct FmtThreadName<'a> { name: &'a str, } impl<'a> FmtThreadName<'a> { pub(crate) fn new(name: &'a str) -> Self { Self { name } } } impl<'a> fmt::Display for FmtThreadName<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use std::sync::atomic::{ AtomicUsize, Ordering::{AcqRel, Acquire, Relaxed}, }; // Track the longest thread name length we've seen so far in an atomic, // so that it can be updated by any thread. static MAX_LEN: AtomicUsize = AtomicUsize::new(0); let len = self.name.len(); // Snapshot the current max thread name length. let mut max_len = MAX_LEN.load(Relaxed); while len > max_len { // Try to set a new max length, if it is still the value we took a // snapshot of. match MAX_LEN.compare_exchange(max_len, len, AcqRel, Acquire) { // We successfully set the new max value Ok(_) => break, // Another thread set a new max value since we last observed // it! It's possible that the new length is actually longer than // ours, so we'll loop again and check whether our length is // still the longest. If not, we'll just use the newer value. Err(actual) => max_len = actual, } } // pad thread name using `max_len` write!(f, "{:>width$}", self.name, width = max_len) } } trait LevelNames { const TRACE_STR: &'static str; const DEBUG_STR: &'static str; const INFO_STR: &'static str; const WARN_STR: &'static str; const ERROR_STR: &'static str; #[cfg(feature = "ansi")] fn format_level(level: Level, ansi: bool) -> FmtLevel<Self> { FmtLevel { level, ansi, _f: PhantomData, } } #[cfg(not(feature = "ansi"))] fn format_level(level: Level) -> FmtLevel<Self> { FmtLevel { level, _f: PhantomData, } } } impl LevelNames for Full { const TRACE_STR: &'static str = "TRACE"; const DEBUG_STR: &'static str = "DEBUG"; const INFO_STR: &'static str = " INFO"; const WARN_STR: &'static str = " WARN"; const ERROR_STR: &'static str = "ERROR"; } impl LevelNames for Compact { const TRACE_STR: &'static str = "T"; const DEBUG_STR: &'static str = "D"; const INFO_STR: &'static str = "I"; const WARN_STR: &'static str = "W"; const ERROR_STR: &'static str = "!"; } struct FmtLevel<F: ?Sized> { level: Level, #[cfg(feature = "ansi")] ansi: bool, _f: PhantomData<fn(F)>, } impl<F: LevelNames> fmt::Display for FmtLevel<F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { #[cfg(feature = "ansi")] { if self.ansi { return match self.level { Level::TRACE => write!(f, "{}", Colour::Purple.paint(F::TRACE_STR)), Level::DEBUG => write!(f, "{}", Colour::Blue.paint(F::DEBUG_STR)), Level::INFO => write!(f, "{}", Colour::Green.paint(F::INFO_STR)), Level::WARN => write!(f, "{}", Colour::Yellow.paint(F::WARN_STR)), Level::ERROR => write!(f, "{}", Colour::Red.paint(F::ERROR_STR)), }; } } match self.level { Level::TRACE => f.pad(F::TRACE_STR), Level::DEBUG => f.pad(F::DEBUG_STR), Level::INFO => f.pad(F::INFO_STR), Level::WARN => f.pad(F::WARN_STR), Level::ERROR => f.pad(F::ERROR_STR), } } } // === impl FieldFn === impl<'a, F> MakeVisitor<&'a mut dyn fmt::Write> for FieldFn<F> where F: Fn(&mut dyn fmt::Write, &Field, &dyn fmt::Debug) -> fmt::Result + Clone, { type Visitor = FieldFnVisitor<'a, F>; fn make_visitor(&self, writer: &'a mut dyn fmt::Write) -> Self::Visitor { FieldFnVisitor { writer, f: self.0.clone(), result: Ok(()), } } } impl<'a, F> Visit for FieldFnVisitor<'a, F> where F: Fn(&mut dyn fmt::Write, &Field, &dyn fmt::Debug) -> fmt::Result, { fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { if self.result.is_ok() { self.result = (self.f)(&mut self.writer, field,
value) } } } impl<'a, F> VisitOutput<fmt::Result> for FieldFnVisitor<'a, F> where F: Fn(&mut dyn fmt::Write, &Field, &dyn fmt::Debug) -> fmt::Result, { fn finish(self) -> fmt::Result { self.result } } impl<'a, F> VisitFmt for FieldFnVisitor<'a, F> where F: Fn(&mut dyn fmt::Write, &Field, &dyn fmt::Debug) -> fmt::Result, { fn writer(&mut self) -> &mut dyn fmt::Write { &mut *self.writer } } impl<'a, F> fmt::Debug for FieldFnVisitor<'a, F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FieldFnVisitor") .field("f", &format_args!("<Fn>")) .field("writer", &format_args!("<dyn fmt::Write>")) .field("result", &self.result) .finish() } } // === printing synthetic Span events === /// Configures what points in the span lifecycle are logged as events. /// /// See also [`with_span_events`](super::CollectorBuilder::with_span_events()). #[derive(Clone, Eq, PartialEq, Ord, PartialOrd)] pub struct FmtSpan(FmtSpanInner); impl FmtSpan { /// spans are ignored (this is the default) pub const NONE: FmtSpan = FmtSpan(FmtSpanInner::None); /// one event per enter/exit of a span pub const ACTIVE: FmtSpan = FmtSpan(FmtSpanInner::Active); /// one event when the span is dropped pub const CLOSE: FmtSpan = FmtSpan(FmtSpanInner::Close); /// events at all points (new, enter, exit, drop) pub const FULL: FmtSpan = FmtSpan(FmtSpanInner::Full); } impl Debug for FmtSpan { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.0 { FmtSpanInner::None => f.write_str("FmtSpan::NONE"), FmtSpanInner::Active => f.write_str("FmtSpan::ACTIVE"), FmtSpanInner::Close => f.write_str("FmtSpan::CLOSE"), FmtSpanInner::Full => f.write_str("FmtSpan::FULL"), } } } #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)] enum FmtSpanInner { /// spans are ignored (this is the default) None, /// one event per enter/exit of a span Active, /// one event when the span is dropped Close, /// events at all points (new, enter, exit, drop) Full, } pub(super) struct FmtSpanConfig { pub(super) kind: FmtSpan, pub(super) fmt_timing: bool, } impl FmtSpanConfig { pub(super) fn without_time(self) -> Self { Self { kind: self.kind, fmt_timing: false, } } pub(super) fn with_kind(self, kind: FmtSpan) -> Self { Self { kind, fmt_timing: self.fmt_timing, } } pub(super) fn trace_new(&self) -> bool { matches!(self.kind, FmtSpan::FULL) } pub(super) fn trace_active(&self) -> bool { matches!(self.kind, FmtSpan::ACTIVE | FmtSpan::FULL) } pub(super) fn trace_close(&self) -> bool { matches!(self.kind, FmtSpan::CLOSE | FmtSpan::FULL) } } impl Debug for FmtSpanConfig { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.kind.fmt(f) } } impl Default for FmtSpanConfig { fn default() -> Self { Self { kind: FmtSpan::NONE, fmt_timing: true, } } } #[repr(transparent)] pub(super) struct TimingDisplay(pub(super) u64); impl Display for TimingDisplay { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut t = self.0 as f64; for unit in ["ns", "µs", "ms", "s"].iter() { if t < 10.0 { return write!(f, "{:.2}{}", t, unit); } else if t < 100.0 { return write!(f, "{:.1}{}", t, unit); } else if t < 1000.0 { return write!(f, "{:.0}{}", t, unit); } t /= 1000.0; } write!(f, "{:.0}s", t * 1000.0) } } #[cfg(test)] pub(super) mod test { use crate::fmt::{test::MockMakeWriter, time::FormatTime}; use tracing::{ self, collect::with_default, dispatch::{set_default, Dispatch}, }; use super::TimingDisplay; use std::fmt; pub(crate) struct MockTime; impl FormatTime for MockTime { fn format_time(&self, w: &mut dyn fmt::Write) -> 
fmt::Result { write!(w, "fake time") } } #[cfg(feature = "ansi")] #[test] fn with_ansi_true() { let expected = "\u{1b}[2mfake time\u{1b}[0m \u{1b}[32m INFO\u{1b}[0m tracing_subscriber::fmt::format::test: hello\n"; test_ansi(true, expected); } #[cfg(feature = "ansi")] #[test] fn with_ansi_false() { let expected = "fake time INFO tracing_subscriber::fmt::format::test: hello\n"; test_ansi(false, expected); } #[cfg(not(feature = "ansi"))] #[test] fn without_ansi() { let make_writer = MockMakeWriter::default(); let expected = "fake time INFO tracing_subscriber::fmt::format::test: hello\n"; let subscriber = crate::fmt::Collector::builder() .with_writer(make_writer) .with_timer(MockTime); run_test(subscriber, make_writer, expected); } #[test] fn without_level() { let make_writer = MockMakeWriter::default(); let subscriber = crate::fmt::Collector::builder() .with_writer(make_writer.clone()) .with_level(false) .with_ansi(false) .with_timer(MockTime); let expected = "fake time tracing_subscriber::fmt::format::test: hello\n"; run_test(subscriber, make_writer, expected); } #[cfg(feature = "ansi")] fn test_ansi(is_ansi: bool, expected: &str) { let make_writer = MockMakeWriter::default(); let subscriber = crate::fmt::Collector::builder() .with_writer(make_writer.clone()) .with_ansi(is_ansi) .with_timer(MockTime); run_test(subscriber, make_writer, expected) } fn run_test(subscriber: impl Into<Dispatch>, buf: MockMakeWriter, expected: &str) { let _default = set_default(&subscriber.into()); tracing::info!("hello"); assert_eq!(expected, buf.get_string()) } #[test] fn overridden_parents() { let make_writer = MockMakeWriter::default(); let collector = crate::fmt::Collector::builder() .with_writer(make_writer.clone()) .with_level(false) .with_ansi(false) .with_timer(MockTime) .finish(); with_default(collector, || { let span1 = tracing::info_span!("span1"); let span2 = tracing::info_span!(parent: &span1, "span2"); tracing::info!(parent: &span2, "hello"); }); assert_eq!( "fake time span1:span2: tracing_subscriber::fmt::format::test: hello\n", make_writer.get_string() ); } #[test] fn overridden_parents_in_scope() { let make_writer = MockMakeWriter::default(); let subscriber = crate::fmt::Collector::builder() .with_writer(make_writer.clone()) .with_level(false) .with_ansi(false) .with_timer(MockTime) .finish(); with_default(subscriber, || { let span1 = tracing::info_span!("span1"); let span2 = tracing::info_span!(parent: &span1, "span2"); let span3 = tracing::info_span!("span3"); let _e3 = span3.enter(); tracing::info!("hello"); assert_eq!( "fake time span3: tracing_subscriber::fmt::format::test: hello\n", make_writer.get_string().as_str() ); tracing::info!(parent: &span2, "hello"); assert_eq!( "fake time span1:span2: tracing_subscriber::fmt::format::test: hello\n", make_writer.get_string().as_str() ); }); } #[test] fn format_nanos() { fn fmt(t: u64) -> String { TimingDisplay(t).to_string() } assert_eq!(fmt(1), "1.00ns"); assert_eq!(fmt(12), "12.0ns"); assert_eq!(fmt(123), "123ns"); assert_eq!(fmt(1234), "1.23µs"); assert_eq!(fmt(12345), "12.3µs"); assert_eq!(fmt(123456), "123µs"); assert_eq!(fmt(1234567), "1.23ms"); assert_eq!(fmt(12345678), "12.3ms"); assert_eq!(fmt(123456789), "123ms"); assert_eq!(fmt(1234567890), "1.23s"); assert_eq!(fmt(12345678901), "12.3s"); assert_eq!(fmt(123456789012), "123s"); assert_eq!(fmt(1234567890123), "1235s"); } }
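// A minimal sketch of the lock-free running-maximum pattern used by
// `FmtThreadName::fmt` above: a compare-exchange loop that only installs a new
// maximum if no other thread has raced in a larger one. `observe_len` is an
// assumed name for illustration; it is not part of the file above.
use std::sync::atomic::{AtomicUsize, Ordering::{AcqRel, Acquire, Relaxed}};

static MAX_LEN: AtomicUsize = AtomicUsize::new(0);

fn observe_len(len: usize) -> usize {
    // Snapshot the current maximum, then race to replace it if ours is larger.
    let mut max_len = MAX_LEN.load(Relaxed);
    while len > max_len {
        match MAX_LEN.compare_exchange(max_len, len, AcqRel, Acquire) {
            // We installed the new maximum.
            Ok(_) => return len,
            // Another thread stored a value since our snapshot; re-check
            // against it in case it is larger than ours.
            Err(actual) => max_len = actual,
        }
    }
    max_len
}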
30.227049
129
0.551997
d68f4c9d23c701a33085a9ea03fbb6eb7e4ddfe2
20,287
// Musium -- Music playback daemon with web-based library browser // Copyright 2021 Ruud van Asseldonk // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // A copy of the License has been included in the root of the repository. use std::fs; use std::io; use std::sync::Arc; use std::thread; use tiny_http::{Header, Request, Response, ResponseBox, Server}; use tiny_http::Method::{Get, Post, Put, self}; use crate::config::Config; use crate::database::Database; use crate::database; use crate::mvar::Var; use crate::player::{Millibel, Player}; use crate::prim::{ArtistId, AlbumId, TrackId}; use crate::scan::BackgroundScanner; use crate::serialization; use crate::string_utils::normalize_words; use crate::systemd; use crate::thumb_cache::ThumbCache; use crate::{MetaIndex, MemoryMetaIndex}; fn header_content_type(content_type: &str) -> Header { Header::from_bytes(&b"Content-Type"[..], content_type.as_bytes()) .expect("Failed to create content-type header, value is not ascii.") } fn header_expires_seconds(age_seconds: i64) -> Header { let now = chrono::Utc::now(); let at = now.checked_add_signed(chrono::Duration::seconds(age_seconds)).unwrap(); // The format from https://tools.ietf.org/html/rfc7234#section-5.3. let value = at.format("%a, %e %b %Y %H:%M:%S GMT").to_string(); Header::from_bytes(&b"Expires"[..], value) .expect("Failed to create content-type header, value is not ascii.") } pub struct MetaServer { config: Config, index_var: Var<MemoryMetaIndex>, thumb_cache_var: Var<ThumbCache>, player: Player, scanner: BackgroundScanner, } impl MetaServer { pub fn new( config: Config, index_var: Var<MemoryMetaIndex>, thumb_cache_var: Var<ThumbCache>, player: Player, ) -> MetaServer { MetaServer { config: config, index_var: index_var.clone(), thumb_cache_var: thumb_cache_var.clone(), player: player, scanner: BackgroundScanner::new( index_var, thumb_cache_var, ), } } fn handle_not_found(&self) -> ResponseBox { Response::from_string("Not Found") .with_status_code(404) // "404 Not Found" .boxed() } fn handle_bad_request(&self, reason: &'static str) -> ResponseBox { Response::from_string(reason) .with_status_code(400) // "400 Bad Request" .boxed() } fn handle_error(&self, reason: &'static str) -> ResponseBox { Response::from_string(reason) .with_status_code(500) // "500 Internal Server Error" .boxed() } fn handle_static_file(&self, fname: &str, mime_type: &str) -> ResponseBox { let file = match fs::File::open(fname) { Ok(f) => f, Err(..) => return self.handle_error("Failed to read static file."), }; Response::from_file(file) .with_header(header_content_type(mime_type)) .boxed() } fn handle_album_cover(&self, id: &str) -> ResponseBox { let album_id = match AlbumId::parse(id) { Some(aid) => aid, None => return self.handle_bad_request("Invalid album id."), }; let index = &*self.index_var.get(); let tracks = index.get_album_tracks(album_id); let (_track_id, track) = tracks.first().expect("Albums have at least one track."); let fname = index.get_filename(track.filename); let opts = claxon::FlacReaderOptions { metadata_only: true, read_picture: claxon::ReadPicture::CoverAsVec, read_vorbis_comment: false, }; let reader = match claxon::FlacReader::open_ext(fname, opts) { Ok(r) => r, Err(..) 
=> return self.handle_error("Failed to open flac file."), }; if let Some(cover) = reader.into_pictures().pop() { let content_type = header_content_type(&cover.mime_type); let data = cover.into_vec(); Response::from_data(data) .with_header(content_type) .with_header(header_expires_seconds(3600 * 24 * 30)) .boxed() } else { // The file has no embedded front cover. self.handle_not_found() } } fn handle_thumb(&self, id: &str) -> ResponseBox { // TODO: DRY this track id parsing and loading part. let album_id = match AlbumId::parse(id) { Some(aid) => aid, None => return self.handle_bad_request("Invalid album id."), }; let thumb_cache = self.thumb_cache_var.get(); let img = match thumb_cache.get(album_id) { None => return self.handle_not_found(), Some(bytes) => bytes, }; Response::from_data(img) .with_header(header_content_type("image/jpeg")) .with_header(header_expires_seconds(3600 * 24 * 30)) .boxed() } fn handle_waveform(&self, db: &mut Database, id: &str) -> ResponseBox { use crate::waveform::Waveform; // TODO: DRY this track id parsing and loading part. let track_id = match TrackId::parse(id) { Some(tid) => tid, None => return self.handle_bad_request("Invalid track id."), }; let waveform = match db.select_track_waveform(track_id) { Ok(Some(data)) => Waveform::from_bytes(data), Ok(None) => return self.handle_not_found(), Err(err) => { eprintln!("Error while loading waveform: {:?}", err); return self.handle_error("Database error."); } }; let mut svg = Vec::new(); waveform.write_svg(&mut svg).expect("Write to memory does not fail."); Response::from_data(svg) .with_header(header_content_type("image/svg+xml")) .with_header(header_expires_seconds(3600 * 24 * 30)) .boxed() } fn handle_track(&self, path: &str) -> ResponseBox { // Track urls are of the form `/track/f7c153f2b16dc101.flac`. if !path.ends_with(".flac") { return self.handle_bad_request("Expected a path ending in .flac.") } let id_part = &path[..path.len() - ".flac".len()]; let track_id = match TrackId::parse(id_part) { Some(tid) => tid, None => return self.handle_bad_request("Invalid track id."), }; let index = &*self.index_var.get(); let track = match index.get_track(track_id) { Some(t) => t, None => return self.handle_not_found(), }; let fname = index.get_filename(track.filename); // TODO: Rather than reading the file into memory in userspace, // use sendfile. // TODO: Handle requests with Range header. 
let file = match fs::File::open(fname) { Ok(f) => f, Err(_) => return self.handle_error("Failed to open file."), }; Response::from_file(file) .with_header(header_content_type("audio/flac")) .boxed() } fn handle_album(&self, id: &str) -> ResponseBox { let album_id = match AlbumId::parse(id) { Some(aid) => aid, None => return self.handle_bad_request("Invalid album id."), }; let index = &*self.index_var.get(); let album = match index.get_album(album_id) { Some(a) => a, None => return self.handle_not_found(), }; let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); serialization::write_album_json(index, &mut w, album_id, album).unwrap(); Response::from_data(w.into_inner()) .with_header(header_content_type("application/json")) .boxed() } fn handle_artist(&self, id: &str) -> ResponseBox { let artist_id = match ArtistId::parse(id) { Some(aid) => aid, None => return self.handle_bad_request("Invalid artist id."), }; let index = &*self.index_var.get(); let artist = match index.get_artist(artist_id) { Some(a) => a, None => return self.handle_not_found(), }; let albums = index.get_albums_by_artist(artist_id); let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); serialization::write_artist_json(index, &mut w, artist, albums).unwrap(); Response::from_data(w.into_inner()) .with_header(header_content_type("application/json")) .boxed() } fn handle_albums(&self) -> ResponseBox { let index = &*self.index_var.get(); let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); serialization::write_albums_json(index, &mut w).unwrap(); Response::from_data(w.into_inner()) .with_header(header_content_type("application/json")) .boxed() } fn handle_queue(&self) -> ResponseBox { let index = &*self.index_var.get(); let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); let queue = self.player.get_queue(); serialization::write_queue_json( index, &mut w, &queue.tracks[..], ).unwrap(); Response::from_data(w.into_inner()) .with_header(header_content_type("application/json")) .boxed() } fn handle_enqueue(&self, id: &str) -> ResponseBox { let track_id = match TrackId::parse(id) { Some(tid) => tid, None => return self.handle_bad_request("Invalid track id."), }; let index = &*self.index_var.get(); // Confirm that the track exists before we enqueue it. 
let _track = match index.get_track(track_id) { Some(t) => t, None => return self.handle_not_found(), }; let queue_id = self.player.enqueue(index, track_id); let queue_id_json = format!(r#""{}""#, queue_id); Response::from_string(queue_id_json) .with_status_code(201) // "201 Created" .with_header(header_content_type("application/json")) .boxed() } fn handle_get_volume(&self) -> ResponseBox { let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); let volume = self.player.get_volume(); serialization::write_volume_json(&mut w, volume).unwrap(); Response::from_data(w.into_inner()) .with_header(header_content_type("application/json")) .boxed() } fn handle_change_volume(&self, add: Millibel) -> ResponseBox { let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); let volume = self.player.change_volume(add); serialization::write_volume_json(&mut w, volume).unwrap(); Response::from_data(w.into_inner()) .with_header(header_content_type("application/json")) .boxed() } fn handle_search(&self, raw_query: &str) -> ResponseBox { let mut opt_query = None; for (k, v) in url::form_urlencoded::parse(raw_query.as_bytes()) { if k == "q" { opt_query = Some(v); } }; let query = match opt_query { Some(q) => q, None => return self.handle_bad_request("Missing search query."), }; let mut words = Vec::new(); normalize_words(query.as_ref(), &mut words); let mut artists = Vec::new(); let mut albums = Vec::new(); let mut tracks = Vec::new(); let index = &*self.index_var.get(); index.search_artist(&words[..], &mut artists); index.search_album(&words[..], &mut albums); index.search_track(&words[..], &mut tracks); // Cap the number of search results we serve. We can easily produce many // many results (especially when searching for "t", a prefix of "the", // or when searching "a"). Searching is quite fast, but parsing and // rendering the results in the frontend is slow, and having this many // results is not useful anyway, so we cap them. let n_artists = artists.len().min(250); let n_albums = albums.len().min(250); let n_tracks = tracks.len().min(250); let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); serialization::write_search_results_json( index, &mut w, &artists[..n_artists], &albums[..n_albums], &tracks[..n_tracks], ).unwrap(); Response::from_data(w.into_inner()) .with_status_code(200) .with_header(header_content_type("application/json")) .boxed() } fn handle_get_scan_status(&self) -> ResponseBox { // TODO: We could add a long polling query parameter here, and version // the status. Then in the request, include the previous version. If the // current version is newer, respond immediately. If not, block for some // time to wait for a new status, then return the current status. That // way, we could make extremely responsive status updates. 
let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); let status = self.scanner.get_status(); serialization::write_scan_status_json(&mut w, status).unwrap(); Response::from_data(w.into_inner()) .with_header(header_content_type("application/json")) .boxed() } fn handle_start_scan(&self) -> ResponseBox { let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); let status = self.scanner.start(self.config.clone()); serialization::write_scan_status_json(&mut w, Some(status)).unwrap(); Response::from_data(w.into_inner()) .with_header(header_content_type("application/json")) .boxed() } fn handle_stats(&self) -> ResponseBox { let index = &*self.index_var.get(); let buffer = Vec::new(); let mut w = io::Cursor::new(buffer); serialization::write_stats_json(index, &mut w).unwrap(); Response::from_data(w.into_inner()) .with_header(header_content_type("application/json")) .boxed() } /// Router function for all /api/«endpoint» calls. fn handle_api_request( &self, db: &mut Database, method: &Method, endpoint: &str, arg: Option<&str>, query: &str, ) -> ResponseBox { match (method, endpoint, arg) { // API endpoints. (&Get, "cover", Some(t)) => self.handle_album_cover(t), (&Get, "thumb", Some(t)) => self.handle_thumb(t), (&Get, "waveform", Some(t)) => self.handle_waveform(db, t), (&Get, "track", Some(t)) => self.handle_track(t), (&Get, "album", Some(a)) => self.handle_album(a), (&Get, "artist", Some(a)) => self.handle_artist(a), (&Get, "albums", None) => self.handle_albums(), (&Get, "search", None) => self.handle_search(query), (&Get, "stats", None) => self.handle_stats(), // Play queue manipulation. (&Get, "queue", None) => self.handle_queue(), (&Put, "queue", Some(t)) => self.handle_enqueue(t), // Volume control, volume up/down change the volume by 1 dB. (&Get, "volume", None) => self.handle_get_volume(), (&Post, "volume", Some("up")) => self.handle_change_volume(Millibel( 1_00)), (&Post, "volume", Some("down")) => self.handle_change_volume(Millibel(-1_00)), // Background library scanning. (&Get, "scan", Some("status")) => self.handle_get_scan_status(), (&Post, "scan", Some("start")) => self.handle_start_scan(), _ => self.handle_bad_request("No such (method, endpoint, argument) combination."), } } fn handle_request(&self, db: &mut Database, request: Request) { // Break url into the part before the ? and the part after. The part // before we split on slashes. let mut url_iter = request.url().splitn(2, '?'); // The individual parts in between the slashes. let mut p0 = None; let mut p1 = None; let mut p2 = None; if let Some(base) = url_iter.next() { let mut parts = base.splitn(4, '/').filter(|x| x.len() > 0); p0 = parts.next(); p1 = parts.next(); p2 = parts.next(); } let query = url_iter.next().unwrap_or(""); // A very basic router. See also docs/api.md for an overview. let response = match (request.method(), p0, p1) { // API endpoints go through the API router, to keep this match arm // a bit more concise. (method, Some("api"), Some(endpoint)) => self.handle_api_request(db, method, endpoint, p2, query), // Web endpoints. 
(&Get, None, None) => self.handle_static_file("app/index.html", "text/html"), (&Get, Some("style.css"), None) => self.handle_static_file("app/style.css", "text/css"), (&Get, Some("dark.css"), None) => self.handle_static_file("app/dark.css", "text/css"), (&Get, Some("manifest.json"), None) => self.handle_static_file("app/manifest.json", "text/javascript"), (&Get, Some("app.js"), None) => self.handle_static_file("app/output/app.js", "text/javascript"), (&Get, Some(path), None) if path.ends_with(".svg") => { let mut file_path = "app/".to_string(); file_path.push_str(path); self.handle_static_file(&file_path, "image/svg+xml") } // Fallback. (&Get, _, _) => self.handle_not_found(), _ => self.handle_bad_request("Expected a GET request."), }; match request.respond(response) { Ok(()) => {}, Err(err) => println!("Error while responding to request: {:?}", err), } } } pub fn serve(bind: &str, service: Arc<MetaServer>) -> ! { let server = match Server::http(bind) { Ok(s) => s, Err(..) => { eprintln!("Failed to start server, could not bind to {}.", bind); std::process::exit(1); } }; let server = Arc::new(server); // Browsers do not make more than 8 requests in parallel, so having more // handler threads is not useful; I expect only a single user to be // browsing at a time. let n_threads = 8; let mut threads = Vec::with_capacity(n_threads); for i in 0..n_threads { let server_i = server.clone(); let service_i = service.clone(); let name = format!("http_server_{}", i); let builder = thread::Builder::new().name(name); let join_handle = builder.spawn(move || { let connection = database::connect_readonly(service_i.config.db_path()) .expect("Failed to connect to database."); let mut db = Database::new(&connection) .expect("Failed to initialize database."); loop { let request = match server_i.recv() { Ok(rq) => rq, Err(e) => { println!("Error: {:?}", e); break; } }; service_i.handle_request(&mut db, request); } }).unwrap(); threads.push(join_handle); } // When running under systemd, the service is ready when the server is // accepting connections, which is now. systemd::notify_ready_if_can_notify(); // Block until the server threads exit, which will not happen. for handle in threads { handle.join().unwrap(); } // This code is unreachable, but serves to satisfy the typechecker. loop {} }
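// A minimal sketch of the URL splitting done by `handle_request` above: break
// the URL into a path and a query on the first '?', then keep at most the
// first three non-empty path segments. `split_url` is an assumed name for
// illustration; it is not part of the file above.
fn split_url(url: &str) -> (Option<&str>, Option<&str>, Option<&str>, &str) {
    let mut url_iter = url.splitn(2, '?');
    let base = url_iter.next().unwrap_or("");
    let query = url_iter.next().unwrap_or("");
    let mut parts = base.splitn(4, '/').filter(|x| !x.is_empty());
    (parts.next(), parts.next(), parts.next(), query)
}

// For example, "/api/track/f7c153f2b16dc101.flac?x=1" splits into
// (Some("api"), Some("track"), Some("f7c153f2b16dc101.flac"), "x=1").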
36.885455
115
0.567063
e9f26cb39169caa782a8bd6b96c353a68a6befe5
279
// compile-flags: -Z parse-only

// ignore-tidy-linelength

struct Foo;

impl Foo {
    fn foo() {}

    #[stable(feature = "rust1", since = "1.0.0")]
} //~ ERROR expected one of `async`, `const`, `crate`, `default`, `existential`, `extern`, `fn`, `pub`, `type`, or

fn main() {}
21.461538
114
0.584229
8a2f83e83d9cca779c0cd8e02eeab8db2661fd6c
65
pub mod rpm_tape;
pub mod rpm_tape_box;
pub mod rpm_tape_tracks;
16.25
24
0.815385
8ab384f8e324f0187ce30718a16aded1ddf16719
440
pub fn part1(_input: &str) -> ! {
    todo!()
}

pub fn part2(_input: &str) -> ! {
    todo!()
}

#[allow(unreachable_code)]
#[cfg(test)]
#[test]
fn part1_test() {
    assert_eq!(
        part1(&std::fs::read_to_string("input/day07.txt").unwrap()),
        ()
    );
}

#[allow(unreachable_code)]
#[cfg(test)]
#[test]
fn part2_test() {
    assert_eq!(
        part2(&std::fs::read_to_string("input/day07.txt").unwrap()),
        ()
    );
}
16.296296
68
0.540909
de35c60ce348b4157914cd91505bfb1f192fad21
32,804
use super::config::ClientConfig; use super::user_config::UserConfig; use failure::{err_msg, format_err}; use rspotify::spotify::client::Spotify; use rspotify::spotify::model::album::{FullAlbum, SavedAlbum, SimplifiedAlbum}; use rspotify::spotify::model::artist::FullArtist; use rspotify::spotify::model::context::FullPlayingContext; use rspotify::spotify::model::device::DevicePayload; use rspotify::spotify::model::offset::for_position; use rspotify::spotify::model::offset::Offset; use rspotify::spotify::model::page::{CursorBasedPage, Page}; use rspotify::spotify::model::playing::PlayHistory; use rspotify::spotify::model::playlist::{PlaylistTrack, SimplifiedPlaylist}; use rspotify::spotify::model::search::{ SearchAlbums, SearchArtists, SearchPlaylists, SearchTracks, }; use rspotify::spotify::model::track::{FullTrack, SavedTrack, SimplifiedTrack}; use rspotify::spotify::model::user::PrivateUser; use rspotify::spotify::senum::{Country, RepeatState}; use std::collections::HashSet; use std::time::Instant; use tui::layout::Rect; use clipboard::ClipboardContext; use clipboard::ClipboardProvider; pub const LIBRARY_OPTIONS: [&str; 6] = [ "Made For You", "Recently Played", "Liked Songs", "Albums", "Artists", "Podcasts", ]; const DEFAULT_ROUTE: Route = Route { id: RouteId::Home, active_block: ActiveBlock::Empty, hovered_block: ActiveBlock::Library, }; #[derive(Clone)] pub struct ScrollableResultPages<T> { index: usize, pages: Vec<T>, } impl<T> ScrollableResultPages<T> { pub fn new() -> ScrollableResultPages<T> { ScrollableResultPages { index: 0, pages: vec![], } } pub fn get_results(&self, at_index: Option<usize>) -> Option<&T> { match at_index { Some(index) => self.pages.get(index), None => self.pages.get(self.index), } } pub fn add_pages(&mut self, new_pages: T) { self.pages.push(new_pages); // Whenever a new page is added, set the active index to the end of the vector self.index = self.pages.len() - 1; } } #[derive(Default)] pub struct SpotifyResultAndSelectedIndex<T> { pub index: usize, pub result: T, } #[derive(Clone)] pub struct Library { pub selected_index: usize, pub saved_tracks: ScrollableResultPages<Page<SavedTrack>>, pub saved_albums: ScrollableResultPages<Page<SavedAlbum>>, pub saved_artists: ScrollableResultPages<CursorBasedPage<FullArtist>>, } #[derive(Clone)] pub struct PlaybackParams { context_uri: Option<String>, uris: Option<Vec<String>>, offset: Option<Offset>, } #[derive(PartialEq, Debug)] pub enum SearchResultBlock { AlbumSearch, SongSearch, ArtistSearch, PlaylistSearch, Empty, } #[derive(Clone, Copy, PartialEq, Debug)] pub enum ActiveBlock { PlayBar, AlbumTracks, AlbumList, Artist, Empty, Error, HelpMenu, Home, Input, Library, MyPlaylists, Podcasts, RecentlyPlayed, SearchResultBlock, SelectDevice, TrackTable, MadeForYou, Artists, } #[derive(Clone, PartialEq, Debug)] pub enum RouteId { AlbumTracks, AlbumList, Artist, Error, Home, RecentlyPlayed, Search, SelectedDevice, TrackTable, MadeForYou, Artists, Podcasts, } pub struct Route { pub id: RouteId, pub active_block: ActiveBlock, pub hovered_block: ActiveBlock, } // Is it possible to compose enums? 
#[derive(PartialEq, Debug)] pub enum TrackTableContext { MyPlaylists, AlbumSearch, PlaylistSearch, SavedTracks, } #[derive(Clone, PartialEq, Debug)] pub enum AlbumTableContext { Simplified, Full, } pub struct SearchResult { pub albums: Option<SearchAlbums>, pub artists: Option<SearchArtists>, pub playlists: Option<SearchPlaylists>, pub selected_album_index: Option<usize>, pub selected_artists_index: Option<usize>, pub selected_playlists_index: Option<usize>, pub selected_tracks_index: Option<usize>, pub tracks: Option<SearchTracks>, pub hovered_block: SearchResultBlock, pub selected_block: SearchResultBlock, } #[derive(Default)] pub struct TrackTable { pub tracks: Vec<FullTrack>, pub selected_index: usize, pub context: Option<TrackTableContext>, } #[derive(Clone)] pub struct SelectedAlbum { pub album: SimplifiedAlbum, pub tracks: Page<SimplifiedTrack>, pub selected_index: usize, } #[derive(Clone)] pub struct SelectedFullAlbum { pub album: FullAlbum, pub selected_index: usize, } #[derive(Clone)] pub struct ArtistAlbums { pub artist_name: String, pub albums: Page<SimplifiedAlbum>, pub selected_index: usize, } pub struct App { instant_since_last_current_playback_poll: Instant, navigation_stack: Vec<Route>, pub home_scroll: u16, pub client_config: ClientConfig, pub user_config: UserConfig, pub artists: Vec<FullArtist>, pub artist_albums: Option<ArtistAlbums>, pub album_table_context: AlbumTableContext, pub saved_album_tracks_index: usize, pub api_error: String, pub current_playback_context: Option<FullPlayingContext>, pub devices: Option<DevicePayload>, // Inputs: // input is the string for input; // input_idx is the index of the cursor in terms of character; // input_cursor_position is the sum of the width of charaters preceding the cursor. // Reason for this complication is due to non-ASCII characters, they may // take more than 1 bytes to store and more than 1 character width to display. 
pub input: Vec<char>, pub input_idx: usize, pub input_cursor_position: u16, pub liked_song_ids_set: HashSet<String>, pub large_search_limit: u32, pub library: Library, pub playlist_offset: u32, pub playback_params: PlaybackParams, pub playlist_tracks: Vec<PlaylistTrack>, pub playlists: Option<Page<SimplifiedPlaylist>>, pub recently_played: SpotifyResultAndSelectedIndex<Option<CursorBasedPage<PlayHistory>>>, pub search_results: SearchResult, pub selected_album: Option<SelectedAlbum>, pub selected_album_full: Option<SelectedFullAlbum>, pub selected_device_index: Option<usize>, pub selected_playlist_index: Option<usize>, pub size: Rect, pub small_search_limit: u32, pub song_progress_ms: u128, pub spotify: Option<Spotify>, pub track_table: TrackTable, pub user: Option<PrivateUser>, pub album_list_index: usize, pub artists_list_index: usize, pub clipboard_context: Option<ClipboardContext>, } impl App { pub fn new() -> App { App { album_table_context: AlbumTableContext::Full, album_list_index: 0, artists_list_index: 0, artists: vec![], artist_albums: None, user_config: UserConfig::new(), client_config: Default::default(), saved_album_tracks_index: 0, recently_played: Default::default(), size: Rect::default(), selected_album: None, selected_album_full: None, home_scroll: 0, library: Library { saved_tracks: ScrollableResultPages::new(), saved_albums: ScrollableResultPages::new(), saved_artists: ScrollableResultPages::new(), selected_index: 0, }, liked_song_ids_set: HashSet::new(), navigation_stack: vec![DEFAULT_ROUTE], large_search_limit: 20, small_search_limit: 4, api_error: String::new(), current_playback_context: None, devices: None, input: vec![], input_idx: 0, input_cursor_position: 0, playlist_offset: 0, playlist_tracks: vec![], playlists: None, search_results: SearchResult { hovered_block: SearchResultBlock::SongSearch, selected_block: SearchResultBlock::Empty, albums: None, artists: None, playlists: None, selected_album_index: None, selected_artists_index: None, selected_playlists_index: None, selected_tracks_index: None, tracks: None, }, song_progress_ms: 0, selected_device_index: None, selected_playlist_index: None, spotify: None, track_table: Default::default(), playback_params: PlaybackParams { context_uri: None, uris: None, offset: None, }, user: None, instant_since_last_current_playback_poll: Instant::now(), clipboard_context: None, } } pub fn get_user(&mut self) { if let Some(spotify) = &self.spotify { match spotify.current_user() { Ok(user) => { self.user = Some(user); } Err(e) => { self.handle_error(e); } } } } pub fn handle_get_devices(&mut self) { if let Some(spotify) = &self.spotify { if let Ok(result) = spotify.device() { self.push_navigation_stack(RouteId::SelectedDevice, ActiveBlock::SelectDevice); if !result.devices.is_empty() { self.devices = Some(result); // Select the first device in the list self.selected_device_index = Some(0); } } } } pub fn get_current_playback(&mut self) { if let Some(spotify) = &self.spotify { let context = spotify.current_playback(None); if let Ok(ctx) = context { if let Some(c) = ctx { self.current_playback_context = Some(c.clone()); self.instant_since_last_current_playback_poll = Instant::now(); if let Some(track) = c.item { if let Some(track_id) = track.id { self.current_user_saved_tracks_contains(vec![track_id]); } } } }; } } pub fn current_user_saved_tracks_contains(&mut self, ids: Vec<String>) { if let Some(spotify) = &self.spotify { match spotify.current_user_saved_tracks_contains(&ids) { Ok(is_saved_vec) => { for (i, id) in 
ids.iter().enumerate() { if let Some(is_liked) = is_saved_vec.get(i) { if *is_liked { self.liked_song_ids_set.insert(id.to_string()); } else { // The song is not liked, so check if it should be removed if self.liked_song_ids_set.contains(id) { self.liked_song_ids_set.remove(id); } } }; } } Err(e) => { self.handle_error(e); } } } } fn poll_current_playback(&mut self) { // Poll every 5 seconds let poll_interval_ms = 5_000; let elapsed = self .instant_since_last_current_playback_poll .elapsed() .as_millis(); if elapsed >= poll_interval_ms { self.get_current_playback(); } } pub fn update_on_tick(&mut self) { self.poll_current_playback(); if let Some(current_playback_context) = &self.current_playback_context { if let (Some(track), Some(progress_ms)) = ( &current_playback_context.item, current_playback_context.progress_ms, ) { if current_playback_context.is_playing { let elapsed = self .instant_since_last_current_playback_poll .elapsed() .as_millis() + u128::from(progress_ms); if elapsed < u128::from(track.duration_ms) { self.song_progress_ms = elapsed; } else { self.song_progress_ms = track.duration_ms.into(); } } } } } fn seek(&mut self, position_ms: u32) { if let (Some(spotify), Some(device_id)) = (&self.spotify, &self.client_config.device_id) { match spotify.seek_track(position_ms, Some(device_id.to_string())) { Ok(()) => { self.get_current_playback(); } Err(e) => { self.handle_error(e); } }; } } pub fn seek_forwards(&mut self) { if let Some(current_playback_context) = &self.current_playback_context { if let Some(track) = &current_playback_context.item { if track.duration_ms - self.song_progress_ms as u32 > self.user_config.behavior.seek_milliseconds { self.seek( self.song_progress_ms as u32 + self.user_config.behavior.seek_milliseconds, ); } else { self.next_track(); } } } } pub fn seek_backwards(&mut self) { let new_progress = if self.song_progress_ms as u32 > self.user_config.behavior.seek_milliseconds { self.song_progress_ms as u32 - self.user_config.behavior.seek_milliseconds } else { 0u32 }; self.seek(new_progress); } pub fn pause_playback(&mut self) { if let (Some(spotify), Some(device_id)) = (&self.spotify, &self.client_config.device_id) { match spotify.pause_playback(Some(device_id.to_string())) { Ok(()) => { self.get_current_playback(); } Err(e) => { self.handle_error(e); } }; } } fn change_volume(&mut self, volume_percent: u8) { if let (Some(spotify), Some(device_id), Some(context)) = ( &self.spotify, &self.client_config.device_id, &mut self.current_playback_context, ) { match spotify.volume(volume_percent, Some(device_id.to_string())) { Ok(()) => { context.device.volume_percent = volume_percent.into(); } Err(e) => { self.handle_error(e); } }; } } pub fn increase_volume(&mut self) { if let Some(context) = self.current_playback_context.clone() { let next_volume = context.device.volume_percent as u8 + 10; if next_volume <= 100 { self.change_volume(next_volume); } } } pub fn decrease_volume(&mut self) { if let Some(context) = self.current_playback_context.clone() { let volume = context.device.volume_percent; if volume >= 10 { let next_volume = context.device.volume_percent as u8 - 10; self.change_volume(next_volume); } } } pub fn handle_error(&mut self, e: failure::Error) { self.push_navigation_stack(RouteId::Error, ActiveBlock::Error); self.api_error = e.to_string(); } pub fn toggle_playback(&mut self) { if let Some(current_playback_context) = &self.current_playback_context { if current_playback_context.is_playing { self.pause_playback(); } else { // When no offset or uris are 
passed, spotify will resume current playback self.start_playback(None, None, None); } } } pub fn next_track(&mut self) { if let (Some(spotify), Some(device_id)) = (&self.spotify, &self.client_config.device_id) { match spotify.next_track(Some(device_id.to_string())) { Ok(()) => { self.get_current_playback(); } Err(e) => { self.handle_error(e); } }; } } pub fn previous_track(&mut self) { if let (Some(spotify), Some(device_id)) = (&self.spotify, &self.client_config.device_id) { match spotify.previous_track(Some(device_id.to_string())) { Ok(()) => { self.get_current_playback(); } Err(e) => { self.handle_error(e); } }; } } pub fn start_playback( &mut self, context_uri: Option<String>, uris: Option<Vec<String>>, offset: Option<usize>, ) { let (uris, context_uri) = if context_uri.is_some() { (None, context_uri) } else if uris.is_some() { (uris, None) } else { (None, None) }; let offset = offset.and_then(|o| for_position(o as u32)); let result = match &self.client_config.device_id { Some(device_id) => match &self.spotify { Some(spotify) => spotify.start_playback( Some(device_id.to_string()), context_uri.clone(), uris.clone(), offset.clone(), None, ), None => Err(err_msg("Spotify is not ready to be used".to_string())), }, None => Err(err_msg("No device_id selected")), }; match result { Ok(()) => { self.get_current_playback(); self.song_progress_ms = 0; self.playback_params = PlaybackParams { context_uri, uris, offset, } } Err(e) => { self.handle_error(e); } } } pub fn get_playlist_tracks(&mut self, playlist_id: String) { match &self.spotify { Some(spotify) => { if let Ok(playlist_tracks) = spotify.user_playlist_tracks( "spotify", &playlist_id, None, Some(self.large_search_limit), Some(self.playlist_offset), None, ) { self.set_playlist_tracks_to_table(&playlist_tracks); self.playlist_tracks = playlist_tracks.items; if self.get_current_route().id != RouteId::TrackTable { self.push_navigation_stack(RouteId::TrackTable, ActiveBlock::TrackTable); }; }; } None => {} } } // The navigation_stack actually only controls the large block to the right of `library` and // `playlists` pub fn push_navigation_stack( &mut self, next_route_id: RouteId, next_active_block: ActiveBlock, ) { self.navigation_stack.push(Route { id: next_route_id, active_block: next_active_block, hovered_block: next_active_block, }); } pub fn pop_navigation_stack(&mut self) -> Option<Route> { if self.navigation_stack.len() == 1 { None } else { self.navigation_stack.pop() } } pub fn get_current_route(&self) -> &Route { match self.navigation_stack.last() { Some(route) => route, None => &DEFAULT_ROUTE, // if for some reason there is no route return the default } } fn get_current_route_mut(&mut self) -> &mut Route { self.navigation_stack.last_mut().unwrap() } pub fn set_current_route_state( &mut self, active_block: Option<ActiveBlock>, hovered_block: Option<ActiveBlock>, ) { let mut current_route = self.get_current_route_mut(); if let Some(active_block) = active_block { current_route.active_block = active_block; } if let Some(hovered_block) = hovered_block { current_route.hovered_block = hovered_block; } } pub fn copy_song_url(&mut self) { let clipboard = match &mut self.clipboard_context { Some(ctx) => ctx, None => return, }; if let Some(FullPlayingContext { item: Some(FullTrack { id: Some(id), .. }), .. 
}) = &self.current_playback_context { if let Err(e) = clipboard.set_contents(format!("https://open.spotify.com/track/{}", id)) { self.handle_error(format_err!("failed to set clipboard content: {}", e)); } } } fn set_saved_tracks_to_table(&mut self, saved_track_page: &Page<SavedTrack>) { self.set_tracks_to_table( saved_track_page .items .clone() .into_iter() .map(|item| item.track) .collect::<Vec<FullTrack>>(), ); } fn set_playlist_tracks_to_table(&mut self, playlist_track_page: &Page<PlaylistTrack>) { self.set_tracks_to_table( playlist_track_page .items .clone() .into_iter() .map(|item| item.track) .collect::<Vec<FullTrack>>(), ); } pub fn set_tracks_to_table(&mut self, tracks: Vec<FullTrack>) { self.track_table.tracks = tracks.clone(); self.current_user_saved_tracks_contains( tracks .clone() .into_iter() .filter_map(|item| item.id) .collect::<Vec<String>>(), ); } pub fn get_current_user_saved_tracks(&mut self, offset: Option<u32>) { if let Some(spotify) = &self.spotify { match spotify.current_user_saved_tracks(self.large_search_limit, offset) { Ok(saved_tracks) => { self.set_saved_tracks_to_table(&saved_tracks); self.library.saved_tracks.add_pages(saved_tracks); self.track_table.context = Some(TrackTableContext::SavedTracks); } Err(e) => { self.handle_error(e); } } } } pub fn get_current_user_saved_tracks_next(&mut self) { // Before fetching the next tracks, check if we have already fetched them match self .library .saved_tracks .get_results(Some(self.library.saved_tracks.index + 1)) .cloned() { Some(saved_tracks) => { self.set_saved_tracks_to_table(&saved_tracks); self.library.saved_tracks.index += 1 } None => { if let Some(saved_tracks) = &self.library.saved_tracks.get_results(None) { let offset = Some(saved_tracks.offset + saved_tracks.limit); self.get_current_user_saved_tracks(offset); } } } } pub fn get_current_user_saved_tracks_previous(&mut self) { if self.library.saved_tracks.index > 0 { self.library.saved_tracks.index -= 1; } if let Some(saved_tracks) = &self.library.saved_tracks.get_results(None).cloned() { self.set_saved_tracks_to_table(&saved_tracks); } } pub fn get_album_tracks(&mut self, album: SimplifiedAlbum) { if let Some(album_id) = &album.id { if let Some(spotify) = &self.spotify { match spotify.album_track(&album_id.clone(), self.large_search_limit, 0) { Ok(tracks) => { self.selected_album = Some(SelectedAlbum { album, tracks: tracks.clone(), selected_index: 0, }); self.current_user_saved_tracks_contains( tracks .items .into_iter() .filter_map(|item| item.id) .collect::<Vec<String>>(), ); self.album_table_context = AlbumTableContext::Simplified; self.push_navigation_stack(RouteId::AlbumTracks, ActiveBlock::AlbumTracks); } Err(e) => { self.handle_error(e); } } } } } pub fn toggle_save_track(&mut self, track_id: String) { if let Some(spotify) = &self.spotify { match spotify.current_user_saved_tracks_contains(&[track_id.clone()]) { Ok(saved) => { if saved.first() == Some(&true) { match spotify.current_user_saved_tracks_delete(&[track_id.clone()]) { Ok(()) => { self.liked_song_ids_set.remove(&track_id); } Err(e) => { self.handle_error(e); } } } else { match spotify.current_user_saved_tracks_add(&[track_id.clone()]) { Ok(()) => { // TODO: This should ideally use the same logic as `self.current_user_saved_tracks_contains` self.liked_song_ids_set.insert(track_id); } Err(e) => { self.handle_error(e); } } } } Err(e) => { self.handle_error(e); } } }; } pub fn shuffle(&mut self) { if let (Some(spotify), Some(context)) = (&self.spotify, &mut self.current_playback_context) { match 
spotify.shuffle(!context.shuffle_state, self.client_config.device_id.clone()) { Ok(()) => { // Update the UI eagerly (otherwise the UI will wait until the next 5 second interval // due to polling playback context) context.shuffle_state = !context.shuffle_state; } Err(e) => { self.handle_error(e); } } }; } pub fn repeat(&mut self) { if let (Some(spotify), Some(context)) = (&self.spotify, &mut self.current_playback_context) { let next_repeat_state = match context.repeat_state { RepeatState::Off => RepeatState::Context, RepeatState::Context => RepeatState::Track, RepeatState::Track => RepeatState::Off, }; match spotify.repeat(next_repeat_state, self.client_config.device_id.clone()) { Ok(()) => { // Update the UI eagerly (otherwise the UI will wait until the next 5 second interval // due to polling playback context) context.repeat_state = next_repeat_state; } Err(e) => { self.handle_error(e); } } } } pub fn get_artist_albums(&mut self, artist_id: &str, artist_name: &str) { if let (Some(spotify), Some(user)) = (&self.spotify, &self.user.to_owned()) { match spotify.artist_albums( artist_id, None, Country::from_str(&user.country.to_owned().unwrap_or_else(|| "".to_string())), Some(self.large_search_limit), Some(0), ) { Ok(result) => { self.artist_albums = Some(ArtistAlbums { artist_name: artist_name.to_owned(), selected_index: 0, albums: result, }); self.push_navigation_stack(RouteId::Artist, ActiveBlock::Artist); } Err(e) => { self.handle_error(e); } }; }; } pub fn get_artists(&mut self, offset: Option<String>) { if let Some(spotify) = &self.spotify { match spotify.current_user_followed_artists(self.large_search_limit, offset) { Ok(saved_artists) => { self.artists = saved_artists.artists.items.to_owned(); self.library.saved_artists.add_pages(saved_artists.artists); } Err(e) => { self.handle_error(e); } }; }; } pub fn get_current_user_saved_albums(&mut self, offset: Option<u32>) { if let Some(spotify) = &self.spotify { match spotify.current_user_saved_albums(self.large_search_limit, offset) { Ok(saved_albums) => { // not to show a blank page if !saved_albums.items.is_empty() { self.library.saved_albums.add_pages(saved_albums); } } Err(e) => { self.handle_error(e); } } } } pub fn get_current_user_saved_albums_next(&mut self) { match self .library .saved_albums .get_results(Some(self.library.saved_albums.index + 1)) .cloned() { Some(_) => self.library.saved_albums.index += 1, None => { if let Some(saved_albums) = &self.library.saved_albums.get_results(None) { let offset = Some(saved_albums.offset + saved_albums.limit); self.get_current_user_saved_albums(offset); } } } } pub fn get_current_user_saved_albums_previous(&mut self) { if self.library.saved_albums.index > 0 { self.library.saved_albums.index -= 1; } } pub fn delete_current_user_saved_album(&mut self) { if let Some(albums) = self.library.saved_albums.get_results(None) { if let Some(selected_album) = albums.items.get(self.album_list_index) { if let Some(spotify) = &mut self.spotify { let album_id = &selected_album.album.id; match spotify.current_user_saved_albums_delete(&[album_id.to_owned()]) { Ok(_) => self.get_current_user_saved_albums(None), Err(e) => self.handle_error(e), } } } } } pub fn user_unfollow_artists(&mut self) { if let Some(artists) = self.library.saved_artists.get_results(None) { if let Some(selected_artist) = artists.items.get(self.artists_list_index) { if let Some(spotify) = &mut self.spotify { let artist_id = &selected_artist.id; match spotify.user_unfollow_artists(&[artist_id.to_owned()]) { Ok(_) => self.get_artists(None), 
Err(e) => self.handle_error(e), } } } } } pub fn user_follow_artists(&mut self) { if let Some(artists) = &self.search_results.artists { if let Some(selected_index) = self.search_results.selected_artists_index { if let Some(spotify) = &mut self.spotify { let selected_artist: &FullArtist = &artists.artists.items[selected_index]; let artist_id = &selected_artist.id; if let Err(e) = spotify.user_follow_artists(&[artist_id.to_owned()]) { self.handle_error(e); } } } } } }
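// A minimal sketch of the `ScrollableResultPages` pattern defined above: cache
// every page fetched from the API and move an index over the cache, so paging
// backwards never refetches. The names `Pages`, `add_page`, `current`, and
// `previous` are illustrative, not part of the file above.
struct Pages<T> {
    index: usize,
    pages: Vec<T>,
}

impl<T> Pages<T> {
    fn new() -> Self {
        Pages { index: 0, pages: Vec::new() }
    }

    // Adding a page jumps the cursor to it, mirroring `add_pages` above.
    fn add_page(&mut self, page: T) {
        self.pages.push(page);
        self.index = self.pages.len() - 1;
    }

    fn current(&self) -> Option<&T> {
        self.pages.get(self.index)
    }

    // Paging backwards only moves the cursor; no network request is needed.
    fn previous(&mut self) -> Option<&T> {
        if self.index > 0 {
            self.index -= 1;
        }
        self.pages.get(self.index)
    }
}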
33.576254
124
0.529417
2624506866904ac93e885c48b12ce2e01a415ffb
1,467
use std::io::{self, BufRead};
use std::collections::BTreeSet;

fn password_contains_duplicates(password: &str) -> bool {
    let mut wordset = BTreeSet::new();
    for word in password.split_whitespace() {
        if wordset.contains(word) {
            return true;
        }
        wordset.insert(word);
    }
    false
}

fn password_contains_duplicate_anagrams(password: &str) -> bool {
    let mut wordset = BTreeSet::new();
    for word in password.split_whitespace() {
        let mut sorted = Vec::new();
        // just ascii should be fine
        for c in word.bytes() {
            sorted.push(c);
        }
        sorted.sort();
        if wordset.contains(&sorted) {
            return true;
        }
        wordset.insert(sorted);
    }
    false
}

fn main() {
    let stdin = io::stdin();
    let buf_stdin = stdin.lock();
    let mut num_passwords_no_duplicates = 0;
    let mut num_passwords_no_duplicate_anagrams = 0;
    for line in buf_stdin.lines() {
        if let Ok(l) = line {
            if !password_contains_duplicates(&l) {
                num_passwords_no_duplicates += 1;
            }
            if !password_contains_duplicate_anagrams(&l) {
                num_passwords_no_duplicate_anagrams += 1;
            }
        }
    }
    println!("number of passwords with no duplicates: {}", num_passwords_no_duplicates);
    println!("number of passwords with no duplicate anagrams: {}", num_passwords_no_duplicate_anagrams);
}
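// Illustrative tests (inputs assumed from the puzzle's published examples) of
// the sorted-bytes canonical form used above: "abcde" and "ecdab" sort to the
// same byte sequence, so a line containing both has duplicate anagrams.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn detects_duplicate_anagrams() {
        assert!(!password_contains_duplicate_anagrams("abcde fghij"));
        assert!(password_contains_duplicate_anagrams("abcde xyz ecdab"));
    }
}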
28.764706
104
0.597819
873c6c7f69bfbdbab47e5409cab0d80aa8d54edd
4,737
// Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0

use crate::{
    metrics::DIEM_PRUNER_LEAST_READABLE_VERSION, pruner::db_pruner::DBPruner,
    transaction::TransactionSchema, TransactionStore,
};
use aptos_logger::{error, info};
use aptos_types::transaction::{AtomicVersion, Transaction, Version};
use schemadb::{ReadOptions, SchemaBatch, DB};
use std::{
    cmp::min,
    sync::{atomic::Ordering, Arc},
    thread::sleep,
    time::Duration,
};

pub struct TransactionStorePruner {
    db: Arc<DB>,
    transaction_store: Arc<TransactionStore>,
    /// Keeps track of the target version that the pruner needs to achieve.
    target_version: AtomicVersion,
    least_readable_version: AtomicVersion,
}

impl DBPruner for TransactionStorePruner {
    fn initialize(&self) {
        loop {
            match self.initialize_least_readable_version() {
                Ok(least_readable_version) => {
                    info!(
                        least_readable_version = least_readable_version,
                        "[transaction pruner] initialized."
                    );
                    self.record_progress(least_readable_version);
                    return;
                }
                Err(e) => {
                    error!(
                        error = ?e,
                        "[transaction pruner] Error on first seek. Retrying in 1 second.",
                    );
                    sleep(Duration::from_secs(1));
                }
            }
        }
    }

    fn prune(&self, max_versions: usize) -> anyhow::Result<Version> {
        let least_readable_version = self.least_readable_version();
        // Current target version might be less than the target version to ensure we don't prune
        // more than max_version in one go.
        let current_target_version = min(
            least_readable_version + max_versions as u64,
            self.target_version(),
        );
        let candidate_transactions = self
            .get_pruning_candidate_transactions(least_readable_version, current_target_version)?;
        let mut db_batch = SchemaBatch::new();
        self.transaction_store
            .prune_transaction_by_hash(&candidate_transactions, &mut db_batch)?;
        self.transaction_store
            .prune_transaction_by_account(&candidate_transactions, &mut db_batch)?;
        self.transaction_store.prune_transaction_schema(
            self.least_readable_version(),
            current_target_version,
            &mut db_batch,
        )?;
        self.transaction_store.prune_transaction_info_schema(
            self.least_readable_version(),
            current_target_version,
            &mut db_batch,
        )?;
        self.transaction_store.prune_transaction_accumulator(
            self.least_readable_version(),
            current_target_version,
            &mut db_batch,
        )?;
        self.db.write_schemas(db_batch)?;
        self.record_progress(current_target_version);
        Ok(current_target_version)
    }

    fn initialize_least_readable_version(&self) -> anyhow::Result<Version> {
        let mut iter = self.db.iter::<TransactionSchema>(ReadOptions::default())?;
        iter.seek_to_first();
        let version = iter.next().transpose()?.map_or(0, |(version, _)| version);
        Ok(version)
    }

    fn least_readable_version(&self) -> Version {
        self.least_readable_version.load(Ordering::Relaxed)
    }

    fn set_target_version(&self, target_version: Version) {
        self.target_version.store(target_version, Ordering::Relaxed)
    }

    fn target_version(&self) -> Version {
        self.target_version.load(Ordering::Relaxed)
    }

    fn record_progress(&self, least_readable_version: Version) {
        self.least_readable_version
            .store(least_readable_version, Ordering::Relaxed);
        DIEM_PRUNER_LEAST_READABLE_VERSION
            .with_label_values(&["transaction_store"])
            .set(least_readable_version as i64);
    }

    fn is_pruning_pending(&self) -> bool {
        self.least_readable_version() >= self.target_version()
    }
}

impl TransactionStorePruner {
    pub(super) fn new(db: Arc<DB>, transaction_store: Arc<TransactionStore>) -> Self {
        TransactionStorePruner {
            db,
            transaction_store,
            target_version: AtomicVersion::new(0),
            least_readable_version: AtomicVersion::new(0),
        }
    }

    fn get_pruning_candidate_transactions(
        &self,
        start: Version,
        end: Version,
    ) -> anyhow::Result<Vec<Transaction>> {
        self.transaction_store
            .get_transaction_iter(start, (end - start) as usize)?
            .collect()
    }
}

#[cfg(test)]
mod test;
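// A small illustration of the clamping done at the top of `prune` above: one
// call never advances more than `max_versions` past the current least-readable
// version, even when the target is far ahead. `clamp_target` is an assumed
// name for illustration.
fn clamp_target(least_readable: u64, max_versions: usize, target: u64) -> u64 {
    std::cmp::min(least_readable + max_versions as u64, target)
}

// clamp_target(100, 50, 1_000) == 150, while clamp_target(990, 50, 1_000) == 1_000.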
33.835714
97
0.618324
229638701c69aaaec05659b2fa82ba4b3389925c
793
use crate::list::ListIndex;
use js_sys::Object;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;

/// The `ActionDetail` type
///
/// [MWC Documentation](https://github.com/material-components/material-components-web-components/tree/master/packages/list#mwc-list-2)
#[derive(Debug)]
pub struct ActionDetail {
    #[allow(dead_code)]
    index: ListIndex,
}

impl From<JsValue> for ActionDetail {
    fn from(value: JsValue) -> Self {
        let detail = value.unchecked_into::<ActionDetailJs>();
        let index = ListIndex::from(detail.index());
        Self { index }
    }
}

#[wasm_bindgen]
extern "C" {
    #[derive(Debug)]
    #[wasm_bindgen(extends = Object)]
    type ActionDetailJs;

    #[wasm_bindgen(method, getter)]
    pub fn index(this: &ActionDetailJs) -> JsValue;
}
24.78125
135
0.673392
4b20e2428950c8e003fa664db4139dc242d9dfb9
36,864
#![allow(non_snake_case, non_upper_case_globals)] #![allow(non_camel_case_types)] //! USB Power Delivery interface //! //! Used by: stm32g071, stm32g07x, stm32g081 use crate::{RORegister, RWRegister}; #[cfg(not(feature = "nosync"))] use core::marker::PhantomData; /// UCPD configuration register pub mod CFG1 { /// HBITCLKDIV pub mod HBITCLKDIV { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (6 bits: 0x3f << 0) pub const mask: u32 = 0x3f << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// IFRGAP pub mod IFRGAP { /// Offset (6 bits) pub const offset: u32 = 6; /// Mask (5 bits: 0b11111 << 6) pub const mask: u32 = 0b11111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TRANSWIN pub mod TRANSWIN { /// Offset (11 bits) pub const offset: u32 = 11; /// Mask (5 bits: 0b11111 << 11) pub const mask: u32 = 0b11111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// PSC_USBPDCLK pub mod PSC_USBPDCLK { /// Offset (17 bits) pub const offset: u32 = 17; /// Mask (3 bits: 0b111 << 17) pub const mask: u32 = 0b111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXORDSETEN pub mod RXORDSETEN { /// Offset (20 bits) pub const offset: u32 = 20; /// Mask (9 bits: 0x1ff << 20) pub const mask: u32 = 0x1ff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXDMAEN pub mod TXDMAEN { /// Offset (29 bits) pub const offset: u32 = 29; /// Mask (1 bit: 1 << 29) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXDMAEN: pub mod RXDMAEN { /// Offset (30 bits) pub const offset: u32 = 30; /// Mask (1 bit: 1 << 30) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// UCPDEN pub mod UCPDEN { /// Offset (31 bits) pub const offset: u32 = 31; /// Mask (1 bit: 1 << 31) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD configuration register 2 pub mod CFG2 { /// RXFILTDIS pub mod RXFILTDIS { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXFILT2N3 pub mod RXFILT2N3 { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// FORCECLK pub mod FORCECLK { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// WUPEN pub mod WUPEN { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// 
Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD configuration register 3 pub mod CFG3 { /// TRIM1_NG_CCRPD pub mod TRIM1_NG_CCRPD { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (4 bits: 0b1111 << 0) pub const mask: u32 = 0b1111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TRIM1_NG_CC1A5 pub mod TRIM1_NG_CC1A5 { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (5 bits: 0b11111 << 4) pub const mask: u32 = 0b11111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TRIM1_NG_CC3A0 pub mod TRIM1_NG_CC3A0 { /// Offset (9 bits) pub const offset: u32 = 9; /// Mask (4 bits: 0b1111 << 9) pub const mask: u32 = 0b1111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TRIM2_NG_CCRPD pub mod TRIM2_NG_CCRPD { /// Offset (16 bits) pub const offset: u32 = 16; /// Mask (4 bits: 0b1111 << 16) pub const mask: u32 = 0b1111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TRIM2_NG_CC1A5 pub mod TRIM2_NG_CC1A5 { /// Offset (20 bits) pub const offset: u32 = 20; /// Mask (5 bits: 0b11111 << 20) pub const mask: u32 = 0b11111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TRIM2_NG_CC3A0 pub mod TRIM2_NG_CC3A0 { /// Offset (25 bits) pub const offset: u32 = 25; /// Mask (4 bits: 0b1111 << 25) pub const mask: u32 = 0b1111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD control register pub mod CR { /// TXMODE pub mod TXMODE { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (2 bits: 0b11 << 0) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXSEND pub mod TXSEND { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXHRST pub mod TXHRST { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXMODE pub mod RXMODE { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (1 bit: 1 << 4) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// PHYRXEN pub mod PHYRXEN { /// Offset (5 bits) pub const offset: u32 = 5; /// Mask (1 bit: 1 << 5) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// PHYCCSEL pub mod PHYCCSEL { /// Offset (6 bits) pub const offset: u32 = 6; /// Mask (1 bit: 1 << 6) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} 
/// Read-write values (empty) pub mod RW {} } /// ANASUBMODE pub mod ANASUBMODE { /// Offset (7 bits) pub const offset: u32 = 7; /// Mask (2 bits: 0b11 << 7) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// ANAMODE pub mod ANAMODE { /// Offset (9 bits) pub const offset: u32 = 9; /// Mask (1 bit: 1 << 9) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// CCENABLE pub mod CCENABLE { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (2 bits: 0b11 << 10) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// DBATTEN pub mod DBATTEN { /// Offset (15 bits) pub const offset: u32 = 15; /// Mask (1 bit: 1 << 15) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// FRSRXEN pub mod FRSRXEN { /// Offset (16 bits) pub const offset: u32 = 16; /// Mask (1 bit: 1 << 16) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// FRSTX pub mod FRSTX { /// Offset (17 bits) pub const offset: u32 = 17; /// Mask (1 bit: 1 << 17) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RDCH pub mod RDCH { /// Offset (18 bits) pub const offset: u32 = 18; /// Mask (1 bit: 1 << 18) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// CC1TCDIS pub mod CC1TCDIS { /// Offset (20 bits) pub const offset: u32 = 20; /// Mask (1 bit: 1 << 20) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// CC2TCDIS pub mod CC2TCDIS { /// Offset (21 bits) pub const offset: u32 = 21; /// Mask (1 bit: 1 << 21) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Interrupt Mask Register pub mod IMR { /// TXISIE pub mod TXISIE { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXMSGDISCIE pub mod TXMSGDISCIE { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXMSGSENTIE pub mod TXMSGSENTIE { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXMSGABTIE pub mod TXMSGABTIE { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// 
Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// HRSTDISCIE pub mod HRSTDISCIE { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (1 bit: 1 << 4) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// HRSTSENTIE pub mod HRSTSENTIE { /// Offset (5 bits) pub const offset: u32 = 5; /// Mask (1 bit: 1 << 5) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXUNDIE pub mod TXUNDIE { /// Offset (6 bits) pub const offset: u32 = 6; /// Mask (1 bit: 1 << 6) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXNEIE pub mod RXNEIE { /// Offset (8 bits) pub const offset: u32 = 8; /// Mask (1 bit: 1 << 8) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXORDDETIE pub mod RXORDDETIE { /// Offset (9 bits) pub const offset: u32 = 9; /// Mask (1 bit: 1 << 9) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXHRSTDETIE pub mod RXHRSTDETIE { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (1 bit: 1 << 10) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXOVRIE pub mod RXOVRIE { /// Offset (11 bits) pub const offset: u32 = 11; /// Mask (1 bit: 1 << 11) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXMSGENDIE pub mod RXMSGENDIE { /// Offset (12 bits) pub const offset: u32 = 12; /// Mask (1 bit: 1 << 12) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TYPECEVT1IE pub mod TYPECEVT1IE { /// Offset (14 bits) pub const offset: u32 = 14; /// Mask (1 bit: 1 << 14) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TYPECEVT2IE pub mod TYPECEVT2IE { /// Offset (15 bits) pub const offset: u32 = 15; /// Mask (1 bit: 1 << 15) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// FRSEVTIE pub mod FRSEVTIE { /// Offset (20 bits) pub const offset: u32 = 20; /// Mask (1 bit: 1 << 20) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Status Register pub mod SR { /// TXIS pub mod TXIS { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXMSGDISC pub mod TXMSGDISC { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// 
Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXMSGSENT pub mod TXMSGSENT { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXMSGABT pub mod TXMSGABT { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// HRSTDISC pub mod HRSTDISC { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (1 bit: 1 << 4) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// HRSTSENT pub mod HRSTSENT { /// Offset (5 bits) pub const offset: u32 = 5; /// Mask (1 bit: 1 << 5) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXUND pub mod TXUND { /// Offset (6 bits) pub const offset: u32 = 6; /// Mask (1 bit: 1 << 6) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXNE pub mod RXNE { /// Offset (8 bits) pub const offset: u32 = 8; /// Mask (1 bit: 1 << 8) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXORDDET pub mod RXORDDET { /// Offset (9 bits) pub const offset: u32 = 9; /// Mask (1 bit: 1 << 9) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXHRSTDET pub mod RXHRSTDET { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (1 bit: 1 << 10) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXOVR pub mod RXOVR { /// Offset (11 bits) pub const offset: u32 = 11; /// Mask (1 bit: 1 << 11) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXMSGEND pub mod RXMSGEND { /// Offset (12 bits) pub const offset: u32 = 12; /// Mask (1 bit: 1 << 12) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXERR pub mod RXERR { /// Offset (13 bits) pub const offset: u32 = 13; /// Mask (1 bit: 1 << 13) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TYPECEVT1 pub mod TYPECEVT1 { /// Offset (14 bits) pub const offset: u32 = 14; /// Mask (1 bit: 1 << 14) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TYPECEVT2 pub mod TYPECEVT2 { /// Offset (15 bits) pub const offset: u32 = 15; /// Mask (1 bit: 1 << 15) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// 
Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TYPEC_VSTATE_CC1 pub mod TYPEC_VSTATE_CC1 { /// Offset (16 bits) pub const offset: u32 = 16; /// Mask (2 bits: 0b11 << 16) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TYPEC_VSTATE_CC2 pub mod TYPEC_VSTATE_CC2 { /// Offset (18 bits) pub const offset: u32 = 18; /// Mask (2 bits: 0b11 << 18) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// FRSEVT pub mod FRSEVT { /// Offset (20 bits) pub const offset: u32 = 20; /// Mask (1 bit: 1 << 20) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Interrupt Clear Register pub mod ICR { /// TXMSGDISCCF pub mod TXMSGDISCCF { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXMSGSENTCF pub mod TXMSGSENTCF { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXMSGABTCF pub mod TXMSGABTCF { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// HRSTDISCCF pub mod HRSTDISCCF { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (1 bit: 1 << 4) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// HRSTSENTCF pub mod HRSTSENTCF { /// Offset (5 bits) pub const offset: u32 = 5; /// Mask (1 bit: 1 << 5) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TXUNDCF pub mod TXUNDCF { /// Offset (6 bits) pub const offset: u32 = 6; /// Mask (1 bit: 1 << 6) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXORDDETCF pub mod RXORDDETCF { /// Offset (9 bits) pub const offset: u32 = 9; /// Mask (1 bit: 1 << 9) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXHRSTDETCF pub mod RXHRSTDETCF { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (1 bit: 1 << 10) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXOVRCF pub mod RXOVRCF { /// Offset (11 bits) pub const offset: u32 = 11; /// Mask (1 bit: 1 << 11) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXMSGENDCF pub mod RXMSGENDCF { /// Offset (12 bits) pub const offset: u32 = 12; /// 
Mask (1 bit: 1 << 12) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TYPECEVT1CF pub mod TYPECEVT1CF { /// Offset (14 bits) pub const offset: u32 = 14; /// Mask (1 bit: 1 << 14) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// TYPECEVT2CF pub mod TYPECEVT2CF { /// Offset (15 bits) pub const offset: u32 = 15; /// Mask (1 bit: 1 << 15) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// FRSEVTCF pub mod FRSEVTCF { /// Offset (20 bits) pub const offset: u32 = 20; /// Mask (1 bit: 1 << 20) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Tx Ordered Set Type Register pub mod TX_ORDSET { /// TXORDSET pub mod TXORDSET { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (20 bits: 0xfffff << 0) pub const mask: u32 = 0xfffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Tx Paysize Register pub mod TX_PAYSZ { /// TXPAYSZ pub mod TXPAYSZ { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (10 bits: 0x3ff << 0) pub const mask: u32 = 0x3ff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Tx Data Register pub mod TXDR { /// TXDATA pub mod TXDATA { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (8 bits: 0xff << 0) pub const mask: u32 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Rx Ordered Set Register pub mod RX_ORDSET { /// RXORDSET pub mod RXORDSET { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (3 bits: 0b111 << 0) pub const mask: u32 = 0b111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXSOP3OF4 pub mod RXSOP3OF4 { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// RXSOPKINVALID pub mod RXSOPKINVALID { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (3 bits: 0b111 << 4) pub const mask: u32 = 0b111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Rx Paysize Register pub mod RX_PAYSZ { /// RXPAYSZ pub mod RXPAYSZ { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (10 bits: 0x3ff << 0) pub const mask: u32 = 0x3ff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Receive Data Register pub mod RXDR { /// RXDATA pub mod RXDATA { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (8 bits: 0xff << 0) pub const mask: u32 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } 
/// UCPD Rx Ordered Set Extension Register pub mod RX_ORDEXT1 { /// RXSOPX1 pub mod RXSOPX1 { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (20 bits: 0xfffff << 0) pub const mask: u32 = 0xfffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD Rx Ordered Set Extension Register pub mod RX_ORDEXT2 { /// RXSOPX2 pub mod RXSOPX2 { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (20 bits: 0xfffff << 0) pub const mask: u32 = 0xfffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD IP ID register pub mod IPVER { /// IPVER pub mod IPVER { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD IP ID register pub mod IPID { /// IPID pub mod IPID { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (32 bits: 0xffffffff << 0) pub const mask: u32 = 0xffffffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// UCPD IP ID register pub mod MID { pub use super::IPID::IPID; } #[repr(C)] pub struct RegisterBlock { /// UCPD configuration register pub CFG1: RWRegister<u32>, /// UCPD configuration register 2 pub CFG2: RWRegister<u32>, /// UCPD configuration register 3 pub CFG3: RWRegister<u32>, /// UCPD control register pub CR: RWRegister<u32>, /// UCPD Interrupt Mask Register pub IMR: RWRegister<u32>, /// UCPD Status Register pub SR: RORegister<u32>, /// UCPD Interrupt Clear Register pub ICR: RWRegister<u32>, /// UCPD Tx Ordered Set Type Register pub TX_ORDSET: RWRegister<u32>, /// UCPD Tx Paysize Register pub TX_PAYSZ: RWRegister<u32>, /// UCPD Tx Data Register pub TXDR: RWRegister<u32>, /// UCPD Rx Ordered Set Register pub RX_ORDSET: RORegister<u32>, /// UCPD Rx Paysize Register pub RX_PAYSZ: RWRegister<u32>, /// UCPD Receive Data Register pub RXDR: RORegister<u32>, /// UCPD Rx Ordered Set Extension Register pub RX_ORDEXT1: RWRegister<u32>, /// UCPD Rx Ordered Set Extension Register pub RX_ORDEXT2: RWRegister<u32>, _reserved1: [u32; 238], /// UCPD IP ID register pub IPVER: RORegister<u32>, /// UCPD IP ID register pub IPID: RORegister<u32>, /// UCPD IP ID register pub MID: RORegister<u32>, } pub struct ResetValues { pub CFG1: u32, pub CFG2: u32, pub CFG3: u32, pub CR: u32, pub IMR: u32, pub SR: u32, pub ICR: u32, pub TX_ORDSET: u32, pub TX_PAYSZ: u32, pub TXDR: u32, pub RX_ORDSET: u32, pub RX_PAYSZ: u32, pub RXDR: u32, pub RX_ORDEXT1: u32, pub RX_ORDEXT2: u32, pub IPVER: u32, pub IPID: u32, pub MID: u32, } #[cfg(not(feature = "nosync"))] pub struct Instance { pub(crate) addr: u32, pub(crate) _marker: PhantomData<*const RegisterBlock>, } #[cfg(not(feature = "nosync"))] impl ::core::ops::Deref for Instance { type Target = RegisterBlock; #[inline(always)] fn deref(&self) -> &RegisterBlock { unsafe { &*(self.addr as *const _) } } } #[cfg(feature = "rtic")] unsafe impl Send for Instance {}
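// Illustrative sketch (not part of the generated peripheral file): the
// `offset`/`mask` constants above are meant to be combined with ordinary
// shifts and masks. `set_txmode` is a hypothetical helper showing the usual
// read-modify-write pattern for the two-bit TXMODE field of CR.
#[allow(dead_code)]
fn set_txmode(cr_value: u32, mode: u32) -> u32 {
    // Clear the field, then OR in the new value shifted to its offset.
    (cr_value & !CR::TXMODE::mask) | ((mode << CR::TXMODE::offset) & CR::TXMODE::mask)
}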
25.423448
58
0.477241
09f7121d2de56ff737c8da403f67094c257d0055
11,588
//! Defines types to use with the ACL commands. use crate::types::{ ErrorKind, FromRedisValue, RedisError, RedisResult, RedisWrite, ToRedisArgs, Value, }; macro_rules! not_convertible_error { ($v:expr, $det:expr) => { RedisError::from(( ErrorKind::TypeError, "Response type not convertible", format!("{:?} (response was {:?})", $det, $v), )) }; } /// ACL rules are used in order to activate or remove a flag, or to perform a /// given change to the user ACL; under the hood these are just single words. #[derive(Debug, Eq, PartialEq)] pub enum Rule { /// Enable the user: it is possible to authenticate as this user. On, /// Disable the user: it's no longer possible to authenticate with this /// user, however the already authenticated connections will still work. Off, /// Add the command to the list of commands the user can call. AddCommand(String), /// Remove the command from the list of commands the user can call. RemoveCommand(String), /// Add all the commands in the given category to the ones the user can call. AddCategory(String), /// Remove all the commands in the given category from the ones the user can call. RemoveCategory(String), /// Alias for `+@all`. Note that it implies the ability to execute all the /// future commands loaded via the modules system. AllCommands, /// Alias for `-@all`. NoCommands, /// Add this password to the list of valid passwords for the user. AddPass(String), /// Remove this password from the list of valid passwords. RemovePass(String), /// Add this SHA-256 hash value to the list of valid passwords for the user. AddHashedPass(String), /// Remove this hash value from the list of valid passwords. RemoveHashedPass(String), /// All the set passwords of the user are removed, and the user is flagged /// as requiring no password: it means that every password will work /// against this user. NoPass, /// Flush the list of allowed passwords. Also removes the _nopass_ status. ResetPass, /// Add a pattern of keys that can be mentioned as part of commands. Pattern(String), /// Alias for `~*`. AllKeys, /// Flush the list of allowed key patterns. ResetKeys, /// Performs the following actions: `resetpass`, `resetkeys`, `off`, `-@all`. /// The user returns to the same state it had immediately after its creation. Reset, } impl ToRedisArgs for Rule { fn write_redis_args<W>(&self, out: &mut W) where W: ?Sized + RedisWrite, { use self::Rule::*; match self { On => out.write_arg(b"on"), Off => out.write_arg(b"off"), AddCommand(cmd) => out.write_arg_fmt(format_args!("+{}", cmd)), RemoveCommand(cmd) => out.write_arg_fmt(format_args!("-{}", cmd)), AddCategory(cat) => out.write_arg_fmt(format_args!("+@{}", cat)), RemoveCategory(cat) => out.write_arg_fmt(format_args!("-@{}", cat)), AllCommands => out.write_arg(b"allcommands"), NoCommands => out.write_arg(b"nocommands"), AddPass(pass) => out.write_arg_fmt(format_args!(">{}", pass)), RemovePass(pass) => out.write_arg_fmt(format_args!("<{}", pass)), AddHashedPass(pass) => out.write_arg_fmt(format_args!("#{}", pass)), RemoveHashedPass(pass) => out.write_arg_fmt(format_args!("!{}", pass)), NoPass => out.write_arg(b"nopass"), ResetPass => out.write_arg(b"resetpass"), Pattern(pat) => out.write_arg_fmt(format_args!("~{}", pat)), AllKeys => out.write_arg(b"allkeys"), ResetKeys => out.write_arg(b"resetkeys"), Reset => out.write_arg(b"reset"), }; } } /// An info dictionary type storing Redis ACL information as multiple `Rule`s. /// This type collects key/value data returned by the [`ACL GETUSER`][1] command.
/// /// [1]: https://redis.io/commands/acl-getuser #[derive(Debug, Eq, PartialEq)] pub struct AclInfo { /// Describes flag rules for the user. Represented by [`Rule::On`][1], /// [`Rule::Off`][2], [`Rule::AllKeys`][3], [`Rule::AllCommands`][4] and /// [`Rule::NoPass`][5]. /// /// [1]: ./enum.Rule.html#variant.On /// [2]: ./enum.Rule.html#variant.Off /// [3]: ./enum.Rule.html#variant.AllKeys /// [4]: ./enum.Rule.html#variant.AllCommands /// [5]: ./enum.Rule.html#variant.NoPass pub flags: Vec<Rule>, /// Describes the user's passwords. Represented by [`Rule::AddHashedPass`][1]. /// /// [1]: ./enum.Rule.html#variant.AddHashedPass pub passwords: Vec<Rule>, /// Describes which commands the user can call. /// Represented by [`Rule::AddCommand`][1], [`Rule::AddCategory`][2], /// [`Rule::RemoveCommand`][3] and [`Rule::RemoveCategory`][4]. /// /// [1]: ./enum.Rule.html#variant.AddCommand /// [2]: ./enum.Rule.html#variant.AddCategory /// [3]: ./enum.Rule.html#variant.RemoveCommand /// [4]: ./enum.Rule.html#variant.RemoveCategory pub commands: Vec<Rule>, /// Describes patterns of keys which the user can access. Represented by /// [`Rule::Pattern`][1]. /// /// [1]: ./enum.Rule.html#variant.Pattern pub keys: Vec<Rule>, } impl FromRedisValue for AclInfo { fn from_redis_value(v: &Value) -> RedisResult<Self> { let mut it = v .as_sequence() .ok_or_else(|| not_convertible_error!(v, ""))? .iter() .skip(1) .step_by(2); let (flags, passwords, commands, keys) = match (it.next(), it.next(), it.next(), it.next()) { (Some(flags), Some(passwords), Some(commands), Some(keys)) => { // Parse flags // Ref: https://git.io/JfNyb let flags = flags .as_sequence() .ok_or_else(|| { not_convertible_error!(flags, "Expect a bulk response of ACL flags") })? .iter() .map(|flag| match flag { Value::Data(flag) => match flag.as_slice() { b"on" => Ok(Rule::On), b"off" => Ok(Rule::Off), b"allkeys" => Ok(Rule::AllKeys), b"allcommands" => Ok(Rule::AllCommands), b"nopass" => Ok(Rule::NoPass), _ => Err(not_convertible_error!(flag, "Expect a valid ACL flag")), }, _ => Err(not_convertible_error!( flag, "Expect arbitrary binary data" )), }) .collect::<RedisResult<_>>()?; let passwords = passwords .as_sequence() .ok_or_else(|| { not_convertible_error!(passwords, "Expect a bulk response of ACL passwords") })? .iter() .map(|pass| Ok(Rule::AddHashedPass(String::from_redis_value(pass)?))) .collect::<RedisResult<_>>()?; let commands = match commands { Value::Data(cmd) => std::str::from_utf8(cmd)?, _ => { return Err(not_convertible_error!( commands, "Expect a valid UTF8 string" )) } } .split_terminator(' ') .map(|cmd| match cmd { x if x.starts_with("+@") => Ok(Rule::AddCategory(x[2..].to_owned())), x if x.starts_with("-@") => Ok(Rule::RemoveCategory(x[2..].to_owned())), x if x.starts_with('+') => Ok(Rule::AddCommand(x[1..].to_owned())), x if x.starts_with('-') => Ok(Rule::RemoveCommand(x[1..].to_owned())), _ => Err(not_convertible_error!( cmd, "Expect a command addition/removal" )), }) .collect::<RedisResult<_>>()?; let keys = keys .as_sequence() .ok_or_else(|| not_convertible_error!(keys, ""))? .iter() .map(|pat| Ok(Rule::Pattern(String::from_redis_value(pat)?))) .collect::<RedisResult<_>>()?; (flags, passwords, commands, keys) } _ => { return Err(not_convertible_error!( v, "Expect a response from `ACL GETUSER`" )) } }; Ok(Self { flags, passwords, commands, keys, }) } } #[cfg(test)] mod tests { use super::*; macro_rules!
assert_args { ($rule:expr, $arg:expr) => { assert_eq!($rule.to_redis_args(), vec![$arg.to_vec()]); }; } #[test] fn test_rule_to_arg() { use self::Rule::*; assert_args!(On, b"on"); assert_args!(Off, b"off"); assert_args!(AddCommand("set".to_owned()), b"+set"); assert_args!(RemoveCommand("set".to_owned()), b"-set"); assert_args!(AddCategory("hyperloglog".to_owned()), b"+@hyperloglog"); assert_args!(RemoveCategory("hyperloglog".to_owned()), b"-@hyperloglog"); assert_args!(AllCommands, b"allcommands"); assert_args!(NoCommands, b"nocommands"); assert_args!(AddPass("mypass".to_owned()), b">mypass"); assert_args!(RemovePass("mypass".to_owned()), b"<mypass"); assert_args!( AddHashedPass( "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2".to_owned() ), b"#c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2" ); assert_args!( RemoveHashedPass( "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2".to_owned() ), b"!c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2" ); assert_args!(NoPass, b"nopass"); assert_args!(Pattern("pat:*".to_owned()), b"~pat:*"); assert_args!(AllKeys, b"allkeys"); assert_args!(ResetKeys, b"resetkeys"); assert_args!(Reset, b"reset"); } #[test] fn test_from_redis_value() { let redis_value = Value::Bulk(vec![ Value::Data("flags".into()), Value::Bulk(vec![Value::Data("on".into())]), Value::Data("passwords".into()), Value::Bulk(vec![]), Value::Data("commands".into()), Value::Data("-@all +get".into()), Value::Data("keys".into()), Value::Bulk(vec![Value::Data("pat:*".into())]), ]); let acl_info = AclInfo::from_redis_value(&redis_value).expect("Parse successfully"); assert_eq!( acl_info, AclInfo { flags: vec![Rule::On], passwords: vec![], commands: vec![ Rule::RemoveCategory("all".to_owned()), Rule::AddCommand("get".to_owned()), ], keys: vec![Rule::Pattern("pat:*".to_owned())], } ); } }
38.370861
99
0.529513
ff0ea0eeb2c020444d922362b22ddbba8538ec96
4,683
mod chat; mod lua; mod object; mod repo; mod util; mod world; use crate::chat::{AppState, ChatSocket}; use crate::util::ResultAnyError; use crate::world::{World, WorldRef}; use actix::clock::Duration; use actix::prelude::*; use actix_web::middleware::Logger; use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer, Responder}; use actix_web_actors::ws; use chrono::prelude::*; use futures::executor; use listenfd::ListenFd; use log::info; use std::env; use std::fs::{copy, rename, File}; use std::path::{Path, PathBuf}; use std::sync::{Arc, RwLock}; #[macro_use] extern crate scoped_tls; #[macro_use] extern crate lazy_static; async fn index() -> impl Responder { HttpResponse::Ok().body("Hello from orisa!") } async fn socket( req: HttpRequest, stream: web::Payload, data: web::Data<AppState>, ) -> impl Responder { let res = ws::start(ChatSocket::new(data.clone()), &req, stream); res } fn main() -> Result<(), std::io::Error> { env_logger::init(); let mut system = actix_rt::System::builder() .name("main") .stop_on_panic(true) // TODO: this doesn't seem to work (panics kill worker thread (?) but not main thread) .build(); // This reference to _world keeps it alive let (_world, world_ref) = build_world()?; ScheduledSaveActor { world_ref: world_ref.clone(), } .start(); let res = system.block_on(run_server(world_ref.clone())); info!("Saving world before stop"); save_world(world_ref.clone()).expect("Failed to save world"); info!("Save complete"); res } fn world_load_path() -> PathBuf { let state_dir_env = env::var("ORISA_STATE_DIRECTORY").unwrap_or("state".to_string()); let state_dir = Path::new(&state_dir_env); state_dir.join("world.json").to_path_buf() } fn save_world(world_ref: WorldRef) -> ResultAnyError<()> { let state_dir_env = env::var("ORISA_STATE_DIRECTORY").unwrap_or("state".to_string()); let state_dir = Path::new(&state_dir_env); let temp_path = state_dir.join("world-out.json"); let file = File::create(&temp_path)?; world_ref.read(|w| w.save(file))?; let now = Utc::now().to_rfc3339(); copy( temp_path.clone(), state_dir.join(format!("world-{}.json", now)), )?; let final_path = state_dir.join("world.json"); rename(temp_path, final_path)?; Ok(()) } fn build_world() -> Result<(Arc<RwLock<Option<World>>>, WorldRef), std::io::Error> { let arbiter = Arbiter::new(); // default to assuming killpop is checked out next to orisa let code_dir_env = env::var("ORISA_CODE_DIRECTORY").unwrap_or("../../killpop".to_string()); log::info!("Using code directory {}", code_dir_env); let code_remote = env::var("ORISA_CODE_REMOTE").ok(); let code_branch = env::var("ORISA_CODE_BRANCH").ok(); let git_config = match (code_remote, code_branch) { (Some(remote), Some(branch)) => { log::info!( "Configured to pull system code from {} -> {}", remote, branch ); Some(repo::Repo::new(&Path::new(&code_dir_env), remote, branch)) } _ => { log::info!("Not configured to pull system code"); None } }; let path = world_load_path(); let read = if path.exists() { Some(File::open(path).expect("Error opening world")) } else { None }; Ok( World::new(&arbiter, &Path::new(&code_dir_env), git_config, read).expect("error loading world"), ) } async fn run_server(world_ref: WorldRef) -> Result<(), std::io::Error> { let data = web::Data::new(AppState { world_ref: world_ref.clone(), }); let mut listenfd = ListenFd::from_env(); let mut server = HttpServer::new(move || { App::new() .app_data(data.clone()) .wrap(Logger::default()) .route("/", web::get().to(index)) .route("/api/socket", web::get().to(socket)) }) .shutdown_timeout(1) 
.disable_signals(); server = if let Some(l) = listenfd.take_tcp_listener(0).unwrap() { server.listen(l)? } else { server.bind("0.0.0.0:8080")? }; info!("Starting!"); let running = server.run(); let srv = running.clone(); ctrlc::set_handler(move || { info!("Asking for stop!"); executor::block_on(srv.stop(true)); }) .expect("Error setting Ctrl-C handler"); running.await } struct ScheduledSaveActor { world_ref: WorldRef, } impl Actor for ScheduledSaveActor { type Context = Context<Self>; fn started(&mut self, ctx: &mut Context<Self>) { let six_hours = 60 * 60 * 6; ctx.run_interval(Duration::from_secs(six_hours), |s, _ctx| { log::info!("Saving world on schedule."); let _ = save_world(s.world_ref.clone()) .map_err(|e| log::error!("Unable to save world on schedule: {}", e)); }); } }
25.313514
111
0.646808
79fb569c77a5ce56c33de27fb9c4b31b9182f966
2,438
// This file is part of Substrate. // Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Benchmarks for Utility Pallet #![cfg(feature = "runtime-benchmarks")] use super::*; use frame_system::{RawOrigin, EventRecord}; use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; const SEED: u32 = 0; fn assert_last_event<T: Config>(generic_event: <T as Config>::Event) { let events = frame_system::Module::<T>::events(); let system_event: <T as frame_system::Config>::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } benchmarks! { batch { let c in 0 .. 1000; let mut calls: Vec<<T as Config>::Call> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark(vec![]).into(); calls.push(call); } let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), calls) verify { assert_last_event::<T>(Event::BatchCompleted.into()) } as_derivative { let caller = account("caller", SEED, SEED); let call = Box::new(frame_system::Call::remark(vec![]).into()); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::<T>::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), SEED as u16, call) batch_all { let c in 0 .. 1000; let mut calls: Vec<<T as Config>::Call> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark(vec![]).into(); calls.push(call); } let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), calls) verify { assert_last_event::<T>(Event::BatchCompleted.into()) } } impl_benchmark_test_suite!( Module, crate::tests::new_test_ext(), crate::tests::Test, );
31.662338
93
0.700164
224acf5de05ba2148eef9308f381391d2227bb41
4,676
extern crate specs; use specs::prelude::*; use crate::{MyTurn, Faction, Position, Map, raws::Reaction, Viewshed, WantsToFlee, WantsToApproach, Chasing, SpecialAbilities, WantsToCastSpell, Name, SpellTemplate}; pub struct VisibleAI {} impl<'a> System<'a> for VisibleAI { #[allow(clippy::type_complexity)] type SystemData = ( ReadStorage<'a, MyTurn>, ReadStorage<'a, Faction>, ReadStorage<'a, Position>, ReadExpect<'a, Map>, WriteStorage<'a, WantsToApproach>, WriteStorage<'a, WantsToFlee>, Entities<'a>, ReadExpect<'a, Entity>, ReadStorage<'a, Viewshed>, WriteStorage<'a, Chasing>, ReadStorage<'a, SpecialAbilities>, WriteExpect<'a, rltk::RandomNumberGenerator>, WriteStorage<'a, WantsToCastSpell>, ReadStorage<'a, Name>, ReadStorage<'a, SpellTemplate> ); fn run(&mut self, data : Self::SystemData) { let (turns, factions, positions, map, mut want_approach, mut want_flee, entities, player, viewsheds, mut chasing, abilities, mut rng, mut casting, names, spells) = data; for (entity, _turn, my_faction, pos, viewshed) in (&entities, &turns, &factions, &positions, &viewsheds).join() { if entity != *player { let my_idx = map.xy_idx(pos.x, pos.y); let mut reactions : Vec<(usize, Reaction, Entity)> = Vec::new(); let mut flee : Vec<usize> = Vec::new(); for visible_tile in viewshed.visible_tiles.iter() { let idx = map.xy_idx(visible_tile.x, visible_tile.y); if my_idx != idx { evaluate(idx, &map, &factions, &my_faction.name, &mut reactions); } } let mut done = false; for reaction in reactions.iter() { match reaction.1 { Reaction::Attack => { if let Some(abilities) = abilities.get(entity) { let range = rltk::DistanceAlg::Pythagoras.distance2d( rltk::Point::new(pos.x, pos.y), rltk::Point::new(reaction.0 as i32 % map.width, reaction.0 as i32 / map.width) ); for ability in abilities.abilities.iter() { if range >= ability.min_range && range <= ability.range && rng.roll_dice(1,100) <= (ability.chance * 100.0) as i32 { use crate::raws::find_spell_entity_by_name; casting.insert( entity, WantsToCastSpell{ spell : find_spell_entity_by_name(&ability.spell, &names, &spells, &entities).unwrap(), target : Some(rltk::Point::new(reaction.0 as i32 % map.width, reaction.0 as i32 / map.width))} ).expect("Unable to insert"); done = true; } } } if !done { want_approach.insert(entity, WantsToApproach{ idx: reaction.0 as i32 }).expect("Unable to insert"); chasing.insert(entity, Chasing{ target: reaction.2}).expect("Unable to insert"); done = true; } } Reaction::Flee => { flee.push(reaction.0); } _ => {} } } if !done && !flee.is_empty() { want_flee.insert(entity, WantsToFlee{ indices : flee }).expect("Unable to insert"); } } } } } fn evaluate(idx : usize, map : &Map, factions : &ReadStorage<Faction>, my_faction : &str, reactions : &mut Vec<(usize, Reaction, Entity)>) { for other_entity in map.tile_content[idx].iter() { if let Some(faction) = factions.get(*other_entity) { reactions.push(( idx, crate::raws::faction_reaction(my_faction, &faction.name, &crate::raws::RAWS.lock().unwrap()), *other_entity )); } } }
46.29703
142
0.462575
61a26ac5971cdb52f3de1de6ad506afeba867486
17,392
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ cfgir::{ ast::{BasicBlock, BasicBlocks, BlockInfo, LoopEnd, LoopInfo}, remove_no_ops, }, errors::*, hlir::ast::{Command, Command_, Exp, ExpListItem, Label, UnannotatedExp_, UnitCase}, shared::ast_debug::*, }; use move_ir_types::location::*; use std::collections::{BTreeMap, BTreeSet, VecDeque}; //************************************************************************************************** // CFG //************************************************************************************************** pub trait CFG { fn successors(&self, label: Label) -> &BTreeSet<Label>; fn predecessors(&self, label: Label) -> &BTreeSet<Label>; fn commands<'a>(&'a self, label: Label) -> Box<dyn Iterator<Item = (usize, &'a Command)> + 'a>; fn num_blocks(&self) -> usize; fn start_block(&self) -> Label; } //************************************************************************************************** // BlockCFG //************************************************************************************************** #[derive(Debug)] pub struct BlockCFG<'a> { start: Label, blocks: &'a mut BasicBlocks, successor_map: BTreeMap<Label, BTreeSet<Label>>, predecessor_map: BTreeMap<Label, BTreeSet<Label>>, } impl<'a> BlockCFG<'a> { // Returns // - A CFG // - A set of infinite loop heads // - and any errors resulting from building the CFG pub fn new( start: Label, blocks: &'a mut BasicBlocks, block_info: &[(Label, BlockInfo)], ) -> (Self, BTreeSet<Label>, Errors) { let mut cfg = BlockCFG { start, blocks, successor_map: BTreeMap::new(), predecessor_map: BTreeMap::new(), }; remove_no_ops::optimize(&mut cfg); // no dead code let dead_code = cfg.recompute(); let mut errors = Errors::new(); for (_lbl, block) in dead_code { dead_code_error(&mut errors, &block) } let infinite_loop_starts = determine_infinite_loop_starts(&cfg, block_info); (cfg, infinite_loop_starts, errors) } /// Recomputes successor/predecessor maps. 
returns removed, dead blocks pub fn recompute(&mut self) -> BasicBlocks { let blocks = &self.blocks; let mut seen = BTreeSet::new(); let mut work_list = VecDeque::new(); seen.insert(self.start); work_list.push_back(self.start); // build successor map from reachable code let mut successor_map = BTreeMap::new(); while let Some(label) = work_list.pop_front() { let last_cmd = blocks.get(&label).unwrap().back().unwrap(); let successors = last_cmd.value.successors(); for successor in &successors { if !seen.contains(successor) { seen.insert(*successor); work_list.push_back(*successor) } } let old = successor_map.insert(label, successors); assert!(old.is_none()); } // build inverse map let mut predecessor_map = successor_map .keys() .cloned() .map(|lbl| (lbl, BTreeSet::new())) .collect::<BTreeMap<_, _>>(); for (parent, children) in &successor_map { for child in children { predecessor_map.get_mut(child).unwrap().insert(*parent); } } self.successor_map = successor_map; self.predecessor_map = predecessor_map; let mut dead_block_labels = vec![]; for label in self.blocks.keys() { if !self.successor_map.contains_key(label) { assert!(!self.predecessor_map.contains_key(label)); dead_block_labels.push(*label); } } let mut dead_blocks = BasicBlocks::new(); for label in dead_block_labels { dead_blocks.insert(label, self.blocks.remove(&label).unwrap()); } dead_blocks } pub fn blocks(&self) -> &BasicBlocks { &self.blocks } pub fn blocks_mut(&mut self) -> &mut BasicBlocks { &mut self.blocks } pub fn block(&self, label: Label) -> &BasicBlock { self.blocks.get(&label).unwrap() } pub fn block_mut(&mut self, label: Label) -> &mut BasicBlock { self.blocks.get_mut(&label).unwrap() } pub fn display_blocks(&self) { for (lbl, block) in self.blocks() { println!("--BLOCK {}--", lbl); for cmd in block { println!("{:#?}", cmd.value); } println!(); } } } impl<'a> CFG for BlockCFG<'a> { fn successors(&self, label: Label) -> &BTreeSet<Label> { self.successor_map.get(&label).unwrap() } fn predecessors(&self, label: Label) -> &BTreeSet<Label> { self.predecessor_map.get(&label).unwrap() } fn commands<'s>(&'s self, label: Label) -> Box<dyn Iterator<Item = (usize, &'s Command)> + 's> { Box::new(self.block(label).iter().enumerate()) } fn num_blocks(&self) -> usize { self.blocks.len() } fn start_block(&self) -> Label { self.start } } const DEAD_ERR_CMD: &str = "Unreachable code. This statement (and any following statements) will \ not be executed. In some cases, this will result in unused resource \ values."; const DEAD_ERR_EXP: &str = "Invalid use of a divergent expression. The code following the \ evaluation of this expression will be dead and should be removed. In \ some cases, this is necessary to prevent unused resource values."; fn dead_code_error(errors: &mut Errors, block: &BasicBlock) { let first_command = block.front().unwrap(); match unreachable_loc(first_command) { Some(loc) => errors.add_deprecated(vec![(loc, DEAD_ERR_EXP.into())]), None if is_implicit_control_flow(&block) => (), None => errors.add_deprecated(vec![(first_command.loc, DEAD_ERR_CMD.into())]), } } fn unreachable_loc(sp!(_, cmd_): &Command) -> Option<Loc> { use Command_ as C; match cmd_ { C::Assign(_, e) => unreachable_loc_exp(e), C::Mutate(el, er) => unreachable_loc_exp(el).or_else(|| unreachable_loc_exp(er)), C::Return { exp: e, .. } | C::Abort(e) | C::IgnoreAndPop { exp: e, .. } | C::JumpIf { cond: e, .. } => unreachable_loc_exp(e), C::Jump { .. 
} => None, C::Break | C::Continue => panic!("ICE break/continue not translated to jumps"), } } fn unreachable_loc_exp(parent_e: &Exp) -> Option<Loc> { use UnannotatedExp_ as E; match &parent_e.exp.value { E::Unreachable => Some(parent_e.exp.loc), E::Unit { .. } | E::Value(_) | E::Constant(_) | E::Spec(_, _) | E::UnresolvedError | E::BorrowLocal(_, _) | E::Copy { .. } | E::Move { .. } => None, E::ModuleCall(mcall) => unreachable_loc_exp(&mcall.arguments), E::Builtin(_, e) | E::Freeze(e) | E::Dereference(e) | E::UnaryExp(_, e) | E::Borrow(_, e, _) | E::Cast(e, _) => unreachable_loc_exp(e), E::BinopExp(e1, _, e2) => unreachable_loc_exp(e1).or_else(|| unreachable_loc_exp(e2)), E::Pack(_, _, fields) => fields.iter().find_map(|(_, _, e)| unreachable_loc_exp(e)), E::ExpList(es) => es.iter().find_map(|item| unreachable_loc_item(item)), } } fn unreachable_loc_item(item: &ExpListItem) -> Option<Loc> { match item { ExpListItem::Single(e, _) | ExpListItem::Splat(_, e, _) => unreachable_loc_exp(e), } } fn is_implicit_control_flow(block: &BasicBlock) -> bool { use Command_ as C; use UnannotatedExp_ as E; block.len() == 1 && match &block.front().unwrap().value { C::Jump { from_user, .. } => !*from_user, C::Return { exp: e, from_user } if !*from_user => matches!( &e.exp.value, E::Unit { case: UnitCase::Implicit } ), _ => false, } } // Relying on the ordered block info (ordered in the linear ordering of the source code) // Determines the infinite loop starts // This cannot be determined in earlier passes due to dead code fn determine_infinite_loop_starts( cfg: &BlockCFG, block_info: &[(Label, BlockInfo)], ) -> BTreeSet<Label> { // Filter dead code let block_info = block_info .iter() .filter(|(lbl, _info)| cfg.blocks().contains_key(lbl)) .collect::<Vec<_>>(); // Fully populate infinite loop starts to be pruned later // And for any block, determine the current loop let mut infinite_loop_starts = BTreeSet::new(); let mut loop_stack: Vec<(Label, LoopEnd)> = vec![]; let mut current_loop_info = Vec::with_capacity(block_info.len()); for (lbl, info) in &block_info { match loop_stack.last() { Some((_, cur_loop_end)) if cur_loop_end.equals(*lbl) => { loop_stack.pop(); } _ => (), } match info { BlockInfo::Other => (), BlockInfo::LoopHead(LoopInfo { is_loop_stmt, .. }) if !*is_loop_stmt => (), BlockInfo::LoopHead(LoopInfo { loop_end, .. }) => { infinite_loop_starts.insert(*lbl); loop_stack.push((*lbl, *loop_end)) } } current_loop_info.push(loop_stack.last().cloned()); } // Given the loop info for any block, determine which loops are infinite // Each 'loop' based loop starts in the set, and is removed if it's break is used, or if a // return or abort is used let mut prev_opt: Option<Label> = None; let zipped = block_info .into_iter() .zip(current_loop_info) .filter_map(|(block_info, cur_loop_opt)| { cur_loop_opt.map(|cur_loop| (block_info, cur_loop)) }); for ((lbl, _info), (cur_loop_start, cur_loop_end)) in zipped { debug_assert!(prev_opt.map(|prev| prev.0 < lbl.0).unwrap_or(true)); maybe_unmark_infinite_loop_starts( &mut infinite_loop_starts, cur_loop_start, cur_loop_end, &cfg.blocks()[&lbl], ); prev_opt = Some(*lbl); } infinite_loop_starts } fn maybe_unmark_infinite_loop_starts( infinite_loop_starts: &mut BTreeSet<Label>, cur_loop_start: Label, cur_loop_end: LoopEnd, block: &BasicBlock, ) { use Command_ as C; // jumps/return/abort are only found at the end of the block match &block.back().unwrap().value { C::Jump { target, .. 
} if cur_loop_end.equals(*target) => { infinite_loop_starts.remove(&cur_loop_start); } C::JumpIf { if_true, if_false, .. } if cur_loop_end.equals(*if_true) || cur_loop_end.equals(*if_false) => { infinite_loop_starts.remove(&cur_loop_start); } C::Return { .. } | C::Abort(_) => { infinite_loop_starts.remove(&cur_loop_start); } C::Jump { .. } | C::JumpIf { .. } | C::Assign(_, _) | C::Mutate(_, _) | C::IgnoreAndPop { .. } => (), C::Break | C::Continue => panic!("ICE break/continue not translated to jumps"), } } //************************************************************************************************** // Reverse Traversal Block CFG //************************************************************************************************** #[derive(Debug)] pub struct ReverseBlockCFG<'a> { start: Label, blocks: &'a mut BasicBlocks, successor_map: &'a mut BTreeMap<Label, BTreeSet<Label>>, predecessor_map: &'a mut BTreeMap<Label, BTreeSet<Label>>, } impl<'a> ReverseBlockCFG<'a> { pub fn new(forward_cfg: &'a mut BlockCFG, infinite_loop_starts: &BTreeSet<Label>) -> Self { let blocks: &'a mut BasicBlocks = &mut forward_cfg.blocks; let forward_successors = &mut forward_cfg.successor_map; let forward_predecessor = &mut forward_cfg.predecessor_map; let end_blocks = { let mut end_blocks = BTreeSet::new(); for (lbl, successors) in forward_successors.iter() { let loop_start_successors = successors .iter() .filter(|l| infinite_loop_starts.contains(l)); for loop_start_successor in loop_start_successors { if lbl >= loop_start_successor { end_blocks.insert(*lbl); } } } for (lbl, block) in blocks.iter() { let last_cmd = block.back().unwrap(); if last_cmd.value.is_exit() { end_blocks.insert(*lbl); } } end_blocks }; // setup fake terminal block that will act as the start node in reverse traversal let terminal = Label(blocks.keys().map(|lbl| lbl.0).max().unwrap_or(0) + 1); assert!(!blocks.contains_key(&terminal), "{:#?}", blocks); blocks.insert(terminal, BasicBlock::new()); for terminal_predecessor in &end_blocks { forward_successors .entry(*terminal_predecessor) .or_insert_with(BTreeSet::new) .insert(terminal); } forward_predecessor.insert(terminal, end_blocks); // ensure map is not partial forward_successors.insert(terminal, BTreeSet::new()); Self { start: terminal, blocks, successor_map: forward_predecessor, predecessor_map: forward_successors, } } pub fn blocks(&self) -> &BasicBlocks { &self.blocks } pub fn block(&self, label: Label) -> &BasicBlock { self.blocks.get(&label).unwrap() } } impl<'a> Drop for ReverseBlockCFG<'a> { fn drop(&mut self) { let empty_block = self.blocks.remove(&self.start); assert!(empty_block.unwrap().is_empty()); let start_predecessors = self.predecessor_map.remove(&self.start); assert!( start_predecessors.is_some(), "ICE missing start node from predecessors" ); let start_successors = self.successor_map.remove(&self.start).unwrap(); for start_successor in start_successors { self.predecessor_map .get_mut(&start_successor) .unwrap() .remove(&self.start); } } } impl<'a> CFG for ReverseBlockCFG<'a> { fn successors(&self, label: Label) -> &BTreeSet<Label> { self.successor_map.get(&label).unwrap() } fn predecessors(&self, label: Label) -> &BTreeSet<Label> { self.predecessor_map.get(&label).unwrap() } fn commands<'s>(&'s self, label: Label) -> Box<dyn Iterator<Item = (usize, &'s Command)> + 's> { Box::new(self.block(label).iter().enumerate().rev()) } fn num_blocks(&self) -> usize { self.blocks.len() } fn start_block(&self) -> Label { self.start } } 
//************************************************************************************************** // Debug //************************************************************************************************** impl AstDebug for BlockCFG<'_> { fn ast_debug(&self, w: &mut AstWriter) { let BlockCFG { start, blocks, successor_map, predecessor_map, } = self; w.writeln("--BlockCFG--"); ast_debug_cfg( w, *start, blocks, successor_map.iter(), predecessor_map.iter(), ); } } impl AstDebug for ReverseBlockCFG<'_> { fn ast_debug(&self, w: &mut AstWriter) { let ReverseBlockCFG { start, blocks, successor_map, predecessor_map, } = self; w.writeln("--ReverseBlockCFG--"); ast_debug_cfg( w, *start, blocks, successor_map.iter(), predecessor_map.iter(), ); } } fn ast_debug_cfg<'a>( w: &mut AstWriter, start: Label, blocks: &BasicBlocks, successor_map: impl Iterator<Item = (&'a Label, &'a BTreeSet<Label>)>, predecessor_map: impl Iterator<Item = (&'a Label, &'a BTreeSet<Label>)>, ) { w.write("successor_map:"); w.indent(4, |w| { for (lbl, nexts) in successor_map { w.write(&format!("{} => [", lbl)); w.comma(nexts, |w, next| w.write(&format!("{}", next))); w.writeln("]") } }); w.write("predecessor_map:"); w.indent(4, |w| { for (lbl, nexts) in predecessor_map { w.write(&format!("{} <= [", lbl)); w.comma(nexts, |w, next| w.write(&format!("{}", next))); w.writeln("]") } }); w.writeln(&format!("start: {}", start)); w.writeln("blocks:"); w.indent(4, |w| blocks.ast_debug(w)); }
32.939394
100
0.531796
f519dc0b7ed599dce14e7144a6966031fa37e374
611
use num_traits; use num_traits::NumCast; use noise::{NoiseModule, Perlin, Seedable}; fn cast<T: NumCast, R: NumCast>(val: T) -> R { num_traits::cast(val).unwrap() } pub struct Noise { perlin: Perlin, } impl Noise { pub fn new() -> Noise { info!("[PX8][Noise] new"); Noise { perlin: Perlin::new() } } pub fn get(&mut self, x: f64, y: f64, z: f64) -> f64 { let r: f64 = cast(self.perlin.get([x, y, z])); r } pub fn set_seed(&mut self, seed: u32) { debug!("Change seed to {:?}", seed); self.perlin.set_seed(seed as usize); } }
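// Hedged usage sketch (not part of the original file): reseeding and then
// sampling the wrapped Perlin source. The coordinates are arbitrary; Perlin
// output is roughly in [-1, 1].
#[allow(dead_code)]
fn demo_noise() {
    let mut n = Noise::new();
    n.set_seed(42);
    let v = n.get(0.5, 0.25, 0.0);
    println!("sampled noise value: {}", v);
}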
19.709677
58
0.546645
e48404bf8212801b7426da501c65356290200e26
1,225
//! @brief zk_token_elgamal syscall tests extern crate solana_program; use { solana_program::{custom_panic_default, msg}, solana_zk_token_sdk::zk_token_elgamal::{ ops, pod::{ElGamalCiphertext, Zeroable}, }, }; #[no_mangle] pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { let zero = ElGamalCiphertext::zeroed(); msg!("add_to"); let one = ops::add_to(&zero, 1).expect("add_to"); msg!("subtract_from"); assert_eq!(zero, ops::subtract_from(&one, 1).expect("subtract_from")); msg!("add"); assert_eq!(one, ops::add(&zero, &one).expect("add")); msg!("subtract"); assert_eq!(zero, ops::subtract(&one, &one).expect("subtract")); msg!("add_with_lo_hi"); assert_eq!( one, ops::add_with_lo_hi( &one, &ElGamalCiphertext::zeroed(), &ElGamalCiphertext::zeroed() ) .expect("add_with_lo_hi") ); msg!("subtract_with_lo_hi"); assert_eq!( one, ops::subtract_with_lo_hi( &one, &ElGamalCiphertext::zeroed(), &ElGamalCiphertext::zeroed() ) .expect("subtract_with_lo_hi") ); 0 } custom_panic_default!();
22.685185
74
0.578776
759274a9644a12d9190240ac89c6248d7df6d688
7,195
use crate::build_filter_helper::derive_non_table_filter; use crate::diagnostic_shim::{Diagnostic, DiagnosticShim}; use crate::field::Field; use crate::model::Model; use crate::utils::{is_has_many, wrap_in_dummy_mod}; use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::parse_quote; pub fn derive(item: &syn::DeriveInput) -> Result<TokenStream, Diagnostic> { let model = Model::from_item(item)?; let pg_loading_handler = if cfg!(feature = "postgres") { Some(derive_loading_handler( &model, item, &quote!(diesel::pg::Pg), )?) } else { None }; let sqlite_loading_handler = if cfg!(feature = "sqlite") { Some(derive_loading_handler( &model, item, &quote!(diesel::sqlite::Sqlite), )?) } else { None }; let pg_non_table_field_filter = if cfg!(feature = "postgres") { Some(derive_non_table_filter( &model, item, &quote!(diesel::pg::Pg), )?) } else { None }; let sqlite_non_table_field_filter = if cfg!(feature = "sqlite") { Some(derive_non_table_filter( &model, item, &quote!(diesel::sqlite::Sqlite), )?) } else { None }; let belongs_to = crate::belonging_to::derive_belonging_to(&model, item)?; Ok(wrap_in_dummy_mod( "wundergraph_entity", &model.name, &quote! { use wundergraph::diesel; use wundergraph::query_builder::selection::LoadingHandler; use wundergraph::graphql_type::WundergraphGraphqlMapper; #pg_loading_handler #sqlite_loading_handler #pg_non_table_field_filter #sqlite_non_table_field_filter #(#belongs_to)* }, )) } fn derive_loading_handler( model: &Model, item: &syn::DeriveInput, backend: &TokenStream, ) -> Result<TokenStream, Diagnostic> { let struct_type = &model.name; let (_, ty_generics, _) = item.generics.split_for_impl(); let table = model.table_type()?; let field_names = model.fields().iter().map(Field::graphql_name); let field_list = model.fields().iter().map(|f| &f.ty); let columns = model.fields().iter().filter_map(|f| { if is_has_many(&f.ty) { None } else { let column = f.sql_name(); Some(quote!(#table::#column)) } }); let primary_keys = model.primary_key(); assert!(!primary_keys.is_empty()); let primary_key_index = model .primary_key() .iter() .map(|primary_key| { model .fields() .iter() .enumerate() .find(|(_, f)| f.sql_name() == primary_key) .map(|(i, _)| { let index = syn::Ident::new(&format!("TupleIndex{}", i), Span::call_site()); quote!(wundergraph::helper::#index) }) .ok_or_else(|| { Span::call_site().error( "No primary key found, use `#[primary_key(\"column\")]` to specify one", ) }) }) .collect::<Result<Vec<_>, _>>()?; let primary_key_index = if primary_key_index.len() == 1 { primary_key_index[0].clone() } else { quote!((#(#primary_key_index,)*)) }; let description = model.fields().iter().enumerate().map(|(i, f)| { if let Some(ref d) = f.doc { quote!(#i => std::option::Option::Some(#d)) } else { quote!(#i => std::option::Option::None) } }); let deprecated = model.fields().iter().enumerate().map(|(i, f)| { if let Some(ref d) = f.deprecated { quote!(#i => std::option::Option::Some(std::option::Option::Some(#d))) } else { quote!(#i => std::option::Option::None) } }); let type_description = model.docs.as_ref().map_or_else( || quote!(std::option::Option::None), |d| quote!(std::option::Option::Some(#d)), ); let filter = model.filter_type().map_or_else( || { quote! 
{ wundergraph::query_builder::selection::filter::FilterWrapper<Self, #backend, __Ctx> } }, |p| quote!(#p), ); let mut generics = item.generics.clone(); generics .params .push(parse_quote!(__Ctx: wundergraph::WundergraphContext + 'static)); { let where_clause = generics.where_clause.get_or_insert(parse_quote!(where)); where_clause .predicates .push(parse_quote!(<__Ctx as wundergraph::WundergraphContext>::Connection: wundergraph::diesel::Connection<Backend = #backend>)); } let (impl_generics, _, where_clause) = generics.split_for_impl(); Ok(quote! { impl #impl_generics WundergraphGraphqlMapper<#backend, __Ctx> for #struct_type #ty_generics #where_clause { type GraphQLType = wundergraph::graphql_type::GraphqlWrapper<#struct_type, #backend, __Ctx>; fn register_arguments<'r>( registry: &mut wundergraph::juniper::Registry<'r, wundergraph::scalar::WundergraphScalarValue>, field: wundergraph::juniper::meta::Field<'r, wundergraph::scalar::WundergraphScalarValue> ) -> wundergraph::juniper::meta::Field<'r, wundergraph::scalar::WundergraphScalarValue> { let arg = registry.arg_with_default::< std::option::Option<wundergraph::query_builder::selection::filter::Filter< <Self as LoadingHandler<#backend, __Ctx>>::Filter, <Self as wundergraph::diesel::associations::HasTable>::Table >> >( "filter", &std::option::Option::None, &std::default::Default::default(), ); field.argument(arg) } } impl #impl_generics LoadingHandler<#backend, __Ctx> for #struct_type #ty_generics #where_clause { type Columns = (#(#columns,)*); type FieldList = (#(#field_list,)*); type PrimaryKeyIndex = #primary_key_index; type Filter = #filter; const FIELD_NAMES: &'static [&'static str] = &[#(stringify!(#field_names),)*]; const TYPE_NAME: &'static str = stringify!(#struct_type); const TYPE_DESCRIPTION: std::option::Option<&'static str> = #type_description; fn field_description(idx: usize) -> std::option::Option<&'static str> { match idx { #(#description,)* _ => std::option::Option::None, } } fn field_deprecation(idx: usize) -> std::option::Option<std::option::Option<&'static str>> { match idx { #(#deprecated,)* _ => std::option::Option::None, } } } }) }
33.621495
141
0.535511
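To make the expansion above concrete, here is a sketch of a plausible input struct. The derive name is an assumption inferred from the `"wundergraph_entity"` dummy-module label, and the table and column names are invented; only the `#[primary_key("...")]` spelling is taken from the macro's own error message.

```rust
// Hypothetical input; depending on the enabled cargo features, the macro then
// emits LoadingHandler impls for Pg and/or Sqlite plus the filter and
// belonging-to glue shown above.
#[derive(WundergraphEntity)] // derive name assumed from the dummy-mod label
#[primary_key("id")]         // attribute spelling taken from the macro's error message
#[table_name = "heros"]      // hypothetical diesel table
pub struct Hero {
    id: i32,
    name: String,
}
```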
16d49954dce7ab214184678fa44a208118d9b10b
683
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // This file was auto-generated using 'src/etc/generate-keyword-span-tests.py' #[feature(struct_variant)]; extern mod extra; struct Error; #[deriving(TotalEq)] struct Struct( Error //~ ERROR ); fn main() {}
27.32
78
0.729136
79f8ce22ce6dcc77a37438f1e69b3cf020d0d097
831
#[derive(Debug)] pub struct Scale { pub min: i64, pub max: i64, pub ticks: Vec<i64>, } impl Scale { pub fn new(mut min: i64, mut max: i64) -> Self { debug_assert!(min <= max); if max < min + 3 { max += 2; min -= 2; } if min > 0 && (max - min) * 4 > max { min = 0; } let l = ((max - min) as f64).log10().floor() as u32; let d = 10i64.pow(l); min = (min / d) * d; let mut tick = min; let mut ticks = vec![tick]; loop { tick += d; ticks.push(tick); if tick > max { break; } } max = ticks[ticks.len() - 1]; Self { min, max, ticks } } pub fn range(&self) -> i64 { self.max - self.min } }
23.083333
60
0.399519
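A usage sketch (mine), tracing the rounding above by hand: a positive `min` that is small relative to the span is snapped to 0, the step becomes the leading power of ten, and the tick loop runs one step past the requested `max`, widening `max` in the process.

```rust
fn main() {
    // Requested span 3..=47: min snaps to 0, d = 10, ticks overshoot max by one step.
    let s = Scale::new(3, 47);
    assert_eq!(s.min, 0);
    assert_eq!(s.max, 50);
    assert_eq!(s.ticks, vec![0, 10, 20, 30, 40, 50]);
    assert_eq!(s.range(), 50);
}
```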
38a4a5b587bc26ba1d4781345aad58f67ac9b3f5
7,962
use std::marker::PhantomData; use futures::{Async, Future, Poll}; use super::{IntoNewService, NewService, Service}; use crate::cell::Cell; /// Service for the `and_then` combinator, chaining a computation onto the end /// of another service which completes successfully. /// /// This is created by the `ServiceExt::and_then` method. pub struct AndThen<A, B> { a: A, b: Cell<B>, } impl<A, B> AndThen<A, B> { /// Create new `AndThen` combinator pub fn new(a: A, b: B) -> Self where A: Service, B: Service<Request = A::Response, Error = A::Error>, { Self { a, b: Cell::new(b) } } } impl<A, B> Clone for AndThen<A, B> where A: Clone, { fn clone(&self) -> Self { AndThen { a: self.a.clone(), b: self.b.clone(), } } } impl<A, B> Service for AndThen<A, B> where A: Service, B: Service<Request = A::Response, Error = A::Error>, { type Request = A::Request; type Response = B::Response; type Error = A::Error; type Future = AndThenFuture<A, B>; fn poll_ready(&mut self) -> Poll<(), Self::Error> { let not_ready = self.a.poll_ready()?.is_not_ready(); if self.b.get_mut().poll_ready()?.is_not_ready() || not_ready { Ok(Async::NotReady) } else { Ok(Async::Ready(())) } } fn call(&mut self, req: A::Request) -> Self::Future { AndThenFuture::new(self.a.call(req), self.b.clone()) } } pub struct AndThenFuture<A, B> where A: Service, B: Service<Request = A::Response, Error = A::Error>, { b: Cell<B>, fut_b: Option<B::Future>, fut_a: Option<A::Future>, } impl<A, B> AndThenFuture<A, B> where A: Service, B: Service<Request = A::Response, Error = A::Error>, { fn new(a: A::Future, b: Cell<B>) -> Self { AndThenFuture { b, fut_a: Some(a), fut_b: None, } } } impl<A, B> Future for AndThenFuture<A, B> where A: Service, B: Service<Request = A::Response, Error = A::Error>, { type Item = B::Response; type Error = A::Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { if let Some(ref mut fut) = self.fut_b { return fut.poll(); } match self.fut_a.as_mut().expect("Bug in actix-service").poll() { Ok(Async::Ready(resp)) => { let _ = self.fut_a.take(); self.fut_b = Some(self.b.get_mut().call(resp)); self.poll() } Ok(Async::NotReady) => Ok(Async::NotReady), Err(err) => Err(err), } } } /// `AndThenNewService` new service combinator pub struct AndThenNewService<A, B, C> where A: NewService<C>, B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError>, { a: A, b: B, _t: PhantomData<C>, } impl<A, B, C> AndThenNewService<A, B, C> where A: NewService<C>, B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError>, { /// Create new `AndThen` combinator pub fn new<F: IntoNewService<B, C>>(a: A, f: F) -> Self { Self { a, b: f.into_new_service(), _t: PhantomData, } } } impl<A, B, C> NewService<C> for AndThenNewService<A, B, C> where A: NewService<C>, B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError>, { type Request = A::Request; type Response = B::Response; type Error = A::Error; type Service = AndThen<A::Service, B::Service>; type InitError = A::InitError; type Future = AndThenNewServiceFuture<A, B, C>; fn new_service(&self, cfg: &C) -> Self::Future { AndThenNewServiceFuture::new(self.a.new_service(cfg), self.b.new_service(cfg)) } } impl<A, B, C> Clone for AndThenNewService<A, B, C> where A: NewService<C> + Clone, B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError> + Clone, { fn clone(&self) -> Self { Self { a: self.a.clone(), b: self.b.clone(), _t: PhantomData, } } } pub struct AndThenNewServiceFuture<A, B, C> where A: 
NewService<C>, B: NewService<C, Request = A::Response>, { fut_b: B::Future, fut_a: A::Future, a: Option<A::Service>, b: Option<B::Service>, } impl<A, B, C> AndThenNewServiceFuture<A, B, C> where A: NewService<C>, B: NewService<C, Request = A::Response>, { fn new(fut_a: A::Future, fut_b: B::Future) -> Self { AndThenNewServiceFuture { fut_a, fut_b, a: None, b: None, } } } impl<A, B, C> Future for AndThenNewServiceFuture<A, B, C> where A: NewService<C>, B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError>, { type Item = AndThen<A::Service, B::Service>; type Error = A::InitError; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { if self.a.is_none() { if let Async::Ready(service) = self.fut_a.poll()? { self.a = Some(service); } } if self.b.is_none() { if let Async::Ready(service) = self.fut_b.poll()? { self.b = Some(service); } } if self.a.is_some() && self.b.is_some() { Ok(Async::Ready(AndThen::new( self.a.take().unwrap(), self.b.take().unwrap(), ))) } else { Ok(Async::NotReady) } } } #[cfg(test)] mod tests { use futures::future::{ok, FutureResult}; use futures::{Async, Poll}; use std::cell::Cell; use std::rc::Rc; use super::*; use crate::{NewService, Service, ServiceExt}; struct Srv1(Rc<Cell<usize>>); impl Service for Srv1 { type Request = &'static str; type Response = &'static str; type Error = (); type Future = FutureResult<Self::Response, ()>; fn poll_ready(&mut self) -> Poll<(), Self::Error> { self.0.set(self.0.get() + 1); Ok(Async::Ready(())) } fn call(&mut self, req: &'static str) -> Self::Future { ok(req) } } #[derive(Clone)] struct Srv2(Rc<Cell<usize>>); impl Service for Srv2 { type Request = &'static str; type Response = (&'static str, &'static str); type Error = (); type Future = FutureResult<Self::Response, ()>; fn poll_ready(&mut self) -> Poll<(), Self::Error> { self.0.set(self.0.get() + 1); Ok(Async::Ready(())) } fn call(&mut self, req: &'static str) -> Self::Future { ok((req, "srv2")) } } #[test] fn test_poll_ready() { let cnt = Rc::new(Cell::new(0)); let mut srv = Srv1(cnt.clone()).and_then(Srv2(cnt.clone())); let res = srv.poll_ready(); assert!(res.is_ok()); assert_eq!(res.unwrap(), Async::Ready(())); assert_eq!(cnt.get(), 2); } #[test] fn test_call() { let cnt = Rc::new(Cell::new(0)); let mut srv = Srv1(cnt.clone()).and_then(Srv2(cnt)); let res = srv.call("srv1").poll(); assert!(res.is_ok()); assert_eq!(res.unwrap(), Async::Ready(("srv1", "srv2"))); } #[test] fn test_new_service() { let cnt = Rc::new(Cell::new(0)); let cnt2 = cnt.clone(); let blank = move || Ok::<_, ()>(Srv1(cnt2.clone())); let new_srv = blank .into_new_service() .and_then(move || Ok(Srv2(cnt.clone()))); if let Async::Ready(mut srv) = new_srv.new_service(&()).poll().unwrap() { let res = srv.call("srv1").poll(); assert!(res.is_ok()); assert_eq!(res.unwrap(), Async::Ready(("srv1", "srv2"))); } else { panic!() } } }
25.601286
96
0.528259
56170bbb869f31e8d3cec75b7e072ae8976fde6d
3,017
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // The wasm32-unknown-unknown target is currently an experimental version of a // wasm-based target which does *not* use the Emscripten toolchain. Instead // this toolchain is based purely on LLVM's own toolchain, using LLVM's native // WebAssembly backend as well as LLD for a native linker. // // There's some trickery below on crate types supported and various defaults // (aka panic=abort by default), but otherwise this is in general a relatively // standard target. use super::{LldFlavor, LinkerFlavor, Target, TargetOptions, PanicStrategy}; pub fn target() -> Result<Target, String> { let opts = TargetOptions { // we allow dynamic linking, but only cdylibs. Basically we allow a // final library artifact that exports some symbols (a wasm module) but // we don't allow intermediate `dylib` crate types dynamic_linking: true, only_cdylib: true, // This means we'll just embed a `start` function in the wasm module executables: true, // relatively self-explanatory! exe_suffix: ".wasm".to_string(), dll_prefix: "".to_string(), dll_suffix: ".wasm".to_string(), linker_is_gnu: false, // A bit of a lie, but "eh" max_atomic_width: Some(32), // Unwinding doesn't work right now, so the whole target unconditionally // defaults to panic=abort. Note that this is guaranteed to change in // the future once unwinding is implemented. Don't rely on this. panic_strategy: PanicStrategy::Abort, // Wasm doesn't have atomics yet, so tell LLVM that we're in a single // threaded model which will legalize atomics to normal operations. singlethread: true, // no dynamic linking, no need for default visibility! default_hidden_visibility: true, .. Default::default() }; Ok(Target { llvm_target: "wasm32-unknown-unknown-wasm".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), target_c_int_width: "32".to_string(), // This is basically guaranteed to change in the future, don't rely on // this. Use `not(target_os = "emscripten")` for now. target_os: "unknown".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(), arch: "wasm32".to_string(), linker_flavor: LinkerFlavor::Lld(LldFlavor::Wasm), options: opts, }) }
41.902778
80
0.672522
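A usage sketch (mine, not from the file): since `only_cdylib` is set, a crate built for this target is normally a cdylib whose `#[no_mangle]` exports become functions of the resulting wasm module.

```rust
// Cargo.toml (assumed): [lib] crate-type = ["cdylib"]
// Build with: cargo build --target wasm32-unknown-unknown --release

#[no_mangle]
pub extern "C" fn add(a: i32, b: i32) -> i32 {
    a + b // exported as the wasm function `add`
}
```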
e6de0a844e01ad91572731ae73e092c2a8ba25db
467
mod window; use gtk::prelude::*; use gtk::Application; use crate::window::Window; fn main() { // Create a new application let app = Application::builder() .application_id("org.gtk-rs.example") .build(); // Connect to signals app.connect_activate(build_ui); // Run the application app.run(); } fn build_ui(app: &Application) { // Create a new window and show it let window = Window::new(app); window.show(); }
17.961538
45
0.620985
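The `window` module is not included in this record. Below is a minimal sketch (mine) of a compatible `window.rs`; real gtk-rs book examples implement `Window` via GObject subclassing and composite templates, so this plain wrapper and its title string are assumptions.

```rust
// src/window.rs — hypothetical minimal implementation matching main.rs above.
use gtk::prelude::*;
use gtk::{Application, ApplicationWindow};

pub struct Window {
    inner: ApplicationWindow,
}

impl Window {
    pub fn new(app: &Application) -> Self {
        let inner = ApplicationWindow::builder()
            .application(app)
            .title("GTK Example") // hypothetical title
            .build();
        Self { inner }
    }

    pub fn show(&self) {
        self.inner.show();
    }
}
```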
fcea769f76cb6cb5ac28d1d8481000477f74c9b0
825
/* * * * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: 1.0.0 * * Generated by: https://openapi-generator.tech */ #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LolCollectionsGameDataSplashMetadata { #[serde(rename = "CalculatedColor", skip_serializing_if = "Option::is_none")] pub calculated_color: Option<String>, #[serde(rename = "OverrideColor", skip_serializing_if = "Option::is_none")] pub override_color: Option<String>, } impl LolCollectionsGameDataSplashMetadata { pub fn new() -> LolCollectionsGameDataSplashMetadata { LolCollectionsGameDataSplashMetadata { calculated_color: None, override_color: None, } } }
25.78125
109
0.704242
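A serde round-trip sketch (mine) showing the effect of the `rename` and `skip_serializing_if` attributes on this generated struct; it assumes `serde_json` is available as a dependency.

```rust
fn main() {
    let meta = LolCollectionsGameDataSplashMetadata {
        calculated_color: Some("#1A78AE".to_string()),
        override_color: None,
    };
    // `OverrideColor` is skipped because it is None; the key casing follows the renames.
    let json = serde_json::to_string(&meta).unwrap();
    assert_eq!(json, r#"{"CalculatedColor":"#1A78AE"}"#);
}
```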
01847e1e51fa8cc77e2218ed96c2dacb31e281df
56
mod prim; pub use self::prim::{prim, prim_with_start};
14
44
0.714286
5014e7cb7b6511598b06c34a09231f73ad780a4e
2,621
use super::*; use lsp_types::{ DocumentChanges, OneOf, OptionalVersionedTextDocumentIdentifier, Position, PrepareRenameResponse, TextDocumentEdit, TextEdit, Url, WorkspaceEdit, }; pub(crate) fn prepare_rename( uri: Url, position: Position, docs: &Docs, wa: &mut WorkspaceAnalysis, ) -> Option<PrepareRenameResponse> { let (doc, pos) = from_document_position(&uri, position, docs)?; let project = wa.require_project_for_doc(doc); // FIXME: Return Some only when there is an identifier directly under the cursor whose definition lives in a workspace file (not in common or hsphelp). let (_, loc) = project.locate_symbol(doc, pos)?; let range = loc_to_range(loc); Some(PrepareRenameResponse::Range(range)) } pub(crate) fn rename( uri: Url, position: Position, new_name: String, docs: &Docs, wa: &mut WorkspaceAnalysis, ) -> Option<WorkspaceEdit> { // Enumerate the occurrences (definition sites and use sites) of the symbol matching the identifier under the cursor. let locs = { let (doc, pos) = from_document_position(&uri, position, docs)?; let project = wa.require_project_for_doc(doc); let (symbol, _) = project.locate_symbol(doc, pos)?; let mut locs = vec![]; project.collect_symbol_defs(&symbol, &mut locs); project.collect_symbol_uses(&symbol, &mut locs); if locs.is_empty() { return None; } // A single occurrence can count as both a definition and a use, so remove duplicates. // (Including duplicated edits in the response makes the rename fail.) locs.sort(); locs.dedup(); locs }; // Build the rename edits. (Replace every location where the symbol is written with the new name.) let changes = { let mut edits = vec![]; for loc in locs { let location = match loc_to_location(loc, docs) { Some(location) => location, None => continue, }; let (uri, range) = (location.uri, location.range); // Do not modify files in the common directory. if uri.as_str().contains("common") { return None; } let version = docs.get_version(loc.doc).unwrap_or(NO_VERSION); let text_document = OptionalVersionedTextDocumentIdentifier { uri, version: Some(version), }; let text_edit = TextEdit { range, new_text: new_name.to_string(), }; edits.push(TextDocumentEdit { text_document, edits: vec![OneOf::Left(text_edit)], }); } DocumentChanges::Edits(edits) }; Some(WorkspaceEdit { document_changes: Some(changes), ..WorkspaceEdit::default() }) }
28.182796
85
0.585273
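For readers new to the `lsp_types` shapes assembled above, here is a self-contained sketch (mine) of a one-edit `WorkspaceEdit`; the URI, version, and positions are made up.

```rust
use lsp_types::{
    DocumentChanges, OneOf, OptionalVersionedTextDocumentIdentifier, Position, Range,
    TextDocumentEdit, TextEdit, Url, WorkspaceEdit,
};

fn main() {
    // One edit: replace characters 4..11 on line 0 of a (made-up) document.
    let text_document = OptionalVersionedTextDocumentIdentifier {
        uri: Url::parse("file:///example/main.hsp").unwrap(),
        version: Some(1),
    };
    let text_edit = TextEdit {
        range: Range::new(Position::new(0, 4), Position::new(0, 11)),
        new_text: "new_name".to_string(),
    };
    let edit = WorkspaceEdit {
        document_changes: Some(DocumentChanges::Edits(vec![TextDocumentEdit {
            text_document,
            edits: vec![OneOf::Left(text_edit)],
        }])),
        ..WorkspaceEdit::default()
    };
    println!("{:?}", edit);
}
```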
ef3ccd5aead030131eb2c30dbe34d302bd5fefa3
11,445
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias` /// Returns a pointer to `size` bytes of memory. /// /// Behavior is undefined if the requested size is 0 or the alignment is not a /// power of 2. The alignment must be no larger than the largest supported page /// size on the platform. #[inline] pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 { imp::allocate(size, align) } /// Extends or shrinks the allocation referenced by `ptr` to `size` bytes of /// memory. /// /// Behavior is undefined if the requested size is 0 or the alignment is not a /// power of 2. The alignment must be no larger than the largest supported page /// size on the platform. /// /// The `old_size` and `align` parameters are the parameters that were used to /// create the allocation referenced by `ptr`. The `old_size` parameter may also /// be the value returned by `usable_size` for the requested size. #[inline] pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> *mut u8 { imp::reallocate(ptr, size, align, old_size) } /// Extends or shrinks the allocation referenced by `ptr` to `size` bytes of /// memory in-place. /// /// Returns true if successful, otherwise false if the allocation was not /// altered. /// /// Behavior is undefined if the requested size is 0 or the alignment is not a /// power of 2. The alignment must be no larger than the largest supported page /// size on the platform. /// /// The `old_size` and `align` parameters are the parameters that were used to /// create the allocation referenced by `ptr`. The `old_size` parameter may be /// any value in range_inclusive(requested_size, usable_size). #[inline] pub unsafe fn reallocate_inplace(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> bool { imp::reallocate_inplace(ptr, size, align, old_size) } /// Deallocates the memory referenced by `ptr`. /// /// The `ptr` parameter must not be null. /// /// The `size` and `align` parameters are the parameters that were used to /// create the allocation referenced by `ptr`. The `size` parameter may also be /// the value returned by `usable_size` for the requested size. #[inline] pub unsafe fn deallocate(ptr: *mut u8, size: uint, align: uint) { imp::deallocate(ptr, size, align) } /// Returns the usable size of an allocation created with the specified the /// `size` and `align`. #[inline] pub fn usable_size(size: uint, align: uint) -> uint { imp::usable_size(size, align) } /// Prints implementation-defined allocator statistics. /// /// These statistics may be inconsistent if other threads use the allocator /// during the call. #[unstable] pub fn stats_print() { imp::stats_print(); } /// An arbitrary non-null address to represent zero-size allocations. /// /// This preserves the non-null invariant for types like `Box<T>`. The address may overlap with /// non-zero-size memory allocations. pub static EMPTY: *mut () = 0x1 as *mut (); /// The allocator for unique pointers. 
#[cfg(not(test))] #[lang="exchange_malloc"] #[inline] unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 { if size == 0 { EMPTY as *mut u8 } else { allocate(size, align) } } #[cfg(not(test))] #[lang="exchange_free"] #[inline] unsafe fn exchange_free(ptr: *mut u8, size: uint, align: uint) { deallocate(ptr, size, align); } // The minimum alignment guaranteed by the architecture. This value is used to // add fast paths for low alignment values. In practice, the alignment is a // constant at the call site and the branch will be optimized out. #[cfg(any(target_arch = "arm", target_arch = "mips", target_arch = "mipsel"))] static MIN_ALIGN: uint = 8; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] static MIN_ALIGN: uint = 16; #[cfg(jemalloc)] mod imp { use core::option::{None, Option}; use core::ptr::{RawPtr, null_mut, null}; use core::num::Int; use libc::{c_char, c_int, c_void, size_t}; use super::MIN_ALIGN; #[link(name = "jemalloc", kind = "static")] #[cfg(not(test))] extern {} extern { fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void; fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t; fn je_sdallocx(ptr: *mut c_void, size: size_t, flags: c_int); fn je_nallocx(size: size_t, flags: c_int) -> size_t; fn je_malloc_stats_print(write_cb: Option<extern "C" fn(cbopaque: *mut c_void, *const c_char)>, cbopaque: *mut c_void, opts: *const c_char); } // -lpthread needs to occur after -ljemalloc, the earlier argument isn't enough #[cfg(all(not(windows), not(target_os = "android")))] #[link(name = "pthread")] extern {} // MALLOCX_ALIGN(a) macro #[inline(always)] fn mallocx_align(a: uint) -> c_int { a.trailing_zeros() as c_int } #[inline(always)] fn align_to_flags(align: uint) -> c_int { if align <= MIN_ALIGN { 0 } else { mallocx_align(align) } } #[inline] pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 { let flags = align_to_flags(align); let ptr = je_mallocx(size as size_t, flags) as *mut u8; if ptr.is_null() { ::oom() } ptr } #[inline] pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint, _old_size: uint) -> *mut u8 { let flags = align_to_flags(align); let ptr = je_rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8; if ptr.is_null() { ::oom() } ptr } #[inline] pub unsafe fn reallocate_inplace(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> bool { let flags = align_to_flags(align); let new_size = je_xallocx(ptr as *mut c_void, size as size_t, 0, flags) as uint; // checking for failure to shrink is tricky if size < old_size { usable_size(size, align) == new_size as uint } else { new_size >= size } } #[inline] pub unsafe fn deallocate(ptr: *mut u8, size: uint, align: uint) { let flags = align_to_flags(align); je_sdallocx(ptr as *mut c_void, size as size_t, flags) } #[inline] pub fn usable_size(size: uint, align: uint) -> uint { let flags = align_to_flags(align); unsafe { je_nallocx(size as size_t, flags) as uint } } pub fn stats_print() { unsafe { je_malloc_stats_print(None, null_mut(), null()) } } } #[cfg(all(not(jemalloc), unix))] mod imp { use core::cmp; use core::ptr; use libc; use libc_heap; use super::MIN_ALIGN; extern { fn posix_memalign(memptr: *mut *mut libc::c_void, align: libc::size_t, size: libc::size_t) -> libc::c_int; } #[inline] pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 { if align <= MIN_ALIGN { libc_heap::malloc_raw(size) } else { let mut out = 0 as *mut libc::c_void; let ret = posix_memalign(&mut 
out, align as libc::size_t, size as libc::size_t); if ret != 0 { ::oom(); } out as *mut u8 } } #[inline] pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> *mut u8 { if align <= MIN_ALIGN { libc_heap::realloc_raw(ptr, size) } else { let new_ptr = allocate(size, align); ptr::copy_memory(new_ptr, ptr as *const u8, cmp::min(size, old_size)); deallocate(ptr, old_size, align); new_ptr } } #[inline] pub unsafe fn reallocate_inplace(_ptr: *mut u8, size: uint, _align: uint, old_size: uint) -> bool { size == old_size } #[inline] pub unsafe fn deallocate(ptr: *mut u8, _size: uint, _align: uint) { libc::free(ptr as *mut libc::c_void) } #[inline] pub fn usable_size(size: uint, _align: uint) -> uint { size } pub fn stats_print() {} } #[cfg(all(not(jemalloc), windows))] mod imp { use libc::{c_void, size_t}; use libc; use libc_heap; use core::ptr::RawPtr; use super::MIN_ALIGN; extern { fn _aligned_malloc(size: size_t, align: size_t) -> *mut c_void; fn _aligned_realloc(block: *mut c_void, size: size_t, align: size_t) -> *mut c_void; fn _aligned_free(ptr: *mut c_void); } #[inline] pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 { if align <= MIN_ALIGN { libc_heap::malloc_raw(size) } else { let ptr = _aligned_malloc(size as size_t, align as size_t); if ptr.is_null() { ::oom(); } ptr as *mut u8 } } #[inline] pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint, _old_size: uint) -> *mut u8 { if align <= MIN_ALIGN { libc_heap::realloc_raw(ptr, size) } else { let ptr = _aligned_realloc(ptr as *mut c_void, size as size_t, align as size_t); if ptr.is_null() { ::oom(); } ptr as *mut u8 } } #[inline] pub unsafe fn reallocate_inplace(_ptr: *mut u8, size: uint, _align: uint, old_size: uint) -> bool { size == old_size } #[inline] pub unsafe fn deallocate(ptr: *mut u8, _size: uint, align: uint) { if align <= MIN_ALIGN { libc::free(ptr as *mut libc::c_void) } else { _aligned_free(ptr as *mut c_void) } } #[inline] pub fn usable_size(size: uint, _align: uint) -> uint { size } pub fn stats_print() {} } #[cfg(test)] mod test { extern crate test; use self::test::Bencher; use heap; #[test] fn basic_reallocate_inplace_noop() { unsafe { let size = 4000; let ptr = heap::allocate(size, 8); let ret = heap::reallocate_inplace(ptr, size, 8, size); heap::deallocate(ptr, size, 8); assert!(ret); } } #[bench] fn alloc_owned_small(b: &mut Bencher) { b.iter(|| { box 10i }) } }
31.185286
95
0.577108
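A call-pattern sketch (mine, written in the same pre-1.0 dialect as this module, so it will not build on modern Rust): the size and alignment supplied at allocation time must be passed back at reallocation and deallocation, exactly as the doc comments above require.

```rust
unsafe {
    let ptr = allocate(64, 8);             // 64 bytes, 8-byte alignment
    let ptr = reallocate(ptr, 128, 8, 64); // grow; the old size travels along
    deallocate(ptr, 128, 8);               // size/align must match the live allocation
}
```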
fc9c79819e159589f0128e575c25644a1a3deff4
4,790
use crate::read_lines; pub fn part1() -> usize { let lines = read_lines("./data/day11.txt"); let nrow = lines.len(); let ncol = lines[0].len(); let mut layout: Vec<Vec<i8>> = vec![vec![-1i8; ncol + 2]; nrow + 2]; for i in 0..nrow { for (j, c) in lines[i].bytes().into_iter().enumerate() { match c { b'L' => layout[i + 1][j + 1] = 0, b'#' => layout[i + 1][j + 1] = 1, _ => (), } } } let mut change: Vec<(usize, usize)> = vec![]; let mut changing = true; while changing { while let Some((i, j)) = change.pop() { layout[i][j] ^= 1i8; } for i in 1..nrow + 1 { for j in 1..ncol + 1 { match layout[i][j] { 0 => { if count_adj(&layout, i, j) == 0 { change.push((i, j)); } } 1 => { if count_adj(&layout, i, j) > 3 { change.push((i, j)); } } _ => (), } } } changing = !change.is_empty(); } layout .iter() .map(|line| line.iter().filter(|&&x| x == 1).count()) .sum() } #[inline] fn count_adj(layout: &[Vec<i8>], i: usize, j: usize) -> usize { [ (i - 1, j - 1), (i - 1, j), (i - 1, j + 1), (i, j - 1), (i, j + 1), (i + 1, j - 1), (i + 1, j), (i + 1, j + 1), ] .into_iter() .filter(|&(x, y)| layout[x][y] == 1) .count() } pub fn part2() -> usize { let lines = read_lines("./data/day11.txt"); let nrow = lines.len(); let ncol = lines[0].len(); let mut layout: Vec<Vec<i8>> = vec![vec![-1i8; ncol]; nrow]; for i in 0..nrow { for (j, c) in lines[i].bytes().into_iter().enumerate() { match c { b'L' => layout[i][j] = 0, b'#' => layout[i][j] = 1, _ => (), } } } let mut change: Vec<(usize, usize)> = vec![]; let mut changing = true; while changing { while let Some((i, j)) = change.pop() { layout[i][j] ^= 1i8; } for i in 0..nrow { for j in 0..ncol { match layout[i][j] { 0 => { if count_first_look(&layout, i, j) == 0 { change.push((i, j)); } } 1 => { if count_first_look(&layout, i, j) > 4 { change.push((i, j)); } } _ => (), } } } changing = !change.is_empty(); // pretty_print(&layout); } layout .iter() .map(|line| line.iter().filter(|&&x| x == 1).count()) .sum() } fn count_first_look(layout: &[Vec<i8>], i: usize, j: usize) -> usize { let nrow = layout.len() as i32; let ncol = layout[0].len() as i32; let mut acc = 0; for (dx, dy) in [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] { let mut ii = i as i32; let mut jj = j as i32; loop { ii += dx; jj += dy; if ii >= 0 && ii < nrow && jj >= 0 && jj < ncol { match layout[ii as usize][jj as usize] { 1 => { acc += 1; break; } 0 => break, _ => (), } } else { break; } } } acc } #[allow(dead_code)] fn pretty_print(layout: &[Vec<i8>]) { //let nrow = layout.len(); //let ncol = layout[0].len(); for row in layout.iter() { for item in row.iter() { print!( "{}", match *item { 1 => "#", 0 => "L", _ => ".", } ); } println!(); } // for i in 0..nrow { // for j in 0..ncol { // print!( // "{}", // match layout[i][j] { // 1 => "#", // 0 => "L", // _ => ".", // } // ); // } // println!(); // } println!(); } #[test] fn test_11() { assert_eq!(2247, part1()); assert_eq!(2011, part2()); }
24.818653
72
0.320668
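Two details of the loop above are worth isolating in a sketch (mine): flips are collected during the scan and applied afterwards with XOR, which makes each generation update atomically, and floor/border cells stay at -1 and are never queued.

```rust
fn main() {
    let mut seats = vec![0i8, 1, 0, -1]; // 0 = empty, 1 = occupied, -1 = floor/border
    let to_flip = vec![0usize, 1];       // indices decided during the scan
    for i in to_flip {
        seats[i] ^= 1; // toggles 0 <-> 1; -1 cells are never in the queue
    }
    assert_eq!(seats, vec![1, 0, 0, -1]);
}
```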
3af1382e128462b60a71771ba1aa2af8de4e7b81
22,022
use super::mpz::mp_limb_t; use std; use libc::c_int; #[link(name = "gmp")] extern "C" { static __gmp_bits_per_limb: c_int; } #[test] fn test_limb_size() { // We are assuming that the limb size is the same as the pointer size. assert_eq!(std::mem::size_of::<mp_limb_t>() * 8, unsafe { __gmp_bits_per_limb as usize }); } mod mpz { use super::super::mpz::Mpz; use super::super::mpz::ProbabPrimeResult; use super::super::sign::Sign; use std::str::FromStr; use std::convert::{From, Into}; use std::{i64, u64}; use std::hash::{Hash, Hasher}; use std::collections::hash_map::DefaultHasher; #[test] fn test_set() { let mut x: Mpz = From::<i64>::from(1000); let y: Mpz = From::<i64>::from(5000); assert!(x != y); x.set(&y); assert!(x == y); } #[test] fn test_set_from_str_radix() { let mut x: Mpz = From::<i64>::from(1000); let y: Mpz = From::<i64>::from(5000); assert!(x != y); assert!(x.set_from_str_radix("5000", 10)); assert!(x == y); assert!(!x.set_from_str_radix("aaaa", 2)); } #[test] #[should_panic] fn test_from_str_radix_lower_bound() { let _ = Mpz::from_str_radix("", 1); } #[test] #[should_panic] fn test_from_str_radix_upper_bound() { let _ = Mpz::from_str_radix("", 63); } #[test] #[should_panic] fn test_set_from_str_radix_lower_bound() { let mut x = Mpz::new(); x.set_from_str_radix("", 1); } #[test] #[should_panic] fn test_set_from_str_radix_upper_bound() { let mut x = Mpz::new(); x.set_from_str_radix("", 63); } #[test] fn test_eq() { let x: Mpz = From::<i64>::from(4242142195); let y: Mpz = From::<i64>::from(4242142195); let z: Mpz = From::<i64>::from(4242142196); assert!(x == y); assert!(x != z); assert!(y != z); } #[test] fn test_ord() { let x: Mpz = FromStr::from_str("40000000000000000000000").unwrap(); let y: Mpz = FromStr::from_str("45000000000000000000000").unwrap(); let z: Mpz = FromStr::from_str("50000000000000000000000").unwrap(); assert!(x < y && x < z && y < z); assert!(x <= x && x <= y && x <= z && y <= z); assert!(z > y && z > x && y > x); assert!(z >= z && z >= y && z >= x && y >= x); } #[test] #[should_panic] fn test_div_zero() { let x: Mpz = From::<i64>::from(1); let y = Mpz::new(); x / y; } #[test] #[should_panic] fn test_rem_zero() { let x: Mpz = From::<i64>::from(1); let y = Mpz::new(); x % y; } #[test] fn test_div_round() { let x: Mpz = From::<i64>::from(2); let y: Mpz = From::<i64>::from(3); assert!((&x / &y).to_string() == (2i32 / 3).to_string()); assert!((&x / -&y).to_string() == (2i32 / -3).to_string()); } #[test] fn test_rem() { let x: Mpz = From::<i64>::from(20); let y: Mpz = From::<i64>::from(3); assert!((&x % &y).to_string() == (20i32 % 3).to_string()); assert!((&x % 3).to_string() == (20i32 % 3).to_string()); assert!((&x % -&y).to_string() == (20i32 % -3).to_string()); assert!((-&x % &y).to_string() == (-20i32 % 3).to_string()); } #[test] fn test_add() { let x: Mpz = From::<i64>::from(2); let y: Mpz = From::<i64>::from(3); let str5 = 5i32.to_string(); assert!((&x + &y).to_string() == str5); assert!((&x + 3).to_string() == str5); assert!((&y + 2).to_string() == str5); } #[test] fn test_sub() { let x: Mpz = From::<i64>::from(2); let y: Mpz = From::<i64>::from(3); assert!((&x - &y).to_string() == (-1i32).to_string()); assert!((&y - &x).to_string() == 1i32.to_string()); assert!((&y - 8).to_string() == (-5i32).to_string()); } #[test] fn test_mul() { let x: Mpz = From::<i64>::from(2); let y: Mpz = From::<i64>::from(3); assert!((&x * &y).to_string() == 6i32.to_string()); assert!((&x * 3i64).to_string() == 6i32.to_string()); assert!((&y * -5i64).to_string() == 
(-15i32).to_string()); // check with values not fitting in 32 bits assert!((&x * 5000000000i64).to_string() == 10000000000i64.to_string()); } #[test] fn test_to_str_radix() { let x: Mpz = From::<i64>::from(255); assert!(x.to_str_radix(16) == "ff".to_string()); } #[test] fn test_to_string() { let x: Mpz = FromStr::from_str("1234567890").unwrap(); assert!(x.to_string() == "1234567890".to_string()); } #[test] fn test_invalid_str() { let x: Result<Mpz, _> = FromStr::from_str("foobar"); assert!(x.is_err()); } #[test] fn test_clone() { let a: Mpz = From::<i64>::from(100); let b = a.clone(); let aplusb: Mpz = From::<i64>::from(200); assert!(b == a); assert!(a + b == aplusb); } #[test] fn test_from_int() { let x: Mpz = From::<i64>::from(150); assert!(x.to_string() == "150".to_string()); assert!(x == FromStr::from_str("150").unwrap()); } #[test] fn test_from_slice_u8() { let v: Vec<u8> = vec!(255, 255); let x: Mpz = From::from(&v[..]); assert!(x.to_string() == "65535".to_string()); } #[test] fn test_abs() { let x: Mpz = From::<i64>::from(1000); let y: Mpz = From::<i64>::from(-1000); assert!(-&x == y); assert!(x == -&y); assert!(x == y.abs()); assert!(x.abs() == y.abs()); } #[test] fn test_div_floor() { let two: Mpz = From::<i64>::from(2); let eight: Mpz = From::<i64>::from(8); let minuseight: Mpz = From::<i64>::from(-8); let three: Mpz = From::<i64>::from(3); let minusthree: Mpz = From::<i64>::from(-3); assert_eq!(eight.div_floor(&three), two); assert_eq!(eight.div_floor(&minusthree), minusthree); assert_eq!(minuseight.div_floor(&three), minusthree); assert_eq!(minuseight.div_floor(&minusthree), two); } #[test] fn test_mod_floor() { let one: Mpz = From::<i64>::from(1); let minusone: Mpz = From::<i64>::from(-1); let two: Mpz = From::<i64>::from(2); let minustwo: Mpz = From::<i64>::from(-2); let three: Mpz = From::<i64>::from(3); let minusthree: Mpz = From::<i64>::from(-3); let eight: Mpz = From::<i64>::from(8); let minuseight: Mpz = From::<i64>::from(-8); assert_eq!(eight.mod_floor(&three), two); assert_eq!(eight.mod_floor(&minusthree), minusone); assert_eq!(minuseight.mod_floor(&three), one); assert_eq!(minuseight.mod_floor(&minusthree), minustwo); } #[test] fn test_bitand() { let a = 0b1001_0111; let b = 0b1100_0100; let mpza: Mpz = From::<i64>::from(a); let mpzb: Mpz = From::<i64>::from(b); let mpzres: Mpz = From::<i64>::from(a & b); assert!(mpza & mpzb == mpzres); } #[test] fn test_bitor() { let a = 0b1001_0111; let b = 0b1100_0100; let mpza: Mpz = From::<i64>::from(a); let mpzb: Mpz = From::<i64>::from(b); let mpzres: Mpz = From::<i64>::from(a | b); assert!(mpza | mpzb == mpzres); } #[test] fn test_bitxor() { let a = 0b1001_0111; let b = 0b1100_0100; let mpza: Mpz = From::<i64>::from(a); let mpzb: Mpz = From::<i64>::from(b); let mpzres: Mpz = From::<i64>::from(a ^ b); assert!(mpza ^ mpzb == mpzres); } #[test] fn test_shifts() { let i = 227; let j: Mpz = From::<i64>::from(i); assert!((i << 4).to_string() == (&j << 4).to_string()); assert!((-i << 4).to_string() == (-&j << 4).to_string()); assert!((i >> 4).to_string() == (&j >> 4).to_string()); assert!((-i >> 4).to_string() == (-&j >> 4).to_string()); } #[test] fn test_compl() { let a: Mpz = From::<i64>::from(13); let b: Mpz = From::<i64>::from(-442); assert!(a.compl().to_string() == (!13i32).to_string()); assert!(b.compl().to_string() == (!-442i32).to_string()); } #[test] fn test_pow() { let a: Mpz = From::<i64>::from(2); let b: Mpz = From::<i64>::from(8); assert!(a.pow(3) == b); assert!(Mpz::ui_pow_ui(2, 3) == b); } #[test] fn test_powm() { let 
a: Mpz = From::<i64>::from(13); let b: Mpz = From::<i64>::from(7); let p: Mpz = From::<i64>::from(19); let c: Mpz = From::<i64>::from(10); assert!(a.powm(&b, &p) == c); } #[test] fn test_powm_sec() { let a: Mpz = From::<i64>::from(13); let b: Mpz = From::<i64>::from(7); let p: Mpz = From::<i64>::from(19); let c: Mpz = From::<i64>::from(10); assert!(a.powm_sec(&b, &p) == c); } #[test] fn test_popcount() { assert!(Mpz::from_str_radix("1010010011", 2).unwrap().popcount() == 5); } #[test] fn test_hamdist() { let a: Mpz = From::<i64>::from(0b1011_0001); let b: Mpz = From::<i64>::from(0b0010_1011); assert!(a.hamdist(&b) == 4); } #[test] fn test_bit_length() { let a: Mpz = From::<i64>::from(0b1011_0000_0001_0000); let b: Mpz = From::<i64>::from(0b101); assert!(a.bit_length() == 16); assert!(b.bit_length() == 3); } #[test] fn test_probab_prime() { let prime: Mpz = From::<i64>::from(2); assert!(prime.probab_prime(15) == ProbabPrimeResult::Prime); let not_prime: Mpz = From::<i64>::from(4); assert!(not_prime.probab_prime(15) == ProbabPrimeResult::NotPrime); } #[test] fn test_nextprime() { let a: Mpz = From::<i64>::from(123456); let b: Mpz = From::<i64>::from(123457); assert!(a.nextprime() == b); } #[test] fn test_gcd() { let zero: Mpz = From::<i64>::from(0); let three: Mpz = From::<i64>::from(3); let six: Mpz = From::<i64>::from(6); let eighteen: Mpz = From::<i64>::from(18); let twentyfour: Mpz = From::<i64>::from(24); assert!(zero.gcd(&zero) == zero); assert!(three.gcd(&six) == three); assert!(eighteen.gcd(&twentyfour) == six); } #[test] fn test_gcdext() { let six: Mpz = From::<i64>::from(6); let eighteen: Mpz = From::<i64>::from(18); let twentyfour: Mpz = From::<i64>::from(24); let (g, s, t) = eighteen.gcdext(&twentyfour); assert!(g == six); assert!(g == s*eighteen + t*twentyfour); } #[test] fn test_lcm() { let zero: Mpz = From::<i64>::from(0); let three: Mpz = From::<i64>::from(3); let five: Mpz = From::<i64>::from(5); let six: Mpz = From::<i64>::from(6); let eighteen: Mpz = From::<i64>::from(18); let twentyfour: Mpz = From::<i64>::from(24); let seventytwo: Mpz = From::<i64>::from(72); assert!(zero.lcm(&five) == zero); assert!(five.lcm(&zero) == zero); assert!(three.lcm(&six) == six); assert!(eighteen.lcm(&twentyfour) == seventytwo); } #[test] fn test_is_multiple_of() { let two: Mpz = From::<i64>::from(2); let three: Mpz = From::<i64>::from(3); let six: Mpz = From::<i64>::from(6); assert!(six.is_multiple_of(&two)); assert!(six.is_multiple_of(&three)); assert!(!three.is_multiple_of(&two)); } #[test] fn test_modulus() { let minusone: Mpz = From::<i64>::from(-1); let two: Mpz = From::<i64>::from(2); let three: Mpz = From::<i64>::from(3); assert_eq!(two.modulus(&three), two); assert_eq!(minusone.modulus(&three), two); } #[test] fn test_invert() { let two: Mpz = From::<i64>::from(2); let three: Mpz = From::<i64>::from(3); let four: Mpz = From::<i64>::from(4); let five: Mpz = From::<i64>::from(5); let eleven: Mpz = From::<i64>::from(11); assert!(three.invert(&eleven) == Some(four.clone())); assert!(four.invert(&eleven) == Some(three.clone())); assert!(two.invert(&five) == Some(three.clone())); assert!(three.invert(&five) == Some(two.clone())); assert!(two.invert(&four).is_none()); } #[test] fn test_one() { let onea: Mpz = From::<i64>::from(1); let oneb: Mpz = From::<i64>::from(1); assert!(onea == oneb); } #[test] fn test_bit_fiddling() { let mut xs: Mpz = From::<i64>::from(0b1010_1000_0010_0011); assert!(xs.bit_length() == 16); let mut ys = [true, false, true, false, true, false, false, false, false, false, 

true, false, false, false, true, true]; ys.reverse(); for i in 0..xs.bit_length() { assert!(xs.tstbit(i) == ys[i]); } xs.setbit(0); ys[0] = true; xs.setbit(3); ys[3] = true; xs.clrbit(1); ys[1] = false; xs.clrbit(5); ys[5] = false; xs.combit(14); ys[14] = !ys[14]; xs.combit(15); ys[15] = !ys[15]; for i in 0..xs.bit_length() { assert!(xs.tstbit(i) == ys[i]); } } #[test] fn test_root() { let x: Mpz = From::<i64>::from(123456); let y: Mpz = From::<i64>::from(49); assert!(x.root(3) == y); } #[test] fn test_sqrt() { let x: Mpz = From::<i64>::from(567); let y: Mpz = From::<i64>::from(23); assert!(x.sqrt() == y); } #[test] fn test_hash_short() { let zero: Mpz = From::<i64>::from(0); let one: Mpz = From::<i64>::from(1); let two = &one + &one; let hash = |x : &Mpz| { let mut hasher = DefaultHasher::new(); x.hash(&mut hasher); hasher.finish() }; assert!(hash(&zero) != hash(&one)); assert_eq!(hash(&one), hash(&(&two - &one))); } #[test] fn test_hash_long() { let a = Mpz::from_str_radix("348917329847193287498312749187234192387", 10) .unwrap(); let b = Mpz::from_str_radix("348917329847193287498312749187234192386", 10) .unwrap(); let one: Mpz = From::<i64>::from(1); let hash = |x : &Mpz| { let mut hasher = DefaultHasher::new(); x.hash(&mut hasher); hasher.finish() }; assert!(hash(&a) != hash(&b)); assert_eq!(hash(&a), hash(&(&b + &one))); assert_eq!(hash(&(&a - &a)), hash(&(&one - &one))); } #[test] fn test_to_vec_u8() { let minus_five: Mpz = From::<i64>::from(-5); let minus_one: Mpz = From::<i64>::from(-1); let zero: Mpz = From::<i64>::from(0); let one: Mpz = From::<i64>::from(1); let five: Mpz = From::<i64>::from(5); let xffff: Mpz = From::<i64>::from(65535); let max_u64: Mpz = From::<u64>::from(u64::MAX); assert_eq!(Into::<Vec<u8>>::into(&minus_five), vec!(5u8)); assert_eq!(Into::<Vec<u8>>::into(&minus_one), vec!(1u8)); assert_eq!(Into::<Vec<u8>>::into(&zero), vec!(0u8)); assert_eq!(Into::<Vec<u8>>::into(&one), vec!(1u8)); assert_eq!(Into::<Vec<u8>>::into(&five), vec!(5u8)); assert_eq!(Into::<Vec<u8>>::into(&xffff), vec!(255u8, 255u8)); assert_eq!(Into::<Vec<u8>>::into(&max_u64), vec!(255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8)); } #[test] fn test_to_u64() { let minus_five: Mpz = From::<i64>::from(-5); let minus_one: Mpz = From::<i64>::from(-1); let zero: Mpz = From::<i64>::from(0); let one: Mpz = From::<i64>::from(1); let five: Mpz = From::<i64>::from(5); let max_u64: Mpz = From::<u64>::from(u64::MAX); assert_eq!(Into::<Option<u64>>::into(&minus_five), None); assert_eq!(Into::<Option<u64>>::into(&minus_one), None); assert_eq!(Into::<Option<u64>>::into(&zero), Some(0u64)); assert_eq!(Into::<Option<u64>>::into(&one), Some(1u64)); assert_eq!(Into::<Option<u64>>::into(&five), Some(5u64)); assert_eq!(Into::<Option<u64>>::into(&max_u64), Some(u64::MAX)); assert_eq!(Into::<Option<u64>>::into(&(&max_u64 + &one)), None); } #[test] fn test_to_i64() { let min_i64: Mpz = From::<i64>::from(i64::MIN); let minus_five: Mpz = From::<i64>::from(-5); let minus_one: Mpz = From::<i64>::from(-1); let zero: Mpz = From::<i64>::from(0); let one: Mpz = From::<i64>::from(1); let five: Mpz = From::<i64>::from(5); let max_i64: Mpz = From::<i64>::from(i64::MAX); assert_eq!(Into::<Option<i64>>::into(&(&min_i64 - &one)), None); assert_eq!(Into::<Option<i64>>::into(&min_i64), Some(i64::MIN)); assert_eq!(Into::<Option<i64>>::into(&minus_five), Some(-5i64)); assert_eq!(Into::<Option<i64>>::into(&minus_one), Some(-1i64)); assert_eq!(Into::<Option<i64>>::into(&zero), Some(0i64)); assert_eq!(Into::<Option<i64>>::into(&one), 
Some(1i64)); assert_eq!(Into::<Option<i64>>::into(&five), Some(5i64)); assert_eq!(Into::<Option<i64>>::into(&max_i64), Some(i64::MAX)); assert_eq!(Into::<Option<i64>>::into(&(&max_i64 + &one)), None); } #[test] fn test_sign() { let zero: Mpz = From::<i64>::from(0); let five: Mpz = From::<i64>::from(5); let minus_five: Mpz = From::<i64>::from(-5); assert_eq!(zero.sign(), Sign::Zero); assert_eq!(five.sign(), Sign::Positive); assert_eq!(minus_five.sign(), Sign::Negative); } } mod rand { use std::convert::From; use super::super::mpz::Mpz; use super::super::rand::RandState; #[test] fn test_randstate() { let mut state = RandState::new(); state.seed_ui(42); for _ in 1u32..1000 { for x in 1i64..10 { let upper: Mpz = From::<i64>::from(x); assert!(state.urandom(&upper) < upper); } } } } mod mpq { use std::convert::From; use std::u64; use super::super::mpq::Mpq; use super::super::mpz::Mpz; use super::super::sign::Sign; #[test] fn test_one() { let onea: Mpq = From::<i64>::from(1); let oneb: Mpq = From::<i64>::from(1); assert!(onea == oneb); } #[test] #[should_panic] fn test_div_zero() { let x: Mpq = From::<i64>::from(1); let y = Mpq::new(); x / y; } #[test] #[should_panic] fn test_invert_zero() { Mpq::new().invert(); } #[test] fn test_fmt() { let fourty: Mpq = From::<i64>::from(40); let six: Mpq = From::<i64>::from(6); let fourty_sixths = &fourty / &six; assert_eq!(format!("{:?}", fourty), "40"); assert_eq!(format!("{:?}", -&fourty), "-40"); assert_eq!(format!("{:?}", fourty_sixths), "20/3"); assert_eq!(format!("{:?}", -&fourty_sixths), "-20/3"); } #[test] fn test_floor() { let half = Mpq::ratio(&Mpz::from(1), &Mpz::from(2)); assert_eq!(half.floor(), Mpz::from(0)); let big = Mpz::from(u64::MAX) * Mpz::from(u64::MAX); let slightly_more_than_one = Mpq::ratio(&(&big + Mpz::from(1)), &big); assert_eq!(slightly_more_than_one.floor(), Mpz::from(1)); let minus_half = -half; assert_eq!(minus_half.floor(), Mpz::from(-1)); } #[test] fn test_ceil() { let half = Mpq::ratio(&Mpz::from(1), &Mpz::from(2)); assert_eq!(half.ceil(), Mpz::from(1)); let minus_half = -half; assert_eq!(minus_half.ceil(), Mpz::from(0)); } #[test] fn test_sign() { let zero: Mpq = From::<i64>::from(0); let five: Mpq = From::<i64>::from(5); let minus_five: Mpq = From::<i64>::from(-5); assert_eq!(zero.sign(), Sign::Zero); assert_eq!(five.sign(), Sign::Positive); assert_eq!(minus_five.sign(), Sign::Negative); } #[test] fn test_ratio() { let zero: Mpz = From::<i64>::from(0); let one: Mpz = From::<i64>::from(1); let minus_one = -&one; let two = &one + &one; let four = &two + &two; assert_eq!(Mpq::ratio(&one, &minus_one), Mpq::ratio(&minus_one, &one)); assert_eq!(Mpq::ratio(&zero, &one), Mpq::ratio(&zero, &minus_one)); assert_eq!(Mpq::ratio(&zero, &one), Mpq::ratio(&zero, &two)); assert_eq!(Mpq::ratio(&two, &four), Mpq::ratio(&one, &two)); } #[test] fn test_from_str_radix() { let zero: Mpz = From::<i64>::from(0); let one: Mpz = From::<i64>::from(1); let minus_one = -&one; let two = &one + &one; assert_eq!(Mpq::from_str_radix("1/-1", 10).unwrap(), Mpq::ratio(&minus_one, &one)); assert_eq!(Mpq::from_str_radix("0/2", 10).unwrap(), Mpq::ratio(&zero, &one)); assert_eq!(Mpq::from_str_radix("2/4", 10).unwrap(), Mpq::ratio(&one, &two)); } } mod mpf { use super::super::mpf::Mpf; use super::super::sign::Sign; #[test] #[should_panic] fn test_div_zero() { let x = Mpf::new(0); &x / &x; } #[test] fn test_sign() { let zero = Mpf::zero(); let mut five = Mpf::zero(); Mpf::set_from_si(&mut five, 5); let mut minus_five = Mpf::zero(); Mpf::set_from_si(&mut minus_five, 
-5); assert_eq!(zero.sign(), Sign::Zero); assert_eq!(five.sign(), Sign::Positive); assert_eq!(minus_five.sign(), Sign::Negative); } }
30.714086
114
0.502815
d98d532acf6517c0835577d6d6d293d15119e6ac
965
//! A TCP client. //! //! First start a server: //! //! ``` //! cargo run --example tcp-server //! ``` //! //! Then start a client: //! //! ``` //! cargo run --example tcp-client //! ``` use std::net::TcpStream; use smol::{future, io, Async, Unblock}; fn main() -> io::Result<()> { smol::run(async { // Create async stdin and stdout handles. let stdin = Unblock::new(std::io::stdin()); let mut stdout = Unblock::new(std::io::stdout()); // Connect to the server. let stream = Async::<TcpStream>::connect(([127, 0, 0, 1], 7000)).await?; println!("Connected to {}", stream.get_ref().peer_addr()?); println!("Type a message and hit enter!\n"); // Pipe messages from stdin to the server and pipe messages from the server to stdout. future::try_join( io::copy(stdin, &mut &stream), io::copy(&stream, &mut stdout), ) .await?; Ok(()) }) }
24.125
94
0.537824
bbae49626b344911ea12889def34d147428eb317
8,155
//! A cross platform Rust library that returns the vendor and product IDs of //! currently connected USB devices //! //! [![Actions Status](https://github.com/timfish/usb-enumeration/workflows/Build/badge.svg)](https://github.com/timfish/usb-enumeration/actions) //! //! # Example //! ```no_run //! let devices = usb_enumeration::enumerate(None, None); //! //! println!("{:#?}", devices); //! //! // Outputs: //! // [ //! // UsbDevice { //! // id: "USB\\VID_0CE9&PID_1220\\0000000004BE", //! // vendor_id: 3305, //! // product_id: 4640, //! // description: Some( //! // "PicoScope 4000 series PC Oscilloscope", //! // ), //! // }, //! // UsbDevice { //! // id: "USB\\VID_046D&PID_C52B\\5&17411534&0&11", //! // vendor_id: 1133, //! // product_id: 50475, //! // description: Some( //! // "USB Composite Device", //! // ), //! // }, //! // UsbDevice { //! // id: "USB\\VID_046D&PID_C52B&MI_00\\6&12D311A2&0&0000", //! // vendor_id: 1133, //! // product_id: 50475, //! // description: Some( //! // "Logitech USB Input Device", //! // ), //! // }, //! // etc... //! // ] //! ``` //! You can also subscribe to events using the `Observer`: //! ```no_run //! use usb_enumeration::{Observer, Event}; //! //! let sub = Observer::new() //! .with_poll_interval(2) //! .with_vendor_id(0x1234) //! .with_product_id(0x5678) //! .subscribe(); //! //! // when sub is dropped, the background thread will close //! //! for event in sub.rx_event.iter() { //! match event { //! Event::Initial(d) => println!("Initial devices: {:?}", d), //! Event::Connect(d) => println!("Connected device: {:?}", d), //! Event::Disconnect(d) => println!("Disconnected device: {:?}", d), //! } //! } //! ``` #![cfg_attr(feature = "strict", deny(warnings))] mod common; pub use common::UsbDevice; use crossbeam::channel::{bounded, unbounded, Receiver, Sender}; use std::{collections::HashSet, thread, time::Duration}; #[cfg(target_os = "windows")] mod windows; #[cfg(target_os = "windows")] use crate::windows::*; #[cfg(target_os = "macos")] mod macos; #[cfg(target_os = "macos")] use crate::macos::*; #[cfg(target_os = "linux")] mod linux; #[cfg(target_os = "linux")] use crate::linux::*; /// # Enumerates connected USB devices /// /// * `vendor_id` - Optional USB Vendor ID to filter /// * `product_id` - Optional USB Product ID to filter /// /// ```no_run /// let devices = usb_enumeration::enumerate(None, None); /// ``` /// You can also optionally filter by vendor or product ID: /// ```no_run /// let devices = usb_enumeration::enumerate(Some(0x1234), None); /// ``` pub fn enumerate(vendor_id: Option<u16>, product_id: Option<u16>) -> Vec<UsbDevice> { enumerate_platform(vendor_id, product_id) } /// Events send from the Observer #[derive(Debug, Clone)] pub enum Event { /// Initial list of devices when polling starts Initial(Vec<UsbDevice>), /// A device that has just been connected Connect(UsbDevice), /// A device that has just disconnected Disconnect(UsbDevice), } #[derive(Clone)] pub struct Subscription { pub rx_event: Receiver<Event>, // When this gets dropped, the channel will become disconnected and the // background thread will close tx_close: Sender<()>, } #[derive(Debug, Clone)] pub struct Observer { poll_interval: u32, vendor_id: Option<u16>, product_id: Option<u16>, } impl Default for Observer { fn default() -> Self { Observer::new() } } impl Observer { /// Create a new Observer with the poll interval specified in seconds pub fn new() -> Self { Observer { poll_interval: 1, vendor_id: None, product_id: None, } } pub fn with_poll_interval(mut self, seconds: 
u32) -> Self { self.poll_interval = seconds; self } /// Filter results by USB Vendor ID pub fn with_vendor_id(mut self, vendor_id: u16) -> Self { self.vendor_id = Some(vendor_id); self } /// Filter results by USB Product ID pub fn with_product_id(mut self, product_id: u16) -> Self { self.product_id = Some(product_id); self } /// Start the background thread and poll for device changes pub fn subscribe(&self) -> Subscription { let (tx_event, rx_event) = unbounded(); let (tx_close, rx_close) = bounded::<()>(0); thread::Builder::new() .name("USB Enumeration Thread".to_string()) .spawn({ let this = self.clone(); move || { let device_list = enumerate(this.vendor_id, this.product_id); // Send initially connected devices if tx_event.send(Event::Initial(device_list.clone())).is_err() { return; } let mut device_list: HashSet<UsbDevice> = device_list.into_iter().collect(); let mut wait_seconds = this.poll_interval as f32; loop { while wait_seconds > 0.0 { // Check whether the subscription has been disposed if let Err(crossbeam::channel::RecvTimeoutError::Disconnected) = rx_close.recv_timeout(Duration::from_millis(250)) { return; } wait_seconds -= 0.25; } wait_seconds = this.poll_interval as f32; let next_devices: HashSet<UsbDevice> = enumerate(this.vendor_id, this.product_id) .into_iter() .collect(); // Send Disconnect for missing devices for device in &device_list { if !next_devices.contains(&device) && tx_event.send(Event::Disconnect(device.clone())).is_err() { return; } } // Send Connect for new devices for device in &next_devices { if !device_list.contains(&device) && tx_event.send(Event::Connect(device.clone())).is_err() { return; } } device_list = next_devices; } } }) .expect("Could not spawn background thread"); Subscription { rx_event, tx_close } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_enumerate() { let devices = enumerate(None, None); println!("Enumerated devices: {:#?}", devices); assert!(!devices.is_empty()); } #[test] fn test_subscribe() { let subscription = Observer::new().subscribe(); let mut iter = subscription.rx_event.iter(); let initial = iter.next().expect("Should get an Event"); assert!(matches!(initial, Event::Initial(_))); println!("Connect a USB device"); let connect_event = iter.next().expect("Should get an Event"); let connect_device = if let Event::Connect(device) = connect_event { device } else { panic!("Expected Event::Connect. Actual: {:?}", connect_event); }; println!("Disconnect that same device"); let disconnect_event = iter.next().expect("Should get an Event"); let disconnect_device = if let Event::Disconnect(device) = disconnect_event { device } else { panic!("Expected Event::Disconnect. Actual: {:?}", disconnect_event); }; assert_eq!(connect_device, disconnect_device); } }
30.657895
145
0.529123
1ab29e6ce5e74ef07b4f1c91dd2f96fd816db9f7
2,280
use crate::error::{Error, ErrorKind}; use std::os::raw::c_char; use std::sync::RwLock; use ffi_support::rust_string_to_c; use once_cell::sync::Lazy; static LAST_ERROR: Lazy<RwLock<Option<Error>>> = Lazy::new(|| RwLock::new(None)); #[derive(Debug, PartialEq, Copy, Clone, Serialize)] #[repr(i64)] pub enum ErrorCode { Success = 0, Backend = 1, Busy = 2, Duplicate = 3, Encryption = 4, Input = 5, NotFound = 6, Unexpected = 7, Unsupported = 8, Custom = 100, } impl From<ErrorKind> for ErrorCode { fn from(kind: ErrorKind) -> ErrorCode { match kind { ErrorKind::Backend => ErrorCode::Backend, ErrorKind::Busy => ErrorCode::Busy, ErrorKind::Custom => ErrorCode::Custom, ErrorKind::Duplicate => ErrorCode::Duplicate, ErrorKind::Encryption => ErrorCode::Encryption, ErrorKind::Input => ErrorCode::Input, ErrorKind::NotFound => ErrorCode::NotFound, ErrorKind::Unexpected => ErrorCode::Unexpected, ErrorKind::Unsupported => ErrorCode::Unsupported, } } } impl<T> From<Result<T, Error>> for ErrorCode { fn from(result: Result<T, Error>) -> ErrorCode { match result { Ok(_) => ErrorCode::Success, Err(err) => ErrorCode::from(err.kind()), } } } #[no_mangle] pub extern "C" fn askar_get_current_error(error_json_p: *mut *const c_char) -> ErrorCode { trace!("askar_get_current_error"); let error = rust_string_to_c(get_current_error_json()); unsafe { *error_json_p = error }; ErrorCode::Success } pub fn get_current_error_json() -> String { if let Some(err) = Option::take(&mut *LAST_ERROR.write().unwrap()) { let message = err.to_string(); let code = ErrorCode::from(err.kind()) as usize; // let extra = err.extra(); json!({"code": code, "message": message}).to_string() } else { r#"{"code":0,"message":null}"#.to_owned() } } pub fn set_last_error(error: Option<Error>) -> ErrorCode { trace!("askar_set_last_error"); let code = match error.as_ref() { Some(err) => err.kind.into(), None => ErrorCode::Success, }; *LAST_ERROR.write().unwrap() = error; code }
27.804878
90
0.602632
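A round-trip sketch (mine) of the two helpers above, runnable as a test inside the same crate: with no stored error, both report the hard-coded success shape, and the stored error slot is `take()`n on read.

```rust
#[test]
fn last_error_round_trip() {
    // Clearing the slot reports Success, and the JSON takes the success shape.
    assert_eq!(set_last_error(None), ErrorCode::Success);
    assert_eq!(get_current_error_json(), r#"{"code":0,"message":null}"#);
}
```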
5b8f36b150701d2d63532d7652778030a27eacbd
2,738
use crate::data::TypedData; use crate::errors::Error; //Don't know why awsm_web needs FutureExt but awsm_renderer doesn't... use futures::future::{self, TryFutureExt, FutureExt}; use std::future::Future; use js_sys::ArrayBuffer; use wasm_bindgen_futures::JsFuture; use web_sys::{ AudioBuffer, AudioContext }; pub fn audio_buffer<'a>( array_buffer: &ArrayBuffer, ctx: &AudioContext,) -> impl Future<Output = Result<AudioBuffer, Error>> { match ctx.decode_audio_data(&array_buffer) { Ok(promise) => future::ok(promise), Err(err) => future::err(err.into()), } .and_then(|promise| JsFuture::from(promise)) .map(|res| match res { Ok(x) => Ok(AudioBuffer::from(x)), Err(x) => Err(Error::from(x)), }) } //convenince helpers for loading slices, vecs, etc. pub fn audio_u8<T: AsRef<[u8]>>( data: T, ctx: &AudioContext, ) -> impl Future<Output = Result<AudioBuffer, Error>> { let array_buffer: ArrayBuffer = TypedData::new(data.as_ref()).into(); audio_buffer(&array_buffer, &ctx) } pub fn audio_u16<T: AsRef<[u16]>>( data: T, ctx: &AudioContext, ) -> impl Future<Output = Result<AudioBuffer, Error>> { let array_buffer: ArrayBuffer = TypedData::new(data.as_ref()).into(); audio_buffer(&array_buffer, &ctx) } pub fn audio_u32<T: AsRef<[u32]>>( data: T, ctx: &AudioContext, ) -> impl Future<Output = Result<AudioBuffer, Error>> { let array_buffer: ArrayBuffer = TypedData::new(data.as_ref()).into(); audio_buffer(&array_buffer, &ctx) } pub fn audio_i8<T: AsRef<[i8]>>( data: T, ctx: &AudioContext, ) -> impl Future<Output = Result<AudioBuffer, Error>> { let array_buffer: ArrayBuffer = TypedData::new(data.as_ref()).into(); audio_buffer(&array_buffer, &ctx) } pub fn audio_i16<T: AsRef<[i16]>>( data: T, ctx: &AudioContext, ) -> impl Future<Output = Result<AudioBuffer, Error>> { let array_buffer: ArrayBuffer = TypedData::new(data.as_ref()).into(); audio_buffer(&array_buffer, &ctx) } pub fn audio_i32<T: AsRef<[i32]>>( data: T, ctx: &AudioContext, ) -> impl Future<Output = Result<AudioBuffer, Error>> { let array_buffer: ArrayBuffer = TypedData::new(data.as_ref()).into(); audio_buffer(&array_buffer, &ctx) } pub fn audio_f32<T: AsRef<[f32]>>( data: T, ctx: &AudioContext, ) -> impl Future<Output = Result<AudioBuffer, Error>> { let array_buffer: ArrayBuffer = TypedData::new(data.as_ref()).into(); audio_buffer(&array_buffer, &ctx) } pub fn audio_f64<T: AsRef<[f64]>>( data: T, ctx: &AudioContext, ) -> impl Future<Output = Result<AudioBuffer, Error>> { let array_buffer: ArrayBuffer = TypedData::new(data.as_ref()).into(); audio_buffer(&array_buffer, &ctx) }
32.987952
127
0.663623
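A caller sketch (mine) for the byte-slice convenience wrapper; where the encoded bytes come from is left abstract, and it relies on `Error: From<JsValue>` exactly as `audio_buffer` above does.

```rust
use crate::errors::Error;
use web_sys::AudioContext;

// Hypothetical caller: `bytes` holds an encoded audio file (e.g. fetched earlier).
pub async fn decoded_duration(bytes: Vec<u8>) -> Result<f64, Error> {
    let ctx = AudioContext::new().map_err(Error::from)?;
    let buffer = audio_u8(&bytes, &ctx).await?;
    Ok(buffer.duration()) // length of the decoded audio in seconds
}
```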
bf22badeff800eedd8cd432c4a276a1529d3b25e
1,766
//! Attribute-related types used by the proc macro use crate::Asn1Type; use syn::{Attribute, Lit, Meta, MetaList, MetaNameValue, NestedMeta}; #[derive(Debug)] pub(crate) struct Asn1Attrs { /// Value of the `#[asn1(type = "...")]` attribute if provided pub asn1_type: Option<Asn1Type>, } impl Asn1Attrs { /// Parse attributes from a field or enum variant pub fn new(attrs: &[Attribute]) -> Self { let mut asn1_type = None; for attr in attrs { if !attr.path.is_ident("asn1") { continue; } match attr.parse_meta().expect("error parsing `asn1` attribute") { Meta::List(MetaList { nested, .. }) if nested.len() == 1 => { match nested.first() { Some(NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit: Lit::Str(lit_str), .. }))) => { // Parse the `type = "..."` attribute if !path.is_ident("type") { panic!("unknown `asn1` attribute: {:?}", path); } if let Some(ty) = asn1_type { panic!("duplicate ASN.1 `type` attribute: {:?}", ty); } asn1_type = Some(Asn1Type::new(&lit_str.value())); } other => panic!("malformed `asn1` attribute: {:?}", other), } } other => panic!("malformed `asn1` attribute: {:?}", other), } } Self { asn1_type } } }
34.627451
85
0.426387
0e09d34cc9bd3774c0c0ee13936cc266ce66c483
9,575
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Representing terms // // Terms are structured as a straightforward tree. Rather than rely on // GC, we allocate terms out of a bounded arena (the lifetime of this // arena is the lifetime 'a that is threaded around). // // We assign a unique index to each type/region parameter whose variance // is to be inferred. We refer to such variables as "inferreds". An // `InferredIndex` is a newtype'd int representing the index of such // a variable. use arena::TypedArena; use dep_graph::DepTrackingMapConfig; use rustc::ty::{self, TyCtxt}; use rustc::ty::maps::ItemVariances; use std::fmt; use std::rc::Rc; use syntax::ast; use rustc::hir; use rustc::hir::itemlikevisit::ItemLikeVisitor; use util::nodemap::NodeMap; use self::VarianceTerm::*; pub type VarianceTermPtr<'a> = &'a VarianceTerm<'a>; #[derive(Copy, Clone, Debug)] pub struct InferredIndex(pub usize); #[derive(Copy, Clone)] pub enum VarianceTerm<'a> { ConstantTerm(ty::Variance), TransformTerm(VarianceTermPtr<'a>, VarianceTermPtr<'a>), InferredTerm(InferredIndex), } impl<'a> fmt::Debug for VarianceTerm<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ConstantTerm(c1) => write!(f, "{:?}", c1), TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2), InferredTerm(id) => { write!(f, "[{}]", { let InferredIndex(i) = id; i }) } } } } // The first pass over the crate simply builds up the set of inferreds. pub struct TermsContext<'a, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'tcx, 'tcx>, pub arena: &'a TypedArena<VarianceTerm<'a>>, pub empty_variances: Rc<Vec<ty::Variance>>, // For marker types, UnsafeCell, and other lang items where // variance is hardcoded, records the item-id and the hardcoded // variance. pub lang_items: Vec<(ast::NodeId, Vec<ty::Variance>)>, // Maps from the node id of a type/generic parameter to the // corresponding inferred index. pub inferred_map: NodeMap<InferredIndex>, // Maps from an InferredIndex to the info for that variable. pub inferred_infos: Vec<InferredInfo<'a>>, } pub struct InferredInfo<'a> { pub item_id: ast::NodeId, pub index: usize, pub param_id: ast::NodeId, pub term: VarianceTermPtr<'a>, // Initial value to use for this parameter when inferring // variance. For most parameters, this is Bivariant. But for lang // items and input type parameters on traits, it is different. pub initial_variance: ty::Variance, } pub fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, arena: &'a mut TypedArena<VarianceTerm<'a>>) -> TermsContext<'a, 'tcx> { let mut terms_cx = TermsContext { tcx: tcx, arena: arena, inferred_map: NodeMap(), inferred_infos: Vec::new(), lang_items: lang_items(tcx), // cache and share the variance struct used for items with // no type/region parameters empty_variances: Rc::new(vec![]), }; // See README.md for a discussion on dep-graph management. 
tcx.visit_all_item_likes_in_krate(|def_id| ItemVariances::to_dep_node(&def_id), &mut terms_cx); terms_cx } fn lang_items(tcx: TyCtxt) -> Vec<(ast::NodeId, Vec<ty::Variance>)> { let all = vec![ (tcx.lang_items.phantom_data(), vec![ty::Covariant]), (tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]), // Deprecated: (tcx.lang_items.covariant_type(), vec![ty::Covariant]), (tcx.lang_items.contravariant_type(), vec![ty::Contravariant]), (tcx.lang_items.invariant_type(), vec![ty::Invariant]), (tcx.lang_items.covariant_lifetime(), vec![ty::Covariant]), (tcx.lang_items.contravariant_lifetime(), vec![ty::Contravariant]), (tcx.lang_items.invariant_lifetime(), vec![ty::Invariant]), ]; all.into_iter() // iterating over (Option<DefId>, Variance) .filter(|&(ref d,_)| d.is_some()) .map(|(d, v)| (d.unwrap(), v)) // (DefId, Variance) .filter_map(|(d, v)| tcx.hir.as_local_node_id(d).map(|n| (n, v))) // (NodeId, Variance) .collect() } impl<'a, 'tcx> TermsContext<'a, 'tcx> { fn add_inferreds_for_item(&mut self, item_id: ast::NodeId, has_self: bool, generics: &hir::Generics) { //! Add "inferreds" for the generic parameters declared on this //! item. This has a lot of annoying parameters because we are //! trying to drive this from the AST, rather than the //! ty::Generics, so that we can get span info -- but this //! means we must accommodate syntactic distinctions. //! // NB: In the code below for writing the results back into the // tcx, we rely on the fact that all inferreds for a particular // item are assigned continuous indices. let inferreds_on_entry = self.num_inferred(); if has_self { self.add_inferred(item_id, 0, item_id); } for (i, p) in generics.lifetimes.iter().enumerate() { let id = p.lifetime.id; let i = has_self as usize + i; self.add_inferred(item_id, i, id); } for (i, p) in generics.ty_params.iter().enumerate() { let i = has_self as usize + generics.lifetimes.len() + i; self.add_inferred(item_id, i, p.id); } // If this item has no type or lifetime parameters, // then there are no variances to infer, so just // insert an empty entry into the variance map. // Arguably we could just leave the map empty in this // case but it seems cleaner to be able to distinguish // "invalid item id" from "item id with no // parameters". 
if self.num_inferred() == inferreds_on_entry { let item_def_id = self.tcx.hir.local_def_id(item_id); let newly_added = self.tcx .item_variance_map .borrow_mut() .insert(item_def_id, self.empty_variances.clone()) .is_none(); assert!(newly_added); } } fn add_inferred(&mut self, item_id: ast::NodeId, index: usize, param_id: ast::NodeId) { let inf_index = InferredIndex(self.inferred_infos.len()); let term = self.arena.alloc(InferredTerm(inf_index)); let initial_variance = self.pick_initial_variance(item_id, index); self.inferred_infos.push(InferredInfo { item_id: item_id, index: index, param_id: param_id, term: term, initial_variance: initial_variance, }); let newly_added = self.inferred_map.insert(param_id, inf_index).is_none(); assert!(newly_added); debug!("add_inferred(item_path={}, \ item_id={}, \ index={}, \ param_id={}, \ inf_index={:?}, \ initial_variance={:?})", self.tcx.item_path_str(self.tcx.hir.local_def_id(item_id)), item_id, index, param_id, inf_index, initial_variance); } fn pick_initial_variance(&self, item_id: ast::NodeId, index: usize) -> ty::Variance { match self.lang_items.iter().find(|&&(n, _)| n == item_id) { Some(&(_, ref variances)) => variances[index], None => ty::Bivariant, } } pub fn num_inferred(&self) -> usize { self.inferred_infos.len() } } impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for TermsContext<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { debug!("add_inferreds for item {}", self.tcx.hir.node_to_string(item.id)); match item.node { hir::ItemEnum(_, ref generics) | hir::ItemStruct(_, ref generics) | hir::ItemUnion(_, ref generics) => { self.add_inferreds_for_item(item.id, false, generics); } hir::ItemTrait(_, ref generics, ..) => { // Note: all inputs for traits are ultimately // constrained to be invariant. See `visit_item` in // the impl for `ConstraintContext` in `constraints.rs`. self.add_inferreds_for_item(item.id, true, generics); } hir::ItemExternCrate(_) | hir::ItemUse(..) | hir::ItemDefaultImpl(..) | hir::ItemImpl(..) | hir::ItemStatic(..) | hir::ItemConst(..) | hir::ItemFn(..) | hir::ItemMod(..) | hir::ItemForeignMod(..) | hir::ItemTy(..) => {} } } fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) { } fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { } }
35.861423
99
0.587258
5028ed77422f4d0c750c94ac02253509579ffdb7
891
/// This function divides two numbers. /// /// # Example #1: 10 / 2 == 5 /// /// ``` /// let result = doctest001::div(10, 2); /// assert_eq!(result, 5); /// ``` /// /// # Example #2: 6 / 2 == 3 /// /// ``` /// let result = doctest001::div(6, 2); /// assert_eq!(result, 3); /// ``` /// /// # Panics /// /// The function panics if the second argument is zero. /// /// ```rust,should_panic /// doctest001::div(1, 0); /// ``` pub fn div(a: i32, b: i32) -> i32 { if b == 0 { panic!("Divide-by-zero error"); } a / b } /// This function subtracts two numbers. /// /// # Example #1: 9 - 2 == 7 /// /// ``` /// let result = doctest001::sub(9, 2); /// assert_eq!(result, 7); /// ``` /// /// # Example #2: 6 - 9 == -3 /// /// ``` /// let result = doctest001::sub(6, 9); /// assert_eq!(result, -3); /// ``` pub fn sub(a: i32, b: i32) -> i32 { a - b }
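For contrast with the `# Panics` section above, a non-panicking variant is easy to express with `Option`; this sketch is not part of the original crate:

/// Divides two numbers, returning `None` instead of panicking on a zero divisor.
///
/// ```
/// assert_eq!(doctest001::checked_div(10, 2), Some(5));
/// assert_eq!(doctest001::checked_div(1, 0), None);
/// ```
pub fn checked_div(a: i32, b: i32) -> Option<i32> {
    if b == 0 {
        None
    } else {
        Some(a / b)
    }
}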
18.5625
68
0.483726
289c912abf9c36cf0766a9cfa1411d1e4ef4437c
124
#[derive(Debug, Clone, Copy)] pub enum AttributeComponentSize { One = 1, Two = 2, Three = 3, Four = 4 }
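Because every variant carries an explicit discriminant, the component count can be recovered with a plain `as` cast, which is handy when a graphics API expects the size as an integer. A small check of that property (the test module is a sketch, not part of the source):

#[cfg(test)]
mod component_size_tests {
    use super::AttributeComponentSize;

    #[test]
    fn discriminant_matches_component_count() {
        assert_eq!(AttributeComponentSize::One as i32, 1);
        assert_eq!(AttributeComponentSize::Four as i32, 4);
    }
}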
17.714286
33
0.548387
017763880f248abb03bef9da09e2c24b77894ffe
1,099
use chrono::{DateTime, Local}; use serde::{Deserialize, Serialize}; use crate::User; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Release { pub url: String, pub assets_url: String, pub upload_url: String, pub html_url: String, pub id: usize, pub author: User, pub node_id: String, pub tag_name: String, pub target_commitish: String, pub name: Option<String>, pub draft: bool, pub prerelease: bool, pub created_at: DateTime<Local>, pub published_at: DateTime<Local>, pub assets: Vec<ReleaseAsset>, pub tarball_url: String, pub zipball_url: String, pub body: Option<String>, } #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ReleaseAsset { pub url: String, pub id: usize, pub node_id: String, pub name: String, pub label: Option<String>, pub uploader: User, pub content_type: String, pub state: String, pub size: usize, pub download_count: usize, pub created_at: DateTime<Local>, pub updated_at: DateTime<Local>, pub browser_download_url: String, }
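The `DateTime<Local>` fields rely on chrono's `serde` feature, which reads RFC 3339 timestamps such as GitHub's `created_at` values. A standalone check of that assumption (requires `serde_json` as a dev-dependency):

#[cfg(test)]
mod timestamp_format {
    use chrono::{DateTime, Local};

    #[test]
    fn rfc3339_deserializes() {
        // 2021-03-01T12:00:00Z is 1614600000 seconds after the Unix epoch.
        let parsed: DateTime<Local> = serde_json::from_str("\"2021-03-01T12:00:00Z\"").unwrap();
        assert_eq!(parsed.timestamp(), 1614600000);
    }
}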
24.977273
47
0.673339
e2b0297d31c37a4d6cedce6185d6e72b9a63f851
1,925
#[cfg(test)] mod test; use bson::{doc, Document}; use serde::Deserialize; use crate::{ cmap::{Command, CommandResponse, StreamDescription}, error::Result, operation::{append_options, Operation}, options::ListDatabasesOptions, selection_criteria::{ReadPreference, SelectionCriteria}, }; #[derive(Debug)] pub(crate) struct ListDatabases { filter: Option<Document>, name_only: bool, options: Option<ListDatabasesOptions>, } impl ListDatabases { pub fn new( filter: Option<Document>, name_only: bool, options: Option<ListDatabasesOptions>, ) -> Self { ListDatabases { filter, name_only, options, } } #[cfg(test)] pub(crate) fn empty() -> Self { ListDatabases { filter: None, name_only: false, options: None, } } } impl Operation for ListDatabases { type O = Vec<Document>; const NAME: &'static str = "listDatabases"; fn build(&self, description: &StreamDescription) -> Result<Command> { let mut body: Document = doc! { Self::NAME: 1, "nameOnly": self.name_only }; if let Some(ref filter) = self.filter { body.insert("filter", filter.clone()); } append_options(&mut body, self.options.as_ref())?; Ok(Command::new( Self::NAME.to_string(), "admin".to_string(), body, )) } fn handle_response(&self, response: CommandResponse) -> Result<Self::O> { response.body::<ResponseBody>().map(|body| body.databases) } fn selection_criteria(&self) -> Option<&SelectionCriteria> { // A `static` is needed here: `Some(...).as_ref()` would return a reference to a temporary. static PRIMARY: SelectionCriteria = SelectionCriteria::ReadPreference(ReadPreference::Primary); Some(&PRIMARY) } } #[derive(Debug, Deserialize)] struct ResponseBody { databases: Vec<Document>, total_size: Option<i64>, }
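For orientation, the wire command that `build` assembles, traced through the code above (shown as a comment sketch because constructing a `StreamDescription` is internal to the crate):

// let op = ListDatabases::new(Some(doc! { "name": "admin" }), true, None);
// op.build(&description) targets the "admin" database and produces the body:
//
//     { "listDatabases": 1, "nameOnly": true, "filter": { "name": "admin" } }
//
// with any `ListDatabasesOptions` appended by `append_options`.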
23.47561
81
0.58961
8f2505a4ecbe8a496c069de42839abe5dcbd665e
30,342
#![cfg(feature = "stargate")] // The CosmosMsg variants are defined in results/cosmos_msg.rs // The rest of the IBC related functionality is defined here use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::cmp::{Ord, Ordering, PartialOrd}; #[cfg(feature = "ibc3")] use crate::addresses::Addr; use crate::binary::Binary; use crate::coins::Coin; use crate::errors::StdResult; use crate::results::{Attribute, CosmosMsg, Empty, Event, SubMsg}; use crate::serde::to_binary; use crate::timestamp::Timestamp; /// These are messages in the IBC lifecycle. Only usable by IBC-enabled contracts /// (contracts that directly speak the IBC protocol via 6 entry points) #[non_exhaustive] #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[serde(rename_all = "snake_case")] pub enum IbcMsg { /// Sends bank tokens owned by the contract to the given address on another chain. /// The channel must already be established between the ibctransfer module on this chain /// and a matching module on the remote chain. /// We cannot select the port_id, this is whatever the local chain has bound the ibctransfer /// module to. Transfer { /// existing channel to send the tokens over channel_id: String, /// address on the remote chain to receive these tokens to_address: String, /// packet data only supports one coin /// https://github.com/cosmos/cosmos-sdk/blob/v0.40.0/proto/ibc/applications/transfer/v1/transfer.proto#L11-L20 amount: Coin, /// when packet times out, measured on remote chain timeout: IbcTimeout, }, /// Sends an IBC packet with given data over the existing channel. /// Data should be encoded in a format defined by the channel version, /// and the module on the other side should know how to parse this. SendPacket { channel_id: String, data: Binary, /// when packet times out, measured on remote chain timeout: IbcTimeout, }, /// This will close an existing channel that is owned by this contract. /// Port is auto-assigned to the contract's IBC port CloseChannel { channel_id: String }, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct IbcEndpoint { pub port_id: String, pub channel_id: String, } /// In IBC each packet must set at least one type of timeout: /// the timestamp or the block height. Using this struct with private fields instead of /// two public timeout fields, we ensure that at least one timeout is set. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[serde(rename_all = "snake_case")] pub struct IbcTimeout { // use private fields to enforce the use of constructors, which ensure that at least one is set block: Option<IbcTimeoutBlock>, timestamp: Option<Timestamp>, } impl IbcTimeout { pub fn with_block(block: IbcTimeoutBlock) -> Self { IbcTimeout { block: Some(block), timestamp: None, } } pub fn with_timestamp(timestamp: Timestamp) -> Self { IbcTimeout { block: None, timestamp: Some(timestamp), } } pub fn with_both(block: IbcTimeoutBlock, timestamp: Timestamp) -> Self { IbcTimeout { block: Some(block), timestamp: Some(timestamp), } } pub fn block(&self) -> Option<IbcTimeoutBlock> { self.block } pub fn timestamp(&self) -> Option<Timestamp> { self.timestamp } } impl From<Timestamp> for IbcTimeout { fn from(timestamp: Timestamp) -> IbcTimeout { IbcTimeout::with_timestamp(timestamp) } } impl From<IbcTimeoutBlock> for IbcTimeout { fn from(original: IbcTimeoutBlock) -> IbcTimeout { IbcTimeout::with_block(original) } } // These are various messages used in the callbacks /// IbcChannel defines all information on a channel.
/// This is generally used in the hand-shake process, but can be queried directly. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[non_exhaustive] pub struct IbcChannel { pub endpoint: IbcEndpoint, pub counterparty_endpoint: IbcEndpoint, pub order: IbcOrder, /// Note: in ibcv3 this may be "" in the IbcChannelOpenMsg handshake messages pub version: String, /// The connection upon which this channel was created. If this is a multi-hop /// channel, we only expose the first hop. pub connection_id: String, } impl IbcChannel { /// Construct a new IbcChannel. pub fn new( endpoint: IbcEndpoint, counterparty_endpoint: IbcEndpoint, order: IbcOrder, version: impl Into<String>, connection_id: impl Into<String>, ) -> Self { Self { endpoint, counterparty_endpoint, order, version: version.into(), connection_id: connection_id.into(), } } } /// IbcOrder defines if a channel is ORDERED or UNORDERED /// Values come from https://github.com/cosmos/cosmos-sdk/blob/v0.40.0/proto/ibc/core/channel/v1/channel.proto#L69-L80 /// Naming comes from the protobuf files and go translations. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub enum IbcOrder { #[serde(rename = "ORDER_UNORDERED")] Unordered, #[serde(rename = "ORDER_ORDERED")] Ordered, } /// IBCTimeoutHeight Height is a monotonically increasing data type /// that can be compared against another Height for the purposes of updating and /// freezing clients. /// Ordering is (revision_number, timeout_height) #[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialEq, Eq, JsonSchema)] pub struct IbcTimeoutBlock { /// the version that the client is currently on /// (e.g. after resetting the chain this could increment by 1 as the height drops to 0) pub revision: u64, /// block height after which the packet times out. /// the height within the given revision pub height: u64, } impl IbcTimeoutBlock { pub fn is_zero(&self) -> bool { self.revision == 0 && self.height == 0 } } impl PartialOrd for IbcTimeoutBlock { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for IbcTimeoutBlock { fn cmp(&self, other: &Self) -> Ordering { match self.revision.cmp(&other.revision) { Ordering::Equal => self.height.cmp(&other.height), other => other, } } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[non_exhaustive] pub struct IbcPacket { /// The raw data sent from the other side in the packet pub data: Binary, /// identifies the channel and port on the sending chain. pub src: IbcEndpoint, /// identifies the channel and port on the receiving chain. pub dest: IbcEndpoint, /// The sequence number of the packet on the given channel pub sequence: u64, pub timeout: IbcTimeout, } impl IbcPacket { /// Construct a new IbcPacket.
pub fn new( data: impl Into<Binary>, src: IbcEndpoint, dest: IbcEndpoint, sequence: u64, timeout: IbcTimeout, ) -> Self { Self { data: data.into(), src, dest, sequence, timeout, } } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[non_exhaustive] pub struct IbcAcknowledgement { pub data: Binary, // we may add more info here in the future (meta-data from the acknowledgement) // there have been proposals to extend this type in core ibc for future versions } impl IbcAcknowledgement { pub fn new(data: impl Into<Binary>) -> Self { IbcAcknowledgement { data: data.into() } } pub fn encode_json(data: &impl Serialize) -> StdResult<Self> { Ok(IbcAcknowledgement { data: to_binary(data)?, }) } } /// The message that is passed into `ibc_channel_open` #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[serde(rename_all = "snake_case")] #[non_exhaustive] pub enum IbcChannelOpenMsg { /// The ChanOpenInit step from https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#channel-lifecycle-management OpenInit { channel: IbcChannel }, /// The ChanOpenTry step from https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#channel-lifecycle-management OpenTry { channel: IbcChannel, counterparty_version: String, }, } impl IbcChannelOpenMsg { pub fn new_init(channel: IbcChannel) -> Self { Self::OpenInit { channel } } pub fn new_try(channel: IbcChannel, counterparty_version: impl Into<String>) -> Self { Self::OpenTry { channel, counterparty_version: counterparty_version.into(), } } pub fn channel(&self) -> &IbcChannel { match self { Self::OpenInit { channel } => channel, Self::OpenTry { channel, .. } => channel, } } pub fn counterparty_version(&self) -> Option<&str> { match self { Self::OpenTry { counterparty_version, .. } => Some(counterparty_version), _ => None, } } } impl From<IbcChannelOpenMsg> for IbcChannel { fn from(msg: IbcChannelOpenMsg) -> IbcChannel { match msg { IbcChannelOpenMsg::OpenInit { channel } => channel, IbcChannelOpenMsg::OpenTry { channel, .. } => channel, } } } /// Note that this serializes as "null". #[cfg(not(feature = "ibc3"))] pub type IbcChannelOpenResponse = (); /// This serializes either as "null" or a JSON object. #[cfg(feature = "ibc3")] pub type IbcChannelOpenResponse = Option<Ibc3ChannelOpenResponse>; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct Ibc3ChannelOpenResponse { /// We can set the channel version to a different one than we were called with pub version: String, } /// The message that is passed into `ibc_channel_connect` #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[serde(rename_all = "snake_case")] #[non_exhaustive] pub enum IbcChannelConnectMsg { /// The ChanOpenAck step from https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#channel-lifecycle-management OpenAck { channel: IbcChannel, counterparty_version: String, }, /// The ChanOpenConfirm step from https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#channel-lifecycle-management OpenConfirm { channel: IbcChannel }, } impl IbcChannelConnectMsg { pub fn new_ack(channel: IbcChannel, counterparty_version: impl Into<String>) -> Self { Self::OpenAck { channel, counterparty_version: counterparty_version.into(), } } pub fn new_confirm(channel: IbcChannel) -> Self { Self::OpenConfirm { channel } } pub fn channel(&self) -> &IbcChannel { match self { Self::OpenAck { channel, .. 
} => channel, Self::OpenConfirm { channel } => channel, } } pub fn counterparty_version(&self) -> Option<&str> { match self { Self::OpenAck { counterparty_version, .. } => Some(counterparty_version), _ => None, } } } impl From<IbcChannelConnectMsg> for IbcChannel { fn from(msg: IbcChannelConnectMsg) -> IbcChannel { match msg { IbcChannelConnectMsg::OpenAck { channel, .. } => channel, IbcChannelConnectMsg::OpenConfirm { channel } => channel, } } } /// The message that is passed into `ibc_channel_close` #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[serde(rename_all = "snake_case")] #[non_exhaustive] pub enum IbcChannelCloseMsg { /// The ChanCloseInit step from https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#channel-lifecycle-management CloseInit { channel: IbcChannel }, /// The ChanCloseConfirm step from https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#channel-lifecycle-management CloseConfirm { channel: IbcChannel }, // pub channel: IbcChannel, } impl IbcChannelCloseMsg { pub fn new_init(channel: IbcChannel) -> Self { Self::CloseInit { channel } } pub fn new_confirm(channel: IbcChannel) -> Self { Self::CloseConfirm { channel } } pub fn channel(&self) -> &IbcChannel { match self { Self::CloseInit { channel } => channel, Self::CloseConfirm { channel } => channel, } } } impl From<IbcChannelCloseMsg> for IbcChannel { fn from(msg: IbcChannelCloseMsg) -> IbcChannel { match msg { IbcChannelCloseMsg::CloseInit { channel } => channel, IbcChannelCloseMsg::CloseConfirm { channel } => channel, } } } /// The message that is passed into `ibc_packet_receive` #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[non_exhaustive] pub struct IbcPacketReceiveMsg { pub packet: IbcPacket, #[cfg(feature = "ibc3")] pub relayer: Addr, } impl IbcPacketReceiveMsg { #[cfg(not(feature = "ibc3"))] pub fn new(packet: IbcPacket) -> Self { Self { packet } } #[cfg(feature = "ibc3")] pub fn new(packet: IbcPacket, relayer: Addr) -> Self { Self { packet, relayer } } } /// The message that is passed into `ibc_packet_ack` #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[non_exhaustive] pub struct IbcPacketAckMsg { pub acknowledgement: IbcAcknowledgement, pub original_packet: IbcPacket, #[cfg(feature = "ibc3")] pub relayer: Addr, } impl IbcPacketAckMsg { #[cfg(not(feature = "ibc3"))] pub fn new(acknowledgement: IbcAcknowledgement, original_packet: IbcPacket) -> Self { Self { acknowledgement, original_packet, } } #[cfg(feature = "ibc3")] pub fn new( acknowledgement: IbcAcknowledgement, original_packet: IbcPacket, relayer: Addr, ) -> Self { Self { acknowledgement, original_packet, relayer, } } } /// The message that is passed into `ibc_packet_timeout` #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[non_exhaustive] pub struct IbcPacketTimeoutMsg { pub packet: IbcPacket, #[cfg(feature = "ibc3")] pub relayer: Addr, } impl IbcPacketTimeoutMsg { #[cfg(not(feature = "ibc3"))] pub fn new(packet: IbcPacket) -> Self { Self { packet } } #[cfg(feature = "ibc3")] pub fn new(packet: IbcPacket, relayer: Addr) -> Self { Self { packet, relayer } } } /// This is the return value for the majority of the ibc handlers /// that are able to dispatch messages / events on their own, /// but have no meaningful return value to the calling code.
/// /// Callbacks that have return values (like receive_packet) /// or that cannot redispatch messages (like the handshake callbacks) /// will use other Response types #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[non_exhaustive] pub struct IbcBasicResponse<T = Empty> { /// Optional list of messages to pass. These will be executed in order. /// If the ReplyOn member is set, they will invoke this contract's `reply` entry point /// after execution. Otherwise, they act like "fire and forget". /// Use `SubMsg::new` to create messages with the older "fire and forget" semantics. pub messages: Vec<SubMsg<T>>, /// The attributes that will be emitted as part of a `wasm` event. /// /// More info about events (and their attributes) can be found in [*Cosmos SDK* docs]. /// /// [*Cosmos SDK* docs]: https://docs.cosmos.network/v0.42/core/events.html pub attributes: Vec<Attribute>, /// Extra, custom events separate from the main `wasm` one. These will have /// `wasm-` prepended to the type. /// /// More info about events can be found in [*Cosmos SDK* docs]. /// /// [*Cosmos SDK* docs]: https://docs.cosmos.network/v0.42/core/events.html pub events: Vec<Event>, } // Custom implementation in order to implement it for all `T`, even if `T` is not `Default`. impl<T> Default for IbcBasicResponse<T> { fn default() -> Self { IbcBasicResponse { messages: vec![], attributes: vec![], events: vec![], } } } impl<T> IbcBasicResponse<T> { pub fn new() -> Self { Self::default() } /// Add an attribute included in the main `wasm` event. pub fn add_attribute(mut self, key: impl Into<String>, value: impl Into<String>) -> Self { self.attributes.push(Attribute::new(key, value)); self } /// This creates a "fire and forget" message, by using `SubMsg::new()` to wrap it, /// and adds it to the list of messages to process. pub fn add_message(mut self, msg: impl Into<CosmosMsg<T>>) -> Self { self.messages.push(SubMsg::new(msg)); self } /// This takes an explicit SubMsg (created via e.g. `SubMsg::reply_on_error`) /// and adds it to the list of messages to process. pub fn add_submessage(mut self, msg: SubMsg<T>) -> Self { self.messages.push(msg); self } /// Adds an extra event to the response, separate from the main `wasm` event /// that is always created. /// /// The `wasm-` prefix will be prepended by the runtime to the provided type /// of event. pub fn add_event(mut self, event: Event) -> Self { self.events.push(event); self } /// Bulk add attributes included in the main `wasm` event. /// /// Anything that can be turned into an iterator and yields something /// that can be converted into an `Attribute` is accepted. /// /// ## Examples /// /// ``` /// use cosmwasm_std::{attr, IbcBasicResponse}; /// /// let attrs = vec![ /// ("action", "reaction"), /// ("answer", "42"), /// ("another", "attribute"), /// ]; /// let res: IbcBasicResponse = IbcBasicResponse::new().add_attributes(attrs.clone()); /// assert_eq!(res.attributes, attrs); /// ``` pub fn add_attributes<A: Into<Attribute>>( mut self, attrs: impl IntoIterator<Item = A>, ) -> Self { self.attributes.extend(attrs.into_iter().map(A::into)); self } /// Bulk add "fire and forget" messages to the list of messages to process.
/// /// ## Examples /// /// ``` /// use cosmwasm_std::{CosmosMsg, IbcBasicResponse}; /// /// fn make_response_with_msgs(msgs: Vec<CosmosMsg>) -> IbcBasicResponse { /// IbcBasicResponse::new().add_messages(msgs) /// } /// ``` pub fn add_messages<M: Into<CosmosMsg<T>>>(self, msgs: impl IntoIterator<Item = M>) -> Self { self.add_submessages(msgs.into_iter().map(SubMsg::new)) } /// Bulk add explicit SubMsg structs to the list of messages to process. /// /// ## Examples /// /// ``` /// use cosmwasm_std::{SubMsg, IbcBasicResponse}; /// /// fn make_response_with_submsgs(msgs: Vec<SubMsg>) -> IbcBasicResponse { /// IbcBasicResponse::new().add_submessages(msgs) /// } /// ``` pub fn add_submessages(mut self, msgs: impl IntoIterator<Item = SubMsg<T>>) -> Self { self.messages.extend(msgs.into_iter()); self } /// Bulk add custom events to the response. These are separate from the main /// `wasm` event. /// /// The `wasm-` prefix will be prepended by the runtime to the provided types /// of events. pub fn add_events(mut self, events: impl IntoIterator<Item = Event>) -> Self { self.events.extend(events.into_iter()); self } } // This defines the return value on packet response processing. // This "success" case should be returned even in application-level errors, // where the acknowledgement bytes contain an encoded error message to be returned to // the calling chain. (Returning ContractResult::Err will abort processing of this packet // and not inform the calling chain). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] #[non_exhaustive] pub struct IbcReceiveResponse<T = Empty> { /// The bytes we return to the contract that sent the packet. /// This may represent a success or error of execution pub acknowledgement: Binary, /// Optional list of messages to pass. These will be executed in order. /// If the ReplyOn member is set, they will invoke this contract's `reply` entry point /// after execution. Otherwise, they act like "fire and forget". /// Use `call` or `msg.into()` to create messages with the older "fire and forget" semantics. pub messages: Vec<SubMsg<T>>, /// The attributes that will be emitted as part of a "wasm" event. /// /// More info about events (and their attributes) can be found in [*Cosmos SDK* docs]. /// /// [*Cosmos SDK* docs]: https://docs.cosmos.network/v0.42/core/events.html pub attributes: Vec<Attribute>, /// Extra, custom events separate from the main `wasm` one. These will have /// `wasm-` prepended to the type. /// /// More info about events can be found in [*Cosmos SDK* docs]. /// /// [*Cosmos SDK* docs]: https://docs.cosmos.network/v0.42/core/events.html pub events: Vec<Event>, } // Custom implementation in order to implement it for all `T`, even if `T` is not `Default`. impl<T> Default for IbcReceiveResponse<T> { fn default() -> Self { IbcReceiveResponse { acknowledgement: Binary(vec![]), messages: vec![], attributes: vec![], events: vec![], } } } impl<T> IbcReceiveResponse<T> { pub fn new() -> Self { Self::default() } /// Set the acknowledgement for this response. pub fn set_ack(mut self, ack: impl Into<Binary>) -> Self { self.acknowledgement = ack.into(); self } /// Add an attribute included in the main `wasm` event. pub fn add_attribute(mut self, key: impl Into<String>, value: impl Into<String>) -> Self { self.attributes.push(Attribute::new(key, value)); self } /// This creates a "fire and forget" message, by using `SubMsg::new()` to wrap it, /// and adds it to the list of messages to process.
pub fn add_message(mut self, msg: impl Into<CosmosMsg<T>>) -> Self { self.messages.push(SubMsg::new(msg)); self } /// This takes an explicit SubMsg (created via e.g. `SubMsg::reply_on_error`) /// and adds it to the list of messages to process. pub fn add_submessage(mut self, msg: SubMsg<T>) -> Self { self.messages.push(msg); self } /// Adds an extra event to the response, separate from the main `wasm` event /// that is always created. /// /// The `wasm-` prefix will be prepended by the runtime to the provided type /// of event. pub fn add_event(mut self, event: Event) -> Self { self.events.push(event); self } /// Bulk add attributes included in the main `wasm` event. /// /// Anything that can be turned into an iterator and yields something /// that can be converted into an `Attribute` is accepted. /// /// ## Examples /// /// ``` /// use cosmwasm_std::{attr, IbcReceiveResponse}; /// /// let attrs = vec![ /// ("action", "reaction"), /// ("answer", "42"), /// ("another", "attribute"), /// ]; /// let res: IbcReceiveResponse = IbcReceiveResponse::new().add_attributes(attrs.clone()); /// assert_eq!(res.attributes, attrs); /// ``` pub fn add_attributes<A: Into<Attribute>>( mut self, attrs: impl IntoIterator<Item = A>, ) -> Self { self.attributes.extend(attrs.into_iter().map(A::into)); self } /// Bulk add "fire and forget" messages to the list of messages to process. /// /// ## Examples /// /// ``` /// use cosmwasm_std::{CosmosMsg, IbcReceiveResponse}; /// /// fn make_response_with_msgs(msgs: Vec<CosmosMsg>) -> IbcReceiveResponse { /// IbcReceiveResponse::new().add_messages(msgs) /// } /// ``` pub fn add_messages<M: Into<CosmosMsg<T>>>(self, msgs: impl IntoIterator<Item = M>) -> Self { self.add_submessages(msgs.into_iter().map(SubMsg::new)) } /// Bulk add explicit SubMsg structs to the list of messages to process. /// /// ## Examples /// /// ``` /// use cosmwasm_std::{SubMsg, IbcReceiveResponse}; /// /// fn make_response_with_submsgs(msgs: Vec<SubMsg>) -> IbcReceiveResponse { /// IbcReceiveResponse::new().add_submessages(msgs) /// } /// ``` pub fn add_submessages(mut self, msgs: impl IntoIterator<Item = SubMsg<T>>) -> Self { self.messages.extend(msgs.into_iter()); self } /// Bulk add custom events to the response. These are separate from the main /// `wasm` event. /// /// The `wasm-` prefix will be prepended by the runtime to the provided types /// of events.
pub fn add_events(mut self, events: impl IntoIterator<Item = Event>) -> Self { self.events.extend(events.into_iter()); self } } #[cfg(test)] mod tests { use super::*; use serde_json_wasm::to_string; #[test] // added this to check json format for go compat, as I was unsure how some messages are snake encoded fn serialize_msg() { let msg = IbcMsg::Transfer { channel_id: "channel-123".to_string(), to_address: "my-special-addr".into(), amount: Coin::new(12345678, "uatom"), timeout: IbcTimeout::with_timestamp(Timestamp::from_nanos(1234567890)), }; let encoded = to_string(&msg).unwrap(); let expected = r#"{"transfer":{"channel_id":"channel-123","to_address":"my-special-addr","amount":{"denom":"uatom","amount":"12345678"},"timeout":{"block":null,"timestamp":"1234567890"}}}"#; assert_eq!(encoded.as_str(), expected); } #[test] fn ibc_timeout_serialize() { let timestamp = IbcTimeout::with_timestamp(Timestamp::from_nanos(684816844)); let expected = r#"{"block":null,"timestamp":"684816844"}"#; assert_eq!(to_string(&timestamp).unwrap(), expected); let block = IbcTimeout::with_block(IbcTimeoutBlock { revision: 12, height: 129, }); let expected = r#"{"block":{"revision":12,"height":129},"timestamp":null}"#; assert_eq!(to_string(&block).unwrap(), expected); let both = IbcTimeout::with_both( IbcTimeoutBlock { revision: 12, height: 129, }, Timestamp::from_nanos(684816844), ); let expected = r#"{"block":{"revision":12,"height":129},"timestamp":"684816844"}"#; assert_eq!(to_string(&both).unwrap(), expected); } #[test] #[allow(clippy::eq_op)] fn ibc_timeout_block_ord() { let epoch1a = IbcTimeoutBlock { revision: 1, height: 1000, }; let epoch1b = IbcTimeoutBlock { revision: 1, height: 3000, }; let epoch2a = IbcTimeoutBlock { revision: 2, height: 500, }; let epoch2b = IbcTimeoutBlock { revision: 2, height: 2500, }; // basic checks assert!(epoch1a == epoch1a); assert!(epoch1a < epoch1b); assert!(epoch1b > epoch1a); assert!(epoch2a > epoch1a); assert!(epoch2b > epoch1a); // ensure epoch boundaries are correctly handled assert!(epoch1b > epoch1a); assert!(epoch2a > epoch1b); assert!(epoch2b > epoch2a); assert!(epoch2b > epoch1b); // and check the inverse compare assert!(epoch1a < epoch1b); assert!(epoch1b < epoch2a); assert!(epoch2a < epoch2b); assert!(epoch1b < epoch2b); } #[test] fn ibc_packet_serialize() { let packet = IbcPacket { data: b"foo".into(), src: IbcEndpoint { port_id: "their-port".to_string(), channel_id: "channel-1234".to_string(), }, dest: IbcEndpoint { port_id: "our-port".to_string(), channel_id: "chan33".into(), }, sequence: 27, timeout: IbcTimeout::with_both( IbcTimeoutBlock { revision: 1, height: 12345678, }, Timestamp::from_nanos(4611686018427387904), ), }; let expected = r#"{"data":"Zm9v","src":{"port_id":"their-port","channel_id":"channel-1234"},"dest":{"port_id":"our-port","channel_id":"chan33"},"sequence":27,"timeout":{"block":{"revision":1,"height":12345678},"timestamp":"4611686018427387904"}}"#; assert_eq!(to_string(&packet).unwrap(), expected); let no_timestamp = IbcPacket { data: b"foo".into(), src: IbcEndpoint { port_id: "their-port".to_string(), channel_id: "channel-1234".to_string(), }, dest: IbcEndpoint { port_id: "our-port".to_string(), channel_id: "chan33".into(), }, sequence: 27, timeout: IbcTimeout::with_block(IbcTimeoutBlock { revision: 1, height: 12345678, }), }; let expected = 
r#"{"data":"Zm9v","src":{"port_id":"their-port","channel_id":"channel-1234"},"dest":{"port_id":"our-port","channel_id":"chan33"},"sequence":27,"timeout":{"block":{"revision":1,"height":12345678},"timestamp":null}}"#; assert_eq!(to_string(&no_timestamp).unwrap(), expected); } }
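To show how these types are consumed on the contract side, here is a minimal `ibc_packet_timeout` handler returning an `IbcBasicResponse`. The `DepsMut`/`Env` signature is the standard cosmwasm-std entry-point shape; the attribute values are illustrative:

use cosmwasm_std::{DepsMut, Env, IbcBasicResponse, IbcPacketTimeoutMsg, StdResult};

pub fn ibc_packet_timeout(
    _deps: DepsMut,
    _env: Env,
    msg: IbcPacketTimeoutMsg,
) -> StdResult<IbcBasicResponse> {
    // No messages are redispatched here; we only record which packet timed out.
    Ok(IbcBasicResponse::new()
        .add_attribute("action", "packet_timeout")
        .add_attribute("sequence", msg.packet.sequence.to_string()))
}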
33.638581
256
0.619768
5ba94cd60e599d5d4e5a3c8eaa2a60c0991ce07f
2,114
use crate::commands::WholeStreamCommand; use crate::context::CommandRegistry; use crate::prelude::*; use nu_errors::ShellError; use nu_protocol::{Signature, SyntaxShape, UntaggedValue}; use nu_source::Tagged; pub struct Keep; #[derive(Deserialize)] pub struct KeepArgs { rows: Option<Tagged<usize>>, } #[async_trait] impl WholeStreamCommand for Keep { fn name(&self) -> &str { "keep" } fn signature(&self) -> Signature { Signature::build("keep").optional( "rows", SyntaxShape::Int, "starting from the front, the number of rows to keep", ) } fn usage(&self) -> &str { "Keep the number of rows only" } async fn run( &self, args: CommandArgs, registry: &CommandRegistry, ) -> Result<OutputStream, ShellError> { keep(args, registry).await } fn examples(&self) -> Vec<Example> { vec![ Example { description: "Keep the first row", example: "echo [1 2 3] | keep", result: Some(vec![UntaggedValue::int(1).into()]), }, Example { description: "Keep the first four rows", example: "echo [1 2 3 4 5] | keep 4", result: Some(vec![ UntaggedValue::int(1).into(), UntaggedValue::int(2).into(), UntaggedValue::int(3).into(), UntaggedValue::int(4).into(), ]), }, ] } } async fn keep(args: CommandArgs, registry: &CommandRegistry) -> Result<OutputStream, ShellError> { let registry = registry.clone(); let (KeepArgs { rows }, input) = args.process(&registry).await?; let rows_desired = if let Some(quantity) = rows { *quantity } else { 1 }; Ok(input.take(rows_desired).to_output_stream()) } #[cfg(test)] mod tests { use super::Keep; #[test] fn examples_work_as_expected() { use crate::examples::test as test_examples; test_examples(Keep {}) } }
24.870588
98
0.543519
1e57df5197f9d8673f6a4d76d9a538d042b42d14
52
pub mod camera; pub mod buffer; pub mod resolution;
13
19
0.769231
50f4dc95ba7bbdbd01c1475ca347e5d25313455a
666
// Copyright 2018, The Gtk-rs Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <https://opensource.org/licenses/MIT> #![allow(deprecated)] extern crate cairo; extern crate cairo_sys; extern crate glib_sys; extern crate gobject_sys; extern crate pango; extern crate pango_cairo_sys; extern crate pango_sys; #[macro_use] extern crate glib; extern crate bitflags; extern crate libc; #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] #[allow(unused_imports)] mod auto; pub use auto::functions::*; pub use auto::*; pub mod prelude; mod font_map;
23.785714
96
0.767267
8a7707562e8eea4b9b75702ce3edb546e32c9883
7,486
#[doc = r" Value read from the register"] pub struct R { bits: u8, } #[doc = r" Value to write to the register"] pub struct W { bits: u8, } impl super::INTENSET { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits }; let mut w = W { bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct PRECR { bits: bool, } impl PRECR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AMATCHR { bits: bool, } impl AMATCHR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct DRDYR { bits: bool, } impl DRDYR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct ERRORR { bits: bool, } impl ERRORR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Proxy"] pub struct _PRECW<'a> { w: &'a mut W, } impl<'a> _PRECW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u8) << OFFSET); self.w.bits |= ((value & MASK) as u8) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AMATCHW<'a> { w: &'a mut W, } impl<'a> _AMATCHW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u8) << OFFSET); self.w.bits |= ((value & MASK) as u8) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _DRDYW<'a> { w: &'a mut W, } impl<'a> _DRDYW<'a> { #[doc = r" Sets the field bit"] pub fn 
set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u8) << OFFSET); self.w.bits |= ((value & MASK) as u8) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _ERRORW<'a> { w: &'a mut W, } impl<'a> _ERRORW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 7; self.w.bits &= !((MASK as u8) << OFFSET); self.w.bits |= ((value & MASK) as u8) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } #[doc = "Bit 0 - Stop Received Interrupt Enable"] #[inline] pub fn prec(&self) -> PRECR { let bits = { const MASK: bool = true; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u8) != 0 }; PRECR { bits } } #[doc = "Bit 1 - Address Match Interrupt Enable"] #[inline] pub fn amatch(&self) -> AMATCHR { let bits = { const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u8) != 0 }; AMATCHR { bits } } #[doc = "Bit 2 - Data Interrupt Enable"] #[inline] pub fn drdy(&self) -> DRDYR { let bits = { const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u8) != 0 }; DRDYR { bits } } #[doc = "Bit 7 - Combined Error Interrupt Enable"] #[inline] pub fn error(&self) -> ERRORR { let bits = { const MASK: bool = true; const OFFSET: u8 = 7; ((self.bits >> OFFSET) & MASK as u8) != 0 }; ERRORR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u8) -> &mut Self { self.bits = bits; self } #[doc = "Bit 0 - Stop Received Interrupt Enable"] #[inline] pub fn prec(&mut self) -> _PRECW { _PRECW { w: self } } #[doc = "Bit 1 - Address Match Interrupt Enable"] #[inline] pub fn amatch(&mut self) -> _AMATCHW { _AMATCHW { w: self } } #[doc = "Bit 2 - Data Interrupt Enable"] #[inline] pub fn drdy(&mut self) -> _DRDYW { _DRDYW { w: self } } #[doc = "Bit 7 - Combined Error Interrupt Enable"] #[inline] pub fn error(&mut self) -> _ERRORW { _ERRORW { w: self } } }
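Typical svd2rust-style usage of this register API, shown as a comment sketch because it needs a concrete peripheral binding (the `periph` path is an assumption; the closure shapes match the `modify` and `read` signatures above):

// Enable the data-ready and combined-error interrupts, leaving other bits intact:
// periph.intenset.modify(|_, w| w.drdy().set_bit().error().set_bit());
//
// Poll a single flag:
// if periph.intenset.read().amatch().bit_is_set() { /* handle address match */ }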
24.870432
58
0.493855
21ae62cd5ab4951c43d2f25c60418807084b3424
1,498
/* * Twilio - Api * * This is the public Twilio REST API. * * The version of the OpenAPI document: 1.25.0 * Contact: support@twilio.com * Generated by: https://openapi-generator.tech */ #[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] pub struct ApiV2010AccountNewKey { /// The RFC 2822 date and time in GMT that the resource was created #[serde(rename = "date_created", skip_serializing_if = "Option::is_none")] pub date_created: Option<String>, /// The RFC 2822 date and time in GMT that the resource was last updated #[serde(rename = "date_updated", skip_serializing_if = "Option::is_none")] pub date_updated: Option<String>, /// The string that you assigned to describe the resource #[serde(rename = "friendly_name", skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, /// The secret your application uses to sign Access Tokens and to authenticate to the REST API. #[serde(rename = "secret", skip_serializing_if = "Option::is_none")] pub secret: Option<String>, /// The unique string that identifies the resource #[serde(rename = "sid", skip_serializing_if = "Option::is_none")] pub sid: Option<String>, } impl ApiV2010AccountNewKey { pub fn new() -> ApiV2010AccountNewKey { ApiV2010AccountNewKey { date_created: None, date_updated: None, friendly_name: None, secret: None, sid: None, } } }
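Since every field is optional and tagged with `skip_serializing_if = "Option::is_none"`, a freshly constructed value serializes to an empty JSON object. A quick check of that behavior (assumes `serde_json` as a dev-dependency):

#[cfg(test)]
mod serialization_sketch {
    use super::ApiV2010AccountNewKey;

    #[test]
    fn none_fields_are_omitted() {
        let key = ApiV2010AccountNewKey::new();
        assert_eq!(serde_json::to_string(&key).unwrap(), "{}");
    }
}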
32.565217
99
0.668224
6169408da10c858eaa41f9a4e2cbc8ee9eb7d14f
27,636
use std::io::{Cursor}; use std::error::Error; use std::{i16, i32, i64}; use std::sync::Arc; use std::time::UNIX_EPOCH; use bytes::{Bytes, Buf}; use edgedb_protocol::codec::{build_codec, build_input_codec}; use edgedb_protocol::codec::{Codec, ObjectShape}; use edgedb_protocol::value::{Value, Duration}; use edgedb_protocol::value::{LocalDatetime, LocalDate, LocalTime}; use edgedb_protocol::descriptors::{Descriptor, TypePos}; use edgedb_protocol::descriptors::BaseScalarTypeDescriptor; use edgedb_protocol::descriptors::{ObjectShapeDescriptor, ShapeElement}; use edgedb_protocol::descriptors::{SetDescriptor}; use edgedb_protocol::descriptors::{ScalarTypeDescriptor}; use edgedb_protocol::descriptors::{TupleTypeDescriptor}; use edgedb_protocol::descriptors::{NamedTupleTypeDescriptor, TupleElement}; use edgedb_protocol::descriptors::ArrayTypeDescriptor; use edgedb_protocol::descriptors::EnumerationTypeDescriptor; mod base; macro_rules! encoding_eq { ($codec: expr, $bytes: expr, $value: expr) => { let orig_value = $value; let value = decode($codec, $bytes)?; assert_eq!(value, orig_value); let mut bytes = bytes::BytesMut::new(); $codec.encode(&mut bytes, &orig_value)?; println!("Serialized bytes {:?}", bytes); let bytes = bytes.freeze(); assert_eq!(&bytes[..], $bytes); } } fn decode(codec: &Arc<dyn Codec>, data: &[u8]) -> Result<Value, Box<dyn Error>> { let bytes = Bytes::copy_from_slice(data); let mut cur = Cursor::new(bytes); let res = codec.decode(&mut cur)?; assert!(cur.bytes() == b""); Ok(res) } #[test] fn int16() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000103".parse()?, }) ] )?; encoding_eq!(&codec, b"\0\0", Value::Int16(0)); encoding_eq!(&codec, b"\x01\x05", Value::Int16(0x105)); encoding_eq!(&codec, b"\x7F\xFF", Value::Int16(i16::MAX)); encoding_eq!(&codec, b"\x80\x00", Value::Int16(i16::MIN)); encoding_eq!(&codec, b"\xFF\xFF", Value::Int16(-1)); Ok(()) } #[test] fn int32() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000104".parse()?, }) ] )?; encoding_eq!(&codec, b"\0\0\0\0", Value::Int32(0)); encoding_eq!(&codec, b"\0\0\x01\x05", Value::Int32(0x105)); encoding_eq!(&codec, b"\x7F\xFF\xFF\xFF", Value::Int32(i32::MAX)); encoding_eq!(&codec, b"\x80\x00\x00\x00", Value::Int32(i32::MIN)); encoding_eq!(&codec, b"\xFF\xFF\xFF\xFF", Value::Int32(-1)); Ok(()) } #[test] fn int64() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000105".parse()?, }) ] )?; encoding_eq!(&codec, b"\0\0\0\0\0\0\0\0", Value::Int64(0)); encoding_eq!(&codec, b"\0\0\0\0\0\0\x01\x05", Value::Int64(0x105)); encoding_eq!(&codec, b"\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF", Value::Int64(i64::MAX)); encoding_eq!(&codec, b"\x80\x00\x00\x00\x00\x00\x00\x00", Value::Int64(i64::MIN)); encoding_eq!(&codec, b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", Value::Int64(-1)); Ok(()) } #[test] fn float32() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000106".parse()?, }) ] )?; encoding_eq!(&codec, b"\0\0\0\0", Value::Float32(0.0)); encoding_eq!(&codec, b"\x80\0\0\0", Value::Float32(-0.0)); encoding_eq!(&codec, b"?\x80\0\0", Value::Float32(1.0)); encoding_eq!(&codec, b"\xbf\x8f\xbew", 
Value::Float32(-1.123)); match decode(&codec, b"\x7f\xc0\0\0")? { Value::Float32(val) => assert!(val.is_nan()), _ => panic!("could not parse NaN") }; match decode(&codec, b"\x7f\x80\0\0")? { Value::Float32(val) => { assert!(val.is_infinite()); assert!(val.is_sign_positive()) }, _ => panic!("could not parse +inf") }; match decode(&codec, b"\xff\x80\0\0")? { Value::Float32(val) => { assert!(val.is_infinite()); assert!(val.is_sign_negative()) } _ => panic!("could not parse -inf") }; Ok(()) } #[test] fn float64() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000107".parse()?, }) ] )?; encoding_eq!(&codec, b"\0\0\0\0\0\0\0\0", Value::Float64(0.0)); encoding_eq!(&codec, b"\x80\0\0\0\0\0\0\0", Value::Float64(-0.0)); encoding_eq!(&codec, b"?\xf0\0\0\0\0\0\0", Value::Float64(1.0)); encoding_eq!(&codec, b"T\xb2I\xad%\x94\xc3}", Value::Float64(1e100)); match decode(&codec, b"\x7f\xf8\0\0\0\0\0\0")? { Value::Float64(val) => assert!(val.is_nan()), _ => panic!("could not parse NaN") }; match decode(&codec, b"\x7f\xf0\0\0\0\0\0\0")? { Value::Float64(val) => { assert!(val.is_infinite()); assert!(val.is_sign_positive()) } _ => panic!("could not parse +inf") }; match decode(&codec, b"\xff\xf0\0\0\0\0\0\0")? { Value::Float64(val) => { assert!(val.is_infinite()); assert!(val.is_sign_negative()) }, _ => panic!("could not parse -inf") }; Ok(()) } #[test] fn str() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000101".parse()?, }) ] )?; encoding_eq!(&codec, b"hello", Value::Str(String::from("hello"))); encoding_eq!(&codec, b"", Value::Str(String::from(""))); encoding_eq!(&codec, b"\xd0\xbf\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82", Value::Str(String::from("привет"))); Ok(()) } #[test] fn bytes() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000102".parse()?, }) ] )?; encoding_eq!(&codec, b"hello", Value::Bytes(b"hello".to_vec())); encoding_eq!(&codec, b"", Value::Bytes(b"".to_vec())); encoding_eq!(&codec, b"\x00\x01\x02\x03\x81", Value::Bytes(b"\x00\x01\x02\x03\x81".to_vec())); Ok(()) } #[test] fn uuid() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000100".parse()?, }) ] )?; encoding_eq!(&codec, b"I(\xcc\x1e e\x11\xea\x88H{S\xa6\xad\xb3\x83", Value::Uuid("4928cc1e-2065-11ea-8848-7b53a6adb383".parse()?)); Ok(()) } #[test] fn duration() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-00000000010e".parse()?, }) ] )?; // SELECT <datetime>'2019-11-29T00:00:00Z'-<datetime>'2000-01-01T00:00:00Z' encoding_eq!(&codec, b"\0\x02;o\xad\xff\0\0\0\0\0\0\0\0\0\0", Value::Duration(Duration::from_micros(7272*86400*1000_000))); // SELECT <datetime>'2019-11-29T00:00:00Z'-<datetime>'2019-11-28T01:00:00Z' encoding_eq!(&codec, b"\0\0\0\x13GC\xbc\0\0\0\0\0\0\0\0\0", Value::Duration(Duration::from_micros(82800*1000_000))); encoding_eq!(&codec, b"\xff\xff\xff\xff\xd3,\xba\xe0\0\0\0\0\0\0\0\0", Value::Duration(Duration::from_micros(-752043296))); assert_eq!( decode(&codec, b"\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0") .unwrap_err().to_string(), "non-zero reserved bytes received in 
data"); Ok(()) } #[test] fn null_codec() -> Result<(), Box<dyn Error>> { let codec = build_codec(None, &[])?; encoding_eq!(&codec, b"", Value::Nothing); Ok(()) } #[test] fn object_codec() -> Result<(), Box<dyn Error>> { let elements = vec![ ShapeElement { flag_implicit: true, flag_link_property: false, flag_link: false, name: String::from("__tid__"), type_pos: TypePos(0), }, ShapeElement { flag_implicit: false, flag_link_property: false, flag_link: false, name: String::from("id"), type_pos: TypePos(0), }, ]; let shape = elements.as_slice().into(); let codec = build_codec(Some(TypePos(1)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000100".parse()?, }), Descriptor::ObjectShape(ObjectShapeDescriptor { id: "5d5ebe41-eac8-eab7-a24e-cc3a8cd2766c".parse()?, elements, }), ] )?; // TODO(tailhook) test with non-zero reserved bytes encoding_eq!(&codec, bconcat!( b"\0\0\0\x02\0\0\x00\x00\0\0\0\x100Wd\0 d" b"\x11\xea\x98\xc53\xc5\xcf\xb4r^\0\0\x00" b"\x00\0\0\0\x10I(\xcc\x1e e\x11\xea\x88H{S" b"\xa6\xad\xb3\x83"), Value::Object { shape, fields: vec![ Some(Value::Uuid("30576400-2064-11ea-98c5-33c5cfb4725e" .parse()?)), Some(Value::Uuid("4928cc1e-2065-11ea-8848-7b53a6adb383" .parse()?)), ] }); Ok(()) } #[test] fn set_codec() -> Result<(), Box<dyn Error>> { let inner_elements = vec![ ShapeElement { flag_implicit: true, flag_link_property: false, flag_link: false, name: "__tid__".into(), type_pos: TypePos(0), }, ShapeElement { flag_implicit: true, flag_link_property: false, flag_link: false, name: "id".into(), type_pos: TypePos(0), }, ShapeElement { flag_implicit: false, flag_link_property: false, flag_link: false, name: "first_name".into(), type_pos: TypePos(1), }, ]; let outer_elements = vec![ ShapeElement { flag_implicit: true, flag_link_property: false, flag_link: false, name: "__tid__".into(), type_pos: TypePos(0), }, ShapeElement { flag_implicit: true, flag_link_property: false, flag_link: false, name: "id".into(), type_pos: TypePos(0), }, ShapeElement { flag_implicit: false, flag_link_property: false, flag_link: false, name: "first_name".into(), type_pos: TypePos(1), }, ShapeElement { flag_implicit: false, flag_link_property: false, flag_link: true, name: "collegues".into(), type_pos: TypePos(3), }, ]; let inner_shape = ObjectShape::from(&inner_elements[..]); let outer_shape = ObjectShape::from(&outer_elements[..]); let codec = build_codec(Some(TypePos(4)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000100".parse()?, }), Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000101".parse()?, }), Descriptor::ObjectShape(ObjectShapeDescriptor { id: "8faa7193-48c6-4263-18d3-1a127652569b".parse()?, elements: inner_elements, }), Descriptor::Set(SetDescriptor { id: "afbb389d-aa73-2aae-9310-84a9163cb5ed".parse()?, type_pos: TypePos(2), }), Descriptor::ObjectShape(ObjectShapeDescriptor { id: "9740ff04-324e-08a4-4ac7-2192d72c6967".parse()?, elements: outer_elements, }), ] )?; // TODO(tailhook) test with non-zero reserved bytes encoding_eq!(&codec, bconcat!( b"\0\0\0\x04\0\0\x00\x00\0\0\0\x10\x0c\xf06\xbd " b"\xbd\x11\xea\xa4\xeb\xe9T\xb4(\x13\x91\0\0\x00\x00\0\0\0\x10" b"[\xe3\x9c( \xbd\x11\xea\xaa\xb9g4\x82*\xf1\xc9\0\0\0\x00\0\0\0" b"\x04Ryan\0\0\x00\x00\0\0\0\x9f\0\0\0\x01\0\0\0\0\0\0\x00\x00\0" b"\0\0\x02\0\0\0\x01\0\0\0?\0\0\0\x03\0\0\x00\x00\0\0\0\x10\x0c\xf0" b"6\xbd \xbd\x11\xea\xa4\xeb\xe9T\xb4(\x13\x91\0\0\x00\x00\0\0\0\x10" b"[\xe3\x9e\x80 
\xbd\x11\xea\xaa\xb9\x17]\xbf\x18G\xe5\0\0\0\x00\0\0" b"\0\x03Ana\0\0\0D\0\0\0\x03\0\0\x00\x00\0\0\0\x10\x0c\xf06\xbd " b"\xbd\x11\xea\xa4\xeb\xe9T\xb4(\x13\x91\0\0\x00\x00\0\0\0\x10[" b"\xe3\x97\x14 \xbd\x11\xea\xaa\xb9?7\xe7 \xb8T\0\0\0\x00\0\0\0" b"\x08Harrison" ), Value::Object { shape: outer_shape.clone(), fields: vec![ Some(Value::Uuid("0cf036bd-20bd-11ea-a4eb-e954b4281391".parse()?)), Some(Value::Uuid("5be39c28-20bd-11ea-aab9-6734822af1c9".parse()?)), Some(Value::Str(String::from("Ryan"))), Some(Value::Set(vec![ Value::Object { shape: inner_shape.clone(), fields: vec![ Some(Value::Uuid("0cf036bd-20bd-11ea-a4eb-e954b4281391" .parse()?)), Some(Value::Uuid("5be39e80-20bd-11ea-aab9-175dbf1847e5" .parse()?)), Some(Value::Str(String::from("Ana"))), ]}, Value::Object { shape: inner_shape, fields: vec![ Some(Value::Uuid("0cf036bd-20bd-11ea-a4eb-e954b4281391" .parse()?)), Some(Value::Uuid("5be39714-20bd-11ea-aab9-3f37e720b854" .parse()?)), Some(Value::Str(String::from("Harrison"))), ] }])), ] }); encoding_eq!(&codec, bconcat!(b"\0\0\0\x04\0\0\x00\x00\0\0\0\x10\x0c\xf06" b"\xbd \xbd\x11\xea\xa4\xeb\xe9T\xb4(\x13\x91\0\0\x00\x00\0\0\0\x10" b"[\xe3\x9c( \xbd\x11\xea\xaa\xb9g4\x82*\xf1\xc9\0\0\0\x00" b"\0\0\0\x04Ryan\0\0\x00\x00\0\0\0\x0c\0\0\0\0\0\0\0\0\0\0\x00\x00" ), Value::Object { shape: outer_shape.clone(), fields: vec![ Some(Value::Uuid("0cf036bd-20bd-11ea-a4eb-e954b4281391".parse()?)), Some(Value::Uuid("5be39c28-20bd-11ea-aab9-6734822af1c9".parse()?)), Some(Value::Str(String::from("Ryan"))), Some(Value::Set(vec![])), ] }); encoding_eq!(&codec, bconcat!(b"\0\0\0\x04\0\0\x00\x00\0\0\0\x10\x0c\xf06" b"\xbd \xbd\x11\xea\xa4\xeb\xe9T\xb4(\x13\x91\0\0\x00\x00\0\0\0\x10" b"[\xe3\x9c( \xbd\x11\xea\xaa\xb9g4\x82*\xf1\xc9\0\0\0\x00" b"\xFF\xFF\xFF\xFF\0\0\x00\x00\0\0\0\x0c\0\0\0\0\0\0\0\0\0\0\x00\x00" ), Value::Object { shape: outer_shape, fields: vec![ Some(Value::Uuid("0cf036bd-20bd-11ea-a4eb-e954b4281391".parse()?)), Some(Value::Uuid("5be39c28-20bd-11ea-aab9-6734822af1c9".parse()?)), None, Some(Value::Set(vec![])), ] }); Ok(()) } #[test] #[cfg(feature="num-bigint")] fn bigint() -> Result<(), Box<dyn Error>> { use num_bigint::BigInt; use std::convert::TryInto; use std::str::FromStr; let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000110".parse()?, }, ), ] )?; encoding_eq!(&codec, b"\0\x01\0\0\0\0\0\0\0*", Value::BigInt(42.into())); encoding_eq!(&codec, b"\0\x01\0\x01\0\0\0\0\0\x03", Value::BigInt((30000).into())); encoding_eq!(&codec, b"\0\x02\0\x01\0\0\0\0\0\x03\0\x01", Value::BigInt((30001).into())); encoding_eq!(&codec, b"\0\x02\0\x01@\0\0\0\0\x01\x13\x88", Value::BigInt((-15000).into())); encoding_eq!(&codec, b"\0\x01\0\x05\0\0\0\0\0\n", Value::BigInt( BigInt::from_str("1000000000000000000000")?.try_into()?)); Ok(()) } #[test] #[cfg(feature="bigdecimal")] fn decimal() -> Result<(), Box<dyn Error>> { use bigdecimal::BigDecimal; use std::convert::TryInto; use std::str::FromStr; let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000108".parse()?, }, ), ] )?; encoding_eq!(&codec, b"\0\x01\0\0\0\0\0\x02\0*", Value::Decimal(BigDecimal::from_str("42.00")?.try_into()?)); encoding_eq!(&codec, b"\0\x05\0\x01\0\0\0\t\x04\xd2\x16.#4\r\x80\x1bX", Value::Decimal( BigDecimal::from_str("12345678.901234567")?.try_into()?)); encoding_eq!(&codec, b"\0\x01\0\x19\0\0\0\0\0\x01", Value::Decimal(BigDecimal::from_str("1e100")?.try_into()?)); 
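// The vectors in this test follow a Postgres-style "numeric" wire layout
// (inferred from the bytes themselves, and consistent with every case here):
// u16 digit count, i16 weight (power of 10000 of the first digit), u16 sign
// (0x0000 positive, 0x4000 negative), u16 display scale, then the digits as
// u16 values in base 10000. E.g. b"\0\x01\0\0\0\0\0\x02\0*" is one digit,
// weight 0, positive, scale 2, digit 42 -> "42.00", while
// b"\0\x01\0\x19\0\0\0\0\0\x01" is the single digit 1 with weight 25,
// i.e. 1 * 10000^25 = 1e100.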
encoding_eq!(&codec, b"\0\x06\0\x0b@\0\0\0\0\x07\x01P\x1cB\x08\x9e$!\0\xc8", Value::Decimal(BigDecimal::from_str( "-703367234220692490200000000000000000000000000")?.try_into()?)); encoding_eq!(&codec, b"\0\x06\0\x0b@\0\0\0\0\x07\x01P\x1cB\x08\x9e$!\0\xc8", Value::Decimal(BigDecimal::from_str( "-7033672342206924902e26")?.try_into()?)); Ok(()) } #[test] fn bool() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000109".parse()?, }, ), ] )?; encoding_eq!(&codec, b"\x01", Value::Bool(true)); encoding_eq!(&codec, b"\x00", Value::Bool(false)); Ok(()) } #[test] fn datetime() -> Result<(), Box<dyn Error>> { use std::time::Duration; let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-00000000010a".parse()?, }) ] )?; encoding_eq!(&codec, b"\0\x02=^\x1bTc\xe7", Value::Datetime( UNIX_EPOCH + Duration::new(1577109148, 156903000))); Ok(()) } #[test] fn local_datetime() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-00000000010b".parse()?, }) ] )?; encoding_eq!(&codec, b"\0\x02=^@\xf9\x1f\xfd", Value::LocalDatetime(LocalDatetime::from_micros(630424979709949))); Ok(()) } #[test] fn local_date() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-00000000010c".parse()?, }) ] )?; encoding_eq!(&codec, b"\0\0\x1c\x80", Value::LocalDate(LocalDate::from_days(7296))); Ok(()) } #[test] fn local_time() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-00000000010d".parse()?, }) ] )?; encoding_eq!(&codec, b"\0\0\0\x0b\xd7\x84\0\x01", Value::LocalTime(LocalTime::from_micros(50860392449))); Ok(()) } #[test] fn json() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar(BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-00000000010f".parse()?, }) ] )?; encoding_eq!(&codec, b"\x01\"txt\"", Value::Json(String::from(r#""txt""#))); Ok(()) } #[test] fn custom_scalar() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000101".parse()?, }, ), Descriptor::Scalar( ScalarTypeDescriptor { id: "234dc787-2646-11ea-bebd-010d530c06ca".parse()?, base_type_pos: TypePos(0), }, ), ] )?; encoding_eq!(&codec, b"xx", Value::Str(String::from("xx"))); Ok(()) } #[test] fn tuple() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(2)), &[ Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000105".parse()?, }, ), Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000101".parse()?, }, ), Descriptor::Tuple( TupleTypeDescriptor { id: "6c87a50a-fce2-dcae-6872-8c4c9c4d1e7c".parse()?, element_types: vec![TypePos(0), TypePos(1)], }, ), ], )?; // TODO(tailhook) test with non-zero reserved bytes encoding_eq!(&codec, bconcat!(b"\0\0\0\x02\0\0\0\x00\0\0\0" b"\x08\0\0\0\0\0\0\0\x01\0\0\0\x00\0\0\0\x03str"), Value::Tuple(vec![ Value::Int64(1), Value::Str("str".into()), ])); Ok(()) } #[test] fn input_tuple() -> Result<(), Box<dyn Error>> { let codec = build_input_codec(Some(TypePos(1)), &[ Descriptor::BaseScalar( 
BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000101".parse()?, }, ), Descriptor::Tuple( TupleTypeDescriptor { id: "6c87a50a-fce2-dcae-6872-8c4c9c4d1e7c".parse()?, element_types: vec![TypePos(0)], }, ), ], )?; // TODO(tailhook) test with non-zero reserved bytes encoding_eq!(&codec, bconcat!(b"\0\0\0\x01\0\0\0\x04test"), Value::Tuple(vec![ Value::Str("test".into()), ])); Ok(()) } #[test] fn named_tuple() -> Result<(), Box<dyn Error>> { let elements = vec![ TupleElement { name: "a".into(), type_pos: TypePos(0), }, TupleElement { name: "b".into(), type_pos: TypePos(1), }, ]; let shape = elements.as_slice().into(); let codec = build_codec(Some(TypePos(2)), &[ Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000105".parse()?, }, ), Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000101".parse()?, }, ), Descriptor::NamedTuple( NamedTupleTypeDescriptor { id: "101385c1-d6d5-ec67-eec4-b2b88be8a197".parse()?, elements, }, ), ], )?; // TODO(tailhook) test with non-zero reserved bytes encoding_eq!(&codec, bconcat!(b"\0\0\0\x02\0\0\0\x00\0\0\0" b"\x08\0\0\0\0\0\0\0\x01\0\0\0\x00\0\0\0\x01x"), Value::NamedTuple { shape, fields: vec![ Value::Int64(1), Value::Str("x".into()), ], }); Ok(()) } #[test] fn input_named_tuple() -> Result<(), Box<dyn Error>> { let elements = vec![ TupleElement { name: "a".into(), type_pos: TypePos(0), }, TupleElement { name: "b".into(), type_pos: TypePos(1), }, ]; let shape = elements.as_slice().into(); let codec = build_input_codec(Some(TypePos(2)), &[ Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000105".parse()?, }, ), Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000101".parse()?, }, ), Descriptor::NamedTuple( NamedTupleTypeDescriptor { id: "101385c1-d6d5-ec67-eec4-b2b88be8a197".parse()?, elements, }, ), ], )?; // TODO(tailhook) test with non-zero reserved bytes encoding_eq!(&codec, bconcat!(b"\0\0\0\x02\0\0\0" b"\x08\0\0\0\0\0\0\0\x01\0\0\0\x01x"), Value::NamedTuple { shape, fields: vec![ Value::Int64(1), Value::Str("x".into()), ], }); Ok(()) } #[test] fn array() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(1)), &[ Descriptor::BaseScalar( BaseScalarTypeDescriptor { id: "00000000-0000-0000-0000-000000000105".parse()?, }, ), Descriptor::Array( ArrayTypeDescriptor { id: "b0105467-a177-635f-e207-0a21867f9be0".parse()?, type_pos: TypePos(0), dimensions: vec![None], }, ), ], )?; // TODO(tailhook) test with non-zero reserved bytes encoding_eq!(&codec, bconcat!(b"\0\0\0\x01\0\0\0\0\0\0\0\x00\0\0\0\x03" b"\0\0\0\x01\0\0\0\x08\0\0\0\0\0\0\0\x01" b"\0\0\0\x08\0\0\0\0\0\0\0\x02" b"\0\0\0\x08\0\0\0\0\0\0\0\x03"), Value::Array(vec![ Value::Int64(1), Value::Int64(2), Value::Int64(3), ])); encoding_eq!(&codec, bconcat!(b"\0\0\0\0\0\0\0\0\0\0\0\x00"), Value::Array(vec![])); Ok(()) } #[test] fn enums() -> Result<(), Box<dyn Error>> { let codec = build_codec(Some(TypePos(0)), &[ Descriptor::Enumeration( EnumerationTypeDescriptor { id: "ac5dc6a4-2656-11ea-aa6d-233f91e80ff6".parse()?, members: vec![ "x".into(), "y".into(), ], }, ), ] )?; encoding_eq!(&codec, bconcat!(b"x"), Value::Enum("x".into())); Ok(()) }
33.017921
79
0.516826
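The float32/float64 vectors in the tests above are plain big-endian IEEE-754 bit patterns. Below is a minimal std-only sketch cross-checking a few of them; it is self-contained and nothing in it is part of the codec API.

// Verify that the big-endian byte strings used in the float tests above
// really are the IEEE-754 encodings they claim to be.
fn main() {
    // 0x7FC00000 is the canonical quiet NaN for f32.
    assert!(f32::from_be_bytes([0x7f, 0xc0, 0x00, 0x00]).is_nan());
    // 0x7F800000 / 0xFF800000 are +inf / -inf for f32.
    assert_eq!(f32::from_be_bytes([0x7f, 0x80, 0x00, 0x00]), f32::INFINITY);
    assert_eq!(f32::from_be_bytes([0xff, 0x80, 0x00, 0x00]), f32::NEG_INFINITY);
    // b"?\xf0\0\0\0\0\0\0" is 0x3FF0_0000_0000_0000, i.e. f64 1.0.
    assert_eq!(f64::from_be_bytes(*b"?\xf0\0\0\0\0\0\0"), 1.0);
    // And the f64 pattern used for 1e100 round-trips back out.
    assert_eq!(1e100_f64.to_be_bytes(), *b"T\xb2I\xad%\x94\xc3}");
}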
d957755ed841637f69e965bfa887ce8d0c9c9393
10,396
#![deny(unused)] #[macro_use] extern crate log; pub mod constants; #[macro_use] mod utils; mod config; mod logic; mod processor; pub use config::Config; use std::{fmt, io, net, thread}; use std::collections::HashMap; use std::sync::{atomic, mpsc, Arc, Mutex}; use bitcoin::network::message::NetworkMessage; use bitcoin::network::message_blockdata::Inventory; use bitcoin::network::message_network::VersionMessage; use processor::Ctrl; #[derive(Debug)] pub enum Error { /// No peer with given ID known. He must have been disconnected. PeerDisconnected(PeerId), /// We have already shut down. Shutdown, /// An I/O error. Io(io::Error), /// Can't reach the peer. PeerUnreachable(io::Error), /// The handshake with the peer is not finished. PeerHandshakePending, } impl From<io::Error> for Error { fn from(e: io::Error) -> Error { Error::Io(e) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::PeerDisconnected(id) => write!(f, "peer disconnected: {}", id), Error::Shutdown => write!(f, "P2P is already shut down"), Error::Io(ref e) => write!(f, "I/O error: {}", e), Error::PeerUnreachable(ref e) => write!(f, "can't reach the peer: {}", e), Error::PeerHandshakePending => write!(f, "peer didn't finish the handshake yet"), } } } impl std::error::Error for Error {} /// A peer identifier. /// /// Can never be 0. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct PeerId(pub(crate) usize); impl fmt::Display for PeerId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } /// A signal received from a peer. #[derive(Debug, Clone, PartialEq, Eq)] pub enum P2PEvent { /// The new peer connected, performed the handshake and can be /// communicated with. Connected(PeerId), /// The peer disconnected. Disconnected(PeerId), /// The peer sent a message. Message(PeerId, NetworkMessage), } /// Whether a peer was inbound or outbound. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum PeerType { Inbound, Outbound, } /// The internal state for a peer. #[derive(Debug)] pub struct PeerState { addr: net::SocketAddr, local_addr: net::SocketAddr, peer_type: PeerType, /// Whether this peer prefers to get sent headers first. send_headers: bool, /// Handshake state for this peer. handshake: logic::handshake::Handshake, /// Ping statistics to disconnect stale peers. ping_stats: logic::pingpong::PingStats, /// Our view of the peer's inventory. inventory: logic::inventory::PeerInventory, } /// Used to add new peers. pub trait IntoMioTcpStream { fn into_mio_tcp_stream(self) -> mio::net::TcpStream; } impl IntoMioTcpStream for mio::net::TcpStream { fn into_mio_tcp_stream(self) -> mio::net::TcpStream { self } } impl IntoMioTcpStream for std::net::TcpStream { fn into_mio_tcp_stream(self) -> mio::net::TcpStream { mio::net::TcpStream::from_std(self) } } /// The main struct coordinating all P2P activity. pub struct P2P { config: Config, /// Tally of the next peer ID to use. next_peer_id: atomic::AtomicUsize, /// Some consensus-level state for each peer. //TODO(stevenroose) consider RwLock? peers: Mutex<HashMap<PeerId, PeerState>>, /// Handle to send control messages to the processor thread. ctrl_tx: Mutex<mpsc::Sender<Ctrl>>, /// Waker to wake up the processor thread. waker: mio::Waker, /// The incoming event channel. /// It's an Option so that the user can take it away. event_rx: Mutex<Option<mpsc::Receiver<P2PEvent>>>, /// The current block height of our client to advertise to peers. 
block_height: atomic::AtomicU32, /// A global signal for our thread to shut down. quit: atomic::AtomicBool, } impl P2P { /// Instantiate a P2P coordinator. pub fn new(config: Config) -> Result<Arc<P2P>, Error> { let poll = mio::Poll::new()?; let waker = mio::Waker::new(poll.registry(), processor::WAKE_TOKEN)?; // Create the control message channel. let (ctrl_tx, ctrl_rx) = mpsc::channel(); // Create the incoming message channel. let (event_tx, event_rx) = mpsc::channel(); //TODO(stevenroose) make sync let p2p = Arc::new(P2P { config: config, next_peer_id: atomic::AtomicUsize::new(1), peers: Mutex::new(HashMap::new()), ctrl_tx: Mutex::new(ctrl_tx), waker: waker, event_rx: Mutex::new(Some(event_rx)), block_height: atomic::AtomicU32::new(0), quit: atomic::AtomicBool::new(false), }); let p2p_cloned = p2p.clone(); thread::spawn(|| processor::processor(p2p_cloned, poll, ctrl_rx, event_tx)); Ok(p2p) } /// Get the configuration of the P2P instance. pub fn config(&self) -> &Config { &self.config } /// Set the latest block height to advertise to peers. pub fn set_height(&self, block_height: u32) { self.block_height.store(block_height, atomic::Ordering::Relaxed); } /// Assign the next [PeerId]. fn next_peer_id(&self) -> PeerId { PeerId(self.next_peer_id.fetch_add(1, atomic::Ordering::Relaxed)) } /// Ensure we are still running, returning an error otherwise. fn ensure_up(&self) -> Result<(), Error> { if self.quit.load(atomic::Ordering::Relaxed) { Err(Error::Shutdown) } else { Ok(()) } } /// Check if the peer is known, returning an error if it is not. fn ensure_known_peer(&self, peer: PeerId) -> Result<(), Error> { if !self.peers.lock().unwrap().contains_key(&peer) { Err(Error::PeerDisconnected(peer)) } else { Ok(()) } } /// Shut down the p2p operation. Any subsequent call will be a no-op. pub fn shutdown(&self) { self.quit.store(true, atomic::Ordering::Relaxed); let _ = self.waker.wake(); //TODO(stevenroose) log error? } /// Add a new peer from an opened TCP stream. pub fn add_peer<S: IntoMioTcpStream>( &self, conn: S, peer_type: PeerType, ) -> Result<PeerId, Error> { self.ensure_up()?; let conn = conn.into_mio_tcp_stream(); if let Some(err) = conn.take_error()? { return Err(Error::PeerUnreachable(err)); } let id = self.next_peer_id(); let mut state = PeerState { addr: conn.peer_addr()?, local_addr: conn.local_addr()?, peer_type: peer_type, send_headers: false, handshake: Default::default(), ping_stats: Default::default(), inventory: logic::inventory::PeerInventory::new(&self.config), }; let start_height = self.block_height.load(atomic::Ordering::Relaxed); let version = logic::handshake::make_version_msg(&self.config, id, &mut state, start_height); state.handshake.version_sent = true; assert!(self.peers.lock().unwrap().insert(id, state).is_none()); // If this errors, it means that shutdown was called between our check // of ensure_up and now. let _ = self.ctrl_tx.lock().unwrap().send(Ctrl::Connect(id, conn)); // Then send the version message to start the handshake. self.send_control(Ctrl::SendMsg(id, NetworkMessage::Version(version))); Ok(id) } /// Connect to the given peer. pub fn connect_peer(&self, addr: net::SocketAddr) -> Result<PeerId, Error> { let conn = mio::net::TcpStream::connect(addr).map_err(Error::PeerUnreachable)?; self.add_peer(conn, PeerType::Outbound) } /// Disconnect the given peer.
pub fn disconnect_peer(&self, peer: PeerId) -> Result<(), Error> { self.ensure_up()?; self.ensure_known_peer(peer)?; let _ = self.ctrl_tx.lock().unwrap().send(Ctrl::Disconnect(peer)); Ok(()) } /// Disconnect the peer and don't reconnect to it. pub fn ban_peer(&self, peer: PeerId) -> Result<(), Error> { self.disconnect_peer(peer)?; //TODO(stevenroose) keep an LRU list of banned peers. Ok(()) } /// The number of connected peers. pub fn nb_connected_peers(&self) -> usize { self.peers.lock().unwrap().len() } /// Get the peer's version message. pub fn peer_version(&self, peer: PeerId) -> Result<Option<VersionMessage>, Error> { match self.peers.lock().unwrap().get(&peer) { Some(s) => Ok(s.handshake.version.clone()), None => Err(Error::PeerDisconnected(peer)), } } /// Take the incoming event channel out of the P2P struct. /// This can be done only once. pub fn take_event_channel(&self) -> Option<mpsc::Receiver<P2PEvent>> { self.event_rx.lock().unwrap().take() } /// Queue a new control message to the processor thread. fn send_control(&self, ctrl: Ctrl) { self.ctrl_tx.lock().unwrap().send(ctrl).expect("processor quit"); self.waker.wake().expect("processor waker error"); } /// Send a message to the given peer. pub fn send_message(&self, peer: PeerId, msg: NetworkMessage) -> Result<(), Error> { self.ensure_up()?; let peers_lock = self.peers.lock().unwrap(); let state = match peers_lock.get(&peer) { Some(s) => s, None => return Err(Error::PeerDisconnected(peer)), }; if !state.handshake.finished() { return Err(Error::PeerHandshakePending); } self.send_control(Ctrl::SendMsg(peer, msg)); Ok(()) } /// Broadcast the message to all peers. pub fn broadcast_message(&self, msg: NetworkMessage) -> Result<(), Error> { self.ensure_up()?; self.send_control(Ctrl::BroadcastMsg(msg)); Ok(()) } /// Add an inventory item to send to the peer. /// They are trickled with randomized intervals. /// /// Blocks are not queued up, but sent immediately. /// Don't use this to send `inv` messages directly, e.g. when replying to `mempool`. /// Just use `send_message` for that. pub fn queue_inventory(&self, peer: PeerId, inv: Inventory) -> Result<(), Error> { self.ensure_up()?; let mut peers_lock = self.peers.lock().unwrap(); let state = match peers_lock.get_mut(&peer) { Some(s) => s, None => return Err(Error::PeerDisconnected(peer)), }; if !state.handshake.finished() { return Err(Error::PeerHandshakePending); } if let Some(msg) = logic::inventory::queue_inventory(state, inv) { self.send_control(Ctrl::SendMsg(peer, msg)); } Ok(()) } /// Broadcast an inventory item to all peers. /// They are trickled with randomized intervals. /// /// Blocks are not queued up, but sent immediately. pub fn broadcast_inventory(&self, inv: Inventory) -> Result<(), Error> { self.ensure_up()?; //TODO(stevenroose) consider doing this in the processor instead for (peer, state) in self.peers.lock().unwrap().iter_mut() { if let Some(msg) = logic::inventory::queue_inventory(state, inv) { self.send_control(Ctrl::SendMsg(*peer, msg)); } } Ok(()) } }
28.021563
85
0.680743
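A minimal usage sketch for the `P2P` API above. It calls only methods defined in this file; the one assumption is `Config::default()`, since the `Config` fields are not shown in this excerpt.

fn run(addr: std::net::SocketAddr) -> Result<(), Error> {
    // Assumes Config implements Default; its fields live in the config module.
    let p2p = P2P::new(Config::default())?;
    // The event channel can be taken exactly once; one thread drains it.
    let events = p2p.take_event_channel().expect("event channel already taken");
    let peer = p2p.connect_peer(addr)?;
    for event in events.iter() {
        match event {
            P2PEvent::Connected(id) => println!("peer {} completed handshake", id),
            P2PEvent::Disconnected(id) if id == peer => break,
            P2PEvent::Message(_id, _msg) => { /* handle the network message */ }
            _ => {}
        }
    }
    p2p.shutdown();
    Ok(())
}

Note that `send_message` and `queue_inventory` refuse to act before the `Connected` event fires, since both check `handshake.finished()` and return `PeerHandshakePending` otherwise.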
75aa75a2f02315fa9288523f9164a28ef0dd088d
9,773
use rand::{RngCore, SeedableRng}; use rand_xorshift::XorShiftRng; use runiversal::common::{ btree_multimap_insert, mk_cid, mk_sid, mk_t, BasicIOCtx, CoreIOCtx, GeneralTraceMessage, GossipData, SlaveIOCtx, SlaveTraceMessage, Timestamp, }; use runiversal::coord::{CoordConfig, CoordContext, CoordForwardMsg, CoordState}; use runiversal::model::common::{ CoordGroupId, EndpointId, Gen, LeadershipId, PaxosGroupId, PaxosGroupIdTrait, SlaveGroupId, TabletGroupId, }; use runiversal::model::message as msg; use runiversal::multiversion_map::MVM; use runiversal::paxos::PaxosConfig; use runiversal::slave::{ FullSlaveInput, SlaveBackMessage, SlaveConfig, SlaveContext, SlaveState, SlaveTimerInput, }; use runiversal::tablet::{TabletContext, TabletCreateHelper, TabletForwardMsg, TabletState}; use runiversal::test_utils::mk_seed; use std::collections::BTreeMap; use std::sync::mpsc; use std::sync::mpsc::{Receiver, Sender}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::{SystemTime, UNIX_EPOCH}; // ----------------------------------------------------------------------------------------------- // ProdSlaveIOCtx // ----------------------------------------------------------------------------------------------- /// The granularity at which Timer events are executed, in microseconds const TIMER_INCREMENT: u64 = 250; pub struct ProdSlaveIOCtx { // Basic rand: XorShiftRng, net_conn_map: Arc<Mutex<BTreeMap<EndpointId, Sender<Vec<u8>>>>>, // Constructing and communicating with Tablets to_slave: Sender<FullSlaveInput>, tablet_map: BTreeMap<TabletGroupId, Sender<TabletForwardMsg>>, // Coord coord_map: BTreeMap<CoordGroupId, Sender<CoordForwardMsg>>, // Deferred timer tasks tasks: Arc<Mutex<BTreeMap<Timestamp, Vec<SlaveTimerInput>>>>, } impl ProdSlaveIOCtx { /// Construct a helper thread that will poll `SlaveTimerInput` from `tasks` and push /// them back to the Slave via `to_slave`. fn start(&mut self) { let to_slave = self.to_slave.clone(); let tasks = self.tasks.clone(); thread::spawn(move || loop { // Sleep let increment = std::time::Duration::from_micros(TIMER_INCREMENT); thread::sleep(increment); // Poll all tasks from `tasks` prior to the current time, and push them to the Slave. let now = mk_t(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis()); let mut tasks = tasks.lock().unwrap(); while let Some((next_timestamp, _)) = tasks.first_key_value() { if next_timestamp <= &now { // All data in this first entry should be dispatched. let next_timestamp = next_timestamp.clone(); for timer_input in tasks.remove(&next_timestamp).unwrap() { to_slave.send(FullSlaveInput::SlaveTimerInput(timer_input)); } } else { // The first entry is still in the future; stop scanning so this // loop does not spin forever while holding the lock. break; } } }); } } impl BasicIOCtx for ProdSlaveIOCtx { type RngCoreT = XorShiftRng; fn rand(&mut self) -> &mut Self::RngCoreT { &mut self.rand } fn now(&mut self) -> Timestamp { mk_t(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis()) } fn send(&mut self, eid: &EndpointId, msg: msg::NetworkMessage) { let net_conn_map = self.net_conn_map.lock().unwrap(); let sender = net_conn_map.get(eid).unwrap(); sender.send(rmp_serde::to_vec(&msg).unwrap()).unwrap(); } fn general_trace(&mut self, _: GeneralTraceMessage) {} } impl SlaveIOCtx for ProdSlaveIOCtx { fn create_tablet(&mut self, helper: TabletCreateHelper) { // Create an RNG using the random seed provided by the Slave. let rand = XorShiftRng::from_seed(helper.rand_seed); // Create mpsc queue for Slave-Tablet communication.
let (to_tablet_sender, to_tablet_receiver) = mpsc::channel::<TabletForwardMsg>(); self.tablet_map.insert(helper.this_tid.clone(), to_tablet_sender); // Spawn a new thread and create the Tablet. let tablet_context = TabletContext::new(helper); let mut io_ctx = ProdCoreIOCtx { net_conn_map: self.net_conn_map.clone(), rand, to_slave: self.to_slave.clone(), }; thread::spawn(move || { let mut tablet = TabletState::new(tablet_context); loop { let tablet_msg = to_tablet_receiver.recv().unwrap(); tablet.handle_input(&mut io_ctx, tablet_msg); } }); } fn tablet_forward(&mut self, tablet_group_id: &TabletGroupId, msg: TabletForwardMsg) { self.tablet_map.get(tablet_group_id).unwrap().send(msg).unwrap(); } fn all_tids(&self) -> Vec<TabletGroupId> { self.tablet_map.keys().cloned().collect() } fn num_tablets(&self) -> usize { self.tablet_map.keys().len() } fn coord_forward(&mut self, coord_group_id: &CoordGroupId, msg: CoordForwardMsg) { self.coord_map.get(coord_group_id).unwrap().send(msg).unwrap(); } fn all_cids(&self) -> Vec<CoordGroupId> { self.coord_map.keys().cloned().collect() } fn defer(&mut self, defer_time: Timestamp, timer_input: SlaveTimerInput) { let timestamp = self.now().add(defer_time); let mut tasks = self.tasks.lock().unwrap(); if let Some(timer_inputs) = tasks.get_mut(&timestamp) { timer_inputs.push(timer_input); } else { tasks.insert(timestamp.clone(), vec![timer_input]); } } fn trace(&mut self, _: SlaveTraceMessage) {} } // ----------------------------------------------------------------------------------------------- // ProdCoreIOCtx // ----------------------------------------------------------------------------------------------- pub struct ProdCoreIOCtx { // Basic rand: XorShiftRng, net_conn_map: Arc<Mutex<BTreeMap<EndpointId, Sender<Vec<u8>>>>>, // Slave to_slave: Sender<FullSlaveInput>, } impl BasicIOCtx for ProdCoreIOCtx { type RngCoreT = XorShiftRng; fn rand(&mut self) -> &mut Self::RngCoreT { &mut self.rand } fn now(&mut self) -> Timestamp { mk_t(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis()) } fn send(&mut self, eid: &EndpointId, msg: msg::NetworkMessage) { let net_conn_map = self.net_conn_map.lock().unwrap(); let sender = net_conn_map.get(eid).unwrap(); sender.send(rmp_serde::to_vec(&msg).unwrap()).unwrap(); } fn general_trace(&mut self, _: GeneralTraceMessage) {} } impl CoreIOCtx for ProdCoreIOCtx { fn slave_forward(&mut self, msg: SlaveBackMessage) { self.to_slave.send(FullSlaveInput::SlaveBackMessage(msg)); } } // ----------------------------------------------------------------------------------------------- // SlaveStarter // ----------------------------------------------------------------------------------------------- const NUM_COORDS: u32 = 3; /// This initializes a Slave for a system that is bootstrapping. All Slave and Master /// nodes should already be constructed and network connections should already be /// established before this function is called. pub fn start_server( to_server_sender: Sender<FullSlaveInput>, to_server_receiver: Receiver<FullSlaveInput>, net_conn_map: &Arc<Mutex<BTreeMap<EndpointId, Sender<Vec<u8>>>>>, this_eid: EndpointId, this_sid: SlaveGroupId, slave_address_config: BTreeMap<SlaveGroupId, Vec<EndpointId>>, master_address_config: Vec<EndpointId>, ) { // Create Slave RNG.
let mut rand = XorShiftRng::from_entropy(); // Create common Gossip. Both address configs are used again below, so pass clones. let gossip = Arc::new(GossipData::new(slave_address_config.clone(), master_address_config.clone())); // Construct LeaderMap let mut leader_map = BTreeMap::<PaxosGroupId, LeadershipId>::new(); leader_map.insert( PaxosGroupId::Master, LeadershipId { gen: Gen(0), eid: master_address_config[0].clone() }, ); for (sid, eids) in slave_address_config { let eid = eids.into_iter().next().unwrap(); leader_map.insert(sid.to_gid(), LeadershipId { gen: Gen(0), eid }); } // Create the Coord let mut coord_map = BTreeMap::<CoordGroupId, Sender<CoordForwardMsg>>::new(); let mut coord_positions: Vec<CoordGroupId> = Vec::new(); for _ in 0..NUM_COORDS { let coord_group_id = mk_cid(&mut rand); coord_positions.push(coord_group_id.clone()); // Create the seed for the Coord's RNG. We use the Slave's // RNG to create a random seed. let rand = XorShiftRng::from_seed(mk_seed(&mut rand)); // Create mpsc queue for Slave-Coord communication. let (to_coord_sender, to_coord_receiver) = mpsc::channel(); coord_map.insert(coord_group_id.clone(), to_coord_sender); // Create the Coord let coord_context = CoordContext::new( CoordConfig::default(), this_sid.clone(), coord_group_id, this_eid.clone(), gossip.clone(), leader_map.clone(), ); let mut io_ctx = ProdCoreIOCtx { net_conn_map: net_conn_map.clone(), rand, to_slave: to_server_sender.clone(), }; thread::spawn(move || { let mut coord = CoordState::new(coord_context); loop { let coord_msg = to_coord_receiver.recv().unwrap(); coord.handle_input(&mut io_ctx, coord_msg); } }); } // Construct the SlaveState let mut io_ctx = ProdSlaveIOCtx { rand, net_conn_map: net_conn_map.clone(), to_slave: to_server_sender.clone(), tablet_map: Default::default(), coord_map, tasks: Arc::new(Mutex::new(Default::default())), }; io_ctx.start(); let slave_context = SlaveContext::new( coord_positions, SlaveConfig::default(), this_sid, this_eid, gossip, leader_map, PaxosConfig::prod(), ); let mut slave = SlaveState::new(slave_context); loop { // Receive data from the `to_server_receiver` and update the SlaveState accordingly. // This is the steady state that the slave enters. let full_input = to_server_receiver.recv().unwrap(); slave.handle_input(&mut io_ctx, full_input); } }
33.241497
98
0.650466
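A self-contained sketch of the deferred-timer pattern that `ProdSlaveIOCtx` implements above: a `BTreeMap` keyed by due time behind a `Mutex`, drained by a polling thread. Plain `u64` milliseconds stand in for `Timestamp` here, and the drain loop breaks at the first entry that is still in the future.

use std::collections::BTreeMap;
use std::sync::{mpsc, Arc, Mutex};
use std::thread;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn now_millis() -> u64 {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64
}

fn main() {
    let tasks: Arc<Mutex<BTreeMap<u64, Vec<&'static str>>>> = Default::default();
    let (tx, rx) = mpsc::channel();

    // Defer two inputs, one 50ms out and one 150ms out.
    tasks.lock().unwrap().entry(now_millis() + 50).or_default().push("first");
    tasks.lock().unwrap().entry(now_millis() + 150).or_default().push("second");

    let poller_tasks = tasks.clone();
    thread::spawn(move || loop {
        thread::sleep(Duration::from_millis(10));
        let now = now_millis();
        let mut tasks = poller_tasks.lock().unwrap();
        // Drain every entry that is due; stop at the first future entry.
        while let Some((&due, _)) = tasks.iter().next() {
            if due > now { break; }
            for input in tasks.remove(&due).unwrap() {
                let _ = tx.send(input);
            }
        }
    });

    assert_eq!(rx.recv().unwrap(), "first");
    assert_eq!(rx.recv().unwrap(), "second");
}

Keying the map by due time means the poller only ever inspects the smallest key instead of scanning every pending task.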
f9d1c115b5d41e8ad321b2b093cb68400c632acd
3,782
use instruction_def::*; use test::run_test; use Operand::*; use Reg::*; use RegScale::*; use RegType::*; use {BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode}; #[test] fn mulx_1() { run_test( &Instruction { mnemonic: Mnemonic::MULX, operand1: Some(Direct(ECX)), operand2: Some(Direct(ESP)), operand3: Some(Direct(EDX)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[196, 226, 91, 246, 202], OperandSize::Dword, ) } #[test] fn mulx_2() { run_test( &Instruction { mnemonic: Mnemonic::MULX, operand1: Some(Direct(EBP)), operand2: Some(Direct(EBX)), operand3: Some(IndirectScaledDisplaced( ECX, Two, 620740657, Some(OperandSize::Dword), None, )), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[196, 226, 99, 246, 44, 77, 49, 192, 255, 36], OperandSize::Dword, ) } #[test] fn mulx_3() { run_test( &Instruction { mnemonic: Mnemonic::MULX, operand1: Some(Direct(EBP)), operand2: Some(Direct(ESI)), operand3: Some(Direct(EBX)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[196, 226, 75, 246, 235], OperandSize::Qword, ) } #[test] fn mulx_4() { run_test( &Instruction { mnemonic: Mnemonic::MULX, operand1: Some(Direct(ESI)), operand2: Some(Direct(EDI)), operand3: Some(IndirectScaledIndexedDisplaced( RDI, RCX, Four, 121392359, Some(OperandSize::Dword), None, )), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[196, 226, 67, 246, 180, 143, 231, 76, 60, 7], OperandSize::Qword, ) } #[test] fn mulx_5() { run_test( &Instruction { mnemonic: Mnemonic::MULX, operand1: Some(Direct(RSP)), operand2: Some(Direct(RBP)), operand3: Some(Direct(RDX)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[196, 226, 211, 246, 226], OperandSize::Qword, ) } #[test] fn mulx_6() { run_test( &Instruction { mnemonic: Mnemonic::MULX, operand1: Some(Direct(RSP)), operand2: Some(Direct(RCX)), operand3: Some(IndirectScaledIndexedDisplaced( RDI, RSI, Four, 812365078, Some(OperandSize::Qword), None, )), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[196, 226, 243, 246, 164, 183, 22, 181, 107, 48], OperandSize::Qword, ) }
24.558442
95
0.453464
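The expected byte arrays in these tests are VEX-encoded instructions. Below is a worked decode of the five bytes from `mulx_1`, written as a runnable check; the bit-field breakdown in the comments follows the standard 3-byte VEX layout (MULX is VEX.NDS.LZ.F2.0F38.W0 F6 /r) rather than anything stated in this file.

fn main() {
    let bytes: [u8; 5] = [196, 226, 91, 246, 202];
    assert_eq!(bytes[0], 0xC4); // three-byte VEX escape
    // 0xE2 = 0b1110_0010: R, X and B are all 1 (these bits are inverted, so
    // no register extension) and m-mmmm = 0b00010 selects the 0F38 opcode map.
    assert_eq!(bytes[1], 0xE2);
    // 0x5B = 0b0_1011_0_11: W = 0, vvvv = 0b1011 (inverted -> 0b0100 = ESP,
    // the second operand), L = 0, and pp = 0b11 encodes the F2 prefix.
    assert_eq!(bytes[2], 0x5B);
    assert_eq!(bytes[3], 0xF6); // the MULX opcode
    // ModRM 0xCA = 0b11_001_010: register-direct mode, reg = 001 = ECX
    // (first operand), rm = 010 = EDX (third operand).
    assert_eq!(bytes[4], 0xCA);
}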
1a4a8c1a637e7a1e0c0df8a0294f0750265f40b8
68,978
// This file is Copyright its original authors, visible in version control // history. // // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // You may not use this file except in accordance with one or both of these // licenses. //! The top-level network map tracking logic lives here. use bitcoin::secp256k1::key::PublicKey; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; use bitcoin::blockdata::script::Builder; use bitcoin::blockdata::opcodes; use chain::chaininterface::{ChainError, ChainWatchInterface}; use ln::features::{ChannelFeatures, NodeFeatures}; use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField}; use ln::msgs; use util::ser::{Writeable, Readable, Writer}; use util::logger::Logger; use std::{cmp, fmt}; use std::sync::{RwLock, RwLockReadGuard}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::collections::BTreeMap; use std::collections::btree_map::Entry as BtreeEntry; use std::ops::Deref; use bitcoin::hashes::hex::ToHex; /// Represents the network as nodes and channels between them #[derive(PartialEq)] pub struct NetworkGraph { channels: BTreeMap<u64, ChannelInfo>, nodes: BTreeMap<PublicKey, NodeInfo>, } /// A simple newtype for RwLockReadGuard<'a, NetworkGraph>. /// This exists only to make accessing a RwLock<NetworkGraph> possible from /// the C bindings, as it can be done directly in Rust code. pub struct LockedNetworkGraph<'a>(pub RwLockReadGuard<'a, NetworkGraph>); /// Receives and validates network updates from peers, /// stores authentic and relevant data as a network graph. /// This network graph is then used for routing payments. /// Provides interface to help with initial routing sync by /// serving historical announcements. pub struct NetGraphMsgHandler<C: Deref, L: Deref> where C::Target: ChainWatchInterface, L::Target: Logger { secp_ctx: Secp256k1<secp256k1::VerifyOnly>, /// Representation of the payment channel network pub network_graph: RwLock<NetworkGraph>, chain_monitor: C, full_syncs_requested: AtomicUsize, logger: L, } impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: ChainWatchInterface, L::Target: Logger { /// Creates a new tracker of the actual state of the network of channels and nodes, /// assuming a fresh network graph. /// Chain monitor is used to make sure announced channels exist on-chain, /// channel data is correct, and that the announcement is signed with /// channel owners' keys. pub fn new(chain_monitor: C, logger: L) -> Self { NetGraphMsgHandler { secp_ctx: Secp256k1::verification_only(), network_graph: RwLock::new(NetworkGraph { channels: BTreeMap::new(), nodes: BTreeMap::new(), }), full_syncs_requested: AtomicUsize::new(0), chain_monitor, logger, } } /// Creates a new tracker of the actual state of the network of channels and nodes, /// assuming an existing Network Graph. pub fn from_net_graph(chain_monitor: C, logger: L, network_graph: NetworkGraph) -> Self { NetGraphMsgHandler { secp_ctx: Secp256k1::verification_only(), network_graph: RwLock::new(network_graph), full_syncs_requested: AtomicUsize::new(0), chain_monitor, logger, } } /// Take a read lock on the network_graph and return it in the C-bindings /// newtype helper. 
This is likely only useful when called via the C /// bindings as you can call `self.network_graph.read().unwrap()` in Rust /// yourself. pub fn read_locked_graph<'a>(&'a self) -> LockedNetworkGraph<'a> { LockedNetworkGraph(self.network_graph.read().unwrap()) } } impl<'a> LockedNetworkGraph<'a> { /// Get a reference to the NetworkGraph which this read-lock contains. pub fn graph(&self) -> &NetworkGraph { &*self.0 } } macro_rules! secp_verify_sig { ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => { match $secp_ctx.verify($msg, $sig, $pubkey) { Ok(_) => {}, Err(_) => return Err(LightningError{err: "Invalid signature from remote node".to_owned(), action: ErrorAction::IgnoreError}), } }; } impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: ChainWatchInterface, L::Target: Logger { fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> { self.network_graph.write().unwrap().update_node_from_announcement(msg, Some(&self.secp_ctx)) } fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> { if msg.contents.node_id_1 == msg.contents.node_id_2 || msg.contents.bitcoin_key_1 == msg.contents.bitcoin_key_2 { return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError}); } let utxo_value = match self.chain_monitor.get_chain_utxo(msg.contents.chain_hash, msg.contents.short_channel_id) { Ok((script_pubkey, value)) => { let expected_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2) .push_slice(&msg.contents.bitcoin_key_1.serialize()) .push_slice(&msg.contents.bitcoin_key_2.serialize()) .push_opcode(opcodes::all::OP_PUSHNUM_2) .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh(); if script_pubkey != expected_script { return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", script_pubkey.to_hex(), expected_script.to_hex()), action: ErrorAction::IgnoreError}); } //TODO: Check if value is worth storing, use it to inform routing, and compare it //to the new HTLC max field in channel_update Some(value) }, Err(ChainError::NotSupported) => { // Tentatively accept, potentially exposing us to DoS attacks None }, Err(ChainError::NotWatched) => { return Err(LightningError{err: format!("Channel announced on an unknown chain ({})", msg.contents.chain_hash.encode().to_hex()), action: ErrorAction::IgnoreError}); }, Err(ChainError::UnknownTx) => { return Err(LightningError{err: "Channel announced without corresponding UTXO entry".to_owned(), action: ErrorAction::IgnoreError}); }, }; let result = self.network_graph.write().unwrap().update_channel_from_announcement(msg, utxo_value, Some(&self.secp_ctx)); log_trace!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" 
} else { "" }); result } fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) { match update { &msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => { let _ = self.network_graph.write().unwrap().update_channel(msg, Some(&self.secp_ctx)); }, &msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } => { self.network_graph.write().unwrap().close_channel_from_update(short_channel_id, is_permanent); }, &msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } => { self.network_graph.write().unwrap().fail_node(node_id, is_permanent); }, } } fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> { self.network_graph.write().unwrap().update_channel(msg, Some(&self.secp_ctx)) } fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); let mut iter = network_graph.get_channels().range(starting_point..); while result.len() < batch_amount as usize { if let Some((_, ref chan)) = iter.next() { if chan.announcement_message.is_some() { let chan_announcement = chan.announcement_message.clone().unwrap(); let mut one_to_two_announcement: Option<msgs::ChannelUpdate> = None; let mut two_to_one_announcement: Option<msgs::ChannelUpdate> = None; if let Some(one_to_two) = chan.one_to_two.as_ref() { one_to_two_announcement = one_to_two.last_update_message.clone(); } if let Some(two_to_one) = chan.two_to_one.as_ref() { two_to_one_announcement = two_to_one.last_update_message.clone(); } result.push((chan_announcement, one_to_two_announcement, two_to_one_announcement)); } else { // TODO: We may end up sending un-announced channel_updates if we are sending // initial sync data while receiving announce/updates for this channel. } } else { return result; } } result } fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement> { let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); let mut iter = if let Some(pubkey) = starting_point { let mut iter = network_graph.get_nodes().range((*pubkey)..); iter.next(); iter } else { network_graph.get_nodes().range(..) }; while result.len() < batch_amount as usize { if let Some((_, ref node)) = iter.next() { if let Some(node_info) = node.announcement_info.as_ref() { if node_info.announcement_message.is_some() { result.push(node_info.announcement_message.clone().unwrap()); } } } else { return result; } } result } fn should_request_full_sync(&self, _node_id: &PublicKey) -> bool { //TODO: Determine whether to request a full sync based on the network map. const FULL_SYNCS_TO_REQUEST: usize = 5; if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST { self.full_syncs_requested.fetch_add(1, Ordering::AcqRel); true } else { false } } } #[derive(PartialEq, Debug)] /// Details about one direction of a channel. Received /// within a channel update. pub struct DirectionalChannelInfo { /// When the last update to the channel direction was issued. /// Value is opaque, as set in the announcement. pub last_update: u32, /// Whether the channel can be currently used for payments (in this one direction). pub enabled: bool, /// The difference in CLTV values that you must have when routing through this channel. 
pub cltv_expiry_delta: u16, /// The minimum value, which must be relayed to the next hop via the channel pub htlc_minimum_msat: u64, /// The maximum value which may be relayed to the next hop via the channel. pub htlc_maximum_msat: Option<u64>, /// Fees charged when the channel is used for routing pub fees: RoutingFees, /// Most recent update for the channel received from the network /// Mostly redundant with the data we store in fields explicitly. /// Everything else is useful only for sending out for initial routing sync. /// Not stored if contains excess data to prevent DoS. pub last_update_message: Option<ChannelUpdate>, } impl fmt::Display for DirectionalChannelInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fees {:?}", self.last_update, self.enabled, self.cltv_expiry_delta, self.htlc_minimum_msat, self.fees)?; Ok(()) } } impl_writeable!(DirectionalChannelInfo, 0, { last_update, enabled, cltv_expiry_delta, htlc_minimum_msat, htlc_maximum_msat, fees, last_update_message }); #[derive(PartialEq)] /// Details about a channel (both directions). /// Received within a channel announcement. pub struct ChannelInfo { /// Protocol features of a channel communicated during its announcement pub features: ChannelFeatures, /// Source node of the first direction of a channel pub node_one: PublicKey, /// Details about the first direction of a channel pub one_to_two: Option<DirectionalChannelInfo>, /// Source node of the second direction of a channel pub node_two: PublicKey, /// Details about the second direction of a channel pub two_to_one: Option<DirectionalChannelInfo>, /// The channel capacity as seen on-chain, if chain lookup is available. pub capacity_sats: Option<u64>, /// An initial announcement of the channel /// Mostly redundant with the data we store in fields explicitly. /// Everything else is useful only for sending out for initial routing sync. /// Not stored if contains excess data to prevent DoS. pub announcement_message: Option<ChannelAnnouncement>, } impl fmt::Display for ChannelInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}", log_bytes!(self.features.encode()), log_pubkey!(self.node_one), self.one_to_two, log_pubkey!(self.node_two), self.two_to_one)?; Ok(()) } } impl_writeable!(ChannelInfo, 0, { features, node_one, one_to_two, node_two, two_to_one, capacity_sats, announcement_message }); /// Fees for routing via a given channel or a node #[derive(Eq, PartialEq, Copy, Clone, Debug)] pub struct RoutingFees { /// Flat routing fee in millisatoshis pub base_msat: u32, /// Liquidity-based routing fee in millionths of a routed amount. /// In other words, 10000 is 1%. pub proportional_millionths: u32, } impl Readable for RoutingFees { fn read<R: ::std::io::Read>(reader: &mut R) -> Result<RoutingFees, DecodeError> { let base_msat: u32 = Readable::read(reader)?; let proportional_millionths: u32 = Readable::read(reader)?; Ok(RoutingFees { base_msat, proportional_millionths, }) } } impl Writeable for RoutingFees { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> { self.base_msat.write(writer)?; self.proportional_millionths.write(writer)?; Ok(()) } } #[derive(PartialEq, Debug)] /// Information received in the latest node_announcement from this node.
pub struct NodeAnnouncementInfo { /// Protocol features the node announced support for pub features: NodeFeatures, /// When the last known update to the node state was issued. /// Value is opaque, as set in the announcement. pub last_update: u32, /// Color assigned to the node pub rgb: [u8; 3], /// Moniker assigned to the node. /// May be invalid or malicious (eg control chars), /// should not be exposed to the user. pub alias: [u8; 32], /// Internet-level addresses via which one can connect to the node pub addresses: Vec<NetAddress>, /// An initial announcement of the node /// Mostly redundant with the data we store in fields explicitly. /// Everything else is useful only for sending out for initial routing sync. /// Not stored if contains excess data to prevent DoS. pub announcement_message: Option<NodeAnnouncement> } impl Writeable for NodeAnnouncementInfo { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> { self.features.write(writer)?; self.last_update.write(writer)?; self.rgb.write(writer)?; self.alias.write(writer)?; (self.addresses.len() as u64).write(writer)?; for ref addr in &self.addresses { addr.write(writer)?; } self.announcement_message.write(writer)?; Ok(()) } } impl Readable for NodeAnnouncementInfo { fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NodeAnnouncementInfo, DecodeError> { let features = Readable::read(reader)?; let last_update = Readable::read(reader)?; let rgb = Readable::read(reader)?; let alias = Readable::read(reader)?; let addresses_count: u64 = Readable::read(reader)?; let mut addresses = Vec::with_capacity(cmp::min(addresses_count, MAX_ALLOC_SIZE / 40) as usize); for _ in 0..addresses_count { match Readable::read(reader) { Ok(Ok(addr)) => { addresses.push(addr); }, Ok(Err(_)) => return Err(DecodeError::InvalidValue), Err(DecodeError::ShortRead) => return Err(DecodeError::BadLengthDescriptor), _ => unreachable!(), } } let announcement_message = Readable::read(reader)?; Ok(NodeAnnouncementInfo { features, last_update, rgb, alias, addresses, announcement_message }) } } #[derive(PartialEq)] /// Details about a node in the network, known from the network announcement. pub struct NodeInfo { /// All valid channels a node has announced pub channels: Vec<u64>, /// Lowest fees enabling routing via any of the enabled, known channels to a node. /// The two fields (flat and proportional fee) are independent, /// meaning they don't have to refer to the same channel. pub lowest_inbound_channel_fees: Option<RoutingFees>, /// More information about a node from node_announcement. /// Optional because we store a Node entry after learning about it from /// a channel announcement, but before receiving a node announcement. 
pub announcement_info: Option<NodeAnnouncementInfo> } impl fmt::Display for NodeInfo { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "lowest_inbound_channel_fees: {:?}, channels: {:?}, announcement_info: {:?}", self.lowest_inbound_channel_fees, &self.channels[..], self.announcement_info)?; Ok(()) } } impl Writeable for NodeInfo { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> { (self.channels.len() as u64).write(writer)?; for ref chan in self.channels.iter() { chan.write(writer)?; } self.lowest_inbound_channel_fees.write(writer)?; self.announcement_info.write(writer)?; Ok(()) } } const MAX_ALLOC_SIZE: u64 = 64*1024; impl Readable for NodeInfo { fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NodeInfo, DecodeError> { let channels_count: u64 = Readable::read(reader)?; let mut channels = Vec::with_capacity(cmp::min(channels_count, MAX_ALLOC_SIZE / 8) as usize); for _ in 0..channels_count { channels.push(Readable::read(reader)?); } let lowest_inbound_channel_fees = Readable::read(reader)?; let announcement_info = Readable::read(reader)?; Ok(NodeInfo { channels, lowest_inbound_channel_fees, announcement_info, }) } } impl Writeable for NetworkGraph { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> { (self.channels.len() as u64).write(writer)?; for (ref chan_id, ref chan_info) in self.channels.iter() { (*chan_id).write(writer)?; chan_info.write(writer)?; } (self.nodes.len() as u64).write(writer)?; for (ref node_id, ref node_info) in self.nodes.iter() { node_id.write(writer)?; node_info.write(writer)?; } Ok(()) } } impl Readable for NetworkGraph { fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> { let channels_count: u64 = Readable::read(reader)?; let mut channels = BTreeMap::new(); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info = Readable::read(reader)?; channels.insert(chan_id, chan_info); } let nodes_count: u64 = Readable::read(reader)?; let mut nodes = BTreeMap::new(); for _ in 0..nodes_count { let node_id = Readable::read(reader)?; let node_info = Readable::read(reader)?; nodes.insert(node_id, node_info); } Ok(NetworkGraph { channels, nodes, }) } } impl fmt::Display for NetworkGraph { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "Network map\n[Channels]\n")?; for (key, val) in self.channels.iter() { write!(f, " {}: {}\n", key, val)?; } write!(f, "[Nodes]\n")?; for (key, val) in self.nodes.iter() { write!(f, " {}: {}\n", log_pubkey!(key), val)?; } Ok(()) } } impl NetworkGraph { /// Returns all known valid channels' short ids along with announced channel info. /// /// (C-not exported) because we have no mapping for `BTreeMap`s pub fn get_channels<'a>(&'a self) -> &'a BTreeMap<u64, ChannelInfo> { &self.channels } /// Returns all known nodes' public keys along with announced node info. /// /// (C-not exported) because we have no mapping for `BTreeMap`s pub fn get_nodes<'a>(&'a self) -> &'a BTreeMap<PublicKey, NodeInfo> { &self.nodes } /// Get network addresses by node id. /// Returns None if the requested node is completely unknown, /// or if node announcement for the node was never received. /// /// (C-not exported) as there is no practical way to track lifetimes of returned values. 
pub fn get_addresses<'a>(&'a self, pubkey: &PublicKey) -> Option<&'a Vec<NetAddress>> { if let Some(node) = self.nodes.get(pubkey) { if let Some(node_info) = node.announcement_info.as_ref() { return Some(&node_info.addresses) } } None } /// Creates a new, empty, network graph. pub fn new() -> NetworkGraph { Self { channels: BTreeMap::new(), nodes: BTreeMap::new(), } } /// For an already known node (from channel announcements), update its stored properties from a given node announcement /// Announcement signatures are checked here only if Secp256k1 object is provided. fn update_node_from_announcement(&mut self, msg: &msgs::NodeAnnouncement, secp_ctx: Option<&Secp256k1<secp256k1::VerifyOnly>>) -> Result<bool, LightningError> { if let Some(sig_verifier) = secp_ctx { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); secp_verify_sig!(sig_verifier, &msg_hash, &msg.signature, &msg.contents.node_id); } match self.nodes.get_mut(&msg.contents.node_id) { None => Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}), Some(node) => { if let Some(node_info) = node.announcement_info.as_ref() { if node_info.last_update >= msg.contents.timestamp { return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); } } let should_relay = msg.contents.excess_data.is_empty() && msg.contents.excess_address_data.is_empty(); node.announcement_info = Some(NodeAnnouncementInfo { features: msg.contents.features.clone(), last_update: msg.contents.timestamp, rgb: msg.contents.rgb, alias: msg.contents.alias, addresses: msg.contents.addresses.clone(), announcement_message: if should_relay { Some(msg.clone()) } else { None }, }); Ok(should_relay) } } } /// For a new or already known (from previous announcement) channel, store or update channel info. /// Also store nodes (if not stored yet) the channel is between, and make node aware of this channel. /// Checking utxo on-chain is useful if we receive an update for already known channel id, /// which is probably result of a reorg. In that case, we update channel info only if the /// utxo was checked, otherwise stick to the existing update, to prevent DoS risks. /// Announcement signatures are checked here only if Secp256k1 object is provided. 
fn update_channel_from_announcement(&mut self, msg: &msgs::ChannelAnnouncement, utxo_value: Option<u64>, secp_ctx: Option<&Secp256k1<secp256k1::VerifyOnly>>) -> Result<bool, LightningError> { if let Some(sig_verifier) = secp_ctx { let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); secp_verify_sig!(sig_verifier, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1); secp_verify_sig!(sig_verifier, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2); secp_verify_sig!(sig_verifier, &msg_hash, &msg.bitcoin_signature_1, &msg.contents.bitcoin_key_1); secp_verify_sig!(sig_verifier, &msg_hash, &msg.bitcoin_signature_2, &msg.contents.bitcoin_key_2); } let should_relay = msg.contents.excess_data.is_empty(); let chan_info = ChannelInfo { features: msg.contents.features.clone(), node_one: msg.contents.node_id_1.clone(), one_to_two: None, node_two: msg.contents.node_id_2.clone(), two_to_one: None, capacity_sats: utxo_value, announcement_message: if should_relay { Some(msg.clone()) } else { None }, }; match self.channels.entry(msg.contents.short_channel_id) { BtreeEntry::Occupied(mut entry) => { //TODO: because asking the blockchain if short_channel_id is valid is only optional //in the blockchain API, we need to handle it smartly here, though it's unclear //exactly how... if utxo_value.is_some() { // Either our UTXO provider is busted, there was a reorg, or the UTXO provider // only sometimes returns results. In any case remove the previous entry. Note // that the spec expects us to "blacklist" the node_ids involved, but we can't // do that because // a) we don't *require* a UTXO provider that always returns results. // b) we don't track UTXOs of channels we know about and remove them if they // get reorg'd out. // c) it's unclear how to do so without exposing ourselves to massive DoS risk. Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.contents.short_channel_id); *entry.get_mut() = chan_info; } else { return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError}) } }, BtreeEntry::Vacant(entry) => { entry.insert(chan_info); } }; macro_rules! add_channel_to_node { ( $node_id: expr ) => { match self.nodes.entry($node_id) { BtreeEntry::Occupied(node_entry) => { node_entry.into_mut().channels.push(msg.contents.short_channel_id); }, BtreeEntry::Vacant(node_entry) => { node_entry.insert(NodeInfo { channels: vec!(msg.contents.short_channel_id), lowest_inbound_channel_fees: None, announcement_info: None, }); } } }; } add_channel_to_node!(msg.contents.node_id_1); add_channel_to_node!(msg.contents.node_id_2); Ok(should_relay) } /// Close a channel if a corresponding HTLC fail was sent. /// If permanent, removes a channel from the local storage. /// May cause the removal of nodes too, if this was their last channel. /// If not permanent, makes channels unavailable for routing. 
pub fn close_channel_from_update(&mut self, short_channel_id: u64, is_permanent: bool) { if is_permanent { if let Some(chan) = self.channels.remove(&short_channel_id) { Self::remove_channel_in_nodes(&mut self.nodes, &chan, short_channel_id); } } else { if let Some(chan) = self.channels.get_mut(&short_channel_id) { if let Some(one_to_two) = chan.one_to_two.as_mut() { one_to_two.enabled = false; } if let Some(two_to_one) = chan.two_to_one.as_mut() { two_to_one.enabled = false; } } } } fn fail_node(&mut self, _node_id: &PublicKey, is_permanent: bool) { if is_permanent { // TODO: Wholly remove the node } else { // TODO: downgrade the node } } /// For an already known (from announcement) channel, update info about one of the directions of a channel. /// Announcement signatures are checked here only if Secp256k1 object is provided. fn update_channel(&mut self, msg: &msgs::ChannelUpdate, secp_ctx: Option<&Secp256k1<secp256k1::VerifyOnly>>) -> Result<bool, LightningError> { let dest_node_id; let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1); let chan_was_enabled; match self.channels.get_mut(&msg.contents.short_channel_id) { None => return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}), Some(channel) => { if let OptionalField::Present(htlc_maximum_msat) = msg.contents.htlc_maximum_msat { if htlc_maximum_msat > MAX_VALUE_MSAT { return Err(LightningError{err: "htlc_maximum_msat is larger than maximum possible msats".to_owned(), action: ErrorAction::IgnoreError}); } if let Some(capacity_sats) = channel.capacity_sats { // It's possible channel capacity is available now, although it wasn't available at announcement (so the field is None). // Don't query UTXO set here to reduce DoS risks. if htlc_maximum_msat > capacity_sats * 1000 { return Err(LightningError{err: "htlc_maximum_msat is larger than channel capacity".to_owned(), action: ErrorAction::IgnoreError}); } } } macro_rules! 
maybe_update_channel_info { ( $target: expr, $src_node: expr) => { if let Some(existing_chan_info) = $target.as_ref() { if existing_chan_info.last_update >= msg.contents.timestamp { return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); } chan_was_enabled = existing_chan_info.enabled; } else { chan_was_enabled = false; } let last_update_message = if msg.contents.excess_data.is_empty() { Some(msg.clone()) } else { None }; let updated_channel_dir_info = DirectionalChannelInfo { enabled: chan_enabled, last_update: msg.contents.timestamp, cltv_expiry_delta: msg.contents.cltv_expiry_delta, htlc_minimum_msat: msg.contents.htlc_minimum_msat, htlc_maximum_msat: if let OptionalField::Present(max_value) = msg.contents.htlc_maximum_msat { Some(max_value) } else { None }, fees: RoutingFees { base_msat: msg.contents.fee_base_msat, proportional_millionths: msg.contents.fee_proportional_millionths, }, last_update_message }; $target = Some(updated_channel_dir_info); } } let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); if msg.contents.flags & 1 == 1 { dest_node_id = channel.node_one.clone(); if let Some(sig_verifier) = secp_ctx { secp_verify_sig!(sig_verifier, &msg_hash, &msg.signature, &channel.node_two); } maybe_update_channel_info!(channel.two_to_one, channel.node_two); } else { dest_node_id = channel.node_two.clone(); if let Some(sig_verifier) = secp_ctx { secp_verify_sig!(sig_verifier, &msg_hash, &msg.signature, &channel.node_one); } maybe_update_channel_info!(channel.one_to_two, channel.node_one); } } } if chan_enabled { let node = self.nodes.get_mut(&dest_node_id).unwrap(); let mut base_msat = msg.contents.fee_base_msat; let mut proportional_millionths = msg.contents.fee_proportional_millionths; if let Some(fees) = node.lowest_inbound_channel_fees { base_msat = cmp::min(base_msat, fees.base_msat); proportional_millionths = cmp::min(proportional_millionths, fees.proportional_millionths); } node.lowest_inbound_channel_fees = Some(RoutingFees { base_msat, proportional_millionths }); } else if chan_was_enabled { let node = self.nodes.get_mut(&dest_node_id).unwrap(); let mut lowest_inbound_channel_fees = None; for chan_id in node.channels.iter() { let chan = self.channels.get(chan_id).unwrap(); let chan_info_opt; if chan.node_one == dest_node_id { chan_info_opt = chan.two_to_one.as_ref(); } else { chan_info_opt = chan.one_to_two.as_ref(); } if let Some(chan_info) = chan_info_opt { if chan_info.enabled { let fees = lowest_inbound_channel_fees.get_or_insert(RoutingFees { base_msat: u32::max_value(), proportional_millionths: u32::max_value() }); fees.base_msat = cmp::min(fees.base_msat, chan_info.fees.base_msat); fees.proportional_millionths = cmp::min(fees.proportional_millionths, chan_info.fees.proportional_millionths); } } } node.lowest_inbound_channel_fees = lowest_inbound_channel_fees; } Ok(msg.contents.excess_data.is_empty()) } fn remove_channel_in_nodes(nodes: &mut BTreeMap<PublicKey, NodeInfo>, chan: &ChannelInfo, short_channel_id: u64) { macro_rules! 
remove_from_node { ($node_id: expr) => { if let BtreeEntry::Occupied(mut entry) = nodes.entry($node_id) { entry.get_mut().channels.retain(|chan_id| { short_channel_id != *chan_id }); if entry.get().channels.is_empty() { entry.remove_entry(); } } else { panic!("Had channel that pointed to unknown node (ie inconsistent network map)!"); } } } remove_from_node!(chan.node_one); remove_from_node!(chan.node_two); } } #[cfg(test)] mod tests { use chain::chaininterface; use ln::features::{ChannelFeatures, NodeFeatures}; use routing::network_graph::{NetGraphMsgHandler, NetworkGraph}; use ln::msgs::{OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement, UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, HTLCFailChannelUpdate, MAX_VALUE_MSAT}; use util::test_utils; use util::logger::Logger; use util::ser::{Readable, Writeable}; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; use bitcoin::network::constants::Network; use bitcoin::blockdata::constants::genesis_block; use bitcoin::blockdata::script::Builder; use bitcoin::blockdata::opcodes; use hex; use bitcoin::secp256k1::key::{PublicKey, SecretKey}; use bitcoin::secp256k1::{All, Secp256k1}; use std::sync::Arc; fn create_net_graph_msg_handler() -> (Secp256k1<All>, NetGraphMsgHandler<Arc<chaininterface::ChainWatchInterfaceUtil>, Arc<test_utils::TestLogger>>) { let secp_ctx = Secp256k1::new(); let logger = Arc::new(test_utils::TestLogger::new()); let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet)); let net_graph_msg_handler = NetGraphMsgHandler::new(chain_monitor, Arc::clone(&logger)); (secp_ctx, net_graph_msg_handler) } #[test] fn request_full_sync_finite_times() { let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap()); assert!(net_graph_msg_handler.should_request_full_sync(&node_id)); assert!(net_graph_msg_handler.should_request_full_sync(&node_id)); assert!(net_graph_msg_handler.should_request_full_sync(&node_id)); assert!(net_graph_msg_handler.should_request_full_sync(&node_id)); assert!(net_graph_msg_handler.should_request_full_sync(&node_id)); assert!(!net_graph_msg_handler.should_request_full_sync(&node_id)); } #[test] fn handling_node_announcements() { let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); let zero_hash = Sha256dHash::hash(&[0; 32]); let first_announcement_time = 500; let mut unsigned_announcement = UnsignedNodeAnnouncement { features: NodeFeatures::known(), timestamp: first_announcement_time, node_id: node_id_1, rgb: [0; 3], alias: [0; 32], addresses: Vec::new(), excess_address_data: Vec::new(), excess_data: Vec::new(), }; let mut msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_announcement = NodeAnnouncement { signature: secp_ctx.sign(&msghash, node_1_privkey), contents: unsigned_announcement.clone() }; match 
net_graph_msg_handler.handle_node_announcement(&valid_announcement) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!("No existing channels for node_announcement", e.err)
		};

		{
			// Announce a channel to add a corresponding node.
			let unsigned_announcement = UnsignedChannelAnnouncement {
				features: ChannelFeatures::known(),
				chain_hash: genesis_block(Network::Testnet).header.block_hash(),
				short_channel_id: 0,
				node_id_1,
				node_id_2,
				bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey),
				bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey),
				excess_data: Vec::new(),
			};
			let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
			let valid_announcement = ChannelAnnouncement {
				node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
				node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
				bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
				bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
				contents: unsigned_announcement.clone(),
			};
			match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
				Ok(res) => assert!(res),
				_ => panic!()
			};
		}

		match net_graph_msg_handler.handle_node_announcement(&valid_announcement) {
			Ok(res) => assert!(res),
			Err(_) => panic!()
		};

		let fake_msghash = hash_to_message!(&zero_hash);
		match net_graph_msg_handler.handle_node_announcement(
			&NodeAnnouncement {
				signature: secp_ctx.sign(&fake_msghash, node_1_privkey),
				contents: unsigned_announcement.clone()
			}) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!(e.err, "Invalid signature from remote node")
		};

		unsigned_announcement.timestamp += 1000;
		unsigned_announcement.excess_data.push(1);
		msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
		let announcement_with_data = NodeAnnouncement {
			signature: secp_ctx.sign(&msghash, node_1_privkey),
			contents: unsigned_announcement.clone()
		};
		// Return false because it contains excess data.
		match net_graph_msg_handler.handle_node_announcement(&announcement_with_data) {
			Ok(res) => assert!(!res),
			Err(_) => panic!()
		};
		unsigned_announcement.excess_data = Vec::new();

		// Even though the previous announcement was not relayed further, we still accepted it,
		// so we now won't accept announcements before the previous one.
		unsigned_announcement.timestamp -= 10;
		msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
		let outdated_announcement = NodeAnnouncement {
			signature: secp_ctx.sign(&msghash, node_1_privkey),
			contents: unsigned_announcement.clone()
		};
		match net_graph_msg_handler.handle_node_announcement(&outdated_announcement) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!(e.err, "Update older than last processed update")
		};
	}

	#[test]
	fn handling_channel_announcements() {
		let secp_ctx = Secp256k1::new();
		let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
		let chain_monitor = Arc::new(test_utils::TestChainWatcher::new());
		let net_graph_msg_handler = NetGraphMsgHandler::new(chain_monitor.clone(), Arc::clone(&logger));

		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();

		let good_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
			.push_slice(&PublicKey::from_secret_key(&secp_ctx, node_1_btckey).serialize())
			.push_slice(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey).serialize())
			.push_opcode(opcodes::all::OP_PUSHNUM_2)
			.push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();

		let mut unsigned_announcement = UnsignedChannelAnnouncement {
			features: ChannelFeatures::known(),
			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
			short_channel_id: 0,
			node_id_1,
			node_id_2,
			bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey),
			bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey),
			excess_data: Vec::new(),
		};

		let mut msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
		let valid_announcement = ChannelAnnouncement {
			node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
			node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
			bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
			bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
			contents: unsigned_announcement.clone(),
		};

		// Test the case where UTXO lookups are not supported.
		*chain_monitor.utxo_ret.lock().unwrap() = Err(chaininterface::ChainError::NotSupported);
		match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
			Ok(res) => assert!(res),
			_ => panic!()
		};
		{
			let network = net_graph_msg_handler.network_graph.read().unwrap();
			match network.get_channels().get(&unsigned_announcement.short_channel_id) {
				None => panic!(),
				Some(_) => ()
			}
		}

		// If we receive an announcement for the same channel (with UTXO lookups disabled),
		// drop the new one on the floor, since we can't see any changes.
		match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!(e.err, "Already have knowledge of channel")
		};

		// Test the case where the associated transaction is not on-chain (or not confirmed).
		*chain_monitor.utxo_ret.lock().unwrap() = Err(chaininterface::ChainError::UnknownTx);
		unsigned_announcement.short_channel_id += 1;
		msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
		let valid_announcement = ChannelAnnouncement {
			node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
			node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
			bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
			bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
			contents: unsigned_announcement.clone(),
		};
		match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry")
		};

		// Now test if the transaction is found in the UTXO set and the script is correct.
		unsigned_announcement.short_channel_id += 1;
		*chain_monitor.utxo_ret.lock().unwrap() = Ok((good_script.clone(), 0));
		msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
		let valid_announcement = ChannelAnnouncement {
			node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
			node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
			bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
			bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
			contents: unsigned_announcement.clone(),
		};
		match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
			Ok(res) => assert!(res),
			_ => panic!()
		};
		{
			let network = net_graph_msg_handler.network_graph.read().unwrap();
			match network.get_channels().get(&unsigned_announcement.short_channel_id) {
				None => panic!(),
				Some(_) => ()
			}
		}

		// If we receive an announcement for the same channel (but the TX is not confirmed),
		// drop the new one on the floor, since we can't see any changes.
*chain_monitor.utxo_ret.lock().unwrap() = Err(chaininterface::ChainError::UnknownTx); match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry") }; // But if it is confirmed, replace the channel *chain_monitor.utxo_ret.lock().unwrap() = Ok((good_script, 0)); unsigned_announcement.features = ChannelFeatures::empty(); msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_announcement = ChannelAnnouncement { node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), contents: unsigned_announcement.clone(), }; match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(res), _ => panic!() }; { let network = net_graph_msg_handler.network_graph.read().unwrap(); match network.get_channels().get(&unsigned_announcement.short_channel_id) { Some(channel_entry) => { assert_eq!(channel_entry.features, ChannelFeatures::empty()); }, _ => panic!() } } // Don't relay valid channels with excess data unsigned_announcement.short_channel_id += 1; unsigned_announcement.excess_data.push(1); msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_announcement = ChannelAnnouncement { node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), contents: unsigned_announcement.clone(), }; match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) { Ok(res) => assert!(!res), _ => panic!() }; unsigned_announcement.excess_data = Vec::new(); let invalid_sig_announcement = ChannelAnnouncement { node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), bitcoin_signature_2: secp_ctx.sign(&msghash, node_1_btckey), contents: unsigned_announcement.clone(), }; match net_graph_msg_handler.handle_channel_announcement(&invalid_sig_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Invalid signature from remote node") }; unsigned_announcement.node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let channel_to_itself_announcement = ChannelAnnouncement { node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), node_signature_2: secp_ctx.sign(&msghash, node_1_privkey), bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), contents: unsigned_announcement.clone(), }; match net_graph_msg_handler.handle_channel_announcement(&channel_to_itself_announcement) { Ok(_) => panic!(), Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself") }; } #[test] fn handling_channel_update() { let secp_ctx = Secp256k1::new(); let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new()); let chain_monitor = Arc::new(test_utils::TestChainWatcher::new()); let net_graph_msg_handler = NetGraphMsgHandler::new(chain_monitor.clone(), Arc::clone(&logger)); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); 
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
		let zero_hash = Sha256dHash::hash(&[0; 32]);
		let short_channel_id = 0;
		let chain_hash = genesis_block(Network::Testnet).header.block_hash();
		let amount_sats = 1_000_000;

		{
			// Announce a channel we will update
			let good_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
				.push_slice(&PublicKey::from_secret_key(&secp_ctx, node_1_btckey).serialize())
				.push_slice(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey).serialize())
				.push_opcode(opcodes::all::OP_PUSHNUM_2)
				.push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
			*chain_monitor.utxo_ret.lock().unwrap() = Ok((good_script.clone(), amount_sats));
			let unsigned_announcement = UnsignedChannelAnnouncement {
				features: ChannelFeatures::empty(),
				chain_hash,
				short_channel_id,
				node_id_1,
				node_id_2,
				bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey),
				bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey),
				excess_data: Vec::new(),
			};
			let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
			let valid_channel_announcement = ChannelAnnouncement {
				node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
				node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
				bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
				bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
				contents: unsigned_announcement.clone(),
			};
			match net_graph_msg_handler.handle_channel_announcement(&valid_channel_announcement) {
				Ok(_) => (),
				Err(_) => panic!()
			};
		}

		let mut unsigned_channel_update = UnsignedChannelUpdate {
			chain_hash,
			short_channel_id,
			timestamp: 100,
			flags: 0,
			cltv_expiry_delta: 144,
			htlc_minimum_msat: 1000000,
			htlc_maximum_msat: OptionalField::Absent,
			fee_base_msat: 10000,
			fee_proportional_millionths: 20,
			excess_data: Vec::new()
		};
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
		let valid_channel_update = ChannelUpdate {
			signature: secp_ctx.sign(&msghash, node_1_privkey),
			contents: unsigned_channel_update.clone()
		};
		match net_graph_msg_handler.handle_channel_update(&valid_channel_update) {
			Ok(res) => assert!(res),
			_ => panic!()
		};

		{
			let network = net_graph_msg_handler.network_graph.read().unwrap();
			match network.get_channels().get(&short_channel_id) {
				None => panic!(),
				Some(channel_info) => {
					assert_eq!(channel_info.one_to_two.as_ref().unwrap().cltv_expiry_delta, 144);
					assert!(channel_info.two_to_one.is_none());
				}
			}
		}

		unsigned_channel_update.timestamp += 100;
		unsigned_channel_update.excess_data.push(1);
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
		let valid_channel_update = ChannelUpdate {
			signature: secp_ctx.sign(&msghash, node_1_privkey),
			contents: unsigned_channel_update.clone()
		};
		// Return false because it contains excess data
		match net_graph_msg_handler.handle_channel_update(&valid_channel_update) {
			Ok(res) => assert!(!res),
			_ => panic!()
		};

		unsigned_channel_update.timestamp += 10;
		unsigned_channel_update.short_channel_id += 1;
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
		let valid_channel_update = ChannelUpdate {
			signature: secp_ctx.sign(&msghash, node_1_privkey),
			contents: unsigned_channel_update.clone()
		};
		match net_graph_msg_handler.handle_channel_update(&valid_channel_update) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!(e.err, "Couldn't find channel for update")
		};

		unsigned_channel_update.short_channel_id = short_channel_id;
		unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(MAX_VALUE_MSAT + 1);
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
		let valid_channel_update = ChannelUpdate {
			signature: secp_ctx.sign(&msghash, node_1_privkey),
			contents: unsigned_channel_update.clone()
		};
		match net_graph_msg_handler.handle_channel_update(&valid_channel_update) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than maximum possible msats")
		};
		unsigned_channel_update.htlc_maximum_msat = OptionalField::Absent;

		unsigned_channel_update.htlc_maximum_msat = OptionalField::Present(amount_sats * 1000 + 1);
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
		let valid_channel_update = ChannelUpdate {
			signature: secp_ctx.sign(&msghash, node_1_privkey),
			contents: unsigned_channel_update.clone()
		};
		match net_graph_msg_handler.handle_channel_update(&valid_channel_update) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than channel capacity")
		};
		unsigned_channel_update.htlc_maximum_msat = OptionalField::Absent;

		// Even though the previous update was not relayed further, we still accepted it,
		// so we now won't accept an update before the previous one.
		unsigned_channel_update.timestamp -= 10;
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
		let valid_channel_update = ChannelUpdate {
			signature: secp_ctx.sign(&msghash, node_1_privkey),
			contents: unsigned_channel_update.clone()
		};
		match net_graph_msg_handler.handle_channel_update(&valid_channel_update) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!(e.err, "Update older than last processed update")
		};
		unsigned_channel_update.timestamp += 500;

		let fake_msghash = hash_to_message!(&zero_hash);
		let invalid_sig_channel_update = ChannelUpdate {
			signature: secp_ctx.sign(&fake_msghash, node_1_privkey),
			contents: unsigned_channel_update.clone()
		};
		match net_graph_msg_handler.handle_channel_update(&invalid_sig_channel_update) {
			Ok(_) => panic!(),
			Err(e) => assert_eq!(e.err, "Invalid signature from remote node")
		};
	}

	#[test]
	fn handling_htlc_fail_channel_update() {
		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
		let short_channel_id = 0;
		let chain_hash = genesis_block(Network::Testnet).header.block_hash();

		{
			// There are no nodes in the table at the beginning.
let network = net_graph_msg_handler.network_graph.read().unwrap(); assert_eq!(network.get_nodes().len(), 0); } { // Announce a channel we will update let unsigned_announcement = UnsignedChannelAnnouncement { features: ChannelFeatures::empty(), chain_hash, short_channel_id, node_id_1, node_id_2, bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), excess_data: Vec::new(), }; let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_channel_announcement = ChannelAnnouncement { node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), contents: unsigned_announcement.clone(), }; match net_graph_msg_handler.handle_channel_announcement(&valid_channel_announcement) { Ok(_) => (), Err(_) => panic!() }; let unsigned_channel_update = UnsignedChannelUpdate { chain_hash, short_channel_id, timestamp: 100, flags: 0, cltv_expiry_delta: 144, htlc_minimum_msat: 1000000, htlc_maximum_msat: OptionalField::Absent, fee_base_msat: 10000, fee_proportional_millionths: 20, excess_data: Vec::new() }; let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); let valid_channel_update = ChannelUpdate { signature: secp_ctx.sign(&msghash, node_1_privkey), contents: unsigned_channel_update.clone() }; match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(res) => assert!(res), _ => panic!() }; } // Non-permanent closing just disables a channel { let network = net_graph_msg_handler.network_graph.read().unwrap(); match network.get_channels().get(&short_channel_id) { None => panic!(), Some(channel_info) => { assert!(channel_info.one_to_two.is_some()); } } } let channel_close_msg = HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent: false }; net_graph_msg_handler.handle_htlc_fail_channel_update(&channel_close_msg); // Non-permanent closing just disables a channel { let network = net_graph_msg_handler.network_graph.read().unwrap(); match network.get_channels().get(&short_channel_id) { None => panic!(), Some(channel_info) => { assert!(!channel_info.one_to_two.as_ref().unwrap().enabled); } } } let channel_close_msg = HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent: true }; net_graph_msg_handler.handle_htlc_fail_channel_update(&channel_close_msg); // Permanent closing deletes a channel { let network = net_graph_msg_handler.network_graph.read().unwrap(); assert_eq!(network.get_channels().len(), 0); // Nodes are also deleted because there are no associated channels anymore assert_eq!(network.get_nodes().len(), 0); } // TODO: Test HTLCFailChannelUpdate::NodeFailure, which is not implemented yet. 
} #[test] fn getting_next_channel_announcements() { let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey); let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap(); let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap(); let short_channel_id = 1; let chain_hash = genesis_block(Network::Testnet).header.block_hash(); // Channels were not announced yet. let channels_with_announcements = net_graph_msg_handler.get_next_channel_announcements(0, 1); assert_eq!(channels_with_announcements.len(), 0); { // Announce a channel we will update let unsigned_announcement = UnsignedChannelAnnouncement { features: ChannelFeatures::empty(), chain_hash, short_channel_id, node_id_1, node_id_2, bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), excess_data: Vec::new(), }; let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_channel_announcement = ChannelAnnouncement { node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), contents: unsigned_announcement.clone(), }; match net_graph_msg_handler.handle_channel_announcement(&valid_channel_announcement) { Ok(_) => (), Err(_) => panic!() }; } // Contains initial channel announcement now. let channels_with_announcements = net_graph_msg_handler.get_next_channel_announcements(short_channel_id, 1); assert_eq!(channels_with_announcements.len(), 1); if let Some(channel_announcements) = channels_with_announcements.first() { let &(_, ref update_1, ref update_2) = channel_announcements; assert_eq!(update_1, &None); assert_eq!(update_2, &None); } else { panic!(); } { // Valid channel update let unsigned_channel_update = UnsignedChannelUpdate { chain_hash, short_channel_id, timestamp: 101, flags: 0, cltv_expiry_delta: 144, htlc_minimum_msat: 1000000, htlc_maximum_msat: OptionalField::Absent, fee_base_msat: 10000, fee_proportional_millionths: 20, excess_data: Vec::new() }; let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]); let valid_channel_update = ChannelUpdate { signature: secp_ctx.sign(&msghash, node_1_privkey), contents: unsigned_channel_update.clone() }; match net_graph_msg_handler.handle_channel_update(&valid_channel_update) { Ok(_) => (), Err(_) => panic!() }; } // Now contains an initial announcement and an update. let channels_with_announcements = net_graph_msg_handler.get_next_channel_announcements(short_channel_id, 1); assert_eq!(channels_with_announcements.len(), 1); if let Some(channel_announcements) = channels_with_announcements.first() { let &(_, ref update_1, ref update_2) = channel_announcements; assert_ne!(update_1, &None); assert_eq!(update_2, &None); } else { panic!(); } { // Channel update with excess data. 
			let unsigned_channel_update = UnsignedChannelUpdate {
				chain_hash,
				short_channel_id,
				timestamp: 102,
				flags: 0,
				cltv_expiry_delta: 144,
				htlc_minimum_msat: 1000000,
				htlc_maximum_msat: OptionalField::Absent,
				fee_base_msat: 10000,
				fee_proportional_millionths: 20,
				excess_data: [1; 3].to_vec()
			};
			let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
			let valid_channel_update = ChannelUpdate {
				signature: secp_ctx.sign(&msghash, node_1_privkey),
				contents: unsigned_channel_update.clone()
			};
			match net_graph_msg_handler.handle_channel_update(&valid_channel_update) {
				Ok(_) => (),
				Err(_) => panic!()
			};
		}

		// Test that announcements with excess data won't be returned
		let channels_with_announcements = net_graph_msg_handler.get_next_channel_announcements(short_channel_id, 1);
		assert_eq!(channels_with_announcements.len(), 1);
		if let Some(channel_announcements) = channels_with_announcements.first() {
			let &(_, ref update_1, ref update_2) = channel_announcements;
			assert_eq!(update_1, &None);
			assert_eq!(update_2, &None);
		} else {
			panic!();
		}

		// A further starting point has no channels after it
		let channels_with_announcements = net_graph_msg_handler.get_next_channel_announcements(short_channel_id + 1000, 1);
		assert_eq!(channels_with_announcements.len(), 0);
	}

	#[test]
	fn getting_next_node_announcements() {
		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();
		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
		let short_channel_id = 1;
		let chain_hash = genesis_block(Network::Testnet).header.block_hash();

		// No nodes yet.
let next_announcements = net_graph_msg_handler.get_next_node_announcements(None, 10); assert_eq!(next_announcements.len(), 0); { // Announce a channel to add 2 nodes let unsigned_announcement = UnsignedChannelAnnouncement { features: ChannelFeatures::empty(), chain_hash, short_channel_id, node_id_1, node_id_2, bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey), bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey), excess_data: Vec::new(), }; let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_channel_announcement = ChannelAnnouncement { node_signature_1: secp_ctx.sign(&msghash, node_1_privkey), node_signature_2: secp_ctx.sign(&msghash, node_2_privkey), bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey), bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey), contents: unsigned_announcement.clone(), }; match net_graph_msg_handler.handle_channel_announcement(&valid_channel_announcement) { Ok(_) => (), Err(_) => panic!() }; } // Nodes were never announced let next_announcements = net_graph_msg_handler.get_next_node_announcements(None, 3); assert_eq!(next_announcements.len(), 0); { let mut unsigned_announcement = UnsignedNodeAnnouncement { features: NodeFeatures::known(), timestamp: 1000, node_id: node_id_1, rgb: [0; 3], alias: [0; 32], addresses: Vec::new(), excess_address_data: Vec::new(), excess_data: Vec::new(), }; let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_announcement = NodeAnnouncement { signature: secp_ctx.sign(&msghash, node_1_privkey), contents: unsigned_announcement.clone() }; match net_graph_msg_handler.handle_node_announcement(&valid_announcement) { Ok(_) => (), Err(_) => panic!() }; unsigned_announcement.node_id = node_id_2; let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]); let valid_announcement = NodeAnnouncement { signature: secp_ctx.sign(&msghash, node_2_privkey), contents: unsigned_announcement.clone() }; match net_graph_msg_handler.handle_node_announcement(&valid_announcement) { Ok(_) => (), Err(_) => panic!() }; } let next_announcements = net_graph_msg_handler.get_next_node_announcements(None, 3); assert_eq!(next_announcements.len(), 2); // Skip the first node. 
		let next_announcements = net_graph_msg_handler.get_next_node_announcements(Some(&node_id_1), 2);
		assert_eq!(next_announcements.len(), 1);

		{
			// A later announcement that should not be relayed (excess data) prevents us from sharing a node
			let unsigned_announcement = UnsignedNodeAnnouncement {
				features: NodeFeatures::known(),
				timestamp: 1010,
				node_id: node_id_2,
				rgb: [0; 3],
				alias: [0; 32],
				addresses: Vec::new(),
				excess_address_data: Vec::new(),
				excess_data: [1; 3].to_vec(),
			};
			let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
			let valid_announcement = NodeAnnouncement {
				signature: secp_ctx.sign(&msghash, node_2_privkey),
				contents: unsigned_announcement.clone()
			};
			match net_graph_msg_handler.handle_node_announcement(&valid_announcement) {
				Ok(res) => assert!(!res),
				Err(_) => panic!()
			};
		}

		let next_announcements = net_graph_msg_handler.get_next_node_announcements(Some(&node_id_1), 2);
		assert_eq!(next_announcements.len(), 0);
	}

	#[test]
	fn network_graph_serialization() {
		let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler();

		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();

		// Announce a channel to add a corresponding node.
		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
		let unsigned_announcement = UnsignedChannelAnnouncement {
			features: ChannelFeatures::known(),
			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
			short_channel_id: 0,
			node_id_1,
			node_id_2,
			bitcoin_key_1: PublicKey::from_secret_key(&secp_ctx, node_1_btckey),
			bitcoin_key_2: PublicKey::from_secret_key(&secp_ctx, node_2_btckey),
			excess_data: Vec::new(),
		};
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
		let valid_announcement = ChannelAnnouncement {
			node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
			node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
			bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
			bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
			contents: unsigned_announcement.clone(),
		};
		match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
			Ok(res) => assert!(res),
			_ => panic!()
		};

		let node_id = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let unsigned_announcement = UnsignedNodeAnnouncement {
			features: NodeFeatures::known(),
			timestamp: 100,
			node_id,
			rgb: [0; 3],
			alias: [0; 32],
			addresses: Vec::new(),
			excess_address_data: Vec::new(),
			excess_data: Vec::new(),
		};
		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
		let valid_announcement = NodeAnnouncement {
			signature: secp_ctx.sign(&msghash, node_1_privkey),
			contents: unsigned_announcement.clone()
		};
		match net_graph_msg_handler.handle_node_announcement(&valid_announcement) {
			Ok(_) => (),
			Err(_) => panic!()
		};

		let network = net_graph_msg_handler.network_graph.write().unwrap();
		let mut w = test_utils::TestVecWriter(Vec::new());
		assert!(!network.get_nodes().is_empty());
		assert!(!network.get_channels().is_empty());
		network.write(&mut w).unwrap();
		assert!(<NetworkGraph>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap() == *network);
	}
}
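// Illustrative sketch appended for exposition (not part of the original file):
// how `update_channel` above interprets the `flags` field of a `channel_update`.
// Bit 0 selects which direction is being updated and bit 1 is the disable bit;
// the `u16` width follows the message struct used here and is an assumption,
// as is the helper's name.
fn _example_decode_channel_update_flags(flags: u16) -> (bool, bool) {
	// Direction bit: 0 => the update was signed by node_one (fills one_to_two),
	// 1 => the update was signed by node_two (fills two_to_one).
	let from_node_two = flags & 1 == 1;
	// The channel counts as enabled unless the disable bit (bit 1) is set,
	// mirroring the `chan_enabled` computation in `update_channel`.
	let enabled = flags & (1 << 1) != (1 << 1);
	(from_node_two, enabled)
}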
38.992651
197
0.720186
0972acccb11678fff24e25d2db8ef4646f6557d0
3,569
#![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! Basic-timers
//!
//! Used by: stm32l0x1, stm32l0x2, stm32l0x3

#[cfg(not(feature = "nosync"))]
pub use crate::stm32l0::peripherals::tim6::Instance;
pub use crate::stm32l0::peripherals::tim6::{RegisterBlock, ResetValues};
pub use crate::stm32l0::peripherals::tim6::{ARR, CNT, CR1, CR2, DIER, EGR, PSC, SR};

/// Access functions for the TIM6 peripheral instance
pub mod TIM6 {
    use super::ResetValues;

    #[cfg(not(feature = "nosync"))]
    use super::Instance;

    #[cfg(not(feature = "nosync"))]
    const INSTANCE: Instance = Instance {
        addr: 0x40001000,
        _marker: ::core::marker::PhantomData,
    };

    /// Reset values for each field in TIM6
    pub const reset: ResetValues = ResetValues {
        CR1: 0x00000000,
        CR2: 0x00000000,
        DIER: 0x00000000,
        SR: 0x00000000,
        EGR: 0x00000000,
        CNT: 0x00000000,
        PSC: 0x00000000,
        ARR: 0x00000000,
    };

    #[cfg(not(feature = "nosync"))]
    #[allow(renamed_and_removed_lints)]
    #[allow(private_no_mangle_statics)]
    #[no_mangle]
    static mut TIM6_TAKEN: bool = false;

    /// Safe access to TIM6
    ///
    /// This function returns `Some(Instance)` if this instance is not
    /// currently taken, and `None` if it is. This ensures that if you
    /// do get `Some(Instance)`, you are ensured unique access to
    /// the peripheral and there cannot be data races (unless other
    /// code uses `unsafe`, of course). You can then pass the
    /// `Instance` around to other functions as required. When you're
    /// done with it, you can call `release(instance)` to return it.
    ///
    /// `Instance` itself dereferences to a `RegisterBlock`, which
    /// provides access to the peripheral's registers.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn take() -> Option<Instance> {
        external_cortex_m::interrupt::free(|_| unsafe {
            if TIM6_TAKEN {
                None
            } else {
                TIM6_TAKEN = true;
                Some(INSTANCE)
            }
        })
    }

    /// Release exclusive access to TIM6
    ///
    /// This function allows you to return an `Instance` so that it
    /// is available to `take()` again. This function will panic if
    /// you return a different `Instance` or if this instance is not
    /// already taken.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn release(inst: Instance) {
        external_cortex_m::interrupt::free(|_| unsafe {
            if TIM6_TAKEN && inst.addr == INSTANCE.addr {
                TIM6_TAKEN = false;
            } else {
                panic!("Released a peripheral which was not taken");
            }
        });
    }

    /// Unsafely steal TIM6
    ///
    /// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
    /// state.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub unsafe fn steal() -> Instance {
        TIM6_TAKEN = true;
        INSTANCE
    }
}

/// Raw pointer to TIM6
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const TIM6: *const RegisterBlock = 0x40001000 as *const _;
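// Illustrative usage sketch appended for exposition (not part of the original
// file): the take()/release() ownership dance described in the doc comments
// above. The function name is hypothetical.
#[cfg(not(feature = "nosync"))]
fn _tim6_usage_sketch() {
    if let Some(inst) = TIM6::take() {
        // `inst` dereferences to the TIM6 `RegisterBlock`, so a driver would
        // program PSC/ARR/CR1 here before handing the instance back so that
        // other code can `take()` it again.
        TIM6::release(inst);
    }
}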
32.743119
84
0.615859
bffd32a502cd4b00dac52b9a8be86dcad60e7552
933
#![no_std] use contract::{ contract_api::{account, runtime}, unwrap_or_revert::UnwrapOrRevert, }; use types::{ account::{PublicKey, Weight}, ApiError, }; enum Arg { Account = 0, Weight, } #[repr(u16)] enum Error { AddAssociatedKey = 100, } impl Into<ApiError> for Error { fn into(self) -> ApiError { ApiError::User(self as u16) } } #[no_mangle] pub extern "C" fn call() { let account: PublicKey = runtime::get_arg(Arg::Account as u32) .unwrap_or_revert_with(ApiError::MissingArgument) .unwrap_or_revert_with(ApiError::InvalidArgument); let weight_val: u32 = runtime::get_arg(Arg::Weight as u32) .unwrap_or_revert_with(ApiError::MissingArgument) .unwrap_or_revert_with(ApiError::InvalidArgument); let weight = Weight::new(weight_val as u8); account::add_associated_key(account, weight).unwrap_or_revert_with(Error::AddAssociatedKey); }
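// Illustrative sketch appended for exposition (not part of the original
// contract): the two-stage unwrap used in `call` above, spelled out for a
// hypothetical extra argument at index 2. The outer `Option` is `None` when
// the caller passed no argument at that index (`MissingArgument`); the inner
// `Result` is `Err` when the bytes could not be deserialized (`InvalidArgument`).
fn _get_extra_account_arg() -> PublicKey {
    runtime::get_arg(2)
        .unwrap_or_revert_with(ApiError::MissingArgument)
        .unwrap_or_revert_with(ApiError::InvalidArgument)
}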
23.325
96
0.6806
23eb2d439a4a013956c9b8180be70ee875212e66
521
use anyhow::Result;
use std::sync::{Arc, Barrier};
use std::thread;
use std::{error::Error, time::Duration};

/**
 * Barrier: a synchronization mechanism that makes a group of threads
 * wait for one another at a common rendezvous point.
 */
fn main() -> Result<(), Box<dyn Error>> {
    let barrier = Arc::new(Barrier::new(10));

    for _ in 0..10 {
        let c = Arc::clone(&barrier);
        thread::spawn(move || {
            println!("before wait");
            c.wait();
            println!("after wait");
        });
    }

    thread::sleep(Duration::new(5, 0));
    println!("in the end ...");

    Ok(())
}
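// Illustrative extension (not in the original example): `wait()` returns a
// `BarrierWaitResult`, and exactly one thread per barrier "generation"
// observes `is_leader() == true`, which is handy for one-off work such as
// logging or resetting shared state.
fn _leader_sketch(barrier: &Barrier) {
    if barrier.wait().is_leader() {
        println!("elected leader for this barrier generation");
    }
}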
20.84
45
0.506718
0388f16d1506c976844b780e7ab1989790b71a28
1,186
// Dummy platform to let it compile and do nothing. Only useful if you don't want a graphical backend. use crate::prelude::BTerm; use crate::Result; use parking_lot::Mutex; pub use winit::event::VirtualKeyCode; use pancurses::Window; mod main_loop; pub use main_loop::main_loop; mod font; pub use font::*; mod init; mod shader; pub use init::init_raw; pub use shader::*; mod color; pub use color::*; mod scancode_helper; pub use scancode_helper::virtual_key_code_to_scan; pub struct InitHints { pub vsync: bool, pub fullscreen: bool, pub frame_sleep_time: Option<f32>, } impl InitHints { pub fn new() -> Self { Self { vsync: true, fullscreen: false, frame_sleep_time: None, } } } pub struct PlatformGL { window: Option<Window>, color_map: Vec<CursesColor>, pub frame_sleep_time: Option<u64>, } lazy_static! { pub static ref BACKEND: Mutex<PlatformGL> = Mutex::new(PlatformGL { window: None, color_map: Vec::new(), frame_sleep_time: None }); } unsafe impl Send for PlatformGL {} unsafe impl Sync for PlatformGL {} pub fn log(s: &str) { println!("{}", s); }
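// Illustrative sketch appended for exposition (not part of the original
// backend): how the global `BACKEND` mutex is meant to be used by the rest
// of the crate. The function name is hypothetical.
fn _backend_usage_sketch() {
    // parking_lot's `lock()` returns the guard directly (no poisoning
    // `Result`), so no `unwrap()` is needed.
    let mut be = BACKEND.lock();
    be.frame_sleep_time = Some(33); // e.g. ~30 FPS pacing, in milliseconds
}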
19.766667
102
0.65683
2934d921868a8ec76c96d8d0508c6d934fa3a2b0
23,225
use rustc_errors::{Applicability, DiagnosticBuilder}; use rustc_infer::infer::TyCtxtInferExt; use rustc_middle::mir::*; use rustc_middle::ty; use rustc_mir_dataflow::move_paths::{ IllegalMoveOrigin, IllegalMoveOriginKind, LookupResult, MoveError, MovePathIndex, }; use rustc_span::{sym, Span, DUMMY_SP}; use rustc_trait_selection::traits::type_known_to_meet_bound_modulo_regions; use crate::diagnostics::{FnSelfUseKind, UseSpans}; use crate::prefixes::PrefixSet; use crate::MirBorrowckCtxt; // Often when desugaring a pattern match we may have many individual moves in // MIR that are all part of one operation from the user's point-of-view. For // example: // // let (x, y) = foo() // // would move x from the 0 field of some temporary, and y from the 1 field. We // group such errors together for cleaner error reporting. // // Errors are kept separate if they are from places with different parent move // paths. For example, this generates two errors: // // let (&x, &y) = (&String::new(), &String::new()); #[derive(Debug)] enum GroupedMoveError<'tcx> { // Place expression can't be moved from, // e.g., match x[0] { s => (), } where x: &[String] MovesFromPlace { original_path: Place<'tcx>, span: Span, move_from: Place<'tcx>, kind: IllegalMoveOriginKind<'tcx>, binds_to: Vec<Local>, }, // Part of a value expression can't be moved from, // e.g., match &String::new() { &x => (), } MovesFromValue { original_path: Place<'tcx>, span: Span, move_from: MovePathIndex, kind: IllegalMoveOriginKind<'tcx>, binds_to: Vec<Local>, }, // Everything that isn't from pattern matching. OtherIllegalMove { original_path: Place<'tcx>, use_spans: UseSpans<'tcx>, kind: IllegalMoveOriginKind<'tcx>, }, } impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> { pub(crate) fn report_move_errors(&mut self, move_errors: Vec<(Place<'tcx>, MoveError<'tcx>)>) { let grouped_errors = self.group_move_errors(move_errors); for error in grouped_errors { self.report(error); } } fn group_move_errors( &self, errors: Vec<(Place<'tcx>, MoveError<'tcx>)>, ) -> Vec<GroupedMoveError<'tcx>> { let mut grouped_errors = Vec::new(); for (original_path, error) in errors { self.append_to_grouped_errors(&mut grouped_errors, original_path, error); } grouped_errors } fn append_to_grouped_errors( &self, grouped_errors: &mut Vec<GroupedMoveError<'tcx>>, original_path: Place<'tcx>, error: MoveError<'tcx>, ) { match error { MoveError::UnionMove { .. } => { unimplemented!("don't know how to report union move errors yet.") } MoveError::IllegalMove { cannot_move_out_of: IllegalMoveOrigin { location, kind } } => { // Note: that the only time we assign a place isn't a temporary // to a user variable is when initializing it. // If that ever stops being the case, then the ever initialized // flow could be used. if let Some(StatementKind::Assign(box ( place, Rvalue::Use(Operand::Move(move_from)), ))) = self.body.basic_blocks()[location.block] .statements .get(location.statement_index) .map(|stmt| &stmt.kind) { if let Some(local) = place.as_local() { let local_decl = &self.body.local_decls[local]; // opt_match_place is the // match_span is the span of the expression being matched on // match *x.y { ... } match_place is Some(*x.y) // ^^^^ match_span is the span of *x.y // // opt_match_place is None for let [mut] x = ... 
statements, // whether or not the right-hand side is a place expression if let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var( VarBindingForm { opt_match_place: Some((opt_match_place, match_span)), binding_mode: _, opt_ty_info: _, pat_span: _, }, )))) = local_decl.local_info { let stmt_source_info = self.body.source_info(location); self.append_binding_error( grouped_errors, kind, original_path, *move_from, local, opt_match_place, match_span, stmt_source_info.span, ); return; } } } let move_spans = self.move_spans(original_path.as_ref(), location); grouped_errors.push(GroupedMoveError::OtherIllegalMove { use_spans: move_spans, original_path, kind, }); } } } fn append_binding_error( &self, grouped_errors: &mut Vec<GroupedMoveError<'tcx>>, kind: IllegalMoveOriginKind<'tcx>, original_path: Place<'tcx>, move_from: Place<'tcx>, bind_to: Local, match_place: Option<Place<'tcx>>, match_span: Span, statement_span: Span, ) { debug!("append_binding_error(match_place={:?}, match_span={:?})", match_place, match_span); let from_simple_let = match_place.is_none(); let match_place = match_place.unwrap_or(move_from); match self.move_data.rev_lookup.find(match_place.as_ref()) { // Error with the match place LookupResult::Parent(_) => { for ge in &mut *grouped_errors { if let GroupedMoveError::MovesFromPlace { span, binds_to, .. } = ge { if match_span == *span { debug!("appending local({:?}) to list", bind_to); if !binds_to.is_empty() { binds_to.push(bind_to); } return; } } } debug!("found a new move error location"); // Don't need to point to x in let x = ... . let (binds_to, span) = if from_simple_let { (vec![], statement_span) } else { (vec![bind_to], match_span) }; grouped_errors.push(GroupedMoveError::MovesFromPlace { span, move_from, original_path, kind, binds_to, }); } // Error with the pattern LookupResult::Exact(_) => { let mpi = match self.move_data.rev_lookup.find(move_from.as_ref()) { LookupResult::Parent(Some(mpi)) => mpi, // move_from should be a projection from match_place. _ => unreachable!("Probably not unreachable..."), }; for ge in &mut *grouped_errors { if let GroupedMoveError::MovesFromValue { span, move_from: other_mpi, binds_to, .. } = ge { if match_span == *span && mpi == *other_mpi { debug!("appending local({:?}) to list", bind_to); binds_to.push(bind_to); return; } } } debug!("found a new move error location"); grouped_errors.push(GroupedMoveError::MovesFromValue { span: match_span, move_from: mpi, original_path, kind, binds_to: vec![bind_to], }); } }; } fn report(&mut self, error: GroupedMoveError<'tcx>) { let (mut err, err_span) = { let (span, use_spans, original_path, kind): ( Span, Option<UseSpans<'tcx>>, Place<'tcx>, &IllegalMoveOriginKind<'_>, ) = match error { GroupedMoveError::MovesFromPlace { span, original_path, ref kind, .. } | GroupedMoveError::MovesFromValue { span, original_path, ref kind, .. 
} => { (span, None, original_path, kind) } GroupedMoveError::OtherIllegalMove { use_spans, original_path, ref kind } => { (use_spans.args_or_use(), Some(use_spans), original_path, kind) } }; debug!( "report: original_path={:?} span={:?}, kind={:?} \ original_path.is_upvar_field_projection={:?}", original_path, span, kind, self.is_upvar_field_projection(original_path.as_ref()) ); ( match kind { IllegalMoveOriginKind::BorrowedContent { target_place } => self .report_cannot_move_from_borrowed_content( original_path, *target_place, span, use_spans, ), IllegalMoveOriginKind::InteriorOfTypeWithDestructor { container_ty: ty } => { self.cannot_move_out_of_interior_of_drop(span, ty) } IllegalMoveOriginKind::InteriorOfSliceOrArray { ty, is_index } => { self.cannot_move_out_of_interior_noncopy(span, ty, Some(*is_index)) } }, span, ) }; self.add_move_hints(error, &mut err, err_span); self.buffer_error(err); } fn report_cannot_move_from_static( &mut self, place: Place<'tcx>, span: Span, ) -> DiagnosticBuilder<'a> { let description = if place.projection.len() == 1 { format!("static item {}", self.describe_any_place(place.as_ref())) } else { let base_static = PlaceRef { local: place.local, projection: &[ProjectionElem::Deref] }; format!( "{} as {} is a static item", self.describe_any_place(place.as_ref()), self.describe_any_place(base_static), ) }; self.cannot_move_out_of(span, &description) } fn report_cannot_move_from_borrowed_content( &mut self, move_place: Place<'tcx>, deref_target_place: Place<'tcx>, span: Span, use_spans: Option<UseSpans<'tcx>>, ) -> DiagnosticBuilder<'a> { // Inspect the type of the content behind the // borrow to provide feedback about why this // was a move rather than a copy. let ty = deref_target_place.ty(self.body, self.infcx.tcx).ty; let upvar_field = self .prefixes(move_place.as_ref(), PrefixSet::All) .find_map(|p| self.is_upvar_field_projection(p)); let deref_base = match deref_target_place.projection.as_ref() { [proj_base @ .., ProjectionElem::Deref] => { PlaceRef { local: deref_target_place.local, projection: &proj_base } } _ => bug!("deref_target_place is not a deref projection"), }; if let PlaceRef { local, projection: [] } = deref_base { let decl = &self.body.local_decls[local]; if decl.is_ref_for_guard() { let mut err = self.cannot_move_out_of( span, &format!("`{}` in pattern guard", self.local_names[local].unwrap()), ); err.note( "variables bound in patterns cannot be moved from \ until after the end of the pattern guard", ); return err; } else if decl.is_ref_to_static() { return self.report_cannot_move_from_static(move_place, span); } } debug!("report: ty={:?}", ty); let mut err = match ty.kind() { ty::Array(..) | ty::Slice(..) 
=> { self.cannot_move_out_of_interior_noncopy(span, ty, None) } ty::Closure(def_id, closure_substs) if def_id.as_local() == Some(self.mir_def_id()) && upvar_field.is_some() => { let closure_kind_ty = closure_substs.as_closure().kind_ty(); let closure_kind = match closure_kind_ty.to_opt_closure_kind() { Some(kind @ (ty::ClosureKind::Fn | ty::ClosureKind::FnMut)) => kind, Some(ty::ClosureKind::FnOnce) => { bug!("closure kind does not match first argument type") } None => bug!("closure kind not inferred by borrowck"), }; let capture_description = format!("captured variable in an `{}` closure", closure_kind); let upvar = &self.upvars[upvar_field.unwrap().index()]; let upvar_hir_id = upvar.place.get_root_variable(); let upvar_name = upvar.place.to_string(self.infcx.tcx); let upvar_span = self.infcx.tcx.hir().span(upvar_hir_id); let place_name = self.describe_any_place(move_place.as_ref()); let place_description = if self.is_upvar_field_projection(move_place.as_ref()).is_some() { format!("{}, a {}", place_name, capture_description) } else { format!("{}, as `{}` is a {}", place_name, upvar_name, capture_description) }; debug!( "report: closure_kind_ty={:?} closure_kind={:?} place_description={:?}", closure_kind_ty, closure_kind, place_description, ); let mut diag = self.cannot_move_out_of(span, &place_description); diag.span_label(upvar_span, "captured outer variable"); diag.span_label( self.body.span, format!("captured by this `{}` closure", closure_kind), ); diag } _ => { let source = self.borrowed_content_source(deref_base); match (self.describe_place(move_place.as_ref()), source.describe_for_named_place()) { (Some(place_desc), Some(source_desc)) => self.cannot_move_out_of( span, &format!("`{}` which is behind a {}", place_desc, source_desc), ), (_, _) => self.cannot_move_out_of( span, &source.describe_for_unnamed_place(self.infcx.tcx), ), } } }; let ty = move_place.ty(self.body, self.infcx.tcx).ty; let def_id = match *ty.kind() { ty::Adt(self_def, _) => self_def.did, ty::Foreign(def_id) | ty::FnDef(def_id, _) | ty::Closure(def_id, _) | ty::Generator(def_id, ..) | ty::Opaque(def_id, _) => def_id, _ => return err, }; let diag_name = self.infcx.tcx.get_diagnostic_name(def_id); if matches!(diag_name, Some(sym::Option | sym::Result)) && use_spans.map_or(true, |v| !v.for_closure()) { err.span_suggestion_verbose( span.shrink_to_hi(), &format!("consider borrowing the `{}`'s content", diag_name.unwrap()), ".as_ref()".to_string(), Applicability::MaybeIncorrect, ); } else if let Some(UseSpans::FnSelfUse { kind: FnSelfUseKind::Normal { implicit_into_iter: true, .. }, .. }) = use_spans { let suggest = match self.infcx.tcx.get_diagnostic_item(sym::IntoIterator) { Some(def_id) => self.infcx.tcx.infer_ctxt().enter(|infcx| { type_known_to_meet_bound_modulo_regions( &infcx, self.param_env, infcx .tcx .mk_imm_ref(infcx.tcx.lifetimes.re_erased, infcx.tcx.erase_regions(ty)), def_id, DUMMY_SP, ) }), _ => false, }; if suggest { err.span_suggestion_verbose( span.shrink_to_lo(), &format!("consider iterating over a slice of the `{}`'s content", ty), "&".to_string(), Applicability::MaybeIncorrect, ); } } err } fn add_move_hints( &self, error: GroupedMoveError<'tcx>, err: &mut DiagnosticBuilder<'a>, span: Span, ) { match error { GroupedMoveError::MovesFromPlace { mut binds_to, move_from, .. 
} => { if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) { err.span_suggestion( span, "consider borrowing here", format!("&{}", snippet), Applicability::Unspecified, ); } if binds_to.is_empty() { let place_ty = move_from.ty(self.body, self.infcx.tcx).ty; let place_desc = match self.describe_place(move_from.as_ref()) { Some(desc) => format!("`{}`", desc), None => "value".to_string(), }; self.note_type_does_not_implement_copy( err, &place_desc, place_ty, Some(span), "", ); } else { binds_to.sort(); binds_to.dedup(); self.add_move_error_details(err, &binds_to); } } GroupedMoveError::MovesFromValue { mut binds_to, .. } => { binds_to.sort(); binds_to.dedup(); self.add_move_error_suggestions(err, &binds_to); self.add_move_error_details(err, &binds_to); } // No binding. Nothing to suggest. GroupedMoveError::OtherIllegalMove { ref original_path, use_spans, .. } => { let span = use_spans.var_or_use(); let place_ty = original_path.ty(self.body, self.infcx.tcx).ty; let place_desc = match self.describe_place(original_path.as_ref()) { Some(desc) => format!("`{}`", desc), None => "value".to_string(), }; self.note_type_does_not_implement_copy(err, &place_desc, place_ty, Some(span), ""); use_spans.args_span_label(err, format!("move out of {} occurs here", place_desc)); use_spans.var_span_label( err, format!("move occurs due to use{}", use_spans.describe()), "moved", ); } } } fn add_move_error_suggestions(&self, err: &mut DiagnosticBuilder<'a>, binds_to: &[Local]) { let mut suggestions: Vec<(Span, &str, String)> = Vec::new(); for local in binds_to { let bind_to = &self.body.local_decls[*local]; if let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var( VarBindingForm { pat_span, .. }, )))) = bind_to.local_info { if let Ok(pat_snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(pat_span) { if let Some(stripped) = pat_snippet.strip_prefix('&') { let pat_snippet = stripped.trim_start(); let (suggestion, to_remove) = if pat_snippet.starts_with("mut") && pat_snippet["mut".len()..].starts_with(rustc_lexer::is_whitespace) { (pat_snippet["mut".len()..].trim_start(), "&mut") } else { (pat_snippet, "&") }; suggestions.push((pat_span, to_remove, suggestion.to_owned())); } } } } suggestions.sort_unstable_by_key(|&(span, _, _)| span); suggestions.dedup_by_key(|&mut (span, _, _)| span); for (span, to_remove, suggestion) in suggestions { err.span_suggestion( span, &format!("consider removing the `{}`", to_remove), suggestion, Applicability::MachineApplicable, ); } } fn add_move_error_details(&self, err: &mut DiagnosticBuilder<'a>, binds_to: &[Local]) { for (j, local) in binds_to.iter().enumerate() { let bind_to = &self.body.local_decls[*local]; let binding_span = bind_to.source_info.span; if j == 0 { err.span_label(binding_span, "data moved here"); } else { err.span_label(binding_span, "...and here"); } if binds_to.len() == 1 { self.note_type_does_not_implement_copy( err, &format!("`{}`", self.local_names[*local].unwrap()), bind_to.ty, Some(binding_span), "", ); } } if binds_to.len() > 1 { err.note( "move occurs because these variables have types that \ don't implement the `Copy` trait", ); } } }
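// Illustrative examples appended for exposition (not part of the compiler
// source): programs that exercise the two grouping paths documented at the
// top of this file. They intentionally fail to compile, so they are kept in
// comments.
//
//     // One grouped error: both moves share the parent move path `*r`, so
//     // `a` and `b` are reported together as a single move error.
//     fn shared_parent(r: &(String, String)) {
//         let (a, b) = *r;
//     }
//
//     // Two errors: the moves come from places with different parent move
//     // paths, so they are reported separately.
//     fn distinct_parents() {
//         let (&x, &y) = (&String::new(), &String::new());
//     }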
40.532286
100
0.481249
e9f6235ca1d05d959e373816a83afe5a9d6b6a73
4,013
//! Systems and components specific to player entities.
use crate::broadcasters::movement::LastKnownPositions;
use crate::chunk_logic::ChunkHolder;
use crate::entity;
use crate::entity::{CreationPacketCreator, EntityId, Name, SpawnPacketCreator};
use crate::io::NewClientInfo;
use crate::join::Joined;
use crate::network::Network;
use crate::p_inventory::EntityInventory;
use crate::state::State;
use crate::util::degrees_to_stops;
use feather_core::network::packet::implementation::{PlayerInfo, PlayerInfoAction, SpawnPlayer};
use feather_core::{ClientboundAnimation, Gamemode, Packet, Position};
use legion::entity::Entity;
use mojang_api::ProfileProperty;
use tonks::{EntityAccessor, PreparedWorld};
use uuid::Uuid;

pub mod chat;

pub const PLAYER_EYE_HEIGHT: f64 = 1.62;

/// Profile properties of a player.
#[derive(Debug, Clone)]
pub struct ProfileProperties(pub Vec<ProfileProperty>);

/// Zero-sized component used to mark players.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Player;

/// Event triggered when a player joins.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PlayerJoinEvent {
    pub player: Entity,
}

/// Event triggered when a player causes an animation.
#[derive(Debug, Clone)]
pub struct PlayerAnimationEvent {
    pub player: Entity,
    pub animation: ClientboundAnimation,
}

/// Creates a new player from the given `NewClientInfo`.
///
/// This function also triggers the `PlayerJoinEvent` for this player.
pub fn create(state: &State, info: NewClientInfo) {
    entity::base(state, info.position)
        .with_component(info.uuid)
        .with_component(Network {
            sender: info.sender,
            receiver: info.receiver,
        })
        .with_component(info.ip)
        .with_component(ProfileProperties(info.profile))
        .with_component(Name(info.username))
        .with_component(ChunkHolder::default())
        .with_component(Joined(false))
        .with_component(LastKnownPositions::default())
        .with_component(SpawnPacketCreator(&create_spawn_packet))
        .with_component(CreationPacketCreator(&create_initialization_packet))
        .with_component(Gamemode::Creative) // TODO: proper gamemode handling
        .with_component(EntityInventory::default())
        .with_component(Player)
        .with_exec(|_, scheduler, player| {
            scheduler.trigger(PlayerJoinEvent { player });
        })
        .build();
}

/// Function to create a `SpawnPlayer` packet to spawn the player.
fn create_spawn_packet(accessor: &EntityAccessor, world: &PreparedWorld) -> Box<dyn Packet> {
    let entity_id = accessor.get_component::<EntityId>(world).unwrap().0;
    let player_uuid = *accessor.get_component::<Uuid>(world).unwrap();
    let pos = *accessor.get_component::<Position>(world).unwrap();

    // TODO: metadata
    let packet = SpawnPlayer {
        entity_id,
        player_uuid,
        x: pos.x,
        y: pos.y,
        z: pos.z,
        yaw: degrees_to_stops(pos.yaw),
        pitch: degrees_to_stops(pos.pitch),
        metadata: Default::default(),
    };
    Box::new(packet)
}

/// Function to create a `PlayerInfo` packet to broadcast when the player joins.
fn create_initialization_packet(
    accessor: &EntityAccessor,
    world: &PreparedWorld,
) -> Box<dyn Packet> {
    let name = accessor.get_component::<Name>(world).unwrap();
    let props = accessor.get_component::<ProfileProperties>(world).unwrap();
    let uuid = *accessor.get_component::<Uuid>(world).unwrap();

    let props = props
        .0
        .iter()
        .map(|prop| {
            (
                prop.name.clone(),
                prop.value.clone(),
                prop.signature.clone(),
            )
        })
        .collect::<Vec<_>>();

    let display_name = json!({
        "text": name.0
    })
    .to_string();

    let action =
        PlayerInfoAction::AddPlayer(name.0.clone(), props, Gamemode::Creative, 50, display_name);
    let packet = PlayerInfo { action, uuid };

    Box::new(packet)
}
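The spawn packet above converts yaw and pitch with `degrees_to_stops` from `crate::util`, which is not shown in this file. A hedged sketch of what such a helper presumably does, assuming the protocol's usual encoding of angles as 1/256ths of a full turn in a byte:

fn degrees_to_stops(degrees: f32) -> u8 {
    // Wrap into [0, 360) and scale so that 256 "stops" make one full turn.
    ((degrees.rem_euclid(360.0) / 360.0) * 256.0) as u8
}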
32.104
97
0.67082
f9973668639c2f64aa964dd6727c9f47a414c0a7
3,056
use crate::{
    AppliedMigration, CommitTransaction, DefaultQueries, Error, ExecuteMultiple, Migrate,
    MigrateGrouped, Query, Transaction, WrapMigrationError,
};
use chrono::{DateTime, Local};
use postgres::{
    transaction::Transaction as PgTransaction, Connection as PgConnection, Error as PgError,
};

fn query_applied_migrations(
    transaction: &PgTransaction,
    query: &str,
) -> Result<Vec<AppliedMigration>, PgError> {
    let rows = transaction.query(query, &[])?;
    let mut applied = Vec::new();
    for row in rows.into_iter() {
        let version: i32 = row.get(0);
        let applied_on: String = row.get(2);
        let applied_on = DateTime::parse_from_rfc3339(&applied_on)
            .unwrap()
            .with_timezone(&Local);
        applied.push(AppliedMigration {
            version: version as usize,
            name: row.get(1),
            applied_on,
            checksum: row.get(3),
        });
    }
    Ok(applied)
}

impl<'a> Transaction for PgTransaction<'a> {
    type Error = PgError;

    fn execute(&mut self, query: &str) -> Result<usize, Self::Error> {
        let count = PgTransaction::execute(self, query, &[])?;
        Ok(count as usize)
    }
}

impl<'a> CommitTransaction for PgTransaction<'a> {
    fn commit(self) -> Result<(), Self::Error> {
        PgTransaction::commit(self)
    }
}

impl<'a> Query<Vec<AppliedMigration>> for PgTransaction<'a> {
    fn query(&mut self, query: &str) -> Result<Option<Vec<AppliedMigration>>, Self::Error> {
        let applied = query_applied_migrations(self, query)?;
        Ok(Some(applied))
    }
}

impl<'a> DefaultQueries for PgTransaction<'a> {}

impl<'a> MigrateGrouped<'a> for PgConnection {
    type Transaction = PgTransaction<'a>;

    fn transaction(&'a mut self) -> Result<PgTransaction<'a>, Error> {
        PgConnection::transaction(self).migration_err("error starting transaction")
    }
}

impl Transaction for PgConnection {
    type Error = PgError;

    fn execute(&mut self, query: &str) -> Result<usize, Self::Error> {
        let transaction = PgConnection::transaction(&self)?;
        let count = PgTransaction::execute(&transaction, query, &[])?;
        transaction.commit()?;
        Ok(count as usize)
    }
}

impl ExecuteMultiple for PgConnection {
    fn execute_multiple(&mut self, queries: &[&str]) -> Result<usize, Self::Error> {
        let transaction = PgConnection::transaction(&self)?;
        let mut count = 0;
        for query in queries.iter() {
            count += PgTransaction::execute(&transaction, query, &[])?;
        }
        transaction.commit()?;
        Ok(count as usize)
    }
}

impl Query<Vec<AppliedMigration>> for PgConnection {
    fn query(&mut self, query: &str) -> Result<Option<Vec<AppliedMigration>>, Self::Error> {
        let transaction = PgConnection::transaction(self)?;
        let applied = query_applied_migrations(&transaction, query)?;
        transaction.commit()?;
        Ok(Some(applied))
    }
}

impl DefaultQueries for PgConnection {}

impl Migrate for PgConnection {}
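A small illustrative helper (not part of the crate) showing why both `PgConnection` and `PgTransaction` implement the `Transaction` trait: migration logic can be written once and run either statement-by-statement on a connection or grouped inside one transaction.

fn apply_all<T: Transaction>(executor: &mut T, queries: &[&str]) -> Result<usize, T::Error> {
    // Runs unchanged over a `PgConnection` (each call commits internally) or
    // over a `PgTransaction` (the caller commits once at the end).
    let mut total = 0;
    for query in queries {
        total += executor.execute(query)?;
    }
    Ok(total)
}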
30.56
92
0.635144
1df6ce8ec1caecbe443497b21a9b1d11fb4bc799
5,882
use futures::{future, Future};
use rain_core::{errors::*, types::*};
use std::path::Path;
use std::sync::Arc;

use super::TaskResult;
use governor::data::{Data, DataBuilder};
use governor::graph::TaskRef;
use governor::state::State;

/// Task that merges all input blobs into one blob
pub fn task_concat(state: &mut State, task_ref: TaskRef) -> TaskResult {
    let inputs = {
        let task = task_ref.get();
        task.inputs_data()
    };

    for (i, input) in inputs.iter().enumerate() {
        if !input.is_blob() {
            bail!("Input {} object is not a blob", i);
        }
    }

    let state_ref = state.self_ref();

    Ok(Box::new(future::lazy(move || {
        let result_size: usize = inputs.iter().map(|d| d.size()).sum();
        let state = state_ref.get();
        let work_dir = state.work_dir();
        let mut builder = DataBuilder::new(work_dir, DataType::Blob, Some(result_size));
        for input in inputs {
            builder.write_blob(&input).unwrap();
        }
        let result = builder.build(work_dir);
        let output = task_ref.get().output(0);
        output.get_mut().set_data(Arc::new(result))?;
        Ok(())
    })))
}

/// Task that returns the input argument after a given number of seconds
pub fn task_sleep(_state: &mut State, task_ref: TaskRef) -> TaskResult {
    let sleep_s: f32 = {
        let task = task_ref.get();
        task.check_number_of_args(1)?;
        task.spec.parse_config()?
    };
    let now = ::std::time::Instant::now();
    debug!("Starting sleep task for {}s", sleep_s);
    let duration = ::std::time::Duration::from_millis((sleep_s * 1_000f32).round() as u64);
    Ok(Box::new(
        ::tokio_timer::Delay::new(now + duration)
            .map_err(|e| e.into())
            .and_then(move |()| {
                {
                    let task = task_ref.get();
                    let output = task.output(0);
                    output.get_mut().set_data(task.input_data(0))?;
                }
                Ok(())
            }),
    ))
}

#[derive(Deserialize)]
struct OpenConfig {
    path: String,
}

/// Open external file
pub fn task_open(state: &mut State, task_ref: TaskRef) -> TaskResult {
    {
        let task = task_ref.get();
        task.check_number_of_args(0)?;
    }
    let state_ref = state.self_ref();
    Ok(Box::new(future::lazy(move || {
        {
            let task = task_ref.get();
            let config: OpenConfig = task.spec.parse_config()?;
            let path = Path::new(&config.path);
            if !path.is_absolute() {
                bail!("Path {:?} is not absolute", path);
            }
            let metadata = &::std::fs::metadata(&path)
                .map_err(|_| ErrorKind::Msg(format!("Path '{}' not found", config.path)))?;
            let target_path = state_ref.get().work_dir().new_path_for_dataobject();
            let data = Data::new_by_fs_copy(
                &path,
                metadata,
                target_path,
                state_ref.get().work_dir().data_path(),
            )?;
            let output = task_ref.get().output(0);
            output.get_mut().set_data(Arc::new(data))?;
        }
        Ok(())
    })))
}

#[derive(Deserialize)]
struct ExportConfig {
    path: String,
}

/// Export internal file to external file system
pub fn task_export(_: &mut State, task_ref: TaskRef) -> TaskResult {
    {
        let task = task_ref.get();
        task.check_number_of_args(1)?;
    }
    Ok(Box::new(future::lazy(move || {
        let task = task_ref.get();
        let config: ExportConfig = task.spec.parse_config()?;
        let path = Path::new(&config.path);
        if !path.is_absolute() {
            bail!("Path {:?} is not absolute", path);
        }
        let input = task.input_data(0);
        input.write_to_path(path)
    })))
}

#[derive(Deserialize)]
struct MakeDirectoryConfig {
    paths: Vec<String>,
}

/// Make directory
pub fn task_make_directory(state: &mut State, task_ref: TaskRef) -> TaskResult {
    let state_ref = state.self_ref();
    Ok(Box::new(future::lazy(move || {
        let state = state_ref.get();
        let task = task_ref.get();
        let dir = state.work_dir().make_task_temp_dir(task.spec.id)?;
        let main_dir = dir.path().join("newdir");
        ::std::fs::create_dir(&main_dir)?;
        let config: MakeDirectoryConfig = task.spec.parse_config()?;
        task.check_number_of_args(config.paths.len())?;
        for (ref path, ref data) in config.paths.iter().zip(task.inputs_data().iter()) {
            let p = Path::new(path);
            if !p.is_relative() {
                bail!("Path '{}' is not relative", path);
            }
            let target_path = main_dir.join(&p);
            ::std::fs::create_dir_all(&target_path.parent().unwrap())?;
            data.link_to_path(&target_path)?;
        }
        let output = task.output(0);
        let mut obj = output.get_mut();
        obj.set_data_by_fs_move(&main_dir, None, state.work_dir())
    })))
}

#[derive(Deserialize)]
struct SliceDirectoryConfig {
    path: String,
}

/// Slice directory
pub fn task_slice_directory(state: &mut State, task_ref: TaskRef) -> TaskResult {
    let state_ref = state.self_ref();
    Ok(Box::new(future::lazy(move || {
        let state = state_ref.get();
        let task = task_ref.get();
        task.check_number_of_args(1)?;
        let config: SliceDirectoryConfig = task.spec.parse_config()?;
        let data = task.input_data(0);
        let dir = state.work_dir().make_task_temp_dir(task.spec.id)?;
        let main_dir = dir.path().join("newdir");
        data.link_to_path(&main_dir).unwrap();
        let path = main_dir.join(&config.path);
        let output = task.output(0);
        let mut obj = output.get_mut();
        obj.set_data_by_fs_move(&path, Some(&config.path), state.work_dir())
    })))
}
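For reference, hedged examples of the JSON configs these tasks parse via `task.spec.parse_config()`; the field names come from the `Deserialize` structs above, the concrete values are invented, and the test assumes `serde_json` is available in the crate:

// task_open / task_export:  { "path": "/data/input.bin" }                  (must be absolute)
// task_make_directory:      { "paths": ["logs/out.txt", "data.bin"] }      (relative, one per input)
// task_slice_directory:     { "path": "nested/dir" }                       (relative, inside the input dir)
#[test]
fn make_directory_config_shape() {
    let cfg: MakeDirectoryConfig =
        ::serde_json::from_str(r#"{ "paths": ["logs/out.txt", "data.bin"] }"#).unwrap();
    assert_eq!(cfg.paths.len(), 2);
}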
32.860335
91
0.569874
1e39ccf25e93742f72d54e61e6afdc21f844ed4b
7,359
use common::external::{get_version, Camera, Injection}; use memory_rs::external::process::Process; use std::f32; use std::io::Error; use std::rc::Rc; use std::thread; use std::time::{Duration, Instant}; use winapi::shared::windef::POINT; use winapi::um::winuser; use winapi::um::winuser::{GetAsyncKeyState, GetCursorPos, SetCursorPos}; const INITIAL_POS: i32 = 500; extern "C" { static get_camera_data: u8; static get_camera_data_end: u8; static get_controller_input: u8; static get_controller_input_end: u8; } fn detect_activation_by_controller(value: u64, activation: u64) -> bool { let result = value & activation; result == activation } pub fn main() -> Result<(), Error> { let mut mouse_pos: POINT = POINT::default(); // latest mouse positions let mut latest_x = 0; let mut latest_y = 0; println!("Yakuza 0 Freecam v{} by @etra0", get_version()); println!( " INSTRUCTIONS: PAUSE/L2 + X - Activate/Deactivate Free Camera END/L2 + Square - Pause the cinematic DEL - Deattach Mouse W, A, S, D/Left Stick - Move the camera Mouse/Right Stick - Point the camera CTRL, SPACE/TRIANGLE, X - Move UP or DOWN PG UP, PG DOWN/DPAD UP, DPAD DOWN - Increase/Decrease speed multiplier DPAD LEFT, DPAD RIGHT - Increase/Decrease Right Stick Sensitivity F1, F2/L2, R2 - Increase/Decrease FOV respectively Q, E/L1, R1 - Rotate the camera WARNING: Once you deattach the camera (PAUSE), your mouse will be set in a fixed position, so in order to attach/deattach the mouse to the camera, you can press DEL WARNING: If you're in freeroam and you stop hearing audio, it's probably because you have the paused option activated, simply press END to deactivate it. " ); println!("Waiting for the game to start"); let yakuza = loop { if let Ok(p) = Process::new("Yakuza0.exe") { break Rc::new(p); }; thread::sleep(Duration::from_secs(5)); }; println!("Game hooked"); let entry_point: usize = 0x18FD38; let p_shellcode = unsafe { yakuza.inject_shellcode( entry_point, 5, &get_camera_data as *const u8, &get_camera_data_end as *const u8, ) }; let p_controller = unsafe { yakuza.inject_shellcode( 0xEC1F, 6, &get_controller_input as *const u8, &get_controller_input_end as *const u8, ) }; let mut cam = Camera::new(yakuza.clone(), p_shellcode); // function that changes the focal length of the cinematics, when // active, nop this cam.injections.push(Injection { entry_point: 0x187616, f_orig: vec![0xF3, 0x0F, 0x11, 0x89, 0xAC, 0x00, 0x00, 0x00], f_rep: vec![0x90; 8], }); // WIP: Pause the cinematics of the world. 
let pause_cinematic_f: Vec<u8> = vec![0x41, 0x8A, 0x8E, 0xC9, 0x00, 0x00, 0x00]; let pause_cinematic_rep: Vec<u8> = vec![0xB1, 0x01, 0x90, 0x90, 0x90, 0x90, 0x90]; let pause_cinematic_offset = 0xB720DE; let mut pause_world = false; let mut active = false; let mut capture_mouse = false; let mut restart_mouse = false; loop { if capture_mouse & restart_mouse { unsafe { SetCursorPos(INITIAL_POS, INITIAL_POS) }; restart_mouse = !restart_mouse; latest_x = INITIAL_POS; latest_y = INITIAL_POS; continue; } let start = Instant::now(); // poll rate thread::sleep(Duration::from_millis(10)); unsafe { GetCursorPos(&mut mouse_pos) }; let duration = start.elapsed().as_millis() as f32; let speed_x = ((mouse_pos.x - latest_x) as f32) / duration; let speed_y = ((mouse_pos.y - latest_y) as f32) / duration; let controller_structure_p: usize = yakuza.read_value(p_controller + 0x200, true); let controller_state = match controller_structure_p { 0 => 0, _ => yakuza.read_value::<u64>(controller_structure_p, true), }; if active && capture_mouse { cam.update_position(speed_x, speed_y); unsafe { cam.handle_keyboard_input() }; } if active && (controller_structure_p != 0) { let [pos_x, pos_y, pitch, yaw] = yakuza.read_value::<[f32; 4]>(controller_structure_p + 0x10, true); // L1 & R1 check match controller_state & 0x30 { 0x20 => cam.update_fov(0.01), 0x10 => cam.update_fov(-0.01), _ => (), }; let speed: i8 = match controller_state & 0x3000 { 0x1000 => 1, 0x2000 => -1, _ => 0, }; let dp_up = match controller_state & 0x9 { 0x8 => 2f32, 0x1 => -2f32, _ => 0f32, }; let dir_speed = match controller_state & 0xC000 { 0x8000 => 1, 0x4000 => -1, _ => 0, }; let rotation: i8 = match controller_state & 0xC0 { 0x40 => 1, 0x80 => -1, 0xC0 => 2, _ => 0, }; cam.update_values(-pos_y, -pos_x, dp_up, speed, dir_speed, rotation); //dp_up, speed, dir_speed, rotation); cam.update_position(pitch, yaw); } latest_x = mouse_pos.x; latest_y = mouse_pos.y; // to scroll infinitely restart_mouse = !restart_mouse; unsafe { if detect_activation_by_controller(controller_state, 0x11) || (GetAsyncKeyState(winuser::VK_PAUSE) as u32 & 0x8000) != 0 { active = !active; if controller_state & 0x11 != 0x11 { capture_mouse = active; } let c_status = if active { "Deattached" } else { "Attached" }; println!("status of camera: {}", c_status); if active { cam.deattach(); } else { cam.attach(); } thread::sleep(Duration::from_millis(500)); } if active & (GetAsyncKeyState(winuser::VK_DELETE) as u32 & 0x8000 != 0) { capture_mouse = !capture_mouse; let c_status = if !capture_mouse { "Deattached" } else { "Attached" }; println!("status of mouse: {}", c_status); thread::sleep(Duration::from_millis(500)); } if detect_activation_by_controller(controller_state, 0x14) || (GetAsyncKeyState(winuser::VK_END) as u32 & 0x8000) != 0 { pause_world = !pause_world; println!("status of pausing: {}", pause_world); if pause_world { yakuza.write_aob(pause_cinematic_offset, &pause_cinematic_rep, false); } else { yakuza.write_aob(pause_cinematic_offset, &pause_cinematic_f, false); } thread::sleep(Duration::from_millis(500)); } } } }
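The gamepad handling above is plain bitmasking; a short illustrative test of `detect_activation_by_controller`, using the `0x11` mask that the loop above treats as the L2 + X toggle:

#[test]
fn controller_mask_requires_all_bits() {
    // Every bit of the activation mask must be set in the controller state...
    assert!(detect_activation_by_controller(0x0011, 0x11));
    // ...any single bit alone is not enough.
    assert!(!detect_activation_by_controller(0x0010, 0x11));
    assert!(!detect_activation_by_controller(0x0001, 0x11));
}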
31.448718
119
0.549803
ffca5a079f3316c9395845d8cd2189908fcb80b6
3,654
use rand::seq::SliceRandom; /// Generates a random name from a list of 69 random first and last /// nicknames through a `rng` /// /// Requires the `SliceRandom` trait from `rand` /// /// Name returned as a `String` pub fn generate_random_alias() -> String { let names = [ "Baby Oil", "Bad News", "Big Burps", "Bill 'Beenie-Weenie'", "Bob 'Stinkbug'", "Bowel Noises", "Boxelder", "Bud 'Lite'", "Butterbean", "Buttermilk", "Buttocks", "Chad", "Chesterfield", "Chewy", "Chigger", "Cinnabuns", "Cleet", "Cornbread", "Crab Meat", "Crapps", "Dark Skies", "Dennis Clawhammer", "Dicman", "Elphonso", "Fancypants", "Figgs", "Foncy", "Gootsy", "Greasy Jim", "Huckleberry", "Huggy", "Ignatious", "Jimbo", "Joe 'Pottin Soil'", "Johnny", "Lemongrass", "Lil Debil", "Longbranch", "'Lunch Money'", "Mergatroid", "'Mr Peabody'", "Oil-Can", "Oinks", "Old Scratch", "Ovaltine", "Pennywhistle", "Pitchfork Ben", "Potato Bug", "Pushmeet", "Rock Candy", "Schlomo", "Scratchensniff", "Scut", "Sid 'The Squirts'", "Skidmark", "Slaps", "Snakes", "Snoobs", "Snorki", "Soupcan Sam", "Spitzitout", "Squids", "Stinky", "Storyboard", "Sweet Tea", "TeeTee", "Wheezy Joe", "Winston 'Jazz Hands'", "'Worms'", ]; let last = [ "Appleyard", "Bigmeat", "Bloominshine", "Boogerbottom", "Breedslovetrout", "Butterbaugh", "Clovenhoof", "Clutterbuck", "Cocktoasten", "Endicott", "Fewhairs", "Gooberdapple", "Goodensmith", "Goodpasture", "Guster", "Henderson", "Hooperbag", "Hoosenater", "Hootkins", "Jefferson", "Jenkins", "Jingley-Schmidt", "Johnson", "Kingfish", "Listenbee", "M'Bembo", "McFadden", "Moonshine", "Nettles", "Noseworthy", "Olivetti", "Outerbridge", "Overpeck", "Overturf", "Oxhandler", "Pealike", "Pennywhistle", "Peterson", "Pieplow", "Pinkerton", "Porkins", "Putney", "Quakenbush", "Rainwater", "Rosenthal", "Rubbins", "Sackrider", "Snuggleshine", "Splern", "Stevens", "Stroganoff", "Sugar-Gold", "Swackhamer", "Tippins", "Turnipseed", "Vinaigrette", "Ng'ombe", "Walkingstick", "Wallbanger", "Weewax", "Weiners", "Whipkey", "Wigglesworth", "Wimplesnatch", "Winterkorn", "Woolysocks", ]; let mut rng = rand::thread_rng(); format!( "You are from now on...\n{} {}", names.choose(&mut rng).unwrap(), last.choose(&mut rng).unwrap() ) } pub fn parse_args(message: &str) -> Option<(&str, &str)> { for (i, c) in message.chars().enumerate() { if c == ' ' { return Some((&message[..i], &message[i + 1..])); } } None } pub fn get_r6_stats(_platform: &str, _username: &str) -> String { unimplemented!() }
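A quick illustrative test of `parse_args`, which splits a command message at the first space and returns `None` when there is no argument part:

#[test]
fn parse_args_splits_on_first_space() {
    assert_eq!(parse_args("!stats pc etra0"), Some(("!stats", "pc etra0")));
    assert_eq!(parse_args("help"), None);
}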
21.244186
67
0.445539
ed8943df42b4f22b1501e95f8e80037fbfec1c97
19,838
#![doc = "generated by AutoRust"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] #![allow(clippy::redundant_clone)] use super::models; #[derive(Clone)] pub struct Client { endpoint: String, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, pipeline: azure_core::Pipeline, } #[derive(Clone)] pub struct ClientBuilder { credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, endpoint: Option<String>, scopes: Option<Vec<String>>, } pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD; impl ClientBuilder { pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self { Self { credential, endpoint: None, scopes: None, } } pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self { self.endpoint = Some(endpoint.into()); self } pub fn scopes(mut self, scopes: &[&str]) -> Self { self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect()); self } pub fn build(self) -> Client { let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned()); let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]); Client::new(endpoint, self.credential, scopes) } } impl Client { pub(crate) fn endpoint(&self) -> &str { self.endpoint.as_str() } pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential { self.credential.as_ref() } pub(crate) fn scopes(&self) -> Vec<&str> { self.scopes.iter().map(String::as_str).collect() } pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> azure_core::error::Result<azure_core::Response> { let mut context = azure_core::Context::default(); let mut request = request.into(); self.pipeline.send(&mut context, &mut request).await } pub fn new( endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, ) -> Self { let endpoint = endpoint.into(); let pipeline = azure_core::Pipeline::new( option_env!("CARGO_PKG_NAME"), option_env!("CARGO_PKG_VERSION"), azure_core::ClientOptions::default(), Vec::new(), Vec::new(), ); Self { endpoint, credential, scopes, pipeline, } } pub fn managed_private_endpoints(&self) -> managed_private_endpoints::Client { managed_private_endpoints::Client(self.clone()) } } pub mod managed_private_endpoints { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn get( &self, managed_virtual_network_name: impl Into<String>, managed_private_endpoint_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), managed_virtual_network_name: managed_virtual_network_name.into(), managed_private_endpoint_name: managed_private_endpoint_name.into(), } } pub fn create( &self, managed_virtual_network_name: impl Into<String>, managed_private_endpoint_name: impl Into<String>, managed_private_endpoint: impl Into<models::ManagedPrivateEndpoint>, ) -> create::Builder { create::Builder { client: self.0.clone(), managed_virtual_network_name: managed_virtual_network_name.into(), managed_private_endpoint_name: managed_private_endpoint_name.into(), managed_private_endpoint: managed_private_endpoint.into(), } } pub fn delete( &self, managed_virtual_network_name: impl Into<String>, managed_private_endpoint_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), managed_virtual_network_name: managed_virtual_network_name.into(), managed_private_endpoint_name: managed_private_endpoint_name.into(), } } pub fn list(&self, 
managed_virtual_network_name: impl Into<String>) -> list::Builder { list::Builder { client: self.0.clone(), managed_virtual_network_name: managed_virtual_network_name.into(), } } } pub mod get { use super::models; use azure_core::error::ResultExt; type Response = models::ManagedPrivateEndpoint; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) managed_virtual_network_name: String, pub(crate) managed_private_endpoint_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/managedVirtualNetworks/{}/managedPrivateEndpoints/{}", this.client.endpoint(), &this.managed_virtual_network_name, &this.managed_private_endpoint_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::ManagedPrivateEndpoint = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod create { use super::models; use azure_core::error::ResultExt; type Response = models::ManagedPrivateEndpoint; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) managed_virtual_network_name: String, pub(crate) managed_private_endpoint_name: String, pub(crate) managed_private_endpoint: models::ManagedPrivateEndpoint, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/managedVirtualNetworks/{}/managedPrivateEndpoints/{}", this.client.endpoint(), &this.managed_virtual_network_name, &this.managed_private_endpoint_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); req_builder = req_builder.header("content-type", "application/json"); let 
req_body = azure_core::to_json(&this.managed_private_endpoint)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::ManagedPrivateEndpoint = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod delete { use super::models; use azure_core::error::ResultExt; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) managed_virtual_network_name: String, pub(crate) managed_private_endpoint_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> { Box::pin({ let this = self.clone(); async move { let url_str = &format!( "{}/managedVirtualNetworks/{}/managedPrivateEndpoints/{}", this.client.endpoint(), &this.managed_virtual_network_name, &this.managed_private_endpoint_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; let rsp = this .client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }) } } } pub mod list { use super::models; use azure_core::error::ResultExt; type Response = models::ManagedPrivateEndpointListResponse; #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) managed_virtual_network_name: String, } impl Builder { pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> { let make_request = move |continuation: Option<azure_core::prelude::Continuation>| { let this = self.clone(); async move { let url_str = &format!( "{}/managedVirtualNetworks/{}/managedPrivateEndpoints", this.client.endpoint(), &this.managed_virtual_network_name ); let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?; let mut req_builder = http::request::Builder::new(); let rsp = match continuation { Some(token) => { url.set_path(""); url = url 
.join(&token.into_raw()) .context(azure_core::error::ErrorKind::DataConversion, "parse url")?; let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version"); if !has_api_version_already { url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); } req_builder = req_builder.uri(url.as_str()); req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); let req_body = azure_core::EMPTY_BODY; let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? } None => { req_builder = req_builder.method(http::Method::GET); let credential = this.client.token_credential(); let token_response = credential .get_token(&this.client.scopes().join(" ")) .await .context(azure_core::error::ErrorKind::Other, "get bearer token")?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2021-06-01-preview"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(azure_core::error::ErrorKind::Other, "build request")?; this.client .send(req) .await .context(azure_core::error::ErrorKind::Io, "execute request")? } }; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?; let rsp_value: models::ManagedPrivateEndpointListResponse = serde_json::from_slice(&rsp_body)?; Ok(rsp_value) } status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse { status: status_code.as_u16(), error_code: None, })), } } }; azure_core::Pageable::new(make_request) } } } }
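A hedged usage sketch for the generated builder and pageable list API above. `azure_identity::DefaultAzureCredential` is an assumption (it is not part of this generated file), and the endpoint is a placeholder:

async fn list_private_endpoints() -> azure_core::error::Result<()> {
    use futures::StreamExt;
    // Assumption: `azure_identity` supplies a `TokenCredential` implementation.
    let credential = std::sync::Arc::new(azure_identity::DefaultAzureCredential::default());
    let client = ClientBuilder::new(credential)
        .endpoint("https://contoso-workspace.dev.azuresynapse.net") // placeholder
        .build();
    // `into_stream()` follows continuation tokens, as implemented in `list` above.
    let mut pages = client
        .managed_private_endpoints()
        .list("default")
        .into_stream();
    while let Some(page) = pages.next().await {
        let _response = page?;
    }
    Ok(())
}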
50.350254
139
0.486138
2628baefa685318005a92c38aa3adea8d51efce2
131,241
use crate::{ ancestors::Ancestors, contains::Contains, inline_spl_token_v2_0::{self, SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, secondary_index::*, }; use bv::BitVec; use dashmap::DashSet; use log::*; use ouroboros::self_referencing; use panoptes_measure::measure::Measure; use panoptes_sdk::{ clock::{BankId, Slot}, pubkey::{Pubkey, PUBKEY_BYTES}, }; use std::{ collections::{ btree_map::{self, BTreeMap}, HashSet, }, ops::{ Bound, Bound::{Excluded, Included, Unbounded}, Range, RangeBounds, }, sync::{ atomic::{AtomicU64, Ordering}, Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, }, }; use thiserror::Error; pub const ITER_BATCH_SIZE: usize = 1000; pub type ScanResult<T> = Result<T, ScanError>; pub type SlotList<T> = Vec<(Slot, T)>; pub type SlotSlice<'s, T> = &'s [(Slot, T)]; pub type RefCount = u64; pub type AccountMap<K, V> = BTreeMap<K, V>; type AccountMapEntry<T> = Arc<AccountMapEntryInner<T>>; pub trait IsCached { fn is_cached(&self) -> bool; } impl IsCached for bool { fn is_cached(&self) -> bool { false } } impl IsCached for u64 { fn is_cached(&self) -> bool { false } } #[derive(Error, Debug, PartialEq)] pub enum ScanError { #[error("Node detected it replayed bad version of slot {slot:?} with id {bank_id:?}, thus the scan on said slot was aborted")] SlotRemoved { slot: Slot, bank_id: BankId }, } enum ScanTypes<R: RangeBounds<Pubkey>> { Unindexed(Option<R>), Indexed(IndexKey), } #[derive(Debug, Clone, Copy)] pub enum IndexKey { ProgramId(Pubkey), SplTokenMint(Pubkey), SplTokenOwner(Pubkey), } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum AccountIndex { ProgramId, SplTokenMint, SplTokenOwner, } #[derive(Debug, PartialEq, Eq, Clone)] pub struct AccountSecondaryIndexesIncludeExclude { pub exclude: bool, pub keys: HashSet<Pubkey>, } #[derive(Debug, Default, Clone)] pub struct AccountSecondaryIndexes { pub keys: Option<AccountSecondaryIndexesIncludeExclude>, pub indexes: HashSet<AccountIndex>, } impl AccountSecondaryIndexes { pub fn is_empty(&self) -> bool { self.indexes.is_empty() } pub fn contains(&self, index: &AccountIndex) -> bool { self.indexes.contains(index) } pub fn include_key(&self, key: &Pubkey) -> bool { match &self.keys { Some(options) => options.exclude ^ options.keys.contains(key), None => true, // include all keys } } } #[derive(Debug)] pub struct AccountMapEntryInner<T> { ref_count: AtomicU64, pub slot_list: RwLock<SlotList<T>>, } impl<T> AccountMapEntryInner<T> { pub fn ref_count(&self) -> u64 { self.ref_count.load(Ordering::Relaxed) } } pub enum AccountIndexGetResult<'a, T: 'static> { Found(ReadAccountMapEntry<T>, usize), NotFoundOnFork, Missing(AccountMapsReadLock<'a, T>), } #[self_referencing] pub struct ReadAccountMapEntry<T: 'static> { owned_entry: AccountMapEntry<T>, #[borrows(owned_entry)] slot_list_guard: RwLockReadGuard<'this, SlotList<T>>, } impl<T: Clone> ReadAccountMapEntry<T> { pub fn from_account_map_entry(account_map_entry: AccountMapEntry<T>) -> Self { ReadAccountMapEntryBuilder { owned_entry: account_map_entry, slot_list_guard_builder: |lock| lock.slot_list.read().unwrap(), } .build() } pub fn slot_list(&self) -> &SlotList<T> { &*self.borrow_slot_list_guard() } pub fn ref_count(&self) -> &AtomicU64 { &self.borrow_owned_entry_contents().ref_count } pub fn unref(&self) { self.ref_count().fetch_sub(1, Ordering::Relaxed); } pub fn addref(&self) { self.ref_count().fetch_add(1, Ordering::Relaxed); } } #[self_referencing] pub struct WriteAccountMapEntry<T: 'static> { owned_entry: AccountMapEntry<T>, #[borrows(owned_entry)] 
slot_list_guard: RwLockWriteGuard<'this, SlotList<T>>, } impl<T: 'static + Clone + IsCached> WriteAccountMapEntry<T> { pub fn from_account_map_entry(account_map_entry: AccountMapEntry<T>) -> Self { WriteAccountMapEntryBuilder { owned_entry: account_map_entry, slot_list_guard_builder: |lock| lock.slot_list.write().unwrap(), } .build() } pub fn slot_list(&mut self) -> &SlotList<T> { &*self.borrow_slot_list_guard() } pub fn slot_list_mut<RT>( &mut self, user: impl for<'this> FnOnce(&mut RwLockWriteGuard<'this, SlotList<T>>) -> RT, ) -> RT { self.with_slot_list_guard_mut(user) } pub fn ref_count(&self) -> &AtomicU64 { &self.borrow_owned_entry_contents().ref_count } // create an entry that is equivalent to this process: // 1. new empty (refcount=0, slot_list={}) // 2. update(slot, account_info) // This code is called when the first entry [ie. (slot,account_info)] for a pubkey is inserted into the index. pub fn new_entry_after_update(slot: Slot, account_info: &T) -> AccountMapEntry<T> { let ref_count = if account_info.is_cached() { 0 } else { 1 }; Arc::new(AccountMapEntryInner { ref_count: AtomicU64::new(ref_count), slot_list: RwLock::new(vec![(slot, account_info.clone())]), }) } // Try to update an item in the slot list the given `slot` If an item for the slot // already exists in the list, remove the older item, add it to `reclaims`, and insert // the new item. pub fn update(&mut self, slot: Slot, account_info: T, reclaims: &mut SlotList<T>) { let mut addref = !account_info.is_cached(); self.slot_list_mut(|list| { // find other dirty entries from the same slot for list_index in 0..list.len() { let (s, previous_update_value) = &list[list_index]; if *s == slot { addref = addref && previous_update_value.is_cached(); let mut new_item = (slot, account_info); std::mem::swap(&mut new_item, &mut list[list_index]); reclaims.push(new_item); list[(list_index + 1)..] .iter() .for_each(|item| assert!(item.0 != slot)); return; // this returns from self.slot_list_mut above } } // if we make it here, we did not find the slot in the list list.push((slot, account_info)); }); if addref { // If it's the first non-cache insert, also bump the stored ref count self.ref_count().fetch_add(1, Ordering::Relaxed); } } } #[derive(Debug, Default, AbiExample, Clone)] pub struct RollingBitField { max_width: u64, min: u64, max: u64, // exclusive bits: BitVec, count: usize, // These are items that are true and lower than min. // They would cause us to exceed max_width if we stored them in our bit field. // We only expect these items in conditions where there is some other bug in the system // or in testing when large ranges are created. excess: HashSet<u64>, } impl PartialEq<RollingBitField> for RollingBitField { fn eq(&self, other: &Self) -> bool { // 2 instances could have different internal data for the same values, // so we have to compare data. self.len() == other.len() && { for item in self.get_all() { if !other.contains(&item) { return false; } } true } } } // functionally similar to a hashset // Relies on there being a sliding window of key values. The key values continue to increase. // Old key values are removed from the lesser values and do not accumulate. 
impl RollingBitField { pub fn new(max_width: u64) -> Self { assert!(max_width > 0); assert!(max_width.is_power_of_two()); // power of 2 to make dividing a shift let bits = BitVec::new_fill(false, max_width); Self { max_width, bits, count: 0, min: 0, max: 0, excess: HashSet::new(), } } // find the array index fn get_address(&self, key: &u64) -> u64 { key % self.max_width } pub fn range_width(&self) -> u64 { // note that max isn't updated on remove, so it can be above the current max self.max - self.min } pub fn min(&self) -> Option<u64> { if self.is_empty() { None } else if self.excess.is_empty() { Some(self.min) } else { let mut min = if self.all_items_in_excess() { u64::MAX } else { self.min }; for item in &self.excess { min = std::cmp::min(min, *item); } Some(min) } } pub fn insert(&mut self, key: u64) { let mut bits_empty = self.count == 0 || self.all_items_in_excess(); let update_bits = if bits_empty { true // nothing in bits, so in range } else if key < self.min { // bits not empty and this insert is before min, so add to excess if self.excess.insert(key) { self.count += 1; } false } else if key < self.max { true // fits current bit field range } else { // key is >= max let new_max = key + 1; loop { let new_width = new_max.saturating_sub(self.min); if new_width <= self.max_width { // this key will fit the max range break; } // move the min item from bits to excess and then purge from min to make room for this new max let inserted = self.excess.insert(self.min); assert!(inserted); let key = self.min; let address = self.get_address(&key); self.bits.set(address, false); self.purge(&key); if self.all_items_in_excess() { // if we moved the last existing item to excess, then we are ready to insert the new item in the bits bits_empty = true; break; } } true // moved things to excess if necessary, so update bits with the new entry }; if update_bits { let address = self.get_address(&key); let value = self.bits.get(address); if !value { self.bits.set(address, true); if bits_empty { self.min = key; self.max = key + 1; } else { self.min = std::cmp::min(self.min, key); self.max = std::cmp::max(self.max, key + 1); assert!( self.min + self.max_width >= self.max, "min: {}, max: {}, max_width: {}", self.min, self.max, self.max_width ); } self.count += 1; } } } pub fn remove(&mut self, key: &u64) -> bool { if key >= &self.min { // if asked to remove something bigger than max, then no-op if key < &self.max { let address = self.get_address(key); let get = self.bits.get(address); if get { self.count -= 1; self.bits.set(address, false); self.purge(key); } get } else { false } } else { // asked to remove something < min. would be in excess if it exists let remove = self.excess.remove(key); if remove { self.count -= 1; } remove } } fn all_items_in_excess(&self) -> bool { self.excess.len() == self.count } // after removing 'key' where 'key' = min, make min the correct new min value fn purge(&mut self, key: &u64) { if self.count > 0 && !self.all_items_in_excess() { if key == &self.min { let start = self.min + 1; // min just got removed for key in start..self.max { if self.contains_assume_in_range(&key) { self.min = key; break; } } } } else { // The idea is that there are no items in the bitfield anymore. // But, there MAY be items in excess. The model works such that items < min go into excess. // So, after purging all items from bitfield, we hold max to be what it previously was, but set min to max. // Thus, if we lookup >= max, answer is always false without having to look in excess. 
// If we changed max here to 0, we would lose the ability to know the range of items in excess (if any). // So, now, with min updated = max: // If we lookup < max, then we first check min. // If >= min, then we look in bitfield. // Otherwise, we look in excess since the request is < min. // So, resetting min like this after a remove results in the correct behavior for the model. // Later, if we insert and there are 0 items total (excess + bitfield), then we reset min/max to reflect the new item only. self.min = self.max; } } fn contains_assume_in_range(&self, key: &u64) -> bool { // the result may be aliased. Caller is responsible for determining key is in range. let address = self.get_address(key); self.bits.get(address) } // This is the 99% use case. // This needs be fast for the most common case of asking for key >= min. pub fn contains(&self, key: &u64) -> bool { if key < &self.max { if key >= &self.min { // in the bitfield range self.contains_assume_in_range(key) } else { self.excess.contains(key) } } else { false } } pub fn len(&self) -> usize { self.count } pub fn is_empty(&self) -> bool { self.len() == 0 } pub fn clear(&mut self) { let mut n = Self::new(self.max_width); std::mem::swap(&mut n, self); } pub fn max(&self) -> u64 { self.max } pub fn get_all(&self) -> Vec<u64> { let mut all = Vec::with_capacity(self.count); self.excess.iter().for_each(|slot| all.push(*slot)); for key in self.min..self.max { if self.contains_assume_in_range(&key) { all.push(key); } } all } } #[derive(Debug)] pub struct RootsTracker { roots: RollingBitField, max_root: Slot, uncleaned_roots: HashSet<Slot>, previous_uncleaned_roots: HashSet<Slot>, } impl Default for RootsTracker { fn default() -> Self { // we expect to keep a rolling set of 400k slots around at a time // 4M gives us plenty of extra(?!) room to handle a width 10x what we should need. 
// cost is 4M bits of memory, which is .5MB RootsTracker::new(4194304) } } impl RootsTracker { pub fn new(max_width: u64) -> Self { Self { roots: RollingBitField::new(max_width), max_root: 0, uncleaned_roots: HashSet::new(), previous_uncleaned_roots: HashSet::new(), } } pub fn min_root(&self) -> Option<Slot> { self.roots.min() } } #[derive(Debug, Default)] pub struct AccountsIndexRootsStats { pub roots_len: usize, pub uncleaned_roots_len: usize, pub previous_uncleaned_roots_len: usize, pub roots_range: u64, pub rooted_cleaned_count: usize, pub unrooted_cleaned_count: usize, } pub struct AccountsIndexIterator<'a, T> { account_maps: &'a LockMapType<T>, start_bound: Bound<Pubkey>, end_bound: Bound<Pubkey>, is_finished: bool, } impl<'a, T> AccountsIndexIterator<'a, T> { fn clone_bound(bound: Bound<&Pubkey>) -> Bound<Pubkey> { match bound { Unbounded => Unbounded, Included(k) => Included(*k), Excluded(k) => Excluded(*k), } } pub fn new<R>(account_maps: &'a LockMapType<T>, range: Option<R>) -> Self where R: RangeBounds<Pubkey>, { Self { start_bound: range .as_ref() .map(|r| Self::clone_bound(r.start_bound())) .unwrap_or(Unbounded), end_bound: range .as_ref() .map(|r| Self::clone_bound(r.end_bound())) .unwrap_or(Unbounded), account_maps, is_finished: false, } } } impl<'a, T: 'static + Clone> Iterator for AccountsIndexIterator<'a, T> { type Item = Vec<(Pubkey, AccountMapEntry<T>)>; fn next(&mut self) -> Option<Self::Item> { if self.is_finished { return None; } let chunk: Vec<(Pubkey, AccountMapEntry<T>)> = self .account_maps .read() .unwrap() .range((self.start_bound, self.end_bound)) .map(|(pubkey, account_map_entry)| (*pubkey, account_map_entry.clone())) .take(ITER_BATCH_SIZE) .collect(); if chunk.is_empty() { self.is_finished = true; return None; } self.start_bound = Excluded(chunk.last().unwrap().0); Some(chunk) } } pub trait ZeroLamport { fn is_zero_lamport(&self) -> bool; } type MapType<T> = AccountMap<Pubkey, AccountMapEntry<T>>; type LockMapType<T> = RwLock<MapType<T>>; type AccountMapsWriteLock<'a, T> = RwLockWriteGuard<'a, MapType<T>>; type AccountMapsReadLock<'a, T> = RwLockReadGuard<'a, MapType<T>>; #[derive(Debug, Default)] pub struct ScanSlotTracker { is_removed: bool, ref_count: u64, } impl ScanSlotTracker { pub fn is_removed(&self) -> bool { self.is_removed } pub fn mark_removed(&mut self) { self.is_removed = true; } } #[derive(Debug)] pub struct AccountsIndex<T> { pub account_maps: LockMapType<T>, program_id_index: SecondaryIndex<DashMapSecondaryIndexEntry>, spl_token_mint_index: SecondaryIndex<DashMapSecondaryIndexEntry>, spl_token_owner_index: SecondaryIndex<RwLockSecondaryIndexEntry>, roots_tracker: RwLock<RootsTracker>, ongoing_scan_roots: RwLock<BTreeMap<Slot, u64>>, // Each scan has some latest slot `S` that is the tip of the fork the scan // is iterating over. The unique id of that slot `S` is recorded here (note we don't use // `S` as the id because there can be more than one version of a slot `S`). If a fork // is abandoned, all of the slots on that fork up to `S` will be removed via // `AccountsDb::remove_unrooted_slots()`. When the scan finishes, it'll realize that the // results of the scan may have been corrupted by `remove_unrooted_slots` and abort its results. // // `removed_bank_ids` tracks all the slot ids that were removed via `remove_unrooted_slots()` so any attempted scans // on any of these slots fails. This is safe to purge once the associated Bank is dropped and // scanning the fork with that Bank at the tip is no longer possible. 
pub removed_bank_ids: Mutex<HashSet<BankId>>, zero_lamport_pubkeys: DashSet<Pubkey>, } impl<T> Default for AccountsIndex<T> { fn default() -> Self { Self { account_maps: LockMapType::<T>::default(), program_id_index: SecondaryIndex::<DashMapSecondaryIndexEntry>::new( "program_id_index_stats", ), spl_token_mint_index: SecondaryIndex::<DashMapSecondaryIndexEntry>::new( "spl_token_mint_index_stats", ), spl_token_owner_index: SecondaryIndex::<RwLockSecondaryIndexEntry>::new( "spl_token_owner_index_stats", ), roots_tracker: RwLock::<RootsTracker>::default(), ongoing_scan_roots: RwLock::<BTreeMap<Slot, u64>>::default(), removed_bank_ids: Mutex::<HashSet<BankId>>::default(), zero_lamport_pubkeys: DashSet::<Pubkey>::default(), } } } impl<T: 'static + Clone + IsCached + ZeroLamport> AccountsIndex<T> { fn iter<R>(&self, range: Option<R>) -> AccountsIndexIterator<T> where R: RangeBounds<Pubkey>, { AccountsIndexIterator::new(&self.account_maps, range) } fn do_checked_scan_accounts<F, R>( &self, metric_name: &'static str, ancestors: &Ancestors, scan_bank_id: BankId, func: F, scan_type: ScanTypes<R>, ) -> Result<(), ScanError> where F: FnMut(&Pubkey, (&T, Slot)), R: RangeBounds<Pubkey>, { { let locked_removed_bank_ids = self.removed_bank_ids.lock().unwrap(); if locked_removed_bank_ids.contains(&scan_bank_id) { return Err(ScanError::SlotRemoved { slot: ancestors.max_slot(), bank_id: scan_bank_id, }); } } let max_root = { let mut w_ongoing_scan_roots = self // This lock is also grabbed by clean_accounts(), so clean // has at most cleaned up to the current `max_root` (since // clean only happens *after* BankForks::set_root() which sets // the `max_root`) .ongoing_scan_roots .write() .unwrap(); // `max_root()` grabs a lock while // the `ongoing_scan_roots` lock is held, // make sure inverse doesn't happen to avoid // deadlock let max_root = self.max_root(); *w_ongoing_scan_roots.entry(max_root).or_default() += 1; max_root }; // First we show that for any bank `B` that is a descendant of // the current `max_root`, it must be true that and `B.ancestors.contains(max_root)`, // regardless of the pattern of `squash()` behavior, where `ancestors` is the set // of ancestors that is tracked in each bank. // // Proof: At startup, if starting from a snapshot, generate_index() adds all banks // in the snapshot to the index via `add_root()` and so `max_root` will be the // greatest of these. Thus, so the claim holds at startup since there are no // descendants of `max_root`. // // Now we proceed by induction on each `BankForks::set_root()`. // Assume the claim holds when the `max_root` is `R`. Call the set of // descendants of `R` present in BankForks `R_descendants`. // // Then for any banks `B` in `R_descendants`, it must be that `B.ancestors.contains(S)`, // where `S` is any ancestor of `B` such that `S >= R`. // // For example: // `R` -> `A` -> `C` -> `B` // Then `B.ancestors == {R, A, C}` // // Next we call `BankForks::set_root()` at some descendant of `R`, `R_new`, // where `R_new > R`. // // When we squash `R_new`, `max_root` in the AccountsIndex here is now set to `R_new`, // and all nondescendants of `R_new` are pruned. // // Now consider any outstanding references to banks in the system that are descended from // `max_root == R_new`. Take any one of these references and call it `B`. Because `B` is // a descendant of `R_new`, this means `B` was also a descendant of `R`. Thus `B` // must be a member of `R_descendants` because `B` was constructed and added to // BankForks before the `set_root`. 
// // This means by the guarantees of `R_descendants` described above, because // `R_new` is an ancestor of `B`, and `R < R_new < B`, then `B.ancestors.contains(R_new)`. // // Now until the next `set_root`, any new banks constructed from `new_from_parent` will // also have `max_root == R_new` in their ancestor set, so the claim holds for those descendants // as well. Once the next `set_root` happens, we once again update `max_root` and the same // inductive argument can be applied again to show the claim holds. // Check that the `max_root` is present in `ancestors`. From the proof above, if // `max_root` is not present in `ancestors`, this means the bank `B` with the // given `ancestors` is not descended from `max_root, which means // either: // 1) `B` is on a different fork or // 2) `B` is an ancestor of `max_root`. // In both cases we can ignore the given ancestors and instead just rely on the roots // present as `max_root` indicates the roots present in the index are more up to date // than the ancestors given. let empty = Ancestors::default(); let ancestors = if ancestors.contains_key(&max_root) { ancestors } else { /* This takes of edge cases like: Diagram 1: slot 0 | slot 1 / \ slot 2 | | slot 3 (max root) slot 4 (scan) By the time the scan on slot 4 is called, slot 2 may already have been cleaned by a clean on slot 3, but slot 4 may not have been cleaned. The state in slot 2 would have been purged and is not saved in any roots. In this case, a scan on slot 4 wouldn't accurately reflect the state when bank 4 was frozen. In cases like this, we default to a scan on the latest roots by removing all `ancestors`. */ &empty }; /* Now there are two cases, either `ancestors` is empty or nonempty: 1) If ancestors is empty, then this is the same as a scan on a rooted bank, and `ongoing_scan_roots` provides protection against cleanup of roots necessary for the scan, and passing `Some(max_root)` to `do_scan_accounts()` ensures newer roots don't appear in the scan. 2) If ancestors is non-empty, then from the `ancestors_contains(&max_root)` above, we know that the fork structure must look something like: Diagram 2: Build fork structure: slot 0 | slot 1 (max_root) / \ slot 2 | | slot 3 (potential newer max root) slot 4 | slot 5 (scan) Consider both types of ancestors, ancestor <= `max_root` and ancestor > `max_root`, where `max_root == 1` as illustrated above. a) The set of `ancestors <= max_root` are all rooted, which means their state is protected by the same guarantees as 1). b) As for the `ancestors > max_root`, those banks have at least one reference discoverable through the chain of `Bank::BankRc::parent` starting from the calling bank. For instance bank 5's parent reference keeps bank 4 alive, which will prevent the `Bank::drop()` from running and cleaning up bank 4. Furthermore, no cleans can happen past the saved max_root == 1, so a potential newer max root at 3 will not clean up any of the ancestors > 1, so slot 4 will not be cleaned in the middle of the scan either. 
(NOTE similar reasoning is employed for assert!() justification in AccountsDb::retry_to_get_account_accessor) */ match scan_type { ScanTypes::Unindexed(range) => { // Pass "" not to log metrics, so RPC doesn't get spammy self.do_scan_accounts(metric_name, ancestors, func, range, Some(max_root)); } ScanTypes::Indexed(IndexKey::ProgramId(program_id)) => { self.do_scan_secondary_index( ancestors, func, &self.program_id_index, &program_id, Some(max_root), ); } ScanTypes::Indexed(IndexKey::SplTokenMint(mint_key)) => { self.do_scan_secondary_index( ancestors, func, &self.spl_token_mint_index, &mint_key, Some(max_root), ); } ScanTypes::Indexed(IndexKey::SplTokenOwner(owner_key)) => { self.do_scan_secondary_index( ancestors, func, &self.spl_token_owner_index, &owner_key, Some(max_root), ); } } { let mut ongoing_scan_roots = self.ongoing_scan_roots.write().unwrap(); let count = ongoing_scan_roots.get_mut(&max_root).unwrap(); *count -= 1; if *count == 0 { ongoing_scan_roots.remove(&max_root); } } // If the fork with tip at bank `scan_bank_id` was removed during our scan, then the scan // may have been corrupted, so abort the results. let was_scan_corrupted = self .removed_bank_ids .lock() .unwrap() .contains(&scan_bank_id); if was_scan_corrupted { Err(ScanError::SlotRemoved { slot: ancestors.max_slot(), bank_id: scan_bank_id, }) } else { Ok(()) } } fn do_unchecked_scan_accounts<F, R>( &self, metric_name: &'static str, ancestors: &Ancestors, func: F, range: Option<R>, ) where F: FnMut(&Pubkey, (&T, Slot)), R: RangeBounds<Pubkey>, { self.do_scan_accounts(metric_name, ancestors, func, range, None); } // Scan accounts and return latest version of each account that is either: // 1) rooted or // 2) present in ancestors fn do_scan_accounts<F, R>( &self, metric_name: &'static str, ancestors: &Ancestors, mut func: F, range: Option<R>, max_root: Option<Slot>, ) where F: FnMut(&Pubkey, (&T, Slot)), R: RangeBounds<Pubkey>, { // TODO: expand to use mint index to find the `pubkey_list` below more efficiently // instead of scanning the entire range let mut total_elapsed_timer = Measure::start("total"); let mut num_keys_iterated = 0; let mut latest_slot_elapsed = 0; let mut load_account_elapsed = 0; let mut read_lock_elapsed = 0; let mut iterator_elapsed = 0; let mut iterator_timer = Measure::start("iterator_elapsed"); for pubkey_list in self.iter(range) { iterator_timer.stop(); iterator_elapsed += iterator_timer.as_us(); for (pubkey, list) in pubkey_list { num_keys_iterated += 1; let mut read_lock_timer = Measure::start("read_lock"); let list_r = &list.slot_list.read().unwrap(); read_lock_timer.stop(); read_lock_elapsed += read_lock_timer.as_us(); let mut latest_slot_timer = Measure::start("latest_slot"); if let Some(index) = self.latest_slot(Some(ancestors), list_r, max_root) { latest_slot_timer.stop(); latest_slot_elapsed += latest_slot_timer.as_us(); let mut load_account_timer = Measure::start("load_account"); func(&pubkey, (&list_r[index].1, list_r[index].0)); load_account_timer.stop(); load_account_elapsed += load_account_timer.as_us(); } } iterator_timer = Measure::start("iterator_elapsed"); } total_elapsed_timer.stop(); if !metric_name.is_empty() { datapoint_info!( metric_name, ("total_elapsed", total_elapsed_timer.as_us(), i64), ("latest_slot_elapsed", latest_slot_elapsed, i64), ("read_lock_elapsed", read_lock_elapsed, i64), ("load_account_elapsed", load_account_elapsed, i64), ("iterator_elapsed", iterator_elapsed, i64), ("num_keys_iterated", num_keys_iterated, i64), ) } } fn do_scan_secondary_index< F, 
SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, >( &self, ancestors: &Ancestors, mut func: F, index: &SecondaryIndex<SecondaryIndexEntryType>, index_key: &Pubkey, max_root: Option<Slot>, ) where F: FnMut(&Pubkey, (&T, Slot)), { for pubkey in index.get(index_key) { // Maybe these reads from the AccountsIndex can be batched every time it // grabs the read lock as well... if let AccountIndexGetResult::Found(list_r, index) = self.get(&pubkey, Some(ancestors), max_root) { func( &pubkey, (&list_r.slot_list()[index].1, list_r.slot_list()[index].0), ); } } } pub fn get_account_read_entry(&self, pubkey: &Pubkey) -> Option<ReadAccountMapEntry<T>> { let lock = self.get_account_maps_read_lock(); self.get_account_read_entry_with_lock(pubkey, &lock) } pub fn get_account_read_entry_with_lock( &self, pubkey: &Pubkey, lock: &AccountMapsReadLock<'_, T>, ) -> Option<ReadAccountMapEntry<T>> { lock.get(pubkey) .cloned() .map(ReadAccountMapEntry::from_account_map_entry) } fn get_account_write_entry(&self, pubkey: &Pubkey) -> Option<WriteAccountMapEntry<T>> { self.account_maps .read() .unwrap() .get(pubkey) .cloned() .map(WriteAccountMapEntry::from_account_map_entry) } fn insert_new_entry_if_missing( &self, pubkey: &Pubkey, slot: Slot, info: &T, w_account_maps: Option<&mut AccountMapsWriteLock<T>>, ) -> Option<WriteAccountMapEntry<T>> { let new_entry = WriteAccountMapEntry::new_entry_after_update(slot, info); match w_account_maps { Some(w_account_maps) => { self.insert_new_entry_if_missing_with_lock(pubkey, w_account_maps, new_entry) } None => { let mut w_account_maps = self.get_account_maps_write_lock(); self.insert_new_entry_if_missing_with_lock(pubkey, &mut w_account_maps, new_entry) } } } // return None if item was created new // if entry for pubkey already existed, return Some(entry). Caller needs to call entry.update. 
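    // A sketch of the intended call pattern (cf. `insert_new_entry_if_missing` above, which
    // is the in-tree caller; `reclaims` here is whatever SlotList the caller is filling):
    //
    //     let new_entry = WriteAccountMapEntry::new_entry_after_update(slot, &info);
    //     let mut w_account_maps = self.get_account_maps_write_lock();
    //     if let Some(mut existing) =
    //         self.insert_new_entry_if_missing_with_lock(&pubkey, &mut w_account_maps, new_entry)
    //     {
    //         // the key was already present: apply the update to the existing entry
    //         existing.update(slot, info, &mut reclaims);
    //     }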
fn insert_new_entry_if_missing_with_lock( &self, pubkey: &Pubkey, w_account_maps: &mut AccountMapsWriteLock<T>, new_entry: AccountMapEntry<T>, ) -> Option<WriteAccountMapEntry<T>> { let mut is_newly_inserted = false; let account_entry = w_account_maps.entry(*pubkey).or_insert_with(|| { is_newly_inserted = true; new_entry }); if is_newly_inserted { None } else { Some(WriteAccountMapEntry::from_account_map_entry( account_entry.clone(), )) } } fn get_account_write_entry_else_create( &self, pubkey: &Pubkey, slot: Slot, info: &T, ) -> Option<WriteAccountMapEntry<T>> { let w_account_entry = self.get_account_write_entry(pubkey); w_account_entry.or_else(|| self.insert_new_entry_if_missing(pubkey, slot, info, None)) } pub fn handle_dead_keys( &self, dead_keys: &[&Pubkey], account_indexes: &AccountSecondaryIndexes, ) { if !dead_keys.is_empty() { for key in dead_keys.iter() { let mut w_index = self.get_account_maps_write_lock(); if let btree_map::Entry::Occupied(index_entry) = w_index.entry(**key) { if index_entry.get().slot_list.read().unwrap().is_empty() { index_entry.remove(); // Note it's only safe to remove all the entries for this key // because we have the lock for this key's entry in the AccountsIndex, // so no other thread is also updating the index self.purge_secondary_indexes_by_inner_key(key, account_indexes); } } } } } /// call func with every pubkey and index visible from a given set of ancestors pub(crate) fn scan_accounts<F>( &self, ancestors: &Ancestors, scan_bank_id: BankId, func: F, ) -> Result<(), ScanError> where F: FnMut(&Pubkey, (&T, Slot)), { // Pass "" not to log metrics, so RPC doesn't get spammy self.do_checked_scan_accounts( "", ancestors, scan_bank_id, func, ScanTypes::Unindexed(None::<Range<Pubkey>>), ) } pub(crate) fn unchecked_scan_accounts<F>( &self, metric_name: &'static str, ancestors: &Ancestors, func: F, ) where F: FnMut(&Pubkey, (&T, Slot)), { self.do_unchecked_scan_accounts(metric_name, ancestors, func, None::<Range<Pubkey>>); } /// call func with every pubkey and index visible from a given set of ancestors with range pub(crate) fn range_scan_accounts<F, R>( &self, metric_name: &'static str, ancestors: &Ancestors, range: R, func: F, ) where F: FnMut(&Pubkey, (&T, Slot)), R: RangeBounds<Pubkey>, { // Only the rent logic should be calling this, which doesn't need the safety checks self.do_unchecked_scan_accounts(metric_name, ancestors, func, Some(range)); } /// call func with every pubkey and index visible from a given set of ancestors pub(crate) fn index_scan_accounts<F>( &self, ancestors: &Ancestors, scan_bank_id: BankId, index_key: IndexKey, func: F, ) -> Result<(), ScanError> where F: FnMut(&Pubkey, (&T, Slot)), { // Pass "" not to log metrics, so RPC doesn't get spammy self.do_checked_scan_accounts( "", ancestors, scan_bank_id, func, ScanTypes::<Range<Pubkey>>::Indexed(index_key), ) } pub fn get_rooted_entries(&self, slice: SlotSlice<T>, max: Option<Slot>) -> SlotList<T> { let max = max.unwrap_or(Slot::MAX); let lock = &self.roots_tracker.read().unwrap().roots; slice .iter() .filter(|(slot, _)| *slot <= max && lock.contains(slot)) .cloned() .collect() } // returns the rooted entries and the storage ref count pub fn roots_and_ref_count( &self, locked_account_entry: &ReadAccountMapEntry<T>, max: Option<Slot>, ) -> (SlotList<T>, RefCount) { ( self.get_rooted_entries(locked_account_entry.slot_list(), max), locked_account_entry.ref_count().load(Ordering::Relaxed), ) } pub fn purge_exact<'a, C>( &'a self, pubkey: &Pubkey, slots_to_purge: &'a C, reclaims: &mut 
        SlotList<T>,
    ) -> bool
    where
        C: Contains<'a, Slot>,
    {
        if let Some(mut write_account_map_entry) = self.get_account_write_entry(pubkey) {
            write_account_map_entry.slot_list_mut(|slot_list| {
                slot_list.retain(|(slot, item)| {
                    let should_purge = slots_to_purge.contains(slot);
                    if should_purge {
                        reclaims.push((*slot, item.clone()));
                        false
                    } else {
                        true
                    }
                });
                slot_list.is_empty()
            })
        } else {
            true
        }
    }

    pub fn min_ongoing_scan_root(&self) -> Option<Slot> {
        self.ongoing_scan_roots
            .read()
            .unwrap()
            .keys()
            .next()
            .cloned()
    }

    // Given a SlotSlice `L`, a list of ancestors and a maximum slot, find the latest element
    // in `L`, where the slot `S` is an ancestor or root, and if `S` is a root, then `S <= max_root`
    fn latest_slot(
        &self,
        ancestors: Option<&Ancestors>,
        slice: SlotSlice<T>,
        max_root: Option<Slot>,
    ) -> Option<usize> {
        let mut current_max = 0;
        let mut rv = None;
        if let Some(ancestors) = ancestors {
            if !ancestors.is_empty() {
                for (i, (slot, _t)) in slice.iter().rev().enumerate() {
                    if (rv.is_none() || *slot > current_max) && ancestors.contains_key(slot) {
                        rv = Some(i);
                        current_max = *slot;
                    }
                }
            }
        }

        let max_root = max_root.unwrap_or(Slot::MAX);
        let mut tracker = None;

        for (i, (slot, _t)) in slice.iter().rev().enumerate() {
            if (rv.is_none() || *slot > current_max) && *slot <= max_root {
                let lock = match tracker {
                    Some(inner) => inner,
                    None => self.roots_tracker.read().unwrap(),
                };
                if lock.roots.contains(slot) {
                    rv = Some(i);
                    current_max = *slot;
                }
                tracker = Some(lock);
            }
        }

        rv.map(|index| slice.len() - 1 - index)
    }

    /// Get an account.
    /// The latest account that appears in `ancestors` or `roots` is returned.
    pub(crate) fn get(
        &self,
        pubkey: &Pubkey,
        ancestors: Option<&Ancestors>,
        max_root: Option<Slot>,
    ) -> AccountIndexGetResult<'_, T> {
        let read_lock = self.account_maps.read().unwrap();
        let account = read_lock
            .get(pubkey)
            .cloned()
            .map(ReadAccountMapEntry::from_account_map_entry);

        match account {
            Some(locked_entry) => {
                drop(read_lock);
                let slot_list = locked_entry.slot_list();
                let found_index = self.latest_slot(ancestors, slot_list, max_root);
                match found_index {
                    Some(found_index) => AccountIndexGetResult::Found(locked_entry, found_index),
                    None => AccountIndexGetResult::NotFoundOnFork,
                }
            }
            None => AccountIndexGetResult::Missing(read_lock),
        }
    }

    // Get the maximum root <= `max_allowed_root` from the given `slice`
    fn get_newest_root_in_slot_list(
        roots: &RollingBitField,
        slice: SlotSlice<T>,
        max_allowed_root: Option<Slot>,
    ) -> Slot {
        let mut max_root = 0;
        for (f, _) in slice.iter() {
            if let Some(max_allowed_root) = max_allowed_root {
                if *f > max_allowed_root {
                    continue;
                }
            }
            if *f > max_root && roots.contains(f) {
                max_root = *f;
            }
        }
        max_root
    }

    pub(crate) fn update_secondary_indexes(
        &self,
        pubkey: &Pubkey,
        account_owner: &Pubkey,
        account_data: &[u8],
        account_indexes: &AccountSecondaryIndexes,
    ) {
        if account_indexes.is_empty() {
            return;
        }

        if account_indexes.contains(&AccountIndex::ProgramId)
            && account_indexes.include_key(account_owner)
        {
            self.program_id_index.insert(account_owner, pubkey);
        }

        // Note: because of the check below on the account data length, when an
        // account hits zero lamports and is reset to AccountSharedData::Default, we skip
        // the updates to the secondary indexes below.
        //
        // Skipping means not updating the secondary index to mark the account as missing.
        // This doesn't introduce false positives during a scan because the caller to scan
        // provides the ancestors to check.
        // So even if a zero-lamport account is not yet removed from the secondary index, the
        // scan function will:
        // 1) consult the primary index via `get(&pubkey, Some(ancestors), max_root)`
        //    and find the zero-lamport version
        // 2) when the fetch from storage occurs, it will return AccountSharedData::Default
        //    (as the persisted tombstone for snapshots). This will then ultimately be
        //    filtered out by post-scan filters, like in `get_filtered_spl_token_accounts_by_owner()`.
        if *account_owner == inline_spl_token_v2_0::id()
            && account_data.len() == inline_spl_token_v2_0::state::Account::get_packed_len()
        {
            if account_indexes.contains(&AccountIndex::SplTokenOwner) {
                let owner_key = Pubkey::new(
                    &account_data[SPL_TOKEN_ACCOUNT_OWNER_OFFSET
                        ..SPL_TOKEN_ACCOUNT_OWNER_OFFSET + PUBKEY_BYTES],
                );
                if account_indexes.include_key(&owner_key) {
                    self.spl_token_owner_index.insert(&owner_key, pubkey);
                }
            }

            if account_indexes.contains(&AccountIndex::SplTokenMint) {
                let mint_key = Pubkey::new(
                    &account_data[SPL_TOKEN_ACCOUNT_MINT_OFFSET
                        ..SPL_TOKEN_ACCOUNT_MINT_OFFSET + PUBKEY_BYTES],
                );
                if account_indexes.include_key(&mint_key) {
                    self.spl_token_mint_index.insert(&mint_key, pubkey);
                }
            }
        }
    }

    fn get_account_maps_write_lock(&self) -> AccountMapsWriteLock<T> {
        self.account_maps.write().unwrap()
    }

    pub(crate) fn get_account_maps_read_lock(&self) -> AccountMapsReadLock<T> {
        self.account_maps.read().unwrap()
    }

    // Functionally the same as `upsert`, but:
    // 1. operates on a batch of items
    // 2. holds the write lock for the duration of adding the items
    // Can save time when inserting lots of new keys.
    // But it does NOT update the secondary indexes.
    // This is designed to be called at startup time.
    // Returns, for each item, whether an entry for that pubkey already existed.
    #[allow(clippy::needless_collect)]
    pub(crate) fn insert_new_if_missing_into_primary_index(
        &self,
        slot: Slot,
        items: Vec<(&Pubkey, T)>,
    ) -> Vec<bool> {
        let potentially_new_items = items
            .iter()
            .map(|(_pubkey, account_info)| {
                // this value is equivalent to what update() below would have created if we inserted a new item
                WriteAccountMapEntry::new_entry_after_update(slot, account_info)
            })
            .collect::<Vec<_>>(); // collect here so we have created all data prior to obtaining the lock

        let mut _reclaims = SlotList::new();
        let mut w_account_maps = self.get_account_maps_write_lock();
        items
            .into_iter()
            .zip(potentially_new_items.into_iter())
            .map(|((pubkey, account_info), new_item)| {
                let account_entry = self.insert_new_entry_if_missing_with_lock(
                    pubkey,
                    &mut w_account_maps,
                    new_item,
                );
                if account_info.is_zero_lamport() {
                    self.zero_lamport_pubkeys.insert(*pubkey);
                }
                if let Some(mut w_account_entry) = account_entry {
                    w_account_entry.update(slot, account_info, &mut _reclaims);
                    true
                } else {
                    false
                }
            })
            .collect()
    }

    // Updates the given pubkey at the given slot with the new account information.
    // Returns true if the pubkey was newly inserted into the index, otherwise, if the
    // pubkey updates an existing entry in the index, returns false.
    pub fn upsert(
        &self,
        slot: Slot,
        pubkey: &Pubkey,
        account_owner: &Pubkey,
        account_data: &[u8],
        account_indexes: &AccountSecondaryIndexes,
        account_info: T,
        reclaims: &mut SlotList<T>,
    ) -> bool {
        let is_newly_inserted = {
            let w_account_entry =
                self.get_account_write_entry_else_create(pubkey, slot, &account_info);
            // We don't atomically update both primary index and secondary index together.
            // This certainly creates a small time window with inconsistent state across the two indexes.
            // However, this is acceptable because:
            //
            // - A strictly consistent view at any given moment in time is not necessary, because the only
            //   use case for the secondary index is `scan`, and scans are only supported on (and only
            //   require consistency for) frozen banks, while this inconsistency is only possible on
            //   working banks.
            //
            // - The secondary index is never consulted as the primary source of truth for gets/stores.
            //   So, what the accounts_index sees alone is sufficient as a source of truth for other
            //   non-scan account operations.
            if account_info.is_zero_lamport() {
                self.zero_lamport_pubkeys.insert(*pubkey);
            }
            if let Some(mut w_account_entry) = w_account_entry {
                w_account_entry.update(slot, account_info, reclaims);
                false
            } else {
                true
            }
        };
        self.update_secondary_indexes(pubkey, account_owner, account_data, account_indexes);
        is_newly_inserted
    }

    pub fn remove_zero_lamport_key(&self, pubkey: &Pubkey) {
        self.zero_lamport_pubkeys.remove(pubkey);
    }

    pub fn zero_lamport_pubkeys(&self) -> &DashSet<Pubkey> {
        &self.zero_lamport_pubkeys
    }

    pub fn unref_from_storage(&self, pubkey: &Pubkey) {
        if let Some(locked_entry) = self.get_account_read_entry(pubkey) {
            locked_entry.unref();
        }
    }

    pub fn ref_count_from_storage(&self, pubkey: &Pubkey) -> RefCount {
        if let Some(locked_entry) = self.get_account_read_entry(pubkey) {
            locked_entry.ref_count().load(Ordering::Relaxed)
        } else {
            0
        }
    }

    fn purge_secondary_indexes_by_inner_key<'a>(
        &'a self,
        inner_key: &Pubkey,
        account_indexes: &AccountSecondaryIndexes,
    ) {
        if account_indexes.contains(&AccountIndex::ProgramId) {
            self.program_id_index.remove_by_inner_key(inner_key);
        }

        if account_indexes.contains(&AccountIndex::SplTokenOwner) {
            self.spl_token_owner_index.remove_by_inner_key(inner_key);
        }

        if account_indexes.contains(&AccountIndex::SplTokenMint) {
            self.spl_token_mint_index.remove_by_inner_key(inner_key);
        }
    }

    fn purge_older_root_entries(
        &self,
        slot_list: &mut SlotList<T>,
        reclaims: &mut SlotList<T>,
        max_clean_root: Option<Slot>,
    ) {
        let roots_tracker = &self.roots_tracker.read().unwrap();
        let newest_root_in_slot_list =
            Self::get_newest_root_in_slot_list(&roots_tracker.roots, slot_list, max_clean_root);
        let max_clean_root = max_clean_root.unwrap_or(roots_tracker.max_root);

        let mut purged_slots: HashSet<Slot> = HashSet::new();
        slot_list.retain(|(slot, value)| {
            let should_purge =
                Self::can_purge_older_entries(max_clean_root, newest_root_in_slot_list, *slot)
                    && !value.is_cached();
            if should_purge {
                reclaims.push((*slot, value.clone()));
                purged_slots.insert(*slot);
            }
            !should_purge
        });
    }

    pub fn clean_rooted_entries(
        &self,
        pubkey: &Pubkey,
        reclaims: &mut SlotList<T>,
        max_clean_root: Option<Slot>,
    ) {
        let mut is_slot_list_empty = false;
        if let Some(mut locked_entry) = self.get_account_write_entry(pubkey) {
            locked_entry.slot_list_mut(|slot_list| {
                self.purge_older_root_entries(slot_list, reclaims, max_clean_root);
                is_slot_list_empty = slot_list.is_empty();
            });
        }

        // If the slot list is empty, remove the pubkey from `account_maps`. Make sure to grab the
        // lock and double check the slot list is still empty, because another writer could have
        // locked and inserted the pubkey in between the time `is_slot_list_empty` was set to true
        // and the call to remove() below.
        if is_slot_list_empty {
            let mut w_maps = self.get_account_maps_write_lock();
            if let Some(x) = w_maps.get(pubkey) {
                if x.slot_list.read().unwrap().is_empty() {
                    w_maps.remove(pubkey);
                }
            }
        }
    }

    /// When can an entry be purged?
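    /// (For a concrete instance, compare `test_purge_older_root_entries` below: with roots
    /// {1, 5, 6}, a slot list [(1, _), (2, _), (5, _), (9, _)], and max_clean_root = 5, the
    /// entries at slots 1 and 2 are purged while slots 5 and 9 survive.)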
    ///
    /// If we get a slot update where slot != newest_root_in_slot_list for an account where slot <
    /// max_clean_root, then we know it's safe to delete because:
    ///
    /// a) If slot < newest_root_in_slot_list, then we know the update is outdated by a later rooted
    /// update, namely the one in newest_root_in_slot_list.
    ///
    /// b) If slot > newest_root_in_slot_list, then because slot < max_clean_root and we know there are
    /// no roots in the slot list between newest_root_in_slot_list and max_clean_root (otherwise there
    /// would be a bigger newest_root_in_slot_list, which is a contradiction), then we know slot must be
    /// an unrooted slot less than max_clean_root and thus safe to clean as well.
    fn can_purge_older_entries(
        max_clean_root: Slot,
        newest_root_in_slot_list: Slot,
        slot: Slot,
    ) -> bool {
        slot < max_clean_root && slot != newest_root_in_slot_list
    }

    /// Given a list of slots, return a new list of only the slots that are rooted
    pub fn get_rooted_from_list<'a>(&self, slots: impl Iterator<Item = &'a Slot>) -> Vec<Slot> {
        let roots_tracker = self.roots_tracker.read().unwrap();
        slots
            .filter_map(|s| {
                if roots_tracker.roots.contains(s) {
                    Some(*s)
                } else {
                    None
                }
            })
            .collect()
    }

    pub fn is_root(&self, slot: Slot) -> bool {
        self.roots_tracker.read().unwrap().roots.contains(&slot)
    }

    pub fn add_root(&self, slot: Slot, caching_enabled: bool) {
        let mut w_roots_tracker = self.roots_tracker.write().unwrap();
        w_roots_tracker.roots.insert(slot);
        // we delay cleaning until flushing!
        if !caching_enabled {
            w_roots_tracker.uncleaned_roots.insert(slot);
        }
        // `AccountsDb::flush_accounts_cache()` relies on roots being added in order
        assert!(slot >= w_roots_tracker.max_root);
        w_roots_tracker.max_root = slot;
    }

    pub fn add_uncleaned_roots<I>(&self, roots: I)
    where
        I: IntoIterator<Item = Slot>,
    {
        let mut w_roots_tracker = self.roots_tracker.write().unwrap();
        w_roots_tracker.uncleaned_roots.extend(roots);
    }

    pub fn max_root(&self) -> Slot {
        self.roots_tracker.read().unwrap().max_root
    }

    /// Remove the slot when the storage for the slot is freed.
    /// Accounts no longer reference this slot.
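    ///
    /// A minimal usage sketch (mirroring `test_clean_first` in the tests below):
    ///
    /// ```text
    /// let index = AccountsIndex::<bool>::default();
    /// index.add_root(0, false);
    /// index.add_root(1, false);
    /// // slot 0's storage was freed, so drop it from the roots tracker
    /// let stats = index.clean_dead_slot(0);
    /// assert!(stats.is_some() && !index.is_root(0) && index.is_root(1));
    /// ```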
pub fn clean_dead_slot(&self, slot: Slot) -> Option<AccountsIndexRootsStats> { let (roots_len, uncleaned_roots_len, previous_uncleaned_roots_len, roots_range) = { let mut w_roots_tracker = self.roots_tracker.write().unwrap(); let removed_from_unclean_roots = w_roots_tracker.uncleaned_roots.remove(&slot); let removed_from_previous_uncleaned_roots = w_roots_tracker.previous_uncleaned_roots.remove(&slot); if !w_roots_tracker.roots.remove(&slot) { if removed_from_unclean_roots { error!("clean_dead_slot-removed_from_unclean_roots: {}", slot); inc_new_counter_error!("clean_dead_slot-removed_from_unclean_roots", 1, 1); } if removed_from_previous_uncleaned_roots { error!( "clean_dead_slot-removed_from_previous_uncleaned_roots: {}", slot ); inc_new_counter_error!( "clean_dead_slot-removed_from_previous_uncleaned_roots", 1, 1 ); } return None; } ( w_roots_tracker.roots.len(), w_roots_tracker.uncleaned_roots.len(), w_roots_tracker.previous_uncleaned_roots.len(), w_roots_tracker.roots.range_width(), ) }; Some(AccountsIndexRootsStats { roots_len, uncleaned_roots_len, previous_uncleaned_roots_len, roots_range, rooted_cleaned_count: 0, unrooted_cleaned_count: 0, }) } pub fn min_root(&self) -> Option<Slot> { self.roots_tracker.read().unwrap().min_root() } pub fn reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) -> HashSet<Slot> { let mut cleaned_roots = HashSet::new(); let mut w_roots_tracker = self.roots_tracker.write().unwrap(); w_roots_tracker.uncleaned_roots.retain(|root| { let is_cleaned = max_clean_root .map(|max_clean_root| *root <= max_clean_root) .unwrap_or(true); if is_cleaned { cleaned_roots.insert(*root); } // Only keep the slots that have yet to be cleaned !is_cleaned }); std::mem::replace(&mut w_roots_tracker.previous_uncleaned_roots, cleaned_roots) } #[cfg(test)] pub fn clear_uncleaned_roots(&self, max_clean_root: Option<Slot>) -> HashSet<Slot> { let mut cleaned_roots = HashSet::new(); let mut w_roots_tracker = self.roots_tracker.write().unwrap(); w_roots_tracker.uncleaned_roots.retain(|root| { let is_cleaned = max_clean_root .map(|max_clean_root| *root <= max_clean_root) .unwrap_or(true); if is_cleaned { cleaned_roots.insert(*root); } // Only keep the slots that have yet to be cleaned !is_cleaned }); cleaned_roots } pub fn is_uncleaned_root(&self, slot: Slot) -> bool { self.roots_tracker .read() .unwrap() .uncleaned_roots .contains(&slot) } pub fn num_roots(&self) -> usize { self.roots_tracker.read().unwrap().roots.len() } pub fn all_roots(&self) -> Vec<Slot> { let tracker = self.roots_tracker.read().unwrap(); tracker.roots.get_all() } #[cfg(test)] pub fn clear_roots(&self) { self.roots_tracker.write().unwrap().roots.clear() } #[cfg(test)] pub fn uncleaned_roots_len(&self) -> usize { self.roots_tracker.read().unwrap().uncleaned_roots.len() } #[cfg(test)] // filter any rooted entries and return them along with a bool that indicates // if this account has no more entries. Note this does not update the secondary // indexes! 
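    // For example (hypothetical values): with a slot list [(1, a), (2, b)] where only
    // slot 1 is rooted, this returns (vec![(1, a)], false) and leaves [(2, b)] in place.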
pub fn purge_roots(&self, pubkey: &Pubkey) -> (SlotList<T>, bool) { let mut write_account_map_entry = self.get_account_write_entry(pubkey).unwrap(); write_account_map_entry.slot_list_mut(|slot_list| { let reclaims = self.get_rooted_entries(slot_list, None); slot_list.retain(|(slot, _)| !self.is_root(*slot)); (reclaims, slot_list.is_empty()) }) } } #[cfg(test)] pub mod tests { use super::*; use panoptes_sdk::signature::{Keypair, Signer}; pub enum SecondaryIndexTypes<'a> { RwLock(&'a SecondaryIndex<RwLockSecondaryIndexEntry>), DashMap(&'a SecondaryIndex<DashMapSecondaryIndexEntry>), } pub fn spl_token_mint_index_enabled() -> AccountSecondaryIndexes { let mut account_indexes = HashSet::new(); account_indexes.insert(AccountIndex::SplTokenMint); AccountSecondaryIndexes { indexes: account_indexes, keys: None, } } pub fn spl_token_owner_index_enabled() -> AccountSecondaryIndexes { let mut account_indexes = HashSet::new(); account_indexes.insert(AccountIndex::SplTokenOwner); AccountSecondaryIndexes { indexes: account_indexes, keys: None, } } impl<'a, T: 'static> AccountIndexGetResult<'a, T> { pub fn unwrap(self) -> (ReadAccountMapEntry<T>, usize) { match self { AccountIndexGetResult::Found(lock, size) => (lock, size), _ => { panic!("trying to unwrap AccountIndexGetResult with non-Success result"); } } } pub fn is_none(&self) -> bool { !self.is_some() } pub fn is_some(&self) -> bool { matches!(self, AccountIndexGetResult::Found(_lock, _size)) } pub fn map<V, F: FnOnce((ReadAccountMapEntry<T>, usize)) -> V>(self, f: F) -> Option<V> { match self { AccountIndexGetResult::Found(lock, size) => Some(f((lock, size))), _ => None, } } } fn create_dashmap_secondary_index_state() -> (usize, usize, AccountSecondaryIndexes) { { // Check that we're actually testing the correct variant let index = AccountsIndex::<bool>::default(); let _type_check = SecondaryIndexTypes::DashMap(&index.spl_token_mint_index); } (0, PUBKEY_BYTES, spl_token_mint_index_enabled()) } fn create_rwlock_secondary_index_state() -> (usize, usize, AccountSecondaryIndexes) { { // Check that we're actually testing the correct variant let index = AccountsIndex::<bool>::default(); let _type_check = SecondaryIndexTypes::RwLock(&index.spl_token_owner_index); } ( SPL_TOKEN_ACCOUNT_OWNER_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET + PUBKEY_BYTES, spl_token_owner_index_enabled(), ) } #[test] fn test_bitfield_delete_non_excess() { panoptes_logger::setup(); let len = 16; let mut bitfield = RollingBitField::new(len); assert_eq!(bitfield.min(), None); bitfield.insert(0); assert_eq!(bitfield.min(), Some(0)); let too_big = len + 1; bitfield.insert(too_big); assert!(bitfield.contains(&0)); assert!(bitfield.contains(&too_big)); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.excess.len(), 1); assert_eq!(bitfield.min, too_big); assert_eq!(bitfield.min(), Some(0)); assert_eq!(bitfield.max, too_big + 1); // delete the thing that is NOT in excess bitfield.remove(&too_big); assert_eq!(bitfield.min, too_big + 1); assert_eq!(bitfield.max, too_big + 1); let too_big_times_2 = too_big * 2; bitfield.insert(too_big_times_2); assert!(bitfield.contains(&0)); assert!(bitfield.contains(&too_big_times_2)); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.excess.len(), 1); assert_eq!(bitfield.min(), bitfield.excess.iter().min().copied()); assert_eq!(bitfield.min, too_big_times_2); assert_eq!(bitfield.max, too_big_times_2 + 1); bitfield.remove(&0); bitfield.remove(&too_big_times_2); assert!(bitfield.is_empty()); let other = 5; bitfield.insert(other); 
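        // once fully drained, inserting a small slot starts a fresh, non-excess range
        // (checked below: empty excess, min == other, max == other + 1)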
assert!(bitfield.contains(&other)); assert!(bitfield.excess.is_empty()); assert_eq!(bitfield.min, other); assert_eq!(bitfield.max, other + 1); } #[test] fn test_bitfield_insert_excess() { panoptes_logger::setup(); let len = 16; let mut bitfield = RollingBitField::new(len); bitfield.insert(0); let too_big = len + 1; bitfield.insert(too_big); assert!(bitfield.contains(&0)); assert!(bitfield.contains(&too_big)); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.excess.len(), 1); assert!(bitfield.excess.contains(&0)); assert_eq!(bitfield.min, too_big); assert_eq!(bitfield.max, too_big + 1); // delete the thing that IS in excess // this does NOT affect min/max bitfield.remove(&0); assert_eq!(bitfield.min, too_big); assert_eq!(bitfield.max, too_big + 1); // re-add to excess bitfield.insert(0); assert!(bitfield.contains(&0)); assert!(bitfield.contains(&too_big)); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.excess.len(), 1); assert_eq!(bitfield.min, too_big); assert_eq!(bitfield.max, too_big + 1); } #[test] fn test_bitfield_permutations() { panoptes_logger::setup(); let mut bitfield = RollingBitField::new(2097152); let mut hash = HashSet::new(); let min = 101_000; let width = 400_000; let dead = 19; let mut slot = min; while hash.len() < width { slot += 1; if slot % dead == 0 { continue; } hash.insert(slot); bitfield.insert(slot); } compare(&hash, &bitfield); let max = slot + 1; let mut time = Measure::start(""); let mut count = 0; for slot in (min - 10)..max + 100 { if hash.contains(&slot) { count += 1; } } time.stop(); let mut time2 = Measure::start(""); let mut count2 = 0; for slot in (min - 10)..max + 100 { if bitfield.contains(&slot) { count2 += 1; } } time2.stop(); info!( "{}ms, {}ms, {} ratio", time.as_ms(), time2.as_ms(), time.as_ns() / time2.as_ns() ); assert_eq!(count, count2); } #[test] #[should_panic(expected = "assertion failed: max_width.is_power_of_two()")] fn test_bitfield_power_2() { let _ = RollingBitField::new(3); } #[test] #[should_panic(expected = "assertion failed: max_width > 0")] fn test_bitfield_0() { let _ = RollingBitField::new(0); } fn setup_empty(width: u64) -> RollingBitFieldTester { let bitfield = RollingBitField::new(width); let hash_set = HashSet::new(); RollingBitFieldTester { bitfield, hash_set } } struct RollingBitFieldTester { pub bitfield: RollingBitField, pub hash_set: HashSet<u64>, } impl RollingBitFieldTester { fn insert(&mut self, slot: u64) { self.bitfield.insert(slot); self.hash_set.insert(slot); assert!(self.bitfield.contains(&slot)); compare(&self.hash_set, &self.bitfield); } fn remove(&mut self, slot: &u64) -> bool { let result = self.bitfield.remove(slot); assert_eq!(result, self.hash_set.remove(slot)); assert!(!self.bitfield.contains(slot)); self.compare(); result } fn compare(&self) { compare(&self.hash_set, &self.bitfield); } } fn setup_wide(width: u64, start: u64) -> RollingBitFieldTester { let mut tester = setup_empty(width); tester.compare(); tester.insert(start); tester.insert(start + 1); tester } #[test] fn test_bitfield_insert_wide() { panoptes_logger::setup(); let width = 16; let start = 0; let mut tester = setup_wide(width, start); let slot = start + width; let all = tester.bitfield.get_all(); // higher than max range by 1 tester.insert(slot); let bitfield = tester.bitfield; for slot in all { assert!(bitfield.contains(&slot)); } assert_eq!(bitfield.excess.len(), 1); assert_eq!(bitfield.count, 3); } #[test] fn test_bitfield_insert_wide_before() { panoptes_logger::setup(); let width = 16; let start = 100; let mut bitfield = 
setup_wide(width, start).bitfield; let slot = start + 1 - width; // assert here - would make min too low, causing too wide of a range bitfield.insert(slot); assert_eq!(1, bitfield.excess.len()); assert_eq!(3, bitfield.count); assert!(bitfield.contains(&slot)); } #[test] fn test_bitfield_insert_wide_before_ok() { panoptes_logger::setup(); let width = 16; let start = 100; let mut bitfield = setup_wide(width, start).bitfield; let slot = start + 2 - width; // this item would make our width exactly equal to what is allowed, but it is also inserting prior to min bitfield.insert(slot); assert_eq!(1, bitfield.excess.len()); assert!(bitfield.contains(&slot)); assert_eq!(3, bitfield.count); } #[test] fn test_bitfield_contains_wide_no_assert() { { let width = 16; let start = 0; let bitfield = setup_wide(width, start).bitfield; let mut slot = width; assert!(!bitfield.contains(&slot)); slot += 1; assert!(!bitfield.contains(&slot)); } { let width = 16; let start = 100; let bitfield = setup_wide(width, start).bitfield; // too large let mut slot = width; assert!(!bitfield.contains(&slot)); slot += 1; assert!(!bitfield.contains(&slot)); // too small, before min slot = 0; assert!(!bitfield.contains(&slot)); } } #[test] fn test_bitfield_remove_wide() { let width = 16; let start = 0; let mut tester = setup_wide(width, start); let slot = width; assert!(!tester.remove(&slot)); } #[test] fn test_bitfield_excess2() { panoptes_logger::setup(); let width = 16; let mut tester = setup_empty(width); let slot = 100; // insert 1st slot tester.insert(slot); assert!(tester.bitfield.excess.is_empty()); // insert a slot before the previous one. this is 'excess' since we don't use this pattern in normal operation let slot2 = slot - 1; tester.insert(slot2); assert_eq!(tester.bitfield.excess.len(), 1); // remove the 1st slot. we will be left with only excess tester.remove(&slot); assert!(tester.bitfield.contains(&slot2)); assert_eq!(tester.bitfield.excess.len(), 1); // re-insert at valid range, making sure we don't insert into excess tester.insert(slot); assert_eq!(tester.bitfield.excess.len(), 1); // remove the excess slot. tester.remove(&slot2); assert!(tester.bitfield.contains(&slot)); assert!(tester.bitfield.excess.is_empty()); // re-insert the excess slot tester.insert(slot2); assert_eq!(tester.bitfield.excess.len(), 1); } #[test] fn test_bitfield_excess() { panoptes_logger::setup(); // start at slot 0 or a separate, higher slot for width in [16, 4194304].iter() { let width = *width; let mut tester = setup_empty(width); for start in [0, width * 5].iter().cloned() { // recreate means create empty bitfield with each iteration, otherwise re-use for recreate in [false, true].iter().cloned() { let max = start + 3; // first root to add for slot in start..max { // subsequent roots to add for slot2 in (slot + 1)..max { // reverse_slots = 1 means add slots in reverse order (max to min). This causes us to add second and later slots to excess. 
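                        // ('excess' entries are slots inserted below the current `min`; the
                        // bitfield tracks them in its separate `excess` collection rather than
                        // in the rolling bit range)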
for reverse_slots in [false, true].iter().cloned() { let maybe_reverse = |slot| { if reverse_slots { max - slot } else { slot } }; if recreate { let recreated = setup_empty(width); tester = recreated; } // insert for slot in slot..=slot2 { let slot_use = maybe_reverse(slot); tester.insert(slot_use); debug!( "slot: {}, bitfield: {:?}, reverse: {}, len: {}, excess: {:?}", slot_use, tester.bitfield, reverse_slots, tester.bitfield.len(), tester.bitfield.excess ); assert!( (reverse_slots && tester.bitfield.len() > 1) ^ tester.bitfield.excess.is_empty() ); } if start > width * 2 { assert!(!tester.bitfield.contains(&(start - width * 2))); } assert!(!tester.bitfield.contains(&(start + width * 2))); let len = (slot2 - slot + 1) as usize; assert_eq!(tester.bitfield.len(), len); assert_eq!(tester.bitfield.count, len); // remove for slot in slot..=slot2 { let slot_use = maybe_reverse(slot); assert!(tester.remove(&slot_use)); assert!( (reverse_slots && !tester.bitfield.is_empty()) ^ tester.bitfield.excess.is_empty() ); } assert!(tester.bitfield.is_empty()); assert_eq!(tester.bitfield.count, 0); if start > width * 2 { assert!(!tester.bitfield.contains(&(start - width * 2))); } assert!(!tester.bitfield.contains(&(start + width * 2))); } } } } } } } #[test] fn test_bitfield_remove_wide_before() { let width = 16; let start = 100; let mut tester = setup_wide(width, start); let slot = start + 1 - width; assert!(!tester.remove(&slot)); } fn compare_internal(hashset: &HashSet<u64>, bitfield: &RollingBitField) { assert_eq!(hashset.len(), bitfield.len()); assert_eq!(hashset.is_empty(), bitfield.is_empty()); if !bitfield.is_empty() { let mut min = Slot::MAX; let mut overall_min = Slot::MAX; let mut max = Slot::MIN; for item in bitfield.get_all() { assert!(hashset.contains(&item)); if !bitfield.excess.contains(&item) { min = std::cmp::min(min, item); max = std::cmp::max(max, item); } overall_min = std::cmp::min(overall_min, item); } assert_eq!(bitfield.min(), Some(overall_min)); assert_eq!(bitfield.get_all().len(), hashset.len()); // range isn't tracked for excess items if bitfield.excess.len() != bitfield.len() { let width = if bitfield.is_empty() { 0 } else { max + 1 - min }; assert!( bitfield.range_width() >= width, "hashset: {:?}, bitfield: {:?}, bitfield.range_width: {}, width: {}", hashset, bitfield.get_all(), bitfield.range_width(), width, ); } } else { assert_eq!(bitfield.min(), None); } } fn compare(hashset: &HashSet<u64>, bitfield: &RollingBitField) { compare_internal(hashset, bitfield); let clone = bitfield.clone(); compare_internal(hashset, &clone); assert!(clone.eq(bitfield)); assert_eq!(clone, *bitfield); } #[test] fn test_bitfield_functionality() { panoptes_logger::setup(); // bitfield sizes are powers of 2, cycle through values of 1, 2, 4, .. 
2^9 for power in 0..10 { let max_bitfield_width = 2u64.pow(power) as u64; let width_iteration_max = if max_bitfield_width > 1 { // add up to 2 items so we can test out multiple items 3 } else { // 0 or 1 items is all we can fit with a width of 1 item 2 }; for width in 0..width_iteration_max { let mut tester = setup_empty(max_bitfield_width); let min = 101_000; let dead = 19; let mut slot = min; while tester.hash_set.len() < width { slot += 1; if max_bitfield_width > 2 && slot % dead == 0 { // with max_bitfield_width of 1 and 2, there is no room for dead slots continue; } tester.insert(slot); } let max = slot + 1; for slot in (min - 10)..max + 100 { assert_eq!( tester.bitfield.contains(&slot), tester.hash_set.contains(&slot) ); } if width > 0 { assert!(tester.remove(&slot)); assert!(!tester.remove(&slot)); } let all = tester.bitfield.get_all(); // remove the rest, including a call that removes slot again for item in all.iter() { assert!(tester.remove(item)); assert!(!tester.remove(item)); } let min = max + ((width * 2) as u64) + 3; let slot = min; // several widths past previous min let max = slot + 1; tester.insert(slot); for slot in (min - 10)..max + 100 { assert_eq!( tester.bitfield.contains(&slot), tester.hash_set.contains(&slot) ); } } } } fn bitfield_insert_and_test(bitfield: &mut RollingBitField, slot: Slot) { let len = bitfield.len(); let old_all = bitfield.get_all(); let (new_min, new_max) = if bitfield.is_empty() { (slot, slot + 1) } else { ( std::cmp::min(bitfield.min, slot), std::cmp::max(bitfield.max, slot + 1), ) }; bitfield.insert(slot); assert_eq!(bitfield.min, new_min); assert_eq!(bitfield.max, new_max); assert_eq!(bitfield.len(), len + 1); assert!(!bitfield.is_empty()); assert!(bitfield.contains(&slot)); // verify aliasing is what we expect assert!(bitfield.contains_assume_in_range(&(slot + bitfield.max_width))); let get_all = bitfield.get_all(); old_all .into_iter() .for_each(|slot| assert!(get_all.contains(&slot))); assert!(get_all.contains(&slot)); assert!(get_all.len() == len + 1); } #[test] fn test_bitfield_clear() { let mut bitfield = RollingBitField::new(4); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); bitfield_insert_and_test(&mut bitfield, 0); bitfield.clear(); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); bitfield_insert_and_test(&mut bitfield, 1); bitfield.clear(); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); bitfield_insert_and_test(&mut bitfield, 4); } #[test] fn test_bitfield_wrapping() { let mut bitfield = RollingBitField::new(4); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); bitfield_insert_and_test(&mut bitfield, 0); assert_eq!(bitfield.get_all(), vec![0]); bitfield_insert_and_test(&mut bitfield, 2); assert_eq!(bitfield.get_all(), vec![0, 2]); bitfield_insert_and_test(&mut bitfield, 3); bitfield.insert(3); // redundant insert assert_eq!(bitfield.get_all(), vec![0, 2, 3]); assert!(bitfield.remove(&0)); assert!(!bitfield.remove(&0)); assert_eq!(bitfield.min, 2); assert_eq!(bitfield.max, 4); assert_eq!(bitfield.len(), 2); assert!(!bitfield.remove(&0)); // redundant remove assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.get_all(), vec![2, 3]); bitfield.insert(4); // wrapped around value - same bit as '0' assert_eq!(bitfield.min, 2); assert_eq!(bitfield.max, 5); assert_eq!(bitfield.len(), 3); assert_eq!(bitfield.get_all(), vec![2, 3, 4]); assert!(bitfield.remove(&2)); assert_eq!(bitfield.min, 3); 
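        // removing the old minimum advances `min` to the next live bit; `max` stays put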
assert_eq!(bitfield.max, 5); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.get_all(), vec![3, 4]); assert!(bitfield.remove(&3)); assert_eq!(bitfield.min, 4); assert_eq!(bitfield.max, 5); assert_eq!(bitfield.len(), 1); assert_eq!(bitfield.get_all(), vec![4]); assert!(bitfield.remove(&4)); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); bitfield_insert_and_test(&mut bitfield, 8); assert!(bitfield.remove(&8)); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); bitfield_insert_and_test(&mut bitfield, 9); assert!(bitfield.remove(&9)); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); } #[test] fn test_bitfield_smaller() { // smaller bitfield, fewer entries, including 0 panoptes_logger::setup(); for width in 0..34 { let mut bitfield = RollingBitField::new(4096); let mut hash_set = HashSet::new(); let min = 1_010_000; let dead = 19; let mut slot = min; while hash_set.len() < width { slot += 1; if slot % dead == 0 { continue; } hash_set.insert(slot); bitfield.insert(slot); } let max = slot + 1; let mut time = Measure::start(""); let mut count = 0; for slot in (min - 10)..max + 100 { if hash_set.contains(&slot) { count += 1; } } time.stop(); let mut time2 = Measure::start(""); let mut count2 = 0; for slot in (min - 10)..max + 100 { if bitfield.contains(&slot) { count2 += 1; } } time2.stop(); info!( "{}, {}, {}", time.as_ms(), time2.as_ms(), time.as_ns() / time2.as_ns() ); assert_eq!(count, count2); } } #[test] fn test_get_empty() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let ancestors = Ancestors::default(); assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_none()); assert!(index.get(&key.pubkey(), None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); } #[test] fn test_secondary_index_include_exclude() { let pk1 = Pubkey::new_unique(); let pk2 = Pubkey::new_unique(); let mut index = AccountSecondaryIndexes::default(); assert!(!index.contains(&AccountIndex::ProgramId)); index.indexes.insert(AccountIndex::ProgramId); assert!(index.contains(&AccountIndex::ProgramId)); assert!(index.include_key(&pk1)); assert!(index.include_key(&pk2)); let exclude = false; index.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [pk1].iter().cloned().collect::<HashSet<_>>(), exclude, }); assert!(index.include_key(&pk1)); assert!(!index.include_key(&pk2)); let exclude = true; index.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [pk1].iter().cloned().collect::<HashSet<_>>(), exclude, }); assert!(!index.include_key(&pk1)); assert!(index.include_key(&pk2)); let exclude = true; index.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [pk1, pk2].iter().cloned().collect::<HashSet<_>>(), exclude, }); assert!(!index.include_key(&pk1)); assert!(!index.include_key(&pk2)); let exclude = false; index.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [pk1, pk2].iter().cloned().collect::<HashSet<_>>(), exclude, }); assert!(index.include_key(&pk1)); assert!(index.include_key(&pk2)); } #[test] fn test_insert_no_ancestors() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); let ancestors = Ancestors::default(); assert!(index.get(&key.pubkey(), 
Some(&ancestors), None).is_none()); assert!(index.get(&key.pubkey(), None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); } type AccountInfoTest = f64; impl IsCached for AccountInfoTest { fn is_cached(&self) -> bool { true } } impl ZeroLamport for AccountInfoTest { fn is_zero_lamport(&self) -> bool { true } } #[test] fn test_insert_new_with_lock_no_ancestors() { let key = Keypair::new(); let pubkey = &key.pubkey(); let slot = 0; let index = AccountsIndex::<bool>::default(); let account_info = true; let items = vec![(pubkey, account_info)]; index.insert_new_if_missing_into_primary_index(slot, items); assert!(index.zero_lamport_pubkeys().is_empty()); let mut ancestors = Ancestors::default(); assert!(index.get(pubkey, Some(&ancestors), None).is_none()); assert!(index.get(pubkey, None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); assert!(index.get(pubkey, Some(&ancestors), None).is_some()); assert_eq!(index.ref_count_from_storage(pubkey), 1); index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); // not zero lamports let index = AccountsIndex::<AccountInfoTest>::default(); let account_info: AccountInfoTest = 0 as AccountInfoTest; let items = vec![(pubkey, account_info)]; index.insert_new_if_missing_into_primary_index(slot, items); assert!(!index.zero_lamport_pubkeys().is_empty()); let mut ancestors = Ancestors::default(); assert!(index.get(pubkey, Some(&ancestors), None).is_none()); assert!(index.get(pubkey, None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); assert!(index.get(pubkey, Some(&ancestors), None).is_some()); assert_eq!(index.ref_count_from_storage(pubkey), 0); // cached, so 0 index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); } #[test] fn test_new_entry() { let slot = 0; // account_info type that IS cached let account_info = AccountInfoTest::default(); let new_entry = WriteAccountMapEntry::new_entry_after_update(slot, &account_info); assert_eq!(new_entry.ref_count.load(Ordering::Relaxed), 0); assert_eq!(new_entry.slot_list.read().unwrap().capacity(), 1); assert_eq!( new_entry.slot_list.read().unwrap().to_vec(), vec![(slot, account_info)] ); // account_info type that is NOT cached let account_info = true; let new_entry = WriteAccountMapEntry::new_entry_after_update(slot, &account_info); assert_eq!(new_entry.ref_count.load(Ordering::Relaxed), 1); assert_eq!(new_entry.slot_list.read().unwrap().capacity(), 1); assert_eq!( new_entry.slot_list.read().unwrap().to_vec(), vec![(slot, account_info)] ); } #[test] fn test_batch_insert() { let slot0 = 0; let key0 = Keypair::new().pubkey(); let key1 = Keypair::new().pubkey(); let index = AccountsIndex::<bool>::default(); let account_infos = [true, false]; index.insert_new_if_missing_into_primary_index( slot0, vec![(&key0, account_infos[0]), (&key1, account_infos[1])], ); for (i, key) in [key0, key1].iter().enumerate() { let entry = index.get_account_read_entry(key).unwrap(); assert_eq!(entry.ref_count().load(Ordering::Relaxed), 1); assert_eq!(entry.slot_list().to_vec(), vec![(slot0, account_infos[i]),]); } } fn test_new_entry_code_paths_helper< T: 'static + Clone + IsCached + ZeroLamport + std::cmp::PartialEq + std::fmt::Debug, >( account_infos: [T; 2], 
is_cached: bool, upsert: bool, ) { let slot0 = 0; let slot1 = 1; let key = Keypair::new().pubkey(); let index = AccountsIndex::<T>::default(); let mut gc = Vec::new(); if upsert { // insert first entry for pubkey. This will use new_entry_after_update and not call update. index.upsert( slot0, &key, &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), account_infos[0].clone(), &mut gc, ); } else { index.insert_new_if_missing_into_primary_index( slot0, vec![(&key, account_infos[0].clone())], ); } assert!(gc.is_empty()); // verify the added entry matches expected { let entry = index.get_account_read_entry(&key).unwrap(); assert_eq!( entry.ref_count().load(Ordering::Relaxed), if is_cached { 0 } else { 1 } ); let expected = vec![(slot0, account_infos[0].clone())]; assert_eq!(entry.slot_list().to_vec(), expected); let new_entry = WriteAccountMapEntry::new_entry_after_update(slot0, &account_infos[0]); assert_eq!( entry.slot_list().to_vec(), new_entry.slot_list.read().unwrap().to_vec(), ); } // insert second entry for pubkey. This will use update and NOT use new_entry_after_update. if upsert { index.upsert( slot1, &key, &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), account_infos[1].clone(), &mut gc, ); } else { index.insert_new_if_missing_into_primary_index( slot1, vec![(&key, account_infos[1].clone())], ); } assert!(gc.is_empty()); for lock in &[false, true] { let read_lock = if *lock { Some(index.get_account_maps_read_lock()) } else { None }; let entry = if *lock { index .get_account_read_entry_with_lock(&key, read_lock.as_ref().unwrap()) .unwrap() } else { index.get_account_read_entry(&key).unwrap() }; assert_eq!( entry.ref_count().load(Ordering::Relaxed), if is_cached { 0 } else { 2 } ); assert_eq!( entry.slot_list().to_vec(), vec![ (slot0, account_infos[0].clone()), (slot1, account_infos[1].clone()) ] ); let new_entry = WriteAccountMapEntry::new_entry_after_update(slot1, &account_infos[1]); assert_eq!(entry.slot_list()[1], new_entry.slot_list.read().unwrap()[0],); } } #[test] fn test_new_entry_and_update_code_paths() { for is_upsert in &[false, true] { // account_info type that IS cached test_new_entry_code_paths_helper([1.0, 2.0], true, *is_upsert); // account_info type that is NOT cached test_new_entry_code_paths_helper([true, false], false, *is_upsert); } } #[test] fn test_insert_with_lock_no_ancestors() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let slot = 0; let account_info = true; let new_entry = WriteAccountMapEntry::new_entry_after_update(slot, &account_info); let mut w_account_maps = index.get_account_maps_write_lock(); let write = index.insert_new_entry_if_missing_with_lock( &key.pubkey(), &mut w_account_maps, new_entry, ); assert!(write.is_none()); drop(w_account_maps); let mut ancestors = Ancestors::default(); assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_none()); assert!(index.get(&key.pubkey(), None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_some()); index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); } #[test] fn test_insert_wrong_ancestors() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); let ancestors = 
vec![(1, 1)].into_iter().collect(); assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); } #[test] fn test_insert_with_ancestors() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); let ancestors = vec![(0, 0)].into_iter().collect(); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (0, true)); let mut num = 0; let mut found_key = false; index.unchecked_scan_accounts("", &ancestors, |pubkey, _index| { if pubkey == &key.pubkey() { found_key = true }; num += 1 }); assert_eq!(num, 1); assert!(found_key); } fn setup_accounts_index_keys(num_pubkeys: usize) -> (AccountsIndex<bool>, Vec<Pubkey>) { let index = AccountsIndex::<bool>::default(); let root_slot = 0; let mut pubkeys: Vec<Pubkey> = std::iter::repeat_with(|| { let new_pubkey = panoptes_sdk::pubkey::new_rand(); index.upsert( root_slot, &new_pubkey, &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut vec![], ); new_pubkey }) .take(num_pubkeys.saturating_sub(1)) .collect(); if num_pubkeys != 0 { pubkeys.push(Pubkey::default()); index.upsert( root_slot, &Pubkey::default(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut vec![], ); } index.add_root(root_slot, false); (index, pubkeys) } fn run_test_range( index: &AccountsIndex<bool>, pubkeys: &[Pubkey], start_bound: Bound<usize>, end_bound: Bound<usize>, ) { // Exclusive `index_start` let (pubkey_start, index_start) = match start_bound { Unbounded => (Unbounded, 0), Included(i) => (Included(pubkeys[i]), i), Excluded(i) => (Excluded(pubkeys[i]), i + 1), }; // Exclusive `index_end` let (pubkey_end, index_end) = match end_bound { Unbounded => (Unbounded, pubkeys.len()), Included(i) => (Included(pubkeys[i]), i + 1), Excluded(i) => (Excluded(pubkeys[i]), i), }; let pubkey_range = (pubkey_start, pubkey_end); let ancestors = Ancestors::default(); let mut scanned_keys = HashSet::new(); index.range_scan_accounts("", &ancestors, pubkey_range, |pubkey, _index| { scanned_keys.insert(*pubkey); }); let mut expected_len = 0; for key in &pubkeys[index_start..index_end] { expected_len += 1; assert!(scanned_keys.contains(key)); } assert_eq!(scanned_keys.len(), expected_len); } fn run_test_range_indexes( index: &AccountsIndex<bool>, pubkeys: &[Pubkey], start: Option<usize>, end: Option<usize>, ) { let start_options = start .map(|i| vec![Included(i), Excluded(i)]) .unwrap_or_else(|| vec![Unbounded]); let end_options = end .map(|i| vec![Included(i), Excluded(i)]) .unwrap_or_else(|| vec![Unbounded]); for start in &start_options { for end in &end_options { run_test_range(index, pubkeys, *start, *end); } } } #[test] fn test_range_scan_accounts() { let (index, mut pubkeys) = setup_accounts_index_keys(3 * ITER_BATCH_SIZE); pubkeys.sort(); run_test_range_indexes(&index, &pubkeys, None, None); run_test_range_indexes(&index, &pubkeys, Some(ITER_BATCH_SIZE), None); run_test_range_indexes(&index, &pubkeys, None, Some(2 * ITER_BATCH_SIZE as usize)); run_test_range_indexes( &index, &pubkeys, Some(ITER_BATCH_SIZE as usize), Some(2 * ITER_BATCH_SIZE as usize), ); run_test_range_indexes( &index, &pubkeys, Some(ITER_BATCH_SIZE as usize), Some(2 * ITER_BATCH_SIZE as usize - 1), ); run_test_range_indexes( &index, 
&pubkeys, Some(ITER_BATCH_SIZE - 1_usize), Some(2 * ITER_BATCH_SIZE as usize + 1), ); } fn run_test_scan_accounts(num_pubkeys: usize) { let (index, _) = setup_accounts_index_keys(num_pubkeys); let ancestors = Ancestors::default(); let mut scanned_keys = HashSet::new(); index.unchecked_scan_accounts("", &ancestors, |pubkey, _index| { scanned_keys.insert(*pubkey); }); assert_eq!(scanned_keys.len(), num_pubkeys); } #[test] fn test_scan_accounts() { run_test_scan_accounts(0); run_test_scan_accounts(1); run_test_scan_accounts(ITER_BATCH_SIZE * 10); run_test_scan_accounts(ITER_BATCH_SIZE * 10 - 1); run_test_scan_accounts(ITER_BATCH_SIZE * 10 + 1); } #[test] fn test_accounts_iter_finished() { let (index, _) = setup_accounts_index_keys(0); let mut iter = index.iter(None::<Range<Pubkey>>); assert!(iter.next().is_none()); let mut gc = vec![]; index.upsert( 0, &panoptes_sdk::pubkey::new_rand(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(iter.next().is_none()); } #[test] fn test_is_root() { let index = AccountsIndex::<bool>::default(); assert!(!index.is_root(0)); index.add_root(0, false); assert!(index.is_root(0)); } #[test] fn test_insert_with_root() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); index.add_root(0, false); let (list, idx) = index.get(&key.pubkey(), None, None).unwrap(); assert_eq!(list.slot_list()[idx], (0, true)); } #[test] fn test_clean_first() { let index = AccountsIndex::<bool>::default(); index.add_root(0, false); index.add_root(1, false); index.clean_dead_slot(0); assert!(index.is_root(1)); assert!(!index.is_root(0)); } #[test] fn test_clean_last() { //this behavior might be undefined, clean up should only occur on older slots let index = AccountsIndex::<bool>::default(); index.add_root(0, false); index.add_root(1, false); index.clean_dead_slot(1); assert!(!index.is_root(1)); assert!(index.is_root(0)); } #[test] fn test_clean_and_unclean_slot() { let index = AccountsIndex::<bool>::default(); assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len()); index.add_root(0, false); index.add_root(1, false); assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 0, index .roots_tracker .read() .unwrap() .previous_uncleaned_roots .len() ); index.reset_uncleaned_roots(None); assert_eq!(2, index.roots_tracker.read().unwrap().roots.len()); assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 2, index .roots_tracker .read() .unwrap() .previous_uncleaned_roots .len() ); index.add_root(2, false); index.add_root(3, false); assert_eq!(4, index.roots_tracker.read().unwrap().roots.len()); assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 2, index .roots_tracker .read() .unwrap() .previous_uncleaned_roots .len() ); index.clean_dead_slot(1); assert_eq!(3, index.roots_tracker.read().unwrap().roots.len()); assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 1, index .roots_tracker .read() .unwrap() .previous_uncleaned_roots .len() ); index.clean_dead_slot(2); assert_eq!(2, index.roots_tracker.read().unwrap().roots.len()); assert_eq!(1, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 1, index .roots_tracker .read() .unwrap() .previous_uncleaned_roots .len() ); } #[test] fn test_update_last_wins() { let 
key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let ancestors = vec![(0, 0)].into_iter().collect(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (0, true)); drop(list); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), false, &mut gc, ); assert_eq!(gc, vec![(0, true)]); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (0, false)); } #[test] fn test_update_new_slot() { panoptes_logger::setup(); let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let ancestors = vec![(0, 0)].into_iter().collect(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), false, &mut gc, ); assert!(gc.is_empty()); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (0, true)); let ancestors = vec![(1, 0)].into_iter().collect(); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (1, false)); } #[test] fn test_update_gc_purged_slot() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), false, &mut gc, ); index.upsert( 2, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); index.upsert( 3, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); index.add_root(0, false); index.add_root(1, false); index.add_root(3, false); index.upsert( 4, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); // Updating index should not purge older roots, only purges // previous updates within the same slot assert_eq!(gc, vec![]); let (list, idx) = index.get(&key.pubkey(), None, None).unwrap(); assert_eq!(list.slot_list()[idx], (3, true)); let mut num = 0; let mut found_key = false; index.unchecked_scan_accounts("", &Ancestors::default(), |pubkey, _index| { if pubkey == &key.pubkey() { found_key = true; assert_eq!(_index, (&true, 3)); }; num += 1 }); assert_eq!(num, 1); assert!(found_key); } #[test] fn test_purge() { let key = Keypair::new(); let index = AccountsIndex::<u64>::default(); let mut gc = Vec::new(); assert!(index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), 12, &mut gc )); assert!(!index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), 10, &mut gc )); let purges = index.purge_roots(&key.pubkey()); assert_eq!(purges, (vec![], false)); index.add_root(1, false); let purges = index.purge_roots(&key.pubkey()); assert_eq!(purges, (vec![(1, 10)], true)); assert!(!index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), 9, &mut gc )); } #[test] fn test_latest_slot() { let slot_slice = vec![(0, true), (5, true), (3, 
true), (7, true)]; let index = AccountsIndex::<bool>::default(); // No ancestors, no root, should return None assert!(index.latest_slot(None, &slot_slice, None).is_none()); // Given a root, should return the root index.add_root(5, false); assert_eq!(index.latest_slot(None, &slot_slice, None).unwrap(), 1); // Given a max_root == root, should still return the root assert_eq!(index.latest_slot(None, &slot_slice, Some(5)).unwrap(), 1); // Given a max_root < root, should filter out the root assert!(index.latest_slot(None, &slot_slice, Some(4)).is_none()); // Given a max_root, should filter out roots < max_root, but specified // ancestors should not be affected let ancestors = vec![(3, 1), (7, 1)].into_iter().collect(); assert_eq!( index .latest_slot(Some(&ancestors), &slot_slice, Some(4)) .unwrap(), 3 ); assert_eq!( index .latest_slot(Some(&ancestors), &slot_slice, Some(7)) .unwrap(), 3 ); // Given no max_root, should just return the greatest ancestor or root assert_eq!( index .latest_slot(Some(&ancestors), &slot_slice, None) .unwrap(), 3 ); } fn run_test_purge_exact_secondary_index< SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, >( index: &AccountsIndex<bool>, secondary_index: &SecondaryIndex<SecondaryIndexEntryType>, key_start: usize, key_end: usize, secondary_indexes: &AccountSecondaryIndexes, ) { // No roots, should be no reclaims let slots = vec![1, 2, 5, 9]; let index_key = Pubkey::new_unique(); let account_key = Pubkey::new_unique(); let mut account_data = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes())); // Insert slots into secondary index for slot in &slots { index.upsert( *slot, &account_key, // Make sure these accounts are added to secondary index &inline_spl_token_v2_0::id(), &account_data, secondary_indexes, true, &mut vec![], ); } // Only one top level index entry exists assert_eq!(secondary_index.index.get(&index_key).unwrap().len(), 1); // In the reverse index, one account maps across multiple slots // to the same top level key assert_eq!( secondary_index .reverse_index .get(&account_key) .unwrap() .value() .read() .unwrap() .len(), 1 ); index.purge_exact( &account_key, &slots.into_iter().collect::<HashSet<Slot>>(), &mut vec![], ); index.handle_dead_keys(&[&account_key], secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } #[test] fn test_purge_exact_dashmap_secondary_index() { let (key_start, key_end, secondary_indexes) = create_dashmap_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_purge_exact_secondary_index( &index, &index.spl_token_mint_index, key_start, key_end, &secondary_indexes, ); } #[test] fn test_purge_exact_rwlock_secondary_index() { let (key_start, key_end, secondary_indexes) = create_rwlock_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_purge_exact_secondary_index( &index, &index.spl_token_owner_index, key_start, key_end, &secondary_indexes, ); } #[test] fn test_purge_older_root_entries() { // No roots, should be no reclaims let index = AccountsIndex::<bool>::default(); let mut slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; let mut reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert!(reclaims.is_empty()); assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); // Add a later root, earlier slots should be reclaimed slot_list = vec![(1, true), (2, 
true), (5, true), (9, true)]; index.add_root(1, false); // Note 2 is not a root index.add_root(5, false); reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); // Add a later root that is not in the list, should not affect the outcome slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; index.add_root(6, false); reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); // Pass a max root >= any root in the slot list, should not affect the // outcome slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(6)); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); // Pass a max root, earlier slots should be reclaimed slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(5)); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); // Pass a max root of 2. The latest root <= 2 is 1, because 2 is not a root, // so nothing will be purged slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(2)); assert!(reclaims.is_empty()); assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); // Pass a max root of 1. The latest root <= 1 is 1, // so again nothing will be purged slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(1)); assert!(reclaims.is_empty()); assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); // Pass a max root that doesn't exist in the list but is greater than // some of the roots in the list, shouldn't return those smaller roots slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(7)); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); } fn check_secondary_index_mapping_correct<SecondaryIndexEntryType>( secondary_index: &SecondaryIndex<SecondaryIndexEntryType>, secondary_index_keys: &[Pubkey], account_key: &Pubkey, ) where SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, { // Check secondary index has unique mapping from secondary index key // to the account key and slot for secondary_index_key in secondary_index_keys { assert_eq!(secondary_index.index.len(), secondary_index_keys.len()); let account_key_map = secondary_index.get(secondary_index_key); assert_eq!(account_key_map.len(), 1); assert_eq!(account_key_map, vec![*account_key]); } // Check reverse index contains all of the `secondary_index_keys` let secondary_index_key_map = secondary_index.reverse_index.get(account_key).unwrap(); assert_eq!( &*secondary_index_key_map.value().read().unwrap(), secondary_index_keys ); } fn run_test_secondary_indexes< SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, >( index: &AccountsIndex<bool>, secondary_index: &SecondaryIndex<SecondaryIndexEntryType>, key_start: usize, key_end: usize, secondary_indexes:
&AccountSecondaryIndexes, ) { let mut secondary_indexes = secondary_indexes.clone(); let account_key = Pubkey::new_unique(); let index_key = Pubkey::new_unique(); let mut account_data = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes())); // Wrong program id index.upsert( 0, &account_key, &Pubkey::default(), &account_data, &secondary_indexes, true, &mut vec![], ); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); // Wrong account data size index.upsert( 0, &account_key, &inline_spl_token_v2_0::id(), &account_data[1..], &secondary_indexes, true, &mut vec![], ); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); secondary_indexes.keys = None; // Just right. Inserting the same index multiple times should be ok for _ in 0..2 { index.update_secondary_indexes( &account_key, &inline_spl_token_v2_0::id(), &account_data, &secondary_indexes, ); check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key); } // included assert!(!secondary_index.index.is_empty()); assert!(!secondary_index.reverse_index.is_empty()); secondary_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [index_key].iter().cloned().collect::<HashSet<_>>(), exclude: false, }); secondary_index.index.clear(); secondary_index.reverse_index.clear(); index.update_secondary_indexes( &account_key, &inline_spl_token_v2_0::id(), &account_data, &secondary_indexes, ); assert!(!secondary_index.index.is_empty()); assert!(!secondary_index.reverse_index.is_empty()); check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key); // not-excluded secondary_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [].iter().cloned().collect::<HashSet<_>>(), exclude: true, }); secondary_index.index.clear(); secondary_index.reverse_index.clear(); index.update_secondary_indexes( &account_key, &inline_spl_token_v2_0::id(), &account_data, &secondary_indexes, ); assert!(!secondary_index.index.is_empty()); assert!(!secondary_index.reverse_index.is_empty()); check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key); secondary_indexes.keys = None; index .get_account_write_entry(&account_key) .unwrap() .slot_list_mut(|slot_list| slot_list.clear()); // Everything should be deleted index.handle_dead_keys(&[&account_key], &secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } #[test] fn test_dashmap_secondary_index() { let (key_start, key_end, secondary_indexes) = create_dashmap_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_secondary_indexes( &index, &index.spl_token_mint_index, key_start, key_end, &secondary_indexes, ); } #[test] fn test_rwlock_secondary_index() { let (key_start, key_end, secondary_indexes) = create_rwlock_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_secondary_indexes( &index, &index.spl_token_owner_index, key_start, key_end, &secondary_indexes, ); } fn run_test_secondary_indexes_same_slot_and_forks< SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, >( index: &AccountsIndex<bool>, secondary_index: &SecondaryIndex<SecondaryIndexEntryType>, index_key_start: usize, index_key_end: usize, secondary_indexes: &AccountSecondaryIndexes, ) { let account_key = Pubkey::new_unique(); let secondary_key1 = Pubkey::new_unique(); let secondary_key2 = 
Pubkey::new_unique(); let slot = 1; let mut account_data1 = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data1[index_key_start..index_key_end] .clone_from_slice(&(secondary_key1.to_bytes())); let mut account_data2 = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data2[index_key_start..index_key_end] .clone_from_slice(&(secondary_key2.to_bytes())); // First write one mint index index.upsert( slot, &account_key, &inline_spl_token_v2_0::id(), &account_data1, secondary_indexes, true, &mut vec![], ); // Now write a different mint index for the same account index.upsert( slot, &account_key, &inline_spl_token_v2_0::id(), &account_data2, secondary_indexes, true, &mut vec![], ); // Both pubkeys will now be present in the index check_secondary_index_mapping_correct( secondary_index, &[secondary_key1, secondary_key2], &account_key, ); // If a later slot also introduces secondary_key1, then it should still exist in the index let later_slot = slot + 1; index.upsert( later_slot, &account_key, &inline_spl_token_v2_0::id(), &account_data1, secondary_indexes, true, &mut vec![], ); assert_eq!(secondary_index.get(&secondary_key1), vec![account_key]); // If we set a root at `later_slot`, and clean, then even though the account with secondary_key1 // was outdated by the update in the later slot, the primary account key is still alive, // so both secondary keys will still be kept alive. index.add_root(later_slot, false); index .get_account_write_entry(&account_key) .unwrap() .slot_list_mut(|slot_list| { index.purge_older_root_entries(slot_list, &mut vec![], None) }); check_secondary_index_mapping_correct( secondary_index, &[secondary_key1, secondary_key2], &account_key, ); // Removing the remaining entry for this pubkey in the index should mark the // pubkey as dead and finally remove all the secondary indexes let mut reclaims = vec![]; index.purge_exact(&account_key, &later_slot, &mut reclaims); index.handle_dead_keys(&[&account_key], secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } #[test] fn test_dashmap_secondary_index_same_slot_and_forks() { let (key_start, key_end, account_index) = create_dashmap_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_secondary_indexes_same_slot_and_forks( &index, &index.spl_token_mint_index, key_start, key_end, &account_index, ); } #[test] fn test_rwlock_secondary_index_same_slot_and_forks() { let (key_start, key_end, account_index) = create_rwlock_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_secondary_indexes_same_slot_and_forks( &index, &index.spl_token_owner_index, key_start, key_end, &account_index, ); } impl ZeroLamport for bool { fn is_zero_lamport(&self) -> bool { false } } impl ZeroLamport for u64 { fn is_zero_lamport(&self) -> bool { false } } }
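// A minimal self-contained model of the selection rule exercised by the
// test_latest_slot cases above: an entry is eligible if its slot is in the
// caller's ancestors, or is a root no newer than max_root; the highest
// eligible slot wins. Simplified types; a sketch, not the crate's actual
// implementation.

use std::collections::{HashMap, HashSet};

type Slot = u64;

fn latest_slot_model(
    ancestors: Option<&HashMap<Slot, usize>>,
    slot_list: &[(Slot, bool)],
    roots: &HashSet<Slot>,
    max_root: Option<Slot>,
) -> Option<usize> {
    let mut best: Option<(Slot, usize)> = None;
    for (i, (slot, _)) in slot_list.iter().enumerate() {
        let is_ancestor = ancestors.map_or(false, |a| a.contains_key(slot));
        // A max_root filters out newer roots, but never filters ancestors.
        let is_valid_root = roots.contains(slot) && max_root.map_or(true, |max| *slot <= max);
        if (is_ancestor || is_valid_root) && best.map_or(true, |(s, _)| *slot > s) {
            best = Some((*slot, i));
        }
    }
    best.map(|(_, i)| i)
}

fn main() {
    let slot_list = vec![(0, true), (5, true), (3, true), (7, true)];
    let roots: HashSet<Slot> = [5].into_iter().collect();
    let ancestors: HashMap<Slot, usize> = [(3, 1), (7, 1)].into_iter().collect();
    // Root 5 (index 1) wins without ancestors; ancestor 7 (index 3) wins with them.
    assert_eq!(latest_slot_model(None, &slot_list, &roots, None), Some(1));
    assert_eq!(latest_slot_model(Some(&ancestors), &slot_list, &roots, Some(4)), Some(3));
}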
34.848911
151
0.544418
38de02108cf1dd040a970f1a645c4db7e4158a01
3,797
use crate::bi_type_app::*; pub enum FunctionF {} pub enum FunctionMutF {} pub enum FunctionOnceF {} pub trait IsFnOnce: BiTypeCon { fn apply_once<'a, A: 'a, B: 'a>( f: BiApp<'a, Self, A, B>, a: A, ) -> B; } pub trait IsFnMut: IsFnOnce { fn apply_mut<'a, A: 'a, B: 'a>( f: &mut BiApp<'a, Self, A, B>, a: A, ) -> B; } pub trait IsFn: IsFnMut { fn apply<'a, A: 'a, B: 'a>( f: &BiApp<'a, Self, A, B>, a: A, ) -> B; } impl BiTypeCon for FunctionF {} impl<'a, A: 'a, B: 'a> BiTypeApp<'a, A, B> for FunctionF { type Applied = dyn Fn(A) -> B + 'a; } impl BiTypeCon for FunctionMutF {} impl<'a, A: 'a, B: 'a> BiTypeApp<'a, A, B> for FunctionMutF { type Applied = dyn FnMut(A) -> B + 'a; } impl BiTypeCon for FunctionOnceF {} impl<'a, A: 'a, B: 'a> BiTypeApp<'a, A, B> for FunctionOnceF { type Applied = dyn FnOnce(A) -> B + 'a; } impl IsFn for FunctionF { fn apply<'a, A: 'a, B: 'a>( f: &BiApp<'a, Self, A, B>, a: A, ) -> B { f.get_applied_borrow()(a) } } impl IsFnMut for FunctionF { fn apply_mut<'a, A: 'a, B: 'a>( f: &mut BiApp<'a, Self, A, B>, a: A, ) -> B { f.get_applied_borrow()(a) } } impl IsFnOnce for FunctionF { fn apply_once<'a, A: 'a, B: 'a>( f: BiApp<'a, Self, A, B>, a: A, ) -> B { f.get_applied_box()(a) } } impl IsFnMut for FunctionMutF { fn apply_mut<'a, A: 'a, B: 'a>( f: &mut BiApp<'a, Self, A, B>, a: A, ) -> B { f.get_applied_borrow_mut()(a) } } impl IsFnOnce for FunctionMutF { fn apply_once<'a, A: 'a, B: 'a>( f: BiApp<'a, Self, A, B>, a: A, ) -> B { f.get_applied_box()(a) } } impl IsFnOnce for FunctionOnceF { fn apply_once<'a, A: 'a, B: 'a>( f: BiApp<'a, Self, A, B>, a: A, ) -> B { f.get_applied_box()(a) } } pub fn wrap_function<'a, F: 'a, A: 'a, B: 'a>( f: F ) -> BiApp<'a, FunctionF, A, B> where F: Fn(A) -> B, { struct Applied<F>(F); impl<'a, F: 'a, A: 'a, B: 'a> HasBiTypeApp<'a, FunctionF, A, B> for Applied<F> where F: Fn(A) -> B, FunctionF: BiTypeApp<'a, A, B, Applied = dyn Fn(A) -> B + 'a>, { fn get_applied_box(self: Box<Self>) -> Box<dyn Fn(A) -> B + 'a> { Box::new(self.0) } fn get_applied_borrow(&self) -> &(dyn Fn(A) -> B + 'a) { &self.0 } fn get_applied_borrow_mut(&mut self) -> &mut (dyn Fn(A) -> B + 'a) { &mut self.0 } } Box::new(Applied(f)) } pub fn wrap_function_once<'a, F: 'a, A: 'a, B: 'a>( f: F ) -> BiApp<'a, FunctionOnceF, A, B> where F: FnOnce(A) -> B, { struct Applied<F>(F); impl<'a, F: 'a, A: 'a, B: 'a> HasBiTypeApp<'a, FunctionOnceF, A, B> for Applied<F> where F: FnOnce(A) -> B, FunctionOnceF: BiTypeApp<'a, A, B, Applied = dyn FnOnce(A) -> B + 'a>, { fn get_applied_box(self: Box<Self>) -> Box<dyn FnOnce(A) -> B + 'a> { Box::new(self.0) } fn get_applied_borrow(&self) -> &(dyn FnOnce(A) -> B + 'a) { &self.0 } fn get_applied_borrow_mut(&mut self) -> &mut (dyn FnOnce(A) -> B + 'a) { &mut self.0 } } Box::new(Applied(f)) } pub fn wrap_function_mut<'a, F: 'a, A: 'a, B: 'a>( f: F ) -> BiApp<'a, FunctionMutF, A, B> where F: FnMut(A) -> B, { struct Applied<F>(F); impl<'a, F: 'a, A: 'a, B: 'a> HasBiTypeApp<'a, FunctionMutF, A, B> for Applied<F> where F: FnMut(A) -> B, FunctionMutF: BiTypeApp<'a, A, B, Applied = dyn FnMut(A) -> B + 'a>, { fn get_applied_box(self: Box<Self>) -> Box<dyn FnMut(A) -> B + 'a> { Box::new(self.0) } fn get_applied_borrow(&self) -> &(dyn FnMut(A) -> B + 'a) { &self.0 } fn get_applied_borrow_mut(&mut self) -> &mut (dyn FnMut(A) -> B + 'a) { &mut self.0 } } Box::new(Applied(f)) }
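// A stripped-down, self-contained illustration of the trick used above: an
// uninhabited marker type stands in for the partially-applied `Fn(A) -> B`
// constructor, and the `Applied` associated type recovers the concrete
// trait-object type. The real crate layers `HasBiTypeApp` on top for object
// safety; this sketch omits that.

trait BiTypeApp<'a, A: 'a, B: 'a> {
    type Applied: ?Sized + 'a;
}

enum FunctionF {}

impl<'a, A: 'a, B: 'a> BiTypeApp<'a, A, B> for FunctionF {
    type Applied = dyn Fn(A) -> B + 'a;
}

// Apply a defunctionalized FunctionF value to an argument.
fn apply<'a, A: 'a, B: 'a>(f: &<FunctionF as BiTypeApp<'a, A, B>>::Applied, a: A) -> B {
    f(a)
}

fn main() {
    let double = |x: i32| x * 2;
    // &double coerces to &dyn Fn(i32) -> i32, the Applied type.
    assert_eq!(apply::<i32, i32>(&double, 21), 42);
}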
17.660465
80
0.515407
330876373735488138e8746c273aafe6c9eec8d1
4,581
use super::*; #[test] fn with_different_process_sends_message_when_timer_expires() { with_process_arc(|arc_process| { TestRunner::new(Config::with_source_file(file!())) .run( &(milliseconds(), strategy::term(arc_process.clone())), |(milliseconds, message)| { let time = arc_process.integer(milliseconds).unwrap(); let destination_arc_process = process::test(&arc_process); let destination = destination_arc_process.pid_term(); let result = erlang::start_timer_3::native( arc_process.clone(), time, destination, message, ); prop_assert!( result.is_ok(), "Timer reference not returned. Got {:?}", result ); let timer_reference = result.unwrap(); prop_assert!(timer_reference.is_local_reference()); let timeout_message = timeout_message(timer_reference, message, &arc_process); prop_assert!(!has_message(&destination_arc_process, timeout_message)); thread::sleep(Duration::from_millis(milliseconds + 1)); timer::timeout(); prop_assert!(has_message(&destination_arc_process, timeout_message)); Ok(()) }, ) .unwrap(); }); } #[test] fn with_same_process_sends_message_when_timer_expires() { TestRunner::new(Config::with_source_file(file!())) .run( &(milliseconds(), strategy::process()).prop_flat_map(|(milliseconds, arc_process)| { ( Just(milliseconds), Just(arc_process.clone()), strategy::term(arc_process), ) }), |(milliseconds, arc_process, message)| { let time = arc_process.integer(milliseconds).unwrap(); let destination = arc_process.pid_term(); let result = erlang::start_timer_3::native(arc_process.clone(), time, destination, message); prop_assert!( result.is_ok(), "Timer reference not returned. Got {:?}", result ); let timer_reference = result.unwrap(); prop_assert!(timer_reference.is_local_reference()); let timeout_message = timeout_message(timer_reference, message, &arc_process); prop_assert!(!has_message(&arc_process, timeout_message)); thread::sleep(Duration::from_millis(milliseconds + 1)); timer::timeout(); prop_assert!(has_message(&arc_process, timeout_message)); Ok(()) }, ) .unwrap(); } #[test] fn without_process_sends_nothing_when_timer_expires() { with_process_arc(|arc_process| { TestRunner::new(Config::with_source_file(file!())) .run( &(milliseconds(), strategy::term(arc_process.clone())), |(milliseconds, message)| { let time = arc_process.integer(milliseconds).unwrap(); let destination = next_pid(); let result = erlang::start_timer_3::native( arc_process.clone(), time, destination, message, ); prop_assert!( result.is_ok(), "Timer reference not returned. Got {:?}", result ); let timer_reference = result.unwrap(); prop_assert!(timer_reference.is_local_reference()); let timeout_message = arc_process .tuple_from_slice(&[atom_unchecked("timeout"), timer_reference, message]) .unwrap(); thread::sleep(Duration::from_millis(milliseconds + 1)); timer::timeout(); // does not send to original process either prop_assert!(!has_message(&arc_process, timeout_message)); Ok(()) }, ) .unwrap(); }); }
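// These tests drive proptest's TestRunner directly instead of the proptest!
// macro, which keeps the per-case setup (spawning the destination process,
// advancing the timer wheel) explicit. The same runner pattern against a toy
// property (real proptest API, toy property only):

use proptest::prop_assert;
use proptest::test_runner::{Config, TestRunner};

fn main() {
    // Build a runner tied to this source file, as the tests above do.
    let mut runner = TestRunner::new(Config::with_source_file(file!()));
    runner
        .run(&(0u64..1_000), |milliseconds| {
            // The closure returns Result<(), TestCaseError>; prop_assert!
            // produces the Err side on failure, failing that test case.
            prop_assert!(milliseconds < 1_000);
            Ok(())
        })
        .unwrap();
}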
33.933333
99
0.485702
bb15b168a5688debfce7e69a3b378e33dd623406
34,485
use std::collections::{HashMap, HashSet}; use std::ffi::OsString; use std::path::PathBuf; use lazy_static::lazy_static; use structopt::clap::AppSettings::{ColorAlways, ColoredHelp, DeriveDisplayOrder}; use structopt::{clap, StructOpt}; use syntect::highlighting::Theme as SyntaxTheme; use syntect::parsing::SyntaxSet; use crate::bat_utils::assets::HighlightingAssets; use crate::bat_utils::output::PagingMode; use crate::git_config::{GitConfig, GitConfigEntry}; use crate::options; #[derive(StructOpt, Default)] #[structopt( name = "delta", about = "A viewer for git and diff output", setting(ColorAlways), setting(ColoredHelp), setting(DeriveDisplayOrder), after_help = "\ GIT CONFIG ---------- By default, delta takes settings from a section named \"delta\" in git config files, if one is present. The git config file to use for delta options will usually be ~/.gitconfig, but delta follows the rules given in https://git-scm.com/docs/git-config#FILES. Most delta options can be given in a git config file, using the usual option names but without the initial '--'. An example is [delta] line-numbers = true zero-style = dim syntax FEATURES -------- A feature is a named collection of delta options in git config. An example is: [delta \"my-delta-feature\"] syntax-theme = Dracula plus-style = bold syntax \"#002800\" To activate those options, you would use: delta --features my-delta-feature A feature name may not contain whitespace. You can activate multiple features: [delta] features = my-highlight-styles-colors-feature my-line-number-styles-feature If more than one feature sets the same option, the last one wins. STYLES ------ All options that have a name like --*-style work the same way. It is very similar to how colors/styles are specified in a gitconfig file: https://git-scm.com/docs/git-config#Documentation/git-config.txt-color Here is an example: --minus-style 'red bold ul \"#ffeeee\"' That means: For removed lines, set the foreground (text) color to 'red', make it bold and underlined, and set the background color to '#ffeeee'. See the COLORS section below for how to specify a color. In addition to real colors, there are 4 special color names: 'auto', 'normal', 'raw', and 'syntax'. Here is an example of using special color names together with a single attribute: --minus-style 'syntax bold auto' That means: For removed lines, syntax-highlight the text, and make it bold, and do whatever delta normally does for the background. The available attributes are: 'blink', 'bold', 'dim', 'hidden', 'italic', 'reverse', 'strike', and 'ul' (or 'underline'). The attribute 'omit' is supported by commit-style, file-style, and hunk-header-style, meaning to remove the element entirely from the output. A complete description of the style string syntax follows: - If the input that delta is receiving already has colors, and you want delta to output those colors unchanged, then use the special style string 'raw'. Otherwise, delta will strip any colors from its input. - A style string consists of 0, 1, or 2 colors, together with an arbitrary number of style attributes, all separated by spaces. - The first color is the foreground (text) color. The second color is the background color. Attributes can go in any position. - This means that in order to specify a background color you must also specify a foreground (text) color. - If you want delta to choose one of the colors automatically, then use the special color 'auto'. This can be used for both foreground and background. 
- If you want the foreground/background color to be your terminal's foreground/background color, then use the special color 'normal'. - If you want the foreground text to be syntax-highlighted according to its language, then use the special foreground color 'syntax'. This can only be used for the foreground (text). - The minimal style specification is the empty string ''. This means: do not apply any colors or styling to the element in question. COLORS ------ There are three ways to specify a color (this section applies to foreground and background colors within a style string): 1. RGB hex code An example of using an RGB hex code is: --file-style=\"#0e7c0e\" 2. ANSI color name There are 8 ANSI color names: black, red, green, yellow, blue, magenta, cyan, white. In addition, all of them have a bright form: brightblack, brightred, brightgreen, brightyellow, brightblue, brightmagenta, brightcyan, brightwhite. An example of using an ANSI color name is: --file-style=\"green\" Unlike RGB hex codes, ANSI color names are just names: you can choose the exact color that each name corresponds to in the settings of your terminal application (the application you use to enter commands at a shell prompt). This means that if you use ANSI color names, and you change the color theme used by your terminal, then delta's colors will respond automatically, without needing to change the delta command line. \"purple\" is accepted as a synonym for \"magenta\". Color names and codes are case-insensitive. 3. ANSI color number An example of using an ANSI color number is: --file-style=28 There are 256 ANSI color numbers: 0-255. The first 16 are the same as the colors described in the \"ANSI color name\" section above. See https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit. Specifying colors like this is useful if your terminal only supports 256 colors (i.e. doesn\'t support 24-bit color). LINE NUMBERS ------------ To display line numbers, use --line-numbers. Line numbers are displayed in two columns. Here's what it looks like by default: 1 ⋮ 1 │ unchanged line 2 ⋮ │ removed line ⋮ 2 │ added line In that output, the line numbers for the old (minus) version of the file appear in the left column, and the line numbers for the new (plus) version of the file appear in the right column. In an unchanged (zero) line, both columns contain a line number. The following options allow the line number display to be customized: --line-numbers-left-format: Change the contents of the left column --line-numbers-right-format: Change the contents of the right column --line-numbers-left-style: Change the style applied to the left column --line-numbers-right-style: Change the style applied to the right column --line-numbers-minus-style: Change the style applied to line numbers in minus lines --line-numbers-zero-style: Change the style applied to line numbers in unchanged lines --line-numbers-plus-style: Change the style applied to line numbers in plus lines Options --line-numbers-left-format and --line-numbers-right-format allow you to change the contents of the line number columns. Their values are arbitrary format strings, which are allowed to contain the placeholders {nm} for the line number associated with the old version of the file and {np} for the line number associated with the new version of the file. The placeholders support a subset of the string formatting syntax documented here: https://doc.rust-lang.org/std/fmt/#formatting-parameters. Specifically, you can use the alignment and width syntax. 
For example, the default value of --line-numbers-left-format is '{nm:^4}⋮'. This means that the left column should display the minus line number (nm), center-aligned, padded with spaces to a width of 4 characters, followed by a unicode dividing-line character (⋮). Similarly, the default value of --line-numbers-right-format is '{np:^4}│'. This means that the right column should display the plus line number (np), center-aligned, padded with spaces to a width of 4 characters, followed by a unicode dividing-line character (│). Use '<' for left-align, '^' for center-align, and '>' for right-align. If something isn't working correctly, or you have a feature request, please open an issue at https://github.com/dandavison/delta/issues. " )] pub struct Opt { /// Use default colors appropriate for a light terminal background. For more control, see the /// style options and --syntax-theme. #[structopt(long = "light")] pub light: bool, /// Use default colors appropriate for a dark terminal background. For more control, see the /// style options and --syntax-theme. #[structopt(long = "dark")] pub dark: bool, /// Display line numbers next to the diff. See LINE NUMBERS section. #[structopt(short = "n", long = "line-numbers")] pub line_numbers: bool, /// Display a side-by-side diff view instead of the traditional view. #[structopt(short = "s", long = "side-by-side")] pub side_by_side: bool, #[structopt(long = "diff-highlight")] /// Emulate diff-highlight (https://github.com/git/git/tree/master/contrib/diff-highlight) pub diff_highlight: bool, #[structopt(long = "diff-so-fancy")] /// Emulate diff-so-fancy (https://github.com/so-fancy/diff-so-fancy) pub diff_so_fancy: bool, #[structopt(long = "navigate")] /// Activate diff navigation: use n to jump forwards and N to jump backwards. To change the /// file labels used see --file-modified-label, --file-removed-label, --file-added-label, /// --file-renamed-label. pub navigate: bool, #[structopt(long = "relative-paths")] /// Output all file paths relative to the current directory so that they /// resolve correctly when clicked on or used in shell commands. pub relative_paths: bool, #[structopt(long = "hyperlinks")] /// Render commit hashes, file names, and line numbers as hyperlinks, /// according to the hyperlink spec for terminal emulators: /// https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda. By /// default, file names and line numbers link to the local file using a file /// URL, whereas commit hashes link to the commit in GitHub, if the remote /// repository is hosted by GitHub. See --hyperlinks-file-link-format for /// full control over the file URLs emitted. Hyperlinks are supported by /// several common terminal emulators. To make them work, you must use less /// version >= 581 with the -R flag (or use -r with older less versions, but /// this will break e.g. --navigate). If you use tmux, then you will also /// need a patched fork of tmux (see https://github.com/dandavison/tmux). pub hyperlinks: bool, #[structopt(long = "keep-plus-minus-markers")] /// Prefix added/removed lines with a +/- character, exactly as git does. By default, delta /// does not emit any prefix, so code can be copied directly from delta's output. pub keep_plus_minus_markers: bool, /// Display the active values for all Delta options. Style options are displayed with /// foreground and background colors. This can be used to experiment with colors by combining /// this option with other options such as --minus-style, --zero-style, --plus-style, --light, /// --dark, etc. 
#[structopt(long = "show-config")] pub show_config: bool, /// List supported languages and associated file extensions. #[structopt(long = "list-languages")] pub list_languages: bool, /// List available syntax-highlighting color themes. #[structopt(long = "list-syntax-themes")] pub list_syntax_themes: bool, /// Show all available syntax-highlighting themes, each with an example of highlighted diff output. /// If diff output is supplied on standard input then this will be used for the demo. For /// example: `git show | delta --show-syntax-themes`. #[structopt(long = "show-syntax-themes")] pub show_syntax_themes: bool, /// Show available delta themes, each with an example of highlighted diff /// output. A delta theme is a delta named feature (see --features) that /// sets either `light` or `dark`. See /// https://github.com/dandavison/delta#custom-color-themes. If diff output /// is supplied on standard input then this will be used for the demo. For /// example: `git show | delta --show-themes`. By default shows dark or /// light themes only, according to whether delta is in dark or light mode /// (as set by the user or inferred from BAT_THEME). To control the themes /// shown, use --dark or --light, or both, on the command line together with /// this option. #[structopt(long = "show-themes")] pub show_themes: bool, #[structopt(long = "no-gitconfig")] /// Do not take any settings from git config. See GIT CONFIG section. pub no_gitconfig: bool, #[structopt(long = "raw")] /// Do not alter the input in any way. This is mainly intended for testing delta. pub raw: bool, #[structopt(long = "color-only")] /// Do not alter the input structurally in any way, but color and highlight hunk lines /// according to your delta configuration. This is mainly intended for other tools that use /// delta. pub color_only: bool, //////////////////////////////////////////////////////////////////////////////////////////// #[structopt(long = "features", default_value = "", env = "DELTA_FEATURES")] /// Name of delta features to use (space-separated). A feature is a named collection of delta /// options in ~/.gitconfig. See FEATURES section. pub features: String, #[structopt(long = "syntax-theme", env = "BAT_THEME")] /// The code syntax-highlighting theme to use. Use --show-syntax-themes to demo available /// themes. If the syntax-highlighting theme is not set using this option, it will be taken /// from the BAT_THEME environment variable, if that contains a valid theme name. /// --syntax-theme=none disables all syntax highlighting. pub syntax_theme: Option<String>, #[structopt(long = "minus-style", default_value = "normal auto")] /// Style (foreground, background, attributes) for removed lines. See STYLES section. pub minus_style: String, #[structopt(long = "zero-style", default_value = "syntax normal")] /// Style (foreground, background, attributes) for unchanged lines. See STYLES section. pub zero_style: String, #[structopt(long = "plus-style", default_value = "syntax auto")] /// Style (foreground, background, attributes) for added lines. See STYLES section. pub plus_style: String, #[structopt(long = "minus-emph-style", default_value = "normal auto")] /// Style (foreground, background, attributes) for emphasized sections of removed lines. See /// STYLES section. pub minus_emph_style: String, #[structopt(long = "minus-non-emph-style", default_value = "auto auto")] /// Style (foreground, background, attributes) for non-emphasized sections of removed lines /// that have an emphasized section. Defaults to --minus-style. 
See STYLES section. pub minus_non_emph_style: String, #[structopt(long = "plus-emph-style", default_value = "syntax auto")] /// Style (foreground, background, attributes) for emphasized sections of added lines. See /// STYLES section. pub plus_emph_style: String, #[structopt(long = "plus-non-emph-style", default_value = "auto auto")] /// Style (foreground, background, attributes) for non-emphasized sections of added lines that /// have an emphasized section. Defaults to --plus-style. See STYLES section. pub plus_non_emph_style: String, #[structopt(long = "commit-style", default_value = "raw")] /// Style (foreground, background, attributes) for the commit hash line. See STYLES section. /// The style 'omit' can be used to remove the commit hash line from the output. pub commit_style: String, #[structopt(long = "commit-decoration-style", default_value = "")] /// Style (foreground, background, attributes) for the commit hash decoration. See STYLES /// section. The style string should contain one of the special attributes 'box', 'ul' /// (underline), 'ol' (overline), or the combination 'ul ol'. pub commit_decoration_style: String, /// The regular expression used to identify the commit line when parsing git output. #[structopt(long = "commit-regex", default_value = r"^commit ")] pub commit_regex: String, #[structopt(long = "file-style", default_value = "blue")] /// Style (foreground, background, attributes) for the file section. See STYLES section. The /// style 'omit' can be used to remove the file section from the output. pub file_style: String, #[structopt(long = "file-decoration-style", default_value = "blue ul")] /// Style (foreground, background, attributes) for the file decoration. See STYLES section. The /// style string should contain one of the special attributes 'box', 'ul' (underline), 'ol' /// (overline), or the combination 'ul ol'. pub file_decoration_style: String, /// Format string for commit hyperlinks (requires --hyperlinks). The /// placeholder "{commit}" will be replaced by the commit hash. For example: /// --hyperlinks-commit-link-format='https://mygitrepo/{commit}/' #[structopt(long = "hyperlinks-commit-link-format")] pub hyperlinks_commit_link_format: Option<String>, /// Format string for file hyperlinks (requires --hyperlinks). The /// placeholders "{path}" and "{line}" will be replaced by the absolute file /// path and the line number, respectively. The default value of this option /// creates hyperlinks using standard file URLs; your operating system /// should open these in the application registered for that file type. /// However, these do not make use of the line number. In order for the link /// to open the file at the correct line number, you could use a custom URL /// format such as "file-line://{path}:{line}" and register an application /// to handle the custom "file-line" URL scheme by opening the file in your /// editor/IDE at the indicated line number. See /// https://github.com/dandavison/open-in-editor for an example. #[structopt(long = "hyperlinks-file-link-format", default_value = "file://{path}")] pub hyperlinks_file_link_format: String, #[structopt(long = "hunk-header-style", default_value = "line-number syntax")] /// Style (foreground, background, attributes) for the hunk-header. See STYLES section. Special /// attributes 'file' and 'line-number' can be used to include the file path, and number of /// first hunk line, in the hunk header. The style 'omit' can be used to remove the hunk header /// section from the output. 
pub hunk_header_style: String, #[structopt(long = "hunk-header-file-style", default_value = "blue")] /// Style (foreground, background, attributes) for the file path part of the hunk-header. See /// STYLES section. The file path will only be displayed if hunk-header-style contains the /// 'file' special attribute. pub hunk_header_file_style: String, #[structopt(long = "hunk-header-line-number-style", default_value = "blue")] /// Style (foreground, background, attributes) for the line number part of the hunk-header. See /// STYLES section. The line number will only be displayed if hunk-header-style contains the /// 'line-number' special attribute. pub hunk_header_line_number_style: String, #[structopt(long = "hunk-header-decoration-style", default_value = "blue box")] /// Style (foreground, background, attributes) for the hunk-header decoration. See STYLES /// section. The style string should contain one of the special attributes 'box', 'ul' /// (underline), 'ol' (overline), or the combination 'ul ol'. pub hunk_header_decoration_style: String, /// Format string for git blame commit metadata. Available placeholders are /// "{timestamp}", "{author}", and "{commit}". #[structopt( long = "blame-format", default_value = "{timestamp:<15} {author:<15} {commit:<8} │ " )] pub blame_format: String, /// Background colors used for git blame lines (space-separated string). /// Lines added by the same commit are painted with the same color; colors /// are recycled as needed. #[structopt(long = "blame-palette", default_value = "#FFFFFF #DDDDDD #BBBBBB")] pub blame_palette: String, /// Format of `git blame` timestamp in raw git output received by delta. #[structopt( long = "blame-timestamp-format", default_value = "%Y-%m-%d %H:%M:%S %z" )] pub blame_timestamp_format: String, /// Default language used for syntax highlighting when this cannot be /// inferred from a filename. It will typically make sense to set this in /// per-repository git config (.git/config). #[structopt(long = "default-language")] pub default_language: Option<String>, /// The regular expression used to decide what a word is for the within-line highlight /// algorithm. For less fine-grained matching than the default try --word-diff-regex="\S+" /// --max-line-distance=1.0 (this is more similar to `git --word-diff`). #[structopt(long = "word-diff-regex", default_value = r"\w+")] pub tokenization_regex: String, /// The maximum distance between two lines for them to be inferred to be homologous. Homologous /// line pairs are highlighted according to the deletion and insertion operations transforming /// one into the other. #[structopt(long = "max-line-distance", default_value = "0.6")] pub max_line_distance: f64, /// Style (foreground, background, attributes) for line numbers in the old (minus) version of /// the file. See STYLES and LINE NUMBERS sections. #[structopt(long = "line-numbers-minus-style", default_value = "auto")] pub line_numbers_minus_style: String, /// Style (foreground, background, attributes) for line numbers in unchanged (zero) lines. See /// STYLES and LINE NUMBERS sections. #[structopt(long = "line-numbers-zero-style", default_value = "auto")] pub line_numbers_zero_style: String, /// Style (foreground, background, attributes) for line numbers in the new (plus) version of /// the file. See STYLES and LINE NUMBERS sections. #[structopt(long = "line-numbers-plus-style", default_value = "auto")] pub line_numbers_plus_style: String, /// Format string for the left column of line numbers.
A typical value would be "{nm:^4}⋮" /// which means to display the line numbers of the minus file (old version), center-aligned, /// padded to a width of 4 characters, followed by a dividing character. See the LINE NUMBERS /// section. #[structopt(long = "line-numbers-left-format", default_value = "{nm:^4}⋮")] pub line_numbers_left_format: String, /// Format string for the right column of line numbers. A typical value would be "{np:^4}│ " /// which means to display the line numbers of the plus file (new version), center-aligned, /// padded to a width of 4 characters, followed by a dividing character, and a space. See the /// LINE NUMBERS section. #[structopt(long = "line-numbers-right-format", default_value = "{np:^4}│")] pub line_numbers_right_format: String, /// Style (foreground, background, attributes) for the left column of line numbers. See STYLES /// and LINE NUMBERS sections. #[structopt(long = "line-numbers-left-style", default_value = "auto")] pub line_numbers_left_style: String, /// Style (foreground, background, attributes) for the right column of line numbers. See STYLES /// and LINE NUMBERS sections. #[structopt(long = "line-numbers-right-style", default_value = "auto")] pub line_numbers_right_style: String, #[structopt(long = "file-modified-label", default_value = "")] /// Text to display in front of a modified file path. pub file_modified_label: String, #[structopt(long = "file-removed-label", default_value = "removed:")] /// Text to display in front of a removed file path. pub file_removed_label: String, #[structopt(long = "file-added-label", default_value = "added:")] /// Text to display in front of an added file path. pub file_added_label: String, #[structopt(long = "file-copied-label", default_value = "copied:")] /// Text to display in front of a copied file path. pub file_copied_label: String, #[structopt(long = "file-renamed-label", default_value = "renamed:")] /// Text to display in front of a renamed file path. pub file_renamed_label: String, #[structopt(long = "hunk-label", default_value = "")] /// Text to display in front of a hunk header. pub hunk_label: String, #[structopt(long = "max-line-length", default_value = "512")] /// Truncate lines longer than this. To prevent any truncation, set to zero. Note that /// delta will be slow on very long lines (e.g. minified .js) if truncation is disabled. pub max_line_length: usize, /// The width of underline/overline decorations. Use --width=variable to extend decorations and /// background colors to the end of the text only. Otherwise background colors extend to the /// full terminal width. #[structopt(short = "w", long = "width")] pub width: Option<String>, /// Width allocated for file paths in a diff stat section. If a relativized /// file path exceeds this width then the diff stat will be misaligned. #[structopt(long = "diff-stat-align-width", default_value = "48")] pub diff_stat_align_width: usize, /// The number of spaces to replace tab characters with. Use --tabs=0 to pass tab characters /// through directly, but note that in that case delta will calculate line widths assuming tabs /// occupy one character's width on the screen: if your terminal renders tabs as more than /// one character wide then delta's output will look incorrect. #[structopt(long = "tabs", default_value = "4")] pub tab_width: usize, /// Whether to emit 24-bit ("true color") RGB color codes. Options are auto, always, and never.
/// "auto" means that delta will emit 24-bit color codes if the environment variable COLORTERM /// has the value "truecolor" or "24bit". If your terminal application (the application you use /// to enter commands at a shell prompt) supports 24 bit colors, then it probably already sets /// this environment variable, in which case you don't need to do anything. #[structopt(long = "true-color", default_value = "auto")] pub true_color: String, /// Deprecated: use --true-color. #[structopt(long = "24-bit-color")] pub _24_bit_color: Option<String>, /// Whether to examine ANSI color escape sequences in raw lines received from Git and handle /// lines colored in certain ways specially. This is on by default: it is how Delta supports /// Git's --color-moved feature. Set this to "false" to disable this behavior. #[structopt(long = "inspect-raw-lines", default_value = "true")] pub inspect_raw_lines: String, #[structopt(long)] /// Which pager to use. The default pager is `less`. You can also change pager /// by setting the environment variables DELTA_PAGER, BAT_PAGER, or PAGER /// (and that is their order of priority). This option overrides all environment /// variables above. pub pager: Option<String>, /// Whether to use a pager when displaying output. Options are: auto, always, and never. #[structopt(long = "paging", default_value = "auto")] pub paging_mode: String, /// First file to be compared when delta is being used in diff mode: `delta file_1 file_2` is /// equivalent to `diff -u file_1 file_2 | delta`. #[structopt(parse(from_os_str))] pub minus_file: Option<PathBuf>, /// Second file to be compared when delta is being used in diff mode. #[structopt(parse(from_os_str))] pub plus_file: Option<PathBuf>, /// Style for removed empty line marker (used only if --minus-style has no background color) #[structopt( long = "--minus-empty-line-marker-style", default_value = "normal auto" )] pub minus_empty_line_marker_style: String, /// Style for added empty line marker (used only if --plus-style has no background color) #[structopt(long = "--plus-empty-line-marker-style", default_value = "normal auto")] pub plus_empty_line_marker_style: String, /// Style for whitespace errors. Defaults to color.diff.whitespace if that is set in git /// config, or else 'magenta reverse'. #[structopt(long = "whitespace-error-style", default_value = "auto auto")] pub whitespace_error_style: String, #[structopt(long = "line-buffer-size", default_value = "32")] /// Size of internal line buffer. Delta compares the added and removed versions of nearby lines /// in order to detect and highlight changes at the level of individual words/tokens. /// Therefore, nearby lines must be buffered internally before they are painted and emitted. /// Increasing this value might improve highlighting of some large diff hunks. However, setting /// this to a high value will adversely affect delta's performance when entire files are /// added/removed. pub line_buffer_size: usize, #[structopt(long = "minus-color")] /// Deprecated: use --minus-style='normal my_background_color'. pub deprecated_minus_background_color: Option<String>, #[structopt(long = "minus-emph-color")] /// Deprecated: use --minus-emph-style='normal my_background_color'. pub deprecated_minus_emph_background_color: Option<String>, #[structopt(long = "plus-color")] /// Deprecated: Use --plus-style='syntax my_background_color' to change the background color /// while retaining syntax-highlighting. 
pub deprecated_plus_background_color: Option<String>, #[structopt(long = "plus-emph-color")] /// Deprecated: Use --plus-emph-style='syntax my_background_color' to change the background /// color while retaining syntax-highlighting. pub deprecated_plus_emph_background_color: Option<String>, #[structopt(long = "highlight-removed")] /// Deprecated: use --minus-style='syntax'. pub deprecated_highlight_minus_lines: bool, #[structopt(long = "commit-color")] /// Deprecated: use --commit-style='my_foreground_color' /// --commit-decoration-style='my_foreground_color'. pub deprecated_commit_color: Option<String>, #[structopt(long = "file-color")] /// Deprecated: use --file-style='my_foreground_color' /// --file-decoration-style='my_foreground_color'. pub deprecated_file_color: Option<String>, #[structopt(long = "hunk-style")] /// Deprecated: synonym of --hunk-header-decoration-style. pub deprecated_hunk_style: Option<String>, #[structopt(long = "hunk-color")] /// Deprecated: use --hunk-header-style='my_foreground_color' /// --hunk-header-decoration-style='my_foreground_color'. pub deprecated_hunk_color: Option<String>, #[structopt(long = "theme")] /// Deprecated: use --syntax-theme. pub deprecated_theme: Option<String>, #[structopt(skip)] pub computed: ComputedValues, #[structopt(skip)] pub git_config: Option<GitConfig>, #[structopt(skip)] pub git_config_entries: HashMap<String, GitConfigEntry>, } #[derive(Default, Clone, Debug)] pub struct ComputedValues { pub available_terminal_width: usize, pub background_color_extends_to_terminal_width: bool, pub decorations_width: Width, pub inspect_raw_lines: InspectRawLines, pub is_light_mode: bool, pub paging_mode: PagingMode, pub syntax_set: SyntaxSet, pub syntax_theme: Option<SyntaxTheme>, pub true_color: bool, } #[derive(Clone, Debug, PartialEq)] pub enum Width { Fixed(usize), Variable, } impl Default for Width { fn default() -> Self { Width::Variable } } #[derive(Clone, Debug, PartialEq)] pub enum InspectRawLines { True, False, } impl Default for InspectRawLines { fn default() -> Self { InspectRawLines::False } } impl Default for PagingMode { fn default() -> Self { PagingMode::Never } } impl Opt { pub fn from_args_and_git_config( git_config: Option<GitConfig>, assets: HighlightingAssets, ) -> Self { Self::from_clap_and_git_config(Self::clap().get_matches(), git_config, assets) } pub fn from_iter_and_git_config<I>(iter: I, git_config: Option<GitConfig>) -> Self where I: IntoIterator, I::Item: Into<OsString> + Clone, { let assets = HighlightingAssets::new(); Self::from_clap_and_git_config(Self::clap().get_matches_from(iter), git_config, assets) } fn from_clap_and_git_config( arg_matches: clap::ArgMatches, mut git_config: Option<GitConfig>, assets: HighlightingAssets, ) -> Self { let mut opt = Opt::from_clap(&arg_matches); options::rewrite::apply_rewrite_rules(&mut opt, &arg_matches); options::set::set_options(&mut opt, &mut git_config, &arg_matches, assets); opt.git_config = git_config; opt } #[allow(dead_code)] pub fn get_option_names<'a>() -> HashMap<&'a str, &'a str> { itertools::chain( Self::clap() .p .opts .iter() .map(|opt| (opt.b.name, opt.s.long.unwrap())), Self::clap() .p .flags .iter() .map(|opt| (opt.b.name, opt.s.long.unwrap())), ) .filter(|(name, _)| !IGNORED_OPTION_NAMES.contains(name)) .collect() } } // Option names to exclude when listing options to process for various purposes. These are // (1) Deprecated options // (2) Pseudo-flag commands such as --list-languages lazy_static! 
{ static ref IGNORED_OPTION_NAMES: HashSet<&'static str> = vec![ "deprecated-file-color", "deprecated-hunk-style", "deprecated-minus-background-color", "deprecated-minus-emph-background-color", "deprecated-hunk-color", "deprecated-plus-emph-background-color", "deprecated-plus-background-color", "deprecated-highlight-minus-lines", "deprecated-theme", "deprecated-commit-color", "list-languages", "list-syntax-themes", "show-config", "show-syntax-themes", ] .into_iter() .collect(); }
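// The LINE NUMBERS help text above leans on std::fmt's alignment/width
// formatting parameters for --line-numbers-left-format and friends. The
// default '{nm:^4}' placeholder is plain center-alignment, which can be
// checked directly:

fn main() {
    // Center-align (^) within a width of 4, as in the default "{nm:^4}⋮".
    assert_eq!(format!("{:^4}⋮", 7), " 7  ⋮");
    // '<' left-aligns and '>' right-aligns, as the help text describes.
    assert_eq!(format!("{:<4}│", 12), "12  │");
    assert_eq!(format!("{:>4}│", 12), "  12│");
}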
44.268293
105
0.695201
29d6567509e4fc0958a8fe9210fc92806a96fdba
748
use rayon::prelude::*;

/// A single executable segment
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub struct Segment {
    pub addr: u64,
    pub bytes: Vec<u8>,
}

impl Segment {
    /// Constructor
    pub fn new(addr: u64, bytes: Vec<u8>) -> Segment {
        Segment { addr, bytes }
    }

    /// Check if the segment contains an address
    pub fn contains(&self, addr: u64) -> bool {
        (self.addr <= addr) && (addr < (self.addr + self.bytes.len() as u64))
    }

    /// Get offsets of byte occurrences
    pub fn get_matching_offsets(&self, vals: &[u8]) -> Vec<usize> {
        self.bytes
            .par_iter()
            .enumerate()
            .filter(|&(_, b)| vals.contains(b))
            .map(|(i, _)| i)
            .collect()
    }
}
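// A quick usage sketch for Segment (hypothetical values; rayon's ordered
// collect means the offsets come back in ascending order):

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn contains_and_matching_offsets() {
        // A segment loaded at 0x1000: a RET (0xc3) between two NOPs (0x90).
        let seg = Segment::new(0x1000, vec![0x90, 0xc3, 0x90]);
        assert!(seg.contains(0x1002));
        assert!(!seg.contains(0x1003)); // one past the end
        assert_eq!(seg.get_matching_offsets(&[0x90]), vec![0, 2]);
    }
}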
24.129032
77
0.534759
7a182d130e91f0e53fa0a3f9341abb478e92dead
1,631
/*
    Copyright 2021 Integritee AG and Supercomputing Systems AG

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#[cfg(feature = "sgx")]
use std::sync::SgxRwLock as RwLock;

#[cfg(feature = "std")]
use std::sync::RwLock;

use crate::{
    concurrent_access::ValidatorAccess,
    error::{Error, Result},
    mocks::validator_mock::ValidatorMock,
};
use itp_types::Block;

/// Mock for the validator access.
///
/// Does not execute anything, just a stub.
#[derive(Default)]
pub struct ValidatorAccessMock {
    validator: RwLock<ValidatorMock>,
}

impl ValidatorAccess<Block> for ValidatorAccessMock {
    type ValidatorType = ValidatorMock;

    fn execute_on_validator<F, R>(&self, getter_function: F) -> Result<R>
    where
        F: FnOnce(&Self::ValidatorType) -> Result<R>,
    {
        let validator_lock = self.validator.read().map_err(|_| Error::PoisonedLock)?;
        getter_function(&validator_lock)
    }

    fn execute_mut_on_validator<F, R>(&self, mutating_function: F) -> Result<R>
    where
        F: FnOnce(&mut Self::ValidatorType) -> Result<R>,
    {
        let mut validator_lock = self.validator.write().map_err(|_| Error::PoisonedLock)?;
        mutating_function(&mut validator_lock)
    }
}
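// The mock above just forwards each closure to an RwLock-guarded value,
// mapping lock poisoning into the crate's error type. The pattern
// generalizes; a standalone sketch with simplified error handling
// (&'static str instead of the crate's Error type):

use std::sync::RwLock;

struct Access<T> {
    inner: RwLock<T>,
}

impl<T> Access<T> {
    // Read-only access, mirroring execute_on_validator.
    fn execute_on<R>(&self, f: impl FnOnce(&T) -> R) -> Result<R, &'static str> {
        let guard = self.inner.read().map_err(|_| "poisoned lock")?;
        Ok(f(&guard))
    }

    // Mutable access, mirroring execute_mut_on_validator.
    fn execute_mut_on<R>(&self, f: impl FnOnce(&mut T) -> R) -> Result<R, &'static str> {
        let mut guard = self.inner.write().map_err(|_| "poisoned lock")?;
        Ok(f(&mut guard))
    }
}

fn main() {
    let access = Access { inner: RwLock::new(0u32) };
    access.execute_mut_on(|v| *v += 1).unwrap();
    assert_eq!(access.execute_on(|v| *v).unwrap(), 1);
}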
28.12069
84
0.741876
fb4dcd4021169b0252367a8d6e23efc1e164ba72
2,808
use std::env;

use sentry_core::protocol::{DebugImage, SymbolicDebugImage};
use sentry_core::types::{CodeId, DebugId, Uuid};

use findshlibs::{SharedLibrary, SharedLibraryId, TargetSharedLibrary, TARGET_SUPPORTED};

const UUID_SIZE: usize = 16;

/// Converts an ELF object identifier into a `DebugId`.
///
/// The identifier data is first truncated or extended to match the 16 byte
/// size of Uuids. If the data is declared in little endian, the first three
/// Uuid fields are flipped to match the big endian expected by the breakpad
/// processor.
///
/// The `DebugId::appendix` field is always `0` for ELF.
fn debug_id_from_build_id(build_id: &[u8]) -> Option<DebugId> {
    let mut data = [0u8; UUID_SIZE];
    let len = build_id.len().min(UUID_SIZE);
    data[0..len].copy_from_slice(&build_id[0..len]);

    #[cfg(target_endian = "little")]
    {
        // The ELF file targets a little endian architecture. Convert to
        // network byte order (big endian) to match the Breakpad processor's
        // expectations. For big endian object files, this is not needed.
        data[0..4].reverse(); // uuid field 1
        data[4..6].reverse(); // uuid field 2
        data[6..8].reverse(); // uuid field 3
    }

    Uuid::from_slice(&data).map(DebugId::from_uuid).ok()
}

pub fn debug_images() -> Vec<DebugImage> {
    let mut images = vec![];
    if !TARGET_SUPPORTED {
        return images;
    }

    TargetSharedLibrary::each(|shlib| {
        let maybe_debug_id = shlib.debug_id().and_then(|id| match id {
            SharedLibraryId::Uuid(bytes) => Some(DebugId::from_uuid(Uuid::from_bytes(bytes))),
            SharedLibraryId::GnuBuildId(ref id) => debug_id_from_build_id(id),
            SharedLibraryId::PdbSignature(guid, age) => DebugId::from_guid_age(&guid, age).ok(),
            _ => None,
        });

        let debug_id = match maybe_debug_id {
            Some(debug_id) => debug_id,
            None => return,
        };

        let mut name = shlib.name().to_string_lossy().to_string();
        if name.is_empty() {
            name = env::current_exe()
                .map(|x| x.display().to_string())
                .unwrap_or_else(|_| "<main>".to_string());
        }

        let code_id = shlib.id().map(|id| CodeId::new(format!("{}", id)));
        let debug_name = shlib.debug_name().map(|n| n.to_string_lossy().to_string());

        images.push(
            SymbolicDebugImage {
                id: debug_id,
                name,
                arch: None,
                image_addr: shlib.actual_load_addr().0.into(),
                image_size: shlib.len() as u64,
                image_vmaddr: shlib.stated_load_addr().0.into(),
                code_id,
                debug_file: debug_name,
            }
            .into(),
        );
    });

    images
}
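// The byte shuffling in debug_id_from_build_id is easy to get wrong, so here
// it is in isolation: a 20-byte GNU build ID is truncated to 16 bytes and the
// first three UUID fields are byte-swapped. Note the real code only swaps on
// little-endian targets; this demo swaps unconditionally, and the sample
// build ID is made up for illustration.

fn uuid_bytes_from_build_id(build_id: &[u8]) -> [u8; 16] {
    let mut data = [0u8; 16];
    let len = build_id.len().min(16);
    data[..len].copy_from_slice(&build_id[..len]);
    data[0..4].reverse(); // uuid field 1
    data[4..6].reverse(); // uuid field 2
    data[6..8].reverse(); // uuid field 3
    data
}

fn main() {
    // A made-up 20-byte GNU build ID; the last 4 bytes are dropped.
    let build_id: Vec<u8> = (1..=20).collect();
    let uuid = uuid_bytes_from_build_id(&build_id);
    assert_eq!(&uuid[..8], &[4, 3, 2, 1, 6, 5, 8, 7]);
    assert_eq!(&uuid[8..], &[9, 10, 11, 12, 13, 14, 15, 16]);
}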
34.666667
96
0.594373
ed5649310e3fd61e13ae24958fcd86db04d0fc60
19,454
use super::diagnostics::Error; use super::expr::LhsExpr; use super::pat::GateOr; use super::path::PathStyle; use super::{BlockMode, Parser, PrevTokenKind, Restrictions, SemiColonMode}; use crate::maybe_whole; use crate::DirectoryOwnership; use rustc_errors::{Applicability, PResult}; use syntax::ast; use syntax::ast::{AttrStyle, AttrVec, Attribute, Mac, MacStmtStyle, VisibilityKind}; use syntax::ast::{Block, BlockCheckMode, Expr, ExprKind, Local, Stmt, StmtKind, DUMMY_NODE_ID}; use syntax::ptr::P; use syntax::token; use syntax::util::classify; use syntax_pos::source_map::{respan, Span}; use syntax_pos::symbol::{kw, sym, Symbol}; use std::mem; impl<'a> Parser<'a> { /// Parses a statement. This stops just before trailing semicolons on everything but items. /// e.g., a `StmtKind::Semi` parses to a `StmtKind::Expr`, leaving the trailing `;` unconsumed. pub fn parse_stmt(&mut self) -> PResult<'a, Option<Stmt>> { Ok(self.parse_stmt_without_recovery(true).unwrap_or_else(|mut e| { e.emit(); self.recover_stmt_(SemiColonMode::Break, BlockMode::Ignore); None })) } fn parse_stmt_without_recovery( &mut self, macro_legacy_warnings: bool, ) -> PResult<'a, Option<Stmt>> { maybe_whole!(self, NtStmt, |x| Some(x)); let attrs = self.parse_outer_attributes()?; let lo = self.token.span; if self.eat_keyword(kw::Let) { return self.parse_local_mk(lo, attrs.into()).map(Some); } if self.is_kw_followed_by_ident(kw::Mut) { return self.recover_stmt_local(lo, attrs.into(), "missing keyword", "let mut"); } if self.is_kw_followed_by_ident(kw::Auto) { self.bump(); // `auto` let msg = "write `let` instead of `auto` to introduce a new variable"; return self.recover_stmt_local(lo, attrs.into(), msg, "let"); } if self.is_kw_followed_by_ident(sym::var) { self.bump(); // `var` let msg = "write `let` instead of `var` to introduce a new variable"; return self.recover_stmt_local(lo, attrs.into(), msg, "let"); } let mac_vis = respan(lo, VisibilityKind::Inherited); if let Some(macro_def) = self.eat_macro_def(&attrs, &mac_vis, lo)? { return Ok(Some(self.mk_stmt(lo.to(self.prev_span), StmtKind::Item(macro_def)))); } // Starts like a simple path, being careful to avoid contextual keywords // such as union items, items with `crate` visibility, or auto trait items. // Our goal here is to parse an arbitrary path `a::b::c` but not something that starts // like a path (1 token), but is in fact not a path. if self.token.is_path_start() && !self.token.is_qpath_start() && !self.is_union_item() // `union::b::c` - path, `union U { ... }` - not a path. && !self.is_crate_vis() // `crate::b::c` - path, `crate struct S;` - not a path. && !self.is_auto_trait_item() && !self.is_async_fn() { let path = self.parse_path(PathStyle::Expr)?; if self.eat(&token::Not) { return self.parse_stmt_mac(lo, attrs.into(), path, macro_legacy_warnings); } let expr = if self.check(&token::OpenDelim(token::Brace)) { self.parse_struct_expr(lo, path, AttrVec::new())?
} else { let hi = self.prev_span; self.mk_expr(lo.to(hi), ExprKind::Path(None, path), AttrVec::new()) }; let expr = self.with_res(Restrictions::STMT_EXPR, |this| { let expr = this.parse_dot_or_call_expr_with(expr, lo, attrs.into())?; this.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(expr)) })?; return Ok(Some(self.mk_stmt(lo.to(self.prev_span), StmtKind::Expr(expr)))); } // FIXME: Bad copy of attrs let old_directory_ownership = mem::replace(&mut self.directory.ownership, DirectoryOwnership::UnownedViaBlock); let item = self.parse_item_(attrs.clone(), false, true)?; self.directory.ownership = old_directory_ownership; if let Some(item) = item { return Ok(Some(self.mk_stmt(lo.to(item.span), StmtKind::Item(item)))); } // Do not attempt to parse an expression if we're done here. if self.token == token::Semi { self.error_outer_attrs(&attrs); self.bump(); let mut last_semi = lo; while self.token == token::Semi { last_semi = self.token.span; self.bump(); } // We are encoding a string of semicolons as an empty tuple that spans // the excess semicolons to preserve this info until the lint stage. let kind = StmtKind::Semi(self.mk_expr( lo.to(last_semi), ExprKind::Tup(Vec::new()), AttrVec::new(), )); return Ok(Some(self.mk_stmt(lo.to(last_semi), kind))); } if self.token == token::CloseDelim(token::Brace) { self.error_outer_attrs(&attrs); return Ok(None); } // Remainder are line-expr stmts. let e = self.parse_expr_res(Restrictions::STMT_EXPR, Some(attrs.into()))?; Ok(Some(self.mk_stmt(lo.to(e.span), StmtKind::Expr(e)))) } /// Parses a statement macro `mac!(args)` provided a `path` representing `mac`. /// At this point, the `!` token after the path has already been eaten. fn parse_stmt_mac( &mut self, lo: Span, attrs: AttrVec, path: ast::Path, legacy_warnings: bool, ) -> PResult<'a, Option<Stmt>> { let args = self.parse_mac_args()?; let delim = args.delim(); let hi = self.prev_span; let style = if delim == token::Brace { MacStmtStyle::Braces } else { MacStmtStyle::NoBraces }; let mac = Mac { path, args, prior_type_ascription: self.last_type_ascription }; let kind = if delim == token::Brace || self.token == token::Semi || self.token == token::Eof { StmtKind::Mac(P((mac, style, attrs.into()))) } // We used to incorrectly stop parsing macro-expanded statements here. // If the next token will be an error anyway but could have parsed with the // earlier behavior, stop parsing here and emit a warning to avoid breakage. else if legacy_warnings && self.token.can_begin_expr() && match self.token.kind { // These can continue an expression, so we can't stop parsing and warn. token::OpenDelim(token::Paren) | token::OpenDelim(token::Bracket) | token::BinOp(token::Minus) | token::BinOp(token::Star) | token::BinOp(token::And) | token::BinOp(token::Or) | token::AndAnd | token::OrOr | token::DotDot | token::DotDotDot | token::DotDotEq => false, _ => true, } { self.warn_missing_semicolon(); StmtKind::Mac(P((mac, style, attrs))) } else { // Since none of the above applied, this is an expression statement macro. let e = self.mk_expr(lo.to(hi), ExprKind::Mac(mac), AttrVec::new()); let e = self.maybe_recover_from_bad_qpath(e, true)?; let e = self.parse_dot_or_call_expr_with(e, lo, attrs)?; let e = self.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(e))?; StmtKind::Expr(e) }; Ok(Some(self.mk_stmt(lo.to(hi), kind))) } /// Error on outer attributes in this context. /// Also error if the previous token was a doc comment.
fn error_outer_attrs(&self, attrs: &[Attribute]) { if !attrs.is_empty() { if self.prev_token_kind == PrevTokenKind::DocComment { self.span_fatal_err(self.prev_span, Error::UselessDocComment).emit(); } else if attrs.iter().any(|a| a.style == AttrStyle::Outer) { self.span_err(self.token.span, "expected statement after outer attribute"); } } } fn is_kw_followed_by_ident(&self, kw: Symbol) -> bool { self.token.is_keyword(kw) && self.look_ahead(1, |t| t.is_ident() && !t.is_reserved_ident()) } fn recover_stmt_local( &mut self, lo: Span, attrs: AttrVec, msg: &str, sugg: &str, ) -> PResult<'a, Option<Stmt>> { let stmt = self.parse_local_mk(lo, attrs)?; self.struct_span_err(lo, "invalid variable declaration") .span_suggestion(lo, msg, sugg.to_string(), Applicability::MachineApplicable) .emit(); Ok(Some(stmt)) } fn parse_local_mk(&mut self, lo: Span, attrs: AttrVec) -> PResult<'a, Stmt> { let local = self.parse_local(attrs.into())?; Ok(self.mk_stmt(lo.to(self.prev_span), StmtKind::Local(local))) } /// Parses a local variable declaration. fn parse_local(&mut self, attrs: AttrVec) -> PResult<'a, P<Local>> { let lo = self.prev_span; let pat = self.parse_top_pat(GateOr::Yes)?; let (err, ty) = if self.eat(&token::Colon) { // Save the state of the parser before parsing type normally, in case there is a `:` // instead of an `=` typo. let parser_snapshot_before_type = self.clone(); let colon_sp = self.prev_span; match self.parse_ty() { Ok(ty) => (None, Some(ty)), Err(mut err) => { // Rewind to before attempting to parse the type and continue parsing. let parser_snapshot_after_type = self.clone(); mem::replace(self, parser_snapshot_before_type); let snippet = self.span_to_snippet(pat.span).unwrap(); err.span_label(pat.span, format!("while parsing the type for `{}`", snippet)); (Some((parser_snapshot_after_type, colon_sp, err)), None) } } } else { (None, None) }; let init = match (self.parse_initializer(err.is_some()), err) { (Ok(init), None) => { // init parsed, ty parsed init } (Ok(init), Some((_, colon_sp, mut err))) => { // init parsed, ty error // Could parse the type as if it were the initializer, it is likely there was a // typo in the code: `:` instead of `=`. Add suggestion and emit the error. err.span_suggestion_short( colon_sp, "use `=` if you meant to assign", " =".to_string(), Applicability::MachineApplicable, ); err.emit(); // As this was parsed successfully, continue as if the code has been fixed for the // rest of the file. It will still fail due to the emitted error, but we avoid // extra noise. init } (Err(mut init_err), Some((snapshot, _, ty_err))) => { // init error, ty error init_err.cancel(); // Couldn't parse the type nor the initializer, only raise the type error and // return to the parser state before parsing the type as the initializer. // let x: <parse_error>; mem::replace(self, snapshot); return Err(ty_err); } (Err(err), None) => { // init error, ty parsed // Couldn't parse the initializer and we're not attempting to recover a failed // parse of the type, return the error. return Err(err); } }; let hi = if self.token == token::Semi { self.token.span } else { self.prev_span }; Ok(P(ast::Local { ty, pat, init, id: DUMMY_NODE_ID, span: lo.to(hi), attrs })) } /// Parses the RHS of a local variable declaration (e.g., '= 14;'). 
fn parse_initializer(&mut self, skip_eq: bool) -> PResult<'a, Option<P<Expr>>> { if self.eat(&token::Eq) { Ok(Some(self.parse_expr()?)) } else if skip_eq { Ok(Some(self.parse_expr()?)) } else { Ok(None) } } fn is_auto_trait_item(&self) -> bool { // auto trait (self.token.is_keyword(kw::Auto) && self.is_keyword_ahead(1, &[kw::Trait])) || // unsafe auto trait (self.token.is_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Auto]) && self.is_keyword_ahead(2, &[kw::Trait])) } /// Parses a block. No inner attributes are allowed. pub fn parse_block(&mut self) -> PResult<'a, P<Block>> { maybe_whole!(self, NtBlock, |x| x); let lo = self.token.span; if !self.eat(&token::OpenDelim(token::Brace)) { return self.error_block_no_opening_brace(); } self.parse_block_tail(lo, BlockCheckMode::Default) } fn error_block_no_opening_brace<T>(&mut self) -> PResult<'a, T> { let sp = self.token.span; let tok = self.this_token_descr(); let mut e = self.span_fatal(sp, &format!("expected `{{`, found {}", tok)); let do_not_suggest_help = self.token.is_keyword(kw::In) || self.token == token::Colon; // Check to see if the user has written something like // // if (cond) // bar; // // which is valid in other languages, but not Rust. match self.parse_stmt_without_recovery(false) { Ok(Some(stmt)) => { if self.look_ahead(1, |t| t == &token::OpenDelim(token::Brace)) || do_not_suggest_help { // If the next token is an open brace (e.g., `if a b {`), the place- // inside-a-block suggestion would be more likely wrong than right. e.span_label(sp, "expected `{`"); return Err(e); } let stmt_span = if self.eat(&token::Semi) { // Expand the span to include the semicolon. stmt.span.with_hi(self.prev_span.hi()) } else { stmt.span }; if let Ok(snippet) = self.span_to_snippet(stmt_span) { e.span_suggestion( stmt_span, "try placing this code inside a block", format!("{{ {} }}", snippet), // Speculative; has been misleading in the past (#46836). Applicability::MaybeIncorrect, ); } } Err(mut e) => { self.recover_stmt_(SemiColonMode::Break, BlockMode::Ignore); e.cancel(); } _ => {} } e.span_label(sp, "expected `{`"); return Err(e); } /// Parses a block. Inner attributes are allowed. pub(super) fn parse_inner_attrs_and_block( &mut self, ) -> PResult<'a, (Vec<Attribute>, P<Block>)> { maybe_whole!(self, NtBlock, |x| (Vec::new(), x)); let lo = self.token.span; self.expect(&token::OpenDelim(token::Brace))?; Ok((self.parse_inner_attributes()?, self.parse_block_tail(lo, BlockCheckMode::Default)?)) } /// Parses the rest of a block expression or function body. /// Precondition: already parsed the '{'. pub(super) fn parse_block_tail( &mut self, lo: Span, s: BlockCheckMode, ) -> PResult<'a, P<Block>> { let mut stmts = vec![]; while !self.eat(&token::CloseDelim(token::Brace)) { if self.token == token::Eof { break; } let stmt = match self.parse_full_stmt(false) { Err(mut err) => { self.maybe_annotate_with_ascription(&mut err, false); err.emit(); self.recover_stmt_(SemiColonMode::Ignore, BlockMode::Ignore); Some(self.mk_stmt( self.token.span, StmtKind::Expr(self.mk_expr_err(self.token.span)), )) } Ok(stmt) => stmt, }; if let Some(stmt) = stmt { stmts.push(stmt); } else { // Found only `;` or `}`. continue; }; } Ok(P(ast::Block { stmts, id: DUMMY_NODE_ID, rules: s, span: lo.to(self.prev_span) })) } /// Parses a statement, including the trailing semicolon. pub fn parse_full_stmt(&mut self, macro_legacy_warnings: bool) -> PResult<'a, Option<Stmt>> { // Skip looking for a trailing semicolon when we have an interpolated statement. 
maybe_whole!(self, NtStmt, |x| Some(x)); let mut stmt = match self.parse_stmt_without_recovery(macro_legacy_warnings)? { Some(stmt) => stmt, None => return Ok(None), }; let mut eat_semi = true; match stmt.kind { StmtKind::Expr(ref expr) if self.token != token::Eof => { // expression without semicolon if classify::expr_requires_semi_to_be_stmt(expr) { // Just check for errors and recover; do not eat semicolon yet. if let Err(mut e) = self.expect_one_of(&[], &[token::Semi, token::CloseDelim(token::Brace)]) { e.emit(); self.recover_stmt(); // Don't complain about type errors in body tail after parse error (#57383). let sp = expr.span.to(self.prev_span); stmt.kind = StmtKind::Expr(self.mk_expr_err(sp)); } } } StmtKind::Local(..) => { // We used to incorrectly allow a macro-expanded let statement to lack a semicolon. if macro_legacy_warnings && self.token != token::Semi { self.warn_missing_semicolon(); } else { self.expect_semi()?; eat_semi = false; } } _ => {} } if eat_semi && self.eat(&token::Semi) { stmt = stmt.add_trailing_semicolon(); } stmt.span = stmt.span.to(self.prev_span); Ok(Some(stmt)) } fn warn_missing_semicolon(&self) { self.diagnostic() .struct_span_warn(self.token.span, { &format!("expected `;`, found {}", self.this_token_descr()) }) .note({ "this was erroneously allowed and will become a hard error in a future release" }) .emit(); } fn mk_stmt(&self, span: Span, kind: StmtKind) -> Stmt { Stmt { id: DUMMY_NODE_ID, kind, span } } }
40.698745
100
0.535365
11d7714136a595a0fbdf828f8c39a2d6dad5c0aa
366
use ksql::parser::{Parser, Value}; use std::error::Error; fn main() -> Result<(), Box<dyn Error>> { let src = r#"{"name":"MyCompany", "properties":{"employees": 50}}"#.as_bytes(); let ex = Parser::parse(".properties.employees > 20")?; let result = ex.calculate(src)?; println!("{}", &result); assert_eq!(Value::Bool(true), result); Ok(()) }
28.153846
82
0.587432
bf84c2fa3ed5cee857213957bac1fc7ee562a949
146
// Only the meta-provider is exposed outside this module. pub mod provider; pub mod args; mod file_provider; mod lib_provider; mod http_provider;
20.857143
57
0.794521
9b0c879d591a01a6cabea057827dd8c8d392630b
2,595
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::KEYR3 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct AES_KEYR3R { bits: u32, } impl AES_KEYR3R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } } #[doc = r" Proxy"] pub struct _AES_KEYR3W<'a> { w: &'a mut W, } impl<'a> _AES_KEYR3W<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:31 - AES key register (MSB key \\[127:96\\])"] #[inline] pub fn aes_keyr3(&self) -> AES_KEYR3R { let bits = { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u32 }; AES_KEYR3R { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:31 - AES key register (MSB key \\[127:96\\])"] #[inline] pub fn aes_keyr3(&mut self) -> _AES_KEYR3W { _AES_KEYR3W { w: self } } }
24.481132
66
0.503276
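A small sketch of the closure-based access pattern generated above, assuming a free function living alongside this module with a `KEYR3` handle in hand: `read` snapshots the register, `write` starts from the reset value, and `modify` performs a read-modify-write.

fn keyr3_access_pattern(keyr3: &super::KEYR3) {
    // Snapshot the current key word.
    let current = keyr3.read().aes_keyr3().bits();
    // Overwrite the register, starting from its reset value.
    keyr3.write(|w| unsafe { w.aes_keyr3().bits(0xDEAD_BEEF) });
    // Read-modify-write: derive the new value from the old one.
    keyr3.modify(|r, w| unsafe { w.bits(r.bits().rotate_left(8)) });
    let _ = current;
}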
23723947eb1c5771959fa94acd4d2f8c41428c00
5,092
use crate::arkworks::pasta_fp::WasmPastaFp; use crate::arkworks::pasta_fq::WasmPastaFq; use mina_curves::pasta::{ pallas::Affine as AffinePallas, pallas::G_GENERATOR_X as GeneratorPallasX, pallas::G_GENERATOR_Y as GeneratorPallasY, vesta::Affine as AffineVesta, vesta::G_GENERATOR_X as GeneratorVestaX, vesta::G_GENERATOR_Y as GeneratorVestaY, }; use wasm_bindgen::prelude::*; // // handy types // #[wasm_bindgen] #[derive(Clone, Copy, Debug)] pub struct WasmGPallas { pub x: WasmPastaFp, pub y: WasmPastaFp, pub infinity: bool, } #[wasm_bindgen] #[derive(Clone, Copy, Debug)] pub struct WasmGVesta { pub x: WasmPastaFq, pub y: WasmPastaFq, pub infinity: bool, } // Conversions from/to AffineVesta impl From<AffineVesta> for WasmGVesta { fn from(point: AffineVesta) -> Self { WasmGVesta { x: point.x.into(), y: point.y.into(), infinity: point.infinity, } } } impl From<&AffineVesta> for WasmGVesta { fn from(point: &AffineVesta) -> Self { WasmGVesta { x: point.x.into(), y: point.y.into(), infinity: point.infinity, } } } impl From<WasmGVesta> for AffineVesta { fn from(point: WasmGVesta) -> Self { AffineVesta::new(point.x.into(), point.y.into(), point.infinity) } } impl From<&WasmGVesta> for AffineVesta { fn from(point: &WasmGVesta) -> Self { AffineVesta::new(point.x.into(), point.y.into(), point.infinity) } } // Conversion from/to AffinePallas impl From<AffinePallas> for WasmGPallas { fn from(point: AffinePallas) -> Self { WasmGPallas { x: point.x.into(), y: point.y.into(), infinity: point.infinity, } } } impl From<&AffinePallas> for WasmGPallas { fn from(point: &AffinePallas) -> Self { WasmGPallas { x: point.x.into(), y: point.y.into(), infinity: point.infinity, } } } impl From<WasmGPallas> for AffinePallas { fn from(point: WasmGPallas) -> Self { AffinePallas::new(point.x.into(), point.y.into(), point.infinity) } } impl From<&WasmGPallas> for AffinePallas { fn from(point: &WasmGPallas) -> Self { AffinePallas::new(point.x.into(), point.y.into(), point.infinity) } } #[wasm_bindgen] pub fn caml_pallas_affine_one() -> WasmGPallas { WasmGPallas { x: WasmPastaFp::from(GeneratorPallasX), y: WasmPastaFp::from(GeneratorPallasY), infinity: false, } } #[wasm_bindgen] pub fn caml_vesta_affine_one() -> WasmGVesta { WasmGVesta { x: WasmPastaFq::from(GeneratorVestaX), y: WasmPastaFq::from(GeneratorVestaY), infinity: false, } } /* #[wasm_bindgen] pub fn caml_pasta_pallas_one() -> WasmPallasGProjective { ProjectivePallas::prime_subgroup_generator().into() } #[wasm_bindgen] pub fn caml_pasta_pallas_add( x: &WasmPallasGProjective, y: &WasmPallasGProjective, ) -> WasmPallasGProjective { (**x + **y).into() } #[wasm_bindgen] pub fn caml_pasta_pallas_sub( x: &WasmPallasGProjective, y: &WasmPallasGProjective, ) -> WasmPallasGProjective { (**x - **y).into() } #[wasm_bindgen] pub fn caml_pasta_pallas_negate(x: &WasmPallasGProjective) -> WasmPallasGProjective { (-(**x)).into() } #[wasm_bindgen] pub fn caml_pasta_pallas_double(x: &WasmPallasGProjective) -> WasmPallasGProjective { (x.double()).into() } #[wasm_bindgen] pub fn caml_pasta_pallas_scale(x: &WasmPallasGProjective, y: WasmPastaFq) -> WasmPallasGProjective { (x.mul(y.0)).into() } #[wasm_bindgen] pub fn caml_pasta_pallas_random() -> WasmPallasGProjective { let rng = &mut rand_core::OsRng; WasmPallasGProjective(UniformRand::rand(rng)) } #[wasm_bindgen] pub fn caml_pasta_pallas_rng(i: u32) -> WasmPallasGProjective { let i: u64 = i.into(); let mut rng: StdRng = rand::SeedableRng::seed_from_u64(i); WasmPallasGProjective(UniformRand::rand(&mut rng)) } #[wasm_bindgen] pub fn 
caml_pasta_pallas_endo_base() -> WasmPastaFp { let (endo_q, _endo_r) = commitment_dlog::srs::endos::<GAffine>(); WasmPastaFp(endo_q) } #[wasm_bindgen] pub fn caml_pasta_pallas_endo_scalar() -> WasmPastaFq { let (_endo_q, endo_r) = commitment_dlog::srs::endos::<GAffine>(); WasmPastaFq(endo_r) } #[wasm_bindgen] pub fn caml_pasta_pallas_to_affine(x: &WasmPallasGProjective) -> WasmPallasGAffine { Into::<&GProjective>::into(x).into_affine().into() } #[wasm_bindgen] pub fn caml_pasta_pallas_of_affine(x: &WasmPallasGAffine) -> WasmPallasGProjective { Into::<GAffine>::into(x).into_projective().into() } #[wasm_bindgen] pub fn caml_pasta_pallas_of_affine_coordinates(x: WasmPastaFp, y: WasmPastaFp) -> WasmPallasGProjective { GProjective::new(x.0, y.0, Fp::one()).into() } #[wasm_bindgen] pub fn caml_pasta_pallas_affine_deep_copy(x: &WasmPallasGAffine) -> WasmPallasGAffine { x.clone() } #[wasm_bindgen] pub fn caml_pasta_pallas_affine_one() -> WasmPallasGAffine { GAffine::prime_subgroup_generator().into() } */
25.083744
105
0.667125
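A tiny round-trip sketch using only the items defined above: the wasm-facing generator converts back into an arkworks affine point through the `From` impls.

#[test]
fn pallas_generator_roundtrip() {
    // `caml_pallas_affine_one` wraps the curve generator for wasm callers.
    let wasm_point: WasmGPallas = caml_pallas_affine_one();
    assert!(!wasm_point.infinity);
    // Convert back into the native arkworks representation.
    let affine: AffinePallas = AffinePallas::from(wasm_point);
    assert!(!affine.infinity);
}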
6a07a74d8378799bd241a9d5dab8061dece45885
2,308
use std::collections::HashMap; use serde::{Deserialize, Serialize}; /// Response from executing /// [ReadConfigurationRequest][crate::api::auth::oidc::requests::ReadConfigurationRequest] #[derive(Deserialize, Debug, Serialize)] pub struct ReadConfigurationResponse { pub bound_issuer: Option<String>, pub default_role: Option<String>, pub jwks_ca_pem: Option<String>, pub jwt_supported_algs: Option<Vec<String>>, pub jwks_url: Option<String>, pub jwt_validation_pubkeys: Option<Vec<String>>, pub namespace_in_state: Option<bool>, pub oidc_discovery_ca_pem: Option<String>, pub oidc_discovery_url: Option<String>, pub oidc_client_id: Option<String>, pub oidc_client_secret: Option<String>, pub oidc_response_mode: Option<String>, pub oidc_response_types: Option<Vec<String>>, pub provider_config: Option<HashMap<String, String>>, } /// Response from executing /// [ReadRoleRequest][crate::api::auth::oidc::requests::ReadRoleRequest] #[derive(Deserialize, Debug, Serialize)] pub struct ReadRoleResponse { pub allowed_redirect_uris: Vec<String>, pub user_claim: String, pub bound_subject: String, pub bound_claims: Option<HashMap<String, String>>, pub bound_claims_type: String, pub bound_audiences: Option<Vec<String>>, pub claim_mappings: Option<HashMap<String, String>>, pub clock_skew_leeway: u64, pub expiration_leeway: u64, pub groups_claim: String, pub max_age: u64, pub not_before_leeway: u64, pub oidc_scopes: Option<Vec<String>>, pub role_type: String, pub token_bound_cidrs: Vec<String>, pub token_explicit_max_ttl: u64, pub token_no_default_policy: bool, pub token_num_uses: u64, pub token_period: u64, pub token_policies: Vec<String>, pub token_ttl: u64, pub token_max_ttl: u64, pub token_type: String, pub verbose_oidc_logging: bool, } /// Response from executing /// [ListRolesRequest][crate::api::auth::oidc::requests::ListRolesRequest] #[derive(Deserialize, Debug, Serialize)] pub struct ListRolesResponse { pub keys: Vec<String>, } /// Response from executing /// [OIDCAuthRequest][crate::api::auth::oidc::requests::OIDCAuthRequest] #[derive(Deserialize, Debug, Serialize)] pub struct OIDCAuthResponse { pub auth_url: String, }
33.941176
90
0.731802
4ad4ef60a5294728fef1b8d20bbb089bb2cb2695
649
// run-pass #![allow(unused_imports)] #![feature(rustc_private)] extern crate rustc_macros; extern crate rustc_serialize; use rustc_macros::{Decodable, Encodable}; use rustc_serialize::opaque::{MemDecoder, MemEncoder}; use rustc_serialize::{Decodable, Encodable, Encoder}; #[derive(Encodable, Decodable)] struct A { foo: Box<[bool]>, } fn main() { let obj = A { foo: Box::new([true, false]) }; let mut encoder = MemEncoder::new(); obj.encode(&mut encoder); let data = encoder.finish().unwrap(); let mut decoder = MemDecoder::new(&data, 0); let obj2 = A::decode(&mut decoder); assert_eq!(obj.foo, obj2.foo); }
21.633333
54
0.670262
7a3347d47435752e0302e0605f5df67f129e3627
26,282
//! Commit, Data Change and Rollback Notification Callbacks #![allow(non_camel_case_types)] use std::os::raw::{c_char, c_int, c_void}; use std::panic::{catch_unwind, RefUnwindSafe}; use std::ptr; use crate::ffi; use crate::{Connection, InnerConnection}; /// Action Codes #[derive(Clone, Copy, Debug, PartialEq)] #[repr(i32)] #[non_exhaustive] #[allow(clippy::upper_case_acronyms)] pub enum Action { /// Unsupported / unexpected action UNKNOWN = -1, /// DELETE command SQLITE_DELETE = ffi::SQLITE_DELETE, /// INSERT command SQLITE_INSERT = ffi::SQLITE_INSERT, /// UPDATE command SQLITE_UPDATE = ffi::SQLITE_UPDATE, } impl From<i32> for Action { #[inline] fn from(code: i32) -> Action { match code { ffi::SQLITE_DELETE => Action::SQLITE_DELETE, ffi::SQLITE_INSERT => Action::SQLITE_INSERT, ffi::SQLITE_UPDATE => Action::SQLITE_UPDATE, _ => Action::UNKNOWN, } } } /// The context received by an authorizer hook. /// /// See <https://sqlite.org/c3ref/set_authorizer.html> for more info. #[derive(Clone, Copy, Debug, PartialEq)] pub struct AuthContext<'c> { /// The action to be authorized. pub action: AuthAction<'c>, /// The database name, if applicable. pub database_name: Option<&'c str>, /// The inner-most trigger or view responsible for the access attempt. /// `None` if the access attempt was made by top-level SQL code. pub accessor: Option<&'c str>, } /// Actions and arguments found within a statement during /// preparation. /// /// See <https://sqlite.org/c3ref/c_alter_table.html> for more info. #[derive(Clone, Copy, Debug, PartialEq)] #[non_exhaustive] #[allow(missing_docs)] pub enum AuthAction<'c> { /// This variant is not normally produced by SQLite. You may encounter it /// if you're using a different version than what's supported by this library. Unknown { /// The unknown authorization action code. code: i32, /// The third arg to the authorizer callback. arg1: Option<&'c str>, /// The fourth arg to the authorizer callback. arg2: Option<&'c str>, }, CreateIndex { index_name: &'c str, table_name: &'c str, }, CreateTable { table_name: &'c str, }, CreateTempIndex { index_name: &'c str, table_name: &'c str, }, CreateTempTable { table_name: &'c str, }, CreateTempTrigger { trigger_name: &'c str, table_name: &'c str, }, CreateTempView { view_name: &'c str, }, CreateTrigger { trigger_name: &'c str, table_name: &'c str, }, CreateView { view_name: &'c str, }, Delete { table_name: &'c str, }, DropIndex { index_name: &'c str, table_name: &'c str, }, DropTable { table_name: &'c str, }, DropTempIndex { index_name: &'c str, table_name: &'c str, }, DropTempTable { table_name: &'c str, }, DropTempTrigger { trigger_name: &'c str, table_name: &'c str, }, DropTempView { view_name: &'c str, }, DropTrigger { trigger_name: &'c str, table_name: &'c str, }, DropView { view_name: &'c str, }, Insert { table_name: &'c str, }, Pragma { pragma_name: &'c str, /// The pragma value, if present (e.g., `PRAGMA name = value;`).
pragma_value: Option<&'c str>, }, Read { table_name: &'c str, column_name: &'c str, }, Select, Transaction { operation: TransactionOperation, }, Update { table_name: &'c str, column_name: &'c str, }, Attach { filename: &'c str, }, Detach { database_name: &'c str, }, AlterTable { database_name: &'c str, table_name: &'c str, }, Reindex { index_name: &'c str, }, Analyze { table_name: &'c str, }, CreateVtable { table_name: &'c str, module_name: &'c str, }, DropVtable { table_name: &'c str, module_name: &'c str, }, Function { function_name: &'c str, }, Savepoint { operation: TransactionOperation, savepoint_name: &'c str, }, #[cfg(feature = "modern_sqlite")] Recursive, } impl<'c> AuthAction<'c> { fn from_raw(code: i32, arg1: Option<&'c str>, arg2: Option<&'c str>) -> Self { match (code, arg1, arg2) { (ffi::SQLITE_CREATE_INDEX, Some(index_name), Some(table_name)) => Self::CreateIndex { index_name, table_name, }, (ffi::SQLITE_CREATE_TABLE, Some(table_name), _) => Self::CreateTable { table_name }, (ffi::SQLITE_CREATE_TEMP_INDEX, Some(index_name), Some(table_name)) => { Self::CreateTempIndex { index_name, table_name, } } (ffi::SQLITE_CREATE_TEMP_TABLE, Some(table_name), _) => { Self::CreateTempTable { table_name } } (ffi::SQLITE_CREATE_TEMP_TRIGGER, Some(trigger_name), Some(table_name)) => { Self::CreateTempTrigger { trigger_name, table_name, } } (ffi::SQLITE_CREATE_TEMP_VIEW, Some(view_name), _) => { Self::CreateTempView { view_name } } (ffi::SQLITE_CREATE_TRIGGER, Some(trigger_name), Some(table_name)) => { Self::CreateTrigger { trigger_name, table_name, } } (ffi::SQLITE_CREATE_VIEW, Some(view_name), _) => Self::CreateView { view_name }, (ffi::SQLITE_DELETE, Some(table_name), None) => Self::Delete { table_name }, (ffi::SQLITE_DROP_INDEX, Some(index_name), Some(table_name)) => Self::DropIndex { index_name, table_name, }, (ffi::SQLITE_DROP_TABLE, Some(table_name), _) => Self::DropTable { table_name }, (ffi::SQLITE_DROP_TEMP_INDEX, Some(index_name), Some(table_name)) => { Self::DropTempIndex { index_name, table_name, } } (ffi::SQLITE_DROP_TEMP_TABLE, Some(table_name), _) => { Self::DropTempTable { table_name } } (ffi::SQLITE_DROP_TEMP_TRIGGER, Some(trigger_name), Some(table_name)) => { Self::DropTempTrigger { trigger_name, table_name, } } (ffi::SQLITE_DROP_TEMP_VIEW, Some(view_name), _) => Self::DropTempView { view_name }, (ffi::SQLITE_DROP_TRIGGER, Some(trigger_name), Some(table_name)) => Self::DropTrigger { trigger_name, table_name, }, (ffi::SQLITE_DROP_VIEW, Some(view_name), _) => Self::DropView { view_name }, (ffi::SQLITE_INSERT, Some(table_name), _) => Self::Insert { table_name }, (ffi::SQLITE_PRAGMA, Some(pragma_name), pragma_value) => Self::Pragma { pragma_name, pragma_value, }, (ffi::SQLITE_READ, Some(table_name), Some(column_name)) => Self::Read { table_name, column_name, }, (ffi::SQLITE_SELECT, ..) 
=> Self::Select, (ffi::SQLITE_TRANSACTION, Some(operation_str), _) => Self::Transaction { operation: TransactionOperation::from_str(operation_str), }, (ffi::SQLITE_UPDATE, Some(table_name), Some(column_name)) => Self::Update { table_name, column_name, }, (ffi::SQLITE_ATTACH, Some(filename), _) => Self::Attach { filename }, (ffi::SQLITE_DETACH, Some(database_name), _) => Self::Detach { database_name }, (ffi::SQLITE_ALTER_TABLE, Some(database_name), Some(table_name)) => Self::AlterTable { database_name, table_name, }, (ffi::SQLITE_REINDEX, Some(index_name), _) => Self::Reindex { index_name }, (ffi::SQLITE_ANALYZE, Some(table_name), _) => Self::Analyze { table_name }, (ffi::SQLITE_CREATE_VTABLE, Some(table_name), Some(module_name)) => { Self::CreateVtable { table_name, module_name, } } (ffi::SQLITE_DROP_VTABLE, Some(table_name), Some(module_name)) => Self::DropVtable { table_name, module_name, }, (ffi::SQLITE_FUNCTION, _, Some(function_name)) => Self::Function { function_name }, (ffi::SQLITE_SAVEPOINT, Some(operation_str), Some(savepoint_name)) => Self::Savepoint { operation: TransactionOperation::from_str(operation_str), savepoint_name, }, #[cfg(feature = "modern_sqlite")] // 3.8.3 (ffi::SQLITE_RECURSIVE, ..) => Self::Recursive, (code, arg1, arg2) => Self::Unknown { code, arg1, arg2 }, } } } pub(crate) type BoxedAuthorizer = Box<dyn for<'c> FnMut(AuthContext<'c>) -> Authorization + Send + 'static>; /// A transaction operation. #[derive(Clone, Copy, Debug, PartialEq)] #[non_exhaustive] #[allow(missing_docs)] pub enum TransactionOperation { Unknown, Begin, Release, Rollback, } impl TransactionOperation { fn from_str(op_str: &str) -> Self { match op_str { "BEGIN" => Self::Begin, "RELEASE" => Self::Release, "ROLLBACK" => Self::Rollback, _ => Self::Unknown, } } } /// [`authorizer`](Connection::authorizer) return code #[derive(Clone, Copy, Debug, PartialEq)] #[non_exhaustive] pub enum Authorization { /// Authorize the action. Allow, /// Don't allow access, but don't trigger an error either. Ignore, /// Trigger an error. Deny, } impl Authorization { fn into_raw(self) -> c_int { match self { Self::Allow => ffi::SQLITE_OK, Self::Ignore => ffi::SQLITE_IGNORE, Self::Deny => ffi::SQLITE_DENY, } } } impl Connection { /// Register a callback function to be invoked whenever /// a transaction is committed. /// /// The callback returns `true` to rollback. #[inline] pub fn commit_hook<F>(&self, hook: Option<F>) where F: FnMut() -> bool + Send + 'static, { self.db.borrow_mut().commit_hook(hook); } /// Register a callback function to be invoked whenever /// a transaction is committed. #[inline] pub fn rollback_hook<F>(&self, hook: Option<F>) where F: FnMut() + Send + 'static, { self.db.borrow_mut().rollback_hook(hook); } /// Register a callback function to be invoked whenever /// a row is updated, inserted or deleted in a rowid table. /// /// The callback parameters are: /// /// - the type of database update (`SQLITE_INSERT`, `SQLITE_UPDATE` or /// `SQLITE_DELETE`), /// - the name of the database ("main", "temp", ...), /// - the name of the table that is updated, /// - the ROWID of the row that is updated. #[inline] pub fn update_hook<F>(&self, hook: Option<F>) where F: FnMut(Action, &str, &str, i64) + Send + 'static, { self.db.borrow_mut().update_hook(hook); } /// Register a query progress callback. /// /// The parameter `num_ops` is the approximate number of virtual machine /// instructions that are evaluated between successive invocations of the /// `handler`. 
If `num_ops` is less than one then the progress handler /// is disabled. /// /// If the progress callback returns `true`, the operation is interrupted. pub fn progress_handler<F>(&self, num_ops: c_int, handler: Option<F>) where F: FnMut() -> bool + Send + RefUnwindSafe + 'static, { self.db.borrow_mut().progress_handler(num_ops, handler); } /// Register an authorizer callback that's invoked /// as a statement is being prepared. #[inline] pub fn authorizer<'c, F>(&self, hook: Option<F>) where F: for<'r> FnMut(AuthContext<'r>) -> Authorization + Send + RefUnwindSafe + 'static, { self.db.borrow_mut().authorizer(hook); } } impl InnerConnection { #[inline] pub fn remove_hooks(&mut self) { self.update_hook(None::<fn(Action, &str, &str, i64)>); self.commit_hook(None::<fn() -> bool>); self.rollback_hook(None::<fn()>); self.progress_handler(0, None::<fn() -> bool>); self.authorizer(None::<fn(AuthContext<'_>) -> Authorization>); } fn commit_hook<F>(&mut self, hook: Option<F>) where F: FnMut() -> bool + Send + 'static, { unsafe extern "C" fn call_boxed_closure<F>(p_arg: *mut c_void) -> c_int where F: FnMut() -> bool, { let r = catch_unwind(|| { let boxed_hook: *mut F = p_arg.cast::<F>(); (*boxed_hook)() }); if let Ok(true) = r { 1 } else { 0 } } // unlike `sqlite3_create_function_v2`, we cannot specify a `xDestroy` with // `sqlite3_commit_hook`. so we keep the `xDestroy` function in // `InnerConnection.free_boxed_hook`. let free_commit_hook = if hook.is_some() { Some(free_boxed_hook::<F> as unsafe fn(*mut c_void)) } else { None }; let previous_hook = match hook { Some(hook) => { let boxed_hook: *mut F = Box::into_raw(Box::new(hook)); unsafe { ffi::sqlite3_commit_hook( self.db(), Some(call_boxed_closure::<F>), boxed_hook.cast(), ) } } _ => unsafe { ffi::sqlite3_commit_hook(self.db(), None, ptr::null_mut()) }, }; if !previous_hook.is_null() { if let Some(free_boxed_hook) = self.free_commit_hook { unsafe { free_boxed_hook(previous_hook) }; } } self.free_commit_hook = free_commit_hook; } fn rollback_hook<F>(&mut self, hook: Option<F>) where F: FnMut() + Send + 'static, { unsafe extern "C" fn call_boxed_closure<F>(p_arg: *mut c_void) where F: FnMut(), { drop(catch_unwind(|| { let boxed_hook: *mut F = p_arg.cast::<F>(); (*boxed_hook)(); })); } let free_rollback_hook = if hook.is_some() { Some(free_boxed_hook::<F> as unsafe fn(*mut c_void)) } else { None }; let previous_hook = match hook { Some(hook) => { let boxed_hook: *mut F = Box::into_raw(Box::new(hook)); unsafe { ffi::sqlite3_rollback_hook( self.db(), Some(call_boxed_closure::<F>), boxed_hook.cast(), ) } } _ => unsafe { ffi::sqlite3_rollback_hook(self.db(), None, ptr::null_mut()) }, }; if !previous_hook.is_null() { if let Some(free_boxed_hook) = self.free_rollback_hook { unsafe { free_boxed_hook(previous_hook) }; } } self.free_rollback_hook = free_rollback_hook; } fn update_hook<F>(&mut self, hook: Option<F>) where F: FnMut(Action, &str, &str, i64) + Send + 'static, { unsafe extern "C" fn call_boxed_closure<F>( p_arg: *mut c_void, action_code: c_int, p_db_name: *const c_char, p_table_name: *const c_char, row_id: i64, ) where F: FnMut(Action, &str, &str, i64), { let action = Action::from(action_code); drop(catch_unwind(|| { let boxed_hook: *mut F = p_arg.cast::<F>(); (*boxed_hook)( action, expect_utf8(p_db_name, "database name"), expect_utf8(p_table_name, "table name"), row_id, ); })); } let free_update_hook = if hook.is_some() { Some(free_boxed_hook::<F> as unsafe fn(*mut c_void)) } else { None }; let previous_hook = match hook { Some(hook) => { let 
boxed_hook: *mut F = Box::into_raw(Box::new(hook)); unsafe { ffi::sqlite3_update_hook( self.db(), Some(call_boxed_closure::<F>), boxed_hook.cast(), ) } } _ => unsafe { ffi::sqlite3_update_hook(self.db(), None, ptr::null_mut()) }, }; if !previous_hook.is_null() { if let Some(free_boxed_hook) = self.free_update_hook { unsafe { free_boxed_hook(previous_hook) }; } } self.free_update_hook = free_update_hook; } fn progress_handler<F>(&mut self, num_ops: c_int, handler: Option<F>) where F: FnMut() -> bool + Send + RefUnwindSafe + 'static, { unsafe extern "C" fn call_boxed_closure<F>(p_arg: *mut c_void) -> c_int where F: FnMut() -> bool, { let r = catch_unwind(|| { let boxed_handler: *mut F = p_arg.cast::<F>(); (*boxed_handler)() }); if let Ok(true) = r { 1 } else { 0 } } if let Some(handler) = handler { let boxed_handler = Box::new(handler); unsafe { ffi::sqlite3_progress_handler( self.db(), num_ops, Some(call_boxed_closure::<F>), &*boxed_handler as *const F as *mut _, ); } self.progress_handler = Some(boxed_handler); } else { unsafe { ffi::sqlite3_progress_handler(self.db(), num_ops, None, ptr::null_mut()) } self.progress_handler = None; }; } fn authorizer<'c, F>(&'c mut self, authorizer: Option<F>) where F: for<'r> FnMut(AuthContext<'r>) -> Authorization + Send + RefUnwindSafe + 'static, { unsafe extern "C" fn call_boxed_closure<'c, F>( p_arg: *mut c_void, action_code: c_int, param1: *const c_char, param2: *const c_char, db_name: *const c_char, trigger_or_view_name: *const c_char, ) -> c_int where F: FnMut(AuthContext<'c>) -> Authorization + Send + 'static, { catch_unwind(|| { let action = AuthAction::from_raw( action_code, expect_optional_utf8(param1, "authorizer param 1"), expect_optional_utf8(param2, "authorizer param 2"), ); let auth_ctx = AuthContext { action, database_name: expect_optional_utf8(db_name, "database name"), accessor: expect_optional_utf8( trigger_or_view_name, "accessor (inner-most trigger or view)", ), }; let boxed_hook: *mut F = p_arg.cast::<F>(); (*boxed_hook)(auth_ctx) }) .map_or_else(|_| ffi::SQLITE_ERROR, Authorization::into_raw) } let callback_fn = authorizer .as_ref() .map(|_| call_boxed_closure::<'c, F> as unsafe extern "C" fn(_, _, _, _, _, _) -> _); let boxed_authorizer = authorizer.map(Box::new); match unsafe { ffi::sqlite3_set_authorizer( self.db(), callback_fn, boxed_authorizer .as_ref() .map_or_else(ptr::null_mut, |f| &**f as *const F as *mut _), ) } { ffi::SQLITE_OK => { self.authorizer = boxed_authorizer.map(|ba| ba as _); } err_code => { // The only error that `sqlite3_set_authorizer` returns is `SQLITE_MISUSE` // when compiled with `ENABLE_API_ARMOR` and the db pointer is invalid. // This library does not allow constructing a null db ptr, so if this branch // is hit, something very bad has happened. Panicking instead of returning // `Result` keeps this hook's API consistent with the others. 
panic!("unexpectedly failed to set_authorizer: {}", unsafe { crate::error::error_from_handle(self.db(), err_code) }); } } } } unsafe fn free_boxed_hook<F>(p: *mut c_void) { drop(Box::from_raw(p.cast::<F>())); } unsafe fn expect_utf8<'a>(p_str: *const c_char, description: &'static str) -> &'a str { expect_optional_utf8(p_str, description) .unwrap_or_else(|| panic!("received empty {}", description)) } unsafe fn expect_optional_utf8<'a>( p_str: *const c_char, description: &'static str, ) -> Option<&'a str> { if p_str.is_null() { return None; } std::str::from_utf8(std::ffi::CStr::from_ptr(p_str).to_bytes()) .unwrap_or_else(|_| panic!("received non-utf8 string as {}", description)) .into() } #[cfg(test)] mod test { use super::Action; use crate::{Connection, Result}; use std::sync::atomic::{AtomicBool, Ordering}; #[test] fn test_commit_hook() -> Result<()> { let db = Connection::open_in_memory()?; static CALLED: AtomicBool = AtomicBool::new(false); db.commit_hook(Some(|| { CALLED.store(true, Ordering::Relaxed); false })); db.execute_batch("BEGIN; CREATE TABLE foo (t TEXT); COMMIT;")?; assert!(CALLED.load(Ordering::Relaxed)); Ok(()) } #[test] fn test_fn_commit_hook() -> Result<()> { let db = Connection::open_in_memory()?; fn hook() -> bool { true } db.commit_hook(Some(hook)); db.execute_batch("BEGIN; CREATE TABLE foo (t TEXT); COMMIT;") .unwrap_err(); Ok(()) } #[test] fn test_rollback_hook() -> Result<()> { let db = Connection::open_in_memory()?; static CALLED: AtomicBool = AtomicBool::new(false); db.rollback_hook(Some(|| { CALLED.store(true, Ordering::Relaxed); })); db.execute_batch("BEGIN; CREATE TABLE foo (t TEXT); ROLLBACK;")?; assert!(CALLED.load(Ordering::Relaxed)); Ok(()) } #[test] fn test_update_hook() -> Result<()> { let db = Connection::open_in_memory()?; static CALLED: AtomicBool = AtomicBool::new(false); db.update_hook(Some(|action, db: &str, tbl: &str, row_id| { assert_eq!(Action::SQLITE_INSERT, action); assert_eq!("main", db); assert_eq!("foo", tbl); assert_eq!(1, row_id); CALLED.store(true, Ordering::Relaxed); })); db.execute_batch("CREATE TABLE foo (t TEXT)")?; db.execute_batch("INSERT INTO foo VALUES ('lisa')")?; assert!(CALLED.load(Ordering::Relaxed)); Ok(()) } #[test] fn test_progress_handler() -> Result<()> { let db = Connection::open_in_memory()?; static CALLED: AtomicBool = AtomicBool::new(false); db.progress_handler( 1, Some(|| { CALLED.store(true, Ordering::Relaxed); false }), ); db.execute_batch("BEGIN; CREATE TABLE foo (t TEXT); COMMIT;")?; assert!(CALLED.load(Ordering::Relaxed)); Ok(()) } #[test] fn test_progress_handler_interrupt() -> Result<()> { let db = Connection::open_in_memory()?; fn handler() -> bool { true } db.progress_handler(1, Some(handler)); db.execute_batch("BEGIN; CREATE TABLE foo (t TEXT); COMMIT;") .unwrap_err(); Ok(()) } #[test] fn test_authorizer() -> Result<()> { use super::{AuthAction, AuthContext, Authorization}; let db = Connection::open_in_memory()?; db.execute_batch("CREATE TABLE foo (public TEXT, private TEXT)") .unwrap(); let authorizer = move |ctx: AuthContext<'_>| match ctx.action { AuthAction::Read { column_name, .. } if column_name == "private" => { Authorization::Ignore } AuthAction::DropTable { .. } => Authorization::Deny, AuthAction::Pragma { .. 
} => panic!("shouldn't be called"), _ => Authorization::Allow, }; db.authorizer(Some(authorizer)); db.execute_batch( "BEGIN TRANSACTION; INSERT INTO foo VALUES ('pub txt', 'priv txt'); COMMIT;", ) .unwrap(); db.query_row_and_then("SELECT * FROM foo", [], |row| -> Result<()> { assert_eq!(row.get::<_, String>("public")?, "pub txt"); assert!(row.get::<_, Option<String>>("private")?.is_none()); Ok(()) }) .unwrap(); db.execute_batch("DROP TABLE foo").unwrap_err(); db.authorizer(None::<fn(AuthContext<'_>) -> Authorization>); db.execute_batch("PRAGMA user_version=1").unwrap(); // Disallowed by first authorizer, but it's now removed. Ok(()) } }
32.208333
116
0.524618
28ca48abfea1460584e56353f97df7839365b735
3,406
#![allow(unused_imports)] use super::*; use wasm_bindgen::prelude::*; #[wasm_bindgen] extern "wasm-bindgen" { # [wasm_bindgen (extends = :: js_sys :: Object , js_name = NotificationEventInit)] #[derive(Debug, Clone, PartialEq, Eq)] #[doc = "The `NotificationEventInit` dictionary."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `NotificationEventInit`*"] pub type NotificationEventInit; } impl NotificationEventInit { #[cfg(feature = "Notification")] #[doc = "Construct a new `NotificationEventInit`."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `Notification`, `NotificationEventInit`*"] pub fn new(notification: &Notification) -> Self { #[allow(unused_mut)] let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new()); ret.notification(notification); ret } #[doc = "Change the `bubbles` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `NotificationEventInit`*"] pub fn bubbles(&mut self, val: bool) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("bubbles"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `cancelable` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `NotificationEventInit`*"] pub fn cancelable(&mut self, val: bool) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("cancelable"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `composed` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `NotificationEventInit`*"] pub fn composed(&mut self, val: bool) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("composed"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[cfg(feature = "Notification")] #[doc = "Change the `notification` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `Notification`, `NotificationEventInit`*"] pub fn notification(&mut self, val: &Notification) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("notification"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } }
36.234043
120
0.572813
288521eef0f5583894ffd7fada38768ba1f3c52a
5,245
// /// Reinterprets this `MappedPages`'s underlying memory region as a struct of the given type, // /// i.e., overlays a struct on top of this mapped memory region. // /// // /// # Arguments // /// `offset`: the offset into the memory region at which the struct is located (where it should start). // /// // /// Returns a reference to the new struct (`&T`) that is formed from the underlying memory region, // /// with a lifetime dependent upon the lifetime of this `MappedPages` object. // /// This ensures safety by guaranteeing that the returned struct reference // /// cannot be used after this `MappedPages` object is dropped and unmapped. // pub fn as_unsized_type<T: Sized, U>(&self, offset: usize) -> Result<&U, &'static str> { // let size = mem::size_of::<T>(); // if true { // debug!("MappedPages::as_unsized_type(): requested type {} -> {} with size {} at offset {}, MappedPages size {}!", // core::any::type_name::<T>(), // core::any::type_name::<U>(), // size, offset, self.size_in_bytes() // ); // } // // check that size of the type T fits within the size of the mapping // let end = offset + size; // if end > self.size_in_bytes() { // error!("MappedPages::as_type(): requested type {} has size {}, which is too large at offset {} for MappedPages of size {}!", // core::any::type_name::<T>(), // size, offset, self.size_in_bytes() // ); // return Err("requested type and offset would not fit within the MappedPages bounds"); // } // // SAFE: we guarantee the size and lifetime are within that of this MappedPages object // let t: &T = unsafe { // mem::transmute(self.pages.start_address() + offset) // }; // let u: &U = &*t; // Ok(u) // } // /// Reinterprets this `MappedPages`'s underlying memory region as a dynamically-sized type, i.e., // /// a tuple composed of an type `T` followed directly by `[S]`, // /// a dynamically-sized slice of `slice_length` elements of type `S`. // /// // /// In other words, the returned reference is `(&T, &[S])`. // /// // /// The slice will start directly after the `T` type ends, so if the size of `T` is 32 bytes, // /// the slice will start at `(offset + 32)` and end at `(offset + 32) + (slice_length * size_of::<S>())`. // /// // /// # Arguments // /// * `offset`: the offset into the memory region at which the struct is located (where it should start). // /// * `slice_length`: the number of elements of type `S` that comprise the end of the struct. // /// // /// This is effectively a composition of [`as_type`](#method.as_type) and [`as_slice`](#method.as_slice). // /// // /// # Alignment Warning // /// Because this function returns a tuple, the type `T` must end on a normal alignment boundary (at least 32-bit aligned). // /// Otherwise, the data may be incorrectly represented; however, this is always a problem with reinterpreting // /// MappedPages as any arbitrary type -- that type must be defined properly. 
// pub fn as_dynamically_sized_type<T: Sized, S: Sized>(&self, offset: usize, slice_length: usize) -> Result<(&T, &[S]), &'static str> { // let type_size = mem::size_of::<T>(); // let slice_offset = offset + type_size; // let slice_size = slice_length * mem::size_of::<S>(); // let total_size = type_size + slice_size; // if true { // debug!("MappedPages::as_dynamically_sized_type(): total size {}, requested type {} (size {}) and slice [{}; {}] (slice size {}) at offset {}, MappedPages size {}!", // total_size, // core::any::type_name::<T>(), // type_size, // core::any::type_name::<S>(), // slice_length, // slice_size, // offset, // self.size_in_bytes() // ); // } // let end = offset + total_size; // if end > self.size_in_bytes() { // error!("MappedPages::as_dynamically_sized_type(): requested type {} (size {}) and slice [{}; {}] (slice size {}) at offset {} is too large for MappedPages size {}, its total size is {}.", // core::any::type_name::<T>(), // type_size, // core::any::type_name::<S>(), // slice_length, // slice_size, // offset, // self.size_in_bytes(), // total_size, // ); // return Err("requested type, slice, and offset would not fit within the MappedPages bounds"); // } // // SAFE: we guarantee the size and lifetime are within that of this MappedPages object // Ok( unsafe { // ( // mem::transmute(self.pages.start_address().value() + offset), // slice::from_raw_parts((self.pages.start_address().value() + slice_offset) as *const S, slice_length), // ) // }) // }
50.92233
202
0.547188
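A sketch of how the disabled API above appears intended to be called, kept commented out to match the source; `Header`, `mp`, and the entry count are hypothetical names:

// #[repr(C)]
// struct Header { magic: u32, entry_count: u32 }
//
// fn read_table(mp: &MappedPages) -> Result<(), &'static str> {
//     let entry_count = 8;
//     // `header` and `entries` share the MappedPages lifetime, so neither
//     // can outlive the mapping.
//     let (header, entries): (&Header, &[u64]) =
//         mp.as_dynamically_sized_type::<Header, u64>(0, entry_count)?;
//     assert_eq!(header.entry_count as usize, entries.len());
//     Ok(())
// }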
1d755632055560af0748e99fb03635162f394136
8,061
// This file specifies an output builder for the Receipt stage. use crate::bank::BankBox; use crate::error::ProtocolError; use crate::input_boxes::{ReserveCoinBox, StableCoinBox}; use crate::parameters::{MIN_BOX_VALUE, RESERVECOIN_TOKEN_ID, STABLECOIN_TOKEN_ID}; use ergo_headless_dapp_framework::{ create_candidate, find_and_sum_other_tokens, WrapBox, WrappedBox, }; use ergo_headless_dapp_framework::{ encoding::build_token, BlockHeight, NanoErg, P2PKAddressString, }; use ergo_lib::chain::ergo_box::{ErgoBox, ErgoBoxCandidate}; use ergo_lib::chain::token::Token; /// The struct which represents the `Receipt` stage. #[derive(Debug, Clone, WrapBox)] pub struct ReceiptBox { ergo_box: ErgoBox, } impl ReceiptBox { /// Create an `ErgoBoxCandidate` for an output Receipt box for the /// `Mint ReserveCoin` action pub fn create_mint_reservecoin_candidate( amount_to_mint: u64, user_address: &P2PKAddressString, current_height: BlockHeight, transaction_fee: NanoErg, implementor_fee: NanoErg, reservecoin_value_in_base: NanoErg, bank_box: &BankBox, input_ergs_total: NanoErg, ) -> Result<ErgoBoxCandidate, ProtocolError> { // Define the ReserveCoin token let rb_reservecoin_token = new_reservecoin_token(amount_to_mint)?; // Define the Receipt Box tokens let rb_tokens = vec![rb_reservecoin_token]; // Specify the registers in the Receipt box let rb_registers_vec = vec![ (amount_to_mint as i64).into(), (reservecoin_value_in_base as i64).into(), ]; // Create the Receipt box candidate let candidate = create_candidate( input_ergs_total - reservecoin_value_in_base - transaction_fee - implementor_fee - MIN_BOX_VALUE, &user_address, &rb_tokens, &rb_registers_vec, current_height, )?; Ok(candidate) } /// Create an `ErgoBoxCandidate` for an output Receipt box for the /// `Mint StableCoin` action pub fn create_mint_stablecoin_candidate( amount_to_mint: u64, user_address: &P2PKAddressString, current_height: BlockHeight, transaction_fee: NanoErg, implementor_fee: NanoErg, stablecoin_value_in_base: NanoErg, bank_box: &BankBox, input_ergs_total: NanoErg, ) -> Result<ErgoBoxCandidate, ProtocolError> { // Define the StableCoin token let rb_stablecoin_token = new_stablecoin_token(amount_to_mint)?; // Define the Receipt Box tokens let rb_tokens = vec![rb_stablecoin_token]; // Specify the registers in the Receipt box let rb_registers_vec = vec![ (amount_to_mint as i64).into(), (stablecoin_value_in_base as i64).into(), ]; // Create the Receipt box candidate let candidate = create_candidate( input_ergs_total - stablecoin_value_in_base - transaction_fee - implementor_fee - MIN_BOX_VALUE, &user_address, &rb_tokens, &rb_registers_vec, current_height, )?; Ok(candidate) } /// Create an `ErgoBoxCandidate` for an output Receipt box for the /// `Redeem ReserveCoin` action pub fn create_redeem_reservecoin_candidate( amount_to_redeem: u64, user_address: &P2PKAddressString, current_height: BlockHeight, transaction_fee: NanoErg, reservecoin_value_in_base: NanoErg, bank_box: &BankBox, rc_boxes: &Vec<ReserveCoinBox>, no_bank_inputs: &Vec<ErgoBox>, implementor_fee: NanoErg, ) -> Result<ErgoBoxCandidate, ProtocolError> { // Find how many nanoErgs are inside of the ReserveCoin boxes let rc_boxes_value = ReserveCoinBox::sum_nano_ergs_value(&rc_boxes); // Find how many ReserveCoins are inside of the ReserveCoin boxes let rc_boxes_total_rc = ReserveCoinBox::sum_token_amount(rc_boxes); // The amount of nanoErgs in the rc_boxes + the value of the // ReserveCoins being redeemed - the transaction fee let rb_value = rc_boxes_value + reservecoin_value_in_base -
transaction_fee - implementor_fee; // Specify the registers in the Receipt box let rb_registers_vec = vec![ (0 - amount_to_redeem as i64).into(), (0 - reservecoin_value_in_base as i64).into(), ]; // Define the tokens let mut rb_tokens = vec![]; // Check if there are any extra tokens that aren't being redeemed // and include them in the output if rc_boxes_total_rc > amount_to_redeem { // Define the ReserveCoin token let amount = rc_boxes_total_rc - amount_to_redeem; let new_rc_token = new_reservecoin_token(amount)?; rb_tokens.push(new_rc_token) } // Find all other tokens held in user-provided input boxes let mut other_tokens = find_and_sum_other_tokens(&vec![bank_box.tokens()[1].clone()], &no_bank_inputs); rb_tokens.append(&mut other_tokens); let candidate = create_candidate( rb_value, &user_address, &rb_tokens, &rb_registers_vec, current_height, )?; Ok(candidate) } /// Create an `ErgoBoxCandidate` for an output Receipt box for the /// `Redeem StableCoin` action pub fn create_redeem_stablecoin_candidate( amount_to_redeem: u64, user_address: &P2PKAddressString, current_height: BlockHeight, transaction_fee: NanoErg, stablecoin_value_in_base: NanoErg, bank_box: &BankBox, sc_boxes: &Vec<StableCoinBox>, no_bank_inputs: &Vec<ErgoBox>, implementor_fee: NanoErg, ) -> Result<ErgoBoxCandidate, ProtocolError> { // Find how many nanoErgs are inside of the StableCoin boxes let sc_boxes_value = StableCoinBox::sum_nano_ergs_value(&sc_boxes); // Find how many StableCoins are inside of the StableCoin boxes let sc_boxes_total_sc = StableCoinBox::sum_token_amount(sc_boxes); // The amount of nanoErgs in the sc_boxes + the value of the // StableCoins being redeemed - the transaction fee let rb_value = sc_boxes_value + stablecoin_value_in_base - implementor_fee - transaction_fee; // Specify the registers in the Receipt box let rb_registers_vec = vec![ (0 - amount_to_redeem as i64).into(), (0 - stablecoin_value_in_base as i64).into(), ]; // Define the tokens let mut rb_tokens = vec![]; // Check if there are any extra tokens that aren't being redeemed // and include them in the output if sc_boxes_total_sc > amount_to_redeem { // Define the StableCoin token let amount = sc_boxes_total_sc - amount_to_redeem; let new_sc_token = new_stablecoin_token(amount)?; rb_tokens.push(new_sc_token) } // Find all other tokens held in user-provided input boxes let mut other_tokens = find_and_sum_other_tokens(&vec![bank_box.tokens()[0].clone()], &no_bank_inputs); rb_tokens.append(&mut other_tokens); let candidate = create_candidate( rb_value, &user_address, &rb_tokens, &rb_registers_vec, current_height, )?; Ok(candidate) } } // Creates a new StableCoin token with a custom amount fn new_stablecoin_token(amount: u64) -> Result<Token, ProtocolError> { Ok(build_token(STABLECOIN_TOKEN_ID, amount)?) } // Creates a new ReserveCoin token with a custom amount fn new_reservecoin_token(amount: u64) -> Result<Token, ProtocolError> { Ok(build_token(RESERVECOIN_TOKEN_ID, amount)?) }
37.493023
92
0.643344
1d01aa47cd0bcd80703248094d04fbe01cf4b4f4
1,015
extern crate think_cap;
extern crate time;

use think_cap::{NN, HaltCondition, LearningMode};

#[test]
fn xor_4layers() {
    // create examples of the xor function
    let examples = [
        (vec![0f64, 0f64], vec![0f64]),
        (vec![0f64, 1f64], vec![1f64]),
        (vec![1f64, 0f64], vec![1f64]),
        (vec![1f64, 1f64], vec![0f64]),
    ];

    // create a new neural network
    let mut net1 = NN::new(&[2, 3, 3, 1]);

    // train the network
    net1.train(&examples)
        .log_interval(Some(1000))
        .halt_condition(HaltCondition::MSE(0.01))
        .learning_mode(LearningMode::Incremental)
        .momentum(0.1)
        .go();

    // make sure json encoding/decoding works as expected
    let json = net1.to_json();
    let net2 = NN::from_json(&json);

    // test the trained network
    for &(ref inputs, ref outputs) in examples.iter() {
        let results = net2.run(inputs);
        let (result, key) = (results[0].round(), outputs[0]);
        assert!(result == key);
    }
}
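Since `to_json`/`from_json` round-trip the trained weights (as the test verifies), persisting a network between runs follows naturally. A minimal sketch, assuming only the two methods exercised above plus std file I/O; the path is a placeholder:

use std::fs;
use think_cap::NN;

// Save a trained network to disk; "net.json" (or whatever path) is a placeholder.
fn save_network(net: &NN, path: &str) -> std::io::Result<()> {
    fs::write(path, net.to_json())
}

// Restore a previously saved network from the same JSON representation.
fn load_network(path: &str) -> std::io::Result<NN> {
    Ok(NN::from_json(&fs::read_to_string(path)?))
}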
26.710526
61
0.581281
48b9c318bcc452176ce0574a17fc63cad9f15749
5,870
use crate::{checks, LocalNetwork};
use clap::ArgMatches;
use futures::prelude::*;
use node_test_rig::{
    environment::EnvironmentBuilder, testing_client_config, testing_validator_config,
    ClientGenesis, ValidatorFiles,
};
use rayon::prelude::*;
use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::time::{sleep_until, Instant};
use types::{Epoch, EthSpec, MainnetEthSpec};

pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
    let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default");
    let validators_per_node = value_t!(matches, "validators_per_node", usize)
        .expect("missing validators_per_node default");
    let speed_up_factor =
        value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default");
    let continue_after_checks = matches.is_present("continue_after_checks");

    println!("Beacon Chain Simulator:");
    println!(" nodes:{}", node_count);
    println!(" validators_per_node:{}", validators_per_node);
    println!(" continue_after_checks:{}", continue_after_checks);

    // Generate the directories and keystores required for the validator clients.
    let validator_files = (0..node_count)
        .into_par_iter()
        .map(|i| {
            println!(
                "Generating keystores for validator {} of {}",
                i + 1,
                node_count
            );
            let indices =
                (i * validators_per_node..(i + 1) * validators_per_node).collect::<Vec<_>>();
            ValidatorFiles::with_keystores(&indices).unwrap()
        })
        .collect::<Vec<_>>();

    let log_level = "debug";
    let log_format = None;

    let mut env = EnvironmentBuilder::mainnet()
        .async_logger(log_level, log_format)?
        .multi_threaded_tokio_runtime()?
        .build()?;

    let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor);

    let spec = &mut env.eth2_config.spec;

    let total_validator_count = validators_per_node * node_count;

    spec.seconds_per_slot /= speed_up_factor;
    spec.seconds_per_slot = max(1, spec.seconds_per_slot);
    spec.eth1_follow_distance = 16;
    spec.genesis_delay = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2;
    spec.min_genesis_time = 0;
    spec.min_genesis_active_validator_count = total_validator_count as u64;
    spec.seconds_per_eth1_block = 1;

    let genesis_delay = Duration::from_secs(5);
    let genesis_time = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_err(|_| "should get system time")?
        + genesis_delay;
    let genesis_instant = Instant::now() + genesis_delay;

    let slot_duration = Duration::from_secs(spec.seconds_per_slot);

    let context = env.core_context();

    let mut beacon_config = testing_client_config();

    beacon_config.genesis = ClientGenesis::Interop {
        validator_count: total_validator_count,
        genesis_time: genesis_time.as_secs(),
    };
    beacon_config.dummy_eth1_backend = true;
    beacon_config.sync_eth1_chain = true;

    beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));

    let main_future = async {
        let network = LocalNetwork::new(context, beacon_config.clone()).await?;

        /*
         * One by one, add beacon nodes to the network.
         */
        for _ in 0..node_count - 1 {
            network.add_beacon_node(beacon_config.clone()).await?;
        }

        /*
         * Create a future that will add validator clients to the network. Each validator client
         * is attached to a single corresponding beacon node.
         */
        let add_validators_fut = async {
            for (i, files) in validator_files.into_iter().enumerate() {
                network
                    .add_validator_client(testing_validator_config(), i, files, i % 2 == 0)
                    .await?;
            }

            Ok::<(), String>(())
        };

        /*
         * The processes that will run checks on the network as it runs.
         */
        let checks_fut = async {
            sleep_until(genesis_instant).await;

            let (finalization, block_prod) = futures::join!(
                // Check that the chain finalizes at the first given opportunity.
                checks::verify_first_finalization(network.clone(), slot_duration),
                // Check that a block is produced at every slot.
                checks::verify_full_block_production_up_to(
                    network.clone(),
                    Epoch::new(4).start_slot(MainnetEthSpec::slots_per_epoch()),
                    slot_duration,
                )
            );
            finalization?;
            block_prod?;

            Ok::<(), String>(())
        };

        let (add_validators, start_checks) = futures::join!(add_validators_fut, checks_fut);

        add_validators?;
        start_checks?;

        // The `final_future` either completes immediately or never completes, depending on the
        // value of `continue_after_checks`.
        if continue_after_checks {
            future::pending::<()>().await;
        }

        /*
         * End the simulation by dropping the network. This will kill all running beacon nodes
         * and validator clients.
         */
        println!(
            "Simulation complete. Finished with {} beacon nodes and {} validator clients",
            network.beacon_node_count(),
            network.validator_client_count()
        );

        // Be explicit about dropping the network, as this kills all the nodes. This ensures
        // all the checks have adequate time to pass.
        drop(network);

        Ok::<(), String>(())
    };

    env.runtime().block_on(main_future).unwrap();

    env.fire_signal();
    env.shutdown_on_idle();

    Ok(())
}
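One detail worth noting in the spec adjustments above: integer division by `speed_up_factor` can drive `seconds_per_slot` to zero, which the `max(1, ..)` clamp prevents. The same arithmetic in isolation, with illustrative values:

use std::cmp::max;

// Scale a slot duration down by a speed-up factor, never below one second.
fn scaled_seconds_per_slot(seconds_per_slot: u64, speed_up_factor: u64) -> u64 {
    max(1, seconds_per_slot / speed_up_factor)
}

fn main() {
    assert_eq!(scaled_seconds_per_slot(12, 4), 3);
    // A factor larger than the base duration clamps to the 1s floor.
    assert_eq!(scaled_seconds_per_slot(12, 30), 1);
}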
35.149701
101
0.622658