blob_id (string, length 40) | language (string, 1 class) | repo_name (string, length 5-140) | path (string, length 5-183) | src_encoding (string, 6 classes) | length_bytes (int64, 12-5.32M) | score (float64, 2.52-4.94) | int_score (int64, 3-5) | detected_licenses (list, length 0-47) | license_type (string, 2 classes) | text (string, length 12-5.32M) | download_success (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---
e6ff39dc211604d77374fdc6e9b4009cf76ba3f4 | Rust | Zazcallabah/aoc | /2019/6.rs | UTF-8 | 2,726 | 3.4375 | 3 | [] | no_license |
use std::collections::HashMap;
type Map = HashMap<String,Stellar>;
struct Stellar {
name:String,
parent:String,
children:Vec<String>,
}
impl Stellar {
fn new(name:String) -> Stellar {
Stellar{name,children: Vec::new(),parent:"".to_owned()}
}
}
fn map(data:&str) -> Map {
let mut objects : Map = HashMap::new();
for mut l in data.lines().map(|l| l.split(')') ) {
let parentstr = l.next().unwrap();
let childstr = l.next().unwrap();
if objects.contains_key(childstr) {
let c = objects.get_mut(childstr).unwrap();
c.parent = parentstr.to_owned();
}
else {
let mut child = Stellar::new(childstr.to_owned());
child.parent = parentstr.to_owned();
objects.insert(child.name.clone(), child);
}
let parent = objects.entry(parentstr.to_owned()).or_insert_with(|| Stellar::new(parentstr.to_owned()) );
parent.children.push(childstr.to_owned());
}
objects
}
fn ancestor(map:&Map)-> String {
let (_,anc) = map.iter().find(|(_,s)| s.parent == "" ).unwrap();
anc.name.clone()
}
fn ancestry(map:&Map,key:&str) -> Vec<String> {
let mut v = Vec::new();
let mut k = key.to_owned();
loop {
let n = map.get(&k).unwrap();
v.push(n.name.clone());
if n.parent == "" {
return v
}
k = n.parent.clone();
}
}
fn count(map:&Map,level:usize,start:&str) -> usize {
let mut sum = level;
for c in &map.get(start).unwrap().children {
sum += count(map,level+1,c);
}
sum
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_make_map(){
let m = map(r"COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L");
let c = m.get("COM").unwrap();
assert_eq!("COM",c.name);
assert_eq!("",c.parent);
assert_eq!(vec!["B"],c.children);
}
#[test]
fn test_find_ancestor(){
let m = map(r"COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L");
assert_eq!("COM",ancestor(&m));
}
#[test]
fn test_count(){
let m = map(r"COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L");
assert_eq!(42,count(&m,0,&"COM"));
}
#[test]
fn test_ancestry(){
let m = map(r"COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L");
assert_eq!(vec!["J","E","D","C","B","COM"],ancestry(&m,&"J"));
}
#[test]
fn test_travel(){
let m = map(r"COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN");
assert_eq!(4,transfer(&m,"YOU","SAN"));
}}
fn main(){
let m = map(&std::fs::read_to_string("2019/6.txt").unwrap());
println!("part 1: {}",count(&m,0,&ancestor(&m)));
println!("part 2: {}",transfer(&m,"YOU","SAN"));
}
fn transfer(map:&Map,from:&str,to:&str) -> usize {
let anc_a = ancestry(&map,&from);
let anc_b = ancestry(&map,&to);
for (i,a) in anc_a.iter().enumerate() {
for (j,b) in anc_b.iter().enumerate() {
if a == b {
return i+j - 2
}
}
}
panic!("no common ancestor found");
}
| true |
c280f3ce53aee37189fcaf10a2d1ded749364983 | Rust | Keruspe/adventofcode2020 | /src/bin/14.rs | UTF-8 | 2,869 | 3.28125 | 3 | [] | no_license |
#![feature(str_split_once)]
static INPUT: &str = include_str!("./14.txt");
use std::str::FromStr;
use std::collections::BTreeMap;
#[derive(Debug)]
enum Instruction {
Mask(Mask),
Assign(usize, u64),
}
impl FromStr for Instruction {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (lhs, rhs) = s.split_once(" = ").ok_or(())?;
Ok(match lhs {
"mask" => Instruction::Mask(rhs.parse()?),
lhs if lhs.starts_with("mem[") && lhs.ends_with("]") => Instruction::Assign(lhs[4..(lhs.len() - 1)].parse().map_err(|_| ())?, rhs.parse().map_err(|_| ())?),
_ => return Err(()),
})
}
}
#[derive(Clone, Debug)]
struct Mask {
zeroes: u64,
ones: u64,
xs: Vec<usize>,
}
impl Mask {
fn apply(&self, input: u64) -> u64 {
input & self.zeroes | self.ones
}
fn apply2(&self, input: usize) -> Vec<usize> {
self.apply2_internal(&self.xs[..], input | self.ones as usize)
}
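// Recursively expand every floating ('X') bit into both 0 and 1, yielding all decoded addresses.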
fn apply2_internal(&self, xs: &[usize], input: usize) -> Vec<usize> {
if let Some((bit, xs)) = xs.split_first() {
let mut res = self.apply2_internal(xs, input & !(1 << bit));
res.append(&mut self.apply2_internal(xs, input | 1 << bit));
res
} else {
vec![input]
}
}
}
impl Default for Mask {
fn default() -> Self {
Self {
zeroes: !0,
ones: 0,
xs: Vec::new(),
}
}
}
impl FromStr for Mask {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.chars().enumerate().fold(Ok(Mask::default()), |acc, (idx, c)| {
acc.and_then(|mut acc| {
match c {
'0' => acc.zeroes &= !(1 << (35 - idx)),
'1' => acc.ones |= 1 << (35 - idx),
'X' => acc.xs.push(35 - idx),
_ => return Err(()),
}
Ok(acc)
})
})
}
}
fn main() {
let mut memory: BTreeMap<usize, u64> = BTreeMap::new();
let mut mask = Mask::default();
let instructions = INPUT.lines().map(|line| line.parse::<Instruction>().unwrap()).collect::<Vec<_>>();
for instr in &instructions {
match instr {
Instruction::Mask(m) => mask = m.clone(),
Instruction::Assign(idx, val) => drop(memory.insert(*idx, mask.apply(*val))),
}
}
println!("{}", memory.values().sum::<u64>());
memory = BTreeMap::new();
mask = Mask::default();
for instr in &instructions {
match instr {
Instruction::Mask(m) => mask = m.clone(),
Instruction::Assign(idx, val) => for idx in mask.apply2(*idx) {
memory.insert(idx, *val);
}
}
}
println!("{}", memory.values().sum::<u64>());
}
| true |
ea04897320cee297be1fc84f7229ae573a87d758 | Rust | forkeith/ldraw.rs | /ldraw/src/library.rs | UTF-8 | 7,357 | 2.71875 | 3 | [] | no_license |
use std::cell::RefCell;
use std::collections::HashMap;
use std::hash;
use std::ops::Deref;
use std::rc::Rc;
use serde::{Deserialize, Serialize};
use crate::document::{Document, MultipartDocument};
use crate::elements::PartReference;
use crate::AliasType;
use crate::NormalizedAlias;
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
pub enum PartKind {
Primitive,
Part,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct PartEntry<T> {
pub kind: PartKind,
pub locator: T,
}
impl<T> Clone for PartEntry<T>
where
T: Clone,
{
fn clone(&self) -> PartEntry<T> {
PartEntry {
kind: self.kind,
locator: self.locator.clone(),
}
}
}
impl<T> hash::Hash for PartEntry<T>
where
T: hash::Hash,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.locator.hash(state)
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct PartDirectory<T> {
pub primitives: HashMap<NormalizedAlias, PartEntry<T>>,
pub parts: HashMap<NormalizedAlias, PartEntry<T>>,
}
impl<T> Default for PartDirectory<T> {
fn default() -> PartDirectory<T> {
PartDirectory {
primitives: HashMap::new(),
parts: HashMap::new(),
}
}
}
impl<T> PartDirectory<T> {
pub fn add(&mut self, key: NormalizedAlias, entry: PartEntry<T>) {
match entry.kind {
PartKind::Primitive => self.primitives.insert(key, entry),
PartKind::Part => self.parts.insert(key, entry),
};
}
pub fn query(&self, key: &NormalizedAlias) -> Option<&PartEntry<T>> {
match self.parts.get(key) {
Some(v) => Some(v),
None => match self.primitives.get(key) {
Some(v) => Some(v),
None => None,
},
}
}
}
#[derive(Debug)]
pub struct PartCache {
primitives: HashMap<NormalizedAlias, Rc<Document>>,
parts: HashMap<NormalizedAlias, Rc<Document>>,
}
#[derive(Copy, Clone, Debug)]
pub enum CacheCollectionStrategy {
Parts,
Primitives,
PartsAndPrimitives,
}
impl Default for PartCache {
fn default() -> PartCache {
PartCache {
parts: HashMap::new(),
primitives: HashMap::new(),
}
}
}
impl Drop for PartCache {
fn drop(&mut self) {
self.collect(CacheCollectionStrategy::PartsAndPrimitives);
}
}
impl PartCache {
pub fn register(&mut self, kind: PartKind, alias: NormalizedAlias, document: Document) {
match kind {
PartKind::Part => self.parts.insert(alias, Rc::new(document)),
PartKind::Primitive => self.primitives.insert(alias, Rc::new(document)),
};
}
pub fn query(&self, alias: &NormalizedAlias) -> Option<Rc<Document>> {
match self.parts.get(alias) {
Some(part) => Some(Rc::clone(&part)),
None => match self.primitives.get(alias) {
Some(prim) => Some(Rc::clone(&prim)),
None => None,
},
}
}
fn collect_round(&mut self, collection_strategy: CacheCollectionStrategy) -> usize {
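// Keep only the entries still referenced from outside the cache (an extra strong count
// or any weak count); the return value is how many entries were dropped this round.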
let prev_size = self.parts.len() + self.primitives.len();
match collection_strategy {
CacheCollectionStrategy::Parts => {
self.parts
.retain(|_, v| Rc::strong_count(&v) > 1 || Rc::weak_count(&v) > 0);
}
CacheCollectionStrategy::Primitives => {
self.primitives
.retain(|_, v| Rc::strong_count(&v) > 1 || Rc::weak_count(&v) > 0);
}
CacheCollectionStrategy::PartsAndPrimitives => {
self.parts
.retain(|_, v| Rc::strong_count(&v) > 1 || Rc::weak_count(&v) > 0);
self.primitives
.retain(|_, v| Rc::strong_count(&v) > 1 || Rc::weak_count(&v) > 0);
}
};
prev_size - self.parts.len() - self.primitives.len()
}
pub fn collect(&mut self, collection_strategy: CacheCollectionStrategy) -> usize {
let mut total_collected = 0;
loop {
let collected = self.collect_round(collection_strategy);
if collected == 0 {
break;
}
total_collected += collected;
}
total_collected
}
}
#[derive(Clone, Debug)]
pub enum ResolutionResult<'a, T> {
Missing,
Pending(PartEntry<T>),
Subpart(&'a Document),
Associated(Rc<Document>),
}
#[derive(Clone, Debug)]
pub struct ResolutionMap<'a, T> {
directory: Rc<RefCell<PartDirectory<T>>>,
cache: Rc<RefCell<PartCache>>,
pub map: HashMap<NormalizedAlias, ResolutionResult<'a, T>>,
}
impl<'a, 'b, T: Clone> ResolutionMap<'a, T> {
pub fn new(
directory: Rc<RefCell<PartDirectory<T>>>,
cache: Rc<RefCell<PartCache>>,
) -> ResolutionMap<'a, T> {
ResolutionMap {
directory,
cache,
map: HashMap::new(),
}
}
pub fn get_pending(&'b self) -> impl Iterator<Item = (&'b NormalizedAlias, &'b PartEntry<T>)> {
self.map.iter().filter_map(|(key, value)| match value {
ResolutionResult::Pending(a) => Some((key, a)),
_ => None,
})
}
pub fn resolve<D: Deref<Target = Document>>(
&mut self,
document: &D,
parent: Option<&'a MultipartDocument>,
) {
for i in document.iter_refs() {
let name = &i.name;
if self.map.contains_key(name) {
continue;
}
if let Some(e) = parent {
if let Some(doc) = e.subparts.get(name) {
self.map
.insert(name.clone(), ResolutionResult::Subpart(&doc));
self.resolve(&doc, parent);
continue;
}
}
let cached = self.cache.borrow().query(name);
if let Some(e) = cached {
self.map
.insert(name.clone(), ResolutionResult::Associated(Rc::clone(&e)));
self.resolve(&e, None);
continue;
}
if let Some(e) = self.directory.borrow().query(name) {
self.map
.insert(name.clone(), ResolutionResult::Pending(e.clone()));
} else {
self.map.insert(name.clone(), ResolutionResult::Missing);
}
}
}
pub fn update(&mut self, key: &NormalizedAlias, document: Rc<Document>) {
self.resolve(&Rc::clone(&document), None);
self.map.insert(
key.clone(),
ResolutionResult::Associated(Rc::clone(&document)),
);
}
pub fn query(&'a self, elem: &PartReference) -> Option<&'a Document> {
match self.map.get(&elem.name) {
Some(e) => match e {
ResolutionResult::Missing => None,
ResolutionResult::Pending(_) => None,
ResolutionResult::Subpart(e) => Some(e),
ResolutionResult::Associated(e) => Some(&e),
},
None => None,
}
}
pub fn get(&self, elem: &PartReference) -> Option<&ResolutionResult<T>> {
self.map.get(&elem.name)
}
}
#[cfg(not(target_arch = "wasm32"))]
pub use crate::library_native::*;
| true |
00c86829d1715f45ad10e4d65f1ac0d58ffa4e34 | Rust | vain0x/text-position-rs | /src/position/utf16_position.rs | UTF-8 | 5,767 | 3.46875 | 3 | ["CC0-1.0"] | permissive |
// LICENSE: CC0-1.0
use crate::TextPosition;
use std::{
cmp::Ordering,
fmt::{self, Debug, Display, Formatter},
ops::{Add, AddAssign},
};
/// Text position as a (row, column) pair.
/// The column number (= the length of the final line) is measured in UTF-16 code units (i.e., half the number of bytes in the UTF-16 encoding).
/// Both components start from 0.
#[derive(Copy, Clone, Default, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub struct Utf16Position {
pub row: u32,
pub column: u32,
}
impl Utf16Position {
pub const fn new(row: u32, column: u32) -> Self {
Self { row, column }
}
}
impl TextPosition for Utf16Position {
const ZERO: Self = Self { row: 0, column: 0 };
fn from_str(s: &str) -> Self {
let mut row = 0;
let mut head = 0;
while let Some(offset) = s[head..].find('\n') {
row += 1;
head += offset + 1;
}
Self {
row: row as u32,
column: s[head..].encode_utf16().count() as u32,
}
}
fn saturating_sub(self, rhs: Self) -> Self {
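// Compare rows first: an earlier self row saturates to ZERO; equal rows subtract columns;
// a later self row keeps its own column, since columns restart on every line.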
match self.row.cmp(&rhs.row) {
Ordering::Less => Self::ZERO,
Ordering::Equal => Self {
row: 0,
column: self.column.saturating_sub(rhs.column),
},
Ordering::Greater => Self {
row: self.row - rhs.row,
column: self.column,
},
}
}
}
impl Add for Utf16Position {
type Output = Self;
fn add(self, rhs: Self) -> Self {
if rhs.row == 0 {
Self {
row: self.row,
column: self.column + rhs.column,
}
} else {
Self {
row: self.row + rhs.row,
column: rhs.column,
}
}
}
}
impl AddAssign for Utf16Position {
fn add_assign(&mut self, rhs: Self) {
let sum = *self + rhs;
*self = sum;
}
}
impl From<char> for Utf16Position {
fn from(c: char) -> Self {
if c == '\n' {
Self { row: 1, column: 0 }
} else {
Self {
row: 0,
column: c.len_utf16() as u32,
}
}
}
}
impl From<&'_ str> for Utf16Position {
fn from(s: &str) -> Self {
Self::from_str(s)
}
}
impl From<Utf16Position> for (u32, u32) {
fn from(pos: Utf16Position) -> (u32, u32) {
(pos.row, pos.column)
}
}
impl Debug for Utf16Position {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(self, f)
}
}
impl Display for Utf16Position {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "{}:{}", self.row + 1, self.column + 1)
}
}
#[cfg(test)]
mod tests {
use crate::{TextPosition, Utf16Position};
const ZERO: Utf16Position = Utf16Position::ZERO;
fn pos_at(row: u32, column: u32) -> Utf16Position {
Utf16Position::new(row, column)
}
fn pos_of(s: &str) -> Utf16Position {
Utf16Position::from_str(s)
}
#[test]
fn test_from_str_empty() {
assert_eq!(pos_of(""), ZERO);
}
#[test]
fn test_from_str_ascii_single_line() {
assert_eq!(pos_of("Hello, world!"), pos_at(0, 13));
}
#[test]
fn test_from_str_ascii_multiple_line() {
assert_eq!(pos_of("12345\n1234567\n12345"), pos_at(2, 5));
}
#[test]
fn test_from_str_unicode() {
assert_eq!(pos_of("いろはにほへと"), pos_at(0, 7));
}
#[test]
fn test_from_str_surrogate_pair() {
assert_eq!(pos_of("🐧"), pos_at(0, 2));
}
#[test]
fn test_from_str_crlf() {
assert_eq!(pos_of("\r\n"), pos_at(1, 0));
}
#[test]
fn test_add_single_line() {
assert_eq!(pos_of("12345") + pos_of("6789"), pos_at(0, 9))
}
#[test]
fn test_add_newline() {
assert_eq!(pos_of("12345") + pos_of("\n"), pos_at(1, 0));
}
#[test]
fn test_add_multiple_line() {
assert_eq!(pos_of("12345\n12345") + pos_of("67\n12345"), pos_at(2, 5))
}
#[test]
fn test_saturating_sub_minus_row() {
assert_eq!(
pos_of("\n\n\n\n123456").saturating_sub(pos_of("\n\n\n\n\n1")),
ZERO
);
}
#[test]
fn test_saturating_sub_minus_column() {
assert_eq!(
pos_of("\n\n\n\n123456").saturating_sub(pos_of("\n\n\n\n1234567")),
ZERO
);
}
#[test]
fn test_saturating_sub_equal() {
let pos = pos_of("\n\n\n\n123456");
assert_eq!(pos.saturating_sub(pos), ZERO);
}
#[test]
fn test_saturating_sub_plus_row() {
assert_eq!(
pos_of("\n\n\n12\n123456").saturating_sub(pos_of("\n\n\n12")),
pos_of("\n123456")
);
}
#[test]
fn test_saturating_sub_plus_column() {
assert_eq!(
pos_of("\n\n\n\n123456").saturating_sub(pos_of("\n\n\n\n1")),
pos_of("23456")
);
}
#[test]
fn test_saturating_sub_minus_row_in_number() {
assert_eq!(pos_at(4, 6).saturating_sub(pos_at(5, 1)), ZERO);
}
#[test]
fn test_saturating_sub_minus_column_in_number() {
assert_eq!(pos_at(4, 6).saturating_sub(pos_at(4, 7)), ZERO);
}
#[test]
fn test_saturating_sub_plus_row_in_number() {
assert_eq!(pos_at(4, 6).saturating_sub(pos_at(3, 2)), pos_at(1, 6));
}
#[test]
fn test_saturating_sub_plus_column_in_number() {
assert_eq!(pos_at(4, 6).saturating_sub(pos_at(4, 1)), pos_at(0, 5));
}
#[test]
fn test_display_zero() {
assert_eq!(format!("{}", ZERO), "1:1");
}
#[test]
fn test_display_nonzero() {
assert_eq!(format!("{}", pos_at(3, 1)), "4:2");
}
}
| true |
0a0846fbf6939b342a0c00e911d6bec4dded354d | Rust | ratijas/mess | /mess-client/src/gui/_mvc.rs | UTF-8 | 20,957 | 3.3125 | 3 | [] | no_license |
//! Concepts of view controller, view, model and delegate.
//!
//! A view controller is a stateful object capable of handling events (e.g. input);
//! it can also implement one or more delegate protocols. Composition of controllers
//! makes up a tree, which is an acyclic (non-recursive) directed graph. This makes it
//! possible to tie a parent's lifetime to its descendant controllers.
//!
//! A view is a disposable object, created from the corresponding controller,
//! configured, rendered and thrown away. In terms of the `tui` crate, a view is a [Widget].
//!
//! Normally (?), each view type corresponds to exactly one view controller.
//!
//! A model is storage for some data, together with methods to manipulate it. `String` is a good
//! example. `Vec<User>` will also do.
//!
//! A delegate protocol is (in terms of the Rust programming language) a trait where each method
//! corresponds to some controller event or request.
//!
//! A controller may expose interfaces for as many delegate protocols as desired.
//!
//! # What about lifetimes?
//!
//! ## View
//!
//! View takes an immutable reference to its controller, so it's always `'a`.
//!
//! ## Delegate
//!
//! A reference (or references) to delegates, if any, must be held inside the controller. Such a
//! structure prevents us from using delegates as mutable objects from then on, which is unacceptable.
//! Instead, delegates could be implemented as separate types, with the controller holding a `RefCell`
//! containing the delegate and passing around `&'a` references to it.
//!
//! Another approach would be to use `Arc<RefCell<T>>` wherever possible.
//!
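//! For illustration only (an added note, not part of the original design text), a minimal sketch of
//! the second approach, with a hypothetical `Delegate` trait shared through `Arc<RefCell<..>>`:
//!
//! ```ignore
//! use std::cell::RefCell;
//! use std::sync::Arc;
//!
//! trait Delegate {
//!     fn value_changed(&mut self, value: u32);
//! }
//!
//! struct Controller {
//!     // The controller only stores a shared, interiorly mutable handle,
//!     // so other code can keep mutating the same delegate object.
//!     delegate: Arc<RefCell<dyn Delegate>>,
//! }
//!
//! impl Controller {
//!     fn notify(&self, value: u32) {
//!         self.delegate.borrow_mut().value_changed(value);
//!     }
//! }
//! ```
//!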
//! # Event handling
//!
//! A view controller's primary job is to handle input. It exposes several methods for that via
//! the `ViewController` trait. When an event is emitted by the top-level event system, some view
//! controller must handle it. The search starts with the root view controller and goes down the
//! chain of "active" subviews until some controller handles the event or there is no active
//! subview left.
//!
//! Schematically:
//!
//! ```python
//! def handle_event(self, event):
//! if self.bubble_down(event):
//! # event handled during "bubble-down" phase
//! return True
//! else if self.active_subview is not None:
//! # going down
//! if self.active_subview.handle_event(event):
//! # event handled by subview
//! return True
//! # both else cases
//! return self.bubble_up(event)
//! ```
//!
//! [Widget]: https://docs.rs/tui/0.1.3/tui/widgets/trait.Widget.html
mod imports {
pub use std::cell::RefCell;
pub use std::sync::{Arc, Weak};
pub use tui::backend::{Backend, TermionBackend};
pub use tui::buffer::{Buffer, Cell};
pub use tui::layout::{Direction, Group, Rect, Size};
pub use tui::style::{Color, Modifier, Style};
pub use tui::widgets::{border, Block, List, SelectableList, Paragraph, Widget};
pub use tui::Terminal;
pub use termion::event::{Event, Key};
}
use self::imports::*;
mod boxes {
use super::*;
pub type Boxed<T> = Arc<RefCell<Box<T>>>;
pub type WeakBoxed<T> = Weak<RefCell<Box<T>>>;
pub trait BoxedExt {
fn boxed(self) -> Boxed<Self>;
}
impl<T> BoxedExt for T {
fn boxed(self) -> Boxed<Self> {
boxed(self)
}
}
pub fn boxed<T>(t: T) -> Arc<RefCell<Box<T>>> {
Arc::new(RefCell::new(Box::new(t)))
}
}
pub use self::boxes::*;
#[macro_export]
macro_rules! boxed_as {
($t:ty, $value:expr) => {
Arc::new(RefCell::new(Arc::try_unwrap($value).map_err(drop).expect("rebox").into_inner() as Box<$t>))
};
}
#[derive(Default)]
pub struct ViewControllerImpl {
/// Weak reference to the parent view controller
parent_view_controller: Weak<RefCell<Box<ViewController>>>,
// /// Child view controllers that
// subviews: Vec<Arc<RefCell<Box<ViewController>>>>,
/// Weak reference to the next child responder down the hierarchy
active_child_view_controller: Weak<RefCell<Box<ViewController>>>,
}
pub trait ViewController: Widget {
fn as_view_controller(&self) -> &ViewControllerImpl;
fn as_view_controller_mut(&mut self) -> &mut ViewControllerImpl;
/// return true if this controller has captured the event during bubble down or bubble up phase.
fn handle_event(&mut self, event: Event, bubble_down: bool) -> bool;
/// Weak reference to the parent_view_controller view controller
fn parent_view_controller(&self) -> WeakBoxed<ViewController>;
fn set_parent_view_controller(&mut self, parent: WeakBoxed<ViewController>);
/// Weak reference to the next child responder down the hierarchy
fn active_child_view_controller(&self) -> WeakBoxed<ViewController>;
fn set_active_child_view_controller(&mut self, child: Boxed<ViewController>);
fn render_on_termion(&self, t: &mut Terminal<TermionBackend>, area: &Rect);
}
default impl<T: Widget + Sized> ViewController for T {
default fn as_view_controller(&self) -> &ViewControllerImpl { unimplemented!() }
default fn as_view_controller_mut(&mut self) -> &mut ViewControllerImpl { unimplemented!() }
default fn handle_event(&mut self, event: Event, bubble_down: bool) -> bool { unimplemented!() }
default fn parent_view_controller(&self) -> WeakBoxed<ViewController> {
self.as_view_controller().parent_view_controller.clone()
}
default fn set_parent_view_controller(&mut self, parent: WeakBoxed<ViewController>) {
self.as_view_controller_mut().parent_view_controller = parent;
}
default fn active_child_view_controller(&self) -> WeakBoxed<ViewController> {
self.as_view_controller().active_child_view_controller.clone()
}
default fn set_active_child_view_controller(&mut self, child: Boxed<ViewController>) {
self.as_view_controller_mut().active_child_view_controller = Arc::downgrade(&child);
}
/* final */ fn render_on_termion(&self, t: &mut Terminal<TermionBackend>, area: &Rect) {
self.render(t, area);
}
}
macro_rules! view_controller_impl {
() => {
fn as_view_controller(&self) -> &ViewControllerImpl { &self._inner }
fn as_view_controller_mut(&mut self) -> &mut ViewControllerImpl { &mut self._inner }
fn render_on_termion(&self, t: &mut Terminal<TermionBackend>, area: &Rect) {
self.render(t, area);
}
};
}
pub mod button {
use super::*;
#[derive(Default)]
pub struct Button {
_inner: ViewControllerImpl,
pub title: String,
pub highlighted: bool,
}
// builder pattern
impl Button {
pub fn title(mut self, title: String) -> Button {
self.title = title;
self
}
pub fn highlighted(mut self, highlighted: bool) -> Button {
self.highlighted = highlighted;
self
}
}
impl ViewController for Button {
view_controller_impl!();
fn handle_event(&mut self, event: Event, bubble_down: bool) -> bool {
match event {
Event::Key(Key::Char('\t')) => {
self.highlighted = !self.highlighted;
true
}
_ => false,
}
}
}
impl Widget for Button {
fn draw(&self, area: &Rect, buf: &mut Buffer) {
Paragraph::default()
.text(&format!("< {} >", &self.title))
.block(Block::default().borders(border::ALL))
.style(Style::default().modifier(if self.highlighted { Modifier::NoInvert } else { Modifier::Invert }))
.draw(area, buf);
}
}
}
pub mod window {
use super::*;
pub struct Window {
_inner: ViewControllerImpl,
root: Arc<RefCell<Box<ViewController>>>,
modal: Option<Arc<RefCell<Box<ViewController>>>>,
}
impl Window {
pub fn new() -> Boxed<Window> {
let root: Boxed<ViewController> = boxed_as!(ViewController, main_view_controller::MainViewController::new());
let mut window = Window {
_inner: Default::default(),
root: Arc::clone(&root),
modal: None,
};
window.set_active_child_view_controller(root);
window.boxed()
}
}
impl ViewController for Window {
view_controller_impl!();
fn handle_event(&mut self, event: Event, bubble_down: bool) -> bool {
if let Some(ref modal) = self.modal {
return modal.borrow_mut().handle_event(event, bubble_down);
}
false
}
}
impl Widget for Window {
fn draw(&self, area: &Rect, buf: &mut Buffer) {
(*self.root).borrow().draw(area, buf);
if let Some(ref modal) = self.modal {
(**modal).borrow().draw(area, buf);
}
}
}
}
pub mod main_view_controller {
use super::*;
use super::list_view_controller::*;
pub struct MainViewController {
_inner: ViewControllerImpl,
users_online_controller: Boxed<ListViewController>,
status_bar: Boxed<ViewController>,
}
impl MainViewController {
pub fn new() -> Boxed<MainViewController> {
let mut status_bar: Boxed<ViewController> = boxed_as!(ViewController, text_field::TextField::new());
let this = MainViewController {
_inner: Default::default(),
users_online_controller: ListViewController::new(
boxed_as!(ListViewDataSource,
DummyListViewDataSource::new(
&["one", "two", "three"],
Some(1),
)
)
),
status_bar: Arc::clone(&status_bar),
}.boxed();
// status_bar.borrow_mut().set_parent_view_controller(Arc::downgrade(&this));
this.borrow_mut().set_active_child_view_controller(status_bar);
this
}
}
impl ViewController for MainViewController {
view_controller_impl!();
fn handle_event(&mut self, event: Event, bubble_down: bool) -> bool {
false
}
}
impl Widget for MainViewController {
fn draw(&self, area: &Rect, buf: &mut Buffer) {
// Group::default()
// .direction(Direction::Vertical)
// .sizes(&[Size::Min(0), Size::Fixed(3)])
// .render( ??? );
self.users_online_controller.borrow().draw(area, buf);
}
}
}
pub mod list_view_controller {
use super::*;
pub struct ListViewController {
_inner: ViewControllerImpl,
data_source: Arc<RefCell<Box<ListViewDataSource>>>,
}
/// ListViewDataSource protocol
pub trait ListViewDataSource {
fn list_view_number_of_rows(
&self,
list_view: &ListViewController,
) -> usize;
fn list_view_cell_for_row_at_index(
&self,
list_view: &ListViewController,
index: usize,
) -> Option<String>;
fn list_view_selection_index(
&self,
list_view: &ListViewController,
) -> Option<usize>;
// this actually should be in delegate protocol
fn list_view_selection_changed(
&mut self,
list_view: &ListViewController,
index: Option<usize>,
);
}
pub struct DummyListViewDataSource {
pub items: Vec<String>,
pub selection: Option<usize>,
}
pub trait OptionalSelection<'a> {
fn select_optional(&'a mut self, index: Option<usize>) -> &'a mut Self;
}
impl ListViewController {
pub fn new(data_source: Boxed<ListViewDataSource>) -> Boxed<ListViewController> {
ListViewController {
_inner: Default::default(),
data_source,
}.boxed()
}
fn select_up(&mut self) {
if 0 == (*self.data_source).borrow().list_view_number_of_rows(&self) { return; }
let index = (*self.data_source).borrow().list_view_selection_index(&self).unwrap_or(0);
if index == 0 { return; }
let new_index = index - 1;
(*self.data_source).borrow_mut().list_view_selection_changed(&self, Some(new_index));
}
fn select_down(&mut self) {
let n = (*self.data_source).borrow().list_view_number_of_rows(&self);
if n == 0 { return; }
let index = (*self.data_source).borrow().list_view_selection_index(&self).unwrap_or(0);
if n == index { return; }
let new_index = index + 1;
(*self.data_source).borrow_mut().list_view_selection_changed(&self, Some(new_index));
}
fn items(&self) -> Vec<String> {
let ds = (*self.data_source).borrow();
let n = ds.list_view_number_of_rows(&self);
let mut items = Vec::with_capacity(n);
for i in 0..n {
match ds.list_view_cell_for_row_at_index(&self, i) {
Some(item) => items.push(item),
None => break,
}
}
items
}
}
impl ViewController for ListViewController {
view_controller_impl!();
fn handle_event(&mut self, event: Event, bubble_down: bool) -> bool {
match event {
Event::Key(Key::Up) => self.select_up(),
Event::Key(Key::Down) => self.select_down(),
_ => return false,
}
true
}
}
impl Widget for ListViewController {
fn draw(&self, area: &Rect, buf: &mut Buffer) {
SelectableList::default()
.items(&self.items())
.select_optional((*self.data_source).borrow().list_view_selection_index(&self))
.highlight_symbol("> ")
.draw(area, buf);
}
}
impl<'a> OptionalSelection<'a> for SelectableList<'a> {
fn select_optional(&'a mut self, index: Option<usize>) -> &'a mut SelectableList<'a> {
match index {
Some(index) => self.select(index),
None => self,
}
}
}
impl DummyListViewDataSource {
pub fn new<I: AsRef<str>>(items: &[I], selection: Option<usize>) -> Boxed<DummyListViewDataSource> {
DummyListViewDataSource {
items: items.iter().map(AsRef::as_ref).map(Into::into).collect(),
selection,
}.boxed()
}
}
impl ListViewDataSource for DummyListViewDataSource {
fn list_view_number_of_rows(&self, list_view: &ListViewController) -> usize {
self.items.len()
}
fn list_view_cell_for_row_at_index(&self, list_view: &ListViewController, index: usize) -> Option<String> {
self.items.get(index).cloned()
}
fn list_view_selection_index(&self, list_view: &ListViewController) -> Option<usize> {
self.selection
}
fn list_view_selection_changed(&mut self, list_view: &ListViewController, index: Option<usize>) {
self.selection = index;
}
}
}
/*
pub mod modal {
use super::*;
use super::text_field::TextField;
/// Stateful view controller
#[derive(Default)]
pub struct Modal {
title: String,
editor: TextField,
shadow: bool
}
impl Modal {
pub fn title(mut self, title: String) -> Self {
self.title = title;
self
}
// pub fn input(mut self, input: String) -> Self {
// self.input = input;
// self
// }
pub fn shadow(mut self, shadow: bool) -> Self {
self.shadow = shadow;
self
}
/// edit line until `key` is the Enter key. Then return input content.
pub fn handle(&mut self, key: Key) -> Option<String> {
match key {
Key::Char('\n') => Some(self.editor.buffer.clone()),
_ => {
self.editor.handle(key);
None
}
}
}
pub fn render<B: Backend>(&self, t: &mut Terminal<B>, area: &Rect) {
Group::default()
.direction(Direction::Horizontal)
.margin(0)
.sizes(&[Size::Percent(15), Size::Min(1), Size::Percent(15)])
.render(t, area, |t, chunks| {
Group::default()
.direction(Direction::Vertical)
.margin(0)
.sizes(&[Size::Percent(45), Size::Fixed(5), Size::Percent(45)])
.render(t, &chunks[1], |t, chunks| {
Block::default()
.borders(border::ALL)
.border_style(Style::default().bg(Color::DarkGray).fg(Color::DarkGray))
.render(t, &chunks[1]);
let area = &chunks[1].inner(1);
TextField::default()
.title(&self.title)
.text(self.editor.buffer())
.cursor(self.editor.cursor)
.render(t, &area);
});
});
}
}
}
*/
pub mod text_field {
use super::*;
/// Single line of editable text
#[derive(Default)]
pub struct TextField {
_inner: ViewControllerImpl,
title: String,
text: String,
cursor: usize,
}
impl TextField {
pub fn new() -> Boxed<TextField> {
boxed(Default::default())
}
pub fn title<I: AsRef<str>>(mut self, title: I) -> TextField {
self.title = title.as_ref().into();
self
}
pub fn text<I: AsRef<str>>(mut self, text: I) -> TextField {
self.text = text.as_ref().into();
self
}
pub fn cursor(mut self, cursor: usize) -> TextField {
self.cursor = cursor;
self
}
}
impl ViewController for TextField {
view_controller_impl!();
fn handle_event(&mut self, event: Event, _bubble_down: bool) -> bool {
if let Event::Key(key) = event {
match key {
Key::Char(ch) => {
self.text.insert(self.cursor, ch);
self.cursor += 1;
}
Key::Backspace => {
if self.cursor > 0 {
self.text.remove(self.cursor - 1);
self.cursor -= 1;
}
}
Key::Left => {
if self.cursor > 0 {
self.cursor -= 1;
}
}
Key::Right => {
if self.cursor + 1 <= self.text.len() {
self.cursor += 1;
}
}
_ => { return false; }
}
true
} else {
false
}
}
}
impl Widget for TextField {
fn draw(&self, area: &Rect, buf: &mut Buffer) {
// line itself + borders
if area.height < 3 {
return;
}
let bg = Style::default().bg(Color::Green).fg(Color::Black);
buf.clear_area(area);
Paragraph::default()
.block(
Block::default()
.borders(border::ALL)
.border_style(bg.clone())
.title(&self.title)
.title_style(bg.clone().modifier(Modifier::Invert))
)
.wrap(false)
.raw(true)
.text(&self.text)
.draw(area, buf);
buf.get_mut((1 + area.left() + self.cursor as u16).min(area.right() - 1),
1 + area.top())
.style.modifier = Modifier::Invert;
}
}
pub trait BufferCleaner {
fn clear_area(&mut self, area: &Rect);
}
impl BufferCleaner for Buffer {
fn clear_area(&mut self, area: &Rect) {
for y in area.top()..area.bottom() {
for x in area.left()..area.right() {
self.get_mut(x, y).symbol = " ".into();
}
}
}
}
}
| true |
e65c75d2ac0a4e1ca931ed140b20e22cf2e7c50a | Rust | astro/rust-lpc43xx | /src/ethernet/mac_intr/mod.rs | UTF-8 | 2,796 | 3.078125 | 3 | ["Apache-2.0"] | permissive |
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
impl super::MAC_INTR {
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
}
#[doc = r" Value of the field"]
pub struct PMTR {
bits: bool,
}
impl PMTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct TSR {
bits: bool,
}
impl TSR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 3 - PMT Interrupt Status This bit is set whenever a Magic packet or Wake-on-LAN frame is received in Power- Down mode (See bits 5 and 6 in Table 560). This bit is cleared when both bits[6:5] are cleared because of a read operation to the PMT Control and Status register."]
#[inline]
pub fn pmt(&self) -> PMTR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) != 0
};
PMTR { bits }
}
#[doc = "Bit 9 - Timestamp interrupt status When Advanced Timestamp feature is enabled, this bit is set when any of the following conditions is true: - The system time value equals or exceeds the value specified in the Target Time High and Low registers - There is an overflow in the seconds register This bit is cleared on reading the byte 0 of the Timestamp Status register (Table 576). Otherwise, when default Time stamping is enabled, this bit when set indicates that the system time value equals or exceeds the value specified in the Target Time registers. In this mode, this bit is cleared after the completion of the read of this Interrupt Status Register[9]. In all other modes, this bit is reserved."]
#[inline]
pub fn ts(&self) -> TSR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 9;
((self.bits >> OFFSET) & MASK as u32) != 0
};
TSR { bits }
}
}
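// Illustrative usage sketch (added; not part of the generated file). Given a reference to the
// MAC_INTR register, the read-only accessors above can be chained like this:
//
//     fn check_wakeup(intr: &super::MAC_INTR) {
//         let r = intr.read();
//         if r.pmt().bit_is_set() {
//             // A Magic Packet / Wake-on-LAN frame was received in power-down mode.
//         }
//         if r.ts().bit_is_set() {
//             // A timestamp event is pending.
//         }
//     }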
| true |
d1634064e6add1be5ef39e4daf8235d167308caa | Rust | iotanbo/rust_playground | /BASICS/r03_ofbook/src/functions.rs | UTF-8 | 542 | 3.5 | 4 | ["MIT"] | permissive |
//https://doc.rust-lang.org/book/ch03-03-how-functions-work.html
// * Statements do not return values
// * Expressions evaluate to something and return result as a value
// Example of function that returns a value
fn fourty_two() -> i32 {
// 42 is an expression; there is no semicolon after it.
// It is the same as `return 42;`
42 // NO SEMICOLON!
}
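// Added illustration (not in the original file): a block is also an expression,
// so its last line without a semicolon becomes the block's value.
#[allow(dead_code)]
fn block_expression_demo() -> i32 {
    let doubled = {
        let x = fourty_two(); // statement: binds a value, produces no value itself
        x * 2 // expression: becomes the value of the whole block
    };
    doubled
}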
pub fn demo() {
println!("== functions demo begin ==");
println!(" * fourty_two(): {}", fourty_two());
println!("== functions demo end ==");
println!();
}
| true |
ecc075f1156f32cfb34deb4551250b08c9d0b4ec | Rust | imerkle/shuttle-core | /src/memo.rs | UTF-8 | 1,904 | 3.65625 | 4 | ["Apache-2.0"] | permissive |
use error::{Error, Result};
const MAX_MEMO_TEXT_LEN: usize = 28;
/// Memo attached to transactions.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Memo {
/// No memo
None,
/// Text Memo
Text(String),
/// Id Memo
Id(u64),
/// Hash Memo
Hash([u8; 32]),
/// Return Memo
Return([u8; 32]),
}
impl Memo {
/// Create new empty memo.
pub fn none() -> Memo {
Memo::None
}
/// Create new id memo.
pub fn id(id: u64) -> Memo {
Memo::Id(id)
}
/// Create new text memo. `text` must be at most 28 bytes (`MAX_MEMO_TEXT_LEN`).
pub fn text<S: Into<String>>(text: S) -> Result<Memo> {
let text = text.into();
if text.len() > MAX_MEMO_TEXT_LEN {
Err(Error::InvalidMemoText)
} else {
Ok(Memo::Text(text))
}
}
/// Create new hash memo.
pub fn hash(h: [u8; 32]) -> Memo {
Memo::Hash(h)
}
/// Create new return memo.
pub fn return_(r: [u8; 32]) -> Memo {
Memo::Return(r)
}
/// Return `true` if memo is `None`.
pub fn is_none(&self) -> bool {
match *self {
Memo::None => true,
_ => false,
}
}
/// Return `true` if memo is `Id`.
pub fn is_id(&self) -> bool {
match *self {
Memo::Id(_) => true,
_ => false,
}
}
/// Return `true` if memo is `Text`.
pub fn is_text(&self) -> bool {
match *self {
Memo::Text(_) => true,
_ => false,
}
}
/// Return `true` if memo is `Hash`.
pub fn is_hash(&self) -> bool {
match *self {
Memo::Hash(_) => true,
_ => false,
}
}
/// Return `true` if memo is `Return`.
pub fn is_return(&self) -> bool {
match *self {
Memo::Return(_) => true,
_ => false,
}
}
}
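// Added usage sketch (not part of the original file): only the text constructor can fail,
// and only when the text exceeds MAX_MEMO_TEXT_LEN bytes.
#[cfg(test)]
mod added_examples {
    use super::*;

    #[test]
    fn text_memo_length_is_checked() {
        assert!(Memo::text("a".repeat(MAX_MEMO_TEXT_LEN)).is_ok());
        assert!(Memo::text("a".repeat(MAX_MEMO_TEXT_LEN + 1)).is_err());
        assert!(Memo::id(7).is_id());
        assert!(Memo::none().is_none());
    }
}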
| true |
109e57cfd3ea0f53c72ac2140fbc468caab9098a | Rust | codeworm96/hikari | /src/metal.rs | UTF-8 | 843 | 2.8125 | 3 | [] | no_license |
use rand::prelude::*;
use crate::hitable::HitRecord;
use crate::material::Material;
use crate::ray::Ray;
use crate::util::random_in_unit_sphere;
use crate::vec3::{dot, Vec3};
pub struct Metal {
albedo: Vec3,
fuzz: f64,
}
impl Metal {
pub fn new(a: Vec3, f: f64) -> Metal {
Metal { albedo: a, fuzz: f }
}
}
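/// Mirror `v` about the surface normal `n`: r = v - 2 * dot(v, n) * n.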
fn reflect(v: &Vec3, n: &Vec3) -> Vec3 {
*v - *n * 2.0 * dot(v, n)
}
impl Material for Metal {
fn scatter(&self, r: &Ray, rec: &HitRecord, rng: &mut ThreadRng) -> Option<(Vec3, Ray)> {
let reflected = reflect(&r.direction().unit(), &rec.normal);
let direction = reflected + random_in_unit_sphere(rng) * self.fuzz;
if dot(&direction, &rec.normal) > 0.0 {
Some((self.albedo, Ray::new(rec.p, direction, r.time())))
} else {
None
}
}
}
| true |
c0f165ff016655d560faa79cf22937b8e13a7506 | Rust | EFanZh/LeetCode | /src/problem_0164_maximum_gap/radix_sort.rs | UTF-8 | 2,409 | 3.328125 | 3 | [] | no_license |
pub struct Solution;
// ------------------------------------------------------ snip ------------------------------------------------------ //
use std::mem;
impl Solution {
fn radix_sort(mut nums: Vec<i32>, max: i32) -> Vec<i32> {
// From the book Introduction to Algorithms, third edition, page 199.
let n = nums.len();
let num_bits = 32 - max.leading_zeros();
let log2_n = mem::size_of_val(&n) as u32 * 8 - 1 - n.leading_zeros();
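// Digit size: per the book's analysis, using r = min(b, floor(lg n)) bits per digit keeps
// the counting table at no more than n entries while keeping the number of passes small.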
let mask_bits = num_bits.min(log2_n);
let mask = (1 << mask_bits) - 1;
let mut counts = vec![0_u32; 1 << mask_bits];
let mut offset = 0;
let mut temp = vec![0; n];
loop {
// Radix sort `n` `b`-bit numbers with ⌈`b` / `r`⌉ `r`-bit digits.
// Count numbers.
for num in &*nums {
counts[((num >> offset) & mask) as usize] += 1;
}
// Calculate indices.
for i in 1..counts.len() {
counts[i] += counts[i - 1];
}
// Place result into `temp`.
for &num in nums.iter().rev() {
let key = ((num >> offset) & mask) as usize;
temp[counts[key] as usize - 1] = num;
counts[key] -= 1;
}
// Reset counters.
for count in &mut counts {
*count = 0;
}
mem::swap(&mut nums, &mut temp);
offset += mask_bits;
if offset >= num_bits {
break;
}
}
nums
}
pub fn maximum_gap(nums: Vec<i32>) -> i32 {
let n = nums.len() as u32;
let max = nums.iter().copied().max().unwrap();
if max > 0 && n > 1 {
let nums = Self::radix_sort(nums, max);
nums.iter()
.zip(&nums[1..])
.map(|(previous, current)| current - previous)
.max()
.unwrap()
} else {
0
}
}
}
// ------------------------------------------------------ snip ------------------------------------------------------ //
impl super::Solution for Solution {
fn maximum_gap(nums: Vec<i32>) -> i32 {
Self::maximum_gap(nums)
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_solution() {
super::super::tests::run::<super::Solution>();
}
}
| true |
86d3fca5209baafac4730ec9523713e737c48172 | Rust | CloudSetDrive/game | /network/src/lib.rs | UTF-8 | 1,259 | 2.59375 | 3 | [] | no_license |
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate bincode;
mod packet;
// Reexports
pub use packet::ServerPacket as ServerPacket;
pub use packet::ClientPacket as ClientPacket;
use std::io;
use std::net::{UdpSocket, SocketAddr, IpAddr, Ipv4Addr};
use packet::Serialize;
pub struct ServerConn {
sock: UdpSocket,
}
impl ServerConn {
pub fn new(bind_addr: &str, _remote_addr: &str) -> io::Result<ServerConn> {
Ok(ServerConn {
sock: UdpSocket::bind(bind_addr)?,
})
}
// The server socket is not connected, so each outgoing datagram needs an explicit destination.
pub fn send(&self, addr: &str, pack: ServerPacket) -> bool {
match pack.serialize() {
Some(data) => self.sock.send_to(&data, addr).is_ok(),
None => false,
}
}
}
pub struct ClientHandle {
addr: SocketAddr,
}
pub struct ClientConn {
sock: UdpSocket,
}
impl ClientConn {
pub fn new(bind_addr: &str, remote_addr: &str) -> io::Result<ClientConn> {
let sock = UdpSocket::bind(bind_addr)?;
sock.connect(remote_addr)?;
Ok(ClientConn {
sock,
})
}
pub fn send(&self, pack: ServerPacket) -> bool {
match pack.serialize() {
Some(data) => self.sock.send(&data).is_ok(),
None => false,
}
}
}
| true |
21b0792e0f2c7c6195692c4d364251cabd6f467d | Rust | leandronsp/fun | /rust/dsa/tests/008-structs.rs | UTF-8 | 2,103 | 4.21875 | 4 | [] | no_license |
// A struct lets you package together and name multiple related values
// in a meaningful group
// - Like tuples, structs hold multiple related values of different types
// - Unlike tuples, structs carry a meaningful name
#[cfg(test)]
mod tests {
#[test]
fn structs() {
struct User {
active: bool,
name: String,
score: u64
}
let user = User {
active: true,
name: String::from("Leandro"),
score: 8
};
assert_eq!(user.name, "Leandro");
assert_eq!(user.active, true);
assert_eq!(user.score, 8);
}
#[test]
fn tuple_structs() {
struct Color(i32, i32, i32);
let black = Color(0, 0, 0);
assert_eq!(black.0, 0);
}
#[test]
fn struct_methods() {
// Methods are similar to functions, but
// they are declared within the context of a Struct
// Their first parameter is always `self`
struct Account {
name: String,
balance: u32
}
impl Account {
fn deposit(&mut self, amount: u32) {
self.balance = self.balance + amount;
}
fn display(&self) -> String {
format!("{}'s balance is {}", self.name, self.balance)
}
}
let mut account_a = Account {
name: "Leandro".to_string(),
balance: 0
};
account_a.deposit(10);
assert_eq!(account_a.display(), "Leandro's balance is 10");
}
#[test]
fn associated_functions() {
struct Account {
name: String,
balance: u32
}
impl Account {
fn new(name: String, balance: u32) -> Self {
Self {
name,
balance
}
}
}
let account_a = Account::new(
"Leandro".to_string(),
50
);
assert_eq!(account_a.name, "Leandro");
assert_eq!(account_a.balance, 50);
}
}
| true |
eb0b7353680c698b172313a02fd6803419b2c25f | Rust | meltinglava/gura-rs-parser | /tests/variables.rs | UTF-8 | 2,841 | 3.0625 | 3 | ["MIT"] | permissive |
use gura::{
errors::{DuplicatedVariableError, ParseError, VariableNotDefinedError},
object,
parser::{parse, GuraType},
};
use std::env;
mod common;
fn get_expected() -> GuraType {
object! {
plain: 5,
in_array_middle: [1, 5, 3],
in_array_last: [1, 2, 5],
in_object: {
name: "Aníbal",
surname: "Troilo",
year_of_birth: 1914
}
}
}
const PARENT_FOLDER: &str = "variables";
#[test]
/// Tests variables definition
fn test_normal() {
let parsed_data = common::get_file_content_parsed(PARENT_FOLDER, "normal.ura").unwrap();
assert_eq!(parsed_data, get_expected());
}
#[test]
/// Tests errors in variables definition
fn test_with_error() {
let parsed_data = parse(&"test: $false_var");
assert!(parsed_data
.unwrap_err()
.downcast_ref::<VariableNotDefinedError>()
.is_some());
}
#[test]
/// Tests errors in variables definition
fn test_with_duplicated() {
let parsed_data = parse(&"$a_var: 14\n$a_var: 15");
assert!(parsed_data
.unwrap_err()
.downcast_ref::<DuplicatedVariableError>()
.is_some());
}
#[test]
/// Tests using environment variables
fn test_env_var() {
// Sets a new environment variable to check the correct value retrieval from Gura
let env_var_name = "env_var_value";
let env_value = "using_env_var";
env::set_var(env_var_name, env_value);
let parsed_data = parse(&format!("test: ${}", env_var_name)).unwrap();
assert_eq!(parsed_data, object! {test: env_value});
env::remove_var(env_var_name);
}
#[test]
/// Tests invalid variable value type
fn test_invalid_variable() {
let parsed_data = parse(&"$invalid: true");
assert!(parsed_data
.unwrap_err()
.downcast_ref::<ParseError>()
.is_some());
}
#[test]
/// Tests invalid variable value type
fn test_invalid_variable_2() {
let parsed_data = parse(&"$invalid: false");
assert!(parsed_data
.unwrap_err()
.downcast_ref::<ParseError>()
.is_some());
}
#[test]
/// Tests invalid variable value type
fn test_invalid_variable_3() {
let parsed_data = parse(&"$invalid: null");
assert!(parsed_data
.unwrap_err()
.downcast_ref::<ParseError>()
.is_some());
}
#[test]
/// Tests invalid variable value type
fn test_invalid_variable_4() {
let parsed_data = parse(&"$invalid: [ 1, 2, 3]");
assert!(parsed_data
.unwrap_err()
.downcast_ref::<ParseError>()
.is_some());
}
#[test]
/// Tests invalid variable value type
fn test_invalid_variable_5() {
let parsed_data =
common::get_file_content_parsed(PARENT_FOLDER, "invalid_variable_with_object.ura");
assert!(parsed_data
.unwrap_err()
.downcast_ref::<ParseError>()
.is_some());
}
| true |
fbb0e0220c0146b072bfbf56696da15d75f82e9a
|
Rust
|
gushernobindsme/rust-typical90
|
/q-027/src/main.rs
|
UTF-8
| 650 | 3.03125 | 3 |
[] |
no_license
|
// -*- coding:utf-8-unix -*-
use proconio::input;
use std::collections::HashMap;
fn main() {
input! {
n: usize,
s: [String; n],
}
// Build a map of (user name, registration day).
// Scan just once in reverse order; when the same name appears again, the entry is
// overwritten, so the earliest registration day wins.
let mut map: HashMap<String, usize> = HashMap::new();
for i in (0..n).rev() {
map.insert(s[i].to_string(), i + 1);
}
// Sort by registration day in ascending order.
let mut vec = map.into_iter().collect::<Vec<_>>();
vec.sort_by(|a, b| a.1.cmp(&b.1));
for (_, date) in vec.iter() {
println!("{}", date);
}
}
| true |
a23b0acbe92d6235225091e82d96f4c639c04309 | Rust | mneumann/ego | /ego-cli/src/main.rs | UTF-8 | 1,083 | 2.59375 | 3 | ["MIT"] | permissive |
extern crate ego;
extern crate rand;
extern crate serde_json;
use ego::driver::{Config, SimulationConfig};
use std::env;
use std::fs::File;
use std::io::Read;
fn run_with_config(config: Config) {
let mut simulation =
SimulationConfig::new_from_config(config).create_simulation(Box::new(rand::thread_rng()));
println!("iter\tbest_i\tbest_fit\tns_current\tns_total");
let max_iterations = 100;
loop {
simulation.print_statistics();
if simulation.iteration >= max_iterations {
break;
}
simulation = simulation.next_generation();
}
}
fn from_command_line() {
let config_file = env::args().nth(1).expect("config file");
println!("config file: {}", config_file);
let mut file = File::open(config_file).expect("Unable to open config file");
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("Unable to read file");
let config: Config = serde_json::from_str(&contents).unwrap();
run_with_config(config);
}
fn main() {
from_command_line();
}
| true |
f2fec6949d98746ba222afbdac943dcca8e7d886 | Rust | fossabot/improc | /viewer/src/app.rs | UTF-8 | 2,911 | 2.6875 | 3 | [] | no_license |
use anyhow::Result;
use cgmath::Point3;
use image::DynamicImage;
use crate::{
image_manager::{Color, ImageManager},
presenter::Presenter,
viewer::Viewer,
};
const VIEWER_WINDOW_TITLE: &str = "Image Viewer";
pub struct App {
viewer: Viewer,
presenter: Presenter,
image_manager: ImageManager,
}
#[allow(dead_code)]
impl App {
pub fn new(width: u32, height: u32) -> Result<App> {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
{
let gl_attr = video_subsystem.gl_attr();
gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
gl_attr.set_context_version(3, 1);
let (major, minor) = gl_attr.context_version();
println!("OK : init OpenGL: version = {}.{}", major, minor);
}
let window = video_subsystem
.window("SDL", width, height)
.opengl()
.position_centered()
.build()
.unwrap();
let gl_context = window.gl_create_context().unwrap();
gl::load_with(|s| video_subsystem.gl_get_proc_address(s) as _);
let app = App {
viewer: Viewer::new(sdl_context, video_subsystem, window, gl_context),
presenter: Presenter::new(width, height),
image_manager: ImageManager::new(),
};
Ok(app)
}
pub fn run(self) -> Result<()> {
self.viewer.render(self.presenter, self.image_manager)
}
pub fn add_image(mut self, image: &DynamicImage, id: &str) -> Self {
self.image_manager.add_image(image, id);
self
}
pub fn add_images(mut self, images: &Vec<DynamicImage>, id_base: &str) -> Self {
for i in 0..images.len() {
let id = format!("{}_{}", id_base, i);
self = self.add_image(images.get(i).unwrap(), &id);
}
self
}
pub fn add_point(mut self, point: &Point3<f32>, image_id: &str, color: &Color) -> Self {
self.image_manager.add_point(point, image_id, color);
self
}
pub fn add_points(self, points: &Vec<Point3<f32>>, image_id: &str, color: &Color) -> Self {
points
.iter()
.fold(self, |app, point| app.add_point(point, image_id, color))
}
pub fn add_point_relation(
mut self,
points: &Vec<Point3<f32>>,
image_ids: &Vec<String>,
) -> Self {
assert_eq!(points.len(), image_ids.len());
self.image_manager.add_point_relation();
self
}
pub fn add_point_relations(
mut self,
points: &Vec<Vec<Point3<f32>>>,
image_ids: &Vec<Vec<String>>,
) -> Self {
assert_eq!(points.len(), image_ids.len());
for i in 0..points.len() {
self = self.add_point_relation(points.get(i).unwrap(), image_ids.get(i).unwrap());
}
self
}
}
| true |
c0f6d60e9cb8fd20461ccbdcc16245925b3de9fd | Rust | ilovelll/learn-rust-by-example | /ch8-flow-control/src/main.rs | UTF-8 | 7,281 | 3.75 | 4 | [] | no_license |
fn main() {
let mut counter = 0;
let result = loop {
counter += 1;
if counter == 10 {
break counter * 2; // return value with break
}
};
assert_eq!(result, 20);
let mut n = 1;
while n < 101 {
if n % 15 == 0 {
println!("fizzbuzz");
} else if n % 3 == 0 {
println!("fizz");
} else if n % 5 == 0 {
println!("buzz");
} else {
println!("{}", n);
}
n += 1;
}
println!("===================");
// a..b or a..=b
for n in 1..=100 {
match n {
// `if condition` part is a guard
x if x % 15 ==0 => println!("fizzbuzz, {}", x),
x if x % 3 == 0 => println!("fizz"),
x if x % 5 == 0 => println!("buzz"),
_ => println!("{}", n),
}
}
let names = vec!["Bob", "Alice", "Frank"];
// `iter` borrows each element of the collection through each iteration;
// `names` is left untouched and available for reuse after the loop
for name in names.iter() {
match name {
&"Frank" => println!("There is a rustacean among us!"),
_ => println!("Hello {}", name),
}
}
println!("{:#?}", names);
// `into_iter` consumes the collection rather than borrowing it;
// `names` is no longer live afterwards
for name in names.into_iter() {
match name {
"Frank" => println!("There is a rustacean among us!"),
_ => println!("Hello {}", name),
}
}
// the line below must stay commented out: `names` has been moved
// println!("{:#?}", names);
let mut names = vec!["Alice", "Bob", "Frank"];
// `iter_mut` mutably borrows each element, so the values can be modified
for name in names.iter_mut() {
match name {
&mut "Frank" => println!("There is a rustacean among us!"),
_ => println!("Hello {}", name),
}
}
println!("{:#?}", names);
// match can destructure enums, pointers, structs and tuples
// match tuple
let pair = (0, -2);
println!("Tell me about {:?}", pair);
match pair {
(0, y) => println!("First is `0` and `y` is `{:?}`", y),
(x, 0) => println!("`x` is {:?} and last is `0`", x),
_ => println!("It doesn't matter what they are"),
}
//match enums
// `allow` required to silence warnings because only
// one variant is used.
#[allow(dead_code)]
enum Color {
// These 3 are specified solely by their name.
Red,
Blue,
Green,
// These likewise tie `u32` tuples to different names: color models.
RGB(u32, u32, u32),
HSV(u32, u32, u32),
HSL(u32, u32, u32),
CMY(u32, u32, u32),
CMYK(u32, u32, u32, u32),
}
let color = Color::RGB(122, 17, 40);
// TODO ^ Try different variants for `color`
println!("What color is it?");
// An `enum` can be destructured using a `match`.
match color {
Color::Red => println!("The color is Red!"),
Color::Blue => println!("The color is Blue!"),
Color::Green => println!("The color is Green!"),
Color::RGB(r, g, b) =>
println!("Red: {}, green: {}, and blue: {}!", r, g, b),
Color::HSV(h, s, v) =>
println!("Hue: {}, saturation: {}, value: {}!", h, s, v),
Color::HSL(h, s, l) =>
println!("Hue: {}, saturation: {}, lightness: {}!", h, s, l),
Color::CMY(c, m, y) =>
println!("Cyan: {}, magenta: {}, yellow: {}!", c, m, y),
Color::CMYK(c, m, y, k) =>
println!("Cyan: {}, magenta: {}, yellow: {}, key (black): {}!",
c, m, y, k),
// Don't need another arm because all variants have been examined
}
// match ref
// a reference
let reference = &4;
println!("{}", *reference);
// destructuring
match reference {
&val => println!("Got a value via destructuring: {:?}", val),
}
// dereferencing
match *reference {
val => println!("Got a value via dereferencing: {:?}", val),
}
let _not_a_reference = 4;
let ref _is_a_reference = 3;
let value = 5;
let mut mut_value = 6;
match value {
ref r => println!("Got a reference to a value: {:?}", r),
}
match mut_value {
ref mut m => {
*m += 10;
println!("Added 10. `mut_value`: {:?}", m);
},
}
struct Foo { x: (u32, u32), y: u32 }
// destructure members of the struct
let foo = Foo { x: (1, 2), y: 3 };
let Foo { x: (a, b), y } = foo;
println!("a = {}, b = {}, y = {} ", a, b, y);
// you can destructure structs and rename the variables,
// the order is not important
let Foo { y: i, x: j } = foo;
println!("i = {:?}, j = {:?}", i, j);
// and you can also ignore some variables:
let Foo { y, .. } = foo;
println!("y = {}", y);
// this will give an error: pattern does not mention field `x`
// let Foo { y } = foo;
// Binding at match use `@`
match age() {
0 => println!("I'm not born yet I guess"),
n @ 1 ..= 12 => println!("I'm a child of age {:?}", n),
n @ 13 ..= 19 => println!("I'm a teen of age {:?}", n),
n => println!("I'm a old person of age {:?}", n),
}
// if let is cleaner with enums
let number = Some(6);
let letter: Option<i32> = None;
let emotion: Option<i32> = None;
if let Some(i) = number {
println!("Matched {:?}", i);
}
if let Some(i) = letter {
println!("Matched {:?}", i);
} else {
println!("Didn't match a number.");
}
let i_like_letters = false;
if let Some(i) = emotion {
println!("Matched {:?}", i);
} else if i_like_letters {
println!("Didn't match a number.");
} else {
println!("Didn't match a number. emotion :)");
}
enum Foot {
Bar,
Baz,
Qux(u32)
}
let a = Foot::Bar;
let b = Foot::Baz;
let c = Foot::Qux(100);
if let Foot::Bar = a {
println!("a is footer");
}
if let Foot::Qux(i) = c {
println!("c is {}", i);
}
// `while let` works the same way with enums
let mut optional = Some(4);
// Repeatedly try this test.
// loop {
// match optional {
// // If `optional` destructures, evaluate the block.
// Some(i) => {
// if i > 9 {
// println!("Greater than 9, quit!");
// optional = None;
// } else {
// println!("`i` is `{:?}`. Try again.", i);
// optional = Some(i + 1);
// }
// // ^ Requires 3 indentations!
// },
// // Quit the loop when the destructure fails:
// _ => { break; }
// // ^ Why should this be required? There must be a better way!
// }
// }
while let Some(i) = optional {
if i > 9 {
println!("Greater than 9, quit");
optional = None;
} else {
println!("`i` is `{:?}`. Try again.", i);
optional = Some(i + 1);
}
}
}
fn age() -> u32 {
15
}
| true |
109fbcf8a3aa417d55a019408e6eda3ce9d9eb6b | Rust | darayus/deuterium | /src/sql/select.rs | UTF-8 | 3,099 | 2.703125 | 3 | ["MIT"] | permissive |
use from::{FromSelect};
use select_query::{
Select,
SelectQuery, RcSelectQuery,
SelectFor
};
use sql::{SqlContext, ToSql, QueryToSql};
use sql::value::{ToPredicateValue};
use sql::from::{FromToSql};
impl<T, L, M> FromToSql for FromSelect<T, L, M> {
fn to_from_sql(&self, ctx: &mut SqlContext) -> String {
format!("({}) as {}", self.select.to_sql(ctx), self.alias.to_string())
}
}
impl ToSql for SelectFor {
fn to_sql(&self, _ctx: &mut SqlContext) -> String {
match self {
&SelectFor::Update => "FOR UPDATE",
&SelectFor::UpdateNoWait => "FOR UPDATE NOWAIT",
&SelectFor::Share => "FOR SHARE",
&SelectFor::ShareNoWait => "FOR SHARE NOWAIT",
}.to_string()
}
}
impl<T, L, M> ToSql for SelectQuery<T, L, M> {
fn to_sql(&self, ctx: &mut SqlContext) -> String {
let mut sql = "SELECT".to_string();
if self.distinct.is_some() {
sql = format!("{} {}", sql, self.distinct.as_ref().unwrap().to_sql(ctx));
}
sql = format!("{} {} FROM {}",
sql,
self.select.to_sql(ctx),
self.from.as_sql().to_from_sql(ctx)
);
if !self.joins.is_empty() {
let joins: Vec<String> = self.joins.iter().map(|join| join.to_sql(ctx)).collect();
sql = format!("{} {}", sql, joins.connect(" "))
}
if self.where_.is_some() {
sql = format!("{} WHERE {}", sql, self.where_.as_ref().unwrap().to_sql(false, ctx));
}
if self.group_by.is_some() {
sql = format!("{}{}", sql, self.group_by.as_ref().unwrap().to_sql(ctx));
}
if self.having.is_some() {
sql = format!("{} HAVING {}", sql, self.having.as_ref().unwrap().to_sql(false, ctx));
}
if !self.order_by.is_empty() {
let orders: Vec<String> = self.order_by.iter().map(|ord| ord.to_sql(ctx)).collect();
sql = format!("{} ORDER BY {}", sql, orders.connect(", "))
}
if self.limit.is_some() {
sql = format!("{} LIMIT {}", sql, self.limit.unwrap())
}
if self.offset.is_some() {
sql = format!("{} OFFSET {}", sql, self.offset.unwrap())
}
if self.for_.is_some() {
sql = format!("{} {}", sql, self.for_.as_ref().unwrap().to_sql(ctx))
}
sql
}
}
impl<T, L, M> QueryToSql for SelectQuery<T, L, M> {}
impl ToSql for RcSelectQuery {
fn to_sql(&self, ctx: &mut SqlContext) -> String {
(**self).to_sql(ctx)
}
}
impl ToSql for Select {
fn to_sql(&self, ctx: &mut SqlContext) -> String {
match self {
&Select::Only(ref fields) => {
let defs: Vec<String> = fields.iter().map(|f| f.expression_as_sql().to_sql(ctx)).collect();
defs.connect(", ")
},
&Select::All => "*".to_string()
}
}
}
impl<T, L, M> ToPredicateValue for SelectQuery<T, L, M> {
fn to_predicate_value(&self, ctx: &mut SqlContext) -> String { self.to_sql(ctx) }
}
| true
6e025ee88112de579910e16fe7613b4a5fb29ab6 | Rust | Gordon-F/rust-by-example-ru | /examples/hello/print/print.rs | UTF-8 | 2,422 | 3.84375 | 4 | ["MIT", "Apache-2.0"] | permissive |
fn main() {
    // `{}` will automatically be replaced with the arguments,
    // which are converted into strings.
    println!("{} days", 31);
    // Without a suffix, 31 is an i32. The type of 31 can be changed
    // by adding a suffix.
    // There are many ways to work with formatted output. A position
    // can be specified for each argument.
    println!("{0}, this is {1}. {1}, this is {0}", "Alice", "Bob");
    // Arguments can also be named.
    println!("{subject} {verb} {object}",
             object="the lazy dog",
             subject="the quick brown fox",
             verb="jumps over");
    // Special formatting can be specified after a `:`.
    println!("{} of {:b} people know binary, the other half don't", 1, 2);
    // Text can be right-aligned to a given width. This macro prints
    // "     1": 5 spaces followed by "1".
    println!("{number:>width$}", number=1, width=6);
    // Numbers can be zero-padded. This macro prints "000001".
    println!("{number:>0width$}", number=1, width=6);
    // The compiler makes sure the correct number of arguments
    // is passed to the macro.
    println!("My name is {0}, {1} {0}", "Bond");
    // FIXME ^ Add the missing argument: "James"
    // Create a structure that holds an `i32`. Call it `Structure`.
    #[allow(dead_code)]
    struct Structure(i32);
    // However, custom types such as this structure require more
    // complicated handling in order to be printed. This code will not work.
    println!("This struct `{}` won't print...", Structure(3));
    // FIXME ^ Comment out this line.
}
| true
89d6b2e460aa2e9676da77beb3a1978f8835e9ea | Rust | seansfkelley/rt-rs | /src/core/color.rs | UTF-8 | 4,673 | 3.609375 | 4 | [] | no_license |
use std::ops::{ Add, Sub, Div, Mul, AddAssign, SubAssign, DivAssign, MulAssign };
use std::fmt::{ Display, Debug, Formatter, Result };
use std::f64::{ INFINITY, NEG_INFINITY };
use math::*;
#[derive(Clone, Copy)]
pub struct Color {
pub r: f64,
pub g: f64,
pub b: f64,
}
impl Color {
pub const BLACK: Color = Color { r: 0f64, g: 0f64, b: 0f64 };
pub const WHITE: Color = Color { r: 1f64, g: 1f64, b: 1f64 };
pub fn new(r: f64, g: f64, b: f64) -> Color {
Color { r, g, b }
}
pub fn as_bytes(&self) -> [u8; 3] {
let clamped = self.clamp();
[
(clamped.r * 255f64) as u8,
(clamped.g * 255f64) as u8,
(clamped.b * 255f64) as u8,
]
}
pub fn clamp(&self) -> Color {
Color {
r: self.r.clamp(0f64, 1f64),
g: self.g.clamp(0f64, 1f64),
b: self.b.clamp(0f64, 1f64),
}
}
pub fn is_nonzero(&self) -> bool {
self.r > 0f64 || self.g > 0f64 || self.b > 0f64
}
pub fn average(&self) -> f64 {
(self.r + self.g + self.b) / 3f64
}
fn format(&self, f: &mut Formatter) -> Result {
match f.precision() {
Some(p) => {
write!(f, "Color<{:.*}, {:.*}, {:.*}>", p, self.r, p, self.g, p, self.b)
}
None => {
write!(f, "Color<{}, {}, {}>", self.r, self.g, self.b)
}
}
}
}
impl Add for Color {
type Output = Color;
fn add(self, other: Color) -> Color {
Color {
r: self.r + other.r,
g: self.g + other.g,
b: self.b + other.b,
}
}
}
impl AddAssign for Color {
fn add_assign(&mut self, other: Color) {
self.r += other.r;
self.g += other.g;
self.b += other.b;
}
}
impl Sub for Color {
type Output = Color;
fn sub(self, other: Color) -> Color {
Color {
r: self.r - other.r,
g: self.g - other.g,
b: self.b - other.b,
}
}
}
impl SubAssign for Color {
fn sub_assign(&mut self, other: Color) {
self.r -= other.r;
self.g -= other.g;
self.b -= other.b;
}
}
impl Div<f64> for Color {
type Output = Color;
fn div(self, divisor: f64) -> Color {
Color {
r: self.r / divisor,
g: self.g / divisor,
b: self.b / divisor,
}
}
}
impl DivAssign for Color {
fn div_assign(&mut self, other: Color) {
self.r /= other.r;
self.g /= other.g;
self.b /= other.b;
}
}
impl Mul<f64> for Color {
type Output = Color;
fn mul(self, multiplicand: f64) -> Color {
Color {
r: self.r * multiplicand,
g: self.g * multiplicand,
b: self.b * multiplicand,
}
}
}
impl Mul<Color> for f64 {
type Output = Color;
fn mul(self, other: Color) -> Color {
other * self
}
}
impl MulAssign<f64> for Color {
fn mul_assign(&mut self, multiplicand: f64) {
self.r *= multiplicand;
self.g *= multiplicand;
self.b *= multiplicand;
}
}
impl Mul for Color {
type Output = Color;
fn mul(self, other: Color) -> Color {
Color {
r: self.r * other.r,
g: self.g * other.g,
b: self.b * other.b,
}
}
}
impl MulAssign for Color {
fn mul_assign(&mut self, other: Color) {
self.r *= other.r;
self.g *= other.g;
self.b *= other.b;
}
}
impl Display for Color {
fn fmt(&self, f: &mut Formatter) -> Result {
self.format(f)
}
}
impl Debug for Color {
fn fmt(&self, f: &mut Formatter) -> Result {
self.format(f)
}
}
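// Sums the per-channel spreads (max - min) over a slice of colors, giving a single
// rough measure of how much the samples differ from one another.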
pub fn min_vs_max(colors: &[Color]) -> f64 {
let mut min_r = INFINITY;
let mut max_r = NEG_INFINITY;
let mut min_g = INFINITY;
let mut max_g = NEG_INFINITY;
let mut min_b = INFINITY;
let mut max_b = NEG_INFINITY;
for color in colors {
min_r = min_r.min(color.r);
max_r = max_r.max(color.r);
min_g = min_g.min(color.g);
max_g = max_g.max(color.g);
min_b = min_b.min(color.b);
max_b = max_b.max(color.b);
}
max_r - min_r + max_g - min_g + max_b - min_b
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_should_be_nonzero_if_any_terms_are_greater_than_zero() {
assert!(Color::new(0f64, 0f64, 1f64).is_nonzero());
assert!(Color::new(0f64, 1f64, 0f64).is_nonzero());
assert!(Color::new(1f64, 0f64, 0f64).is_nonzero());
assert!(!Color::new(0f64, 0f64, 0f64).is_nonzero());
}
}
| true
c102cb418a16899bb75e0c789899c383f0bde59c | Rust | renellc/rusty-chip | /src/chip8/instructions_test.rs | UTF-8 | 6,790 | 3.109375 | 3 | [] | no_license |
#[cfg(test)]
mod instructions_parse_test {
use crate::chip8::instructions::Instruction;
use std::convert::TryFrom;
#[test]
fn try_into_test_1nnn() {
let opcode = 0x1FA3;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::FlowJump(addr) = instr {
assert_eq!(addr, 0xFA3);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
}
#[test]
fn try_into_test_2nnn() {
let opcode = 0x2A02;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::FlowCall(addr) = instr {
assert_eq!(addr, 0xA02);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
}
#[test]
fn try_into_test_3xnn() {
let opcode = 0x3B22;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::CondVxNNEq(reg, byte) = instr {
assert_eq!(reg, 0xB);
assert_eq!(byte, 0x22);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
}
#[test]
fn try_into_test_4xnn() {
let opcode = 0x4C37;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::CondVxNNNeq(reg, byte) = instr {
assert_eq!(reg, 0xC);
assert_eq!(byte, 0x37);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
}
#[test]
fn try_into_test_5xy0() {
let opcode = 0x5CA0;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::CondVxVyEq(x, y) = instr {
assert_eq!(x, 0xC);
assert_eq!(y, 0xA);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
}
#[test]
fn try_into_test_6xnn() {
let opcode = 0x65D7;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::ConstVxNN(reg, byte) = instr {
assert_eq!(reg, 0x5);
assert_eq!(byte, 0xD7);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
}
#[test]
fn try_into_test_7xnn() {
let opcode = 0x7E15;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::ConstVxAddNN(reg, byte) = instr {
assert_eq!(reg, 0xE);
assert_eq!(byte, 0x15);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
}
#[test]
fn try_into_test_8xyn() {
// 8xy0
let opcode = 0x8420;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::AssignVxVy(x, y) = instr {
assert_eq!(x, 0x4);
assert_eq!(y, 0x2);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
// 8xy1
let opcode = 0x8611;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::BitOpOR(x, y) = instr {
assert_eq!(x, 0x6);
assert_eq!(y, 0x1);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
// 8xy2
let opcode = 0x8DA2;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::BitOpAND(x, y) = instr {
assert_eq!(x, 0xD);
assert_eq!(y, 0xA);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
// 8xy3
let opcode = 0x8543;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::BitOpXOR(x, y) = instr {
assert_eq!(x, 0x5);
assert_eq!(y, 0x4);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
// 8xy4
let opcode = 0x8AE4;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::MathVxVyAdd(x, y) = instr {
assert_eq!(x, 0xA);
assert_eq!(y, 0xE);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
// 8xy5
let opcode = 0x8715;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::MathVxVySub(x, y) = instr {
assert_eq!(x, 0x7);
assert_eq!(y, 0x1);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
// 8xy6
let opcode = 0x8166;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::BitOpShiftRight(x, y) = instr {
assert_eq!(x, 0x1);
assert_eq!(y, 0x6);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
// 8xy7
let opcode = 0x8297;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::MathVyVxSub(x, y) = instr {
assert_eq!(x, 0x2);
assert_eq!(y, 0x9);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
// 8xyE
let opcode = 0x893E;
let instr = Instruction::try_from(opcode).unwrap();
if let Instruction::BitOpShiftLeft(x, y) = instr {
assert_eq!(x, 0x9);
assert_eq!(y, 0x3);
} else {
panic!(
"Opcode: {:?} failed to parse into correct instruction. Got {:?}",
opcode, instr
);
}
}
}
| true
cd6d8c9a03e475ff99551bce643df11f290e94f4 | Rust | dannymcgee/lox | /packages/vm/src/vector/mod.rs | UTF-8 | 2,053 | 2.71875 | 3 | ["MIT"] | permissive |
use std::{
alloc::{self, Layout},
mem,
ptr::{self, NonNull},
};
mod debug;
mod into_iter;
mod iter;
pub use into_iter::IntoIter;
#[cfg(test)]
mod tests;
#[macro_export]
macro_rules! vector {
[] => {
$crate::vector::Vector::new()
};
[$($elem:expr),*$(,)?] => {{
let mut vec = $crate::vector::Vector::new();
$(vec.push($elem);)*
vec
}};
}
pub use vector;
pub struct Vector<T> {
pub(super) ptr: NonNull<T>,
pub(super) cap: usize,
len: usize,
}
unsafe impl<T: Send> Send for Vector<T> {}
unsafe impl<T: Sync> Sync for Vector<T> {}
impl<T> Vector<T> {
pub fn new() -> Self {
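        // Zero-sized types never allocate and would need dedicated handling, so this
        // simplified Vec implementation rejects them up front.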
assert!(mem::size_of::<T>() != 0);
Self {
ptr: NonNull::dangling(),
cap: 0,
len: 0,
}
}
pub(super) fn ptr(&self) -> *mut T {
self.ptr.as_ptr()
}
pub fn push(&mut self, element: T) {
if self.len >= self.cap {
self.grow();
}
unsafe {
ptr::write(self.ptr().add(self.len), element);
}
self.len += 1;
}
pub fn pop(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
self.len -= 1;
Some(unsafe { ptr::read(self.ptr().add(self.len)) })
}
}
pub(super) fn grow(&mut self) {
let (new_cap, new_layout) = if self.cap == 0 {
(8, Layout::array::<T>(8).unwrap())
} else {
let new_cap = self.cap * 2;
let new_layout = Layout::array::<T>(new_cap).unwrap();
(new_cap, new_layout)
};
assert!(
new_layout.size() <= isize::MAX as usize,
"Allocation too large"
);
let new_ptr = if self.cap == 0 {
unsafe { alloc::alloc(new_layout) }
} else {
let old_layout = Layout::array::<T>(self.cap).unwrap();
let old_ptr = self.ptr() as *mut u8;
unsafe { alloc::realloc(old_ptr, old_layout, new_layout.size()) }
};
self.ptr = match NonNull::new(new_ptr as *mut T) {
Some(ptr) => ptr,
None => alloc::handle_alloc_error(new_layout),
};
self.cap = new_cap;
}
}
impl<T> Drop for Vector<T> {
fn drop(&mut self) {
if self.cap != 0 {
let layout = Layout::array::<T>(self.cap).unwrap();
unsafe { alloc::dealloc(self.ptr() as *mut u8, layout) }
}
}
}
| true
72bcc9f995aa8614aa79b8968086af5aabc04d26 | Rust | alcarney/iaith | /iaith/src/main.rs | UTF-8 | 382 | 2.578125 | 3 | [] | no_license |
use iaith::brainf::Program;
use std::env;
use std::process;
fn main() {
let mut args = env::args();
args.next();
let mut prog = match args.next() {
Some(p) => Program::new(&p),
None => {
eprintln!("You must specify a program.");
process::exit(1);
}
};
let output = prog.execute();
println!("{}", output);
}
| true
0b16389ab1a18f936c4a7a6d94602ec99faf96ae | Rust | PI-Victor/blog-api | /src/http/routes.rs | UTF-8 | 434 | 2.625 | 3 | [] | no_license |
use crate::api::types::{DBConn, NewPost, NewUser};
use rocket_contrib::json::Json;
#[get("/", format = "json")]
pub fn get_posts(conn: DBConn) {}
#[get("/<id>", format = "json")]
pub fn get_post(id: usize) {}
#[post("/new", format = "application/json", data = "<post>")]
pub fn new_post(conn: DBConn, post: Json<NewPost>) {
info!("THIS IS MY POST: {:?}", post)
}
#[get("/<id>", format = "json")]
pub fn get_user(id: usize) {}
| true
3d54a136ff31517000be949ad74bb4d5f8df6295 | Rust | cjkenn/tyr | /src/sym_tab.rs | UTF-8 | 1,003 | 3.734375 | 4 | [] | no_license |
use std::collections::HashMap;
/// SymbolTable is used to help determine program
/// addresses to jump to when executing jump
/// instructions.
pub struct SymbolTable {
/// Hash table mapping a label name to an address in a program.
table: HashMap<String, usize>
}
impl SymbolTable {
pub fn new() -> SymbolTable {
SymbolTable {
table: HashMap::new()
}
}
pub fn insert(&mut self, key: String, val: usize) {
self.table.insert(key, val);
}
pub fn get(&self, key: &String) -> Option<&usize> {
self.table.get(key)
}
pub fn is_duplicate(&self, key: &String) -> bool {
self.table.get(key).is_some()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_is_duplicate() {
let mut sym_tab = SymbolTable::new();
let key = "test".to_string();
sym_tab.insert("test".to_string(), 5);
let result = sym_tab.is_duplicate(&key);
assert_eq!(result, true);
}
}
| true
868d3909baf4499348232308963a3484320b025b | Rust | jiri/thesis-assembler | /src/grammar.rs | UTF-8 | 2,993 | 2.84375 | 3 | [] | no_license |
use std::collections::HashMap;
pub type Label = String;
#[derive(Debug)]
pub struct Register(pub u8);
impl Register {
fn new(n: u8) -> Result<Register, &'static str> {
if n <= 15 {
Ok(Register(n))
} else {
Err("register index between 0 and 15")
}
}
}
#[derive(Debug)]
pub enum Address {
Label(Label),
Immediate(u16),
}
pub type Opcode = u8;
#[derive(Debug)]
pub enum Serializable {
Byte(u8),
String(String),
}
#[derive(Debug)]
pub enum Nibble {
Both,
High,
Low,
}
#[derive(Debug)]
pub enum Value {
Immediate(u8),
Addr(Address, Nibble),
}
#[derive(Debug)]
pub enum Instruction {
Db(Vec<Serializable>),
Ds(u16),
Org(u16),
Include(String),
Nullary(Opcode),
UnaryReg(Opcode, Register),
UnaryAddr(Opcode, Address),
BinaryRegIm(Opcode, Register, Value),
BinaryRegReg(Opcode, Register, Register),
}
impl Instruction {
pub fn opcode(&self) -> Option<Opcode> {
use self::Instruction::*;
match self {
Db(_) | Ds(_) | Org(_) | Include(_) => None,
Nullary(op)
| UnaryReg(op, _)
| UnaryAddr(op, _)
| BinaryRegIm(op, _, _)
| BinaryRegReg(op, _, _) => Some(*op),
}
}
}
#[derive(Debug)]
pub struct Line {
pub label: Option<Label>,
pub instruction: Option<Instruction>,
}
lazy_static! {
pub static ref OPCODES: HashMap<&'static str, Opcode> = {
let mut map = HashMap::new();
/* Utility */
map.insert("nop", 0x00);
map.insert("sleep", 0x02);
map.insert("break", 0x03);
map.insert("sei", 0x04);
map.insert("sec", 0x05);
map.insert("sez", 0x06);
map.insert("cli", 0x07);
map.insert("clc", 0x08);
map.insert("clz", 0x09);
/* Arithmetic */
map.insert("add", 0x10);
map.insert("adc", 0x11);
map.insert("sub", 0x12);
map.insert("sbc", 0x13);
map.insert("inc", 0x14);
map.insert("dec", 0x15);
map.insert("and", 0x16);
map.insert("or", 0x17);
map.insert("xor", 0x18);
map.insert("cp", 0x19);
map.insert("cpi", 0x1A);
/* Flow control */
map.insert("jmp", 0x20);
map.insert("call", 0x21);
map.insert("ret", 0x22);
map.insert("reti", 0x23);
map.insert("brc", 0x24);
map.insert("brnc", 0x25);
map.insert("brz", 0x26);
map.insert("brnz", 0x27);
/* Load / store */
map.insert("mov", 0x30);
map.insert("ldi", 0x31);
map.insert("ld", 0x32);
map.insert("st", 0x33);
map.insert("push", 0x34);
map.insert("pop", 0x35);
map.insert("lpm", 0x36);
map.insert("in", 0x3A);
map.insert("out", 0x3B);
map
};
}
include!(concat!(env!("OUT_DIR"), "/gpr.rs"));
| true
4ff45a63561c33288e57b0315cad166ea38ba0a6 | Rust | xkikeg/rust-examples | /p003_simple_list_and_move.rs | UTF-8 | 659 | 2.890625 | 3 | ["MIT", "LicenseRef-scancode-unknown-license-reference"] | permissive |
// Copyright (c) 2014 liquid_amber
// This file is distributed under MIT license.
// See LICENSE file.
enum SimpleList<T> {
Cons(T, Box<SimpleList<T>>),
Nil,
}
fn length<T>(xs: &SimpleList<T>) -> i32 {
match xs {
&SimpleList::Cons(_, ref ys) => 1 + length(ys),
&SimpleList::Nil => 0,
}
}
fn main() {
let mut xs = Box::new(SimpleList::Nil);
xs = Box::new(SimpleList::Cons(3, xs));
xs = Box::new(SimpleList::Cons(2, xs));
xs = Box::new(SimpleList::Cons(1, xs));
let ys = xs;
println!("{}", length(&*ys)); // OK
// xs is moved. It will be compiler error.
// println!("{}", length(xs)); // NG!
}
| true
73f1748a440882336310c7fc790a95c5572c8b8d | Rust | k-ymmt/splash | /src/main.rs | UTF-8 | 2,937 | 3.109375 | 3 | ["MIT"] | permissive |
use termion::event::Key;
use termion::event::Event;
use termion::raw::{IntoRawMode, RawTerminal};
use termion::input::TermRead;
use termion::cursor::DetectCursorPos;
use std::io::{Write, Read, stdin, stdout, Stdin, Stdout, StdoutLock};
fn main() {
let stdin = stdin();
let stdin = stdin.lock();
let stdout = stdout().into_raw_mode().unwrap();
let mut manager = TerminalManager::new(stdout);
for event in stdin.keys() {
match event {
Ok(key) => {
match key {
Key::Esc => return,
Key::Char(i) => {
manager.write(i).unwrap();
}
Key::Backspace => {
manager.backspace().unwrap();
}
Key::Ctrl(v) => {
match v {
'a' => {
manager.move_caret_first().unwrap();
}
                            _ => {}
}
}
_ => continue,
}
}
            Err(_) => {}
}
}
}
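// Owns the raw-mode stdout plus the text typed so far: `buffer` holds completed lines
// and `current_line_string` the line being edited, so the screen can be redrawn after
// backspace or cursor movements.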
struct TerminalManager {
// stdin: Stdin,
stdout: RawTerminal<Stdout>,
buffer: Vec<String>,
current_line_string: String,
}
impl TerminalManager {
fn new(stdout: RawTerminal<Stdout>) -> TerminalManager {
TerminalManager {
// stdin: stdin(),
stdout,
buffer: Vec::new(),
current_line_string: String::new(),
}
}
fn write(&mut self, c: char) -> Result<(), std::io::Error> {
let mut stdout = self.stdout.lock();
self.current_line_string.push(c);
if c == '\n' {
write!(stdout, "{}", '\n')?;
self.buffer.push(self.current_line_string.to_string());
self.current_line_string.clear();
}
write!(stdout, "{}{}{}", termion::clear::CurrentLine, '\r', self.current_line_string)?;
stdout.flush()?;
Ok(())
}
fn backspace(&mut self) -> Result<(), std::io::Error> {
let mut stdout = self.stdout.lock();
let c = self.current_line_string.pop();
if c == None {
if let Some(v) = self.buffer.pop() {
self.current_line_string = v;
write!(stdout, "{}", termion::cursor::Up(1))?;
self.current_line_string.pop();
} else {
return Ok(());
}
}
write!(stdout, "{}{}{}", termion::clear::CurrentLine, '\r', self.current_line_string)?;
stdout.flush()?;
Ok(())
}
fn move_caret_first(&mut self) -> Result<(), std::io::Error> {
write!(self.stdout.lock(), "{}", '\r')?;
let (_, col) = self.stdout.cursor_pos()?;
write!(self.stdout.lock(), "{}", termion::cursor::Goto(0, col))?;
Ok(())
}
}
| true
a818c131defb5a142d8aa34283ddef4c723666b5 | Rust | rust-user-group-graz/05-data-structures | /examples/asmdump/src/main.rs | UTF-8 | 1,436 | 2.703125 | 3 | [] | no_license |
#![feature(asm)]
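// Dumps 70 quadwords starting at the stack pointer: each iteration uses inline asm to
// load the value at rsp + 8*offset and prints it next to its byte offset.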
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn dump_stack() {
let nr_elements = 70;
for i in 0..nr_elements {
let offset = nr_elements - i - 1;
let mut result: u64;
unsafe {
asm!("movq %rsp, %rax
addq %rbx, %rax
movq (%rax), %rcx"
: "={rcx}"(result)
: "{rbx}"(8 * offset)
: "rax", "rbx", "rcx"
:)
}
println!("sp+{:03} ⇒ value {:016x}", 8 * offset, result);
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[inline(always)]
fn print_current_address() {
// https://stackoverflow.com/a/52050776
let mut result: u64;
unsafe {
asm!("leaq (%rip), %rax"
: "={rax}"(result)
:
: "rax"
:)
}
println!("rip = {:016x}", result);
}
fn sub(sub_arg: u64) -> u64 {
print_current_address();
let sub_local = 0xDEAD_C0DE;
let sub_sum = sub_arg + sub_local;
dump_stack();
sub_sum
}
fn main() {
let _main_a: u64 = 0xDEAD_BEEF;
let main_arg: u64 = 0xFEED_C0DE;
print_current_address();
let main_ret = sub(main_arg);
assert_eq!(main_ret, 0x0000_0001_DD9B_81BC);
let s = sub as *const ();
let m = main as *const ();
println!("sub = {:016p}", s);
println!("main = {:016p}", m);
print_current_address();
}
| true
960a889e2c3fe7dc7e48433b6de5e1140121be09 | Rust | sheosi/lily | /common/src/audio/playdevice.rs | UTF-8 | 2,952 | 2.875 | 3 | ["Apache-2.0"] | permissive |
use std::io::Cursor;
use std::time::Duration;
use crate::audio::{Audio, AudioRaw, Data};
use crate::vars::MAX_SAMPLES_PER_SECOND;
use ogg_opus::decode;
use rodio::{source::Source, OutputStream, OutputStreamHandle, StreamError};
use thiserror::Error;
use tokio::time::sleep;
pub struct PlayDevice {
_stream: OutputStream, // We need to preserve this
stream_handle: OutputStreamHandle,
}
#[derive(Error, Debug)]
pub enum PlayAudioError {
#[error("Failed while doing IO")]
IoErr(#[from] std::io::Error),
#[error("Failed while decoding")]
DecoderError(#[from] rodio::decoder::DecoderError),
#[error("Couldn't play audio, reason: {}", .0)]
PlayError(String),
#[error("Coudln't transform audio")]
TransformationError(#[from] ogg_opus::Error),
}
impl From<rodio::PlayError> for PlayAudioError {
fn from(err: rodio::PlayError) -> Self {
PlayAudioError::PlayError(format!("{:?}", err))
}
}
impl PlayDevice {
pub fn new() -> Result<PlayDevice, StreamError> {
let (_stream, stream_handle) = rodio::OutputStream::try_default()?;
Ok(PlayDevice {
_stream,
stream_handle,
})
}
pub fn play_file(&mut self, path: &str) -> Result<(), PlayAudioError> {
let file = std::fs::File::open(path)?;
let source = rodio::Decoder::new(std::io::BufReader::new(file))?;
self.stream_handle.play_raw(source.convert_samples())?;
Ok(())
}
pub fn play_audio(&mut self, audio: Audio) -> Result<(), PlayAudioError> {
match audio.buffer {
Data::Raw(raw_data) => {
let source = rodio::buffer::SamplesBuffer::new(
1,
AudioRaw::get_samples_per_second(),
raw_data.buffer,
);
self.stream_handle.play_raw(source.convert_samples())?;
}
Data::Encoded(enc_data) => {
if enc_data.is_ogg_opus() {
let (audio, play_data) =
decode::<_, MAX_SAMPLES_PER_SECOND>(Cursor::new(enc_data.data))?;
let source = rodio::buffer::SamplesBuffer::new(
play_data.channels,
MAX_SAMPLES_PER_SECOND,
audio,
);
self.stream_handle.play_raw(source.convert_samples())?;
} else {
let source = rodio::Decoder::new(std::io::Cursor::new(enc_data.data))?;
self.stream_handle.play_raw(source.convert_samples())?;
}
}
}
Ok(())
}
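    /// Plays the audio and then sleeps for roughly its duration, so the future only
    /// resolves once playback should have finished.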
pub async fn wait_audio(&mut self, audio: Audio) -> Result<(), PlayAudioError> {
let seconds = audio.len_s();
self.play_audio(audio)?;
let ms_wait = (seconds * 1000.0).ceil() as u64;
sleep(Duration::from_millis(ms_wait)).await;
Ok(())
}
}
| true
a8f43db27eed0d249d450723e157c3dbe88c60bc | Rust | jlricon/advent-code-2019-rust | /src/bin/day_03_part2.rs | UTF-8 | 2,054 | 3.515625 | 4 | [] | no_license |
use std::collections::HashSet;
use std::iter::FromIterator;
enum Directions {
U,
D,
R,
L,
}
struct Step {
dir: Directions,
num: u32,
}
#[derive(Debug, PartialEq, Hash, Eq)]
struct Point(i32, i32);
impl Point {
fn displace(&self, other: &Point) -> Point {
Point(self.0 + other.0, self.1 + other.1)
}
}
impl From<&str> for Step {
fn from(item: &str) -> Self {
use crate::Directions::*;
let dir = match item.chars().nth(0).unwrap() {
'D' => D,
'U' => U,
'R' => R,
'L' => L,
_ => unreachable!(),
};
let num = item[1..].parse().unwrap();
Step { dir, num }
}
}
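// Expands a wire's list of steps into every grid point it visits, starting at the
// origin, so a point's index in the returned vector equals the number of steps taken
// to reach it.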
fn get_vector(steps: Vec<Step>) -> Vec<Point> {
fn push_into_vec_n(v: &mut Vec<Point>, num: u32, point: Point) -> () {
(0..num).for_each(|_| v.push(v.last().unwrap().displace(&point)))
}
let mut v = vec![Point(0, 0)];
use Directions::*;
steps.iter().for_each(|x| match x {
Step { dir: D, num } => push_into_vec_n(&mut v, *num, Point(0, -1)),
Step { dir: U, num } => push_into_vec_n(&mut v, *num, Point(0, 1)),
Step { dir: R, num } => push_into_vec_n(&mut v, *num, Point(1, 0)),
Step { dir: L, num } => push_into_vec_n(&mut v, *num, Point(-1, 0)),
});
v
}
fn solve(input: &str) {
let points = input
.lines()
.map(|x| x.split(',').map(|x| x.into()).collect())
.map(get_vector)
.collect::<Vec<Vec<Point>>>();
let first_line: HashSet<&Point> = HashSet::from_iter(&points[0]);
// For each intersection, find the number of steps to it
let min_dist = points[1]
.iter()
.filter(|x| first_line.contains(x))
.map(|intr| {
points[0].iter().position(|x| x == intr).unwrap()
+ points[1].iter().position(|x| x == intr).unwrap()
})
.filter(|x| *x != 0)
.min();
dbg!(min_dist);
}
fn main() {
let points = include_str!("day_03_data.txt");
solve(points);
}
| true
e85c270e1cac2e3ab5b72be3c98639556ff71ef2 | Rust | bfffs/bfffs | /bfffs-core/tests/cacheable_space.rs | UTF-8 | 17,706 | 2.515625 | 3 | ["MIT", "Apache-2.0"] | permissive |
//! Measures the actual memory consumption of Cacheable implementors
//!
//! Can't use the standard test harness because we need to run single-threaded.
use bfffs_core::{
cache::{Cacheable, CacheRef},
ddml::DRP,
dml::{Compression, DML},
fs_tree::*,
idml::RidtEntry,
property::Property,
tree::*,
LbaT,
PBA,
Result,
RID,
TxgT,
writeback::{Credit, WriteBack}
};
use clap::Parser;
use divbuf::DivBufShared;
use futures::{Future, FutureExt};
use std::{
alloc::{GlobalAlloc, Layout, System},
ffi::OsString,
mem,
pin::Pin,
sync::{
atomic::{AtomicUsize, Ordering::SeqCst},
Arc,
}
};
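// A counting global allocator: it forwards every request to the system allocator while
// tracking the number of live heap bytes in ALLOCATED, which lets the test compare a
// Cacheable's real footprint against what cache_space() reports.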
struct Counter;
static ALLOCATED: AtomicUsize = AtomicUsize::new(0);
unsafe impl GlobalAlloc for Counter {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let ret = System.alloc(layout);
if !ret.is_null() {
ALLOCATED.fetch_add(layout.size(), SeqCst);
}
ret
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
System.dealloc(ptr, layout);
ALLOCATED.fetch_sub(layout.size(), SeqCst);
}
}
#[global_allocator]
static A: Counter = Counter;
/// This program will never do disk I/O, but it needs a stub DML to satisfy the
/// compiler.
struct StubDML {}
impl DML for StubDML {
type Addr = RID;
fn delete(&self, _addr: &Self::Addr, _txg: TxgT)
-> Pin<Box<dyn Future<Output=Result<()>> + Send>>
{
unimplemented!()
}
fn evict(&self, _addr: &Self::Addr)
{
unimplemented!()
}
fn get<T: Cacheable, R: CacheRef>(&self, _addr: &Self::Addr)
-> Pin<Box<dyn Future<Output=Result<Box<R>>> + Send>>
{
unimplemented!()
}
fn pop<T: Cacheable, R: CacheRef>(&self, _rid: &Self::Addr, _txg: TxgT)
-> Pin<Box<dyn Future<Output=Result<Box<T>>> + Send>>
{
unimplemented!()
}
fn put<T: Cacheable>(&self, _cacheable: T, _compression: Compression,
_txg: TxgT)
-> Pin<Box<dyn Future<Output=Result<<Self as DML>::Addr>> + Send>>
{
unimplemented!()
}
fn repay(&self, _credit: Credit)
{
unimplemented!()
}
fn sync_all(&self, _txg: TxgT)
-> Pin<Box<dyn Future<Output=Result<()>> + Send>>
{
unimplemented!()
}
}
/// Borrow enough credit for an insertion.
///
/// This test program doesn't really care about credit. Borrow enough to
/// satisfy Node's assertions.
fn borrow_credit<V: Value>(wb: &WriteBack, v: &V) -> Credit {
let want = mem::size_of::<(FSKey, V)>() + v.allocated_space();
wb.borrow(want).now_or_never().unwrap()
}
trait CacheableForgetable: Cacheable {
fn forget(self: Box<Self>) -> Credit;
}
impl<K: Key, V: Value> CacheableForgetable for Arc<Node<DRP, K, V>> {
fn forget(self: Box<Self>) -> Credit {
Credit::null()
}
}
impl CacheableForgetable for Arc<Node<RID, FSKey, FSValue>> {
fn forget(self: Box<Self>) -> Credit {
let nd = Arc::try_unwrap(*self).unwrap().try_unwrap().unwrap();
if nd.is_leaf() {
nd.into_leaf().forget()
} else {
Credit::null()
}
}
}
fn alloct_leaf(_wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = PBA::new(1, i as LbaT);
let v = RID(i as u64);
ld.insert(k, v, TxgT::from(0), &StubDML{}, Credit::null())
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<DRP, PBA, RID>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn alloct_int(_wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let txgs = TxgT::from(0)..TxgT::from(1);
let mut children = Vec::with_capacity(n);
for i in 0..n {
let addr = PBA::new(0, i as LbaT);
let k = PBA::new(1, i as LbaT);
let drp = DRP::new(addr, Compression::None, 40000, 40000, 0);
let child = IntElem::new(k, txgs.clone(), TreePtr::Addr(drp));
children.push(child);
}
let node_data = NodeData::<DRP, PBA, RID>::Int(IntData::new(children));
Box::new(Arc::new(Node::new(node_data)))
}
fn ridt_int(_wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let txgs = TxgT::from(0)..TxgT::from(1);
let mut children = Vec::with_capacity(n);
for i in 0..n {
let addr = PBA::new(0, i as LbaT);
let k = RID(i as u64);
let drp = DRP::new(addr, Compression::None, 40000, 40000, 0);
let child = IntElem::new(k, txgs.clone(), TreePtr::Addr(drp));
children.push(child);
}
let nd = NodeData::<DRP, RID, RidtEntry>::Int(IntData::new(children));
Box::new(Arc::new(Node::new(nd)))
}
fn ridt_leaf(_wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = RID(i as u64);
let addr = PBA::new(0, i as LbaT);
let drp = DRP::new(addr, Compression::None, 40000, 40000, 0);
let v = RidtEntry::new(drp);
ld.insert(k, v, TxgT::from(0), &StubDML{}, Credit::null())
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<DRP, RID, RidtEntry>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_int(_wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let txgs = TxgT::from(0)..TxgT::from(1);
let mut children = Vec::with_capacity(n);
for i in 0..n {
let addr = RID(i as u64);
let k = FSKey::new(i as u64, ObjKey::Inode);
let child = IntElem::new(k, txgs.clone(), TreePtr::Addr(addr));
children.push(child);
}
let nd = NodeData::<RID, FSKey, FSValue>::Int(IntData::new(children));
Box::new(Arc::new(Node::new(nd)))
}
fn fs_leaf_blob_extent(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable>
{
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let extent = BlobExtent {
lsize: 4096,
rid: RID(i as u64)
};
let v = FSValue::BlobExtent(extent);
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_leaf_direntry(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let dirent = Dirent {
ino: 0,
dtype: libc::DT_REG,
name: OsString::from("something_moderately_long_but_not_too_long")
};
let v = FSValue::DirEntry(dirent);
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_leaf_direntries(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let dirent0 = Dirent {
ino: i as u64,
dtype: libc::DT_REG,
name: OsString::from("something_moderately_long_but_not_too_long")
};
let dirent1 = Dirent {
ino: 10000 + i as u64,
dtype: libc::DT_REG,
name: OsString::from("something_also_pretty_long_string")
};
let v = FSValue::DirEntries(vec![dirent0, dirent1]);
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_leaf_dyinginode(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let v = FSValue::DyingInode(DyingInode::from(0));
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_leaf_extattr_blob(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let extent = BlobExtent {
lsize: 4096,
rid: RID(i as u64)
};
let blob_ext_attr = BlobExtAttr {
namespace: ExtAttrNamespace::User,
name: OsString::from("Some extended attribute stored as a blob"),
extent,
};
let extattr = ExtAttr::Blob(blob_ext_attr);
let v = FSValue::ExtAttr(extattr);
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_leaf_extattr_inline(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let dbs = DivBufShared::from(vec![42u8; 1024]);
let extent = InlineExtent::new(Arc::new(dbs));
let inline_ext_attr = InlineExtAttr {
namespace: ExtAttrNamespace::User,
name: OsString::from("Some extended attribute stored as a blob"),
extent,
};
let extattr = ExtAttr::Inline(inline_ext_attr);
let v = FSValue::ExtAttr(extattr);
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_leaf_extattrs(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let extent0 = BlobExtent {
lsize: 4096,
rid: RID(i as u64)
};
let blob_ext_attr0 = BlobExtAttr {
namespace: ExtAttrNamespace::User,
name: OsString::from("The first extended attribute"),
extent: extent0,
};
let extattr0 = ExtAttr::Blob(blob_ext_attr0);
let extent1 = BlobExtent {
lsize: 4096,
rid: RID((10000 + i) as u64)
};
let blob_ext_attr1 = BlobExtAttr {
namespace: ExtAttrNamespace::User,
name: OsString::from("The second extended attribute"),
extent: extent1,
};
let extattr1 = ExtAttr::Blob(blob_ext_attr1);
let v = FSValue::ExtAttrs(vec![extattr0, extattr1]);
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_leaf_inline_extent(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let dbs = DivBufShared::from(vec![42u8; 2048]);
let extent = InlineExtent::new(Arc::new(dbs));
let v = FSValue::InlineExtent(extent);
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_leaf_inode(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let inode = Inode {
size: 0,
bytes: 0,
nlink: 1,
flags: 0,
atime: Timespec{sec: 0, nsec: 0},
mtime: Timespec{sec: 0, nsec: 0},
ctime: Timespec{sec: 0, nsec: 0},
birthtime: Timespec{sec: 0, nsec: 0},
uid: 0,
gid: 0,
perm: 0o644,
file_type: FileType::Reg(17)
};
let v = FSValue::inode(inode);
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
fn fs_leaf_property(wb: &WriteBack, n: usize) -> Box<dyn CacheableForgetable> {
let mut ld = LeafData::default();
for i in 0..n {
let k = FSKey::new(i as u64, ObjKey::Inode);
let v = FSValue::Property(Property::RecordSize(17));
let credit = borrow_credit(wb, &v);
ld.insert(k, v, TxgT::from(0), &StubDML{}, credit)
.now_or_never()
.unwrap()
.0.unwrap();
}
let node_data = NodeData::<RID, FSKey, FSValue>::Leaf(ld);
Box::new(Arc::new(Node::new(node_data)))
}
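// Yields 8 geometrically spaced sizes between `min` and `max`. For example
// (illustrative), logrange(100, 800) grows by a factor of (800/100)^(1/8), roughly
// 1.30, giving approximately 130, 168, 218, 283, 367, 476, 617, 800.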
fn logrange(min: usize, max: usize) -> impl Iterator<Item=usize> {
let minf = min as f64;
let grange = (max as f64) / minf;
let mult = grange.powf(1.0/8f64);
(1..=8).map(move |exp| ((minf * mult.powf(exp as f64)).round()) as usize)
}
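// Builds one node with `n` entries using `f`, measures its real heap footprint with the
// counting allocator, and compares that against cache_space(); the check passes when
// the calculated size is within 5% of the measured one.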
fn measure(name: &str, pos: &str, n: usize, verbose: bool,
f: fn(&WriteBack, usize) -> Box<dyn CacheableForgetable>) -> bool
{
let wb = WriteBack::limitless();
let before = ALLOCATED.load(SeqCst);
let c = f(&wb, n);
let after = ALLOCATED.load(SeqCst);
let actual = after - before;
let calc = c.cache_space();
wb.repay(c.forget());
let err = 100.0 * (calc as f64) / (actual as f64) - 100.0;
if verbose {
println!("{name:>8}{pos:>22}{n:>8}{actual:>12}{calc:>12}{err:>12.2}%");
}
err.abs() <= 5.0
}
#[derive(Parser, Clone, Debug)]
struct Cli {
/// Must be present when specifying a test case name
#[clap(long = "exact")]
exact: bool,
/// Ignored. For compatibility purposes only
#[clap(long = "format")]
format: Option<String>,
/// Run only ignored tests
#[clap(long = "ignored")]
ignored: bool,
/// List all tests and benchmarks
#[clap(long = "list")]
list: bool,
/// Print detailed test output
#[clap(long = "nocapture")]
verbose: bool,
testcase: Option<String>
}
fn cacheable_space(verbose: bool) {
let mut pass = true;
if verbose {
println!("{:>8}{:>22}{:>8}{:>12}{:>12}{:>12}", "Table", "Position", "N",
"Actual size", "Calculated", "Error");
}
for n in logrange(109, 433) {
pass &= measure("AllocT", "Int", n, verbose, alloct_int);
}
for n in logrange(134, 535) {
pass &= measure("AllocT", "Leaf", n, verbose, alloct_leaf);
}
for n in logrange(98, 389) {
pass &= measure("RIDT", "Int", n, verbose, ridt_int);
}
for n in logrange(114, 454) {
pass &= measure("RIDT", "Leaf", n, verbose, ridt_leaf);
}
for n in logrange(91, 364) {
pass &= measure("FS", "Int", n, verbose, fs_int);
}
for n in logrange(576, 2302) {
measure("FS", "Leaf (Blob Extent)", n, verbose, fs_leaf_blob_extent);
}
for n in logrange(576, 2302) {
pass &= measure("FS", "Leaf (DirEntry)", n, verbose, fs_leaf_direntry);
}
for n in logrange(576, 2302) {
pass &= measure("FS", "Leaf (DirEntries)", n, verbose,
fs_leaf_direntries);
}
for n in logrange(576, 2302) {
pass &= measure("FS", "Leaf (Dying Inode)", n, verbose,
fs_leaf_dyinginode);
}
for n in logrange(576, 2302) {
pass &= measure("FS", "Leaf (Blob Extattr)", n, verbose,
fs_leaf_extattr_blob);
}
for n in logrange(576, 2302) {
pass &= measure("FS", "Leaf (Inline Extattr)", n, verbose,
fs_leaf_extattr_inline);
}
for n in logrange(576, 2302) {
pass &= measure("FS", "Leaf (Extattrs)", n, verbose, fs_leaf_extattrs);
}
for n in logrange(576, 2302) {
pass &= measure("FS", "Leaf (Inline Extent)", n, verbose,
fs_leaf_inline_extent);
}
for n in logrange(576, 2302) {
pass &= measure("FS", "Leaf (Inode)", n, verbose, fs_leaf_inode);
}
for n in logrange(576, 2302) {
pass &= measure("FS", "Leaf (Property)", n, verbose, fs_leaf_property);
}
if !pass {
panic!("Calculated size out of tolerance in at least one case");
}
}
fn main() {
const TCNAME: &str = "cacheable_space::cacheable_space";
let cli = Cli::parse();
if cli.list {
        if !cli.ignored {
println!("{TCNAME}: test");
}
return;
}
if let Some(tc) = cli.testcase {
if cli.exact && tc != TCNAME || !TCNAME.contains(&tc) {
return;
}
}
cacheable_space(cli.verbose);
}
| true
c2aff606cb7e660eddb2e3f105bc64e86fb0f059 | Rust | errord/weld | /weld/src/sir/optimizations/simplify_assignments.rs | UTF-8 | 4,339 | 3.484375 | 3 | ["BSD-3-Clause"] | permissive |
//! An SIR pass that removes unnecessary assignments.
//!
//! This pass replaces assignment expressions that assign the same source to a particular target in
//! each basic block with the source directly. For example:
//!
//! ```sir
//! B1:
//! fn1_tmp__0 = x
//! jump B3
//! B2:
//! fn1_tmp__0 = x
//! jump B3
//! B3:
//! return fn1_tmp__0
//! ```
//!
//! will become:
//!
//! ```sir
//! B1:
//! jump B3
//! B2:
//! jump B3
//! B3:
//! return x
//! ```
use fnv;
use crate::sir::StatementKind::{Assign, AssignLiteral};
use crate::sir::*;
/// Applies the simplifying assignments pass.
pub fn simplify_assignments(prog: &mut SirProgram) -> WeldResult<()> {
for func in prog.funcs.iter_mut() {
simplify_assignments_in_function(func)?
}
Ok(())
}
fn simplify_assignments_in_function(func: &mut SirFunction) -> WeldResult<()> {
// XXX This is a hack! Currently, the code gen assumes that the symbol names in a for loop body
// will match the symbol names for the same data in the calling function (e.g., if the vector
// to loop over is called `data1` in the calling function, it must be called `data1` in the
// loop body). If this pass deletes `data1` because of a redundant assignment, it will cause
// an error when compiling the loop body function unless this pass becomes interprocedural. It
// will be better to just simplify the SIR so that the loop body functions are self contained
// and don't make any assumptions about symbol names -- they should just refer to their own
// parameters instead.
//
// For now, we circumvent deleting these symbols by just applying this optimization to
// functions that are loop bodies and are innermost loops (i.e., are guaranteed to not call
// another for loop).
if !func.loop_body || !func.innermost_loop {
return Ok(());
}
// Assignments that are redundant and should be deleted.
let mut assignments = fnv::FnvHashMap::default();
// Valid assignments where the target variable is assigned different source variables in
// different basic blocks.
let mut validset = fnv::FnvHashSet::default();
// Find the assignments that should be simplified by checking whether each assignment is
// assigned different values in different basic blocks.
for block in func.blocks.iter() {
for statement in block.statements.iter() {
if let Assign(ref source) = statement.kind {
let target = statement.output.as_ref().unwrap();
// We've already seen this target and shown that it is valid.
if validset.contains(target) {
continue;
}
// The assignment is valid if different basic blocks assign the target different
// values.
let is_valid = match assignments.get(target) {
Some(ref current_source) => *current_source != source,
None => {
                        // We haven't seen an assignment to this target before.
assignments.insert(target.clone(), source.clone());
false
}
};
// Move to validset if the assignment is valid.
if is_valid {
validset.insert(assignments.remove(target).unwrap());
}
}
// Always treat assignments to a literal as valid.
if let AssignLiteral(_) = statement.kind {
validset.insert(statement.output.clone().unwrap());
// Remove the target in case it was added before.
assignments.remove(statement.output.as_ref().unwrap());
}
}
}
    // Delete assignments that are still in the assignments map.
for block in func.blocks.iter_mut() {
block.statements.retain(|ref statement| {
if let Assign(_) = statement.kind {
let target = statement.output.as_ref().unwrap();
!assignments.contains_key(target)
} else {
true
}
});
// Perform the substitution.
for (key, value) in assignments.iter() {
block.substitute_symbol(key, value);
}
}
Ok(())
}
| true
bbd4feb063fbe62d7eb4179e7cdf434ed664c87e | Rust | mikeyhc/blog_os | /src/memory/mod.rs | UTF-8 | 4,295 | 2.671875 | 3 | ["MIT"] | permissive |
pub use self::area_frame_allocator::AreaFrameAllocator;
use self::paging::{PhysicalAddress, Page};
pub use self::paging::test_paging;
pub use self::paging::remap_the_kernel;
use multiboot2::BootInformation;
use hole_list_allocator::{HEAP_START, HEAP_SIZE};
pub use self::stack_allocator::Stack;
mod area_frame_allocator;
mod paging;
mod stack_allocator;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
number: usize,
}
const PAGE_SIZE: usize = 4096;
impl Frame {
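    // Frames are identified by their index in physical memory: with 4096-byte frames,
    // e.g. physical address 0x1000 belongs to frame number 1.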
fn containing_address(address: usize) -> Frame {
Frame{ number: address / PAGE_SIZE }
}
fn start_address(&self) -> PhysicalAddress {
self.number * PAGE_SIZE
}
fn clone(&self) -> Frame {
Frame { number: self.number }
}
fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
FrameIter {
start: start,
end: end,
}
}
}
struct FrameIter {
start: Frame,
end: Frame,
}
impl Iterator for FrameIter {
type Item = Frame;
fn next(&mut self) -> Option<Frame> {
if self.start <= self.end {
let frame = self.start.clone();
self.start.number += 1;
Some(frame)
} else {
None
}
}
}
pub trait FrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame>;
fn deallocate_frame(&mut self, frame: Frame);
}
pub fn init(boot_info: &BootInformation) -> MemoryController {
assert_has_not_been_called!("memory::init must be called only once");
let memory_map_tag = boot_info.memory_map_tag()
.expect("Memory map tag required");
let elf_sections_tag = boot_info.elf_sections_tag()
.expect("Elf sections tag required");
let kernel_start = elf_sections_tag.sections()
.filter(|s| s.is_allocated())
.map(|s| s.addr)
.min()
.unwrap();
let kernel_end = elf_sections_tag.sections()
.filter(|s| s.is_allocated())
.map(|s| s.addr + s.size)
.max()
.unwrap();
println!("kernel start: {:#x}, kernel end: {:#x}",
kernel_start, kernel_end);
println!("multiboot start: {:#x}, multiboot end: {:#x}",
boot_info.start_address(), boot_info.end_address());
let mut frame_allocator = AreaFrameAllocator::new(
kernel_start as usize,
kernel_end as usize,
boot_info.start_address(),
boot_info.end_address(),
memory_map_tag.memory_areas());
let mut active_table = paging::remap_the_kernel(&mut frame_allocator,
boot_info);
let heap_start_page = Page::containing_address(HEAP_START);
let heap_end_page = Page::containing_address(HEAP_START + HEAP_SIZE - 1);
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
active_table.map(page, paging::WRITABLE, &mut frame_allocator);
}
let stack_allocator = {
let stack_alloc_start = heap_end_page + 1;
let stack_alloc_end = stack_alloc_start + 100;
let stack_alloc_range = Page::range_inclusive(stack_alloc_start,
stack_alloc_end);
stack_allocator::StackAllocator::new(stack_alloc_range)
};
MemoryController {
active_table: active_table,
frame_allocator: frame_allocator,
stack_allocator: stack_allocator,
}
}
pub struct MemoryController {
active_table: paging::ActivePageTable,
frame_allocator: AreaFrameAllocator,
stack_allocator: stack_allocator::StackAllocator,
}
impl MemoryController {
pub fn alloc_stack(&mut self, size_in_pages: usize) -> Option<Stack> {
let &mut MemoryController { ref mut active_table,
ref mut frame_allocator,
ref mut stack_allocator } = self;
stack_allocator.allocate_stack(active_table, frame_allocator,
size_in_pages)
}
}
| true
2c50cd2f4356c27676a64689aec54d834c84e341 | Rust | gavinzheng/rust-threshold-secret-sharing | /src/numtheory/fft.rs | UTF-8 | 8,702 | 2.6875 | 3 | ["MIT", "Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] | permissive |
// Copyright (c) 2016 rust-threshold-secret-sharing developers
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! This module implements in-place 2-radix and 3-radix numeric theory
//! transformations (FFT on modular fields) by in-place Cooley-Tukey algorithms.
use fields::Field;
use fields::Encode;
/// 2-radix FFT.
///
/// * zp is the modular field
/// * data is the data to transform
/// * omega is the root-of-unity to use
///
/// `data.len()` must be a power of 2. omega must be a root of unity of order
/// `data.len()`
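///
/// For example (mirroring `test_fft2` in the test module below): in Z_433, where 354
/// is an 8th root of unity, transforming `[1, 2, 3, 4, 5, 6, 7, 8]` in place yields
/// `[36, 303, 146, 3, 429, 422, 279, 122]`.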
pub fn fft2<F>(zp: &F, data: &mut [F::E], omega: &F::E)
where F: Field, F::E: Clone
{
fft2_in_place_rearrange(zp, &mut *data);
fft2_in_place_compute(zp, &mut *data, omega);
}
/// 2-radix inverse FFT.
///
/// * zp is the modular field
/// * data is the data to transform
/// * omega is the root-of-unity to use
///
/// `data.len()` must be a power of 2. omega must be a root of unity of order
/// `data.len()`
pub fn fft2_inverse<F>(zp: &F, data: &mut [F::E], omega: &F::E)
where F: Field + Encode<u32>, F::E: Clone
{
let omega_inv = zp.inv(omega);
let len = data.len();
let len_inv = zp.inv(zp.encode(len as u32));
fft2(zp, data, &omega_inv);
for mut x in data {
*x = zp.mul(&*x, &len_inv);
}
}
fn fft2_in_place_rearrange<F>(_zp: &F, data: &mut [F::E])
where F: Field
{
let mut target = 0;
for pos in 0..data.len() {
if target > pos {
data.swap(target, pos)
}
let mut mask = data.len() >> 1;
while target & mask != 0 {
target &= !mask;
mask >>= 1;
}
target |= mask;
}
}
fn fft2_in_place_compute<F>(zp: &F, data: &mut [F::E], omega: &F::E)
where F: Field, F::E: Clone
{
let mut depth = 0usize;
while 1usize << depth < data.len() {
let step = 1usize << depth;
let jump = 2 * step;
let factor_stride = zp.pow(omega, (data.len() / step / 2) as u32);
let mut factor = zp.one();
for group in 0usize..step {
let mut pair = group;
while pair < data.len() {
let x = data[pair].clone();
let y = zp.mul(&data[pair + step], &factor);
data[pair] = zp.add(&x, &y);
data[pair + step] = zp.sub(&x, &y);
pair += jump;
}
factor = zp.mul(factor, &factor_stride);
}
depth += 1;
}
}
/// 3-radix FFT.
///
/// * zp is the modular field
/// * data is the data to transform
/// * omega is the root-of-unity to use
///
/// `data.len()` must be a power of 3. omega must be a root of unity of order
/// `data.len()`
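///
/// For example (mirroring `test_fft3` in the test module below): in Z_433, where 150
/// is a 9th root of unity, transforming `[1, 2, 3, 4, 5, 6, 7, 8, 9]` in place yields
/// `[45, 404, 407, 266, 377, 47, 158, 17, 20]`.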
pub fn fft3<F>(zp: &F, data: &mut [F::E], omega: &F::E)
where F: Field, F::E: Clone
{
fft3_in_place_rearrange(zp, &mut *data);
fft3_in_place_compute(zp, &mut *data, omega);
}
/// 3-radix inverse FFT.
///
/// * zp is the modular field
/// * data is the data to transform
/// * omega is the root-of-unity to use
///
/// `data.len()` must be a power of 3. omega must be a root of unity of order
/// `data.len()`
pub fn fft3_inverse<F>(zp: &F, data: &mut [F::E], omega: &F::E)
where F: Field + Encode<u32>, F::E: Clone
{
let omega_inv = zp.inv(omega);
let len_inv = zp.inv(zp.encode(data.len() as u32));
fft3(zp, data, &omega_inv);
for mut x in data {
*x = zp.mul(&*x, &len_inv);
}
}
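// Number of base-3 digits ("trigits") needed to count up to `n`; e.g. (illustrative)
// trigits_len(8) == 2 since 3^2 = 9 > 8, while trigits_len(9) == 3.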
fn trigits_len(n: usize) -> usize {
let mut result = 1;
let mut value = 3;
while value < n + 1 {
result += 1;
value *= 3;
}
result
}
fn fft3_in_place_rearrange<F: Field>(_zp: &F, data: &mut [F::E]) {
let mut target = 0isize;
let trigits_len = trigits_len(data.len() - 1);
let mut trigits: Vec<u8> = ::std::iter::repeat(0).take(trigits_len).collect();
let powers: Vec<isize> = (0..trigits_len).map(|x| 3isize.pow(x as u32)).rev().collect();
for pos in 0..data.len() {
if target as usize > pos {
data.swap(target as usize, pos)
}
for pow in 0..trigits_len {
if trigits[pow] < 2 {
trigits[pow] += 1;
target += powers[pow];
break;
} else {
trigits[pow] = 0;
target -= 2 * powers[pow];
}
}
}
}
fn fft3_in_place_compute<F>(zp: &F, data: &mut [F::E], omega: &F::E)
where F: Field, F::E: Clone
{
let mut step = 1;
let big_omega = zp.pow(omega, (data.len() as u32 / 3));
let big_omega_sq = zp.mul(&big_omega, &big_omega);
while step < data.len() {
let jump = 3 * step;
let factor_stride = zp.pow(omega, (data.len() / step / 3) as u32);
let mut factor = zp.one();
for group in 0usize..step {
let factor_sq = zp.mul(&factor, &factor);
let mut pair = group;
while pair < data.len() {
let x = data[pair].clone();
let y = zp.mul(&data[pair + step], &factor);
let z = zp.mul(&data[pair + 2 * step], &factor_sq);
data[pair] = zp.add(zp.add(&x, &y), &z);
data[pair + step] = zp.add(zp.add(&x, zp.mul(&big_omega, &y)), zp.mul(&big_omega_sq, &z));
data[pair + 2 * step] = zp.add(zp.add(&x, zp.mul(&big_omega_sq, &y)), zp.mul(&big_omega, &z));
pair += jump;
}
factor = zp.mul(&factor, &factor_stride);
}
step = jump;
}
}
#[cfg(test)]
pub mod test {
use super::*;
use ::fields::{PrimeField, New, Encode, Decode, SliceEncode, SliceDecode};
pub fn test_fft2<F>()
where F: PrimeField + New<u32> + Encode<u32> + Decode<u32>, F::E: Clone, F::P: From<u32>
{
// field is Z_433 in which 354 is an 8th root of unity
let field = F::new(433);
let omega = field.encode(354);
let mut data = field.encode_slice([1, 2, 3, 4, 5, 6, 7, 8]);
fft2(&field, &mut data, &omega);
assert_eq!(field.decode_slice(data), [36, 303, 146, 3, 429, 422, 279, 122]);
}
pub fn test_fft2_inverse<F>()
where F: PrimeField + New<u32> + Encode<u32> + Decode<u32>, F::E: Clone, F::P: From<u32>
{
// field is Z_433 in which 354 is an 8th root of unity
let field = F::new(433);
let omega = field.encode(354);
let mut data = field.encode_slice([36, 303, 146, 3, 429, 422, 279, 122]);
fft2_inverse(&field, &mut *data, &omega);
assert_eq!(field.decode_slice(data), [1, 2, 3, 4, 5, 6, 7, 8])
}
pub fn test_fft2_big<F>()
where F: PrimeField + New<u32> + Encode<u32> + Decode<u32>, F::E: Clone, F::P: From<u32>
{
let field = F::new(5038849);
let omega = field.encode(4318906);
let mut data: Vec<_> = (0..256).map(|a| field.encode(a)).collect();
fft2(&field, &mut *data, &omega);
fft2_inverse(&field, &mut data, &omega);
assert_eq!(field.decode_slice(data), (0..256).collect::<Vec<_>>());
}
pub fn test_fft3<F>()
where F: PrimeField + New<u32> + Encode<u32> + Decode<u32>, F::E: Clone, F::P: From<u32>
{
// field is Z_433 in which 150 is an 9th root of unity
let field = F::new(433);
let omega = field.encode(150);
let mut data = field.encode_slice([1, 2, 3, 4, 5, 6, 7, 8, 9]);
fft3(&field, &mut data, &omega);
assert_eq!(field.decode_slice(data), [45, 404, 407, 266, 377, 47, 158, 17, 20]);
}
pub fn test_fft3_inverse<F>()
where F: PrimeField + New<u32> + Encode<u32> + Decode<u32>, F::E: Clone, F::P: From<u32>
{
// field is Z_433 in which 150 is an 9th root of unity
let field = F::new(433);
let omega = field.encode(150);
let mut data = field.encode_slice([45, 404, 407, 266, 377, 47, 158, 17, 20]);
fft3_inverse(&field, &mut *data, &omega);
assert_eq!(field.decode_slice(data), [1, 2, 3, 4, 5, 6, 7, 8, 9])
}
pub fn test_fft3_big<F>()
where F: PrimeField + New<u32> + Encode<u32> + Decode<u32>, F::E: Clone, F::P: From<u32>
{
let field = F::new(5038849);
let omega = field.encode(1814687);
let mut data: Vec<_> = (0..19683).map(|a| field.encode(a)).collect();
fft3(&field, &mut data, &omega);
fft3_inverse(&field, &mut data, &omega);
assert_eq!(field.decode_slice(data), (0..19683).collect::<Vec<_>>());
}
}
| true
62704addc26bd89e01a15d8bc188aed7eaff20fb | Rust | kumo86/zemeroth | /zgui/src/lib.rs | UTF-8 | 29,570 | 2.765625 | 3 | ["Apache-2.0", "MIT"] | permissive |
//! Tiny and opinionated GUI.
use std::{
cell::RefCell,
error::Error as StdError,
fmt::{self, Debug},
rc::Rc,
sync::mpsc::{channel, Receiver, Sender},
};
use gwg::{
graphics::{self, Color, Drawable, Point2, Rect, Vector2},
Context, GameError, GameResult,
};
use log::{info, trace};
pub const SPRITE_COLOR: Color = graphics::BLACK;
pub const SPRITE_COLOR_INACTIVE: Color = Color::new(0.4, 0.4, 0.4, 0.5);
pub const SPRITE_COLOR_BG: Color = Color::new(0.8, 0.8, 0.8, 0.5);
pub const SPRITE_COLOR_BG_HIGHLIGHTED: Color = Color::new(0.9, 0.9, 0.9, 1.0);
pub const SPRITE_COLOR_BUTTON_BORDER: Color = Color::new(1.0, 0.0, 0.0, 0.9);
// TODO: What should we do if some widget changes its size?
// TODO: Add ScrollArea widget
pub type Result<T = ()> = std::result::Result<T, Error>;
#[derive(Debug)]
pub enum Error {
GwgError(GameError),
BadBorderCoefficient,
BadContentCoefficient,
NoDimensions,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::GwgError(ref e) => write!(f, "gwg Error: {}", e),
Error::BadBorderCoefficient => write!(f, "Border size is too large"),
Error::BadContentCoefficient => write!(f, "Content size is too large"),
Error::NoDimensions => write!(f, "The drawable has no dimensions"),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match *self {
Error::GwgError(ref e) => Some(e),
Error::BadBorderCoefficient | Error::BadContentCoefficient | Error::NoDimensions => {
None
}
}
}
}
impl From<GameError> for Error {
fn from(e: GameError) -> Self {
Error::GwgError(e)
}
}
fn quad_to_tris<T: Copy>(v: [T; 4]) -> [T; 6] {
[v[0], v[1], v[2], v[0], v[2], v[3]]
}
pub fn pack<W: Widget + 'static>(widget: W) -> RcWidget {
Rc::new(RefCell::new(widget))
}
struct Sprite {
drawable: Box<dyn Drawable>,
dimensions: Rect,
basic_scale: f32,
param: graphics::DrawParam,
}
impl Debug for Sprite {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SpriteData")
.field("drawable", &format_args!("{:p}", self.drawable))
.field("dimensions", &self.dimensions)
.field("basic_scale", &self.basic_scale)
.field("param", &self.param)
.finish()
}
}
impl Sprite {
fn new(context: &mut Context, drawable: Box<dyn Drawable>, height: f32) -> Result<Self> {
let dimensions = match drawable.dimensions(context) {
Some(dimensions) => dimensions,
None => return Err(Error::NoDimensions),
};
let basic_scale = height / dimensions.h;
let param = graphics::DrawParam {
scale: [basic_scale, basic_scale].into(),
color: SPRITE_COLOR,
..Default::default()
};
Ok(Self {
drawable,
dimensions,
param,
basic_scale,
})
}
fn draw(&self, context: &mut Context) -> GameResult<()> {
self.drawable.draw(context, self.param)
}
fn rect(&self) -> Rect {
let w = self.dimensions.w;
let h = self.dimensions.h;
// TODO: Transform Drawable's dimensions
Rect {
x: self.param.dest.x,
y: self.param.dest.y,
w: w * self.param.scale.x,
h: h * self.param.scale.y,
}
}
fn set_color(&mut self, color: Color) {
self.param.color = color;
}
fn set_pos(&mut self, pos: Point2) {
self.param.dest = pos.into();
}
}
fn make_bg(context: &mut Context, rect: Rect) -> Result<Sprite> {
make_rect(context, rect, SPRITE_COLOR_BG)
}
fn make_rect(context: &mut Context, rect: Rect, color: Color) -> Result<Sprite> {
let mode = graphics::DrawMode::fill();
let white = [1.0, 1.0, 1.0, 1.0].into();
let mesh = graphics::Mesh::new_rectangle(context, mode, rect, white)?;
let mut sprite = Sprite::new(context, Box::new(mesh), rect.h)?;
sprite.set_color(color);
Ok(sprite)
}
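/// Converts a window-space position (pixels) into the GUI's screen coordinate system,
/// which spans [-aspect_ratio, aspect_ratio] horizontally and [-1, 1] vertically.
/// For example (illustrative numbers only), in an 800x600 window the centre (400, 300)
/// maps to (0.0, 0.0) and the bottom-right corner (800, 600) maps to (4/3, 1.0).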
pub fn window_to_screen(context: &Context, pos: Point2) -> Point2 {
let (w, h) = graphics::drawable_size(context);
let w = w as f32;
let h = h as f32;
let aspect_ratio = w / h;
Point2::new(
(2.0 * pos.x / w - 1.0) * aspect_ratio,
2.0 * pos.y / h - 1.0,
)
}
#[derive(Clone, Copy, Debug)]
pub enum VAnchor {
Top,
Middle,
Bottom,
}
#[derive(Clone, Copy, Debug)]
pub enum HAnchor {
Left,
Middle,
Right,
}
// TODO: Use some kind of slots? There's no point in having two panes in the same corner.
#[derive(Clone, Copy, Debug)]
pub struct Anchor(pub HAnchor, pub VAnchor);
#[derive(Clone, Copy, Debug)]
pub enum StretchStatus {
Stretched,
AlreadyWider,
Unstretchable,
}
pub trait Widget: Debug {
fn draw(&self, _: &mut Context) -> GameResult<()>;
fn click(&self, _: Point2) {}
fn move_mouse(&mut self, _: Point2) {}
fn rect(&self) -> Rect;
fn set_pos(&mut self, pos: Point2);
fn can_stretch(&self) -> bool {
false
}
fn stretch(&mut self, _: &mut Context, _width: f32) -> Result<StretchStatus> {
// The default impl assumes the widget can't stretch.
assert!(!self.can_stretch());
Ok(StretchStatus::Unstretchable)
}
fn stretch_to_self(&mut self, context: &mut Context) -> Result<StretchStatus> {
let w = self.rect().w;
self.stretch(context, w)
}
}
fn stretch_checks(widget: &impl Widget, width: f32) -> Option<StretchStatus> {
if !widget.can_stretch() {
return Some(StretchStatus::Unstretchable);
}
if widget.rect().w > width {
return Some(StretchStatus::AlreadyWider);
}
None
}
pub type RcWidget = Rc<RefCell<dyn Widget>>;
#[derive(Debug)]
pub struct AnchoredWidget {
widget: RcWidget,
anchor: Anchor,
}
#[derive(Debug)]
pub struct Gui<Message: Clone> {
aspect_ratio: f32,
anchored_widgets: Vec<AnchoredWidget>,
receiver: Receiver<Message>,
sender: Sender<Message>,
}
impl<Message: Clone> Gui<Message> {
pub fn new(context: &Context) -> Self {
let (w, h) = graphics::drawable_size(context);
let aspect_ratio = w as f32 / h as f32;
trace!("Gui: aspect_ratio: {}", aspect_ratio);
let (sender, receiver) = channel();
Self {
anchored_widgets: Vec::new(),
receiver,
sender,
aspect_ratio,
}
}
/// Returns a clone of sender
pub fn sender(&self) -> Sender<Message> {
self.sender.clone()
}
pub fn add(&mut self, widget: &RcWidget, anchor: Anchor) {
let widget = widget.clone();
let anchored_widget = AnchoredWidget { widget, anchor };
self.anchored_widgets.push(anchored_widget);
let ratio = self.aspect_ratio;
self.resize(ratio);
}
pub fn remove(&mut self, widget: &RcWidget) -> GameResult<()> {
let len_before = self.anchored_widgets.len();
self.anchored_widgets
.retain(|w| !Rc::ptr_eq(&w.widget, widget));
let len_after = self.anchored_widgets.len();
info!("len_before={}, len_after={}", len_before, len_after);
if len_after != len_before - 1 {
panic!("Can't remove the widget");
}
Ok(())
}
pub fn draw(&self, context: &mut Context) -> GameResult<()> {
let old_coordinates = graphics::screen_coordinates(context);
let ui_coordinates = Rect::new(-self.aspect_ratio, -1.0, self.aspect_ratio * 2.0, 2.0);
graphics::set_screen_coordinates(context, ui_coordinates)?;
for AnchoredWidget { widget, .. } in &self.anchored_widgets {
widget.borrow().draw(context)?;
}
graphics::set_screen_coordinates(context, old_coordinates)?;
Ok(())
}
pub fn click(&mut self, pos: Point2) -> Option<Message> {
for AnchoredWidget { widget, .. } in &self.anchored_widgets {
widget.borrow_mut().click(pos);
}
self.receiver.try_recv().ok()
}
pub fn move_mouse(&mut self, pos: Point2) {
for AnchoredWidget { widget, .. } in &self.anchored_widgets {
widget.borrow_mut().move_mouse(pos);
}
}
pub fn resize(&mut self, ratio: f32) {
self.aspect_ratio = ratio;
trace!("Gui::resize: {}", ratio);
let offset = 0.02; // TODO: make configurable
for AnchoredWidget { widget, anchor } in &mut self.anchored_widgets {
let mut widget = widget.borrow_mut();
let rect = widget.rect();
let mut pos = rect.point();
match anchor.0 {
HAnchor::Left => pos.x = (-ratio) + offset,
HAnchor::Middle => pos.x = -rect.w / 2.0,
HAnchor::Right => pos.x = (ratio - rect.w) - offset,
}
match anchor.1 {
VAnchor::Top => pos.y = (-1.0) + offset,
VAnchor::Middle => pos.y = -rect.h / 2.0,
VAnchor::Bottom => pos.y = (1.0 - rect.h) - offset,
}
widget.set_pos(pos.into());
}
}
}
#[derive(Debug, Clone)]
pub struct LabelParam {
/// Percentage of the drawable's size.
pub drawable_k: f32,
pub bg: bool,
pub is_stretchable: bool,
}
impl Default for LabelParam {
fn default() -> Self {
LabelParam {
drawable_k: 0.8,
bg: false,
is_stretchable: false,
}
}
}
impl LabelParam {
pub fn check(&self) -> Result {
if self.drawable_k < 0.0 || self.drawable_k > 1.0 {
return Err(Error::BadContentCoefficient);
}
Ok(())
}
}
#[derive(Debug)]
pub struct Label {
sprite: Sprite,
bg: Option<Sprite>,
param: LabelParam,
rect: Rect,
height: f32,
}
impl Label {
pub fn new_with_bg(
context: &mut Context,
drawable: Box<dyn Drawable>,
height: f32,
) -> Result<Self> {
let param = LabelParam {
bg: true,
..LabelParam::default()
};
Self::from_params(context, drawable, height, param)
}
pub fn new(context: &mut Context, drawable: Box<dyn Drawable>, height: f32) -> Result<Self> {
let param = LabelParam::default();
Self::from_params(context, drawable, height, param)
}
pub fn from_params(
context: &mut Context,
drawable: Box<dyn Drawable>,
height: f32,
param: LabelParam,
) -> Result<Self> {
param.check()?;
let sprite = Sprite::new(context, drawable, height * param.drawable_k)?;
let rect = Rect {
w: sprite.rect().w,
h: sprite.rect().h / param.drawable_k,
..Default::default()
};
let bg = if param.bg {
Some(make_bg(context, rect)?)
} else {
None
};
Ok(Self {
sprite,
bg,
param,
height,
rect,
})
}
pub fn stretchable(mut self, value: bool) -> Self {
self.set_stretchable(value);
self
}
pub fn set_stretchable(&mut self, value: bool) {
self.param.is_stretchable = value;
}
pub fn with_color(mut self, color: Color) -> Self {
self.set_color(color);
self
}
pub fn set_color(&mut self, color: Color) {
self.sprite.param.color = color;
}
}
impl Widget for Label {
fn draw(&self, context: &mut Context) -> GameResult<()> {
if let Some(ref bg) = self.bg {
bg.draw(context)?;
}
self.sprite.draw(context)
}
fn rect(&self) -> Rect {
self.rect
}
fn set_pos(&mut self, pos: Point2) {
let h = (1.0 - self.param.drawable_k) * self.height;
let w = self.rect.w - self.sprite.rect().w;
self.sprite.set_pos(pos + Vector2::new(w, h) * 0.5);
if let Some(ref mut bg) = &mut self.bg {
bg.set_pos(pos);
}
self.rect.move_to(pos);
}
fn can_stretch(&self) -> bool {
self.param.is_stretchable
}
fn stretch(&mut self, context: &mut Context, width: f32) -> Result<StretchStatus> {
if let Some(status) = stretch_checks(self, width) {
return Ok(status);
}
let pos: Point2 = self.rect().point().into();
let rect = Rect {
w: width,
h: self.rect.h,
..Default::default()
};
self.rect = rect;
if self.param.bg {
self.bg = Some(make_bg(context, rect)?);
}
self.set_pos(pos);
Ok(StretchStatus::Stretched)
}
}
#[derive(Debug)]
pub struct ColoredRect {
sprite: Sprite,
color: Color,
is_stretchable: bool,
}
impl ColoredRect {
pub fn new(context: &mut Context, color: Color, rect: Rect) -> Result<Self> {
Ok(Self {
sprite: make_rect(context, rect, color)?,
color,
is_stretchable: false,
})
}
pub fn stretchable(mut self, value: bool) -> Self {
self.set_stretchable(value);
self
}
pub fn set_stretchable(&mut self, value: bool) {
self.is_stretchable = value;
}
}
impl Widget for ColoredRect {
fn draw(&self, context: &mut Context) -> GameResult<()> {
self.sprite.draw(context)
}
fn rect(&self) -> Rect {
self.sprite.rect()
}
fn set_pos(&mut self, pos: Point2) {
self.sprite.set_pos(pos);
}
fn can_stretch(&self) -> bool {
self.is_stretchable
}
fn stretch(&mut self, context: &mut Context, width: f32) -> Result<StretchStatus> {
if let Some(status) = stretch_checks(self, width) {
return Ok(status);
}
let pos: Point2 = self.rect().point().into();
let rect = Rect {
w: width,
h: self.rect().h,
..Default::default()
};
self.sprite = make_rect(context, rect, self.color)?;
self.set_pos(pos);
Ok(StretchStatus::Stretched)
}
}
#[derive(Debug)]
pub struct Spacer {
rect: Rect,
is_stretchable: bool,
}
impl Spacer {
pub fn new(rect: Rect) -> Self {
Self {
rect,
is_stretchable: false,
}
}
pub fn new_vertical(h: f32) -> Self {
let rect = Rect {
h,
..Default::default()
};
Self {
rect,
is_stretchable: false,
}
}
pub fn new_horizontal(w: f32) -> Self {
let rect = Rect {
w,
..Default::default()
};
Self {
rect,
is_stretchable: false,
}
}
pub fn stretchable(mut self, value: bool) -> Self {
self.set_stretchable(value);
self
}
pub fn set_stretchable(&mut self, value: bool) {
self.is_stretchable = value;
}
}
impl Widget for Spacer {
fn draw(&self, _: &mut Context) -> GameResult<()> {
Ok(())
}
fn rect(&self) -> Rect {
self.rect
}
fn set_pos(&mut self, pos: Point2) {
self.rect.move_to(pos)
}
fn can_stretch(&self) -> bool {
self.is_stretchable
}
fn stretch(&mut self, _: &mut Context, width: f32) -> Result<StretchStatus> {
if let Some(status) = stretch_checks(self, width) {
return Ok(status);
}
self.rect.w = width;
Ok(StretchStatus::Stretched)
}
}
#[derive(Debug, Clone)]
pub struct ButtonParam {
/// Percentage of one border's size.
pub border_k: f32,
/// Percentage of the drawable's size.
pub drawable_k: f32,
pub is_stretchable: bool,
}
impl Default for ButtonParam {
fn default() -> Self {
let label_param = LabelParam::default();
Self {
border_k: 0.06,
drawable_k: label_param.drawable_k,
is_stretchable: false,
}
}
}
impl ButtonParam {
pub fn check(&self) -> Result {
if self.drawable_k < 0.0 || self.drawable_k > 1.0 {
return Err(Error::BadContentCoefficient);
}
if self.border_k * 2.0 > 1.0 - self.drawable_k {
return Err(Error::BadBorderCoefficient);
}
Ok(())
}
}
#[derive(Debug)]
pub struct Button<Message: Clone> {
is_active: bool,
sprite: Sprite,
bg: Sprite,
border: Sprite,
param: ButtonParam,
sender: Sender<Message>,
message: Message,
color: Color,
}
fn rect_to_vertices(r: Rect) -> [[f32; 2]; 4] {
let x = r.x;
let y = r.y;
[[x, y], [x, y + r.h], [x + r.w, y + r.h], [x + r.w, y]]
}
impl<Message: Clone + Debug> Button<Message> {
pub fn new(
context: &mut Context,
drawable: Box<dyn Drawable>,
height: f32,
sender: Sender<Message>,
message: Message,
) -> Result<Self> {
let param = ButtonParam::default();
Self::from_params(context, drawable, height, sender, message, param)
}
pub fn from_params(
context: &mut Context,
drawable: Box<dyn Drawable>,
height: f32,
sender: Sender<Message>,
message: Message,
param: ButtonParam,
) -> Result<Self> {
param.check()?;
let sprite = Sprite::new(context, drawable, height * param.drawable_k)?;
let outer = Self::outer_rect(&sprite, height, ¶m);
let inner = Self::inner_rect(¶m, outer);
let border = Self::make_border(context, height, outer, inner)?;
let bg = Self::make_bg_mesh(context, height, outer)?;
Ok(Self {
is_active: true,
sprite,
bg,
border,
param,
sender,
message,
color: SPRITE_COLOR,
})
}
pub fn set_color(&mut self, color: Color) {
self.color = color;
self.sprite.param.color = self.color;
}
pub fn set_active(&mut self, value: bool) {
self.is_active = value;
let color = if value {
SPRITE_COLOR
} else {
SPRITE_COLOR_INACTIVE
};
self.set_color(color);
}
pub fn is_active(&self) -> bool {
self.is_active
}
pub fn stretchable(mut self, value: bool) -> Self {
self.set_stretchable(value);
self
}
pub fn set_stretchable(&mut self, value: bool) {
self.param.is_stretchable = value;
}
fn outer_rect(sprite: &Sprite, height: f32, param: &ButtonParam) -> Rect {
let free_area_k = 1.0 - param.drawable_k - param.border_k * 2.0;
let free_area = height * free_area_k;
let border = height * param.border_k;
Rect {
w: border * 2.0 + free_area + sprite.rect().w,
h: height,
..Default::default()
}
}
fn inner_rect(param: &ButtonParam, rect: Rect) -> Rect {
let border = rect.h * param.border_k;
Rect::new(border, border, rect.w - border * 2.0, rect.h - border * 2.0)
}
fn make_border(context: &mut Context, height: f32, outer: Rect, inner: Rect) -> Result<Sprite> {
let mut vertices: Vec<[f32; 2]> = Vec::new();
let outer = rect_to_vertices(outer);
let inner = rect_to_vertices(inner);
vertices.extend(quad_to_tris([outer[0], outer[1], inner[1], inner[0]]).iter());
vertices.extend(quad_to_tris([outer[1], outer[2], inner[2], inner[1]]).iter());
vertices.extend(quad_to_tris([outer[2], outer[3], inner[3], inner[2]]).iter());
vertices.extend(quad_to_tris([outer[3], outer[0], inner[0], inner[3]]).iter());
let color = SPRITE_COLOR_BUTTON_BORDER;
let border_mesh = graphics::Mesh::from_triangles(context, &vertices, color)?;
Sprite::new(context, Box::new(border_mesh), height)
}
fn make_bg_mesh(context: &mut Context, height: f32, outer: Rect) -> Result<Sprite> {
let outer = rect_to_vertices(outer);
let triangles = quad_to_tris(outer);
let bg_mesh = graphics::Mesh::from_triangles(context, &triangles, graphics::WHITE)?;
let mut bg = Sprite::new(context, Box::new(bg_mesh), height)?;
bg.set_color(SPRITE_COLOR_BG);
Ok(bg)
}
}
impl<Message: Clone + Debug> Widget for Button<Message> {
fn draw(&self, context: &mut Context) -> GameResult {
self.bg.draw(context)?;
self.sprite.draw(context)?;
self.border.draw(context)?;
Ok(())
}
    fn click(&self, pos: Point2) {
        trace!("Button: rect={:?}, pos={:?}", self.sprite.rect(), pos);
        if self.border.rect().contains(pos) {
            let message = self.message.clone();
            self.sender.send(message).unwrap();
        }
    }
fn move_mouse(&mut self, pos: Point2) {
let highlighted = self.border.rect().contains(pos);
if highlighted {
self.bg.param.color = SPRITE_COLOR_BG_HIGHLIGHTED;
} else {
self.sprite.param.color = self.color;
self.bg.param.color = SPRITE_COLOR_BG;
};
}
fn rect(&self) -> Rect {
self.border.rect()
}
fn set_pos(&mut self, pos: Point2) {
let h = self.border.rect().h - self.sprite.rect().h;
let w = self.border.rect().w - self.sprite.rect().w;
self.sprite.set_pos(pos + Vector2::new(w, h) * 0.5);
self.border.set_pos(pos);
self.bg.set_pos(pos);
}
fn can_stretch(&self) -> bool {
self.param.is_stretchable
}
fn stretch(&mut self, context: &mut Context, width: f32) -> Result<StretchStatus> {
if let Some(status) = stretch_checks(self, width) {
return Ok(status);
}
let pos: Point2 = self.rect().point().into();
let height = self.bg.dimensions.h;
let outer = Rect {
w: width,
h: self.rect().h,
..Default::default()
};
let inner = Self::inner_rect(&self.param, outer);
self.border = Self::make_border(context, height, outer, inner)?;
self.bg = Self::make_bg_mesh(context, height, outer)?;
self.set_pos(pos);
Ok(StretchStatus::Stretched)
}
}
#[derive(Debug, Default)]
struct Layout {
widgets: Vec<Box<dyn Widget>>,
rect: Rect,
is_stretchable: bool,
}
impl Layout {
fn new() -> Self {
Self {
widgets: Vec::new(),
rect: Rect::default(),
is_stretchable: false,
}
}
pub fn set_stretchable(&mut self, value: bool) {
self.is_stretchable = value;
}
}
impl Widget for Layout {
fn draw(&self, context: &mut Context) -> GameResult {
for widget in &self.widgets {
widget.draw(context)?;
}
Ok(())
}
fn click(&self, pos: Point2) {
for widget in &self.widgets {
widget.click(pos);
}
}
fn move_mouse(&mut self, pos: Point2) {
for widget in &mut self.widgets {
widget.move_mouse(pos);
}
}
fn rect(&self) -> Rect {
self.rect
}
fn set_pos(&mut self, pos: Point2) {
let point: Point2 = self.rect.point().into();
let diff = pos - point;
for widget in &mut self.widgets {
let pos: Point2 = widget.rect().point().into();
widget.set_pos(pos + diff);
}
self.rect.move_to(pos);
}
fn can_stretch(&self) -> bool {
self.is_stretchable
}
fn stretch(&mut self, context: &mut Context, width: f32) -> Result<StretchStatus> {
if let Some(status) = stretch_checks(self, width) {
return Ok(status);
}
for widget in &mut self.widgets {
widget.stretch(context, width)?;
self.rect.w = self.rect.w.max(widget.rect().w);
}
Ok(StretchStatus::Stretched)
}
}
#[derive(Debug, Default)]
pub struct VLayout {
internal: Layout,
}
impl VLayout {
pub fn new() -> Self {
Self {
internal: Layout::new(),
}
}
pub fn stretchable(mut self, value: bool) -> Self {
self.internal.set_stretchable(value);
self
}
pub fn from_widget(widget: Box<dyn Widget>) -> Self {
let mut this = Self::new();
this.add(widget);
this
}
pub fn add(&mut self, mut widget: Box<dyn Widget>) {
let rect = widget.rect();
if let Some(last) = self.internal.widgets.last() {
let rect = last.rect();
let mut pos = rect.point();
pos.y += rect.h;
widget.set_pos(pos.into());
} else {
widget.set_pos(self.internal.rect.point().into());
}
self.internal.widgets.push(widget);
self.internal.rect.h += rect.h;
if self.internal.rect.w < rect.w {
self.internal.rect.w = rect.w;
}
}
}
impl Widget for VLayout {
fn draw(&self, context: &mut Context) -> GameResult {
self.internal.draw(context)
}
fn click(&self, pos: Point2) {
self.internal.click(pos);
}
fn move_mouse(&mut self, pos: Point2) {
self.internal.move_mouse(pos);
}
fn rect(&self) -> Rect {
self.internal.rect()
}
fn set_pos(&mut self, pos: Point2) {
self.internal.set_pos(pos);
}
fn can_stretch(&self) -> bool {
self.internal.can_stretch()
}
fn stretch(&mut self, context: &mut Context, width: f32) -> Result<StretchStatus> {
self.internal.stretch(context, width)
}
}
#[derive(Debug, Default)]
pub struct HLayout {
internal: Layout,
}
impl HLayout {
pub fn new() -> Self {
Self {
internal: Layout::new(),
}
}
pub fn stretchable(mut self, value: bool) -> Self {
self.internal.set_stretchable(value);
self
}
pub fn add(&mut self, mut widget: Box<dyn Widget>) {
let rect = widget.rect();
if let Some(last) = self.internal.widgets.last() {
let rect = last.rect();
let mut pos: Point2 = rect.point().into();
pos.x += rect.w;
widget.set_pos(pos);
} else {
widget.set_pos(self.internal.rect.point().into());
}
self.internal.rect.w += rect.w;
if self.internal.rect.h < rect.h {
self.internal.rect.h = rect.h;
}
self.internal.widgets.push(widget);
}
}
impl Widget for HLayout {
fn draw(&self, context: &mut Context) -> GameResult {
self.internal.draw(context)
}
fn click(&self, pos: Point2) {
self.internal.click(pos);
}
fn move_mouse(&mut self, pos: Point2) {
self.internal.move_mouse(pos);
}
fn rect(&self) -> Rect {
self.internal.rect()
}
fn set_pos(&mut self, pos: Point2) {
self.internal.set_pos(pos);
}
fn can_stretch(&self) -> bool {
self.internal.can_stretch()
}
fn stretch(&mut self, context: &mut Context, width: f32) -> Result<StretchStatus> {
if let Some(status) = stretch_checks(self, width) {
return Ok(status);
}
let widgets = &mut self.internal.widgets;
let stretchable_count = widgets.iter().filter(|w| w.can_stretch()).count();
let taken_w: f32 = widgets.iter().fold(0.0, |acc, w| acc + w.rect().w);
let additional_w_per_stretchable = (width - taken_w) / stretchable_count as f32;
let mut diff_w = 0.0;
for widget in widgets {
let r = widget.rect();
let mut pos: Point2 = r.point().into();
pos.x += diff_w;
widget.set_pos(pos);
if widget.can_stretch() {
let new_w = r.w + additional_w_per_stretchable;
widget.stretch(context, new_w)?;
diff_w += additional_w_per_stretchable;
}
}
self.internal.rect.w = width;
Ok(StretchStatus::Stretched)
}
}
#[derive(Debug, Default)]
pub struct LayersLayout {
internal: Layout,
}
impl LayersLayout {
pub fn new() -> Self {
Self {
internal: Layout::new(),
}
}
pub fn stretchable(mut self, value: bool) -> Self {
self.internal.set_stretchable(value);
self
}
pub fn add(&mut self, mut widget: Box<dyn Widget>) {
let rect = widget.rect();
widget.set_pos(self.internal.rect.point().into());
self.internal.widgets.push(widget);
if self.internal.rect.h < rect.h {
self.internal.rect.h = rect.h;
}
if self.internal.rect.w < rect.w {
self.internal.rect.w = rect.w;
}
}
}
impl Widget for LayersLayout {
fn draw(&self, context: &mut Context) -> GameResult {
self.internal.draw(context)
}
fn click(&self, pos: Point2) {
self.internal.click(pos);
}
fn move_mouse(&mut self, pos: Point2) {
self.internal.move_mouse(pos);
}
fn rect(&self) -> Rect {
self.internal.rect()
}
fn set_pos(&mut self, pos: Point2) {
self.internal.set_pos(pos);
}
fn can_stretch(&self) -> bool {
self.internal.can_stretch()
}
fn stretch(&mut self, context: &mut Context, width: f32) -> Result<StretchStatus> {
self.internal.stretch(context, width)
}
}
| true |
0fea779aa8d40246612ba757f9d4a363598b0012
|
Rust
|
tomvidm/rusty-cas
|
/src/numeric.rs
|
UTF-8
| 8,701 | 3.625 | 4 |
[] |
no_license
|
#![allow(dead_code)]
use std::fmt;
use std::ops::{Add, Sub, Mul, Div, Neg};
pub type ComplexType = f64;
pub type RealType = f64;
pub type IntegerType = i64;
// Numeric type
#[derive(Clone, Copy, PartialEq, PartialOrd)]
pub enum Numeric {
Real(RealType),
Complex(ComplexType),
Integer(IntegerType)
}
impl fmt::Debug for Numeric {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Numeric::Real(real) => write!(f, "{}", *real),
Numeric::Complex(complex) => write!(f, "{}", *complex),
Numeric::Integer(integer) => write!(f, "{}", *integer)
}
}
}
impl fmt::Display for Numeric {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Numeric::Real(real) => write!(f, "{}", *real),
Numeric::Complex(complex) => write!(f, "{}", *complex),
Numeric::Integer(integer) => write!(f, "{}", *integer)
}
}
}
impl Numeric {
pub fn one() -> Numeric {
Numeric::Integer(1)
}
pub fn zero() -> Numeric {
Numeric::Integer(0)
}
pub fn from_real(real: RealType) -> Numeric {
Numeric::Real(real)
}
pub fn from_complex(complex: ComplexType) -> Numeric {
Numeric::Complex(complex)
}
pub fn from_integer(integer: IntegerType) -> Numeric {
Numeric::Integer(integer)
}
pub fn to_real(self) -> RealType {
match self {
Numeric::Real(real) => return real,
Numeric::Complex(complex) => return complex as RealType,
Numeric::Integer(integer) => return integer as RealType
}
}
pub fn is_zero(&self) -> bool {
match self {
Numeric::Real(real) => return *real == 0.,
Numeric::Complex(complex) => return *complex == 0.,
Numeric::Integer(integer) => return *integer == 0
}
}
pub fn is_unity(&self) -> bool {
match self {
Numeric::Real(real) => return *real == 1.,
Numeric::Complex(complex) => return *complex == 1.,
Numeric::Integer(integer) => return *integer == 1
}
}
pub fn pow(&self, pow: IntegerType) -> Numeric {
match self {
Numeric::Real(real) => return Numeric::from_real(real.powi(pow as i32)),
Numeric::Complex(complex) => return Numeric::from_complex(complex.powi(pow as i32)),
Numeric::Integer(integer) => return Numeric::from_integer(integer.pow(pow as u32))
}
}
pub fn exp(&self) -> Numeric {
match self {
Numeric::Real(real) => return Numeric::from_real(real.exp()),
Numeric::Complex(complex) => return Numeric::from_complex(complex.exp()),
Numeric::Integer(integer) => return Numeric::from_real((*integer as RealType).exp())
}
}
}
impl Neg for Numeric {
type Output = Numeric;
fn neg(self) -> Numeric {
match self {
Numeric::Real(real) => return Numeric::from_real(-real),
Numeric::Complex(complex) => return Numeric::from_complex(-complex),
Numeric::Integer(integer) => return Numeric::from_integer(-integer)
}
}
}
impl Add for Numeric {
type Output = Numeric;
fn add(self, other: Numeric) -> Numeric {
match self {
Numeric::Real(lhs_real) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real,
Numeric::Complex(rhs_complex) => rhs_complex as RealType,
Numeric::Integer(rhs_integer) => rhs_integer as RealType
};
return Numeric::from_real(lhs_real + rhs)
},
Numeric::Complex(lhs_complex) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real as ComplexType,
Numeric::Complex(rhs_complex) => rhs_complex,
Numeric::Integer(rhs_integer) => rhs_integer as ComplexType
};
return Numeric::from_complex(lhs_complex + rhs)
},
Numeric::Integer(lhs_integer) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real as IntegerType,
Numeric::Complex(rhs_complex) => rhs_complex as IntegerType,
Numeric::Integer(rhs_integer) => rhs_integer
};
return Numeric::from_integer(lhs_integer + rhs)
}
}
}
}
impl Sub for Numeric {
type Output = Numeric;
fn sub(self, other: Numeric) -> Numeric {
match self {
Numeric::Real(lhs_real) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real,
Numeric::Complex(rhs_complex) => rhs_complex as RealType,
Numeric::Integer(rhs_integer) => rhs_integer as RealType
};
return Numeric::from_real(lhs_real - rhs)
},
Numeric::Complex(lhs_complex) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real as ComplexType,
Numeric::Complex(rhs_complex) => rhs_complex,
Numeric::Integer(rhs_integer) => rhs_integer as ComplexType
};
return Numeric::from_complex(lhs_complex - rhs)
},
Numeric::Integer(lhs_integer) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real as IntegerType,
Numeric::Complex(rhs_complex) => rhs_complex as IntegerType,
Numeric::Integer(rhs_integer) => rhs_integer
};
return Numeric::from_integer(lhs_integer - rhs)
}
}
}
}
impl Mul for Numeric {
type Output = Numeric;
fn mul(self, other: Numeric) -> Numeric {
match self {
Numeric::Real(lhs_real) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real,
Numeric::Complex(rhs_complex) => rhs_complex as RealType,
Numeric::Integer(rhs_integer) => rhs_integer as RealType
};
return Numeric::from_real(lhs_real * rhs)
},
Numeric::Complex(lhs_complex) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real as ComplexType,
Numeric::Complex(rhs_complex) => rhs_complex,
Numeric::Integer(rhs_integer) => rhs_integer as ComplexType
};
return Numeric::from_complex(lhs_complex * rhs)
},
Numeric::Integer(lhs_integer) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real as IntegerType,
Numeric::Complex(rhs_complex) => rhs_complex as IntegerType,
Numeric::Integer(rhs_integer) => rhs_integer
};
return Numeric::from_integer(lhs_integer * rhs)
}
}
}
}
impl Div for Numeric {
type Output = Numeric;
fn div(self, other: Numeric) -> Numeric {
match self {
Numeric::Real(lhs_real) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real,
Numeric::Complex(rhs_complex) => rhs_complex as RealType,
Numeric::Integer(rhs_integer) => rhs_integer as RealType
};
return Numeric::from_real(lhs_real / rhs)
},
Numeric::Complex(lhs_complex) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real as ComplexType,
Numeric::Complex(rhs_complex) => rhs_complex,
Numeric::Integer(rhs_integer) => rhs_integer as ComplexType
};
return Numeric::from_complex(lhs_complex / rhs)
},
Numeric::Integer(lhs_integer) => {
let rhs = match other {
Numeric::Real(rhs_real) => rhs_real as IntegerType,
Numeric::Complex(rhs_complex) => rhs_complex as IntegerType,
Numeric::Integer(rhs_integer) => rhs_integer
};
return Numeric::from_integer(lhs_integer / rhs)
}
}
}
}
#[cfg(test)]
#[test]
fn test_basic_operations() {
let a = Numeric::from_real(1.);
let b = Numeric::from_integer(1);
assert_eq!(a + b, Numeric::from_real(2.));
}
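// An extra illustrative test (not in the original file): mixed-type arithmetic keeps the
// representation of the left-hand operand, so the Real rhs below is cast to an integer
// and the division truncates.
#[cfg(test)]
#[test]
fn test_mixed_type_division() {
    let a = Numeric::from_integer(7);
    let b = Numeric::from_real(2.);
    assert_eq!(a / b, Numeric::from_integer(3));
    assert_eq!(Numeric::from_real(7.) / b, Numeric::from_real(3.5));
}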
| true |
3e43b66e8eac0d8be733d1663292aab37be3211d
|
Rust
|
LitxDev/freshfetch
|
/src/info/resolution.rs
|
UTF-8
| 11,488 | 2.65625 | 3 |
[
"MIT"
] |
permissive
|
use crate::mlua;
use crate::regex;
use crate::errors;
use crate::utils;
use super::kernel;
use std::env::{ var, vars };
use std::fs::{ read_to_string };
use std::path::{ Path };
use std::process::{ Command };
use regex::{ Regex };
use mlua::prelude::*;
use crate::{ Inject };
use utils::{ which::{ which } };
use kernel::{ Kernel };
#[derive(Clone, Debug)]
pub(crate) struct Resolution {
pub width: u16,
pub height: u16,
pub refresh: Option<f32>,
}
impl Resolution {
pub fn new(k: &Kernel) -> Option<Self> {
match k.name.as_str() {
"Linux" => {
if which("xrandr").is_some()
&& var("DISPLAY").is_ok()
&& var("WAYLAND_DISPLAY").is_err() {
let mut to_return = Resolution {
width: 0,
height: 0,
refresh: None,
};
// Get output of `xrandr --nograb --current`.
let xrandr_string = {
let try_xrandr = Command::new("sh")
.arg("-c")
.arg("xrandr --nograb --current")
.envs(&mut vars())
.output();
match try_xrandr {
Ok(xrandr) => match String::from_utf8(xrandr.stdout) {
Ok(xrandr) => xrandr,
Err(_) => return None,
},
Err(e) => {
errors::handle(&format!("{}{cmd}{}{err}\nNOTE: xrandr was found in the path, so this should have succeeded.\n",
errors::CMD.0,
errors::CMD.1,
cmd = "xrandr --nograb --current",
err = e));
return None;
}
}
};
// Split the output into lines.
let xrandr_lines = xrandr_string
.split("\n")
.collect::<Vec<&str>>();
// Get data from lines.
{
let regex = Regex::new(r#"\s+(?:(\d+)x(\d+))\s+((?:\d+)\.(?:\d+)\*)"#).unwrap();
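                    // The pattern above is meant to match the active mode line of
                    // `xrandr --current`, which looks roughly like
                    // "   1920x1080     60.00*+  59.94" (illustrative sample, not a captured
                    // output): group 1 = width, group 2 = height, group 3 = the starred
                    // refresh rate.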
for line in xrandr_lines.iter() {
match regex.captures(&line) {
Some(caps) => {
match caps.get(1) {
Some(cap) => match cap.as_str().parse::<u16>() {
Ok(width) => to_return.width = width,
// `unreachable!()` used here b/c
// only digit characters should
// be here.
Err(_) => unreachable!(),
}
// `unreachable!()` used here because
// its a required match.
None => unreachable!(),
}
match caps.get(2) {
Some(cap) => match cap.as_str().parse::<u16>() {
Ok(height) => to_return.height = height,
// Same reason as above.
Err(_) => unreachable!(),
}
// Same reason as above.
None => unreachable!(),
}
match caps.get(3) {
Some(cap) => {
let mut v = String::from(cap.as_str());
v = v.replace("*", "");
match v.parse::<f32>() {
Ok(refresh) => to_return.refresh = Some(refresh),
// Same reason as above.
Err(_) => unreachable!(),
}
}
// Same reason as above.
None => unreachable!(),
}
return Some(to_return);
}
None => (),
}
}
}
} else if which("xwininfo").is_some()
&& var("DISPLAY").is_ok()
&& var("WAYLAND_DISPLAY").is_err() {
let mut to_return = Resolution {
width: 0,
height: 0,
refresh: None
};
// Get output of `xwininfo -root`.
let xwininfo_string = {
let try_xwininfo = Command::new("sh")
.arg("-c")
.arg("xwininfo -root")
.envs(&mut vars())
.output();
match try_xwininfo {
Ok(xwininfo) => match String::from_utf8(xwininfo.stdout) {
Ok(xwininfo) => xwininfo,
Err(_) => return None,
},
Err(e) => {
errors::handle(&format!("{}{cmd}{}{err}\nNOTE: xwininfo was found in the path, so this should have succeeded.\n",
errors::CMD.0,
errors::CMD.1,
cmd = "xwininfo -root",
err = e));
return None;
}
}
};
// Split into lines.
let xwininfo_lines = xwininfo_string
.split("\n")
.collect::<Vec<&str>>();
let width_regex = Regex::new(r#"\s+Width: (\d+)"#).unwrap();
let mut width_regex_captured = false;
let height_regex = Regex::new(r#"\s+Height: (\d+)"#).unwrap();
let mut height_regex_captured = false;
for line in xwininfo_lines.iter() {
match width_regex.captures(&line) {
Some(caps) => match caps.get(1) {
Some(cap) => match cap.as_str().parse::<u16>() {
Ok(width) => {
to_return.width = width;
width_regex_captured = true;
}
Err(_) => unreachable!(),
}
None => unreachable!(),
}
None => (),
}
match height_regex.captures(&line) {
Some(caps) => match caps.get(1) {
Some(cap) => match cap.as_str().parse::<u16>() {
Ok(height) => {
to_return.height = height;
height_regex_captured = true;
}
Err(_) => unreachable!(),
}
None => unreachable!(),
}
None => (),
}
}
if width_regex_captured
&& height_regex_captured {
return Some(to_return);
}
} else if Path::new("/sys/class/drm/").is_dir() {
if let Ok(entries) = Path::new("/sys/class/drm/").read_dir() {
for entry in entries {
if let Ok(entry) = entry {
if entry.path().join("modes").is_file() {
let modes_string = match read_to_string(entry.path().join("modes")) {
Ok(modes) => modes,
Err(_) => return None,
};
let modes_lines = modes_string
.split("\n")
.collect::<Vec<&str>>();
for line in modes_lines.iter() {
let line_split = line
.split("x")
.collect::<Vec<&str>>();
let width = line_split.get(0);
let height = line_split.get(1);
if width.is_some()
&& height.is_some() {
return Some(Resolution {
width: width.unwrap().parse::<u16>().unwrap(),
height: height.unwrap().parse::<u16>().unwrap(),
refresh: None,
});
}
}
}
}
}
}
}
None
}
_ => None,
}
}
}
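// The Inject impl below publishes the detected values to the Lua state as a global
// `resolution` table, so a user config could read them roughly like this (illustrative
// Lua, not from the original docs):
// print(resolution.width .. "x" .. resolution.height)
// if resolution.refresh then print(resolution.refresh .. " Hz") end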
impl Inject for Resolution {
fn inject(&self, lua: &mut Lua) {
let globals = lua.globals();
match lua.create_table() {
Ok(t) => {
match t.set("width", self.width) {
Ok(_) => (),
Err(e) => { errors::handle(&format!("{}{}", errors::LUA, e)); panic!(); }
}
match t.set("height", self.height) {
Ok(_) => (),
Err(e) => { errors::handle(&format!("{}{}", errors::LUA, e)); panic!(); }
}
match self.refresh {
Some(refresh) => match t.set("refresh", refresh) {
Ok(_) => (),
Err(e) => { errors::handle(&format!("{}{}", errors::LUA, e)); panic!(); }
}
None => (),
}
match globals.set("resolution", t) {
Ok(_) => (),
Err(e) => { errors::handle(&format!("{}{}", errors::LUA, e)); panic!(); }
}
}
Err(e) => { errors::handle(&format!("{}{}", errors::LUA, e)); panic!(); }
}
}
}
| true |
a751a28335768772de986655ff9009a8c12bb808
|
Rust
|
sfackler/rust-postgres
|
/postgres-types/src/special.rs
|
UTF-8
| 3,024 | 3.203125 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
use bytes::BytesMut;
use postgres_protocol::types;
use std::error::Error;
use std::{i32, i64};
use crate::{FromSql, IsNull, ToSql, Type};
/// A wrapper that can be used to represent infinity with `Type::Date` types.
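///
/// A minimal usage sketch (assumes a synchronous `postgres` client and the chrono
/// integration; the client and query are illustrative, not part of this module):
///
/// ```ignore
/// let row = client.query_one("SELECT 'infinity'::DATE", &[])?;
/// let value: Date<chrono::NaiveDate> = row.get(0);
/// assert!(matches!(value, Date::PosInfinity));
/// ```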
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Date<T> {
/// Represents `infinity`, a date that is later than all other dates.
PosInfinity,
/// Represents `-infinity`, a date that is earlier than all other dates.
NegInfinity,
/// The wrapped date.
Value(T),
}
impl<'a, T: FromSql<'a>> FromSql<'a> for Date<T> {
fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<Self, Box<dyn Error + Sync + Send>> {
match types::date_from_sql(raw)? {
i32::MAX => Ok(Date::PosInfinity),
i32::MIN => Ok(Date::NegInfinity),
_ => T::from_sql(ty, raw).map(Date::Value),
}
}
fn accepts(ty: &Type) -> bool {
*ty == Type::DATE && T::accepts(ty)
}
}
impl<T: ToSql> ToSql for Date<T> {
fn to_sql(
&self,
ty: &Type,
out: &mut BytesMut,
) -> Result<IsNull, Box<dyn Error + Sync + Send>> {
let value = match *self {
Date::PosInfinity => i32::MAX,
Date::NegInfinity => i32::MIN,
Date::Value(ref v) => return v.to_sql(ty, out),
};
types::date_to_sql(value, out);
Ok(IsNull::No)
}
fn accepts(ty: &Type) -> bool {
*ty == Type::DATE && T::accepts(ty)
}
to_sql_checked!();
}
/// A wrapper that can be used to represent infinity with `Type::Timestamp` and `Type::Timestamptz`
/// types.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Timestamp<T> {
/// Represents `infinity`, a timestamp that is later than all other timestamps.
PosInfinity,
/// Represents `-infinity`, a timestamp that is earlier than all other timestamps.
NegInfinity,
/// The wrapped timestamp.
Value(T),
}
impl<'a, T: FromSql<'a>> FromSql<'a> for Timestamp<T> {
fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<Self, Box<dyn Error + Sync + Send>> {
match types::timestamp_from_sql(raw)? {
i64::MAX => Ok(Timestamp::PosInfinity),
i64::MIN => Ok(Timestamp::NegInfinity),
_ => T::from_sql(ty, raw).map(Timestamp::Value),
}
}
fn accepts(ty: &Type) -> bool {
matches!(*ty, Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty))
}
}
impl<T: ToSql> ToSql for Timestamp<T> {
fn to_sql(
&self,
ty: &Type,
out: &mut BytesMut,
) -> Result<IsNull, Box<dyn Error + Sync + Send>> {
let value = match *self {
Timestamp::PosInfinity => i64::MAX,
Timestamp::NegInfinity => i64::MIN,
Timestamp::Value(ref v) => return v.to_sql(ty, out),
};
types::timestamp_to_sql(value, out);
Ok(IsNull::No)
}
fn accepts(ty: &Type) -> bool {
matches!(*ty, Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty))
}
to_sql_checked!();
}
| true |
50dd63ef3d600159b414ec93b9c09b97713fefcd
|
Rust
|
hpolloni/xagima
|
/src/testing.rs
|
UTF-8
| 598 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
use core::panic::PanicInfo;
use crate::println;
pub fn runner(tests: &[&dyn Fn()]) {
println!("Running {} tests", tests.len());
for test in tests {
test();
}
success();
}
pub fn default_panic_handler(info: &PanicInfo) -> ! {
println!("[failed]\n");
println!("Error: {}\n", info);
fail();
crate::halt();
}
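// Writes the given code to the QEMU `isa-debug-exit` device (this assumes QEMU is started
// with `-device isa-debug-exit,iobase=0xf4,iosize=0x04`). QEMU then terminates with host
// exit status `(exit_code << 1) | 1`, so 0x10 and 0x11 below map to 33 and 35 respectively.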
fn exit_qemu(exit_code: u32) {
use x86_64::instructions::port::Port;
unsafe {
let mut port = Port::new(0xf4);
port.write(exit_code);
}
}
pub fn fail() {
exit_qemu(0x11);
}
pub fn success() {
exit_qemu(0x10);
}
| true |
ba606ac38a54e3624df4ad919ede2fda69dda863
|
Rust
|
FyroxEngine/Fyrox
|
/src/scene/mesh/buffer.rs
|
UTF-8
| 52,281 | 3.59375 | 4 |
[
"MIT"
] |
permissive
|
//! Vertex buffer with dynamic layout. See [`VertexBuffer`] docs for more info and usage examples.
use crate::{
core::{
algebra::{Vector2, Vector3, Vector4},
arrayvec::ArrayVec,
byteorder::{ByteOrder, LittleEndian},
futures::io::Error,
math::TriangleDefinition,
visitor::prelude::*,
},
utils::value_as_u8_slice,
};
use fxhash::FxHasher;
use std::{
alloc::Layout,
fmt::{Display, Formatter},
hash::{Hash, Hasher},
marker::PhantomData,
mem::MaybeUninit,
ops::{Deref, DerefMut, Index, IndexMut, RangeBounds},
vec::Drain,
};
/// A common trait for all vertex types. **IMPORTANT:** Implementors **must** use `#[repr(C)]` attribute, otherwise the compiler
/// is free to reorder fields and you might get weird results, because definition order will be different from memory order! See
/// examples in [`VertexBuffer`] docs.
pub trait VertexTrait: Copy {
/// Returns memory layout of the vertex. It basically tells a GPU how to interpret every byte range
/// of your vertex type; which kind of information it holds.
fn layout() -> &'static [VertexAttributeDescriptor];
}
/// Data type for a vertex attribute component.
#[derive(Copy, Clone, PartialOrd, PartialEq, Eq, Ord, Hash, Visit, Debug)]
#[repr(u8)]
pub enum VertexAttributeDataType {
/// 32-bit floating-point.
F32,
/// 32-bit unsigned integer.
U32,
/// 16-bit unsigned integer.
U16,
/// 8-bit unsigned integer.
U8,
}
impl Default for VertexAttributeDataType {
fn default() -> Self {
Self::F32
}
}
impl VertexAttributeDataType {
/// Returns size of data in bytes.
pub fn size(self) -> u8 {
match self {
VertexAttributeDataType::F32 | VertexAttributeDataType::U32 => 4,
VertexAttributeDataType::U16 => 2,
VertexAttributeDataType::U8 => 1,
}
}
}
/// A usage for a vertex attribute. It is a fixed set, but there is plenty of
/// room for any custom data - it can fit into the `TexCoordN` attributes.
#[derive(Copy, Clone, PartialOrd, PartialEq, Eq, Ord, Hash, Visit, Debug)]
#[repr(u32)]
pub enum VertexAttributeUsage {
/// Vertex position. Usually `Vector2<f32>` or `Vector3<f32>`.
Position = 0,
/// Vertex normal. Usually `Vector3<f32>`, more rare `Vector3<u16>` (F16).
Normal = 1,
/// Vertex tangent. Usually `Vector3<f32>`.
Tangent = 2,
/// First texture coordinates. Usually `Vector2<f32>`.
/// It may be used for everything else, not only for texture coordinates.
TexCoord0 = 3,
/// Second texture coordinates.
TexCoord1 = 4,
/// Third texture coordinates.
TexCoord2 = 5,
/// Fourth texture coordinates.
TexCoord3 = 6,
/// Fifth texture coordinates.
TexCoord4 = 7,
/// Sixth texture coordinates.
TexCoord5 = 8,
/// Seventh texture coordinates.
TexCoord6 = 9,
/// Eighth texture coordinates.
TexCoord7 = 10,
/// Bone weights. Usually `Vector4<f32>`.
BoneWeight = 11,
/// Bone indices. Usually `Vector4<u8>`.
BoneIndices = 12,
/// Maximum amount of attribute kinds.
Count,
}
impl Default for VertexAttributeUsage {
fn default() -> Self {
Self::Position
}
}
/// Input vertex attribute descriptor used to construct layouts and feed vertex buffer.
#[derive(Debug)]
pub struct VertexAttributeDescriptor {
/// Claimed usage of the attribute. It could be Position, Normal, etc.
pub usage: VertexAttributeUsage,
/// Data type of every component of the attribute. It could be F32, U32, U16, etc.
pub data_type: VertexAttributeDataType,
/// Size of attribute expressed in components. For example, for `Position` it could
/// be 3 - which means there are 3 components in attribute of `data_type`.
pub size: u8,
/// Sets a "fetch rate" for vertex shader at which it will read vertex attribute:
/// 0 - per vertex (default)
/// 1 - per instance
/// 2 - per 2 instances and so on.
pub divisor: u8,
/// Defines location of the attribute in a shader (`layout(location = x) attrib;`)
pub shader_location: u8,
}
/// Vertex attribute is a simple "bridge" between raw data and its interpretation. In
/// other words it defines how to treat raw data in vertex shader.
#[derive(Visit, Copy, Clone, Default, Debug, Hash)]
pub struct VertexAttribute {
/// Claimed usage of the attribute. It could be Position, Normal, etc.
pub usage: VertexAttributeUsage,
/// Data type of every component of the attribute. It could be F32, U32, U16, etc.
pub data_type: VertexAttributeDataType,
/// Size of attribute expressed in components. For example, for `Position` it could
/// be 3 - which means there are 3 components in attribute of `data_type`.
pub size: u8,
/// Sets a "fetch rate" for vertex shader at which it will read vertex attribute:
/// 0 - per vertex (default)
/// 1 - per instance
/// 2 - per 2 instances and so on.
pub divisor: u8,
/// Offset in bytes from beginning of the vertex.
pub offset: u8,
/// Defines location of the attribute in a shader (`layout(location = x) attrib;`)
pub shader_location: u8,
}
#[derive(Clone, Debug)]
struct BytesStorage {
bytes: Vec<u8>,
layout: Layout,
}
impl Visit for BytesStorage {
fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult {
self.bytes.visit(name, visitor)?;
if visitor.is_reading() {
self.layout = Layout::array::<u8>(self.bytes.len()).unwrap();
}
Ok(())
}
}
impl Default for BytesStorage {
fn default() -> Self {
Self {
bytes: Default::default(),
layout: Layout::array::<u8>(0).unwrap(),
}
}
}
impl BytesStorage {
fn new<T>(data: Vec<T>) -> Self {
        // Prevent the destructor from being called on `data`; this is needed because we're taking
        // its data storage and treating it as a plain block of bytes.
let mut data = std::mem::ManuallyDrop::new(data);
let bytes_length = data.len() * std::mem::size_of::<T>();
let bytes_capacity = data.capacity() * std::mem::size_of::<T>();
Self {
bytes: unsafe {
Vec::<u8>::from_raw_parts(
data.as_mut_ptr() as *mut u8,
bytes_length,
bytes_capacity,
)
},
// Preserve initial memory layout, to ensure that the memory block will be deallocated
// with initial memory layout.
layout: Layout::array::<T>(data.len()).unwrap(),
}
}
fn extend_from_slice(&mut self, slice: &[u8]) {
if self.layout.align() != 1 {
// Realloc backing storage manually if the alignment is anything else than 1.
let new_storage = Vec::with_capacity(self.bytes.len());
let old_storage = std::mem::replace(&mut self.bytes, new_storage);
self.bytes.extend_from_slice(old_storage.as_slice());
self.layout = Layout::array::<u8>(self.bytes.len()).unwrap();
}
self.bytes.extend_from_slice(slice);
}
fn drain<R>(&mut self, range: R) -> Drain<'_, u8>
where
R: RangeBounds<usize>,
{
self.bytes.drain(range)
}
fn as_mut_ptr(&mut self) -> *mut u8 {
self.bytes.as_mut_ptr()
}
fn as_slice_mut(&mut self) -> &mut [u8] {
self.bytes.as_mut_slice()
}
fn clear(&mut self) {
self.bytes.clear()
}
}
impl Drop for BytesStorage {
fn drop(&mut self) {
let mut bytes = std::mem::ManuallyDrop::new(std::mem::take(&mut self.bytes));
// Dealloc manually with initial memory layout.
if bytes.capacity() != 0 {
unsafe { std::alloc::dealloc(bytes.as_mut_ptr(), self.layout) }
}
}
}
impl Deref for BytesStorage {
type Target = Vec<u8>;
fn deref(&self) -> &Self::Target {
&self.bytes
}
}
/// Vertex buffer with dynamic layout. It is used to store multiple vertices of a single type that implements [`VertexTrait`].
/// Different vertex types are used for efficient memory usage. For example, you could have a simple vertex with only a position
/// expressed as a Vector3, and it will be enough for simple cases when only the position is required. However, if you want to draw
/// a mesh with skeletal animation that also supports texturing and lighting, you need to provide a lot more data (bone indices,
/// bone weights, normals, tangents, texture coordinates).
///
/// ## Examples
///
/// ```rust
/// # use fyrox::{
/// # core::algebra::Vector3,
/// # scene::mesh::buffer::{
/// # VertexAttributeDataType, VertexAttributeDescriptor, VertexAttributeUsage, VertexBuffer,
/// # VertexTrait,
/// # },
/// # };
/// #
/// #[derive(Copy, Clone)]
/// #[repr(C)]
/// struct MyVertex {
/// position: Vector3<f32>,
/// }
///
/// impl VertexTrait for MyVertex {
/// fn layout() -> &'static [VertexAttributeDescriptor] {
/// &[VertexAttributeDescriptor {
/// usage: VertexAttributeUsage::Position,
/// data_type: VertexAttributeDataType::F32,
/// size: 3,
/// divisor: 0,
/// shader_location: 0,
/// }]
/// }
/// }
///
/// fn create_triangle_vertex_buffer() -> VertexBuffer {
/// VertexBuffer::new(
/// 3,
/// vec![
/// MyVertex {
/// position: Vector3::new(0.0, 0.0, 0.0),
/// },
/// MyVertex {
/// position: Vector3::new(0.0, 1.0, 0.0),
/// },
/// MyVertex {
/// position: Vector3::new(1.0, 1.0, 0.0),
/// },
/// ],
/// )
/// .unwrap()
/// }
/// ```
///
/// This example creates a simple vertex buffer that contains a single triangle with a custom vertex format. The most important
/// part here is the [`VertexTrait::layout`] implementation - it describes each "attribute" of your vertex. If your layout does not
/// match the actual content of the vertex (in terms of size in bytes), then the vertex buffer cannot be created and [`VertexBuffer::new`]
/// will return a [`ValidationError`].
///
/// The second, but no less important, part is the `#[repr(C)]` attribute - it is mandatory for every vertex type; it forbids field
/// reordering of your vertex structure and guarantees that the fields will have the same layout in memory as their declaration order.
///
/// ## Limitations
///
/// Vertex size cannot be more than 256 bytes; this limitation shouldn't be a problem because almost every GPU supports up to
/// 16 vertex attributes with 16 bytes of size each, which gives exactly 256 bytes.
#[derive(Clone, Visit, Default, Debug)]
pub struct VertexBuffer {
dense_layout: Vec<VertexAttribute>,
sparse_layout: [Option<VertexAttribute>; 13],
vertex_size: u8,
vertex_count: u32,
data: BytesStorage,
data_hash: u64,
#[visit(optional)]
layout_hash: u64,
}
fn calculate_layout_hash(layout: &[VertexAttribute]) -> u64 {
let mut hasher = FxHasher::default();
layout.hash(&mut hasher);
hasher.finish()
}
fn calculate_data_hash(data: &[u8]) -> u64 {
let mut hasher = FxHasher::default();
data.hash(&mut hasher);
hasher.finish()
}
/// See VertexBuffer::modify for more info.
pub struct VertexBufferRefMut<'a> {
vertex_buffer: &'a mut VertexBuffer,
}
impl<'a> Drop for VertexBufferRefMut<'a> {
fn drop(&mut self) {
// Recalculate data hash.
self.vertex_buffer.data_hash = calculate_data_hash(&self.vertex_buffer.data);
}
}
impl<'a> Deref for VertexBufferRefMut<'a> {
type Target = VertexBuffer;
fn deref(&self) -> &Self::Target {
self.vertex_buffer
}
}
impl<'a> DerefMut for VertexBufferRefMut<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.vertex_buffer
}
}
impl<'a> VertexBufferRefMut<'a> {
/// Tries to append a vertex to the buffer.
///
/// # Safety and validation
///
    /// This method accepts any type that has an appropriate size; the size must be equal
    /// to the size defined by the layout. The Copy trait bound is required to ensure that
/// the type does not have any custom destructors.
pub fn push_vertex<T>(&mut self, vertex: &T) -> Result<(), ValidationError>
where
T: VertexTrait,
{
if std::mem::size_of::<T>() == self.vertex_buffer.vertex_size as usize {
self.vertex_buffer
.data
.extend_from_slice(value_as_u8_slice(vertex));
self.vertex_buffer.vertex_count += 1;
Ok(())
} else {
Err(ValidationError::InvalidVertexSize {
expected: self.vertex_buffer.vertex_size,
actual: std::mem::size_of::<T>() as u8,
})
}
}
/// Removes last vertex from the buffer.
pub fn remove_last_vertex(&mut self) {
let range = (self.vertex_buffer.data.len() - self.vertex_buffer.vertex_size as usize)..;
self.vertex_buffer.data.drain(range);
self.vertex_buffer.vertex_count -= 1;
}
    /// Copies the data of the last vertex from the buffer into an instance of the given type and removes that vertex.
///
/// # Safety and validation
///
    /// This method accepts any type that has an appropriate size; the size must be equal
    /// to the size defined by the layout. The Copy trait bound is required to ensure that
/// the type does not have any custom destructors.
pub fn pop_vertex<T>(&mut self) -> Result<T, ValidationError>
where
T: VertexTrait,
{
if std::mem::size_of::<T>() == self.vertex_buffer.vertex_size as usize
&& self.vertex_buffer.data.len() >= self.vertex_buffer.vertex_size as usize
{
unsafe {
let mut v = MaybeUninit::<T>::uninit();
std::ptr::copy_nonoverlapping(
self.vertex_buffer.data.as_ptr().add(
self.vertex_buffer.data.len() - self.vertex_buffer.vertex_size as usize,
),
v.as_mut_ptr() as *mut u8,
self.vertex_buffer.vertex_size as usize,
);
let range =
(self.vertex_buffer.data.len() - self.vertex_buffer.vertex_size as usize)..;
self.vertex_buffer.data.drain(range);
self.vertex_buffer.vertex_count -= 1;
Ok(v.assume_init())
}
} else {
Err(ValidationError::InvalidVertexSize {
expected: self.vertex_buffer.vertex_size,
actual: std::mem::size_of::<T>() as u8,
})
}
}
    /// Tries to cast the internal data buffer to a slice of the given type. It may fail if
    /// the size of the type is not equal to the claimed size (which is set by the layout).
pub fn cast_data_mut<T>(&mut self) -> Result<&mut [T], ValidationError>
where
T: VertexTrait,
{
if std::mem::size_of::<T>() == self.vertex_buffer.vertex_size as usize {
Ok(unsafe {
std::slice::from_raw_parts_mut(
self.vertex_buffer.data.as_mut_ptr() as *const T as *mut T,
self.vertex_buffer.data.len() / std::mem::size_of::<T>(),
)
})
} else {
Err(ValidationError::InvalidVertexSize {
expected: self.vertex_buffer.vertex_size,
actual: std::mem::size_of::<T>() as u8,
})
}
}
/// Creates iterator that emits read/write accessors for vertices.
pub fn iter_mut(&mut self) -> impl Iterator<Item = VertexViewMut<'_>> + '_ {
unsafe {
VertexViewMutIterator {
ptr: self.vertex_buffer.data.as_mut_ptr(),
end: self.data.as_mut_ptr().add(
self.vertex_buffer.vertex_size as usize
* self.vertex_buffer.vertex_count as usize,
),
vertex_size: self.vertex_buffer.vertex_size,
sparse_layout: &self.vertex_buffer.sparse_layout,
marker: PhantomData,
}
}
}
/// Returns a read/write accessor of n-th vertex.
pub fn get_mut(&mut self, n: usize) -> Option<VertexViewMut<'_>> {
let offset = n * self.vertex_buffer.vertex_size as usize;
if offset < self.vertex_buffer.data.len() {
Some(VertexViewMut {
vertex_data: &mut self.vertex_buffer.data.as_slice_mut()
[offset..(offset + self.vertex_buffer.vertex_size as usize)],
sparse_layout: &self.vertex_buffer.sparse_layout,
})
} else {
None
}
}
/// Duplicates n-th vertex and puts it at the back of the buffer.
pub fn duplicate(&mut self, n: usize) {
// Vertex cannot be larger than 256 bytes, so having temporary array of
// such size is ok.
let mut temp = ArrayVec::<u8, 256>::new();
temp.try_extend_from_slice(
&self.vertex_buffer.data[(n * self.vertex_buffer.vertex_size as usize)
..((n + 1) * self.vertex_buffer.vertex_size as usize)],
)
.unwrap();
self.vertex_buffer.data.extend_from_slice(temp.as_slice());
self.vertex_buffer.vertex_count += 1;
}
    /// Adds a new attribute at the end of the layout and reorganizes the internal data storage
    /// so it can hold the new attribute. The default value of the new attribute in the buffer
    /// becomes `fill_value`. Graphically this could be represented like so:
///
/// Add secondary texture coordinates:
/// Before: P1_N1_TC1_P2_N2_TC2...
/// After: P1_N1_TC1_TC2(fill_value)_P2_N2_TC2_TC2(fill_value)...
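    ///
    /// A hypothetical call matching the example above (sketch only; the buffer, the chosen
    /// usage, and the shader location are assumptions, not taken from a real scene):
    ///
    /// ```ignore
    /// buffer.modify().add_attribute(
    ///     VertexAttributeDescriptor {
    ///         usage: VertexAttributeUsage::TexCoord1,
    ///         data_type: VertexAttributeDataType::F32,
    ///         size: 2,
    ///         divisor: 0,
    ///         shader_location: 4,
    ///     },
    ///     Vector2::new(0.0f32, 0.0),
    /// )?;
    /// ```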
pub fn add_attribute<T>(
&mut self,
descriptor: VertexAttributeDescriptor,
fill_value: T,
) -> Result<(), ValidationError>
where
T: Copy,
{
if self.vertex_buffer.sparse_layout[descriptor.usage as usize].is_some() {
Err(ValidationError::DuplicatedAttributeDescriptor)
} else {
let vertex_attribute = VertexAttribute {
usage: descriptor.usage,
data_type: descriptor.data_type,
size: descriptor.size,
divisor: descriptor.divisor,
offset: self.vertex_buffer.vertex_size,
shader_location: descriptor.shader_location,
};
self.vertex_buffer.sparse_layout[descriptor.usage as usize] = Some(vertex_attribute);
self.vertex_buffer.dense_layout.push(vertex_attribute);
self.layout_hash = calculate_layout_hash(&self.vertex_buffer.dense_layout);
let mut new_data = Vec::new();
for chunk in self
.vertex_buffer
.data
.chunks_exact(self.vertex_buffer.vertex_size as usize)
{
let mut temp = ArrayVec::<u8, 256>::new();
temp.try_extend_from_slice(chunk).unwrap();
temp.try_extend_from_slice(value_as_u8_slice(&fill_value))
.unwrap();
new_data.extend_from_slice(&temp);
}
self.vertex_buffer.data = BytesStorage::new(new_data);
self.vertex_buffer.vertex_size += std::mem::size_of::<T>() as u8;
Ok(())
}
}
/// Clears the buffer making it empty.
pub fn clear(&mut self) {
self.data.clear();
self.vertex_count = 0;
}
}
/// An error that may occur during input data and layout validation.
#[derive(Debug)]
pub enum ValidationError {
/// Attribute size must be either 1, 2, 3 or 4.
InvalidAttributeSize(usize),
/// Data size is not correct.
InvalidDataSize {
/// Expected data size in bytes.
expected: usize,
/// Actual data size in bytes.
actual: usize,
},
/// Trying to add vertex of incorrect size.
InvalidVertexSize {
/// Expected vertex size.
expected: u8,
/// Actual vertex size.
actual: u8,
},
/// A duplicate of a descriptor was found.
DuplicatedAttributeDescriptor,
/// Duplicate shader locations were found.
ConflictingShaderLocations(usize),
}
impl Display for ValidationError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
ValidationError::InvalidAttributeSize(v) => {
write!(f, "Invalid attribute size {v}. Must be either 1, 2, 3 or 4")
}
ValidationError::InvalidDataSize { expected, actual } => {
write!(f, "Invalid data size. Expected {expected}, got {actual}.")
}
ValidationError::InvalidVertexSize { expected, actual } => {
write!(f, "Invalid vertex size. Expected {expected}, got {actual}.",)
}
ValidationError::DuplicatedAttributeDescriptor => {
write!(f, "A duplicate of a descriptor was found.")
}
ValidationError::ConflictingShaderLocations(v) => {
write!(f, "Duplicate shader locations were found {v}.")
}
}
}
}
impl VertexBuffer {
/// Creates new vertex buffer from provided data and with the given layout of the vertex type `T`.
pub fn new<T>(vertex_count: usize, data: Vec<T>) -> Result<Self, ValidationError>
where
T: VertexTrait,
{
let bytes = BytesStorage::new(data);
let layout = T::layout();
// Validate for duplicates and invalid layout.
for descriptor in layout {
for other_descriptor in layout {
if !std::ptr::eq(descriptor, other_descriptor) {
if descriptor.usage == other_descriptor.usage {
return Err(ValidationError::DuplicatedAttributeDescriptor);
} else if descriptor.shader_location == other_descriptor.shader_location {
return Err(ValidationError::ConflictingShaderLocations(
descriptor.shader_location as usize,
));
}
}
}
}
let mut dense_layout = Vec::new();
// Validate everything as much as possible and calculate vertex size.
let mut sparse_layout = [None; VertexAttributeUsage::Count as usize];
let mut vertex_size_bytes = 0u8;
for attribute in layout.iter() {
if attribute.size < 1 || attribute.size > 4 {
return Err(ValidationError::InvalidAttributeSize(
attribute.size as usize,
));
}
let vertex_attribute = VertexAttribute {
usage: attribute.usage,
data_type: attribute.data_type,
size: attribute.size,
divisor: attribute.divisor,
offset: vertex_size_bytes,
shader_location: attribute.shader_location,
};
dense_layout.push(vertex_attribute);
// Map dense to sparse layout to increase performance.
sparse_layout[attribute.usage as usize] = Some(vertex_attribute);
vertex_size_bytes += attribute.size * attribute.data_type.size();
}
let expected_data_size = vertex_count * vertex_size_bytes as usize;
if expected_data_size != bytes.len() {
return Err(ValidationError::InvalidDataSize {
expected: expected_data_size,
actual: bytes.len(),
});
}
Ok(Self {
vertex_size: vertex_size_bytes,
vertex_count: vertex_count as u32,
data_hash: calculate_data_hash(&bytes),
data: bytes,
layout_hash: calculate_layout_hash(&dense_layout),
sparse_layout,
dense_layout,
})
}
/// Returns a reference to underlying data buffer slice.
pub fn raw_data(&self) -> &[u8] {
&self.data
}
/// Returns true if buffer does not contain any vertex, false - otherwise.
pub fn is_empty(&self) -> bool {
self.vertex_count == 0
}
    /// Returns the cached data hash. The cached value is guaranteed to be up to date.
pub fn data_hash(&self) -> u64 {
self.data_hash
}
    /// Returns the hash of the vertex buffer layout. The cached value is guaranteed to be up to date.
    /// The hash can be used to check whether the layout has changed.
pub fn layout_hash(&self) -> u64 {
self.layout_hash
}
/// Provides mutable access to content of the buffer.
///
/// # Performance
///
/// This method returns special structure which has custom destructor that
/// calculates hash of the data once modification is over. You **must** hold
/// this structure as long as possible while modifying contents of the buffer.
/// Do **not** even try to do this:
///
/// ```no_run
/// use fyrox::{
/// scene::mesh::buffer::{VertexBuffer, VertexWriteTrait, VertexAttributeUsage},
/// core::algebra::Vector3
/// };
/// fn do_something(buffer: &mut VertexBuffer) {
/// for i in 0..buffer.vertex_count() {
/// buffer
/// .modify() // Doing this in a loop will cause HUGE performance issues!
/// .get_mut(i as usize)
/// .unwrap()
/// .write_3_f32(VertexAttributeUsage::Position, Vector3::<f32>::default())
/// .unwrap();
/// }
/// }
/// ```
///
/// Instead do this:
///
/// ```no_run
/// use fyrox::{
/// scene::mesh::buffer::{VertexBuffer, VertexWriteTrait, VertexAttributeUsage},
/// core::algebra::Vector3
/// };
/// fn do_something(buffer: &mut VertexBuffer) {
/// let mut buffer_modifier = buffer.modify();
/// for mut vertex in buffer_modifier.iter_mut() {
/// vertex
/// .write_3_f32(VertexAttributeUsage::Position, Vector3::<f32>::default())
/// .unwrap();
/// }
/// }
/// ```
///
    /// Why do we even need such complications? The guard is used for lazy hash calculation, which
    /// in turn drives automatic upload of the contents to the GPU when the content has changed.
pub fn modify(&mut self) -> VertexBufferRefMut<'_> {
VertexBufferRefMut {
vertex_buffer: self,
}
}
/// Checks if an attribute of `usage` exists.
pub fn has_attribute(&self, usage: VertexAttributeUsage) -> bool {
self.sparse_layout[usage as usize].is_some()
}
/// Returns vertex buffer layout.
pub fn layout(&self) -> &[VertexAttribute] {
&self.dense_layout
}
    /// Tries to cast the internal data buffer to a slice of the given type. It may fail if
    /// the size of the type does not match the vertex size claimed by the layout.
pub fn cast_data_ref<T>(&self) -> Result<&[T], ValidationError>
where
T: VertexTrait,
{
if std::mem::size_of::<T>() == self.vertex_size as usize {
Ok(unsafe {
std::slice::from_raw_parts(
self.data.as_ptr() as *const T,
self.data.len() / std::mem::size_of::<T>(),
)
})
} else {
Err(ValidationError::InvalidVertexSize {
expected: self.vertex_size,
actual: std::mem::size_of::<T>() as u8,
})
}
}
/// Creates iterator that emits read accessors for vertices.
pub fn iter(&self) -> impl Iterator<Item = VertexViewRef<'_>> + '_ {
VertexViewRefIterator {
data: &self.data,
offset: 0,
end: self.vertex_size as usize * self.vertex_count as usize,
vertex_size: self.vertex_size,
sparse_layout: &self.sparse_layout,
}
}
/// Returns a read accessor of n-th vertex.
pub fn get(&self, n: usize) -> Option<VertexViewRef<'_>> {
let offset = n * self.vertex_size as usize;
if offset < self.data.len() {
Some(VertexViewRef {
vertex_data: &self.data[offset..(offset + self.vertex_size as usize)],
sparse_layout: &self.sparse_layout,
})
} else {
None
}
}
/// Returns exact amount of vertices in the buffer.
pub fn vertex_count(&self) -> u32 {
self.vertex_count
}
/// Return vertex size of the buffer.
pub fn vertex_size(&self) -> u8 {
self.vertex_size
}
/// Finds free location for an attribute in the layout.
pub fn find_free_shader_location(&self) -> u8 {
let mut location = None;
for attribute in self.dense_layout.chunks_exact(2) {
let left = &attribute[0];
let right = &attribute[1];
if (left.shader_location as i32 - right.shader_location as i32).abs() > 1 {
// We have a gap, use some value from it.
let origin = left.shader_location.min(right.shader_location);
location = Some(origin + 1);
break;
}
}
location.unwrap_or_else(|| {
self.dense_layout
.last()
.map(|a| a.shader_location)
.unwrap_or(0)
+ 1
})
}
}
struct VertexViewRefIterator<'a> {
data: &'a [u8],
sparse_layout: &'a [Option<VertexAttribute>],
offset: usize,
end: usize,
vertex_size: u8,
}
impl<'a> Iterator for VertexViewRefIterator<'a> {
type Item = VertexViewRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
if self.offset >= self.end {
None
} else {
let view = VertexViewRef {
vertex_data: &self.data[self.offset..(self.offset + self.vertex_size as usize)],
sparse_layout: self.sparse_layout,
};
self.offset += self.vertex_size as usize;
Some(view)
}
}
}
struct VertexViewMutIterator<'a> {
ptr: *mut u8,
sparse_layout: &'a [Option<VertexAttribute>],
end: *mut u8,
vertex_size: u8,
marker: PhantomData<&'a mut u8>,
}
impl<'a> Iterator for VertexViewMutIterator<'a> {
type Item = VertexViewMut<'a>;
fn next(&mut self) -> Option<Self::Item> {
if self.ptr >= self.end {
None
} else {
unsafe {
let data = std::slice::from_raw_parts_mut(self.ptr, self.vertex_size as usize);
let view = VertexViewMut {
vertex_data: data,
sparse_layout: self.sparse_layout,
};
self.ptr = self.ptr.add(self.vertex_size as usize);
Some(view)
}
}
}
}
/// Read accessor for a vertex with some layout.
#[derive(Debug)]
pub struct VertexViewRef<'a> {
vertex_data: &'a [u8],
sparse_layout: &'a [Option<VertexAttribute>],
}
impl<'a> PartialEq for VertexViewRef<'a> {
fn eq(&self, other: &Self) -> bool {
self.vertex_data == other.vertex_data
}
}
/// Read/write accessor for a vertex with some layout.
#[derive(Debug)]
pub struct VertexViewMut<'a> {
vertex_data: &'a mut [u8],
sparse_layout: &'a [Option<VertexAttribute>],
}
impl<'a> PartialEq for VertexViewMut<'a> {
fn eq(&self, other: &Self) -> bool {
self.vertex_data == other.vertex_data
}
}
/// An error that may occur during fetching using vertex read/write accessor.
#[derive(Debug)]
pub enum VertexFetchError {
/// Trying to read/write non-existent attribute.
NoSuchAttribute(VertexAttributeUsage),
/// IO error.
Io(std::io::Error),
}
impl std::error::Error for VertexFetchError {}
impl Display for VertexFetchError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
VertexFetchError::NoSuchAttribute(v) => {
write!(f, "No attribute with such usage: {v:?}")
}
VertexFetchError::Io(v) => {
write!(f, "An i/o error has occurred {v:?}")
}
}
}
}
impl From<std::io::Error> for VertexFetchError {
fn from(e: Error) -> Self {
Self::Io(e)
}
}
/// A trait for read-only vertex data accessor.
pub trait VertexReadTrait {
#[doc(hidden)]
fn data_layout_ref(&self) -> (&[u8], &[Option<VertexAttribute>]);
/// Tries to read an attribute with given usage as a pair of two f32.
#[inline(always)]
fn read_2_f32(&self, usage: VertexAttributeUsage) -> Result<Vector2<f32>, VertexFetchError> {
let (data, layout) = self.data_layout_ref();
if let Some(attribute) = layout.get(usage as usize).unwrap() {
let x = LittleEndian::read_f32(&data[(attribute.offset as usize)..]);
let y = LittleEndian::read_f32(&data[(attribute.offset as usize + 4)..]);
Ok(Vector2::new(x, y))
} else {
Err(VertexFetchError::NoSuchAttribute(usage))
}
}
    /// Tries to read an attribute with given usage as three f32 values.
#[inline(always)]
fn read_3_f32(&self, usage: VertexAttributeUsage) -> Result<Vector3<f32>, VertexFetchError> {
let (data, layout) = self.data_layout_ref();
if let Some(attribute) = layout.get(usage as usize).unwrap() {
let x = LittleEndian::read_f32(&data[(attribute.offset as usize)..]);
let y = LittleEndian::read_f32(&data[(attribute.offset as usize + 4)..]);
let z = LittleEndian::read_f32(&data[(attribute.offset as usize + 8)..]);
Ok(Vector3::new(x, y, z))
} else {
Err(VertexFetchError::NoSuchAttribute(usage))
}
}
    /// Tries to read an attribute with given usage as four f32 values.
#[inline(always)]
fn read_4_f32(&self, usage: VertexAttributeUsage) -> Result<Vector4<f32>, VertexFetchError> {
let (data, layout) = self.data_layout_ref();
if let Some(attribute) = layout.get(usage as usize).unwrap() {
let x = LittleEndian::read_f32(&data[(attribute.offset as usize)..]);
let y = LittleEndian::read_f32(&data[(attribute.offset as usize + 4)..]);
let z = LittleEndian::read_f32(&data[(attribute.offset as usize + 8)..]);
let w = LittleEndian::read_f32(&data[(attribute.offset as usize + 12)..]);
Ok(Vector4::new(x, y, z, w))
} else {
Err(VertexFetchError::NoSuchAttribute(usage))
}
}
    /// Tries to read an attribute with given usage as four u8 values.
#[inline(always)]
fn read_4_u8(&self, usage: VertexAttributeUsage) -> Result<Vector4<u8>, VertexFetchError> {
let (data, layout) = self.data_layout_ref();
if let Some(attribute) = layout.get(usage as usize).unwrap() {
let offset = attribute.offset as usize;
let x = data[offset];
let y = data[offset + 1];
let z = data[offset + 2];
let w = data[offset + 3];
Ok(Vector4::new(x, y, z, w))
} else {
Err(VertexFetchError::NoSuchAttribute(usage))
}
}
}
impl<'a> VertexReadTrait for VertexViewRef<'a> {
fn data_layout_ref(&self) -> (&[u8], &[Option<VertexAttribute>]) {
(self.vertex_data, self.sparse_layout)
}
}
/// A trait for read/write vertex data accessor.
pub trait VertexWriteTrait: VertexReadTrait {
#[doc(hidden)]
fn data_layout_mut(&mut self) -> (&mut [u8], &[Option<VertexAttribute>]);
/// Tries to write an attribute with given usage as a pair of two f32.
fn write_2_f32(
&mut self,
usage: VertexAttributeUsage,
value: Vector2<f32>,
) -> Result<(), VertexFetchError>;
    /// Tries to write an attribute with given usage as three f32 values.
fn write_3_f32(
&mut self,
usage: VertexAttributeUsage,
value: Vector3<f32>,
) -> Result<(), VertexFetchError>;
    /// Tries to write an attribute with given usage as four f32 values.
fn write_4_f32(
&mut self,
usage: VertexAttributeUsage,
value: Vector4<f32>,
) -> Result<(), VertexFetchError>;
    /// Tries to write an attribute with given usage as four u8 values.
fn write_4_u8(
&mut self,
usage: VertexAttributeUsage,
value: Vector4<u8>,
) -> Result<(), VertexFetchError>;
}
impl<'a> VertexReadTrait for VertexViewMut<'a> {
fn data_layout_ref(&self) -> (&[u8], &[Option<VertexAttribute>]) {
(self.vertex_data, self.sparse_layout)
}
}
impl<'a> VertexWriteTrait for VertexViewMut<'a> {
#[inline(always)]
fn data_layout_mut(&mut self) -> (&mut [u8], &[Option<VertexAttribute>]) {
(self.vertex_data, self.sparse_layout)
}
#[inline(always)]
fn write_2_f32(
&mut self,
usage: VertexAttributeUsage,
value: Vector2<f32>,
) -> Result<(), VertexFetchError> {
let (data, layout) = self.data_layout_mut();
if let Some(attribute) = layout.get(usage as usize).unwrap() {
LittleEndian::write_f32(&mut data[(attribute.offset as usize)..], value.x);
LittleEndian::write_f32(&mut data[(attribute.offset as usize + 4)..], value.y);
Ok(())
} else {
Err(VertexFetchError::NoSuchAttribute(usage))
}
}
#[inline(always)]
fn write_3_f32(
&mut self,
usage: VertexAttributeUsage,
value: Vector3<f32>,
) -> Result<(), VertexFetchError> {
let (data, layout) = self.data_layout_mut();
if let Some(attribute) = layout.get(usage as usize).unwrap() {
LittleEndian::write_f32(&mut data[(attribute.offset as usize)..], value.x);
LittleEndian::write_f32(&mut data[(attribute.offset as usize + 4)..], value.y);
LittleEndian::write_f32(&mut data[(attribute.offset as usize + 8)..], value.z);
Ok(())
} else {
Err(VertexFetchError::NoSuchAttribute(usage))
}
}
#[inline(always)]
fn write_4_f32(
&mut self,
usage: VertexAttributeUsage,
value: Vector4<f32>,
) -> Result<(), VertexFetchError> {
let (data, layout) = self.data_layout_mut();
if let Some(attribute) = layout.get(usage as usize).unwrap() {
LittleEndian::write_f32(&mut data[(attribute.offset as usize)..], value.x);
LittleEndian::write_f32(&mut data[(attribute.offset as usize + 4)..], value.y);
LittleEndian::write_f32(&mut data[(attribute.offset as usize + 8)..], value.z);
LittleEndian::write_f32(&mut data[(attribute.offset as usize + 12)..], value.w);
Ok(())
} else {
Err(VertexFetchError::NoSuchAttribute(usage))
}
}
#[inline(always)]
fn write_4_u8(
&mut self,
usage: VertexAttributeUsage,
value: Vector4<u8>,
) -> Result<(), VertexFetchError> {
let (data, layout) = self.data_layout_mut();
if let Some(attribute) = layout.get(usage as usize).unwrap() {
data[attribute.offset as usize] = value.x;
data[(attribute.offset + 1) as usize] = value.y;
data[(attribute.offset + 2) as usize] = value.z;
data[(attribute.offset + 3) as usize] = value.w;
Ok(())
} else {
Err(VertexFetchError::NoSuchAttribute(usage))
}
}
}
/// A buffer for data that defines connections between vertices.
#[derive(Visit, Default, Clone, Debug)]
pub struct TriangleBuffer {
triangles: Vec<TriangleDefinition>,
data_hash: u64,
}
fn calculate_triangle_buffer_hash(triangles: &[TriangleDefinition]) -> u64 {
let mut hasher = FxHasher::default();
triangles.hash(&mut hasher);
hasher.finish()
}
impl TriangleBuffer {
/// Creates new triangle buffer with given set of triangles.
pub fn new(triangles: Vec<TriangleDefinition>) -> Self {
let hash = calculate_triangle_buffer_hash(&triangles);
Self {
triangles,
data_hash: hash,
}
}
/// Creates new ref iterator.
pub fn iter(&self) -> impl Iterator<Item = &TriangleDefinition> {
self.triangles.iter()
}
/// Returns a ref to inner data with triangles.
pub fn triangles_ref(&self) -> &[TriangleDefinition] {
&self.triangles
}
/// Sets a new set of triangles.
pub fn set_triangles(&mut self, triangles: Vec<TriangleDefinition>) {
self.data_hash = calculate_triangle_buffer_hash(&triangles);
self.triangles = triangles;
}
/// Returns amount of triangles in the buffer.
pub fn len(&self) -> usize {
self.triangles.len()
}
/// Returns true if the buffer is empty, false - otherwise.
pub fn is_empty(&self) -> bool {
self.triangles.is_empty()
}
    /// Returns the cached data hash. The cached value is guaranteed to be up to date.
pub fn data_hash(&self) -> u64 {
self.data_hash
}
/// See VertexBuffer::modify for more info.
pub fn modify(&mut self) -> TriangleBufferRefMut<'_> {
TriangleBufferRefMut {
triangle_buffer: self,
}
}
}
impl Index<usize> for TriangleBuffer {
type Output = TriangleDefinition;
fn index(&self, index: usize) -> &Self::Output {
&self.triangles[index]
}
}
/// See VertexBuffer::modify for more info.
pub struct TriangleBufferRefMut<'a> {
triangle_buffer: &'a mut TriangleBuffer,
}
impl<'a> Deref for TriangleBufferRefMut<'a> {
type Target = TriangleBuffer;
fn deref(&self) -> &Self::Target {
self.triangle_buffer
}
}
impl<'a> DerefMut for TriangleBufferRefMut<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.triangle_buffer
}
}
impl<'a> Drop for TriangleBufferRefMut<'a> {
fn drop(&mut self) {
self.triangle_buffer.data_hash =
calculate_triangle_buffer_hash(&self.triangle_buffer.triangles);
}
}
impl<'a> TriangleBufferRefMut<'a> {
/// Returns mutable iterator.
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut TriangleDefinition> {
self.triangles.iter_mut()
}
/// Adds new triangle in the buffer.
pub fn push(&mut self, triangle: TriangleDefinition) {
self.triangles.push(triangle)
}
/// Clears the buffer.
pub fn clear(&mut self) {
self.triangles.clear();
}
}
impl<'a> Index<usize> for TriangleBufferRefMut<'a> {
type Output = TriangleDefinition;
fn index(&self, index: usize) -> &Self::Output {
&self.triangle_buffer.triangles[index]
}
}
impl<'a> IndexMut<usize> for TriangleBufferRefMut<'a> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.triangle_buffer.triangles[index]
}
}
#[cfg(test)]
mod test {
use crate::scene::mesh::buffer::VertexTrait;
use crate::{
core::algebra::{Vector2, Vector3, Vector4},
scene::mesh::buffer::{
VertexAttributeDataType, VertexAttributeDescriptor, VertexAttributeUsage, VertexBuffer,
VertexReadTrait,
},
};
#[derive(Clone, Copy, PartialEq, Debug)]
#[repr(C)]
struct Vertex {
position: Vector3<f32>,
tex_coord: Vector2<f32>,
second_tex_coord: Vector2<f32>,
normal: Vector3<f32>,
tangent: Vector4<f32>,
bone_weights: Vector4<f32>,
bone_indices: Vector4<u8>,
}
impl VertexTrait for Vertex {
fn layout() -> &'static [VertexAttributeDescriptor] {
static LAYOUT: [VertexAttributeDescriptor; 7] = [
VertexAttributeDescriptor {
usage: VertexAttributeUsage::Position,
data_type: VertexAttributeDataType::F32,
size: 3,
divisor: 0,
shader_location: 0,
},
VertexAttributeDescriptor {
usage: VertexAttributeUsage::TexCoord0,
data_type: VertexAttributeDataType::F32,
size: 2,
divisor: 0,
shader_location: 1,
},
VertexAttributeDescriptor {
usage: VertexAttributeUsage::TexCoord1,
data_type: VertexAttributeDataType::F32,
size: 2,
divisor: 0,
shader_location: 2,
},
VertexAttributeDescriptor {
usage: VertexAttributeUsage::Normal,
data_type: VertexAttributeDataType::F32,
size: 3,
divisor: 0,
shader_location: 3,
},
VertexAttributeDescriptor {
usage: VertexAttributeUsage::Tangent,
data_type: VertexAttributeDataType::F32,
size: 4,
divisor: 0,
shader_location: 4,
},
VertexAttributeDescriptor {
usage: VertexAttributeUsage::BoneWeight,
data_type: VertexAttributeDataType::F32,
size: 4,
divisor: 0,
shader_location: 5,
},
VertexAttributeDescriptor {
usage: VertexAttributeUsage::BoneIndices,
data_type: VertexAttributeDataType::U8,
size: 4,
divisor: 0,
shader_location: 6,
},
];
&LAYOUT
}
}
const VERTICES: [Vertex; 3] = [
Vertex {
position: Vector3::new(1.0, 2.0, 3.0),
tex_coord: Vector2::new(0.0, 1.0),
second_tex_coord: Vector2::new(1.0, 0.0),
normal: Vector3::new(0.0, 1.0, 0.0),
tangent: Vector4::new(1.0, 0.0, 0.0, 1.0),
bone_weights: Vector4::new(0.25, 0.25, 0.25, 0.25),
bone_indices: Vector4::new(1, 2, 3, 4),
},
Vertex {
position: Vector3::new(1.0, 2.0, 3.0),
tex_coord: Vector2::new(0.0, 1.0),
second_tex_coord: Vector2::new(1.0, 0.0),
normal: Vector3::new(0.0, 1.0, 0.0),
tangent: Vector4::new(1.0, 0.0, 0.0, 1.0),
bone_weights: Vector4::new(0.25, 0.25, 0.25, 0.25),
bone_indices: Vector4::new(1, 2, 3, 4),
},
Vertex {
position: Vector3::new(1.0, 2.0, 3.0),
tex_coord: Vector2::new(0.0, 1.0),
second_tex_coord: Vector2::new(1.0, 0.0),
normal: Vector3::new(0.0, 1.0, 0.0),
tangent: Vector4::new(1.0, 0.0, 0.0, 1.0),
bone_weights: Vector4::new(0.25, 0.25, 0.25, 0.25),
bone_indices: Vector4::new(1, 2, 3, 4),
},
];
fn test_view_original_equal<T: VertexReadTrait>(view: T, original: &Vertex) {
assert_eq!(
view.read_3_f32(VertexAttributeUsage::Position).unwrap(),
original.position
);
assert_eq!(
view.read_2_f32(VertexAttributeUsage::TexCoord0).unwrap(),
original.tex_coord
);
assert_eq!(
view.read_2_f32(VertexAttributeUsage::TexCoord1).unwrap(),
original.second_tex_coord
);
assert_eq!(
view.read_3_f32(VertexAttributeUsage::Normal).unwrap(),
original.normal
);
assert_eq!(
view.read_4_f32(VertexAttributeUsage::Tangent).unwrap(),
original.tangent
);
assert_eq!(
view.read_4_f32(VertexAttributeUsage::BoneWeight).unwrap(),
original.bone_weights
);
assert_eq!(
view.read_4_u8(VertexAttributeUsage::BoneIndices).unwrap(),
original.bone_indices
);
}
fn create_test_buffer() -> VertexBuffer {
VertexBuffer::new(VERTICES.len(), VERTICES.to_vec()).unwrap()
}
#[test]
fn test_empty() {
VertexBuffer::new::<Vertex>(0, vec![]).unwrap();
}
#[test]
fn test_iter() {
let buffer = create_test_buffer();
for (view, original) in buffer.iter().zip(VERTICES.iter()) {
test_view_original_equal(view, original);
}
}
#[test]
fn test_iter_mut() {
let mut buffer = create_test_buffer();
for (view, original) in buffer.modify().iter_mut().zip(VERTICES.iter()) {
test_view_original_equal(view, original);
}
}
#[test]
fn test_vertex_duplication() {
let mut buffer = create_test_buffer();
buffer.modify().duplicate(0);
assert_eq!(buffer.vertex_count(), 4);
assert_eq!(buffer.get(0).unwrap(), buffer.get(3).unwrap())
}
#[test]
fn test_pop_vertex() {
let mut buffer = create_test_buffer();
let vertex = buffer.modify().pop_vertex::<Vertex>().unwrap();
assert_eq!(buffer.vertex_count(), 2);
assert_eq!(vertex, VERTICES[2]);
}
#[test]
fn test_remove_last_vertex() {
let mut buffer = create_test_buffer();
buffer.modify().remove_last_vertex();
assert_eq!(buffer.vertex_count(), 2);
}
#[test]
fn test_add_attribute() {
let mut buffer = create_test_buffer();
let fill = Vector2::new(0.25, 0.75);
let test_index = 1;
buffer
.modify()
.add_attribute(
VertexAttributeDescriptor {
usage: VertexAttributeUsage::TexCoord2,
data_type: VertexAttributeDataType::F32,
size: 2,
divisor: 0,
shader_location: 7,
},
fill,
)
.unwrap();
#[derive(Clone, Copy, PartialEq, Debug)]
#[repr(C)]
struct ExtendedVertex {
position: Vector3<f32>,
tex_coord: Vector2<f32>,
second_tex_coord: Vector2<f32>,
normal: Vector3<f32>,
tangent: Vector4<f32>,
bone_weights: Vector4<f32>,
bone_indices: Vector4<u8>,
third_tex_coord: Vector2<f32>, // NEW
}
let new_1 = ExtendedVertex {
position: VERTICES[test_index].position,
tex_coord: VERTICES[test_index].tex_coord,
second_tex_coord: VERTICES[test_index].second_tex_coord,
normal: VERTICES[test_index].normal,
tangent: VERTICES[test_index].tangent,
bone_weights: VERTICES[test_index].bone_weights,
bone_indices: VERTICES[test_index].bone_indices,
third_tex_coord: fill,
};
assert_eq!(
buffer.vertex_size,
std::mem::size_of::<ExtendedVertex>() as u8
);
let view = buffer.get(test_index).unwrap();
assert_eq!(
view.read_3_f32(VertexAttributeUsage::Position).unwrap(),
new_1.position
);
assert_eq!(
view.read_2_f32(VertexAttributeUsage::TexCoord0).unwrap(),
new_1.tex_coord
);
assert_eq!(
view.read_2_f32(VertexAttributeUsage::TexCoord1).unwrap(),
new_1.second_tex_coord
);
assert_eq!(
view.read_2_f32(VertexAttributeUsage::TexCoord2).unwrap(),
new_1.third_tex_coord
);
assert_eq!(
view.read_3_f32(VertexAttributeUsage::Normal).unwrap(),
new_1.normal
);
assert_eq!(
view.read_4_f32(VertexAttributeUsage::Tangent).unwrap(),
new_1.tangent
);
assert_eq!(
view.read_4_f32(VertexAttributeUsage::BoneWeight).unwrap(),
new_1.bone_weights
);
assert_eq!(
view.read_4_u8(VertexAttributeUsage::BoneIndices).unwrap(),
new_1.bone_indices
);
}
}
| true |
42a1a653c006b6c3580ad8bf08f14dd36d0cb874
|
Rust
|
lightsofapollo/toml-json-rs
|
/src/tomljson/json_toml.rs
|
UTF-8
| 2,766 | 3.328125 | 3 |
[] |
no_license
|
use toml;
use rustc_serialize::json::{self, Json};
use std::io::{BufReader, Read};
use std::collections::BTreeMap;
pub struct JsonConverter;
impl JsonConverter {
pub fn new() -> JsonConverter {
JsonConverter
}
fn convert_json(&self, json: &json::Json) -> toml::Value {
match json {
            // XXX: Should we attempt to be smart and convert some to Float and others
// to Integer?
&Json::F64(ref v) => toml::Value::Float(v.clone()),
&Json::I64(ref v) => toml::Value::Integer(v.clone()),
&Json::U64(ref v) => toml::Value::Integer(v.clone() as i64),
&Json::String(ref v) => toml::Value::String(v.clone()),
&Json::Boolean(ref v) => toml::Value::Boolean(v.clone()),
// XXX: What else could this be aside from an empty string?
&Json::Null => toml::Value::String("".to_string()),
&Json::Array(ref list) => {
// Array is Vec<toml::Value>.
let mut toml_list = Vec::<toml::Value>::new();
// let mut toml_list = toml::Array::new();
for json_value in list.iter() {
toml_list.push(self.convert_json(json_value));
}
toml::Value::Array(toml_list)
}
&Json::Object(ref obj) => {
// let mut toml_map = toml::Table::new();
let mut toml_map = BTreeMap::<String, toml::Value>::new();
for (key, json_value) in obj.iter() {
toml_map.insert(key.clone(), self.convert_json(json_value));
}
toml::Value::Table(toml_map)
}
}
}
pub fn convert<R>(&self, reader: &mut R) -> Result<toml::Value, json::BuilderError>
where R: Read
{
// First we must convert the reader into a JSON type.
let buf_reader = BufReader::new(reader);
let char_iter = buf_reader.chars().map(|char_res| {
            // XXX: Is there some better way to handle this than failing the entire
// task?
char_res.unwrap()
});
let mut builder = json::Builder::new(char_iter);
let json = try!(builder.build());
Ok(self.convert_json(&json))
}
}
#[cfg(test)]
mod tests {
use super::JsonConverter;
use std::fs::File;
use std::path::Path;
#[test]
fn convert_i64() {
println!("{}", 120i64);
}
#[test]
fn convert() {
let converter = JsonConverter::new();
let path = Path::new("examples/short.json");
let mut file = File::open(&path).unwrap();
let toml = converter.convert(&mut file).expect("toml convert");
println!("{}", toml.to_string());
}
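
    // A hedged sketch (not in the original suite): converts an in-memory JSON
    // string through `std::io::Cursor`, so no fixture file is required. The
    // JSON literal below is made up purely for illustration.
    #[test]
    fn convert_in_memory() {
        use std::io::Cursor;
        let converter = JsonConverter::new();
        let mut reader = Cursor::new(r#"{"name": "demo", "count": 3, "tags": ["a", "b"]}"#);
        let toml = converter.convert(&mut reader).expect("toml convert");
        println!("{}", toml.to_string());
    }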
}
| true |
3e7c363e429ec961ebae27e3887041479226e4e9
|
Rust
|
base0x10/Marzipan
|
/marzipan-core/src/emulators/generic_emulator/emulation_operations.rs
|
UTF-8
| 28,535 | 2.78125 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
use redcode::{CompleteInstruction, Modifier, Opcode};
use super::{
offset, operands::RegisterValues, processes::ProcessQueueSet, pspace,
};
use crate::{
emulator_core::{EmulatorError, EmulatorResult},
CoreAddr,
};
/// The results of operand evaluation and the core state required to emulation
/// an instruction.
pub struct OpInputs<'a> {
/// Currently executing warrior
pub warrior_id: u64,
/// Decoded and evaluated cached operands and current instruction
pub regs: &'a RegisterValues,
/// Currently configured core size
pub core_size: CoreAddr,
/// The process queue. Emulation functions don't pop the PC, but do
/// enqueue processes.
pub pq: &'a mut ProcessQueueSet,
/// Reference to in-core instructions.
pub core: &'a mut [CompleteInstruction],
/// PSPACE state shared by processes in the core.
pub pspace: &'a mut pspace::PSpace,
}
impl<'a> OpInputs<'a> {
/// Gets a mutable reference to an in-core address
///
/// This helper improves error handling and allows enabling clippy's
/// `indexing_slicing` lint. However I'd like to rip it out.
/// Specifically, it consumes ownership of [`OpInputs`] meaning that
/// I need to jump through hoops to keep the borrow checker happy.
///
/// # Errors
///
/// Returns an error if `addr` is invalid
fn core_get_mut(
self: OpInputs<'a>,
addr: CoreAddr,
) -> EmulatorResult<&'a mut CompleteInstruction> {
self.core
.get_mut(addr as usize)
.ok_or(EmulatorError::InternalError(
"attempt to write to invalid core index",
))
}
}
/// Implementation of the [`Opcode::Dat`] instruction
#[allow(
clippy::unnecessary_wraps,
reason = "Keep API to opcode functions identical"
)]
#[allow(
clippy::missing_const_for_fn,
reason = "Keep API to opcode functions identical"
)]
pub fn dat_op(_inputs: OpInputs) -> EmulatorResult<()> {
// Do nothing past operand evaluation
// Queue no further values to the process queue
Ok(())
}
/// Implementation of the [`Opcode::Mov`] instruction
pub fn mov_op(inputs: OpInputs) -> EmulatorResult<()> {
let next_pc = offset(inputs.regs.current.idx, 1, inputs.core_size)?;
inputs.pq.push_back(next_pc, inputs.warrior_id)?;
match inputs.regs.current.instr.modifier {
Modifier::A => {
// A MOV.A instruction would replace the A-number of the
// instruction pointed to by the B-pointer with the A-number of the
// A-instruction.
let a_value = inputs.regs.a.a_field;
let b_pointer = inputs.regs.b.idx;
inputs.core_get_mut(b_pointer)?.a_field = a_value;
}
Modifier::B => {
// A MOV.B instruction would replace the B-number of the
// instruction pointed to by the B-pointer with the B-number of the
// A-instruction.
let a_value = inputs.regs.a.b_field;
let b_pointer = inputs.regs.b.idx;
inputs.core_get_mut(b_pointer)?.b_field = a_value;
}
Modifier::AB => {
// A MOV.AB instruction would replace the B-number of the
// instruction pointed to by the B-pointer with the A-number of the
// A-instruction.
let a_value = inputs.regs.a.a_field;
let b_pointer = inputs.regs.b.idx;
inputs.core_get_mut(b_pointer)?.b_field = a_value;
}
Modifier::BA => {
// A MOV.BA instruction would replace the A-number of the
// instruction pointed to by the B-pointer with the B-number of the
// A-instruction.
let a_value = inputs.regs.a.b_field;
let b_pointer = inputs.regs.b.idx;
inputs.core_get_mut(b_pointer)?.a_field = a_value;
}
Modifier::F => {
// A MOV.F instruction would replace the A-number of the
// instruction pointed to by the B-pointer with the A-number of the
// A-instruction and would also replace the B-number of the
// instruction pointed to by the B-pointer with the B-number of the
// A-instruction.
            let a_value_a = inputs.regs.a.a_field;
            // Both fields are taken from the A-instruction, matching the comment
            // above (reading the B-field from the B-operand would only rewrite
            // the target's own value).
            let a_value_b = inputs.regs.a.b_field;
            let b_pointer = inputs.regs.b.idx;
            let target = inputs.core_get_mut(b_pointer)?;
            target.a_field = a_value_a;
            target.b_field = a_value_b;
}
        Modifier::X => {
            // A MOV.X instruction would replace the A-number of the
            // instruction pointed to by the B-pointer with the B-number of the
            // A-instruction and would also replace the B-number of the
            // instruction pointed to by the B-pointer with the A-number of the
            // A-instruction, i.e. both fields of the A-instruction are moved
            // into the opposite fields of the B-target.
            let a_value_a = inputs.regs.a.a_field;
            let a_value_b = inputs.regs.a.b_field;
            let b_pointer = inputs.regs.b.idx;
            let target = inputs.core_get_mut(b_pointer)?;
            target.a_field = a_value_b;
            target.b_field = a_value_a;
        }
Modifier::I => {
// A MOV.I instruction would replace the instruction pointed to by
// the B-pointer with the A-instruction.
let a_value_a = inputs.regs.a.a_field;
let a_value_b = inputs.regs.a.b_field;
let a_value_instr = inputs.regs.a.instr;
let b_pointer = inputs.regs.b.idx;
let target = inputs.core_get_mut(b_pointer)?;
target.instr = a_value_instr;
target.a_field = a_value_a;
target.b_field = a_value_b;
}
};
Ok(())
}
/// Helper function that determines which arithmetic operation is required, and
/// performs it for two [`CoreAddr`] values.
///
/// For instances where the right hand argument is treated as a divisor, if it
/// is zero, `None` is returned.
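///
/// A worked example with hypothetical values: with a core size of 8000,
/// `Add` applied to 7999 and 5 wraps to 4, while `Div` or `Mod` with a
/// right-hand side of 0 yields `None`.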
fn perform_arithmetic(
lhs: CoreAddr,
rhs: CoreAddr,
inputs: &OpInputs,
) -> Option<EmulatorResult<CoreAddr>> {
    // Performs the arithmetic modulo core size, returning None only on division by zero
match inputs.regs.current.instr.opcode {
Opcode::Add => Some(offset(lhs, i64::from(rhs), inputs.core_size)),
Opcode::Sub => {
// offset() deals with negatives correctly
Some(offset(
lhs,
0_i64.checked_sub(i64::from(rhs))?,
inputs.core_size,
))
}
Opcode::Mul => {
let product = u64::from(lhs).checked_mul(u64::from(rhs));
let normalized = product
.and_then(|p| p.checked_rem(u64::from(inputs.core_size)))
.and_then(|e| u32::try_from(e).ok());
Some(normalized.ok_or(EmulatorError::InternalError(
"Impossible overflow when multiplying field values",
)))
}
Opcode::Div => (rhs != 0).then(|| {
lhs.checked_div(rhs).ok_or(EmulatorError::InternalError(
"Impossible division by zero",
))
}),
Opcode::Mod => (rhs != 0).then(|| {
lhs.checked_rem(rhs).ok_or(EmulatorError::InternalError(
"Impossible division by zero",
))
}),
_ => Some(Err(EmulatorError::InternalError(
"fn arithmetic_op should only be called with Add, Sub, Mul, Div, \
or Mod",
))),
}
}
/// Implementation of the [`Opcode::Add`], [`Opcode::Sub`], [`Opcode::Mul`],
/// [`Opcode::Div`], and [`Opcode::Mod`] instruction
pub fn arithmetic_op(inputs: OpInputs) -> EmulatorResult<()> {
let a = inputs.regs.a;
let b = inputs.regs.b;
let next_pc = offset(inputs.regs.current.idx, 1, inputs.core_size)?;
let war_id = inputs.warrior_id;
if inputs.core_size == 0 {
return Err(EmulatorError::InternalError("Core Size cannot be zero"));
}
match inputs.regs.current.instr.modifier {
Modifier::A => {
// Proceeds with A value set to the A number of the A instruction
// and the B value set to the A number of the B instruction.
// Writes to the A number of the B target
// let b_pointer = b.idx as usize;
let a_value = a.a_field;
let b_value = b.a_field;
if let Some(res) = perform_arithmetic(b_value, a_value, &inputs) {
inputs.pq.push_back(next_pc, war_id)?;
inputs.core_get_mut(b.idx)?.a_field = res?;
};
}
Modifier::B => {
// Proceeds with A value set to the B number of the A instruction
// and the B value set to the B number of the B instruction.
// Writes to the B number of the B target
let a_value = a.b_field;
let b_value = b.b_field;
if let Some(res) = perform_arithmetic(b_value, a_value, &inputs) {
inputs.pq.push_back(next_pc, war_id)?;
inputs.core_get_mut(b.idx)?.b_field = res?;
}
}
Modifier::AB => {
// Proceeds with A value set to the A number of the A instruction
// and the B value set to the B number of the B instruction.
// Writes to the B number of the B target
// let b_pointer = b.idx as usize;
let a_value = a.a_field;
let b_value = b.b_field;
if let Some(res) = perform_arithmetic(b_value, a_value, &inputs) {
inputs.pq.push_back(next_pc, war_id)?;
inputs.core_get_mut(b.idx)?.b_field = res?;
}
}
Modifier::BA => {
// Proceeds with A value set to the B number of the A instruction
// and the B value set to the A number of the B instruction.
// Writes to the A number of the B target
let a_value = a.b_field;
let b_value = b.a_field;
if let Some(res) = perform_arithmetic(b_value, a_value, &inputs) {
inputs.pq.push_back(next_pc, war_id)?;
inputs.core_get_mut(b.idx)?.a_field = res?;
}
}
Modifier::F | Modifier::I => {
// Add/Sub.I functions as Add/Sub.F would
// F Proceeds with A value set to the A number followed by the B
// number of the A instruction, and the B value set to the A number
// followed by the B number of the B instruction.
// Writes to first the A number followed by the B number of the
// B target
let first_result =
perform_arithmetic(b.a_field, a.a_field, &inputs);
let second_result =
perform_arithmetic(b.b_field, a.b_field, &inputs);
match (first_result, second_result) {
(Some(first), Some(second)) => {
// if there was no division by zero, continue as normal
inputs.pq.push_back(next_pc, war_id)?;
let target = inputs.core_get_mut(b.idx)?;
target.a_field = first?;
target.b_field = second?;
}
(Some(first), None) => {
// If second result had a division by zero, write out first
// result but don't write second, and
// don't queue PC + 1
inputs.core_get_mut(b.idx)?.a_field = first?;
}
(None, Some(second)) => {
// If first result had a division by zero, write out second
// result but don't write first, and
// don't queue PC + 1
inputs.core_get_mut(b.idx)?.b_field = second?;
}
(None, None) => {
// If both results had division by zero don't write anything
// to core don't queue PC + 1
}
};
}
Modifier::X => {
// Proceeds with A value set to the A number followed by the B
// number of the A instruction, and the B value set to the B number
// followed by the A number of the B instruction.
// Writes to first the B number followed by the A number of the
// B target
// let b_pointer = b.idx as usize;
let first_result =
perform_arithmetic(b.b_field, a.a_field, &inputs);
let second_result =
perform_arithmetic(b.a_field, a.b_field, &inputs);
match (first_result, second_result) {
(Some(first), Some(second)) => {
// if there was no division by zero, continue as normal
inputs.pq.push_back(next_pc, war_id)?;
let target = inputs.core_get_mut(b.idx)?;
target.b_field = first?;
target.a_field = second?;
}
(Some(first), None) => {
// If second result had a division by zero, write out first
// result but don't write second, and
// don't queue PC + 1
inputs.core_get_mut(b.idx)?.b_field = first?;
}
(None, Some(second)) => {
// If first result had a division by zero, write out second
// result but don't write first, and
// don't queue PC + 1
inputs.core_get_mut(b.idx)?.a_field = second?;
}
(None, None) => {
// If both results had division by zero don't write anything
// to core don't queue PC + 1
}
}
}
}
Ok(())
}
/// Implementation of the [`Opcode::Jmp`] instruction
pub fn jmp_op(inputs: OpInputs) -> EmulatorResult<()> {
// jmp unconditionally adds the b pointer to the process queue
inputs.pq.push_back(inputs.regs.a.idx, inputs.warrior_id)?;
Ok(())
}
/// Implementation of the [`Opcode::Jmz`] instruction
pub fn jmz_op(inputs: OpInputs) -> EmulatorResult<()> {
// JMZ tests the B-value to determine if it is zero. If the B-value is
// zero, the sum of the program counter and the A-pointer is queued.
// Otherwise, the next instruction is queued (PC + 1). JMZ.I functions
// as JMZ.F would, i.e. it jumps if both the A-number and the B-number of
// the B-instruction are zero.
let a = inputs.regs.a;
let b = inputs.regs.b;
let is_zero = match inputs.regs.current.instr.modifier {
Modifier::A | Modifier::BA => {
// B value is the A-number of the B instruction
b.a_field == 0
}
Modifier::B | Modifier::AB => {
// B value is the B-number of the B instruction
b.b_field == 0
}
Modifier::F | Modifier::X | Modifier::I => {
// B value is the A and B numbers of the B instruction
b.a_field == 0 && b.b_field == 0
}
};
if is_zero {
inputs.pq.push_back(a.idx, inputs.warrior_id)?;
} else {
let next_pc = offset(inputs.regs.current.idx, 1, inputs.core_size)?;
inputs.pq.push_back(next_pc, inputs.warrior_id)?;
}
Ok(())
}
/// Implementation of the [`Opcode::Jmn`] instruction
pub fn jmn_op(inputs: OpInputs) -> EmulatorResult<()> {
// JMN tests the B-value to determine if it is zero. If the B-value is not
// zero, the sum of the program counter and the A-pointer is queued.
// Otherwise, the next instruction is queued (PC + 1). JMN.I functions as
// JMN.F would, i.e. it jumps if both the A-number and the B-number of the
// B-instruction are non-zero. This is not the negation of the condition
// for JMZ.F.
let a = inputs.regs.a;
let b = inputs.regs.b;
let is_non_zero = match inputs.regs.current.instr.modifier {
Modifier::A | Modifier::BA => {
// B value is the A-number of the B instruction
b.a_field != 0
}
Modifier::B | Modifier::AB => {
// B value is the B-number of the B instruction
b.b_field != 0
}
Modifier::F | Modifier::X | Modifier::I => {
// B value is the A and B numbers of the B instruction
b.a_field != 0 || b.b_field != 0
}
};
if is_non_zero {
inputs.pq.push_back(a.idx, inputs.warrior_id)?;
} else {
let next_pc = offset(inputs.regs.current.idx, 1, inputs.core_size);
inputs.pq.push_back(next_pc?, inputs.warrior_id)?;
}
Ok(())
}
/// Implementation of the [`Opcode::Djn`] instruction
pub fn djn_op(inputs: OpInputs) -> EmulatorResult<()> {
// DJN decrements the B-value and the B-target, then tests the B-value to
// determine if it is zero. If the decremented B-value is not zero, the
// sum of the program counter and the A-pointer is queued. Otherwise, the
// next instruction is queued (PC + 1). DJN.I functions as DJN.F would,
    // i.e. it decrements both A/B-numbers of the B-value and the
// B-target, and jumps if both A/B-numbers of the B-value are non-zero.
let a = inputs.regs.a;
let b = inputs.regs.b;
let size = inputs.core_size;
let decrement = |x| offset(x, -1, size);
let next_pc = offset(inputs.regs.current.idx, 1, inputs.core_size)?;
let war_id = inputs.warrior_id;
let modifier = inputs.regs.current.instr.modifier;
let Some(b_target) = inputs.core.get_mut(b.idx as usize) else {
return Err(EmulatorError::InternalError(
"attempt to write to invalid core index",
))
};
match modifier {
Modifier::A | Modifier::BA => {
// B value is the A-number of the B instruction
// decrement b target
let b_target_a = b_target.a_field;
let b_target_a = decrement(b_target_a)?;
b_target.a_field = b_target_a;
let non_zero = decrement(b.a_field)? != 0;
if non_zero {
inputs.pq.push_back(a.idx, war_id)?;
} else {
inputs.pq.push_back(next_pc, war_id)?;
}
}
Modifier::B | Modifier::AB => {
// B value is the B-number of the B instruction
// decrement b target
let b_target_b = b_target.b_field;
let b_target_b = decrement(b_target_b)?;
b_target.b_field = b_target_b;
let non_zero = decrement(b.b_field)? != 0;
if non_zero {
inputs.pq.push_back(a.idx, war_id)?;
} else {
inputs.pq.push_back(next_pc, war_id)?;
}
}
Modifier::F | Modifier::X | Modifier::I => {
// B value is the A and B numbers of the B instruction
// decrement b target
let b_target_a = b_target.a_field;
let b_target_a = decrement(b_target_a)?;
let b_target_b = b_target.b_field;
let b_target_b = decrement(b_target_b)?;
b_target.a_field = b_target_a;
b_target.b_field = b_target_b;
let non_zero =
decrement(b.a_field)? != 0 || decrement(b.b_field)? != 0;
if non_zero {
inputs.pq.push_back(a.idx, war_id)?;
} else {
inputs.pq.push_back(next_pc, war_id)?;
}
}
};
Ok(())
}
/// Implementation of the [`Opcode::Spl`] instruction
pub fn spl_op(inputs: OpInputs) -> EmulatorResult<()> {
// SPL queues the next instruction (PC + 1) and then queues the sum of the
// program counter and A-pointer. If the queue is full, only the next
// instruction is queued.
let next_pc = offset(inputs.regs.current.idx, 1, inputs.core_size);
inputs.pq.push_back(next_pc?, inputs.warrior_id)?;
inputs.pq.push_back(inputs.regs.a.idx, inputs.warrior_id)?;
Ok(())
}
/// Implementation of the [`Opcode::Slt`] instruction
pub fn slt_op(inputs: OpInputs) -> EmulatorResult<()> {
// SLT compares the A-value to the B-value. If the A-value is less than
// the B-value, the instruction after the next instruction (PC + 2) is
// queued (skipping the next instruction). Otherwise, the next
// instruction is queued (PC + 1). SLT.I functions as SLT.F would.
let a = inputs.regs.a;
let b = inputs.regs.b;
let is_less_than = match inputs.regs.current.instr.modifier {
Modifier::A => a.a_field < b.a_field,
Modifier::B => a.b_field < b.b_field,
Modifier::AB => a.a_field < b.b_field,
Modifier::BA => a.b_field < b.a_field,
Modifier::F | Modifier::I => {
a.a_field < b.a_field && a.b_field < b.b_field
}
Modifier::X => a.a_field < b.b_field && a.b_field < b.a_field,
};
// Increment PC twice if the condition holds, otherwise increment once
let amt = if is_less_than { 2 } else { 1 };
inputs.pq.push_back(
offset(inputs.regs.current.idx, amt, inputs.core_size)?,
inputs.warrior_id,
)?;
Ok(())
}
/// Implementation of the [`Opcode::Cmp`] and [`Opcode::Seq`] instructions
pub fn cmp_op(inputs: OpInputs) -> EmulatorResult<()> {
// CMP compares the A-value to the B-value. If the result of the
// comparison is equal, the instruction after the next instruction
// (PC + 2) is queued (skipping the next instruction). Otherwise, the
    // next instruction is queued (PC + 1).
let a = inputs.regs.a;
let b = inputs.regs.b;
let is_equal = match inputs.regs.current.instr.modifier {
Modifier::A => a.a_field == b.a_field,
Modifier::B => a.b_field == b.b_field,
Modifier::AB => a.a_field == b.b_field,
Modifier::BA => a.b_field == b.a_field,
Modifier::F => a.a_field == b.a_field && a.b_field == b.b_field,
Modifier::X => a.a_field == b.b_field && a.b_field == b.a_field,
Modifier::I => {
a.instr == b.instr
&& a.a_field == b.a_field
&& a.b_field == b.b_field
}
};
// Increment PC twice if the condition holds, otherwise increment once
let amt = if is_equal { 2 } else { 1 };
inputs.pq.push_back(
offset(inputs.regs.current.idx, amt, inputs.core_size)?,
inputs.warrior_id,
)?;
Ok(())
}
/// Implementation of the [`Opcode::Sne`] instruction
pub fn sne_op(inputs: OpInputs) -> EmulatorResult<()> {
// SNE compares the A-value to the B-value. If the result of the
// comparison is not equal, the instruction after the next instruction
// (PC + 2) is queued (skipping the next instruction). Otherwise, the
// next instruction is queued (PC + 1).
let a = inputs.regs.a;
let b = inputs.regs.b;
let is_not_equal = match inputs.regs.current.instr.modifier {
Modifier::A => a.a_field != b.a_field,
Modifier::B => a.b_field != b.b_field,
Modifier::AB => a.a_field != b.b_field,
Modifier::BA => a.b_field != b.a_field,
Modifier::F => a.a_field != b.a_field || a.b_field != b.b_field,
Modifier::X => a.a_field != b.b_field || a.b_field != b.a_field,
Modifier::I => {
a.instr != b.instr
|| a.a_field != b.a_field
|| a.b_field != b.b_field
}
};
// Increment PC twice if the condition holds, otherwise increment once
let amt = if is_not_equal { 2 } else { 1 };
inputs.pq.push_back(
offset(inputs.regs.current.idx, amt, inputs.core_size)?,
inputs.warrior_id,
)?;
Ok(())
}
/// Implementation of the [`Opcode::Nop`] instruction
pub fn nop_op(inputs: OpInputs) -> EmulatorResult<()> {
// Increments and queues the PC but otherwise has no effect past operand
// evaluation
inputs.pq.push_back(
offset(inputs.regs.current.idx, 1, inputs.core_size)?,
inputs.warrior_id,
)?;
Ok(())
}
/// Implementation of the [`Opcode::Ldp`] instruction
pub fn ldp_op(inputs: OpInputs) -> EmulatorResult<()> {
// Reads a value from the PSPACE, writing it into core memory
//
// LDP and STP are not defined in any ICWS standard. This implementation
// is based on pMARS's behavior.
//
// The source index uses one of the fields from the A instruction, taken
// modulo pspace size. This is not the field that MOV would use as a
// source index, but rather the field that MOV would use as the source
// value. Each location in PSPACE stores a single field, so the multi-
// field modifiers of (X, F, I) are not meaningful. They are defined to
// operate identically to B.
//
    // Similar to the source index, the destination is the same destination that
    // would be written to by a MOV instruction using the same modifier. Again,
// X, F, and I are not meaningful, and behave like B.
//
// Further PSPACE notes:
//
// It is expected that the PSPACE is not cleared between rounds in a
// battle, so a warrior may use information from one round to pick a
// strategy in the next round.
//
    // Multiple warriors can share the same PSPACE. Hypothetically, this could
// be used in multi-warrior hills where multiple warriors with the same
// author could have an advantage with communication.
//
// The value at index 0 is not shared between warriors with the same pin,
    // and it does not retain its value between rounds. Instead its initial
// value indicates the outcome of the previous round in this battle.
//
// The pspace address space is typically smaller than the core size, and
// almost always a factor of the core size. By convention, it's 1/16 the
// size of the full core. It's not required for the size to be a factor,
// however if this isn't the case, simple assumptions break.
//
// For example the pspace locations x and x+1 will usually be adjacent
// (modulo pspace size) except when pspace is not a factor of coresize
    // and x + 1 == coresize, which wraps the core address back to 0.
// In general: (x % coresize) % pspace size != (x % pspace size) % coresize
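    //
    // Worked example (hypothetical sizes): with coresize = 10 and a pspace
    // size of 4 (not a factor), x = 13 gives (13 % 10) % 4 = 3 but
    // (13 % 4) % 10 = 1, and the adjacent core addresses 9 and 0 map to
    // pspace indices 1 and 0, which are not adjacent modulo 4. With a
    // pspace size of 5 (a factor), addresses 9 and 0 map to 4 and 0, which
    // are adjacent modulo 5.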
//
// Queue PC + 1
inputs.pq.push_back(
offset(inputs.regs.current.idx, 1, inputs.core_size)?,
inputs.warrior_id,
)?;
let a = inputs.regs.a;
let b = inputs.regs.b;
let source_index = match inputs.regs.current.instr.modifier {
Modifier::A | Modifier::AB => a.a_field,
Modifier::B
| Modifier::BA
| Modifier::F
| Modifier::X
| Modifier::I => a.b_field,
};
let value = inputs.pspace.read(source_index, inputs.warrior_id)?;
match inputs.regs.current.instr.modifier {
Modifier::A | Modifier::BA => {
let target = inputs.core_get_mut(b.idx)?;
target.a_field = value;
}
Modifier::B
| Modifier::AB
| Modifier::F
| Modifier::X
| Modifier::I => {
let target = inputs.core_get_mut(b.idx)?;
target.b_field = value;
}
};
Ok(())
}
/// Implementation of the [`Opcode::Stp`] instruction
pub fn stp_op(inputs: OpInputs) -> EmulatorResult<()> {
    // Takes a value from the A-operand and stores it into PSPACE at the index
    // given by the B-operand
//
// LDP and STP are not defined in any ICWS standard. This implementation
// is based on pMARS's behavior.
let a = inputs.regs.a;
let b = inputs.regs.b;
let source_value = match inputs.regs.current.instr.modifier {
Modifier::A | Modifier::AB => {
// A field of a operand
a.a_field
}
Modifier::B
| Modifier::BA
| Modifier::F
| Modifier::X
| Modifier::I => {
// B field of a operand
a.b_field
}
};
let pspace_dest_index = match inputs.regs.current.instr.modifier {
Modifier::A | Modifier::BA => {
// a field of b operand
b.a_field
}
Modifier::B
| Modifier::AB
| Modifier::F
| Modifier::X
| Modifier::I => {
// b field of b operand
b.b_field
}
};
inputs
.pspace
.write(pspace_dest_index, source_value, inputs.warrior_id)?;
// Queue PC + 1
inputs.pq.push_back(
offset(inputs.regs.current.idx, 1, inputs.core_size)?,
inputs.warrior_id,
)?;
Ok(())
}
| true |
db256a6f68c9cf98d0fada5f777c575053cc73ef
|
Rust
|
widforss/compiler
|
/src/ast/borrowchecker/util.rs
|
UTF-8
| 3,476 | 2.859375 | 3 |
[] |
no_license
|
use super::{Ast, BorrowError, ErrorKind, Expr, Lifetimes, Literal, State, UnOp, Value};
use std::collections::HashMap;
use std::collections::HashSet;
const LOCAL_LIFE: &'static str = "'!local";
pub fn borrow_expr<'a>(
expr: &'a Expr<'a>,
var_id: &mut u64,
ast: &'a Ast<'a>,
borrowstate: &mut State<(Vec<&'a str>, u64)>,
) -> Result<(Vec<&'a str>, u64), BorrowError<'a>> {
match &expr.value {
Value::Literal(Literal::Ref(_)) => panic!(),
Value::UnOp(UnOp::Ref(_), arg) => {
let (mut inner_life, id) = borrow_expr(arg, var_id, ast, borrowstate)?;
inner_life.push(LOCAL_LIFE);
Ok((inner_life, id))
}
Value::UnOp(UnOp::Deref, arg) => {
let (mut inner_life, id) = borrow_expr(arg, var_id, ast, borrowstate)?;
inner_life.pop();
Ok((inner_life, id))
}
Value::Literal(_) | Value::UnOp(_, _) | Value::BinOp(_, _, _) => {
*var_id += 1;
Ok((vec![], *var_id - 1))
}
Value::Call(ident, args) => borrow_call(&ident, &args, var_id, ast, borrowstate),
Value::Ident(ident) => {
let lifetimes = borrowstate.get(*ident).unwrap();
Ok(lifetimes.clone())
}
}
}
fn borrow_call<'a>(
ident: &'a str,
args: &'a Vec<Expr<'a>>,
var_id: &mut u64,
ast: &'a Ast<'a>,
borrowstate: &mut State<(Vec<&'a str>, u64)>,
) -> Result<(Vec<&'a str>, u64), BorrowError<'a>> {
let Ast(functions) = ast;
let func = functions.get(ident).unwrap();
let mut varset = HashSet::new();
let mut lifemap: HashMap<&'a str, &'a str> = HashMap::new();
let mut param_iter = func.params.iter();
for arg in args.iter() {
let (arg_lifes, id) = borrow_expr(arg, var_id, ast, borrowstate)?;
let param = param_iter.next().unwrap();
if varset.insert(id) == false {
return Err(BorrowError::new(
Some(arg.span),
ErrorKind::SameReference
))
}
let Lifetimes(param_lifes) = ¶m.lifetimes;
let mut param_life_iter = param_lifes.iter();
for arg_life in arg_lifes.iter() {
let param_life = *param_life_iter.next().unwrap();
if let None = func.lifetimes.get(param_life) {
return Err(BorrowError::new(
Some(param.span),
ErrorKind::UndeclaredLifetime,
));
}
if let Some(expect) = lifemap.insert(param_life, arg_life.clone()) {
if &expect != arg_life {
return Err(BorrowError::new(Some(arg.span), ErrorKind::LifetimeError));
}
}
}
}
let Lifetimes(func_lifes) = &func.life;
for life in func_lifes.iter() {
if let None = func.lifetimes.get(life) {
return Err(BorrowError::new(
Some(func.return_span),
ErrorKind::UndeclaredLifetime,
));
}
}
let mut lifetimes = vec![];
let Lifetimes(func_lifes) = &func.life;
for func_life in func_lifes {
if let Some(translated) = lifemap.get(func_life) {
lifetimes.push(*translated);
} else {
return Err(BorrowError::new(
Some(func.return_span),
ErrorKind::UnmappedLifetime,
))
}
}
*var_id += 1;
Ok((lifetimes, *var_id - 1))
}
| true |
a9d7eb639a874a7d75543ebb47a183e89f2d13d3
|
Rust
|
kulinsky/leetcode-problems
|
/array/single-number/main.rs
|
UTF-8
| 838 | 3.40625 | 3 |
[] |
no_license
|
// https://leetcode.com/explore/interview/card/top-interview-questions-easy/92/array/549/
// Given a non-empty array of integers nums, every element appears twice except for one. Find that single one.
// You must implement a solution with a linear runtime complexity and use only constant extra space.
//
// Example 1:
// Input: nums = [2,2,1]
// Output: 1
//
// Example 2:
// Input: nums = [4,1,2,1,2]
// Output: 4
//
// Example 3:
// Input: nums = [1]
// Output: 1
//
// Constraints:
// 1 <= nums.length <= 3 * 10^4
// -3 * 10^4 <= nums[i] <= 3 * 10^4
// Each element in the array appears twice except for one element which appears only once
impl Solution {
pub fn single_number(nums: Vec<i32>) -> i32 {
        // XOR cancels out values that appear twice (x ^ x == 0, x ^ 0 == x),
        // so folding the whole array leaves only the element that appears once.
        let mut res = nums[0];
for i in 1..nums.len() {
res = res ^ nums[i];
}
res
}
}
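
// A minimal usage sketch (not part of the original solution); the struct
// declaration and `main` below exist only so the file runs standalone.
struct Solution;

fn main() {
    // The three examples from the problem statement above.
    assert_eq!(Solution::single_number(vec![2, 2, 1]), 1);
    assert_eq!(Solution::single_number(vec![4, 1, 2, 1, 2]), 4);
    assert_eq!(Solution::single_number(vec![1]), 1);
    println!("all examples passed");
}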
| true |
86868905ea03a10e45494cb3e0579db3739f6bc1
|
Rust
|
ksk001100/genetic_algorithm_rs
|
/src/main.rs
|
UTF-8
| 551 | 2.671875 | 3 |
[] |
no_license
|
mod ga;
use ga::*;
const GENERATION: i32 = 100;
fn main() {
let mut pop = Population::new(100, 10, 0.6, 0.2);
pop.evaluate();
println!("Generation : 0");
println!("Max : {}", pop.max().rank);
println!("Min : {}", pop.min().rank);
println!("-----------------------------");
for gen in 1..GENERATION {
pop.evolution();
println!("Generation : {}", gen);
println!("Max : {}", pop.max().rank);
println!("Min : {}", pop.min().rank);
println!("-----------------------------");
}
}
| true |
e79fdd61cee0d191fdf17fbfc11a81297e7c1aa2
|
Rust
|
icedland/iced
|
/src/rust/iced-x86-js/src/op_access.rs
|
UTF-8
| 1,301 | 2.671875 | 3 |
[
"MIT"
] |
permissive
|
// SPDX-License-Identifier: MIT
// Copyright (C) 2018-present iced project and contributors
use wasm_bindgen::prelude::*;
// GENERATOR-BEGIN: Enum
// ⚠️This was generated by GENERATOR!🦹♂️
/// Operand, register and memory access
#[wasm_bindgen]
#[derive(Copy, Clone)]
pub enum OpAccess {
/// Nothing is read and nothing is written
None = 0,
/// The value is read
Read = 1,
/// The value is sometimes read and sometimes not
CondRead = 2,
/// The value is completely overwritten
Write = 3,
/// Conditional write, sometimes it's written and sometimes it's not modified
CondWrite = 4,
/// The value is read and written
ReadWrite = 5,
/// The value is read and sometimes written
ReadCondWrite = 6,
/// The memory operand doesn't refer to memory (eg. `LEA` instruction) or it's an instruction that doesn't read the data to a register or doesn't write to the memory location, it just prefetches/invalidates it, eg. `INVLPG`, `PREFETCHNTA`, `VGATHERPF0DPS`, etc. Some of those instructions still check if the code can access the memory location.
NoMemAccess = 7,
}
// GENERATOR-END: Enum
#[allow(dead_code)]
pub(crate) fn iced_to_op_access(value: iced_x86_rust::OpAccess) -> OpAccess {
// SAFETY: the enums are exactly identical
unsafe { std::mem::transmute(value as u8) }
}
| true |
bf7a0a5a8e1723fa48ba170b6ecd68a58de2d07a
|
Rust
|
bokuweb/docx-rs
|
/docx-core/src/reader/paragraph_property.rs
|
UTF-8
| 5,358 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
use std::io::Read;
use std::str::FromStr;
use xml::attribute::OwnedAttribute;
use xml::reader::{EventReader, XmlEvent};
use super::*;
use super::attributes::*;
use crate::types::*;
impl ElementReader for ParagraphProperty {
fn read<R: Read>(
r: &mut EventReader<R>,
attrs: &[OwnedAttribute],
) -> Result<Self, ReaderError> {
let mut p = ParagraphProperty::new();
loop {
let e = r.next();
match e {
Ok(XmlEvent::StartElement {
attributes, name, ..
}) => {
let e = XMLElement::from_str(&name.local_name).unwrap();
match e {
XMLElement::Indent => {
let (start, end, special, start_chars, hanging_chars, first_line_chars) =
read_indent(&attributes)?;
p = p.indent(start, special, end, start_chars);
if let Some(chars) = hanging_chars {
p = p.hanging_chars(chars);
}
if let Some(chars) = first_line_chars {
p = p.first_line_chars(chars);
}
continue;
}
XMLElement::Spacing => {
if let Ok(spacing) =
attributes::line_spacing::read_line_spacing(&attributes)
{
p = p.line_spacing(spacing);
}
continue;
}
XMLElement::Justification => {
if let Ok(v) = AlignmentType::from_str(&attributes[0].value) {
p = p.align(v);
}
continue;
}
XMLElement::ParagraphStyle => {
p = p.style(&attributes[0].value);
continue;
}
XMLElement::RunProperty => {
if let Ok(run_pr) = RunProperty::read(r, attrs) {
p.run_property = run_pr;
}
continue;
}
XMLElement::DivId => {
if let Some(val) = read_val(&attributes) {
p.div_id = Some(val)
}
continue;
}
XMLElement::NumberingProperty => {
if let Ok(num_pr) = NumberingProperty::read(r, attrs) {
p = p.numbering_property(num_pr);
}
continue;
}
XMLElement::OutlineLvl => {
if let Some(val) = read_val(&attributes) {
if let Ok(val) = usize::from_str(&val) {
p = p.outline_lvl(val);
}
}
continue;
}
XMLElement::KeepNext => {
if read_bool(&attributes) {
p.keep_next = Some(true);
}
}
XMLElement::KeepLines => {
if read_bool(&attributes) {
p.keep_lines = Some(true);
}
}
XMLElement::PageBreakBefore => {
if read_bool(&attributes) {
p.page_break_before = Some(true);
}
}
XMLElement::WidowControl => {
if read_bool(&attributes) {
p.widow_control = Some(true);
}
}
XMLElement::ParagraphPropertyChange => {
if let Ok(ppr_change) = ParagraphPropertyChange::read(r, &attributes) {
p.paragraph_property_change = Some(ppr_change);
}
}
XMLElement::SectionProperty => {
if let Ok(sp) = SectionProperty::read(r, &attributes) {
p.section_property = Some(sp);
}
}
_ => {}
}
}
Ok(XmlEvent::EndElement { name, .. }) => {
let e = XMLElement::from_str(&name.local_name).unwrap();
if e == XMLElement::ParagraphProperty {
return Ok(p);
}
}
Err(_) => return Err(ReaderError::XMLReadError),
_ => {}
}
}
}
}
| true |
a08cc7c0a5beb4ba05cb9be6fc61856faa75b437
|
Rust
|
RustPython/RustPython
|
/derive/src/lib.rs
|
UTF-8
| 3,847 | 2.609375 | 3 |
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
#![recursion_limit = "128"]
#![doc(html_logo_url = "https://raw.githubusercontent.com/RustPython/RustPython/main/logo.png")]
#![doc(html_root_url = "https://docs.rs/rustpython-derive/")]
use proc_macro::TokenStream;
use rustpython_derive_impl as derive_impl;
use syn::parse_macro_input;
#[proc_macro_derive(FromArgs, attributes(pyarg))]
pub fn derive_from_args(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input);
derive_impl::derive_from_args(input).into()
}
#[proc_macro_attribute]
pub fn pyclass(attr: TokenStream, item: TokenStream) -> TokenStream {
let attr = parse_macro_input!(attr);
let item = parse_macro_input!(item);
derive_impl::pyclass(attr, item).into()
}
/// Helper macro to define `Exception` types.
/// It is more or less an alias for the `pyclass` macro.
///
/// This macro serves the goal of generating multiple
/// `BaseException` / `Exception`
/// subtypes in a uniform and convenient manner.
/// It looks like `SimpleExtendsException` in `CPython`.
/// <https://github.com/python/cpython/blob/main/Objects/exceptions.c>
#[proc_macro_attribute]
pub fn pyexception(attr: TokenStream, item: TokenStream) -> TokenStream {
let attr = parse_macro_input!(attr);
let item = parse_macro_input!(item);
derive_impl::pyexception(attr, item).into()
}
#[proc_macro_attribute]
pub fn pymodule(attr: TokenStream, item: TokenStream) -> TokenStream {
let attr = parse_macro_input!(attr);
let item = parse_macro_input!(item);
derive_impl::pymodule(attr, item).into()
}
#[proc_macro_derive(PyStructSequence)]
pub fn pystruct_sequence(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input);
derive_impl::pystruct_sequence(input).into()
}
#[proc_macro_derive(TryIntoPyStructSequence)]
pub fn pystruct_sequence_try_from_object(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input);
derive_impl::pystruct_sequence_try_from_object(input).into()
}
struct Compiler;
impl derive_impl::Compiler for Compiler {
fn compile(
&self,
source: &str,
mode: rustpython_compiler::Mode,
module_name: String,
) -> Result<rustpython_compiler::CodeObject, Box<dyn std::error::Error>> {
use rustpython_compiler::{compile, CompileOpts};
Ok(compile(source, mode, module_name, CompileOpts::default())?)
}
}
#[proc_macro]
pub fn py_compile(input: TokenStream) -> TokenStream {
derive_impl::py_compile(input.into(), &Compiler).into()
}
#[proc_macro]
pub fn py_freeze(input: TokenStream) -> TokenStream {
derive_impl::py_freeze(input.into(), &Compiler).into()
}
#[proc_macro_derive(PyPayload)]
pub fn pypayload(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input);
derive_impl::pypayload(input).into()
}
/// Use on a struct with named fields like `struct A{x:PyRef<B>, y:PyRef<C>}` to impl `Traverse` for the datatype.
///
/// Use `#[pytraverse(skip)]` on fields you wish not to trace.
///
/// Add the `trace` attr to `#[pyclass]` to make it impl `MaybeTraverse`, which will call `Traverse`'s `traverse` method and
/// thus make it traceable (even from a type-erased PyObject), i.e. write `#[pyclass(trace)]`.
/// # Example
/// ```rust, ignore
/// #[pyclass(module = false, traverse)]
/// #[derive(Default, Traverse)]
/// pub struct PyList {
/// elements: PyRwLock<Vec<PyObjectRef>>,
/// #[pytraverse(skip)]
/// len: AtomicCell<usize>,
/// }
/// ```
/// This creates both `MaybeTraverse`, which calls `Traverse`'s `traverse` method, and an impl of `Traverse`
/// for `PyList` that calls the elements' `traverse` method and ignores the `len` field.
#[proc_macro_derive(Traverse, attributes(pytraverse))]
pub fn pytraverse(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
let item = parse_macro_input!(item);
derive_impl::pytraverse(item).into()
}
| true |
ffa3fed2c848861569a17e6593a887410600d132
|
Rust
|
harryaskham/scrabrudo
|
/src/precompute.rs
|
UTF-8
| 8,776 | 2.890625 | 3 |
[] |
no_license
|
/// Utility for precomputing the Monte Carlo probabilities for each word in each situation.
// TODO: Can we get away without redefining the world?
#[macro_use]
extern crate log;
extern crate pretty_env_logger;
extern crate speculate;
#[macro_use]
extern crate maplit;
#[macro_use(c)]
extern crate cute;
#[macro_use]
extern crate itertools;
extern crate bincode;
#[macro_use]
extern crate lazy_static;
extern crate clap;
extern crate rayon;
extern crate sstable;
// TODO: Can we get away without redefining the world?
pub mod bet;
pub mod dict;
pub mod die;
pub mod game;
pub mod hand;
pub mod player;
pub mod testing;
pub mod tile;
use crate::bet::*;
use crate::dict::*;
use clap::App;
use rayon::prelude::*;
use speculate::speculate;
use sstable::{Options, TableBuilder};
use std::collections::HashMap;
use std::collections::HashSet;
use std::env;
use std::fs::File;
use std::fs::OpenOptions;
use std::sync::Arc;
use std::sync::Mutex;
// TODO: I stole this code - find a library or something.
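/// Returns every subset of `slice`; e.g. `powerset(&[1, 2])` yields
/// `[[], [1], [2], [1, 2]]` (the ordering follows the bitmask counter below).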
pub fn powerset<T: Clone>(slice: &[T]) -> Vec<Vec<T>> {
let mut v: Vec<Vec<T>> = Vec::new();
for mask in 0..(1 << slice.len()) {
let mut ss: Vec<T> = vec![];
let mut bitset = mask;
while bitset > 0 {
// isolate the rightmost bit to select one item
let rightmost: u64 = bitset & !(bitset - 1);
// turn the isolated bit into an array index
let idx = rightmost.trailing_zeros();
let item = (*slice.get(idx as usize).unwrap()).clone();
ss.push(item);
// zero the trailing bit
bitset &= bitset - 1;
}
v.push(ss);
}
v
}
/// Sorts a word by its chars.
fn sort_word(word: &String) -> String {
let mut chars = word.chars().collect::<Vec<char>>();
chars.sort_by(|a, b| a.cmp(b));
chars.iter().collect()
}
/// Generate the word and all its substrings.
/// e.g. HATE, ATE, HTE, HA, HT, HE, AT, AE, TE, H, A, T, E
/// Each word will be sorted to avoid further duplicates:
/// e.g. AEHT, AET, EHT, AH, HT, EH, AT, AE, ET, H, A, T, E
///
/// This is equivalent to the powerset of the characters of the word minus the empty word, sorted,
/// and filtered down to only those things that fit on the table.
fn all_sorted_substrings(word: &String, max_length: usize) -> HashSet<String> {
let chars = &(word.chars().collect::<Vec<char>>())[..];
powerset(chars)
.par_iter()
.map(|cs| cs.into_iter().collect::<String>())
.filter(|w| w.len() > 0 && w.len() <= max_length)
.map(|w| sort_word(&w))
.collect()
}
/// Creates the lookup in a single iteration.
/// First we explode out via flat_map to all possible substrings, and then we map these to their
/// Monte Carlo probabilities.
fn create_lookup(
lookup_path: &str,
words: &HashSet<String>,
max_num_items: usize,
num_trials: u32,
) {
// Expand out the dict to subwords.
let word_counter = Arc::new(Mutex::new(0));
let expanded_words = words
.par_iter()
.flat_map(|w| {
*word_counter.lock().unwrap() += 1;
info! {"{} / {} words expanded", word_counter.lock().unwrap(), words.len()};
all_sorted_substrings(w, max_num_items)
})
.collect::<HashSet<String>>();
info!("Created {} word expansions", expanded_words.len());
// Compute all the probabilities and persist to disk.
let prob_counter = Arc::new(Mutex::new(0));
let mut probs = expanded_words
.par_iter()
.map(|s| {
*prob_counter.lock().unwrap() += 1;
info! {"{} / {} probs calculated", prob_counter.lock().unwrap(), expanded_words.len()};
// Compute probs and encode
let probs = bincode::serialize(&probabilities(&s, max_num_items, num_trials)).unwrap();
(s, probs)
})
.collect::<Vec<(&String, Vec<u8>)>>();
// Write the probs out to an SSTable.
// First the keys need to be sorted.
probs.sort_by(|a, b| a.0.cmp(&b.0));
let lookup_file = OpenOptions::new()
.write(true)
.create(true)
.open(lookup_path)
.unwrap();
let mut builder = TableBuilder::new(Options::default(), lookup_file);
for prob_row in probs {
builder.add(prob_row.0.as_bytes(), &prob_row.1).unwrap();
}
builder.finish().unwrap();
}
/// Computes the various probabilities of finding the given substring in each possible number of
/// items.
/// This returns a vec where index equates to the number of items we're searching in.
/// TODO: Do a separate MCMC to generate Palafico probabilities.
fn probabilities(s: &String, max_num_items: usize, num_trials: u32) -> Vec<f64> {
(0..=max_num_items)
.into_iter()
.map(|n| monte_carlo(n as u32, s, num_trials))
.collect()
}
fn main() {
pretty_env_logger::init();
let matches = App::new("Scrabrudo Precomputation")
.version("0.1")
.about("Precomputes lookups for Scrabrudo")
.author("Harry Askham")
.args_from_usage(
"-n, --num_tiles=[NUM_TILES] 'the max number of tiles to compute'
-t, --num_trials=[NUM_TRIALS] 'the number of trials to run'
-d, --dictionary_path=[DICTIONARY] 'the path to the .txt dict to use'
-l, --lookup_path=[LOOKUP] 'the path to the lookup DB to write'",
)
.get_matches();
// "mode" and "num_players" are not declared in args_from_usage above, so these
// bindings always fall back to their defaults and are currently unused.
let _mode = matches.value_of("mode").unwrap_or("scrabrudo");
let _num_players: usize = matches
.value_of("num_players")
.unwrap_or("2")
.parse::<usize>()
.unwrap();
let dict_path = matches.value_of("dictionary_path").unwrap();
dict::init_dict(dict_path);
let num_tiles = matches
.value_of("num_tiles")
.unwrap()
.parse::<usize>()
.unwrap();
let num_trials = matches
.value_of("num_trials")
.unwrap()
.parse::<u32>()
.unwrap();
let lookup_path = matches.value_of("lookup_path").unwrap();
create_lookup(&lookup_path, &dict::dict(), num_tiles, num_trials);
}
speculate! {
before {
testing::set_up();
}
describe "substring generation" {
it "sorts words" {
assert_eq!("abc", sort_word(&"abc".into()));
assert_eq!("act", sort_word(&"cat".into()));
assert_eq!("aeht", sort_word(&"hate".into()));
}
it "generates substrings correctly" {
let expected = hashset! {
"aht".into(),
"et".into(),
"aet".into(),
"aeht".into(),
"e".into(),
"ah".into(),
"t".into(),
"eh".into(),
"ht".into(),
"ae".into(),
"at".into(),
"aeh".into(),
"h".into(),
"eht".into(),
"a".into()
};
let actual = all_sorted_substrings(&"hate".into(), 4);
assert_eq!(expected, actual);
}
it "enforces a max length" {
let expected = hashset! {
"et".into(),
"e".into(),
"ah".into(),
"t".into(),
"eh".into(),
"ht".into(),
"ae".into(),
"at".into(),
"h".into(),
"a".into()
};
let actual = all_sorted_substrings(&"hate".into(), 2);
assert_eq!(expected, actual);
}
}
describe "lookup generation" {
it "creates a small lookup table" {
create_lookup("/tmp/lookup1.sstable", &hashset!{ "an".into() }, 5, 10000);
dict::init_lookup("/tmp/lookup1.sstable");
assert_eq!(3, dict::lookup_len());
assert!(dict::lookup_has("a".into()));
assert!(dict::lookup_has("n".into()));
assert!(dict::lookup_has("an".into()));
let probs = dict::lookup_probs("a".into()).unwrap();
// We should always have for each amount of tiles, plus the zero-case.
assert_eq!(6, probs.len());
// Finding 'a' in 0 dice is always impossible.
assert_eq!(0.0, probs[0]);
// Always monotonically increasing as you add more dice
info!("{:?}", probs);
for i in 1..5 {
assert!(probs[i] > probs[i - 1]);
}
}
it "creates a larger lookup table" {
create_lookup("/tmp/lookup2.sstable", &hashset!{ "bat".into(), "cat".into() }, 5, 10);
dict::init_lookup("/tmp/lookup2.sstable");
assert_eq!(11, dict::lookup_len());
}
}
}
| true |
bcda2b05dc2a193388ca3fae1debdcc10d342345
|
Rust
|
janpauldahlke/fhir-rs
|
/src/model/ValueSet_Compose.rs
|
UTF-8
| 9,306 | 3.109375 | 3 |
[
"MIT"
] |
permissive
|
#![allow(unused_imports, non_camel_case_types)]
use crate::model::Element::Element;
use crate::model::Extension::Extension;
use crate::model::ValueSet_Include::ValueSet_Include;
use serde_json::json;
use serde_json::value::Value;
use std::borrow::Cow;
/// A ValueSet resource instance specifies a set of codes drawn from one or more
/// code systems, intended for use in a particular context. Value sets link between
/// [[[CodeSystem]]] definitions and their use in [coded
/// elements](terminologies.html).
#[derive(Debug)]
pub struct ValueSet_Compose<'a> {
pub(crate) value: Cow<'a, Value>,
}
impl ValueSet_Compose<'_> {
pub fn new(value: &Value) -> ValueSet_Compose {
ValueSet_Compose {
value: Cow::Borrowed(value),
}
}
pub fn to_json(&self) -> Value {
(*self.value).clone()
}
/// Extensions for inactive
pub fn _inactive(&self) -> Option<Element> {
if let Some(val) = self.value.get("_inactive") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for lockedDate
pub fn _locked_date(&self) -> Option<Element> {
if let Some(val) = self.value.get("_lockedDate") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Exclude one or more codes from the value set based on code system filters and/or
/// other value sets.
pub fn exclude(&self) -> Option<Vec<ValueSet_Include>> {
if let Some(Value::Array(val)) = self.value.get("exclude") {
return Some(
val.into_iter()
.map(|e| ValueSet_Include {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element. To make the use of extensions safe and manageable,
/// there is a strict set of governance applied to the definition and use of
/// extensions. Though any implementer can define an extension, there is a set of
/// requirements that SHALL be met as part of the definition of the extension.
pub fn extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("extension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Unique id for the element within a resource (for internal references). This may
/// be any string value that does not contain spaces.
pub fn id(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("id") {
return Some(string);
}
return None;
}
/// Whether inactive codes - codes that are not approved for current use - are in
/// the value set. If inactive = true, inactive codes are to be included in the
/// expansion, if inactive = false, the inactive codes will not be included in the
/// expansion. If absent, the behavior is determined by the implementation, or by
/// the applicable $expand parameters (but generally, inactive codes would be
/// expected to be included).
pub fn inactive(&self) -> Option<bool> {
if let Some(val) = self.value.get("inactive") {
return Some(val.as_bool().unwrap());
}
return None;
}
/// Include one or more codes from a code system or other value set(s).
pub fn include(&self) -> Vec<ValueSet_Include> {
self.value
.get("include")
.unwrap()
.as_array()
.unwrap()
.into_iter()
.map(|e| ValueSet_Include {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>()
}
/// The Locked Date is the effective date that is used to determine the version of
/// all referenced Code Systems and Value Set Definitions included in the compose
/// that are not already tied to a specific version.
pub fn locked_date(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("lockedDate") {
return Some(string);
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element and that modifies the understanding of the element in
/// which it is contained and/or the understanding of the containing element's
/// descendants. Usually modifier elements provide negation or qualification. To
/// make the use of extensions safe and manageable, there is a strict set of
/// governance applied to the definition and use of extensions. Though any
/// implementer can define an extension, there is a set of requirements that SHALL
/// be met as part of the definition of the extension. Applications processing a
/// resource are required to check for modifier extensions. Modifier extensions
/// SHALL NOT change the meaning of any elements on Resource or DomainResource
/// (including cannot change the meaning of modifierExtension itself).
pub fn modifier_extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("modifierExtension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
pub fn validate(&self) -> bool {
if let Some(_val) = self._inactive() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._locked_date() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.exclude() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.id() {}
if let Some(_val) = self.inactive() {}
if !self
.include()
.into_iter()
.map(|e| e.validate())
.all(|x| x == true)
{
return false;
}
if let Some(_val) = self.locked_date() {}
if let Some(_val) = self.modifier_extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
return true;
}
}
#[derive(Debug)]
pub struct ValueSet_ComposeBuilder {
pub(crate) value: Value,
}
impl ValueSet_ComposeBuilder {
pub fn build(&self) -> ValueSet_Compose {
ValueSet_Compose {
value: Cow::Owned(self.value.clone()),
}
}
pub fn with(existing: ValueSet_Compose) -> ValueSet_ComposeBuilder {
ValueSet_ComposeBuilder {
value: (*existing.value).clone(),
}
}
pub fn new(include: Vec<ValueSet_Include>) -> ValueSet_ComposeBuilder {
let mut __value: Value = json!({});
__value["include"] = json!(include.into_iter().map(|e| e.value).collect::<Vec<_>>());
return ValueSet_ComposeBuilder { value: __value };
}
pub fn _inactive<'a>(&'a mut self, val: Element) -> &'a mut ValueSet_ComposeBuilder {
self.value["_inactive"] = json!(val.value);
return self;
}
pub fn _locked_date<'a>(&'a mut self, val: Element) -> &'a mut ValueSet_ComposeBuilder {
self.value["_lockedDate"] = json!(val.value);
return self;
}
pub fn exclude<'a>(
&'a mut self,
val: Vec<ValueSet_Include>,
) -> &'a mut ValueSet_ComposeBuilder {
self.value["exclude"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut ValueSet_ComposeBuilder {
self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn id<'a>(&'a mut self, val: &str) -> &'a mut ValueSet_ComposeBuilder {
self.value["id"] = json!(val);
return self;
}
pub fn inactive<'a>(&'a mut self, val: bool) -> &'a mut ValueSet_ComposeBuilder {
self.value["inactive"] = json!(val);
return self;
}
pub fn locked_date<'a>(&'a mut self, val: &str) -> &'a mut ValueSet_ComposeBuilder {
self.value["lockedDate"] = json!(val);
return self;
}
pub fn modifier_extension<'a>(
&'a mut self,
val: Vec<Extension>,
) -> &'a mut ValueSet_ComposeBuilder {
self.value["modifierExtension"] =
json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
}
| true |
0d199330fcb485be7244a0d635e9d617d435fd90
|
Rust
|
filestar-project/rust-fil-proofs
|
/filecoin-proofs/tests/parampublish/prompts_to_publish.rs
|
UTF-8
| 3,163 | 2.71875 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
use std::collections::HashSet;
use std::iter::FromIterator;
use failure::Error as FailureError;
use storage_proofs::parameter_cache::CacheEntryMetadata;
use crate::parampublish::support::session::ParamPublishSessionBuilder;
use std::collections::btree_map::BTreeMap;
#[test]
fn ignores_files_unrecognized_extensions() {
Ok::<(), FailureError>(())
.and_then(|_| {
// create files with these names in the parameter cache
let to_create = vec!["aaa.vk", "aaa.params", "bbb.txt", "ddd"];
// parampublish should prompt user to publish these files
let to_prompt: HashSet<&str> =
HashSet::from_iter(vec!["aaa.vk", "aaa.params"].iter().cloned());
let (mut session, _) = ParamPublishSessionBuilder::new()
.with_session_timeout_ms(1000)
.with_files(&to_create)
.with_metadata("aaa.meta", &CacheEntryMetadata { sector_size: 1234 })
.build();
for _ in 0..to_prompt.len() {
session.exp_string("[y/n] (sector size: 1234B) ")?;
let prompt_filename = session.exp_string(": ")?;
let key: &str = &prompt_filename;
assert!(to_prompt.contains(key), "missing {}", key);
session.send_line("n")?;
}
session.exp_string("no files to publish")?;
session.exp_string("done")?;
Ok(())
})
.expect("parampublish test failed");
}
#[test]
fn displays_sector_size_in_prompt() {
Ok::<(), FailureError>(())
.and_then(|_| {
let to_create = vec!["aaa.vk", "aaa.params", "xxx.vk", "xxx.params"];
let (mut session, _) = ParamPublishSessionBuilder::new()
.with_session_timeout_ms(1000)
.with_files(&to_create)
.with_metadata("aaa.meta", &CacheEntryMetadata { sector_size: 1234 })
.with_metadata("xxx.meta", &CacheEntryMetadata { sector_size: 4444 })
.build();
let mut map: BTreeMap<&str, String> = BTreeMap::new();
map.insert("aaa.vk", "1234".to_string());
map.insert("aaa.params", "1234".to_string());
map.insert("xxx.vk", "4444".to_string());
map.insert("xxx.params", "4444".to_string());
for _ in 0..to_create.len() {
session.exp_string("[y/n] (sector size: ")?;
let prompt_sector_size: &str = &session.exp_string("B) ")?;
let prompt_filename: &str = &session.exp_string(": ")?;
assert_eq!(
map.get(prompt_filename).expect("missing prompt filename"),
prompt_sector_size
);
session.send_line("n")?;
}
Ok(())
})
.expect("parampublish test failed");
}
#[test]
fn no_assets_no_prompt() -> Result<(), FailureError> {
let (mut session, _) = ParamPublishSessionBuilder::new()
.with_session_timeout_ms(1000)
.build();
session.exp_string("No valid parameters in directory")?;
Ok(())
}
| true |
74be3b18bdf77edb869747b4f98443c225f75172
|
Rust
|
colt-browning/integer-partitions
|
/src/lib.rs
|
UTF-8
| 5,404 | 3.5625 | 4 |
[
"MIT"
] |
permissive
|
//! Efficiently enumerate integer partitions.
//!
//! This is an implementation of a method described by
//! [Jerome Kelleher](http://jeromekelleher.net/generating-integer-partitions.html),
//! which takes a constant amount of time for each partition.
//!
//! # Examples
//!
//! ```
//! use integer_partitions::Partitions;
//!
//! let mut pp = Partitions::new(5);
//! while let Some(p) = pp.next() {
//! println!("{:?}", p)
//! }
//! ```
pub extern crate streaming_iterator;
use streaming_iterator::StreamingIterator;
/// Iterates over the partitions of a given nonnegative integer.
#[derive(Debug)]
pub struct Partitions {
a: Vec<usize>,
k: usize,
y: usize,
next: State,
}
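// Internal iteration state. The two variants appear to mirror the two phases of
// Kelleher's accelAsc algorithm: `A` rebuilds a prefix of parts, while
// `B { x, l }` steps the trailing pair (a[k], a[l]) through successive (x, y) splits.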
#[derive(Debug, PartialEq, Eq)]
enum State {
A,
B { x: usize, l: usize },
}
impl Partitions {
/// Makes a new iterator.
#[inline]
pub fn new(n: usize) -> Partitions {
if n == 0 {
return Partitions {
a: vec!(1),
k: 0,
y: 0,
next: State::A,
}
}
Partitions {
a: vec![0; n + 1],
k: 1,
y: n - 1,
next: State::A,
}
}
/// Advances the iterator and returns the next partition.
#[inline]
pub fn next(&mut self) -> Option<&[usize]> {
StreamingIterator::next(self)
}
/// Makes a new iterator, trying to avoid allocations.
///
/// Any vector can be passed to this function, since its contents
/// will be cleared and it will be filled with zeroes, but note
/// that the vector will still reallocate if its capacity is less
/// than `n + 1`.
#[inline]
pub fn recycle(n: usize, mut vec: Vec<usize>) -> Partitions {
vec.clear();
if n == 0 {
vec.push(1);
return Partitions {
a: vec,
k: 0,
y: 0,
next: State::A,
}
}
vec.reserve(n + 1);
for _ in 0..(n + 1) {
vec.push(0);
}
Partitions {
a: vec,
k: 1,
y: n - 1,
next: State::A,
}
}
/// Destroys the iterator and returns a vector for further use.
///
/// You only need to call this function if you want to reuse the
/// vector for something else. Its contents will be in an undefined
/// state, and so cannot be relied upon.
#[inline]
pub fn end(self) -> Vec<usize> {
self.a
}
}
impl StreamingIterator for Partitions {
type Item = [usize];
fn get(&self) -> Option<&Self::Item> {
if self.next == State::A && self.k == 0 && (self.a[0] == 0 || self.a.len() == 1) {
if self.a[0] == 0 {
None
} else {
Some(&[])
}
} else {
Some(&self.a[..self.k + match self.next {
State::A => 1,
State::B { .. } => 2,
}])
}
}
#[inline]
fn advance(&mut self) {
let Partitions {
ref mut a,
ref mut k,
ref mut y,
ref mut next
} = *self;
match *next {
State::A => {
if *k == 0 {
if a.len() == 1 && a[0] == 1 {
a[0] = 2;
} else {
a[0] = 0;
}
} else {
*k -= 1;
let x = a[*k] + 1;
while 2 * x <= *y {
a[*k] = x;
*y -= x;
*k += 1;
}
let l = *k + 1;
if x <= *y {
a[*k] = x;
a[l] = *y;
*next = State::B { x, l };
} else {
a[*k] = x + *y;
*y = x + *y - 1;
}
}
},
State::B { mut x, l } => {
x += 1;
*y -= 1;
if x <= *y {
a[*k] = x;
a[l] = *y;
*next = State::B { x, l };
} else {
a[*k] = x + *y;
*y = x + *y - 1;
*next = State::A;
}
},
}
}
}
#[test]
fn oeis() {
//! Tests the first few entries of A000041.
let tests: &[usize] = &[
1, 1, 2, 3, 5, 7, 11, 15, 22,
30, 42, 56, 77, 101, 135, 176, 231,
297, 385, 490, 627, 792, 1002, 1255, 1575,
1958, 2436, 3010, 3718, 4565, 5604, 6842, 8349,
10143, 12310, 14883, 17977, 21637, 26015, 31185, 37338,
44583, 53174, 63261, 75175, 89134, 105558, 124754, 147273,
173525,
];
for (i, &n) in tests.iter().enumerate().skip(1) {
let mut p = Partitions::new(i);
let mut c = 0;
while let Some(x) = p.next() {
let sum: usize = x.iter().cloned().sum();
assert_eq!(sum, i);
c += 1;
}
assert_eq!(c, n);
}
}
#[test]
fn n0() {
//! Tests the special case n == 0.
let mut p = Partitions::new(0);
assert_eq!(p.next().unwrap().len(), 0);
assert_eq!(p.next(), None);
}
| true |
549f2b2c78f293bdfa9a897888fb70e3a4d95196
|
Rust
|
huggingface/tokenizers
|
/tokenizers/src/models/unigram/trainer.rs
|
UTF-8
| 30,147 | 2.921875 | 3 |
[
"Apache-2.0"
] |
permissive
|
use crate::models::unigram::{lattice::Lattice, model::Unigram};
use crate::tokenizer::{AddedToken, Result, Trainer};
use crate::utils::parallelism::*;
use crate::utils::progress::{ProgressBar, ProgressStyle};
use log::debug;
use serde::{Deserialize, Serialize};
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
// A token and a score
type SentencePiece = (String, f64);
// A full sentence or word + its count within the dataset
type Sentence = (String, u32);
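// Digamma ψ(x): the recurrence ψ(x) = ψ(x + 1) - 1/x pushes the argument above 7,
// after which the asymptotic expansion of ψ(y + 1/2) in powers of 1/y is evaluated
// (hence the `x -= 1.0 / 2.0` shift below).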
fn digamma(mut x: f64) -> f64 {
let mut result = 0.0;
while x < 7.0 {
result -= 1.0 / x;
x += 1.0;
}
x -= 1.0 / 2.0;
let xx = 1.0 / x;
let xx2 = xx * xx;
let xx4 = xx2 * xx2;
result += x.ln() + (1.0 / 24.0) * xx2 - 7.0 / 960.0 * xx4 + (31.0 / 8064.0) * xx4 * xx2
- (127.0 / 30720.0) * xx4 * xx4;
result
}
#[derive(thiserror::Error, Debug)]
pub enum UnigramTrainerError {
#[error("The vocabulary is not large enough to contain all chars")]
VocabularyTooSmall,
}
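/// Normalizes scores into probabilities (p_i = s_i / Σ s_j) and stores them as ln(p_i).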
fn to_log_prob(pieces: &mut [SentencePiece]) {
let sum: f64 = pieces.iter().map(|(_, score)| score).sum();
let logsum = sum.ln();
for (_, score) in pieces.iter_mut() {
*score = score.ln() - logsum;
}
}
/// A `UnigramTrainer` can train a `Unigram` model from `word_counts`.
#[non_exhaustive]
#[derive(Builder, Debug, Clone, Serialize, Deserialize)]
pub struct UnigramTrainer {
#[builder(default = "true")]
pub show_progress: bool,
#[builder(default = "8000")]
pub vocab_size: u32,
#[builder(default = "2")]
pub n_sub_iterations: u32,
#[builder(default = "0.75")]
pub shrinking_factor: f64,
#[builder(default = "vec![]")]
pub special_tokens: Vec<AddedToken>,
#[builder(default = "HashSet::new()")]
pub initial_alphabet: HashSet<char>,
#[builder(default = "None")]
pub unk_token: Option<String>,
#[builder(default = "16")]
pub max_piece_length: usize,
#[builder(default = "1_000_000")]
seed_size: usize,
#[builder(default = "HashMap::new()")]
words: HashMap<String, u32>,
}
impl Default for UnigramTrainer {
fn default() -> Self {
Self::builder().build().unwrap()
}
}
impl UnigramTrainer {
pub fn builder() -> UnigramTrainerBuilder {
UnigramTrainerBuilder::default()
}
/// Setup a progress bar if asked to show progress
fn setup_progress(&self) -> Option<ProgressBar> {
if self.show_progress {
let p = ProgressBar::new(0);
p.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {msg:<40!} {wide_bar} {pos:<9!}/{len:>9!}"),
);
Some(p)
} else {
None
}
}
fn is_valid_sentencepiece(&self, char_string: &[char]) -> bool {
// Checks string length
// Checks such as "no space in the substring", numbers, hiragana and more are
// expected to be taken care of within pre_tokenizers.
// https://github.com/google/sentencepiece/blob/26be9516cd81d5315ee31c48d2438018e0eab879/src/trainer_interface.cc#L203
let n = char_string.len();
if char_string.is_empty() || n > self.max_piece_length {
return false;
}
true
}
fn finalize(&self, model: Unigram, required_chars: HashSet<String>) -> Result<Unigram> {
let mut min_score_penalty = 0.0;
let min_score_penalty_delta = 0.0001;
let mut pieces: Vec<(String, f64)> = vec![];
let mut inserted: HashSet<String> = HashSet::new();
// We don't want to include the <UNK> that was used to train
inserted.insert("<UNK>".into());
let existing_pieces: HashMap<String, f64> = model.iter().cloned().collect();
for c in required_chars {
if let Some(t) = existing_pieces.get(&c) {
inserted.insert(c.clone());
pieces.push((c, *t));
} else {
let score = model.min_score + min_score_penalty;
inserted.insert(c.clone());
pieces.push((c, score));
min_score_penalty += min_score_penalty_delta;
}
}
let (unk_id, need_add_unk) = if let Some(ref unk) = self.unk_token {
let unk_id = self.special_tokens.iter().enumerate().find_map(|(i, t)| {
if t.content == *unk {
Some(i)
} else {
None
}
});
match unk_id {
Some(id) => (Some(id), false),
None => (Some(0), true),
}
} else {
(None, false)
};
let vocab_size_without_special_tokens = if need_add_unk {
self.vocab_size as usize - self.special_tokens.len() - 1
} else {
self.vocab_size as usize - self.special_tokens.len()
};
for (token, score) in model.iter() {
if inserted.contains::<str>(token) {
continue;
}
inserted.insert(token.to_string());
pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score }));
if pieces.len() == vocab_size_without_special_tokens {
break;
}
}
pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
// Insert the necessary tokens
let mut special_tokens = self
.special_tokens
.iter()
.map(|t| (t.content.clone(), 0.0))
.collect::<Vec<_>>();
if need_add_unk {
special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0));
}
Unigram::from(
special_tokens.into_iter().chain(pieces).collect(),
unk_id,
model.byte_fallback(),
)
}
fn required_chars(&self, word_counts: &[Sentence]) -> HashSet<String> {
word_counts
.iter()
.flat_map(|(s, _count)| s.chars())
.chain(self.initial_alphabet.iter().copied())
.map(|c| c.to_string())
.collect()
}
fn make_seed_sentence_pieces(
&self,
sentences: &[Sentence],
_progress: &Option<ProgressBar>,
) -> Vec<SentencePiece> {
// Put all sentences in a string, separated by \0
let total: usize = sentences
.iter()
.map(|(s, _)| s.chars().count())
.sum::<usize>()
+ sentences.len();
let mut flat_string = String::with_capacity(total);
let mut all_chars: HashMap<char, u32> = HashMap::new();
let c_sentence_boundary = '\0';
let k_sentence_boundary = '\0'.to_string();
for (string, n) in sentences {
if string.is_empty() {
continue;
}
flat_string.push_str(string);
// XXX
// Comment suggests we add sentence boundary, but it seems to be missing from actual
// code in spm.
flat_string.push_str(&k_sentence_boundary);
for c in string.chars() {
if c != c_sentence_boundary {
*all_chars.entry(c).or_insert(0) += n;
}
}
}
flat_string.shrink_to_fit();
#[cfg(feature = "esaxx_fast")]
let suffix = esaxx_rs::suffix(&flat_string).unwrap();
#[cfg(not(feature = "esaxx_fast"))]
let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap();
// Basic chars need to be in sentence pieces.
let mut seed_sentencepieces: Vec<SentencePiece> = vec![];
let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect();
// Reversed order
sall_chars.sort_by_key(|&a| Reverse(a));
let mut substr_index: Vec<_> = suffix
.iter()
.filter_map(|(string, freq)| {
if string.len() <= 1 {
return None;
}
if string.contains(&c_sentence_boundary) {
return None;
}
if !self.is_valid_sentencepiece(string) {
return None;
}
let score = freq * string.len() as u32;
// if let Some(p) = &progress {
// p.inc(1);
// }
Some((score, string))
})
.collect();
// Fill seed_sentencepieces
for (count, character) in sall_chars {
seed_sentencepieces.push((character.to_string(), count.into()));
}
// sort by decreasing score
substr_index.sort_by_key(|&a| Reverse(a));
for (score, char_string) in substr_index {
// Just in case
assert!(self.is_valid_sentencepiece(char_string));
let string: String = char_string.iter().collect();
seed_sentencepieces.push((string, score.into()));
if seed_sentencepieces.len() >= self.seed_size {
break;
}
}
to_log_prob(&mut seed_sentencepieces);
seed_sentencepieces
}
fn prune_sentence_pieces(
&self,
model: &Unigram,
pieces: &[SentencePiece],
sentences: &[Sentence],
) -> Vec<SentencePiece> {
let mut always_keep = vec![true; pieces.len()];
let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
let bos_id = pieces.len() + 1;
let eos_id = pieces.len() + 2;
// First, segments the current sentencepieces to know
// how each sentencepiece is resegmented if this sentencepiece is removed
// from the vocabulary.
// To do so, we take the second best segmentation of sentencepiece[i].
// alternatives[i] stores the sequence of second best sentencepieces.
for (id, (token, _score)) in pieces.iter().enumerate() {
// Always keep unk.
if id == 0 {
always_keep[id] = false;
continue;
}
let mut lattice = Lattice::from(token, bos_id, eos_id);
model.populate_nodes(&mut lattice);
let nbests = lattice.nbest(2);
if nbests.len() == 1 {
always_keep[id] = true;
} else if nbests[0].len() >= 2 {
always_keep[id] = false;
} else if nbests[0].len() == 1 {
always_keep[id] = true;
for node in &nbests[1] {
let alt_id = node.borrow().id;
alternatives[id].push(alt_id);
}
}
}
// Second, segments all sentences to compute likelihood
// with a unigram language model. inverted[i] stores
// the set of sentence index where the sentencepieces[i] appears.
let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect();
let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences
.maybe_par_chunks(chunk_size)
.map(|enumerated_sentence_count_chunk| {
let mut vsum = 0.0;
let mut freq: Vec<f64> = vec![0.0; pieces.len()];
let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
for (i, (sentence, count)) in enumerated_sentence_count_chunk {
let mut lattice = Lattice::from(sentence, bos_id, eos_id);
model.populate_nodes(&mut lattice);
vsum += *count as f64;
for node_ref in lattice.viterbi() {
let id = node_ref.borrow().id;
freq[id] += *count as f64;
inverted[id].push(*i);
}
}
(vsum, freq, inverted)
})
.reduce(
|| (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]),
|(vsum, freq, inverted), (lvsum, lfreq, linverted)| {
(
vsum + lvsum,
freq.iter()
.zip(lfreq)
.map(|(global_el, local_el)| global_el + local_el)
.collect(),
inverted
.iter()
.zip(linverted)
.map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat())
.collect(),
)
},
);
let (vsum, freq, inverted) = collected;
let sum: f64 = freq.iter().sum();
let logsum = sum.ln();
let mut candidates: Vec<(usize, f64)> = vec![];
let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize);
new_pieces.push(pieces[0].clone());
// Finally, computes how likely the LM likelihood is reduced if
// the sentencepiece[i] is removed from the vocabulary.
// Since the exact computation of loss is difficult, we compute the
// loss approximately by assuming that all sentencepiece[i] in the sentences
// are replaced with alternatives[i] when sentencepiece[i] is removed.
for (id, (token, score)) in pieces.iter().enumerate() {
if id == 0 {
continue;
}
if freq[id] == 0.0 && !always_keep[id] {
// not found in Viterbi path. Can remove this entry safely.
continue;
} else if alternatives[id].is_empty() {
// no alternatives. Keeps this entry.
new_pieces.push((token.to_string(), *score));
} else {
let mut f = 0.0; // the frequency of pieces[i];
for n in &inverted[id] {
let score = sentences[*n].1 as f64;
f += score;
}
// TODO: Temporary hack to avoid Nans.
if f == 0.0 || f.is_nan() {
// new_pieces.push((token.to_string(), *score));
continue;
}
f /= vsum; // normalizes by all sentence frequency.
let logprob_sp = freq[id].ln() - logsum;
// After removing the sentencepiece[i], its frequency freq[i] is
// re-assigned to alternatives.
// new_sum = current_sum - freq[i] + freq[i] * alternatives.size()
// = current_sum + freq[i] (alternatives - 1)
let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln();
// The frequencies of alternatives are increased by freq[i].
let mut logprob_alt = 0.0;
for n in &alternatives[id] {
logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt;
}
// loss: the diff of likelihood after removing the sentencepieces[i].
let loss = f * (logprob_sp - logprob_alt);
if loss.is_nan() {
panic!("");
}
candidates.push((id, loss));
}
}
let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize;
let pruned_size = desired_vocab_size.max(pruned_size);
candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
for (id, _score) in candidates {
if new_pieces.len() == pruned_size {
break;
}
new_pieces.push(pieces[id].clone());
}
new_pieces.to_vec()
}
/// Update the progress bar with the new provided length and message
fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) {
if let Some(p) = p {
p.set_message(message);
p.set_length(len as u64);
p.set_draw_delta(len as u64 / 100);
p.reset();
}
}
/// Set the progress bar in the finish state
fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) {
if let Some(p) = p {
p.set_length(final_len as u64);
p.finish();
println!();
}
}
fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) {
let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum();
let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
let collected: (f64, u32, Vec<f64>) = sentences
.maybe_par_chunks(chunk_size)
.map(|sentences_chunk| {
let mut expected: Vec<f64> = vec![0.0; model.len()];
let mut objs: f64 = 0.0;
let mut ntokens: u32 = 0;
for (string, freq) in sentences_chunk {
let mut lattice = Lattice::from(string, model.bos_id, model.eos_id);
model.populate_nodes(&mut lattice);
let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected);
if z.is_nan() {
panic!("likelihood is NAN. Input sentence may be too long.");
}
ntokens += lattice.viterbi().len() as u32;
objs -= z / (all_sentence_freq as f64);
}
(objs, ntokens, expected)
})
.reduce(
|| (0.0, 0, vec![0.0; model.len()]),
|(objs, ntokens, expected), (lobjs, lntokens, lexpected)| {
(
objs + lobjs,
ntokens + lntokens,
expected
.iter()
.zip(lexpected)
.map(|(global_el, local_el)| global_el + local_el)
.collect(),
)
},
);
collected
}
fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> {
if pieces.len() != expected.len() {
panic!(
"Those two iterators are supposed to be the same length ({} vs {})",
pieces.len(),
expected.len()
);
}
let mut new_pieces: Vec<SentencePiece> =
Vec::with_capacity(self.vocab_size.try_into().unwrap());
let mut sum = 0.0;
let expected_frequency_threshold = 0.5;
for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() {
// Always keep unk.
if i == 0 {
new_pieces.push((piece.clone(), f64::NAN));
continue;
}
if *freq < expected_frequency_threshold {
continue;
}
new_pieces.push((piece.clone(), *freq));
sum += freq;
}
// Here we do not use the original EM, but use the
// Bayesianified/DPified EM algorithm.
// https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf
// This modification will act as a sparse prior.
let logsum = digamma(sum);
let new_pieces: Vec<_> = new_pieces
.into_iter()
.map(|(s, c)| (s, digamma(c) - logsum))
.collect();
new_pieces
}
pub fn do_train(
&self,
sentences: Vec<Sentence>,
model: &mut Unigram,
) -> Result<Vec<AddedToken>> {
let progress = self.setup_progress();
//
// 1. Compute frequent substrings
// TODO Should be able to upgrade to u64 when needed
self.update_progress(&progress, sentences.len(), "Suffix array seeds");
let mut pieces: Vec<SentencePiece> =
Vec::with_capacity(self.vocab_size.try_into().unwrap());
// We use a UNK token when training, whatever the `self.unk_token`
pieces.push(("<UNK>".into(), f64::NAN));
pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress));
self.finalize_progress(&progress, sentences.len());
// Useful to check compatibility with spm.
debug!(
"Using {} pieces on {} sentences for EM training",
pieces.len(),
sentences.len()
);
let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
// 2. Run E-M Loops to fine grain the pieces.
// We will shrink the vocab by shrinking_factor every loop on average
// Some other pieces are dropped if logprob is too small
// V = N * (f)**k
// k = log(V / N) / log(f)
let expected_loops = (((desired_vocab_size as f64).ln() - (pieces.len() as f64).ln())
/ self.shrinking_factor.ln()) as usize
+ 1;
let expected_updates = expected_loops * self.n_sub_iterations as usize;
self.update_progress(&progress, expected_updates, "EM training");
let required_chars = self.required_chars(&sentences);
if required_chars.len() as u32 > self.vocab_size {
return Err(Box::new(UnigramTrainerError::VocabularyTooSmall));
}
let mut new_model = Unigram::from(pieces.clone(), Some(0), false)?;
loop {
// Sub-EM iteration.
for _iter in 0..self.n_sub_iterations {
// Executes E step
let (_objective, _num_tokens, expected) = self.run_e_step(&new_model, &sentences);
// Executes M step.
pieces = self.run_m_step(&pieces, &expected);
new_model = Unigram::from(pieces.clone(), Some(0), false)?;
// Useful comment for checking compatibility with spm
debug!(
"Em iter={} size={} obj={} num_tokens={} num_tokens/piece={}",
_iter,
new_model.len(),
_objective,
_num_tokens,
_num_tokens as f64 / model.len() as f64
);
if let Some(p) = &progress {
p.inc(1);
}
} // end of Sub EM iteration
// Stops the iteration when the size of sentences reaches to the
// desired symbol size.
if pieces.len() <= desired_vocab_size {
break;
}
// Prunes pieces.
pieces = self.prune_sentence_pieces(&new_model, &pieces, &sentences);
new_model = Unigram::from(pieces.clone(), Some(0), false)?;
}
self.finalize_progress(&progress, expected_updates);
// Finally, adjusts the size of sentencepices to be |vocab_size|.
*model = self.finalize(new_model, required_chars)?;
Ok(self.special_tokens.clone())
}
}
impl Trainer for UnigramTrainer {
type Model = Unigram;
/// Train a Unigram model
fn train(&self, model: &mut Unigram) -> Result<Vec<AddedToken>> {
let sentences: Vec<_> = self.words.iter().map(|(s, i)| (s.to_owned(), *i)).collect();
self.do_train(sentences, model)
}
/// Whether we should show progress
fn should_show_progress(&self) -> bool {
self.show_progress
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync,
{
let words: Result<HashMap<String, u32>> = iterator
.maybe_par_bridge()
.map(|sequence| {
let words = process(sequence.as_ref())?;
let mut map = HashMap::new();
for word in words {
map.entry(word).and_modify(|c| *c += 1).or_insert(1);
}
Ok(map)
})
.reduce(
|| Ok(HashMap::new()),
|acc, ws| {
let mut acc = acc?;
for (k, v) in ws? {
acc.entry(k).and_modify(|c| *c += v).or_insert(v);
}
Ok(acc)
},
);
self.words = words?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use assert_approx_eq::assert_approx_eq;
use std::iter::FromIterator;
#[test]
fn test_unigram_chars() {
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.build()
.unwrap();
let sentences = vec![
("This is a".to_string(), 1),
("こんにちは友達".to_string(), 1),
];
let required_chars = trainer.required_chars(&sentences);
assert_eq!(required_chars.len(), 13);
let progress = None;
let table = trainer.make_seed_sentence_pieces(&sentences, &progress);
let target_strings = vec![
"s", "i", " ", "達", "友", "ん", "は", "に", "ち", "こ", "h", "a", "T", "is ", "s ",
];
let strings: Vec<_> = table.iter().map(|(string, _)| string).collect();
assert_eq!(strings, target_strings);
let scores = table.iter().map(|(_, score)| score);
let target_scores = vec![
-2.5649493574615367, // 2.0
-2.5649493574615367, // 2.0
-2.5649493574615367, // 2.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-1.4663370687934272, // 6.0
-1.8718021769015916, // 4.0
];
for (score, target_score) in scores.zip(target_scores) {
assert_approx_eq!(*score, target_score, 0.01);
}
}
#[test]
fn test_initial_alphabet() {
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.initial_alphabet(HashSet::from_iter(vec!['a', 'b', 'c', 'd', 'e', 'f']))
.build()
.unwrap();
let sentences = vec![("こんにちは友達".to_string(), 1)];
let required_chars = trainer.required_chars(&sentences);
assert_eq!(
required_chars,
vec!["こ", "ん", "に", "ち", "は", "友", "達", "a", "b", "c", "d", "e", "f"]
.into_iter()
.map(|s| s.to_owned())
.collect::<HashSet<_>>()
);
}
#[test]
fn test_unk_token() {
// 1. Should add `unk_token` as first special token
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.special_tokens(vec![
AddedToken::from("[SEP]", true),
AddedToken::from("[CLS]", true),
])
.unk_token(Some("[UNK]".into()))
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
// 2. Let it where it is
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.special_tokens(vec![
AddedToken::from("[SEP]", true),
AddedToken::from("[CLS]", true),
AddedToken::from("[UNK]", true),
])
.unk_token(Some("[UNK]".into()))
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
// 3. Don't put it there if not needed
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next().unwrap().0, "e".to_string());
}
#[test]
fn test_special_tokens() {
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.special_tokens(vec![
AddedToken::from("[SEP]", true),
AddedToken::from("[CLS]", true),
])
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
}
#[test]
fn test_to_log_prob() {
let mut a = vec![("".to_string(), 1.0), ("".to_string(), 2.0)];
to_log_prob(&mut a);
let scores = a.iter().map(|(_, score)| *score).collect::<Vec<_>>();
// ln(1) - ln(3)
assert_approx_eq!(scores[0], -1.098, 0.01);
// ln(2) - ln(3)
assert_approx_eq!(scores[1], -0.405, 0.01);
}
}
| true |
ab8e2ae0eedfa28b1c1db1516c16fd4a9c215c59
|
Rust
|
ccdle12/Rust-Book-Notes
|
/20_webserver/hello/src/bin/main.rs
|
UTF-8
| 2,274 | 3.40625 | 3 |
[] |
no_license
|
use hello::ThreadPool;
use std::fs;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
use std::thread;
use std::time::Duration;
fn main() {
let listener = TcpListener::bind("127.0.0.1:7878").expect("failed to bind to port 7878");
// Create a thread pool with 4 threads.
let pool = ThreadPool::new(4);
// incoming() returns an iterator that gives us streams.
// A stream represents a connection to the server.
for stream in listener.incoming() {
let stream = stream.unwrap();
// We can create a thread for each request to handle multiple requests
// at once. This has the drawback though, of DoS attacks that can put
// a lot of strain on our server.
// thread::spawn(|| {
// handle_connection(stream);
// });
pool.execute(|| {
handle_connection(stream);
});
}
}
fn handle_connection(mut stream: TcpStream) {
// Create a buffer on the stack capable of reading 512 bytes.
let mut buffer = [0; 512];
// Read the bytes from the stream and write it the buffer.
stream.read(&mut buffer).unwrap();
// Check if the request is GET.
// b"" is a byte string.
let get = b"GET / HTTP/1.1\r\n";
// To simulate a slow request on this server, the /sleep endpoint waits 5
// seconds before responding. With a single-threaded server, a request to the
// default / would then have to wait until the /sleep request has been served.
let sleep = b"GET /sleep HTTP/1.1\r\n";
let (status_line, filename) = if buffer.starts_with(get) {
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else if buffer.starts_with(sleep) {
thread::sleep(Duration::from_secs(5));
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else {
("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html")
};
// Create the contents of the web page.
let contents = fs::read_to_string(filename).unwrap();
// Return with a response.
let response = format!("{}{}", status_line, contents);
// Write the response as bytes over the stream.
stream.write(response.as_bytes()).unwrap();
// Blocks until all bytes are written.
stream.flush().unwrap();
}
| true |
23036d6ed3c03c21ee07c31b37ec3e8e528bc2d0
|
Rust
|
Pomettini/streaming-stampede
|
/src/pokemons.rs
|
UTF-8
| 1,153 | 2.6875 | 3 |
[
"MIT"
] |
permissive
|
use constants::*;
use ggez::graphics::*;
use ggez::*;
use pokemon_sprite::*;
use pokemon_types::*;
pub struct Pokemon {
pub pokemon_type: PokemonType,
pub sprite: PokemonSprite,
pub position: Point2,
pub speed: f32,
pub isfake: bool,
}
impl Pokemon {
pub fn new(ctx: &mut Context, pokemon_type: PokemonType) -> Pokemon {
setup_pokemon(ctx, pokemon_type)
}
pub fn update(&mut self) {
self.position.x += self.speed;
self.position.y += self.speed;
self.sprite.advance_sprite();
}
pub fn draw(&self, ctx: &mut Context) -> GameResult<()> {
let sprite_options = graphics::DrawParam {
src: Rect {
x: -0.0,
y: 0.0,
w: 1.0,
h: 1.0,
},
dest: self.position,
rotation: 0.0,
scale: Point2::new(1.0, 1.0),
offset: Point2::new(0.0, 0.0),
shear: Point2::new(0.0, 0.0),
color: None,
};
// println!("{}", self.position);
graphics::draw_ex(ctx, self.sprite.get_current_sprite(), sprite_options)
}
}
| true |
44140672a3d0981621e213fa817efcfb089201f2
|
Rust
|
sam-ulrich1/hedera-sdk-rust
|
/examples/create_file.rs
|
UTF-8
| 1,925 | 2.71875 | 3 |
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
use failure::{format_err, Error};
use hedera::{Client, SecretKey, Status};
use std::{env, thread::sleep, time::Duration};
use std::str::FromStr;
#[tokio::main]
async fn main() -> Result<(), Error> {
pretty_env_logger::try_init()?;
// Operator is the account that sends the transaction to the network
// This account is charged for the transaction fee
let operator = "0:0:2".parse()?;
let client = Client::builder("testnet.hedera.com:50003")
.node("0:0:3".parse()?)
.operator(operator, || env::var("OPERATOR_SECRET"))
.build()?;
let operator_secret : String = env::var("OPERATOR_SECRET")?;
let secret = SecretKey::from_str(&operator_secret)?;
let public = secret.public();
// init some file contents
let file_contents_string = String::from("Hedera Hashgraph is great");
let file_contents_bytes = file_contents_string.into_bytes();
// Create a file
let id = client
.create_file()
.expires_in(Duration::from_secs(2_592_000))
.key(public)
.contents(file_contents_bytes)
.memo("[hedera-sdk-rust][example] create_file")
.sign(&env::var("OPERATOR_SECRET")?.parse()?) // sign as the owner of the file
.execute_async()
.await?;
println!("creating file; transaction = {}", id);
// If we got here we know we passed pre-check
// Depending on your requirements that may be enough for some kinds of transactions
sleep(Duration::from_secs(2));
// Get the receipt and check the status to prove it was successful
let mut tx = client.transaction(id).receipt();
let receipt = tx.get_async().await?;
if receipt.status != Status::Success {
Err(format_err!(
"transaction has a non-successful status: {:?}",
receipt.status
))?;
}
let file = receipt.file_id.unwrap();
println!("file ID = {}", file);
Ok(())
}
| true |
711d9d090467624317ce9713c4efedf8f9ed35bb
|
Rust
|
fee1-dead/moreiter
|
/src/lib.rs
|
UTF-8
| 1,583 | 3.359375 | 3 |
[] |
no_license
|
pub enum ProcessResult<T> {
/// Represents a value that is either mapped or the original value.
Value(T),
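/// Replaces the value with a sequence of values, yielded before the underlying iterator resumes.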
Values(Box<dyn Iterator<Item = T>>),
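/// Drops the value and additionally skips items from the underlying iterator
/// (see the `Skip` handling in `next` for the exact count).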
Skip(usize),
}
pub struct Process<I, T, F> {
#[doc(hidden)]
__iter: Option<Box<dyn Iterator<Item = I>>>,
iter: T,
predicate: F
}
impl<I, T, F> Iterator for Process<I, T, F> where T: Iterator<Item = I>, F: FnMut(I) -> ProcessResult<I> {
type Item = I;
fn next(&mut self) -> Option<Self::Item> {
// Drain any previously expanded values first.
if let Some(it) = &mut self.__iter {
if let Some(item) = it.next() {
return Some(item);
}
}
// Either there was no expansion or it is exhausted; clear the slot and fall
// back to the underlying iterator instead of ending the whole iteration.
self.__iter = None;
match self.iter.next() {
None => None,
Some(v) => {
match (self.predicate)(v) {
ProcessResult::Value(it) => {
Some(it)
}
ProcessResult::Values(iter) => {
self.__iter = Some(iter);
// Recurse so that an empty expansion does not cut the iteration short.
self.next()
}
ProcessResult::Skip(times) => {
// `nth(times)` consumes `times + 1` further items from the underlying
// iterator (the value handed to the predicate was already consumed).
self.iter.nth(times);
self.next()
}
}
}
}
}
}
pub trait MoreIter: Iterator {
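/// Maps, expands, or skips items according to `f`. A rough usage sketch
/// (illustrative only; it assumes the `ProcessResult` semantics documented above):
///
/// ```ignore
/// use moreiter::{MoreIter, ProcessResult};
///
/// // Keep odd numbers as-is and splice in two copies of every even number.
/// let out: Vec<u32> = (1..=4u32)
/// .process(|x| {
/// if x % 2 == 0 {
/// ProcessResult::Values(Box::new(std::iter::repeat(x).take(2)))
/// } else {
/// ProcessResult::Value(x)
/// }
/// })
/// .collect();
/// ```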
fn process<F>(self, f: F) -> Process<Self::Item, Self, F> where Self: Sized, F : FnMut(Self::Item) -> ProcessResult<Self::Item> {
Process { __iter: None, iter: self, predicate: f }
}
}
#[doc(hidden)]
impl<T: Iterator> MoreIter for T {}
| true |
ed6849edc1c547caa81bb0fd02ecf288bc912755
|
Rust
|
KamiD/cosmwasm
|
/packages/std/src/types.rs
|
UTF-8
| 4,057 | 2.84375 | 3 |
[
"Apache-2.0"
] |
permissive
|
use std::fmt;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use crate::coins::Coin;
use crate::encoding::Binary;
// Added Eq and Hash to allow this to be a key in a HashMap (MockQuerier)
#[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, Eq, JsonSchema, Hash)]
pub struct HumanAddr(pub String);
#[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, JsonSchema)]
pub struct CanonicalAddr(pub Binary);
impl HumanAddr {
pub fn as_str(&self) -> &str {
&self.0
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl fmt::Display for HumanAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", &self.0)
}
}
impl From<&str> for HumanAddr {
fn from(addr: &str) -> Self {
HumanAddr(addr.to_string())
}
}
impl From<&HumanAddr> for HumanAddr {
fn from(addr: &HumanAddr) -> Self {
HumanAddr(addr.0.to_string())
}
}
impl From<&&HumanAddr> for HumanAddr {
fn from(addr: &&HumanAddr) -> Self {
HumanAddr(addr.0.to_string())
}
}
impl From<String> for HumanAddr {
fn from(addr: String) -> Self {
HumanAddr(addr)
}
}
impl CanonicalAddr {
pub fn as_slice(&self) -> &[u8] {
&self.0.as_slice()
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl fmt::Display for CanonicalAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
#[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, JsonSchema)]
pub struct Env {
pub block: BlockInfo,
pub message: MessageInfo,
pub contract: ContractInfo,
}
#[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, JsonSchema)]
pub struct BlockInfo {
pub height: u64,
// time is seconds since epoch begin (Jan. 1, 1970)
pub time: u64,
pub chain_id: String,
}
#[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, JsonSchema)]
pub struct MessageInfo {
/// The `sender` field from the wasm/store-code, wasm/instantiate or wasm/execute message.
/// You can think of this as the address that initiated the action (i.e. the message). What that
/// means exactly heavily depends on the application.
///
/// The x/wasm module ensures that the sender address signed the transaction.
/// Additional signers of the transaction that are either needed for other messages or contain unnecessary
/// signatures are not propagated into the contract.
///
/// There is a discussion to open up this field to multiple initiators, which you're welcome to join
/// if you have a specific need for that feature: https://github.com/CosmWasm/cosmwasm/issues/293
pub sender: HumanAddr,
pub sent_funds: Vec<Coin>,
}
#[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, JsonSchema)]
pub struct ContractInfo {
pub address: HumanAddr,
}
/// An empty struct that serves as a placeholder in different places,
/// such as contracts that don't set a custom message.
///
/// It is designed to be expressable in correct JSON and JSON Schema but
/// contains no meaningful data. Previously we used enums without cases,
/// but those cannot represented as valid JSON Schema (https://github.com/CosmWasm/cosmwasm/issues/451)
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
pub struct Empty {}
#[cfg(test)]
mod test {
use super::*;
use crate::serde::{from_slice, to_vec};
#[test]
fn empty_can_be_instantiated_serialized_and_deserialized() {
let instance = Empty {};
let serialized = to_vec(&instance).unwrap();
assert_eq!(serialized, b"{}");
let deserialized: Empty = from_slice(b"{}").unwrap();
assert_eq!(deserialized, instance);
let deserialized: Empty = from_slice(b"{\"stray\":\"data\"}").unwrap();
assert_eq!(deserialized, instance);
}
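
    // Illustrative sketch (added for clarity; "creator" is a placeholder address): the
    // `From` impls above allow building a HumanAddr from &str, String, and references.
    #[test]
    fn human_addr_from_conversions() {
        let from_str = HumanAddr::from("creator");
        let from_string = HumanAddr::from(String::from("creator"));
        assert_eq!(from_str, from_string);
        assert_eq!(from_str.as_str(), "creator");
        assert_eq!(from_str.len(), 7);
        assert!(!from_str.is_empty());
    }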
}
| true |
1c07d765f08c355a2ba524331b434a99cfb2d30b
|
Rust
|
passchaos/zoxide
|
/src/subcommand/query.rs
|
UTF-8
| 3,346 | 3.125 | 3 |
[
"MIT"
] |
permissive
|
use crate::db::Dir;
use crate::fzf::Fzf;
use crate::util;
use anyhow::{bail, Context, Result};
use structopt::StructOpt;
use std::io::{self, Write};
use std::path::Path;
/// Search for a directory
#[derive(Debug, StructOpt)]
#[structopt()]
pub struct Query {
keywords: Vec<String>,
/// Opens an interactive selection menu using fzf
#[structopt(short, long, conflicts_with = "list")]
interactive: bool,
/// List all matching directories
#[structopt(short, long, conflicts_with = "interactive")]
list: bool,
/// Display score along with result
#[structopt(short, long)]
score: bool,
}
impl Query {
pub fn run(&self) -> Result<()> {
if self.list {
return self.query_list();
}
if self.interactive {
return self.query_interactive();
}
// if the input is already a valid path, simply print it as-is
if let [path] = self.keywords.as_slice() {
if Path::new(path).is_dir() {
let dir = Dir {
path: path.to_string(),
rank: 0.0,
last_accessed: 0,
};
if self.score {
println!("{}", dir.display_score(0))
} else {
println!("{}", dir.display());
}
return Ok(());
}
}
self.query()
}
fn query(&self) -> Result<()> {
let mut db = util::get_db()?;
let now = util::get_current_time()?;
let mut matches = db.matches(now, &self.keywords);
match matches.next() {
Some(dir) => {
if self.score {
println!("{}", dir.display_score(now))
} else {
println!("{}", dir.display());
}
}
None => bail!("no match found"),
}
Ok(())
}
fn query_interactive(&self) -> Result<()> {
let mut db = util::get_db()?;
let now = util::get_current_time()?;
let mut fzf = Fzf::new()?;
let mut matches = db.matches(now, &self.keywords);
while let Some(dir) = matches.next() {
fzf.write(format!("{}", dir.display_score(now)))?;
}
match fzf.wait_select()? {
Some(selection) => {
if self.score {
print!("{}", selection)
} else {
let selection = selection
.get(5..)
.with_context(|| format!("fzf returned invalid output: {}", selection))?;
print!("{}", selection)
}
}
None => bail!("no match found"),
};
Ok(())
}
fn query_list(&self) -> Result<()> {
let mut db = util::get_db()?;
let now = util::get_current_time()?;
let mut matches = db.matches(now, &self.keywords);
let stdout = io::stdout();
let mut handle = stdout.lock();
while let Some(dir) = matches.next() {
if self.score {
writeln!(handle, "{}", dir.display_score(now))
} else {
writeln!(handle, "{}", dir.display())
}
.unwrap();
}
Ok(())
}
}
| true |
0184c6a0af51420e0e4022fea0ce115847404250
|
Rust
|
timjrobinson/rust-book
|
/collections/src/main.rs
|
UTF-8
| 5,833 | 3.625 | 4 |
[] |
no_license
|
use std::collections::{HashMap,HashSet};
fn vectors() {
let mut v: Vec<i32> = Vec::new();
v.push(5);
v.push(444);
println!("Vector item 1 is: {}", v[1]);
let v2 = vec![6,7,8,9];
let mut third: &i32 = &v2[2];
println!("The third element is {}", third);
third = &3;
println!("After change, third element is {}", third);
println!("After change, third element of vec is {}", v2[2]);
match v2.get(2) {
Some(third) => println!("The third element is {}", third),
None => println!("There is no third element."),
}
let mut v3 = vec![100, 32, 57];
for i in &mut v3 {
*i += 50;
}
printvec(&v3);
}
fn strings() {
let s = String::from("mew mew");
println!("S is: {}", s);
let data = "initial contents";
let s = &data;
let mut y = data.to_string();
y.push_str(" yum");
y = y + " more!";
println!("S is: {}, y is: {}", s, y);
let mut s1 = String::from("foo");
let s2 = "bar";
s1.push_str(s2);
println!("s2 is {}", s2);
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
let s = return_formatted(&s1, &s2, &s3);
println!("s: {}", s);
let hello = "Здравствуйте";
let s = &hello[0..4];
println!("s: {}", s);
}
fn hashmaps() {
// let mut scores = HashMap::new();
// scores.insert(String::from("Blue"), 10);
// scores.insert(String::from("Yellow"), 50);
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
let scores: HashMap<_, _> =
teams.into_iter().zip(initial_scores.into_iter()).collect();
println!("hashmap: {:?}", scores);
let field_name = String::from("Favorite color");
let field_value = String::from("Blue");
let mut map = HashMap::new();
map.insert(field_name, field_value);
// field_name and field_value are invalid at this point, try using them and
// see what compiler error you get!
// println!("Field name: {}", field_name);
// println!("hashmap: {:?}", map);
// field_name = String::from("Blah");
// println!("Field name: {}", field_name);
// println!("hashmap: {:?}", map);
let team_name = String::from("Blue");
let score = scores.get(&team_name);
match score {
Some(score) => { println!("Score: {}", score); },
None => println!("No score!"),
}
for (key, value) in &scores {
println!("{}: {}", key, value);
}
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(0);
*count += 1;
}
println!("{:?}", map);
}
fn summary() {
let vec = vec![1, 3, 4, 4, 12, 19, 22, 29];
let details = list_vec_details(vec);
println!("Vector Details: {:?}", details);
}
fn list_vec_details(mut vec: Vec<i32>) -> HashMap<String, f32> {
let mut sum = 0;
for i in &vec {
sum += i;
}
let len = vec.len() as i32;
let mean : f32 = (sum as f32 / len as f32) as f32;
let halfway : usize = (len / 2) as usize;
vec.sort();
let median = vec[halfway] as f32;
let mut highest_count = (0, 0);
let mut counts = HashMap::new();
for i in &vec {
let count = counts.entry(i.to_string()).or_insert(0);
*count += 1;
if *count > highest_count.1 {
highest_count.0 = *i;
highest_count.1 = *count;
}
}
let mut result = HashMap::new();
result.insert(String::from("mean"), mean);
result.insert(String::from("median"), median);
result.insert(String::from("mode"), highest_count.0 as f32);
return result;
}
fn printvec(v3: &Vec<i32>) {
for i in v3 {
println!("{}", i);
}
}
fn return_formatted(s1: &String, s2: &String, s3: &String) -> String {
let s = format!("{}, {}, {}", s1, s2, s3);
return s;
}
fn convert_to_piglatin(s: &String) -> String {
let split_string: Vec<&str> = s.split(" ").collect();
let mut piglatin_string = String::from("");
for word in &split_string {
let first_char = &word[0..1];
let piglatin_word = match first_char {
"a" | "e" | "i" | "o" | "u" => format!("{}-hay ", &word),
_ => format!("{}-{}ay ", &word[1..], first_char),
};
piglatin_string.push_str(piglatin_word.as_str());
}
return piglatin_string;
}
fn piglatin_test() {
let original = String::from("first time to eat an apple");
let converted = convert_to_piglatin(&original);
println!("Pig Latin string is: {}", converted);
}
fn parse_instruction(instruction: &String) -> (&str, &str) {
let split_instructions : Vec<&str> = instruction.split(" ").collect();
let name: &str = split_instructions[1];
let department: &str = split_instructions[3];
return (name, department);
}
fn employee_interface() {
let mut instructions: Vec<String> = Vec::new();
instructions.push(String::from("Add Sally to Engineering"));
instructions.push(String::from("Add Amir to Sales"));
instructions.push(String::from("Add Toby to Marketing"));
instructions.push(String::from("Add Juanita to Sales"));
instructions.push(String::from("Add Amir to Sales"));
let mut employee_db : HashMap<String, HashSet<String>> = HashMap::new();
for i in &instructions {
let (name, department) = parse_instruction(i);
let department = employee_db.entry(String::from(department)).or_insert(HashSet::new());
department.insert(String::from(name));
}
println!("Employee DB: {:?}", employee_db);
}
fn main() {
vectors();
strings();
hashmaps();
summary();
piglatin_test();
employee_interface();
}
| true |
5c47760e304fd76bb2c732be042e6dfa62d2eb7d
|
Rust
|
osphea/zircon-rpi
|
/src/developer/ffx/config/src/heuristic_config.rs
|
UTF-8
| 1,575 | 2.8125 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {crate::api::ReadConfig, serde_json::Value, std::collections::HashMap};
pub(crate) type HeuristicFn = fn(key: &str) -> Option<Value>;
pub(crate) struct Heuristic<'a> {
heuristics: &'a HashMap<&'static str, HeuristicFn>,
}
impl<'a> Heuristic<'a> {
pub(crate) fn new(heuristics: &'a HashMap<&'static str, HeuristicFn>) -> Self {
Self { heuristics }
}
}
impl ReadConfig for Heuristic<'_> {
fn get(&self, key: &str) -> Option<Value> {
self.heuristics.get(key).map(|r| r(key)).flatten()
}
}
////////////////////////////////////////////////////////////////////////////////
// tests
#[cfg(test)]
mod test {
use super::*;
fn test_heuristic(key: &str) -> Option<Value> {
Some(Value::String(key.to_string()))
}
#[test]
fn test_config_heuristics() {
let (heuristic_key, heuristic_key_2) = ("test", "test_2");
let mut heuristics = HashMap::<&str, HeuristicFn>::new();
heuristics.insert(heuristic_key, test_heuristic);
heuristics.insert(heuristic_key_2, test_heuristic);
let config = Heuristic::new(&heuristics);
let missing_key = "whatever";
assert_eq!(None, config.get(missing_key));
assert_eq!(Some(Value::String(heuristic_key.to_string())), config.get(heuristic_key));
assert_eq!(Some(Value::String(heuristic_key_2.to_string())), config.get(heuristic_key_2));
}
}
| true |
077f7049a0fd169a16b69bcd6758eb17fc6084a8
|
Rust
|
gitter-badger/rsmorphy
|
/src/container/decode/error.rs
|
UTF-8
| 517 | 2.6875 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
use std::num::ParseIntError;
use std::num::ParseFloatError;
#[derive(Debug, Clone, PartialEq)]
pub enum DecodeError {
UnexpectedEnd,
UnknownPartType,
DoesntMatch,
ParseIntError(ParseIntError),
ParseFloatError(ParseFloatError)
}
impl From<ParseIntError> for DecodeError {
fn from(e: ParseIntError) -> Self {
DecodeError::ParseIntError(e)
}
}
impl From<ParseFloatError> for DecodeError {
fn from(e: ParseFloatError) -> Self {
DecodeError::ParseFloatError(e)
}
}
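
// Illustrative sketch (added for clarity; `decode_int` is a hypothetical helper, not part
// of the original crate): the `From` impls above let `?` convert parse errors directly
// into `DecodeError`.
#[allow(dead_code)]
fn decode_int(s: &str) -> Result<i64, DecodeError> {
    // `str::parse` returns ParseIntError, which `?` maps to DecodeError::ParseIntError.
    Ok(s.parse::<i64>()?)
}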
| true |
d878d2b2fefdce16a97ac82268ef7d2c79246c38
|
Rust
|
HectorPeeters/dyno
|
/src/backend/x86_backend.rs
|
UTF-8
| 7,151 | 2.953125 | 3 |
[] |
no_license
|
use crate::ast::{BinaryOperationType, Expression, Statement};
use crate::backend::Backend;
use crate::error::{DynoError, DynoResult};
use crate::types::{DynoType, DynoValue};
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::process::Command;
use std::time::SystemTime;
const REG_NAMES: [&str; 4] = ["%r8", "%r9", "%r10", "%r11"];
pub struct X86Backend {
writer: BufWriter<File>,
regs: [bool; 4],
}
type Register = usize;
impl Backend for X86Backend {
type Register = Register;
fn generate_statement(&mut self, statement: &Statement) -> DynoResult<()> {
match statement {
Statement::If(condition, true_statement) => self.generate_if(condition, true_statement),
Statement::While(condition, body) => self.generate_while(condition, body),
Statement::Return(x) => self.generate_return(x),
Statement::Block(children) => {
for child in children {
self.generate_statement(&child)?;
}
Ok(())
}
Statement::Declaration(name, value_type) => self.generate_declaration(name, value_type),
Statement::Assignment(name, expression) => self.generate_assignment(name, expression),
}
}
fn generate_expression(&mut self, expression: &Expression) -> DynoResult<Self::Register> {
match expression {
Expression::BinaryOperation(op_type, left, right) => {
self.generate_binop(op_type, left, right)
}
Expression::Literal(value_type, value) => self.generate_literal(value_type, value),
Expression::Widen(expression, value_type) => {
self.generate_widen(expression, value_type)
}
Expression::Identifier(name) => self.generate_identifier(name),
}
}
}
impl X86Backend {
fn new(file_name: &str) -> Self {
Self {
writer: BufWriter::new(File::create(file_name).unwrap()),
regs: [false; 4],
}
}
fn allocate_reg(&mut self) -> DynoResult<Register> {
for (i, reg) in self.regs.iter().enumerate() {
if !reg {
self.regs[i] = true;
return Ok(i);
}
}
Err(DynoError::GeneratorError(
"All registers are allocated".to_string(),
))
}
fn deallocate_reg(&mut self, reg: Register) -> DynoResult<()> {
if !self.regs[reg] {
return Err(DynoError::GeneratorError(
"Trying to free a register which is not used".to_string(),
));
}
self.regs[reg] = false;
Ok(())
}
fn finish(&mut self) -> DynoResult<()> {
self.writer.flush()?;
Ok(())
}
fn generate_header(&mut self) -> DynoResult<()> {
writeln!(self.writer, ".globl main")?;
writeln!(self.writer, ".text")?;
writeln!(self.writer, "main:")?;
Ok(())
}
fn generate_binop(
&mut self,
op_type: &BinaryOperationType,
left: &Expression,
right: &Expression,
) -> DynoResult<Register> {
use BinaryOperationType::*;
println!("{:#?}\n{:#?}", left, right);
let left = self.generate_expression(left)?;
let right = self.generate_expression(right)?;
println!("{} {}", left, right);
match op_type {
Add => writeln!(
self.writer,
"addq {}, {}",
REG_NAMES[right], REG_NAMES[left]
)?,
Subtract => writeln!(
self.writer,
"subq {}, {}",
REG_NAMES[right], REG_NAMES[left]
)?,
Multiply => writeln!(
self.writer,
"imul {}, {}",
REG_NAMES[right], REG_NAMES[left]
)?,
Divide => writeln!(
self.writer,
"movq {}, %rax\nmovq $0, %rdx\ndivq {}\nmovq %rax, {}",
REG_NAMES[left], REG_NAMES[right], REG_NAMES[left]
)?,
_ => todo!(),
}
self.deallocate_reg(right)?;
Ok(left)
}
fn generate_literal(
&mut self,
value_type: &DynoType,
value: &DynoValue,
) -> DynoResult<Register> {
use crate::types::DynoValue::*;
let reg = self.allocate_reg()?;
match (value_type, value) {
(_, UInt(x)) => writeln!(self.writer, "movq ${}, {}", x, REG_NAMES[reg])?,
_ => {
return Err(DynoError::GeneratorError(format!(
"Failed to generate literal for {:?}, {:?}",
value_type, value,
)))
}
}
Ok(reg)
}
fn generate_widen(
&mut self,
expression: &Expression,
_value_type: &DynoType,
) -> DynoResult<Register> {
        // TODO: actually implement widen here
self.generate_expression(expression)
}
fn generate_identifier(&mut self, _name: &str) -> DynoResult<Register> {
Ok(0)
}
fn generate_if(
&mut self,
_condition: &Expression,
_true_statement: &Statement,
) -> DynoResult<()> {
todo!();
}
fn generate_while(&mut self, _condition: &Expression, _body: &Statement) -> DynoResult<()> {
todo!();
}
fn generate_return(&mut self, expression: &Expression) -> DynoResult<()> {
let reg = self.generate_expression(expression)?;
writeln!(self.writer, "movq {}, %rax", REG_NAMES[reg])?;
writeln!(self.writer, "ret")?;
self.deallocate_reg(reg)
}
#[allow(dead_code)]
fn generate_block(&mut self, children: &[Statement]) -> DynoResult<()> {
for child in children {
self.generate_statement(child)?;
}
Ok(())
}
fn generate_declaration(&mut self, _name: &str, _value_type: &DynoType) -> DynoResult<()> {
todo!();
}
fn generate_assignment(&mut self, _name: &str, _expression: &Expression) -> DynoResult<()> {
todo!();
}
}
pub fn compile_and_run(ast: &Statement) -> DynoResult<u64> {
std::fs::create_dir_all("target/x86")?;
//TODO: replace this with a hash or something
let time = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_nanos();
let assembly_file = format!("target/x86/{}.s", time);
let mut backend = X86Backend::new(&assembly_file);
backend.generate_header()?;
backend.generate_statement(ast)?;
backend.finish()?;
let executable = format!("target/x86/{}.out", time);
let compile_status = Command::new("cc")
.arg(&assembly_file)
.arg("-o")
.arg(&executable)
.status()?;
if compile_status.code().unwrap() != 0 {
return Err(DynoError::GeneratorError(
"Failed to compile assembly".to_string(),
));
}
//TODO: change this to support 64 bit integer output
let status = Command::new(&executable).status()?;
Ok(status.code().unwrap() as u64)
}
| true |
76315f21d377a981c39dc2876d7477c564bf8c7a
|
Rust
|
vanam/rust2llvm
|
/examples/07_function.rs
|
UTF-8
| 353 | 3.59375 | 4 |
[
"MIT"
] |
permissive
|
fn gcd(a: i32, b: i32) -> i32 {
// if as a expression and expression as a implicit return statement
if b == 0 {
a
} else {
gcd(b, a % b) // support for direct recursion
}
}
fn main() {
let a: i32 = 12;
let b: i32 = 90;
printf("GCD(%d, %d) = %d\n", a, b, gcd(a, b)); // GCD(12, 90) = 6
}
| true |
3a73cd9585e21ad0dd0e86d39df9ca7c69f3b01d
|
Rust
|
kubos/kubos
|
/libs/file-protocol/src/protocol.rs
|
UTF-8
| 29,532 | 2.71875 | 3 |
[
"Apache-2.0"
] |
permissive
|
//
// Copyright (C) 2018 Kubos Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! File transfer protocol module
use super::messages;
use super::parsers;
use super::storage;
use super::Message;
use crate::error::ProtocolError;
use cbor_protocol::Protocol as CborProtocol;
use log::{error, info, warn};
use rand::{self, Rng};
use serde_cbor::Value;
use std::cell::Cell;
use std::net::SocketAddr;
use std::str;
use std::thread;
use std::time::Duration;
/// Configuration data for Protocol
#[derive(Clone)]
pub struct ProtocolConfig {
// Name of folder used to store protocol metadata
storage_prefix: String,
// Chunk size used in transfers
transfer_chunk_size: usize,
// How many times do we read and timeout
// while in the Hold state before stopping
hold_count: u16,
// Duration of delay between individual chunk transmission
inter_chunk_delay: Duration,
// Max number of chunks to transmit in one go
max_chunks_transmit: Option<u32>,
// Chunk size used in storage hashing
hash_chunk_size: usize,
}
impl ProtocolConfig {
/// Creates new ProtocolConfig struct
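    ///
    /// # Examples
    ///
    /// ```no_run
    /// use file_protocol::*;
    ///
    /// // A minimal sketch mirroring the other doc examples in this module; the values are
    /// // placeholders rather than recommended settings.
    /// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
    /// ```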
pub fn new(
storage_prefix: Option<String>,
transfer_chunk_size: usize,
hold_count: u16,
inter_chunk_delay: u64,
max_chunks_transmit: Option<u32>,
hash_chunk_size: usize,
) -> Self {
ProtocolConfig {
storage_prefix: storage_prefix.unwrap_or_else(|| "file-storage".to_owned()),
transfer_chunk_size,
hold_count,
inter_chunk_delay: Duration::from_millis(inter_chunk_delay),
max_chunks_transmit,
hash_chunk_size,
}
}
}
/// File protocol information structure
pub struct Protocol {
cbor_proto: CborProtocol,
remote_addr: Cell<SocketAddr>,
config: ProtocolConfig,
}
/// Current state of the file protocol transaction
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum State {
/// Neutral state, neither transmitting nor receiving
Holding {
/// Number of consecutive times the holding state has been hit
count: u16,
/// Previous state to return to once we exit the holding state
prev_state: Box<State>,
},
/// Preparing to receive file chunks
StartReceive {
/// Destination file path
path: String,
},
/// Currently receiving a file
Receiving {
/// Transaction identifier
channel_id: u32,
/// File hash
hash: String,
/// Destination file path
path: String,
/// File mode
mode: Option<u32>,
},
/// All file chunks have been received
ReceivingDone {
/// Transaction identifier
channel_id: u32,
/// File hash
hash: String,
/// Destination file path
path: String,
/// File mode
mode: Option<u32>,
},
    /// Currently transmitting a file
Transmitting,
/// All file chunks have been transmitted
TransmittingDone,
/// Finished transmitting/receiving, thread or process may end
Done,
}
impl Protocol {
/// Create a new file protocol instance using an automatically assigned UDP socket
///
/// # Arguments
///
/// * host_ip - The local IP address
/// * remote_addr - The remote IP and port to communicate with
/// * prefix - Temporary storage directory prefix
///
/// # Errors
///
/// If this function encounters any errors, it will panic
///
/// # Examples
///
/// ```no_run
/// use file_protocol::*;
///
/// let config = FileProtocolConfig::new(Some("my/file/storage".to_owned()), 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "192.168.0.1:7000", config);
/// ```
///
pub fn new(host_addr: &str, remote_addr: &str, config: ProtocolConfig) -> Self {
// Get a local UDP socket (Bind)
let c_protocol = CborProtocol::new(host_addr, config.transfer_chunk_size);
// Set up the full connection info
Protocol {
cbor_proto: c_protocol,
remote_addr: Cell::new(
remote_addr
.parse::<SocketAddr>()
.map_err(|err| {
error!("Failed to parse remote_addr: {:?}", err);
err
})
.unwrap(),
),
config,
}
}
/// Send CBOR packet to the destination port
///
/// # Arguments
///
/// * vec - CBOR packet to send
///
/// # Errors
///
/// If this function encounters any errors, it will return an error message string
///
/// # Examples
///
/// ```no_run
/// use file_protocol::*;
/// use serde_cbor::ser;
///
/// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "0.0.0.0:7000", config);
/// let message = ser::to_vec_packed(&"ping").unwrap();
///
/// f_protocol.send(&message);
/// ```
///
pub fn send(&self, vec: &[u8]) -> Result<(), ProtocolError> {
self.cbor_proto.send_message(vec, self.remote_addr.get())?;
Ok(())
}
/// Receive a file protocol message
///
/// # Arguments
///
/// * timeout - Maximum time to wait for a reply. If `None`, will block indefinitely
///
/// # Errors
///
/// - If this function times out, it will return `Err(None)`
/// - If this function encounters any errors, it will return an error message string
///
///
/// # Examples
///
/// ```no_run
/// use file_protocol::*;
/// use std::time::Duration;
///
/// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "0.0.0.0:7000", config);
///
/// let message = match f_protocol.recv(Some(Duration::from_secs(1))) {
/// Ok(data) => data,
/// Err(ProtocolError::ReceiveTimeout) => {
/// println!("Timeout waiting for message");
/// return;
/// }
/// Err(err) => panic!("Failed to receive message: {}", err),
/// };
/// ```
///
pub fn recv(&self, timeout: Option<Duration>) -> Result<Value, ProtocolError> {
match timeout {
Some(value) => Ok(self.cbor_proto.recv_message_timeout(value)?),
None => Ok(self.cbor_proto.recv_message()?),
}
}
/// Generates a new random channel ID for use when initiating a
/// file transfer.
///
/// # Errors
///
/// If this function encounters any errors, it will return an error message string
///
/// # Examples
///
/// ```no_run
/// use file_protocol::*;
///
/// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "0.0.0.0:7000", config);
///
/// let channel_id = f_protocol.generate_channel();
/// ```
///
pub fn generate_channel(&self) -> Result<u32, ProtocolError> {
let mut rng = rand::thread_rng();
let channel_id: u32 = rng.gen_range(100_000, 999_999);
Ok(channel_id)
}
/// Send a file's metadata information to the remote target
///
/// # Arguments
///
/// * channel_id - Channel ID for transaction
/// * hash - BLAKE2s hash of file
/// * num_chunks - Number of data chunks needed for file
///
/// # Errors
///
/// If this function encounters any errors, it will return an error message string
///
/// # Examples
///
/// ```no_run
/// use file_protocol::*;
///
/// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "0.0.0.0:7000", config);
///
/// # ::std::fs::File::create("client.txt").unwrap();
///
/// let (hash, num_chunks, _mode) = f_protocol.initialize_file("client.txt").unwrap();
/// let channel_id = f_protocol.generate_channel().unwrap();
/// f_protocol.send_metadata(channel_id, &hash, num_chunks);
/// ```
///
pub fn send_metadata(
&self,
channel_id: u32,
hash: &str,
num_chunks: u32,
) -> Result<(), ProtocolError> {
self.send(&messages::metadata(channel_id, hash, num_chunks)?)
}
/// Send a request to cleanup the remote storage folder
pub fn send_cleanup(&self, channel_id: u32, hash: Option<String>) -> Result<(), ProtocolError> {
self.send(&messages::cleanup(channel_id, hash)?)
}
/// Request remote target to receive file from host
///
/// # Arguments
///
/// * channel_id - Channel ID used for transaction
/// * hash - BLAKE2s hash of file
/// * target_path - Destination file path
/// * mode - File mode
///
/// # Errors
///
/// If this function encounters any errors, it will return an error message string
///
/// # Examples
///
/// ```no_run
/// use file_protocol::*;
///
/// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "0.0.0.0:7000", config);
///
/// # ::std::fs::File::create("client.txt").unwrap();
///
/// let (hash, _num_chunks, mode) = f_protocol.initialize_file("client.txt").unwrap();
/// let channel_id = f_protocol.generate_channel().unwrap();
/// f_protocol.send_export(channel_id, &hash, "final/dir/service.txt", mode);
/// ```
///
pub fn send_export(
&self,
channel_id: u32,
hash: &str,
target_path: &str,
mode: u32,
) -> Result<(), ProtocolError> {
self.send(&messages::export_request(
channel_id,
hash,
target_path,
mode,
)?)?;
Ok(())
}
/// Request a file from a remote target
///
/// # Arguments
///
/// * source_path - File remote target should send
///
/// # Errors
///
/// If this function encounters any errors, it will return an error message string
///
/// # Examples
///
/// ```no_run
/// use file_protocol::*;
///
/// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "0.0.0.0:7000", config);
/// let channel_id = f_protocol.generate_channel().unwrap();
///
/// f_protocol.send_import(channel_id, "service.txt");
/// ```
///
pub fn send_import(&self, channel_id: u32, source_path: &str) -> Result<(), ProtocolError> {
self.send(&messages::import_request(channel_id, source_path)?)?;
Ok(())
}
/// Prepare a file for transfer
///
/// Imports the file into temporary storage and calculates the BLAKE2s hash
///
/// # Arguments
///
/// * source_path - File to initialize for transfer
///
/// # Errors
///
/// If this function encounters any errors, it will return an error message string
///
/// # Examples
///
/// ```no_run
/// use file_protocol::*;
///
/// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "0.0.0.0:7000", config);
///
/// # ::std::fs::File::create("client.txt").unwrap();
///
/// let (_hash, _num_chunks, _mode) = f_protocol.initialize_file("client.txt").unwrap();
/// ```
///
pub fn initialize_file(&self, source_path: &str) -> Result<(String, u32, u32), ProtocolError> {
storage::initialize_file(
&self.config.storage_prefix,
source_path,
self.config.transfer_chunk_size,
self.config.hash_chunk_size,
)
}
// Verify the integrity of received file data and then transfer into the requested permanent file location.
// Notify the connection peer of the results
//
// Verifies:
// a) All of the chunks of a file have been received
// b) That the calculated hash of said chunks matches the expected hash
//
fn finalize_file(
&self,
channel_id: u32,
hash: &str,
target_path: &str,
mode: Option<u32>,
) -> Result<(), ProtocolError> {
match storage::finalize_file(
&self.config.storage_prefix,
hash,
target_path,
mode,
self.config.hash_chunk_size,
) {
Ok(_) => {
self.send(&messages::operation_success(channel_id, hash)?)?;
storage::delete_file(&self.config.storage_prefix, hash)?;
Ok(())
}
Err(e) => {
self.send(&messages::operation_failure(channel_id, &format!("{}", e))?)?;
Err(e)
}
}
}
/// Send all requested chunks of a file to the remote destination
///
/// # Arguments
/// * channel_id - ID of channel to communicate over
/// * hash - Hash of file corresponding to chunks
/// * chunks - List of chunk ranges to transmit
fn send_chunks(
&self,
channel_id: u32,
hash: &str,
chunks: &[(u32, u32)],
) -> Result<(), ProtocolError> {
let mut chunks_transmitted = 0;
for (first, last) in chunks {
for chunk_index in *first..*last {
match storage::load_chunk(&self.config.storage_prefix, hash, chunk_index) {
Ok(c) => self.send(&messages::chunk(channel_id, hash, chunk_index, &c)?)?,
Err(e) => {
warn!("Failed to load chunk {}:{} : {}", hash, chunk_index, e);
storage::delete_file(&self.config.storage_prefix, hash)?;
return Err(ProtocolError::CorruptFile(hash.to_string()));
}
};
if let Some(max_chunks_transmit) = self.config.max_chunks_transmit {
chunks_transmitted += 1;
if chunks_transmitted >= max_chunks_transmit {
return Ok(());
}
}
thread::sleep(self.config.inter_chunk_delay);
}
}
Ok(())
}
/// Listen for and process file protocol messages
///
/// # Arguments
///
/// * pump - Function which returns the next message for processing
/// * timeout - Maximum time to listen for a single message
/// * start_state - Current transaction state
///
/// # Errors
///
/// If this function encounters any errors, it will return an error message string
///
/// # Examples
///
/// ```no_run
/// use file_protocol::*;
/// use std::time::Duration;
///
/// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "0.0.0.0:7000", config);
///
/// f_protocol.message_engine(
/// |d| f_protocol.recv(Some(d)),
/// Duration::from_millis(10),
/// &State::Transmitting
/// );
/// ```
///
pub fn message_engine<F>(
&self,
pump: F,
timeout: Duration,
start_state: &State,
) -> Result<(), ProtocolError>
where
F: Fn(Duration) -> Result<Value, ProtocolError>,
{
let mut state = start_state.clone();
loop {
// Listen on UDP port
let message = match pump(timeout) {
Ok(message) => {
// If we previously timed out, restore the old state
if let State::Holding { prev_state, .. } = state {
state = *prev_state;
}
message
}
Err(ProtocolError::ReceiveTimeout) => match state.clone() {
State::Receiving {
channel_id,
hash,
path,
mode,
} => {
match storage::validate_file(&self.config.storage_prefix, &hash, None) {
Ok((true, _)) => {
self.send(&messages::ack(channel_id, &hash, None)?)?;
state = State::ReceivingDone {
channel_id,
hash: hash.clone(),
path: path.clone(),
mode,
};
}
Ok((false, chunks)) => {
self.send(&messages::nak(channel_id, &hash, &chunks)?)?;
state = State::Holding {
count: 0,
prev_state: Box::new(state.clone()),
};
continue;
}
Err(e) => return Err(e),
};
match self.finalize_file(channel_id, &hash, &path, mode) {
Ok(_) => {
return Ok(());
}
Err(e) => {
warn!("Failed to finalize file {} as {}: {}", hash, path, e);
// TODO: Handle finalization failures (ex. corrupted chunk file)
state = State::Holding {
count: 0,
prev_state: Box::new(state.clone()),
};
continue;
}
}
}
State::ReceivingDone {
channel_id,
hash,
path,
mode,
} => {
// We've got all the chunks of data we want.
// Stitch it back together and verify the hash of the official file
self.finalize_file(channel_id, &hash, &path, mode)?;
return Ok(());
}
State::Done => {
return Ok(());
}
State::Holding { count, prev_state } => {
if count > self.config.hold_count {
match prev_state.as_ref() {
State::Holding { .. } => return Ok(()),
_other => {
return Err(ProtocolError::ReceiveTimeout);
}
}
} else {
state = State::Holding {
count: count + 1,
prev_state,
};
continue;
}
}
_ => {
state = State::Holding {
count: 0,
prev_state: Box::new(state.clone()),
};
continue;
}
},
Err(e) => return Err(e),
};
match self.process_message(message, &state) {
Ok(new_state) => state = new_state,
Err(e) => return Err(e),
}
match state.clone() {
State::ReceivingDone {
channel_id,
hash,
path,
mode,
} => {
// We've got all the chunks of data we want.
// Stitch it back together and verify the hash of the official file
self.finalize_file(channel_id, &hash, &path, mode)?;
return Ok(());
}
State::Done => return Ok(()),
_ => continue,
};
}
}
/// Process a file protocol message
///
/// Returns the new transaction state
///
/// # Arguments
///
/// * message - File protocol message to process
/// * state - Current transaction state
///
/// # Errors
///
/// If this function encounters any errors, it will return an error message string
///
/// # Examples
///
/// ```
/// use file_protocol::*;
/// use std::time::Duration;
///
/// let config = FileProtocolConfig::new(None, 1024, 5, 1, None, 2048);
/// let f_protocol = FileProtocol::new("0.0.0.0:8000", "0.0.0.0:7000", config);
///
/// if let Ok(message) = f_protocol.recv(Some(Duration::from_millis(100))) {
/// let _state = f_protocol.process_message(
/// message,
/// &State::StartReceive {
/// path: "target/dir/file.bin".to_owned()
/// }
/// );
/// }
/// ```
///
pub fn process_message(&self, message: Value, state: &State) -> Result<State, ProtocolError> {
let parsed_message = parsers::parse_message(message)?;
let new_state = match &parsed_message {
Message::Sync(channel_id, hash) => {
info!("<- {{ {}, {} }}", channel_id, hash);
state.clone()
}
Message::Metadata(channel_id, hash, num_chunks) => {
info!("<- {{ {}, {}, {} }}", channel_id, hash, num_chunks);
storage::store_meta(&self.config.storage_prefix, hash, *num_chunks)?;
State::StartReceive {
path: hash.to_owned(),
}
}
Message::ReceiveChunk(channel_id, hash, chunk_num, data) => {
info!(
"<- {{ {}, {}, {}, chunk_data }}",
channel_id, hash, chunk_num
);
storage::store_chunk(&self.config.storage_prefix, hash, *chunk_num, data)?;
state.clone()
}
Message::ACK(_channel_id, ack_hash) => {
info!("<- {{ {}, true }}", ack_hash);
// TODO: Figure out hash verification here
State::TransmittingDone
}
Message::NAK(channel_id, hash, Some(missing_chunks)) => {
info!(
"<- {{ {}, {}, false, {:?} }}",
channel_id, hash, missing_chunks
);
match self.send_chunks(*channel_id, hash, missing_chunks) {
Ok(()) => {}
Err(error) => self.send(&messages::operation_failure(
*channel_id,
&format!("{}", error),
)?)?,
};
State::Transmitting
}
Message::NAK(channel_id, hash, None) => {
info!("<- {{ {}, {}, false }}", channel_id, hash);
// TODO: Maybe trigger a failure?
state.clone()
}
Message::ReqReceive(channel_id, hash, path, mode) => {
info!(
"<- {{ {}, export, {}, {}, {:?} }}",
channel_id, hash, path, mode
);
// The client wants to send us a file.
// See what state the file is currently in on our side
match storage::validate_file(&self.config.storage_prefix, hash, None) {
Ok((true, _)) => {
// We've already got all the file data in temporary storage
self.send(&messages::ack(*channel_id, hash, None)?)?;
State::ReceivingDone {
channel_id: *channel_id,
hash: hash.to_string(),
path: path.to_string(),
mode: *mode,
}
}
Ok((false, chunks)) => {
                    // We're missing some number of data chunks of the requested file
self.send(&messages::nak(*channel_id, hash, &chunks)?)?;
State::Receiving {
channel_id: *channel_id,
hash: hash.to_string(),
path: path.to_string(),
mode: *mode,
}
}
Err(e) => return Err(e),
}
}
Message::ReqTransmit(channel_id, path) => {
info!("<- {{ {}, import, {} }}", channel_id, path);
// Set up the requested file for transmission
match self.initialize_file(path) {
Ok((hash, num_chunks, mode)) => {
// It worked, let the requester know we're ready to send
self.send(&messages::import_setup_success(
*channel_id,
&hash,
num_chunks,
mode,
)?)?;
State::Transmitting
}
Err(error) => {
// It failed. Let the requester know that we can't transmit
// the file they want.
self.send(&messages::operation_failure(
*channel_id,
&format!("{}", error),
)?)?;
State::Done
}
}
}
Message::SuccessReceive(channel_id, hash) => {
info!("<- {{ {}, true }}", channel_id);
storage::delete_file(&self.config.storage_prefix, hash)?;
State::Done
}
Message::SuccessTransmit(channel_id, hash, num_chunks, mode) => {
match mode {
Some(value) => info!(
"<- {{ {}, true, {}, {}, {} }}",
channel_id, hash, num_chunks, value
),
None => {
info!("<- {{ {}, true, {}, {} }}", channel_id, hash, num_chunks)
}
}
// TODO: handle channel_id mismatch
match storage::validate_file(&self.config.storage_prefix, hash, Some(*num_chunks)) {
Ok((true, _)) => {
self.send(&messages::ack(*channel_id, hash, Some(*num_chunks))?)?;
match state.clone() {
State::StartReceive { path } => State::ReceivingDone {
channel_id: *channel_id,
hash: hash.to_string(),
path,
mode: *mode,
},
_ => State::Done,
}
}
Ok((false, chunks)) => {
self.send(&messages::nak(*channel_id, hash, &chunks)?)?;
match state.clone() {
State::StartReceive { path } => State::Receiving {
channel_id: *channel_id,
hash: hash.to_string(),
path,
mode: *mode,
},
_ => state.clone(),
}
}
Err(e) => return Err(e),
}
}
Message::Failure(channel_id, error_message) => {
info!("<- {{ {}, false, {} }}", channel_id, error_message);
return Err(ProtocolError::TransmissionError {
channel_id: *channel_id,
error_message: error_message.to_string(),
});
}
Message::Cleanup(channel_id, Some(hash)) => {
info!("<- {{ {}, cleanup, {} }}", channel_id, hash);
storage::delete_file(&self.config.storage_prefix, hash)?;
State::Done
}
Message::Cleanup(channel_id, None) => {
info!("< {{ {}, cleanup }}", channel_id);
storage::delete_storage(&self.config.storage_prefix)?;
State::Done
}
};
Ok(new_state)
}
}
| true |
a847e3733ea55add4f035ba2db05a0f38f0f7b42
|
Rust
|
arnohub/vertex
|
/src/sources/node/btrfs.rs
|
UTF-8
| 14,868 | 2.625 | 3 |
[
"Apache-2.0"
] |
permissive
|
use std::collections::BTreeMap;
use event::{Metric, tags};
use super::{Error, ErrorContext, read_into, read_to_string};
use std::path::{Path, PathBuf};
const SECTOR_SIZE: u64 = 512;
/// LayoutUsage contains additional usage statistics for a disk layout
pub struct LayoutUsage {
used_bytes: u64,
total_bytes: u64,
ratio: f64,
}
/// AllocationStats contains allocation statistics for a data type
pub struct AllocationStats {
// Usage statistics
disk_used_bytes: u64,
disk_total_bytes: u64,
may_used_bytes: u64,
pinned_bytes: u64,
total_pinned_bytes: u64,
read_only_bytes: u64,
reserved_bytes: u64,
used_bytes: u64,
total_bytes: u64,
// Flags marking filesystem state
// See Linux fs/btrfs/ctree.h for more information.
flags: u64,
// Additional disk usage statistics depending on the disk
// layout. At least one of these will exist and not be nil
layouts: BTreeMap<String, LayoutUsage>,
}
/// Allocation contains allocation statistics for data,
/// metadata and system data
pub struct Allocation {
global_rsv_reserved: u64,
global_rsv_size: u64,
data: Option<AllocationStats>,
metadata: Option<AllocationStats>,
system: Option<AllocationStats>,
}
/// Device contains information about a device that is part of
/// a Btrfs filesystem
struct Device {
size: u64,
}
/// Stats contains statistics for a single Btrfs filesystem.
/// See Linux fs/btrfs/sysfs.c for more information
pub struct Stats {
uuid: String,
label: String,
allocation: Allocation,
devices: BTreeMap<String, Device>,
features: Vec<String>,
clone_alignment: u64,
node_size: u64,
quota_override: u64,
sector_size: u64,
}
pub async fn gather(sys_path: &str) -> Result<Vec<Metric>, Error> {
let stats = stats(sys_path).await?;
let mut metrics = vec![];
for s in &stats {
metrics.extend(stats_to_metrics(s));
}
Ok(metrics)
}
fn stats_to_metrics(stats: &Stats) -> Vec<Metric> {
let mut metrics = vec![
Metric::gauge_with_tags(
"node_btrfs_info",
"Filesystem information",
1.0,
tags!(
"label" => stats.label.clone()
),
),
Metric::gauge(
"node_btrfs_global_rsv_size_bytes",
"Size of global reserve.",
stats.allocation.global_rsv_size as f64,
),
];
// Information about devices
for (name, device) in &stats.devices {
metrics.push(Metric::gauge_with_tags(
"node_btrfs_device_size_bytes",
"Size of a device that is part of the filesystem.",
device.size as f64,
tags!(
"device" => name
),
));
}
// Information about data, metadata and system data.
if let Some(s) = &stats.allocation.data {
metrics.extend(get_allocation_stats("data", s));
}
if let Some(s) = &stats.allocation.metadata {
metrics.extend(get_allocation_stats("metadata", s));
}
if let Some(s) = &stats.allocation.system {
metrics.extend(get_allocation_stats("system", s));
}
metrics
}
fn get_allocation_stats(typ: &str, stats: &AllocationStats) -> Vec<Metric> {
let mut metrics = vec![
Metric::gauge_with_tags(
"node_btrfs_reserved_bytes",
"Amount of space reserved for a data type",
stats.reserved_bytes as f64,
tags!(
"block_group_type" => typ
),
)
];
// Add all layout statistics
for (layout, s) in &stats.layouts {
let mode = layout;
metrics.extend_from_slice(&[
Metric::gauge_with_tags(
"node_btrfs_used_bytes",
"Amount of used space by a layout/data type",
s.used_bytes as f64,
tags!(
"block_group_type" => typ,
"mode" => mode
),
),
Metric::gauge_with_tags(
"node_btrfs_size_bytes",
"Amount of space allocated for a layout/data type",
s.total_bytes as f64,
tags!(
"block_group_type" => typ,
"mode" => mode
),
),
Metric::gauge_with_tags(
"node_btrfs_allocation_ratio",
"Data allocation ratio for a layout/data type",
s.ratio,
tags!(
"block_group_type" => typ,
"mode" => mode
),
),
])
}
metrics
}
fn get_layout_metrics(typ: &str, mode: &str, s: LayoutUsage) -> Vec<Metric> {
vec![
Metric::gauge_with_tags(
"node_btrfs_used_bytes",
"Amount of used space by a layout/data type",
s.used_bytes as f64,
tags!(
"block_group_type" => typ,
"mode" => mode
),
),
Metric::gauge_with_tags(
"node_btrfs_size_bytes",
"Amount of space allocated for a layout/data type",
s.total_bytes as f64,
tags!(
"block_group_type" => typ,
"mode" => mode
),
),
Metric::gauge_with_tags(
"node_btrfs_allocation_ratio",
"Data allocation ratio for a layout/data type",
s.ratio,
tags!(
"block_group_type" => typ,
"mode" => mode
),
),
]
}
async fn stats(root: &str) -> Result<Vec<Stats>, Error> {
    let pattern = format!("{}/fs/btrfs/*-*", root);
let paths = glob::glob(&pattern)
.context("find btrfs blocks failed")?;
let mut stats = vec![];
for entry in paths {
        if let Ok(path) = entry {
            let s = get_stats(path).await
                .context("get btrfs stats failed")?;
            stats.push(s);
        }
}
Ok(stats)
}
async fn get_stats(root: PathBuf) -> Result<Stats, Error> {
let devices = read_device_info(&root).await
.with_context(|| {
format!("read device info failed, {:#?}", &root)
})?;
let path = root.join("label");
let label = read_to_string(path).await?;
let path = root.join("metadata_uuid");
let uuid = read_to_string(path).await?.trim_end().to_string();
let path = root.join("features");
let features = list_files(path).await?;
let path = root.join("clone_alignment");
let clone_alignment = read_into(path).await?;
let path = root.join("nodesize");
let node_size = read_into(path).await?;
let path = root.join("quota_override");
let quota_override = read_into(path).await?;
let path = root.join("sectorsize");
let sector_size = read_into(path).await?;
let path = root.join("allocation/global_rsv_reserved");
let global_rsv_reserved = read_into(path).await?;
let path = root.join("allocation/global_rsv_size");
let global_rsv_size = read_into(path).await?;
let path = root.join("allocation/data");
let data = read_allocation_stats(path, devices.len()).await.ok();
let path = root.join("allocation/metadata");
let metadata = read_allocation_stats(path, devices.len()).await.ok();
let path = root.join("allocation/system");
let system = read_allocation_stats(path, devices.len()).await.ok();
Ok(Stats {
uuid,
label,
devices,
features,
clone_alignment,
node_size,
quota_override,
sector_size,
allocation: Allocation {
global_rsv_reserved,
global_rsv_size,
data,
metadata,
system,
},
})
}
async fn list_files(path: impl AsRef<Path>) -> Result<Vec<String>, Error> {
let mut dirs = tokio::fs::read_dir(path).await?;
let mut files = vec![];
while let Some(entry) = dirs.next_entry().await? {
let name = entry.file_name().into_string().unwrap();
files.push(name);
}
Ok(files)
}
async fn read_device_info(path: &PathBuf) -> Result<BTreeMap<String, Device>, Error> {
let path = path.join("devices");
let mut dirs = tokio::fs::read_dir(path)
.await
.context("read btrfs devices failed")?;
let mut devices = BTreeMap::new();
while let Some(ent) = dirs.next_entry().await? {
let name = ent.file_name().into_string().unwrap();
let mut path = ent.path();
path.push("size");
let size: u64 = read_into(path).await.unwrap_or(0);
devices.insert(name, Device {
size: size * SECTOR_SIZE
});
}
Ok(devices)
}
async fn read_allocation_stats(root: PathBuf, devices: usize) -> Result<AllocationStats, Error> {
let path = root.join("bytes_may_use");
let may_used_bytes = read_into(path).await?;
let path = root.join("bytes_pinned");
let pinned_bytes = read_into(path).await?;
let path = root.join("bytes_readonly");
let read_only_bytes = read_into(path).await?;
let path = root.join("bytes_reserved");
let reserved_bytes = read_into(path).await?;
let path = root.join("bytes_used");
let used_bytes = read_into(path).await?;
let path = root.join("disk_used");
let disk_used_bytes = read_into(path).await?;
let path = root.join("disk_total");
let disk_total_bytes = read_into(path).await?;
let path = root.join("flags");
let flags = read_into(path).await?;
let path = root.join("total_bytes");
let total_bytes = read_into(path).await?;
let path = root.join("total_bytes_pinned");
let total_pinned_bytes = read_into(path).await?;
// TODO: check the path arg, it is just a placeholder
let layouts = read_layouts(root, devices).await?;
Ok(AllocationStats {
disk_used_bytes,
disk_total_bytes,
may_used_bytes,
pinned_bytes,
total_pinned_bytes,
read_only_bytes,
reserved_bytes,
used_bytes,
total_bytes,
flags,
layouts,
})
}
async fn read_layouts(root: PathBuf, devices: usize) -> Result<BTreeMap<String, LayoutUsage>, Error> {
let mut dirs = tokio::fs::read_dir(root).await?;
let mut layouts = BTreeMap::new();
while let Some(ent) = dirs.next_entry().await? {
let path = ent.path();
if !path.is_dir() {
continue;
}
let name = path.file_name().unwrap().to_str().unwrap().to_string();
let layout = read_layout(&path, devices).await?;
layouts.insert(name, layout);
}
Ok(layouts)
}
// read_layout reads the Btrfs layout statistics for an allocation layout.
async fn read_layout(root: &PathBuf, devices: usize) -> Result<LayoutUsage, Error> {
let root = root.clone();
let path = root.join("total_bytes");
let total_bytes = read_into(path).await?;
let path = root.join("used_bytes");
let used_bytes = read_into(path).await?;
let name = root.file_name().unwrap().to_str().unwrap_or("");
let ratio = calc_ratio(name, devices);
Ok(LayoutUsage {
used_bytes,
total_bytes,
ratio,
})
}
// calc_ratio returns the calculated ratio for a layout mode
fn calc_ratio(p: &str, n: usize) -> f64 {
match p {
"single" | "raid0" => 1f64,
"dup" | "raid1" | "raid10" => 2f64,
"raid5" => n as f64 / (n - 1) as f64,
"raid6" => n as f64 / (n - 2) as f64,
_ => 0.0
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_get_stats() {
let path = "testdata/sys";
let stats = stats(path).await.unwrap();
struct Alloc {
layout: String,
size: u64,
ratio: f64,
}
struct Expected {
uuid: String,
label: String,
devices: usize,
features: usize,
data: Alloc,
meta: Alloc,
system: Alloc,
}
let wants = vec![
Expected {
uuid: "0abb23a9-579b-43e6-ad30-227ef47fcb9d".to_string(),
label: "fixture".to_string(),
devices: 2,
features: 4,
data: Alloc {
layout: "raid0".to_string(),
size: 2147483648,
ratio: 1.0,
},
meta: Alloc {
layout: "raid1".to_string(),
size: 1073741824,
ratio: 2.0,
},
system: Alloc {
layout: "raid1".to_string(),
size: 8388608,
ratio: 2.0,
},
},
Expected {
uuid: "7f07c59f-6136-449c-ab87-e1cf2328731b".to_string(),
label: "".to_string(),
devices: 4,
features: 5,
data: Alloc {
layout: "raid5".to_string(),
size: 644087808,
ratio: 4.0 / 3.0,
},
meta: Alloc {
layout: "raid6".to_string(),
size: 429391872,
ratio: 4.0 / 2.0,
},
system: Alloc {
layout: "raid6".to_string(),
size: 16777216,
ratio: 4.0 / 2.0,
},
},
];
assert_eq!(wants.len(), stats.len());
for i in 0..wants.len() {
let want = &wants[i];
let got = &stats[i];
assert_eq!(got.uuid, want.uuid);
assert_eq!(got.devices.len(), want.devices);
assert_eq!(got.features.len(), want.features);
assert_eq!(got.allocation.data.as_ref().unwrap().total_bytes, want.data.size);
assert_eq!(got.allocation.metadata.as_ref().unwrap().total_bytes, want.meta.size);
assert_eq!(got.allocation.system.as_ref().unwrap().total_bytes, want.system.size);
assert_eq!(got.allocation.data.as_ref().unwrap().layouts.get(&want.data.layout).unwrap().ratio, want.data.ratio);
assert_eq!(got.allocation.metadata.as_ref().unwrap().layouts.get(&want.meta.layout).unwrap().ratio, want.meta.ratio);
assert_eq!(got.allocation.system.as_ref().unwrap().layouts.get(&want.system.layout).unwrap().ratio, want.system.ratio);
}
}
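
    // Illustrative sketch (added for clarity): raid5/raid6 ratios depend on the number of
    // devices, while the mirrored and striped layouts use fixed ratios.
    #[test]
    fn test_calc_ratio() {
        assert_eq!(calc_ratio("single", 2), 1.0);
        assert_eq!(calc_ratio("raid1", 2), 2.0);
        assert_eq!(calc_ratio("raid5", 4), 4.0 / 3.0);
        assert_eq!(calc_ratio("raid6", 4), 2.0);
        assert_eq!(calc_ratio("unknown", 4), 0.0);
    }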
#[tokio::test]
async fn test_read_device_info() {
let path = PathBuf::from("testdata/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b");
let _infos = read_device_info(&path).await.unwrap();
}
}
| true |
985d828cabaf96264727e59df95824d8adb553ad
|
Rust
|
rodrigorc/lemon_rust
|
/examples/example1/src/lexer.rs
|
UTF-8
| 849 | 2.796875 | 3 |
[
"Apache-2.0"
] |
permissive
|
use regex::Regex;
pub enum LexerAction<TOKEN> {
Ignore,
Action(Box<dyn Fn(&str) -> Option<TOKEN>>),
Token(Box<dyn Fn() -> TOKEN>),
}
pub struct Lexer<TOKEN> {
re : Vec<(Regex, LexerAction<TOKEN>)>,
}
impl<TOKEN> Lexer<TOKEN> {
pub fn new<I>(rules: I) -> Lexer<TOKEN>
where I : IntoIterator<Item=(&'static str, LexerAction<TOKEN>)> {
let re = rules.into_iter().map(|(s, ac)| {
let expr = format!("^({})", s);
(Regex::new(&expr).unwrap(), ac)
}).collect::<Vec<_>>();
Lexer{re : re}
}
pub fn next(&self, s : &str) -> (usize, Option<&LexerAction<TOKEN>>) {
for &(ref re, ref action) in self.re.iter() {
if let Some(to) = re.find(s) {
return (to.range().len(), Some(action));
}
}
(0, None)
}
}
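
// Illustrative usage sketch (added for clarity; the token type and rules below are
// hypothetical and not part of the original example grammar):
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[derive(Debug, PartialEq)]
    enum Tok {
        Num(i64),
        Plus,
    }

    #[test]
    fn matches_the_first_rule_and_reports_its_length() {
        let digits: Box<dyn Fn(&str) -> Option<Tok>> =
            Box::new(|s: &str| Some(Tok::Num(s.parse().unwrap())));
        let plus: Box<dyn Fn() -> Tok> = Box::new(|| Tok::Plus);
        let lexer = Lexer::new(vec![
            (r"[0-9]+", LexerAction::Action(digits)),
            (r"\+", LexerAction::Token(plus)),
            (r"[ \t]+", LexerAction::Ignore),
        ]);
        // "42" is the longest prefix matched by the first rule.
        let (len, action) = lexer.next("42 + 1");
        assert_eq!(len, 2);
        assert!(action.is_some());
    }
}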
| true |
28772a3a8657f24800e4e3f204f15748c00a5c23
|
Rust
|
E-gy/try_all
|
/src/lib.rs
|
UTF-8
| 1,291 | 3.546875 | 4 |
[
"MIT"
] |
permissive
|
//! Rust iterator extensions to operate on `Result`s effectively.
//!
//! ## [`try_map_all`](crate::TryMapAll::try_map_all)
//! _and [`try_map_all_opt`](crate::TryMapAllOption::try_map_all_opt)_
//!
//! Applies a closure on all items of the iterator until one fails (or all succeed).
//!
//! ```rust
//! # use crate::try_all::*;
//! fn all_numbers_x2(strs: &Vec<&str>) -> Result<Vec<u64>, std::num::ParseIntError> {
//! Ok(strs.iter().try_map_all(|s| Ok(s.parse::<u64>()?*2))?.collect())
//! }
//! ```
//!
//! Respectively, for [`Option`]s:
//! ```rust
//! # use crate::try_all::*;
//! fn not_zero(is: Vec<u64>) -> Option<Vec<u64>> {
//! Some(is.into_iter().try_map_all_opt(|i| if i > 0 { Some(i) } else { None })?.collect())
//! }
//! ```
//!
//! _Note: once [#42327](https://github.com/rust-lang/rust/issues/42327) is merged, `try_map_all` will be implemented\* under one name for all try types._
//!
//! ## [`try_all`](crate::TryAll::try_all(self))
//!
//! Tries all items of the iterator until one fails (or all succeed).
//!
//! ```
//! # use crate::try_all::*;
//! fn parse_all_numbers(strs: &Vec<&str>) -> Result<Vec<u64>, std::num::ParseIntError> {
//! Ok(strs.iter().map(|s| s.parse()).try_all()?.collect())
//! }
//! ```
//!
mod iter;
pub use iter::*;
mod map;
pub use map::*;
| true |
7edfd14c2688fe23ad0508a3181d6f88e2147595
|
Rust
|
bilalhusain/redis-rs
|
/src/redis/parser.rs
|
UTF-8
| 4,569 | 3.171875 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
use std::str;
use std::io::Reader;
use std::str::from_utf8;
use enums::*;
pub struct Parser<T> {
iter: T,
}
pub struct ByteIterator<'a> {
pub reader: &'a mut Reader,
}
impl<T: Iterator<u8>> Parser<T> {
/// Creates a new parser from a character source iterator.
pub fn new(iter: T) -> Parser<T> {
Parser { iter: iter }
}
/// parses a value from the iterator
pub fn parse_value(&mut self) -> Value {
let b = try_unwrap!(self.iter.next(), Invalid);
match b as char {
'+' => self.parse_status(),
':' => self.parse_int(),
'$' => self.parse_data(),
'*' => self.parse_bulk(),
'-' => self.parse_error(),
_ => Invalid,
}
}
#[inline]
fn expect_char(&mut self, refchar: char) -> bool {
match self.iter.next() {
Some(c) => {
if c as char == refchar {
return true;
}
},
_ => {}
}
return false;
}
#[inline]
fn expect_newline(&mut self) -> bool {
let c = try_unwrap!(self.iter.next(), false) as char;
if c == '\n' {
return true;
}
if c != '\r' {
return false;
}
return self.expect_char('\n');
}
fn read_line(&mut self) -> Option<Vec<u8>> {
let mut rv = vec![];
loop {
let b = try_unwrap!(self.iter.next(), None);
match b as char {
'\n' => { break; }
'\r' => {
if self.expect_char('\n') {
break;
} else {
return None;
}
},
_ => { rv.push(b) }
};
}
Some(rv)
}
fn read(&mut self, bytes: uint) -> Option<Vec<u8>> {
let mut rv = vec![];
rv.reserve(bytes);
for _ in range(0, bytes) {
rv.push(try_unwrap!(self.iter.next(), None));
}
Some(rv)
}
fn read_int_line(&mut self) -> Option<i64> {
let line = try_unwrap!(self.read_line(), None);
from_str(try_unwrap!(from_utf8(line.as_slice()), None).trim())
}
fn parse_status(&mut self) -> Value {
let line = try_unwrap!(self.read_line(), Invalid);
let s = try_unwrap!(str::from_utf8_owned(line).ok(), Invalid);
if s == "OK".to_string() {
Success
} else {
Status(s)
}
}
fn parse_int(&mut self) -> Value {
Int(try_unwrap!(self.read_int_line(), Invalid))
}
fn parse_data(&mut self) -> Value {
let length = try_unwrap!(self.read_int_line(), Invalid);
if length < 0 {
Nil
} else {
let data = try_unwrap!(self.read(length as uint), Invalid);
if !self.expect_newline() {
Invalid
} else {
Data(data)
}
}
}
fn parse_bulk(&mut self) -> Value {
let length = try_unwrap!(self.read_int_line(), Invalid);
if length < 0 {
Nil
} else {
let mut rv = vec![];
rv.reserve(length as uint);
for _ in range(0, length) {
match self.parse_value() {
Invalid => { return Invalid; }
value => rv.push(value)
};
}
Bulk(rv)
}
}
fn parse_error(&mut self) -> Value {
let byte_line = try_unwrap!(self.read_line(), Invalid);
let line = try_unwrap!(from_utf8(byte_line.as_slice()), Invalid);
let mut pieces = line.splitn(' ', 1);
let code = match pieces.next().unwrap() {
"ERR" => ResponseError,
"EXECABORT" => ExecAbortError,
"LOADING" => BusyLoadingError,
"NOSCRIPT" => NoScriptError,
"" => UnknownError,
other => ExtensionError(other.to_string()),
};
        let message = pieces.next().unwrap_or("An unknown error occurred.");
return Error(code, message.to_string());
}
}
/// Parses bytes into a redis value
pub fn parse_redis_value(bytes: &[u8]) -> Value {
let mut parser = Parser::new(bytes.iter().map(|x| *x));
parser.parse_value()
}
/// A simple iterator helper that reads bytes from a reader
impl<'a> Iterator<u8> for ByteIterator<'a> {
#[inline]
fn next(&mut self) -> Option<u8> {
self.reader.read_byte().ok()
}
}
| true |
64c249be2b9d74bdeeddbc769c026a3e2762e3ee
|
Rust
|
Nereuxofficial/rust_move_gen
|
/src/mv_list/mod.rs
|
UTF-8
| 1,580 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
use bb::BB;
use castle::Castle;
use square::Square;
mod mv_counter;
mod mv_vec;
mod piece_square_table;
mod sorted_move_adder;
pub use self::mv_counter::MoveCounter;
pub use self::mv_vec::MoveVec;
pub use self::piece_square_table::PieceSquareTable;
pub use self::sorted_move_adder::{SortedMoveAdder, SortedMoveHeap, SortedMoveHeapItem};
/// MoveAdder represents a way to collect moves from move generation functions
/// Implementations:
/// MoveCounter (count moves only)
/// MoveVec (adds move to Vec)
/// SortedMoveAdder (adds moves along with piece-square-scores to a sorted binary heap)
pub trait MoveAdder {
fn add_captures(&mut self, from: Square, targets: BB);
fn add_non_captures(&mut self, from: Square, targets: BB);
/// Adds the castle to the move list
fn add_castle(&mut self, castle: Castle);
    /// Adds pawn non-captures to the list. `targets` is a bitboard of valid to-squares.
    /// `shift` is the distance the pawn moved to reach the target square, mod 64.
    /// For example, for a white piece moving forward one row this is 8; for a black piece
    /// moving forward one row it is 56 (-8 % 64). A standalone sketch of this mapping follows
    /// the trait definition.
    fn add_pawn_pushes(&mut self, shift: usize, targets: BB);
    /// Adds pawn captures to the list. `targets` and `shift` are the same as for
    /// `add_pawn_pushes`. Do not use this for en-passant captures (use `add_pawn_ep_capture`).
    fn add_pawn_captures(&mut self, shift: usize, targets: BB);
    /// Adds a pawn en-passant capture to the list. `from` and `to` are the squares the moving
    /// piece moves from and to, respectively.
fn add_pawn_ep_capture(&mut self, from: Square, to: Square);
}
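// A minimal, standalone sketch (deliberately using plain `u64`/`usize` instead of this crate's
// `BB`/`Square` types, to avoid assuming their APIs) of how an implementor might recover the
// origin square from the `shift` convention documented on `add_pawn_pushes`: since `shift` is
// the move distance mod 64, the origin is `(to + 64 - shift) % 64`.
#[allow(dead_code)]
fn for_each_pawn_push_example(shift: usize, mut targets: u64, mut f: impl FnMut(usize, usize)) {
    while targets != 0 {
        let to = targets.trailing_zeros() as usize; // lowest set bit = next target square
        targets &= targets - 1; // clear that bit
        let from = (to + 64 - shift) % 64; // e.g. white single push: shift = 8, from = to - 8
        f(from, to);
    }
}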
| true |
0c1adb54bb80499762f4125d2c651db42f1dee67
|
Rust
|
comit-network/secp256kfun
|
/secp256kfun/tests/against_c_lib.rs
|
UTF-8
| 5,298 | 2.6875 | 3 |
[
"0BSD"
] |
permissive
|
#![allow(non_snake_case)]
#[cfg(not(target_arch = "wasm32"))]
mod test {
use secp256k1::{PublicKey, SecretKey};
use secp256kfun::{g, marker::*, op::double_mul, s, Scalar, G};
fn rand_32_bytes() -> [u8; 32] {
use rand::RngCore;
let mut bytes = [0u8; 32];
rand::thread_rng().fill_bytes(&mut bytes);
bytes
}
#[test]
fn point_multiplication() {
let secp = secp256k1::Secp256k1::new();
// Multiply a generator by scalar for both libraries and test equality
let (point_1, secp_pk_1) = {
let scalar_1 = rand_32_bytes();
let point_1 = g!({ Scalar::from_bytes_mod_order(scalar_1.clone()) } * G)
.mark::<(Normal, NonZero)>()
.unwrap();
let secp_pk_1 =
PublicKey::from_secret_key(&secp, &SecretKey::from_slice(&scalar_1).unwrap());
assert_eq!(
&point_1.to_bytes_uncompressed()[..],
&secp_pk_1.serialize_uncompressed()[..]
);
(point_1, secp_pk_1)
};
// Multiply the resulting points by another scalar and test equality
{
let scalar_2 = rand_32_bytes();
let point_2 = g!({ Scalar::from_bytes_mod_order(scalar_2.clone()) } * point_1)
.mark::<(Normal, NonZero)>()
.unwrap();
let secp_pk_2 = {
let mut secp_pk_2 = secp_pk_1.clone();
secp_pk_2.mul_assign(&secp, &scalar_2).unwrap();
secp_pk_2
};
assert_eq!(
&point_2.to_bytes_uncompressed()[..],
&secp_pk_2.serialize_uncompressed()[..]
);
}
}
#[test]
fn vartime_double_mul() {
let secp = secp256k1::Secp256k1::new();
let scalar_H = rand_32_bytes();
let y = rand_32_bytes();
let x = rand_32_bytes();
let result = {
let H = g!({ Scalar::from_bytes_mod_order(scalar_H.clone()) } * G);
double_mul(
&Scalar::from_bytes_mod_order(x.clone()).mark::<Public>(),
G,
&Scalar::from_bytes_mod_order(y.clone()).mark::<Public>(),
&H,
)
.mark::<(Normal, NonZero)>()
.unwrap()
};
let result_secp = {
let H = PublicKey::from_secret_key(&secp, &SecretKey::from_slice(&scalar_H).unwrap());
let x_G = PublicKey::from_secret_key(&secp, &SecretKey::from_slice(&x).unwrap());
let mut y_H = H.clone();
y_H.mul_assign(&secp, &y).unwrap();
x_G.combine(&y_H).unwrap()
};
assert_eq!(
&result.to_bytes_uncompressed()[..],
&result_secp.serialize_uncompressed()[..],
)
}
#[test]
fn point_addition() {
let secp = secp256k1::Secp256k1::new();
let scalar_1 = rand_32_bytes();
let point_1 = g!({ Scalar::from_bytes_mod_order(scalar_1.clone()) } * G);
let secp_pk_1 =
PublicKey::from_secret_key(&secp, &SecretKey::from_slice(&scalar_1).unwrap());
assert_eq!(
(g!(point_1 + point_1))
.mark::<(Normal, NonZero)>()
.unwrap()
.to_bytes_uncompressed()[..],
secp_pk_1
.combine(&secp_pk_1)
.unwrap()
.serialize_uncompressed()[..]
);
}
#[test]
fn scalar_ops() {
let bytes_1 = rand_32_bytes();
let bytes_2 = rand_32_bytes();
let scalar_1 = Scalar::from_bytes_mod_order(bytes_1.clone());
let scalar_2 = Scalar::from_bytes_mod_order(bytes_2.clone());
let sk_1 = &SecretKey::from_slice(&bytes_1).unwrap();
assert_eq!(&scalar_1.to_bytes()[..], &sk_1[..]);
assert_eq!(
&(s!(scalar_1 + scalar_2)).to_bytes()[..],
&{
let mut res = sk_1.clone();
res.add_assign(&bytes_2[..]).unwrap();
res
}[..]
);
assert_eq!(
&(s!(scalar_1 * scalar_2)).to_bytes()[..],
&{
let mut res = sk_1.clone();
res.mul_assign(&bytes_2[..]).unwrap();
res
}[..]
)
}
#[test]
fn scalar_inversion() {
// we have to test against this grin secp because that's the one that exposes it
use secp256k1zkp::key::SecretKey;
let secp = secp256k1zkp::Secp256k1::new();
let bytes = rand_32_bytes();
let mut sk = SecretKey::from_slice(&secp, &bytes).unwrap();
let scalar = Scalar::from_bytes_mod_order(bytes.clone())
.mark::<NonZero>()
.unwrap();
sk.inv_assign(&secp).unwrap();
assert_eq!(&scalar.invert().to_bytes()[..], &sk[..]);
}
#[test]
fn scalar_negation() {
use secp256k1zkp::key::SecretKey;
let secp = secp256k1zkp::Secp256k1::new();
let bytes = rand_32_bytes();
let mut sk = SecretKey::from_slice(&secp, &bytes).unwrap();
let scalar = Scalar::from_bytes_mod_order(bytes.clone());
sk.neg_assign(&secp).unwrap();
assert_eq!(&(-scalar).to_bytes()[..], &sk[..]);
}
}
| true |
9597749e517753d728e57de123b712478ca8bf57
|
Rust
|
gnzlbg/stdsimd
|
/coresimd/src/macros.rs
|
UTF-8
| 16,788 | 2.765625 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
//! Utility macros
macro_rules! define_ty {
($name:ident, $($elty:ident),+) => {
#[repr(simd)]
#[derive(Clone, Copy, Debug, PartialEq)]
#[allow(non_camel_case_types)]
pub struct $name($($elty),*);
}
}
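// For example, a hypothetical invocation `define_ty!(u32x4, u32, u32, u32, u32)` expands to a
// `#[repr(simd)]` tuple struct `pub struct u32x4(u32, u32, u32, u32);` deriving
// Clone, Copy, Debug and PartialEq.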
macro_rules! define_ty_doc {
($name:ident, $($elty:ident),+ | $(#[$doc:meta])*) => {
$(#[$doc])*
#[repr(simd)]
#[derive(Clone, Copy, Debug, PartialEq)]
#[allow(non_camel_case_types)]
pub struct $name($($elty),*);
}
}
macro_rules! define_impl {
(
$name:ident, $elemty:ident, $nelems:expr, $boolname:ident,
$($elname:ident),+
) => {
impl $name {
#[inline(always)]
pub const fn new($($elname: $elemty),*) -> $name {
$name($($elname),*)
}
#[inline(always)]
pub fn len() -> i32 {
$nelems
}
#[inline(always)]
pub const fn splat(value: $elemty) -> $name {
$name($({
#[allow(non_camel_case_types, dead_code)]
struct $elname;
value
}),*)
}
#[inline(always)]
pub fn extract(self, idx: u32) -> $elemty {
assert!(idx < $nelems);
unsafe { self.extract_unchecked(idx) }
}
#[inline(always)]
pub unsafe fn extract_unchecked(self, idx: u32) -> $elemty {
simd_extract(self, idx)
}
#[inline(always)]
pub fn replace(self, idx: u32, val: $elemty) -> $name {
assert!(idx < $nelems);
unsafe { self.replace_unchecked(idx, val) }
}
#[inline(always)]
pub unsafe fn replace_unchecked(
self,
idx: u32,
val: $elemty,
) -> $name {
simd_insert(self, idx, val)
}
#[inline(always)]
pub fn store(self, slice: &mut [$elemty], offset: usize) {
assert!(slice[offset..].len() >= $nelems);
unsafe { self.store_unchecked(slice, offset) }
}
#[inline(always)]
pub unsafe fn store_unchecked(
self,
slice: &mut [$elemty],
offset: usize,
) {
use core::mem::size_of;
use core::ptr;
ptr::copy_nonoverlapping(
&self as *const $name as *const u8,
slice.get_unchecked_mut(offset) as *mut $elemty as *mut u8,
size_of::<$name>());
}
#[inline(always)]
pub fn load(slice: &[$elemty], offset: usize) -> $name {
assert!(slice[offset..].len() >= $nelems);
unsafe { $name::load_unchecked(slice, offset) }
}
#[inline(always)]
pub unsafe fn load_unchecked(
slice: &[$elemty],
offset: usize,
) -> $name {
use core::mem::size_of;
use core::ptr;
let mut x = $name::splat(0 as $elemty);
ptr::copy_nonoverlapping(
slice.get_unchecked(offset) as *const $elemty as *const u8,
&mut x as *mut $name as *mut u8,
size_of::<$name>());
x
}
#[inline(always)]
pub fn eq(self, other: $name) -> $boolname {
unsafe { simd_eq(self, other) }
}
#[inline(always)]
pub fn ne(self, other: $name) -> $boolname {
unsafe { simd_ne(self, other) }
}
#[inline(always)]
pub fn lt(self, other: $name) -> $boolname {
unsafe { simd_lt(self, other) }
}
#[inline(always)]
pub fn le(self, other: $name) -> $boolname {
unsafe { simd_le(self, other) }
}
#[inline(always)]
pub fn gt(self, other: $name) -> $boolname {
unsafe { simd_gt(self, other) }
}
#[inline(always)]
pub fn ge(self, other: $name) -> $boolname {
unsafe { simd_ge(self, other) }
}
}
}
}
macro_rules! define_from {
($to:ident, $($from:ident),+) => {
$(
impl From<$from> for $to {
#[inline(always)]
fn from(f: $from) -> $to {
unsafe { ::core::mem::transmute(f) }
}
}
)+
}
}
macro_rules! define_common_ops {
($($ty:ident),+) => {
$(
impl ::core::ops::Add for $ty {
type Output = Self;
#[inline(always)]
fn add(self, other: Self) -> Self {
unsafe { simd_add(self, other) }
}
}
impl ::core::ops::Sub for $ty {
type Output = Self;
#[inline(always)]
fn sub(self, other: Self) -> Self {
unsafe { simd_sub(self, other) }
}
}
impl ::core::ops::Mul for $ty {
type Output = Self;
#[inline(always)]
fn mul(self, other: Self) -> Self {
unsafe { simd_mul(self, other) }
}
}
impl ::core::ops::Div for $ty {
type Output = Self;
#[inline(always)]
fn div(self, other: Self) -> Self {
unsafe { simd_div(self, other) }
}
}
impl ::core::ops::Rem for $ty {
type Output = Self;
#[inline(always)]
fn rem(self, other: Self) -> Self {
unsafe { simd_rem(self, other) }
}
}
impl ::core::ops::AddAssign for $ty {
#[inline(always)]
fn add_assign(&mut self, other: Self) {
*self = *self + other;
}
}
impl ::core::ops::SubAssign for $ty {
#[inline(always)]
fn sub_assign(&mut self, other: Self) {
*self = *self - other;
}
}
impl ::core::ops::MulAssign for $ty {
#[inline(always)]
fn mul_assign(&mut self, other: Self) {
*self = *self * other;
}
}
impl ::core::ops::DivAssign for $ty {
#[inline(always)]
fn div_assign(&mut self, other: Self) {
*self = *self / other;
}
}
impl ::core::ops::RemAssign for $ty {
#[inline(always)]
fn rem_assign(&mut self, other: Self) {
*self = *self % other;
}
}
)+
}
}
macro_rules! define_shifts {
($ty:ident, $elem:ident, $($by:ident),+) => {
$(
impl ::core::ops::Shl<$by> for $ty {
type Output = Self;
#[inline(always)]
fn shl(self, other: $by) -> Self {
unsafe { simd_shl(self, $ty::splat(other as $elem)) }
}
}
impl ::core::ops::Shr<$by> for $ty {
type Output = Self;
#[inline(always)]
fn shr(self, other: $by) -> Self {
unsafe { simd_shr(self, $ty::splat(other as $elem)) }
}
}
impl ::core::ops::ShlAssign<$by> for $ty {
#[inline(always)]
fn shl_assign(&mut self, other: $by) {
*self = *self << other;
}
}
impl ::core::ops::ShrAssign<$by> for $ty {
#[inline(always)]
fn shr_assign(&mut self, other: $by) {
*self = *self >> other;
}
}
)+
}
}
macro_rules! define_float_ops {
($($ty:ident),+) => {
$(
impl ::core::ops::Neg for $ty {
type Output = Self;
#[inline(always)]
fn neg(self) -> Self {
Self::splat(-1.0) * self
}
}
)+
};
}
macro_rules! define_signed_integer_ops {
($($ty:ident),+) => {
$(
impl ::core::ops::Neg for $ty {
type Output = Self;
#[inline(always)]
fn neg(self) -> Self {
Self::splat(-1) * self
}
}
)+
};
}
macro_rules! define_integer_ops {
($(($ty:ident, $elem:ident)),+) => {
$(
impl ::core::ops::Not for $ty {
type Output = Self;
#[inline(always)]
fn not(self) -> Self {
$ty::splat(!0) ^ self
}
}
impl ::core::ops::BitAnd for $ty {
type Output = Self;
#[inline(always)]
fn bitand(self, other: Self) -> Self {
unsafe { simd_and(self, other) }
}
}
impl ::core::ops::BitOr for $ty {
type Output = Self;
#[inline(always)]
fn bitor(self, other: Self) -> Self {
unsafe { simd_or(self, other) }
}
}
impl ::core::ops::BitXor for $ty {
type Output = Self;
#[inline(always)]
fn bitxor(self, other: Self) -> Self {
unsafe { simd_xor(self, other) }
}
}
impl ::core::ops::BitAndAssign for $ty {
#[inline(always)]
fn bitand_assign(&mut self, other: Self) {
*self = *self & other;
}
}
impl ::core::ops::BitOrAssign for $ty {
#[inline(always)]
fn bitor_assign(&mut self, other: Self) {
*self = *self | other;
}
}
impl ::core::ops::BitXorAssign for $ty {
#[inline(always)]
fn bitxor_assign(&mut self, other: Self) {
*self = *self ^ other;
}
}
define_shifts!(
$ty, $elem,
u8, u16, u32, u64, usize,
i8, i16, i32, i64, isize);
impl ::core::fmt::LowerHex for $ty {
fn fmt(&self, f: &mut ::core::fmt::Formatter)
-> ::core::fmt::Result {
write!(f, "{}(", stringify!($ty))?;
let n = ::core::mem::size_of_val(self)
/ ::core::mem::size_of::<$elem>();
for i in 0..n {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{:#x}", self.extract(i as u32))?;
}
write!(f, ")")
}
}
)+
}
}
macro_rules! define_casts {
($(($fromty:ident, $toty:ident, $cast:ident)),+) => {
$(
impl $fromty {
#[inline(always)]
pub fn $cast(self) -> ::simd::$toty {
unsafe { simd_cast(self) }
}
}
)+
}
}
#[cfg(test)]
#[macro_export]
macro_rules! test_arithmetic_ {
($tn:ident, $zero:expr, $one:expr, $two:expr, $four:expr) => {
{
let z = $tn::splat($zero);
let o = $tn::splat($one);
let t = $tn::splat($two);
let f = $tn::splat($four);
// add
assert_eq!(z + z, z);
assert_eq!(o + z, o);
assert_eq!(t + z, t);
assert_eq!(t + t, f);
// sub
assert_eq!(z - z, z);
assert_eq!(o - z, o);
assert_eq!(t - z, t);
assert_eq!(f - t, t);
assert_eq!(f - o - o, t);
// mul
assert_eq!(z * z, z);
assert_eq!(z * o, z);
assert_eq!(z * t, z);
assert_eq!(o * t, t);
assert_eq!(t * t, f);
// div
assert_eq!(z / o, z);
assert_eq!(t / o, t);
assert_eq!(f / o, f);
assert_eq!(t / t, o);
assert_eq!(f / t, t);
// rem
assert_eq!(o % o, z);
assert_eq!(f % t, z);
{
let mut v = z;
assert_eq!(v, z);
v += o; // add_assign
assert_eq!(v, o);
v -= o; // sub_assign
assert_eq!(v, z);
v = t;
v *= o; // mul_assign
assert_eq!(v, t);
v *= t;
assert_eq!(v, f);
v /= o; // div_assign
assert_eq!(v, f);
v /= t;
assert_eq!(v, t);
v %= t; // rem_assign
assert_eq!(v, z);
}
}
};
}
#[cfg(test)]
#[macro_export]
macro_rules! test_neg_ {
($tn:ident, $zero:expr, $one:expr, $two:expr, $four:expr) => {
{
let z = $tn::splat($zero);
let o = $tn::splat($one);
let t = $tn::splat($two);
let f = $tn::splat($four);
let nz = $tn::splat(-$zero);
let no = $tn::splat(-$one);
let nt = $tn::splat(-$two);
let nf = $tn::splat(-$four);
assert_eq!(-z, nz);
assert_eq!(-o, no);
assert_eq!(-t, nt);
assert_eq!(-f, nf);
}
};
}
#[cfg(test)]
#[macro_export]
macro_rules! test_bit_arithmetic_ {
($tn:ident) => {
{
let z = $tn::splat(0);
let o = $tn::splat(1);
let t = $tn::splat(2);
let f = $tn::splat(4);
let m = $tn::splat(!z.extract(0));
// shr
assert_eq!(o >> 1, z);
assert_eq!(t >> 1, o);
assert_eq!(f >> 1, t);
// shl
assert_eq!(o << 1, t);
assert_eq!(o << 2, f);
assert_eq!(t << 1, f);
// bitand
assert_eq!(o & o, o);
assert_eq!(t & t, t);
assert_eq!(t & o, z);
// bitor
assert_eq!(o | o, o);
assert_eq!(t | t, t);
assert_eq!(z | o, o);
// bitxor
assert_eq!(o ^ o, z);
assert_eq!(t ^ t, z);
assert_eq!(z ^ o, o);
// not
assert_eq!(!z, m);
assert_eq!(!m, z);
{ // shr_assign
let mut v = o;
v >>= 1;
assert_eq!(v, z);
}
{ // shl_assign
let mut v = o;
v <<= 1;
assert_eq!(v, t);
}
{ // and_assign
let mut v = o;
v &= t;
assert_eq!(v, z);
}
{ // or_assign
let mut v = z;
v |= o;
assert_eq!(v, o);
}
{ // xor_assign
let mut v = z;
v ^= o;
assert_eq!(v, o);
}
}
};
}
#[cfg(test)]
#[macro_export]
macro_rules! test_ops_si {
($($tn:ident),+) => {
$(
test_arithmetic_!($tn, 0, 1, 2, 4);
test_neg_!($tn, 0, 1, 2, 4);
test_bit_arithmetic_!($tn);
)+
};
}
#[cfg(test)]
#[macro_export]
macro_rules! test_ops_ui {
($($tn:ident),+) => {
$(
test_arithmetic_!($tn, 0, 1, 2, 4);
test_bit_arithmetic_!($tn);
)+
};
}
#[cfg(test)]
#[macro_export]
macro_rules! test_ops_f {
($($tn:ident),+) => {
$(
test_arithmetic_!($tn, 0., 1., 2., 4.);
test_neg_!($tn, 0., 1., 2., 4.);
)+
};
}
| true |
8e1f212a45d7c390102855e56ddf5ccc55fc1b3b
|
Rust
|
FengchenX/orbtk-mirror
|
/crates/widgets/src/numeric_box.rs
|
UTF-8
| 9,733 | 3.1875 | 3 |
[
"MIT"
] |
permissive
|
use super::behaviors::MouseBehavior;
use crate::prelude::*;
use crate::shell::{Key, KeyEvent};
use core::f64::MAX;
use rust_decimal::prelude::*;
pub static ID_INPUT: &'static str = "numeric_box_input";
pub static ELEMENT_INPUT: &'static str = "numeric_box_input";
pub static ELEMENT_BTN: &'static str = "numeric_box_button";
// one upward mouse-wheel scroll step corresponds to delta.y = 12.0
static ONE_SCROLL: f64 = 12.0;
enum InputAction {
Inc,
Dec,
ChangeByKey(KeyEvent),
ChangeByMouseScroll(Point),
Focus,
}
#[derive(Default, AsAny)]
struct NumericBoxState {
action: Option<InputAction>,
pub input: Entity,
min: Decimal,
max: Decimal,
step: Decimal,
current_value: Decimal,
}
impl NumericBoxState {
fn action(&mut self, action: InputAction) {
self.action = Some(action);
}
fn change_val(&mut self, new_value: Decimal, ctx: &mut Context) {
if new_value >= self.min && new_value <= self.max {
self.current_value = new_value;
ctx.get_widget(self.input)
.set::<String16>("text", String16::from(self.current_value.to_string()));
}
}
fn request_focus(&self, ctx: &mut Context) {
if !ctx.widget().get::<bool>("focused") {
ctx.widget().set::<bool>("focused", true);
ctx.push_event_by_window(FocusEvent::RequestFocus(ctx.entity));
}
}
}
fn default_or(key: &str, default_value: f64, ctx: &mut Context) -> Decimal {
let property = ctx.widget().clone_or_default(key);
match Decimal::from_f64(property) {
Some(val) => val,
None => Decimal::from_f64(default_value).unwrap(),
}
}
impl State for NumericBoxState {
fn init(&mut self, _: &mut Registry, ctx: &mut Context) {
self.input = ctx.entity_of_child(ID_INPUT).expect(
"NumericBoxState
.init(): the child input could not be found!",
);
self.min = default_or("min", 0.0, ctx);
self.max = default_or("max", MAX, ctx);
self.step = default_or("step", 1.0, ctx);
self.current_value = default_or("val", 0.0, ctx);
let init_value = String16::from(self.current_value.to_string());
ctx.get_widget(self.input)
.set::<String16>("text", init_value);
}
// TODO: let the user type the value, or select it for cut, copy, paste operations
fn update(&mut self, _: &mut Registry, ctx: &mut Context) {
if let Some(action) = &self.action {
match action {
InputAction::Inc => {
self.change_val(self.current_value + self.step, ctx);
}
InputAction::Dec => {
self.change_val(self.current_value - self.step, ctx);
}
InputAction::ChangeByKey(key_event) => match key_event.key {
Key::Up | Key::NumpadAdd => {
self.change_val(self.current_value + self.step, ctx);
}
Key::Down | Key::NumpadSubtract => {
self.change_val(self.current_value - self.step, ctx);
}
_ => {}
},
InputAction::ChangeByMouseScroll(delta) => {
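                    // delta.y / ONE_SCROLL turns the raw scroll delta into a number of wheel
                    // notches (ONE_SCROLL = 12.0, see above), so the value changes by one
                    // `step` per notch scrolled.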
match Decimal::from_f64(delta.y / ONE_SCROLL) {
Some(scroll_count) => {
self.change_val(self.current_value + (self.step * scroll_count), ctx);
}
None => {}
}
}
InputAction::Focus => {
self.request_focus(ctx);
}
}
self.action = None;
}
}
}
widget!(
/// `NumericBox` is used to let the user increase or decrease
/// the value of the input by a given, fixed value called `step` until it reaches the upper or
/// lower bounds.
    /// The widget can be controlled by clicking on the two control buttons, or the keyboard's
/// Up and Down, Numpad+ and Numpad- keys, or the mouse scroll.
/// Note: after the widget is initialized, changing the min, max or step properties has no effect.
///
/// # Examples:
/// Create a NumericBox with default values:
/// ```rust
/// NumericBox::new().build(ctx)
/// ```
///
/// Create a NumericBox with custom values:
/// ```rust
/// NumericBox::new().min(10.0).max(100.0).val(50.0).step(5.0).build(ctx)
/// ```
NumericBox<NumericBoxState>: KeyDownHandler {
/// Sets or shares the background color property
background: Brush,
/// Sets or shares the border color property
border_brush: Brush,
/// Sets or shares the border width property
border_width: Thickness,
/// Sets or shares the border radius property
border_radius: f64,
/// Sets or shares the focused property
focused: bool,
/// Sets or shares the foreground color property
foreground: Brush,
/// Sets or shares the minimum allowed value property
min: f64,
/// Sets or shares the maximum allowed value property
max: f64,
/// Sets or shares the stepping value property
step: f64,
/// Sets or shares the current value property
val: f64
}
);
impl Template for NumericBox {
fn template(self, id: Entity, ctx: &mut BuildContext) -> Self {
self.name("NumericBox")
.background("transparent")
.foreground(colors::LINK_WATER_COLOR)
.border_brush("#647b91")
.border_width(1.0)
.border_radius(3.0)
.element("numeric_box")
.focused(false)
.height(32.0)
.margin(4.0)
.min(0.0)
.max(200.0)
.step(1.0)
.val(0.0)
.width(128.0)
.child(
MouseBehavior::new()
.on_mouse_down(move |states, _| {
states
.get_mut::<NumericBoxState>(id)
.action(InputAction::Focus);
true
})
.on_scroll(move |states, delta| {
states
.get_mut::<NumericBoxState>(id)
.action(InputAction::ChangeByMouseScroll(delta));
true
})
.build(ctx),
)
.child(
Grid::new()
.columns(Columns::new().add("*").add(32.))
.rows(Rows::new().add(16.0).add(16.0))
.child(
TextBox::new()
.h_align("stretch")
.attach(Grid::column(0))
.attach(Grid::row_span(2))
.attach(Grid::row(0))
.border_brush(id)
.border_radius(id)
.border_width(id)
.element(ELEMENT_INPUT)
.enabled(false)
.id(ID_INPUT)
.max_width(96.)
.text("0")
.build(ctx),
)
.child(
Button::new()
.attach(Grid::column(1))
.attach(Grid::row(0))
.border_brush("transparent")
.border_radius(0.0)
.border_width(0.0)
.min_width(30.0)
.height(30.0)
.class("single_content")
.element(ELEMENT_BTN)
.text("+")
.margin(1)
.on_click(move |states, _| {
states
.get_mut::<NumericBoxState>(id)
.action(InputAction::Inc);
true
})
.build(ctx),
)
.child(
Button::new()
.attach(Grid::column(1))
.attach(Grid::row(1))
.border_brush("transparent")
.border_radius(0.0)
.border_width(0.0)
.class("single_content")
.element(ELEMENT_BTN)
.min_width(30.0)
.height(30.0)
.padding(0.0)
.margin(1)
.text("-")
.on_click(move |states, _| {
states
.get_mut::<NumericBoxState>(id)
.action(InputAction::Dec);
true
})
.build(ctx),
)
.build(ctx),
)
.on_key_down(move |states, event| -> bool {
states
.get_mut::<NumericBoxState>(id)
.action(InputAction::ChangeByKey(event));
false
})
}
fn render_object(&self) -> Box<dyn RenderObject> {
Box::new(RectangleRenderObject)
}
}
| true |
8fbc8ae7551d4499e6b0385491e32574f30f94b0
|
Rust
|
bytesnake/hex
|
/cli/src/store.rs
|
UTF-8
| 2,832 | 2.671875 | 3 |
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
use std::slice;
use std::io::Read;
use std::fs::File;
use std::process::Command;
use std::path::Path;
use walkdir::WalkDir;
use id3::Tag;
use hex_database::{Track, Writer};
use hex_music_container::{Configuration, Container};
pub fn store(write: &Writer, path: &Path, data_path: &Path) {
let mut files = Vec::new();
for e in WalkDir::new(path).into_iter().filter_map(|e| e.ok()) {
if e.metadata().unwrap().is_file() {
let path = e.path();
let extension = path.extension().unwrap();
if extension == "aac" || extension == "mp3" || extension == "wav" || extension == "ogg" {
files.push(path.to_path_buf());
}
}
}
for file in files {
println!("Converting file {:?}", file.to_str());
let tag = Tag::read_from_path(&file);
// convert to pcm file
let mut cmd = Command::new("ffmpeg")
.arg("-y")
.arg("-hide_banner")
//.arg("-loglevel").arg("panic")
.arg("-i").arg(&file)
.arg("-ar").arg("48000")
.arg("-ac").arg("2")
.arg("-f").arg("s16le")
.arg("/tmp/hex-cli-audio")
.spawn()
.expect("Failed to spawn ffmpeg!");
cmd.wait().unwrap();
let mut audio_file = File::open("/tmp/hex-cli-audio").unwrap();
let mut data = Vec::new();
audio_file.read_to_end(&mut data).unwrap();
let data: &[i16] = unsafe {
slice::from_raw_parts(
data.as_ptr() as *const i16,
data.len() / 2
)
};
println!("Finished converting with {} samples", data.len());
let fingerprint = hex_database::utils::get_fingerprint(2, &data).unwrap();
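        // Duration in seconds: the buffer holds interleaved stereo i16 samples at 48 kHz (see the
        // ffmpeg arguments above), so divide the sample count by the sample rate and by the two
        // channels.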
let mut track = Track::empty(
fingerprint,
data.len() as f64 / 48000.0 / 2.0
);
if let Ok(tag) = tag {
if let Some(title) = tag.title().map(|x| x.to_string()) {
track.title = Some(title);
} else {
track.title = Some(file.file_stem().unwrap().to_str().unwrap().into());
}
track.album = tag.album().map(|x| x.to_string());
track.interpret = tag.artist().map(|x| x.to_string());
track.composer = tag.artist().map(|x| x.to_string());
} else {
track.title = Some(file.file_stem().unwrap().to_str().unwrap().into());
}
// store with music container
let file = File::create(data_path.join(track.key.to_path())).unwrap();
Container::save_pcm(Configuration::Stereo, data.to_vec(), file, None).unwrap();
println!("Add track with key {}", track.key.to_string());
write.add_track(track).unwrap();
}
}
| true |
74d8d5b196757ad9579d4ba65c40cd278da6b2e1
|
Rust
|
sergei-romanenko/spsc
|
/spsc-lite-rust/src/advanced_process_tree_builder.rs
|
UTF-8
| 5,530 | 2.640625 | 3 |
[] |
no_license
|
use crate::algebra::*;
use crate::he::*;
use crate::language::*;
use crate::msg::*;
use crate::process_tree::*;
use crate::process_tree_builder::*;
use std::rc::Rc;
// Advanced Supercompiler with homeomorphic embedding and generalization
struct AdvancedBuildStep;
fn abstract_node(alpha: &RcNode, t: &RcTerm, subst: Subst) {
let bindings = subst
.iter()
.map(|(n, t)| (n.clone(), Rc::clone(t)))
.collect();
let let_term = Term::mk_let(Rc::clone(t), bindings);
replace_subtree(alpha, &let_term);
}
fn split_node(ng: &mut NameGen, beta: &RcNode) {
let t = beta.get_body();
match &*t {
Term::CFG { kind, name, args } => {
let names1 = ng.fresh_name_list(args.len());
let args1 = names1.iter().map(|x| Term::var(x)).collect();
let t1 = Term::mk_cfg(kind.clone(), name, args1);
let bs1 = names1
.iter()
.zip(args)
.map(|(n, t)| (n.clone(), Rc::clone(t)))
.collect();
let let_term = Term::mk_let(t1, bs1);
replace_subtree(beta, &let_term);
}
_ => unimplemented!(),
}
}
fn generalize_alpha_or_split(ng: &mut NameGen, beta: &RcNode, alpha: &RcNode) {
let g = msg(ng, &alpha.get_body(), &beta.get_body());
if g.t.is_var() {
split_node(ng, beta);
} else {
abstract_node(alpha, &g.t, g.s1);
}
}
fn find_embedded_ancestor(beta: &RcNode) -> Option<RcNode> {
for alpha in Ancestors::new(beta) {
if alpha.get_body().is_fg_call()
&& embedded_in(&alpha.get_body(), &beta.get_body())
{
return Some(alpha);
}
}
return None;
}
impl BuildStep for AdvancedBuildStep {
fn build_step(
&self,
d: &mut DrivingEngine,
tree: &mut Tree,
beta: &RcNode,
) {
if let Some(alpha) = find_more_general_ancestor(beta) {
loop_back(beta, &alpha);
} else {
if let Some(alpha) = find_embedded_ancestor(beta) {
generalize_alpha_or_split(&mut d.name_gen, beta, &alpha)
} else {
d.expand_node(tree, beta);
}
}
}
}
pub fn build_advanced_process_tree(
ng: NameGen,
k: i64,
prog: Program,
t: RcTerm,
) -> Tree {
build_process_tree(AdvancedBuildStep {}, ng, k, prog, t)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::parser::*;
fn build_pr_tree(prog: Program, t: RcTerm, k: i64) -> Tree {
let ng = NameGen::new("v", 100);
build_advanced_process_tree(ng, k, prog, t)
}
fn build_pr_tree_1_ok(prog: &str, t: &str, expected: &str) {
let tree = build_pr_tree(parse_program(prog), parse_term(t), 1);
assert_eq!(expected, tree.to_string());
}
fn build_pr_tree_ok(prog: &str, t: &str, expected: &str) {
let tree = build_pr_tree(parse_program(prog), parse_term(t), 100);
assert_eq!(expected, tree.to_string());
}
const P_ADD: &str = "gAdd(Z,y)=y;gAdd(S(x),y)=S(gAdd(x,y));";
const P_ADD_ACC: &str = "gAddAcc(Z,y)=y;gAddAcc(S(x),y)=gAddAcc(x,S(y));";
#[test]
fn test_build_pr_tree_1() {
build_pr_tree_1_ok("", "x", "{0:(x,,,[])}");
build_pr_tree_1_ok(
"",
"S(Z)",
"{\
0:(S(Z),,,[1]),1:(Z,,0,[])}",
);
build_pr_tree_1_ok(
P_ADD_ACC,
"gAddAcc(S(Z),Z)",
"{\
0:(gAddAcc(S(Z),Z),,,[1]),1:(gAddAcc(Z,S(Z)),,0,[])}",
);
build_pr_tree_1_ok(
P_ADD,
"gAdd(a,b)",
"{\
0:(gAdd(a,b),,,[1,2]),1:(b,a=Z,0,[]),\
2:(S(gAdd(v100,b)),a=S(v100),0,[])}",
);
build_pr_tree_1_ok(
P_ADD,
"gAdd(gAdd(a,b),c)",
"{\
0:(gAdd(gAdd(a,b),c),,,[1,2]),1:(gAdd(b,c),a=Z,0,[]),\
2:(gAdd(S(gAdd(v100,b)),c),a=S(v100),0,[])}",
);
}
#[test]
fn test_build_pr_tree() {
build_pr_tree_ok(
P_ADD_ACC,
"gAddAcc(a,b)",
"{\
0:(gAddAcc(a,b),,,[1,2]),1:(b,a=Z,0,[]),\
2:(let a=v100,b=S(b) in gAddAcc(a,b),a=S(v100),0,[3,4,5]),\
3:(gAddAcc(a,b),,2,[]),4:(v100,,2,[]),\
5:(S(b),,2,[6]),6:(b,,5,[])}",
);
build_pr_tree_ok(
P_ADD_ACC,
"gAddAcc(S(Z),Z)",
"{\
0:(gAddAcc(S(Z),Z),,,[1]),1:(gAddAcc(Z,S(Z)),,0,[2]),\
2:(S(Z),,1,[3]),3:(Z,,2,[])}",
);
build_pr_tree_ok(
P_ADD,
"gAdd(a,b)",
"{\
0:(gAdd(a,b),,,[1,2]),1:(b,a=Z,0,[]),\
2:(S(gAdd(v100,b)),a=S(v100),0,[3]),3:(gAdd(v100,b),,2,[])}",
);
build_pr_tree_ok(
P_ADD,
"gAdd(gAdd(a,b),c)",
"{\
0:(gAdd(gAdd(a,b),c),,,[1,2]),1:(gAdd(b,c),a=Z,0,[3,4]),\
3:(c,b=Z,1,[]),4:(S(gAdd(v101,c)),b=S(v101),1,[5]),\
5:(gAdd(v101,c),,4,[]),2:(gAdd(S(gAdd(v100,b)),c),a=S(v100),0,[6]),\
6:(S(gAdd(gAdd(v100,b),c)),,2,[7]),7:(gAdd(gAdd(v100,b),c),,6,[])}",
);
build_pr_tree_ok(P_ADD, "gAdd(a,a)", "{\
0:(let v102=a,v103=a in gAdd(v102,v103),,,[4,5,6]),\
4:(gAdd(v102,v103),,0,[7,8]),7:(v103,v102=Z,4,[]),\
8:(S(gAdd(v104,v103)),v102=S(v104),4,[9]),9:(gAdd(v104,v103),,8,[]),\
5:(a,,0,[]),6:(a,,0,[])}");
}
}
| true |
c954daee439a95ca484b67c68bccb5784af46498
|
Rust
|
mdzik/hyperqueue
|
/crates/hyperqueue/src/common/manager/pbs.rs
|
UTF-8
| 3,174 | 2.90625 | 3 |
[
"MIT"
] |
permissive
|
use std::path::PathBuf;
use std::process::Command;
use std::str;
use std::time::Duration;
use anyhow::Context;
use serde_json::Value;
use crate::common::env::HQ_QSTAT_PATH;
use crate::common::manager::common::{format_duration, parse_hms_duration};
pub struct PbsContext {
pub qstat_path: PathBuf,
}
impl PbsContext {
pub fn create() -> anyhow::Result<Self> {
let qstat_path = match Command::new("qstat").spawn() {
Ok(mut process) => {
process.wait().ok();
PathBuf::from("qstat")
}
Err(e) => {
log::warn!(
"Couldn't get qstat path directly ({}), trying to get it from environment variable {}",
e,
HQ_QSTAT_PATH
);
let path = std::env::var(HQ_QSTAT_PATH)
.context("Cannot get qstat path from environment variable")?;
path.into()
}
};
Ok(Self { qstat_path })
}
}
fn parse_pbs_job_remaining_time(job_id: &str, data: &str) -> anyhow::Result<Duration> {
let data_json: Value = serde_json::from_str(data)?;
let walltime = parse_hms_duration(
data_json["Jobs"][job_id]["Resource_List"]["walltime"]
.as_str()
.ok_or_else(|| anyhow::anyhow!("Could not find walltime key for job {}", job_id))?,
)?;
let used = parse_hms_duration(
data_json["Jobs"][job_id]["resources_used"]["walltime"]
.as_str()
.ok_or_else(|| anyhow::anyhow!("Could not find used time key for job {}", job_id))?,
)?;
if walltime < used {
anyhow::bail!("Pbs: Used time is bigger then walltime");
}
Ok(walltime - used)
}
/// Calculates how much time is left for the given job using `qstat`.
/// TODO: make this async
pub fn get_remaining_timelimit(ctx: &PbsContext, job_id: &str) -> anyhow::Result<Duration> {
let result = Command::new(&ctx.qstat_path)
.args(&["-f", "-F", "json", job_id])
.output()?;
if !result.status.success() {
anyhow::bail!(
"qstat command exited with {}: {}\n{}",
result.status,
String::from_utf8_lossy(&result.stderr),
String::from_utf8_lossy(&result.stdout)
);
}
let output = String::from_utf8_lossy(&result.stdout).into_owned();
log::debug!("qstat output: {}", output.trim());
parse_pbs_job_remaining_time(job_id, output.as_str())
}
/// Format a duration as a PBS time string, e.g. 01:05:02
pub fn format_pbs_duration(duration: &Duration) -> String {
format_duration(duration)
}
pub fn parse_pbs_datetime(datetime: &str) -> anyhow::Result<chrono::NaiveDateTime> {
Ok(chrono::NaiveDateTime::parse_from_str(
datetime,
"%a %b %d %H:%M:%S %Y",
)?)
}
#[cfg(test)]
mod test {
use crate::common::manager::pbs::parse_pbs_datetime;
#[test]
fn test_parse_pbs_datetime() {
let date = parse_pbs_datetime("Thu Aug 19 13:05:17 2021").unwrap();
assert_eq!(
date.format("%d.%m.%Y %H:%M:%S").to_string(),
"19.08.2021 13:05:17"
);
}
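    // A sketch of a test for `parse_pbs_job_remaining_time`, using a minimal JSON document shaped
    // like the fields that function reads (`Jobs.<id>.Resource_List.walltime` and
    // `Jobs.<id>.resources_used.walltime`); real `qstat -f -F json` output carries many more fields.
    #[test]
    fn test_parse_pbs_job_remaining_time() {
        use super::parse_pbs_job_remaining_time;
        use std::time::Duration;
        let data = r#"{"Jobs":{"123.server":{"Resource_List":{"walltime":"01:00:00"},"resources_used":{"walltime":"00:15:00"}}}}"#;
        let remaining = parse_pbs_job_remaining_time("123.server", data).unwrap();
        assert_eq!(remaining, Duration::from_secs(45 * 60));
    }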
}
| true |
3c454dc1df02a4a0b8291cf530d044fd0cb7e944
|
Rust
|
JHowell45/rust-practise
|
/chapter_3/data_types/src/main.rs
|
UTF-8
| 678 | 3.421875 | 3 |
[
"MIT"
] |
permissive
|
fn main() {
let x = 2.0; // f64
println!("x: {}", x);
let y: f32 = 3.0; // f32
println!("y: {}", y);
let tup: (i32, f64, u8) = (500, 6.4, 1);
let (x, y, z) = tup;
println!("The value of x is: {}", x);
println!("The value of y is: {}", y);
println!("The value of z is: {}", z);
let a = [1, 2, 3, 4, 5];
let first = a[0];
let second = a[1];
println!("The value of first is: {}", first);
println!("The value of second is: {}", second);
let x: (i32, f64, u8) = (500, 6.4, 1);
let a = x.0;
let b = x.1;
let c = x.2;
println!("The value of a is: {}", a);
println!("The value of b is: {}", b);
println!("The value of c is: {}", c);
}
| true |
ae0f91a34fb084ed858206bb4ccf9ef09d8e926d
|
Rust
|
joaodelgado/rustyboy
|
/src/lib.rs
|
UTF-8
| 1,415 | 3 | 3 |
[] |
no_license
|
#![allow(clippy::verbose_bit_mask)]
mod cartridge;
mod cpu;
mod debugger;
mod errors;
pub mod game_boy;
use std::fs::File;
use std::io::prelude::*;
use errors::{Error, ErrorKind, Result};
pub struct Config {
pub rom_name: String,
}
impl Config {
pub fn new(mut args: std::env::Args) -> Result<Config> {
// Skip program name
args.next();
let rom_name = match args.next() {
Some(arg) => arg,
None => {
return Err(Error::new(
ErrorKind::InvalidInput,
"Please provide the rom name the first argument",
))
}
};
Ok(Config { rom_name })
}
}
pub fn read_file(file_name: &str) -> Result<Vec<u8>> {
let mut file = File::open(file_name)?;
let mut data: Vec<u8> = Vec::new();
file.read_to_end(&mut data)?;
Ok(data)
}
#[inline]
pub fn u8_to_u16(b1: u8, b2: u8) -> u16 {
(u16::from(b1) << 8) | u16::from(b2)
}
#[inline]
pub fn u16_to_u8(n: u16) -> (u8, u8) {
(((n >> 8) as u8), (n as u8))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_u8_to_u16() {
assert_eq!(u8_to_u16(0xff, 0xff), 0xffff);
assert_eq!(u8_to_u16(0xf0, 0x77), 0xf077);
}
#[test]
fn test_u16_to_u8() {
assert_eq!(u16_to_u8(0xffff), (0xff, 0xff));
assert_eq!(u16_to_u8(0xf077), (0xf0, 0x77));
}
}
| true |
bb53b64c47ecc703864dd813f536274193ec38da
|
Rust
|
peter50216/vim-simple-statusline
|
/rust-bin/src/nvim/asyncio.rs
|
UTF-8
| 1,071 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
use futures::Poll;
use std::io;
use tokio::io::{AsyncRead, AsyncWrite};
pub struct AsyncIO<R: AsyncRead, W: AsyncWrite> {
fin: R,
fout: W,
}
impl<R: AsyncRead, W: AsyncWrite> io::Read for AsyncIO<R, W> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.fin.read(buf)
}
}
impl<R: AsyncRead, W: AsyncWrite> io::Write for AsyncIO<R, W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.fout.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.fout.flush()
}
}
impl<R: AsyncRead, W: AsyncWrite> AsyncRead for AsyncIO<R, W> {}
impl<R: AsyncRead, W: AsyncWrite> AsyncWrite for AsyncIO<R, W> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.fout.shutdown()
}
}
pub fn stdio() -> AsyncIO<impl AsyncRead, impl AsyncWrite> {
let fin = tokio_file_unix::File::new_nb(tokio_file_unix::raw_stdin().unwrap())
.unwrap()
.into_reader(&tokio::reactor::Handle::default())
.unwrap();
let fout = tokio::io::stdout();
AsyncIO { fin, fout }
}
| true |
dd496e5f8b7e00fc605d616974836f71a8929d2d
|
Rust
|
GavinHwa/tarpc
|
/src/client.rs
|
UTF-8
| 4,408 | 2.546875 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed except according to those terms.
use Packet;
use futures::{Async, BoxFuture};
use futures::stream::Empty;
use std::fmt;
use std::io;
use tokio_proto::pipeline;
use tokio_service::Service;
use util::Never;
/// A client `Service` that writes and reads bytes.
///
/// Typically, this would be combined with a serialization pre-processing step
/// and a deserialization post-processing step.
#[derive(Clone)]
pub struct Client {
inner: pipeline::Client<Packet, Vec<u8>, Empty<Never, io::Error>, io::Error>,
}
impl Service for Client {
type Request = Packet;
type Response = Vec<u8>;
type Error = io::Error;
type Future = BoxFuture<Vec<u8>, io::Error>;
fn poll_ready(&self) -> Async<()> {
Async::Ready(())
}
fn call(&self, request: Packet) -> Self::Future {
self.inner.call(pipeline::Message::WithoutBody(request))
}
}
impl fmt::Debug for Client {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "Client {{ .. }}")
}
}
/// Exposes a trait for connecting asynchronously to servers.
pub mod future {
use futures::{self, Async, Future};
use protocol::{LOOP_HANDLE, TarpcTransport};
use std::cell::RefCell;
use std::io;
use std::net::SocketAddr;
use super::Client;
use tokio_core::net::TcpStream;
use tokio_proto::pipeline;
/// Types that can connect to a server asynchronously.
pub trait Connect: Sized {
/// The type of the future returned when calling connect.
type Fut: Future<Item = Self, Error = io::Error>;
/// Connects to a server located at the given address.
fn connect(addr: &SocketAddr) -> Self::Fut;
}
/// A future that resolves to a `Client` or an `io::Error`.
pub struct ClientFuture {
inner: futures::Oneshot<io::Result<Client>>,
}
impl Future for ClientFuture {
type Item = Client;
type Error = io::Error;
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
match self.inner.poll().unwrap() {
Async::Ready(Ok(client)) => Ok(Async::Ready(client)),
Async::Ready(Err(err)) => Err(err),
Async::NotReady => Ok(Async::NotReady),
}
}
}
impl Connect for Client {
type Fut = ClientFuture;
/// Starts an event loop on a thread and registers a new client
/// connected to the given address.
fn connect(addr: &SocketAddr) -> ClientFuture {
let addr = *addr;
let (tx, rx) = futures::oneshot();
LOOP_HANDLE.spawn(move |handle| {
let handle2 = handle.clone();
TcpStream::connect(&addr, handle)
.and_then(move |tcp| {
let tcp = RefCell::new(Some(tcp));
let c = try!(pipeline::connect(&handle2, move || {
Ok(TarpcTransport::new(tcp.borrow_mut().take().unwrap()))
}));
Ok(Client { inner: c })
})
.then(|client| Ok(tx.complete(client)))
});
ClientFuture { inner: rx }
}
}
}
/// Exposes a trait for connecting synchronously to servers.
pub mod sync {
use futures::Future;
use std::io;
use std::net::ToSocketAddrs;
use super::Client;
/// Types that can connect to a server synchronously.
pub trait Connect: Sized {
/// Connects to a server located at the given address.
fn connect<A>(addr: A) -> Result<Self, io::Error> where A: ToSocketAddrs;
}
impl Connect for Client {
fn connect<A>(addr: A) -> Result<Self, io::Error>
where A: ToSocketAddrs
{
let addr = if let Some(a) = try!(addr.to_socket_addrs()).next() {
a
} else {
return Err(io::Error::new(io::ErrorKind::AddrNotAvailable,
"`ToSocketAddrs::to_socket_addrs` returned an empty \
iterator."));
};
<Self as super::future::Connect>::connect(&addr).wait()
}
}
}
| true |
2b04394dc91c8224241bacc9c00637ea192fd5ed
|
Rust
|
Yelp/casper
|
/casper-server/src/lua/utils.rs
|
UTF-8
| 1,142 | 3.078125 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
use mlua::{Lua, Result, Table};
fn random(_: &Lua, upper_bound: Option<u32>) -> Result<u32> {
let n = rand::random::<u32>();
match upper_bound {
Some(upper_bound) => Ok(n % upper_bound),
None => Ok(n),
}
}
fn random_string(_: &Lua, (len, mode): (usize, Option<String>)) -> Result<String> {
Ok(crate::utils::random_string(len, mode.as_deref()))
}
pub fn create_module(lua: &Lua) -> Result<Table> {
lua.create_table_from([
("random", lua.create_function(random)?),
("random_string", lua.create_function(random_string)?),
])
}
#[cfg(test)]
mod tests {
use mlua::{chunk, Lua, Result};
#[test]
fn test_module() -> Result<()> {
let lua = Lua::new();
let utils = super::create_module(&lua)?;
lua.load(chunk! {
local n = $utils.random()
assert(type(n) == "number" and math.floor(n) == n)
local s = $utils.random_string(5)
assert(type(s) == "string" and #s == 5)
assert($utils.random_string(5, "hex"):match("^[0-9a-f]+$"))
})
.exec()
.unwrap();
Ok(())
}
}
| true |
2fffb0b8c45800ee09811e5e007243e6bd628ca9
|
Rust
|
ReinierMaas/microfacet
|
/src/lib.rs
|
UTF-8
| 2,974 | 3.0625 | 3 |
[
"MIT"
] |
permissive
|
extern crate cgmath;
extern crate rand;
use cgmath::InnerSpace;
use cgmath::Vector3;
use self::rand::Closed01;
#[inline]
pub fn microfacet_sample(normal: &Vector3<f32>
, view: &Vector3<f32>
, alpha: f32) -> Vector3<f32> {
let Closed01(r0) = rand::random::<Closed01<f32>>();
let Closed01(r1) = rand::random::<Closed01<f32>>();
let t = r0.powf(2.0 / (alpha + 1.0));
let phi = 2. * std::f32::consts::PI * r1;
let sqrt_1_min_t = (1.0 - t).sqrt();
let x = phi.cos() * sqrt_1_min_t;
let y = phi.sin() * sqrt_1_min_t;
let z = t.sqrt();
let halfway = from_tangent_to_local(normal, &Vector3::new(x, y, z));
reflect(view, &halfway)
}
#[inline]
fn from_tangent_to_local(normal: &Vector3<f32>, tangent: &Vector3<f32>) -> Vector3<f32> {
let t = (normal.cross(if normal.x.abs() > 0.99 { Vector3::new(0.0,1.0,0.0) } else { Vector3::new(1.0,0.0,0.0) })).normalize();
let b = normal.cross(t);
tangent.x * t + tangent.y * b + tangent.z * normal
}
#[inline]
fn reflect(view: &Vector3<f32>, halfway: &Vector3<f32>) -> Vector3<f32> {
2.0 * view.dot(*halfway) * halfway - view
}
#[inline]
pub fn microfacet(normal: &Vector3<f32>
, view_direction: &Vector3<f32>
, light_direction: &Vector3<f32>
, specular_color: &Vector3<f32>
, alpha: f32) -> Vector3<f32> {
let halfway = (view_direction + light_direction).normalize();
let ndotv = normal.dot(*view_direction);
let ndotl = normal.dot(*light_direction);
let ndoth = normal.dot(halfway);
let hdotv = halfway.dot(*view_direction);
let hdotl = halfway.dot(*light_direction);
let normal_distribution = normal_distribution(ndoth, alpha);
let geometric_term = geometric_term(ndotv, ndotl, ndoth, hdotv);
let fresnel_term = fresnel_term(hdotl, specular_color);
let normalization = 1.0 / (4.0 * ndotl * ndotv);
normal_distribution * geometric_term * normalization * fresnel_term
}
#[inline]
fn normal_distribution(ndoth: f32
, alpha: f32) -> f32 {
const FRAC_1_2PI: f32 = 1.0 / (2.0 * std::f32::consts::PI);
// Blinn-Phong distribution
(alpha + 2.0) * FRAC_1_2PI * ndoth.powf(alpha)
}
#[inline]
fn geometric_term(ndotv: f32
, ndotl: f32
, ndoth: f32
, hdotv: f32) -> f32 {
    // Physically Based Rendering, page 455
let frac_2ndoth_hdotv = (2.0 * ndoth) / hdotv;
(1.0_f32).min((frac_2ndoth_hdotv * ndotv).min(frac_2ndoth_hdotv * ndotl))
}
#[inline]
fn fresnel_term(hdotl: f32
, specular_color: &Vector3<f32>) -> Vector3<f32> {
const ONES: Vector3<f32> = Vector3::<f32>{x: 1.0, y: 1.0, z: 1.0};
// Schlick's approximation
let pow_5_1_min_hdotl = (1.0 - hdotl).powi(5);
specular_color + pow_5_1_min_hdotl * (ONES - specular_color)
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
}
}
| true |
a60fdb8a71f14477f88be6aeb9a80da76cf8a706
|
Rust
|
teachteamnfp/libra
|
/secure/storage/src/tests/suite.rs
|
UTF-8
| 4,791 | 2.859375 | 3 |
[
"Apache-2.0"
] |
permissive
|
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{Error, Policy, Storage, Value};
use libra_crypto::{ed25519::Ed25519PrivateKey, Uniform};
use rand::{rngs::StdRng, SeedableRng};
const KEY_KEY: &str = "key";
const U64_KEY: &str = "u64";
/// This helper function checks various features of the secure storage behaviour relating to K/V
/// support, e.g., ensuring errors are returned when attempting to retrieve missing keys, and
/// verifying sets and gets are implemented correctly.
pub fn run_test_suite(storage: &mut dyn Storage, name: &str) {
    assert!(
        storage.available(),
        "Backend storage, {}, is not available",
        name
    );
let public = Policy::public();
let u64_value_0 = 5;
let u64_value_1 = 2322;
let mut rng = StdRng::from_seed([13u8; 32]);
let key_value_0 = Ed25519PrivateKey::generate_for_testing(&mut rng);
let key_value_1 = Ed25519PrivateKey::generate_for_testing(&mut rng);
assert_eq!(
storage.get(KEY_KEY).unwrap_err(),
Error::KeyNotSet(KEY_KEY.to_string())
);
assert_eq!(
storage.get(U64_KEY).unwrap_err(),
Error::KeyNotSet(U64_KEY.to_string())
);
assert_eq!(
storage
.set(KEY_KEY, Value::Ed25519PrivateKey(key_value_0.clone()))
.unwrap_err(),
Error::KeyNotSet(KEY_KEY.to_string())
);
assert_eq!(
storage.set(U64_KEY, Value::U64(u64_value_0)).unwrap_err(),
Error::KeyNotSet(U64_KEY.to_string())
);
storage
.create_if_not_exists(U64_KEY, Value::U64(u64_value_1), &public)
.unwrap();
storage
.create(
KEY_KEY,
Value::Ed25519PrivateKey(key_value_1.clone()),
&public,
)
.unwrap();
assert_eq!(storage.get(U64_KEY).unwrap().u64().unwrap(), u64_value_1);
assert_eq!(
&storage.get(KEY_KEY).unwrap().ed25519_private_key().unwrap(),
&key_value_1
);
storage.set(U64_KEY, Value::U64(u64_value_0)).unwrap();
storage
.set(KEY_KEY, Value::Ed25519PrivateKey(key_value_0.clone()))
.unwrap();
assert_eq!(&storage.get(U64_KEY).unwrap().u64().unwrap(), &u64_value_0);
assert_eq!(
&storage.get(KEY_KEY).unwrap().ed25519_private_key().unwrap(),
&key_value_0
);
// Should not affect the above computation
storage
.create_if_not_exists(U64_KEY, Value::U64(u64_value_1), &public)
.unwrap();
storage
.create_if_not_exists(KEY_KEY, Value::Ed25519PrivateKey(key_value_1), &public)
.unwrap();
assert_eq!(
storage
.get(U64_KEY)
.unwrap()
.ed25519_private_key()
.unwrap_err(),
Error::UnexpectedValueType
);
assert_eq!(
storage.get(KEY_KEY).unwrap().u64().unwrap_err(),
Error::UnexpectedValueType
);
// Attempt to perform a u64_key creation twice (i.e., for a key that already exists!)
assert!(storage
.create(U64_KEY, Value::U64(u64_value_1), &public)
.is_err());
}
/// This helper function: (i) creates a new named test key pair; (ii) retrieves the public key for
/// the created key pair; (iii) compares the public keys returned by the create call and the
/// retrieval call.
pub fn create_get_and_test_key_pair(storage: &mut dyn Storage) {
let key_pair_name = "Test Key";
let public_key = storage
.generate_new_ed25519_key_pair(key_pair_name, &Policy::public())
.expect("Failed to create a test Ed25519 key pair!");
let retrieved_public_key = storage
.get_public_key_for(key_pair_name)
.expect("Failed to fetch the test key pair!");
assert_eq!(public_key, retrieved_public_key);
}
/// This helper function attempts to create two named key pairs using the same name, and asserts
/// that the second creation call (i.e., the duplicate), fails.
pub fn create_key_pair_twice(storage: &mut dyn Storage) {
let key_pair_name = "Test Key";
let policy = Policy::public();
let _ = storage
.generate_new_ed25519_key_pair(key_pair_name, &policy)
.expect("Failed to create a test Ed25519 key pair!");
assert!(
storage
.generate_new_ed25519_key_pair(key_pair_name, &policy)
.is_err(),
"The second call to generate_ed25519_key_pair() should have failed!"
);
}
/// This helper function tries to get the public key of a key pair that has not yet been created. As
/// such, it asserts that this attempt fails.
pub fn get_uncreated_key_pair(storage: &mut dyn Storage) {
assert!(
storage.get_public_key_for("Non-existent key").is_err(),
"Accessing a key that has not yet been created should have failed!"
);
}
| true |
b1a859b9a0870a38e6cb3dc3a799692f92bfc604
|
Rust
|
imxrt-rs/imxrt-async-hal
|
/src/pit.rs
|
UTF-8
| 10,926 | 3.03125 | 3 |
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
//! Periodic interrupt timer (PIT) driver and futures
//!
//! The PIT timer channels are the most precise timers in the HAL. PIT timers run on the periodic clock
//! frequency.
//!
//! A single hardware PIT instance has four PIT channels. Use [`new`](PIT::new()) to acquire these four
//! channels.
//!
//! # Example
//!
//! Delay for 250ms using PIT channel 3.
//!
//! ```no_run
//! use imxrt_async_hal as hal;
//! use hal::ral;
//! use hal::PIT;
//!
//! let ccm = ral::ccm::CCM::take().unwrap();
//! // Select 24MHz crystal oscillator, divide by 24 == 1MHz clock
//! ral::modify_reg!(ral::ccm, ccm, CSCMR1, PERCLK_PODF: DIVIDE_24, PERCLK_CLK_SEL: 1);
//! // Enable PIT clock gate
//! ral::modify_reg!(ral::ccm, ccm, CCGR1, CG6: 0b11);
//! let (_, _, _, mut pit) = ral::pit::PIT::take()
//! .map(PIT::new)
//! .unwrap();
//!
//! # async {
//! pit.delay(250_000).await;
//! # };
//! ```
use crate::ral;
use core::{
future::Future,
marker::PhantomPinned,
pin::Pin,
sync::atomic,
task::{Context, Poll, Waker},
};
/// Periodic interrupt timer (PIT)
///
/// See the [module-level documentation](crate::pit) for more information.
#[cfg_attr(docsrs, doc(cfg(feature = "pit")))]
pub struct PIT {
channel: register::ChannelInstance,
}
impl PIT {
/// Acquire four PIT channels from the RAL's PIT instance
pub fn new(pit: ral::pit::Instance) -> (PIT, PIT, PIT, PIT) {
ral::write_reg!(ral::pit, pit, MCR, MDIS: MDIS_0);
// Reset all PIT channels
//
// PIT channels may be used by a systems boot ROM, or another
// user. Set them to a known, good state.
ral::write_reg!(ral::pit, pit, TCTRL0, 0);
ral::write_reg!(ral::pit, pit, TCTRL1, 0);
ral::write_reg!(ral::pit, pit, TCTRL2, 0);
ral::write_reg!(ral::pit, pit, TCTRL3, 0);
unsafe {
cortex_m::peripheral::NVIC::unmask(crate::ral::interrupt::PIT);
(
PIT {
channel: register::ChannelInstance::zero(),
},
PIT {
channel: register::ChannelInstance::one(),
},
PIT {
channel: register::ChannelInstance::two(),
},
PIT {
channel: register::ChannelInstance::three(),
},
)
}
}
/// Wait for the counts to elapse
///
/// The elapsed time is a function of your clock selection and clock frequency.
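    ///
    /// For example, with the 1 MHz periodic clock configured in the module-level example, each
    /// count is one microsecond, so `delay(250_000)` waits roughly 250ms.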
pub fn delay(&mut self, count: u32) -> Delay<'_> {
Delay {
channel: &mut self.channel,
count,
_pin: PhantomPinned,
}
}
}
static mut WAKERS: [Option<Waker>; 4] = [None, None, None, None];
/// A future that yields once the PIT timer elapses
pub struct Delay<'a> {
channel: &'a mut register::ChannelInstance,
_pin: PhantomPinned,
count: u32,
}
impl<'a> Future for Delay<'a> {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let count = self.count;
// Safety: future is safely Unpin; only exposed as !Unpin, just in case.
let this = unsafe { Pin::into_inner_unchecked(self) };
poll_delay(&mut this.channel, cx, count)
}
}
fn poll_delay(
channel: &mut register::ChannelInstance,
cx: &mut Context<'_>,
count: u32,
) -> Poll<()> {
if ral::read_reg!(register, channel, TFLG, TIF == 1) {
// Complete! W1C
ral::write_reg!(register, channel, TFLG, TIF: 1);
Poll::Ready(())
} else if ral::read_reg!(register, channel, TCTRL) != 0 {
// We're active; do nothing
Poll::Pending
} else {
// Neither complete nor active; prepare to run
ral::write_reg!(register, channel, LDVAL, count);
unsafe {
WAKERS[channel.index()] = Some(cx.waker().clone());
}
atomic::compiler_fence(atomic::Ordering::SeqCst);
ral::modify_reg!(register, channel, TCTRL, TIE: 1);
ral::modify_reg!(register, channel, TCTRL, TEN: 1);
Poll::Pending
}
}
impl<'a> Drop for Delay<'a> {
fn drop(&mut self) {
poll_cancel(&mut self.channel);
}
}
fn poll_cancel(channel: &mut register::ChannelInstance) {
ral::write_reg!(register, channel, TCTRL, 0);
}
interrupts! {
handler!{unsafe fn PIT() {
use register::ChannelInstance;
[
ChannelInstance::zero(),
ChannelInstance::one(),
ChannelInstance::two(),
ChannelInstance::three(),
]
.iter_mut()
.zip(WAKERS.iter_mut())
.filter(|(channel, _)| ral::read_reg!(register, channel, TFLG, TIF == 1))
.for_each(|(channel, waker)| {
ral::write_reg!(register, channel, TCTRL, 0);
if let Some(waker) = waker.take() {
waker.wake();
}
});
}}
}
/// The auto-generated RAL API is cumbersome. This is a macro-compatible API that makes it
/// easier to work with.
///
/// The approach here is to
///
/// - take the RAL flags, and remove the channel number (copy-paste from RAL)
/// - expose a 'Channel' as a collection of PIT channel registers (copy-paste from RAL)
mod register {
#![allow(unused, non_snake_case, non_upper_case_globals)] // Compatibility with RAL
use crate::ral::{RORegister, RWRegister};
#[repr(C)]
pub struct ChannelRegisterBlock {
/// Timer Load Value Register
pub LDVAL: RWRegister<u32>,
/// Current Timer Value Register
pub CVAL: RORegister<u32>,
/// Timer Control Register
pub TCTRL: RWRegister<u32>,
/// Timer Flag Register
pub TFLG: RWRegister<u32>,
}
pub struct ChannelInstance {
addr: u32,
idx: usize,
_marker: ::core::marker::PhantomData<*const ChannelRegisterBlock>,
}
impl ::core::ops::Deref for ChannelInstance {
type Target = ChannelRegisterBlock;
#[inline(always)]
fn deref(&self) -> &ChannelRegisterBlock {
unsafe { &*(self.addr as *const _) }
}
}
const PIT_BASE_ADDRESS: u32 = 0x4008_4000;
const PIT_CHANNEL_0_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x100;
const PIT_CHANNEL_1_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x110;
const PIT_CHANNEL_2_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x120;
const PIT_CHANNEL_3_ADDRESS: u32 = PIT_BASE_ADDRESS + 0x130;
impl ChannelInstance {
const unsafe fn new(addr: u32, idx: usize) -> Self {
ChannelInstance {
addr,
idx,
_marker: core::marker::PhantomData,
}
}
pub const fn index(&self) -> usize {
self.idx
}
pub const unsafe fn zero() -> Self {
Self::new(PIT_CHANNEL_0_ADDRESS, 0)
}
pub const unsafe fn one() -> Self {
Self::new(PIT_CHANNEL_1_ADDRESS, 1)
}
pub const unsafe fn two() -> Self {
Self::new(PIT_CHANNEL_2_ADDRESS, 2)
}
pub const unsafe fn three() -> Self {
Self::new(PIT_CHANNEL_3_ADDRESS, 3)
}
}
/// Timer Load Value Register
pub mod LDVAL {
/// Timer Start Value
pub mod TSV {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// Current Timer Value Register
pub mod CVAL {
/// Current Timer Value
pub mod TVL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (32 bits: 0xffffffff << 0)
pub const mask: u32 = 0xffffffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// Timer Control Register
pub mod TCTRL {
/// Timer Enable
pub mod TEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Timer n is disabled.
pub const TEN_0: u32 = 0b0;
/// 0b1: Timer n is enabled.
pub const TEN_1: u32 = 0b1;
}
}
/// Timer Interrupt Enable
pub mod TIE {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Interrupt requests from Timer n are disabled.
pub const TIE_0: u32 = 0b0;
/// 0b1: Interrupt will be requested whenever TIF is set.
pub const TIE_1: u32 = 0b1;
}
}
/// Chain Mode
pub mod CHN {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Timer is not chained.
pub const CHN_0: u32 = 0b0;
/// 0b1: Timer is chained to previous timer. For example, for Channel 2, if this field is set, Timer 2 is chained to Timer 1.
pub const CHN_1: u32 = 0b1;
}
}
}
/// Timer Flag Register
pub mod TFLG {
/// Timer Interrupt Flag
pub mod TIF {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Timeout has not yet occurred.
pub const TIF_0: u32 = 0b0;
/// 0b1: Timeout has occurred.
pub const TIF_1: u32 = 0b1;
}
}
}
}
| true |
c83e38dfc8b54cc54edabda5c77866796cad5d6e
|
Rust
|
onelson/destiny2-api-rs
|
/codegen/src/models/destiny_responses_destiny_profile_response.rs
|
UTF-8
| 10,841 | 2.609375 | 3 |
[] |
no_license
|
/*
* Bungie.Net API
*
* These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality.
*
* OpenAPI spec version: 2.0.0
* Contact: support@bungie.com
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
/// DestinyResponsesDestinyProfileResponse : The response for GetDestinyProfile, with components for character and item-level data.
#[derive(Debug, Serialize, Deserialize)]
pub struct DestinyResponsesDestinyProfileResponse {
/// Recent, refundable purchases you have made from vendors. When will you use it? Couldn't say... COMPONENT TYPE: VendorReceipts
#[serde(rename = "vendorReceipts")]
vendor_receipts: Option<Object>,
/// The profile-level inventory of the Destiny Profile. COMPONENT TYPE: ProfileInventories
#[serde(rename = "profileInventory")]
profile_inventory: Option<Object>,
/// The profile-level currencies owned by the Destiny Profile. COMPONENT TYPE: ProfileCurrencies
#[serde(rename = "profileCurrencies")]
profile_currencies: Option<Object>,
/// The basic information about the Destiny Profile (formerly \"Account\"). COMPONENT TYPE: Profiles
#[serde(rename = "profile")]
profile: Option<Object>,
/// Items available from Kiosks that are available Profile-wide (i.e. across all characters) This component returns information about what Kiosk items are available to you on a *Profile* level. It is theoretically possible for Kiosks to have items gated by specific Character as well. If you ever have those, you will find them on the characterKiosks property. COMPONENT TYPE: Kiosks
#[serde(rename = "profileKiosks")]
profile_kiosks: Option<Object>,
/// Basic information about each character, keyed by the CharacterId. COMPONENT TYPE: Characters
#[serde(rename = "characters")]
characters: Option<Object>,
/// The character-level non-equipped inventory items, keyed by the Character's Id. COMPONENT TYPE: CharacterInventories
#[serde(rename = "characterInventories")]
character_inventories: Option<Object>,
/// Character-level progression data, keyed by the Character's Id. COMPONENT TYPE: CharacterProgressions
#[serde(rename = "characterProgressions")]
character_progressions: Option<Object>,
/// Character rendering data - a minimal set of info needed to render a character in 3D - keyed by the Character's Id. COMPONENT TYPE: CharacterRenderData
#[serde(rename = "characterRenderData")]
character_render_data: Option<Object>,
/// Character activity data - the activities available to this character and its status, keyed by the Character's Id. COMPONENT TYPE: CharacterActivities
#[serde(rename = "characterActivities")]
character_activities: Option<Object>,
/// The character's equipped items, keyed by the Character's Id. COMPONENT TYPE: CharacterEquipment
#[serde(rename = "characterEquipment")]
character_equipment: Option<Object>,
/// Items available from Kiosks that are available to a specific character as opposed to the account as a whole. It must be combined with data from the profileKiosks property to get a full picture of the character's available items to check out of a kiosk. This component returns information about what Kiosk items are available to you on a *Character* level. Usually, kiosk items will be earned for the entire Profile (all characters) at once. To find those, look in the profileKiosks property. COMPONENT TYPE: Kiosks
#[serde(rename = "characterKiosks")]
character_kiosks: Option<Object>,
/// Information about instanced items across all returned characters, keyed by the item's instance ID. COMPONENT TYPE: [See inside the DestinyItemComponentSet contract for component types.]
#[serde(rename = "itemComponents")]
item_components: Option<Object>
}
impl DestinyResponsesDestinyProfileResponse {
/// The response for GetDestinyProfile, with components for character and item-level data.
pub fn new() -> DestinyResponsesDestinyProfileResponse {
DestinyResponsesDestinyProfileResponse {
vendor_receipts: None,
profile_inventory: None,
profile_currencies: None,
profile: None,
profile_kiosks: None,
characters: None,
character_inventories: None,
character_progressions: None,
character_render_data: None,
character_activities: None,
character_equipment: None,
character_kiosks: None,
item_components: None
}
}
pub fn set_vendor_receipts(&mut self, vendor_receipts: Object) {
self.vendor_receipts = Some(vendor_receipts);
}
pub fn with_vendor_receipts(mut self, vendor_receipts: Object) -> DestinyResponsesDestinyProfileResponse {
self.vendor_receipts = Some(vendor_receipts);
self
}
pub fn vendor_receipts(&self) -> Option<&Object> {
self.vendor_receipts.as_ref()
}
pub fn reset_vendor_receipts(&mut self) {
self.vendor_receipts = None;
}
pub fn set_profile_inventory(&mut self, profile_inventory: Object) {
self.profile_inventory = Some(profile_inventory);
}
pub fn with_profile_inventory(mut self, profile_inventory: Object) -> DestinyResponsesDestinyProfileResponse {
self.profile_inventory = Some(profile_inventory);
self
}
pub fn profile_inventory(&self) -> Option<&Object> {
self.profile_inventory.as_ref()
}
pub fn reset_profile_inventory(&mut self) {
self.profile_inventory = None;
}
pub fn set_profile_currencies(&mut self, profile_currencies: Object) {
self.profile_currencies = Some(profile_currencies);
}
pub fn with_profile_currencies(mut self, profile_currencies: Object) -> DestinyResponsesDestinyProfileResponse {
self.profile_currencies = Some(profile_currencies);
self
}
pub fn profile_currencies(&self) -> Option<&Object> {
self.profile_currencies.as_ref()
}
pub fn reset_profile_currencies(&mut self) {
self.profile_currencies = None;
}
pub fn set_profile(&mut self, profile: Object) {
self.profile = Some(profile);
}
pub fn with_profile(mut self, profile: Object) -> DestinyResponsesDestinyProfileResponse {
self.profile = Some(profile);
self
}
pub fn profile(&self) -> Option<&Object> {
self.profile.as_ref()
}
pub fn reset_profile(&mut self) {
self.profile = None;
}
pub fn set_profile_kiosks(&mut self, profile_kiosks: Object) {
self.profile_kiosks = Some(profile_kiosks);
}
pub fn with_profile_kiosks(mut self, profile_kiosks: Object) -> DestinyResponsesDestinyProfileResponse {
self.profile_kiosks = Some(profile_kiosks);
self
}
pub fn profile_kiosks(&self) -> Option<&Object> {
self.profile_kiosks.as_ref()
}
pub fn reset_profile_kiosks(&mut self) {
self.profile_kiosks = None;
}
pub fn set_characters(&mut self, characters: Object) {
self.characters = Some(characters);
}
pub fn with_characters(mut self, characters: Object) -> DestinyResponsesDestinyProfileResponse {
self.characters = Some(characters);
self
}
pub fn characters(&self) -> Option<&Object> {
self.characters.as_ref()
}
pub fn reset_characters(&mut self) {
self.characters = None;
}
pub fn set_character_inventories(&mut self, character_inventories: Object) {
self.character_inventories = Some(character_inventories);
}
pub fn with_character_inventories(mut self, character_inventories: Object) -> DestinyResponsesDestinyProfileResponse {
self.character_inventories = Some(character_inventories);
self
}
pub fn character_inventories(&self) -> Option<&Object> {
self.character_inventories.as_ref()
}
pub fn reset_character_inventories(&mut self) {
self.character_inventories = None;
}
pub fn set_character_progressions(&mut self, character_progressions: Object) {
self.character_progressions = Some(character_progressions);
}
pub fn with_character_progressions(mut self, character_progressions: Object) -> DestinyResponsesDestinyProfileResponse {
self.character_progressions = Some(character_progressions);
self
}
pub fn character_progressions(&self) -> Option<&Object> {
self.character_progressions.as_ref()
}
pub fn reset_character_progressions(&mut self) {
self.character_progressions = None;
}
pub fn set_character_render_data(&mut self, character_render_data: Object) {
self.character_render_data = Some(character_render_data);
}
pub fn with_character_render_data(mut self, character_render_data: Object) -> DestinyResponsesDestinyProfileResponse {
self.character_render_data = Some(character_render_data);
self
}
pub fn character_render_data(&self) -> Option<&Object> {
self.character_render_data.as_ref()
}
pub fn reset_character_render_data(&mut self) {
self.character_render_data = None;
}
pub fn set_character_activities(&mut self, character_activities: Object) {
self.character_activities = Some(character_activities);
}
pub fn with_character_activities(mut self, character_activities: Object) -> DestinyResponsesDestinyProfileResponse {
self.character_activities = Some(character_activities);
self
}
pub fn character_activities(&self) -> Option<&Object> {
self.character_activities.as_ref()
}
pub fn reset_character_activities(&mut self) {
self.character_activities = None;
}
pub fn set_character_equipment(&mut self, character_equipment: Object) {
self.character_equipment = Some(character_equipment);
}
pub fn with_character_equipment(mut self, character_equipment: Object) -> DestinyResponsesDestinyProfileResponse {
self.character_equipment = Some(character_equipment);
self
}
pub fn character_equipment(&self) -> Option<&Object> {
self.character_equipment.as_ref()
}
pub fn reset_character_equipment(&mut self) {
self.character_equipment = None;
}
pub fn set_character_kiosks(&mut self, character_kiosks: Object) {
self.character_kiosks = Some(character_kiosks);
}
pub fn with_character_kiosks(mut self, character_kiosks: Object) -> DestinyResponsesDestinyProfileResponse {
self.character_kiosks = Some(character_kiosks);
self
}
pub fn character_kiosks(&self) -> Option<&Object> {
self.character_kiosks.as_ref()
}
pub fn reset_character_kiosks(&mut self) {
self.character_kiosks = None;
}
pub fn set_item_components(&mut self, item_components: Object) {
self.item_components = Some(item_components);
}
pub fn with_item_components(mut self, item_components: Object) -> DestinyResponsesDestinyProfileResponse {
self.item_components = Some(item_components);
self
}
pub fn item_components(&self) -> Option<&Object> {
self.item_components.as_ref()
}
pub fn reset_item_components(&mut self) {
self.item_components = None;
}
}
| true |
bc9e72c397c00dde831d0e917e789cfbefd42860
|
Rust
|
pratik2709/learn-rust
|
/iterators/src/main.rs
|
UTF-8
| 1,422 | 3.703125 | 4 |
[] |
no_license
|
fn main() {
let v1 = vec![1,2,3];
let v1_iter = v1.iter();
println!("{:?}", v1_iter);
for v in v1_iter{
println!("{}", v)
}
let sumof:i32 = v1.iter().sum();
let t:Vec<_> = v1.iter().map(|x| x+1).collect();
println!("{:?}", t);
shoe_main();
println!("{:?}",Counter::new());
let mut c = Counter::new();
println!("{:?}",c.next());
println!("{:?}",c.next());
println!("{:?}",c.next());
iterate_example();
}
fn iterate_example(){
let needle = 0xCB;
println!("{:b}", needle);
let hackstack = [1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147];
    // .iter() yields references (into_iter() would yield owned values), hence the deref below
for item in hackstack.iter(){
if *item == needle{
println!("item found:: {}", item)
}
}
}
#[derive(Debug)]
struct Shoe{
name: String,
size: u32
}
fn find_my_shoe(shoe:Vec<Shoe>, size:u32) -> Vec<Shoe>{
shoe.into_iter().filter(|s| s.size == size).collect()
}
fn shoe_main(){
let s = vec![
Shoe{name:String::from("t1"),size:10},
Shoe{name: String::from("t2"),size: 20},
Shoe{name:String::from("t3"), size:10},
];
println!("{:?}",find_my_shoe(s,10));
}
#[derive(Debug)]
struct Counter{
count:u32,
}
impl Counter{
fn new() -> Counter{
Counter{count:0}
}
}
impl Iterator for Counter{
type Item = u32;
fn next(&mut self) -> Option<Self::Item>{
self.count += 1;
if self.count < 3{
Some(self.count)
}
else{
None
}
}
}
| true |
a1ee3a41462e40e5f5844eeca8d8c137b7b51d6e
|
Rust
|
rune-rs/rune
|
/crates/rune/src/parse.rs
|
UTF-8
| 1,151 | 2.734375 | 3 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
//! Parsing utilities for Rune.
mod expectation;
mod id;
mod lexer;
mod opaque;
mod parse;
mod parser;
mod peek;
mod resolve;
pub use self::expectation::Expectation;
pub(crate) use self::expectation::IntoExpectation;
pub use self::id::{Id, NonZeroId};
pub(crate) use self::lexer::{Lexer, LexerMode};
pub(crate) use self::opaque::Opaque;
pub use self::parse::Parse;
pub use self::parser::{Parser, Peeker};
pub use self::peek::Peek;
pub(crate) use self::resolve::{Resolve, ResolveContext};
use crate::compile;
use crate::SourceId;
/// Parse the given input as the given type that implements
/// [Parse][crate::parse::Parse]. The specified `source_id` will be used when
/// referencing any parsed elements. `shebang` indicates if the parser should
/// try to parse a shebang or not.
///
/// This will raise an error through [Parser::eof] if the specified `source` is
/// not fully consumed by the parser.
pub fn parse_all<T>(source: &str, source_id: SourceId, shebang: bool) -> compile::Result<T>
where
T: Parse,
{
let mut parser = Parser::new(source, source_id, shebang);
let ast = parser.parse::<T>()?;
parser.eof()?;
Ok(ast)
}
| true |
55440e50b8e931b22f53753acdd621fff454c47f
|
Rust
|
im-0/log4rs-syslog
|
/src/file.rs
|
UTF-8
| 3,483 | 2.671875 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
#![forbid(unsafe_code)]
use std;
use libc;
use log;
use log4rs;
use syslog;
#[derive(Deserialize)]
struct SyslogAppenderOpenlogConfig {
ident: String,
option: syslog::LogOption,
facility: syslog::Facility,
}
#[derive(PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize)]
#[allow(non_camel_case_types)]
enum FakeLibcLogLevel {
LOG_EMERG,
LOG_ALERT,
LOG_CRIT,
LOG_ERR,
LOG_WARNING,
LOG_NOTICE,
LOG_INFO,
LOG_DEBUG,
}
type LevelMapConf = std::collections::BTreeMap<log::Level, FakeLibcLogLevel>;
#[derive(Deserialize)]
struct SyslogAppenderConfig {
openlog: Option<SyslogAppenderOpenlogConfig>,
encoder: Option<log4rs::encode::EncoderConfig>,
level_map: Option<LevelMapConf>,
}
struct SyslogAppenderDeserializer;
impl log4rs::file::Deserialize for SyslogAppenderDeserializer {
type Trait = log4rs::append::Append;
type Config = SyslogAppenderConfig;
fn deserialize(
&self,
config: Self::Config,
deserializers: &log4rs::file::Deserializers,
) -> Result<Box<Self::Trait>, Box<std::error::Error + Sync + Send>> {
let mut builder = syslog::SyslogAppender::builder();
if let Some(openlog_conf) = config.openlog {
builder = builder.openlog(
&openlog_conf.ident,
openlog_conf.option,
openlog_conf.facility,
);
};
if let Some(encoder_conf) = config.encoder {
builder = builder.encoder(deserializers.deserialize(&encoder_conf.kind, encoder_conf.config)?);
}
if let Some(level_map) = config.level_map {
let mut map = std::collections::BTreeMap::new();
for (level, libc_level) in level_map {
let libc_level = match libc_level {
FakeLibcLogLevel::LOG_EMERG => libc::LOG_EMERG,
FakeLibcLogLevel::LOG_ALERT => libc::LOG_ALERT,
FakeLibcLogLevel::LOG_CRIT => libc::LOG_CRIT,
FakeLibcLogLevel::LOG_ERR => libc::LOG_ERR,
FakeLibcLogLevel::LOG_WARNING => libc::LOG_WARNING,
FakeLibcLogLevel::LOG_NOTICE => libc::LOG_NOTICE,
FakeLibcLogLevel::LOG_INFO => libc::LOG_INFO,
FakeLibcLogLevel::LOG_DEBUG => libc::LOG_DEBUG,
};
let _ = map.insert(level, libc_level);
}
for level in &[
log::Level::Error,
log::Level::Warn,
log::Level::Info,
log::Level::Debug,
log::Level::Trace,
] {
let _ = map.get(level)
.ok_or_else(|| format!("Log level missing in map: {:?}", level))?;
}
builder = builder.level_map(Box::new(move |l| map[&l]));
}
Ok(Box::new(builder.build()))
}
}
/// Register deserializer for creating syslog appender based on log4rs configuration file.
///
/// See `./examples/from_conf.rs` for full example.
///
/// # Examples
///
/// ```
/// extern crate log4rs;
/// extern crate log4rs_syslog;
///
/// let mut deserializers = log4rs::file::Deserializers::new();
/// log4rs_syslog::register(&mut deserializers);
/// let result = log4rs::init_file("/path/to/log-conf.yaml", deserializers);
/// ```
pub fn register(deserializers: &mut log4rs::file::Deserializers) {
deserializers.insert("libc-syslog", SyslogAppenderDeserializer);
}
| true |
f263f9002d1fa6a2d4c9c710882b4c8ae9fc8870
|
Rust
|
CurryPseudo/curry-pbrt
|
/src/material/bxdf/mod.rs
|
UTF-8
| 9,564 | 2.65625 | 3 |
[] |
no_license
|
mod lambertian;
mod microfacet;
mod oren_nayar;
mod specular;
use crate::*;
pub use lambertian::*;
pub use microfacet::*;
pub use oren_nayar::*;
pub use specular::*;
use std::sync::Arc;
pub enum BxDFType {
Delta,
Reflect,
Transmit,
}
pub trait BxDF {
fn f(&self, wo: &Vector3f, wi: &Vector3f) -> Option<Spectrum>;
fn sample_f(&self, wo: &Vector3f, u: &Point2f) -> (Vector3f, Option<Spectrum>, Float) {
let (mut wi, pdf) = cosine_sample_hemisphere(*u);
match self.bxdf_type() {
BxDFType::Reflect => {
if wo.z < 0. {
wi.z *= -1.;
}
}
BxDFType::Transmit => {
if wo.z > 0. {
wi.z *= -1.;
}
}
_ => (),
};
let f = self.f(wo, &wi);
(wi, f, pdf)
}
fn pdf(&self, _wo: &Vector3f, wi: &Vector3f) -> Float {
wi.z * INV_PI
}
fn f_pdf(&self, wo: &Vector3f, wi: &Vector3f) -> (Option<Spectrum>, Float) {
(self.f(wo, wi), self.pdf(wo, wi))
}
fn bxdf_type(&self) -> BxDFType {
BxDFType::Reflect
}
}
pub struct ScaledBxDF(Arc<dyn BxDF>, Spectrum);
impl BxDF for ScaledBxDF {
fn f(&self, wo: &Vector3f, wi: &Vector3f) -> Option<Spectrum> {
self.0.f(wo, wi).map(|f| f * self.1)
}
fn sample_f(&self, wo: &Vector3f, u: &Point2f) -> (Vector3f, Option<Spectrum>, Float) {
let (wi, s, pdf) = self.0.sample_f(wo, u);
(wi, s.map(|f| f * self.1), pdf)
}
fn pdf(&self, wo: &Vector3f, wi: &Vector3f) -> Float {
self.0.pdf(wo, wi)
}
fn f_pdf(&self, wo: &Vector3f, wi: &Vector3f) -> (Option<Spectrum>, Float) {
let (f, pdf) = self.0.f_pdf(wo, wi);
(f.map(|f| f * self.1), pdf)
}
fn bxdf_type(&self) -> BxDFType {
self.0.bxdf_type()
}
}
pub struct BSDF {
n: Vector3f,
sn: Vector3f,
snx: Vector3f,
sny: Vector3f,
bxdfs: Vec<Arc<dyn BxDF>>,
reflect_bxdfs: Vec<Arc<dyn BxDF>>,
transmit_bxdfs: Vec<Arc<dyn BxDF>>,
delta_bxdfs: Vec<Arc<dyn DeltaBxDF>>,
}
impl BSDF {
pub fn new(n: Normal3f, sn: Normal3f) -> Self {
let sn = sn.into();
let (snx, sny) = coordinate_system(&sn);
let n = Vector3f::new(n.dot(&snx), n.dot(&sny), n.dot(&sn)).normalize();
Self {
n,
sn,
snx,
sny,
bxdfs: Vec::new(),
reflect_bxdfs: Vec::new(),
transmit_bxdfs: Vec::new(),
delta_bxdfs: Vec::new(),
}
}
fn local_to_world(&self, w: &Vector3f) -> Vector3f {
let snx = &self.snx;
let sny = &self.sny;
let sn = &self.sn;
Vector3f::new(
snx.x * w.x + sny.x * w.y + sn.x * w.z,
snx.y * w.x + sny.y * w.y + sn.y * w.z,
snx.z * w.x + sny.z * w.y + sn.z * w.z,
)
.normalize()
}
fn world_to_local(&self, w: &Vector3f) -> Vector3f {
Vector3f::new(w.dot(&self.snx), w.dot(&self.sny), w.dot(&self.sn)).normalize()
}
pub fn add_bxdf<T: BxDF + 'static>(&mut self, bxdf: Arc<T>) {
self.bxdfs.push(bxdf.clone());
match bxdf.bxdf_type() {
BxDFType::Reflect => {
self.reflect_bxdfs.push(bxdf);
}
BxDFType::Transmit => {
self.transmit_bxdfs.push(bxdf);
}
BxDFType::Delta => panic!(),
}
}
pub fn add_delta_bxdf<T: DeltaBxDF + BxDF + 'static>(&mut self, delta_bxdf: Arc<T>) {
self.delta_bxdfs.push(delta_bxdf);
}
pub fn sample_all_delta_f(&self, wo: &Vector3f) -> Vec<(Vector3f, Spectrum)> {
let mut r = Vec::new();
for delta_bxdf in &self.delta_bxdfs {
if let Some((wi, s)) = delta_bxdf.sample_f(&self.world_to_local(wo)) {
r.push((self.local_to_world(&wi), s));
}
}
r
}
fn choose_no_delta_f(
&self,
index: usize,
wo: &Vector3f,
u: &Point2f,
) -> (Vector3f, Option<Spectrum>, Float) {
let choose_bxdf = &self.bxdfs[index];
let wo_local = self.world_to_local(wo);
let (wi_local, f, pdf) = choose_bxdf.sample_f(&wo_local, u);
let wi = self.local_to_world(&wi_local);
let bxdfs_len_f = self.bxdfs.len() as Float;
(wi, f, pdf / bxdfs_len_f)
}
pub fn sample_no_delta_f(
&self,
wo: &Vector3f,
u: &Point2f,
) -> (Vector3f, Option<Spectrum>, Float) {
if self.bxdfs.is_empty() {
return (Vector3f::new(0., 0., 0.), None, 0.);
}
let (index, remap) = sample_usize_remap(u.x, self.bxdfs.len());
self.choose_no_delta_f(index, wo, &Point2f::new(remap, u.y))
}
pub fn sample_delta_f(&self, wo: &Vector3f, u: Float) -> (Vector3f, Option<Spectrum>, Float) {
let wo_local = &self.world_to_local(wo);
if self.delta_bxdfs.is_empty() {
return (Vector3f::new(0., 0., 0.), None, 0.);
}
let mut wi_f = Vec::new();
for i in 0..self.delta_bxdfs.len() {
let delta_bxdf = &self.delta_bxdfs[i];
if let Some((wi_local, f)) = delta_bxdf.sample_f(wo_local) {
wi_f.push((wi_local, f));
}
}
let (i, pdf, _) = sample_distribution_1d_remap(u, wi_f.len(), &|i| wi_f[i].1.y());
let (wi_local, f) = wi_f[i];
(self.local_to_world(&wi_local), Some(f), pdf)
}
pub fn no_delta_f_pdf(&self, wo: &Vector3f, wi: &Vector3f) -> (Option<Spectrum>, Float) {
if self.bxdfs.is_empty() {
return (None, 0.);
}
let wo = self.world_to_local(wo);
let wi = self.world_to_local(wi);
let reflect = wo.dot(&self.n) * wi.dot(&self.n) > 0.;
let mut f = None;
let mut pdf = 0.;
let bxdfs = if reflect {
&self.reflect_bxdfs
} else {
&self.transmit_bxdfs
};
for bxdf in bxdfs {
if let (Some(this_f), this_pdf) = bxdf.f_pdf(&wo, &wi) {
f = Some(f.unwrap_or_else(|| Spectrum::new(0.)) + this_f);
pdf += this_pdf;
}
}
let len = bxdfs.len() as Float;
(f, pdf / len)
}
pub fn sample_f(
&self,
wo: &Vector3f,
sampler: &mut dyn Sampler,
) -> (Vector3f, Option<Spectrum>, Float, bool) {
let (i, pdf, remap) = sampler.get_distribution_1d_remap(2, &|i| if i == 0 {self.bxdfs.len()} else {self.delta_bxdfs.len()} as Float );
let ((wi, f, internal_pdf), is_delta) = if i == 0 {
(
self.sample_no_delta_f(wo, &Point2f::new(remap, sampler.get_1d())),
false,
)
} else {
(self.sample_delta_f(wo, remap), true)
};
(wi, f, pdf * internal_pdf, is_delta)
}
pub fn is_all_delta(&self) -> bool {
self.bxdfs.is_empty()
}
pub fn mix(self, rhs: Self, scale: Spectrum) -> Self {
let s1 = scale;
let s2 = Spectrum::new(1.) - s1;
if s1.is_black() {
return rhs;
}
if s2.is_black() {
return self;
}
let mut bxdfs: Vec<Arc<dyn BxDF>> = Vec::new();
for bxdf in self.bxdfs {
bxdfs.push(Arc::new(ScaledBxDF(bxdf, s1)));
}
for bxdf in rhs.bxdfs {
bxdfs.push(Arc::new(ScaledBxDF(bxdf, s2)));
}
let mut reflect_bxdfs: Vec<Arc<dyn BxDF>> = Vec::new();
for bxdf in self.reflect_bxdfs {
reflect_bxdfs.push(Arc::new(ScaledBxDF(bxdf, s1)));
}
for bxdf in rhs.reflect_bxdfs {
reflect_bxdfs.push(Arc::new(ScaledBxDF(bxdf, s2)));
}
let mut transmit_bxdfs: Vec<Arc<dyn BxDF>> = Vec::new();
for bxdf in self.transmit_bxdfs {
transmit_bxdfs.push(Arc::new(ScaledBxDF(bxdf, s1)));
}
for bxdf in rhs.transmit_bxdfs {
transmit_bxdfs.push(Arc::new(ScaledBxDF(bxdf, s2)));
}
let mut delta_bxdfs: Vec<Arc<dyn DeltaBxDF>> = Vec::new();
for bxdf in self.delta_bxdfs {
delta_bxdfs.push(Arc::new(ScaledDeltaBxDF(bxdf, s1)));
}
for bxdf in rhs.delta_bxdfs {
delta_bxdfs.push(Arc::new(ScaledDeltaBxDF(bxdf, s2)));
}
let n = self.n;
let sn = self.sn;
let snx = self.snx;
let sny = self.sny;
Self {
n,
sn,
snx,
sny,
bxdfs,
reflect_bxdfs,
transmit_bxdfs,
delta_bxdfs,
}
}
}
pub trait DeltaBxDF {
fn sample_f(&self, wo: &Vector3f) -> Option<(Vector3f, Spectrum)>;
}
impl<T: DeltaBxDF> BxDF for T {
fn sample_f(&self, wo: &Vector3f, _: &Point2f) -> (Vector3f, Option<Spectrum>, Float) {
if let Some((wi, s)) = self.sample_f(wo) {
(wi, Some(s), 1.)
} else {
(Vector3f::new(0., 0., 0.), None, 0.)
}
}
fn pdf(&self, _wo: &Vector3f, _wi: &Vector3f) -> Float {
0.
}
fn f(&self, _wo: &Vector3f, _wi: &Vector3f) -> Option<Spectrum> {
None
}
fn bxdf_type(&self) -> BxDFType {
BxDFType::Delta
}
}
pub struct ScaledDeltaBxDF(Arc<dyn DeltaBxDF>, Spectrum);
impl DeltaBxDF for ScaledDeltaBxDF {
fn sample_f(&self, wo: &Vector3f) -> Option<(Vector3f, Spectrum)> {
self.0.sample_f(wo).map(|(wi, f)| (wi, f * self.1))
}
}
| true |
ef9dcb84c89c6aea0966344418da37b8988507ea
|
Rust
|
Rowmance/Chip8
|
/src/keypad.rs
|
UTF-8
| 3,394 | 3.53125 | 4 |
[] |
no_license
|
use sdl2::keyboard::Keycode;
use std::mem::discriminant;
/// The keymap to use.
pub enum KeypadSetting {
/// DVORAK bindings.
DVORAK,
/// Qwerty Bindings.
QWERTY,
}
/// Represents a keypad.
pub struct Keypad {
/// The state of the 16 keys.
///
/// These have the following layout:
    /// ```text
/// 1 2 3 C
/// 4 5 6 D
/// 7 8 9 E
/// A 0 B F
/// ```
/// The value is true if the key is pressed.
pub keys: [bool; 16],
/// The keypad setting
pub setting: KeypadSetting,
}
impl Keypad {
pub fn new(setting: KeypadSetting) -> Self {
Keypad {
keys: [false; 16],
setting,
}
}
pub fn clear(&mut self) {
for i in 0..self.keys.len() {
self.keys[i] = false;
}
}
/// Returns true if the given key index is pressed.
pub fn is_key_pressed(&self, key: u8) -> bool {
self.keys[key as usize]
}
/// Maps the given pressed keyboard-key to an index
pub fn set_from_keycode(&mut self, key: Keycode, state: bool) {
if discriminant(&self.setting) == discriminant(&KeypadSetting::DVORAK) {
match key {
Keycode::Num1 => self.keys[0x1 as usize] = state,
Keycode::Num2 => self.keys[0x2 as usize] = state,
Keycode::Num3 => self.keys[0x3 as usize] = state,
Keycode::Num4 => self.keys[0xC as usize] = state,
Keycode::Quote => self.keys[0x4 as usize] = state,
Keycode::Comma => self.keys[0x5 as usize] = state,
Keycode::Period => self.keys[0x6 as usize] = state,
Keycode::P => self.keys[0xD as usize] = state,
Keycode::A => self.keys[0x7 as usize] = state,
Keycode::O => self.keys[0x8 as usize] = state,
Keycode::E => self.keys[0x9 as usize] = state,
Keycode::U => self.keys[0xE as usize] = state,
Keycode::Semicolon => self.keys[0xA as usize] = state,
Keycode::Q => self.keys[0x0 as usize] = state,
Keycode::J => self.keys[0xB as usize] = state,
Keycode::K => self.keys[0xF as usize] = state,
_ => (),
}
} else {
match key {
Keycode::Num1 => self.keys[0x1 as usize] = state,
Keycode::Num2 => self.keys[0x2 as usize] = state,
Keycode::Num3 => self.keys[0x3 as usize] = state,
Keycode::Num4 => self.keys[0xC as usize] = state,
Keycode::Q => self.keys[0x4 as usize] = state,
Keycode::W => self.keys[0x5 as usize] = state,
Keycode::E => self.keys[0x6 as usize] = state,
Keycode::R => self.keys[0xD as usize] = state,
Keycode::A => self.keys[0x7 as usize] = state,
Keycode::S => self.keys[0x8 as usize] = state,
Keycode::D => self.keys[0x9 as usize] = state,
Keycode::F => self.keys[0xE as usize] = state,
Keycode::Z => self.keys[0xA as usize] = state,
Keycode::X => self.keys[0x0 as usize] = state,
Keycode::C => self.keys[0xB as usize] = state,
Keycode::V => self.keys[0xF as usize] = state,
_ => (),
}
}
}
}
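// A small usage sketch added for illustration (not part of the original file):
// with the QWERTY layout, the physical 'V' key maps to CHIP-8 key 0xF and the
// flag is cleared again when the key is released.
#[cfg(test)]
mod tests {
    use super::*;
    use sdl2::keyboard::Keycode;
    #[test]
    fn qwerty_v_maps_to_key_0xf() {
        let mut pad = Keypad::new(KeypadSetting::QWERTY);
        pad.set_from_keycode(Keycode::V, true);
        assert!(pad.is_key_pressed(0xF));
        pad.set_from_keycode(Keycode::V, false);
        assert!(!pad.is_key_pressed(0xF));
    }
}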
| true |
ef7d25e4aa95421e665bec9244c1a47e6f16395e
|
Rust
|
bvdvecht/cqc
|
/tests/request.rs
|
UTF-8
| 13,694 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
extern crate cqc;
#[cfg(test)]
mod request {
use cqc::builder::{Client, RemoteId};
use cqc::hdr::*;
use cqc::{Decoder, Encoder, Request};
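    // Descriptive note: these helper macros extract byte `$byte` of a u16/u32,
    // where byte 0 is the most significant, matching the big-endian expected
    // buffers built in the tests below.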
macro_rules! get_byte_16 {
($value:expr, $byte:expr) => {
($value >> ((1 - $byte) * 8)) as u8
};
}
macro_rules! get_byte_32 {
($value:expr, $byte:expr) => {
($value >> ((3 - $byte) * 8)) as u8
};
}
// Set up constants.
const APP_ID: u16 = 0x0A_0E;
const QUBIT_ID: u16 = 0xBE_56;
const EXTRA_QUBIT_ID: u16 = 0xFE_80;
const REMOTE_APP_ID: u16 = 0x5E_3F;
const REMOTE_NODE: u32 = 0xAE_04_E2_52;
const REMOTE_PORT: u16 = 0x91_03;
const STEP: u8 = 192;
// Encode a request packet that only has a CQC header.
#[test]
fn cqc_hdr() {
let client = Client::new(APP_ID);
let request = client.hello();
// Buffer to write into.
let buf_len: usize = request.len() as usize;
let mut buffer = vec![0xAA; buf_len];
// Expected values
let msg_type = MsgType::Tp(Tp::Hello);
let length = 0;
// Big-endian
let expected: Vec<u8> = vec![
Version::V2 as u8,
From::from(msg_type),
get_byte_16!(APP_ID, 0),
get_byte_16!(APP_ID, 1),
get_byte_32!(length, 0),
get_byte_32!(length, 1),
get_byte_32!(length, 2),
get_byte_32!(length, 3),
];
let encoder = Encoder::new();
encoder.encode(&request, &mut buffer[..]);
assert_eq!(buffer, expected);
let decoder = Decoder::new();
let decoded: Request = decoder.decode(&buffer[..]).unwrap();
assert_eq!(decoded, request);
}
// Encode a packet that has a CMD header, but no XTRA header.
#[test]
fn cmd_hdr() {
let client = Client::new(APP_ID);
let request = client
.cmd_new(QUBIT_ID, *CmdOpt::empty().set_notify().set_block());
// Buffer to write into.
let buf_len: usize = request.len() as usize;
let mut buffer = vec![0xAA; buf_len];
// Expected values
let msg_type = MsgType::Tp(Tp::Command);
let length = CmdHdr::hdr_len();
let instr = Cmd::New;
let options = *CmdOpt::empty().set_notify().set_block();
// Big-endian
let expected: Vec<u8> = vec![
// CQC header
Version::V2 as u8,
From::from(msg_type),
get_byte_16!(APP_ID, 0),
get_byte_16!(APP_ID, 1),
get_byte_32!(length, 0),
get_byte_32!(length, 1),
get_byte_32!(length, 2),
get_byte_32!(length, 3),
// CMD header
get_byte_16!(QUBIT_ID, 0),
get_byte_16!(QUBIT_ID, 1),
instr as u8,
options.bits(),
];
let encoder = Encoder::new();
encoder.encode(&request, &mut buffer[..]);
assert_eq!(buffer, expected);
let decoder = Decoder::new();
let decoded: Request = decoder.decode(&buffer[..]).unwrap();
assert_eq!(decoded, request);
}
// Encode a packet with a CMD and ROT headers.
#[test]
fn rot_hdr() {
let client = Client::new(APP_ID);
let request = client.cmd_rot_x(
QUBIT_ID,
*CmdOpt::empty().set_notify().set_block(),
STEP,
);
// Buffer to write into.
let buf_len: usize = request.len() as usize;
let mut buffer = vec![0xAA; buf_len];
// Expected values
let msg_type = MsgType::Tp(Tp::Command);
let length = CmdHdr::hdr_len() + RotHdr::hdr_len();
let instr = Cmd::RotX;
let options = *CmdOpt::empty().set_notify().set_block();
// Big-endian
let expected: Vec<u8> = vec![
// CQC header
Version::V2 as u8,
From::from(msg_type),
get_byte_16!(APP_ID, 0),
get_byte_16!(APP_ID, 1),
get_byte_32!(length, 0),
get_byte_32!(length, 1),
get_byte_32!(length, 2),
get_byte_32!(length, 3),
// CMD header
get_byte_16!(QUBIT_ID, 0),
get_byte_16!(QUBIT_ID, 1),
instr as u8,
options.bits(),
// XTRA header
STEP,
];
let encoder = Encoder::new();
encoder.encode(&request, &mut buffer[..]);
assert_eq!(buffer, expected);
let decoder = Decoder::new();
let decoded: Request = decoder.decode(&buffer[..]).unwrap();
assert_eq!(decoded, request);
}
// Encode a packet with a CMD and QUBIT headers.
#[test]
fn qubit_hdr() {
let client = Client::new(APP_ID);
let request = client.cmd_cnot(
QUBIT_ID,
*CmdOpt::empty().set_notify().set_block(),
EXTRA_QUBIT_ID,
);
// Buffer to write into.
let buf_len: usize = request.len() as usize;
let mut buffer = vec![0xAA; buf_len];
// Expected values
let msg_type = MsgType::Tp(Tp::Command);
let length = CmdHdr::hdr_len() + QubitHdr::hdr_len();
let instr = Cmd::Cnot;
let options = *CmdOpt::empty().set_notify().set_block();
// Big-endian
let expected: Vec<u8> = vec![
// CQC header
Version::V2 as u8,
From::from(msg_type),
get_byte_16!(APP_ID, 0),
get_byte_16!(APP_ID, 1),
get_byte_32!(length, 0),
get_byte_32!(length, 1),
get_byte_32!(length, 2),
get_byte_32!(length, 3),
// CMD header
get_byte_16!(QUBIT_ID, 0),
get_byte_16!(QUBIT_ID, 1),
instr as u8,
options.bits(),
// XTRA header
get_byte_16!(EXTRA_QUBIT_ID, 0),
get_byte_16!(EXTRA_QUBIT_ID, 1),
];
let encoder = Encoder::new();
encoder.encode(&request, &mut buffer[..]);
assert_eq!(buffer, expected);
let decoder = Decoder::new();
let decoded: Request = decoder.decode(&buffer[..]).unwrap();
assert_eq!(decoded, request);
}
// Encode a packet with a CMD and COMM headers.
#[test]
fn comm_hdr() {
let client = Client::new(APP_ID);
let request = client.cmd_send(
QUBIT_ID,
*CmdOpt::empty().set_notify().set_block(),
RemoteId {
remote_app_id: REMOTE_APP_ID,
remote_port: REMOTE_PORT,
remote_node: REMOTE_NODE,
},
);
// Buffer to write into.
let buf_len: usize = request.len() as usize;
let mut buffer = vec![0xAA; buf_len];
// Expected values
let msg_type = MsgType::Tp(Tp::Command);
let length = CmdHdr::hdr_len() + CommHdr::hdr_len();
let instr = Cmd::Send;
let options = *CmdOpt::empty().set_notify().set_block();
// Big-endian
let expected: Vec<u8> = vec![
// CQC header
Version::V2 as u8,
From::from(msg_type),
get_byte_16!(APP_ID, 0),
get_byte_16!(APP_ID, 1),
get_byte_32!(length, 0),
get_byte_32!(length, 1),
get_byte_32!(length, 2),
get_byte_32!(length, 3),
// CMD header
get_byte_16!(QUBIT_ID, 0),
get_byte_16!(QUBIT_ID, 1),
instr as u8,
options.bits(),
// XTRA header
get_byte_16!(REMOTE_APP_ID, 0),
get_byte_16!(REMOTE_APP_ID, 1),
get_byte_16!(REMOTE_PORT, 0),
get_byte_16!(REMOTE_PORT, 1),
get_byte_32!(REMOTE_NODE, 0),
get_byte_32!(REMOTE_NODE, 1),
get_byte_32!(REMOTE_NODE, 2),
get_byte_32!(REMOTE_NODE, 3),
];
let encoder = Encoder::new();
encoder.encode(&request, &mut buffer[..]);
assert_eq!(buffer, expected);
let decoder = Decoder::new();
let decoded: Request = decoder.decode(&buffer[..]).unwrap();
assert_eq!(decoded, request);
}
// Test an encoding when the provided buffer is too small (should panic).
#[test]
#[should_panic(expected = "failed to write whole buffer")]
fn cqc_hdr_buf_too_small() {
let client = Client::new(APP_ID);
let request = client.hello();
// Buffer to write into.
let mut buffer = vec![0xAA; (request.len() - 1) as usize];
let encoder = Encoder::new();
// This should panic.
encoder.encode(&request, &mut buffer[..]);
}
// Test an encoding when the provided buffer is too small, but sufficient
// for the CQC header (should panic).
#[test]
#[should_panic(expected = "failed to write whole buffer")]
fn cmd_hdr_buf_too_small() {
let client = Client::new(APP_ID);
let request = client.cmd_i(QUBIT_ID, CmdOpt::empty());
// Buffer to write into.
let mut buffer = vec![0xAA; (request.len() - 1) as usize];
let encoder = Encoder::new();
// This should panic.
encoder.encode(&request, &mut buffer[..]);
}
// Test an encoding when the provided buffer is too large. Excess should
// be untouched.
#[test]
fn buf_too_large() {
let client = Client::new(APP_ID);
let request = client.hello();
// Buffer to write into.
let write_len: usize = request.len() as usize;
let buf_len: usize = write_len + 4;
let mut buffer = vec![0xAA; buf_len as usize];
// Expected values
let msg_type = MsgType::Tp(Tp::Hello);
let length = 0;
// Big-endian
let expected: Vec<u8> = vec![
Version::V2 as u8,
From::from(msg_type),
get_byte_16!(APP_ID, 0),
get_byte_16!(APP_ID, 1),
get_byte_32!(length, 0),
get_byte_32!(length, 1),
get_byte_32!(length, 2),
get_byte_32!(length, 3),
// The rest should be untouched.
0xAA,
0xAA,
0xAA,
0xAA,
];
let encoder = Encoder::new();
encoder.encode(&request, &mut buffer[..]);
assert_eq!(buffer, expected);
let decoder = Decoder::new();
let decoded: Request = decoder.decode(&buffer[..]).unwrap();
assert_eq!(decoded, request);
}
// Decode a request that only has a non-zero length indicating follow-up
// headers, but it is too short to hold the expected header. This should
// return an Error and thus panic on unwrap.
#[test]
#[should_panic(expected = "invalid length 3, expected CmdHdr")]
fn invalid_len() {
let msg_type = MsgType::Tp(Tp::Command);
let length = CmdHdr::hdr_len() - 1;
let instr = Cmd::New;
let options = *CmdOpt::empty().set_notify().set_block();
let expected: Vec<u8> = vec![
// CQC header
Version::V2 as u8,
From::from(msg_type),
get_byte_16!(APP_ID, 0),
get_byte_16!(APP_ID, 1),
get_byte_32!(length, 0),
get_byte_32!(length, 1),
get_byte_32!(length, 2),
get_byte_32!(length, 3),
// CMD header
get_byte_16!(QUBIT_ID, 0),
get_byte_16!(QUBIT_ID, 1),
instr as u8,
options.bits(),
];
let decoder = Decoder::new();
let _: Request = decoder.decode(&expected[..]).unwrap();
}
// Decode a request that only has an invalid CQC version. This should
// return an error (and thus panic on an unwrap).
#[test]
#[should_panic(expected = "Invalid CQC version")]
fn invalid_version() {
let msg_type = MsgType::Tp(Tp::Command);
let length = CmdHdr::hdr_len();
let instr = Cmd::New;
let options = *CmdOpt::empty().set_notify().set_block();
let expected: Vec<u8> = vec![
// CQC header
Version::V2 as u8 + 1,
From::from(msg_type),
get_byte_16!(APP_ID, 0),
get_byte_16!(APP_ID, 1),
get_byte_32!(length, 0),
get_byte_32!(length, 1),
get_byte_32!(length, 2),
get_byte_32!(length, 3),
// CMD header
get_byte_16!(QUBIT_ID, 0),
get_byte_16!(QUBIT_ID, 1),
instr as u8,
options.bits(),
];
let decoder = Decoder::new();
let _: Request = decoder.decode(&expected[..]).unwrap();
}
// Decode a request that only has an invalid message type. This should
// return an error (and thus panic on an unwrap).
#[test]
#[should_panic(expected = "Invalid CQC message type")]
fn invalid_msg_type() {
let length = CmdHdr::hdr_len();
let instr = Cmd::New;
let options = *CmdOpt::empty().set_notify().set_block();
let expected: Vec<u8> = vec![
// CQC header
Version::V2 as u8,
0xFF,
get_byte_16!(APP_ID, 0),
get_byte_16!(APP_ID, 1),
get_byte_32!(length, 0),
get_byte_32!(length, 1),
get_byte_32!(length, 2),
get_byte_32!(length, 3),
// CMD header
get_byte_16!(QUBIT_ID, 0),
get_byte_16!(QUBIT_ID, 1),
instr as u8,
options.bits(),
];
let decoder = Decoder::new();
let _: Request = decoder.decode(&expected[..]).unwrap();
}
}
| true |
7716c6e378e06f3471b122aa8f888291b3eba6b6
|
Rust
|
SergiusIW/collider-rs
|
/src/core/dur_hitbox/mod.rs
|
UTF-8
| 9,555 | 2.734375 | 3 |
[
"Apache-2.0"
] |
permissive
|
// Copyright 2016-2018 Matthew D. Michelotti
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod solvers;
use core;
use geom::shape::PlacedBounds;
use geom::*;
use std::f64;
// DurHitbox (and DurHbVel) is almost identical to Hitbox (and HbVel), except
// it uses a `duration` (amount of time until invalidation of the hitbox)
// rather than an `end_time` (time of the invalidation of the hitbox). This
// new struct is meant to make that distinction clear.
#[derive(Clone)]
pub struct DurHbVel {
pub value: Vec2,
pub resize: Vec2,
pub duration: f64,
}
impl DurHbVel {
pub fn still() -> DurHbVel {
DurHbVel {
value: Vec2::zero(),
resize: Vec2::zero(),
duration: f64::INFINITY,
}
}
fn is_still(&self) -> bool {
self.value == Vec2::zero() && self.resize == Vec2::zero()
}
fn negate(&self) -> DurHbVel {
DurHbVel {
value: -self.value,
resize: -self.resize,
duration: self.duration,
}
}
}
impl PlacedBounds for DurHbVel {
fn bounds_center(&self) -> &Vec2 {
&self.value
}
fn bounds_dims(&self) -> &Vec2 {
&self.resize
}
}
#[derive(Clone)]
pub struct DurHitbox {
pub value: PlacedShape,
pub vel: DurHbVel,
}
impl DurHitbox {
pub fn new(value: PlacedShape) -> DurHitbox {
DurHitbox {
value,
vel: DurHbVel::still(),
}
}
pub fn advanced_shape(&self, time: f64) -> PlacedShape {
assert!(
time < core::HIGH_TIME,
"requires time < {}",
core::HIGH_TIME
);
self.value.advance(self.vel.value, self.vel.resize, time)
}
pub fn bounding_box(&self) -> PlacedShape {
self.bounding_box_for(self.vel.duration)
}
pub fn bounding_box_for(&self, duration: f64) -> PlacedShape {
if self.vel.is_still() {
self.value.as_rect()
} else {
let end_value = self.advanced_shape(duration);
self.value.bounding_box(&end_value)
}
}
pub fn collide_time(&self, other: &DurHitbox) -> f64 {
solvers::collide_time(self, other)
}
pub fn separate_time(&self, other: &DurHitbox, padding: f64) -> f64 {
solvers::separate_time(self, other, padding)
}
}
#[cfg(test)]
mod tests {
use core::dur_hitbox::DurHitbox;
use geom::*;
use std::f64;
#[test]
fn test_rect_rect_collision() {
let mut a = DurHitbox::new(PlacedShape::new(v2(-11.0, 0.0), Shape::rect(v2(2.0, 2.0))));
a.vel.value = v2(2.0, 0.0);
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(v2(12.0, 2.0), Shape::rect(v2(2.0, 4.0))));
b.vel.value = v2(-0.5, 0.0);
b.vel.resize = v2(1.0, 0.0);
b.vel.duration = 100.0;
assert_eq!(a.collide_time(&b), 7.0);
assert_eq!(b.collide_time(&a), 7.0);
assert_eq!(a.separate_time(&b, 0.1), 0.0);
}
#[test]
fn test_circle_circle_collision() {
let sqrt2 = (2.0f64).sqrt();
let mut a = DurHitbox::new(PlacedShape::new(v2(-0.1 * sqrt2, 0.0), Shape::circle(2.0)));
a.vel.value = v2(0.1, 0.0);
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(
v2(3.0 * sqrt2, 0.0),
Shape::circle(2.0 + sqrt2 * 0.1),
));
b.vel.value = v2(-2.0, 1.0);
b.vel.resize = v2(-0.1, -0.1);
b.vel.duration = 100.0;
assert!((a.collide_time(&b) - sqrt2).abs() < 1e-7);
assert_eq!(a.separate_time(&b, 0.1), 0.0);
}
#[test]
fn test_rect_circle_collision() {
let mut a = DurHitbox::new(PlacedShape::new(v2(-11.0, 0.0), Shape::circle(2.0)));
a.vel.value = v2(2.0, 0.0);
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(v2(12.0, 2.0), Shape::rect(v2(2.0, 4.0))));
b.vel.value = v2(-1.0, 0.0);
b.vel.duration = 100.0;
assert_eq!(a.collide_time(&b), 7.0);
assert_eq!(b.collide_time(&a), 7.0);
assert_eq!(a.separate_time(&b, 0.1), 0.0);
}
#[test]
fn test_rect_circle_angled_collision() {
let mut a = DurHitbox::new(PlacedShape::new(v2(0., 0.), Shape::square(2.)));
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(v2(5., 5.), Shape::circle(2.)));
b.vel.value = v2(-1., -1.);
b.vel.duration = 100.0;
let collide_time = a.collide_time(&b);
let expected_time = 4. - 1. / 2f64.sqrt();
assert_eq!(collide_time, expected_time);
}
#[test]
fn test_rect_rect_separation() {
let mut a = DurHitbox::new(PlacedShape::new(v2(0.0, 0.0), Shape::rect(v2(6.0, 4.0))));
a.vel.value = v2(1.0, 1.0);
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(v2(1.0, 0.0), Shape::rect(v2(4.0, 4.0))));
b.vel.value = v2(0.5, 0.0);
b.vel.duration = 100.0;
assert_eq!(a.separate_time(&b, 0.1), 4.1);
assert_eq!(b.separate_time(&a, 0.1), 4.1);
assert_eq!(a.collide_time(&b), 0.0);
}
#[test]
fn test_circle_circle_separation() {
let sqrt2 = (2.0f64).sqrt();
let mut a = DurHitbox::new(PlacedShape::new(v2(2.0, 5.0), Shape::circle(2.0)));
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(v2(3.0, 4.0), Shape::circle(1.8)));
b.vel.value = v2(-1.0, 1.0);
b.vel.duration = 100.0;
assert_eq!(a.separate_time(&b, 0.1), 1.0 + sqrt2);
assert_eq!(b.separate_time(&a, 0.1), 1.0 + sqrt2);
assert_eq!(a.collide_time(&b), 0.0);
}
#[test]
fn test_rect_circle_separation() {
let sqrt2 = (2.0f64).sqrt();
let mut a = DurHitbox::new(PlacedShape::new(v2(4.0, 2.0), Shape::rect(v2(4.0, 6.0))));
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(v2(3.0, 4.0), Shape::circle(3.8)));
b.vel.value = v2(-1.0, 1.0);
b.vel.duration = 100.0;
assert_eq!(a.separate_time(&b, 0.1), 1.0 + sqrt2);
assert_eq!(b.separate_time(&a, 0.1), 1.0 + sqrt2);
assert_eq!(a.collide_time(&b), 0.0);
}
#[test]
fn test_rect_circle_angled_separation() {
let mut a = DurHitbox::new(PlacedShape::new(v2(0., 0.), Shape::square(2.)));
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(v2(-1., 1.), Shape::circle(2.)));
b.vel.value = v2(1., -1.);
b.vel.duration = 100.0;
let separate_time = a.separate_time(&b, 0.1);
let expected_time = 2. + 1.1 / 2f64.sqrt();
assert_eq!(separate_time, expected_time);
}
#[test]
fn test_no_collision() {
let mut a = DurHitbox::new(PlacedShape::new(v2(-11.0, 0.0), Shape::rect(v2(2.0, 2.0))));
a.vel.value = v2(2.0, 0.0);
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(v2(12.0, 2.0), Shape::rect(v2(2.0, 4.0))));
b.vel.value = v2(-1.0, 1.0);
b.vel.duration = 100.0;
assert_eq!(a.collide_time(&b), f64::INFINITY);
assert_eq!(a.separate_time(&b, 0.1), 0.0);
b.value.shape = Shape::circle(2.0);
b.vel.resize = Vec2::zero();
assert_eq!(a.collide_time(&b), f64::INFINITY);
assert_eq!(a.separate_time(&b, 0.1), 0.0);
a.value.shape = Shape::circle(2.0);
a.vel.resize = Vec2::zero();
assert_eq!(a.collide_time(&b), f64::INFINITY);
assert_eq!(a.separate_time(&b, 0.1), 0.0);
}
#[test]
fn test_no_separation() {
let mut a = DurHitbox::new(PlacedShape::new(v2(5.0, 1.0), Shape::rect(v2(2.0, 2.0))));
a.vel.value = v2(2.0, 1.0);
a.vel.duration = 100.0;
let mut b = DurHitbox::new(PlacedShape::new(v2(5.0, 1.0), Shape::rect(v2(2.0, 4.0))));
b.vel.value = v2(2.0, 1.0);
b.vel.duration = 100.0;
assert_eq!(a.separate_time(&b, 0.1), f64::INFINITY);
assert_eq!(a.collide_time(&b), 0.0);
b.value.shape = Shape::circle(2.0);
b.vel.resize = Vec2::zero();
assert_eq!(a.separate_time(&b, 0.1), f64::INFINITY);
assert_eq!(a.collide_time(&b), 0.0);
a.value.shape = Shape::circle(2.0);
a.vel.resize = Vec2::zero();
assert_eq!(a.separate_time(&b, 0.1), f64::INFINITY);
assert_eq!(a.collide_time(&b), 0.0);
}
#[test]
fn test_low_duration() {
let sqrt2 = (2.0f64).sqrt();
let mut a = DurHitbox::new(PlacedShape::new(v2(0.0, 0.0), Shape::circle(2.0)));
a.vel.duration = 4.0 - sqrt2 + 0.01;
let mut b = DurHitbox::new(PlacedShape::new(v2(4.0, 4.0), Shape::circle(2.0)));
b.vel.value = v2(-1.0, -1.0);
b.vel.duration = 4.0 - sqrt2 + 0.01;
assert_eq!(a.collide_time(&b), 4.0 - sqrt2);
a.vel.duration -= 0.02;
assert_eq!(a.collide_time(&b), f64::INFINITY);
b.vel.duration -= 0.02;
assert_eq!(a.collide_time(&b), f64::INFINITY);
}
}
| true |
337c5a1bca6379fe27c2c523275fb14d679014ec
|
Rust
|
zevstravitz/Poker
|
/rust/src/cluster.rs
|
UTF-8
| 1,369 | 2.859375 | 3 |
[] |
no_license
|
use crate::card_utils;
use ndarray::Array;
use rand::prelude::SliceRandom;
use rand::thread_rng;
use std::collections::HashMap;
// Cluster hands based on the second moment of the equity distribution, aka E[HS^2].
// This approach is much faster but becomes inferior for larger abstractions.
// However, it may be sufficient when using depth-limited solving. This uses
// percentile bucketing.
pub fn cluster_ehs2(distributions: &HashMap<u64, Vec<f64>>, bins: i32) -> HashMap<u64, i32> {
    let mut ehs2: Vec<(u64, f64)> = second_moments(distributions);
// a.1 and b.1 are the ehs2 of the tuple, each tuple contains (hand, ehs2).
ehs2.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
let mut clusters = HashMap::new();
    for (idx, (hand, _val)) in ehs2.iter().enumerate() {
let bucket: i32 = ((bins as f64) * (idx as f64) / (ehs2.len() as f64)) as i32;
clusters.insert(hand.clone(), bucket);
}
clusters
}
// Assumes all distributions are normalized to 1
fn second_moments(distributions: &HashMap<u64, Vec<f64>>) -> Vec<(u64, f64)> {
let mut ehs2 = Vec::new();
for (hand, dist) in distributions {
let squared: Vec<f64> = dist.iter().map(|x| x.powi(2)).collect();
let sum: f64 = squared.iter().sum();
let mean = sum / squared.len() as f64;
ehs2.push((hand.clone(), mean));
}
ehs2
}
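// A minimal usage sketch added for illustration (hypothetical data, not from the
// original crate): with four hands and two bins, the two hands with the lowest
// E[HS^2] share bucket 0 and the two highest share bucket 1.
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;
    #[test]
    fn buckets_hands_by_ehs2_percentile() {
        let mut dists: HashMap<u64, Vec<f64>> = HashMap::new();
        dists.insert(1, vec![0.1, 0.1]); // low equity distribution
        dists.insert(2, vec![0.2, 0.2]);
        dists.insert(3, vec![0.8, 0.8]);
        dists.insert(4, vec![0.9, 0.9]); // high equity distribution
        let clusters = cluster_ehs2(&dists, 2);
        assert_eq!(clusters[&1], clusters[&2]);
        assert_eq!(clusters[&3], clusters[&4]);
        assert_ne!(clusters[&1], clusters[&4]);
    }
}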
| true |
0f1f32842d0eb740f348f5464fcbd90ebcf398e8
|
Rust
|
conradlo/rusty-leetcode
|
/src/_0105_construct_binary_tree_from_preorder_and_inorder_traversal.rs
|
UTF-8
| 4,275 | 3.28125 | 3 |
[] |
no_license
|
/*
* @lc app=leetcode id=105 lang=rust
*
* [105] Construct Binary Tree from Preorder and Inorder Traversal
*/
// @lc code=start
// Definition for a binary tree node.
#[derive(Debug, PartialEq, Eq)]
pub struct TreeNode {
pub val: i32,
pub left: Option<Rc<RefCell<TreeNode>>>,
pub right: Option<Rc<RefCell<TreeNode>>>,
}
impl TreeNode {
#[inline]
pub fn new(val: i32) -> Self {
TreeNode {
val,
left: None,
right: None,
}
}
}
struct Solution;
use std::cell::RefCell;
use std::rc::Rc;
impl Solution {
// You may assume that duplicates do not exist in the tree.
// 8ms 10.53%
pub fn build_tree(preorder: Vec<i32>, inorder: Vec<i32>) -> Option<Rc<RefCell<TreeNode>>> {
if let Some(&val) = preorder.get(0) {
// root node
let mut root = TreeNode::new(val);
// split inorder at root's val
let mut cursor = 0;
for (i, &node_val) in inorder.iter().enumerate() {
if node_val == val {
cursor = i; // index of root's val in inorder
}
}
let left_inorder = &inorder[0..cursor];
let right_inorder = &inorder[cursor + 1..]; // cursor + 1, skip root
let left_preorder = &preorder[1..=left_inorder.len()]; // start from 1, skip root's val
let right_preorder = &preorder[(left_inorder.len() + 1)..];
root.left = Solution::build_tree(left_preorder.to_vec(), left_inorder.to_vec());
root.right = Solution::build_tree(right_preorder.to_vec(), right_inorder.to_vec());
return Some(Rc::new(RefCell::new(root)));
}
None
}
}
// @lc code=end
// cargo watch -x "test _0105_ -- --nocapture --test-threads=1"
#[cfg(test)]
mod tests {
use super::*;
pub fn postorder_traversal_recursive(root: Option<Rc<RefCell<TreeNode>>>) -> Vec<i32> {
// copy from # 145
let mut traverse = vec![];
if let Some(rc_node) = root {
let node = rc_node.borrow();
traverse.append(&mut postorder_traversal_recursive(node.left.clone()));
traverse.append(&mut postorder_traversal_recursive(node.right.clone()));
traverse.push(node.val);
}
traverse
}
fn get_test_cases() -> Vec<(Vec<i32>, Vec<i32>, Vec<i32>)> {
// (preorder, inorder, postorder)
return vec![
(vec![], vec![], vec![]),
(vec![1], vec![1], vec![1]),
// [1, 2, 3]
(vec![1, 2, 3], vec![2, 1, 3], vec![2, 3, 1]),
// [1, null, 2, 3]
(vec![1, 2, 3], vec![1, 3, 2], vec![3, 2, 1]),
(
// [3, 9, 20, null, null, 15, 7]
vec![3, 9, 20, 15, 7],
vec![9, 3, 15, 20, 7],
vec![9, 15, 7, 20, 3],
),
(
// [5, 1, 4, null, null, 3, 6]
vec![5, 1, 4, 3, 6],
vec![1, 5, 3, 4, 6],
vec![1, 3, 6, 4, 5],
),
(
// [5,4, 7, 3, null, 2, null, -1, null, 9]
vec![5, 4, 3, -1, 7, 2, 9],
vec![-1, 3, 4, 5, 9, 2, 7],
vec![-1, 3, 4, 9, 2, 7, 5],
),
(
// [6,2,7,1,4,null,9,null,null,3,5,8]
vec![6, 2, 1, 4, 3, 5, 7, 9, 8],
vec![1, 2, 3, 4, 5, 6, 7, 8, 9],
vec![1, 3, 5, 4, 2, 8, 9, 7, 6],
),
(
// [5,4,8,11,null,13,6,7,2,null,null,null,1]
vec![5, 4, 11, 7, 2, 8, 13, 6, 1],
vec![7, 11, 2, 4, 5, 13, 8, 6, 1],
vec![7, 2, 11, 4, 13, 1, 6, 8, 5],
),
];
}
#[test]
fn run_test_cases_1() {
let testcases = get_test_cases();
for (preorder, inorder, postorder) in testcases {
// let _inorder = inorder.clone();
// let _postorder = postorder.clone();
let result = Solution::build_tree(preorder, inorder);
// println!("{:?} {:?} | {:#?}", _inorder, _postorder, result);
assert_eq!(postorder_traversal_recursive(result), postorder);
}
}
}
| true |
ea92291d9de1ed3af764c31133cd5a2293b71128
|
Rust
|
utilForever/BOJ
|
/Rust/15593 - Lifeguards (Bronze).rs
|
UTF-8
| 1,742 | 3.0625 | 3 |
[
"MIT"
] |
permissive
|
use io::Write;
use std::{io, str};
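// Fast whitespace-delimited token reader: `token()` refills a line buffer on demand
// and transmutes the split iterator's lifetime so it can be stored alongside the buffer.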
pub struct UnsafeScanner<R> {
reader: R,
buf_str: Vec<u8>,
buf_iter: str::SplitAsciiWhitespace<'static>,
}
impl<R: io::BufRead> UnsafeScanner<R> {
pub fn new(reader: R) -> Self {
Self {
reader,
buf_str: vec![],
buf_iter: "".split_ascii_whitespace(),
}
}
pub fn token<T: str::FromStr>(&mut self) -> T {
loop {
if let Some(token) = self.buf_iter.next() {
return token.parse().ok().expect("Failed parse");
}
self.buf_str.clear();
self.reader
.read_until(b'\n', &mut self.buf_str)
.expect("Failed read");
self.buf_iter = unsafe {
let slice = str::from_utf8_unchecked(&self.buf_str);
std::mem::transmute(slice.split_ascii_whitespace())
}
}
}
}
fn main() {
let (stdin, stdout) = (io::stdin(), io::stdout());
let mut scan = UnsafeScanner::new(stdin.lock());
let mut out = io::BufWriter::new(stdout.lock());
let n = scan.token::<usize>();
let mut lifeguards = vec![(0, 0); n];
let mut times = vec![0; 1000];
for i in 0..n {
let (a, b) = (scan.token::<usize>(), scan.token::<usize>());
lifeguards[i] = (a, b);
for j in a..b {
times[j] += 1;
}
}
let mut ret = 0;
for i in 0..n {
let (a, b) = lifeguards[i];
let mut times_cloned = times.clone();
for j in a..b {
times_cloned[j] -= 1;
}
let count = times_cloned.iter().filter(|&x| *x > 0).count();
ret = ret.max(count);
}
writeln!(out, "{ret}").unwrap();
}
| true |
f58370df4357743ae787df32d2bade6f9540cf3b
|
Rust
|
thepowersgang/rust_os
|
/Kernel/Modules/gui/input/mod.rs
|
UTF-8
| 11,799 | 2.65625 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/input/mod.rs
//! GUI input management
#[allow(unused_imports)]
use kernel::prelude::*;
use self::keyboard::KeyCode;
use core::sync::atomic::{Ordering,AtomicUsize,AtomicU8};
use kernel::sync::Mutex;
pub mod keyboard;
pub mod mouse;
#[derive(Debug)]
pub enum Event
{
KeyDown(keyboard::KeyCode),
KeyUp(keyboard::KeyCode),
KeyFire(keyboard::KeyCode),
Text([u8; 6]), // 6 bytes, as that can fit in a u64 with a 16-bit tag
MouseMove(u32,u32,i16,i16),
MouseDown(u32,u32,u8),
MouseUp(u32,u32,u8),
MouseClick(u32,u32, u8, u8),
}
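/// Tracks a left/right modifier key pair: bit 0 is the left key, bit 1 the right,
/// so `get()` reports the modifier as held while either side is down.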
struct ModKeyPair(AtomicUsize);
struct MouseCursor {
graphics_cursor: ::kernel::sync::Mutex<::kernel::metadevs::video::CursorHandle>,
}
struct InputChannel
{
//caps_active: AtomicBool, // Go DIAF capslock
shift_held: ModKeyPair,
ctrl_held: ModKeyPair,
alt_held: ModKeyPair,
//altgr: ModKeyPair, // AltGr is usually just one... but meh
last_key_pressed: AtomicU8,
//active_repeat: AtomicValue<u8>,
//repeat_start: Timestamp,
cursor: MouseCursor,
// TODO: Mutex feels too heavy, but there may be multiple mice on one channel
double_click_info: Mutex<MouseClickInfo>,
}
struct MouseClickInfo
{
button: u8,
count: u8,
time: ::kernel::time::TickCount,
x: u32,
y: u32,
}
//struct IMEState
//{
// ime_ofs: u8,
// ime_val: u32,
//}
/// Maximum time in kernel ticks between subsequent press/release events for a click/doubleclick
const DOUBLE_CLICK_TIMEOUT: u64 = 500; // 500ms
/// Maximum distance along any axis between press/release before a click is not registered
const MAX_CLICK_MOVE: u32 = 10;
static MAIN_INPUT: InputChannel = InputChannel::new();
pub fn init() {
//MAIN_INPUT.cursor.
}
fn get_channel_by_index(_idx: usize) -> &'static InputChannel {
&MAIN_INPUT
}
impl InputChannel
{
const fn new() -> InputChannel {
InputChannel {
shift_held: ModKeyPair::new(),
ctrl_held: ModKeyPair::new(),
alt_held: ModKeyPair::new(),
//altgr: ModKeyPair::new(),
cursor: MouseCursor::new(),
last_key_pressed: AtomicU8::new(KeyCode::None as u8),
double_click_info: Mutex::new(MouseClickInfo::new()),
}
}
pub fn handle_key(&self, key: keyboard::KeyCode, release: bool)
{
log_trace!("key={:?}, release={}", key, release);
match (release, key)
{
// Maintain key states
(false, KeyCode::RightShift) => self.shift_held.set_r(),
(false, KeyCode::LeftShift) => self.shift_held.set_l(),
(false, KeyCode::RightCtrl) => self.ctrl_held.set_r(),
(false, KeyCode::LeftCtrl) => self.ctrl_held.set_l(),
(false, KeyCode::RightAlt) => self.alt_held.set_r(),
(false, KeyCode::LeftAlt) => self.alt_held.set_l(),
(true, KeyCode::RightShift) => self.shift_held.clear_r(),
(true, KeyCode::LeftShift) => self.shift_held.clear_l(),
(true, KeyCode::RightCtrl) => self.ctrl_held.clear_r(),
(true, KeyCode::LeftCtrl) => self.ctrl_held.clear_l(),
(true, KeyCode::RightAlt) => self.alt_held.clear_r(),
(true, KeyCode::LeftAlt) => self.alt_held.clear_l(),
// Check for session change commands, don't propagate if they fired
// - 'try_change_session' checks for the required modifier keys and permissions
// TODO: Should this be handled by the `windows` module?
(false, KeyCode::Esc) => if self.try_change_session(0) { return ; },
(false, KeyCode::F1) => if self.try_change_session(1) { return ; },
(false, KeyCode::F2) => if self.try_change_session(2) { return ; },
(false, KeyCode::F3) => if self.try_change_session(3) { return ; },
(false, KeyCode::F4) => if self.try_change_session(4) { return ; },
(false, KeyCode::F5) => if self.try_change_session(5) { return ; },
(false, KeyCode::F6) => if self.try_change_session(6) { return ; },
(false, KeyCode::F7) => if self.try_change_session(7) { return ; },
(false, KeyCode::F8) => if self.try_change_session(8) { return ; },
(false, KeyCode::F9) => if self.try_change_session(9) { return ; },
(false, KeyCode::F10) => if self.try_change_session(10) { return ; },
(false, KeyCode::F11) => if self.try_change_session(11) { return ; },
(false, KeyCode::F12) => if self.try_change_session(12) { return ; },
_ => {},
}
let last_key = self.last_key_pressed.load(Ordering::Relaxed);
if !release {
self.last_key_pressed.store(key as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyDown(key));
}
// Handle fire and text events
if key.is_modifier()
{
// Only fire a modifier on key-up IF they were the last one pressed
// - This allows "Gui" (windows) to fire on key-up while still being used as a modifier
if release && last_key == key as u8
{
super::windows::handle_input( Event::KeyFire(key) );
}
}
else
{
// TODO: Support repetition (of the last non-modifier pressed)
if !release
{
super::windows::handle_input( Event::KeyFire(key) );
// TODO: Should only generate text if no non-shift modifiers are depressed
//if self.enable_input_translation {
let s = self.get_input_string(key);
if s.len() > 0 {
let mut buf = [0; 6];
buf[.. s.len()].clone_from_slice( s.as_bytes() );
super::windows::handle_input( Event::Text(buf) );
}
//}
}
}
// Send key combination to active active window (via the window subsystem)
if release {
self.last_key_pressed.store(KeyCode::None as u8, Ordering::Relaxed);
super::windows::handle_input(/*self, */Event::KeyUp(key));
}
}
pub fn handle_mouse_set(&self, norm_x: u16, norm_y: u16)
{
// Mouse movement, update cursor
let (dx,dy) = self.cursor.set_pos(norm_x, norm_y);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx as i16, dy as i16));
}
pub fn handle_mouse_move(&self, dx: i16, dy: i16)
{
// Mouse movement, update cursor
self.cursor.move_pos(dx as i32, dy as i32);
let (x,y) = self.cursor.pos();
self.double_click_info.lock().clear();
super::windows::handle_input(/*self, */Event::MouseMove(x, y, dx, dy));
}
pub fn handle_mouse_btn(&self, btn: u8, release: bool)
{
let (x,y) = self.cursor.pos();
if release
{
// Released - check the double-click timer
if let Some(ev) = self.double_click_info.lock().check( x,y, btn )
{
super::windows::handle_input(/*self, */ev);
}
super::windows::handle_input(/*self, */Event::MouseUp(x, y, btn));
}
else
{
// Pressed - reset the double-click timer
self.double_click_info.lock().reset(x,y, btn);
super::windows::handle_input(/*self, */Event::MouseDown(x, y, btn));
}
}
fn shift(&self) -> bool {
self.shift_held.get()
}
fn upper(&self) -> bool {
self.shift()
}
fn get_input_string(&self, keycode: KeyCode) -> &str
{
macro_rules! shift { ($s:ident: $lower:expr, $upper:expr) => { if $s.shift() { $upper } else {$lower} }; }
macro_rules! alpha { ($s:ident: $lower:expr, $upper:expr) => { if $s.upper() { $upper } else {$lower} }; }
match keycode
{
KeyCode::A => alpha!(self: "a", "A"),
KeyCode::B => alpha!(self: "b", "B"),
KeyCode::C => alpha!(self: "c", "C"),
KeyCode::D => alpha!(self: "d", "D"),
KeyCode::E => alpha!(self: "e", "E"),
KeyCode::F => alpha!(self: "f", "F"),
KeyCode::G => alpha!(self: "g", "G"),
KeyCode::H => alpha!(self: "h", "H"),
KeyCode::I => alpha!(self: "i", "I"),
KeyCode::J => alpha!(self: "j", "J"),
KeyCode::K => alpha!(self: "k", "K"),
KeyCode::L => alpha!(self: "l", "L"),
KeyCode::M => alpha!(self: "m", "M"),
KeyCode::N => alpha!(self: "n", "N"),
KeyCode::O => alpha!(self: "o", "O"),
KeyCode::P => alpha!(self: "p", "P"),
KeyCode::Q => alpha!(self: "q", "Q"),
KeyCode::R => alpha!(self: "r", "R"),
KeyCode::S => alpha!(self: "s", "S"),
KeyCode::T => alpha!(self: "t", "T"),
KeyCode::U => alpha!(self: "u", "U"),
KeyCode::V => alpha!(self: "v", "V"),
KeyCode::W => alpha!(self: "w", "W"),
KeyCode::X => alpha!(self: "x", "X"),
KeyCode::Y => alpha!(self: "y", "Y"),
KeyCode::Z => alpha!(self: "z", "Z"),
KeyCode::SquareOpen => shift!(self: "[", "{"),
		KeyCode::SquareClose => shift!(self: "]", "}"),
KeyCode::Backslash => shift!(self: "\\","|"),
KeyCode::Semicolon => shift!(self: ";", ":"),
KeyCode::Quote => shift!(self: "'", "\""),
KeyCode::Comma => shift!(self: ",", "<"),
KeyCode::Period => shift!(self: ".", ">"),
KeyCode::Slash => shift!(self: "/", "?"),
KeyCode::Kb1 => shift!(self: "1", "!"),
KeyCode::Kb2 => shift!(self: "2", "@"),
KeyCode::Kb3 => shift!(self: "3", "#"),
KeyCode::Kb4 => shift!(self: "4", "$"),
KeyCode::Kb5 => shift!(self: "5", "%"),
KeyCode::Kb6 => shift!(self: "6", "^"),
KeyCode::Kb7 => shift!(self: "7", "&"),
KeyCode::Kb8 => shift!(self: "8", "*"),
KeyCode::Kb9 => shift!(self: "9", "("),
KeyCode::Kb0 => shift!(self: "0", ")"),
KeyCode::Minus => shift!(self: "-", "_"),
KeyCode::Equals => shift!(self: "=", "+"),
KeyCode::Space => " ",
_ => "",
}
}
fn try_change_session(&self, target: usize) -> bool {
if self.is_master() && self.ctrl_held.get() && self.alt_held.get() {
super::windows::switch_active(target);
true
}
else {
false
}
}
fn is_master(&self) -> bool { true }
}
impl ModKeyPair {
const fn new() -> ModKeyPair {
ModKeyPair(AtomicUsize::new(0))
}
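	// Bit 0 tracks the left-hand key, bit 1 the right-hand key; the modifier
	// counts as held while either bit is set.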
fn set_l(&self) { self.0.fetch_or(1, Ordering::Relaxed); }
fn set_r(&self) { self.0.fetch_or(2, Ordering::Relaxed); }
fn clear_l(&self) { self.0.fetch_and(!1, Ordering::Relaxed); }
fn clear_r(&self) { self.0.fetch_and(!2, Ordering::Relaxed); }
fn get(&self) -> bool {
self.0.load(Ordering::Relaxed) != 0
}
}
impl MouseCursor {
const fn new() -> MouseCursor {
MouseCursor {
graphics_cursor: ::kernel::sync::Mutex::new(::kernel::metadevs::video::CursorHandle::new()),
}
}
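	/// Offset an unsigned coordinate by a signed delta, saturating at zero and
	/// `u32::MAX` instead of wrapping.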
fn add_coord(cur: u32, d: i32) -> u32 {
if d < 0 {
u32::saturating_sub(cur, -d as u32)
}
else {
u32::saturating_add(cur, d as u32)
}
}
/// Set cursor position to normalised coordinates
fn set_pos(&self, norm_x: u16, norm_y: u16) -> (i32, i32) {
let mut lh = self.graphics_cursor.lock();
let pos = lh.get_pos();
let rect = match ::kernel::metadevs::video::get_display_for_pos(pos)
{
Ok(v) => v,
Err(v) => v,
};
let new_pos = ::kernel::metadevs::video::Pos {
x: rect.x() + ((rect.w() as u64 * norm_x as u64) >> 16) as u32,
y: rect.y() + ((rect.h() as u64 * norm_y as u64) >> 16) as u32,
};
lh.set_pos(new_pos);
(
(new_pos.x as i32 - pos.x as i32) as i32,
(new_pos.y as i32 - pos.y as i32) as i32,
)
}
fn move_pos(&self, dx: i32, dy: i32) {
let mut lh = self.graphics_cursor.lock();
let mut pos = lh.get_pos();
pos.x = Self::add_coord(pos.x, dx);
pos.y = Self::add_coord(pos.y, dy);
lh.set_pos(pos);
}
fn pos(&self) -> (u32,u32) {
let pos = self.graphics_cursor.lock().get_pos();
(pos.x, pos.y)
}
}
impl MouseClickInfo
{
const fn new() -> MouseClickInfo {
MouseClickInfo {
button: 0xFF, x: 0, y: 0,
time: 0,
count: 0,
}
}
fn clear(&mut self)
{
self.button = 0xFF;
}
fn reset(&mut self, x: u32, y: u32, button: u8)
{
self.button = button;
self.count = 0;
self.x = x;
self.y = y;
self.time = ::kernel::time::ticks();
}
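	/// Called on button release: if this release matches the stored button and
	/// falls within the double-click time and movement limits, bump the click
	/// count and report a `MouseClick` event carrying it; otherwise clear the
	/// state and report nothing.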
fn check(&mut self, x: u32, y: u32, button: u8) -> Option<Event>
{
use kernel::lib::num::abs_diff;
if self.button != button {
self.clear();
None
}
else if (::kernel::time::ticks() - self.time) > DOUBLE_CLICK_TIMEOUT {
self.clear();
None
}
else if abs_diff(self.x, x) > MAX_CLICK_MOVE || abs_diff(self.y, y) > MAX_CLICK_MOVE {
self.clear();
None
}
else {
self.time = ::kernel::time::ticks();
self.x = x;
self.y = y;
if self.count < 0xFF {
self.count += 1;
}
Some( Event::MouseClick(x, y, button, self.count) )
}
}
}
| true |
61c482ebd8b981fd0c8642599c18cb54f511ae24
|
Rust
|
leandrosilva/wikipedia_roots
|
/src/main.rs
|
UTF-8
| 4,081 | 3.234375 | 3 |
[] |
no_license
|
use std::env;
use reqwest;
use scraper::{Html, Selector};
use std::thread;
use std::time::Duration;
use url::Url;
enum CrawlState {
Found,
Continue,
MaxSteps,
Loop,
}
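/// Reduce `url` to its scheme-plus-host root (path and query stripped), falling
/// back to the English Wikipedia root for URLs that cannot serve as a base.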
fn get_base_url(url: &String) -> Url {
let mut url_obj = Url::parse(url).unwrap();
match url_obj.path_segments_mut() {
Ok(mut path) => {
path.clear();
}
Err(_) => return Url::parse("https://en.wikipedia.org/").unwrap(),
}
url_obj.set_query(None);
url_obj
}
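/// Fetch `url` and scan its paragraphs, returning the first paragraph's leading
/// link that is not a `#` citation fragment, resolved against the page's base
/// URL; `None` if the request fails or no such link is found.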
fn find_first_link(url: &String) -> Option<String> {
let resp = reqwest::blocking::get(url).unwrap();
if !resp.status().is_success() {
return None;
}
let html = resp.text().unwrap();
let document = Html::parse_document(&html);
let p_selector = Selector::parse("p").unwrap();
let a_selector = Selector::parse("a").unwrap();
for p in document.select(&p_selector) {
match p.select(&a_selector).next() {
            // First link of the first paragraph
Some(a) => match a.value().attr("href") {
Some(href) => {
let href = href.to_owned();
// Skip citation
if !href.starts_with("#") {
let base_url = get_base_url(url);
let link = base_url.join(href.as_str()).unwrap();
return Some(link.as_str().to_owned());
}
}
_ => {}
},
_ => {}
}
}
None
}
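/// Inspect the crawl history and decide what to do next: the target article was
/// reached, the step budget ran out, a page was revisited (a loop), or the
/// crawl should continue.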
fn check_crawl_state(
search_history: &Vec<String>,
target_url: &String,
max_steps: usize,
) -> CrawlState {
match search_history.last() {
None => CrawlState::Continue,
Some(url) => {
if url == target_url {
CrawlState::Found
} else {
if search_history.len() > max_steps {
CrawlState::MaxSteps
} else {
let count = search_history.iter().filter(|e| e == &url).count();
if count == 2 {
CrawlState::Loop
} else {
CrawlState::Continue
}
}
}
}
}
}
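/// The first command-line argument, if any, is taken as the target article URL.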
fn get_target_url() -> Option<String> {
let mut args = env::args();
args.next();
args.next()
}
fn main() {
let start_url = String::from("https://en.wikipedia.org/wiki/Special:Random");
let target_url = get_target_url().unwrap_or(String::from("https://en.wikipedia.org/wiki/Philosophy"));
let max_steps = 30;
println!("Starting at: {}", start_url);
println!("Target is: {}", target_url);
println!("Starting...");
let mut search_history = Vec::new();
search_history.push(start_url);
loop {
match check_crawl_state(&search_history, &target_url, max_steps) {
CrawlState::Found => {
println!("Found the target article!");
break;
}
CrawlState::MaxSteps => {
println!("The search has gone on far too long. (abort)");
break;
}
CrawlState::Loop => {
println!("Got to an article visited before. (abort)");
let current_url = search_history.last().unwrap();
println!("> {}", current_url);
break;
}
CrawlState::Continue => {
let current_url = search_history.last().unwrap();
println!("{}", current_url);
                let first_link = find_first_link(current_url);
match first_link {
None => {
println!("Got to an article with no links to go by. (aborting)");
break;
}
Some(link_url) => {
search_history.push(link_url.to_owned());
thread::sleep(Duration::from_secs(2));
}
}
}
};
}
}
| true |
f385eddea39a368644160bb29a291b568202ad9f
|
Rust
|
ivfranco/notes
|
/ostep/chapter_31/semaphore/src/bin/mutex_nostarve.rs
|
UTF-8
| 3,743 | 3.140625 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
use std::{
cell::UnsafeCell,
collections::HashMap,
env::{self, Args},
fmt::Display,
ops::{Deref, DerefMut},
process,
str::FromStr,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread,
};
use semaphore::Semaphore;
fn main() {
let mut args = env::args();
args.next();
let num_threads = parse_next_arg(&mut args, "invalid NUM_THREADS");
let loops = parse_next_arg(&mut args, "invalid LOOPS");
let freq = Arc::new(NsMutex::new(HashMap::new()));
println!("begin");
let handles = (0..num_threads)
.map(|_| {
let local = Arc::clone(&freq);
thread::spawn(move || {
for _ in 0..loops {
let id = thread::current().id();
let mut freq = local.acquire();
*freq.entry(id).or_insert(0u32) += 1;
}
})
})
.collect::<Vec<_>>();
for handle in handles {
handle.join().unwrap();
}
let mut total = 0;
for (id, turns) in &*freq.acquire() {
println!("{:?} acquired the lock {} times", id, turns);
total += turns;
}
assert_eq!(total, num_threads * loops);
println!("end");
}
fn parse_next_arg<T: FromStr>(args: &mut Args, desc: impl Display) -> T {
args.next()
.and_then(|arg| arg.parse::<T>().ok())
.unwrap_or_else(|| {
error_exit(desc);
})
}
fn error_exit(err: impl Display) -> ! {
eprintln!("USAGE: EXEC NUM_THREADS LOOPS");
eprintln!("{}", err);
process::exit(1);
}
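/// A starvation-free mutex built from three semaphores: waiting threads gather
/// in "room 1" behind turnstile `t1`, move as a batch into "room 2" behind
/// `t2`, and enter the critical section one at a time from there, so no waiter
/// can be overtaken indefinitely (the classic two-room construction).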
struct NsMutex<T> {
mutex: Semaphore,
t1: Semaphore,
t2: Semaphore,
room1: UnsafeCell<u32>,
room2: UnsafeCell<u32>,
poisoned: AtomicBool,
data: UnsafeCell<T>,
}
impl<T> NsMutex<T> {
fn new(data: T) -> Self {
Self {
mutex: Semaphore::new(1),
t1: Semaphore::new(1),
t2: Semaphore::new(0),
room1: UnsafeCell::new(0),
room2: UnsafeCell::new(0),
poisoned: AtomicBool::new(false),
data: UnsafeCell::new(data),
}
}
fn acquire(&self) -> NsMutexGuard<T> {
unsafe {
self.mutex.wait().unwrap();
*self.room1.get() += 1;
self.mutex.post().unwrap();
self.t1.wait().unwrap();
*self.room2.get() += 1;
self.mutex.wait().unwrap();
*self.room1.get() -= 1;
if *self.room1.get() == 0 {
self.mutex.post().unwrap();
self.t2.post().unwrap();
} else {
self.mutex.post().unwrap();
self.t1.post().unwrap();
}
self.t2.wait().unwrap();
*self.room2.get() -= 1;
NsMutexGuard { lock: self }
}
}
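    /// Hand the lock to the next waiter: reopen the room-1 turnstile once room 2
    /// has drained, otherwise pass the baton to the next thread already in room 2.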
fn release(&self) {
unsafe {
if *self.room2.get() == 0 {
self.t1.post().unwrap();
} else {
self.t2.post().unwrap();
}
}
}
}
unsafe impl<T: Send> Send for NsMutex<T> {}
unsafe impl<T: Send + Sync> Sync for NsMutex<T> {}
struct NsMutexGuard<'a, T> {
lock: &'a NsMutex<T>,
}
impl<'a, T> Deref for NsMutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*self.lock.data.get() }
}
}
impl<'a, T> DerefMut for NsMutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.lock.data.get() }
}
}
impl<'a, T> Drop for NsMutexGuard<'a, T> {
fn drop(&mut self) {
if thread::panicking() {
self.lock.poisoned.store(true, Ordering::Relaxed);
}
self.lock.release();
}
}
| true |
049ad47b18b9c86a73c32ffc45c77d2553b66af7
|
Rust
|
strelec/DPLL-with-Rust
|
/src/solver/clause.rs
|
UTF-8
| 693 | 2.859375 | 3 |
[] |
no_license
|
extern crate bit_set;
use self::bit_set::BitSet;
pub type Set = BitSet;
pub type Bag = Vec<usize>;
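/// A CNF clause: `t` lists the variables occurring as positive literals, `f`
/// those occurring negated.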
pub struct Clause {
pub t: Bag,
pub f: Bag
}
impl Clause {
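	/// Satisfied under a partial assignment, where `t` holds variables assigned
	/// true and `f` variables assigned false.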
pub fn eval(&self, t: &Set, f: &Set) -> bool {
self.t.iter().any( |&v| t.contains(v) ) ||
self.f.iter().any( |&v| f.contains(v) )
}
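	/// Satisfied under a complete assignment in which every variable outside `t`
	/// is false.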
pub fn eval_complete(&self, t: &Set) -> bool {
self.t.iter().any( |&v| t.contains(v) ) ||
self.f.iter().any( |&v| !t.contains(v) )
}
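	/// Count literals in `bag` whose variable is not in `filter`, stopping early
	/// once `up_to` is reached; returns the count together with the last such
	/// variable, as needed for unit-clause detection.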
pub fn find_unit(bag: &Bag, filter: &Set, up_to: u8) -> (u8, usize) {
let mut count = 0u8;
let mut var = 0;
for &v in bag {
if !filter.contains(v) {
count += 1;
var = v;
if count == up_to { break }
}
}
(count, var)
}
}
| true |