| file_name | prefix | suffix | middle | fim_type |
| --- | --- | --- | --- | --- |
| large_string, lengths 4–140 | large_string, lengths 0–39k | large_string, lengths 0–36.1k | large_string, lengths 0–29.4k | large_string, 4 classes |
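Each row below is one fill-in-the-middle (FIM) sample: a source file split into a prefix, a middle, and a suffix, labelled with one of four `fim_type` classes (`conditional_block`, `identifier_name`, `identifier_body`, `random_line_split`). A minimal sketch of reassembling such samples, assuming the rows are stored as JSON lines whose field names mirror the columns above (the file path and storage format are assumptions):

```python
import json

def reassemble(sample: dict) -> str:
    """Concatenate the three FIM segments back into the full source text."""
    return sample["prefix"] + sample["middle"] + sample["suffix"]

# Hypothetical path; any per-row format exposing the five columns above works.
with open("fim_samples.jsonl", encoding="utf-8") as rows:
    for row in rows:
        sample = json.loads(row)
        print(sample["file_name"], sample["fim_type"], len(reassemble(sample)))
```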
client.rs
// #[macro_use]
extern crate actix;
// extern crate byteorder;
// extern crate bytes;
extern crate futures;
extern crate serde;
extern crate serde_json;
// extern crate tokio_io;
// extern crate tokio_tcp;
extern crate awc;
extern crate rustls;
extern crate structopt;
#[macro_use]
extern crate log;
extern crate env_logger;
// #[macro_use]
extern crate serde_derive;
use actix::{
// prelude::*, io::FramedWrite
io::{SinkWrite, WriteHandler},
prelude::*,
Actor,
ActorContext,
AsyncContext,
Context,
Handler,
StreamHandler,
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use futures::{
lazy,
/* future::ok, */ stream::{SplitSink, Stream},
Future,
};
use std::{
io,
// str::FromStr,
// time::Duration,
sync::Arc,
thread,
// net, process, thread,
};
// use tokio_io::{AsyncRead, io::WriteHalf};
// use tokio_tcp::TcpStream;
use awc::{
error::WsProtocolError,
http::StatusCode,
ws::{Codec, Frame, Message},
Client, Connector,
};
use rustls::ClientConfig;
use structopt::StructOpt;
// use webpki;
// use webpki_roots;
// mod codec;
// mod server;
// mod ws;
// mod util;
mod ws_var;
use ws_var::HEARTBEAT_INTERVAL;
#[derive(StructOpt, Debug, Clone)]
/// Generalized WebSocket Client
pub struct Opt {
/// Address to connect
#[structopt(short = "u", default_value = "https://localhost:443/ws")]
url: String,
    /// Message to send. Set it to '-' to send a single line read from stdin;
    /// leave it blank to run a console loop on stdin and send multiple messages.
#[structopt(short = "m", default_value = "")]
msg: String,
}
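// Example invocation (hypothetical endpoint; the flags are defined by `Opt` above):
//   cargo run -- -u https://localhost:443/ws -m "hello"
// An empty -m starts an interactive stdin loop; -m "-" sends a single line read
// from stdin and then exits.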
mod danger {
use rustls::{
self, Certificate, RootCertStore, ServerCertVerified, ServerCertVerifier, TLSError,
};
use webpki;
pub struct NoCertificateVerification {}
impl ServerCertVerifier for NoCertificateVerification {
fn verify_server_cert(
&self,
_roots: &RootCertStore,
_presented_certs: &[Certificate],
_dns_name: webpki::DNSNameRef<'_>,
_ocsp: &[u8],
) -> Result<ServerCertVerified, TLSError> {
Ok(ServerCertVerified::assertion())
}
}
}
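// NOTE: the verifier above accepts any server certificate, so TLS provides
// encryption but no server authentication -- convenient for testing against
// local self-signed certificates, unsafe in production.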
fn main() -> io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
let opt = Opt::from_args();
// let sys = System::new("ws-client");
System::run(move || {
let mut cfg = ClientConfig::new();
// let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
// cfg.set_protocols(&protos);
cfg.dangerous()
.set_certificate_verifier(Arc::new(danger::NoCertificateVerification {}));
let client = Client::build()
.connector(Connector::new().rustls(Arc::new(cfg)).finish())
.finish();
// sys.block_on(
Arbiter::spawn(lazy(move || {
client
.ws(&opt.url)
.connect()
.map_err(|e| panic!("{}", e))
.map(move |(response, framed)| {
let sys = System::current();
if response.status() != StatusCode::SWITCHING_PROTOCOLS {
sys.stop();
}
let (sink, stream) = framed.split();
let addr = WsClient::create(|ctx| {
WsClient::add_stream(stream, ctx);
WsClient(SinkWrite::new(sink, ctx))
});
let read_stdin = || -> String {
let mut cmd = String::new();
if io::stdin().read_line(&mut cmd).is_err() {
println!("error");
}
cmd
};
if opt.msg.is_empty() {
// start console loop
thread::spawn(move || loop {
addr.do_send(ClientCommand(read_stdin()));
});
} else if opt.msg == "-" {
    addr.do_send(ClientCommand(read_stdin()));
    sys.stop();
} else {
addr.do_send(ClientCommand(opt.msg));
sys.stop();
}
})
}));
})
// ).unwrap();
// sys.block_on(
// ).unwrap();
// Arbiter::spawn(
// TcpStream::connect(&addr)
// .and_then(|stream| {
// let addr = WsClient::create(|ctx| {
// let (r, w) = stream.split();
// WsClient::add_stream(
// FramedRead::new(r, codec::ClientWsCodec),
// ctx,
// );
// WsClient {
// framed: FramedWrite::new(
// w,
// codec::ClientWsCodec,
// ctx,
// ),
// }
// });
// // start console loop
// thread::spawn(move || loop {
// let mut cmd = String::new();
// if io::stdin().read_line(&mut cmd).is_err() {
// println!("error");
// return;
// }
// addr.do_send(ClientCommand(cmd));
// });
// ok(())
// })
// .map_err(|e| {
// println!("Can not connect to server: {}", e);
// process::exit(1)
// }),
// );
// println!("Running ws client");
// sys.run()
}
// struct WsClient {
// framed: FramedWrite<WriteHalf<TcpStream>, codec::ClientWsCodec>,
// }
// #[derive(Message)]
// struct ClientCommand(String);
// impl Actor for WsClient {
// type Context = Context<Self>;
// fn started(&mut self, ctx: &mut Context<Self>) {
// // start heartbeats otherwise server will disconnect after 10 seconds
// self.hb(ctx)
// }
// fn stopped(&mut self, _: &mut Context<Self>) {
// println!("Disconnected");
// // Stop application on disconnect
// System::current().stop();
// }
// }
// impl WsClient {
// fn hb(&self, ctx: &mut Context<Self>) {
// ctx.run_later(Duration::new(, 0), |act, ctx| {
// act.framed.write(codec::WsRequest::Ping);
// act.hb(ctx);
// // client should also check for a timeout here, similar to the
// // server code
// });
// }
// }
// impl actix::io::WriteHandler<io::Error> for WsClient {}
// /// Handle stdin commands
// impl Handler<ClientCommand> for WsClient {
// type Result = ();
// fn handle(&mut self, msg: ClientCommand, _: &mut Context<Self>) {
// let m = msg.0.trim();
// if m.is_empty() {
// return;
// }
// // we check for /sss type of messages
// // if m.starts_with('/') {
// // let v: Vec<&str> = m.splitn(2, ' ').collect();
// // match v[0] {
// // "/list" => {
// // self.framed.write(codec::WsRequest::List);
// // }
// // "/join" => {
// // if v.len() == 2 {
// // self.framed.write(codec::WsRequest::Join(v[1].to_owned()));
// // } else {
// // println!("!!! room name is required");
// // }
// // }
// // _ => println!("!!! unknown command"),
// // }
// // } else {
// self.framed.write(codec::WsRequest::Message(m.to_owned()));
// // }
// }
// }
// /// Server communication
// impl StreamHandler<codec::WsResponse, io::Error> for WsClient {
// fn handle(&mut self, msg: codec::WsResponse, _: &mut Context<Self>) {
// match msg {
// codec::WsResponse::Message(ref msg) => {
// println!("message: {}", msg);
// }
// // codec::WsResponse::Joined(ref msg) => {
// // println!("!!! joined: {}", msg);
// // }
// // codec::WsResponse::Rooms(rooms) => {
// // println!("\n!!! Available rooms:");
// // for room in rooms {
// // println!("{}", room);
// // }
// // println!("");
// // }
// _ => (),
// }
// }
// }
struct WsClient<T>(SinkWrite<SplitSink<Framed<T, Codec>>>)
where
T: AsyncRead + AsyncWrite;
#[derive(Message)]
struct ClientCommand(String);
impl<T: 'static> Actor for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Context<Self>) {
// start heartbeats otherwise server will disconnect after 10 seconds
self.hb(ctx)
}
fn stopped(&mut self, _: &mut Context<Self>) {
info!("Disconnected");
// Stop application on disconnect
System::current().stop();
}
}
impl<T: 'static> WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn hb(&self, ctx: &mut Context<Self>) {
ctx.run_later(HEARTBEAT_INTERVAL, |act, ctx| {
act.0.write(Message::Ping(String::new())).unwrap();
act.hb(ctx);
// client should also check for a timeout here, similar to the
// server code
});
}
}
/// Handle stdin commands
impl<T: 'static> Handler<ClientCommand> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
type Result = ();
fn handle(&mut self, msg: ClientCommand, _ctx: &mut Context<Self>) {
self.0.write(Message::Text(msg.0)).unwrap();
}
}
/// Handle server websocket messages
impl<T: 'static> StreamHandler<Frame, WsProtocolError> for WsClient<T>
where
T: AsyncRead + AsyncWrite,
{
fn handle(&mut self, msg: Frame, _ctx: &mut Context<Self>) {
match msg {
Frame::Text(txt) => println!("Server: {:?}", txt),
_ => (),
}
}
fn started(&mut self, _ctx: &mut Context<Self>) {
info!("Connected");
}
fn finished(&mut self, ctx: &mut Context<Self>) {
info!("Server disconnected");
ctx.stop()
}
}
impl<T: 'static> WriteHandler<WsProtocolError> for WsClient<T> where T: AsyncRead + AsyncWrite {}
base.py
"""
Common and utility functions/classes for vulnerability-manager
"""
import base64
import csv
from io import StringIO
import json
from math import floor
from os import environ
from distutils.util import strtobool # pylint: disable=import-error, no-name-in-module
import requests
import connexion
from flask import Response
from prometheus_client import Counter
from common.logging import get_logger
LOGGER = get_logger(__name__)
VMAAS_HOST = environ.get('VMAAS_HOST', 'http://vmaas-webapp-1.vmaas-ci.svc:8080') # pylint: disable=invalid-name
DEFAULT_ROUTE = "%s/%s" % (environ.get('PATH_PREFIX', "/api"),
environ.get('APP_NAME', "vulnerability"))
IDENTITY_HEADER = "x-rh-identity"
DEFAULT_PAGE_SIZE = 25
SKIP_ENTITLEMENT_CHECK = strtobool(environ.get('SKIP_ENTITLEMENT_CHECK', 'FALSE'))
READ_ONLY_MODE = strtobool(environ.get('READ_ONLY_MODE', 'FALSE'))
LOGGER.info("Access URL: %s", DEFAULT_ROUTE)
# Prometheus support
# Counter for all-the-get-calls, dealt with in BaseHandler
REQUEST_COUNTS = Counter('ve_manager_invocations', 'Number of calls per handler', ['method', 'endpoint'])
class InvalidArgumentException(Exception):
"""Illegal arguments for pagination/filtering/sorting"""
class ApplicationException(Exception):
"""General exception in the application"""
def __init__(self, message, status_code):
self.message = message
self.status_code = status_code
super().__init__()
def format_exception(self):
"""Formats error message to desired format"""
if isinstance(self.message, dict):
return self.message, self.status_code
return Request.format_exception(self.message, self.status_code)
class MissingEntitlementException(Exception):
"""smart management entitlement is missing"""
class ReadOnlyModeException(Exception):
"""manager is running in read-only mode"""
def basic_auth(username, password, required_scopes=None): # pylint: disable=unused-argument
"""
    Basic auth is done at the 3scale level.
"""
raise MissingEntitlementException
def auth(x_rh_identity, required_scopes=None): # pylint: disable=unused-argument
"""
Parses account number from the x-rh-identity header
"""
decoded_value = base64.b64decode(x_rh_identity).decode("utf-8")
LOGGER.debug('identity decoded: %s', decoded_value)
identity = json.loads(decoded_value)
if 'identity' not in identity:
return None
id_details = identity['identity']
if 'account_number' not in id_details:
return None
rh_account_number = id_details['account_number']
if SKIP_ENTITLEMENT_CHECK:
return {'uid': rh_account_number}
if 'entitlements' not in identity or 'smart_management' not in identity['entitlements']:
raise MissingEntitlementException
if identity['entitlements']['smart_management'].get('is_entitled', False):
return {'uid': rh_account_number}
raise MissingEntitlementException
def forbidden(exception): # pylint: disable=unused-argument
"""Override default connexion 401 coming from auth() with 403"""
return Response(response=json.dumps({'errors': [{'detail': 'smart_management entitlement is missing',
'status': '403'}]}),
status=403, mimetype='application/vnd.api+json')
class Request:
"""general class for processing requests"""
_endpoint_name = None
@staticmethod
def _check_int_arg(kwargs, key, dflt, zero_allowed=False):
val = kwargs.get(key, dflt)
if val < 0 or (val == 0 and not zero_allowed):
raise ApplicationException("Requested %s out of range: %s" % (key, val), 400)
return val
@staticmethod
def _check_read_only_mode():
if READ_ONLY_MODE:
raise ReadOnlyModeException("Service is running in read-only mode. Please try again later.")
@staticmethod
def _format_data(output_data_format, data_list):
if output_data_format == "csv":
output = StringIO()
if data_list:
# create list of columns - type, id and all keys from attributes
fields = ["type", "id"]
fields.extend(data_list[0]["attributes"].keys())
writer = csv.DictWriter(output, fields)
writer.writeheader()
for item in data_list:
# create flat dictionary (type, id + values from attributes) and write it
writer.writerow({field: item.get(field) or item["attributes"].get(field) for field in fields})
return output.getvalue()
return data_list
@classmethod
def _parse_list_arguments(cls, kwargs):
        # We may get limit/offset, or page/page_size, or both.
        # limit/offset 'wins' if it's set.
        # If *neither* is set, page/page_size default to 1/DEFAULT_PAGE_SIZE and limit/offset to DEFAULT_PAGE_SIZE/0.
        # Regardless, make sure limit/offset and page/page_size a) both exist, and b) are consistent, before we leave.
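        # Example of the reconciliation performed below (arbitrary values):
        #   offset=30, limit=15  -> page = floor(30 / 15) + 1 = 3, page_size = 15
        #   page=3, page_size=15 -> offset = (3 - 1) * 15 = 30, limit = 15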
offset_set = kwargs.get('offset', '') or kwargs.get('limit', '')
page_set = kwargs.get('page', '') or kwargs.get('page_size', '')
if offset_set:
limit = cls._check_int_arg(kwargs, "limit", DEFAULT_PAGE_SIZE)
offset = cls._check_int_arg(kwargs, "offset", 0, True)
page = floor(offset / limit) + 1
page_size = limit
elif page_set:
page = cls._check_int_arg(kwargs, "page", 1)
page_size = cls._check_int_arg(kwargs, "page_size", DEFAULT_PAGE_SIZE)
limit = page_size
offset = (page - 1) * page_size
else:
page = 1
offset = 0
page_size = DEFAULT_PAGE_SIZE
limit = DEFAULT_PAGE_SIZE
data_format = kwargs.get("data_format", "json")
if data_format not in ["json", "csv"]:
raise InvalidArgumentException("Invalid data format: %s" % kwargs.get("data_format", None))
return {
"filter": kwargs.get("filter", None),
"sort": kwargs.get("sort", None),
"page": page,
"page_size": page_size,
"limit": limit,
"offset": offset,
"data_format": data_format
}
@staticmethod
def format_exception(text, status_code):
"""Formats error message to desired format"""
return {"errors": [{"status": str(status_code), "detail": text}]}, status_code
@staticmethod
def hide_satellite_managed():
"""Parses hide-satellite-managed from headers"""
try:
return strtobool(connexion.request.headers.get('Hide-Satellite-Managed', 'false'))
except ValueError:
return False
@staticmethod
def _parse_arguments(kwargs, argv):
"""
        Utility method for getting parameters from the request, which arrive as
        strings, and converting them to the objects we'd like to have.
        Expects an array of {'arg_name': some_str, 'convert_func': e.g. float, int}.
        Returns a dict of values on success, throws an exception on failure.
"""
retval = {}
errors = []
for arg in argv:
retval[arg['arg_name']] = kwargs.get(arg['arg_name'], None)
if retval[arg['arg_name']]:
try:
if arg['convert_func'] is not None:
retval[arg['arg_name']] = arg['convert_func'](retval[arg['arg_name']])
except ValueError:
errors.append({'status': '400',
'detail': 'Error in argument %s: %s' % (arg['arg_name'], retval[arg['arg_name']])})
if errors:
raise ApplicationException({'errors': errors}, 400)
return retval
@classmethod
def vmaas_call(cls, endpoint, data):
"""Calls vmaas and retrieves data from it"""
headers = {'Content-type': 'application/json',
'Accept': 'application/json'}
try:
response = requests.post(VMAAS_HOST + endpoint,
data=json.dumps(data), headers=headers)
except requests.exceptions.ConnectionError:
            LOGGER.error('Could not connect to %s', VMAAS_HOST)
raise ApplicationException('Could not connect to %s' % (VMAAS_HOST,), 500)
if response.status_code == 200:
return response.json()
LOGGER.error('Received %s from vmaas on %s endpoint', response.status_code, endpoint)
raise ApplicationException('Received %s from vmaas on %s endpoint' %
(response.status_code, VMAAS_HOST + endpoint), response.status_code)
class GetRequest(Request):
"""general class for processing GET requests"""
@classmethod
def get(cls, **kwargs):
"""Answer GET request"""
REQUEST_COUNTS.labels('get', cls._endpoint_name).inc()
try:
return cls.handle_get(**kwargs)
except ApplicationException as exc:
return exc.format_exception()
except InvalidArgumentException as exc:
return cls.format_exception(str(exc), 400)
except Exception: # pylint: disable=broad-except
LOGGER.exception('Unhandled exception: ')
return cls.format_exception('Internal server error', 500)
@classmethod
def handle_get(cls, **kwargs):
"""To be implemented in child classes"""
raise NotImplementedError
class PatchRequest(Request):
"""general class for processing PATCH requests"""
@classmethod
def patch(cls, **kwargs):
"""Answer PATCH request"""
REQUEST_COUNTS.labels('patch', cls._endpoint_name).inc()
try:
cls._check_read_only_mode()
return cls.handle_patch(**kwargs)
except ApplicationException as exc:
return exc.format_exception()
except InvalidArgumentException as exc:
return cls.format_exception(str(exc), 400)
except ReadOnlyModeException as exc:
return cls.format_exception(str(exc), 503)
except Exception: # pylint: disable=broad-except
LOGGER.exception('Unhandled exception: ')
return cls.format_exception('Internal server error', 500)
@classmethod
def handle_patch(cls, **kwargs):
"""To be implemented in child classes"""
raise NotImplementedError
class PostRequest(Request):
"""general class for processing POST requests"""
@classmethod
def post(cls, **kwargs):
"""Answer POST request"""
REQUEST_COUNTS.labels('post', cls._endpoint_name).inc()
try:
cls._check_read_only_mode()
return cls.handle_post(**kwargs)
except ApplicationException as exc:
return exc.format_exception()
except InvalidArgumentException as exc:
return cls.format_exception(str(exc), 400)
except ReadOnlyModeException as exc:
return cls.format_exception(str(exc), 503)
except Exception: # pylint: disable=broad-except
LOGGER.exception('Unhandled exception: ')
            return cls.format_exception('Internal server error', 500)
@classmethod
def handle_post(cls, **kwargs):
"""To be implemented in child classes"""
raise NotImplementedError
def parse_int_list(input_str):
"""Function to parse string with ints to list, e.g. '1,2,3' -> [1,2,3]"""
return [int(part) for part in input_str.split(",")]
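As an illustration of the identity handling in `auth()` above, here is a sketch of constructing an `x-rh-identity` header value the function would accept; the account number and entitlement payload are invented for the example:

```python
import base64
import json

# Invented payload in the shape auth() expects: an "identity" block carrying an
# account number, plus an "entitlements" block carrying smart_management.
payload = {
    "identity": {"account_number": "123456"},
    "entitlements": {"smart_management": {"is_entitled": True}},
}
x_rh_identity = base64.b64encode(json.dumps(payload).encode("utf-8")).decode("utf-8")
# auth(x_rh_identity) should return {'uid': '123456'}; with is_entitled set to
# False it raises MissingEntitlementException instead.
```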
plot_inv_1_dcr_sounding.py
# -*- coding: utf-8 -*-
"""
Least-Squares 1D Inversion of Sounding Data
===========================================
Here we use the module *SimPEG.electromagnetics.static.resistivity* to invert
DC resistivity sounding data and recover a 1D electrical resistivity model.
In this tutorial, we focus on the following:
- How to define sources and receivers from a survey file
- How to define the survey
- 1D inversion of DC resistivity data
For this tutorial, we will invert sounding data collected over a layered Earth using
a Wenner array. The end product is a layered Earth model which explains the data.
"""
#########################################################################
# Import modules
# --------------
#
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import tarfile
from discretize import TensorMesh
from SimPEG import (
maps,
data,
data_misfit,
regularization,
optimization,
inverse_problem,
inversion,
directives,
utils,
)
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG.utils import plot_1d_layer_model
mpl.rcParams.update({"font.size": 16})
# sphinx_gallery_thumbnail_number = 2
#############################################
# Define File Names
# -----------------
#
# Here we provide the file paths to assets we need to run the inversion. The
# path to the true model is also provided for comparison with the inversion
# results. These files are stored as a tar-file on our google cloud bucket:
# "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz"
#
# storage bucket where we have the data
data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz"
# download the data
downloaded_data = utils.download(data_source, overwrite=True)
# unzip the tarfile
tar = tarfile.open(downloaded_data, "r")
tar.extractall()
tar.close()
# path to the directory containing our data
dir_path = downloaded_data.split(".")[0] + os.path.sep
# files to work with
data_filename = dir_path + "app_res_1d_data.dobs"
#############################################
# Load Data, Define Survey and Plot
# ---------------------------------
#
# Here we load the observed data, define the DC survey geometry and plot the
# data values.
#
# Load data
dobs = np.loadtxt(str(data_filename))
# Extract source and receiver electrode locations and the observed data
A_electrodes = dobs[:, 0:3]
B_electrodes = dobs[:, 3:6]
M_electrodes = dobs[:, 6:9]
N_electrodes = dobs[:, 9:12]
dobs = dobs[:, -1]
# Define survey
unique_tx, k = np.unique(np.c_[A_electrodes, B_electrodes], axis=0, return_index=True)
n_sources = len(k)
k = np.sort(k)
k = np.r_[k, len(k) + 1]
source_list = []
for ii in range(0, n_sources):
    # MN electrode locations for receivers. Each is an (N, 3) numpy array
    M_locations = M_electrodes[k[ii] : k[ii + 1], :]
    N_locations = N_electrodes[k[ii] : k[ii + 1], :]
    receiver_list = [
        dc.receivers.Dipole(
            M_locations,
            N_locations,
            data_type="apparent_resistivity",
        )
    ]
    # AB electrode locations for source. Each is a (1, 3) numpy array
    A_location = A_electrodes[k[ii], :]
    B_location = B_electrodes[k[ii], :]
    source_list.append(dc.sources.Dipole(receiver_list, A_location, B_location))
# Define survey
survey = dc.Survey(source_list)
# Plot apparent resistivities on sounding curve as a function of Wenner separation
# parameter.
electrode_separations = 0.5 * np.sqrt(
np.sum((survey.locations_a - survey.locations_b) ** 2, axis=1)
)
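# For a Wenner array the four electrodes are equally spaced by a, so AB = 3a and
# the separation parameter above equals AB/2 = 1.5a; the measured apparent
# resistivity is rho_a = 2*pi*a*(dV/I) (stated here for reference only).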
fig = plt.figure(figsize=(11, 5))
mpl.rcParams.update({"font.size": 14})
ax1 = fig.add_axes([0.15, 0.1, 0.7, 0.85])
ax1.semilogy(electrode_separations, dobs, "b")
ax1.set_xlabel("AB/2 (m)")
ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)")
plt.show()
###############################################
# Assign Uncertainties
# --------------------
#
# Inversion with SimPEG requires that we define the standard deviation of our data.
# This represents our estimate of the noise in our data. For DC sounding data,
# a relative error is applied to each datum. For this tutorial, the relative
# error on each datum will be 2%.
std = 0.02 * np.abs(dobs)
###############################################
# Define Data
# --------------------
#
# Here is where we define the data that are inverted. The data are defined by
# the survey, the observation values and the standard deviation.
#
data_object = data.Data(survey, dobs=dobs, standard_deviation=std)
###############################################
# Defining a 1D Layered Earth (1D Tensor Mesh)
# --------------------------------------------
#
# Here, we define the layer thicknesses for our 1D simulation. To do this, we use
# the TensorMesh class.
#
# Define layer thicknesses
layer_thicknesses = 5 * np.logspace(0, 1, 25)
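# These are 25 layer thicknesses growing logarithmically from 5 m to 50 m; the
# mesh below appends the last thickness once more so there is also a cell for
# the deepest layer.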
# Define a mesh for plotting and regularization.
mesh = TensorMesh([(np.r_[layer_thicknesses, layer_thicknesses[-1]])], "0")
print(mesh)
###############################################################
# Define a Starting and Reference Model
# -------------------------------------
#
# Here, we create starting and/or reference models for the inversion as
# well as the mapping from the model space to the active cells. Starting and
# reference models can be a constant background value or contain a-priori
# structures. Here, the starting model is log(1000) Ohm meters.
#
# Define log-resistivity values for each layer since our model is the
# log-resistivity. Don't make the values 0!
# Otherwise the gradient for the 1st iteration is zero and the inversion will
# not converge.
# Define model. A resistivity (Ohm meters) or conductivity (S/m) for each layer.
starting_model = np.log(2e2 * np.ones((len(layer_thicknesses) + 1)))
# Define mapping from model to active cells.
model_map = maps.IdentityMap(nP=len(starting_model)) * maps.ExpMap()
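# The model vector holds log-resistivities; in the composition above ExpMap is
# applied first, exponentiating the model back to resistivities (and guaranteeing
# positive values) before IdentityMap passes them to the simulation.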
#######################################################################
# Define the Physics
# ------------------
#
# Here we define the physics of the problem using the Simulation1DLayers class.
#
simulation = dc.simulation_1d.Simulation1DLayers(
survey=survey,
rhoMap=model_map,
thicknesses=layer_thicknesses,
)
#######################################################################
# Define Inverse Problem
# ----------------------
#
# The inverse problem is defined by 3 things:
#
# 1) Data Misfit: a measure of how well our recovered model explains the field data
# 2) Regularization: constraints placed on the recovered model and a priori information
# 3) Optimization: the numerical approach used to solve the inverse problem
#
#
# Define the data misfit. Here the data misfit is the L2 norm of the weighted
# residual between the observed data and the data predicted for a given model.
# Within the data misfit, the residual between predicted and observed data are
# normalized by the data's standard deviation.
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)
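# i.e. phi_d is, up to a constant factor, || W_d (d_pred(m) - d_obs) ||^2 with
# W_d = diag(1 / standard_deviation).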
# Define the regularization (model objective function)
reg = regularization.WeightedLeastSquares(
mesh, alpha_s=1.0, alpha_x=1.0, reference_model=starting_model
)
# Define how the optimization problem is solved. Here we will use an inexact
# Gauss-Newton approach that employs the conjugate gradient solver.
opt = optimization.InexactGaussNewton(maxIter=30, maxIterCG=20)
# Define the inverse problem
inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)
#######################################################################
# Define Inversion Directives
# ---------------------------
#
# Here we define any directives that are carried out during the inversion. This
# includes the cooling schedule for the trade-off parameter (beta), stopping
# criteria for the inversion and saving inversion results at each iteration.
#
# Defining a starting value for the trade-off parameter (beta) between the data
# misfit and the regularization.
starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
# Set the rate of reduction in the trade-off parameter (beta) each time the
# inverse problem is solved, and set the number of Gauss-Newton iterations
# for each trade-off parameter value.
beta_schedule = directives.BetaSchedule(coolingFactor=5.0, coolingRate=3.0)
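# With coolingFactor=5.0 and coolingRate=3.0, beta is divided by 5 after every
# 3 iterations, e.g. beta0, beta0, beta0, beta0/5, beta0/5, beta0/5, beta0/25, ...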
# Apply and update sensitivity weighting as the model updates
update_sensitivity_weights = directives.UpdateSensitivityWeights()
# Options for outputting recovered models and predicted data for each beta.
save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
# Setting a stopping criteria for the inversion.
target_misfit = directives.TargetMisfit(chifact=1)
# The directives are defined as a list.
directives_list = [
update_sensitivity_weights,
starting_beta,
beta_schedule,
save_iteration,
target_misfit,
]
#####################################################################
# Running the Inversion
# ---------------------
#
# To define the inversion object, we need to define the inversion problem and
# the set of directives. We can then run the inversion.
#
# Here we combine the inverse problem and the set of directives
inv = inversion.BaseInversion(inv_prob, directives_list)
# Run the inversion
recovered_model = inv.run(starting_model)
############################################################
# Examining the Results
# ---------------------
#
# Define true model and layer thicknesses
true_model = np.r_[1e3, 4e3, 2e2]
true_layers = np.r_[100.0, 100.0]
# Plot true model and recovered model
fig = plt.figure(figsize=(6, 4))
x_min = np.min([np.min(model_map * recovered_model), np.min(true_model)])
x_max = np.max([np.max(model_map * recovered_model), np.max(true_model)])
ax1 = fig.add_axes([0.2, 0.15, 0.7, 0.7])
plot_1d_layer_model(true_layers, true_model, ax=ax1, plot_elevation=True, color="b")
plot_1d_layer_model(
layer_thicknesses,
model_map * recovered_model,
ax=ax1,
plot_elevation=True,
color="r",
)
ax1.set_xlabel(r"Resistivity ($\Omega m$)")
ax1.set_xlim(0.9 * x_min, 1.1 * x_max)
ax1.legend(["True Model", "Recovered Model"])
# Plot the true and apparent resistivities on a sounding curve
fig = plt.figure(figsize=(11, 5))
ax1 = fig.add_axes([0.2, 0.1, 0.6, 0.8])
ax1.semilogy(electrode_separations, dobs, "b")
ax1.semilogy(electrode_separations, inv_prob.dpred, "r")
ax1.set_xlabel("AB/2 (m)")
ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)")
ax1.legend(["True Sounding Curve", "Predicted Sounding Curve"])
plt.show()
plot_inv_1_dcr_sounding.py
# -*- coding: utf-8 -*-
"""
Least-Squares 1D Inversion of Sounding Data
===========================================
Here we use the module *SimPEG.electromangetics.static.resistivity* to invert
DC resistivity sounding data and recover a 1D electrical resistivity model.
In this tutorial, we focus on the following:
- How to define sources and receivers from a survey file
- How to define the survey
- 1D inversion of DC resistivity data
For this tutorial, we will invert sounding data collected over a layered Earth using
a Wenner array. The end product is layered Earth model which explains the data.
"""
#########################################################################
# Import modules
# --------------
#
import os
import numpy as np |
from discretize import TensorMesh
from SimPEG import (
maps,
data,
data_misfit,
regularization,
optimization,
inverse_problem,
inversion,
directives,
utils,
)
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG.utils import plot_1d_layer_model
mpl.rcParams.update({"font.size": 16})
# sphinx_gallery_thumbnail_number = 2
#############################################
# Define File Names
# -----------------
#
# Here we provide the file paths to assets we need to run the inversion. The
# Path to the true model is also provided for comparison with the inversion
# results. These files are stored as a tar-file on our google cloud bucket:
# "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz"
#
# storage bucket where we have the data
data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz"
# download the data
downloaded_data = utils.download(data_source, overwrite=True)
# unzip the tarfile
tar = tarfile.open(downloaded_data, "r")
tar.extractall()
tar.close()
# path to the directory containing our data
dir_path = downloaded_data.split(".")[0] + os.path.sep
# files to work with
data_filename = dir_path + "app_res_1d_data.dobs"
#############################################
# Load Data, Define Survey and Plot
# ---------------------------------
#
# Here we load the observed data, define the DC survey geometry and plot the
# data values.
#
# Load data
dobs = np.loadtxt(str(data_filename))
# Extract source and receiver electrode locations and the observed data
A_electrodes = dobs[:, 0:3]
B_electrodes = dobs[:, 3:6]
M_electrodes = dobs[:, 6:9]
N_electrodes = dobs[:, 9:12]
dobs = dobs[:, -1]
# Define survey
unique_tx, k = np.unique(np.c_[A_electrodes, B_electrodes], axis=0, return_index=True)
n_sources = len(k)
k = np.sort(k)
k = np.r_[k, len(k) + 1]
source_list = []
for ii in range(0, n_sources):
# MN electrode locations for receivers. Each is an (N, 3) numpy array
M_locations = M_electrodes[k[ii] : k[ii + 1], :]
N_locations = N_electrodes[k[ii] : k[ii + 1], :]
receiver_list = [
dc.receivers.Dipole(
M_locations,
N_locations,
data_type="apparent_resistivity",
)
]
# AB electrode locations for source. Each is a (1, 3) numpy array
A_location = A_electrodes[k[ii], :]
B_location = B_electrodes[k[ii], :]
source_list.append(dc.sources.Dipole(receiver_list, A_location, B_location))
# Define survey
survey = dc.Survey(source_list)
# Plot apparent resistivities on sounding curve as a function of Wenner separation
# parameter.
electrode_separations = 0.5 * np.sqrt(
np.sum((survey.locations_a - survey.locations_b) ** 2, axis=1)
)
fig = plt.figure(figsize=(11, 5))
mpl.rcParams.update({"font.size": 14})
ax1 = fig.add_axes([0.15, 0.1, 0.7, 0.85])
ax1.semilogy(electrode_separations, dobs, "b")
ax1.set_xlabel("AB/2 (m)")
ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)")
plt.show()
###############################################
# Assign Uncertainties
# --------------------
#
# Inversion with SimPEG requires that we define standard deviation on our data.
# This represents our estimate of the noise in our data. For DC sounding data,
# a relative error is applied to each datum. For this tutorial, the relative
# error on each datum will be 2%.
std = 0.02 * np.abs(dobs)
###############################################
# Define Data
# --------------------
#
# Here is where we define the data that are inverted. The data are defined by
# the survey, the observation values and the standard deviation.
#
data_object = data.Data(survey, dobs=dobs, standard_deviation=std)
###############################################
# Defining a 1D Layered Earth (1D Tensor Mesh)
# --------------------------------------------
#
# Here, we define the layer thicknesses for our 1D simulation. To do this, we use
# the TensorMesh class.
#
# Define layer thicknesses
layer_thicknesses = 5 * np.logspace(0, 1, 25)
# Define a mesh for plotting and regularization.
mesh = TensorMesh([(np.r_[layer_thicknesses, layer_thicknesses[-1]])], "0")
print(mesh)
###############################################################
# Define a Starting and Reference Model
# -------------------------------------
#
# Here, we create starting and/or reference models for the inversion as
# well as the mapping from the model space to the active cells. Starting and
# reference models can be a constant background value or contain a-priori
# structures. Here, the starting model is log(1000) Ohm meters.
#
# Define log-resistivity values for each layer since our model is the
# log-resistivity. Don't make the values 0!
# Otherwise the gradient for the 1st iteration is zero and the inversion will
# not converge.
# Define model. A resistivity (Ohm meters) or conductivity (S/m) for each layer.
starting_model = np.log(2e2 * np.ones((len(layer_thicknesses) + 1)))
# Define mapping from model to active cells.
model_map = maps.IdentityMap(nP=len(starting_model)) * maps.ExpMap()
#######################################################################
# Define the Physics
# ------------------
#
# Here we define the physics of the problem using the Simulation1DLayers class.
#
simulation = dc.simulation_1d.Simulation1DLayers(
survey=survey,
rhoMap=model_map,
thicknesses=layer_thicknesses,
)
#######################################################################
# Define Inverse Problem
# ----------------------
#
# The inverse problem is defined by 3 things:
#
# 1) Data Misfit: a measure of how well our recovered model explains the field data
# 2) Regularization: constraints placed on the recovered model and a priori information
# 3) Optimization: the numerical approach used to solve the inverse problem
#
#
# Define the data misfit. Here the data misfit is the L2 norm of the weighted
# residual between the observed data and the data predicted for a given model.
# Within the data misfit, the residual between predicted and observed data are
# normalized by the data's standard deviation.
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)
# Define the regularization (model objective function)
reg = regularization.WeightedLeastSquares(
mesh, alpha_s=1.0, alpha_x=1.0, reference_model=starting_model
)
# Define how the optimization problem is solved. Here we will use an inexact
# Gauss-Newton approach that employs the conjugate gradient solver.
opt = optimization.InexactGaussNewton(maxIter=30, maxIterCG=20)
# Define the inverse problem
inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)
#######################################################################
# Define Inversion Directives
# ---------------------------
#
# Here we define any directives that are carried out during the inversion. This
# includes the cooling schedule for the trade-off parameter (beta), stopping
# criteria for the inversion and saving inversion results at each iteration.
#
# Defining a starting value for the trade-off parameter (beta) between the data
# misfit and the regularization.
starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
# Set the rate of reduction in trade-off parameter (beta) each time the
# the inverse problem is solved. And set the number of Gauss-Newton iterations
# for each trade-off paramter value.
beta_schedule = directives.BetaSchedule(coolingFactor=5.0, coolingRate=3.0)
# Apply and update sensitivity weighting as the model updates
update_sensitivity_weights = directives.UpdateSensitivityWeights()
# Options for outputting recovered models and predicted data for each beta.
save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
# Setting a stopping criteria for the inversion.
target_misfit = directives.TargetMisfit(chifact=1)
# The directives are defined as a list.
directives_list = [
update_sensitivity_weights,
starting_beta,
beta_schedule,
save_iteration,
target_misfit,
]
#####################################################################
# Running the Inversion
# ---------------------
#
# To define the inversion object, we need to define the inversion problem and
# the set of directives. We can then run the inversion.
#
# Here we combine the inverse problem and the set of directives
inv = inversion.BaseInversion(inv_prob, directives_list)
# Run the inversion
recovered_model = inv.run(starting_model)
############################################################
# Examining the Results
# ---------------------
#
# Define true model and layer thicknesses
true_model = np.r_[1e3, 4e3, 2e2]
true_layers = np.r_[100.0, 100.0]
# Plot true model and recovered model
fig = plt.figure(figsize=(6, 4))
x_min = np.min([np.min(model_map * recovered_model), np.min(true_model)])
x_max = np.max([np.max(model_map * recovered_model), np.max(true_model)])
ax1 = fig.add_axes([0.2, 0.15, 0.7, 0.7])
plot_1d_layer_model(true_layers, true_model, ax=ax1, plot_elevation=True, color="b")
plot_1d_layer_model(
layer_thicknesses,
model_map * recovered_model,
ax=ax1,
plot_elevation=True,
color="r",
)
ax1.set_xlabel(r"Resistivity ($\Omega m$)")
ax1.set_xlim(0.9 * x_min, 1.1 * x_max)
ax1.legend(["True Model", "Recovered Model"])
# Plot the true and apparent resistivities on a sounding curve
fig = plt.figure(figsize=(11, 5))
ax1 = fig.add_axes([0.2, 0.1, 0.6, 0.8])
ax1.semilogy(electrode_separations, dobs, "b")
ax1.semilogy(electrode_separations, inv_prob.dpred, "r")
ax1.set_xlabel("AB/2 (m)")
ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)")
ax1.legend(["True Sounding Curve", "Predicted Sounding Curve"])
plt.show() | import matplotlib as mpl
import matplotlib.pyplot as plt
import tarfile | random_line_split |
ListBlock.js | "use strict";
const h = require('react-hyperscript')
, R = require('ramda')
, React = require('react')
, natsort = require('natsort')
, { Flex, Box, Text, Select } = require('periodo-ui')
, { colors } = require('periodo-ui').theme
, { Button, DropdownMenu, DropdownMenuItem, Link } = require('periodo-ui')
, Icon = require('react-geomicons').default
, StreamConsumingBlock = require('./StreamConsumingBlock')
, concat = [].concat.bind([])
const ListHeader = ({
start,
shownItems,
hide,
items,
limit,
columns,
shownColumns,
prevPage,
nextPage,
firstPage,
lastPage,
updateOpts,
}) =>
h(Flex, {
bg: 'gray.1',
p: 1,
alignItems: 'center',
justifyContent: 'space-between',
}, [
h(Box, {
textAlign: 'left',
flex: '1 1 auto',
}, [
!hide && h(Text, {
mx: 2,
}, `${start + 1}‒${start + shownItems.length} of ${items.length}`),
]),
h(Flex, {
justifyContent: 'center',
flex: '1 1 auto',
}, [
h(Button, {
borderRadius: 0,
disabled: start === 0,
onClick: firstPage,
}, h(Icon, {
onMouseDown: e => {
if (start === 0) {
e.stopPropagation();
e.preventDefault();
}
},
name: 'previous',
color: 'black',
})),
h(Button, {
borderRadius: 0,
disabled: start === 0,
onClick: prevPage,
}, h(Icon, {
onMouseDown: e => {
if (start === 0) {
e.stopPropagation();
e.preventDefault();
}
},
name: 'triangleLeft',
color: 'black',
})),
h(Select, {
bg: '#fafafa',
value: limit,
minWidth: '60px',
onChange: e => {
updateOpts(R.set(
R.lensProp('limit'),
e.target.value
))
}
}, [10, 25, 50, 100, 250].map(n =>
h('option', { key: n, value: n, }, n),
)),
h(Button, {
borderRadius: 0,
disabled: start + shownItems.length >= items.length,
onClick: nextPage,
}, h(Icon, {
onMouseDown: e => {
if (start + shownItems.length >= items.length) {
e.stopPropagation();
e.preventDefault();
}
},
name: 'triangleRight',
color: 'black',
})),
h(Button, {
borderRadius: 0,
disabled: start + shownItems.length >= items.length,
onClick: lastPage,
}, h(Icon, {
onMouseDown: e => {
if (start + shownItems.length >= items.length) {
e.stopPropagation();
e.preventDefault();
}
},
name: 'next',
color: 'black',
})),
]),
h(Box, {
textAlign: 'right',
flex: '1 1 auto',
}, [
h(DropdownMenu, {
closeOnSelection: false,
openLeft: true,
label: 'Columns',
}, Object.keys(columns).map(key =>
h(DropdownMenuItem, {
key,
textAlign: 'left',
}, [
h('input', {
type: 'checkbox',
checked: shownColumns.includes(key),
onChange: () => {
updateOpts(opts =>
R.over(
R.lensProp('shownColumns'),
(shownColumns.includes(key) ? R.without : R.flip(R.union))([key]),
opts
)
)
}
}),
columns[key].label,
])
))
]),
])
function DefaultRowNumbering({ number }) {
return h(Box, {
px: 1,
css: {
color: '#999',
display: 'inline-block',
fontSize: '12px',
lineHeight: '24px',
width: '5ch',
}
}, number)
}
function LinkedRowNumbering(props) {
return (
h(Link, {
px: 1,
css: {
display: 'inline-block',
fontSize: '12px',
lineHeight: '24px',
width: '5ch',
':hover': {
textDecoration: 'none',
backgroundColor: colors.blue5,
color: 'white',
}
},
route: props.makeItemRoute(props)
}, props.number)
)
}
module.exports = function makeList(opts) {
const { label, description, defaultOpts={}, transducer, columns, makeItemRoute } = opts
const withDefaults = obj => Object.assign({
start: 0,
limit: 25,
shownColumns: Object.keys(columns),
}, defaultOpts, obj)
const RowNumbering = makeItemRoute ? LinkedRowNumbering : DefaultRowNumbering
const next = (prev, items, props={}) => {
let ret = R.transduce(
transducer || R.map(R.identity),
concat,
prev || [],
items
)
if (props && props.sortBy) {
const col = columns[props.sortBy]
if (col) {
const sorter = natsort({
insensitive: true,
desc: props.sortDirection === 'desc',
})
ret = ret.sort((a, b) => {
const [_a, _b] = [a, b].map(col.getValue)
if (_a == null) return 1
if (_b == null) return -1
return sorter(_a, _b)
})
}
}
return ret
}
class List extends React.Component {
constructor(props) {
super(props);
this.state = {
start: withDefaults(props).start,
}
this.firstPage = this.firstPage.bind(this);
this.lastPage = this.lastPage.bind(this);
this.nextPage = this.nextPage.bind(this);
this.prevPage = this.prevPage.bind(this);
}
componentWillReceiveProps(nextProps) {
const updateSort = (
nextProps.sortBy !== this.props.sortBy ||
nextProps.sortDirection !== this.props.sortDirection
)
if (updateSort) {
this.props.updateData(data => next(data, [], nextProps))
}
}
firstPage() {
this.setState({ start: 0 })
}
lastPage() {
const { data, limit } = this.props
let start = 0
while (start * limit < data.length) start++
start--
start = start * limit;
this.setState({ start })
}
nextPage() {
const { data } = this.props
, limit = parseInt(this.props.limit)
this.setState(prev => {
let start = prev.start + limit
if (start >= data.length) start = prev.start;
return { start } |
prevPage() {
const limit = parseInt(this.props.limit)
this.setState(prev => {
let start = prev.start - limit
if (start < 0) start = 0;
return { start }
})
}
render() {
const items = this.props.data
, { shownColumns, sortBy, sortDirection, updateOpts } = this.props
, limit = parseInt(this.props.limit)
, { start } = this.state
, shownItems = items.slice(start, start + limit)
, hide = shownItems.length === 0
return (
h(Box, {
tabIndex: 0,
onKeyDown: e => {
if (e.key === 'ArrowLeft') this.prevPage();
if (e.key === 'ArrowRight') this.nextPage();
}
}, [
h(ListHeader, Object.assign({
hide,
items,
limit,
shownItems,
prevPage: this.prevPage,
nextPage: this.nextPage,
firstPage: this.firstPage,
lastPage: this.lastPage,
columns,
shownColumns,
}, this.props, this.state)),
h(Box, {
is: 'table',
css: {
width: '100%',
borderCollapse: 'collapse',
}
}, [
h(Box, {
is: 'thead',
mb: 1,
}, [
h(Box, {
is: 'tr',
bg: 'gray.1',
textAlign: 'left',
}, [
h(Box, {
is: 'th',
key: 'first',
p: 1,
})
].concat(shownColumns.map(n =>
h(Box, {
is: 'th',
key: n,
p: 1,
onClick: () => {
updateOpts((opts={}) => Object.assign(
{},
opts,
{
sortBy: n,
sortDirection: opts.sortBy === n
? (!opts.sortDirection || opts.sortDirection === 'asc') ? 'desc' : 'asc'
: 'asc'
}
))
}
}, [
columns[n].label,
n === sortBy && (
sortDirection === 'desc' ? '▲' : '▼'
)
])
)))
]),
h('tbody',
shownItems.map(
(item, i) => h(Box, {
is: 'tr',
key: item.id,
m: 0,
css: {
height: '24px',
':hover': {
backgroundColor: '#e4e2e0',
}
}
}, [
h(Box, {
is: 'td',
key: '_numbering',
p: 0,
css: {
width: '.1%',
whiteSpace: 'nowrap',
}
}, h(RowNumbering, Object.assign({}, this.props, {
item,
number: i + 1 + start,
makeItemRoute,
})))
].concat(R.values(R.pick(shownColumns, columns)).map(
col =>
h(Box, {
is: 'td',
key: col.label,
px: 1,
py: 0,
css: {
}
}, (col.render || R.identity)(col.getValue(item, this.props.backend)))
))
)
)
)
]),
hide && (
h(Text, {
align: 'center',
fontSize: 4,
p: 2,
}, 'No items to display')
),
])
)
}
}
return {
label,
description,
processOpts: withDefaults,
defaultOpts,
Component: StreamConsumingBlock(next, Infinity)(List)
}
} | })
} | random_line_split |
ListBlock.js | "use strict";
const h = require('react-hyperscript')
, R = require('ramda')
, React = require('react')
, natsort = require('natsort')
, { Flex, Box, Text, Select } = require('periodo-ui')
, { colors } = require('periodo-ui').theme
, { Button, DropdownMenu, DropdownMenuItem, Link } = require('periodo-ui')
, Icon = require('react-geomicons').default
, StreamConsumingBlock = require('./StreamConsumingBlock')
, concat = [].concat.bind([])
const ListHeader = ({
start,
shownItems,
hide,
items,
limit,
columns,
shownColumns,
prevPage,
nextPage,
firstPage,
lastPage,
updateOpts,
}) =>
h(Flex, {
bg: 'gray.1',
p: 1,
alignItems: 'center',
justifyContent: 'space-between',
}, [
h(Box, {
textAlign: 'left',
flex: '1 1 auto',
}, [
!hide && h(Text, {
mx: 2,
}, `${start + 1}‒${start + shownItems.length} of ${items.length}`),
]),
h(Flex, {
justifyContent: 'center',
flex: '1 1 auto',
}, [
h(Button, {
borderRadius: 0,
disabled: start === 0,
onClick: firstPage,
}, h(Icon, {
onMouseDown: e => {
if (start === 0) {
e.stopPropagation();
e.preventDefault();
}
},
name: 'previous',
color: 'black',
})),
h(Button, {
borderRadius: 0,
disabled: start === 0,
onClick: prevPage,
}, h(Icon, {
onMouseDown: e => {
if (start === 0) {
e.stopPropagation();
e.preventDefault();
}
},
name: 'triangleLeft',
color: 'black',
})),
h(Select, {
bg: '#fafafa',
value: limit,
minWidth: '60px',
onChange: e => {
updateOpts(R.set(
R.lensProp('limit'),
e.target.value
))
}
}, [10, 25, 50, 100, 250].map(n =>
h('option', { key: n, value: n, }, n),
)),
h(Button, {
borderRadius: 0,
disabled: start + shownItems.length >= items.length,
onClick: nextPage,
}, h(Icon, {
onMouseDown: e => {
if (start + shownItems.length >= items.length) {
e.stopPropagation();
e.preventDefault();
}
},
name: 'triangleRight',
color: 'black',
})),
h(Button, {
borderRadius: 0,
disabled: start + shownItems.length >= items.length,
onClick: lastPage,
}, h(Icon, {
onMouseDown: e => {
if (start + shownItems.length >= items.length) {
e.stopPropagation();
e.preventDefault();
}
},
name: 'next',
color: 'black',
})),
]),
h(Box, {
textAlign: 'right',
flex: '1 1 auto',
}, [
h(DropdownMenu, {
closeOnSelection: false,
openLeft: true,
label: 'Columns',
}, Object.keys(columns).map(key =>
h(DropdownMenuItem, {
key,
textAlign: 'left',
}, [
h('input', {
type: 'checkbox',
checked: shownColumns.includes(key),
onChange: () => {
updateOpts(opts =>
R.over(
R.lensProp('shownColumns'),
(shownColumns.includes(key) ? R.without : R.flip(R.union))([key]),
opts
)
)
}
}),
columns[key].label,
])
))
]),
])
function DefaultRowNumbering({ number }) {
return h(Box, {
px: 1,
css: {
color: '#999',
display: 'inline-block',
fontSize: '12px',
lineHeight: '24px',
width: '5ch',
}
}, number)
}
function LinkedRowNumbering(props) {
return (
h(Link, {
px: 1,
css: {
display: 'inline-block',
fontSize: '12px',
lineHeight: '24px',
width: '5ch',
':hover': {
textDecoration: 'none',
backgroundColor: colors.blue5,
color: 'white',
}
},
route: props.makeItemRoute(props)
}, props.number)
)
}
module.exports = function makeList(opts) {
const { label, description, defaultOpts={}, transducer, columns, makeItemRoute } = opts
const withDefaults = obj => Object.assign({
start: 0,
limit: 25,
shownColumns: Object.keys(columns),
}, defaultOpts, obj)
const RowNumbering = makeItemRoute ? LinkedRowNumbering : DefaultRowNumbering
const next = (prev, items, props={}) => {
let ret = R.transduce(
transducer || R.map(R.identity),
concat,
prev || [],
items
)
if (props && props.sortBy) {
const col = columns[props.sortBy]
if (col) {
const sorter = natsort({
insensitive: true,
desc: props.sortDirection === 'desc',
})
ret = ret.sort((a, b) => {
const [_a, _b] = [a, b].map(col.getValue)
if (_a == null) return 1
if (_b == null) return -1
return sorter(_a, _b)
})
}
}
return ret
}
class List extends React.Component {
co | rops) {
super(props);
this.state = {
start: withDefaults(props).start,
}
this.firstPage = this.firstPage.bind(this);
this.lastPage = this.lastPage.bind(this);
this.nextPage = this.nextPage.bind(this);
this.prevPage = this.prevPage.bind(this);
}
componentWillReceiveProps(nextProps) {
const updateSort = (
nextProps.sortBy !== this.props.sortBy ||
nextProps.sortDirection !== this.props.sortDirection
)
if (updateSort) {
this.props.updateData(data => next(data, [], nextProps))
}
}
firstPage() {
this.setState({ start: 0 })
}
lastPage() {
const { data, limit } = this.props
let start = 0
while (start * limit < data.length) start++
start--
start = start * limit;
this.setState({ start })
}
nextPage() {
const { data } = this.props
, limit = parseInt(this.props.limit)
this.setState(prev => {
let start = prev.start + limit
if (start >= data.length) start = prev.start;
return { start }
})
}
prevPage() {
const limit = parseInt(this.props.limit)
this.setState(prev => {
let start = prev.start - limit
if (start < 0) start = 0;
return { start }
})
}
render() {
const items = this.props.data
, { shownColumns, sortBy, sortDirection, updateOpts } = this.props
, limit = parseInt(this.props.limit)
, { start } = this.state
, shownItems = items.slice(start, start + limit)
, hide = shownItems.length === 0
return (
h(Box, {
tabIndex: 0,
onKeyDown: e => {
if (e.key === 'ArrowLeft') this.prevPage();
if (e.key === 'ArrowRight') this.nextPage();
}
}, [
h(ListHeader, Object.assign({
hide,
items,
limit,
shownItems,
prevPage: this.prevPage,
nextPage: this.nextPage,
firstPage: this.firstPage,
lastPage: this.lastPage,
columns,
shownColumns,
}, this.props, this.state)),
h(Box, {
is: 'table',
css: {
width: '100%',
borderCollapse: 'collapse',
}
}, [
h(Box, {
is: 'thead',
mb: 1,
}, [
h(Box, {
is: 'tr',
bg: 'gray.1',
textAlign: 'left',
}, [
h(Box, {
is: 'th',
key: 'first',
p: 1,
})
].concat(shownColumns.map(n =>
h(Box, {
is: 'th',
key: n,
p: 1,
onClick: () => {
updateOpts((opts={}) => Object.assign(
{},
opts,
{
sortBy: n,
sortDirection: opts.sortBy === n
? (!opts.sortDirection || opts.sortDirection === 'asc') ? 'desc' : 'asc'
: 'asc'
}
))
}
}, [
columns[n].label,
n === sortBy && (
sortDirection === 'desc' ? '▲' : '▼'
)
])
)))
]),
h('tbody',
shownItems.map(
(item, i) => h(Box, {
is: 'tr',
key: item.id,
m: 0,
css: {
height: '24px',
':hover': {
backgroundColor: '#e4e2e0',
}
}
}, [
h(Box, {
is: 'td',
key: '_numbering',
p: 0,
css: {
width: '.1%',
whiteSpace: 'nowrap',
}
}, h(RowNumbering, Object.assign({}, this.props, {
item,
number: i + 1 + start,
makeItemRoute,
})))
].concat(R.values(R.pick(shownColumns, columns)).map(
col =>
h(Box, {
is: 'td',
key: col.label,
px: 1,
py: 0,
css: {
}
}, (col.render || R.identity)(col.getValue(item, this.props.backend)))
))
)
)
)
]),
hide && (
h(Text, {
align: 'center',
fontSize: 4,
p: 2,
}, 'No items to display')
),
])
)
}
}
return {
label,
description,
processOpts: withDefaults,
defaultOpts,
Component: StreamConsumingBlock(next, Infinity)(List)
}
}
| nstructor(p | identifier_name |
main.rs | extern crate sdl2;
extern crate ears;
mod chart;
mod guitarplaythrough;
use std::time::{Duration, Instant};
use sdl2::event::Event;
use sdl2::pixels;
use sdl2::keyboard::Keycode;
use sdl2::gfx::primitives::DrawRenderer;
use ears::{AudioController};
use guitarplaythrough::*;
const SCREEN_WIDTH: u32 = 800;
const SCREEN_HEIGHT: u32 = 600;
enum GameButton {
Green,
Red,
Yellow,
Blue,
Orange,
}
enum GameInputAction {
Quit,
ButtonDown(GameButton),
ButtonUp(GameButton),
Strum,
}
impl GameButton {
fn to_guitar(self: &Self) -> Fret {
match self {
GameButton::Green => Fret::G,
GameButton::Red => Fret::R,
GameButton::Yellow => Fret::Y,
GameButton::Blue => Fret::B,
GameButton::Orange => Fret::O,
}
}
}
impl GameInputAction {
fn to_guitar_action(self: &Self) -> Option<GuitarInputAction> {
match self {
GameInputAction::Quit => None,
GameInputAction::ButtonDown(button) => Some(GuitarInputAction::FretDown(button.to_guitar())),
GameInputAction::ButtonUp(button) => Some(GuitarInputAction::FretUp(button.to_guitar())),
GameInputAction::Strum => Some(GuitarInputAction::Strum),
}
}
}
enum GameInputEffect {
Quit,
GuitarEffect(GuitarGameEffect),
}
fn draw_fret<T: sdl2::render::RenderTarget>(canvas: &sdl2::render::Canvas<T>, enabled: bool, x: i16, y: i16, radius: i16, color: pixels::Color) -> Result<(), String> {
if enabled {
canvas.filled_circle(x, y, radius, color)
} else {
canvas.circle(x, y, radius, color)
}
}
enum FrameLimit {
Vsync,
Cap(u32),
}
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
/* joystick initialization */
let joystick_subsystem = sdl_context.joystick()?;
let available = joystick_subsystem.num_joysticks()
.map_err(|e| format!("can't enumerate joysticks: {}", e))?;
println!("{} joysticks available", available);
// Iterate over all available joysticks and stop once we manage to open one.
let mut joystick = (0..available).find_map(|id| match joystick_subsystem.open(id) {
Ok(c) => {
println!("Success: opened \"{}\"", c.name());
Some(c)
},
Err(e) => {
println!("failed: {:?}", e);
None
},
}).expect("Couldn't open any joystick");
// Print the joystick's power level
println!("\"{}\" power level: {:?}", joystick.name(), joystick.power_level()
.map_err(|e| e.to_string())?);
/* window initialization */
let video_subsys = sdl_context.video()?;
let window = video_subsys.window("bumpit", SCREEN_WIDTH, SCREEN_HEIGHT)
.position_centered()
.opengl()
.build()
.map_err(|e| e.to_string())?;
let mut canvas = window.into_canvas().build().map_err(|e| e.to_string())?;
let mut events = sdl_context.event_pump()?;
let mut playthrough: GuitarPlaythrough = std::fs::read_to_string("Songs/notes.chart")
.map_err(|e| e.to_string())
.and_then(|file| chart::read(file.as_ref())
.map_err(|e| { println!("Error: {:?}", e); return String::from("couldn't parse chart") })) // TODO: error to string
.and_then(|chart| GuitarPlaythrough::new(chart)
.map_err(|s| String::from(s)))?;
fn draw<T: sdl2::render::RenderTarget>(canvas: &mut sdl2::render::Canvas<T>, playthrough: &GuitarPlaythrough, time: f32) {
canvas.set_draw_color(pixels::Color::RGB(0, 0, 0));
canvas.clear();
for i in 0..playthrough.notes_hit {
let _ = draw_fret(&canvas, true, (i as i16) * 10, 10, 5, pixels::Color::RGB(255, 255, 255));
}
let frets = playthrough.frets;
let _ = draw_fret(&canvas, frets[Fret::G as usize], 50, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::R as usize], 150, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 0, 0));
let _ = draw_fret(&canvas, frets[Fret::Y as usize], 250, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::B as usize], 350, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 0, 128));
let _ = draw_fret(&canvas, frets[Fret::O as usize], 450, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(192, 128, 00));
for note in &playthrough.chart.notes {
let position_past_time = playthrough.chart.ticks_to_ms(note.ticks) - time;
let progress_on_screen = position_past_time / 1000f32;
if progress_on_screen > 1f32 || progress_on_screen < 0f32 {
continue;
}
let y = ((1f32 - progress_on_screen) * (SCREEN_HEIGHT as f32)) as i16 - 75;
if note.is_open() {
let _ = canvas.rectangle(50, y - 2, 462, y + 2, pixels::Color::RGB(200, 60, 200));
} else {
note.chord.iter()
.enumerate()
.filter(|(_i, chord_note)| **chord_note)
.for_each(|(note_index, _chord_note)| {
let _ = draw_fret(&canvas, true, 50 + (note_index as i16) * 100, y, 17, pixels::Color::RGB(60, 80, 100));
});
}
}
canvas.present();
};
fn input<'a>(events: &'a mut sdl2::EventPump) -> impl Iterator<Item = Option<GameInputAction>> + 'a {
events.poll_iter()
.map(|event| match event {
Event::Quit {..} => Some(GameInputAction::Quit),
Event::KeyDown { keycode: Some(Keycode::Escape), .. } => Some(GameInputAction::Quit),
Event::KeyDown { keycode : Some(Keycode::Z), .. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::KeyDown { keycode : Some(Keycode::X), .. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::KeyDown { keycode : Some(Keycode::C), .. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::KeyDown { keycode : Some(Keycode::V), .. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::KeyDown { keycode : Some(Keycode::B), .. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::KeyUp { keycode : Some(Keycode::Z), .. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::KeyUp { keycode : Some(Keycode::X), .. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::KeyUp { keycode : Some(Keycode::C), .. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::KeyUp { keycode : Some(Keycode::V), .. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::KeyUp { keycode : Some(Keycode::B), .. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::KeyDown { keycode : Some(Keycode::Space), .. } => Some(GameInputAction::Strum),
Event::JoyButtonDown { button_idx : 0, .. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::JoyButtonDown { button_idx : 1, .. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::JoyButtonDown { button_idx : 3, .. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::JoyButtonDown { button_idx : 2, .. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::JoyButtonDown { button_idx : 4, .. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::JoyButtonUp { button_idx : 0, .. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::JoyButtonUp { button_idx : 1, .. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::JoyButtonUp { button_idx : 3, .. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::JoyButtonUp { button_idx : 2, .. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::JoyButtonUp { button_idx : 4, .. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Up, .. } => Some(GameInputAction::Strum),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Down, .. } => Some(GameInputAction::Strum),
_ => None
})
}
// for power-saving. if Some, the game will sleep for
const FRAME_LIMIT: Option<FrameLimit> = Option::Some(FrameLimit::Cap(120));
// TODO: enable vsync based on frame_limit
// https://wiki.libsdl.org/SDL_GL_SetSwapInterval
// TODO: process inputs more frequently than once per frame?
// avoidable if we have accurate input event timestamps? (+ assumption our processing is short)
// TODO: when frame_limit is FPS cap, do measurements for sleep interval
// that results in that frequency (at runtime)
// and ensure game loop handles huge outliers in sleep wakeup time
let mut music = ears::Sound::new("Songs/song.ogg")?;
music.play();
let mut previous_frame_time = Instant::now();
let mut last_playhead_pos_ms = 0f32;
let mut song_time_ms = 0f32;
let mut run = true;
while run {
// https://www.reddit.com/r/gamedev/comments/13y26t/how_do_rhythm_games_stay_in_sync_with_the_music/c78aawd/
let this_frame_time = Instant::now();
song_time_ms += this_frame_time.duration_since(previous_frame_time).as_millis() as f32;
previous_frame_time = this_frame_time;
let playhead_pos_ms = music.get_offset() * 1000f32;
if playhead_pos_ms != last_playhead_pos_ms {
song_time_ms = (song_time_ms + playhead_pos_ms) / 2f32;
last_playhead_pos_ms = playhead_pos_ms;
}
let effects = input(&mut events)
.filter_map(|action| match action {
Some(GameInputAction::Quit) => Some(GameInputEffect::Quit),
Some(action) => match action.to_guitar_action() {
Some(guitar_action) => {
// sdl's event timestamps are always later than the OS timestamp
// so just assume that events are happening at this instant
// TODO: can we do better?
// TODO: track inputs for replays?
playthrough.apply(&guitar_action, song_time_ms).map(|e| GameInputEffect::GuitarEffect(e))
},
None => None,
},
None => None,
});
effects.for_each(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (), | .map(|e| GameInputEffect::GuitarEffect(e))
.map(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
draw(&mut canvas, &playthrough, song_time_ms);
match FRAME_LIMIT {
Some(FrameLimit::Vsync) => (), // present() waits for vsync if on
Some(FrameLimit::Cap(cap)) => {
::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / cap));
},
None => (),
}
}
Ok(())
} | }
}
});
playthrough.update_time(song_time_ms) | random_line_split |
main.rs | extern crate sdl2;
extern crate ears;
mod chart;
mod guitarplaythrough;
use std::time::{Duration, Instant};
use sdl2::event::Event;
use sdl2::pixels;
use sdl2::keyboard::Keycode;
use sdl2::gfx::primitives::DrawRenderer;
use ears::{AudioController};
use guitarplaythrough::*;
const SCREEN_WIDTH: u32 = 800;
const SCREEN_HEIGHT: u32 = 600;
enum GameButton {
Green,
Red,
Yellow,
Blue,
Orange,
}
enum GameInputAction {
Quit,
ButtonDown(GameButton),
ButtonUp(GameButton),
Strum,
}
impl GameButton {
fn to_guitar(self: &Self) -> Fret {
match self {
GameButton::Green => Fret::G,
GameButton::Red => Fret::R,
GameButton::Yellow => Fret::Y,
GameButton::Blue => Fret::B,
GameButton::Orange => Fret::O,
}
}
}
impl GameInputAction {
fn to_guitar_action(self: &Self) -> Option<GuitarInputAction> {
match self {
GameInputAction::Quit => None,
GameInputAction::ButtonDown(button) => Some(GuitarInputAction::FretDown(button.to_guitar())),
GameInputAction::ButtonUp(button) => Some(GuitarInputAction::FretUp(button.to_guitar())),
GameInputAction::Strum => Some(GuitarInputAction::Strum),
}
}
}
enum | {
Quit,
GuitarEffect(GuitarGameEffect),
}
fn draw_fret<T: sdl2::render::RenderTarget>(canvas: &sdl2::render::Canvas<T>, enabled: bool, x: i16, y: i16, radius: i16, color: pixels::Color) -> Result<(), String> {
if enabled {
canvas.filled_circle(x, y, radius, color)
} else {
canvas.circle(x, y, radius, color)
}
}
enum FrameLimit {
Vsync,
Cap(u32),
}
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
/* joystick initialization */
let joystick_subsystem = sdl_context.joystick()?;
let available = joystick_subsystem.num_joysticks()
.map_err(|e| format!("can't enumerate joysticks: {}", e))?;
println!("{} joysticks available", available);
// Iterate over all available joysticks and stop once we manage to open one.
let mut joystick = (0..available).find_map(|id| match joystick_subsystem.open(id) {
Ok(c) => {
println!("Success: opened \"{}\"", c.name());
Some(c)
},
Err(e) => {
println!("failed: {:?}", e);
None
},
}).expect("Couldn't open any joystick");
// Print the joystick's power level
println!("\"{}\" power level: {:?}", joystick.name(), joystick.power_level()
.map_err(|e| e.to_string())?);
/* window initialization */
let video_subsys = sdl_context.video()?;
let window = video_subsys.window("bumpit", SCREEN_WIDTH, SCREEN_HEIGHT)
.position_centered()
.opengl()
.build()
.map_err(|e| e.to_string())?;
let mut canvas = window.into_canvas().build().map_err(|e| e.to_string())?;
let mut events = sdl_context.event_pump()?;
let mut playthrough: GuitarPlaythrough = std::fs::read_to_string("Songs/notes.chart")
.map_err(|e| e.to_string())
.and_then(|file| chart::read(file.as_ref())
.map_err(|e| { println!("Error: {:?}", e); return String::from("couldn't parse chart") })) // TODO: error to string
.and_then(|chart| GuitarPlaythrough::new(chart)
.map_err(|s| String::from(s)))?;
fn draw<T: sdl2::render::RenderTarget>(canvas: &mut sdl2::render::Canvas<T>, playthrough: &GuitarPlaythrough, time: f32) {
canvas.set_draw_color(pixels::Color::RGB(0, 0, 0));
canvas.clear();
for i in 0..playthrough.notes_hit {
let _ = draw_fret(&canvas, true, (i as i16) * 10, 10, 5, pixels::Color::RGB(255, 255, 255));
}
let frets = playthrough.frets;
let _ = draw_fret(&canvas, frets[Fret::G as usize], 50, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::R as usize], 150, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 0, 0));
let _ = draw_fret(&canvas, frets[Fret::Y as usize], 250, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::B as usize], 350, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 0, 128));
let _ = draw_fret(&canvas, frets[Fret::O as usize], 450, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(192, 128, 00));
for note in &playthrough.chart.notes {
let position_past_time = playthrough.chart.ticks_to_ms(note.ticks) - time;
let progress_on_screen = position_past_time / 1000f32;
if progress_on_screen > 1f32 || progress_on_screen < 0f32 {
continue;
}
let y = ((1f32 - progress_on_screen) * (SCREEN_HEIGHT as f32)) as i16 - 75;
if note.is_open() {
let _ = canvas.rectangle(50, y - 2, 462, y + 2, pixels::Color::RGB(200, 60, 200));
} else {
note.chord.iter()
.enumerate()
.filter(|(_i, chord_note)| **chord_note)
.for_each(|(note_index, _chord_note)| {
let _ = draw_fret(&canvas, true, 50 + (note_index as i16) * 100, y, 17, pixels::Color::RGB(60, 80, 100));
});
}
}
canvas.present();
};
fn input<'a>(events: &'a mut sdl2::EventPump) -> impl Iterator<Item = Option<GameInputAction>> + 'a {
events.poll_iter()
.map(|event| match event {
Event::Quit {..} => Some(GameInputAction::Quit),
Event::KeyDown { keycode: Some(Keycode::Escape), .. } => Some(GameInputAction::Quit),
Event::KeyDown { keycode : Some(Keycode::Z), .. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::KeyDown { keycode : Some(Keycode::X), .. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::KeyDown { keycode : Some(Keycode::C), .. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::KeyDown { keycode : Some(Keycode::V), .. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::KeyDown { keycode : Some(Keycode::B), .. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::KeyUp { keycode : Some(Keycode::Z), .. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::KeyUp { keycode : Some(Keycode::X), .. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::KeyUp { keycode : Some(Keycode::C), .. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::KeyUp { keycode : Some(Keycode::V), .. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::KeyUp { keycode : Some(Keycode::B), .. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::KeyDown { keycode : Some(Keycode::Space), .. } => Some(GameInputAction::Strum),
Event::JoyButtonDown { button_idx : 0, .. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::JoyButtonDown { button_idx : 1, .. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::JoyButtonDown { button_idx : 3, .. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::JoyButtonDown { button_idx : 2, .. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::JoyButtonDown { button_idx : 4, .. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::JoyButtonUp { button_idx : 0, .. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::JoyButtonUp { button_idx : 1, .. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::JoyButtonUp { button_idx : 3, .. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::JoyButtonUp { button_idx : 2, .. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::JoyButtonUp { button_idx : 4, .. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Up, .. } => Some(GameInputAction::Strum),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Down, .. } => Some(GameInputAction::Strum),
_ => None
})
}
// for power-saving. if Some, the game will sleep for
const FRAME_LIMIT: Option<FrameLimit> = Option::Some(FrameLimit::Cap(120));
// TODO: enable vsync based on frame_limit
// https://wiki.libsdl.org/SDL_GL_SetSwapInterval
// TODO: process inputs more frequently than once per frame?
// avoidable if we have accurate input event timestamps? (+ assumption our processing is short)
// TODO: when frame_limit is FPS cap, do measurements for sleep interval
// that results in that frequency (at runtime)
// and ensure game loop handles huge outliers in sleep wakeup time
let mut music = ears::Sound::new("Songs/song.ogg")?;
music.play();
let mut previous_frame_time = Instant::now();
let mut last_playhead_pos_ms = 0f32;
let mut song_time_ms = 0f32;
let mut run = true;
while run {
// https://www.reddit.com/r/gamedev/comments/13y26t/how_do_rhythm_games_stay_in_sync_with_the_music/c78aawd/
let this_frame_time = Instant::now();
song_time_ms += this_frame_time.duration_since(previous_frame_time).as_millis() as f32;
previous_frame_time = this_frame_time;
let playhead_pos_ms = music.get_offset() * 1000f32;
if playhead_pos_ms != last_playhead_pos_ms {
song_time_ms = (song_time_ms + playhead_pos_ms) / 2f32;
last_playhead_pos_ms = playhead_pos_ms;
}
let effects = input(&mut events)
.filter_map(|action| match action {
Some(GameInputAction::Quit) => Some(GameInputEffect::Quit),
Some(action) => match action.to_guitar_action() {
Some(guitar_action) => {
// sdl's event timestamps are always later than the OS timestamp
// so just assume that events are happening at this instant
// TODO: can we do better?
// TODO: track inputs for replays?
playthrough.apply(&guitar_action, song_time_ms).map(|e| GameInputEffect::GuitarEffect(e))
},
None => None,
},
None => None,
});
effects.for_each(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
playthrough.update_time(song_time_ms)
.map(|e| GameInputEffect::GuitarEffect(e))
.map(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
draw(&mut canvas, &playthrough, song_time_ms);
match FRAME_LIMIT {
Some(FrameLimit::Vsync) => (), // present() waits for vsync if on
Some(FrameLimit::Cap(cap)) => {
::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / cap));
},
None => (),
}
}
Ok(())
}
| GameInputEffect | identifier_name |
main.rs | extern crate sdl2;
extern crate ears;
mod chart;
mod guitarplaythrough;
use std::time::{Duration, Instant};
use sdl2::event::Event;
use sdl2::pixels;
use sdl2::keyboard::Keycode;
use sdl2::gfx::primitives::DrawRenderer;
use ears::{AudioController};
use guitarplaythrough::*;
const SCREEN_WIDTH: u32 = 800;
const SCREEN_HEIGHT: u32 = 600;
enum GameButton {
Green,
Red,
Yellow,
Blue,
Orange,
}
enum GameInputAction {
Quit,
ButtonDown(GameButton),
ButtonUp(GameButton),
Strum,
}
impl GameButton {
fn to_guitar(self: &Self) -> Fret {
match self {
GameButton::Green => Fret::G,
GameButton::Red => Fret::R,
GameButton::Yellow => Fret::Y,
GameButton::Blue => Fret::B,
GameButton::Orange => Fret::O,
}
}
}
impl GameInputAction {
fn to_guitar_action(self: &Self) -> Option<GuitarInputAction> {
match self {
GameInputAction::Quit => None,
GameInputAction::ButtonDown(button) => Some(GuitarInputAction::FretDown(button.to_guitar())),
GameInputAction::ButtonUp(button) => Some(GuitarInputAction::FretUp(button.to_guitar())),
GameInputAction::Strum => Some(GuitarInputAction::Strum),
}
}
}
enum GameInputEffect {
Quit,
GuitarEffect(GuitarGameEffect),
}
fn draw_fret<T: sdl2::render::RenderTarget>(canvas: &sdl2::render::Canvas<T>, enabled: bool, x: i16, y: i16, radius: i16, color: pixels::Color) -> Result<(), String> |
enum FrameLimit {
Vsync,
Cap(u32),
}
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
/* joystick initialization */
let joystick_subsystem = sdl_context.joystick()?;
let available = joystick_subsystem.num_joysticks()
.map_err(|e| format!("can't enumerate joysticks: {}", e))?;
println!("{} joysticks available", available);
// Iterate over all available joysticks and stop once we manage to open one.
let mut joystick = (0..available).find_map(|id| match joystick_subsystem.open(id) {
Ok(c) => {
println!("Success: opened \"{}\"", c.name());
Some(c)
},
Err(e) => {
println!("failed: {:?}", e);
None
},
}).expect("Couldn't open any joystick");
// Print the joystick's power level
println!("\"{}\" power level: {:?}", joystick.name(), joystick.power_level()
.map_err(|e| e.to_string())?);
/* window initialization */
let video_subsys = sdl_context.video()?;
let window = video_subsys.window("bumpit", SCREEN_WIDTH, SCREEN_HEIGHT)
.position_centered()
.opengl()
.build()
.map_err(|e| e.to_string())?;
let mut canvas = window.into_canvas().build().map_err(|e| e.to_string())?;
let mut events = sdl_context.event_pump()?;
let mut playthrough: GuitarPlaythrough = std::fs::read_to_string("Songs/notes.chart")
.map_err(|e| e.to_string())
.and_then(|file| chart::read(file.as_ref())
.map_err(|e| { println!("Error: {:?}", e); return String::from("couldn't parse chart") })) // TODO: error to string
.and_then(|chart| GuitarPlaythrough::new(chart)
.map_err(|s| String::from(s)))?;
fn draw<T: sdl2::render::RenderTarget>(canvas: &mut sdl2::render::Canvas<T>, playthrough: &GuitarPlaythrough, time: f32) {
canvas.set_draw_color(pixels::Color::RGB(0, 0, 0));
canvas.clear();
for i in 0..playthrough.notes_hit {
let _ = draw_fret(&canvas, true, (i as i16) * 10, 10, 5, pixels::Color::RGB(255, 255, 255));
}
let frets = playthrough.frets;
let _ = draw_fret(&canvas, frets[Fret::G as usize], 50, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::R as usize], 150, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 0, 0));
let _ = draw_fret(&canvas, frets[Fret::Y as usize], 250, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(128, 128, 0));
let _ = draw_fret(&canvas, frets[Fret::B as usize], 350, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(0, 0, 128));
let _ = draw_fret(&canvas, frets[Fret::O as usize], 450, (SCREEN_HEIGHT as i16) - 75, 25, pixels::Color::RGB(192, 128, 00));
for note in &playthrough.chart.notes {
let position_past_time = playthrough.chart.ticks_to_ms(note.ticks) - time;
let progress_on_screen = position_past_time / 1000f32;
if progress_on_screen > 1f32 || progress_on_screen < 0f32 {
continue;
}
let y = ((1f32 - progress_on_screen) * (SCREEN_HEIGHT as f32)) as i16 - 75;
if note.is_open() {
let _ = canvas.rectangle(50, y - 2, 462, y + 2, pixels::Color::RGB(200, 60, 200));
} else {
note.chord.iter()
.enumerate()
.filter(|(_i, chord_note)| **chord_note)
.for_each(|(note_index, _chord_note)| {
let _ = draw_fret(&canvas, true, 50 + (note_index as i16) * 100, y, 17, pixels::Color::RGB(60, 80, 100));
});
}
}
canvas.present();
};
fn input<'a>(events: &'a mut sdl2::EventPump) -> impl Iterator<Item = Option<GameInputAction>> + 'a {
events.poll_iter()
.map(|event| match event {
Event::Quit {..} => Some(GameInputAction::Quit),
Event::KeyDown { keycode: Some(Keycode::Escape), .. } => Some(GameInputAction::Quit),
Event::KeyDown { keycode : Some(Keycode::Z), .. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::KeyDown { keycode : Some(Keycode::X), .. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::KeyDown { keycode : Some(Keycode::C), .. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::KeyDown { keycode : Some(Keycode::V), .. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::KeyDown { keycode : Some(Keycode::B), .. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::KeyUp { keycode : Some(Keycode::Z), .. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::KeyUp { keycode : Some(Keycode::X), .. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::KeyUp { keycode : Some(Keycode::C), .. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::KeyUp { keycode : Some(Keycode::V), .. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::KeyUp { keycode : Some(Keycode::B), .. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::KeyDown { keycode : Some(Keycode::Space), .. } => Some(GameInputAction::Strum),
Event::JoyButtonDown { button_idx : 0, .. } => Some(GameInputAction::ButtonDown(GameButton::Green)),
Event::JoyButtonDown { button_idx : 1, .. } => Some(GameInputAction::ButtonDown(GameButton::Red)),
Event::JoyButtonDown { button_idx : 3, .. } => Some(GameInputAction::ButtonDown(GameButton::Yellow)),
Event::JoyButtonDown { button_idx : 2, .. } => Some(GameInputAction::ButtonDown(GameButton::Blue)),
Event::JoyButtonDown { button_idx : 4, .. } => Some(GameInputAction::ButtonDown(GameButton::Orange)),
Event::JoyButtonUp { button_idx : 0, .. } => Some(GameInputAction::ButtonUp(GameButton::Green)),
Event::JoyButtonUp { button_idx : 1, .. } => Some(GameInputAction::ButtonUp(GameButton::Red)),
Event::JoyButtonUp { button_idx : 3, .. } => Some(GameInputAction::ButtonUp(GameButton::Yellow)),
Event::JoyButtonUp { button_idx : 2, .. } => Some(GameInputAction::ButtonUp(GameButton::Blue)),
Event::JoyButtonUp { button_idx : 4, .. } => Some(GameInputAction::ButtonUp(GameButton::Orange)),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Up, .. } => Some(GameInputAction::Strum),
Event::JoyHatMotion { hat_idx : 0, state : sdl2::joystick::HatState::Down, .. } => Some(GameInputAction::Strum),
_ => None
})
}
// for power-saving. if Some, the game will sleep for
const FRAME_LIMIT: Option<FrameLimit> = Option::Some(FrameLimit::Cap(120));
// TODO: enable vsync based on frame_limit
// https://wiki.libsdl.org/SDL_GL_SetSwapInterval
// TODO: process inputs more frequently than once per frame?
// avoidable if we have accurate input event timestamps? (+ assumption our processing is short)
// TODO: when frame_limit is FPS cap, do measurements for sleep interval
// that results in that frequency (at runtime)
// and ensure game loop handles huge outliers in sleep wakeup time
let mut music = ears::Sound::new("Songs/song.ogg")?;
music.play();
let mut previous_frame_time = Instant::now();
let mut last_playhead_pos_ms = 0f32;
let mut song_time_ms = 0f32;
let mut run = true;
while run {
// https://www.reddit.com/r/gamedev/comments/13y26t/how_do_rhythm_games_stay_in_sync_with_the_music/c78aawd/
let this_frame_time = Instant::now();
song_time_ms += this_frame_time.duration_since(previous_frame_time).as_millis() as f32;
previous_frame_time = this_frame_time;
let playhead_pos_ms = music.get_offset() * 1000f32;
if playhead_pos_ms != last_playhead_pos_ms {
song_time_ms = (song_time_ms + playhead_pos_ms) / 2f32;
last_playhead_pos_ms = playhead_pos_ms;
}
let effects = input(&mut events)
.filter_map(|action| match action {
Some(GameInputAction::Quit) => Some(GameInputEffect::Quit),
Some(action) => match action.to_guitar_action() {
Some(guitar_action) => {
// sdl's event timestamps are always later than the OS timestamp
// so just assume that events are happening at this instant
// TODO: can we do better?
// TODO: track inputs for replays?
playthrough.apply(&guitar_action, song_time_ms).map(|e| GameInputEffect::GuitarEffect(e))
},
None => None,
},
None => None,
});
effects.for_each(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
playthrough.update_time(song_time_ms)
.map(|e| GameInputEffect::GuitarEffect(e))
.map(|effect: GameInputEffect| {
match effect {
GameInputEffect::Quit => run = false,
GameInputEffect::GuitarEffect(effect) => match effect {
Hit => (),
Overstrum => (),
MissStreak => (),
MissNoStreak => (),
ReleaseSustain => (),
}
}
});
draw(&mut canvas, &playthrough, song_time_ms);
match FRAME_LIMIT {
Some(FrameLimit::Vsync) => (), // present() waits for vsync if on
Some(FrameLimit::Cap(cap)) => {
::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / cap));
},
None => (),
}
}
Ok(())
}
| {
if enabled {
canvas.filled_circle(x, y, radius, color)
} else {
canvas.circle(x, y, radius, color)
}
} | identifier_body |
script.js | let footer = document.getElementsByTagName('footer')[0];
let searchInput = document.getElementById('searchInput');
document.addEventListener('DOMContentLoaded', start); // call start() once the HTML has been loaded and parsed
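// start(): if the page was opened at /found/<title>, restore that title into the
// search box and rerun the search; otherwise just wire up the search button and the Enter key.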
function start() {
let bookTitle;
if (searchInput.value == '') {
let url = window.location.pathname;
if (url.indexOf('/found/') != -1) {
url = url.replace('/found/', '');
if (url != '') {
bookTitle = decodeURI(url);
searchInput.value = bookTitle;
searchBook(bookTitle);
}
else {
window.location.replace('/');
}
}
}
document.getElementById('searchBtn').addEventListener('click', searchBook);
searchInput.addEventListener('keypress', (e) => { if (e.key === 'Enter') { e.preventDefault(); searchBook(); } }); // search on Enter (use the handler's event argument, not the deprecated global 'event')
}
// Main functions
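// searchBook(bookTitle?) — runs the search: shows placeholder templates, POSTs the
// title to php/search.php, injects the returned HTML into #results, and wires up
// the request/booking forms inside it. Callable directly with a title string or
// as a click/keypress handler (in which case the title is read from the input).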
function searchBook(bookTitle) {
// when invoked as an event handler, the first argument is the event object, not a title string
if (bookTitle instanceof Event || bookTitle === undefined) {
bookTitle = searchInput.value;
}
if (bookTitle != '') {
footer.classList.remove('index');
searchInput.removeAttribute('autofocus');
let xhr = new XMLHttpRequest();
let params = 'bookTitle=' + encodeURIComponent(bookTitle), // encode so titles containing '&', '=' or spaces survive the POST body
template = '<div class="row"><div class="col-sm-12 col-md-12 col-lg-10 offset-lg-1 col-xl-8 offset-xl-2"><div class="book"><div class="bookDesc"><h2> </h2><div class="details lead"> <span class="author"> </span> <span class="publisher"> </span> <span class="pages"> </span></div></div></div></div></div><div class="row"><div class="col-sm-12 col-md-12 col-lg-10 offset-lg-1 col-xl-8 offset-xl-2"><div class="library"><div class="libraryDesc" style="width:20%"><div style="padding:0 40%" class="name"> </div><div class="details"><div style="padding:0 50%" class="address"> </div></div></div></div></div></div>',
alert = setTimeout(showSearchAlert, 10000, document.querySelector('.searchAlert'), '<b>Книга нашлась.</b> Проверяется в других библиотеках. С новыми книгами, которых немного, поиск работает быстрее'); // after 10 s, show a "still searching" hint; note: this local 'alert' shadows window.alert inside searchBook
xhr.abort(); // note: a no-op — this xhr was just created; cancelling a previous search would require keeping the xhr reference outside this function
document.getElementById('results').innerHTML=''; // clear the results container
for (let i=0;i<3;i++){ // insert placeholder templates into the results container while the search is running
let elem = document.createElement('div');
elem.classList.add('bookContainer','template');
elem.innerHTML=template;
document.getElementById('results').append(elem);
}
history.pushState(null, null, '/found/' + encodeURI(bookTitle)); // put the query into the URL (encodeURI pairs with the decodeURI used in start())
document.title = '«' + bookTitle + '» в библиотеках Москвы';
xhr.open('POST', '../php/search.php');
xhr.onreadystatechange=()=>{
if(xhr.readyState === 4) {
if(xhr.status === 200) {
clearTimeout(alert); // stop the pending "still searching" hint
document.getElementById('results').innerHTML = xhr.responseText;
// the original declaration of requestButton is missing here; the selector below is an
// assumption — a button inside the request form ('.form', the element toggled in toRequest)
let requestButton = document.querySelector('.form input[type="button"], .form input[type="submit"]');
if (requestButton) requestButton.addEventListener('click', toRequest);
let bookingButtons = document.querySelectorAll('input[value="Забронировать"]');
if (bookingButtons.length > 0) {
for (let i = 0; i < bookingButtons.length; i++) {
bookingButtons[i].addEventListener('click', {handleEvent: toBook, number: i});
let surname = document.getElementsByName('surname')[i];
surname.addEventListener('blur', {handleEvent: printSurnameInFormProof, number: i, surname: surname}); // printSurnameInFormProof is assumed to be defined elsewhere in the file
}
}
// Insert the author and e-mail into the request-confirmation block
var inputAuthor = document.getElementById('author');
if (inputAuthor != null) {
inputAuthor.onblur = printAuthor;
function printAuthor() {
var textAuthor = document.getElementById('authorAdd');
textAuthor.innerHTML = this.value;
}
}
var inputEmail = document.getElementById('email');
if (inputEmail != null) {
inputEmail.onblur = printEmail;
function printEmail() {
var textEmail = document.getElementById('emailAdd');
textEmail.innerHTML = this.value;
}
}
// Toggle the library opening-hours panels
var timetableLinks = document.querySelectorAll('.timetableLink');
if (timetableLinks.length > 0) {
for (let i = 0; i < timetableLinks.length; i++) {
let timetableLink = timetableLinks[i];
timetableLink.addEventListener('click', {
handleEvent: controlSchedule,
link: timetableLink,
number: i
});
}
}
}
else console.log('Ошибка: ' + xhr.status);
}
};
xhr.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded');
xhr.send(params);
}
else {
searchInput.focus();
}
}
function toRequest() {
let email = document.getElementById('email').value;
let surname = document.getElementById('surname').value;
let title = document.getElementById('title').value;
let author = document.getElementById('author').value;
let params = 'email=' + email + '&surname=' + surname + '&title=' + title + '&author=' + author;
let xhr = new XMLHttpRequest();
xhr.open('POST', '../php/request.php'); // request method and handler URL
xhr.timeout = 5000; // request timeout in ms
xhr.ontimeout=()=>{alert('Превышено время ожидания ответа от сервера!')};
xhr.onreadystatechange=()=>{ // called whenever the request state changes
if (xhr.readyState === 4){ // state 4: request finished
if (xhr.status === 200) { // on HTTP 200, use the response
document.querySelector('.form').style.display = 'none';
document.querySelector('.formProof').style.display = 'block';
}
else alert('Ошибка: ' + xhr.status);
}
};
xhr.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded'); // set the HTTP header
xhr.send(params); // send the request
}
function toBook(e) {
let email = document.getElementsByName('email')[this.number];
let surname = document.getElementsByName('surname')[this.number];
if ((email.value == '') || (email.value.match(/.+@.+\..+/i) == null)) {
email.focus();
email.classList.add("invalid");
}
else if (surname.value == '') {
email.classList.remove("invalid");
surname.focus();
surname.classList.add("invalid");
}
else {
surname.classList.remove("invalid");
email = email.value;
surname = surname.value;
let title = document.getElementsByName('titleBooking')[this.number].value;
let author = document.getElementsByName('author')[this.number].value;
let publisher = document.getElementsByName('publisher')[this.number].value;
let year = document.getElementsByName('year')[this.number].value;
let pages = document.getElementsByName('pages')[this.number].value;
let callNumber = document.getElementsByName('callNumber')[this.number].value;
let library = document.getElementsByName('library')[this.number].value;
let params = 'email=' + email + '&surname=' + surname + '&title=' + title + '&author=' + author + '&publisher=' + publisher + '&year=' + year + '&pages=' + pages + '&callNumber=' + callNumber + '&library=' + library;
let xhr = new XMLHttpRequest();
xhr.open('POST', '../php/book.php'); // request method and handler URL
xhr.timeout = 5000; // request timeout in ms
xhr.ontimeout=()=>{alert('Превышено время ожидания ответа от сервера!')};
xhr.onreadystatechange=()=>{ // called whenever the request state changes
if (xhr.readyState === 4){ // state 4: request finished
if (xhr.status === 200) { // on HTTP 200, use the response
document.querySelectorAll('.formBooking')[this.number].style.display = 'none';
document.querySelectorAll('.formProof')[this.number].style.display = 'block';
}
else alert('Ошибка: ' + xhr.status);
}
};
xhr.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded'); // set the HTTP header
xhr.send(params); // send the request
}
}
// Helper functions
function printSurnameInFormProof(e) {
let textSurname = document.querySelectorAll('.surnameAdd')[this.number];
let surnameValue = this.surname.value;
textSurname.innerHTML = surnameValue;
}
function controlSchedule(e) {
var link = this.link;
var schedule = document.querySelectorAll('.timetableSchedule')[this.number];
if (link.classList.contains('timetableLinkClosed')) {
link.classList.remove('timetableLinkClosed');
link.classList.add('timetableLinkOpened');
schedule.style.display = 'block';
}
else {
link.classList.remove('timetableLinkOpened');
link.classList.add('timetableLinkClosed');
schedule.style.display = 'none';
}
}
function showSearchAlert(alertID, content) { // show the notification
alertID.style.display='flex';
alertID.style.animationName='showSearchAlert';
alertID.innerHTML='<div>'+content+'</div>'+'<svg viewBox="0 0 10 10" class="closeBtn"><path d="M2,8 L8,2" class="closeBtn_p1"></path><path d="M2,2 L8,8" class="closeBtn_p2"></path></svg>';
let aTimer=setTimeout(closeSearchAlert, 15000, alertID);
document.querySelector('.closeBtn').addEventListener('click',()=>{closeSearchAlert(alertID);clearTimeout(aTimer);});
}
function closeSearchAlert(alertID) { // hide the notification
alertID.style.animationName='closeSearchAlert';
setTimeout(()=>{alertID.style.display=''},1000)
}
elasticsearch.rs
use crate::{
buffers::Acker,
event::Event,
sinks::util::{
http::{HttpRetryLogic, HttpService},
retries::FixedRetryPolicy,
BatchServiceSink, Buffer, Compression, SinkExt,
},
template::Template,
topology::config::{DataType, SinkConfig},
};
use futures::{stream::iter_ok, Future, Sink};
use http::{Method, Uri};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::time::Duration;
use tower::ServiceBuilder;
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields)]
pub struct ElasticSearchConfig {
pub host: String,
pub index: Option<String>,
pub doc_type: Option<String>,
pub id_key: Option<String>,
pub batch_size: Option<usize>,
pub batch_timeout: Option<u64>,
pub compression: Option<Compression>,
// Tower Request based configuration
pub request_in_flight_limit: Option<usize>,
pub request_timeout_secs: Option<u64>,
pub request_rate_limit_duration_secs: Option<u64>,
pub request_rate_limit_num: Option<u64>,
pub request_retry_attempts: Option<usize>,
pub request_retry_backoff_secs: Option<u64>,
}
#[typetag::serde(name = "elasticsearch")]
impl SinkConfig for ElasticSearchConfig {
fn build(&self, acker: Acker) -> Result<(super::RouterSink, super::Healthcheck), String> {
let sink = es(self.clone(), acker);
let healthcheck = healthcheck(self.host.clone());
Ok((sink, healthcheck))
}
fn input_type(&self) -> DataType {
DataType::Log
}
}
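// Editor's note (added): a minimal usage sketch, not part of the original
// file. It mirrors the way the integration tests below build the config:
// set a few fields explicitly and let the rest fall back through
// ..Default::default(). The host and index values are illustrative
// assumptions, not defaults taken from this sink.
#[allow(dead_code)]
fn example_config() -> ElasticSearchConfig {
ElasticSearchConfig {
host: "http://localhost:9200".into(),
index: Some("app-logs-%Y.%m.%d".into()),
compression: Some(Compression::Gzip),
batch_size: Some(bytesize::mib(5u64) as usize),
..Default::default()
}
}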
fn es(config: ElasticSearchConfig, acker: Acker) -> super::RouterSink {
let host = config.host.clone();
let id_key = config.id_key.clone();
let gzip = match config.compression.unwrap_or(Compression::Gzip) {
Compression::None => false,
Compression::Gzip => true,
};
let batch_size = config.batch_size.unwrap_or(bytesize::mib(10u64) as usize);
let batch_timeout = config.batch_timeout.unwrap_or(1);
let timeout = config.request_timeout_secs.unwrap_or(60);
let in_flight_limit = config.request_in_flight_limit.unwrap_or(5);
let rate_limit_duration = config.request_rate_limit_duration_secs.unwrap_or(1);
let rate_limit_num = config.request_rate_limit_num.unwrap_or(5);
let retry_attempts = config.request_retry_attempts.unwrap_or(usize::max_value());
let retry_backoff_secs = config.request_retry_backoff_secs.unwrap_or(1);
let index = if let Some(idx) = &config.index {
Template::from(idx.as_str())
} else {
Template::from("vector-%Y.%m.%d")
};
let doc_type = config.clone().doc_type.unwrap_or("_doc".into());
let policy = FixedRetryPolicy::new(
retry_attempts,
Duration::from_secs(retry_backoff_secs),
HttpRetryLogic,
);
let http_service = HttpService::new(move |body: Vec<u8>| {
let uri = format!("{}/_bulk", host);
let uri: Uri = uri.parse().unwrap();
let mut builder = hyper::Request::builder();
builder.method(Method::POST);
builder.uri(uri);
builder.header("Content-Type", "application/x-ndjson");
if gzip {
builder.header("Content-Encoding", "gzip");
}
builder.body(body).unwrap()
});
let service = ServiceBuilder::new()
.concurrency_limit(in_flight_limit)
.rate_limit(rate_limit_num, Duration::from_secs(rate_limit_duration))
.retry(policy)
.timeout(Duration::from_secs(timeout))
.service(http_service);
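// Editor's note (added): reading the ServiceBuilder stack above from top to
// bottom, a request passes through an in-flight concurrency cap, a rate
// limit, the retry layer driven by FixedRetryPolicy, and a per-request
// timeout before reaching the underlying HTTP service. The numbers all come
// from the unwrap_or defaults at the top of es().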
let sink = BatchServiceSink::new(service, acker)
.batched_with_min(
Buffer::new(gzip),
batch_size,
Duration::from_secs(batch_timeout),
)
.with_flat_map(move |e| iter_ok(encode_event(e, &index, &doc_type, &id_key)));
Box::new(sink)
}
fn encode_event(
event: Event,
index: &Template,
doc_type: &String,
id_key: &Option<String>,
) -> Option<Vec<u8>> {
let index = index
.render_string(&event)
.map_err(|keys| {
warn!(
message = "Keys do not exist on the event. Dropping event.",
?keys
);
})
.ok()?;
let mut action = json!({
"index": {
"_index": index,
"_type": doc_type,
}
});
maybe_set_id(
id_key.as_ref(),
action.pointer_mut("/index").unwrap(),
&event,
);
let mut body = serde_json::to_vec(&action).unwrap();
body.push(b'\n');
serde_json::to_writer(&mut body, &event.into_log().unflatten()).unwrap();
body.push(b'\n');
Some(body)
}
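// Editor's note (added): a sketch of the payload encode_event produces, not
// part of the original file. Each event becomes two newline-terminated JSON
// lines, as the Elasticsearch bulk API expects: an action line naming the
// index and doc type, then the document itself, e.g.
//
// {"index":{"_index":"vector-2019.05.30","_type":"_doc"}}
// {"message":"raw log line","timestamp":"..."}
//
// The concrete field values shown are illustrative assumptions.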
fn healthcheck(host: String) -> super::Healthcheck {
let uri = format!("{}/_cluster/health", host);
let request = Request::get(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
let healthcheck = client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
});
Box::new(healthcheck)
}
fn maybe_set_id(key: Option<impl AsRef<str>>, doc: &mut serde_json::Value, event: &Event) {
if let Some(val) = key.and_then(|k| event.as_log().get(&k.as_ref().into())) {
let val = val.to_string_lossy();
doc.as_object_mut()
.unwrap()
.insert("_id".into(), json!(val));
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Event;
use serde_json::json;
#[test]
fn sets_id_from_custom_field() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({"_id": "bar"}), action);
}
#[test]
fn doesnt_set_id_when_field_missing() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("not_foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
#[test]
fn doesnt_set_id_when_not_configured() {
let id_key: Option<&str> = None;
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
}
#[cfg(test)]
#[cfg(feature = "es-integration-tests")]
mod integration_tests {
use super::*;
use crate::buffers::Acker;
use crate::{
event,
test_util::{block_on, random_events_with_stream, random_string},
topology::config::SinkConfig,
Event,
};
use elastic::client::SyncClientBuilder;
use futures::{Future, Sink};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde_json::{json, Value};
#[test]
fn structures_events_correctly() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
id_key: Some("my_id".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let mut input_event = Event::from("raw log line");
input_event
.as_mut_log()
.insert_explicit("my_id".into(), "42".into());
input_event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let pump = sink.send(input_event.clone());
block_on(pump).unwrap();
// make sure all writes are visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(1, response.total());
let hit = response.into_hits().next().unwrap();
assert_eq!("42", hit.id());
let value = hit.into_document().unwrap();
let expected = json!({
"message": "raw log line",
"my_id": "42",
"foo": "bar",
"timestamp": input_event.as_log()[&event::TIMESTAMP],
});
assert_eq!(expected, value);
}
#[test]
fn insert_events() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let (input, events) = random_events_with_stream(100, 100);
let pump = sink.send_all(events);
block_on(pump).unwrap();
// make sure all writes are visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(input.len() as u64, response.total());
let input = input
.into_iter()
.map(|rec| serde_json::to_value(rec.into_log().unflatten()).unwrap())
.collect::<Vec<_>>();
for hit in response.into_hits() {
let event = hit.into_document().unwrap();
assert!(input.contains(&event));
}
}
fn gen_index() -> String {
format!("test-{}", random_string(10).to_lowercase())
}
fn flush(host: String) -> impl Future<Item = (), Error = String> {
let uri = format!("{}/_flush", host);
let request = Request::post(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
})
}
}
elasticsearch.rs | use crate::{
buffers::Acker,
event::Event,
sinks::util::{
http::{HttpRetryLogic, HttpService},
retries::FixedRetryPolicy,
BatchServiceSink, Buffer, Compression, SinkExt,
},
template::Template,
topology::config::{DataType, SinkConfig},
};
use futures::{stream::iter_ok, Future, Sink};
use http::{Method, Uri};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::time::Duration;
use tower::ServiceBuilder;
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields)]
pub struct ElasticSearchConfig {
pub host: String,
pub index: Option<String>,
pub doc_type: Option<String>,
pub id_key: Option<String>,
pub batch_size: Option<usize>,
pub batch_timeout: Option<u64>,
pub compression: Option<Compression>,
// Tower Request based configuration
pub request_in_flight_limit: Option<usize>,
pub request_timeout_secs: Option<u64>,
pub request_rate_limit_duration_secs: Option<u64>,
pub request_rate_limit_num: Option<u64>,
pub request_retry_attempts: Option<usize>,
pub request_retry_backoff_secs: Option<u64>,
}
#[typetag::serde(name = "elasticsearch")]
impl SinkConfig for ElasticSearchConfig {
fn build(&self, acker: Acker) -> Result<(super::RouterSink, super::Healthcheck), String> {
let sink = es(self.clone(), acker);
let healtcheck = healthcheck(self.host.clone());
Ok((sink, healtcheck))
}
fn input_type(&self) -> DataType {
DataType::Log
}
}
fn es(config: ElasticSearchConfig, acker: Acker) -> super::RouterSink {
let host = config.host.clone();
let id_key = config.id_key.clone();
let gzip = match config.compression.unwrap_or(Compression::Gzip) {
Compression::None => false,
Compression::Gzip => true,
};
let batch_size = config.batch_size.unwrap_or(bytesize::mib(10u64) as usize);
let batch_timeout = config.batch_timeout.unwrap_or(1);
let timeout = config.request_timeout_secs.unwrap_or(60);
let in_flight_limit = config.request_in_flight_limit.unwrap_or(5);
let rate_limit_duration = config.request_rate_limit_duration_secs.unwrap_or(1);
let rate_limit_num = config.request_rate_limit_num.unwrap_or(5);
let retry_attempts = config.request_retry_attempts.unwrap_or(usize::max_value());
let retry_backoff_secs = config.request_retry_backoff_secs.unwrap_or(1);
let index = if let Some(idx) = &config.index {
Template::from(idx.as_str())
} else {
Template::from("vector-%Y.%m.%d")
};
let doc_type = config.clone().doc_type.unwrap_or("_doc".into());
let policy = FixedRetryPolicy::new(
retry_attempts,
Duration::from_secs(retry_backoff_secs),
HttpRetryLogic,
);
let http_service = HttpService::new(move |body: Vec<u8>| {
let uri = format!("{}/_bulk", host);
let uri: Uri = uri.parse().unwrap();
let mut builder = hyper::Request::builder();
builder.method(Method::POST);
builder.uri(uri);
builder.header("Content-Type", "application/x-ndjson");
if gzip {
builder.header("Content-Encoding", "gzip");
}
builder.body(body).unwrap()
});
let service = ServiceBuilder::new()
.concurrency_limit(in_flight_limit)
.rate_limit(rate_limit_num, Duration::from_secs(rate_limit_duration))
.retry(policy)
.timeout(Duration::from_secs(timeout))
.service(http_service);
let sink = BatchServiceSink::new(service, acker)
.batched_with_min(
Buffer::new(gzip),
batch_size,
Duration::from_secs(batch_timeout),
)
.with_flat_map(move |e| iter_ok(encode_event(e, &index, &doc_type, &id_key)));
Box::new(sink)
}
fn encode_event(
event: Event,
index: &Template,
doc_type: &String,
id_key: &Option<String>,
) -> Option<Vec<u8>> {
let index = index
.render_string(&event)
.map_err(|keys| {
warn!(
message = "Keys do not exist on the event. Dropping event.",
?keys
);
})
.ok()?;
let mut action = json!({
"index": {
"_index": index,
"_type": doc_type,
}
});
maybe_set_id(
id_key.as_ref(),
action.pointer_mut("/index").unwrap(),
&event,
);
let mut body = serde_json::to_vec(&action).unwrap();
body.push(b'\n');
serde_json::to_writer(&mut body, &event.into_log().unflatten()).unwrap();
body.push(b'\n');
Some(body)
}
fn healthcheck(host: String) -> super::Healthcheck {
let uri = format!("{}/_cluster/health", host);
let request = Request::get(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
let healthcheck = client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
});
Box::new(healthcheck)
}
fn maybe_set_id(key: Option<impl AsRef<str>>, doc: &mut serde_json::Value, event: &Event) {
if let Some(val) = key.and_then(|k| event.as_log().get(&k.as_ref().into())) {
let val = val.to_string_lossy();
doc.as_object_mut()
.unwrap()
.insert("_id".into(), json!(val));
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Event;
use serde_json::json;
#[test]
fn sets_id_from_custom_field() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({"_id": "bar"}), action);
}
#[test]
fn doesnt_set_id_when_field_missing() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log() | maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
#[test]
fn doesnt_set_id_when_not_configured() {
let id_key: Option<&str> = None;
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
}
#[cfg(test)]
#[cfg(feature = "es-integration-tests")]
mod integration_tests {
use super::*;
use crate::buffers::Acker;
use crate::{
event,
test_util::{block_on, random_events_with_stream, random_string},
topology::config::SinkConfig,
Event,
};
use elastic::client::SyncClientBuilder;
use futures::{Future, Sink};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde_json::{json, Value};
#[test]
fn structures_events_correctly() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
id_key: Some("my_id".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let mut input_event = Event::from("raw log line");
input_event
.as_mut_log()
.insert_explicit("my_id".into(), "42".into());
input_event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let pump = sink.send(input_event.clone());
block_on(pump).unwrap();
// make sure writes all all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(1, response.total());
let hit = response.into_hits().next().unwrap();
assert_eq!("42", hit.id());
let value = hit.into_document().unwrap();
let expected = json!({
"message": "raw log line",
"my_id": "42",
"foo": "bar",
"timestamp": input_event.as_log()[&event::TIMESTAMP],
});
assert_eq!(expected, value);
}
#[test]
fn insert_events() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let (input, events) = random_events_with_stream(100, 100);
let pump = sink.send_all(events);
block_on(pump).unwrap();
// make sure writes all all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(input.len() as u64, response.total());
let input = input
.into_iter()
.map(|rec| serde_json::to_value(rec.into_log().unflatten()).unwrap())
.collect::<Vec<_>>();
for hit in response.into_hits() {
let event = hit.into_document().unwrap();
assert!(input.contains(&event));
}
}
fn gen_index() -> String {
format!("test-{}", random_string(10).to_lowercase())
}
fn flush(host: String) -> impl Future<Item = (), Error = String> {
let uri = format!("{}/_flush", host);
let request = Request::post(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
})
}
} | .insert_explicit("not_foo".into(), "bar".into());
let mut action = json!({});
| random_line_split |
elasticsearch.rs | use crate::{
buffers::Acker,
event::Event,
sinks::util::{
http::{HttpRetryLogic, HttpService},
retries::FixedRetryPolicy,
BatchServiceSink, Buffer, Compression, SinkExt,
},
template::Template,
topology::config::{DataType, SinkConfig},
};
use futures::{stream::iter_ok, Future, Sink};
use http::{Method, Uri};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::time::Duration;
use tower::ServiceBuilder;
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields)]
pub struct ElasticSearchConfig {
pub host: String,
pub index: Option<String>,
pub doc_type: Option<String>,
pub id_key: Option<String>,
pub batch_size: Option<usize>,
pub batch_timeout: Option<u64>,
pub compression: Option<Compression>,
// Tower Request based configuration
pub request_in_flight_limit: Option<usize>,
pub request_timeout_secs: Option<u64>,
pub request_rate_limit_duration_secs: Option<u64>,
pub request_rate_limit_num: Option<u64>,
pub request_retry_attempts: Option<usize>,
pub request_retry_backoff_secs: Option<u64>,
}
#[typetag::serde(name = "elasticsearch")]
impl SinkConfig for ElasticSearchConfig {
fn build(&self, acker: Acker) -> Result<(super::RouterSink, super::Healthcheck), String> {
let sink = es(self.clone(), acker);
let healtcheck = healthcheck(self.host.clone());
Ok((sink, healtcheck))
}
fn input_type(&self) -> DataType {
DataType::Log
}
}
fn es(config: ElasticSearchConfig, acker: Acker) -> super::RouterSink {
let host = config.host.clone();
let id_key = config.id_key.clone();
let gzip = match config.compression.unwrap_or(Compression::Gzip) {
Compression::None => false,
Compression::Gzip => true,
};
let batch_size = config.batch_size.unwrap_or(bytesize::mib(10u64) as usize);
let batch_timeout = config.batch_timeout.unwrap_or(1);
let timeout = config.request_timeout_secs.unwrap_or(60);
let in_flight_limit = config.request_in_flight_limit.unwrap_or(5);
let rate_limit_duration = config.request_rate_limit_duration_secs.unwrap_or(1);
let rate_limit_num = config.request_rate_limit_num.unwrap_or(5);
let retry_attempts = config.request_retry_attempts.unwrap_or(usize::max_value());
let retry_backoff_secs = config.request_retry_backoff_secs.unwrap_or(1);
let index = if let Some(idx) = &config.index {
Template::from(idx.as_str())
} else {
Template::from("vector-%Y.%m.%d")
};
let doc_type = config.clone().doc_type.unwrap_or("_doc".into());
let policy = FixedRetryPolicy::new(
retry_attempts,
Duration::from_secs(retry_backoff_secs),
HttpRetryLogic,
);
let http_service = HttpService::new(move |body: Vec<u8>| {
let uri = format!("{}/_bulk", host);
let uri: Uri = uri.parse().unwrap();
let mut builder = hyper::Request::builder();
builder.method(Method::POST);
builder.uri(uri);
builder.header("Content-Type", "application/x-ndjson");
if gzip {
builder.header("Content-Encoding", "gzip");
}
builder.body(body).unwrap()
});
let service = ServiceBuilder::new()
.concurrency_limit(in_flight_limit)
.rate_limit(rate_limit_num, Duration::from_secs(rate_limit_duration))
.retry(policy)
.timeout(Duration::from_secs(timeout))
.service(http_service);
let sink = BatchServiceSink::new(service, acker)
.batched_with_min(
Buffer::new(gzip),
batch_size,
Duration::from_secs(batch_timeout),
)
.with_flat_map(move |e| iter_ok(encode_event(e, &index, &doc_type, &id_key)));
Box::new(sink)
}
fn encode_event(
event: Event,
index: &Template,
doc_type: &String,
id_key: &Option<String>,
) -> Option<Vec<u8>> {
let index = index
.render_string(&event)
.map_err(|keys| {
warn!(
message = "Keys do not exist on the event. Dropping event.",
?keys
);
})
.ok()?;
let mut action = json!({
"index": {
"_index": index,
"_type": doc_type,
}
});
maybe_set_id(
id_key.as_ref(),
action.pointer_mut("/index").unwrap(),
&event,
);
let mut body = serde_json::to_vec(&action).unwrap();
body.push(b'\n');
serde_json::to_writer(&mut body, &event.into_log().unflatten()).unwrap();
body.push(b'\n');
Some(body)
}
fn healthcheck(host: String) -> super::Healthcheck {
let uri = format!("{}/_cluster/health", host);
let request = Request::get(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
let healthcheck = client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
});
Box::new(healthcheck)
}
fn maybe_set_id(key: Option<impl AsRef<str>>, doc: &mut serde_json::Value, event: &Event) {
if let Some(val) = key.and_then(|k| event.as_log().get(&k.as_ref().into())) {
let val = val.to_string_lossy();
doc.as_object_mut()
.unwrap()
.insert("_id".into(), json!(val));
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Event;
use serde_json::json;
#[test]
fn sets_id_from_custom_field() |
#[test]
fn doesnt_set_id_when_field_missing() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("not_foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
#[test]
fn doesnt_set_id_when_not_configured() {
let id_key: Option<&str> = None;
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
}
#[cfg(test)]
#[cfg(feature = "es-integration-tests")]
mod integration_tests {
use super::*;
use crate::buffers::Acker;
use crate::{
event,
test_util::{block_on, random_events_with_stream, random_string},
topology::config::SinkConfig,
Event,
};
use elastic::client::SyncClientBuilder;
use futures::{Future, Sink};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde_json::{json, Value};
#[test]
fn structures_events_correctly() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
id_key: Some("my_id".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let mut input_event = Event::from("raw log line");
input_event
.as_mut_log()
.insert_explicit("my_id".into(), "42".into());
input_event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let pump = sink.send(input_event.clone());
block_on(pump).unwrap();
// make sure writes all all visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(1, response.total());
let hit = response.into_hits().next().unwrap();
assert_eq!("42", hit.id());
let value = hit.into_document().unwrap();
let expected = json!({
"message": "raw log line",
"my_id": "42",
"foo": "bar",
"timestamp": input_event.as_log()[&event::TIMESTAMP],
});
assert_eq!(expected, value);
}
#[test]
fn insert_events() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let (input, events) = random_events_with_stream(100, 100);
let pump = sink.send_all(events);
block_on(pump).unwrap();
// make sure all writes are visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(input.len() as u64, response.total());
let input = input
.into_iter()
.map(|rec| serde_json::to_value(rec.into_log().unflatten()).unwrap())
.collect::<Vec<_>>();
for hit in response.into_hits() {
let event = hit.into_document().unwrap();
assert!(input.contains(&event));
}
}
fn gen_index() -> String {
format!("test-{}", random_string(10).to_lowercase())
}
fn flush(host: String) -> impl Future<Item = (), Error = String> {
let uri = format!("{}/_flush", host);
let request = Request::post(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
})
}
}
| {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({"_id": "bar"}), action);
} | identifier_body |
elasticsearch.rs | use crate::{
buffers::Acker,
event::Event,
sinks::util::{
http::{HttpRetryLogic, HttpService},
retries::FixedRetryPolicy,
BatchServiceSink, Buffer, Compression, SinkExt,
},
template::Template,
topology::config::{DataType, SinkConfig},
};
use futures::{stream::iter_ok, Future, Sink};
use http::{Method, Uri};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::time::Duration;
use tower::ServiceBuilder;
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields)]
pub struct ElasticSearchConfig {
pub host: String,
pub index: Option<String>,
pub doc_type: Option<String>,
pub id_key: Option<String>,
pub batch_size: Option<usize>,
pub batch_timeout: Option<u64>,
pub compression: Option<Compression>,
// Tower Request based configuration
pub request_in_flight_limit: Option<usize>,
pub request_timeout_secs: Option<u64>,
pub request_rate_limit_duration_secs: Option<u64>,
pub request_rate_limit_num: Option<u64>,
pub request_retry_attempts: Option<usize>,
pub request_retry_backoff_secs: Option<u64>,
}
#[typetag::serde(name = "elasticsearch")]
impl SinkConfig for ElasticSearchConfig {
fn build(&self, acker: Acker) -> Result<(super::RouterSink, super::Healthcheck), String> {
let sink = es(self.clone(), acker);
let healthcheck = healthcheck(self.host.clone());
Ok((sink, healthcheck))
}
fn input_type(&self) -> DataType {
DataType::Log
}
}
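// For reference, a sketch of how this sink might be configured. The TOML keys
// mirror the serde field names on ElasticSearchConfig above; the section name
// and the literal values are illustrative assumptions, not taken from this
// source:
//
//   [sinks.my_es]
//   type = "elasticsearch"
//   host = "http://localhost:9200"
//   index = "vector-%Y.%m.%d"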
fn es(config: ElasticSearchConfig, acker: Acker) -> super::RouterSink {
let host = config.host.clone();
let id_key = config.id_key.clone();
let gzip = match config.compression.unwrap_or(Compression::Gzip) {
Compression::None => false,
Compression::Gzip => true,
};
let batch_size = config.batch_size.unwrap_or(bytesize::mib(10u64) as usize);
let batch_timeout = config.batch_timeout.unwrap_or(1);
let timeout = config.request_timeout_secs.unwrap_or(60);
let in_flight_limit = config.request_in_flight_limit.unwrap_or(5);
let rate_limit_duration = config.request_rate_limit_duration_secs.unwrap_or(1);
let rate_limit_num = config.request_rate_limit_num.unwrap_or(5);
let retry_attempts = config.request_retry_attempts.unwrap_or(usize::max_value());
let retry_backoff_secs = config.request_retry_backoff_secs.unwrap_or(1);
let index = if let Some(idx) = &config.index {
Template::from(idx.as_str())
} else {
Template::from("vector-%Y.%m.%d")
};
let doc_type = config.doc_type.clone().unwrap_or("_doc".into());
let policy = FixedRetryPolicy::new(
retry_attempts,
Duration::from_secs(retry_backoff_secs),
HttpRetryLogic,
);
let http_service = HttpService::new(move |body: Vec<u8>| {
let uri = format!("{}/_bulk", host);
let uri: Uri = uri.parse().unwrap();
let mut builder = hyper::Request::builder();
builder.method(Method::POST);
builder.uri(uri);
builder.header("Content-Type", "application/x-ndjson");
if gzip {
builder.header("Content-Encoding", "gzip");
}
builder.body(body).unwrap()
});
let service = ServiceBuilder::new()
.concurrency_limit(in_flight_limit)
.rate_limit(rate_limit_num, Duration::from_secs(rate_limit_duration))
.retry(policy)
.timeout(Duration::from_secs(timeout))
.service(http_service);
let sink = BatchServiceSink::new(service, acker)
.batched_with_min(
Buffer::new(gzip),
batch_size,
Duration::from_secs(batch_timeout),
)
.with_flat_map(move |e| iter_ok(encode_event(e, &index, &doc_type, &id_key)));
Box::new(sink)
}
fn encode_event(
event: Event,
index: &Template,
doc_type: &String,
id_key: &Option<String>,
) -> Option<Vec<u8>> {
let index = index
.render_string(&event)
.map_err(|keys| {
warn!(
message = "Keys do not exist on the event. Dropping event.",
?keys
);
})
.ok()?;
let mut action = json!({
"index": {
"_index": index,
"_type": doc_type,
}
});
maybe_set_id(
id_key.as_ref(),
action.pointer_mut("/index").unwrap(),
&event,
);
let mut body = serde_json::to_vec(&action).unwrap();
body.push(b'\n');
serde_json::to_writer(&mut body, &event.into_log().unflatten()).unwrap();
body.push(b'\n');
Some(body)
}
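// Illustration (added for clarity, not present in the original source): for an
// event whose index renders to "vector-2019.05.01" with the default doc type,
// the buffer returned above contains two newline-terminated NDJSON lines,
// roughly:
//
//   {"index":{"_index":"vector-2019.05.01","_type":"_doc"}}
//   {"message":"raw log line","timestamp":"..."}
//
// which is the action/document pairing expected by the Elasticsearch _bulk
// endpoint.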
fn healthcheck(host: String) -> super::Healthcheck {
let uri = format!("{}/_cluster/health", host);
let request = Request::get(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
let healthcheck = client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
});
Box::new(healthcheck)
}
fn maybe_set_id(key: Option<impl AsRef<str>>, doc: &mut serde_json::Value, event: &Event) {
if let Some(val) = key.and_then(|k| event.as_log().get(&k.as_ref().into())) {
let val = val.to_string_lossy();
doc.as_object_mut()
.unwrap()
.insert("_id".into(), json!(val));
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Event;
use serde_json::json;
#[test]
fn sets_id_from_custom_field() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({"_id": "bar"}), action);
}
#[test]
fn doesnt_set_id_when_field_missing() {
let id_key = Some("foo");
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("not_foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
#[test]
fn doesnt_set_id_when_not_configured() {
let id_key: Option<&str> = None;
let mut event = Event::from("butts");
event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let mut action = json!({});
maybe_set_id(id_key, &mut action, &event);
assert_eq!(json!({}), action);
}
}
#[cfg(test)]
#[cfg(feature = "es-integration-tests")]
mod integration_tests {
use super::*;
use crate::buffers::Acker;
use crate::{
event,
test_util::{block_on, random_events_with_stream, random_string},
topology::config::SinkConfig,
Event,
};
use elastic::client::SyncClientBuilder;
use futures::{Future, Sink};
use hyper::{Body, Client, Request};
use hyper_tls::HttpsConnector;
use serde_json::{json, Value};
#[test]
fn structures_events_correctly() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
id_key: Some("my_id".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let mut input_event = Event::from("raw log line");
input_event
.as_mut_log()
.insert_explicit("my_id".into(), "42".into());
input_event
.as_mut_log()
.insert_explicit("foo".into(), "bar".into());
let pump = sink.send(input_event.clone());
block_on(pump).unwrap();
// make sure all writes are visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(1, response.total());
let hit = response.into_hits().next().unwrap();
assert_eq!("42", hit.id());
let value = hit.into_document().unwrap();
let expected = json!({
"message": "raw log line",
"my_id": "42",
"foo": "bar",
"timestamp": input_event.as_log()[&event::TIMESTAMP],
});
assert_eq!(expected, value);
}
#[test]
fn insert_events() {
let index = gen_index();
let config = ElasticSearchConfig {
host: "http://localhost:9200/".into(),
index: Some(index.clone()),
doc_type: Some("log_lines".into()),
compression: Some(Compression::None),
batch_size: Some(1),
..Default::default()
};
let (sink, _hc) = config.build(Acker::Null).unwrap();
let (input, events) = random_events_with_stream(100, 100);
let pump = sink.send_all(events);
block_on(pump).unwrap();
// make sure all writes are visible
block_on(flush(config.host)).unwrap();
let client = SyncClientBuilder::new().build().unwrap();
let response = client
.search::<Value>()
.index(index)
.body(json!({
"query": { "query_string": { "query": "*" } }
}))
.send()
.unwrap();
assert_eq!(input.len() as u64, response.total());
let input = input
.into_iter()
.map(|rec| serde_json::to_value(rec.into_log().unflatten()).unwrap())
.collect::<Vec<_>>();
for hit in response.into_hits() {
let event = hit.into_document().unwrap();
assert!(input.contains(&event));
}
}
fn gen_index() -> String {
format!("test-{}", random_string(10).to_lowercase())
}
fn | (host: String) -> impl Future<Item = (), Error = String> {
let uri = format!("{}/_flush", host);
let request = Request::post(uri).body(Body::empty()).unwrap();
let https = HttpsConnector::new(4).expect("TLS initialization failed");
let client = Client::builder().build(https);
client
.request(request)
.map_err(|err| err.to_string())
.and_then(|response| {
if response.status() == hyper::StatusCode::OK {
Ok(())
} else {
Err(format!("Unexpected status: {}", response.status()))
}
})
}
}
| flush | identifier_name |
session.go | package netutil
import (
"crypto/rc4"
"encoding/binary"
"io"
"net"
"sync"
"sync/atomic"
"time"
"github.com/zxfonline/misc/golangtrace"
"github.com/zxfonline/misc/chanutil"
"github.com/zxfonline/misc/expvar"
"github.com/zxfonline/misc/log"
"github.com/zxfonline/misc/timefix"
"github.com/zxfonline/misc/trace"
)
var _sessionID int64
const (
// size of the packet-length header, in bytes
HEAD_SIZE = 4
// size of the auto-incrementing packet sequence number, in bytes
SEQ_ID_SIZE = 4
// size of the message id, in bytes
MSG_ID_SIZE = 2
)
var (
ServerEndian = binary.LittleEndian
)
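// Frame layout shared by ReadLoop and DirectSend below (documented here for
// reference; derived from the code in this file):
//
//   [4-byte length][4-byte sequence id][2-byte msg id][payload]
//
// All integers are little-endian, and the length field counts everything after
// the header: SEQ_ID_SIZE + MSG_ID_SIZE + len(Data).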
const (
SESS_KEYEXCG = 0x1 // whether the key exchange has completed
SESS_ENCRYPT = 0x2 // whether encryption may begin
)
type NetPacket struct {
MsgId uint16
Data []byte
Session *TCPSession
// timestamp at which this packet was received, in milliseconds
ReceiveTime time.Time
}
type NetConnIF interface {
SetReadDeadline(t time.Time) error
SetWriteDeadline(t time.Time) error
Close() error
SetWriteBuffer(bytes int) error
SetReadBuffer(bytes int) error
Write(b []byte) (n int, err error)
RemoteAddr() net.Addr
Read(p []byte) (n int, err error)
}
type TCPSession struct {
//Conn *net.TCPConn
Conn NetConnIF
IP net.IP
SendChan chan *NetPacket
ReadChan chan *NetPacket
// offline-notification channel: lets external code receive disconnect events and handle follow-up work
OffChan chan int64
// ID
SessionId int64
// session flags
Flag int32
// send buffer
sendCache []byte
// send buffer size
sendCacheSize uint32
readDelay time.Duration
sendDelay time.Duration
// Declares how many times we will try to resend a message
MaxSendRetries int
// whether to close the connection once the send channel is full
sendFullClose bool
CloseState chanutil.DoneChan
maxRecvSize uint32
// packet-rate limit: packet count
rpmLimit uint32
// packet-rate limit: measurement interval
rpmInterval time.Duration
// counts received packets, guarding against replay attacks (REPLAY-ATTACK)
PacketRcvSeq uint32
// counter for sent packets
PacketSndSeq uint32
// offline-notification packet sent when the rate limit is exceeded
offLineMsg *NetPacket
OnLineTime int64
OffLineTime int64
EncodeKey []byte
DecodeKey []byte
tr golangtrace.Trace
sendLock sync.Mutex
}
// filter: returning true means the packet was filtered and is dropped; returning false means the packet continues through normal handling
func (s *TCPSession) HandleConn(filter func(*NetPacket) bool) {
go s.ReadLoop(filter)
go s.SendLoop()
}
// remote address of the network connection
func (s *TCPSession) RemoteAddr() net.Addr {
return s.Conn.RemoteAddr()
}
// remote IP of the network connection
func (s *TCPSession) RemoteIp() net.IP {
//addr := s.Conn.RemoteAddr().String()
//host, _, err := net.SplitHostPort(addr)
//if err != nil {
// host = addr
//}
//return net.ParseIP(host).String()
return s.IP
}
func (s *TCPSession) Send(packet *NetPacket) bool {
if packet == nil {
return false
}
if !s.sendFullClose { // blocking send until the channel is closed
select {
case s.SendChan <- packet:
if wait := len(s.SendChan); wait > cap(s.SendChan)/10*5 && wait%20 == 0 {
log.Warnf("session send process,waitChan:%d/%d,msg:%d,session:%d,remote:%s", wait, cap(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
}
return true
case <-s.CloseState:
return false
}
} else { // when the buffered channel is full the connection is closed
select {
case <-s.CloseState:
return false
case s.SendChan <- packet:
if wait := len(s.SendChan); wait > cap(s.SendChan)/10*5 && wait%20 == 0 {
log.Warnf("session send process,waitChan:%d/%d,msg:%d,session:%d,remote:%s", wait, cap(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
}
return true
default:
log.Errorf("session sender overflow,close session,waitChan:%d,msg:%d,session:%d,remote:%s", len(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
s.Close()
return false
}
}
}
// RC4 encryption/decryption
func (s *TCPSession) SetCipher(encodeKey, decodeKey []byte) error {
if len(encodeKey) < 1 || len(encodeKey) > 256 {
return rc4.KeySizeError(len(encodeKey))
}
if len(decodeKey) < 1 || len(decodeKey) > 256 {
return rc4.KeySizeError(len(decodeKey))
}
s.EncodeKey = encodeKey
s.DecodeKey = decodeKey
s.Flag |= SESS_KEYEXCG
return nil
}
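// Note (added for clarity): SetCipher only marks the session as SESS_KEYEXCG.
// DirectSend flips it to SESS_ENCRYPT after the next outgoing packet, so the
// packet that completes the key exchange still leaves unencrypted and every
// packet after it is RC4-encrypted with EncodeKey.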
func (s *TCPSession) ReadLoop(filter func(*NetPacket) bool) {
defer log.PrintPanicStack()
// shut down sending
defer s.Close()
//var delayTimer *time.Timer
//if s.readDelay > 0 {
// delayTimer = time.NewTimer(s.readDelay)
//}
rpmStart := time.Now()
rpmCount := uint32(0)
//rpmMsgCount := 0
// 4-byte packet length
header := make([]byte, HEAD_SIZE)
for {
// read timeout
if s.readDelay > 0 {
s.Conn.SetReadDeadline(time.Now().Add(s.readDelay))
}
// 4-byte packet length
n, err := io.ReadFull(s.Conn, header)
if err != nil {
//if err != io.EOF {
// log.Warnf("error receiving header,bytes:%d,session:%d,remote:%s,err:%v", n, s.SessionId, s.RemoteAddr(), err)
//}
return
}
// packet payload
size := ServerEndian.Uint32(header)
if size < SEQ_ID_SIZE+MSG_ID_SIZE || (s.maxRecvSize != 0 && size > s.maxRecvSize) {
log.Warnf("error receiving,size:%d,head:%+v,session:%d,remote:%s", size, header, s.SessionId, s.RemoteAddr())
return
}
payload := make([]byte, size)
n, err = io.ReadFull(s.Conn, payload)
if err != nil {
log.Warnf("error receiving body,bytes:%d,size:%d,session:%d,remote:%s,err:%v", n, size, s.SessionId, s.RemoteAddr(), err)
return
}
// inbound packet-rate control
if s.rpmLimit > 0 {
rpmCount++
// reached the packet-count limit
if rpmCount > s.rpmLimit {
now := time.Now()
// check the elapsed interval
if now.Sub(rpmStart) < s.rpmInterval {
// after warning three times that operations are too frequent, kick the client offline
//rpmMsgCount++
//if rpmMsgCount > 3 {
// send the rate-too-high notification packet
s.DirectSendAndClose(s.offLineMsg)
log.Errorf("session rpm too high,%d/%s qps,session:%d,remote:%s", rpmCount, s.rpmInterval, s.SessionId, s.RemoteAddr())
return
//}
//// send the rate-too-high notification packet
//s.Send(s.offLineMsg)
}
// limit not exceeded
rpmCount = 0
rpmStart = now
}
}
s.PacketRcvSeq++
//fmt.Printf("a %x\n", payload)
// decrypt
if s.Flag&SESS_ENCRYPT != 0 {
decoder, _ := rc4.NewCipher(s.DecodeKey)
decoder.XORKeyStream(payload, payload)
//fmt.Printf("b1 %x\n", payload)
//} else {
//fmt.Printf("b2 %x\n", payload)
}
// read the client packet sequence number (1, 2, 3, ...)
// every packet sent by the client must carry an auto-incrementing, strictly increasing sequence number
// combined with encryption this prevents replay attacks (REPLAY-ATTACK)
seqId := ServerEndian.Uint32(payload[:SEQ_ID_SIZE])
if seqId != s.PacketRcvSeq {
log.Errorf("session illegal packet sequence id:%v should be:%v size:%v", seqId, s.PacketRcvSeq, len(payload))
return
}
msgId := ServerEndian.Uint16(payload[SEQ_ID_SIZE : SEQ_ID_SIZE+MSG_ID_SIZE])
pack := &NetPacket{MsgId: msgId, Data: payload[SEQ_ID_SIZE+MSG_ID_SIZE:], Session: s, ReceiveTime: time.Now()}
//if s.readDelay > 0 {
// if !delayTimer.Stop() {
// select {
// case <-delayTimer.C:
// default:
// }
// }
// delayTimer.Reset(s.readDelay)
// if filter == nil {
// select {
// case s.ReadChan <- pack:
// case <-delayTimer.C:
// log.Warnf("session read busy or closed,waitChan:%d,session:%d,remote:%s", len(s.ReadChan), s.SessionId, s.RemoteAddr())
// return
// }
// } else {
// if ok := filter(pack); !ok {
// select {
// case s.ReadChan <- pack:
// case <-delayTimer.C:
// log.Warnf("session read busy or closed,waitChan:%d,session:%d,remote:%s", len(s.ReadChan), s.SessionId, s.RemoteAddr())
// return
// }
// }
// }
//} else {
if filter == nil {
s.ReadChan <- pack
} else {
if ok := filter(pack); !ok {
s.ReadChan <- pack
}
}
//}
}
}
func (s *TCPSession) Close() {
s.CloseState.SetDone()
}
func (s *TCPSession) IsClosed() bool {
return s.CloseState.R().Done()
}
func (s *TCPSession) closeTask() {
s.OffLineTime = timefix.SecondTime()
if s.OffChan != nil {
s.OffChan <- s.SessionId
//// tell the server that this player dropped or went offline
//t := time.NewTimer(15 * time.Second)
//select {
//case s.OffChan <- s.SessionId:
// t.Stop()
//case <-t.C:
// log.Warnf("off chan time out,session:%d,remote:%s", s.SessionId, s.RemoteAddr())
//}
}
// close connection
s.Conn.Close()
}
func (s *TCPSession) SendLoop() {
defer log.PrintPanicStack()
for {
select {
case <-s.CloseState:
s.closeTask()
return
case packet := <-s.SendChan:
s.DirectSend(packet)
}
}
}
func (s *TCPSession) DirectSendAndClose(packet *NetPacket) {
go func() {
defer s.Close()
s.DirectSend(packet)
time.Sleep(1 * time.Second)
}()
}
func (s *TCPSession) DirectSend(packet *NetPacket) bool {
if packet == nil {
return true
}
if s.IsClosed() {
return false
}
s.sendLock.Lock()
defer s.sendLock.Unlock()
s.PacketSndSeq++
packLen := uint32(SEQ_ID_SIZE + MSG_ID_SIZE + len(packet.Data))
totalSize := packLen + HEAD_SIZE
if totalSize > s.sendCacheSize {
s.sendCacheSize = totalSize + (totalSize >> 2) // grow by a 1.25x factor
s.sendCache = make([]byte, s.sendCacheSize)
}
// 4-byte packet length
ServerEndian.PutUint32(s.sendCache, packLen)
// 4-byte message sequence number
ServerEndian.PutUint32(s.sendCache[HEAD_SIZE:], s.PacketSndSeq)
// 2-byte message id
ServerEndian.PutUint16(s.sendCache[HEAD_SIZE+SEQ_ID_SIZE:], packet.MsgId)
copy(s.sendCache[HEAD_SIZE+SEQ_ID_SIZE+MSG_ID_SIZE:], packet.Data)
| ption
// (NOT_ENCRYPTED) -> KEYEXCG -> ENCRYPT
if s.Flag&SESS_ENCRYPT != 0 { // encryption is enabled
encoder, _ := rc4.NewCipher(s.EncodeKey)
data := s.sendCache[HEAD_SIZE:totalSize]
encoder.XORKeyStream(data, data)
} else if s.Flag&SESS_KEYEXCG != 0 { // key is exchanged, encryption is not yet enabled
//s.Flag &^= SESS_KEYEXCG
s.Flag |= SESS_ENCRYPT
}
err := s.performSend(s.sendCache[:totalSize], 0)
if err != nil {
log.Debugf("error writing msg,session:%d,remote:%s,err:%v", s.SessionId, s.RemoteAddr(), err)
s.Close()
return false
}
return true
}
func (s *TCPSession) performSend(data []byte, sendRetries int) error {
// write timeout
if s.sendDelay > 0 {
s.Conn.SetWriteDeadline(time.Now().Add(s.sendDelay))
}
_, err := s.Conn.Write(data)
if err != nil {
return s.processSendError(err, data, sendRetries)
}
return nil
}
func (s *TCPSession) processSendError(err error, data []byte, sendRetries int) error {
netErr, ok := err.(net.Error)
if !ok {
return err
}
if s.isNeedToResendMessage(netErr, sendRetries) {
return s.performSend(data, sendRetries+1)
}
//if !netErr.Temporary() {
// // reconnect; leave reconnection to the caller
//}
return err
}
func (s *TCPSession) isNeedToResendMessage(err net.Error, sendRetries int) bool {
return (err.Temporary() || err.Timeout()) && sendRetries < s.MaxSendRetries
}
// set connection parameters
func (s *TCPSession) SetParameter(readDelay, sendDelay time.Duration, maxRecvSize uint32, sendFullClose bool) {
s.maxRecvSize = maxRecvSize
if readDelay >= 0 {
s.readDelay = readDelay
}
if sendDelay >= 0 {
s.sendDelay = sendDelay
}
s.sendFullClose = sendFullClose
}
// packet rate-control parameters
func (s *TCPSession) SetRpmParameter(rpmLimit uint32, rpmInterval time.Duration, msg *NetPacket) {
s.rpmLimit = rpmLimit
s.rpmInterval = rpmInterval
s.offLineMsg = msg
}
func NewSession(conn NetConnIF, readChan, sendChan chan *NetPacket, offChan chan int64) (*TCPSession, error) {
s := &TCPSession{
Conn: conn,
SendChan: sendChan,
ReadChan: readChan,
OffChan: offChan,
SessionId: atomic.AddInt64(&_sessionID, 1),
sendCache: make([]byte, 256),
sendCacheSize: 256,
readDelay: 30 * time.Second,
sendDelay: 10 * time.Second,
sendFullClose: true,
maxRecvSize: 10 * 1024,
OnLineTime: timefix.SecondTime(),
CloseState: chanutil.NewDoneChan(),
}
host, _, err := net.SplitHostPort(conn.RemoteAddr().String())
if err != nil {
log.Error("cannot get remote address:", err)
return nil, err
}
s.IP = net.ParseIP(host)
//log.Debugf("new connection from:%v port:%v", host, port)
return s, nil
}
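// Minimal usage sketch (illustrative only; the channel capacities and the nil
// filter are assumptions, not values required by this package):
//
//   readChan := make(chan *NetPacket, 128)
//   sendChan := make(chan *NetPacket, 128)
//   offChan := make(chan int64, 16)
//   sess, err := NewSession(conn, readChan, sendChan, offChan)
//   if err != nil {
//       return err
//   }
//   sess.HandleConn(nil) // nil filter: every packet is delivered to readChan
//   for pack := range readChan {
//       // dispatch on pack.MsgId; reply via pack.Session.Send(...)
//   }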
func (s *TCPSession) TraceStart(family, title string, expvar bool) {
if trace.EnableTracing {
s.TraceFinish(nil)
s.tr = golangtrace.New(family, title, expvar)
}
}
func (s *TCPSession) TraceStartWithStart(family, title string, expvar bool, startTime time.Time) {
if trace.EnableTracing {
s.TraceFinish(nil)
s.tr = golangtrace.NewWithStart(family, title, expvar, startTime)
}
}
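// Comment added for clarity: TraceFinish's recover dance below works around a
// race on expvar registration. expvar.NewMap panics if the family name is
// already registered, so the losing goroutine recovers and re-fetches the map
// that the winner created.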
func (s *TCPSession) TraceFinish(traceDefer func(*expvar.Map, int64)) {
if s.tr != nil {
tt := s.tr
tt.Finish()
if traceDefer != nil {
family := tt.GetFamily()
req := expvar.Get(family)
if req == nil {
func() {
defer func() {
if v := recover(); v != nil {
req = expvar.Get(family)
}
}()
req = expvar.NewMap(family)
}()
}
traceDefer(req.(*expvar.Map), tt.GetElapsedTime())
}
s.tr = nil
}
}
func (s *TCPSession) TracePrintf(format string, a ...interface{}) {
if s.tr != nil {
s.tr.LazyPrintf(format, a...)
}
}
func (s *TCPSession) TraceErrorf(format string, a ...interface{}) {
if s.tr != nil {
s.tr.LazyPrintf(format, a...)
s.tr.SetError()
}
}
|
// encry | identifier_name |
session.go | package netutil
import (
"crypto/rc4"
"encoding/binary"
"io"
"net"
"sync"
"sync/atomic"
"time"
"github.com/zxfonline/misc/golangtrace"
"github.com/zxfonline/misc/chanutil"
"github.com/zxfonline/misc/expvar"
"github.com/zxfonline/misc/log"
"github.com/zxfonline/misc/timefix"
"github.com/zxfonline/misc/trace"
)
var _sessionID int64
const (
// size of the packet-length header, in bytes
HEAD_SIZE = 4
// size of the auto-incrementing packet sequence number, in bytes
SEQ_ID_SIZE = 4
// size of the message id, in bytes
MSG_ID_SIZE = 2
)
var (
ServerEndian = binary.LittleEndian
)
const (
SESS_KEYEXCG = 0x1 // whether the key exchange has completed
SESS_ENCRYPT = 0x2 // whether encryption may begin
)
type NetPacket struct {
MsgId uint16
Data []byte
Session *TCPSession
// timestamp at which this packet was received, in milliseconds
ReceiveTime time.Time
}
type NetConnIF interface {
SetReadDeadline(t time.Time) error
SetWriteDeadline(t time.Time) error
Close() error
SetWriteBuffer(bytes int) error
SetReadBuffer(bytes int) error
Write(b []byte) (n int, err error)
RemoteAddr() net.Addr
Read(p []byte) (n int, err error)
}
type TCPSession struct {
//Conn *net.TCPConn
Conn NetConnIF
IP net.IP
SendChan chan *NetPacket
ReadChan chan *NetPacket
// offline-notification channel: lets external code receive disconnect events and handle follow-up work
OffChan chan int64
// ID
SessionId int64
// session flags
Flag int32
// send buffer
sendCache []byte
// send buffer size
sendCacheSize uint32
readDelay time.Duration
sendDelay time.Duration
// Declares how many times we will try to resend a message
MaxSendRetries int
// whether to close the connection once the send channel is full
sendFullClose bool
CloseState chanutil.DoneChan
maxRecvSize uint32
// packet-rate limit: packet count
rpmLimit uint32
// packet-rate limit: measurement interval
rpmInterval time.Duration
// counts received packets, guarding against replay attacks (REPLAY-ATTACK)
PacketRcvSeq uint32
// counter for sent packets
PacketSndSeq uint32
// offline-notification packet sent when the rate limit is exceeded
offLineMsg *NetPacket
OnLineTime int64
OffLineTime int64
EncodeKey []byte
DecodeKey []byte
tr golangtrace.Trace
sendLock sync.Mutex
}
// filter: returning true means the packet was filtered and is dropped; returning false means the packet continues through normal handling
func (s *TCPSession) HandleConn(filter func(*NetPacket) bool) {
go s.ReadLoop(filter)
go s.SendLoop()
}
// remote address of the network connection
func (s *TCPSession) RemoteAddr() net.Addr {
return s.Conn.RemoteAddr()
}
// remote IP of the network connection
func (s *TCPSession) RemoteIp() net.IP {
//addr := s.Conn.RemoteAddr().String()
//host, _, err := net.SplitHostPort(addr)
//if err != nil {
// host = addr
//}
//return net.ParseIP(host).String()
return s.IP
}
func (s *TCPSession) Send(packet *NetPacket) bool {
if packet == nil {
return false
}
if !s.sendFullClose { // blocking send until the channel is closed
select {
case s.SendChan <- packet:
if wait := len(s.SendChan); wait > cap(s.SendChan)/10*5 && wait%20 == 0 {
log.Warnf("session send process,waitChan:%d/%d,msg:%d,session:%d,remote:%s", wait, cap(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
}
return true
case <-s.CloseState:
return false
}
} else { // when the buffered channel is full the connection is closed
select {
case <-s.CloseState:
return false
case s.SendChan <- packet:
if wait := len(s.SendChan); wait > cap(s.SendChan)/10*5 && wait%20 == 0 {
log.Warnf("session send process,waitChan:%d/%d,msg:%d,session:%d,remote:%s", wait, cap(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
}
return true
default:
log.Errorf("session sender overflow,close session,waitChan:%d,msg:%d,session:%d,remote:%s", len(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
s.Close()
return false
}
}
}
// RC4 encryption/decryption
func (s *TCPSession) SetCipher(encodeKey, decodeKey []byte) error {
if len(encodeKey) < 1 || len(encodeKey) > 256 {
return rc4.KeySizeError(len(encodeKey))
}
if len(decodeKey) < 1 || len(decodeKey) > 256 {
return rc4.KeySizeError(len(decodeKey))
}
s.EncodeKey = encodeKey
s.DecodeKey = decodeKey
s.Flag |= SESS_KEYEXCG
return nil
}
func (s *TCPSession) ReadLoop(filter func(*NetPacket) bool) {
defer log.PrintPanicStack()
// shut down sending
defer s.Close()
//var delayTimer *time.Timer
//if s.readDelay > 0 {
// delayTimer = time.NewTimer(s.readDelay)
//}
rpmStart := time.Now()
rpmCount := uint32(0)
//rpmMsgCount := 0
// 4-byte packet length
header := make([]byte, HEAD_SIZE)
for {
// read timeout
if s.readDelay > 0 {
s.Conn.SetReadDeadline(time.Now().Add(s.readDelay))
}
// 4-byte packet length
n, err := io.ReadFull(s.Conn, header)
if err != nil {
//if err != io.EOF {
// log.Warnf("error receiving header,bytes:%d,session:%d,remote:%s,err:%v", n, s.SessionId, s.RemoteAddr(), err)
//}
return
}
// packet payload
size := ServerEndian.Uint32(header)
if size < SEQ_ID_SIZE+MSG_ID_SIZE || (s.maxRecvSize != 0 && size > s.maxRecvSize) {
log.Warnf("error receiving,size:%d,head:%+v,session:%d,remote:%s", size, header, s.SessionId, s.RemoteAddr())
return
}
payload := make([]byte, size)
n, err = io.ReadFull(s.Conn, payload)
if err != nil {
log.Warnf("error receiving body,bytes:%d,size:%d,session:%d,remote:%s,err:%v", n, size, s.SessionId, s.RemoteAddr(), err)
return
}
// inbound packet-rate control
if s.rpmLimit > 0 {
rpmCount++
// reached the packet-count limit
if rpmCount > s.rpmLimit {
now := time.Now()
// check the elapsed interval
if now.Sub(rpmStart) < s.rpmInterval {
// after warning three times that operations are too frequent, kick the client offline
//rpmMsgCount++
//if rpmMsgCount > 3 {
// send the rate-too-high notification packet
s.DirectSendAndClose(s.offLineMsg)
log.Errorf("session rpm too high,%d/%s qps,session:%d,remote:%s", rpmCount, s.rpmInterval, s.SessionId, s.RemoteAddr())
return
//}
//// send the rate-too-high notification packet
//s.Send(s.offLineMsg)
}
// limit not exceeded
rpmCount = 0
rpmStart = now
}
}
s.PacketRcvSeq++
//fmt.Printf("a %x\n", payload)
// decrypt
if s.Flag&SESS_ENCRYPT != 0 {
decoder, _ := rc4.NewCipher(s.DecodeKey)
decoder.XORKeyStream(payload, payload)
//fmt.Printf("b1 %x\n", payload)
//} else {
//fmt.Printf("b2 %x\n", payload)
}
// read the client packet sequence number (1, 2, 3, ...)
// every packet sent by the client must carry an auto-incrementing, strictly increasing sequence number
// combined with encryption this prevents replay attacks (REPLAY-ATTACK)
seqId := ServerEndian.Uint32(payload[:SEQ_ID_SIZE])
if seqId != s.PacketRcvSeq {
log.Errorf("session illegal packet sequence id:%v should be:%v size:%v", seqId, s.PacketRcvSeq, len(payload))
return
}
msgId := ServerEndian.Uint16(payload[SEQ_ID_SIZE : SEQ_ID_SIZE+MSG_ID_SIZE])
pack := &NetPacket{MsgId: msgId, Data: payload[SEQ_ID_SIZE+MSG_ID_SIZE:], Session: s, ReceiveTime: time.Now()}
//if s.readDelay > 0 {
// if !delayTimer.Stop() {
// select {
// case <-delayTimer.C:
// default:
// }
// }
// delayTimer.Reset(s.readDelay)
// if filter == nil {
// select {
// case s.ReadChan <- pack:
// case <-delayTimer.C:
// log.Warnf("session read busy or closed,waitChan:%d,session:%d,remote:%s", len(s.ReadChan), s.SessionId, s.RemoteAddr())
// return
// }
// } else {
// if ok := filter(pack); !ok {
// select {
// case s.ReadChan <- pack:
// case <-delayTimer.C:
// log.Warnf("session read busy or closed,waitChan:%d,session:%d,remote:%s", len(s.ReadChan), s.SessionId, s.RemoteAddr())
// return
// }
// }
// }
//} else {
if filter == nil {
s.ReadChan <- pack
} else {
if ok := filter(pack); !ok {
s.ReadChan <- pack
}
}
//}
}
}
func (s *TCPSession) Close() {
s.CloseState.SetDone()
}
func (s *TCPSession) IsClosed() bool {
return s.CloseState.R().Done()
}
func (s *TCPSession) closeTask() {
s.OffLineTime = timefix.SecondTime()
if s.OffChan != nil {
s.OffChan <- s.SessionId
//// tell the server that this player dropped or went offline
//t := time.NewTimer(15 * time.Second)
//select {
//case s.OffChan <- s.SessionId:
// t.Stop()
//case <-t.C:
// log.Warnf("off chan time out,session:%d,remote:%s", s.SessionId, s.RemoteAddr())
//}
}
// close connection
s.Conn.Close()
}
func (s *TCPSession) SendLoop() {
defer log.PrintPanicStack()
for {
select {
case <-s.CloseState:
s.closeTask()
return
case packet := <-s.SendChan:
s.DirectSend(packet)
}
}
}
func (s *TCPSession) DirectSendAndClose(packet *NetPacket) {
go func() {
defer s.Close()
s.DirectSend(packet)
time.Sleep(1 * time.Second)
}()
}
func (s *TCPSession) DirectSend(packet *NetPacket) bool {
if packet == nil {
return true
}
if s.IsClosed() {
return false
}
s.sendLock.Lock()
defer s.sendLock.Unlock()
s.PacketSndSeq++
packLen := uint32(SEQ_ID_SIZE + MSG_ID_SIZE + len(packet.Data))
totalSize := packLen + HEAD_SIZE
if totalSize > s.sendCacheSize {
s.sendCacheSize = totalSize + (totalSize >> 2) // grow by a 1.25x factor
s.sendCache = make([]byte, s.sendCacheSize)
}
// 4-byte packet length
ServerEndian.PutUint32(s.sendCache, packLen)
// 4-byte message sequence number
ServerEndian.PutUint32(s.sendCache[HEAD_SIZE:], s.PacketSndSeq)
// 2-byte message id
ServerEndian.PutUint16(s.sendCache[HEAD_SIZE+SEQ_ID_SIZE:], packet.MsgId)
copy(s.sendCache[HEAD_SIZE+SEQ_ID_SIZE+MSG_ID_SIZE:], packet.Data)
// encryption
// (NOT_ENCRYPTED) -> KEYEXCG -> ENCRYPT
if s.Flag&SESS_ENCRYPT != 0 { // encryption is enabled
encoder, _ := rc4.NewCipher(s.EncodeKey)
data := s.sendCache[HEAD_SIZE:totalSize]
encoder.XORKeyStream(data, data)
} else if s.Flag&SESS_KEYEXCG != 0 { // key is exchanged, encryption is not yet enabled
//s.Flag &^= SESS_KEYEXCG
s.Flag |= SESS_ENCRYPT
}
err := s.performSend(s.sendCache[:totalSize], 0)
if err != nil {
log.Debugf("error writing msg,session:%d,remote:%s,err:%v", s.SessionId, s.RemoteAddr(), err)
s.Close()
return false
}
return true
}
func (s *TCPSession) performSend(data []byte, sendRetries int) error {
// write timeout
if s.sendDelay > 0 {
s.Conn.SetWriteDeadline(time.Now().Add(s.sendDelay))
}
_, err := s.Conn.Write(data)
if err != nil {
return s.processSendError(err, data, sendRetries)
}
return nil
}
func (s *TCPSession) processSendError(err error, data []byte, sendRetries int) error {
netErr, ok := err.(net.Error)
if !ok {
return err
}
if s.isNeedToResendMessage(netErr, sendRetries) {
return s.performSend(data, sendRetries+1)
}
//if !netErr.Temporary() {
// // reconnect; leave reconnection to the caller
//}
return err
}
func (s *TCPSession) isNeedToResendMessage(err net.Error, sendRetries int) bool {
return (err.Temporary() || err.Timeout()) && sendRetries < s.MaxSendRetries
}
// set connection parameters
func (s *TCPSession) SetParameter(readDelay, sendDelay time.Duration, maxRecvSize uint32, sendFullClose bool) {
s.maxRecvSize = maxRecvSize
if readDelay >= 0 {
s.readDelay = readDelay
}
if sendDelay >= 0 {
s.sendDelay = sendDelay
}
s.sendFullClose = sendFullClose
}
// packet rate-control parameters
func (s *TCPSession) SetRpmParameter(rpmLimit uint32, rpmInterval time.Duration, msg *NetPacket) {
s.rpmLimit = rpmLimit
s.rpmInterval = rpmInterval
s.offLineMsg = msg
}
func NewSession(conn NetConnIF, readChan, sendChan chan *NetPacket, offChan chan int64) (*TCPSession, error) {
s := &TCPSession{
Conn: conn,
SendChan: sendChan,
ReadChan: readChan,
OffChan: offChan,
SessionId: atomic.AddInt64(&_sessionID, 1),
sendCache: make([]byte, 256),
sendCacheSize: 256,
readDelay: 30 * time.Second,
sendDelay: 10 * time.Second,
sendFullClose: true,
maxRecvSize: 10 * 1024,
OnLineTime: timefix.SecondTime(),
CloseState: chanutil | rr := net.SplitHostPort(conn.RemoteAddr().String())
if err != nil {
log.Error("cannot get remote address:", err)
return nil, err
}
s.IP = net.ParseIP(host)
//log.Debugf("new connection from:%v port:%v", host, port)
return s, nil
}
func (s *TCPSession) TraceStart(family, title string, expvar bool) {
if trace.EnableTracing {
s.TraceFinish(nil)
s.tr = golangtrace.New(family, title, expvar)
}
}
func (s *TCPSession) TraceStartWithStart(family, title string, expvar bool, startTime time.Time) {
if trace.EnableTracing {
s.TraceFinish(nil)
s.tr = golangtrace.NewWithStart(family, title, expvar, startTime)
}
}
func (s *TCPSession) TraceFinish(traceDefer func(*expvar.Map, int64)) {
if s.tr != nil {
tt := s.tr
tt.Finish()
if traceDefer != nil {
family := tt.GetFamily()
req := expvar.Get(family)
if req == nil {
func() {
defer func() {
if v := recover(); v != nil {
req = expvar.Get(family)
}
}()
req = expvar.NewMap(family)
}()
}
traceDefer(req.(*expvar.Map), tt.GetElapsedTime())
}
s.tr = nil
}
}
func (s *TCPSession) TracePrintf(format string, a ...interface{}) {
if s.tr != nil {
s.tr.LazyPrintf(format, a...)
}
}
func (s *TCPSession) TraceErrorf(format string, a ...interface{}) {
if s.tr != nil {
s.tr.LazyPrintf(format, a...)
s.tr.SetError()
}
}
| .NewDoneChan(),
}
host, _, e | conditional_block |
session.go | package netutil
import (
"crypto/rc4"
"encoding/binary"
"io"
"net"
"sync"
"sync/atomic"
"time"
"github.com/zxfonline/misc/golangtrace"
"github.com/zxfonline/misc/chanutil"
"github.com/zxfonline/misc/expvar"
"github.com/zxfonline/misc/log"
"github.com/zxfonline/misc/timefix"
"github.com/zxfonline/misc/trace"
)
var _sessionID int64
const (
// size of the packet-length header, in bytes
HEAD_SIZE = 4
// size of the auto-incrementing packet sequence number, in bytes
SEQ_ID_SIZE = 4
// size of the message id, in bytes
MSG_ID_SIZE = 2
)
var (
ServerEndian = binary.LittleEndian
)
const (
SESS_KEYEXCG = 0x1 // whether the key exchange has completed
SESS_ENCRYPT = 0x2 // whether encryption may begin
)
type NetPacket struct {
MsgId uint16
Data []byte
Session *TCPSession
// timestamp at which this packet was received, in milliseconds
ReceiveTime time.Time
}
type NetConnIF interface {
SetReadDeadline(t time.Time) error
SetWriteDeadline(t time.Time) error
Close() error
SetWriteBuffer(bytes int) error
SetReadBuffer(bytes int) error
Write(b []byte) (n int, err error)
RemoteAddr() net.Addr
Read(p []byte) (n int, err error)
}
type TCPSession struct {
//Conn *net.TCPConn
Conn NetConnIF
IP net.IP
SendChan chan *NetPacket
ReadChan chan *NetPacket
// offline-notification channel: lets external code receive disconnect events and handle follow-up work
OffChan chan int64
// ID
SessionId int64
// session flags
Flag int32
// send buffer
sendCache []byte
// send buffer size
sendCacheSize uint32
readDelay time.Duration
sendDelay time.Duration
// Declares how many times we will try to resend a message
MaxSendRetries int
// whether to close the connection once the send channel is full
sendFullClose bool
CloseState chanutil.DoneChan
maxRecvSize uint32
// packet-rate limit: packet count
rpmLimit uint32
// packet-rate limit: measurement interval
rpmInterval time.Duration
// counts received packets, guarding against replay attacks (REPLAY-ATTACK)
PacketRcvSeq uint32
// counter for sent packets
PacketSndSeq uint32
// offline-notification packet sent when the rate limit is exceeded
offLineMsg *NetPacket
OnLineTime int64
OffLineTime int64
EncodeKey []byte
DecodeKey []byte
tr golangtrace.Trace
sendLock sync.Mutex
}
// filter: returning true means the packet was filtered and is dropped; returning false means the packet continues through normal handling
func (s *TCPSession) HandleConn(filter func(*NetPacket) bool) {
go s.ReadLoop(filter)
go s.SendLoop()
}
// remote address of the network connection
func (s *TCPSession) RemoteAddr() net.Addr {
return s.Conn.RemoteAddr()
}
// remote IP of the network connection
func (s *TCPSession) RemoteIp() net.IP {
//addr := s.Conn.RemoteAddr().String()
//host, _, err := net.SplitHostPort(addr)
//if err != nil {
// host = addr
//}
//return net.ParseIP(host).String()
return s.IP
}
func (s *TCPSession) Send(packet *NetPacket) bool {
if packet == nil {
return false
}
if !s.sendFullClose { // blocking send until the channel is closed
select {
case s.SendChan <- packet:
if wait := len(s.SendChan); wait > cap(s.SendChan)/10*5 && wait%20 == 0 {
log.Warnf("session send process,waitChan:%d/%d,msg:%d,session:%d,remote:%s", wait, cap(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
}
return true
case <-s.CloseState:
return false
}
} else { // when the buffered channel is full the connection is closed
select {
case <-s.CloseState:
return false
case s.SendChan <- packet:
if wait := len(s.SendChan); wait > cap(s.SendChan)/10*5 && wait%20 == 0 {
log.Warnf("session send process,waitChan:%d/%d,msg:%d,session:%d,remote:%s", wait, cap(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
}
return true
default:
log.Errorf("session sender overflow,close session,waitChan:%d,msg:%d,session:%d,remote:%s", len(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
s.Close()
return false
}
}
}
// RC4 encryption/decryption
func (s *TCPSession) SetCipher(encodeKey, decodeKey []byte) error {
if len(encodeKey) < 1 || len(encodeKey) > 256 {
return rc4.KeySizeError(len(encodeKey))
}
if len(decodeKey) < 1 || len(decodeKey) > 256 {
return rc4.KeySizeError(len(decodeKey))
}
s.EncodeKey = encodeKey
s.DecodeKey = decodeKey
s.Flag |= SESS_KEYEXCG
return nil
}
func (s *TCPSession) ReadLoop(filter func(*NetPacket) bool) {
defer log.PrintPanicStack()
// shut down sending
defer s.Close()
//var delayTimer *time.Timer
//if s.readDelay > 0 {
// delayTimer = time.NewTimer(s.readDelay)
//}
rpmStart := time.Now()
rpmCount := uint32(0)
//rpmMsgCount := 0
// 4-byte packet length
header := make([]byte, HEAD_SIZE)
for {
// read timeout
if s.readDelay > 0 {
s.Conn.SetReadDeadline(time.Now().Add(s.readDelay))
}
// 4-byte packet length
n, err := io.ReadFull(s.Conn, header)
if err != nil {
//if err != io.EOF {
// log.Warnf("error receiving header,bytes:%d,session:%d,remote:%s,err:%v", n, s.SessionId, s.RemoteAddr(), err)
//}
return
}
// packet payload
size := ServerEndian.Uint32(header)
if size < SEQ_ID_SIZE+MSG_ID_SIZE || (s.maxRecvSize != 0 && size > s.maxRecvSize) {
log.Warnf("error receiving,size:%d,head:%+v,session:%d,remote:%s", size, header, s.SessionId, s.RemoteAddr())
return
}
payload := make([]byte, size)
n, err = io.ReadFull(s.Conn, payload)
if err != nil {
log.Warnf("error receiving body,bytes:%d,size:%d,session:%d,remote:%s,err:%v", n, size, s.SessionId, s.RemoteAddr(), err)
return
}
// inbound packet-rate control
if s.rpmLimit > 0 {
rpmCount++
// reached the packet-count limit
if rpmCount > s.rpmLimit {
now := time.Now()
// check the elapsed interval
if now.Sub(rpmStart) < s.rpmInterval {
// after warning three times that operations are too frequent, kick the client offline
//rpmMsgCount++
//if rpmMsgCount > 3 {
// send the rate-too-high notification packet
s.DirectSendAndClose(s.offLineMsg)
log.Errorf("session rpm too high,%d/%s qps,session:%d,remote:%s", rpmCount, s.rpmInterval, s.SessionId, s.RemoteAddr())
return
//}
//// send the rate-too-high notification packet
//s.Send(s.offLineMsg)
}
// limit not exceeded
rpmCount = 0
rpmStart = now
}
}
s.PacketRcvSeq++
//fmt.Printf("a %x\n", payload)
// decrypt
if s.Flag&SESS_ENCRYPT != 0 {
decoder, _ := rc4.NewCipher(s.DecodeKey)
decoder.XORKeyStream(payload, payload)
//fmt.Printf("b1 %x\n", payload)
//} else {
//fmt.Printf("b2 %x\n", payload)
}
// read the client packet sequence number (1, 2, 3, ...)
// every packet sent by the client must carry an auto-incrementing, strictly increasing sequence number
// combined with encryption this prevents replay attacks (REPLAY-ATTACK)
seqId := ServerEndian.Uint32(payload[:SEQ_ID_SIZE])
if seqId != s.PacketRcvSeq {
log.Errorf("session illegal packet sequence id:%v should be:%v size:%v", seqId, s.PacketRcvSeq, len(payload))
return
}
msgId := ServerEndian.Uint16(payload[SEQ_ID_SIZE : SEQ_ID_SIZE+MSG_ID_SIZE])
pack := &NetPacket{MsgId: msgId, Data: payload[SEQ_ID_SIZE+MSG_ID_SIZE:], Session: s, ReceiveTime: time.Now()}
//if s.readDelay > 0 {
// if !delayTimer.Stop() {
// select {
// case <-delayTimer.C:
// default:
// }
// }
// delayTimer.Reset(s.readDelay)
// if filter == nil {
// select {
// case s.ReadChan <- pack:
// case <-delayTimer.C:
// log.Warnf("session read busy or closed,waitChan:%d,session:%d,remote:%s", len(s.ReadChan), s.SessionId, s.RemoteAddr())
// return
// }
// } else {
// if ok := filter(pack); !ok {
// select {
// case s.ReadChan <- pack:
// case <-delayTimer.C:
// log.Warnf("session read busy or closed,waitChan:%d,session:%d,remote:%s", len(s.ReadChan), s.SessionId, s.RemoteAddr())
// return
// }
// }
// }
//} else {
if filter == nil {
s.ReadChan <- pack
} else {
if ok := filter(pack); !ok {
s.ReadChan <- pack
}
}
//}
}
}
func (s *TCPSession) Close() {
s.CloseState.SetDone()
}
func (s *TCPSession) IsClosed() bool {
return s.CloseState.R().Done()
}
func (s *TCPSession) closeTask() {
s.OffLineTime = timefix.SecondTime()
if s.OffChan != nil {
s.OffChan <- s.SessionId
//// tell the server that this player dropped or went offline
//t := time.NewTimer(15 * time.Second)
//select {
//case s.OffChan <- s.SessionId:
// t.Stop()
//case <-t.C:
// log.Warnf("off chan time out,session:%d,remote:%s", s.SessionId, s.RemoteAddr())
//}
}
// close connection
s.Conn.Close()
}
func (s *TCPSession) SendLoop() {
defer log.PrintPanicStack()
for {
select {
case <-s.CloseState:
s.closeTask()
return
case packet := <-s.SendChan:
s.DirectSend(packet)
}
}
}
func (s *TCPSession) DirectSendAndClose(packet *NetPacket) {
go func() {
defer s.Close()
s.DirectSend(packet)
time.Sleep(1 * time.Second)
}()
}
func (s *TCPSession) DirectSend(packet *NetPacket) bool {
if packet == nil {
return true
}
if s.IsClosed() {
return false
}
s.sendLock.Lock()
defer s.sendLock.Unlock()
s.PacketSndSeq++
packLen := uint32(SEQ_ID_SIZE + MSG_ID_SIZE + len(packet.Data))
totalSize := packLen + HEAD_SIZE
if totalSize > s.sendCacheSize {
s.sendCacheSize = totalSize + (totalSize >> 2) // grow by a 1.25x factor
s.sendCache = make([]byte, s.sendCacheSize)
}
// 4-byte packet length
ServerEndian.PutUint32(s.sendCache, packLen)
// 4-byte message sequence number
ServerEndian.PutUint32(s.sendCache[HEAD_SIZE:], s.PacketSndSeq)
// 2-byte message id
ServerEndian.PutUint16(s.sendCache[HEAD_SIZE+SEQ_ID_SIZE:], packet.MsgId)
copy(s.sendCache[HEAD_SIZE+SEQ_ID_SIZE+MSG_ID_SIZE:], packet.Data)
// encryption
// (NOT_ENCRYPTED) -> KEYEXCG -> ENCRYPT
if s.Flag&SESS_ENCRYPT != 0 { // encryption is enabled
encoder, _ := rc4.NewCipher(s.EncodeKey)
data := s.sendCache[HEAD_SIZE:totalSize]
encoder.XORKeyStream(data, data)
} else if s.Flag&SESS_KEYEXCG != 0 { // key is exchanged, encryption is not yet enabled
//s.Flag &^= SESS_KEYEXCG
s.Flag |= SESS_ENCRYPT
}
err := s.performSend(s.sendCache[:totalSize], 0)
if err != nil {
log.Debugf("error writing msg,session:%d,remote:%s,err:%v", s.SessionId, s.RemoteAddr(), err)
s.Close()
return false
}
return true
}
func (s *TCPSession) performSend(data []byte, sendRetries int) error {
// write timeout
if s.sendDelay > 0 {
s.Conn.SetWriteDeadline(time.Now().Add(s.sendDelay))
}
_, err := s.Conn.Write(data)
if err != nil {
return s.processSendError(err, data, sendRetries)
}
return nil
}
func (s *TCPSession) processSendError(err error, data []byte, sendRetries int) error {
netErr, ok := err.(net.Error)
if !ok {
return err
}
if s.isNeedToResendMessage(netErr, sendRetries) {
return s.performSend(data, sendRetries+1)
}
//if !netErr.Temporary() {
// // reconnect; leave reconnection to the caller
//}
return err
}
func (s *TCPSession) isNeedToResendMessage(err net.Error, sendRetries int) bool {
return (err.Temporary() || err.Timeout()) && sendRetries < s.MaxSendRetries
}
// set connection parameters
func (s *TCPSession) SetParameter(readDelay, sendDelay time.Duration, maxRecvSize uint32, sendFullClose bool) {
s.maxRecvSize = maxRecvSize
if readDelay >= 0 {
s.readDelay = readDelay
}
if sendDelay >= 0 {
s.sendDelay = sendDelay
}
s.sendFullClose = sendFullClose
}
// packet rate-control parameters
func (s *TCPSession) SetRpmParameter(rpmLimit uint32, rpmInterval time.Duration, msg *NetPacket) {
s.rpmLimit = rpmLimit
s.rpmInterval = rpmInterval
s.offLineMsg = msg
}
func NewSession(conn NetConnIF, readChan, sendChan chan *NetPacket, offChan chan int64) (*TCPSession, error) {
s := &TCPSession{
Conn: conn,
SendChan: sendChan,
ReadChan: readChan,
OffChan: offChan,
SessionId: atomic.AddInt64(&_sessionID, 1),
sendCache: make([]byte, 256),
sendCacheSize: 256,
readDelay: 30 * time.Second,
sendDelay: 10 * time.Second,
sendFullClose: true,
maxRecvSize: 10 * 1024,
OnLineTime: timefix.SecondTime(),
CloseState: chanutil.NewDoneChan(),
}
host, _, err := net.SplitHostPort(conn.RemoteAddr().String())
if err != nil {
log.Error("cannot get remote address:", err)
return nil, err
}
s.IP = net.ParseIP(host)
//log.Debugf("new connection from:%v port:%v", host, port)
return s, nil
}
func (s *TCPSession) TraceStart(family, title string, expvar bool) {
if trace.EnableTracing {
s.TraceFinish(nil)
s.tr = golangtrace.New(family, title, expvar)
}
}
func (s *TCPSession) TraceStartWithStart(family, title string, expvar bool, startTime time.Time) {
if trace.EnableTracing {
s.TraceFinish(nil)
s.tr = golangtrace.NewWithStart(family, title, expvar, startTime)
}
}
func (s *TCPSession) TraceFinish(traceDefer func(*expvar.Map, int64)) {
if s.tr != nil {
tt := s.tr
tt.Finish()
if traceDefer != nil {
family := tt.GetFamily()
req := expvar.Get(family)
if req == nil {
func() {
defer func() {
if v := recover(); v != nil {
req = expvar.Get(family)
}
}()
req = expvar.NewMap(family)
}()
}
traceDefer(req.(*expvar.Map), tt.GetElapsedTime())
}
s.tr = nil
}
}
func (s *TCPSession) TracePrintf(format string, a ...interface{}) {
if s.tr != nil {
s.tr.LazyPrintf(format, a...)
}
}
func (s *TCPSession) TraceErrorf(format string, a ...interface{}) {
if s.tr != nil {
s.tr.LazyPrintf(format, a...)
s.tr.SetError()
}
}
| identifier_body |
||
session.go | package netutil
import (
"crypto/rc4"
"encoding/binary"
"io"
"net"
"sync"
"sync/atomic"
"time"
"github.com/zxfonline/misc/golangtrace"
"github.com/zxfonline/misc/chanutil"
"github.com/zxfonline/misc/expvar"
"github.com/zxfonline/misc/log"
"github.com/zxfonline/misc/timefix"
"github.com/zxfonline/misc/trace"
)
var _sessionID int64
const (
// size of the packet-length header, in bytes
HEAD_SIZE = 4
// size of the auto-incrementing packet sequence number, in bytes
SEQ_ID_SIZE = 4
// size of the message id, in bytes
MSG_ID_SIZE = 2
)
var (
ServerEndian = binary.LittleEndian
)
const (
SESS_KEYEXCG = 0x1 // whether the key exchange has completed
SESS_ENCRYPT = 0x2 // whether encryption may begin
)
type NetPacket struct {
MsgId uint16
Data []byte
Session *TCPSession
//收到该消息包的时间戳 毫秒 | }
type NetConnIF interface {
SetReadDeadline(t time.Time) error
SetWriteDeadline(t time.Time) error
Close() error
SetWriteBuffer(bytes int) error
SetReadBuffer(bytes int) error
Write(b []byte) (n int, err error)
RemoteAddr() net.Addr
Read(p []byte) (n int, err error)
}
type TCPSession struct {
//Conn *net.TCPConn
Conn NetConnIF
IP net.IP
SendChan chan *NetPacket
ReadChan chan *NetPacket
// offline-notification channel: lets external code receive disconnect events and handle follow-up work
OffChan chan int64
// ID
SessionId int64
// session flags
Flag int32
// send buffer
sendCache []byte
// send buffer size
sendCacheSize uint32
readDelay time.Duration
sendDelay time.Duration
// Declares how many times we will try to resend a message
MaxSendRetries int
// whether to close the connection once the send channel is full
sendFullClose bool
CloseState chanutil.DoneChan
maxRecvSize uint32
// packet-rate limit: packet count
rpmLimit uint32
// packet-rate limit: measurement interval
rpmInterval time.Duration
// counts received packets, guarding against replay attacks (REPLAY-ATTACK)
PacketRcvSeq uint32
// counter for sent packets
PacketSndSeq uint32
// offline-notification packet sent when the rate limit is exceeded
offLineMsg *NetPacket
OnLineTime int64
OffLineTime int64
EncodeKey []byte
DecodeKey []byte
tr golangtrace.Trace
sendLock sync.Mutex
}
// filter: returning true means the packet was filtered and is dropped; returning false means the packet continues through normal handling
func (s *TCPSession) HandleConn(filter func(*NetPacket) bool) {
go s.ReadLoop(filter)
go s.SendLoop()
}
// remote address of the network connection
func (s *TCPSession) RemoteAddr() net.Addr {
return s.Conn.RemoteAddr()
}
// remote IP of the network connection
func (s *TCPSession) RemoteIp() net.IP {
//addr := s.Conn.RemoteAddr().String()
//host, _, err := net.SplitHostPort(addr)
//if err != nil {
// host = addr
//}
//return net.ParseIP(host).String()
return s.IP
}
func (s *TCPSession) Send(packet *NetPacket) bool {
if packet == nil {
return false
}
if !s.sendFullClose { // blocking send until the channel is closed
select {
case s.SendChan <- packet:
if wait := len(s.SendChan); wait > cap(s.SendChan)/10*5 && wait%20 == 0 {
log.Warnf("session send process,waitChan:%d/%d,msg:%d,session:%d,remote:%s", wait, cap(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
}
return true
case <-s.CloseState:
return false
}
} else { // when the buffered channel is full the connection is closed
select {
case <-s.CloseState:
return false
case s.SendChan <- packet:
if wait := len(s.SendChan); wait > cap(s.SendChan)/10*5 && wait%20 == 0 {
log.Warnf("session send process,waitChan:%d/%d,msg:%d,session:%d,remote:%s", wait, cap(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
}
return true
default:
log.Errorf("session sender overflow,close session,waitChan:%d,msg:%d,session:%d,remote:%s", len(s.SendChan), packet.MsgId, s.SessionId, s.RemoteAddr())
s.Close()
return false
}
}
}
// RC4 encryption/decryption
func (s *TCPSession) SetCipher(encodeKey, decodeKey []byte) error {
if len(encodeKey) < 1 || len(encodeKey) > 256 {
return rc4.KeySizeError(len(encodeKey))
}
if len(decodeKey) < 1 || len(decodeKey) > 256 {
return rc4.KeySizeError(len(decodeKey))
}
s.EncodeKey = encodeKey
s.DecodeKey = decodeKey
s.Flag |= SESS_KEYEXCG
return nil
}
func (s *TCPSession) ReadLoop(filter func(*NetPacket) bool) {
defer log.PrintPanicStack()
// shut down sending
defer s.Close()
//var delayTimer *time.Timer
//if s.readDelay > 0 {
// delayTimer = time.NewTimer(s.readDelay)
//}
rpmStart := time.Now()
rpmCount := uint32(0)
//rpmMsgCount := 0
// 4-byte packet length
header := make([]byte, HEAD_SIZE)
for {
// read timeout
if s.readDelay > 0 {
s.Conn.SetReadDeadline(time.Now().Add(s.readDelay))
}
// 4-byte packet length
n, err := io.ReadFull(s.Conn, header)
if err != nil {
//if err != io.EOF {
// log.Warnf("error receiving header,bytes:%d,session:%d,remote:%s,err:%v", n, s.SessionId, s.RemoteAddr(), err)
//}
return
}
// packet payload
size := ServerEndian.Uint32(header)
if size < SEQ_ID_SIZE+MSG_ID_SIZE || (s.maxRecvSize != 0 && size > s.maxRecvSize) {
log.Warnf("error receiving,size:%d,head:%+v,session:%d,remote:%s", size, header, s.SessionId, s.RemoteAddr())
return
}
payload := make([]byte, size)
n, err = io.ReadFull(s.Conn, payload)
if err != nil {
log.Warnf("error receiving body,bytes:%d,size:%d,session:%d,remote:%s,err:%v", n, size, s.SessionId, s.RemoteAddr(), err)
return
}
// inbound packet-rate control
if s.rpmLimit > 0 {
rpmCount++
// reached the packet-count limit
if rpmCount > s.rpmLimit {
now := time.Now()
// check the elapsed interval
if now.Sub(rpmStart) < s.rpmInterval {
// after warning three times that operations are too frequent, kick the client offline
//rpmMsgCount++
//if rpmMsgCount > 3 {
// send the rate-too-high notification packet
s.DirectSendAndClose(s.offLineMsg)
log.Errorf("session rpm too high,%d/%s qps,session:%d,remote:%s", rpmCount, s.rpmInterval, s.SessionId, s.RemoteAddr())
return
//}
//// send the rate-too-high notification packet
//s.Send(s.offLineMsg)
}
// limit not exceeded
rpmCount = 0
rpmStart = now
}
}
s.PacketRcvSeq++
//fmt.Printf("a %x\n", payload)
// decrypt
if s.Flag&SESS_ENCRYPT != 0 {
decoder, _ := rc4.NewCipher(s.DecodeKey)
decoder.XORKeyStream(payload, payload)
//fmt.Printf("b1 %x\n", payload)
//} else {
//fmt.Printf("b2 %x\n", payload)
}
// read the client packet sequence number (1, 2, 3, ...)
// every packet sent by the client must carry an auto-incrementing, strictly increasing sequence number
// combined with encryption this prevents replay attacks (REPLAY-ATTACK)
seqId := ServerEndian.Uint32(payload[:SEQ_ID_SIZE])
if seqId != s.PacketRcvSeq {
log.Errorf("session illegal packet sequence id:%v should be:%v size:%v", seqId, s.PacketRcvSeq, len(payload))
return
}
msgId := ServerEndian.Uint16(payload[SEQ_ID_SIZE : SEQ_ID_SIZE+MSG_ID_SIZE])
pack := &NetPacket{MsgId: msgId, Data: payload[SEQ_ID_SIZE+MSG_ID_SIZE:], Session: s, ReceiveTime: time.Now()}
//if s.readDelay > 0 {
// if !delayTimer.Stop() {
// select {
// case <-delayTimer.C:
// default:
// }
// }
// delayTimer.Reset(s.readDelay)
// if filter == nil {
// select {
// case s.ReadChan <- pack:
// case <-delayTimer.C:
// log.Warnf("session read busy or closed,waitChan:%d,session:%d,remote:%s", len(s.ReadChan), s.SessionId, s.RemoteAddr())
// return
// }
// } else {
// if ok := filter(pack); !ok {
// select {
// case s.ReadChan <- pack:
// case <-delayTimer.C:
// log.Warnf("session read busy or closed,waitChan:%d,session:%d,remote:%s", len(s.ReadChan), s.SessionId, s.RemoteAddr())
// return
// }
// }
// }
//} else {
if filter == nil {
s.ReadChan <- pack
} else {
if ok := filter(pack); !ok {
s.ReadChan <- pack
}
}
//}
}
}
func (s *TCPSession) Close() {
s.CloseState.SetDone()
}
func (s *TCPSession) IsClosed() bool {
return s.CloseState.R().Done()
}
func (s *TCPSession) closeTask() {
s.OffLineTime = timefix.SecondTime()
if s.OffChan != nil {
s.OffChan <- s.SessionId
//// tell the server that this player dropped or went offline
//t := time.NewTimer(15 * time.Second)
//select {
//case s.OffChan <- s.SessionId:
// t.Stop()
//case <-t.C:
// log.Warnf("off chan time out,session:%d,remote:%s", s.SessionId, s.RemoteAddr())
//}
}
// close connection
s.Conn.Close()
}
func (s *TCPSession) SendLoop() {
defer log.PrintPanicStack()
for {
select {
case <-s.CloseState:
s.closeTask()
return
case packet := <-s.SendChan:
s.DirectSend(packet)
}
}
}
func (s *TCPSession) DirectSendAndClose(packet *NetPacket) {
go func() {
defer s.Close()
s.DirectSend(packet)
time.Sleep(1 * time.Second)
}()
}
func (s *TCPSession) DirectSend(packet *NetPacket) bool {
if packet == nil {
return true
}
if s.IsClosed() {
return false
}
s.sendLock.Lock()
defer s.sendLock.Unlock()
s.PacketSndSeq++
packLen := uint32(SEQ_ID_SIZE + MSG_ID_SIZE + len(packet.Data))
totalSize := packLen + HEAD_SIZE
if totalSize > s.sendCacheSize {
s.sendCacheSize = totalSize + (totalSize >> 2) // grow by a 1.25x factor
s.sendCache = make([]byte, s.sendCacheSize)
}
// 4-byte packet length
ServerEndian.PutUint32(s.sendCache, packLen)
// 4-byte message sequence number
ServerEndian.PutUint32(s.sendCache[HEAD_SIZE:], s.PacketSndSeq)
// 2-byte message id
ServerEndian.PutUint16(s.sendCache[HEAD_SIZE+SEQ_ID_SIZE:], packet.MsgId)
copy(s.sendCache[HEAD_SIZE+SEQ_ID_SIZE+MSG_ID_SIZE:], packet.Data)
// encryption
// (NOT_ENCRYPTED) -> KEYEXCG -> ENCRYPT
if s.Flag&SESS_ENCRYPT != 0 { // encryption is enabled
encoder, _ := rc4.NewCipher(s.EncodeKey)
data := s.sendCache[HEAD_SIZE:totalSize]
encoder.XORKeyStream(data, data)
} else if s.Flag&SESS_KEYEXCG != 0 { // key is exchanged, encryption is not yet enabled
//s.Flag &^= SESS_KEYEXCG
s.Flag |= SESS_ENCRYPT
}
err := s.performSend(s.sendCache[:totalSize], 0)
if err != nil {
log.Debugf("error writing msg,session:%d,remote:%s,err:%v", s.SessionId, s.RemoteAddr(), err)
s.Close()
return false
}
return true
}
func (s *TCPSession) performSend(data []byte, sendRetries int) error {
// write timeout
if s.sendDelay > 0 {
s.Conn.SetWriteDeadline(time.Now().Add(s.sendDelay))
}
_, err := s.Conn.Write(data)
if err != nil {
return s.processSendError(err, data, sendRetries)
}
return nil
}
func (s *TCPSession) processSendError(err error, data []byte, sendRetries int) error {
netErr, ok := err.(net.Error)
if !ok {
return err
}
if s.isNeedToResendMessage(netErr, sendRetries) {
return s.performSend(data, sendRetries+1)
}
//if !netErr.Temporary() {
// // reconnect; leave reconnection to the caller
//}
return err
}
func (s *TCPSession) isNeedToResendMessage(err net.Error, sendRetries int) bool {
return (err.Temporary() || err.Timeout()) && sendRetries < s.MaxSendRetries
}
// set connection parameters
func (s *TCPSession) SetParameter(readDelay, sendDelay time.Duration, maxRecvSize uint32, sendFullClose bool) {
s.maxRecvSize = maxRecvSize
if readDelay >= 0 {
s.readDelay = readDelay
}
if sendDelay >= 0 {
s.sendDelay = sendDelay
}
s.sendFullClose = sendFullClose
}
// packet rate-control parameters
func (s *TCPSession) SetRpmParameter(rpmLimit uint32, rpmInterval time.Duration, msg *NetPacket) {
s.rpmLimit = rpmLimit
s.rpmInterval = rpmInterval
s.offLineMsg = msg
}
func NewSession(conn NetConnIF, readChan, sendChan chan *NetPacket, offChan chan int64) (*TCPSession, error) {
s := &TCPSession{
Conn: conn,
SendChan: sendChan,
ReadChan: readChan,
OffChan: offChan,
SessionId: atomic.AddInt64(&_sessionID, 1),
sendCache: make([]byte, 256),
sendCacheSize: 256,
readDelay: 30 * time.Second,
sendDelay: 10 * time.Second,
sendFullClose: true,
maxRecvSize: 10 * 1024,
OnLineTime: timefix.SecondTime(),
CloseState: chanutil.NewDoneChan(),
}
host, _, err := net.SplitHostPort(conn.RemoteAddr().String())
if err != nil {
log.Error("cannot get remote address:", err)
return nil, err
}
s.IP = net.ParseIP(host)
//log.Debugf("new connection from:%v port:%v", host, port)
return s, nil
}
func (s *TCPSession) TraceStart(family, title string, expvar bool) {
if trace.EnableTracing {
s.TraceFinish(nil)
s.tr = golangtrace.New(family, title, expvar)
}
}
func (s *TCPSession) TraceStartWithStart(family, title string, expvar bool, startTime time.Time) {
if trace.EnableTracing {
s.TraceFinish(nil)
s.tr = golangtrace.NewWithStart(family, title, expvar, startTime)
}
}
func (s *TCPSession) TraceFinish(traceDefer func(*expvar.Map, int64)) {
if s.tr != nil {
tt := s.tr
tt.Finish()
if traceDefer != nil {
family := tt.GetFamily()
req := expvar.Get(family)
if req == nil {
func() {
defer func() {
if v := recover(); v != nil {
req = expvar.Get(family)
}
}()
req = expvar.NewMap(family)
}()
}
traceDefer(req.(*expvar.Map), tt.GetElapsedTime())
}
s.tr = nil
}
}
func (s *TCPSession) TracePrintf(format string, a ...interface{}) {
if s.tr != nil {
s.tr.LazyPrintf(format, a...)
}
}
func (s *TCPSession) TraceErrorf(format string, a ...interface{}) {
if s.tr != nil {
s.tr.LazyPrintf(format, a...)
s.tr.SetError()
}
} | ReceiveTime time.Time | random_line_split |
batcher.go | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package requestbatcher is a library to enable easy batching of roachpb
// requests.
//
// Batching in general represents a tradeoff between throughput and latency. The
// underlying assumption is that batched operations are cheaper than an
// individual operation. If this is not the case for your workload, don't use
// this library.
//
// Batching assumes that data with the same key can be sent in a single batch.
// The initial implementation uses rangeID as the key explicitly to avoid
// creating an overly general solution without motivation but interested readers
// should recognize that it would be easy to extend this package to accept an
// arbitrary comparable key.
package requestbatcher
import (
"container/heap"
"context"
"sync"
"time"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// The motivating use cases for this package are opportunities to perform cleanup
// operations in a single raft transaction rather than several. Three main
// opportunities are known:
//
// 1) Intent resolution
// 2) Txn heartbeating
// 3) Txn record garbage collection
//
// The first two have relatively tight timing expectations. In other words
// it would be surprising and negative for a client if operations were not sent
// soon after they were queued. The transaction record GC workload can be rather
// asynchronous. This motivates the need for some knobs to control the maximum
// acceptable amount of time to buffer operations before sending.
// Another wrinkle is dealing with the different ways in which sending a batch
// may fail. A batch may fail in an ambiguous way (RPC/network errors), it may
// fail completely (which is likely indistinguishable from the ambiguous
// failure) and lastly it may fail partially. Today's Sender contract is fairly
// ambiguous about the contract between BatchResponse inner responses and errors
// returned from a batch request.
// TODO(ajwerner): Do we need to consider ordering dependencies between
// operations? For the initial motivating use cases for this library there are
// no data dependencies between operations and the only key will be the guess
// for where an operation should go.
// TODO(ajwerner): Consider a more sophisticated mechanism to limit on maximum
// number of requests in flight at a time. This may ultimately lead to a need
// for queuing. Furthermore consider using batch time to dynamically tune the
// amount of time we wait.
// TODO(ajwerner): Consider filtering requests which might have been canceled
// before sending a batch.
// TODO(ajwerner): Consider more dynamic policies with regards to deadlines.
// Perhaps we want to wait no more than some percentile of the duration of
// historical operations and stay idle only some other percentile. For example
// imagine if the max delay was the 50th and the max idle was the 10th? This
// has a problem when much of the prior workload was say local operations and
// happened very rapidly. Perhaps we need to provide some bounding envelope?
// TODO(ajwerner): Consider a more general purpose interface for this package.
// While several interface-oriented interfaces have been explored they all felt
// heavy and allocation intensive.
// TODO(ajwerner): Consider providing an interface which enables a single
// goroutine to dispatch a number of requests destined for different ranges to
// the RequestBatcher which may then wait for completion of all of the requests.
// What is the right contract for error handling? Imagine a situation where a
// client has dispatched N requests and one has been sent and returns with an
// error while others are queued. What should happen? Should the client receive
// the error rapidly? Should the other requests be sent at all? Should they be
// filtered before sending?
// Config contains the dependencies and configuration for a Batcher.
type Config struct {
// Name of the batcher, used for logging, timeout errors, and the stopper.
Name string
// Sender can round-trip a batch. Sender must not be nil.
Sender kv.Sender
// Stopper controls the lifecycle of the Batcher. Stopper must not be nil.
Stopper *stop.Stopper
// MaxSizePerBatch is the maximum number of bytes in individual requests in a
// batch. If MaxSizePerBatch <= 0 then no limit is enforced.
MaxSizePerBatch int
// MaxMsgsPerBatch is the maximum number of messages.
// If MaxMsgsPerBatch <= 0 then no limit is enforced.
MaxMsgsPerBatch int
// MaxKeysPerBatchReq is the maximum number of keys that each batch is
// allowed to touch during one of its requests. If the limit is exceeded,
// the batch is paginated over a series of individual requests. This limit
// corresponds to the MaxSpanRequestKeys assigned to the Header of each
// request. If MaxKeysPerBatchReq <= 0 then no limit is enforced.
MaxKeysPerBatchReq int
// MaxWait is the maximum amount of time a message should wait in a batch
// before being sent. If MaxWait is <= 0 then no wait timeout is enforced.
// It is inadvisable to disable both MaxIdle and MaxWait.
MaxWait time.Duration
// MaxIdle is the amount of time a batch should wait between message additions
// before being sent. The idle timer allows clients to observe low latencies
// when throughput is low. If MaxIdle is <= 0 then no idle timeout is
// enforced. It is inadvisable to disable both MaxIdle and MaxWait.
MaxIdle time.Duration
// InFlightBackpressureLimit is the number of batches in flight above which
// sending clients should experience backpressure. If the batcher has more
// requests than this in flight it will not accept new requests until the
// number of in flight batches is again below this threshold. This value does
// not limit the number of batches which may ultimately be in flight as
// batches which are queued to send but not yet in flight will still send.
// Note that values less than or equal to zero will result in the use of
// DefaultInFlightBackpressureLimit.
InFlightBackpressureLimit int
// NowFunc is used to determine the current time. It defaults to timeutil.Now.
NowFunc func() time.Time
}
const (
// DefaultInFlightBackpressureLimit is the InFlightBackpressureLimit used if
// a zero value for that setting is passed in a Config to New.
// TODO(ajwerner): Justify this number.
DefaultInFlightBackpressureLimit = 1000
// backpressureRecoveryFraction is the fraction of InFlightBackpressureLimit
// used to detect when enough in flight requests have completed such that more
// requests should now be accepted. A value less than 1 is chosen in order to
// avoid thrashing on backpressure which might ultimately defeat the purpose
// of the RequestBatcher.
backpressureRecoveryFraction = .8
)
func backpressureRecoveryThreshold(limit int) int {
if l := int(float64(limit) * backpressureRecoveryFraction); l > 0 {
return l
}
return 1 // don't allow the recovery threshold to be 0
}
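// For example, with the default limit of 1000 the run loop stops accepting
// new requests once 1000 batches are in flight and resumes only after the
// count drops below int(1000 * 0.8) = 800.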
// RequestBatcher batches requests destined for a single range based on
// a configured batching policy.
type RequestBatcher struct {
pool pool
cfg Config
// sendBatchOpName is the string passed to contextutil.RunWithTimeout when
// sending a batch.
sendBatchOpName string
batches batchQueue
requestChan chan *request
sendDoneChan chan struct{}
}
// Response is exported for use with the channel-oriented SendWithChan method.
// At least one of Resp or Err will be populated for every sent Response.
type Response struct {
Resp roachpb.Response
Err error
}
// New creates a new RequestBatcher.
func New(cfg Config) *RequestBatcher {
validateConfig(&cfg)
b := &RequestBatcher{
cfg: cfg,
pool: makePool(),
batches: makeBatchQueue(),
requestChan: make(chan *request),
sendDoneChan: make(chan struct{}),
}
b.sendBatchOpName = b.cfg.Name + ".sendBatch"
if err := cfg.Stopper.RunAsyncTask(context.Background(), b.cfg.Name, b.run); err != nil {
panic(err)
}
return b
}
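// Construction sketch (hedged: the sender and stopper are assumed to be
// provided by the surrounding server, and the knob values below are
// illustrative rather than recommendations):
//
//	b := requestbatcher.New(requestbatcher.Config{
//		Name:            "example-batcher",
//		Sender:          sender,  // any kv.Sender
//		Stopper:         stopper, // *stop.Stopper
//		MaxMsgsPerBatch: 100,
//		MaxWait:         10 * time.Millisecond,
//		MaxIdle:         time.Millisecond,
//	})
//	resp, err := b.Send(ctx, rangeID, req)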
func validateConfig(cfg *Config) {
if cfg.Stopper == nil {
panic("cannot construct a Batcher with a nil Stopper")
} else if cfg.Sender == nil {
panic("cannot construct a Batcher with a nil Sender")
}
if cfg.InFlightBackpressureLimit <= 0 {
cfg.InFlightBackpressureLimit = DefaultInFlightBackpressureLimit
}
if cfg.NowFunc == nil {
cfg.NowFunc = timeutil.Now
}
}
// SendWithChan sends a request with a client provided response channel. The
// client is responsible for ensuring that the passed respChan has a buffer at
// least as large as the number of responses it expects to receive. Using an
// insufficiently buffered channel can lead to deadlocks and unintended delays
// processing requests inside the RequestBatcher.
func (b *RequestBatcher) SendWithChan(
ctx context.Context, respChan chan<- Response, rangeID roachpb.RangeID, req roachpb.Request,
) error {
select {
case b.requestChan <- b.pool.newRequest(ctx, rangeID, req, respChan):
return nil
case <-b.cfg.Stopper.ShouldQuiesce():
return stop.ErrUnavailable
case <-ctx.Done():
return ctx.Err()
}
}
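// Fan-out sketch for the channel-oriented API. The key point is that the
// channel is buffered for every expected response before any are received;
// reqs is an illustrative slice of {rangeID, req} pairs, not a real type:
//
//	respChan := make(chan Response, len(reqs))
//	for _, r := range reqs {
//		if err := b.SendWithChan(ctx, respChan, r.rangeID, r.req); err != nil {
//			return err
//		}
//	}
//	for range reqs {
//		if resp := <-respChan; resp.Err != nil {
//			return resp.Err
//		}
//	}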
// Send sends req as a part of a batch. An error is returned if the context
// is canceled before the sending of the request completes. The context with
// the latest deadline for a batch is used to send the underlying batch request.
func (b *RequestBatcher) Send(
ctx context.Context, rangeID roachpb.RangeID, req roachpb.Request,
) (roachpb.Response, error) {
responseChan := b.pool.getResponseChan()
if err := b.SendWithChan(ctx, responseChan, rangeID, req); err != nil {
return nil, err
}
select {
case resp := <-responseChan:
// It's only safe to put responseChan back in the pool if it has been
// received from.
b.pool.putResponseChan(responseChan)
return resp.Resp, resp.Err
case <-b.cfg.Stopper.ShouldQuiesce():
return nil, stop.ErrUnavailable
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (b *RequestBatcher) sendDone(ba *batch) {
b.pool.putBatch(ba)
select {
case b.sendDoneChan <- struct{}{}:
case <-b.cfg.Stopper.ShouldQuiesce():
}
}
func (b *RequestBatcher) sendBatch(ctx context.Context, ba *batch) {
if err := b.cfg.Stopper.RunAsyncTask(ctx, "send-batch", func(ctx context.Context) {
defer b.sendDone(ba)
var br *roachpb.BatchResponse
send := func(ctx context.Context) error {
var pErr *roachpb.Error
if br, pErr = b.cfg.Sender.Send(ctx, ba.batchRequest(&b.cfg)); pErr != nil {
return pErr.GoError()
}
return nil
}
if !ba.sendDeadline.IsZero() {
actualSend := send
send = func(context.Context) error {
return contextutil.RunWithTimeout(
ctx, b.sendBatchOpName, timeutil.Until(ba.sendDeadline), actualSend)
}
}
// Send requests in a loop to support pagination, which may be necessary
// if MaxKeysPerBatchReq is set. If so, partial responses with resume
// spans may be returned for requests, indicating that the limit was hit
// before they could complete and that they should be resumed over the
// specified key span. Requests in the batch are neither guaranteed to
// be ordered nor guaranteed to be non-overlapping, so we can make no
// assumptions about the requests that will result in full responses
// (with no resume spans) vs. partial responses vs. empty responses (see
// the comment on roachpb.Header.MaxSpanRequestKeys).
//
// To accommodate this, we keep track of all partial responses from
// previous iterations. After receiving a batch of responses during an
// iteration, the responses are each combined with the previous response
// for their corresponding requests. From there, responses that have no
// resume spans are removed. Responses that have resume spans are
// updated appropriately and sent again in the next iteration. The loop
// proceeds until all requests have been run to completion.
var prevResps []roachpb.Response
for len(ba.reqs) > 0 {
err := send(ctx)
nextReqs, nextPrevResps := ba.reqs[:0], prevResps[:0]
for i, r := range ba.reqs {
var res Response
if br != nil {
resp := br.Responses[i].GetInner()
if prevResps != nil {
prevResp := prevResps[i]
if cErr := roachpb.CombineResponses(prevResp, resp); cErr != nil {
log.Fatalf(ctx, "%v", cErr)
}
resp = prevResp
}
if resume := resp.Header().ResumeSpan; resume != nil {
// Add a trimmed request to the next batch.
h := r.req.Header()
h.SetSpan(*resume)
r.req = r.req.ShallowCopy()
r.req.SetHeader(h)
nextReqs = append(nextReqs, r)
// Strip resume span from previous response and record.
prevH := resp.Header()
prevH.ResumeSpan = nil
prevResp := resp
prevResp.SetHeader(prevH)
nextPrevResps = append(nextPrevResps, prevResp)
continue
}
res.Resp = resp
}
if err != nil {
res.Err = err
}
b.sendResponse(r, res)
}
ba.reqs, prevResps = nextReqs, nextPrevResps
}
}); err != nil {
b.sendDone(ba)
}
}
func (b *RequestBatcher) sendResponse(req *request, resp Response) {
// This send should never block because responseChan is buffered.
req.responseChan <- resp
b.pool.putRequest(req)
}
func addRequestToBatch(cfg *Config, now time.Time, ba *batch, r *request) (shouldSend bool) {
// Update the deadline for the batch if this request's deadline is later
// than the current latest.
rDeadline, rHasDeadline := r.ctx.Deadline()
// If this is the first request or
if len(ba.reqs) == 0 ||
// there are already requests and there is a deadline and
(len(ba.reqs) > 0 && !ba.sendDeadline.IsZero() &&
// this request either doesn't have a deadline or has a later deadline,
(!rHasDeadline || rDeadline.After(ba.sendDeadline))) {
// set the deadline to this request's deadline.
ba.sendDeadline = rDeadline
}
ba.reqs = append(ba.reqs, r)
ba.size += r.req.Size()
ba.lastUpdated = now
if cfg.MaxIdle > 0 {
ba.deadline = ba.lastUpdated.Add(cfg.MaxIdle)
}
if cfg.MaxWait > 0 {
waitDeadline := ba.startTime.Add(cfg.MaxWait)
if cfg.MaxIdle <= 0 || waitDeadline.Before(ba.deadline) {
ba.deadline = waitDeadline
}
}
return (cfg.MaxMsgsPerBatch > 0 && len(ba.reqs) >= cfg.MaxMsgsPerBatch) ||
(cfg.MaxSizePerBatch > 0 && ba.size >= cfg.MaxSizePerBatch)
}
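// Worked example of the deadline interplay: with MaxIdle = 1s and MaxWait = 5s,
// a batch started at t=0s whose last request arrived at t=4.5s gets
// deadline = min(4.5s+1s, 0s+5s) = 5s, so MaxWait caps how far repeated
// additions can keep pushing the idle deadline out.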
func (b *RequestBatcher) cleanup(err error) {
for ba := b.batches.popFront(); ba != nil; ba = b.batches.popFront() {
for _, r := range ba.reqs {
b.sendResponse(r, Response{Err: err})
}
}
}
func (b *RequestBatcher) run(ctx context.Context) {
// Create a context to be used in sendBatch to cancel in-flight batches when
// this function exits. If we did not cancel in-flight requests then the
// Stopper might get stuck waiting for those requests to complete.
sendCtx, cancel := context.WithCancel(ctx)
defer cancel()
var (
// inFlight tracks the number of batches currently being sent.
inFlight = 0
// inBackPressure indicates whether the reqChan is enabled.
// It becomes true when inFlight exceeds b.cfg.InFlightBackpressureLimit.
inBackPressure = false
// recoveryThreshold is the number of in-flight batches below which
// the inBackPressure state should exit.
recoveryThreshold = backpressureRecoveryThreshold(b.cfg.InFlightBackpressureLimit)
// reqChan consults inBackPressure to determine whether the goroutine is
// accepting new requests.
reqChan = func() <-chan *request {
if inBackPressure {
return nil
}
return b.requestChan
}
sendBatch = func(ba *batch) {
inFlight++
if inFlight >= b.cfg.InFlightBackpressureLimit {
inBackPressure = true
}
b.sendBatch(sendCtx, ba)
}
handleSendDone = func() {
inFlight--
if inFlight < recoveryThreshold {
inBackPressure = false
}
}
handleRequest = func(req *request) {
now := b.cfg.NowFunc()
ba, existsInQueue := b.batches.get(req.rangeID)
if !existsInQueue {
ba = b.pool.newBatch(now)
}
if shouldSend := addRequestToBatch(&b.cfg, now, ba, req); shouldSend {
if existsInQueue {
b.batches.remove(ba)
}
sendBatch(ba)
} else {
b.batches.upsert(ba)
}
}
deadline time.Time
timer = timeutil.NewTimer()
maybeSetTimer = func() {
var nextDeadline time.Time
if next := b.batches.peekFront(); next != nil {
nextDeadline = next.deadline
}
if !deadline.Equal(nextDeadline) || timer.Read {
deadline = nextDeadline
if !deadline.IsZero() {
timer.Reset(timeutil.Until(deadline))
} else {
// Clear the current timer due to a sole batch already sent before
// the timer fired.
timer.Stop()
timer = timeutil.NewTimer()
}
}
}
)
for {
select {
case req := <-reqChan():
handleRequest(req)
maybeSetTimer()
case <-timer.C:
timer.Read = true
sendBatch(b.batches.popFront())
maybeSetTimer()
case <-b.sendDoneChan:
handleSendDone()
case <-b.cfg.Stopper.ShouldQuiesce():
b.cleanup(stop.ErrUnavailable)
return
case <-ctx.Done():
b.cleanup(ctx.Err())
return
}
}
}
type request struct {
ctx context.Context
req roachpb.Request
rangeID roachpb.RangeID
responseChan chan<- Response
}
type batch struct {
reqs []*request
size int // bytes
// sendDeadline is the latest deadline reported by a request's context.
// It will be zero valued if any request does not contain a deadline.
sendDeadline time.Time
// idx is the batch's index in the batchQueue.
idx int
// deadline is the time at which this batch should be sent according to the
// Batcher's configuration.
deadline time.Time
// startTime is the time at which the first request was added to the batch.
startTime time.Time
// lastUpdated is the latest time when a request was added to the batch.
lastUpdated time.Time
}
func (b *batch) rangeID() roachpb.RangeID {
if len(b.reqs) == 0 {
panic("rangeID cannot be called on an empty batch")
}
return b.reqs[0].rangeID
}
func (b *batch) batchRequest(cfg *Config) roachpb.BatchRequest {
req := roachpb.BatchRequest{
// Preallocate the Requests slice.
Requests: make([]roachpb.RequestUnion, 0, len(b.reqs)),
}
for _, r := range b.reqs {
req.Add(r.req)
}
if cfg.MaxKeysPerBatchReq > 0 {
req.MaxSpanRequestKeys = int64(cfg.MaxKeysPerBatchReq)
}
return req
}
// pool stores object pools for the various commonly reused objects of the
// batcher.
type pool struct {
responseChanPool sync.Pool
batchPool sync.Pool
requestPool sync.Pool
}
func makePool() pool {
return pool{
responseChanPool: sync.Pool{
New: func() interface{} { return make(chan Response, 1) },
},
batchPool: sync.Pool{
New: func() interface{} { return &batch{} },
},
requestPool: sync.Pool{
New: func() interface{} { return &request{} },
},
}
}
func (p *pool) getResponseChan() chan Response {
return p.responseChanPool.Get().(chan Response)
}
func (p *pool) putResponseChan(r chan Response) {
p.responseChanPool.Put(r)
}
func (p *pool) newRequest(
ctx context.Context, rangeID roachpb.RangeID, req roachpb.Request, responseChan chan<- Response,
) *request {
r := p.requestPool.Get().(*request)
*r = request{
ctx: ctx,
rangeID: rangeID,
req: req,
responseChan: responseChan,
}
return r
}
func (p *pool) putRequest(r *request) {
	*r = request{}
	p.requestPool.Put(r)
}
func (p *pool) newBatch(now time.Time) *batch {
ba := p.batchPool.Get().(*batch)
*ba = batch{
startTime: now,
idx: -1,
}
return ba
}
func (p *pool) putBatch(b *batch) {
*b = batch{}
p.batchPool.Put(b)
}
// batchQueue is a container for batch objects which offers O(1) get based on
// rangeID and peekFront as well as O(log(n)) upsert, removal, popFront.
// Batch structs are heap ordered inside of the batches slice based on their
// deadline with the earliest deadline at the front.
//
// Note that the batch struct stores its index in the batches slice and is -1
// when not part of the queue. The heap methods update the batch indices when
// updating the heap. Take care not to ever put a batch in to multiple
// batchQueues. At time of writing this package only ever used one batchQueue
// per RequestBatcher.
type batchQueue struct {
batches []*batch
byRange map[roachpb.RangeID]*batch
}
var _ heap.Interface = (*batchQueue)(nil)
func makeBatchQueue() batchQueue {
return batchQueue{
byRange: map[roachpb.RangeID]*batch{},
}
}
func (q *batchQueue) peekFront() *batch {
if q.Len() == 0 {
return nil
}
return q.batches[0]
}
func (q *batchQueue) popFront() *batch {
if q.Len() == 0 {
return nil
}
return heap.Pop(q).(*batch)
}
func (q *batchQueue) get(id roachpb.RangeID) (*batch, bool) {
b, exists := q.byRange[id]
return b, exists
}
func (q *batchQueue) remove(ba *batch) {
delete(q.byRange, ba.rangeID())
heap.Remove(q, ba.idx)
}
func (q *batchQueue) upsert(ba *batch) {
if ba.idx >= 0 {
heap.Fix(q, ba.idx)
} else {
heap.Push(q, ba)
}
}
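// How the run loop drives the queue (illustrative, mirroring run() above):
//
//	q := makeBatchQueue()
//	q.upsert(ba)          // push on the first request, heap.Fix on later ones
//	next := q.peekFront() // earliest deadline, used to arm the timer
//	sent := q.popFront()  // when that deadline's timer fires
//	q.remove(ba)          // when a size/count limit forces an early send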
func (q *batchQueue) Len() int {
return len(q.batches)
}
func (q *batchQueue) Swap(i, j int) {
q.batches[i], q.batches[j] = q.batches[j], q.batches[i]
q.batches[i].idx = i
q.batches[j].idx = j
}
func (q *batchQueue) Less(i, j int) bool {
idl, jdl := q.batches[i].deadline, q.batches[j].deadline
if before := idl.Before(jdl); before || !idl.Equal(jdl) {
return before
}
return q.batches[i].rangeID() < q.batches[j].rangeID()
}
func (q *batchQueue) Push(v interface{}) {
ba := v.(*batch)
ba.idx = len(q.batches)
q.byRange[ba.rangeID()] = ba
q.batches = append(q.batches, ba)
}
func (q *batchQueue) Pop() interface{} {
ba := q.batches[len(q.batches)-1]
q.batches = q.batches[:len(q.batches)-1]
delete(q.byRange, ba.rangeID())
ba.idx = -1
return ba
}
| {
*r = request{}
p.requestPool.Put(r)
} | identifier_body |
batcher.go | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package requestbatcher is a library to enable easy batching of roachpb
// requests.
//
// Batching in general represents a tradeoff between throughput and latency. The
// underlying assumption being that batched operations are cheaper than an
// individual operation. If this is not the case for your workload, don't use
// this library.
//
// Batching assumes that data with the same key can be sent in a single batch.
// The initial implementation uses rangeID as the key explicitly to avoid
// creating an overly general solution without motivation but interested readers
// should recognize that it would be easy to extend this package to accept an
// arbitrary comparable key.
package requestbatcher
import (
"container/heap"
"context"
"sync"
"time"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// The motivating use case for this package are opportunities to perform cleanup
// operations in a single raft transaction rather than several. Three main
// opportunities are known:
//
// 1) Intent resolution
// 2) Txn heartbeating
// 3) Txn record garbage collection
//
// The first two have a relatively tight time bound expectations. In other words
// it would be surprising and negative for a client if operations were not sent
// soon after they were queued. The transaction record GC workload can be rather
// asynchronous. This motivates the need for some knobs to control the maximum
// acceptable amount of time to buffer operations before sending.
// Another wrinkle is dealing with the different ways in which sending a batch
// may fail. A batch may fail in an ambiguous way (RPC/network errors), it may
// fail completely (which is likely indistinguishable from the ambiguous
// failure) and lastly it may fail partially. Today's Sender contract is fairly
// ambiguous about the contract between BatchResponse inner responses and errors
// returned from a batch request.
// TODO(ajwerner): Do we need to consider ordering dependencies between
// operations? For the initial motivating use cases for this library there are
// no data dependencies between operations and the only key will be the guess
// for where an operation should go.
// TODO(ajwerner): Consider a more sophisticated mechanism to limit on maximum
// number of requests in flight at a time. This may ultimately lead to a need
// for queuing. Furthermore consider using batch time to dynamically tune the
// amount of time we wait.
// TODO(ajwerner): Consider filtering requests which might have been canceled
// before sending a batch.
// TODO(ajwerner): Consider more dynamic policies with regards to deadlines.
// Perhaps we want to wait no more than some percentile of the duration of
// historical operations and stay idle only some other percentile. For example
// imagine if the max delay was the 50th and the max idle was the 10th? This
// has a problem when much of the prior workload was say local operations and
// happened very rapidly. Perhaps we need to provide some bounding envelope?
// TODO(ajwerner): Consider a more general purpose interface for this package.
// While several interface-oriented interfaces have been explored they all felt
// heavy and allocation intensive.
// TODO(ajwerner): Consider providing an interface which enables a single
// goroutine to dispatch a number of requests destined for different ranges to
// the RequestBatcher which may then wait for completion of all of the requests.
// What is the right contract for error handling? Imagine a situation where a
// client has dispatched N requests and one has been sent and returns with an
// error while others are queued. What should happen? Should the client receive
// the error rapidly? Should the other requests be sent at all? Should they be
// filtered before sending?
// Config contains the dependencies and configuration for a Batcher.
type Config struct {
// Name of the batcher, used for logging, timeout errors, and the stopper.
Name string
// Sender can round-trip a batch. Sender must not be nil.
Sender kv.Sender
// Stopper controls the lifecycle of the Batcher. Stopper must not be nil.
Stopper *stop.Stopper
// MaxSizePerBatch is the maximum number of bytes in individual requests in a
// batch. If MaxSizePerBatch <= 0 then no limit is enforced.
MaxSizePerBatch int
// MaxMsgsPerBatch is the maximum number of messages.
// If MaxMsgsPerBatch <= 0 then no limit is enforced.
MaxMsgsPerBatch int
// MaxKeysPerBatchReq is the maximum number of keys that each batch is
// allowed to touch during one of its requests. If the limit is exceeded,
// the batch is paginated over a series of individual requests. This limit
// corresponds to the MaxSpanRequestKeys assigned to the Header of each
// request. If MaxKeysPerBatchReq <= 0 then no limit is enforced.
MaxKeysPerBatchReq int
// MaxWait is the maximum amount of time a message should wait in a batch
// before being sent. If MaxWait is <= 0 then no wait timeout is enforced.
// It is inadvisable to disable both MaxIdle and MaxWait.
MaxWait time.Duration
// MaxIdle is the amount of time a batch should wait between message additions
// before being sent. The idle timer allows clients to observe low latencies
// when throughput is low. If MaxWait is <= 0 then no wait timeout is
// enforced. It is inadvisable to disable both MaxIdle and MaxWait.
MaxIdle time.Duration
// InFlightBackpressureLimit is the number of batches in flight above which
// sending clients should experience backpressure. If the batcher has more
// requests than this in flight it will not accept new requests until the
// number of in flight batches is again below this threshold. This value does
// not limit the number of batches which may ultimately be in flight as
// batches which are queued to send but not yet in flight will still send.
// Note that values less than or equal to zero will result in the use of
// DefaultInFlightBackpressureLimit.
InFlightBackpressureLimit int
// NowFunc is used to determine the current time. It defaults to timeutil.Now.
NowFunc func() time.Time
}
const (
// DefaultInFlightBackpressureLimit is the InFlightBackpressureLimit used if
// a zero value for that setting is passed in a Config to New.
// TODO(ajwerner): Justify this number.
DefaultInFlightBackpressureLimit = 1000
// BackpressureRecoveryFraction is the fraction of InFlightBackpressureLimit
// used to detect when enough in flight requests have completed such that more
// requests should now be accepted. A value less than 1 is chosen in order to
// avoid thrashing on backpressure which might ultimately defeat the purpose
// of the RequestBatcher.
backpressureRecoveryFraction = .8
)
func backpressureRecoveryThreshold(limit int) int {
if l := int(float64(limit) * backpressureRecoveryFraction); l > 0 {
return l
}
return 1 // don't allow the recovery threshold to be 0
}
// RequestBatcher batches requests destined for a single range based on
// a configured batching policy.
type RequestBatcher struct {
pool pool
cfg Config
// sendBatchOpName is the string passed to contextutil.RunWithTimeout when
// sending a batch.
sendBatchOpName string
batches batchQueue
requestChan chan *request
sendDoneChan chan struct{}
}
// Response is exported for use with the channel-oriented SendWithChan method.
// At least one of Resp or Err will be populated for every sent Response.
type Response struct {
Resp roachpb.Response
Err error
}
// New creates a new RequestBatcher.
func New(cfg Config) *RequestBatcher {
validateConfig(&cfg)
b := &RequestBatcher{
cfg: cfg,
pool: makePool(),
batches: makeBatchQueue(),
requestChan: make(chan *request),
sendDoneChan: make(chan struct{}),
}
b.sendBatchOpName = b.cfg.Name + ".sendBatch"
if err := cfg.Stopper.RunAsyncTask(context.Background(), b.cfg.Name, b.run); err != nil {
panic(err)
}
return b
}
func validateConfig(cfg *Config) {
if cfg.Stopper == nil {
panic("cannot construct a Batcher with a nil Stopper")
} else if cfg.Sender == nil {
panic("cannot construct a Batcher with a nil Sender")
}
if cfg.InFlightBackpressureLimit <= 0 {
cfg.InFlightBackpressureLimit = DefaultInFlightBackpressureLimit
}
if cfg.NowFunc == nil {
cfg.NowFunc = timeutil.Now
}
}
// SendWithChan sends a request with a client provided response channel. The
// client is responsible for ensuring that the passed respChan has a buffer at
// least as large as the number of responses it expects to receive. Using an
// insufficiently buffered channel can lead to deadlocks and unintended delays
// processing requests inside the RequestBatcher.
func (b *RequestBatcher) SendWithChan(
ctx context.Context, respChan chan<- Response, rangeID roachpb.RangeID, req roachpb.Request,
) error {
select {
case b.requestChan <- b.pool.newRequest(ctx, rangeID, req, respChan):
return nil
case <-b.cfg.Stopper.ShouldQuiesce():
return stop.ErrUnavailable
case <-ctx.Done():
return ctx.Err()
}
}
// Send sends req as a part of a batch. An error is returned if the context
// is canceled before the sending of the request completes. The context with
// the latest deadline for a batch is used to send the underlying batch request.
func (b *RequestBatcher) Send(
ctx context.Context, rangeID roachpb.RangeID, req roachpb.Request,
) (roachpb.Response, error) {
responseChan := b.pool.getResponseChan()
if err := b.SendWithChan(ctx, responseChan, rangeID, req); err != nil {
return nil, err
}
select {
case resp := <-responseChan:
// It's only safe to put responseChan back in the pool if it has been
// received from.
b.pool.putResponseChan(responseChan)
return resp.Resp, resp.Err
case <-b.cfg.Stopper.ShouldQuiesce():
return nil, stop.ErrUnavailable
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (b *RequestBatcher) sendDone(ba *batch) {
b.pool.putBatch(ba)
select {
case b.sendDoneChan <- struct{}{}:
case <-b.cfg.Stopper.ShouldQuiesce():
}
}
func (b *RequestBatcher) sendBatch(ctx context.Context, ba *batch) {
if err := b.cfg.Stopper.RunAsyncTask(ctx, "send-batch", func(ctx context.Context) {
defer b.sendDone(ba)
var br *roachpb.BatchResponse
send := func(ctx context.Context) error {
var pErr *roachpb.Error
if br, pErr = b.cfg.Sender.Send(ctx, ba.batchRequest(&b.cfg)); pErr != nil {
return pErr.GoError()
}
return nil
}
if !ba.sendDeadline.IsZero() {
actualSend := send
send = func(context.Context) error {
return contextutil.RunWithTimeout(
ctx, b.sendBatchOpName, timeutil.Until(ba.sendDeadline), actualSend)
}
}
// Send requests in a loop to support pagination, which may be necessary
// if MaxKeysPerBatchReq is set. If so, partial responses with resume
// spans may be returned for requests, indicating that the limit was hit
// before they could complete and that they should be resumed over the
// specified key span. Requests in the batch are neither guaranteed to
// be ordered nor guaranteed to be non-overlapping, so we can make no
// assumptions about the requests that will result in full responses
// (with no resume spans) vs. partial responses vs. empty responses (see
// the comment on roachpb.Header.MaxSpanRequestKeys).
//
// To accommodate this, we keep track of all partial responses from
// previous iterations. After receiving a batch of responses during an
// iteration, the responses are each combined with the previous response
// for their corresponding requests. From there, responses that have no
// resume spans are removed. Responses that have resume spans are
// updated appropriately and sent again in the next iteration. The loop
// proceeds until all requests have been run to completion.
var prevResps []roachpb.Response
for len(ba.reqs) > 0 {
err := send(ctx)
nextReqs, nextPrevResps := ba.reqs[:0], prevResps[:0]
for i, r := range ba.reqs {
var res Response
if br != nil {
resp := br.Responses[i].GetInner()
if prevResps != nil {
prevResp := prevResps[i]
if cErr := roachpb.CombineResponses(prevResp, resp); cErr != nil {
log.Fatalf(ctx, "%v", cErr)
}
resp = prevResp
}
if resume := resp.Header().ResumeSpan; resume != nil {
// Add a trimmed request to the next batch.
h := r.req.Header()
h.SetSpan(*resume)
r.req = r.req.ShallowCopy()
r.req.SetHeader(h)
nextReqs = append(nextReqs, r)
// Strip resume span from previous response and record.
prevH := resp.Header()
prevH.ResumeSpan = nil
prevResp := resp
prevResp.SetHeader(prevH)
nextPrevResps = append(nextPrevResps, prevResp)
continue
}
res.Resp = resp
}
if err != nil {
res.Err = err
}
b.sendResponse(r, res)
}
ba.reqs, prevResps = nextReqs, nextPrevResps
}
}); err != nil {
b.sendDone(ba)
}
}
func (b *RequestBatcher) sendResponse(req *request, resp Response) {
// This send should never block because responseChan is buffered.
req.responseChan <- resp
b.pool.putRequest(req)
}
func addRequestToBatch(cfg *Config, now time.Time, ba *batch, r *request) (shouldSend bool) {
// Update the deadline for the batch if this requests's deadline is later
// than the current latest.
rDeadline, rHasDeadline := r.ctx.Deadline()
// If this is the first request or
if len(ba.reqs) == 0 ||
// there are already requests and there is a deadline and
(len(ba.reqs) > 0 && !ba.sendDeadline.IsZero() &&
// this request either doesn't have a deadline or has a later deadline,
(!rHasDeadline || rDeadline.After(ba.sendDeadline))) {
// set the deadline to this request's deadline.
ba.sendDeadline = rDeadline
}
ba.reqs = append(ba.reqs, r)
ba.size += r.req.Size()
ba.lastUpdated = now
if cfg.MaxIdle > 0 {
ba.deadline = ba.lastUpdated.Add(cfg.MaxIdle)
}
if cfg.MaxWait > 0 {
waitDeadline := ba.startTime.Add(cfg.MaxWait)
if cfg.MaxIdle <= 0 || waitDeadline.Before(ba.deadline) {
ba.deadline = waitDeadline
}
}
return (cfg.MaxMsgsPerBatch > 0 && len(ba.reqs) >= cfg.MaxMsgsPerBatch) ||
(cfg.MaxSizePerBatch > 0 && ba.size >= cfg.MaxSizePerBatch)
}
func (b *RequestBatcher) cleanup(err error) {
for ba := b.batches.popFront(); ba != nil; ba = b.batches.popFront() {
for _, r := range ba.reqs {
b.sendResponse(r, Response{Err: err})
}
}
}
func (b *RequestBatcher) run(ctx context.Context) {
// Create a context to be used in sendBatch to cancel in-flight batches when
// this function exits. If we did not cancel in-flight requests then the
// Stopper might get stuck waiting for those requests to complete.
sendCtx, cancel := context.WithCancel(ctx)
defer cancel()
var (
// inFlight tracks the number of batches currently being sent.
// true.
inFlight = 0
// inBackPressure indicates whether the reqChan is enabled.
// It becomes true when inFlight exceeds b.cfg.InFlightBackpressureLimit.
inBackPressure = false
// recoveryThreshold is the number of in flight requests below which the
// the inBackPressure state should exit.
recoveryThreshold = backpressureRecoveryThreshold(b.cfg.InFlightBackpressureLimit)
// reqChan consults inBackPressure to determine whether the goroutine is
// accepting new requests.
reqChan = func() <-chan *request {
if inBackPressure {
return nil
}
return b.requestChan
}
sendBatch = func(ba *batch) {
inFlight++
if inFlight >= b.cfg.InFlightBackpressureLimit {
inBackPressure = true
}
b.sendBatch(sendCtx, ba)
}
handleSendDone = func() {
inFlight--
if inFlight < recoveryThreshold {
inBackPressure = false
}
}
handleRequest = func(req *request) {
now := b.cfg.NowFunc()
ba, existsInQueue := b.batches.get(req.rangeID)
if !existsInQueue {
ba = b.pool.newBatch(now)
}
if shouldSend := addRequestToBatch(&b.cfg, now, ba, req); shouldSend {
if existsInQueue {
b.batches.remove(ba)
}
sendBatch(ba)
} else {
b.batches.upsert(ba)
}
}
deadline time.Time
timer = timeutil.NewTimer()
maybeSetTimer = func() {
var nextDeadline time.Time
if next := b.batches.peekFront(); next != nil {
nextDeadline = next.deadline
}
if !deadline.Equal(nextDeadline) || timer.Read {
deadline = nextDeadline
if !deadline.IsZero() {
timer.Reset(timeutil.Until(deadline))
} else {
// Clear the current timer due to a sole batch already sent before
// the timer fired.
timer.Stop()
timer = timeutil.NewTimer()
}
}
}
)
for {
select {
case req := <-reqChan():
handleRequest(req)
maybeSetTimer()
case <-timer.C:
timer.Read = true
sendBatch(b.batches.popFront())
maybeSetTimer()
case <-b.sendDoneChan:
handleSendDone()
case <-b.cfg.Stopper.ShouldQuiesce():
b.cleanup(stop.ErrUnavailable)
return
case <-ctx.Done():
b.cleanup(ctx.Err())
return
}
}
}
type request struct {
ctx context.Context
req roachpb.Request
rangeID roachpb.RangeID
responseChan chan<- Response
}
type batch struct {
reqs []*request
size int // bytes
// sendDeadline is the latest deadline reported by a request's context.
// It will be zero valued if any request does not contain a deadline.
sendDeadline time.Time
// idx is the batch's index in the batchQueue.
idx int
// deadline is the time at which this batch should be sent according to the
// Batcher's configuration.
deadline time.Time
// startTime is the time at which the first request was added to the batch.
startTime time.Time
// lastUpdated is the latest time when a request was added to the batch.
lastUpdated time.Time
}
func (b *batch) rangeID() roachpb.RangeID {
if len(b.reqs) == 0 {
panic("rangeID cannot be called on an empty batch")
}
return b.reqs[0].rangeID
}
func (b *batch) | (cfg *Config) roachpb.BatchRequest {
req := roachpb.BatchRequest{
// Preallocate the Requests slice.
Requests: make([]roachpb.RequestUnion, 0, len(b.reqs)),
}
for _, r := range b.reqs {
req.Add(r.req)
}
if cfg.MaxKeysPerBatchReq > 0 {
req.MaxSpanRequestKeys = int64(cfg.MaxKeysPerBatchReq)
}
return req
}
// pool stores object pools for the various commonly reused objects of the
// batcher
type pool struct {
responseChanPool sync.Pool
batchPool sync.Pool
requestPool sync.Pool
}
func makePool() pool {
return pool{
responseChanPool: sync.Pool{
New: func() interface{} { return make(chan Response, 1) },
},
batchPool: sync.Pool{
New: func() interface{} { return &batch{} },
},
requestPool: sync.Pool{
New: func() interface{} { return &request{} },
},
}
}
func (p *pool) getResponseChan() chan Response {
return p.responseChanPool.Get().(chan Response)
}
func (p *pool) putResponseChan(r chan Response) {
p.responseChanPool.Put(r)
}
func (p *pool) newRequest(
ctx context.Context, rangeID roachpb.RangeID, req roachpb.Request, responseChan chan<- Response,
) *request {
r := p.requestPool.Get().(*request)
*r = request{
ctx: ctx,
rangeID: rangeID,
req: req,
responseChan: responseChan,
}
return r
}
func (p *pool) putRequest(r *request) {
*r = request{}
p.requestPool.Put(r)
}
func (p *pool) newBatch(now time.Time) *batch {
ba := p.batchPool.Get().(*batch)
*ba = batch{
startTime: now,
idx: -1,
}
return ba
}
func (p *pool) putBatch(b *batch) {
*b = batch{}
p.batchPool.Put(b)
}
// batchQueue is a container for batch objects which offers O(1) get based on
// rangeID and peekFront as well as O(log(n)) upsert, removal, popFront.
// Batch structs are heap ordered inside of the batches slice based on their
// deadline with the earliest deadline at the front.
//
// Note that the batch struct stores its index in the batches slice and is -1
// when not part of the queue. The heap methods update the batch indices when
// updating the heap. Take care not to ever put a batch in to multiple
// batchQueues. At time of writing this package only ever used one batchQueue
// per RequestBatcher.
type batchQueue struct {
batches []*batch
byRange map[roachpb.RangeID]*batch
}
var _ heap.Interface = (*batchQueue)(nil)
func makeBatchQueue() batchQueue {
return batchQueue{
byRange: map[roachpb.RangeID]*batch{},
}
}
func (q *batchQueue) peekFront() *batch {
if q.Len() == 0 {
return nil
}
return q.batches[0]
}
func (q *batchQueue) popFront() *batch {
if q.Len() == 0 {
return nil
}
return heap.Pop(q).(*batch)
}
func (q *batchQueue) get(id roachpb.RangeID) (*batch, bool) {
b, exists := q.byRange[id]
return b, exists
}
func (q *batchQueue) remove(ba *batch) {
delete(q.byRange, ba.rangeID())
heap.Remove(q, ba.idx)
}
func (q *batchQueue) upsert(ba *batch) {
if ba.idx >= 0 {
heap.Fix(q, ba.idx)
} else {
heap.Push(q, ba)
}
}
func (q *batchQueue) Len() int {
return len(q.batches)
}
func (q *batchQueue) Swap(i, j int) {
q.batches[i], q.batches[j] = q.batches[j], q.batches[i]
q.batches[i].idx = i
q.batches[j].idx = j
}
func (q *batchQueue) Less(i, j int) bool {
idl, jdl := q.batches[i].deadline, q.batches[j].deadline
if before := idl.Before(jdl); before || !idl.Equal(jdl) {
return before
}
return q.batches[i].rangeID() < q.batches[j].rangeID()
}
func (q *batchQueue) Push(v interface{}) {
ba := v.(*batch)
ba.idx = len(q.batches)
q.byRange[ba.rangeID()] = ba
q.batches = append(q.batches, ba)
}
func (q *batchQueue) Pop() interface{} {
ba := q.batches[len(q.batches)-1]
q.batches = q.batches[:len(q.batches)-1]
delete(q.byRange, ba.rangeID())
ba.idx = -1
return ba
}
| batchRequest | identifier_name |
batcher.go | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package requestbatcher is a library to enable easy batching of roachpb
// requests.
//
// Batching in general represents a tradeoff between throughput and latency. The
// underlying assumption being that batched operations are cheaper than an
// individual operation. If this is not the case for your workload, don't use
// this library.
//
// Batching assumes that data with the same key can be sent in a single batch.
// The initial implementation uses rangeID as the key explicitly to avoid
// creating an overly general solution without motivation but interested readers
// should recognize that it would be easy to extend this package to accept an
// arbitrary comparable key.
package requestbatcher
import (
"container/heap"
"context"
"sync"
"time"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// The motivating use case for this package are opportunities to perform cleanup
// operations in a single raft transaction rather than several. Three main
// opportunities are known:
//
// 1) Intent resolution
// 2) Txn heartbeating
// 3) Txn record garbage collection
//
// The first two have a relatively tight time bound expectations. In other words
// it would be surprising and negative for a client if operations were not sent
// soon after they were queued. The transaction record GC workload can be rather
// asynchronous. This motivates the need for some knobs to control the maximum
// acceptable amount of time to buffer operations before sending.
// Another wrinkle is dealing with the different ways in which sending a batch
// may fail. A batch may fail in an ambiguous way (RPC/network errors), it may
// fail completely (which is likely indistinguishable from the ambiguous
// failure) and lastly it may fail partially. Today's Sender contract is fairly
// ambiguous about the contract between BatchResponse inner responses and errors
// returned from a batch request.
// TODO(ajwerner): Do we need to consider ordering dependencies between
// operations? For the initial motivating use cases for this library there are
// no data dependencies between operations and the only key will be the guess
// for where an operation should go.
// TODO(ajwerner): Consider a more sophisticated mechanism to limit on maximum
// number of requests in flight at a time. This may ultimately lead to a need
// for queuing. Furthermore consider using batch time to dynamically tune the
// amount of time we wait.
// TODO(ajwerner): Consider filtering requests which might have been canceled
// before sending a batch.
// TODO(ajwerner): Consider more dynamic policies with regards to deadlines.
// Perhaps we want to wait no more than some percentile of the duration of
// historical operations and stay idle only some other percentile. For example
// imagine if the max delay was the 50th and the max idle was the 10th? This
// has a problem when much of the prior workload was say local operations and
// happened very rapidly. Perhaps we need to provide some bounding envelope?
// TODO(ajwerner): Consider a more general purpose interface for this package.
// While several interface-oriented interfaces have been explored they all felt
// heavy and allocation intensive.
// TODO(ajwerner): Consider providing an interface which enables a single
// goroutine to dispatch a number of requests destined for different ranges to
// the RequestBatcher which may then wait for completion of all of the requests.
// What is the right contract for error handling? Imagine a situation where a
// client has dispatched N requests and one has been sent and returns with an
// error while others are queued. What should happen? Should the client receive
// the error rapidly? Should the other requests be sent at all? Should they be
// filtered before sending?
// Config contains the dependencies and configuration for a Batcher.
type Config struct {
// Name of the batcher, used for logging, timeout errors, and the stopper.
Name string
// Sender can round-trip a batch. Sender must not be nil.
Sender kv.Sender
// Stopper controls the lifecycle of the Batcher. Stopper must not be nil.
Stopper *stop.Stopper
// MaxSizePerBatch is the maximum number of bytes in individual requests in a
// batch. If MaxSizePerBatch <= 0 then no limit is enforced.
MaxSizePerBatch int
// MaxMsgsPerBatch is the maximum number of messages.
// If MaxMsgsPerBatch <= 0 then no limit is enforced.
MaxMsgsPerBatch int
// MaxKeysPerBatchReq is the maximum number of keys that each batch is
// allowed to touch during one of its requests. If the limit is exceeded,
// the batch is paginated over a series of individual requests. This limit
// corresponds to the MaxSpanRequestKeys assigned to the Header of each
// request. If MaxKeysPerBatchReq <= 0 then no limit is enforced.
MaxKeysPerBatchReq int
// MaxWait is the maximum amount of time a message should wait in a batch
// before being sent. If MaxWait is <= 0 then no wait timeout is enforced.
// It is inadvisable to disable both MaxIdle and MaxWait.
MaxWait time.Duration
// MaxIdle is the amount of time a batch should wait between message additions
// before being sent. The idle timer allows clients to observe low latencies
// when throughput is low. If MaxWait is <= 0 then no wait timeout is
// enforced. It is inadvisable to disable both MaxIdle and MaxWait.
MaxIdle time.Duration
// InFlightBackpressureLimit is the number of batches in flight above which
// sending clients should experience backpressure. If the batcher has more
// requests than this in flight it will not accept new requests until the
// number of in flight batches is again below this threshold. This value does
// not limit the number of batches which may ultimately be in flight as
// batches which are queued to send but not yet in flight will still send.
// Note that values less than or equal to zero will result in the use of
// DefaultInFlightBackpressureLimit.
InFlightBackpressureLimit int
// NowFunc is used to determine the current time. It defaults to timeutil.Now.
NowFunc func() time.Time
}
const (
// DefaultInFlightBackpressureLimit is the InFlightBackpressureLimit used if
// a zero value for that setting is passed in a Config to New.
// TODO(ajwerner): Justify this number.
DefaultInFlightBackpressureLimit = 1000
// BackpressureRecoveryFraction is the fraction of InFlightBackpressureLimit
// used to detect when enough in flight requests have completed such that more
// requests should now be accepted. A value less than 1 is chosen in order to
// avoid thrashing on backpressure which might ultimately defeat the purpose
// of the RequestBatcher.
backpressureRecoveryFraction = .8
)
func backpressureRecoveryThreshold(limit int) int {
if l := int(float64(limit) * backpressureRecoveryFraction); l > 0 {
return l
}
return 1 // don't allow the recovery threshold to be 0
}
// RequestBatcher batches requests destined for a single range based on
// a configured batching policy.
type RequestBatcher struct {
pool pool
cfg Config
// sendBatchOpName is the string passed to contextutil.RunWithTimeout when
// sending a batch.
sendBatchOpName string
batches batchQueue
requestChan chan *request
sendDoneChan chan struct{}
}
// Response is exported for use with the channel-oriented SendWithChan method.
// At least one of Resp or Err will be populated for every sent Response.
type Response struct {
Resp roachpb.Response
Err error
}
// New creates a new RequestBatcher.
func New(cfg Config) *RequestBatcher {
validateConfig(&cfg)
b := &RequestBatcher{
cfg: cfg,
pool: makePool(),
batches: makeBatchQueue(),
requestChan: make(chan *request),
sendDoneChan: make(chan struct{}),
}
b.sendBatchOpName = b.cfg.Name + ".sendBatch"
if err := cfg.Stopper.RunAsyncTask(context.Background(), b.cfg.Name, b.run); err != nil {
panic(err)
}
return b
}
func validateConfig(cfg *Config) {
if cfg.Stopper == nil {
panic("cannot construct a Batcher with a nil Stopper")
} else if cfg.Sender == nil {
panic("cannot construct a Batcher with a nil Sender")
}
if cfg.InFlightBackpressureLimit <= 0 {
cfg.InFlightBackpressureLimit = DefaultInFlightBackpressureLimit
}
if cfg.NowFunc == nil {
cfg.NowFunc = timeutil.Now
}
}
// SendWithChan sends a request with a client provided response channel. The
// client is responsible for ensuring that the passed respChan has a buffer at
// least as large as the number of responses it expects to receive. Using an
// insufficiently buffered channel can lead to deadlocks and unintended delays
// processing requests inside the RequestBatcher.
func (b *RequestBatcher) SendWithChan(
ctx context.Context, respChan chan<- Response, rangeID roachpb.RangeID, req roachpb.Request,
) error {
select {
case b.requestChan <- b.pool.newRequest(ctx, rangeID, req, respChan):
return nil
case <-b.cfg.Stopper.ShouldQuiesce():
return stop.ErrUnavailable
case <-ctx.Done():
return ctx.Err()
}
}
// Send sends req as a part of a batch. An error is returned if the context
// is canceled before the sending of the request completes. The context with
// the latest deadline for a batch is used to send the underlying batch request.
func (b *RequestBatcher) Send(
ctx context.Context, rangeID roachpb.RangeID, req roachpb.Request,
) (roachpb.Response, error) {
responseChan := b.pool.getResponseChan()
if err := b.SendWithChan(ctx, responseChan, rangeID, req); err != nil {
return nil, err
}
select {
case resp := <-responseChan:
// It's only safe to put responseChan back in the pool if it has been
// received from.
b.pool.putResponseChan(responseChan)
return resp.Resp, resp.Err
case <-b.cfg.Stopper.ShouldQuiesce():
return nil, stop.ErrUnavailable
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (b *RequestBatcher) sendDone(ba *batch) {
b.pool.putBatch(ba)
select {
case b.sendDoneChan <- struct{}{}:
case <-b.cfg.Stopper.ShouldQuiesce():
}
}
func (b *RequestBatcher) sendBatch(ctx context.Context, ba *batch) {
if err := b.cfg.Stopper.RunAsyncTask(ctx, "send-batch", func(ctx context.Context) {
defer b.sendDone(ba)
var br *roachpb.BatchResponse
send := func(ctx context.Context) error {
var pErr *roachpb.Error
if br, pErr = b.cfg.Sender.Send(ctx, ba.batchRequest(&b.cfg)); pErr != nil {
return pErr.GoError()
}
return nil
}
if !ba.sendDeadline.IsZero() {
actualSend := send
send = func(context.Context) error {
return contextutil.RunWithTimeout(
ctx, b.sendBatchOpName, timeutil.Until(ba.sendDeadline), actualSend)
}
}
// Send requests in a loop to support pagination, which may be necessary
// if MaxKeysPerBatchReq is set. If so, partial responses with resume
// spans may be returned for requests, indicating that the limit was hit
// before they could complete and that they should be resumed over the
// specified key span. Requests in the batch are neither guaranteed to
// be ordered nor guaranteed to be non-overlapping, so we can make no
// assumptions about the requests that will result in full responses
// (with no resume spans) vs. partial responses vs. empty responses (see
// the comment on roachpb.Header.MaxSpanRequestKeys).
//
// To accommodate this, we keep track of all partial responses from
// previous iterations. After receiving a batch of responses during an
// iteration, the responses are each combined with the previous response
// for their corresponding requests. From there, responses that have no
// resume spans are removed. Responses that have resume spans are
// updated appropriately and sent again in the next iteration. The loop
// proceeds until all requests have been run to completion.
var prevResps []roachpb.Response
for len(ba.reqs) > 0 {
err := send(ctx)
nextReqs, nextPrevResps := ba.reqs[:0], prevResps[:0]
for i, r := range ba.reqs {
var res Response
if br != nil {
resp := br.Responses[i].GetInner()
if prevResps != nil {
prevResp := prevResps[i]
if cErr := roachpb.CombineResponses(prevResp, resp); cErr != nil {
log.Fatalf(ctx, "%v", cErr)
}
resp = prevResp
}
if resume := resp.Header().ResumeSpan; resume != nil {
// Add a trimmed request to the next batch.
h := r.req.Header()
h.SetSpan(*resume)
r.req = r.req.ShallowCopy()
r.req.SetHeader(h)
nextReqs = append(nextReqs, r)
// Strip resume span from previous response and record.
prevH := resp.Header()
prevH.ResumeSpan = nil
prevResp := resp
prevResp.SetHeader(prevH)
nextPrevResps = append(nextPrevResps, prevResp)
continue
}
res.Resp = resp
}
if err != nil {
res.Err = err
}
b.sendResponse(r, res)
}
ba.reqs, prevResps = nextReqs, nextPrevResps
}
}); err != nil {
b.sendDone(ba)
}
}
func (b *RequestBatcher) sendResponse(req *request, resp Response) {
// This send should never block because responseChan is buffered.
req.responseChan <- resp
b.pool.putRequest(req)
}
func addRequestToBatch(cfg *Config, now time.Time, ba *batch, r *request) (shouldSend bool) {
// Update the deadline for the batch if this requests's deadline is later
// than the current latest.
rDeadline, rHasDeadline := r.ctx.Deadline()
// If this is the first request or
if len(ba.reqs) == 0 ||
// there are already requests and there is a deadline and
(len(ba.reqs) > 0 && !ba.sendDeadline.IsZero() &&
// this request either doesn't have a deadline or has a later deadline,
(!rHasDeadline || rDeadline.After(ba.sendDeadline))) {
// set the deadline to this request's deadline.
ba.sendDeadline = rDeadline
}
ba.reqs = append(ba.reqs, r)
ba.size += r.req.Size()
ba.lastUpdated = now
if cfg.MaxIdle > 0 {
ba.deadline = ba.lastUpdated.Add(cfg.MaxIdle)
}
if cfg.MaxWait > 0 {
waitDeadline := ba.startTime.Add(cfg.MaxWait)
if cfg.MaxIdle <= 0 || waitDeadline.Before(ba.deadline) {
ba.deadline = waitDeadline
}
}
return (cfg.MaxMsgsPerBatch > 0 && len(ba.reqs) >= cfg.MaxMsgsPerBatch) ||
(cfg.MaxSizePerBatch > 0 && ba.size >= cfg.MaxSizePerBatch)
}
func (b *RequestBatcher) cleanup(err error) {
for ba := b.batches.popFront(); ba != nil; ba = b.batches.popFront() {
for _, r := range ba.reqs {
b.sendResponse(r, Response{Err: err})
}
}
}
func (b *RequestBatcher) run(ctx context.Context) {
// Create a context to be used in sendBatch to cancel in-flight batches when
// this function exits. If we did not cancel in-flight requests then the
// Stopper might get stuck waiting for those requests to complete.
sendCtx, cancel := context.WithCancel(ctx)
defer cancel()
var (
// inFlight tracks the number of batches currently being sent.
inFlight = 0
// inBackPressure indicates whether the run loop has stopped accepting new
// requests: while true, reqChan returns nil. It becomes true when inFlight
// reaches b.cfg.InFlightBackpressureLimit.
inBackPressure = false
// recoveryThreshold is the number of in-flight requests below which the
// inBackPressure state should exit.
recoveryThreshold = backpressureRecoveryThreshold(b.cfg.InFlightBackpressureLimit)
// reqChan consults inBackPressure to determine whether the goroutine is
// accepting new requests.
reqChan = func() <-chan *request {
if inBackPressure |
return b.requestChan
}
sendBatch = func(ba *batch) {
inFlight++
if inFlight >= b.cfg.InFlightBackpressureLimit {
inBackPressure = true
}
b.sendBatch(sendCtx, ba)
}
handleSendDone = func() {
inFlight--
if inFlight < recoveryThreshold {
inBackPressure = false
}
}
handleRequest = func(req *request) {
now := b.cfg.NowFunc()
ba, existsInQueue := b.batches.get(req.rangeID)
if !existsInQueue {
ba = b.pool.newBatch(now)
}
if shouldSend := addRequestToBatch(&b.cfg, now, ba, req); shouldSend {
if existsInQueue {
b.batches.remove(ba)
}
sendBatch(ba)
} else {
b.batches.upsert(ba)
}
}
deadline time.Time
timer = timeutil.NewTimer()
maybeSetTimer = func() {
var nextDeadline time.Time
if next := b.batches.peekFront(); next != nil {
nextDeadline = next.deadline
}
if !deadline.Equal(nextDeadline) || timer.Read {
deadline = nextDeadline
if !deadline.IsZero() {
timer.Reset(timeutil.Until(deadline))
} else {
// Clear the current timer: the sole pending batch was already sent
// before the timer fired.
timer.Stop()
timer = timeutil.NewTimer()
}
}
}
)
for {
select {
case req := <-reqChan():
handleRequest(req)
maybeSetTimer()
case <-timer.C:
timer.Read = true
sendBatch(b.batches.popFront())
maybeSetTimer()
case <-b.sendDoneChan:
handleSendDone()
case <-b.cfg.Stopper.ShouldQuiesce():
b.cleanup(stop.ErrUnavailable)
return
case <-ctx.Done():
b.cleanup(ctx.Err())
return
}
}
}
type request struct {
ctx context.Context
req roachpb.Request
rangeID roachpb.RangeID
responseChan chan<- Response
}
type batch struct {
reqs []*request
size int // bytes
// sendDeadline is the latest deadline reported by a request's context.
// It will be zero valued if any request does not contain a deadline.
sendDeadline time.Time
// idx is the batch's index in the batchQueue.
idx int
// deadline is the time at which this batch should be sent according to the
// Batcher's configuration.
deadline time.Time
// startTime is the time at which the first request was added to the batch.
startTime time.Time
// lastUpdated is the latest time when a request was added to the batch.
lastUpdated time.Time
}
func (b *batch) rangeID() roachpb.RangeID {
if len(b.reqs) == 0 {
panic("rangeID cannot be called on an empty batch")
}
return b.reqs[0].rangeID
}
func (b *batch) batchRequest(cfg *Config) roachpb.BatchRequest {
req := roachpb.BatchRequest{
// Preallocate the Requests slice.
Requests: make([]roachpb.RequestUnion, 0, len(b.reqs)),
}
for _, r := range b.reqs {
req.Add(r.req)
}
if cfg.MaxKeysPerBatchReq > 0 {
req.MaxSpanRequestKeys = int64(cfg.MaxKeysPerBatchReq)
}
return req
}
// pool stores object pools for the various commonly reused objects of the
// batcher
type pool struct {
responseChanPool sync.Pool
batchPool sync.Pool
requestPool sync.Pool
}
func makePool() pool {
return pool{
responseChanPool: sync.Pool{
New: func() interface{} { return make(chan Response, 1) },
},
batchPool: sync.Pool{
New: func() interface{} { return &batch{} },
},
requestPool: sync.Pool{
New: func() interface{} { return &request{} },
},
}
}
func (p *pool) getResponseChan() chan Response {
return p.responseChanPool.Get().(chan Response)
}
func (p *pool) putResponseChan(r chan Response) {
p.responseChanPool.Put(r)
}
func (p *pool) newRequest(
ctx context.Context, rangeID roachpb.RangeID, req roachpb.Request, responseChan chan<- Response,
) *request {
r := p.requestPool.Get().(*request)
*r = request{
ctx: ctx,
rangeID: rangeID,
req: req,
responseChan: responseChan,
}
return r
}
func (p *pool) putRequest(r *request) {
*r = request{}
p.requestPool.Put(r)
}
func (p *pool) newBatch(now time.Time) *batch {
ba := p.batchPool.Get().(*batch)
*ba = batch{
startTime: now,
idx: -1,
}
return ba
}
func (p *pool) putBatch(b *batch) {
*b = batch{}
p.batchPool.Put(b)
}
// batchQueue is a container for batch objects which offers O(1) get based on
// rangeID and peekFront as well as O(log(n)) upsert, removal, popFront.
// Batch structs are heap ordered inside of the batches slice based on their
// deadline with the earliest deadline at the front.
//
// Note that the batch struct stores its index in the batches slice and is -1
// when not part of the queue. The heap methods update the batch indices when
// updating the heap. Take care never to put a batch into multiple
// batchQueues. At the time of writing, this package only ever uses one
// batchQueue per RequestBatcher.
type batchQueue struct {
batches []*batch
byRange map[roachpb.RangeID]*batch
}
var _ heap.Interface = (*batchQueue)(nil)
func makeBatchQueue() batchQueue {
return batchQueue{
byRange: map[roachpb.RangeID]*batch{},
}
}
func (q *batchQueue) peekFront() *batch {
if q.Len() == 0 {
return nil
}
return q.batches[0]
}
func (q *batchQueue) popFront() *batch {
if q.Len() == 0 {
return nil
}
return heap.Pop(q).(*batch)
}
func (q *batchQueue) get(id roachpb.RangeID) (*batch, bool) {
b, exists := q.byRange[id]
return b, exists
}
func (q *batchQueue) remove(ba *batch) {
delete(q.byRange, ba.rangeID())
heap.Remove(q, ba.idx)
}
func (q *batchQueue) upsert(ba *batch) {
if ba.idx >= 0 {
heap.Fix(q, ba.idx)
} else {
heap.Push(q, ba)
}
}
func (q *batchQueue) Len() int {
return len(q.batches)
}
func (q *batchQueue) Swap(i, j int) {
q.batches[i], q.batches[j] = q.batches[j], q.batches[i]
q.batches[i].idx = i
q.batches[j].idx = j
}
func (q *batchQueue) Less(i, j int) bool {
idl, jdl := q.batches[i].deadline, q.batches[j].deadline
if before := idl.Before(jdl); before || !idl.Equal(jdl) {
return before
}
return q.batches[i].rangeID() < q.batches[j].rangeID()
}
func (q *batchQueue) Push(v interface{}) {
ba := v.(*batch)
ba.idx = len(q.batches)
q.byRange[ba.rangeID()] = ba
q.batches = append(q.batches, ba)
}
func (q *batchQueue) Pop() interface{} {
ba := q.batches[len(q.batches)-1]
q.batches = q.batches[:len(q.batches)-1]
delete(q.byRange, ba.rangeID())
ba.idx = -1
return ba
}
| {
return nil
} | conditional_block |
batcher.go | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package requestbatcher is a library to enable easy batching of roachpb
// requests.
//
// Batching in general represents a tradeoff between throughput and latency. The
// underlying assumption is that batched operations are cheaper than performing
// each operation individually. If this is not the case for your workload, don't
// use this library.
//
// Batching assumes that data with the same key can be sent in a single batch.
// The initial implementation uses rangeID as the key explicitly to avoid
// creating an overly general solution without motivation, but interested readers
// should recognize that it would be easy to extend this package to accept an
// arbitrary comparable key.
package requestbatcher
import (
"container/heap"
"context"
"sync"
"time"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// The motivating use cases for this package are opportunities to perform cleanup
// operations in a single raft transaction rather than several. Three main
// opportunities are known:
//
// 1) Intent resolution
// 2) Txn heartbeating
// 3) Txn record garbage collection
//
// The first two carry relatively tight timing expectations. In other words,
// it would be surprising and harmful for a client if operations were not sent
// soon after they were queued. The transaction record GC workload can be rather
// asynchronous. This motivates the need for some knobs to control the maximum
// acceptable amount of time to buffer operations before sending.
// Another wrinkle is dealing with the different ways in which sending a batch
// may fail. A batch may fail in an ambiguous way (RPC/network errors), it may
// fail completely (which is likely indistinguishable from the ambiguous
// failure), and lastly it may fail partially. Today's Sender contract is fairly
// ambiguous about the relationship between BatchResponse inner responses and
// errors returned from a batch request.
// TODO(ajwerner): Do we need to consider ordering dependencies between
// operations? For the initial motivating use cases for this library there are
// no data dependencies between operations and the only key will be the guess
// for where an operation should go.
// TODO(ajwerner): Consider a more sophisticated mechanism to limit on maximum
// number of requests in flight at a time. This may ultimately lead to a need
// for queuing. Furthermore consider using batch time to dynamically tune the
// amount of time we wait.
// TODO(ajwerner): Consider filtering requests which might have been canceled
// before sending a batch.
// TODO(ajwerner): Consider more dynamic policies with regards to deadlines.
// Perhaps we want to wait no more than some percentile of the duration of
// historical operations and stay idle only some other percentile. For example
// imagine if the max delay was the 50th and the max idle was the 10th? This
// has a problem when much of the prior workload was say local operations and
// happened very rapidly. Perhaps we need to provide some bounding envelope?
// TODO(ajwerner): Consider a more general purpose interface for this package.
// While several interface-oriented designs have been explored, they all felt
// heavy and allocation intensive.
// TODO(ajwerner): Consider providing an interface which enables a single
// goroutine to dispatch a number of requests destined for different ranges to
// the RequestBatcher which may then wait for completion of all of the requests.
// What is the right contract for error handling? Imagine a situation where a
// client has dispatched N requests and one has been sent and returns with an
// error while others are queued. What should happen? Should the client receive
// the error rapidly? Should the other requests be sent at all? Should they be
// filtered before sending?
// Config contains the dependencies and configuration for a Batcher.
type Config struct {
// Name of the batcher, used for logging, timeout errors, and the stopper.
Name string
// Sender can round-trip a batch. Sender must not be nil.
Sender kv.Sender
// Stopper controls the lifecycle of the Batcher. Stopper must not be nil.
Stopper *stop.Stopper
// MaxSizePerBatch is the maximum total size in bytes of the requests in a
// batch. If MaxSizePerBatch <= 0 then no limit is enforced.
MaxSizePerBatch int
// MaxMsgsPerBatch is the maximum number of messages.
// If MaxMsgsPerBatch <= 0 then no limit is enforced.
MaxMsgsPerBatch int
// MaxKeysPerBatchReq is the maximum number of keys that each batch is
// allowed to touch during one of its requests. If the limit is exceeded,
// the batch is paginated over a series of individual requests. This limit
// corresponds to the MaxSpanRequestKeys assigned to the Header of each
// request. If MaxKeysPerBatchReq <= 0 then no limit is enforced.
MaxKeysPerBatchReq int
// MaxWait is the maximum amount of time a message should wait in a batch
// before being sent. If MaxWait is <= 0 then no wait timeout is enforced.
// It is inadvisable to disable both MaxIdle and MaxWait.
MaxWait time.Duration
// MaxIdle is the amount of time a batch should wait between message additions
// before being sent. The idle timer allows clients to observe low latencies
// when throughput is low. If MaxIdle is <= 0 then no idle timeout is
// enforced. It is inadvisable to disable both MaxIdle and MaxWait.
MaxIdle time.Duration
// InFlightBackpressureLimit is the number of batches in flight above which
// sending clients should experience backpressure. If the batcher has more
// batches than this in flight, it will not accept new requests until the
// number of in-flight batches is again below this threshold. This value does
// not limit the number of batches which may ultimately be in flight as
// batches which are queued to send but not yet in flight will still send.
// Note that values less than or equal to zero will result in the use of
// DefaultInFlightBackpressureLimit.
InFlightBackpressureLimit int
// NowFunc is used to determine the current time. It defaults to timeutil.Now.
NowFunc func() time.Time
}
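// A minimal construction sketch (field values are illustrative, not
// recommendations; sender and stopper are assumed to be supplied by the
// caller):
//
//	b := New(Config{
//		Name:            "example-batcher",
//		Sender:          sender,  // any kv.Sender
//		Stopper:         stopper, // the server's *stop.Stopper
//		MaxMsgsPerBatch: 100,
//		MaxWait:         10 * time.Millisecond,
//		MaxIdle:         time.Millisecond,
//	})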
const (
// DefaultInFlightBackpressureLimit is the InFlightBackpressureLimit used if
// a zero value for that setting is passed in a Config to New.
// TODO(ajwerner): Justify this number.
DefaultInFlightBackpressureLimit = 1000
// BackpressureRecoveryFraction is the fraction of InFlightBackpressureLimit
// used to detect when enough in flight requests have completed such that more
// requests should now be accepted. A value less than 1 is chosen in order to
// avoid thrashing on backpressure which might ultimately defeat the purpose
// of the RequestBatcher.
backpressureRecoveryFraction = .8
)
func backpressureRecoveryThreshold(limit int) int {
if l := int(float64(limit) * backpressureRecoveryFraction); l > 0 {
return l
}
return 1 // don't allow the recovery threshold to be 0
}
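// Worked example: with the default limit of 1000, backpressure engages once
// 1000 batches are in flight, and per backpressureRecoveryFraction new
// requests are accepted again only after the in-flight count drops below
// int(1000 * 0.8) = 800.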
// RequestBatcher batches requests destined for a single range based on
// a configured batching policy.
type RequestBatcher struct {
pool pool
cfg Config
// sendBatchOpName is the string passed to contextutil.RunWithTimeout when
// sending a batch.
sendBatchOpName string
batches batchQueue
requestChan chan *request
sendDoneChan chan struct{}
}
// Response is exported for use with the channel-oriented SendWithChan method.
// At least one of Resp or Err will be populated for every sent Response.
type Response struct {
Resp roachpb.Response
Err error
}
// New creates a new RequestBatcher.
func New(cfg Config) *RequestBatcher {
validateConfig(&cfg)
b := &RequestBatcher{
cfg: cfg,
pool: makePool(),
batches: makeBatchQueue(),
requestChan: make(chan *request),
sendDoneChan: make(chan struct{}),
}
b.sendBatchOpName = b.cfg.Name + ".sendBatch"
if err := cfg.Stopper.RunAsyncTask(context.Background(), b.cfg.Name, b.run); err != nil {
panic(err)
}
return b
}
func validateConfig(cfg *Config) {
if cfg.Stopper == nil {
panic("cannot construct a Batcher with a nil Stopper")
} else if cfg.Sender == nil {
panic("cannot construct a Batcher with a nil Sender")
}
if cfg.InFlightBackpressureLimit <= 0 {
cfg.InFlightBackpressureLimit = DefaultInFlightBackpressureLimit
}
if cfg.NowFunc == nil {
cfg.NowFunc = timeutil.Now | // client is responsible for ensuring that the passed respChan has a buffer at
// least as large as the number of responses it expects to receive. Using an
// insufficiently buffered channel can lead to deadlocks and unintended delays
// processing requests inside the RequestBatcher.
func (b *RequestBatcher) SendWithChan(
ctx context.Context, respChan chan<- Response, rangeID roachpb.RangeID, req roachpb.Request,
) error {
select {
case b.requestChan <- b.pool.newRequest(ctx, rangeID, req, respChan):
return nil
case <-b.cfg.Stopper.ShouldQuiesce():
return stop.ErrUnavailable
case <-ctx.Done():
return ctx.Err()
}
}
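// Illustrative sketch of the buffering requirement described above; reqs is
// a hypothetical caller-side slice, and a single channel collecting n
// responses must be buffered for at least n:
//
//	respChan := make(chan Response, len(reqs))
//	for _, r := range reqs {
//		if err := b.SendWithChan(ctx, respChan, r.rangeID, r.req); err != nil {
//			return err
//		}
//	}
//	for range reqs {
//		resp := <-respChan // cannot block the batcher
//		_ = resp
//	}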
// Send sends req as a part of a batch. An error is returned if the context
// is canceled before the sending of the request completes. The context with
// the latest deadline for a batch is used to send the underlying batch request.
func (b *RequestBatcher) Send(
ctx context.Context, rangeID roachpb.RangeID, req roachpb.Request,
) (roachpb.Response, error) {
responseChan := b.pool.getResponseChan()
if err := b.SendWithChan(ctx, responseChan, rangeID, req); err != nil {
return nil, err
}
select {
case resp := <-responseChan:
// It's only safe to put responseChan back in the pool if it has been
// received from.
b.pool.putResponseChan(responseChan)
return resp.Resp, resp.Err
case <-b.cfg.Stopper.ShouldQuiesce():
return nil, stop.ErrUnavailable
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (b *RequestBatcher) sendDone(ba *batch) {
b.pool.putBatch(ba)
select {
case b.sendDoneChan <- struct{}{}:
case <-b.cfg.Stopper.ShouldQuiesce():
}
}
func (b *RequestBatcher) sendBatch(ctx context.Context, ba *batch) {
if err := b.cfg.Stopper.RunAsyncTask(ctx, "send-batch", func(ctx context.Context) {
defer b.sendDone(ba)
var br *roachpb.BatchResponse
send := func(ctx context.Context) error {
var pErr *roachpb.Error
if br, pErr = b.cfg.Sender.Send(ctx, ba.batchRequest(&b.cfg)); pErr != nil {
return pErr.GoError()
}
return nil
}
if !ba.sendDeadline.IsZero() {
actualSend := send
send = func(context.Context) error {
return contextutil.RunWithTimeout(
ctx, b.sendBatchOpName, timeutil.Until(ba.sendDeadline), actualSend)
}
}
// Send requests in a loop to support pagination, which may be necessary
// if MaxKeysPerBatchReq is set. If so, partial responses with resume
// spans may be returned for requests, indicating that the limit was hit
// before they could complete and that they should be resumed over the
// specified key span. Requests in the batch are neither guaranteed to
// be ordered nor guaranteed to be non-overlapping, so we can make no
// assumptions about the requests that will result in full responses
// (with no resume spans) vs. partial responses vs. empty responses (see
// the comment on roachpb.Header.MaxSpanRequestKeys).
//
// To accommodate this, we keep track of all partial responses from
// previous iterations. After receiving a batch of responses during an
// iteration, the responses are each combined with the previous response
// for their corresponding requests. From there, responses that have no
// resume spans are removed. Responses that have resume spans are
// updated appropriately and sent again in the next iteration. The loop
// proceeds until all requests have been run to completion.
var prevResps []roachpb.Response
for len(ba.reqs) > 0 {
err := send(ctx)
nextReqs, nextPrevResps := ba.reqs[:0], prevResps[:0]
for i, r := range ba.reqs {
var res Response
if br != nil {
resp := br.Responses[i].GetInner()
if prevResps != nil {
prevResp := prevResps[i]
if cErr := roachpb.CombineResponses(prevResp, resp); cErr != nil {
log.Fatalf(ctx, "%v", cErr)
}
resp = prevResp
}
if resume := resp.Header().ResumeSpan; resume != nil {
// Add a trimmed request to the next batch.
h := r.req.Header()
h.SetSpan(*resume)
r.req = r.req.ShallowCopy()
r.req.SetHeader(h)
nextReqs = append(nextReqs, r)
// Strip resume span from previous response and record.
prevH := resp.Header()
prevH.ResumeSpan = nil
prevResp := resp
prevResp.SetHeader(prevH)
nextPrevResps = append(nextPrevResps, prevResp)
continue
}
res.Resp = resp
}
if err != nil {
res.Err = err
}
b.sendResponse(r, res)
}
ba.reqs, prevResps = nextReqs, nextPrevResps
}
}); err != nil {
b.sendDone(ba)
}
}
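// Worked example of the pagination loop above (key counts are illustrative):
// with MaxKeysPerBatchReq = 2, a scan over [a, e) may return keys a and b
// plus a resume span [c, e). The loop combines that partial response with
// later ones, re-sends the request trimmed to [c, e), and delivers a single
// combined response to the waiting client only once no resume span remains.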
func (b *RequestBatcher) sendResponse(req *request, resp Response) {
// This send should never block because responseChan is buffered.
req.responseChan <- resp
b.pool.putRequest(req)
}
func addRequestToBatch(cfg *Config, now time.Time, ba *batch, r *request) (shouldSend bool) {
// Update the deadline for the batch if this request's deadline is later
// than the current latest.
rDeadline, rHasDeadline := r.ctx.Deadline()
// If this is the first request or
if len(ba.reqs) == 0 ||
// there are already requests and there is a deadline and
(len(ba.reqs) > 0 && !ba.sendDeadline.IsZero() &&
// this request either doesn't have a deadline or has a later deadline,
(!rHasDeadline || rDeadline.After(ba.sendDeadline))) {
// set the deadline to this request's deadline.
ba.sendDeadline = rDeadline
}
ba.reqs = append(ba.reqs, r)
ba.size += r.req.Size()
ba.lastUpdated = now
if cfg.MaxIdle > 0 {
ba.deadline = ba.lastUpdated.Add(cfg.MaxIdle)
}
if cfg.MaxWait > 0 {
waitDeadline := ba.startTime.Add(cfg.MaxWait)
if cfg.MaxIdle <= 0 || waitDeadline.Before(ba.deadline) {
ba.deadline = waitDeadline
}
}
return (cfg.MaxMsgsPerBatch > 0 && len(ba.reqs) >= cfg.MaxMsgsPerBatch) ||
(cfg.MaxSizePerBatch > 0 && ba.size >= cfg.MaxSizePerBatch)
}
func (b *RequestBatcher) cleanup(err error) {
for ba := b.batches.popFront(); ba != nil; ba = b.batches.popFront() {
for _, r := range ba.reqs {
b.sendResponse(r, Response{Err: err})
}
}
}
func (b *RequestBatcher) run(ctx context.Context) {
// Create a context to be used in sendBatch to cancel in-flight batches when
// this function exits. If we did not cancel in-flight requests then the
// Stopper might get stuck waiting for those requests to complete.
sendCtx, cancel := context.WithCancel(ctx)
defer cancel()
var (
// inFlight tracks the number of batches currently being sent.
inFlight = 0
// inBackPressure indicates whether the run loop has stopped accepting new
// requests: while true, reqChan returns nil. It becomes true when inFlight
// reaches b.cfg.InFlightBackpressureLimit.
inBackPressure = false
// recoveryThreshold is the number of in-flight requests below which the
// inBackPressure state should exit.
recoveryThreshold = backpressureRecoveryThreshold(b.cfg.InFlightBackpressureLimit)
// reqChan consults inBackPressure to determine whether the goroutine is
// accepting new requests.
reqChan = func() <-chan *request {
if inBackPressure {
return nil
}
return b.requestChan
}
sendBatch = func(ba *batch) {
inFlight++
if inFlight >= b.cfg.InFlightBackpressureLimit {
inBackPressure = true
}
b.sendBatch(sendCtx, ba)
}
handleSendDone = func() {
inFlight--
if inFlight < recoveryThreshold {
inBackPressure = false
}
}
handleRequest = func(req *request) {
now := b.cfg.NowFunc()
ba, existsInQueue := b.batches.get(req.rangeID)
if !existsInQueue {
ba = b.pool.newBatch(now)
}
if shouldSend := addRequestToBatch(&b.cfg, now, ba, req); shouldSend {
if existsInQueue {
b.batches.remove(ba)
}
sendBatch(ba)
} else {
b.batches.upsert(ba)
}
}
deadline time.Time
timer = timeutil.NewTimer()
maybeSetTimer = func() {
var nextDeadline time.Time
if next := b.batches.peekFront(); next != nil {
nextDeadline = next.deadline
}
if !deadline.Equal(nextDeadline) || timer.Read {
deadline = nextDeadline
if !deadline.IsZero() {
timer.Reset(timeutil.Until(deadline))
} else {
// Clear the current timer: the sole pending batch was already sent
// before the timer fired.
timer.Stop()
timer = timeutil.NewTimer()
}
}
}
)
for {
select {
case req := <-reqChan():
handleRequest(req)
maybeSetTimer()
case <-timer.C:
timer.Read = true
sendBatch(b.batches.popFront())
maybeSetTimer()
case <-b.sendDoneChan:
handleSendDone()
case <-b.cfg.Stopper.ShouldQuiesce():
b.cleanup(stop.ErrUnavailable)
return
case <-ctx.Done():
b.cleanup(ctx.Err())
return
}
}
}
type request struct {
ctx context.Context
req roachpb.Request
rangeID roachpb.RangeID
responseChan chan<- Response
}
type batch struct {
reqs []*request
size int // bytes
// sendDeadline is the latest deadline reported by a request's context.
// It will be zero valued if any request does not contain a deadline.
sendDeadline time.Time
// idx is the batch's index in the batchQueue.
idx int
// deadline is the time at which this batch should be sent according to the
// Batcher's configuration.
deadline time.Time
// startTime is the time at which the first request was added to the batch.
startTime time.Time
// lastUpdated is the latest time when a request was added to the batch.
lastUpdated time.Time
}
func (b *batch) rangeID() roachpb.RangeID {
if len(b.reqs) == 0 {
panic("rangeID cannot be called on an empty batch")
}
return b.reqs[0].rangeID
}
func (b *batch) batchRequest(cfg *Config) roachpb.BatchRequest {
req := roachpb.BatchRequest{
// Preallocate the Requests slice.
Requests: make([]roachpb.RequestUnion, 0, len(b.reqs)),
}
for _, r := range b.reqs {
req.Add(r.req)
}
if cfg.MaxKeysPerBatchReq > 0 {
req.MaxSpanRequestKeys = int64(cfg.MaxKeysPerBatchReq)
}
return req
}
// pool stores object pools for the various commonly reused objects of the
// batcher
type pool struct {
responseChanPool sync.Pool
batchPool sync.Pool
requestPool sync.Pool
}
func makePool() pool {
return pool{
responseChanPool: sync.Pool{
New: func() interface{} { return make(chan Response, 1) },
},
batchPool: sync.Pool{
New: func() interface{} { return &batch{} },
},
requestPool: sync.Pool{
New: func() interface{} { return &request{} },
},
}
}
func (p *pool) getResponseChan() chan Response {
return p.responseChanPool.Get().(chan Response)
}
func (p *pool) putResponseChan(r chan Response) {
p.responseChanPool.Put(r)
}
func (p *pool) newRequest(
ctx context.Context, rangeID roachpb.RangeID, req roachpb.Request, responseChan chan<- Response,
) *request {
r := p.requestPool.Get().(*request)
*r = request{
ctx: ctx,
rangeID: rangeID,
req: req,
responseChan: responseChan,
}
return r
}
func (p *pool) putRequest(r *request) {
*r = request{}
p.requestPool.Put(r)
}
func (p *pool) newBatch(now time.Time) *batch {
ba := p.batchPool.Get().(*batch)
*ba = batch{
startTime: now,
idx: -1,
}
return ba
}
func (p *pool) putBatch(b *batch) {
*b = batch{}
p.batchPool.Put(b)
}
// batchQueue is a container for batch objects which offers O(1) get based on
// rangeID and peekFront as well as O(log(n)) upsert, removal, popFront.
// Batch structs are heap ordered inside of the batches slice based on their
// deadline with the earliest deadline at the front.
//
// Note that the batch struct stores its index in the batches slice and is -1
// when not part of the queue. The heap methods update the batch indices when
// updating the heap. Take care never to put a batch into multiple
// batchQueues. At the time of writing, this package only ever uses one
// batchQueue per RequestBatcher.
type batchQueue struct {
batches []*batch
byRange map[roachpb.RangeID]*batch
}
var _ heap.Interface = (*batchQueue)(nil)
func makeBatchQueue() batchQueue {
return batchQueue{
byRange: map[roachpb.RangeID]*batch{},
}
}
func (q *batchQueue) peekFront() *batch {
if q.Len() == 0 {
return nil
}
return q.batches[0]
}
func (q *batchQueue) popFront() *batch {
if q.Len() == 0 {
return nil
}
return heap.Pop(q).(*batch)
}
func (q *batchQueue) get(id roachpb.RangeID) (*batch, bool) {
b, exists := q.byRange[id]
return b, exists
}
func (q *batchQueue) remove(ba *batch) {
delete(q.byRange, ba.rangeID())
heap.Remove(q, ba.idx)
}
func (q *batchQueue) upsert(ba *batch) {
if ba.idx >= 0 {
heap.Fix(q, ba.idx)
} else {
heap.Push(q, ba)
}
}
func (q *batchQueue) Len() int {
return len(q.batches)
}
func (q *batchQueue) Swap(i, j int) {
q.batches[i], q.batches[j] = q.batches[j], q.batches[i]
q.batches[i].idx = i
q.batches[j].idx = j
}
func (q *batchQueue) Less(i, j int) bool {
idl, jdl := q.batches[i].deadline, q.batches[j].deadline
if before := idl.Before(jdl); before || !idl.Equal(jdl) {
return before
}
return q.batches[i].rangeID() < q.batches[j].rangeID()
}
func (q *batchQueue) Push(v interface{}) {
ba := v.(*batch)
ba.idx = len(q.batches)
q.byRange[ba.rangeID()] = ba
q.batches = append(q.batches, ba)
}
func (q *batchQueue) Pop() interface{} {
ba := q.batches[len(q.batches)-1]
q.batches = q.batches[:len(q.batches)-1]
delete(q.byRange, ba.rangeID())
ba.idx = -1
return ba
} | }
}
// SendWithChan sends a request with a client provided response channel. The | random_line_split |
lib.rs | mod pixel;
mod y4m;
use self::pixel::*;
use ::y4m::{Colorspace, Decoder};
use std::cmp;
use std::collections::{BTreeMap, BTreeSet};
use std::io::Read;
/// Options determining how to run scene change detection.
pub struct DetectionOptions {
/// Whether or not to analyze the chroma planes.
/// Enabling this is slower, but may be more accurate.
pub use_chroma: bool,
/// Enabling this will utilize heuristics to avoid scenecuts
/// that are too close to each other.
/// This is generally useful if you want scenecut detection
/// for use in an encoder.
/// If you want a raw list of scene changes, you should disable this.
pub ignore_flashes: bool,
/// The minimum distance between two scene changes.
pub min_scenecut_distance: Option<usize>,
/// The maximum distance between two scene changes.
pub max_scenecut_distance: Option<usize>,
/// The distance to look ahead in the video
/// for scene flash detection.
///
/// Not used if `ignore_flashes` is `true`.
pub lookahead_distance: usize,
/// An optional callback that will fire after each frame is analyzed.
/// Arguments passed in will be, in order,
/// the number of frames analyzed, and the number of keyframes detected.
///
/// This is generally useful for displaying progress, etc.
pub progress_callback: Option<Box<dyn Fn(usize, usize)>>,
}
impl Default for DetectionOptions {
fn default() -> Self {
DetectionOptions {
use_chroma: true,
ignore_flashes: false,
lookahead_distance: 5,
min_scenecut_distance: None,
max_scenecut_distance: None,
progress_callback: None,
}
}
}
/// Runs through a y4m video clip,
/// detecting where scene changes occur.
/// This is adjustable based on the `opts` parameters.
///
/// Returns a `Vec` containing the frame numbers where the scene changes occur.
pub fn detect_scene_changes<R: Read, T: Pixel>(
dec: &mut Decoder<R>,
opts: DetectionOptions,
) -> Vec<usize> {
assert!(opts.lookahead_distance >= 1);
let bit_depth = dec.get_bit_depth() as u8;
let chroma_sampling = ChromaSampling::from(dec.get_colorspace()); | let mut keyframes = BTreeSet::new();
let mut frameno = 0;
loop {
let mut next_input_frameno = frame_queue
.keys()
.last()
.copied()
.map(|key| key + 1)
.unwrap_or(0);
while next_input_frameno < frameno + opts.lookahead_distance {
let frame = y4m::read_video_frame::<R, T>(dec);
if let Ok(frame) = frame {
frame_queue.insert(next_input_frameno, frame);
next_input_frameno += 1;
} else {
// End of input
break;
}
}
let frame_set = frame_queue
.iter()
.skip_while(|&(&key, _)| key < frameno)
.map(|(_, value)| value)
.take(opts.lookahead_distance)
.collect::<Vec<_>>();
if frame_set.is_empty() {
// End of video
break;
}
detector.analyze_next_frame(
if frameno == 0 {
None
} else {
frame_queue.get(&(frameno - 1))
},
&frame_set,
frameno,
&mut keyframes,
);
if frameno > 0 {
frame_queue.remove(&(frameno - 1));
}
frameno += 1;
if let Some(ref progress_fn) = opts.progress_callback {
progress_fn(frameno, keyframes.len());
}
}
keyframes.into_iter().collect()
}
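// Illustrative usage sketch (error handling elided; the exact y4m decoder
// constructor is version-dependent and shown here only as an assumption):
//
//     let mut file = std::fs::File::open("input.y4m").unwrap();
//     let mut dec = Decoder::new(&mut file).unwrap();
//     let scene_changes = detect_scene_changes::<_, u8>(&mut dec, DetectionOptions::default());
//     println!("scene changes at frames {:?}", scene_changes);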
type PlaneData<T> = [Vec<T>; 3];
/// Available chroma sampling formats.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ChromaSampling {
/// Both vertically and horizontally subsampled.
Cs420,
/// Horizontally subsampled.
Cs422,
/// Not subsampled.
Cs444,
/// Monochrome.
Cs400,
}
impl From<Colorspace> for ChromaSampling {
fn from(other: Colorspace) -> Self {
use Colorspace::*;
match other {
Cmono => ChromaSampling::Cs400,
C420 | C420p10 | C420p12 | C420jpeg | C420paldv | C420mpeg2 => ChromaSampling::Cs420,
C422 | C422p10 | C422p12 => ChromaSampling::Cs422,
C444 | C444p10 | C444p12 => ChromaSampling::Cs444,
}
}
}
impl ChromaSampling {
/// Provides the amount to right shift the luma plane dimensions to get the
/// chroma plane dimensions.
/// Only values 0 or 1 are ever returned.
/// The plane dimensions must also be rounded up to accommodate odd luma plane
/// sizes.
/// Cs400 returns None, as there are no chroma planes.
pub fn get_decimation(self) -> Option<(usize, usize)> {
use self::ChromaSampling::*;
match self {
Cs420 => Some((1, 1)),
Cs422 => Some((1, 0)),
Cs444 => Some((0, 0)),
Cs400 => None,
}
}
}
/// Runs keyframe detection on frames from the lookahead queue.
struct SceneChangeDetector<'a> {
/// Minimum average difference between YUV deltas that will trigger a scene change.
threshold: u8,
opts: &'a DetectionOptions,
/// Frames that cannot be marked as keyframes due to the algorithm excluding them.
/// Storing the frame numbers allows us to avoid looking back more than one frame.
excluded_frames: BTreeSet<usize>,
chroma_sampling: ChromaSampling,
}
impl<'a> SceneChangeDetector<'a> {
pub fn new(bit_depth: u8, chroma_sampling: ChromaSampling, opts: &'a DetectionOptions) -> Self {
// This implementation is based on a Python implementation at
// https://pyscenedetect.readthedocs.io/en/latest/reference/detection-methods/.
// The Python implementation uses HSV values and a threshold of 30. Comparing the
// YUV values was sufficient in most cases, and avoided a more costly YUV->RGB->HSV
// conversion, but the deltas needed to be scaled down. The deltas for keyframes
// in YUV were about 1/3 to 1/2 of what they were in HSV, but non-keyframes were
// very unlikely to have a delta greater than 3 in YUV, whereas they may reach into
// the double digits in HSV. Therefore, 12 was chosen as a reasonable default threshold.
// This may be adjusted later.
const BASE_THRESHOLD: u8 = 12;
Self {
threshold: BASE_THRESHOLD * bit_depth / 8,
opts,
excluded_frames: BTreeSet::new(),
chroma_sampling,
}
}
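// Worked example of the scaling above: for 8-bit input the threshold is
// 12 * 8 / 8 = 12, while for 10-bit input it becomes 12 * 10 / 8 = 15,
// keeping per-pixel sensitivity roughly constant across bit depths.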
/// Runs keyframe detection on the next frame in the lookahead queue.
///
/// This function requires that a subset of input frames
/// is passed to it in order, and that `keyframes` is only
/// updated from this method. `input_frameno` should correspond
/// to the first frame in `frame_set`.
///
/// This will gracefully handle the first frame in the video as well.
pub fn analyze_next_frame<T: Pixel>(
&mut self,
previous_frame: Option<&PlaneData<T>>,
frame_set: &[&PlaneData<T>],
input_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) {
let frame_set = match previous_frame {
Some(frame) => [frame]
.iter()
.chain(frame_set.iter())
.cloned()
.collect::<Vec<_>>(),
None => {
// The first frame is always a keyframe.
keyframes.insert(0);
return;
}
};
self.exclude_scene_flashes(&frame_set, input_frameno);
if self.is_key_frame(&frame_set[0], &frame_set[1], input_frameno, keyframes) {
keyframes.insert(input_frameno);
}
}
/// Determines if `current_frame` should be a keyframe.
fn is_key_frame<T: Pixel>(
&self,
previous_frame: &PlaneData<T>,
current_frame: &PlaneData<T>,
current_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) -> bool {
// Find the distance to the previous keyframe.
let previous_keyframe = keyframes.iter().last().unwrap();
let distance = current_frameno - previous_keyframe;
// Handle minimum and maximum key frame intervals.
if distance < self.opts.min_scenecut_distance.unwrap_or(0) {
return false;
}
if distance
>= self
.opts
.max_scenecut_distance
.unwrap_or(usize::max_value())
{
return true;
}
if self.excluded_frames.contains(¤t_frameno) {
return false;
}
self.has_scenecut(previous_frame, current_frame)
}
/// Uses lookahead to avoid coding short flashes as scenecuts.
/// Saves excluded frame numbers in `self.excluded_frames`.
fn exclude_scene_flashes<T: Pixel>(&mut self, frame_subset: &[&PlaneData<T>], frameno: usize) {
let lookahead_distance = cmp::min(self.opts.lookahead_distance, frame_subset.len() - 1);
// Where A and B are scenes: AAAAAABBBAAAAAA
// If BBB is shorter than lookahead_distance, it is detected as a flash
// and not considered a scenecut.
for j in 1..=lookahead_distance {
if !self.has_scenecut(&frame_subset[0], &frame_subset[j]) {
// Any frame in between `0` and `j` cannot be a real scenecut.
for i in 0..=j {
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
// Where A-F are scenes: AAAAABBCCDDEEFFFFFF
// If each of BB ... EE are shorter than `lookahead_distance`, they are
// detected as flashes and not considered scenecuts.
// Instead, the first F frame becomes a scenecut.
// If the video ends before F, no frame becomes a scenecut.
for i in 1..lookahead_distance {
if self.has_scenecut(&frame_subset[i], &frame_subset[lookahead_distance]) {
// If the current frame is the frame before a scenecut, it cannot also be the frame of a scenecut.
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
/// Run a comparison between two frames to determine if they qualify for a scenecut.
///
/// The current algorithm detects fast cuts using changes in colour and intensity between frames.
/// Since the difference between frames is used, only fast cuts are detected
/// with this method. This is intended to change via https://github.com/xiph/rav1e/issues/794.
fn has_scenecut<T: Pixel>(&self, frame1: &PlaneData<T>, frame2: &PlaneData<T>) -> bool {
let mut delta = Self::get_plane_sad(&frame1[0], &frame2[0]);
let mut len = frame1[0].len() as u64;
if self.opts.use_chroma && self.chroma_sampling != ChromaSampling::Cs400 {
let (x_dec, y_dec) = self.chroma_sampling.get_decimation().unwrap();
let dec = x_dec + y_dec;
delta += Self::get_plane_sad(&frame1[1], &frame2[1]) << dec;
len += (frame1[1].len() as u64) << dec;
delta += Self::get_plane_sad(&frame1[2], &frame2[2]) << dec;
len += (frame1[2].len() as u64) << dec;
}
delta >= self.threshold as u64 * len
}
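// Worked example of the chroma weighting above: with Cs420, get_decimation
// returns (1, 1), so dec = 2 and each chroma plane's SAD and length are
// shifted left by 2. A Cs420 chroma plane holds a quarter as many pixels as
// the luma plane, so the shift restores it to parity in the average.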
#[inline(always)]
fn get_plane_sad<T: Pixel>(plane1: &[T], plane2: &[T]) -> u64 {
assert_eq!(plane1.len(), plane2.len());
plane1
.iter()
.zip(plane2.iter())
.map(|(&p1, &p2)| (i16::cast_from(p1) - i16::cast_from(p2)).abs() as u64)
.sum::<u64>()
}
} | let mut detector = SceneChangeDetector::new(bit_depth, chroma_sampling, &opts);
let mut frame_queue = BTreeMap::new(); | random_line_split |
lib.rs | mod pixel;
mod y4m;
use self::pixel::*;
use ::y4m::{Colorspace, Decoder};
use std::cmp;
use std::collections::{BTreeMap, BTreeSet};
use std::io::Read;
/// Options determining how to run scene change detection.
pub struct DetectionOptions {
/// Whether or not to analyze the chroma planes.
/// Enabling this is slower, but may be more accurate.
pub use_chroma: bool,
/// Enabling this will utilize heuristics to avoid scenecuts
/// that are too close to each other.
/// This is generally useful if you want scenecut detection
/// for use in an encoder.
/// If you want a raw list of scene changes, you should disable this.
pub ignore_flashes: bool,
/// The minimum distance between two scene changes.
pub min_scenecut_distance: Option<usize>,
/// The maximum distance between two scene changes.
pub max_scenecut_distance: Option<usize>,
/// The distance to look ahead in the video
/// for scene flash detection.
///
/// Not used if `ignore_flashes` is `true`.
pub lookahead_distance: usize,
/// An optional callback that will fire after each frame is analyzed.
/// Arguments passed in will be, in order,
/// the number of frames analyzed, and the number of keyframes detected.
///
/// This is generally useful for displaying progress, etc.
pub progress_callback: Option<Box<dyn Fn(usize, usize)>>,
}
impl Default for DetectionOptions {
fn default() -> Self {
DetectionOptions {
use_chroma: true,
ignore_flashes: false,
lookahead_distance: 5,
min_scenecut_distance: None,
max_scenecut_distance: None,
progress_callback: None,
}
}
}
/// Runs through a y4m video clip,
/// detecting where scene changes occur.
/// This is adjustable based on the `opts` parameters.
///
/// Returns a `Vec` containing the frame numbers where the scene changes occur.
pub fn detect_scene_changes<R: Read, T: Pixel>(
dec: &mut Decoder<R>,
opts: DetectionOptions,
) -> Vec<usize> {
assert!(opts.lookahead_distance >= 1);
let bit_depth = dec.get_bit_depth() as u8;
let chroma_sampling = ChromaSampling::from(dec.get_colorspace());
let mut detector = SceneChangeDetector::new(bit_depth, chroma_sampling, &opts);
let mut frame_queue = BTreeMap::new();
let mut keyframes = BTreeSet::new();
let mut frameno = 0;
loop {
let mut next_input_frameno = frame_queue
.keys()
.last()
.copied()
.map(|key| key + 1)
.unwrap_or(0);
while next_input_frameno < frameno + opts.lookahead_distance {
let frame = y4m::read_video_frame::<R, T>(dec);
if let Ok(frame) = frame {
frame_queue.insert(next_input_frameno, frame);
next_input_frameno += 1;
} else {
// End of input
break;
}
}
let frame_set = frame_queue
.iter()
.skip_while(|&(&key, _)| key < frameno)
.map(|(_, value)| value)
.take(opts.lookahead_distance)
.collect::<Vec<_>>();
if frame_set.is_empty() {
// End of video
break;
}
detector.analyze_next_frame(
if frameno == 0 | else {
frame_queue.get(&(frameno - 1))
},
&frame_set,
frameno,
&mut keyframes,
);
if frameno > 0 {
frame_queue.remove(&(frameno - 1));
}
frameno += 1;
if let Some(ref progress_fn) = opts.progress_callback {
progress_fn(frameno, keyframes.len());
}
}
keyframes.into_iter().collect()
}
type PlaneData<T> = [Vec<T>; 3];
/// Available chroma sampling formats.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ChromaSampling {
/// Both vertically and horizontally subsampled.
Cs420,
/// Horizontally subsampled.
Cs422,
/// Not subsampled.
Cs444,
/// Monochrome.
Cs400,
}
impl From<Colorspace> for ChromaSampling {
fn from(other: Colorspace) -> Self {
use Colorspace::*;
match other {
Cmono => ChromaSampling::Cs400,
C420 | C420p10 | C420p12 | C420jpeg | C420paldv | C420mpeg2 => ChromaSampling::Cs420,
C422 | C422p10 | C422p12 => ChromaSampling::Cs422,
C444 | C444p10 | C444p12 => ChromaSampling::Cs444,
}
}
}
impl ChromaSampling {
/// Provides the amount to right shift the luma plane dimensions to get the
/// chroma plane dimensions.
/// Only values 0 or 1 are ever returned.
/// The plane dimensions must also be rounded up to accommodate odd luma plane
/// sizes.
/// Cs400 returns None, as there are no chroma planes.
pub fn get_decimation(self) -> Option<(usize, usize)> {
use self::ChromaSampling::*;
match self {
Cs420 => Some((1, 1)),
Cs422 => Some((1, 0)),
Cs444 => Some((0, 0)),
Cs400 => None,
}
}
}
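// Worked example: for Cs420 with a 1919x1079 luma plane, the decimation of
// (1, 1) plus rounding up yields chroma planes of ((1919 + 1) >> 1) x
// ((1079 + 1) >> 1) = 960 x 540.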
/// Runs keyframe detection on frames from the lookahead queue.
struct SceneChangeDetector<'a> {
/// Minimum average difference between YUV deltas that will trigger a scene change.
threshold: u8,
opts: &'a DetectionOptions,
/// Frames that cannot be marked as keyframes due to the algorithm excluding them.
/// Storing the frame numbers allows us to avoid looking back more than one frame.
excluded_frames: BTreeSet<usize>,
chroma_sampling: ChromaSampling,
}
impl<'a> SceneChangeDetector<'a> {
pub fn new(bit_depth: u8, chroma_sampling: ChromaSampling, opts: &'a DetectionOptions) -> Self {
// This implementation is based on a Python implementation at
// https://pyscenedetect.readthedocs.io/en/latest/reference/detection-methods/.
// The Python implementation uses HSV values and a threshold of 30. Comparing the
// YUV values was sufficient in most cases, and avoided a more costly YUV->RGB->HSV
// conversion, but the deltas needed to be scaled down. The deltas for keyframes
// in YUV were about 1/3 to 1/2 of what they were in HSV, but non-keyframes were
// very unlikely to have a delta greater than 3 in YUV, whereas they may reach into
// the double digits in HSV. Therefore, 12 was chosen as a reasonable default threshold.
// This may be adjusted later.
const BASE_THRESHOLD: u8 = 12;
Self {
threshold: BASE_THRESHOLD * bit_depth / 8,
opts,
excluded_frames: BTreeSet::new(),
chroma_sampling,
}
}
/// Runs keyframe detection on the next frame in the lookahead queue.
///
/// This function requires that a subset of input frames
/// is passed to it in order, and that `keyframes` is only
/// updated from this method. `input_frameno` should correspond
/// to the first frame in `frame_set`.
///
/// This will gracefully handle the first frame in the video as well.
pub fn analyze_next_frame<T: Pixel>(
&mut self,
previous_frame: Option<&PlaneData<T>>,
frame_set: &[&PlaneData<T>],
input_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) {
let frame_set = match previous_frame {
Some(frame) => [frame]
.iter()
.chain(frame_set.iter())
.cloned()
.collect::<Vec<_>>(),
None => {
// The first frame is always a keyframe.
keyframes.insert(0);
return;
}
};
self.exclude_scene_flashes(&frame_set, input_frameno);
if self.is_key_frame(&frame_set[0], &frame_set[1], input_frameno, keyframes) {
keyframes.insert(input_frameno);
}
}
/// Determines if `current_frame` should be a keyframe.
fn is_key_frame<T: Pixel>(
&self,
previous_frame: &PlaneData<T>,
current_frame: &PlaneData<T>,
current_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) -> bool {
// Find the distance to the previous keyframe.
let previous_keyframe = keyframes.iter().last().unwrap();
let distance = current_frameno - previous_keyframe;
// Handle minimum and maximum key frame intervals.
if distance < self.opts.min_scenecut_distance.unwrap_or(0) {
return false;
}
if distance
>= self
.opts
.max_scenecut_distance
.unwrap_or(usize::max_value())
{
return true;
}
if self.excluded_frames.contains(¤t_frameno) {
return false;
}
self.has_scenecut(previous_frame, current_frame)
}
/// Uses lookahead to avoid coding short flashes as scenecuts.
/// Saves excluded frame numbers in `self.excluded_frames`.
fn exclude_scene_flashes<T: Pixel>(&mut self, frame_subset: &[&PlaneData<T>], frameno: usize) {
let lookahead_distance = cmp::min(self.opts.lookahead_distance, frame_subset.len() - 1);
// Where A and B are scenes: AAAAAABBBAAAAAA
// If BBB is shorter than lookahead_distance, it is detected as a flash
// and not considered a scenecut.
for j in 1..=lookahead_distance {
if !self.has_scenecut(&frame_subset[0], &frame_subset[j]) {
// Any frame in between `0` and `j` cannot be a real scenecut.
for i in 0..=j {
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
// Where A-F are scenes: AAAAABBCCDDEEFFFFFF
// If each of BB ... EE are shorter than `lookahead_distance`, they are
// detected as flashes and not considered scenecuts.
// Instead, the first F frame becomes a scenecut.
// If the video ends before F, no frame becomes a scenecut.
for i in 1..lookahead_distance {
if self.has_scenecut(&frame_subset[i], &frame_subset[lookahead_distance]) {
// If the current frame is the frame before a scenecut, it cannot also be the frame of a scenecut.
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
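// Worked example of the first pass above: with scenes AAABBA inside the
// lookahead window, comparing the first A against the final A reports no
// scenecut, so every frame in between -- including the BB flash -- lands in
// excluded_frames and can never become a keyframe.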
/// Run a comparison between two frames to determine if they qualify for a scenecut.
///
/// The current algorithm detects fast cuts using changes in colour and intensity between frames.
/// Since the difference between frames is used, only fast cuts are detected
/// with this method. This is intended to change via https://github.com/xiph/rav1e/issues/794.
fn has_scenecut<T: Pixel>(&self, frame1: &PlaneData<T>, frame2: &PlaneData<T>) -> bool {
let mut delta = Self::get_plane_sad(&frame1[0], &frame2[0]);
let mut len = frame1[0].len() as u64;
if self.opts.use_chroma && self.chroma_sampling != ChromaSampling::Cs400 {
let (x_dec, y_dec) = self.chroma_sampling.get_decimation().unwrap();
let dec = x_dec + y_dec;
delta += Self::get_plane_sad(&frame1[1], &frame2[1]) << dec;
len += (frame1[1].len() as u64) << dec;
delta += Self::get_plane_sad(&frame1[2], &frame2[2]) << dec;
len += (frame1[2].len() as u64) << dec;
}
delta >= self.threshold as u64 * len
}
#[inline(always)]
fn get_plane_sad<T: Pixel>(plane1: &[T], plane2: &[T]) -> u64 {
assert_eq!(plane1.len(), plane2.len());
plane1
.iter()
.zip(plane2.iter())
.map(|(&p1, &p2)| (i16::cast_from(p1) - i16::cast_from(p2)).abs() as u64)
.sum::<u64>()
}
}
| {
None
} | conditional_block |
lib.rs | mod pixel;
mod y4m;
use self::pixel::*;
use ::y4m::{Colorspace, Decoder};
use std::cmp;
use std::collections::{BTreeMap, BTreeSet};
use std::io::Read;
/// Options determining how to run scene change detection.
pub struct DetectionOptions {
/// Whether or not to analyze the chroma planes.
/// Enabling this is slower, but may be more accurate.
pub use_chroma: bool,
/// Enabling this will utilize heuristics to avoid scenecuts
/// that are too close to each other.
/// This is generally useful if you want scenecut detection
/// for use in an encoder.
/// If you want a raw list of scene changes, you should disable this.
pub ignore_flashes: bool,
/// The minimum distance between two scene changes.
pub min_scenecut_distance: Option<usize>,
/// The maximum distance between two scene changes.
pub max_scenecut_distance: Option<usize>,
/// The distance to look ahead in the video
/// for scene flash detection.
///
/// Not used if `ignore_flashes` is `true`.
pub lookahead_distance: usize,
/// An optional callback that will fire after each frame is analyzed.
/// Arguments passed in will be, in order,
/// the number of frames analyzed, and the number of keyframes detected.
///
/// This is generally useful for displaying progress, etc.
pub progress_callback: Option<Box<dyn Fn(usize, usize)>>,
}
impl Default for DetectionOptions {
fn default() -> Self {
DetectionOptions {
use_chroma: true,
ignore_flashes: false,
lookahead_distance: 5,
min_scenecut_distance: None,
max_scenecut_distance: None,
progress_callback: None,
}
}
}
/// Runs through a y4m video clip,
/// detecting where scene changes occur.
/// This is adjustable based on the `opts` parameters.
///
/// Returns a `Vec` containing the frame numbers where the scene changes occur.
pub fn detect_scene_changes<R: Read, T: Pixel>(
dec: &mut Decoder<R>,
opts: DetectionOptions,
) -> Vec<usize> {
assert!(opts.lookahead_distance >= 1);
let bit_depth = dec.get_bit_depth() as u8;
let chroma_sampling = ChromaSampling::from(dec.get_colorspace());
let mut detector = SceneChangeDetector::new(bit_depth, chroma_sampling, &opts);
let mut frame_queue = BTreeMap::new();
let mut keyframes = BTreeSet::new();
let mut frameno = 0;
loop {
let mut next_input_frameno = frame_queue
.keys()
.last()
.copied()
.map(|key| key + 1)
.unwrap_or(0);
while next_input_frameno < frameno + opts.lookahead_distance {
let frame = y4m::read_video_frame::<R, T>(dec);
if let Ok(frame) = frame {
frame_queue.insert(next_input_frameno, frame);
next_input_frameno += 1;
} else {
// End of input
break;
}
}
let frame_set = frame_queue
.iter()
.skip_while(|&(&key, _)| key < frameno)
.map(|(_, value)| value)
.take(opts.lookahead_distance)
.collect::<Vec<_>>();
if frame_set.is_empty() {
// End of video
break;
}
detector.analyze_next_frame(
if frameno == 0 {
None
} else {
frame_queue.get(&(frameno - 1))
},
&frame_set,
frameno,
&mut keyframes,
);
if frameno > 0 {
frame_queue.remove(&(frameno - 1));
}
frameno += 1;
if let Some(ref progress_fn) = opts.progress_callback {
progress_fn(frameno, keyframes.len());
}
}
keyframes.into_iter().collect()
}
type PlaneData<T> = [Vec<T>; 3];
/// Available chroma sampling formats.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ChromaSampling {
/// Both vertically and horizontally subsampled.
Cs420,
/// Horizontally subsampled.
Cs422,
/// Not subsampled.
Cs444,
/// Monochrome.
Cs400,
}
impl From<Colorspace> for ChromaSampling {
fn from(other: Colorspace) -> Self {
use Colorspace::*;
match other {
Cmono => ChromaSampling::Cs400,
C420 | C420p10 | C420p12 | C420jpeg | C420paldv | C420mpeg2 => ChromaSampling::Cs420,
C422 | C422p10 | C422p12 => ChromaSampling::Cs422,
C444 | C444p10 | C444p12 => ChromaSampling::Cs444,
}
}
}
impl ChromaSampling {
/// Provides the amount to right shift the luma plane dimensions to get the
/// chroma plane dimensions.
/// Only values 0 or 1 are ever returned.
/// The plane dimensions must also be rounded up to accommodate odd luma plane
/// sizes.
/// Cs400 returns None, as there are no chroma planes.
pub fn get_decimation(self) -> Option<(usize, usize)> {
use self::ChromaSampling::*;
match self {
Cs420 => Some((1, 1)),
Cs422 => Some((1, 0)),
Cs444 => Some((0, 0)),
Cs400 => None,
}
}
}
/// Runs keyframe detection on frames from the lookahead queue.
struct SceneChangeDetector<'a> {
/// Minimum average difference between YUV deltas that will trigger a scene change.
threshold: u8,
opts: &'a DetectionOptions,
/// Frames that cannot be marked as keyframes due to the algorithm excluding them.
/// Storing the frame numbers allows us to avoid looking back more than one frame.
excluded_frames: BTreeSet<usize>,
chroma_sampling: ChromaSampling,
}
impl<'a> SceneChangeDetector<'a> {
pub fn new(bit_depth: u8, chroma_sampling: ChromaSampling, opts: &'a DetectionOptions) -> Self {
// This implementation is based on a Python implementation at
// https://pyscenedetect.readthedocs.io/en/latest/reference/detection-methods/.
// The Python implementation uses HSV values and a threshold of 30. Comparing the
// YUV values was sufficient in most cases, and avoided a more costly YUV->RGB->HSV
// conversion, but the deltas needed to be scaled down. The deltas for keyframes
// in YUV were about 1/3 to 1/2 of what they were in HSV, but non-keyframes were
// very unlikely to have a delta greater than 3 in YUV, whereas they may reach into
// the double digits in HSV. Therefore, 12 was chosen as a reasonable default threshold.
// This may be adjusted later.
const BASE_THRESHOLD: u8 = 12;
Self {
threshold: BASE_THRESHOLD * bit_depth / 8,
opts,
excluded_frames: BTreeSet::new(),
chroma_sampling,
}
}
/// Runs keyframe detection on the next frame in the lookahead queue.
///
/// This function requires that a subset of input frames
/// is passed to it in order, and that `keyframes` is only
/// updated from this method. `input_frameno` should correspond
/// to the first frame in `frame_set`.
///
/// This will gracefully handle the first frame in the video as well.
pub fn analyze_next_frame<T: Pixel>(
&mut self,
previous_frame: Option<&PlaneData<T>>,
frame_set: &[&PlaneData<T>],
input_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) {
let frame_set = match previous_frame {
Some(frame) => [frame]
.iter()
.chain(frame_set.iter())
.cloned()
.collect::<Vec<_>>(),
None => {
// The first frame is always a keyframe.
keyframes.insert(0);
return;
}
};
self.exclude_scene_flashes(&frame_set, input_frameno);
if self.is_key_frame(&frame_set[0], &frame_set[1], input_frameno, keyframes) {
keyframes.insert(input_frameno);
}
}
/// Determines if `current_frame` should be a keyframe.
fn is_key_frame<T: Pixel>(
&self,
previous_frame: &PlaneData<T>,
current_frame: &PlaneData<T>,
current_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) -> bool {
// Find the distance to the previous keyframe.
let previous_keyframe = keyframes.iter().last().unwrap();
let distance = current_frameno - previous_keyframe;
// Handle minimum and maximum key frame intervals.
if distance < self.opts.min_scenecut_distance.unwrap_or(0) {
return false;
}
if distance
>= self
.opts
.max_scenecut_distance
.unwrap_or(usize::max_value())
{
return true;
}
if self.excluded_frames.contains(¤t_frameno) {
return false;
}
self.has_scenecut(previous_frame, current_frame)
}
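// Worked example of the distance gates above: with min_scenecut_distance =
// Some(12) and max_scenecut_distance = Some(250), a frame 5 frames past the
// last keyframe is rejected outright, a frame 250 or more frames past it is
// forced to be a keyframe, and anything in between falls through to the
// has_scenecut comparison.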
/// Uses lookahead to avoid coding short flashes as scenecuts.
/// Saves excluded frame numbers in `self.excluded_frames`.
fn exclude_scene_flashes<T: Pixel>(&mut self, frame_subset: &[&PlaneData<T>], frameno: usize) |
/// Run a comparison between two frames to determine if they qualify for a scenecut.
///
/// The current algorithm detects fast cuts using changes in colour and intensity between frames.
/// Since the difference between frames is used, only fast cuts are detected
/// with this method. This is intended to change via https://github.com/xiph/rav1e/issues/794.
fn has_scenecut<T: Pixel>(&self, frame1: &PlaneData<T>, frame2: &PlaneData<T>) -> bool {
let mut delta = Self::get_plane_sad(&frame1[0], &frame2[0]);
let mut len = frame1[0].len() as u64;
if self.opts.use_chroma && self.chroma_sampling != ChromaSampling::Cs400 {
let (x_dec, y_dec) = self.chroma_sampling.get_decimation().unwrap();
let dec = x_dec + y_dec;
delta += Self::get_plane_sad(&frame1[1], &frame2[1]) << dec;
len += (frame1[1].len() as u64) << dec;
delta += Self::get_plane_sad(&frame1[2], &frame2[2]) << dec;
len += (frame1[2].len() as u64) << dec;
}
delta >= self.threshold as u64 * len
}
#[inline(always)]
fn get_plane_sad<T: Pixel>(plane1: &[T], plane2: &[T]) -> u64 {
assert_eq!(plane1.len(), plane2.len());
plane1
.iter()
.zip(plane2.iter())
.map(|(&p1, &p2)| (i16::cast_from(p1) - i16::cast_from(p2)).abs() as u64)
.sum::<u64>()
}
}
| {
let lookahead_distance = cmp::min(self.opts.lookahead_distance, frame_subset.len() - 1);
// Where A and B are scenes: AAAAAABBBAAAAAA
// If BBB is shorter than lookahead_distance, it is detected as a flash
// and not considered a scenecut.
for j in 1..=lookahead_distance {
if !self.has_scenecut(&frame_subset[0], &frame_subset[j]) {
// Any frame in between `0` and `j` cannot be a real scenecut.
for i in 0..=j {
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
// Where A-F are scenes: AAAAABBCCDDEEFFFFFF
// If each of BB ... EE are shorter than `lookahead_distance`, they are
// detected as flashes and not considered scenecuts.
// Instead, the first F frame becomes a scenecut.
// If the video ends before F, no frame becomes a scenecut.
for i in 1..lookahead_distance {
if self.has_scenecut(&frame_subset[i], &frame_subset[lookahead_distance]) {
// If the current frame is the frame before a scenecut, it cannot also be the frame of a scenecut.
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
} | identifier_body |
lib.rs | mod pixel;
mod y4m;
use self::pixel::*;
use ::y4m::{Colorspace, Decoder};
use std::cmp;
use std::collections::{BTreeMap, BTreeSet};
use std::io::Read;
/// Options determining how to run scene change detection.
pub struct DetectionOptions {
/// Whether or not to analyze the chroma planes.
/// Enabling this is slower, but may be more accurate.
pub use_chroma: bool,
/// Enabling this will utilize heuristics to avoid scenecuts
/// that are too close to each other.
/// This is generally useful if you want scenecut detection
/// for use in an encoder.
/// If you want a raw list of scene changes, you should disable this.
pub ignore_flashes: bool,
/// The minimum distance between two scene changes.
pub min_scenecut_distance: Option<usize>,
/// The maximum distance between two scene changes.
pub max_scenecut_distance: Option<usize>,
/// The distance to look ahead in the video
/// for scene flash detection.
///
/// Not used if `ignore_flashes` is `true`.
pub lookahead_distance: usize,
/// An optional callback that will fire after each frame is analyzed.
/// Arguments passed in will be, in order,
/// the number of frames analyzed, and the number of keyframes detected.
///
/// This is generally useful for displaying progress, etc.
pub progress_callback: Option<Box<dyn Fn(usize, usize)>>,
}
impl Default for DetectionOptions {
fn default() -> Self {
DetectionOptions {
use_chroma: true,
ignore_flashes: false,
lookahead_distance: 5,
min_scenecut_distance: None,
max_scenecut_distance: None,
progress_callback: None,
}
}
}
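// Callers usually start from the defaults and override a field or two
// via struct update syntax, e.g. (illustrative values):
//
// let opts = DetectionOptions {
//     use_chroma: false,
//     max_scenecut_distance: Some(250),
//     ..DetectionOptions::default()
// };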
/// Runs through a y4m video clip,
/// detecting where scene changes occur.
/// This is adjustable based on the `opts` parameters.
///
/// Returns a `Vec` containing the frame numbers where the scene changes occur.
pub fn detect_scene_changes<R: Read, T: Pixel>(
dec: &mut Decoder<R>,
opts: DetectionOptions,
) -> Vec<usize> {
assert!(opts.lookahead_distance >= 1);
let bit_depth = dec.get_bit_depth() as u8;
let chroma_sampling = ChromaSampling::from(dec.get_colorspace());
let mut detector = SceneChangeDetector::new(bit_depth, chroma_sampling, &opts);
let mut frame_queue = BTreeMap::new();
let mut keyframes = BTreeSet::new();
let mut frameno = 0;
loop {
let mut next_input_frameno = frame_queue
.keys()
.last()
.copied()
.map(|key| key + 1)
.unwrap_or(0);
while next_input_frameno < frameno + opts.lookahead_distance {
let frame = y4m::read_video_frame::<R, T>(dec);
if let Ok(frame) = frame {
frame_queue.insert(next_input_frameno, frame);
next_input_frameno += 1;
} else {
// End of input
break;
}
}
let frame_set = frame_queue
.iter()
.skip_while(|&(&key, _)| key < frameno)
.map(|(_, value)| value)
.take(opts.lookahead_distance)
.collect::<Vec<_>>();
if frame_set.is_empty() {
// End of video
break;
}
detector.analyze_next_frame(
if frameno == 0 {
None
} else {
frame_queue.get(&(frameno - 1))
},
&frame_set,
frameno,
&mut keyframes,
);
if frameno > 0 {
frame_queue.remove(&(frameno - 1));
}
frameno += 1;
if let Some(ref progress_fn) = opts.progress_callback {
progress_fn(frameno, keyframes.len());
}
}
keyframes.into_iter().collect()
}
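// Illustrative driver for the function above (a sketch only: it assumes
// the `y4m` crate's `decode` constructor and 8-bit input on stdin, both
// of which are assumptions rather than guarantees of this crate):
//
// fn main() {
//     let stdin = std::io::stdin();
//     let mut dec = ::y4m::decode(stdin.lock()).expect("invalid y4m input");
//     let keyframes = detect_scene_changes::<_, u8>(&mut dec, DetectionOptions::default());
//     println!("scene changes at frames: {:?}", keyframes);
// }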
type PlaneData<T> = [Vec<T>; 3];
/// Available chroma sampling formats.
#[derive(Copy, Clone, Debug, PartialEq)]
enum ChromaSampling {
/// Both vertically and horizontally subsampled.
Cs420,
/// Horizontally subsampled.
Cs422,
/// Not subsampled.
Cs444,
/// Monochrome.
Cs400,
}
impl From<Colorspace> for ChromaSampling {
fn from(other: Colorspace) -> Self {
use Colorspace::*;
match other {
Cmono => ChromaSampling::Cs400,
C420 | C420p10 | C420p12 | C420jpeg | C420paldv | C420mpeg2 => ChromaSampling::Cs420,
C422 | C422p10 | C422p12 => ChromaSampling::Cs422,
C444 | C444p10 | C444p12 => ChromaSampling::Cs444,
}
}
}
impl ChromaSampling {
/// Provides the amount to right shift the luma plane dimensions to get the
/// chroma plane dimensions.
/// Only values 0 or 1 are ever returned.
/// The plane dimensions must also be rounded up to accommodate odd luma plane
/// sizes.
/// Cs400 returns None, as there are no chroma planes.
pub fn get_decimation(self) -> Option<(usize, usize)> {
use self::ChromaSampling::*;
match self {
Cs420 => Some((1, 1)),
Cs422 => Some((1, 0)),
Cs444 => Some((0, 0)),
Cs400 => None,
}
}
}
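// E.g. for 4:2:0 a 1920x1080 luma plane pairs with 960x540 chroma
// planes (both dimensions shifted right by one), while 4:2:2 halves
// only the width, giving 960x1080.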
/// Runs keyframe detection on frames from the lookahead queue.
struct SceneChangeDetector<'a> {
/// Minimum average difference between YUV deltas that will trigger a scene change.
threshold: u8,
opts: &'a DetectionOptions,
/// Frames that cannot be marked as keyframes due to the algorithm excluding them.
/// Storing the frame numbers allows us to avoid looking back more than one frame.
excluded_frames: BTreeSet<usize>,
chroma_sampling: ChromaSampling,
}
impl<'a> SceneChangeDetector<'a> {
pub fn new(bit_depth: u8, chroma_sampling: ChromaSampling, opts: &'a DetectionOptions) -> Self {
// This implementation is based on a Python implementation at
// https://pyscenedetect.readthedocs.io/en/latest/reference/detection-methods/.
// The Python implementation uses HSV values and a threshold of 30. Comparing the
// YUV values was sufficient in most cases, and avoided a more costly YUV->RGB->HSV
// conversion, but the deltas needed to be scaled down. The deltas for keyframes
// in YUV were about 1/3 to 1/2 of what they were in HSV, but non-keyframes were
// very unlikely to have a delta greater than 3 in YUV, whereas they may reach into
// the double digits in HSV. Therefore, 12 was chosen as a reasonable default threshold.
// This may be adjusted later.
const BASE_THRESHOLD: u8 = 12;
Self {
threshold: BASE_THRESHOLD * bit_depth / 8,
opts,
excluded_frames: BTreeSet::new(),
chroma_sampling,
}
}
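// With the scaling above, 8-bit input keeps the base threshold
// (12 * 8 / 8 = 12) while 10-bit input raises it to 12 * 10 / 8 = 15.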
/// Runs keyframe detection on the next frame in the lookahead queue.
///
/// This function requires that a subset of input frames
/// is passed to it in order, and that `keyframes` is only
/// updated from this method. `input_frameno` should correspond
/// to the first frame in `frame_set`.
///
/// This will gracefully handle the first frame in the video as well.
pub fn analyze_next_frame<T: Pixel>(
&mut self,
previous_frame: Option<&PlaneData<T>>,
frame_set: &[&PlaneData<T>],
input_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) {
let frame_set = match previous_frame {
Some(frame) => [frame]
.iter()
.chain(frame_set.iter())
.cloned()
.collect::<Vec<_>>(),
None => {
// The first frame is always a keyframe.
keyframes.insert(0);
return;
}
};
self.exclude_scene_flashes(&frame_set, input_frameno);
if self.is_key_frame(&frame_set[0], &frame_set[1], input_frameno, keyframes) {
keyframes.insert(input_frameno);
}
}
/// Determines if `current_frame` should be a keyframe.
fn is_key_frame<T: Pixel>(
&self,
previous_frame: &PlaneData<T>,
current_frame: &PlaneData<T>,
current_frameno: usize,
keyframes: &mut BTreeSet<usize>,
) -> bool {
// Find the distance to the previous keyframe.
let previous_keyframe = keyframes.iter().last().unwrap();
let distance = current_frameno - previous_keyframe;
// Handle minimum and maximum key frame intervals.
if distance < self.opts.min_scenecut_distance.unwrap_or(0) {
return false;
}
if distance
>= self
.opts
.max_scenecut_distance
.unwrap_or(usize::max_value())
{
return true;
}
if self.excluded_frames.contains(&current_frameno) {
return false;
}
self.has_scenecut(previous_frame, current_frame)
}
/// Uses lookahead to avoid coding short flashes as scenecuts.
/// Saves excluded frame numbers in `self.excluded_frames`.
fn exclude_scene_flashes<T: Pixel>(&mut self, frame_subset: &[&PlaneData<T>], frameno: usize) {
let lookahead_distance = cmp::min(self.opts.lookahead_distance, frame_subset.len() - 1);
// Where A and B are scenes: AAAAAABBBAAAAAA
// If BBB is shorter than lookahead_distance, it is detected as a flash
// and not considered a scenecut.
for j in 1..=lookahead_distance {
if !self.has_scenecut(&frame_subset[0], &frame_subset[j]) {
// Any frame in between `0` and `j` cannot be a real scenecut.
for i in 0..=j {
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
// Where A-F are scenes: AAAAABBCCDDEEFFFFFF
// If each of BB ... EE are shorter than `lookahead_distance`, they are
// detected as flashes and not considered scenecuts.
// Instead, the first F frame becomes a scenecut.
// If the video ends before F, no frame becomes a scenecut.
for i in 1..lookahead_distance {
if self.has_scenecut(&frame_subset[i], &frame_subset[lookahead_distance]) {
// If the current frame is the frame before a scenecut, it cannot also be the frame of a scenecut.
let frameno = frameno + i - 1;
self.excluded_frames.insert(frameno);
}
}
}
/// Run a comparison between two frames to determine if they qualify for a scenecut.
///
/// The current algorithm detects fast cuts using changes in colour and intensity between frames.
/// Since the difference between frames is used, only fast cuts are detected
/// with this method. This is intended to change via https://github.com/xiph/rav1e/issues/794.
fn has_scenecut<T: Pixel>(&self, frame1: &PlaneData<T>, frame2: &PlaneData<T>) -> bool {
let mut delta = Self::get_plane_sad(&frame1[0], &frame2[0]);
let mut len = frame1[0].len() as u64;
if self.opts.use_chroma && self.chroma_sampling != ChromaSampling::Cs400 {
let (x_dec, y_dec) = self.chroma_sampling.get_decimation().unwrap();
let dec = x_dec + y_dec;
delta += Self::get_plane_sad(&frame1[1], &frame2[1]) << dec;
len += (frame1[1].len() as u64) << dec;
delta += Self::get_plane_sad(&frame1[2], &frame2[2]) << dec;
len += (frame1[2].len() as u64) << dec;
}
delta >= self.threshold as u64 * len
}
#[inline(always)]
fn get_plane_sad<T: Pixel>(plane1: &[T], plane2: &[T]) -> u64 {
assert_eq!(plane1.len(), plane2.len());
plane1
.iter()
.zip(plane2.iter())
.map(|(&p1, &p2)| (i16::cast_from(p1) - i16::cast_from(p2)).abs() as u64)
.sum::<u64>()
}
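// Quick sanity check of the metric with hypothetical planes:
// [1, 5, 9] versus [2, 3, 9] gives |1-2| + |5-3| + |9-9| = 3.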
}
// debugger.rs
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use std::collections::BTreeMap;
use std::io::Read;
use std::process::{Child, Command};
use anyhow::{bail, format_err, Result};
use debuggable_module::path::FilePath;
use debuggable_module::Address;
use pete::{Ptracer, Restart, Signal, Stop, Tracee};
use procfs::process::{MMPermissions, MMapPath, MemoryMap, Process};
use crate::record::Output;
pub trait DebugEventHandler {
fn on_breakpoint(&mut self, dbg: &mut DebuggerContext, tracee: &mut Tracee) -> Result<()>;
fn on_module_load(
&mut self,
db: &mut DebuggerContext,
tracee: &mut Tracee,
image: &ModuleImage,
) -> Result<()>;
}
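// A minimal implementation of this trait might look as follows (a
// sketch; `NopHandler` is hypothetical and simply ignores every event):
//
// struct NopHandler;
//
// impl DebugEventHandler for NopHandler {
//     fn on_breakpoint(&mut self, _dbg: &mut DebuggerContext, _tracee: &mut Tracee) -> Result<()> {
//         Ok(())
//     }
//     fn on_module_load(
//         &mut self,
//         _db: &mut DebuggerContext,
//         _tracee: &mut Tracee,
//         _image: &ModuleImage,
//     ) -> Result<()> {
//         Ok(())
//     }
// }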
pub struct Debugger<'eh> {
context: DebuggerContext,
event_handler: &'eh mut dyn DebugEventHandler,
}
impl<'eh> Debugger<'eh> {
pub fn new(event_handler: &'eh mut dyn DebugEventHandler) -> Self {
let context = DebuggerContext::new();
Self {
context,
event_handler,
}
}
pub fn spawn(&mut self, cmd: Command) -> Result<Child> {
Ok(self.context.tracer.spawn(cmd)?)
}
pub fn wait(self, mut child: Child) -> Result<Output> {
if let Err(err) = self.wait_on_stops() {
// Ignore error if child already exited.
let _ = child.kill();
return Err(err);
}
// Currently unavailable on Linux.
let status = None;
let stdout = if let Some(pipe) = &mut child.stdout {
let mut stdout = Vec::new();
pipe.read_to_end(&mut stdout)?;
String::from_utf8_lossy(&stdout).into_owned()
} else {
"".into()
};
let stderr = if let Some(pipe) = &mut child.stderr {
let mut stderr = Vec::new();
pipe.read_to_end(&mut stderr)?;
String::from_utf8_lossy(&stderr).into_owned()
} else {
"".into()
};
// Clean up, ignoring output that we've already gathered.
//
// These calls should also be unnecessary no-ops, but we really want to avoid any dangling
// or zombie child processes.
let _ = child.kill();
let _ = child.wait();
let output = Output {
status,
stderr,
stdout,
};
Ok(output)
}
fn wait_on_stops(mut self) -> Result<()> {
use pete::ptracer::Options;
// Continue the tracee process until the return from its initial `execve()`.
let mut tracee = continue_to_init_execve(&mut self.context.tracer)?;
// Do not follow forks.
//
// After this, we assume that any new tracee is a thread in the same
// group as the root tracee.
let mut options = Options::all();
options.remove(Options::PTRACE_O_TRACEFORK);
options.remove(Options::PTRACE_O_TRACEVFORK);
options.remove(Options::PTRACE_O_TRACEEXEC);
tracee.set_options(options)?;
// Initialize index of mapped modules now that we have a PID to query.
self.context.images = Some(Images::new(tracee.pid.as_raw()));
self.update_images(&mut tracee)?;
// Restart tracee and enter the main debugger loop.
self.context.tracer.restart(tracee, Restart::Syscall)?;
while let Some(mut tracee) = self.context.tracer.wait()? {
match tracee.stop {
Stop::SyscallEnter => trace!("syscall-enter: {:?}", tracee.stop),
Stop::SyscallExit => {
self.update_images(&mut tracee)?;
}
Stop::SignalDelivery {
signal: Signal::SIGTRAP,
} => {
self.restore_and_call_if_breakpoint(&mut tracee)?;
}
Stop::Clone { new: pid } => {
// Only seen when the `VM_CLONE` flag is set, as of Linux 4.15.
info!("new thread: {}", pid);
}
_ => {
debug!("stop: {:?}", tracee.stop);
}
}
if let Err(err) = self.context.tracer.restart(tracee, Restart::Syscall) {
error!("unable to restart tracee: {}", err);
}
}
Ok(())
}
fn restore_and_call_if_breakpoint(&mut self, tracee: &mut Tracee) -> Result<()> {
let mut regs = tracee.registers()?;
#[cfg(target_arch = "x86_64")]
let instruction_pointer = &mut regs.rip;
#[cfg(target_arch = "aarch64")]
let instruction_pointer = &mut regs.pc;
// Compute what the last PC would have been _if_ we stopped due to a soft breakpoint.
//
// If we don't have a registered breakpoint, then we will not use this value.
let pc = Address(instruction_pointer.saturating_sub(1));
if self.context.breakpoints.clear(tracee, pc)? {
// We restored the original, `int3`-clobbered instruction in `clear()`. Now
// set the tracee's registers to execute it on restart. Do this _before_ the
// callback to simulate a hardware breakpoint.
*instruction_pointer = pc.0;
tracee.set_registers(regs)?;
self.event_handler
.on_breakpoint(&mut self.context, tracee)?;
} else {
warn!("no registered breakpoint for SIGTRAP delivery at {pc:x}");
// We didn't fix up a registered soft breakpoint, so we have no reason to
// re-execute the instruction at the last PC. Leave the tracee registers alone.
}
Ok(())
}
fn update_images(&mut self, tracee: &mut Tracee) -> Result<()> {
let images = self
.context
.images
.as_mut()
.ok_or_else(|| format_err!("internal error: recorder images not initialized"))?;
let events = images.update()?;
for (_base, image) in &events.loaded {
self.event_handler
.on_module_load(&mut self.context, tracee, image)?;
}
Ok(())
}
}
pub struct DebuggerContext {
pub breakpoints: Breakpoints,
pub images: Option<Images>,
pub tracer: Ptracer,
}
impl DebuggerContext {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let breakpoints = Breakpoints::default();
let images = None;
let tracer = Ptracer::new();
Self {
breakpoints,
images,
tracer,
}
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
self.images.as_ref()?.find_image_for_addr(addr)
}
}
/// Executable memory-mapped files for a process.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Images {
mapped: BTreeMap<Address, ModuleImage>,
pid: i32,
}
impl Images {
pub fn new(pid: i32) -> Self {
let mapped = BTreeMap::default();
Self { mapped, pid }
}
pub fn mapped(&self) -> impl Iterator<Item = (Address, &ModuleImage)> {
self.mapped.iter().map(|(va, i)| (*va, i))
}
pub fn update(&mut self) -> Result<LoadEvents> {
let proc = Process::new(self.pid)?;
let mut new = BTreeMap::new();
let mut group: Vec<MemoryMap> = vec![];
for map in proc.maps()? {
if let Some(last) = group.last() {
if last.pathname != map.pathname {
// The current memory mapping is the start of a new group.
//
// Consume the current group, and track any new module image.
if let Ok(image) = ModuleImage::new(group) {
let base = image.base();
new.insert(base, image);
}
// Reset the current group.
group = vec![];
}
}
group.push(map);
}
let events = LoadEvents::new(&self.mapped, &new);
self.mapped = new;
Ok(events)
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
let (_, image) = self.mapped().find(|(_, im)| im.contains(&addr))?;
Some(image)
}
}
/// A `MemoryMap` that is known to be file-backed and executable.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ModuleImage {
base: Address,
maps: Vec<MemoryMap>,
path: FilePath,
}
impl ModuleImage {
// Accepts an increasing sequence of memory mappings with a common file-backed
// pathname.
pub fn new(mut maps: Vec<MemoryMap>) -> Result<Self> {
maps.sort_by_key(|m| m.address);
if maps.is_empty() {
bail!("no mapping for module image");
}
if !maps
.iter()
.any(|m| m.perms.contains(MMPermissions::EXECUTE))
{
bail!("no executable mapping for module image");
}
// Cannot panic due to initial length check.
let first = &maps[0];
let path = if let MMapPath::Path(path) = &first.pathname {
FilePath::new(path.to_string_lossy())?
} else {
bail!("module image mappings must be file-backed");
};
for map in &maps {
if map.pathname != first.pathname {
bail!("module image mapping not file-backed");
}
}
let base = Address(first.address.0);
let image = ModuleImage { base, maps, path };
Ok(image)
}
pub fn path(&self) -> &FilePath {
&self.path
}
pub fn base(&self) -> Address {
self.base
}
pub fn contains(&self, addr: &Address) -> bool {
for map in &self.maps {
let lo = Address(map.address.0);
let hi = Address(map.address.1);
if (lo..hi).contains(addr) {
return true;
}
}
false
}
}
pub struct LoadEvents {
pub loaded: Vec<(Address, ModuleImage)>,
pub unloaded: Vec<(Address, ModuleImage)>,
}
impl LoadEvents {
pub fn new(old: &BTreeMap<Address, ModuleImage>, new: &BTreeMap<Address, ModuleImage>) -> Self {
// New not in old.
let loaded: Vec<_> = new
.iter()
.filter(|(nva, n)| {
!old.iter()
.any(|(iva, i)| *nva == iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
// Old not in new.
let unloaded: Vec<_> = old
.iter()
.filter(|(iva, i)| {
!new.iter()
.any(|(nva, n)| nva == *iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
Self { loaded, unloaded }
}
}
#[derive(Clone, Debug, Default)]
pub struct Breakpoints {
saved: BTreeMap<Address, u8>,
}
impl Breakpoints {
pub fn set(&mut self, tracee: &mut Tracee, addr: Address) -> Result<()> {
// Return if the breakpoint exists. We don't want to conclude that the
// saved instruction byte was `0xcc`.
if self.saved.contains_key(&addr) {
return Ok(());
}
let mut data = [0u8];
tracee.read_memory_mut(addr.0, &mut data)?;
self.saved.insert(addr, data[0]);
tracee.write_memory(addr.0, &[0xcc])?;
Ok(())
}
pub fn clear(&mut self, tracee: &mut Tracee, addr: Address) -> Result<bool> {
let data = self.saved.remove(&addr);
let cleared = if let Some(data) = data {
tracee.write_memory(addr.0, &[data])?;
true
} else {
false
};
Ok(cleared)
}
}
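// Sketch of wiring breakpoints into `DebugEventHandler::on_module_load`
// (the address below is a stand-in; a real handler would resolve symbol
// or offset addresses within the loaded image):
//
// fn on_module_load(&mut self, db: &mut DebuggerContext, tracee: &mut Tracee,
//                   image: &ModuleImage) -> Result<()> {
//     let entry_va = image.base(); // hypothetical target address
//     db.breakpoints.set(tracee, entry_va)
// }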
fn continue_to_init_execve(tracer: &mut Ptracer) -> Result<Tracee> {
while let Some(tracee) = tracer.wait()? {
if let Stop::SyscallExit = &tracee.stop {
return Ok(tracee);
}
tracer.restart(tracee, Restart::Continue)?;
}
bail!("did not see initial execve() in tracee while recording coverage");
}
// debugger.rs
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use std::collections::BTreeMap;
use std::io::Read;
use std::process::{Child, Command};
use anyhow::{bail, format_err, Result};
use debuggable_module::path::FilePath;
use debuggable_module::Address;
use pete::{Ptracer, Restart, Signal, Stop, Tracee};
use procfs::process::{MMPermissions, MMapPath, MemoryMap, Process};
use crate::record::Output;
pub trait DebugEventHandler {
fn on_breakpoint(&mut self, dbg: &mut DebuggerContext, tracee: &mut Tracee) -> Result<()>;
fn on_module_load(
&mut self,
db: &mut DebuggerContext,
tracee: &mut Tracee,
image: &ModuleImage,
) -> Result<()>;
}
pub struct Debugger<'eh> {
context: DebuggerContext,
event_handler: &'eh mut dyn DebugEventHandler,
}
impl<'eh> Debugger<'eh> {
pub fn new(event_handler: &'eh mut dyn DebugEventHandler) -> Self {
let context = DebuggerContext::new();
Self {
context,
event_handler,
}
}
pub fn spawn(&mut self, cmd: Command) -> Result<Child> {
Ok(self.context.tracer.spawn(cmd)?)
}
pub fn wait(self, mut child: Child) -> Result<Output> {
if let Err(err) = self.wait_on_stops() {
// Ignore error if child already exited.
let _ = child.kill();
return Err(err);
}
// Currently unavailable on Linux.
let status = None;
let stdout = if let Some(pipe) = &mut child.stdout {
let mut stdout = Vec::new();
pipe.read_to_end(&mut stdout)?;
String::from_utf8_lossy(&stdout).into_owned()
} else {
"".into()
};
let stderr = if let Some(pipe) = &mut child.stderr {
let mut stderr = Vec::new();
pipe.read_to_end(&mut stderr)?;
String::from_utf8_lossy(&stderr).into_owned()
} else {
"".into()
};
// Clean up, ignoring output that we've already gathered.
//
// These calls should also be unnecessary no-ops, but we really want to avoid any dangling
// or zombie child processes.
let _ = child.kill();
let _ = child.wait();
let output = Output {
status,
stderr,
stdout,
};
Ok(output)
}
fn wait_on_stops(mut self) -> Result<()> {
use pete::ptracer::Options;
// Continue the tracee process until the return from its initial `execve()`.
let mut tracee = continue_to_init_execve(&mut self.context.tracer)?;
// Do not follow forks.
//
// After this, we assume that any new tracee is a thread in the same
// group as the root tracee.
let mut options = Options::all();
options.remove(Options::PTRACE_O_TRACEFORK);
options.remove(Options::PTRACE_O_TRACEVFORK);
options.remove(Options::PTRACE_O_TRACEEXEC);
tracee.set_options(options)?;
// Initialize index of mapped modules now that we have a PID to query.
self.context.images = Some(Images::new(tracee.pid.as_raw()));
self.update_images(&mut tracee)?;
// Restart tracee and enter the main debugger loop.
self.context.tracer.restart(tracee, Restart::Syscall)?;
while let Some(mut tracee) = self.context.tracer.wait()? {
match tracee.stop {
Stop::SyscallEnter => trace!("syscall-enter: {:?}", tracee.stop),
Stop::SyscallExit => {
self.update_images(&mut tracee)?;
}
Stop::SignalDelivery {
signal: Signal::SIGTRAP,
} => {
self.restore_and_call_if_breakpoint(&mut tracee)?;
}
Stop::Clone { new: pid } => {
// Only seen when the `VM_CLONE` flag is set, as of Linux 4.15.
info!("new thread: {}", pid);
}
_ => {
debug!("stop: {:?}", tracee.stop);
}
}
if let Err(err) = self.context.tracer.restart(tracee, Restart::Syscall) {
error!("unable to restart tracee: {}", err);
}
}
Ok(())
}
fn restore_and_call_if_breakpoint(&mut self, tracee: &mut Tracee) -> Result<()> {
let mut regs = tracee.registers()?;
#[cfg(target_arch = "x86_64")]
let instruction_pointer = &mut regs.rip;
#[cfg(target_arch = "aarch64")]
let instruction_pointer = &mut regs.pc;
// Compute what the last PC would have been _if_ we stopped due to a soft breakpoint.
//
// If we don't have a registered breakpoint, then we will not use this value.
let pc = Address(instruction_pointer.saturating_sub(1));
if self.context.breakpoints.clear(tracee, pc)? {
// We restored the original, `int3`-clobbered instruction in `clear()`. Now
// set the tracee's registers to execute it on restart. Do this _before_ the
// callback to simulate a hardware breakpoint.
*instruction_pointer = pc.0;
tracee.set_registers(regs)?;
self.event_handler
.on_breakpoint(&mut self.context, tracee)?;
} else {
warn!("no registered breakpoint for SIGTRAP delivery at {pc:x}");
// We didn't fix up a registered soft breakpoint, so we have no reason to
// re-execute the instruction at the last PC. Leave the tracee registers alone.
}
Ok(())
}
fn update_images(&mut self, tracee: &mut Tracee) -> Result<()> {
let images = self
.context
.images
.as_mut()
.ok_or_else(|| format_err!("internal error: recorder images not initialized"))?;
let events = images.update()?;
for (_base, image) in &events.loaded {
self.event_handler
.on_module_load(&mut self.context, tracee, image)?;
}
Ok(())
}
}
pub struct DebuggerContext {
pub breakpoints: Breakpoints,
pub images: Option<Images>,
pub tracer: Ptracer,
}
impl DebuggerContext {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let breakpoints = Breakpoints::default();
let images = None;
let tracer = Ptracer::new();
Self {
breakpoints,
images,
tracer,
}
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
self.images.as_ref()?.find_image_for_addr(addr)
}
}
/// Executable memory-mapped files for a process.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Images {
mapped: BTreeMap<Address, ModuleImage>,
pid: i32,
}
impl Images {
pub fn new(pid: i32) -> Self {
let mapped = BTreeMap::default();
Self { mapped, pid }
}
pub fn mapped(&self) -> impl Iterator<Item = (Address, &ModuleImage)> {
self.mapped.iter().map(|(va, i)| (*va, i))
}
pub fn update(&mut self) -> Result<LoadEvents> {
let proc = Process::new(self.pid)?;
let mut new = BTreeMap::new();
let mut group: Vec<MemoryMap> = vec![];
for map in proc.maps()? {
if let Some(last) = group.last() {
if last.pathname != map.pathname {
// The current memory mapping is the start of a new group.
//
// Consume the current group, and track any new module image.
if let Ok(image) = ModuleImage::new(group) {
let base = image.base();
new.insert(base, image);
}
// Reset the current group.
group = vec![];
}
}
group.push(map);
}
let events = LoadEvents::new(&self.mapped, &new);
self.mapped = new;
Ok(events)
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
let (_, image) = self.mapped().find(|(_, im)| im.contains(&addr))?;
Some(image)
}
}
/// A `MemoryMap` that is known to be file-backed and executable.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ModuleImage {
base: Address,
maps: Vec<MemoryMap>,
path: FilePath,
}
impl ModuleImage {
// Accepts an increasing sequence of memory mappings with a common file-backed
// pathname.
pub fn new(mut maps: Vec<MemoryMap>) -> Result<Self> {
maps.sort_by_key(|m| m.address);
if maps.is_empty() {
bail!("no mapping for module image");
}
if !maps
.iter()
.any(|m| m.perms.contains(MMPermissions::EXECUTE))
{
bail!("no executable mapping for module image");
}
// Cannot panic due to initial length check.
let first = &maps[0];
let path = if let MMapPath::Path(path) = &first.pathname {
FilePath::new(path.to_string_lossy())?
} else {
bail!("module image mappings must be file-backed");
};
for map in &maps {
if map.pathname != first.pathname {
bail!("module image mapping not file-backed");
}
}
let base = Address(first.address.0);
let image = ModuleImage { base, maps, path };
Ok(image)
}
pub fn path(&self) -> &FilePath {
&self.path
}
pub fn base(&self) -> Address {
self.base
}
pub fn contains(&self, addr: &Address) -> bool {
for map in &self.maps {
let lo = Address(map.address.0);
let hi = Address(map.address.1);
if (lo..hi).contains(addr) {
return true;
}
}
false
}
}
pub struct LoadEvents {
pub loaded: Vec<(Address, ModuleImage)>,
pub unloaded: Vec<(Address, ModuleImage)>,
}
impl LoadEvents {
pub fn new(old: &BTreeMap<Address, ModuleImage>, new: &BTreeMap<Address, ModuleImage>) -> Self {
// New not in old.
let loaded: Vec<_> = new
.iter()
.filter(|(nva, n)| {
!old.iter()
.any(|(iva, i)| *nva == iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
// Old not in new.
let unloaded: Vec<_> = old
.iter()
.filter(|(iva, i)| {
!new.iter()
.any(|(nva, n)| nva == *iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
Self { loaded, unloaded }
}
}
#[derive(Clone, Debug, Default)]
pub struct Breakpoints {
saved: BTreeMap<Address, u8>,
}
impl Breakpoints {
pub fn set(&mut self, tracee: &mut Tracee, addr: Address) -> Result<()> {
// Return if the breakpoint exists. We don't want to conclude that the
// saved instruction byte was `0xcc`.
if self.saved.contains_key(&addr) {
return Ok(());
}
let mut data = [0u8];
tracee.read_memory_mut(addr.0, &mut data)?;
self.saved.insert(addr, data[0]);
tracee.write_memory(addr.0, &[0xcc])?;
Ok(())
}
pub fn clear(&mut self, tracee: &mut Tracee, addr: Address) -> Result<bool> {
let data = self.saved.remove(&addr);
let cleared = if let Some(data) = data {
tracee.write_memory(addr.0, &[data])?;
true
} else {
false
};
Ok(cleared)
}
}
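// The saved byte makes set/clear a round-trip; 0xcc is the one-byte
// x86-64 `int3` opcode written by `set`. Sketch with an arbitrary
// address:
//
// breakpoints.set(&mut tracee, Address(0x1000))?;             // save byte, write 0xcc
// assert!(breakpoints.clear(&mut tracee, Address(0x1000))?);  // restore byte
// assert!(!breakpoints.clear(&mut tracee, Address(0x1000))?); // nothing saved now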
fn continue_to_init_execve(tracer: &mut Ptracer) -> Result<Tracee> {
while let Some(tracee) = tracer.wait()? {
if let Stop::SyscallExit = &tracee.stop {
return Ok(tracee);
}
tracer.restart(tracee, Restart::Continue)?;
}
bail!("did not see initial execve() in tracee while recording coverage");
}
// debugger.rs
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use std::collections::BTreeMap;
use std::io::Read;
use std::process::{Child, Command};
use anyhow::{bail, format_err, Result};
use debuggable_module::path::FilePath;
use debuggable_module::Address;
use pete::{Ptracer, Restart, Signal, Stop, Tracee};
use procfs::process::{MMPermissions, MMapPath, MemoryMap, Process};
use crate::record::Output;
pub trait DebugEventHandler {
fn on_breakpoint(&mut self, dbg: &mut DebuggerContext, tracee: &mut Tracee) -> Result<()>;
fn on_module_load(
&mut self,
db: &mut DebuggerContext,
tracee: &mut Tracee,
image: &ModuleImage,
) -> Result<()>;
}
pub struct Debugger<'eh> {
context: DebuggerContext,
event_handler: &'eh mut dyn DebugEventHandler,
}
impl<'eh> Debugger<'eh> {
pub fn new(event_handler: &'eh mut dyn DebugEventHandler) -> Self {
let context = DebuggerContext::new();
Self {
context,
event_handler,
}
}
pub fn spawn(&mut self, cmd: Command) -> Result<Child> {
Ok(self.context.tracer.spawn(cmd)?)
}
pub fn wait(self, mut child: Child) -> Result<Output> {
if let Err(err) = self.wait_on_stops() {
// Ignore error if child already exited.
let _ = child.kill();
return Err(err);
}
// Currently unavailable on Linux.
let status = None;
let stdout = if let Some(pipe) = &mut child.stdout {
let mut stdout = Vec::new();
pipe.read_to_end(&mut stdout)?;
String::from_utf8_lossy(&stdout).into_owned()
} else {
"".into()
};
let stderr = if let Some(pipe) = &mut child.stderr {
let mut stderr = Vec::new();
pipe.read_to_end(&mut stderr)?;
String::from_utf8_lossy(&stderr).into_owned()
} else {
"".into()
};
// Clean up, ignoring output that we've already gathered.
//
// These calls should also be unnecessary no-ops, but we really want to avoid any dangling
// or zombie child processes.
let _ = child.kill();
let _ = child.wait();
let output = Output {
status,
stderr,
stdout,
};
Ok(output)
}
fn wait_on_stops(mut self) -> Result<()> {
use pete::ptracer::Options;
// Continue the tracee process until the return from its initial `execve()`.
let mut tracee = continue_to_init_execve(&mut self.context.tracer)?;
// Do not follow forks.
//
// After this, we assume that any new tracee is a thread in the same
// group as the root tracee.
let mut options = Options::all();
options.remove(Options::PTRACE_O_TRACEFORK);
options.remove(Options::PTRACE_O_TRACEVFORK);
options.remove(Options::PTRACE_O_TRACEEXEC);
tracee.set_options(options)?;
// Initialize index of mapped modules now that we have a PID to query.
self.context.images = Some(Images::new(tracee.pid.as_raw()));
self.update_images(&mut tracee)?;
// Restart tracee and enter the main debugger loop.
self.context.tracer.restart(tracee, Restart::Syscall)?;
while let Some(mut tracee) = self.context.tracer.wait()? {
match tracee.stop {
Stop::SyscallEnter => trace!("syscall-enter: {:?}", tracee.stop),
Stop::SyscallExit => {
self.update_images(&mut tracee)?;
}
Stop::SignalDelivery {
signal: Signal::SIGTRAP,
} => {
self.restore_and_call_if_breakpoint(&mut tracee)?;
}
Stop::Clone { new: pid } => {
// Only seen when the `VM_CLONE` flag is set, as of Linux 4.15.
info!("new thread: {}", pid);
}
_ => {
debug!("stop: {:?}", tracee.stop);
}
}
if let Err(err) = self.context.tracer.restart(tracee, Restart::Syscall) {
error!("unable to restart tracee: {}", err);
}
}
Ok(())
}
fn restore_and_call_if_breakpoint(&mut self, tracee: &mut Tracee) -> Result<()> {
let mut regs = tracee.registers()?;
#[cfg(target_arch = "x86_64")]
let instruction_pointer = &mut regs.rip;
#[cfg(target_arch = "aarch64")]
let instruction_pointer = &mut regs.pc;
// Compute what the last PC would have been _if_ we stopped due to a soft breakpoint.
//
// If we don't have a registered breakpoint, then we will not use this value.
let pc = Address(instruction_pointer.saturating_sub(1));
if self.context.breakpoints.clear(tracee, pc)? {
// We restored the original, `int3`-clobbered instruction in `clear()`. Now
// set the tracee's registers to execute it on restart. Do this _before_ the
// callback to simulate a hardware breakpoint.
*instruction_pointer = pc.0;
tracee.set_registers(regs)?;
self.event_handler
.on_breakpoint(&mut self.context, tracee)?;
} else {
warn!("no registered breakpoint for SIGTRAP delivery at {pc:x}");
// We didn't fix up a registered soft breakpoint, so we have no reason to
// re-execute the instruction at the last PC. Leave the tracee registers alone.
}
Ok(())
}
fn update_images(&mut self, tracee: &mut Tracee) -> Result<()> {
let images = self
.context
.images
.as_mut()
.ok_or_else(|| format_err!("internal error: recorder images not initialized"))?;
let events = images.update()?;
for (_base, image) in &events.loaded {
self.event_handler
.on_module_load(&mut self.context, tracee, image)?;
}
Ok(())
}
}
pub struct DebuggerContext {
pub breakpoints: Breakpoints,
pub images: Option<Images>,
pub tracer: Ptracer,
}
impl DebuggerContext {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let breakpoints = Breakpoints::default();
let images = None;
let tracer = Ptracer::new();
Self {
breakpoints,
images,
tracer,
}
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
self.images.as_ref()?.find_image_for_addr(addr)
}
}
/// Executable memory-mapped files for a process.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Images {
mapped: BTreeMap<Address, ModuleImage>,
pid: i32,
}
impl Images {
pub fn new(pid: i32) -> Self {
let mapped = BTreeMap::default();
Self { mapped, pid }
}
pub fn mapped(&self) -> impl Iterator<Item = (Address, &ModuleImage)> {
self.mapped.iter().map(|(va, i)| (*va, i))
}
pub fn update(&mut self) -> Result<LoadEvents> {
let proc = Process::new(self.pid)?;
let mut new = BTreeMap::new();
let mut group: Vec<MemoryMap> = vec![];
for map in proc.maps()? {
if let Some(last) = group.last() {
if last.pathname != map.pathname {
// The current memory mapping is the start of a new group.
//
// Consume the current group, and track any new module image.
if let Ok(image) = ModuleImage::new(group) {
let base = image.base();
new.insert(base, image);
}
// Reset the current group.
group = vec![];
}
}
group.push(map);
}
let events = LoadEvents::new(&self.mapped, &new);
self.mapped = new;
Ok(events)
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
let (_, image) = self.mapped().find(|(_, im)| im.contains(&addr))?;
Some(image)
}
}
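// Polling sketch for a hypothetical caller (assumes `Address` and
// `FilePath` have `Debug` impls for printing):
//
// let mut images = Images::new(pid);
// let events = images.update()?;
// for (base, image) in &events.loaded {
//     println!("loaded {:?} at {:?}", image.path(), base);
// }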
/// A `MemoryMap` that is known to be file-backed and executable.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ModuleImage {
base: Address,
maps: Vec<MemoryMap>,
path: FilePath,
}
impl ModuleImage {
// Accepts an increasing sequence of memory mappings with a common file-backed
// pathname.
pub fn new(mut maps: Vec<MemoryMap>) -> Result<Self> {
maps.sort_by_key(|m| m.address);
if maps.is_empty() {
bail!("no mapping for module image");
}
if !maps
.iter()
.any(|m| m.perms.contains(MMPermissions::EXECUTE))
{
bail!("no executable mapping for module image");
}
// Cannot panic due to initial length check.
let first = &maps[0];
let path = if let MMapPath::Path(path) = &first.pathname {
FilePath::new(path.to_string_lossy())?
} else {
bail!("module image mappings must be file-backed");
};
for map in &maps {
if map.pathname != first.pathname {
bail!("module image mapping not file-backed");
}
}
let base = Address(first.address.0);
let image = ModuleImage { base, maps, path };
Ok(image)
}
pub fn path(&self) -> &FilePath {
&self.path
}
pub fn base(&self) -> Address {
self.base
}
pub fn contains(&self, addr: &Address) -> bool {
for map in &self.maps {
let lo = Address(map.address.0);
let hi = Address(map.address.1);
if (lo..hi).contains(addr) {
return true;
}
}
false
}
}
pub struct LoadEvents {
pub loaded: Vec<(Address, ModuleImage)>,
pub unloaded: Vec<(Address, ModuleImage)>,
}
impl LoadEvents {
pub fn new(old: &BTreeMap<Address, ModuleImage>, new: &BTreeMap<Address, ModuleImage>) -> Self {
// New not in old.
let loaded: Vec<_> = new
.iter()
.filter(|(nva, n)| {
!old.iter()
.any(|(iva, i)| *nva == iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
// Old not in new.
let unloaded: Vec<_> = old
.iter()
.filter(|(iva, i)| {
!new.iter()
.any(|(nva, n)| nva == *iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
Self { loaded, unloaded }
}
}
#[derive(Clone, Debug, Default)]
pub struct Breakpoints {
saved: BTreeMap<Address, u8>,
}
impl Breakpoints {
pub fn set(&mut self, tracee: &mut Tracee, addr: Address) -> Result<()> {
// Return if the breakpoint exists. We don't want to conclude that the
// saved instruction byte was `0xcc`.
if self.saved.contains_key(&addr) {
return Ok(());
}
let mut data = [0u8];
tracee.read_memory_mut(addr.0, &mut data)?;
self.saved.insert(addr, data[0]);
tracee.write_memory(addr.0, &[0xcc])?;
Ok(())
}
pub fn clear(&mut self, tracee: &mut Tracee, addr: Address) -> Result<bool> {
let data = self.saved.remove(&addr);
let cleared = if let Some(data) = data {
tracee.write_memory(addr.0, &[data])?;
true
} else {
false
};
Ok(cleared)
}
}
fn continue_to_init_execve(tracer: &mut Ptracer) -> Result<Tracee> {
while let Some(tracee) = tracer.wait()? {
if let Stop::SyscallExit = &tracee.stop {
return Ok(tracee);
}
tracer.restart(tracee, Restart::Continue)?;
}
bail!("did not see initial execve() in tracee while recording coverage");
}
// debugger.rs
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use std::collections::BTreeMap;
use std::io::Read;
use std::process::{Child, Command};
use anyhow::{bail, format_err, Result};
use debuggable_module::path::FilePath;
use debuggable_module::Address;
use pete::{Ptracer, Restart, Signal, Stop, Tracee};
use procfs::process::{MMPermissions, MMapPath, MemoryMap, Process};
use crate::record::Output;
pub trait DebugEventHandler {
fn on_breakpoint(&mut self, dbg: &mut DebuggerContext, tracee: &mut Tracee) -> Result<()>;
fn on_module_load(
&mut self,
db: &mut DebuggerContext,
tracee: &mut Tracee,
image: &ModuleImage,
) -> Result<()>;
}
pub struct Debugger<'eh> {
context: DebuggerContext,
event_handler: &'eh mut dyn DebugEventHandler,
}
impl<'eh> Debugger<'eh> {
pub fn new(event_handler: &'eh mut dyn DebugEventHandler) -> Self {
let context = DebuggerContext::new();
Self {
context,
event_handler,
}
}
pub fn spawn(&mut self, cmd: Command) -> Result<Child> {
Ok(self.context.tracer.spawn(cmd)?)
}
pub fn wait(self, mut child: Child) -> Result<Output> {
if let Err(err) = self.wait_on_stops() {
// Ignore error if child already exited.
let _ = child.kill();
return Err(err);
}
// Currently unavailable on Linux.
let status = None;
let stdout = if let Some(pipe) = &mut child.stdout {
let mut stdout = Vec::new();
pipe.read_to_end(&mut stdout)?;
String::from_utf8_lossy(&stdout).into_owned()
} else {
"".into()
};
let stderr = if let Some(pipe) = &mut child.stderr {
let mut stderr = Vec::new();
pipe.read_to_end(&mut stderr)?;
String::from_utf8_lossy(&stderr).into_owned()
} else {
"".into()
};
// Clean up, ignoring output that we've already gathered.
//
// These calls should also be unnecessary no-ops, but we really want to avoid any dangling
// or zombie child processes.
let _ = child.kill();
let _ = child.wait();
let output = Output {
status,
stderr,
stdout,
};
Ok(output)
}
fn wait_on_stops(mut self) -> Result<()> {
use pete::ptracer::Options;
// Continue the tracee process until the return from its initial `execve()`.
let mut tracee = continue_to_init_execve(&mut self.context.tracer)?;
// Do not follow forks.
//
// After this, we assume that any new tracee is a thread in the same
// group as the root tracee.
let mut options = Options::all();
options.remove(Options::PTRACE_O_TRACEFORK);
options.remove(Options::PTRACE_O_TRACEVFORK);
options.remove(Options::PTRACE_O_TRACEEXEC);
tracee.set_options(options)?;
// Initialize index of mapped modules now that we have a PID to query.
self.context.images = Some(Images::new(tracee.pid.as_raw()));
self.update_images(&mut tracee)?;
// Restart tracee and enter the main debugger loop.
self.context.tracer.restart(tracee, Restart::Syscall)?;
while let Some(mut tracee) = self.context.tracer.wait()? {
match tracee.stop {
Stop::SyscallEnter => trace!("syscall-enter: {:?}", tracee.stop),
Stop::SyscallExit => {
self.update_images(&mut tracee)?;
}
Stop::SignalDelivery {
signal: Signal::SIGTRAP,
} => {
self.restore_and_call_if_breakpoint(&mut tracee)?;
}
Stop::Clone { new: pid } => {
// Only seen when the `VM_CLONE` flag is set, as of Linux 4.15.
info!("new thread: {}", pid);
}
_ => {
debug!("stop: {:?}", tracee.stop);
}
}
if let Err(err) = self.context.tracer.restart(tracee, Restart::Syscall) {
error!("unable to restart tracee: {}", err);
}
}
Ok(())
}
fn restore_and_call_if_breakpoint(&mut self, tracee: &mut Tracee) -> Result<()> {
let mut regs = tracee.registers()?;
#[cfg(target_arch = "x86_64")]
let instruction_pointer = &mut regs.rip;
#[cfg(target_arch = "aarch64")]
let instruction_pointer = &mut regs.pc;
// Compute what the last PC would have been _if_ we stopped due to a soft breakpoint.
//
// If we don't have a registered breakpoint, then we will not use this value.
let pc = Address(instruction_pointer.saturating_sub(1));
if self.context.breakpoints.clear(tracee, pc)? {
// We restored the original, `int3`-clobbered instruction in `clear()`. Now
// set the tracee's registers to execute it on restart. Do this _before_ the
// callback to simulate a hardware breakpoint.
*instruction_pointer = pc.0;
tracee.set_registers(regs)?;
self.event_handler
.on_breakpoint(&mut self.context, tracee)?;
} else {
warn!("no registered breakpoint for SIGTRAP delivery at {pc:x}");
// We didn't fix up a registered soft breakpoint, so we have no reason to
// re-execute the instruction at the last PC. Leave the tracee registers alone.
}
Ok(())
}
fn update_images(&mut self, tracee: &mut Tracee) -> Result<()> {
let images = self
.context
.images
.as_mut()
.ok_or_else(|| format_err!("internal error: recorder images not initialized"))?;
let events = images.update()?;
for (_base, image) in &events.loaded {
self.event_handler
.on_module_load(&mut self.context, tracee, image)?;
}
Ok(())
}
}
pub struct DebuggerContext {
pub breakpoints: Breakpoints,
pub images: Option<Images>,
pub tracer: Ptracer,
}
impl DebuggerContext {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let breakpoints = Breakpoints::default();
let images = None;
let tracer = Ptracer::new();
Self {
breakpoints,
images,
tracer,
}
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
self.images.as_ref()?.find_image_for_addr(addr)
}
}
/// Executable memory-mapped files for a process.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Images {
mapped: BTreeMap<Address, ModuleImage>,
pid: i32,
}
impl Images {
pub fn new(pid: i32) -> Self {
let mapped = BTreeMap::default();
Self { mapped, pid }
}
pub fn mapped(&self) -> impl Iterator<Item = (Address, &ModuleImage)> {
self.mapped.iter().map(|(va, i)| (*va, i))
}
pub fn update(&mut self) -> Result<LoadEvents> {
let proc = Process::new(self.pid)?;
let mut new = BTreeMap::new();
let mut group: Vec<MemoryMap> = vec![];
for map in proc.maps()? {
if let Some(last) = group.last() {
if last.pathname != map.pathname {
// The current memory mapping is the start of a new group.
//
// Consume the current group, and track any new module image.
if let Ok(image) = ModuleImage::new(group) {
let base = image.base();
new.insert(base, image);
}
// Reset the current group.
group = vec![];
}
}
group.push(map);
}
let events = LoadEvents::new(&self.mapped, &new);
self.mapped = new;
Ok(events)
}
pub fn find_image_for_addr(&self, addr: Address) -> Option<&ModuleImage> {
let (_, image) = self.mapped().find(|(_, im)| im.contains(&addr))?;
Some(image)
}
}
/// A `MemoryMap` that is known to be file-backed and executable.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ModuleImage {
base: Address,
maps: Vec<MemoryMap>,
path: FilePath,
}
impl ModuleImage {
// Accepts an increasing sequence of memory mappings with a common file-backed
// pathname.
pub fn new(mut maps: Vec<MemoryMap>) -> Result<Self> {
maps.sort_by_key(|m| m.address);
if maps.is_empty() {
bail!("no mapping for module image");
}
if !maps
.iter()
.any(|m| m.perms.contains(MMPermissions::EXECUTE))
{
bail!("no executable mapping for module image");
}
// Cannot panic due to initial length check.
let first = &maps[0];
let path = if let MMapPath::Path(path) = &first.pathname {
FilePath::new(path.to_string_lossy())?
} else {
bail!("module image mappings must be file-backed");
};
for map in &maps {
if map.pathname != first.pathname {
bail!("module image mapping not file-backed");
}
}
let base = Address(first.address.0);
let image = ModuleImage { base, maps, path };
Ok(image)
}
pub fn path(&self) -> &FilePath {
&self.path
}
pub fn base(&self) -> Address {
self.base
}
pub fn contains(&self, addr: &Address) -> bool {
for map in &self.maps {
let lo = Address(map.address.0);
let hi = Address(map.address.1);
if (lo..hi).contains(addr) {
return true;
}
}
false
}
}
pub struct LoadEvents {
pub loaded: Vec<(Address, ModuleImage)>,
pub unloaded: Vec<(Address, ModuleImage)>,
}
impl LoadEvents {
pub fn new(old: &BTreeMap<Address, ModuleImage>, new: &BTreeMap<Address, ModuleImage>) -> Self {
// New not in old.
let loaded: Vec<_> = new
.iter()
.filter(|(nva, n)| {
!old.iter()
.any(|(iva, i)| *nva == iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
// Old not in new.
let unloaded: Vec<_> = old
.iter()
.filter(|(iva, i)| {
!new.iter()
.any(|(nva, n)| nva == *iva && n.path() == i.path())
})
.map(|(va, i)| (*va, i.clone()))
.collect();
Self { loaded, unloaded }
}
}
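// Example of the diff semantics above: if `old` maps images A and B and
// `new` maps B and C (B at the same base and path in both), then
// `loaded` contains only C and `unloaded` contains only A.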
#[derive(Clone, Debug, Default)]
pub struct Breakpoints {
saved: BTreeMap<Address, u8>,
}
impl Breakpoints {
pub fn set(&mut self, tracee: &mut Tracee, addr: Address) -> Result<()> {
// Return if the breakpoint exists. We don't want to conclude that the
// saved instruction byte was `0xcc`.
if self.saved.contains_key(&addr) {
return Ok(());
}
let mut data = [0u8];
tracee.read_memory_mut(addr.0, &mut data)?;
self.saved.insert(addr, data[0]);
tracee.write_memory(addr.0, &[0xcc])?;
Ok(())
}
pub fn clear(&mut self, tracee: &mut Tracee, addr: Address) -> Result<bool> {
let data = self.saved.remove(&addr);
let cleared = if let Some(data) = data {
tracee.write_memory(addr.0, &[data])?;
true
} else {
false
};
Ok(cleared)
}
}
fn continue_to_init_execve(tracer: &mut Ptracer) -> Result<Tracee> {
while let Some(tracee) = tracer.wait()? {
if let Stop::SyscallExit = &tracee.stop {
return Ok(tracee);
}
tracer.restart(tracee, Restart::Continue)?;
}
bail!("did not see initial execve() in tracee while recording coverage");
}
// lib.rs
//! [![license:MIT/Apache-2.0][1]](https://github.com/uazu/stakker)
//! [![github:uazu/stakker][2]](https://github.com/uazu/stakker)
//! [![crates.io:stakker][3]](https://crates.io/crates/stakker)
//! [![docs.rs:stakker][4]](https://docs.rs/stakker)
//! [![uazu.github.io:stakker][5]](https://uazu.github.io/stakker/)
//!
//! [1]: https://img.shields.io/badge/license-MIT%2FApache--2.0-blue
//! [2]: https://img.shields.io/badge/github-uazu%2Fstakker-brightgreen
//! [3]: https://img.shields.io/badge/crates.io-stakker-red
//! [4]: https://img.shields.io/badge/docs.rs-stakker-purple
//! [5]: https://img.shields.io/badge/uazu.github.io-stakker-yellow
//!
//! **Stakker** is a lightweight low-level single-threaded actor
//! runtime. It is designed to be layered on top of whatever event
//! source or main loop the user prefers to use. Asynchronous calls
//! are addressed to individual methods within an actor, rather like
//! Pony behaviours. All calls and argument types are known and
//! statically checked at compile-time giving the optimiser a lot of
//! scope. **Stakker** also provides a timer queue for timeouts or
//! delayed calls, a lazy queue to allow batching recent operations,
//! and an idle queue for running a call when nothing else is
//! outstanding.
//!
//! By default **Stakker** uses unsafe code for better time and memory
//! efficiency. However if you prefer to avoid unsafe code, then
//! enable the **no-unsafe** feature which compiles the whole crate
//! with `forbid(unsafe_code)`. Safe alternatives will be used, at
//! some cost in time and memory. There are other features that
//! provide finer-grained control (see below).
//!
//! - [Overview of types](#overview-of-types)
//! - [Efficiency](#efficiency)
//! - [Cargo features](#cargo-features)
//! - [Testing](#testing)
//! - [Tutorial example](#tutorial-example)
//! - [Main loop examples](#main-loop-examples)
//! - [Why the name **Stakker**?](#why-the-name-stakker)
//!
//! See the [Stakker Guide and Design
//! Notes](https://uazu.github.io/stakker/) for additional
//! documentation.
//!
//!
//! # Overview of types
//!
//! [`Actor`] and [`ActorOwn`] are ref-counting references to an
//! actor. Create an actor with [`actor!`] and call it with
//! [`call!`].
//!
//! [`Fwd`] and [`Ret`] forward data to another destination
//! asynchronously, typically to a particular entry-point in a
//! particular actor. So [`Fwd`] and [`Ret`] instances take the role
//! of callback functions. The difference between them is that
//! [`Fwd`] may be called multiple times, is ref-counted for cheap
//! cloning and is based on a `Fn` with `Copy`, whereas [`Ret`] can be
//! used only once, is based on `FnOnce` and is a "move" value. Also
//! the [`Ret`] end-point is informed if the [`Ret`] instance is
//! dropped without sending back a message, for example if a zombie
//! actor is called. See the [`fwd_*!`](#macros) and
//! [`ret_*!`](#macros) macros for creation of instances, and [`fwd!`]
//! and [`ret!`] to make use of them.
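//!
//! For example, a [`Fwd`] bound to an actor method can be created and
//! invoked like this (a sketch; it assumes a `light: Actor<Light>`
//! with a `set(bool)` method, as in the tutorial below):
//!
//! ```ignore
//! let fwd = fwd_to!([light], set() as (bool));
//! fwd!([fwd], true); // queues a call to Light::set(.., true)
//! ```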
//!
//! [`Stakker`] is the external interface to the runtime, i.e. how it
//! is managed from the event loop, or during startup.
//!
//! [`Cx`] is the context passed to all actor methods. It gives
//! access to methods related to the actor being called. It also
//! gives access to [`Core`].
//!
//! [`Core`] is the part of [`Stakker`] which is accessible to actors
//! during actor calls via [`Cx`]. Both [`Stakker`] and [`Cx`]
//! references dereference to [`Core`] and can be used wherever a
//! [`Core`] ref is required.
//!
//! [`Share`] allows a mutable structure to be shared safely between
//! actors, a bit like IPC shared-memory but with guaranteed exclusive
//! access. This may be used for efficiency, like shared-memory
//! buffers are sometimes used between OS processes.
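//!
//! For instance, two handles to one [`Share`] see the same data (a
//! sketch; `cx` is an actor context, and the `ro`/`rw` accessor names
//! are assumptions to illustrate the idea):
//!
//! ```ignore
//! let buf = Share::new(cx, Vec::<u8>::new());
//! let buf2 = buf.clone(); // cheap ref-counted clone
//! buf.rw(cx).extend_from_slice(b"data");
//! assert_eq!(buf2.ro(cx).len(), 4);
//! ```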
//!
//! [`Deferrer`] allows queuing things to run from `Drop` handlers or
//! from other places in the main thread without access to [`Core`].
//! All actors have a built-in [`Deferrer`] which can be used from
//! outside the actor.
//!
//! For interfacing with other threads, [`PipedThread`] wraps a thread
//! and handles all data transfer to/from it and all cleanup.
//! [`Channel`] allows other threads to send messages to an actor.
//! [`Waker`] is a primitive which allows channels and other data
//! transfer to the main thread to be coordinated. [See here](sync)
//! for more details.
//!
//!
//! # Efficiency
//!
//! A significant aim in the development of **Stakker** was to be
//! lightweight and to minimize overheads in time and memory, and to
//! scale well. Another significant aim was to be "as simple as
//! possible but no simpler", to try to find an optimal set of types
//! and operations that provide the required functionality and
//! ergonomics and that fit the Rust model, to make maximum use of the
//! guarantees that Rust provides.
//!
//! By default **Stakker** uses [`TCell`](https://docs.rs/qcell) or
//! [`TLCell`](https://docs.rs/qcell) for zero-cost protected access
//! to actor state, which also guarantees at compile-time that no
//! actor can directly access any other actor.
//!
//! By default a cut-down ref-counting implementation is used instead
//! of `Rc`, which saves around one `usize` per [`Actor`] or [`Fwd`]
//! instance.
//!
//! With default features, only one thread is allowed to run a
//! [`Stakker`] instance, which enables an optimisation which uses a
//! global variable for the [`Deferrer`] defer queue (used for drop
//! handlers). However if more [`Stakker`] instances need to be run,
//! then the **multi-thread** or **multi-stakker** features cause it
//! to use alternative implementations.
//!
//! All deferred operations, including all async actor calls, are
//! handled as `FnOnce` instances on a queue. The aim is to make this
//! cheap enough so that deferring something doesn't have to be a big
//! decision. Thanks to Rust's inlining, these are efficient -- the
//! compiler might even choose to inline the internal code of the
//! actor call into the `FnOnce`, as that is all known at
//! compile-time.
//!
//! By default the `FnOnce` queue is a flat heterogeneous queue,
//! storing the closures directly in a byte `Vec`, which should give
//! best performance and cache locality at the cost of some unsafe
//! code. However a fully-safe boxed closure queue implementation is
//! also available.
//!
//! Forwarding handlers ([`Fwd`]) are boxed `Fn` instances along with
//! a ref-count. Return handlers ([`Ret`]) are boxed `FnOnce`
//! instances. Both typically queue a `FnOnce` operation when
//! provided with arguments.
//!
//! If no inter-thread operations are active, then **Stakker** will
//! never do locking or any atomic operations, nor block for any
//! reason. So the code can execute at full speed without triggering
//! any CPU memory fences or whatever. Usually the only thing that
//! blocks would be the external I/O poller whilst waiting for I/O or
//! timer expiry. When other threads have been started and they defer
//! wake-ups to the main thread, this is handled as an I/O event which
//! causes the wake flags to be checked using atomic operations.
//!
//!
//! # Cargo features
//!
//! Cargo features in **Stakker** do not change **Stakker**'s public
//! API. The API stays the same, but the implementation behind the
//! API changes.
//!
//! Also, cargo features are additive. This means that if one crate
//! using **Stakker** enables a feature, then it is enabled for all
//! uses of **Stakker** in the build. So when features switch between
//! alternative implementations, enabling a feature has to result in
//! the more tolerant implementation, because all users of the crate
//! have to be able to work with this configuration. This usually
//! means that features switch from the most efficient and restrictive
//! implementation, to a less efficient but more flexible one.
//!
//! So using the default features is the best choice unless you have
//! specific requirements. When a crate that uses **Stakker** doesn't
//! care about whether a feature is enabled or not, it should avoid
//! setting it and leave it up to the application to choose.
//!
//! Features enabled by default:
//!
//! - **inter-thread**: Enables inter-thread operations such as
//! [`Waker`] and [`PipedThread`].
//!
//! Optional features:
//!
//! - **no-unsafe-queue**: Disable the fast FnOnce queue implementation,
//! which uses unsafe code. Uses a boxed queue instead.
//!
//! - **no-unsafe**: Disable all unsafe code within this crate, at
//! some cost in time and memory.
//!
//! - **multi-thread**: Specifies that more than one **Stakker** will
//! run in the process, at most one **Stakker** per thread. This
//! disables some optimisations that require process-wide access.
//!
//! - **multi-stakker**: Specifies that more than one **Stakker** may
//! need to run in the same thread. This disables optimisations that
//! require either process-wide or thread-local access.
//!
//! - **inline-deferrer**: Forces use of the inline [`Deferrer`]
//! implementation instead of using the global or thread-local
//! implementation. Possibly useful if thread-locals are very slow.
//!
//! - **logger**: Enables **Stakker**'s core logging feature, which
//! logs actor startup and termination, and which allows macros from
//! the `stakker_log` crate to log with actor context information.
//! See [`Stakker::set_logger`].
//!
//! These are the implementations that are switched, in order of
//! preference, listing most-preferred first:
//!
//! ### Cell type
//!
//! - `TCell`: Best performance, but only allows a single **Stakker**
//! per process
//!
//! - `TLCell`: Best performance, but uses thread-locals at
//! **Stakker** creation time and only allows a single **Stakker** per
//! thread
//!
//! - `QCell`: Allows many **Stakker** instances per thread at some
//! cost in time and memory
//!
//! ### Deferrer
//!
//! - Global deferrer: Uses a global variable to find the [`Deferrer`]
//!
//! - Thread-local deferrer: Uses a thread-local to find the
//! [`Deferrer`], with safe and unsafe variants
//!
//! - Inline deferrer: Keeps references to the [`Deferrer`] in all
//! places where it is needed, with safe and unsafe variants. In
//! particular this adds a `usize` to all actors.
//!
//! ### Actor ref-counting
//!
//! - Packed: Uses a little unsafe code to save a `usize` per actor
//!
//! - Standard: Uses `std::rc::Rc`
//!
//! ### Call queues
//!
//! - Fast `FnOnce` queue: Appends `FnOnce` closures directly to a
//! flat memory buffer. Gives best performance, but uses `unsafe`
//! code.
//!
//! - Boxed queue: Stores closures indirectly by boxing them
//!
//!
//! # Testing
//!
//! **Stakker** has unit and doc tests that give over 90% coverage
//! across all feature combinations. These tests also run cleanly
//! under valgrind and MIRI. In addition there are some fuzz tests
//! and stress tests under `extra/` that further exercise particular
//! components to verify that they operate as expected.
//!
//!
//! # Tutorial example
//!
//! ```
//!# use stakker::{actor, after, call, ret_nop, ret_shutdown, fwd_to, ret, ret_some_to};
//!# use stakker::{Actor, CX, Fwd, Stakker, Ret};
//!# use std::time::{Duration, Instant};
//!#
//! // An actor is represented as a struct which holds the actor state
//! struct Light {
//! start: Instant,
//! on: bool,
//! }
//!
//! impl Light {
//! // This is a "Prep" method which is used to create a Self value
//! // for the actor. `cx` is the actor context and gives access to
//! // Stakker `Core`. (`CX![]` expands to `&mut Cx<'_, Self>`.)
//! // A "Prep" method doesn't have to return a Self value right away.
//! // For example it might asynchronously attempt a connection to a
//! // remote server first before arranging a call to another "Prep"
//! // function which returns the Self value. Once a value is returned,
//! // the actor is "Ready" and any queued-up operations on the actor
//! // will be executed.
//! pub fn init(cx: CX![]) -> Option<Self> {
//! // Use cx.now() instead of Instant::now() to allow execution
//! // in virtual time if supported by the environment.
//! let start = cx.now();
//! Some(Self { start, on: false })
//! }
//!
//! // Methods that may be called once the actor is "Ready" have a
//! // `&mut self` or `&self` first argument.
//! pub fn set(&mut self, cx: CX![], on: bool) {
//! self.on = on;
//! let time = cx.now() - self.start;
//! println!("{:04}.{:03} Light on: {}", time.as_secs(), time.subsec_millis(), on);
//! }
//!
//! // A `Fwd` or `Ret` allows passing data to arbitrary destinations,
//! // like an async callback. Here we use it to return a value.
//! pub fn query(&self, cx: CX![], ret: Ret<bool>) {
//! ret!([ret], self.on);
//! }
//! }
//!
//! // This is another actor that holds a reference to a Light actor.
//! struct Flasher {
//! light: Actor<Light>,
//! interval: Duration,
//! count: usize,
//! }
//!
//! impl Flasher {
//! pub fn init(cx: CX![], light: Actor<Light>,
//! interval: Duration, count: usize) -> Option<Self> {
//! // Defer first switch to the queue
//! call!([cx], switch(true));
//! Some(Self { light, interval, count })
//! }
//!
//! pub fn switch(&mut self, cx: CX![], on: bool) {
//! // Change the light state
//! call!([self.light], set(on));
//!
//! self.count -= 1;
//! if self.count != 0 {
//! // Call switch again after a delay
//! after!(self.interval, [cx], switch(!on));
//! } else {
//! // Terminate the actor successfully, causing StopCause handler to run
//! cx.stop();
//! }
//!
//! // Query the light state, receiving the response in the method
//! // `recv_state`, which has both fixed and forwarded arguments.
//! let ret = ret_some_to!([cx], recv_state(self.count) as (bool));
//! call!([self.light], query(ret));
//! }
//!
//! fn recv_state(&self, _: CX![], count: usize, state: bool) {
//! println!(" (at count {} received: {})", count, state);
//! }
//! }
//!
//! let mut stakker0 = Stakker::new(Instant::now());
//! let stakker = &mut stakker0;
//!
//! // Create and initialise the Light and Flasher actors. The
//! // Flasher actor is given a reference to the Light. Use a
//! // StopCause handler to shutdown when the Flasher terminates.
//! let light = actor!(stakker, Light::init(), ret_nop!());
//! let _flasher = actor!(
//! stakker,
//! Flasher::init(light.clone(), Duration::from_secs(1), 6),
//! ret_shutdown!(stakker)
//! );
//!
//! // Since we're not in virtual time, we use `Instant::now()` in
//! // this loop, which is then passed on to all the actors as
//! // `cx.now()`. (If you want to run time faster or slower you
//! // could use another source of time.) So all calls in a batch of
//! // processing get the same `cx.now()` value. Also note that
//! // `Instant::now()` uses a Mutex on some platforms so it saves
//! // cycles to call it less often.
//! stakker.run(Instant::now(), false);
//!# if false {
//! while stakker.not_shutdown() {
//! // Wait for next timer to expire. Here there's no I/O polling
//! // required to wait for external events, so just `sleep`
//! let maxdur = stakker.next_wait_max(Instant::now(), Duration::from_secs(60), false);
//! std::thread::sleep(maxdur);
//!
//! // Run queue and timers
//! stakker.run(Instant::now(), false);
//! }
//!# } else { // Use virtual time version when testing
//!# let mut now = Instant::now();
//!# while stakker.not_shutdown() {
//!# now += stakker.next_wait_max(now, Duration::from_secs(60), false);
//!# stakker.run(now, false);
//!# }
//!# }
//! ```
//!
//!
//! # Main loop examples
//!
//! Note that the 60s duration used below just means that the process
//! will wake every 60s if nothing else is going on. You could make
//! this a larger value.
//!
//! ### Virtual time main loop, no I/O, no idle queue handling
//!
//! ```no_run
//!# use stakker::Stakker;
//!# use std::time::{Duration, Instant};
//!# fn test(stakker: &mut Stakker) {
//! let mut now = Instant::now();
//! stakker.run(now, false);
//! while stakker.not_shutdown() {
//! now += stakker.next_wait_max(now, Duration::from_secs(60), false);
//! stakker.run(now, false);
//! }
//!# }
//! ```
//!
//! ### Real time main loop, no I/O, no idle queue handling
//!
//! ```no_run
//!# use stakker::Stakker;
//!# use std::time::{Duration, Instant};
//!# fn test(stakker: &mut Stakker) {
//! stakker.run(Instant::now(), false);
//! while stakker.not_shutdown() {
//! let maxdur = stakker.next_wait_max(Instant::now(), Duration::from_secs(60), false);
//! std::thread::sleep(maxdur);
//! stakker.run(Instant::now(), false);
//! }
//!# }
//! ```
//!
//! ### Real time I/O poller main loop, with idle queue handling
//!
//! This example uses `MioPoll` from the `stakker_mio` crate.
//!
//! ```no_run
//!# use stakker::Stakker;
//!# use std::time::{Duration, Instant};
//!# struct MioPoll;
//!# impl MioPoll { fn poll(&self, d: Duration) -> std::io::Result<bool> { Ok(false) } }
//!# fn test(stakker: &mut Stakker, miopoll: &mut MioPoll) -> std::io::Result<()> {
//! let mut idle_pending = stakker.run(Instant::now(), false);
//! while stakker.not_shutdown() {
//! let maxdur = stakker.next_wait_max(Instant::now(), Duration::from_secs(60), idle_pending);
//! let activity = miopoll.poll(maxdur)?;
//! idle_pending = stakker.run(Instant::now(), !activity);
//! }
//!# Ok(())
//!# }
//! ```
//!
//! The way this works is that if there are idle queue items pending,
//! then `next_wait_max` returns 0s, which means that the `poll` call
//! only checks for new I/O events without blocking. If there are no
//! new events (`activity` is false), then an item from the idle queue
//! is run.
//!
//!
//! # Why the name **Stakker**?
//!
//! "Single-threaded actor runtime" → STACR → **Stakker**.
//! The name is also a small tribute to the 1988 Humanoid track
//! "Stakker Humanoid", which borrows samples from the early video
//! game **Berzerk**, and which rolls along quite economically as I
//! hope the **Stakker** runtime also does.
//!
//! [`ActorOwn`]: struct.ActorOwn.html
//! [`Actor`]: struct.Actor.html
//! [`Channel`]: sync/struct.Channel.html
//! [`Core`]: struct.Core.html
//! [`Cx`]: struct.Cx.html
//! [`Deferrer`]: struct.Deferrer.html
//! [`Fwd`]: struct.Fwd.html
//! [`PipedThread`]: sync/struct.PipedThread.html
//! [`Ret`]: struct.Ret.html
//! [`Share`]: struct.Share.html
//! [`Stakker::set_logger`]: struct.Stakker.html#method.set_logger
//! [`Stakker`]: struct.Stakker.html
//! [`Waker`]: sync/struct.Waker.html
//! [`actor!`]: macro.actor.html
//! [`call!`]: macro.call.html
//! [`fwd!`]: macro.fwd.html
//! [`ret!`]: macro.ret.html
// Insist on 2018 style
#![deny(rust_2018_idioms)]
// No unsafe code is allowed anywhere if no-unsafe is set
#![cfg_attr(feature = "no-unsafe", forbid(unsafe_code))]
// To fix these would break the API
#![allow(clippy::upper_case_acronyms)]
// TODO: Illustrate Fwd in the tutorial example, e.g. make println!
// output go via a Fwd
pub use crate::core::{Core, Stakker};
pub use crate::log::{LogFilter, LogID, LogLevel, LogLevelError, LogRecord, LogVisitor};
pub use actor::{Actor, ActorOwn, ActorOwnAnon, ActorOwnSlab, Cx, StopCause};
pub use deferrer::Deferrer;
pub use fwd::Fwd;
pub use ret::Ret;
pub use share::{Share, ShareWeak};
pub use timers::{FixedTimerKey, MaxTimerKey, MinTimerKey};
// These are for backwards-compatibility. They allow the types to
// still be accessed at the top level of the crate, but hides this in
// the online docs. Not hiding it in the locally-generated docs
// allows semver-checks to pass.
#[cfg_attr(docsrs, doc(hidden))]
pub use sync::{PipedLink, PipedThread, Waker};
/// Auxiliary types that are not interesting in themselves
pub mod aux {
pub use crate::actor::ActorOwnSlabIter;
}
// Trait checks
static_assertions::assert_not_impl_any!(Stakker: Send, Sync, Copy, Clone);
static_assertions::assert_not_impl_any!(Core: Send, Sync, Copy, Clone);
static_assertions::assert_not_impl_any!(Cx<'_, u8>: Send, Sync, Copy, Clone);
static_assertions::assert_not_impl_any!(Ret<u8>: Send, Sync, Copy, Clone);
static_assertions::assert_not_impl_any!(Actor<u8>: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(task::Task: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(Deferrer: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(Share<u8>: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(Fwd<u8>: Send, Sync, Copy);
static_assertions::assert_not_impl_any!(Waker: Copy, Clone);
static_assertions::assert_impl_all!(Actor<u8>: Clone);
static_assertions::assert_impl_all!(Deferrer: Clone);
static_assertions::assert_impl_all!(Share<u8>: Clone);
static_assertions::assert_impl_all!(Fwd<u8>: Clone);
static_assertions::assert_impl_all!(Waker: Send, Sync);
static_assertions::assert_impl_all!(FixedTimerKey: Copy, Clone);
static_assertions::assert_impl_all!(MaxTimerKey: Copy, Clone);
static_assertions::assert_impl_all!(MinTimerKey: Copy, Clone);
mod actor;
mod core;
mod fwd;
mod log;
mod macros;
mod ret;
mod share;
pub mod sync;
pub mod task;
mod timers;
#[cfg(test)]
mod test;
// Ref-counting selections
#[cfg(not(feature = "no-unsafe"))]
mod rc {
pub(crate) mod count;
pub(crate) mod minrc;
pub(crate) mod actorrc_packed;
pub(crate) use actorrc_packed::ActorRc;
pub(crate) mod fwdrc_min;
pub(crate) use fwdrc_min::FwdRc;
}
#[cfg(feature = "no-unsafe")]
mod rc {
pub(crate) mod count;
pub(crate) mod actorrc_std;
pub(crate) use actorrc_std::ActorRc;
pub(crate) mod fwdrc_std;
pub(crate) use fwdrc_std::FwdRc;
}
// Deferrer selection
#[cfg(all(
not(feature = "inline-deferrer"),
not(feature = "multi-stakker"),
not(feature = "multi-thread"),
not(feature = "no-unsafe")
))]
mod deferrer {
mod api;
pub use api::Deferrer;
mod global;
use global::DeferrerAux;
}
#[cfg(all(
not(feature = "inline-deferrer"),
not(feature = "multi-stakker"),
feature = "multi-thread"
))]
mod deferrer {
mod api;
pub use api::Deferrer;
#[cfg(feature = "no-unsafe")]
mod thread_local_safe;
#[cfg(feature = "no-unsafe")]
use thread_local_safe::DeferrerAux;
#[cfg(not(feature = "no-unsafe"))]
mod thread_local;
#[cfg(not(feature = "no-unsafe"))]
use thread_local::DeferrerAux;
}
// Inline deferrer used if neither of the other options fits. Clearer
// to not simplify this boolean expression, because the subexpressions
// should match the expressions above.
#[cfg(all(
not(all(
not(feature = "inline-deferrer"),
not(feature = "multi-stakker"),
not(feature = "multi-thread"),
not(feature = "no-unsafe")
)),
not(all(
not(feature = "inline-deferrer"),
not(feature = "multi-stakker"),
feature = "multi-thread",
)),
))]
mod deferrer {
mod api;
pub use api::Deferrer;
#[cfg(feature = "no-unsafe")]
mod inline_safe;
#[cfg(feature = "no-unsafe")]
use inline_safe::DeferrerAux;
#[cfg(not(feature = "no-unsafe"))]
mod inline;
#[cfg(not(feature = "no-unsafe"))]
use inline::DeferrerAux;
}
// FnOnceQueue selection
#[cfg(not(any(feature = "no-unsafe", feature = "no-unsafe-queue")))]
mod queue {
mod flat;
pub(crate) use flat::FnOnceQueue;
}
#[cfg(any(feature = "no-unsafe", feature = "no-unsafe-queue"))]
mod queue {
mod boxed;
pub(crate) use boxed::FnOnceQueue;
}
// Cell selection
#[cfg(all(not(feature = "multi-stakker"), not(feature = "multi-thread")))]
mod cell {
pub(crate) mod tcell;
pub(crate) use tcell as cell;
}
#[cfg(all(not(feature = "multi-stakker"), feature = "multi-thread"))]
mod cell {
pub(crate) mod tlcell;
pub(crate) use tlcell as cell;
}
#[cfg(feature = "multi-stakker")]
mod cell {
pub(crate) mod qcell;
pub(crate) use self::qcell as cell;
} | //! provided with arguments. These are also efficient due to
//! inlining. In this case two chunks of inlined code are generated
//! for each by the compiler: the first which accepts arguments and
//! pushes the second one onto the queue. | random_line_split |
decisiontree.rs | //! llvm/decisiontree.rs - Defines how to codegen a decision tree
//! via `codegen_tree`. This decisiontree is the result of compiling
//! a match expression into a decisiontree during type inference.
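// For intuition, a two-branch match (pseudocode; the surface syntax is
// assumed here only for illustration) such as:
//
//     match x
//     | Some n -> n
//     | None -> 0
//
// reaches this module as roughly:
//
//     Switch(x_id, [Case { tag: Some(some_tag), fields: [[n_id]], branch: Leaf(0) },
//                   Case { tag: Some(none_tag), fields: [],       branch: Leaf(1) }])
//
// where Leaf(n) refers to the nth branch of the original match expression.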
use crate::llvm::{ Generator, CodeGen };
use crate::types::pattern::{ DecisionTree, Case, VariantTag };
use crate::types::{ Type, typed::Typed };
use crate::parser::ast::Match;
use crate::cache::{ ModuleCache, DefinitionInfoId, DefinitionKind };
use crate::nameresolution::builtin::PAIR_ID;
use inkwell::values::{ BasicValueEnum, IntValue, PhiValue };
use inkwell::types::BasicType;
use inkwell::basic_block::BasicBlock;
/// This type alias is used for convenience in codegen_case
/// for adding blocks and values to the switch cases
/// while compiling a given case of a pattern match.
type SwitchCases<'g> = Vec<(IntValue<'g>, BasicBlock<'g>)>;
impl<'g> Generator<'g> {
/// Perform LLVM codegen for the given DecisionTree.
/// This roughly translates the tree into a series of switches and phi nodes.
pub fn codegen_tree<'c>(&mut self, tree: &DecisionTree, match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
let value_to_match = match_expr.expression.codegen(self, cache);
// Each Switch case in the tree works by switching on a given value in a DefinitionInfoId
// then storing each part it extracted into other DefinitionInfoIds and recursing. Thus,
// the initial value needs to be stored in the first id here since before this there was no
// extract and store step that would have set the value beforehand.
if let DecisionTree::Switch(id, _) = tree {
let typ = self.follow_bindings(match_expr.expression.get_type().unwrap(), cache);
self.definitions.insert((*id, typ), value_to_match);
}
let starting_block = self.current_block();
let ending_block = self.insert_into_new_block("match_end");
// Create the phi value to merge the value of all the match branches
let match_type = match_expr.typ.as_ref().unwrap();
let llvm_type = self.convert_type(match_type, cache);
let phi = self.builder.build_phi(llvm_type, "match_result");
// branches may be repeated in the decision tree, so this Vec is used to store the block
// of each branch if it was already codegen'd.
let mut branches: Vec<_> = vec![None; match_expr.branches.len()];
self.builder.position_at_end(starting_block);
// Then codegen the decisiontree itself that will eventually lead to each branch.
self.codegen_subtree(tree, &mut branches, phi, ending_block, match_expr, cache);
self.builder.position_at_end(ending_block);
phi.as_basic_value()
}
/// Recurse on the given DecisionTree, codegening each switch and remembering
/// all the Leaf nodes that have already been compiled, since these may be
/// repeated in the same DecisionTree.
fn codegen_subtree<'c>(&mut self, tree: &DecisionTree, branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>, match_end: BasicBlock<'g>, match_expr: &Match<'c>, cache: &mut ModuleCache<'c>)
{
match tree {
DecisionTree::Leaf(n) => {
// If this leaf has been codegen'd already, branches[n] was already set to Some in codegen_case
match branches[*n] {
Some(_block) => (),
_ => {
self.codegen_branch(&match_expr.branches[*n].1, match_end, cache)
.map(|(branch, value)| phi.add_incoming(&[(&value, branch)]));
}
}
},
DecisionTree::Fail => {
unreachable!("DecisionTree::Fail encountered during DecisionTree codegen. This should have been caught during completeness checking.");
},
DecisionTree::Switch(id, cases) => {
if !cases.is_empty() {
let type_to_switch_on = cache.definition_infos[id.0].typ.as_ref().unwrap();
let type_to_switch_on = self.follow_bindings(type_to_switch_on, cache);
let value_to_switch_on = self.definitions[&(*id, type_to_switch_on)];
let starting_block = self.current_block();
// All llvm switches require an else block, even if this pattern doesn't
// include one. In that case we insert an unreachable instruction.
let else_block = self.codegen_match_else_block(value_to_switch_on,
cases, branches, phi, match_end, match_expr, cache);
let mut switch_cases = vec![];
for case in cases.iter() {
self.codegen_case(case, value_to_switch_on, &mut switch_cases,
branches, phi, match_end, match_expr, cache);
}
self.builder.position_at_end(starting_block);
if cases.len() > 1 {
self.build_switch(value_to_switch_on, else_block, switch_cases);
} else if cases.len() == 1 {
// If we only have 1 case we don't need to test anything, just forcibly
// br to that case. This optimization is necessary for structs since structs
// have no tag to check against.
self.builder.build_unconditional_branch(switch_cases[0].1);
}
}
},
}
}
fn build_switch<'c>(&self,
value_to_switch_on: BasicValueEnum<'g>,
else_block: BasicBlock<'g>,
switch_cases: SwitchCases<'g>)
{
// TODO: Switch to if-else chains over a single switch block.
// Currently this will fail at runtime when attempting to match
// a constructor with a string value after trying to convert it into an
// integer tag value.
let tag = self.extract_tag(value_to_switch_on);
self.builder.build_switch(tag, else_block, &switch_cases);
}
fn codegen_case<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
switch_cases: &mut SwitchCases<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>)
{
// Early out if this is a match-all case. Those should be handled by codegen_match_else_block
let tag = match &case.tag {
Some(tag) => tag,
None => return,
};
// Bind each pattern then codegen the rest of the tree.
// If the rest of the tree is a Leaf that has already been codegen'd we shouldn't compile
// it twice, instead we take its starting block and jump straight to that in the switch case.
let block = match &case.branch {
DecisionTree::Leaf(n) => {
match &branches[*n] {
Some(block) => *block,
None => {
// Codegening the branch also stores its starting_block in branches,
// so we can retrieve it here.
let branch_start = self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache);
branches[*n] = Some(branch_start);
branch_start
}
}
},
_ => self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache)
};
let constructor_tag = self.get_constructor_tag(tag, cache).unwrap();
switch_cases.push((constructor_tag.into_int_value(), block));
}
/// Creates a new llvm::BasicBlock to insert into, then binds the union downcast
/// from the current case, then compiles the rest of the subtree.
fn codegen_case_in_new_block<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let branch_start = self.insert_into_new_block("match_branch");
self.bind_pattern_fields(case, matched_value, cache);
self.codegen_subtree(&case.branch, branches, phi, match_end, match_expr, cache);
branch_start
}
/// Given a tagged union (either { tag: u8, ... } or just (tag: u8)), extract the
/// integer tag component to compare which constructor this value was constructed from.
fn extract_tag(&self, variant: BasicValueEnum<'g>) -> IntValue<'g> {
if variant.is_struct_value() {
self.builder.build_extract_value(variant.into_struct_value(), 0, "tag").unwrap().into_int_value()
} else {
assert!(variant.is_int_value());
variant.into_int_value()
}
}
/// Get the tag value that identifies which constructor this is.
fn get_constructor_tag<'c>(&mut self, tag: &VariantTag, cache: &mut ModuleCache<'c>) -> Option<BasicValueEnum<'g>> {
match tag {
VariantTag::True => Some(self.bool_value(true)),
VariantTag::False => Some(self.bool_value(false)),
VariantTag::Unit => Some(self.unit_value()),
// TODO: Remove pair tag, it shouldn't need one
VariantTag::UserDefined(PAIR_ID) => Some(self.unit_value()),
VariantTag::UserDefined(id) => {
match &cache.definition_infos[id.0].definition {
Some(DefinitionKind::TypeConstructor { tag: Some(tag), .. }) => {
Some(self.tag_value(*tag as u8))
},
_ => None,
}
},
VariantTag::Literal(literal) => Some(literal.codegen(self, cache)),
}
}
fn is_union_constructor<'c>(typ: &Type, cache: &ModuleCache<'c>) -> bool {
use crate::types::Type::*;
match typ {
Primitive(_) => false,
Ref(_) => false,
Function(function) => Self::is_union_constructor(&function.return_type, cache),
TypeApplication(typ, _) => Self::is_union_constructor(typ, cache),
ForAll(_, typ) => Self::is_union_constructor(typ, cache),
UserDefinedType(id) => cache.type_infos[id.0].is_union(),
TypeVariable(_) => unreachable!("Constructors should always have concrete types"),
}
}
    /// Cast the given value to the given tagged-union variant. Returns the
    /// value unchanged if the given VariantTag is not a tagged-union tag.
fn cast_to_variant_type<'c>(&mut self, value: BasicValueEnum<'g>, case: &Case,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
match &case.tag {
Some(VariantTag::UserDefined(id)) => {
let mut field_types = vec![];
let constructor = &cache.definition_infos[id.0];
if Self::is_union_constructor(constructor.typ.as_ref().unwrap(), cache) {
field_types.push(self.tag_type());
}
for field_ids in case.fields.iter() {
let typ = cache.definition_infos[field_ids[0].0].typ.as_ref().unwrap();
field_types.push(self.convert_type(typ, cache));
}
let cast_type = self.context.struct_type(&field_types, false).as_basic_type_enum();
self.reinterpret_cast_llvm_type(value, cast_type)
},
_ => value, | cases.last().unwrap().tag == None
}
/// codegen an else/match-all case of a particular constructor in a DecisionTree.
/// If there is no MatchAll case (represented by a None value for case.tag) then
/// a block is created with an llvm unreachable assertion.
fn codegen_match_else_block<'c>(&mut self,
value_to_switch_on: BasicValueEnum<'g>,
cases: &[Case],
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let block = self.insert_into_new_block("match_all");
let last_case = cases.last().unwrap();
// If there's a catch-all case we can codegen the code there. Otherwise if this
// constructor has no catchall the resulting code should be unreachable.
if self.has_match_all_case(cases) {
self.bind_pattern_field(value_to_switch_on, &last_case.fields[0], cache);
self.codegen_subtree(&last_case.branch, branches, phi, match_end, match_expr, cache);
} else {
self.builder.build_unreachable();
}
block
}
/// Each Case in a DecisionTree::Switch contains { tag, fields, branch } where tag
/// is the matched constructor tag and fields contains a Vec of Vec<DefinitionInfoId>
/// where the outer Vec contains an inner Vec for each field of the tagged-union variant,
/// and each inner Vec contains the variables to bind the result of that field to. There
/// can be multiple ids for a single field as a result of combining multiple cases into one,
/// see the DecisionTree type and its completeness checking for more information.
fn bind_pattern_field<'c>(&mut self, value: BasicValueEnum<'g>, field: &[DefinitionInfoId], cache: &mut ModuleCache<'c>) {
for id in field {
let typ = self.follow_bindings(cache.definition_infos[id.0].typ.as_ref().unwrap(), cache);
self.definitions.insert((*id, typ), value);
}
}
/// Performs the union downcast, binding each field of the downcasted variant
    /// to the appropriate DefinitionInfoIds held within the given Case.
fn bind_pattern_fields<'c>(&mut self, case: &Case, matched_value: BasicValueEnum<'g>, cache: &mut ModuleCache<'c>) {
        let variant = self.cast_to_variant_type(matched_value, case, cache);
// There are three cases here:
// 1. The tag is a tagged union tag. In this case, the value is a tuple of (tag, fields...)
// so bind each nth field to the n+1 value in this tuple.
// 2. The tag is a tuple. In this case, bind each nth tuple field to each nth field.
// 3. The tag is a primitive like true/false. In this case there is only 1 "field" and we
// bind it to the entire value.
match &case.tag {
Some(VariantTag::UserDefined(constructor)) => {
let variant = variant.into_struct_value();
// TODO: Stop special casing pairs and allow a 0 offset
// for every product type
let offset = if *constructor == PAIR_ID { 0 } else { 1 };
for (field_no, ids) in case.fields.iter().enumerate() {
let field = self.builder.build_extract_value(variant, offset + field_no as u32, "pattern_extract").unwrap();
self.bind_pattern_field(field, ids, cache);
}
},
_ => {
assert!(case.fields.len() <= 1);
if case.fields.len() == 1 {
self.bind_pattern_field(variant, &case.fields[0], cache);
}
}
}
}
} | }
}
/// When creating a decision tree, any match all case is always last in the case list.
fn has_match_all_case(&self, cases: &[Case]) -> bool { | random_line_split |
decisiontree.rs | //! llvm/decisiontree.rs - Defines how to codegen a decision tree
//! via `codegen_tree`. This decisiontree is the result of compiling
//! a match expression into a decisiontree during type inference.
use crate::llvm::{ Generator, CodeGen };
use crate::types::pattern::{ DecisionTree, Case, VariantTag };
use crate::types::{ Type, typed::Typed };
use crate::parser::ast::Match;
use crate::cache::{ ModuleCache, DefinitionInfoId, DefinitionKind };
use crate::nameresolution::builtin::PAIR_ID;
use inkwell::values::{ BasicValueEnum, IntValue, PhiValue };
use inkwell::types::BasicType;
use inkwell::basic_block::BasicBlock;
/// This type alias is used for convenience in codegen_case
/// for adding blocks and values to the switch cases
/// while compiling a given case of a pattern match.
type SwitchCases<'g> = Vec<(IntValue<'g>, BasicBlock<'g>)>;
impl<'g> Generator<'g> {
/// Perform LLVM codegen for the given DecisionTree.
/// This roughly translates the tree into a series of switches and phi nodes.
pub fn codegen_tree<'c>(&mut self, tree: &DecisionTree, match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
let value_to_match = match_expr.expression.codegen(self, cache);
// Each Switch case in the tree works by switching on a given value in a DefinitionInfoId
// then storing each part it extracted into other DefinitionInfoIds and recursing. Thus,
// the initial value needs to be stored in the first id here since before this there was no
// extract and store step that would have set the value beforehand.
if let DecisionTree::Switch(id, _) = tree {
let typ = self.follow_bindings(match_expr.expression.get_type().unwrap(), cache);
self.definitions.insert((*id, typ), value_to_match);
}
let starting_block = self.current_block();
let ending_block = self.insert_into_new_block("match_end");
// Create the phi value to merge the value of all the match branches
let match_type = match_expr.typ.as_ref().unwrap();
let llvm_type = self.convert_type(match_type, cache);
let phi = self.builder.build_phi(llvm_type, "match_result");
// branches may be repeated in the decision tree, so this Vec is used to store the block
// of each branch if it was already codegen'd.
let mut branches: Vec<_> = vec![None; match_expr.branches.len()];
self.builder.position_at_end(starting_block);
// Then codegen the decisiontree itself that will eventually lead to each branch.
self.codegen_subtree(tree, &mut branches, phi, ending_block, match_expr, cache);
self.builder.position_at_end(ending_block);
phi.as_basic_value()
}
/// Recurse on the given DecisionTree, codegening each switch and remembering
/// all the Leaf nodes that have already been compiled, since these may be
/// repeated in the same DecisionTree.
fn codegen_subtree<'c>(&mut self, tree: &DecisionTree, branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>, match_end: BasicBlock<'g>, match_expr: &Match<'c>, cache: &mut ModuleCache<'c>)
{
match tree {
DecisionTree::Leaf(n) => {
// If this leaf has been codegen'd already, branches[n] was already set to Some in codegen_case
match branches[*n] {
Some(_block) => (),
_ => {
self.codegen_branch(&match_expr.branches[*n].1, match_end, cache)
.map(|(branch, value)| phi.add_incoming(&[(&value, branch)]));
}
}
},
DecisionTree::Fail => {
unreachable!("DecisionTree::Fail encountered during DecisionTree codegen. This should have been caught during completeness checking.");
},
DecisionTree::Switch(id, cases) => {
if !cases.is_empty() {
let type_to_switch_on = cache.definition_infos[id.0].typ.as_ref().unwrap();
let type_to_switch_on = self.follow_bindings(type_to_switch_on, cache);
let value_to_switch_on = self.definitions[&(*id, type_to_switch_on)];
let starting_block = self.current_block();
// All llvm switches require an else block, even if this pattern doesn't
// include one. In that case we insert an unreachable instruction.
let else_block = self.codegen_match_else_block(value_to_switch_on,
cases, branches, phi, match_end, match_expr, cache);
let mut switch_cases = vec![];
for case in cases.iter() {
self.codegen_case(case, value_to_switch_on, &mut switch_cases,
branches, phi, match_end, match_expr, cache);
}
self.builder.position_at_end(starting_block);
if cases.len() > 1 {
self.build_switch(value_to_switch_on, else_block, switch_cases);
} else if cases.len() == 1 {
// If we only have 1 case we don't need to test anything, just forcibly
// br to that case. This optimization is necessary for structs since structs
// have no tag to check against.
self.builder.build_unconditional_branch(switch_cases[0].1);
}
}
},
}
}
fn build_switch<'c>(&self,
value_to_switch_on: BasicValueEnum<'g>,
else_block: BasicBlock<'g>,
switch_cases: SwitchCases<'g>)
{
// TODO: Switch to if-else chains over a single switch block.
// Currently this will fail at runtime when attempting to match
// a constructor with a string value after trying to convert it into an
// integer tag value.
let tag = self.extract_tag(value_to_switch_on);
self.builder.build_switch(tag, else_block, &switch_cases);
}
fn codegen_case<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
switch_cases: &mut SwitchCases<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>)
{
// Early out if this is a match-all case. Those should be handled by codegen_match_else_block
let tag = match &case.tag {
Some(tag) => tag,
None => return,
};
// Bind each pattern then codegen the rest of the tree.
// If the rest of the tree is a Leaf that has already been codegen'd we shouldn't compile
// it twice, instead we take its starting block and jump straight to that in the switch case.
let block = match &case.branch {
DecisionTree::Leaf(n) => {
match &branches[*n] {
Some(block) => *block,
None => {
// Codegening the branch also stores its starting_block in branches,
// so we can retrieve it here.
let branch_start = self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache);
branches[*n] = Some(branch_start);
branch_start
}
}
},
_ => self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache)
};
let constructor_tag = self.get_constructor_tag(tag, cache).unwrap();
switch_cases.push((constructor_tag.into_int_value(), block));
}
/// Creates a new llvm::BasicBlock to insert into, then binds the union downcast
/// from the current case, then compiles the rest of the subtree.
fn codegen_case_in_new_block<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let branch_start = self.insert_into_new_block("match_branch");
self.bind_pattern_fields(case, matched_value, cache);
self.codegen_subtree(&case.branch, branches, phi, match_end, match_expr, cache);
branch_start
}
/// Given a tagged union (either { tag: u8, ... } or just (tag: u8)), extract the
/// integer tag component to compare which constructor this value was constructed from.
fn extract_tag(&self, variant: BasicValueEnum<'g>) -> IntValue<'g> {
if variant.is_struct_value() {
self.builder.build_extract_value(variant.into_struct_value(), 0, "tag").unwrap().into_int_value()
} else {
assert!(variant.is_int_value());
variant.into_int_value()
}
}
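    // Layout sketch (an assumption restating the doc comment above): a
    // constructor with fields is lowered to a struct { tag: u8, field0, .. },
    // so the tag sits at struct index 0, while a fieldless constructor can be
    // lowered to just the bare integer tag, which is why extract_tag accepts
    // both struct and plain integer values.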
/// Get the tag value that identifies which constructor this is.
fn | <'c>(&mut self, tag: &VariantTag, cache: &mut ModuleCache<'c>) -> Option<BasicValueEnum<'g>> {
match tag {
VariantTag::True => Some(self.bool_value(true)),
VariantTag::False => Some(self.bool_value(false)),
VariantTag::Unit => Some(self.unit_value()),
// TODO: Remove pair tag, it shouldn't need one
VariantTag::UserDefined(PAIR_ID) => Some(self.unit_value()),
VariantTag::UserDefined(id) => {
match &cache.definition_infos[id.0].definition {
Some(DefinitionKind::TypeConstructor { tag: Some(tag), .. }) => {
Some(self.tag_value(*tag as u8))
},
_ => None,
}
},
VariantTag::Literal(literal) => Some(literal.codegen(self, cache)),
}
}
fn is_union_constructor<'c>(typ: &Type, cache: &ModuleCache<'c>) -> bool {
use crate::types::Type::*;
match typ {
Primitive(_) => false,
Ref(_) => false,
Function(function) => Self::is_union_constructor(&function.return_type, cache),
TypeApplication(typ, _) => Self::is_union_constructor(typ, cache),
ForAll(_, typ) => Self::is_union_constructor(typ, cache),
UserDefinedType(id) => cache.type_infos[id.0].is_union(),
TypeVariable(_) => unreachable!("Constructors should always have concrete types"),
}
}
    /// Cast the given value to the given tagged-union variant. Returns the
    /// value unchanged if the given VariantTag is not a tagged-union tag.
fn cast_to_variant_type<'c>(&mut self, value: BasicValueEnum<'g>, case: &Case,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
match &case.tag {
Some(VariantTag::UserDefined(id)) => {
let mut field_types = vec![];
let constructor = &cache.definition_infos[id.0];
if Self::is_union_constructor(constructor.typ.as_ref().unwrap(), cache) {
field_types.push(self.tag_type());
}
for field_ids in case.fields.iter() {
let typ = cache.definition_infos[field_ids[0].0].typ.as_ref().unwrap();
field_types.push(self.convert_type(typ, cache));
}
let cast_type = self.context.struct_type(&field_types, false).as_basic_type_enum();
self.reinterpret_cast_llvm_type(value, cast_type)
},
_ => value,
}
}
/// When creating a decision tree, any match all case is always last in the case list.
fn has_match_all_case(&self, cases: &[Case]) -> bool {
        cases.last().unwrap().tag.is_none()
}
/// codegen an else/match-all case of a particular constructor in a DecisionTree.
/// If there is no MatchAll case (represented by a None value for case.tag) then
/// a block is created with an llvm unreachable assertion.
fn codegen_match_else_block<'c>(&mut self,
value_to_switch_on: BasicValueEnum<'g>,
cases: &[Case],
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let block = self.insert_into_new_block("match_all");
let last_case = cases.last().unwrap();
// If there's a catch-all case we can codegen the code there. Otherwise if this
// constructor has no catchall the resulting code should be unreachable.
if self.has_match_all_case(cases) {
self.bind_pattern_field(value_to_switch_on, &last_case.fields[0], cache);
self.codegen_subtree(&last_case.branch, branches, phi, match_end, match_expr, cache);
} else {
self.builder.build_unreachable();
}
block
}
/// Each Case in a DecisionTree::Switch contains { tag, fields, branch } where tag
/// is the matched constructor tag and fields contains a Vec of Vec<DefinitionInfoId>
/// where the outer Vec contains an inner Vec for each field of the tagged-union variant,
/// and each inner Vec contains the variables to bind the result of that field to. There
/// can be multiple ids for a single field as a result of combining multiple cases into one,
/// see the DecisionTree type and its completeness checking for more information.
fn bind_pattern_field<'c>(&mut self, value: BasicValueEnum<'g>, field: &[DefinitionInfoId], cache: &mut ModuleCache<'c>) {
for id in field {
let typ = self.follow_bindings(cache.definition_infos[id.0].typ.as_ref().unwrap(), cache);
self.definitions.insert((*id, typ), value);
}
}
/// Performs the union downcast, binding each field of the downcasted variant
    /// to the appropriate DefinitionInfoIds held within the given Case.
fn bind_pattern_fields<'c>(&mut self, case: &Case, matched_value: BasicValueEnum<'g>, cache: &mut ModuleCache<'c>) {
        let variant = self.cast_to_variant_type(matched_value, case, cache);
// There are three cases here:
// 1. The tag is a tagged union tag. In this case, the value is a tuple of (tag, fields...)
// so bind each nth field to the n+1 value in this tuple.
// 2. The tag is a tuple. In this case, bind each nth tuple field to each nth field.
// 3. The tag is a primitive like true/false. In this case there is only 1 "field" and we
// bind it to the entire value.
match &case.tag {
Some(VariantTag::UserDefined(constructor)) => {
let variant = variant.into_struct_value();
// TODO: Stop special casing pairs and allow a 0 offset
// for every product type
let offset = if *constructor == PAIR_ID { 0 } else { 1 };
for (field_no, ids) in case.fields.iter().enumerate() {
let field = self.builder.build_extract_value(variant, offset + field_no as u32, "pattern_extract").unwrap();
self.bind_pattern_field(field, ids, cache);
}
},
_ => {
assert!(case.fields.len() <= 1);
if case.fields.len() == 1 {
self.bind_pattern_field(variant, &case.fields[0], cache);
}
}
}
}
}
| get_constructor_tag | identifier_name |
decisiontree.rs | //! llvm/decisiontree.rs - Defines how to codegen a decision tree
//! via `codegen_tree`. This decisiontree is the result of compiling
//! a match expression into a decisiontree during type inference.
use crate::llvm::{ Generator, CodeGen };
use crate::types::pattern::{ DecisionTree, Case, VariantTag };
use crate::types::{ Type, typed::Typed };
use crate::parser::ast::Match;
use crate::cache::{ ModuleCache, DefinitionInfoId, DefinitionKind };
use crate::nameresolution::builtin::PAIR_ID;
use inkwell::values::{ BasicValueEnum, IntValue, PhiValue };
use inkwell::types::BasicType;
use inkwell::basic_block::BasicBlock;
/// This type alias is used for convenience in codegen_case
/// for adding blocks and values to the switch cases
/// while compiling a given case of a pattern match.
type SwitchCases<'g> = Vec<(IntValue<'g>, BasicBlock<'g>)>;
impl<'g> Generator<'g> {
/// Perform LLVM codegen for the given DecisionTree.
/// This roughly translates the tree into a series of switches and phi nodes.
pub fn codegen_tree<'c>(&mut self, tree: &DecisionTree, match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
let value_to_match = match_expr.expression.codegen(self, cache);
// Each Switch case in the tree works by switching on a given value in a DefinitionInfoId
// then storing each part it extracted into other DefinitionInfoIds and recursing. Thus,
// the initial value needs to be stored in the first id here since before this there was no
// extract and store step that would have set the value beforehand.
if let DecisionTree::Switch(id, _) = tree {
let typ = self.follow_bindings(match_expr.expression.get_type().unwrap(), cache);
self.definitions.insert((*id, typ), value_to_match);
}
let starting_block = self.current_block();
let ending_block = self.insert_into_new_block("match_end");
// Create the phi value to merge the value of all the match branches
let match_type = match_expr.typ.as_ref().unwrap();
let llvm_type = self.convert_type(match_type, cache);
let phi = self.builder.build_phi(llvm_type, "match_result");
// branches may be repeated in the decision tree, so this Vec is used to store the block
// of each branch if it was already codegen'd.
let mut branches: Vec<_> = vec![None; match_expr.branches.len()];
self.builder.position_at_end(starting_block);
// Then codegen the decisiontree itself that will eventually lead to each branch.
self.codegen_subtree(tree, &mut branches, phi, ending_block, match_expr, cache);
self.builder.position_at_end(ending_block);
phi.as_basic_value()
}
/// Recurse on the given DecisionTree, codegening each switch and remembering
/// all the Leaf nodes that have already been compiled, since these may be
/// repeated in the same DecisionTree.
fn codegen_subtree<'c>(&mut self, tree: &DecisionTree, branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>, match_end: BasicBlock<'g>, match_expr: &Match<'c>, cache: &mut ModuleCache<'c>)
{
match tree {
DecisionTree::Leaf(n) => {
// If this leaf has been codegen'd already, branches[n] was already set to Some in codegen_case
match branches[*n] {
Some(_block) => (),
_ => {
self.codegen_branch(&match_expr.branches[*n].1, match_end, cache)
.map(|(branch, value)| phi.add_incoming(&[(&value, branch)]));
}
}
},
DecisionTree::Fail => {
unreachable!("DecisionTree::Fail encountered during DecisionTree codegen. This should have been caught during completeness checking.");
},
DecisionTree::Switch(id, cases) => {
if !cases.is_empty() {
let type_to_switch_on = cache.definition_infos[id.0].typ.as_ref().unwrap();
let type_to_switch_on = self.follow_bindings(type_to_switch_on, cache);
let value_to_switch_on = self.definitions[&(*id, type_to_switch_on)];
let starting_block = self.current_block();
// All llvm switches require an else block, even if this pattern doesn't
// include one. In that case we insert an unreachable instruction.
let else_block = self.codegen_match_else_block(value_to_switch_on,
cases, branches, phi, match_end, match_expr, cache);
let mut switch_cases = vec![];
for case in cases.iter() {
self.codegen_case(case, value_to_switch_on, &mut switch_cases,
branches, phi, match_end, match_expr, cache);
}
self.builder.position_at_end(starting_block);
if cases.len() > 1 {
self.build_switch(value_to_switch_on, else_block, switch_cases);
} else if cases.len() == 1 {
// If we only have 1 case we don't need to test anything, just forcibly
// br to that case. This optimization is necessary for structs since structs
// have no tag to check against.
self.builder.build_unconditional_branch(switch_cases[0].1);
}
}
},
}
}
fn build_switch<'c>(&self,
value_to_switch_on: BasicValueEnum<'g>,
else_block: BasicBlock<'g>,
switch_cases: SwitchCases<'g>)
{
// TODO: Switch to if-else chains over a single switch block.
// Currently this will fail at runtime when attempting to match
// a constructor with a string value after trying to convert it into an
// integer tag value.
let tag = self.extract_tag(value_to_switch_on);
self.builder.build_switch(tag, else_block, &switch_cases);
}
fn codegen_case<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
switch_cases: &mut SwitchCases<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>)
{
// Early out if this is a match-all case. Those should be handled by codegen_match_else_block
let tag = match &case.tag {
Some(tag) => tag,
None => return,
};
// Bind each pattern then codegen the rest of the tree.
// If the rest of the tree is a Leaf that has already been codegen'd we shouldn't compile
// it twice, instead we take its starting block and jump straight to that in the switch case.
let block = match &case.branch {
DecisionTree::Leaf(n) => {
match &branches[*n] {
Some(block) => *block,
None => {
// Codegening the branch also stores its starting_block in branches,
// so we can retrieve it here.
let branch_start = self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache);
branches[*n] = Some(branch_start);
branch_start
}
}
},
_ => self.codegen_case_in_new_block(case,
matched_value, branches, phi, match_end, match_expr, cache)
};
let constructor_tag = self.get_constructor_tag(tag, cache).unwrap();
switch_cases.push((constructor_tag.into_int_value(), block));
}
/// Creates a new llvm::BasicBlock to insert into, then binds the union downcast
/// from the current case, then compiles the rest of the subtree.
fn codegen_case_in_new_block<'c>(&mut self,
case: &Case,
matched_value: BasicValueEnum<'g>,
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let branch_start = self.insert_into_new_block("match_branch");
self.bind_pattern_fields(case, matched_value, cache);
self.codegen_subtree(&case.branch, branches, phi, match_end, match_expr, cache);
branch_start
}
/// Given a tagged union (either { tag: u8, ... } or just (tag: u8)), extract the
/// integer tag component to compare which constructor this value was constructed from.
fn extract_tag(&self, variant: BasicValueEnum<'g>) -> IntValue<'g> {
if variant.is_struct_value() {
self.builder.build_extract_value(variant.into_struct_value(), 0, "tag").unwrap().into_int_value()
} else {
assert!(variant.is_int_value());
variant.into_int_value()
}
}
/// Get the tag value that identifies which constructor this is.
fn get_constructor_tag<'c>(&mut self, tag: &VariantTag, cache: &mut ModuleCache<'c>) -> Option<BasicValueEnum<'g>> {
match tag {
VariantTag::True => Some(self.bool_value(true)),
VariantTag::False => Some(self.bool_value(false)),
VariantTag::Unit => Some(self.unit_value()),
// TODO: Remove pair tag, it shouldn't need one
VariantTag::UserDefined(PAIR_ID) => Some(self.unit_value()),
VariantTag::UserDefined(id) => {
match &cache.definition_infos[id.0].definition {
Some(DefinitionKind::TypeConstructor { tag: Some(tag), .. }) => {
Some(self.tag_value(*tag as u8))
},
_ => None,
}
},
VariantTag::Literal(literal) => Some(literal.codegen(self, cache)),
}
}
fn is_union_constructor<'c>(typ: &Type, cache: &ModuleCache<'c>) -> bool {
use crate::types::Type::*;
match typ {
Primitive(_) => false,
Ref(_) => false,
Function(function) => Self::is_union_constructor(&function.return_type, cache),
TypeApplication(typ, _) => Self::is_union_constructor(typ, cache),
ForAll(_, typ) => Self::is_union_constructor(typ, cache),
UserDefinedType(id) => cache.type_infos[id.0].is_union(),
TypeVariable(_) => unreachable!("Constructors should always have concrete types"),
}
}
    /// Cast the given value to the given tagged-union variant. Returns the
    /// value unchanged if the given VariantTag is not a tagged-union tag.
fn cast_to_variant_type<'c>(&mut self, value: BasicValueEnum<'g>, case: &Case,
cache: &mut ModuleCache<'c>) -> BasicValueEnum<'g>
{
match &case.tag {
Some(VariantTag::UserDefined(id)) => {
let mut field_types = vec![];
let constructor = &cache.definition_infos[id.0];
if Self::is_union_constructor(constructor.typ.as_ref().unwrap(), cache) {
field_types.push(self.tag_type());
}
for field_ids in case.fields.iter() {
let typ = cache.definition_infos[field_ids[0].0].typ.as_ref().unwrap();
field_types.push(self.convert_type(typ, cache));
}
let cast_type = self.context.struct_type(&field_types, false).as_basic_type_enum();
self.reinterpret_cast_llvm_type(value, cast_type)
},
_ => value,
}
}
/// When creating a decision tree, any match all case is always last in the case list.
fn has_match_all_case(&self, cases: &[Case]) -> bool {
        cases.last().unwrap().tag.is_none()
}
/// codegen an else/match-all case of a particular constructor in a DecisionTree.
/// If there is no MatchAll case (represented by a None value for case.tag) then
/// a block is created with an llvm unreachable assertion.
fn codegen_match_else_block<'c>(&mut self,
value_to_switch_on: BasicValueEnum<'g>,
cases: &[Case],
branches: &mut [Option<BasicBlock<'g>>],
phi: PhiValue<'g>,
match_end: BasicBlock<'g>,
match_expr: &Match<'c>,
cache: &mut ModuleCache<'c>) -> BasicBlock<'g>
{
let block = self.insert_into_new_block("match_all");
let last_case = cases.last().unwrap();
// If there's a catch-all case we can codegen the code there. Otherwise if this
// constructor has no catchall the resulting code should be unreachable.
if self.has_match_all_case(cases) {
self.bind_pattern_field(value_to_switch_on, &last_case.fields[0], cache);
self.codegen_subtree(&last_case.branch, branches, phi, match_end, match_expr, cache);
} else {
self.builder.build_unreachable();
}
block
}
/// Each Case in a DecisionTree::Switch contains { tag, fields, branch } where tag
/// is the matched constructor tag and fields contains a Vec of Vec<DefinitionInfoId>
/// where the outer Vec contains an inner Vec for each field of the tagged-union variant,
/// and each inner Vec contains the variables to bind the result of that field to. There
/// can be multiple ids for a single field as a result of combining multiple cases into one,
/// see the DecisionTree type and its completeness checking for more information.
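    /// For example (with illustrative ids), matching a two-field variant
    /// against the pattern `(a, b)` would give `fields == [[a_id], [b_id]]`,
    /// while two merged cases that bind the same field as `x` and `y`
    /// respectively would give `fields == [[x_id, y_id]]`.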
fn bind_pattern_field<'c>(&mut self, value: BasicValueEnum<'g>, field: &[DefinitionInfoId], cache: &mut ModuleCache<'c>) |
/// Performs the union downcast, binding each field of the downcasted variant
    /// to the appropriate DefinitionInfoIds held within the given Case.
fn bind_pattern_fields<'c>(&mut self, case: &Case, matched_value: BasicValueEnum<'g>, cache: &mut ModuleCache<'c>) {
        let variant = self.cast_to_variant_type(matched_value, case, cache);
// There are three cases here:
// 1. The tag is a tagged union tag. In this case, the value is a tuple of (tag, fields...)
// so bind each nth field to the n+1 value in this tuple.
// 2. The tag is a tuple. In this case, bind each nth tuple field to each nth field.
// 3. The tag is a primitive like true/false. In this case there is only 1 "field" and we
// bind it to the entire value.
match &case.tag {
Some(VariantTag::UserDefined(constructor)) => {
let variant = variant.into_struct_value();
// TODO: Stop special casing pairs and allow a 0 offset
// for every product type
let offset = if *constructor == PAIR_ID { 0 } else { 1 };
for (field_no, ids) in case.fields.iter().enumerate() {
let field = self.builder.build_extract_value(variant, offset + field_no as u32, "pattern_extract").unwrap();
self.bind_pattern_field(field, ids, cache);
}
},
_ => {
assert!(case.fields.len() <= 1);
if case.fields.len() == 1 {
self.bind_pattern_field(variant, &case.fields[0], cache);
}
}
}
}
}
| {
for id in field {
let typ = self.follow_bindings(cache.definition_infos[id.0].typ.as_ref().unwrap(), cache);
self.definitions.insert((*id, typ), value);
}
} | identifier_body |
main.rs | // -- SymSpell --
// Explanation at
// https://medium.com/@wolfgarbe/1000x-faster-spelling-correction-algorithm-2012-8701fcd87a5f
// TL;DR, HashTable keys are generated
// from all words + all possible
// permutations of those words with up
// to two deletes, and the data held
// in each key is the correctly spelled
// word (or possible words) with their
// count included to determine which
// of the possible words is more likely.
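// Example (for intuition): with error_distance = 2, inserting "fork"
// also records the one-delete keys "ork", "frk", "fok" and "for", plus
// the two-delete keys derived from those ("or", "rk", ...), each mapping
// back to the correctly spelled word "fork".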
use std::collections::HashMap;
// word == word
// score == word priority // higher number == higher priority
#[derive(Debug, Clone)]
struct Word {
word: String,
score: u64
}
// word_map must be populated after
// Dictionary struct is created.
// word_map is the reference dictionary.
// All entries here are considered correct
// error_map is the compiled list
// of acceptable permutations.
// word_map is searched first for inputs.
// if none are found, then
// error_map is then searched for possible matches
#[derive(Debug, Clone)]
struct Dictionary {
word_map: HashMap<String, Word>,
error_map: HashMap<String, Vec<String>>,
error_distance: u8
}
// UNIMPLEMENTED YET
// only counts in word_map are measured to
// determine the probable match
// only counts in word_map are incremented
// and checked when inserting a new word.
// counts in error_map are ignored.
// only necessary for word_map,
// only word_map requires knowing
// the word score.
// error_map can be Hash<String, Vec<String>>
impl Word {
fn new(word: &str, count: u64) -> Word {
Word {
word: word.to_string(),
score: count
}
}
}
impl Dictionary {
fn new() -> Dictionary {
Dictionary {
word_map: HashMap::new(),
error_map: HashMap::new(),
error_distance: 2
}
}
fn insert(&mut self, word: &str) {
if let Some(x) = self.word_map.get_mut(word) {
x.score += 1;
} else {
self.word_map
.insert(
word.to_string(),
Word::new(word, 1)
);
}
}
fn insert_with_count(&mut self, word: &str, count: u64) {
self.insert(word);
self.word_map
.get_mut(word)
.unwrap()
.score = count;
}
// Permutations
// inserted don't replace the
// existing permutations,
    // they are only
// appended to the existing
// values.
fn insert_with_permutations(&mut self, word: &str) {
if let Some(_x) = self.word_map.get_mut(word) {
self.add_permutations(word);
} else {
self.insert(word);// insert new word.
self.add_permutations(word);
}
}
fn insert_with_permutations_and_count(&mut self, word: &str, count: u64) {
if let Some(x) = self.word_map.get_mut(word) {
x.score = count;
self.add_permutations(word);
} else {
self.insert_with_count(word, count);// insert new word.
self.add_permutations(word);
}
}
fn add_permutations(&mut self, word: &str) |
fn generate_permutations(&self, word: &str) -> Vec<String> {
let mut permutations: Vec<String> = Vec::new();
// Generate permutations of this word
for i in 0..word.len() {
let mut permuted: Vec<char> = word.chars().collect();
permuted.remove(i);
permutations.push(permuted.into_iter().collect::<String>());
}
permutations
}
fn permutations_of(&self, word: &str) -> Vec<String> {
let mut permutation_list: HashMap<String, ()> = HashMap::new();
permutation_list.insert(word.to_string(), ());
for _i in 0..self.error_distance {
for u in permutation_list.clone().keys() {
for o in self.generate_permutations(u) {
permutation_list.insert(o, ());
}
}
}
let mut result: Vec<String> = Vec::new();
for i in permutation_list.keys() {
result.push(i.to_string());
}
result
}
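    // Picks the highest-scoring candidate. Example: given the candidates
    // ["Bell", "Belly"] with scores 32 and 29 (the sample data in the
    // commented-out test code in main below), this returns "Bell".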
fn find_best_match(&self, possibilities: Vec<String>) -> String {
let mut max = 0;
let mut best_match: Option<String> = None;
        for i in possibilities {
let score = self.word_map[&i].score;
if score > max {
max = score;
best_match = Some(i.clone());
}
}
best_match.expect("Nothing matched in iterator... somehow...")
}
fn generate_errors(&mut self) {
let mut result = self.error_map.clone();
// word_map: HashMap<String, Word>
// error_map: HashMap<String, Vec<String>>
let error_map = if self.error_map.is_empty() {
// Word -> Vec<String>
// Word == .word : String
// but Word is behind a HashMap key...
// So we iterate and convert it
// to Vec<String>
let mut words: HashMap<String, Vec<String>> = HashMap::new();
for s in self.word_map.clone().keys() {
words.insert(s.to_string(), vec![s.to_string()]);
}
words // Vec<String>
} else {
self.error_map.clone() // Vec<String>
};
for i in error_map.keys() {
if i.len() > 2 {
for u in 0..i.len() {
let mut permuted: Vec<char> = i.chars().collect();
permuted.remove(u);
let permutation = permuted.into_iter().collect::<String>();
if let Some(x) = result.get_mut(&permutation) {
let mut set: HashMap<String, ()> = HashMap::new();
for w in x.clone() {
set.entry(w.clone()).or_insert(());
}
// for w in error_map.get(i).unwrap().clone() {
// set.entry(w.word).or_insert(());
// }
let mut y: Vec<String> = Vec::new();
for k in set.keys() {
y.push(k.to_string());
}
x.clear();
for v in y {
x.push(v);
}
} else {
result
.entry(permutation)
.or_insert(error_map.get(i).unwrap().clone());
}
}
}
}
self.error_map = result;
}
fn check(&self, word: &str) -> Option<String>{
// Regular functions can't capture their parent scope;
// closures can, which lets `find` borrow `self` and its maps.
let find = |word: &str| -> Option<String> {
if let Some(x) = self.word_map.get(word) {
Some(x.word.clone())
} else if let Some(x) = self.error_map.get(word) {
if x.len() > 1 {
Some(self.find_best_match(x.to_vec()))
} else {
Some(x[0].clone())
}
} else {
None
}
};
if let Some(x) = find(word) {
return Some(x);
}
let mut permutations = vec![word.to_string()];
permutations.extend(self.permutations_of(word));
for v in permutations.clone() {
permutations.extend(self.permutations_of(&v));
}
for i in permutations {
if let Some(x) = find(&i) {
return Some(x);
}
}
return None;
}
}
fn main() {
let mut d = Dictionary::new();
// d.insert_with_permutations("Fork");
// d.insert_with_permutations("Doofus");
// d.insert_with_permutations_and_count("Bell", 32);
// d.insert_with_permutations_and_count("Belly", 29);
// d.insert_with_permutations_and_count("Bellow", 19);
// println!("{:?}", d.generate_permutations("Bell"));
// println!("{:?}", "===");
// println!("{:?}", d.generate_permutations("Belly"));
// println!("{:?}", "===");
// for i in d.word_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// for i in d.error_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// println!("{:?}", d.check("Dofus"));
// println!("{:?}", d.check("Dfus"));
// println!("{:?}", d.check("Doooofus"));
// println!("{:?}", d.check("Dooofus"));
// println!("{:?}", d.check("Forky"));
// println!("{:?}", d.check("Forkyy"));
// println!("{:?}", d.check("Fo"));
// println!("Hello, world!");
// Testing setup
use std::io;
loop {
let mut cmd = String::new();
io::stdin().read_line(&mut cmd).expect("no work... >:(");
if cmd.trim() == "add" {
let mut word = String::new();
let mut value = String::new();
println!("word? ");
io::stdin().read_line(&mut word).expect("no work... >:(");
println!("value? ");
io::stdin().read_line(&mut value).expect("no work... >:(");
d.insert_with_permutations_and_count(word.trim().as_ref(), value.trim().parse().expect("not a number"));
} else {
match d.check(&cmd.trim()) {
Some(x) => println!("Did you mean {}?", x),
_ => println!("Not found :(")
};
}
}
}
| {
// Vec<String>
// Must only contain inserts of
// correct words
let permuted_keys = self.permutations_of(word);
for i in permuted_keys {
// if error key exists
if let Some(x) = self.error_map.get_mut(&i) {
let mut new_set: HashMap<String, ()> = HashMap::new();
// collect vector of existing
// correct possibilities
// into hashmap to prevent
// duplicate entries
for y in x.clone() {
new_set.insert(y, ());
}
// Add the new word to
// list of correct
// possibilities
// at this key
new_set.insert(word.to_string(), ());
x.clear();
for j in new_set.keys() {
x.push(j.to_string());
}
} else {
self.error_map
.insert(
i.clone(),
vec![word.to_string()]
);
}
}
} | identifier_body |
main.rs | // -- SymSpell --
// Explanation at
// https://medium.com/@wolfgarbe/1000x-faster-spelling-correction-algorithm-2012-8701fcd87a5f
// TL;DR, HashTable keys are generated
// from all words + all possible
// permutations of those words with up
// to two deletes, and the data held
// in each key is the correctly spelled
// word (or possible words) with their
// count included to determine which
// of the possible words is more likely.
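// Worked example (added illustration): for "bell" with up to two deletes,
// the generated keys include "bell", "ell", "bll", "bel", "ll", "el", "bl",
// and "be", each mapping back to the correct word "bell"; looking up the
// typo "bll" is then a single HashMap probe instead of an edit-distance scan.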
use std::collections::HashMap;
// word == word
// score == word priority // higher number == higher priority
#[derive(Debug, Clone)]
struct Word {
word: String,
score: u64
}
// word_map must be populated after
// Dictionary struct is created.
// word_map is the reference dictionary.
// All entries here are considered correct
// error_map is the compiled list
// of acceptable permutations.
// word_map is searched first for inputs.
// if none are found, then
// error_map is then searched for possible matches
#[derive(Debug, Clone)]
struct Dictionary {
word_map: HashMap<String, Word>,
error_map: HashMap<String, Vec<String>>,
error_distance: u8
}
// UNIMPLEMENTED YET
// only counts in word_map are measured to
// determine the probable match
// only counts in word_map are incremented
// and checked when inserting a new word.
// counts in error_map are ignored.
// only necessary for word_map,
// only word_map requires knowing
// the word score.
// error_map can be Hash<String, Vec<String>>
impl Word {
fn new(word: &str, count: u64) -> Word {
Word {
word: word.to_string(),
score: count
}
}
}
impl Dictionary {
fn new() -> Dictionary {
Dictionary {
word_map: HashMap::new(),
error_map: HashMap::new(),
error_distance: 2
}
}
fn insert(&mut self, word: &str) {
if let Some(x) = self.word_map.get_mut(word) {
x.score += 1;
} else {
self.word_map
.insert(
word.to_string(),
Word::new(word, 1)
);
}
}
fn insert_with_count(&mut self, word: &str, count: u64) {
self.insert(word);
self.word_map
.get_mut(word)
.unwrap()
.score = count;
}
// Permutations
// inserted don't replace the
// existing permutations,
// they are only
// appended to the existing
// values.
fn insert_with_permutations(&mut self, word: &str) {
if let Some(_x) = self.word_map.get_mut(word) {
self.add_permutations(word);
} else {
self.insert(word);// insert new word.
self.add_permutations(word);
}
}
fn insert_with_permutations_and_count(&mut self, word: &str, count: u64) {
if let Some(x) = self.word_map.get_mut(word) {
x.score = count;
self.add_permutations(word);
} else {
self.insert_with_count(word, count);// insert new word.
self.add_permutations(word);
}
}
fn add_permutations(&mut self, word: &str) {
// Vec<String>
// Must only contain inserts of
// correct words
let permuted_keys = self.permutations_of(word);
for i in permuted_keys {
// if error key exists
if let Some(x) = self.error_map.get_mut(&i) {
let mut new_set: HashMap<String, ()> = HashMap::new();
// collect vector of existing
// correct possibilities
// into hashmap to prevent
// duplicate entries
for y in x.clone() {
new_set.insert(y, ());
}
// Add the new word to
// list of correct
// possibilities
// at this key
new_set.insert(word.to_string(), ());
x.clear();
for j in new_set.keys() {
x.push(j.to_string());
}
} else {
self.error_map
.insert(
i.clone(),
vec![word.to_string()]
);
}
}
}
fn generate_permutations(&self, word: &str) -> Vec<String> {
let mut permutations: Vec<String> = Vec::new();
// Generate all single-character-deletion permutations of this word.
// Iterate over the char count (not the byte length) so multi-byte
// UTF-8 input cannot make `remove` panic.
for i in 0..word.chars().count() {
let mut permuted: Vec<char> = word.chars().collect();
permuted.remove(i);
permutations.push(permuted.into_iter().collect::<String>());
}
permutations
}
fn permutations_of(&self, word: &str) -> Vec<String> {
let mut permutation_list: HashMap<String, ()> = HashMap::new();
permutation_list.insert(word.to_string(), ());
for _i in 0..self.error_distance {
for u in permutation_list.clone().keys() {
for o in self.generate_permutations(u) {
permutation_list.insert(o, ());
}
}
}
let mut result: Vec<String> = Vec::new();
for i in permutation_list.keys() {
result.push(i.to_string());
}
result
}
fn find_best_match(&self, possibilities: Vec<String>) -> String {
let mut max = 0;
let mut best_match: Option<String> = None;
for i in possibilities.clone() {
let score = self.word_map[&i].score;
if score > max {
max = score;
best_match = Some(i.clone());
}
}
best_match.expect("Nothing matched in iterator... somehow...")
}
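// Note (added): each call to generate_errors applies one more round of
// single-character deletes. On the first call error_map is seeded with an
// identity entry per correct word; later calls extend the existing keys,
// merging candidate lists through a throwaway HashMap used as a set.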
fn generate_errors(&mut self) {
let mut result = self.error_map.clone();
// word_map: HashMap<String, Word>
// error_map: HashMap<String, Vec<String>>
let error_map = if self.error_map.is_empty() {
// Word -> Vec<String>
// Word == .word : String
// but Word is behind a HashMap key...
// So we iterate and convert it
// to Vec<String>
let mut words: HashMap<String, Vec<String>> = HashMap::new();
for s in self.word_map.clone().keys() {
words.insert(s.to_string(), vec![s.to_string()]);
}
words // HashMap<String, Vec<String>>
} else {
self.error_map.clone() // HashMap<String, Vec<String>>
};
for i in error_map.keys() {
// Use the char count (not the byte length) so multi-byte UTF-8 keys are safe.
let char_count = i.chars().count();
if char_count > 2 {
for u in 0..char_count {
let mut permuted: Vec<char> = i.chars().collect();
permuted.remove(u);
let permutation = permuted.into_iter().collect::<String>();
if let Some(x) = result.get_mut(&permutation) {
let mut set: HashMap<String, ()> = HashMap::new();
for w in x.clone() {
set.entry(w.clone()).or_insert(());
}
// for w in error_map.get(i).unwrap().clone() {
// set.entry(w.word).or_insert(());
// }
let mut y: Vec<String> = Vec::new();
for k in set.keys() {
y.push(k.to_string());
}
x.clear();
for v in y {
x.push(v);
}
} else {
result
.entry(permutation)
.or_insert(error_map.get(i).unwrap().clone());
}
}
}
}
self.error_map = result;
}
fn check(&self, word: &str) -> Option<String>{
// Regular functions can't capture their parent scope;
// closures can, which lets `find` borrow `self` and its maps.
let find = |word: &str| -> Option<String> {
if let Some(x) = self.word_map.get(word) {
Some(x.word.clone())
} else if let Some(x) = self.error_map.get(word) {
if x.len() > 1 {
Some(self.find_best_match(x.to_vec()))
} else {
Some(x[0].clone())
}
} else {
None
}
};
if let Some(x) = find(word) {
return Some(x);
}
let mut permutations = vec![word.to_string()];
permutations.extend(self.permutations_of(word));
for v in permutations.clone() {
permutations.extend(self.permutations_of(&v));
}
for i in permutations {
if let Some(x) = find(&i) {
return Some(x);
}
}
return None;
}
}
fn main() {
let mut d = Dictionary::new();
// d.insert_with_permutations("Fork");
// d.insert_with_permutations("Doofus");
// d.insert_with_permutations_and_count("Bell", 32);
// d.insert_with_permutations_and_count("Belly", 29);
// d.insert_with_permutations_and_count("Bellow", 19);
// println!("{:?}", d.generate_permutations("Bell"));
// println!("{:?}", "===");
// println!("{:?}", d.generate_permutations("Belly"));
// println!("{:?}", "===");
// for i in d.word_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// for i in d.error_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// println!("{:?}", d.check("Dofus"));
// println!("{:?}", d.check("Dfus"));
// println!("{:?}", d.check("Doooofus"));
// println!("{:?}", d.check("Dooofus"));
// println!("{:?}", d.check("Forky"));
// println!("{:?}", d.check("Forkyy"));
// println!("{:?}", d.check("Fo"));
// println!("Hello, world!");
// Testing setup
use std::io;
loop {
let mut cmd = String::new();
io::stdin().read_line(&mut cmd).expect("no work... >:(");
if cmd.trim() == "add" {
let mut word = String::new();
let mut value = String::new();
println!("word? "); | match d.check(&cmd.trim()) {
Some(x) => println!("Did you mean {}?", x),
_ => println!("Not found :(")
};
}
}
} | io::stdin().read_line(&mut word).expect("no work... >:(");
println!("value? ");
io::stdin().read_line(&mut value).expect("no work... >:(");
d.insert_with_permutations_and_count(word.trim().as_ref(), value.trim().parse().expect("not a number"));
} else { | random_line_split |
main.rs | // -- SymSpell --
// Explanation at
// https://medium.com/@wolfgarbe/1000x-faster-spelling-correction-algorithm-2012-8701fcd87a5f
// TL;DR, HashTable keys are generated
// from all words + all possible
// permutations of those words with up
// to two deletes, and the data held
// in each key is the correctly spelled
// word (or possible words) with their
// count included to determine which
// of the possible words is more likely.
use std::collections::HashMap;
// word == word
// score == word priority // higher number == higher priority
#[derive(Debug, Clone)]
struct Word {
word: String,
score: u64
}
// word_map must be populated after
// Dictionary struct is created.
// word_map is the reference dictionary.
// All entries here are considered correct
// error_map is the compiled list
// of acceptable permutations.
// word_map is searched first for inputs.
// if none are found, then
// error_map is then searched for possible matches
#[derive(Debug, Clone)]
struct Dictionary {
word_map: HashMap<String, Word>,
error_map: HashMap<String, Vec<String>>,
error_distance: u8
}
// UNIMPLEMENTED YET
// only counts in word_map are measured to
// determine the probable match
// only counts in word_map are incremented
// and checked when inserting a new word.
// counts in error_map are ignored.
// only necessary for word_map,
// only word_map requires knowing
// the word score.
// error_map can be Hash<String, Vec<String>>
impl Word {
fn new(word: &str, count: u64) -> Word {
Word {
word: word.to_string(),
score: count
}
}
}
impl Dictionary {
fn | () -> Dictionary {
Dictionary {
word_map: HashMap::new(),
error_map: HashMap::new(),
error_distance: 2
}
}
fn insert(&mut self, word: &str) {
if let Some(x) = self.word_map.get_mut(word) {
x.score += 1;
} else {
self.word_map
.insert(
word.to_string(),
Word::new(word, 1)
);
}
}
fn insert_with_count(&mut self, word: &str, count: u64) {
self.insert(word);
self.word_map
.get_mut(word)
.unwrap()
.score = count;
}
// Permutations
// inserted don't replace the
// existing permutations,
// they are only
// appended to the existing
// values.
fn insert_with_permutations(&mut self, word: &str) {
if let Some(_x) = self.word_map.get_mut(word) {
self.add_permutations(word);
} else {
self.insert(word);// insert new word.
self.add_permutations(word);
}
}
fn insert_with_permutations_and_count(&mut self, word: &str, count: u64) {
if let Some(x) = self.word_map.get_mut(word) {
x.score = count;
self.add_permutations(word);
} else {
self.insert_with_count(word, count);// insert new word.
self.add_permutations(word);
}
}
fn add_permutations(&mut self, word: &str) {
// Vec<String>
// Must only contain inserts of
// correct words
let permuted_keys = self.permutations_of(word);
for i in permuted_keys {
// if error key exists
if let Some(x) = self.error_map.get_mut(&i) {
let mut new_set: HashMap<String, ()> = HashMap::new();
// collect vector of existing
// correct possibilities
// into hashmap to prevent
// duplicate entries
for y in x.clone() {
new_set.insert(y, ());
}
// Add the new word to
// list of correct
// possibilities
// at this key
new_set.insert(word.to_string(), ());
x.clear();
for j in new_set.keys() {
x.push(j.to_string());
}
} else {
self.error_map
.insert(
i.clone(),
vec![word.to_string()]
);
}
}
}
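// Usage sketch (added, hypothetical flow): after
//   dict.insert_with_permutations("bell");
// word_map has a scored entry for "bell" and error_map maps every
// delete-key of "bell" (e.g. "bll", "bel", "ell") back to it.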
fn generate_permutations(&self, word: &str) -> Vec<String> {
let mut permutations: Vec<String> = Vec::new();
// Generate all single-character-deletion permutations of this word.
// Iterate over the char count (not the byte length) so multi-byte
// UTF-8 input cannot make `remove` panic.
for i in 0..word.chars().count() {
let mut permuted: Vec<char> = word.chars().collect();
permuted.remove(i);
permutations.push(permuted.into_iter().collect::<String>());
}
permutations
}
fn permutations_of(&self, word: &str) -> Vec<String> {
let mut permutation_list: HashMap<String, ()> = HashMap::new();
permutation_list.insert(word.to_string(), ());
for _i in 0..self.error_distance {
for u in permutation_list.clone().keys() {
for o in self.generate_permutations(u) {
permutation_list.insert(o, ());
}
}
}
let mut result: Vec<String> = Vec::new();
for i in permutation_list.keys() {
result.push(i.to_string());
}
result
}
fn find_best_match(&self, possibilities: Vec<String>) -> String {
let mut max = 0;
let mut best_match: Option<String> = None;
for i in possibilities.clone() {
let score = self.word_map[&i].score;
if score > max {
max = score;
best_match = Some(i.clone());
}
}
best_match.expect("Nothing matched in iterator... somehow...")
}
fn generate_errors(&mut self) {
let mut result = self.error_map.clone();
// word_map: HashMap<String, Word>
// error_map: HashMap<String, Vec<String>>
let error_map = if self.error_map.is_empty() {
// Word -> Vec<String>
// Word == .word : String
// but Word is behind a HashMap key...
// So we iterate and convert it
// to Vec<String>
let mut words: HashMap<String, Vec<String>> = HashMap::new();
for s in self.word_map.clone().keys() {
words.insert(s.to_string(), vec![s.to_string()]);
}
words // HashMap<String, Vec<String>>
} else {
self.error_map.clone() // HashMap<String, Vec<String>>
};
for i in error_map.keys() {
// Use the char count (not the byte length) so multi-byte UTF-8 keys are safe.
let char_count = i.chars().count();
if char_count > 2 {
for u in 0..char_count {
let mut permuted: Vec<char> = i.chars().collect();
permuted.remove(u);
let permutation = permuted.into_iter().collect::<String>();
if let Some(x) = result.get_mut(&permutation) {
let mut set: HashMap<String, ()> = HashMap::new();
for w in x.clone() {
set.entry(w.clone()).or_insert(());
}
// for w in error_map.get(i).unwrap().clone() {
// set.entry(w.word).or_insert(());
// }
let mut y: Vec<String> = Vec::new();
for k in set.keys() {
y.push(k.to_string());
}
x.clear();
for v in y {
x.push(v);
}
} else {
result
.entry(permutation)
.or_insert(error_map.get(i).unwrap().clone());
}
}
}
}
self.error_map = result;
}
fn check(&self, word: &str) -> Option<String>{
// Regular functions can't capture their parent scope;
// closures can, which lets `find` borrow `self` and its maps.
let find = |word: &str| -> Option<String> {
if let Some(x) = self.word_map.get(word) {
Some(x.word.clone())
} else if let Some(x) = self.error_map.get(word) {
if x.len() > 1 {
Some(self.find_best_match(x.to_vec()))
} else {
Some(x[0].clone())
}
} else {
None
}
};
if let Some(x) = find(word) {
return Some(x);
}
let mut permutations = vec![word.to_string()];
permutations.extend(self.permutations_of(word));
for v in permutations.clone() {
permutations.extend(self.permutations_of(&v));
}
for i in permutations {
if let Some(x) = find(&i) {
return Some(x);
}
}
return None;
}
}
fn main() {
let mut d = Dictionary::new();
// d.insert_with_permutations("Fork");
// d.insert_with_permutations("Doofus");
// d.insert_with_permutations_and_count("Bell", 32);
// d.insert_with_permutations_and_count("Belly", 29);
// d.insert_with_permutations_and_count("Bellow", 19);
// println!("{:?}", d.generate_permutations("Bell"));
// println!("{:?}", "===");
// println!("{:?}", d.generate_permutations("Belly"));
// println!("{:?}", "===");
// for i in d.word_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// for i in d.error_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// println!("{:?}", d.check("Dofus"));
// println!("{:?}", d.check("Dfus"));
// println!("{:?}", d.check("Doooofus"));
// println!("{:?}", d.check("Dooofus"));
// println!("{:?}", d.check("Forky"));
// println!("{:?}", d.check("Forkyy"));
// println!("{:?}", d.check("Fo"));
// println!("Hello, world!");
// Testing setup
use std::io;
loop {
let mut cmd = String::new();
io::stdin().read_line(&mut cmd).expect("no work... >:(");
if cmd.trim() == "add" {
let mut word = String::new();
let mut value = String::new();
println!("word? ");
io::stdin().read_line(&mut word).expect("no work... >:(");
println!("value? ");
io::stdin().read_line(&mut value).expect("no work... >:(");
d.insert_with_permutations_and_count(word.trim().as_ref(), value.trim().parse().expect("not a number"));
} else {
match d.check(&cmd.trim()) {
Some(x) => println!("Did you mean {}?", x),
_ => println!("Not found :(")
};
}
}
}
| new | identifier_name |
main.rs | // -- SymSpell --
// Explanation at
// https://medium.com/@wolfgarbe/1000x-faster-spelling-correction-algorithm-2012-8701fcd87a5f
// TL;DR, HashTable keys are generated
// from all words + all possible
// permutations of those words with up
// to two deletes, and the data held
// in each key is the correctly spelled
// word (or possible words) with their
// count included to determine which
// of the possible words is more likely.
use std::collections::HashMap;
// word == word
// score == word priority // higher number == higher priority
#[derive(Debug, Clone)]
struct Word {
word: String,
score: u64
}
// word_map must be populated after
// Dictionary struct is created.
// word_map is the reference dictionary.
// All entries here are considered correct
// error_map is the compiled list
// of acceptable permutations.
// word_map is searched first for inputs.
// if none are found, then
// error_map is then searched for possible matches
#[derive(Debug, Clone)]
struct Dictionary {
word_map: HashMap<String, Word>,
error_map: HashMap<String, Vec<String>>,
error_distance: u8
}
// UNIMPLEMENTED YET
// only counts in word_map are measured to
// determine the probable match
// only counts in word_map are incremented
// and checked when inserting a new word.
// counts in error_map are ignored.
// only necessary for word_map,
// only word_map requires knowing
// the word score.
// error_map can be Hash<String, Vec<String>>
impl Word {
fn new(word: &str, count: u64) -> Word {
Word {
word: word.to_string(),
score: count
}
}
}
impl Dictionary {
fn new() -> Dictionary {
Dictionary {
word_map: HashMap::new(),
error_map: HashMap::new(),
error_distance: 2
}
}
fn insert(&mut self, word: &str) {
if let Some(x) = self.word_map.get_mut(word) {
x.score += 1;
} else {
self.word_map
.insert(
word.to_string(),
Word::new(word, 1)
);
}
}
fn insert_with_count(&mut self, word: &str, count: u64) {
self.insert(word);
self.word_map
.get_mut(word)
.unwrap()
.score = count;
}
// Permutations
// inserted don't replace the
// existing permutations,
// they are only
// appended to the existing
// values.
fn insert_with_permutations(&mut self, word: &str) {
if let Some(_x) = self.word_map.get_mut(word) {
self.add_permutations(word);
} else {
self.insert(word);// insert new word.
self.add_permutations(word);
}
}
fn insert_with_permutations_and_count(&mut self, word: &str, count: u64) {
if let Some(x) = self.word_map.get_mut(word) {
x.score = count;
self.add_permutations(word);
} else {
self.insert_with_count(word, count);// insert new word.
self.add_permutations(word);
}
}
fn add_permutations(&mut self, word: &str) {
// Vec<String>
// Must only contain inserts of
// correct words
let permuted_keys = self.permutations_of(word);
for i in permuted_keys {
// if error key exists
if let Some(x) = self.error_map.get_mut(&i) {
let mut new_set: HashMap<String, ()> = HashMap::new();
// collect vector of existing
// correct possibilities
// into hashmap to prevent
// duplicate entries
for y in x.clone() {
new_set.insert(y, ());
}
// Add the new word to
// list of correct
// possibilities
// at this key
new_set.insert(word.to_string(), ());
x.clear();
for j in new_set.keys() {
x.push(j.to_string());
}
} else {
self.error_map
.insert(
i.clone(),
vec![word.to_string()]
);
}
}
}
fn generate_permutations(&self, word: &str) -> Vec<String> {
let mut permutations: Vec<String> = Vec::new();
// Generate all single-character-deletion permutations of this word.
// Iterate over the char count (not the byte length) so multi-byte
// UTF-8 input cannot make `remove` panic.
for i in 0..word.chars().count() {
let mut permuted: Vec<char> = word.chars().collect();
permuted.remove(i);
permutations.push(permuted.into_iter().collect::<String>());
}
permutations
}
fn permutations_of(&self, word: &str) -> Vec<String> {
let mut permutation_list: HashMap<String, ()> = HashMap::new();
permutation_list.insert(word.to_string(), ());
for _i in 0..self.error_distance {
for u in permutation_list.clone().keys() {
for o in self.generate_permutations(u) {
permutation_list.insert(o, ());
}
}
}
let mut result: Vec<String> = Vec::new();
for i in permutation_list.keys() {
result.push(i.to_string());
}
result
}
fn find_best_match(&self, possibilities: Vec<String>) -> String {
let mut max = 0;
let mut best_match: Option<String> = None;
for i in possibilities.clone() {
let score = self.word_map[&i].score;
if score > max {
max = score;
best_match = Some(i.clone());
}
}
best_match.expect("Nothing matched in iterator... somehow...")
}
fn generate_errors(&mut self) {
let mut result = self.error_map.clone();
// word_map: HashMap<String, Word>
// error_map: HashMap<String, Vec<String>>
let error_map = if self.error_map.is_empty() {
// Word -> Vec<String>
// Word == .word : String
// but Word is behind a HashMap key...
// So we iterate and convert it
// to Vec<String>
let mut words: HashMap<String, Vec<String>> = HashMap::new();
for s in self.word_map.clone().keys() {
words.insert(s.to_string(), vec![s.to_string()]);
}
words // HashMap<String, Vec<String>>
} else {
self.error_map.clone() // HashMap<String, Vec<String>>
};
for i in error_map.keys() {
// Use the char count (not the byte length) so multi-byte UTF-8 keys are safe.
let char_count = i.chars().count();
if char_count > 2 {
for u in 0..char_count {
let mut permuted: Vec<char> = i.chars().collect();
permuted.remove(u);
let permutation = permuted.into_iter().collect::<String>();
if let Some(x) = result.get_mut(&permutation) {
let mut set: HashMap<String, ()> = HashMap::new();
for w in x.clone() {
set.entry(w.clone()).or_insert(());
}
// for w in error_map.get(i).unwrap().clone() {
// set.entry(w.word).or_insert(());
// }
let mut y: Vec<String> = Vec::new();
for k in set.keys() {
y.push(k.to_string());
}
x.clear();
for v in y {
x.push(v);
}
} else {
result
.entry(permutation)
.or_insert(error_map.get(i).unwrap().clone());
}
}
}
}
self.error_map = result;
}
fn check(&self, word: &str) -> Option<String>{
// Regular functions can't capture their parent scope;
// closures can, which lets `find` borrow `self` and its maps.
let find = |word: &str| -> Option<String> {
if let Some(x) = self.word_map.get(word) | else if let Some(x) = self.error_map.get(word) {
if x.len() > 1 {
Some(self.find_best_match(x.to_vec()))
} else {
Some(x[0].clone())
}
} else {
None
}
};
if let Some(x) = find(word) {
return Some(x);
}
let mut permutations = vec![word.to_string()];
permutations.extend(self.permutations_of(word));
for v in permutations.clone() {
permutations.extend(self.permutations_of(&v));
}
for i in permutations {
if let Some(x) = find(&i) {
return Some(x);
}
}
return None;
}
}
fn main() {
let mut d = Dictionary::new();
// d.insert_with_permutations("Fork");
// d.insert_with_permutations("Doofus");
// d.insert_with_permutations_and_count("Bell", 32);
// d.insert_with_permutations_and_count("Belly", 29);
// d.insert_with_permutations_and_count("Bellow", 19);
// println!("{:?}", d.generate_permutations("Bell"));
// println!("{:?}", "===");
// println!("{:?}", d.generate_permutations("Belly"));
// println!("{:?}", "===");
// for i in d.word_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// for i in d.error_map.clone() {
// println!("{:?}", i);
// }
// println!("");
// println!("{:?}", d.check("Dofus"));
// println!("{:?}", d.check("Dfus"));
// println!("{:?}", d.check("Doooofus"));
// println!("{:?}", d.check("Dooofus"));
// println!("{:?}", d.check("Forky"));
// println!("{:?}", d.check("Forkyy"));
// println!("{:?}", d.check("Fo"));
// println!("Hello, world!");
// Testing setup
use std::io;
loop {
let mut cmd = String::new();
io::stdin().read_line(&mut cmd).expect("no work... >:(");
if cmd.trim() == "add" {
let mut word = String::new();
let mut value = String::new();
println!("word? ");
io::stdin().read_line(&mut word).expect("no work... >:(");
println!("value? ");
io::stdin().read_line(&mut value).expect("no work... >:(");
d.insert_with_permutations_and_count(word.trim().as_ref(), value.trim().parse().expect("not a number"));
} else {
match d.check(&cmd.trim()) {
Some(x) => println!("Did you mean {}?", x),
_ => println!("Not found :(")
};
}
}
}
| {
Some(x.word.clone())
} | conditional_block |
consumer.rs | use super::{
delegate::DelegateMut,
observer::{DelegateObserver, Observer},
utils::modulus,
};
use crate::utils::{slice_assume_init_mut, slice_assume_init_ref, write_uninit_slice};
use core::{iter::Chain, mem::MaybeUninit, ptr, slice};
#[cfg(feature = "std")]
use std::io::{self, Write};
/// Consumer part of ring buffer.
///
/// # Mode
///
/// It can operate in immediate (by default) or postponed mode.
/// Mode can be switched using [`Self::postponed`]/[`Self::into_postponed`] and [`Self::into_immediate`] methods.
///
/// + In immediate mode removed and inserted items are automatically synchronized with the other end.
/// + In postponed mode synchronization occurs only when [`Self::sync`] or [`Self::into_immediate`] is called or when `Self` is dropped.
/// The reason to use postponed mode is that multiple subsequent operations are performed faster due to less frequent cache synchronization.
pub trait Consumer: Observer {
unsafe fn set_read_index(&self, value: usize);
/// Moves `read` pointer by `count` places forward.
///
/// # Safety
///
/// First `count` items in occupied memory must be moved out or dropped.
///
/// Must not be called concurrently.
unsafe fn advance_read_index(&self, count: usize) {
self.set_read_index((self.read_index() + count) % modulus(self));
}
/// Provides a direct access to the ring buffer occupied memory.
/// The difference from [`Self::as_slices`] is that this method provides slices of [`MaybeUninit`], so items may be moved out of slices.
///
/// Returns a pair of slices of stored items, the second one may be empty.
/// Elements with lower indices in the slice are older. The first slice contains older items than the second one.
///
/// # Safety
///
/// All items are initialized. Elements must be removed starting from the beginning of first slice.
/// When all items are removed from the first slice then items must be removed from the beginning of the second slice.
///
/// *This method must be followed by a [`Self::advance_read_index`] call with the number of items removed as its argument.*
/// *No other mutating calls allowed before that.*
fn occupied_slices(&self) -> (&[MaybeUninit<Self::Item>], &[MaybeUninit<Self::Item>]) {
let (first, second) = unsafe { self.unsafe_slices(self.read_index(), self.write_index()) };
(first as &_, second as &_)
}
/// Provides a direct mutable access to the ring buffer occupied memory.
///
/// Same as [`Self::occupied_slices`].
///
/// # Safety
///
/// When some item is replaced with uninitialized value then it must not be read anymore.
unsafe fn occupied_slices_mut(&mut self) -> (&mut [MaybeUninit<Self::Item>], &mut [MaybeUninit<Self::Item>]) {
self.unsafe_slices(self.read_index(), self.write_index())
}
/// Returns a pair of slices which contain, in order, the contents of the ring buffer.
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices();
(slice_assume_init_ref(left), slice_assume_init_ref(right))
}
}
/// Returns a pair of mutable slices which contain, in order, the contents of the ring buffer.
#[inline]
fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices_mut();
(slice_assume_init_mut(left), slice_assume_init_mut(right))
}
}
/// Removes the oldest item from the ring buffer and returns it.
///
/// Returns `None` if the ring buffer is empty.
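///
/// A sketch mirroring the [`Self::skip`] doc example (assuming this crate's `LocalRb`, `Static`, and the producer's `try_push`):
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 2>>::default();
/// assert_eq!(rb.try_push(123), Ok(()));
/// assert_eq!(rb.try_pop(), Some(123));
/// assert_eq!(rb.try_pop(), None);
/// # }
/// ```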
fn try_pop(&mut self) -> Option<Self::Item> {
if !self.is_empty() {
let elem = unsafe { self.occupied_slices().0.get_unchecked(0).assume_init_read() };
unsafe { self.advance_read_index(1) };
Some(elem)
} else {
None
}
}
/// Removes items from the ring buffer and writes them into a slice.
///
/// Returns the number of items removed.
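///
/// A sketch in the style of the [`Self::skip`] example (assuming the producer's `push_slice`):
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<u8, 8>>::default();
/// assert_eq!(rb.push_slice(&[1, 2, 3]), 3);
/// let mut buf = [0u8; 8];
/// assert_eq!(rb.pop_slice(&mut buf), 3);
/// assert_eq!(&buf[..3], &[1, 2, 3]);
/// # }
/// ```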
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
let (left, right) = self.occupied_slices();
let count = if elems.len() < left.len() {
unsafe { write_uninit_slice(elems, left.get_unchecked(..elems.len())) };
elems.len()
} else {
let (left_elems, elems) = elems.split_at_mut(left.len());
unsafe { write_uninit_slice(left_elems, left) };
left.len()
+ if elems.len() < right.len() {
unsafe { write_uninit_slice(elems, right.get_unchecked(..elems.len())) };
elems.len()
} else {
unsafe { write_uninit_slice(elems.get_unchecked_mut(..right.len()), right) };
right.len()
}
};
unsafe { self.advance_read_index(count) };
count
}
fn into_iter(self) -> IntoIter<Self> {
IntoIter::new(self)
}
/// Returns an iterator that removes items one by one from the ring buffer.
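///
/// A sketch (assuming `push_iter` as in the [`Self::skip`] example):
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 4>>::default();
/// assert_eq!(rb.push_iter(0..3), 3);
/// assert_eq!(rb.pop_iter().collect::<Vec<_>>(), vec![0, 1, 2]);
/// # }
/// ```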
fn pop_iter(&mut self) -> PopIter<'_, Self> {
PopIter::new(self)
}
/// Returns a front-to-back iterator containing references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter(&self) -> Iter<'_, Self> {
let (left, right) = self.as_slices();
left.iter().chain(right.iter())
}
/// Returns a front-to-back iterator that returns mutable references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter_mut(&mut self) -> IterMut<'_, Self> {
let (left, right) = self.as_mut_slices();
left.iter_mut().chain(right.iter_mut())
}
/// Removes at most `count` and at least `min(count, Self::len())` items from the buffer and safely drops them.
///
/// If there is no concurrent producer activity then exactly `min(count, Self::len())` items are removed.
///
/// Returns the number of deleted items.
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 8>>::default();
///
/// assert_eq!(rb.push_iter(0..8), 8);
///
/// assert_eq!(rb.skip(4), 4);
/// assert_eq!(rb.skip(8), 4);
/// assert_eq!(rb.skip(4), 0);
/// # }
/// ```
fn skip(&mut self, count: usize) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()).take(count) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let actual_count = usize::min(count, left.len() + right.len());
self.advance_read_index(actual_count);
actual_count
}
}
/// Removes all items from the buffer and safely drops them.
///
/// Returns the number of deleted items.
fn clear(&mut self) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let count = left.len() + right.len();
self.advance_read_index(count);
count
}
}
#[cfg(feature = "std")]
/// Removes at most the first `count` bytes from the ring buffer and writes them into a [`Write`] instance.
/// If `count` is `None` then as many bytes as possible will be written.
///
/// Returns `Ok(n)` if `write` succeeded, where `n` is the number of bytes written.
/// `n == 0` means that either `write` returned zero or the ring buffer is empty.
///
/// If `write` fails then the original error is returned. In this case it is guaranteed that no items were written to the writer.
/// To achieve this we write only one contiguous slice at once, so this call may write fewer than `count` bytes even if the writer is ready to accept more.
fn write_into<S: Write>(&mut self, writer: &mut S, count: Option<usize>) -> io::Result<usize>
where
Self: Consumer<Item = u8>,
{
let (left, _) = self.occupied_slices();
let count = usize::min(count.unwrap_or(left.len()), left.len());
let left_init = unsafe { slice_assume_init_ref(&left[..count]) };
let write_count = writer.write(left_init)?;
assert!(write_count <= count);
unsafe { self.advance_read_index(write_count) };
Ok(write_count)
}
}
pub struct IntoIter<C: Consumer>(C);
impl<C: Consumer> IntoIter<C> {
pub fn new(inner: C) -> Self {
Self(inner)
}
pub fn into_inner(self) -> C {
self.0
}
}
impl<C: Consumer> Iterator for IntoIter<C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.0.try_pop()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.0.occupied_len(), None)
}
}
/// An iterator that removes items from the ring buffer.
pub struct | <'a, C: Consumer> {
target: &'a C,
slices: (&'a [MaybeUninit<C::Item>], &'a [MaybeUninit<C::Item>]),
len: usize,
}
impl<'a, C: Consumer> PopIter<'a, C> {
pub fn new(target: &'a mut C) -> Self {
let slices = target.occupied_slices();
Self {
len: slices.0.len() + slices.1.len(),
slices,
target,
}
}
}
impl<'a, C: Consumer> Iterator for PopIter<'a, C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match self.slices.0.len() {
0 => None,
n => {
let item = unsafe { self.slices.0.get_unchecked(0).assume_init_read() };
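// Shrink the first slice after reading; once it is exhausted,
// promote the second slice so iteration continues across the wrap point.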
if n == 1 {
(self.slices.0, self.slices.1) = (self.slices.1, &[]);
} else {
self.slices.0 = unsafe { self.slices.0.get_unchecked(1..n) };
}
Some(item)
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
impl<'a, C: Consumer> ExactSizeIterator for PopIter<'a, C> {
fn len(&self) -> usize {
self.slices.0.len() + self.slices.1.len()
}
}
impl<'a, C: Consumer> Drop for PopIter<'a, C> {
fn drop(&mut self) {
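// `self.len` holds the initial item count and `self.len()` the items
// still unread, so the difference is exactly how many were consumed.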
unsafe { self.target.advance_read_index(self.len - self.len()) };
}
}
/// Iterator over ring buffer contents.
///
/// *Please do not rely on actual type, it may change in future.*
#[allow(type_alias_bounds)]
pub type Iter<'a, C: Consumer> = Chain<slice::Iter<'a, C::Item>, slice::Iter<'a, C::Item>>;
/// Mutable iterator over ring buffer contents.
///
/// *Please do not rely on actual type, it may change in future.*
#[allow(type_alias_bounds)]
pub type IterMut<'a, C: Consumer> = Chain<slice::IterMut<'a, C::Item>, slice::IterMut<'a, C::Item>>;
#[macro_export]
macro_rules! impl_consumer_traits {
($type:ident $(< $( $param:tt $( : $first_bound:tt $(+ $next_bound:tt )* )? ),+ >)?) => {
#[cfg(feature = "std")]
impl $(< $( $param $( : $first_bound $(+ $next_bound )* )? ),+ >)? std::io::Read for $type $(< $( $param ),+ >)?
where
Self: $crate::traits::Consumer<Item = u8>,
{
fn read(&mut self, buffer: &mut [u8]) -> std::io::Result<usize> {
use $crate::consumer::Consumer;
let n = self.pop_slice(buffer);
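// A zero-byte pop on a non-empty request is reported as `WouldBlock`,
// so callers can tell "no data available yet" apart from end-of-stream.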
if n == 0 && !buffer.is_empty() {
Err(std::io::ErrorKind::WouldBlock.into())
} else {
Ok(n)
}
}
}
};
}
pub trait DelegateConsumer: DelegateObserver + DelegateMut
where
Self::Base: Consumer,
{
}
impl<D: DelegateConsumer> Consumer for D
where
D::Base: Consumer,
{
#[inline]
unsafe fn set_read_index(&self, value: usize) {
self.base().set_read_index(value)
}
#[inline]
unsafe fn advance_read_index(&self, count: usize) {
self.base().advance_read_index(count)
}
#[inline]
fn occupied_slices(&self) -> (&[core::mem::MaybeUninit<Self::Item>], &[core::mem::MaybeUninit<Self::Item>]) {
self.base().occupied_slices()
}
#[inline]
unsafe fn occupied_slices_mut(&mut self) -> (&mut [core::mem::MaybeUninit<Self::Item>], &mut [core::mem::MaybeUninit<Self::Item>]) {
self.base_mut().occupied_slices_mut()
}
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
self.base().as_slices()
}
#[inline]
fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) {
self.base_mut().as_mut_slices()
}
#[inline]
fn try_pop(&mut self) -> Option<Self::Item> {
self.base_mut().try_pop()
}
#[inline]
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
self.base_mut().pop_slice(elems)
}
#[inline]
fn iter(&self) -> Iter<'_, Self> {
self.base().iter()
}
#[inline]
fn iter_mut(&mut self) -> IterMut<'_, Self> {
self.base_mut().iter_mut()
}
#[inline]
fn skip(&mut self, count: usize) -> usize {
self.base_mut().skip(count)
}
#[inline]
fn clear(&mut self) -> usize {
self.base_mut().clear()
}
}
| PopIter | identifier_name |
consumer.rs | use super::{
delegate::DelegateMut,
observer::{DelegateObserver, Observer},
utils::modulus,
};
use crate::utils::{slice_assume_init_mut, slice_assume_init_ref, write_uninit_slice};
use core::{iter::Chain, mem::MaybeUninit, ptr, slice};
#[cfg(feature = "std")]
use std::io::{self, Write};
/// Consumer part of ring buffer.
///
/// # Mode
///
/// It can operate in immediate (by default) or postponed mode.
/// Mode can be switched using [`Self::postponed`]/[`Self::into_postponed`] and [`Self::into_immediate`] methods.
///
/// + In immediate mode removed and inserted items are automatically synchronized with the other end.
/// + In postponed mode synchronization occurs only when [`Self::sync`] or [`Self::into_immediate`] is called or when `Self` is dropped.
/// The reason to use postponed mode is that multiple subsequent operations are performed faster due to less frequent cache synchronization.
pub trait Consumer: Observer {
unsafe fn set_read_index(&self, value: usize);
/// Moves `read` pointer by `count` places forward.
///
/// # Safety
///
/// First `count` items in occupied memory must be moved out or dropped.
///
/// Must not be called concurrently.
unsafe fn advance_read_index(&self, count: usize) {
self.set_read_index((self.read_index() + count) % modulus(self));
}
/// Provides a direct access to the ring buffer occupied memory.
/// The difference from [`Self::as_slices`] is that this method provides slices of [`MaybeUninit`], so items may be moved out of slices.
///
/// Returns a pair of slices of stored items, the second one may be empty.
/// Elements with lower indices in the slice are older. The first slice contains older items than the second one.
///
/// # Safety
///
/// All items are initialized. Elements must be removed starting from the beginning of first slice.
/// When all items are removed from the first slice then items must be removed from the beginning of the second slice.
///
/// *This method must be followed by a [`Self::advance_read_index`] call with the number of items removed as its argument.*
/// *No other mutating calls allowed before that.*
fn occupied_slices(&self) -> (&[MaybeUninit<Self::Item>], &[MaybeUninit<Self::Item>]) {
let (first, second) = unsafe { self.unsafe_slices(self.read_index(), self.write_index()) };
(first as &_, second as &_)
}
/// Provides a direct mutable access to the ring buffer occupied memory.
///
/// Same as [`Self::occupied_slices`].
///
/// # Safety
///
/// When some item is replaced with uninitialized value then it must not be read anymore.
unsafe fn occupied_slices_mut(&mut self) -> (&mut [MaybeUninit<Self::Item>], &mut [MaybeUninit<Self::Item>]) {
self.unsafe_slices(self.read_index(), self.write_index())
}
/// Returns a pair of slices which contain, in order, the contents of the ring buffer.
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices();
(slice_assume_init_ref(left), slice_assume_init_ref(right))
}
}
/// Returns a pair of mutable slices which contain, in order, the contents of the ring buffer.
#[inline]
fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices_mut();
(slice_assume_init_mut(left), slice_assume_init_mut(right))
}
}
/// Removes the oldest item from the ring buffer and returns it.
///
/// Returns `None` if the ring buffer is empty.
fn try_pop(&mut self) -> Option<Self::Item> {
if !self.is_empty() {
let elem = unsafe { self.occupied_slices().0.get_unchecked(0).assume_init_read() };
unsafe { self.advance_read_index(1) };
Some(elem)
} else {
None
}
}
/// Removes items from the ring buffer and writes them into a slice.
///
/// Returns the number of items removed.
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
let (left, right) = self.occupied_slices();
let count = if elems.len() < left.len() {
unsafe { write_uninit_slice(elems, left.get_unchecked(..elems.len())) };
elems.len()
} else {
let (left_elems, elems) = elems.split_at_mut(left.len());
unsafe { write_uninit_slice(left_elems, left) };
left.len()
+ if elems.len() < right.len() {
unsafe { write_uninit_slice(elems, right.get_unchecked(..elems.len())) };
elems.len()
} else {
unsafe { write_uninit_slice(elems.get_unchecked_mut(..right.len()), right) };
right.len()
}
};
unsafe { self.advance_read_index(count) };
count
}
fn into_iter(self) -> IntoIter<Self> {
IntoIter::new(self)
}
/// Returns an iterator that removes items one by one from the ring buffer.
fn pop_iter(&mut self) -> PopIter<'_, Self> {
PopIter::new(self)
}
/// Returns a front-to-back iterator containing references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter(&self) -> Iter<'_, Self> {
let (left, right) = self.as_slices();
left.iter().chain(right.iter())
}
/// Returns a front-to-back iterator that returns mutable references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter_mut(&mut self) -> IterMut<'_, Self> {
let (left, right) = self.as_mut_slices();
left.iter_mut().chain(right.iter_mut())
}
/// Removes at most `count` and at least `min(count, Self::len())` items from the buffer and safely drops them.
///
/// If there is no concurrent producer activity then exactly `min(count, Self::len())` items are removed.
///
/// Returns the number of deleted items.
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 8>>::default();
///
/// assert_eq!(rb.push_iter(0..8), 8);
///
/// assert_eq!(rb.skip(4), 4);
/// assert_eq!(rb.skip(8), 4);
/// assert_eq!(rb.skip(4), 0);
/// # }
/// ```
fn skip(&mut self, count: usize) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()).take(count) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let actual_count = usize::min(count, left.len() + right.len());
self.advance_read_index(actual_count);
actual_count
}
}
/// Removes all items from the buffer and safely drops them.
///
/// Returns the number of deleted items.
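///
/// A sketch in the style of the [`Self::skip`] example (assuming `push_iter` as above):
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 8>>::default();
/// assert_eq!(rb.push_iter(0..4), 4);
/// assert_eq!(rb.clear(), 4);
/// assert!(rb.is_empty());
/// # }
/// ```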
fn clear(&mut self) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let count = left.len() + right.len();
self.advance_read_index(count);
count
}
}
#[cfg(feature = "std")]
/// Removes at most the first `count` bytes from the ring buffer and writes them into a [`Write`] instance.
/// If `count` is `None` then as many bytes as possible will be written.
///
/// Returns `Ok(n)` if `write` succeeded, where `n` is the number of bytes written.
/// `n == 0` means that either `write` returned zero or the ring buffer is empty.
///
/// If `write` fails then the original error is returned. In this case it is guaranteed that no items were written to the writer.
/// To achieve this we write only one contiguous slice at once, so this call may write fewer than `count` bytes even if the writer is ready to accept more.
fn write_into<S: Write>(&mut self, writer: &mut S, count: Option<usize>) -> io::Result<usize>
where
Self: Consumer<Item = u8>,
{
let (left, _) = self.occupied_slices();
let count = usize::min(count.unwrap_or(left.len()), left.len());
let left_init = unsafe { slice_assume_init_ref(&left[..count]) };
let write_count = writer.write(left_init)?;
assert!(write_count <= count);
unsafe { self.advance_read_index(write_count) };
Ok(write_count)
}
}
pub struct IntoIter<C: Consumer>(C);
impl<C: Consumer> IntoIter<C> {
pub fn new(inner: C) -> Self {
Self(inner)
}
pub fn into_inner(self) -> C {
self.0
}
}
impl<C: Consumer> Iterator for IntoIter<C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.0.try_pop()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.0.occupied_len(), None)
}
}
/// An iterator that removes items from the ring buffer.
pub struct PopIter<'a, C: Consumer> {
target: &'a C,
slices: (&'a [MaybeUninit<C::Item>], &'a [MaybeUninit<C::Item>]),
len: usize,
}
impl<'a, C: Consumer> PopIter<'a, C> {
pub fn new(target: &'a mut C) -> Self {
let slices = target.occupied_slices();
Self {
len: slices.0.len() + slices.1.len(),
slices,
target,
}
}
}
impl<'a, C: Consumer> Iterator for PopIter<'a, C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match self.slices.0.len() {
0 => None,
n => {
let item = unsafe { self.slices.0.get_unchecked(0).assume_init_read() };
if n == 1 {
(self.slices.0, self.slices.1) = (self.slices.1, &[]);
} else {
self.slices.0 = unsafe { self.slices.0.get_unchecked(1..n) };
}
Some(item)
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
impl<'a, C: Consumer> ExactSizeIterator for PopIter<'a, C> {
fn len(&self) -> usize {
self.slices.0.len() + self.slices.1.len()
}
}
impl<'a, C: Consumer> Drop for PopIter<'a, C> {
fn drop(&mut self) {
unsafe { self.target.advance_read_index(self.len - self.len()) };
}
}
/// Iterator over ring buffer contents.
///
/// *Please do not rely on actual type, it may change in future.*
#[allow(type_alias_bounds)]
pub type Iter<'a, C: Consumer> = Chain<slice::Iter<'a, C::Item>, slice::Iter<'a, C::Item>>;
/// Mutable iterator over ring buffer contents.
///
/// *Please do not rely on actual type, it may change in future.*
#[allow(type_alias_bounds)]
pub type IterMut<'a, C: Consumer> = Chain<slice::IterMut<'a, C::Item>, slice::IterMut<'a, C::Item>>;
#[macro_export]
macro_rules! impl_consumer_traits {
($type:ident $(< $( $param:tt $( : $first_bound:tt $(+ $next_bound:tt )* )? ),+ >)?) => {
#[cfg(feature = "std")]
impl $(< $( $param $( : $first_bound $(+ $next_bound )* )? ),+ >)? std::io::Read for $type $(< $( $param ),+ >)?
where
Self: $crate::traits::Consumer<Item = u8>,
{
fn read(&mut self, buffer: &mut [u8]) -> std::io::Result<usize> {
use $crate::consumer::Consumer;
let n = self.pop_slice(buffer);
if n == 0 && !buffer.is_empty() {
Err(std::io::ErrorKind::WouldBlock.into())
} else {
Ok(n)
}
}
}
};
}
pub trait DelegateConsumer: DelegateObserver + DelegateMut
where
Self::Base: Consumer,
{
}
impl<D: DelegateConsumer> Consumer for D
where
D::Base: Consumer,
{
#[inline]
unsafe fn set_read_index(&self, value: usize) {
self.base().set_read_index(value)
}
#[inline]
unsafe fn advance_read_index(&self, count: usize) {
self.base().advance_read_index(count)
}
#[inline]
fn occupied_slices(&self) -> (&[core::mem::MaybeUninit<Self::Item>], &[core::mem::MaybeUninit<Self::Item>]) {
self.base().occupied_slices()
}
#[inline]
unsafe fn occupied_slices_mut(&mut self) -> (&mut [core::mem::MaybeUninit<Self::Item>], &mut [core::mem::MaybeUninit<Self::Item>]) {
self.base_mut().occupied_slices_mut()
}
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
self.base().as_slices()
}
#[inline]
fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) { |
#[inline]
fn try_pop(&mut self) -> Option<Self::Item> {
self.base_mut().try_pop()
}
#[inline]
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
self.base_mut().pop_slice(elems)
}
#[inline]
fn iter(&self) -> Iter<'_, Self> {
self.base().iter()
}
#[inline]
fn iter_mut(&mut self) -> IterMut<'_, Self> {
self.base_mut().iter_mut()
}
#[inline]
fn skip(&mut self, count: usize) -> usize {
self.base_mut().skip(count)
}
#[inline]
fn clear(&mut self) -> usize {
self.base_mut().clear()
}
} | self.base_mut().as_mut_slices()
} | random_line_split |
consumer.rs | use super::{
delegate::DelegateMut,
observer::{DelegateObserver, Observer},
utils::modulus,
};
use crate::utils::{slice_assume_init_mut, slice_assume_init_ref, write_uninit_slice};
use core::{iter::Chain, mem::MaybeUninit, ptr, slice};
#[cfg(feature = "std")]
use std::io::{self, Write};
/// Consumer part of ring buffer.
///
/// # Mode
///
/// It can operate in immediate (by default) or postponed mode.
/// Mode can be switched using [`Self::postponed`]/[`Self::into_postponed`] and [`Self::into_immediate`] methods.
///
/// + In immediate mode removed and inserted items are automatically synchronized with the other end.
/// + In postponed mode synchronization occurs only when [`Self::sync`] or [`Self::into_immediate`] is called or when `Self` is dropped.
/// The reason to use postponed mode is that multiple subsequent operations are performed faster due to less frequent cache synchronization.
pub trait Consumer: Observer {
unsafe fn set_read_index(&self, value: usize);
/// Moves `read` pointer by `count` places forward.
///
/// # Safety
///
/// First `count` items in occupied memory must be moved out or dropped.
///
/// Must not be called concurrently.
unsafe fn advance_read_index(&self, count: usize) {
self.set_read_index((self.read_index() + count) % modulus(self));
}
/// Provides a direct access to the ring buffer occupied memory.
/// The difference from [`Self::as_slices`] is that this method provides slices of [`MaybeUninit`], so items may be moved out of slices.
///
/// Returns a pair of slices of stored items, the second one may be empty.
/// Elements with lower indices in the slice are older. The first slice contains older items than the second one.
///
/// # Safety
///
/// All items are initialized. Elements must be removed starting from the beginning of first slice.
/// When all items are removed from the first slice then items must be removed from the beginning of the second slice.
///
/// *This method must be followed by a [`Self::advance_read_index`] call with the number of items removed as its argument.*
/// *No other mutating calls allowed before that.*
fn occupied_slices(&self) -> (&[MaybeUninit<Self::Item>], &[MaybeUninit<Self::Item>]) {
let (first, second) = unsafe { self.unsafe_slices(self.read_index(), self.write_index()) };
(first as &_, second as &_)
}
/// Provides a direct mutable access to the ring buffer occupied memory.
///
/// Same as [`Self::occupied_slices`].
///
/// # Safety
///
/// When some item is replaced with uninitialized value then it must not be read anymore.
unsafe fn occupied_slices_mut(&mut self) -> (&mut [MaybeUninit<Self::Item>], &mut [MaybeUninit<Self::Item>]) {
self.unsafe_slices(self.read_index(), self.write_index())
}
/// Returns a pair of slices which contain, in order, the contents of the ring buffer.
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices();
(slice_assume_init_ref(left), slice_assume_init_ref(right))
}
}
/// Returns a pair of mutable slices which contain, in order, the contents of the ring buffer.
#[inline]
fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) {
unsafe {
let (left, right) = self.occupied_slices_mut();
(slice_assume_init_mut(left), slice_assume_init_mut(right))
}
}
/// Removes the oldest item from the ring buffer and returns it.
///
/// Returns `None` if the ring buffer is empty.
fn try_pop(&mut self) -> Option<Self::Item> {
if !self.is_empty() {
let elem = unsafe { self.occupied_slices().0.get_unchecked(0).assume_init_read() };
unsafe { self.advance_read_index(1) };
Some(elem)
} else {
None
}
}
/// Removes items from the ring buffer and writes them into a slice.
///
/// Returns the number of items removed.
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
let (left, right) = self.occupied_slices();
let count = if elems.len() < left.len() {
unsafe { write_uninit_slice(elems, left.get_unchecked(..elems.len())) };
elems.len()
} else {
let (left_elems, elems) = elems.split_at_mut(left.len());
unsafe { write_uninit_slice(left_elems, left) };
left.len()
+ if elems.len() < right.len() {
unsafe { write_uninit_slice(elems, right.get_unchecked(..elems.len())) };
elems.len()
} else |
};
unsafe { self.advance_read_index(count) };
count
}
fn into_iter(self) -> IntoIter<Self> {
IntoIter::new(self)
}
/// Returns an iterator that removes items one by one from the ring buffer.
fn pop_iter(&mut self) -> PopIter<'_, Self> {
PopIter::new(self)
}
/// Returns a front-to-back iterator containing references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter(&self) -> Iter<'_, Self> {
let (left, right) = self.as_slices();
left.iter().chain(right.iter())
}
/// Returns a front-to-back iterator that returns mutable references to items in the ring buffer.
///
/// This iterator does not remove items out of the ring buffer.
fn iter_mut(&mut self) -> IterMut<'_, Self> {
let (left, right) = self.as_mut_slices();
left.iter_mut().chain(right.iter_mut())
}
/// Removes at most `count` and at least `min(count, Self::len())` items from the buffer and safely drops them.
///
/// If there is no concurrent producer activity then exactly `min(count, Self::len())` items are removed.
///
/// Returns the number of deleted items.
///
/// ```
/// # extern crate ringbuf;
/// # use ringbuf::{LocalRb, storage::Static, traits::*};
/// # fn main() {
/// let mut rb = LocalRb::<Static<i32, 8>>::default();
///
/// assert_eq!(rb.push_iter(0..8), 8);
///
/// assert_eq!(rb.skip(4), 4);
/// assert_eq!(rb.skip(8), 4);
/// assert_eq!(rb.skip(4), 0);
/// # }
/// ```
fn skip(&mut self, count: usize) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()).take(count) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let actual_count = usize::min(count, left.len() + right.len());
self.advance_read_index(actual_count);
actual_count
}
}
/// Removes all items from the buffer and safely drops them.
///
/// Returns the number of deleted items.
fn clear(&mut self) -> usize {
unsafe {
let (left, right) = self.occupied_slices_mut();
for elem in left.iter_mut().chain(right.iter_mut()) {
ptr::drop_in_place(elem.as_mut_ptr());
}
let count = left.len() + right.len();
self.advance_read_index(count);
count
}
}
#[cfg(feature = "std")]
/// Removes at most the first `count` bytes from the ring buffer and writes them into a [`Write`] instance.
/// If `count` is `None` then as many bytes as possible will be written.
///
/// Returns `Ok(n)` if `write` succeeded, where `n` is the number of bytes written.
/// `n == 0` means that either `write` returned zero or the ring buffer is empty.
///
/// If `write` fails then the original error is returned. In this case it is guaranteed that no items were written to the writer.
/// To achieve this we write only one contiguous slice at once, so this call may write fewer than `count` bytes even if the writer is ready to accept more.
fn write_into<S: Write>(&mut self, writer: &mut S, count: Option<usize>) -> io::Result<usize>
where
Self: Consumer<Item = u8>,
{
let (left, _) = self.occupied_slices();
let count = usize::min(count.unwrap_or(left.len()), left.len());
let left_init = unsafe { slice_assume_init_ref(&left[..count]) };
let write_count = writer.write(left_init)?;
assert!(write_count <= count);
unsafe { self.advance_read_index(write_count) };
Ok(write_count)
}
}
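/// An iterator that consumes the consumer half and removes items one by one via
/// [`Consumer::try_pop`] (see the `Iterator` impl below).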
pub struct IntoIter<C: Consumer>(C);
impl<C: Consumer> IntoIter<C> {
pub fn new(inner: C) -> Self {
Self(inner)
}
pub fn into_inner(self) -> C {
self.0
}
}
impl<C: Consumer> Iterator for IntoIter<C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.0.try_pop()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.0.occupied_len(), None)
}
}
/// An iterator that removes items from the ring buffer.
pub struct PopIter<'a, C: Consumer> {
target: &'a C,
slices: (&'a [MaybeUninit<C::Item>], &'a [MaybeUninit<C::Item>]),
len: usize,
}
impl<'a, C: Consumer> PopIter<'a, C> {
pub fn new(target: &'a mut C) -> Self {
let slices = target.occupied_slices();
Self {
len: slices.0.len() + slices.1.len(),
slices,
target,
}
}
}
impl<'a, C: Consumer> Iterator for PopIter<'a, C> {
type Item = C::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match self.slices.0.len() {
0 => None,
n => {
let item = unsafe { self.slices.0.get_unchecked(0).assume_init_read() };
if n == 1 {
(self.slices.0, self.slices.1) = (self.slices.1, &[]);
} else {
self.slices.0 = unsafe { self.slices.0.get_unchecked(1..n) };
}
Some(item)
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
}
impl<'a, C: Consumer> ExactSizeIterator for PopIter<'a, C> {
fn len(&self) -> usize {
self.slices.0.len() + self.slices.1.len()
}
}
impl<'a, C: Consumer> Drop for PopIter<'a, C> {
fn drop(&mut self) {
unsafe { self.target.advance_read_index(self.len - self.len()) };
}
}
/// Iterator over ring buffer contents.
///
/// *Please do not rely on the actual type; it may change in the future.*
#[allow(type_alias_bounds)]
pub type Iter<'a, C: Consumer> = Chain<slice::Iter<'a, C::Item>, slice::Iter<'a, C::Item>>;
/// Mutable iterator over ring buffer contents.
///
/// *Please do not rely on the actual type; it may change in the future.*
#[allow(type_alias_bounds)]
pub type IterMut<'a, C: Consumer> = Chain<slice::IterMut<'a, C::Item>, slice::IterMut<'a, C::Item>>;
#[macro_export]
macro_rules! impl_consumer_traits {
($type:ident $(< $( $param:tt $( : $first_bound:tt $(+ $next_bound:tt )* )? ),+ >)?) => {
#[cfg(feature = "std")]
impl $(< $( $param $( : $first_bound $(+ $next_bound )* )? ),+ >)? std::io::Read for $type $(< $( $param ),+ >)?
where
Self: $crate::traits::Consumer<Item = u8>,
{
fn read(&mut self, buffer: &mut [u8]) -> std::io::Result<usize> {
use $crate::consumer::Consumer;
let n = self.pop_slice(buffer);
if n == 0 && !buffer.is_empty() {
Err(std::io::ErrorKind::WouldBlock.into())
} else {
Ok(n)
}
}
}
};
}
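// A usage sketch for the macro above, with a hypothetical `MyCons` type that
// already implements `Consumer<Item = u8>`:
//
//     impl_consumer_traits!(MyCons);
//
// The generated `std::io::Read::read` drains bytes via `pop_slice` and reports an
// empty ring buffer (with a non-empty destination) as `ErrorKind::WouldBlock`.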
pub trait DelegateConsumer: DelegateObserver + DelegateMut
where
Self::Base: Consumer,
{
}
impl<D: DelegateConsumer> Consumer for D
where
D::Base: Consumer,
{
#[inline]
unsafe fn set_read_index(&self, value: usize) {
self.base().set_read_index(value)
}
#[inline]
unsafe fn advance_read_index(&self, count: usize) {
self.base().advance_read_index(count)
}
#[inline]
fn occupied_slices(&self) -> (&[core::mem::MaybeUninit<Self::Item>], &[core::mem::MaybeUninit<Self::Item>]) {
self.base().occupied_slices()
}
#[inline]
unsafe fn occupied_slices_mut(&mut self) -> (&mut [core::mem::MaybeUninit<Self::Item>], &mut [core::mem::MaybeUninit<Self::Item>]) {
self.base_mut().occupied_slices_mut()
}
#[inline]
fn as_slices(&self) -> (&[Self::Item], &[Self::Item]) {
self.base().as_slices()
}
#[inline]
fn as_mut_slices(&mut self) -> (&mut [Self::Item], &mut [Self::Item]) {
self.base_mut().as_mut_slices()
}
#[inline]
fn try_pop(&mut self) -> Option<Self::Item> {
self.base_mut().try_pop()
}
#[inline]
fn pop_slice(&mut self, elems: &mut [Self::Item]) -> usize
where
Self::Item: Copy,
{
self.base_mut().pop_slice(elems)
}
#[inline]
fn iter(&self) -> Iter<'_, Self> {
self.base().iter()
}
#[inline]
fn iter_mut(&mut self) -> IterMut<'_, Self> {
self.base_mut().iter_mut()
}
#[inline]
fn skip(&mut self, count: usize) -> usize {
self.base_mut().skip(count)
}
#[inline]
fn clear(&mut self) -> usize {
self.base_mut().clear()
}
}
v4.rs
//---------------------------------------------------------------------------//
// Copyright (c) 2017-2023 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
/*!
Module with all the code to interact with Schemas.
This module contains all the code related to the schemas used by this lib to decode many PackedFile types.
The basic structure of a `Schema` is:
```ignore
(
version: 3,
versioned_files: [
DB("_kv_battle_ai_ability_usage_variables_tables", [
(
version: 0,
fields: [
(
name: "key",
field_type: StringU8,
is_key: true,
default_value: None,
max_length: 0,
is_filename: false,
filename_relative_path: None,
is_reference: None,
lookup: None,
description: "",
ca_order: -1,
is_bitwise: 0,
enum_values: {},
),
(
name: "value",
field_type: F32,
is_key: false,
default_value: None,
max_length: 0,
is_filename: false,
filename_relative_path: None,
is_reference: None,
lookup: None,
description: "",
ca_order: -1,
is_bitwise: 0,
enum_values: {},
),
],
localised_fields: [],
),
]),
],
)
```
Inside the schema there are `VersionedFile` variants of different types, each with a `Vec` of `Definition`, one for each supported version of that PackedFile.
!*/
use rayon::prelude::*;
use ron::de::from_bytes;
use serde_derive::{Serialize, Deserialize};
use std::collections::{BTreeMap, HashMap};
use std::fs::File;
use std::io::{BufReader, Read};
use std::path::Path;
use crate::error::Result;
use crate::schema::Schema as SchemaV5;
use crate::schema::Definition as DefinitionV5;
use crate::schema::FieldType as FieldTypeV5;
use crate::schema::Field as FieldV5;
//---------------------------------------------------------------------------//
// Enum & Structs
//---------------------------------------------------------------------------//
/// This struct represents a Schema File in memory, ready to be used to decode versioned PackedFiles.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct SchemaV4 {
/// It stores the structural version of the Schema.
version: u16,
/// It stores the versioned files inside the Schema.
versioned_files: Vec<VersionedFileV4>
}
/// This enum defines all types of versioned files that the schema system supports.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub enum VersionedFileV4 {
/// It stores a `Vec<Definition>` with the definitions for each version of AnimFragment files decoded.
AnimFragment(Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` with the definitions for each version of AnimTable files decoded.
AnimTable(Vec<DefinitionV4>),
/// It stores the name of the table, and a `Vec<Definition>` with the definitions for each version of that table decoded.
DB(String, Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` to decode the dependencies of a PackFile.
DepManager(Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` with the definitions for each version of Loc files decoded (currently, only version `1`).
Loc(Vec<DefinitionV4>),
/// It stores a `Vec<Definition>` with the definitions for each version of MatchedCombat files decoded.
MatchedCombat(Vec<DefinitionV4>),
}
/// This struct contains all the data needed to decode a specific version of a versioned PackedFile.
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug, Default, Serialize, Deserialize)]
pub struct DefinitionV4 {
/// The version of the PackedFile the definition is for. These versions are:
/// - `-1`: for fake `Definition`, used for dependency resolving stuff.
/// - `0`: for unversioned PackedFiles.
/// - `1+`: for versioned PackedFiles.
version: i32,
/// This is a collection of all `Field`s the PackedFile uses, in the order it uses them.
fields: Vec<FieldV4>,
/// This is a list of all the fields from this definition that are moved to a Loc PackedFile on exporting.
localised_fields: Vec<FieldV4>,
}
/// This struct holds all the relevant data to properly decode a field from a versioned PackedFile.
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug, Serialize, Deserialize)]
pub struct FieldV4 {
/// Name of the field. Should contain no spaces, using `_` instead.
pub name: String,
/// Type of the field.
pub field_type: FieldTypeV4,
/// `True` if the field is a `Key` field of a table. `False` otherwise.
pub is_key: bool,
/// The default value of the field.
pub default_value: Option<String>,
/// If the field's data corresponds to a filename.
pub is_filename: bool,
/// Path where the file in the data of the field can be, if it's restricted to one path.
pub filename_relative_path: Option<String>,
/// `Some(referenced_table, referenced_column)` if the field is referencing another table/column. `None` otherwise.
pub is_reference: Option<(String, String)>,
/// `Some(referenced_columns)` if the field uses one or more columns from the referenced table for lookup values.
pub lookup: Option<Vec<String>>,
/// Explanatory description of what the field is for.
pub description: String,
/// Visual position in CA's Table. `-1` means we don't know its position.
pub ca_order: i16,
/// Tells whether this column is a bitwise column (spread across multiple columns) or not. Only applicable to numeric fields.
pub is_bitwise: i32,
/// Variable that specifies the "Enum" values for each value in this field.
pub enum_values: BTreeMap<i32, String>,
/// If the field is part of a 3-part RGB column set, and which one (R, G or B) it is.
pub is_part_of_colour: Option<u8>,
}
/// This enum defines every type of field the lib can encode/decode.
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug, Serialize, Deserialize)]
pub enum FieldTypeV4 {
Boolean,
F32,
F64,
I16,
I32,
I64,
ColourRGB,
StringU8,
StringU16,
OptionalStringU8,
OptionalStringU16,
SequenceU16(Box<DefinitionV4>),
SequenceU32(Box<DefinitionV4>)
}
/// This struct represents a bunch of Schema Patches in memory.
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Default)]
pub struct SchemaPatches {
/// It stores the patches split by games.
patches: HashMap<String, SchemaPatch>
}
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Default)]
pub struct SchemaPatch{
/// It stores a list of per-table, per-column patches.
tables: HashMap<String, HashMap<String, HashMap<String, String>>>,
}
//---------------------------------------------------------------------------//
// Enum & Structs Implementations
//---------------------------------------------------------------------------//
/// Implementation of `SchemaV4`.
impl SchemaV4 {
/// This function loads a `Schema` to memory from a file in the `schemas/` folder.
pub fn load(path: &Path) -> Result<Self> {
let mut file = BufReader::new(File::open(path)?);
let mut data = Vec::with_capacity(file.get_ref().metadata()?.len() as usize);
file.read_to_end(&mut data)?;
from_bytes(&data).map_err(From::from)
}
/// This function tries to update the Schema at the provided Path to a more recent format.
pub fn update(schema_path: &Path, patches_path: &Path, game_name: &str) -> Result<()> {
let schema_legacy = Self::load(schema_path)?;
let mut schema = SchemaV5::from(&schema_legacy);
// Fix for empty dependencies, again.
schema.definitions.par_iter_mut().for_each(|(table_name, definitions)| {
definitions.iter_mut().for_each(|definition| {
definition.fields.iter_mut().for_each(|field| {
if let Some((ref_table, ref_column)) = field.is_reference(None) {
if ref_table.trim().is_empty() || ref_column.trim().is_empty() {
dbg!(&table_name);
dbg!(field.name());
field.is_reference = None;
}
}
})
})
});
let schema_patches = SchemaPatches::load(patches_path);
if let Ok(schema_patches) = schema_patches {
if let Some(patches) = schema_patches.patches.get(game_name) {
schema.patches = patches.tables.clone();
}
}
// Disable saving until 4.0 releases.
schema.save(schema_path)?;
Ok(())
}
}
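// A minimal usage sketch for the migration above (file names and game key are
// hypothetical; the real callers live elsewhere in RPFM):
//
//     use std::path::Path;
//
//     SchemaV4::update(
//         Path::new("schemas/schema_wh3.ron"),
//         Path::new("schemas/patches.ron"),
//         "warhammer_3",
//     )?;
//
// This loads the v4 schema, converts it to the v5 layout via the `From` impls
// below, merges any per-game patches, and saves the result back to the same path.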
/// Implementation of `Definition`.
impl DefinitionV4 {
/// This function creates a new empty `Definition` for the version provided.
pub fn new(version: i32) -> DefinitionV4 {
DefinitionV4 {
version,
localised_fields: vec![],
fields: vec![],
}
}
/// This function returns the version of the provided definition.
pub fn version(&self) -> i32 {
self.version
}
/// This function returns a mutable reference to the list of fields in the definition.
pub fn fields_mut(&mut self) -> &mut Vec<FieldV4> {
&mut self.fields
}
/// This function returns the localised fields of the provided definition
pub fn localised_fields_mut(&mut self) -> &mut Vec<FieldV4> {
&mut self.localised_fields
}
}
/// Default implementation of `FieldV4`.
impl Default for FieldV4 {
fn default() -> Self {
Self {
name: String::from("new_field"),
field_type: FieldTypeV4::StringU8,
is_key: false,
default_value: None,
is_filename: false,
filename_relative_path: None,
is_reference: None,
lookup: None,
description: String::from(""),
ca_order: -1,
is_bitwise: 0,
enum_values: BTreeMap::new(),
is_part_of_colour: None,
}
}
}
/// Default implementation of `SchemaV4`.
impl Default for SchemaV4 {
fn default() -> Self {
Self {
version: 3,
versioned_files: vec![]
}
}
}
impl From<&SchemaV4> for SchemaV5 {
fn from(legacy_schema: &SchemaV4) -> Self {
let mut schema = Self::default();
legacy_schema.versioned_files.iter()
.filter_map(|versioned| if let VersionedFileV4::DB(name, definitions) = versioned { Some((name, definitions)) } else { None })
.for_each(|(name, definitions)| {
definitions.iter().for_each(|definition| {
schema.add_definition(name, &From::from(definition));
})
});
schema
}
}
impl From<&DefinitionV4> for DefinitionV5 {
fn from(legacy_table_definition: &DefinitionV4) -> Self {
let mut definition = Self::new(legacy_table_definition.version, None);
let fields = legacy_table_definition.fields.iter().map(From::from).collect::<Vec<FieldV5>>();
definition.set_fields(fields);
let fields = legacy_table_definition.localised_fields.iter().map(From::from).collect::<Vec<FieldV5>>();
definition.set_localised_fields(fields);
definition
}
}
impl From<&FieldV4> for FieldV5 {
fn from(legacy_field: &FieldV4) -> Self {
Self {
name: legacy_field.name.to_owned(),
field_type: From::from(&legacy_field.field_type),
is_key: legacy_field.is_key,
default_value: legacy_field.default_value.clone(),
is_filename: legacy_field.is_filename,
filename_relative_path: legacy_field.filename_relative_path.clone(),
is_reference: legacy_field.is_reference.clone(),
lookup: legacy_field.lookup.clone(),
description: legacy_field.description.to_owned(),
ca_order: legacy_field.ca_order,
..Default::default()
}
}
}
impl From<&FieldTypeV4> for FieldTypeV5 {
fn from(legacy_field_type: &FieldTypeV4) -> Self {
match legacy_field_type {
FieldTypeV4::Boolean => Self::Boolean,
FieldTypeV4::I16 => Self::I16,
FieldTypeV4::I32 => Self::I32,
FieldTypeV4::I64 => Self::I64,
FieldTypeV4::F32 => Self::F32,
FieldTypeV4::F64 => Self::F64,
FieldTypeV4::ColourRGB => Self::ColourRGB,
FieldTypeV4::StringU8 => Self::StringU8,
FieldTypeV4::StringU16 => Self::StringU16,
FieldTypeV4::OptionalStringU8 => Self::OptionalStringU8,
FieldTypeV4::OptionalStringU16 => Self::OptionalStringU16,
FieldTypeV4::SequenceU16(sequence) => Self::SequenceU16(Box::new(From::from(&**sequence))),
FieldTypeV4::SequenceU32(sequence) => Self::SequenceU32(Box::new(From::from(&**sequence))),
}
}
}
impl SchemaPatches {
/// This function loads a `SchemaPatches` to memory from a file in the `schemas/` folder.
pub fn load(file_path: &Path) -> Result<Self> {
let mut file = BufReader::new(File::open(file_path)?);
let mut data = Vec::with_capacity(file.get_ref().metadata()?.len() as usize);
file.read_to_end(&mut data)?;
from_bytes(&data).map_err(From::from)
}
}
service.go
// Copyright © 2020, 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dirk
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"regexp"
"strconv"
"strings"
"sync"
"time"
eth2client "github.com/attestantio/go-eth2-client"
api "github.com/attestantio/go-eth2-client/api/v1"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/attestantio/vouch/services/chaintime"
"github.com/attestantio/vouch/services/metrics"
"github.com/attestantio/vouch/services/validatorsmanager"
"github.com/pkg/errors"
"github.com/rs/zerolog"
zerologger "github.com/rs/zerolog/log"
"github.com/wealdtech/go-bytesutil"
dirk "github.com/wealdtech/go-eth2-wallet-dirk"
e2wtypes "github.com/wealdtech/go-eth2-wallet-types/v2"
"golang.org/x/sync/semaphore"
"google.golang.org/grpc/credentials"
)
// Service is the manager for dirk accounts.
type Service struct {
mutex sync.RWMutex
monitor metrics.AccountManagerMonitor
clientMonitor metrics.ClientMonitor
processConcurrency int64
endpoints []*dirk.Endpoint
accountPaths []string
credentials credentials.TransportCredentials
accounts map[phase0.BLSPubKey]e2wtypes.Account
validatorsManager validatorsmanager.Service
domainProvider eth2client.DomainProvider
farFutureEpoch phase0.Epoch
currentEpochProvider chaintime.Service
wallets map[string]e2wtypes.Wallet
walletsMutex sync.RWMutex
}
// module-wide log.
var log zerolog.Logger
// New creates a new dirk account manager.
func New(ctx context.Context, params ...Parameter) (*Service, error) {
parameters, err := parseAndCheckParameters(params...)
if err != nil {
return nil, errors.Wrap(err, "problem with parameters")
}
// Set logging.
log = zerologger.With().Str("service", "accountmanager").Str("impl", "dirk").Logger()
if parameters.logLevel != log.GetLevel() {
log = log.Level(parameters.logLevel)
}
credentials, err := credentialsFromCerts(ctx, parameters.clientCert, parameters.clientKey, parameters.caCert)
if err != nil {
return nil, errors.Wrap(err, "failed to build credentials")
}
endpoints := make([]*dirk.Endpoint, 0, len(parameters.endpoints))
for _, endpoint := range parameters.endpoints {
endpointParts := strings.Split(endpoint, ":")
if len(endpointParts) != 2 {
log.Warn().Str("endpoint", endpoint).Msg("Malformed endpoint")
continue
}
port, err := strconv.ParseUint(endpointParts[1], 10, 32)
if err != nil {
log.Warn().Str("endpoint", endpoint).Err(err).Msg("Malformed port")
continue
}
if port == 0 {
log.Warn().Str("endpoint", endpoint).Msg("Invalid port")
continue
}
endpoints = append(endpoints, dirk.NewEndpoint(endpointParts[0], uint32(port)))
}
if len(endpoints) == 0 {
return nil, errors.New("no valid endpoints specified")
}
farFutureEpoch, err := parameters.farFutureEpochProvider.FarFutureEpoch(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to obtain far future epoch")
}
s := &Service{
monitor: parameters.monitor,
clientMonitor: parameters.clientMonitor,
processConcurrency: parameters.processConcurrency,
endpoints: endpoints,
accountPaths: parameters.accountPaths,
credentials: credentials,
domainProvider: parameters.domainProvider,
validatorsManager: parameters.validatorsManager,
farFutureEpoch: farFutureEpoch,
currentEpochProvider: parameters.currentEpochProvider,
wallets: make(map[string]e2wtypes.Wallet),
}
log.Trace().Int64("process_concurrency", s.processConcurrency).Msg("Set process concurrency")
if err := s.refreshAccounts(ctx); err != nil {
return nil, errors.Wrap(err, "failed to fetch initial accounts")
}
if err := s.refreshValidators(ctx); err != nil {
return nil, errors.Wrap(err, "failed to fetch initial validator states")
}
return s, nil
}
// Refresh refreshes the accounts from Dirk, and account validator state from
// the validators provider.
// This is a relatively expensive operation, so should not be run in the validating path.
func (s *Service) Refresh(ctx context.Context) {
if err := s.refreshAccounts(ctx); err != nil {
log.Error().Err(err).Msg("Failed to refresh accounts")
}
if err := s.refreshValidators(ctx); err != nil {
log.Error().Err(err).Msg("Failed to refresh validators")
}
}
// refreshAccounts refreshes the accounts from Dirk.
func (s *Service) refreshAccounts(ctx context.Context) error {
// Create the relevant wallets.
wallets := make([]e2wtypes.Wallet, 0, len(s.accountPaths))
pathsByWallet := make(map[string][]string)
for _, path := range s.accountPaths {
pathBits := strings.Split(path, "/")
var paths []string
var exists bool
if paths, exists = pathsByWallet[pathBits[0]]; !exists {
paths = make([]string, 0)
}
pathsByWallet[pathBits[0]] = append(paths, path)
wallet, err := s.openWallet(ctx, pathBits[0])
if err != nil {
log.Warn().Err(err).Str("wallet", pathBits[0]).Msg("Failed to open wallet")
} else {
wallets = append(wallets, wallet)
}
}
verificationRegexes := accountPathsToVerificationRegexes(s.accountPaths)
// Fetch accounts for each wallet in parallel.
started := time.Now()
accounts := make(map[phase0.BLSPubKey]e2wtypes.Account)
var accountsMu sync.Mutex
sem := semaphore.NewWeighted(s.processConcurrency)
var wg sync.WaitGroup
for i := range wallets {
wg.Add(1)
go func(ctx context.Context, sem *semaphore.Weighted, wg *sync.WaitGroup, i int, mu *sync.Mutex) {
defer wg.Done()
if err := sem.Acquire(ctx, 1); err != nil {
log.Error().Err(err).Msg("Failed to acquire semaphore")
return
}
defer sem.Release(1)
log := log.With().Str("wallet", wallets[i].Name()).Logger()
log.Trace().Dur("elapsed", time.Since(started)).Msg("Obtained semaphore")
walletAccounts := s.fetchAccountsForWallet(ctx, wallets[i], verificationRegexes)
log.Trace().Dur("elapsed", time.Since(started)).Int("accounts", len(walletAccounts)).Msg("Obtained accounts")
accountsMu.Lock()
for k, v := range walletAccounts {
accounts[k] = v
}
accountsMu.Unlock()
log.Trace().Dur("elapsed", time.Since(started)).Int("accounts", len(walletAccounts)).Msg("Imported accounts")
}(ctx, sem, &wg, i, &accountsMu)
}
wg.Wait()
log.Trace().Int("accounts", len(accounts)).Msg("Obtained accounts")
if len(accounts) == 0 && len(s.accounts) != 0 {
log.Warn().Msg("No accounts obtained; retaining old list")
return nil
}
s.mutex.Lock()
s.accounts = accounts
s.mutex.Unlock()
return nil
}
// openWallet opens a wallet, using an existing one if present.
func (s *Service) openWallet(ctx context.Context, name string) (e2wtypes.Wallet, error) {
s.walletsMutex.Lock()
defer s.walletsMutex.Unlock()
wallet, exists := s.wallets[name]
var err error
if !exists {
wallet, err = dirk.OpenWallet(ctx, name, s.credentials, s.endpoints)
if err != nil {
return nil, err
}
s.wallets[name] = wallet
}
return wallet, nil
}
// refreshValidators refreshes the validator information for our known accounts.
func (s *Service) refreshValidators(ctx context.Context) error {
accountPubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts))
for pubKey := range s.accounts {
accountPubKeys = append(accountPubKeys, pubKey)
}
if err := s.validatorsManager.RefreshValidatorsFromBeaconNode(ctx, accountPubKeys); err != nil {
return errors.Wrap(err, "failed to refresh validators")
}
return nil
}
func credentialsFromCerts(ctx context.Context, clientCert []byte, clientKey []byte, caCert []byte) (credentials.TransportCredentials, error) {
clientPair, err := tls.X509KeyPair(clientCert, clientKey)
if err != nil {
return nil, errors.Wrap(err, "failed to load client keypair")
}
tlsCfg := &tls.Config{
Certificates: []tls.Certificate{clientPair},
MinVersion: tls.VersionTLS13,
}
if caCert != nil {
cp := x509.NewCertPool()
if !cp.AppendCertsFromPEM(caCert) {
return nil, errors.New("failed to add CA certificate")
}
tlsCfg.RootCAs = cp
}
return credentials.NewTLS(tlsCfg), nil
}
// ValidatingAccountsForEpoch obtains the validating accounts for a given epoch.
func (s *Service) ValidatingAccountsForEpoch(ctx context.Context, epoch phase0.Epoch) (map[phase0.ValidatorIndex]e2wtypes.Account, error) {
// stateCount is used to update metrics.
stateCount := map[api.ValidatorState]uint64{
api.ValidatorStateUnknown: 0,
api.ValidatorStatePendingInitialized: 0,
api.ValidatorStatePendingQueued: 0,
api.ValidatorStateActiveOngoing: 0,
api.ValidatorStateActiveExiting: 0,
api.ValidatorStateActiveSlashed: 0,
api.ValidatorStateExitedUnslashed: 0,
api.ValidatorStateExitedSlashed: 0,
api.ValidatorStateWithdrawalPossible: 0,
api.ValidatorStateWithdrawalDone: 0,
}
validatingAccounts := make(map[phase0.ValidatorIndex]e2wtypes.Account)
pubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts))
for pubKey := range s.accounts {
pubKeys = append(pubKeys, pubKey)
}
validators := s.validatorsManager.ValidatorsByPubKey(ctx, pubKeys)
for index, validator := range validators {
state := api.ValidatorToState(validator, epoch, s.farFutureEpoch)
stateCount[state]++
if state == api.ValidatorStateActiveOngoing || state == api.ValidatorStateActiveExiting {
account := s.accounts[validator.PublicKey]
log.Trace().
Str("name", account.Name()).
Str("public_key", fmt.Sprintf("%x", account.PublicKey().Marshal())).
Uint64("index", uint64(index)).
Str("state", state.String()).
Msg("Validating account")
validatingAccounts[index] = account
}
}
// Update metrics if this is the current epoch.
if epoch == s.currentEpochProvider.CurrentEpoch() {
stateCount[api.ValidatorStateUnknown] += uint64(len(s.accounts) - len(validators))
for state, count := range stateCount {
s.monitor.Accounts(strings.ToLower(state.String()), count)
}
}
return validatingAccounts, nil
}
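// A call sketch (hypothetical caller): fetch the accounts that should be validating
// in the current epoch, keyed by validator index.
//
//     accounts, err := s.ValidatingAccountsForEpoch(ctx, s.currentEpochProvider.CurrentEpoch())
//     if err != nil {
//         // handle the error
//     }
//     for index := range accounts {
//         // schedule duties for this validator index
//     }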
// ValidatingAccountsForEpochByIndex obtains the specified validating accounts for a given epoch.
func (s *Service) ValidatingAccountsForEpochByIndex(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) (map[phase0.ValidatorIndex]e2wtypes.Account, error) {
validatingAccounts := make(map[phase0.ValidatorIndex]e2wtypes.Account)
pubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts))
for pubKey := range s.accounts {
pubKeys = append(pubKeys, pubKey)
}
indexPresenceMap := make(map[phase0.ValidatorIndex]bool)
for _, index := range indices {
indexPresenceMap[index] = true
}
validators := s.validatorsManager.ValidatorsByPubKey(ctx, pubKeys)
for index, validator := range validators {
if _, present := indexPresenceMap[index]; !present {
continue
}
state := api.ValidatorToState(validator, epoch, s.farFutureEpoch)
if state == api.ValidatorStateActiveOngoing || state == api.ValidatorStateActiveExiting {
validatingAccounts[index] = s.accounts[validator.PublicKey]
}
}
return validatingAccounts, nil
}
// accountPathsToVerificationRegexes turns account paths in to regexes to allow verification.
func accountPathsToVerificationRegexes(paths []string) []*regexp.Regexp {
regexes := make([]*regexp.Regexp, 0, len(paths))
for _, path := range paths {
log := log.With().Str("path", path).Logger()
parts := strings.Split(path, "/")
if len(parts) == 0 || len(parts[0]) == 0 {
log.Debug().Msg("Invalid path")
continue
}
if len(parts) == 1 {
parts = append(parts, ".*")
}
if len(parts[1]) == 0 {
parts[1] = ".*"
}
parts[0] = strings.TrimPrefix(parts[0], "^")
parts[0] = strings.TrimSuffix(parts[0], "$")
parts[1] = strings.TrimPrefix(parts[1], "^")
parts[1] = strings.TrimSuffix(parts[1], "$")
specifier := fmt.Sprintf("^%s/%s$", parts[0], parts[1])
regex, err := regexp.Compile(specifier)
if err != nil {
log.Warn().Str("specifier", specifier).Err(err).Msg("Invalid path regex")
continue
}
regexes = append(regexes, regex)
}
return regexes
}
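// For example (illustrative values only): a bare "wallet1" becomes "^wallet1/.*$",
// matching every account in that wallet, while "wallet2/val[0-9]+" becomes
// "^wallet2/val[0-9]+$":
//
//     regexes := accountPathsToVerificationRegexes([]string{"wallet1", "wallet2/val[0-9]+"})
//     // regexes[0] matches "wallet1/anything"
//     // regexes[1] matches "wallet2/val7" but not "wallet2/other"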
func (s *Service) fetchAccountsForWallet(ctx context.Context, wallet e2wtypes.Wallet, verificationRegexes []*regexp.Regexp) map[phase0.BLSPubKey]e2wtypes.Account {
res := make(map[phase0.BLSPubKey]e2wtypes.Account)
for account := range wallet.Accounts(ctx) {
// Ensure the name matches one of our account paths.
name := fmt.Sprintf("%s/%s", wallet.Name(), account.Name())
verified := false
for _, verificationRegex := range verificationRegexes {
if verificationRegex.Match([]byte(name)) {
verified = true
break
}
}
if !verified {
log.Debug().Str("account", name).Msg("Received unwanted account from server; ignoring")
continue
}
var pubKey []byte
if provider, isProvider := account.(e2wtypes.AccountCompositePublicKeyProvider); isProvider {
pubKey = provider.CompositePublicKey().Marshal()
} else {
pubKey = account.PublicKey().Marshal()
}
res[bytesutil.ToBytes48(pubKey)] = account
}
return res
}
service.go | // Copyright © 2020, 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dirk
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"regexp"
"strconv"
"strings"
"sync"
"time"
eth2client "github.com/attestantio/go-eth2-client"
api "github.com/attestantio/go-eth2-client/api/v1"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/attestantio/vouch/services/chaintime"
"github.com/attestantio/vouch/services/metrics"
"github.com/attestantio/vouch/services/validatorsmanager"
"github.com/pkg/errors"
"github.com/rs/zerolog"
zerologger "github.com/rs/zerolog/log"
"github.com/wealdtech/go-bytesutil"
dirk "github.com/wealdtech/go-eth2-wallet-dirk"
e2wtypes "github.com/wealdtech/go-eth2-wallet-types/v2"
"golang.org/x/sync/semaphore"
"google.golang.org/grpc/credentials"
)
// Service is the manager for dirk accounts.
type Service struct {
mutex sync.RWMutex
monitor metrics.AccountManagerMonitor
clientMonitor metrics.ClientMonitor
processConcurrency int64
endpoints []*dirk.Endpoint
accountPaths []string
credentials credentials.TransportCredentials
accounts map[phase0.BLSPubKey]e2wtypes.Account
validatorsManager validatorsmanager.Service
domainProvider eth2client.DomainProvider
farFutureEpoch phase0.Epoch
currentEpochProvider chaintime.Service
wallets map[string]e2wtypes.Wallet
walletsMutex sync.RWMutex
}
// module-wide log.
var log zerolog.Logger
// New creates a new dirk account manager.
func New(ctx context.Context, params ...Parameter) (*Service, error) { |
// Refresh refreshes the accounts from Dirk, and account validator state from
// the validators provider.
// This is a relatively expensive operation, so should not be run in the validating path.
func (s *Service) Refresh(ctx context.Context) {
if err := s.refreshAccounts(ctx); err != nil {
log.Error().Err(err).Msg("Failed to refresh accounts")
}
if err := s.refreshValidators(ctx); err != nil {
log.Error().Err(err).Msg("Failed to refresh validators")
}
}
// refreshAccounts refreshes the accounts from Dirk.
func (s *Service) refreshAccounts(ctx context.Context) error {
// Create the relevant wallets.
wallets := make([]e2wtypes.Wallet, 0, len(s.accountPaths))
pathsByWallet := make(map[string][]string)
for _, path := range s.accountPaths {
pathBits := strings.Split(path, "/")
var paths []string
var exists bool
if paths, exists = pathsByWallet[pathBits[0]]; !exists {
paths = make([]string, 0)
}
pathsByWallet[pathBits[0]] = append(paths, path)
wallet, err := s.openWallet(ctx, pathBits[0])
if err != nil {
log.Warn().Err(err).Str("wallet", pathBits[0]).Msg("Failed to open wallet")
} else {
wallets = append(wallets, wallet)
}
}
verificationRegexes := accountPathsToVerificationRegexes(s.accountPaths)
// Fetch accounts for each wallet in parallel.
started := time.Now()
accounts := make(map[phase0.BLSPubKey]e2wtypes.Account)
var accountsMu sync.Mutex
sem := semaphore.NewWeighted(s.processConcurrency)
var wg sync.WaitGroup
for i := range wallets {
wg.Add(1)
go func(ctx context.Context, sem *semaphore.Weighted, wg *sync.WaitGroup, i int, mu *sync.Mutex) {
defer wg.Done()
if err := sem.Acquire(ctx, 1); err != nil {
log.Error().Err(err).Msg("Failed to acquire semaphore")
return
}
defer sem.Release(1)
log := log.With().Str("wallet", wallets[i].Name()).Logger()
log.Trace().Dur("elapsed", time.Since(started)).Msg("Obtained semaphore")
walletAccounts := s.fetchAccountsForWallet(ctx, wallets[i], verificationRegexes)
log.Trace().Dur("elapsed", time.Since(started)).Int("accounts", len(walletAccounts)).Msg("Obtained accounts")
accountsMu.Lock()
for k, v := range walletAccounts {
accounts[k] = v
}
accountsMu.Unlock()
log.Trace().Dur("elapsed", time.Since(started)).Int("accounts", len(walletAccounts)).Msg("Imported accounts")
}(ctx, sem, &wg, i, &accountsMu)
}
wg.Wait()
log.Trace().Int("accounts", len(accounts)).Msg("Obtained accounts")
if len(accounts) == 0 && len(s.accounts) != 0 {
log.Warn().Msg("No accounts obtained; retaining old list")
return nil
}
s.mutex.Lock()
s.accounts = accounts
s.mutex.Unlock()
return nil
}
// openWallet opens a wallet, using an existing one if present.
func (s *Service) openWallet(ctx context.Context, name string) (e2wtypes.Wallet, error) {
s.walletsMutex.Lock()
defer s.walletsMutex.Unlock()
wallet, exists := s.wallets[name]
var err error
if !exists {
wallet, err = dirk.OpenWallet(ctx, name, s.credentials, s.endpoints)
if err != nil {
return nil, err
}
s.wallets[name] = wallet
}
return wallet, nil
}
// refreshValidators refreshes the validator information for our known accounts.
func (s *Service) refreshValidators(ctx context.Context) error {
accountPubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts))
for pubKey := range s.accounts {
accountPubKeys = append(accountPubKeys, pubKey)
}
if err := s.validatorsManager.RefreshValidatorsFromBeaconNode(ctx, accountPubKeys); err != nil {
return errors.Wrap(err, "failed to refresh validators")
}
return nil
}
func credentialsFromCerts(ctx context.Context, clientCert []byte, clientKey []byte, caCert []byte) (credentials.TransportCredentials, error) {
clientPair, err := tls.X509KeyPair(clientCert, clientKey)
if err != nil {
return nil, errors.Wrap(err, "failed to load client keypair")
}
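	// Build a TLS 1.3 client configuration that presents the client certificate.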
tlsCfg := &tls.Config{
Certificates: []tls.Certificate{clientPair},
MinVersion: tls.VersionTLS13,
}
if caCert != nil {
cp := x509.NewCertPool()
if !cp.AppendCertsFromPEM(caCert) {
return nil, errors.New("failed to add CA certificate")
}
tlsCfg.RootCAs = cp
}
return credentials.NewTLS(tlsCfg), nil
}
// ValidatingAccountsForEpoch obtains the validating accounts for a given epoch.
func (s *Service) ValidatingAccountsForEpoch(ctx context.Context, epoch phase0.Epoch) (map[phase0.ValidatorIndex]e2wtypes.Account, error) {
// stateCount is used to update metrics.
stateCount := map[api.ValidatorState]uint64{
api.ValidatorStateUnknown: 0,
api.ValidatorStatePendingInitialized: 0,
api.ValidatorStatePendingQueued: 0,
api.ValidatorStateActiveOngoing: 0,
api.ValidatorStateActiveExiting: 0,
api.ValidatorStateActiveSlashed: 0,
api.ValidatorStateExitedUnslashed: 0,
api.ValidatorStateExitedSlashed: 0,
api.ValidatorStateWithdrawalPossible: 0,
api.ValidatorStateWithdrawalDone: 0,
}
validatingAccounts := make(map[phase0.ValidatorIndex]e2wtypes.Account)
pubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts))
for pubKey := range s.accounts {
pubKeys = append(pubKeys, pubKey)
}
validators := s.validatorsManager.ValidatorsByPubKey(ctx, pubKeys)
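	// Tally each validator's state, keeping accounts that are actively validating.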
for index, validator := range validators {
state := api.ValidatorToState(validator, epoch, s.farFutureEpoch)
stateCount[state]++
if state == api.ValidatorStateActiveOngoing || state == api.ValidatorStateActiveExiting {
account := s.accounts[validator.PublicKey]
log.Trace().
Str("name", account.Name()).
Str("public_key", fmt.Sprintf("%x", account.PublicKey().Marshal())).
Uint64("index", uint64(index)).
Str("state", state.String()).
Msg("Validating account")
validatingAccounts[index] = account
}
}
// Update metrics if this is the current epoch.
if epoch == s.currentEpochProvider.CurrentEpoch() {
stateCount[api.ValidatorStateUnknown] += uint64(len(s.accounts) - len(validators))
for state, count := range stateCount {
s.monitor.Accounts(strings.ToLower(state.String()), count)
}
}
return validatingAccounts, nil
}
// ValidatingAccountsForEpochByIndex obtains the specified validating accounts for a given epoch.
func (s *Service) ValidatingAccountsForEpochByIndex(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) (map[phase0.ValidatorIndex]e2wtypes.Account, error) {
validatingAccounts := make(map[phase0.ValidatorIndex]e2wtypes.Account)
pubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts))
for pubKey := range s.accounts {
pubKeys = append(pubKeys, pubKey)
}
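	// Build a set of the requested indices for quick membership checks.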
indexPresenceMap := make(map[phase0.ValidatorIndex]bool)
for _, index := range indices {
indexPresenceMap[index] = true
}
validators := s.validatorsManager.ValidatorsByPubKey(ctx, pubKeys)
for index, validator := range validators {
if _, present := indexPresenceMap[index]; !present {
continue
}
state := api.ValidatorToState(validator, epoch, s.farFutureEpoch)
if state == api.ValidatorStateActiveOngoing || state == api.ValidatorStateActiveExiting {
validatingAccounts[index] = s.accounts[validator.PublicKey]
}
}
return validatingAccounts, nil
}
// accountPathsToVerificationRegexes turns account paths in to regexes to allow verification.
func accountPathsToVerificationRegexes(paths []string) []*regexp.Regexp {
regexes := make([]*regexp.Regexp, 0, len(paths))
for _, path := range paths {
log := log.With().Str("path", path).Logger()
parts := strings.Split(path, "/")
if len(parts) == 0 || len(parts[0]) == 0 {
log.Debug().Msg("Invalid path")
continue
}
if len(parts) == 1 {
parts = append(parts, ".*")
}
if len(parts[1]) == 0 {
parts[1] = ".*"
}
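		// Strip any user-supplied anchors; the specifier below anchors the whole expression.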
parts[0] = strings.TrimPrefix(parts[0], "^")
parts[0] = strings.TrimSuffix(parts[0], "$")
parts[1] = strings.TrimPrefix(parts[1], "^")
parts[1] = strings.TrimSuffix(parts[1], "$")
specifier := fmt.Sprintf("^%s/%s$", parts[0], parts[1])
regex, err := regexp.Compile(specifier)
if err != nil {
log.Warn().Str("specifier", specifier).Err(err).Msg("Invalid path regex")
continue
}
regexes = append(regexes, regex)
}
return regexes
}
func (s *Service) fetchAccountsForWallet(ctx context.Context, wallet e2wtypes.Wallet, verificationRegexes []*regexp.Regexp) map[phase0.BLSPubKey]e2wtypes.Account {
res := make(map[phase0.BLSPubKey]e2wtypes.Account)
for account := range wallet.Accounts(ctx) {
// Ensure the name matches one of our account paths.
name := fmt.Sprintf("%s/%s", wallet.Name(), account.Name())
verified := false
for _, verificationRegex := range verificationRegexes {
if verificationRegex.Match([]byte(name)) {
verified = true
break
}
}
if !verified {
log.Debug().Str("account", name).Msg("Received unwanted account from server; ignoring")
continue
}
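		// Prefer the composite public key, which distributed (threshold) accounts provide.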
var pubKey []byte
if provider, isProvider := account.(e2wtypes.AccountCompositePublicKeyProvider); isProvider {
pubKey = provider.CompositePublicKey().Marshal()
} else {
pubKey = account.PublicKey().Marshal()
}
res[bytesutil.ToBytes48(pubKey)] = account
}
return res
}
pinhole.go
package pinhole
import (
"fmt"
"image"
"image/color"
"image/png"
"io"
"io/ioutil"
"math"
"os"
"sort"
"strconv"
"strings"
"golang.org/x/image/font/gofont/goregular"
"github.com/fogleman/gg"
"github.com/golang/freetype/truetype"
"github.com/google/btree"
)
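// circleSteps is the number of line segments used to approximate a circle.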
const circleSteps = 45
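// gof is the parsed Go Regular typeface used when rendering strings.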
var gof = func() *truetype.Font {
gof, err := truetype.Parse(goregular.TTF)
if err != nil {
panic(err)
}
return gof
}()
type line struct {
x1, y1, z1 float64
x2, y2, z2 float64
nocaps bool
color color.Color
str string
scale float64
circle bool
cfirst *line
cprev *line
cnext *line
drawcoords *fourcorners
}
func (l *line) Rect() (min, max [3]float64) {
if l.x1 < l.x2 {
min[0], max[0] = l.x1, l.x2
} else {
min[0], max[0] = l.x2, l.x1
}
if l.y1 < l.y2 {
min[1], max[1] = l.y1, l.y2
} else {
min[1], max[1] = l.y2, l.y1
}
if l.z1 < l.z2 {
min[2], max[2] = l.z1, l.z2
} else {
min[2], max[2] = l.z2, l.z1
}
return
}
func (l *line) Center() []float64 {
min, max := l.Rect()
return []float64{
(max[0] + min[0]) / 2,
(max[1] + min[1]) / 2,
(max[2] + min[2]) / 2,
}
}
type Pinhole struct {
lines []*line
stack []int
}
func New() *Pinhole {
return &Pinhole{}
}
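// Begin marks the start of a group; subsequent transforms apply only to lines added after it.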
func (p *Pinhole) Begin() {
p.stack = append(p.stack, len(p.lines))
}
func (p *Pinhole) End() {
if len(p.stack) > 0 {
p.stack = p.stack[:len(p.stack)-1]
}
}
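// Rotate rotates the lines in the current group by the given angles, in radians, about each axis.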
func (p *Pinhole) Rotate(x, y, z float64) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
l := p.lines[i]
if x != 0 {
l.x1, l.y1, l.z1 = rotate(l.x1, l.y1, l.z1, x, 0)
l.x2, l.y2, l.z2 = rotate(l.x2, l.y2, l.z2, x, 0)
}
if y != 0 {
l.x1, l.y1, l.z1 = rotate(l.x1, l.y1, l.z1, y, 1)
l.x2, l.y2, l.z2 = rotate(l.x2, l.y2, l.z2, y, 1)
}
if z != 0 {
l.x1, l.y1, l.z1 = rotate(l.x1, l.y1, l.z1, z, 2)
l.x2, l.y2, l.z2 = rotate(l.x2, l.y2, l.z2, z, 2)
}
p.lines[i] = l
}
}
func (p *Pinhole) Translate(x, y, z float64) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
p.lines[i].x1 += x
p.lines[i].y1 += y
p.lines[i].z1 += z
p.lines[i].x2 += x
p.lines[i].y2 += y
p.lines[i].z2 += z
}
}
func (p *Pinhole) Scale(x, y, z float64) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
p.lines[i].x1 *= x
p.lines[i].y1 *= y
p.lines[i].z1 *= z
p.lines[i].x2 *= x
p.lines[i].y2 *= y
p.lines[i].z2 *= z
if len(p.lines[i].str) > 0 {
p.lines[i].scale *= math.Min(x, y)
}
}
}
func (p *Pinhole) Colorize(color color.Color) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
p.lines[i].color = color
}
}
func (p *Pinhole) Center() {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
minx, miny, minz := math.Inf(+1), math.Inf(+1), math.Inf(+1)
maxx, maxy, maxz := math.Inf(-1), math.Inf(-1), math.Inf(-1)
for ; i < len(p.lines); i++ {
if p.lines[i].x1 < minx {
minx = p.lines[i].x1
}
if p.lines[i].x1 > maxx {
maxx = p.lines[i].x1
}
if p.lines[i].y1 < miny {
miny = p.lines[i].y1
}
if p.lines[i].y1 > maxy {
maxy = p.lines[i].y1
}
if p.lines[i].z1 < minz {
minz = p.lines[i].z1
}
		if p.lines[i].z1 > maxz {
			maxz = p.lines[i].z1
}
if p.lines[i].x2 < minx {
minx = p.lines[i].x2
}
if p.lines[i].x2 > maxx {
maxx = p.lines[i].x2
}
if p.lines[i].y2 < miny {
miny = p.lines[i].y2
}
if p.lines[i].y2 > maxy {
maxy = p.lines[i].y2
}
if p.lines[i].z2 < minz {
minz = p.lines[i].z2
}
if p.lines[i].z2 > maxz {
maxz = p.lines[i].z2
}
}
x := (maxx + minx) / 2
y := (maxy + miny) / 2
z := (maxz + minz) / 2
p.Translate(-x, -y, -z)
}
func (p *Pinhole) DrawString(x, y, z float64, s string) {
if s != "" {
p.DrawLine(x, y, z, x, y, z)
//p.lines[len(p.lines)-1].scale = 10 / 0.1 * radius
p.lines[len(p.lines)-1].str = s
}
}
func (p *Pinhole) DrawRect(minx, miny, maxx, maxy, z float64) {
p.DrawLine(minx, maxy, z, maxx, maxy, z)
p.DrawLine(maxx, maxy, z, maxx, miny, z)
p.DrawLine(maxx, miny, z, minx, miny, z)
p.DrawLine(minx, miny, z, minx, maxy, z)
}
func (p *Pinhole) DrawCube(minx, miny, minz, maxx, maxy, maxz float64) {
p.DrawLine(minx, maxy, minz, maxx, maxy, minz)
p.DrawLine(maxx, maxy, minz, maxx, miny, minz)
p.DrawLine(maxx, miny, minz, minx, miny, minz)
p.DrawLine(minx, miny, minz, minx, maxy, minz)
p.DrawLine(minx, maxy, maxz, maxx, maxy, maxz)
p.DrawLine(maxx, maxy, maxz, maxx, miny, maxz)
p.DrawLine(maxx, miny, maxz, minx, miny, maxz)
p.DrawLine(minx, miny, maxz, minx, maxy, maxz)
p.DrawLine(minx, maxy, minz, minx, maxy, maxz)
p.DrawLine(maxx, maxy, minz, maxx, maxy, maxz)
p.DrawLine(maxx, miny, minz, maxx, miny, maxz)
p.DrawLine(minx, miny, minz, minx, miny, maxz)
}
func (p *Pinhole) DrawDot(x, y, z float64, radius float64) {
p.DrawLine(x, y, z, x, y, z)
p.lines[len(p.lines)-1].scale = 10 / 0.1 * radius
}
func (p *Pinhole) DrawLine(x1, y1, z1, x2, y2, z2 float64) {
l := &line{
x1: x1, y1: y1, z1: z1,
x2: x2, y2: y2, z2: z2,
color: color.Black,
scale: 1,
}
p.lines = append(p.lines, l)
}
func (p *Pinhole) DrawCircle(x, y, z float64, radius float64) {
var fx, fy, fz float64
var lx, ly, lz float64
var first, prev *line
// we go one beyond the steps because we need to join at the end
for i := float64(0); i <= circleSteps; i++ {
var dx, dy, dz float64
dx, dy = destination(x, y, (math.Pi*2)/circleSteps*i, radius)
dz = z
if i > 0 {
if i == circleSteps {
p.DrawLine(lx, ly, lz, fx, fy, fz)
} else {
p.DrawLine(lx, ly, lz, dx, dy, dz)
}
line := p.lines[len(p.lines)-1]
line.nocaps = true
line.circle = true
if first == nil {
first = line
}
line.cfirst = first
line.cprev = prev
if prev != nil {
prev.cnext = line
}
prev = line
} else {
fx, fy, fz = dx, dy, dz
}
lx, ly, lz = dx, dy, dz
}
}
type ImageOptions struct {
BGColor color.Color
LineWidth float64
Scale float64
}
var DefaultImageOptions = &ImageOptions{
BGColor: color.White,
LineWidth: 1,
Scale: 1,
}
type byDistance []*line
func (a byDistance) Len() int {
return len(a)
}
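// Less orders lines far-to-near so that nearer lines are drawn last.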
func (a byDistance) Less(i, j int) bool {
imin, imax := a[i].Rect()
jmin, jmax := a[j].Rect()
for i := 2; i >= 0; i-- {
if imax[i] > jmax[i] {
return i == 2
}
if imax[i] < jmax[i] {
return i != 2
}
if imin[i] > jmin[i] {
return i == 2
}
if imin[i] < jmin[i] {
return i != 2
}
}
return false
}
func (a byDistance) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (p *Pinhole) Image(width, height int, opts *ImageOptions) *image.RGBA {
if opts == nil {
opts = DefaultImageOptions
}
sort.Sort(byDistance(p.lines))
for _, line := range p.lines {
line.drawcoords = nil
}
img := image.NewRGBA(image.Rect(0, 0, width, height))
c := gg.NewContextForRGBA(img)
if opts.BGColor != nil {
c.SetColor(opts.BGColor)
c.DrawRectangle(0, 0, float64(width), float64(height))
c.Fill()
}
capsMap := make(map[color.Color]*capTree)
var ccolor color.Color
var caps *capTree
fwidth, fheight := float64(width), float64(height)
focal := math.Min(fwidth, fheight) / 2
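	// maybeDraw projects a line into screen space; strings are drawn immediately,
	// while circle segments return their projected corners for later joining.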
maybeDraw := func(line *line) *fourcorners {
x1, y1, z1 := line.x1, line.y1, line.z1
x2, y2, z2 := line.x2, line.y2, line.z2
px1, py1 := projectPoint(x1, y1, z1, fwidth, fheight, focal, opts.Scale)
px2, py2 := projectPoint(x2, y2, z2, fwidth, fheight, focal, opts.Scale)
if !onscreen(fwidth, fheight, px1, py1, px2, py2) && !line.circle && line.str == "" {
return nil
}
t1 := lineWidthAtZ(z1, focal) * opts.LineWidth * line.scale
t2 := lineWidthAtZ(z2, focal) * opts.LineWidth * line.scale
if line.str != "" {
sz := 10 * t1
c.SetFontFace(truetype.NewFace(gof, &truetype.Options{Size: sz}))
w, h := c.MeasureString(line.str)
c.DrawString(line.str, px1-w/2, py1+h*.4)
return nil
}
var cap1, cap2 bool
if !line.nocaps {
cap1 = caps.insert(x1, y1, z1)
cap2 = caps.insert(x2, y2, z2)
}
return drawUnbalancedLineSegment(c,
px1, py1, px2, py2,
t1, t2,
cap1, cap2,
line.circle,
)
}
for _, line := range p.lines {
if line.color != ccolor {
ccolor = line.color
caps = capsMap[ccolor]
if caps == nil {
caps = newCapTree()
capsMap[ccolor] = caps
}
c.SetColor(ccolor)
}
if line.circle {
if line.drawcoords == nil {
// need to process the coords for all segments belonging to
// the current circle segment.
// first get the basic estimates
var coords []*fourcorners
seg := line.cfirst
for seg != nil {
seg.drawcoords = maybeDraw(seg)
if seg.drawcoords == nil {
panic("nil!")
}
coords = append(coords, seg.drawcoords)
seg = seg.cnext
}
// next reprocess to join the midpoints
for i := 0; i < len(coords); i++ {
var line1, line2 *fourcorners
if i == 0 {
line1 = coords[len(coords)-1]
} else {
line1 = coords[i-1]
}
line2 = coords[i]
midx1 := (line2.x1 + line1.x4) / 2
midy1 := (line2.y1 + line1.y4) / 2
midx2 := (line2.x2 + line1.x3) / 2
midy2 := (line2.y2 + line1.y3) / 2
line2.x1 = midx1
line2.y1 = midy1
line1.x4 = midx1
line1.y4 = midy1
line2.x2 = midx2
line2.y2 = midy2
line1.x3 = midx2
line1.y3 = midy2
}
}
// draw the cached coords
c.MoveTo(line.drawcoords.x1-math.SmallestNonzeroFloat64, line.drawcoords.y1-math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x2-math.SmallestNonzeroFloat64, line.drawcoords.y2-math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x3+math.SmallestNonzeroFloat64, line.drawcoords.y3+math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x4+math.SmallestNonzeroFloat64, line.drawcoords.y4+math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x1-math.SmallestNonzeroFloat64, line.drawcoords.y1-math.SmallestNonzeroFloat64)
c.ClosePath()
} else {
maybeDraw(line)
}
c.Fill()
}
return img
}
type fourcorners struct {
x1, y1, x2, y2, x3, y3, x4, y4 float64
}
func drawUnbalancedLineSegment(c *gg.Context,
x1, y1, x2, y2 float64,
t1, t2 float64,
cap1, cap2 bool,
circleSegment bool,
) *fourcorners {
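// Draws the segment as a filled quad whose width tapers from t1 at (x1,y1) to t2 at (x2,y2), with optional rounded end caps. Circle segments are not drawn here; their corner points are returned so the caller can join them.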
if x1 == x2 && y1 == y2 {
c.DrawCircle(x1, y1, t1/2)
return nil
}
a := lineAngle(x1, y1, x2, y2)
dx1, dy1 := destination(x1, y1, a-math.Pi/2, t1/2)
dx2, dy2 := destination(x1, y1, a+math.Pi/2, t1/2)
dx3, dy3 := destination(x2, y2, a+math.Pi/2, t2/2)
dx4, dy4 := destination(x2, y2, a-math.Pi/2, t2/2)
if circleSegment {
return &fourcorners{dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4}
}
const cubicCorner = 1.0 / 3 * 2 // ~0.667 cap control-point offset; the exact cubic circle constant would be 0.552284749831
if cap1 && t1 < 2 {
cap1 = false
}
if cap2 && t2 < 2 {
cap2 = false
}
c.MoveTo(dx1, dy1)
if cap1 {
ax1, ay1 := destination(dx1, dy1, a-math.Pi*2, t1*cubicCorner)
ax2, ay2 := destination(dx2, dy2, a-math.Pi*2, t1*cubicCorner)
c.CubicTo(ax1, ay1, ax2, ay2, dx2, dy2)
} else {
c.LineTo(dx2, dy2)
}
c.LineTo(dx3, dy3)
if cap2 {
ax1, ay1 := destination(dx3, dy3, a-math.Pi*2, -t2*cubicCorner)
ax2, ay2 := destination(dx4, dy4, a-math.Pi*2, -t2*cubicCorner)
c.CubicTo(ax1, ay1, ax2, ay2, dx4, dy4)
} else {
c.LineTo(dx4, dy4)
}
c.LineTo(dx1, dy1)
c.ClosePath()
return nil
}
func onscreen(w, h float64, x1, y1, x2, y2 float64) bool {
amin := [2]float64{0, 0}
amax := [2]float64{w, h}
var bmin [2]float64
var bmax [2]float64
if x1 < x2 {
bmin[0], bmax[0] = x1, x2
} else {
bmin[0], bmax[0] = x2, x1
}
if y1 < y2 {
bmin[1], bmax[1] = y1, y2
} else {
bmin[1], bmax[1] = y2, y1
}
for i := 0; i < len(amin); i++ {
if !(bmin[i] <= amax[i] && bmax[i] >= amin[i]) {
return false
}
}
return true
}
func (p *Pinhole) LoadObj(r io.Reader) error {
var faces [][][3]float64
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
var verts [][3]float64
for ln, line := range strings.Split(string(data), "\n") {
for {
nline := strings.Replace(line, "  ", " ", -1) // collapse double spaces
if len(nline) < len(line) {
line = nline
continue
}
break
}
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "v ") {
parts := strings.Split(line[2:], " ")
if len(parts) >= 3 {
verts = append(verts, [3]float64{})
for j := 0; j < 3; j++ {
if verts[len(verts)-1][j], err = strconv.ParseFloat(parts[j], 64); err != nil {
return fmt.Errorf("line %d: %s", ln+1, err.Error())
}
}
}
} else if strings.HasPrefix(line, "f ") {
parts := strings.Split(line[2:], " ")
if len(parts) >= 3 {
faces = append(faces, [][3]float64{})
for _, part := range parts {
part = strings.Split(part, "/")[0]
idx, err := strconv.ParseUint(part, 10, 64)
if err != nil {
return fmt.Errorf("line %d: %s", ln+1, err.Error())
}
if int(idx) > len(verts) {
return fmt.Errorf("line %d: invalid vert index: %d", ln+1, idx)
}
faces[len(faces)-1] = append(faces[len(faces)-1], verts[idx-1])
}
}
}
}
for _, faces := range faces {
var fx, fy, fz float64
var lx, ly, lz float64
var i int
for _, face := range faces {
if i == 0 {
fx, fy, fz = face[0], face[1], face[2]
} else {
p.DrawLine(lx, ly, lz, face[0], face[1], face[2])
}
lx, ly, lz = face[0], face[1], face[2]
i++
}
if i > 1 {
p.DrawLine(lx, ly, lz, fx, fy, fz)
}
}
return nil
}
func (p *Pinhole) SavePNG(path string, width, height int, opts *ImageOptions) error {
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
return png.Encode(file, p.Image(width, height, opts))
}
// projectPoint projects a 3d cartesian point to 2d screen coords.
// Origin is the center
// X is left/right
// Y is down/up
// Z is near/far, the 0 position is focal distance away from lens.
func projectPoint(
x, y, z float64, // 3d point to project
w, h, f float64, // width, height, focal
scale float64, // scale
) (px, py float64) { // projected point
x, y, z = x*scale*f, y*scale*f, z*scale*f
zz := z + f
if zz == 0 {
zz = math.SmallestNonzeroFloat64
}
px = x*(f/zz) + w/2
py = y*(f/zz) - h/2
py *= -1
return
}
func lineWidthAtZ(z float64, f float64) float64 {
return ((z*-1 + 1) / 2) * f * 0.04
}
func lineAngle(x1, y1, x2, y2 float64) float64 {
return math.Atan2(y1-y2, x1-x2)
}
func destination(x, y, angle, distance float64) (dx, dy float64) {
dx = x + math.Cos(angle)*distance
dy = y + math.Sin(angle)*distance
return
}
// https://www.siggraph.org/education/materials/HyperGraph/modeling/mod_tran/3drota.htm
func rotate(x, y, z float64, q float64, which int) (dx, dy, dz float64) {
switch which {
case 0: // x
dy = y*math.Cos(q) - z*math.Sin(q)
dz = y*math.Sin(q) + z*math.Cos(q)
dx = x
case 1: // y
dz = z*math.Cos(q) - x*math.Sin(q)
dx = z*math.Sin(q) + x*math.Cos(q)
dy = y
case 2: // z
dx = x*math.Cos(q) - y*math.Sin(q)
dy = x*math.Sin(q) + y*math.Cos(q)
dz = z
}
return
}
type capItem struct {
point [3]float64
}
func (a *capItem) Less(v btree.Item) bool {
b := v.(*capItem)
for i := 2; i >= 0; i-- {
if a.point[i] < b.point[i] {
return true
}
if a.point[i] > b.point[i] {
return false
}
}
return false
}
// capTree is a really lazy structure: a btree set of 3d points used to dedupe end caps, so each unique endpoint is capped only once.
type capTree struct {
tr *btree.BTree
}
func newCapTree() *capTree {
return &capTree{
tr: btree.New(9),
}
}
func (tr *capTree) insert(x, y, z float64) bool {
if tr.has(x, y, z) {
return false
}
tr.tr.ReplaceOrInsert(&capItem{point: [3]float64{x, y, z}})
return true
}
func (tr *capTree) has(x, y, z float64) bool {
return tr.tr.Has(&capItem{point: [3]float64{x, y, z}})
}
| Center | identifier_name |
pinhole.go | package pinhole
import (
"fmt"
"image"
"image/color"
"image/png"
"io"
"io/ioutil"
"math"
"os"
"sort"
"strconv"
"strings"
"golang.org/x/image/font/gofont/goregular"
"github.com/fogleman/gg"
"github.com/golang/freetype/truetype"
"github.com/google/btree"
)
const circleSteps = 45
var gof = func() *truetype.Font {
gof, err := truetype.Parse(goregular.TTF)
if err != nil {
panic(err)
}
return gof
}()
type line struct {
x1, y1, z1 float64
x2, y2, z2 float64
nocaps bool
color color.Color
str string
scale float64
circle bool
cfirst *line
cprev *line
cnext *line
drawcoords *fourcorners
}
func (l *line) Rect() (min, max [3]float64) {
if l.x1 < l.x2 {
min[0], max[0] = l.x1, l.x2
} else {
min[0], max[0] = l.x2, l.x1
}
if l.y1 < l.y2 {
min[1], max[1] = l.y1, l.y2
} else {
min[1], max[1] = l.y2, l.y1
}
if l.z1 < l.z2 {
min[2], max[2] = l.z1, l.z2
} else {
min[2], max[2] = l.z2, l.z1
}
return
}
func (l *line) Center() []float64 {
min, max := l.Rect()
return []float64{
(max[0] + min[0]) / 2,
(max[1] + min[1]) / 2,
(max[2] + min[2]) / 2,
}
}
type Pinhole struct {
lines []*line
stack []int
}
func New() *Pinhole {
return &Pinhole{}
}
func (p *Pinhole) Begin() {
p.stack = append(p.stack, len(p.lines))
}
func (p *Pinhole) End() {
if len(p.stack) > 0 {
p.stack = p.stack[:len(p.stack)-1]
}
}
func (p *Pinhole) Rotate(x, y, z float64) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
l := p.lines[i]
if x != 0 {
l.x1, l.y1, l.z1 = rotate(l.x1, l.y1, l.z1, x, 0)
l.x2, l.y2, l.z2 = rotate(l.x2, l.y2, l.z2, x, 0)
}
if y != 0 {
l.x1, l.y1, l.z1 = rotate(l.x1, l.y1, l.z1, y, 1)
l.x2, l.y2, l.z2 = rotate(l.x2, l.y2, l.z2, y, 1)
}
if z != 0 {
l.x1, l.y1, l.z1 = rotate(l.x1, l.y1, l.z1, z, 2)
l.x2, l.y2, l.z2 = rotate(l.x2, l.y2, l.z2, z, 2)
}
p.lines[i] = l
}
}
func (p *Pinhole) Translate(x, y, z float64) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
p.lines[i].x1 += x
p.lines[i].y1 += y
p.lines[i].z1 += z
p.lines[i].x2 += x
p.lines[i].y2 += y
p.lines[i].z2 += z
}
}
func (p *Pinhole) Scale(x, y, z float64) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
p.lines[i].x1 *= x
p.lines[i].y1 *= y
p.lines[i].z1 *= z
p.lines[i].x2 *= x
p.lines[i].y2 *= y
p.lines[i].z2 *= z
if len(p.lines[i].str) > 0 {
p.lines[i].scale *= math.Min(x, y)
}
}
}
func (p *Pinhole) Colorize(color color.Color) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
p.lines[i].color = color
}
}
func (p *Pinhole) Center() {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
minx, miny, minz := math.Inf(+1), math.Inf(+1), math.Inf(+1)
maxx, maxy, maxz := math.Inf(-1), math.Inf(-1), math.Inf(-1)
for ; i < len(p.lines); i++ {
if p.lines[i].x1 < minx {
minx = p.lines[i].x1
}
if p.lines[i].x1 > maxx {
maxx = p.lines[i].x1
}
if p.lines[i].y1 < miny {
miny = p.lines[i].y1
}
if p.lines[i].y1 > maxy {
maxy = p.lines[i].y1
}
if p.lines[i].z1 < minz {
minz = p.lines[i].z1
}
if p.lines[i].z1 > maxz {
maxz = p.lines[i].z1
}
if p.lines[i].x2 < minx {
minx = p.lines[i].x2
}
if p.lines[i].x2 > maxx {
maxx = p.lines[i].x2
}
if p.lines[i].y2 < miny {
miny = p.lines[i].y2
}
if p.lines[i].y2 > maxy {
maxy = p.lines[i].y2
}
if p.lines[i].z2 < minz {
minz = p.lines[i].z2
}
if p.lines[i].z2 > maxz {
maxz = p.lines[i].z2
}
}
x := (maxx + minx) / 2
y := (maxy + miny) / 2
z := (maxz + minz) / 2
p.Translate(-x, -y, -z)
}
func (p *Pinhole) DrawString(x, y, z float64, s string) {
if s != "" {
p.DrawLine(x, y, z, x, y, z)
//p.lines[len(p.lines)-1].scale = 10 / 0.1 * radius
p.lines[len(p.lines)-1].str = s
}
}
func (p *Pinhole) DrawRect(minx, miny, maxx, maxy, z float64) {
p.DrawLine(minx, maxy, z, maxx, maxy, z)
p.DrawLine(maxx, maxy, z, maxx, miny, z)
p.DrawLine(maxx, miny, z, minx, miny, z)
p.DrawLine(minx, miny, z, minx, maxy, z)
}
func (p *Pinhole) DrawCube(minx, miny, minz, maxx, maxy, maxz float64) {
p.DrawLine(minx, maxy, minz, maxx, maxy, minz)
p.DrawLine(maxx, maxy, minz, maxx, miny, minz)
p.DrawLine(maxx, miny, minz, minx, miny, minz)
p.DrawLine(minx, miny, minz, minx, maxy, minz)
p.DrawLine(minx, maxy, maxz, maxx, maxy, maxz)
p.DrawLine(maxx, maxy, maxz, maxx, miny, maxz)
p.DrawLine(maxx, miny, maxz, minx, miny, maxz)
p.DrawLine(minx, miny, maxz, minx, maxy, maxz)
p.DrawLine(minx, maxy, minz, minx, maxy, maxz)
p.DrawLine(maxx, maxy, minz, maxx, maxy, maxz)
p.DrawLine(maxx, miny, minz, maxx, miny, maxz)
p.DrawLine(minx, miny, minz, minx, miny, maxz)
}
func (p *Pinhole) DrawDot(x, y, z float64, radius float64) {
p.DrawLine(x, y, z, x, y, z)
p.lines[len(p.lines)-1].scale = 10 / 0.1 * radius
}
func (p *Pinhole) DrawLine(x1, y1, z1, x2, y2, z2 float64) {
l := &line{
x1: x1, y1: y1, z1: z1,
x2: x2, y2: y2, z2: z2,
color: color.Black,
scale: 1,
}
p.lines = append(p.lines, l)
}
func (p *Pinhole) DrawCircle(x, y, z float64, radius float64) {
var fx, fy, fz float64
var lx, ly, lz float64
var first, prev *line
// we go one beyond the steps because we need to join at the end
for i := float64(0); i <= circleSteps; i++ {
var dx, dy, dz float64
dx, dy = destination(x, y, (math.Pi*2)/circleSteps*i, radius)
dz = z
if i > 0 {
if i == circleSteps {
p.DrawLine(lx, ly, lz, fx, fy, fz)
} else {
p.DrawLine(lx, ly, lz, dx, dy, dz)
}
line := p.lines[len(p.lines)-1]
line.nocaps = true
line.circle = true
if first == nil {
first = line
}
line.cfirst = first
line.cprev = prev
if prev != nil {
prev.cnext = line
}
prev = line
} else {
fx, fy, fz = dx, dy, dz
}
lx, ly, lz = dx, dy, dz
}
}
type ImageOptions struct {
BGColor color.Color
LineWidth float64
Scale float64
}
var DefaultImageOptions = &ImageOptions{
BGColor: color.White,
LineWidth: 1,
Scale: 1,
}
type byDistance []*line
func (a byDistance) Len() int {
return len(a)
}
func (a byDistance) Less(i, j int) bool {
imin, imax := a[i].Rect()
jmin, jmax := a[j].Rect()
for i := 2; i >= 0; i-- {
if imax[i] > jmax[i] {
return i == 2
}
if imax[i] < jmax[i] {
return i != 2
}
if imin[i] > jmin[i] {
return i == 2
}
if imin[i] < jmin[i] {
return i != 2
}
}
return false
}
func (a byDistance) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (p *Pinhole) Image(width, height int, opts *ImageOptions) *image.RGBA {
if opts == nil {
opts = DefaultImageOptions
}
sort.Sort(byDistance(p.lines))
for _, line := range p.lines {
line.drawcoords = nil
}
img := image.NewRGBA(image.Rect(0, 0, width, height))
c := gg.NewContextForRGBA(img)
if opts.BGColor != nil {
c.SetColor(opts.BGColor)
c.DrawRectangle(0, 0, float64(width), float64(height))
c.Fill()
}
capsMap := make(map[color.Color]*capTree)
var ccolor color.Color
var caps *capTree
fwidth, fheight := float64(width), float64(height)
focal := math.Min(fwidth, fheight) / 2
maybeDraw := func(line *line) *fourcorners {
x1, y1, z1 := line.x1, line.y1, line.z1
x2, y2, z2 := line.x2, line.y2, line.z2
px1, py1 := projectPoint(x1, y1, z1, fwidth, fheight, focal, opts.Scale)
px2, py2 := projectPoint(x2, y2, z2, fwidth, fheight, focal, opts.Scale)
if !onscreen(fwidth, fheight, px1, py1, px2, py2) && !line.circle && line.str == "" {
return nil
}
t1 := lineWidthAtZ(z1, focal) * opts.LineWidth * line.scale
t2 := lineWidthAtZ(z2, focal) * opts.LineWidth * line.scale
if line.str != "" {
sz := 10 * t1
c.SetFontFace(truetype.NewFace(gof, &truetype.Options{Size: sz}))
w, h := c.MeasureString(line.str)
c.DrawString(line.str, px1-w/2, py1+h*.4)
return nil
}
var cap1, cap2 bool
if !line.nocaps {
cap1 = caps.insert(x1, y1, z1)
cap2 = caps.insert(x2, y2, z2)
}
return drawUnbalancedLineSegment(c,
px1, py1, px2, py2,
t1, t2,
cap1, cap2,
line.circle,
)
}
for _, line := range p.lines {
if line.color != ccolor {
ccolor = line.color
caps = capsMap[ccolor]
if caps == nil {
caps = newCapTree()
capsMap[ccolor] = caps
}
c.SetColor(ccolor)
}
if line.circle {
if line.drawcoords == nil {
// need to process the coords for all segments belonging to
// the current circle segment.
// first get the basic estimates
var coords []*fourcorners
seg := line.cfirst
for seg != nil {
seg.drawcoords = maybeDraw(seg)
if seg.drawcoords == nil {
panic("nil!")
}
coords = append(coords, seg.drawcoords)
seg = seg.cnext
}
// next reprocess to join the midpoints
for i := 0; i < len(coords); i++ {
var line1, line2 *fourcorners
if i == 0 {
line1 = coords[len(coords)-1]
} else {
line1 = coords[i-1]
}
line2 = coords[i]
midx1 := (line2.x1 + line1.x4) / 2
midy1 := (line2.y1 + line1.y4) / 2
midx2 := (line2.x2 + line1.x3) / 2
midy2 := (line2.y2 + line1.y3) / 2
line2.x1 = midx1
line2.y1 = midy1
line1.x4 = midx1
line1.y4 = midy1
line2.x2 = midx2
line2.y2 = midy2
line1.x3 = midx2
line1.y3 = midy2
}
}
// draw the cached coords
c.MoveTo(line.drawcoords.x1-math.SmallestNonzeroFloat64, line.drawcoords.y1-math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x2-math.SmallestNonzeroFloat64, line.drawcoords.y2-math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x3+math.SmallestNonzeroFloat64, line.drawcoords.y3+math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x4+math.SmallestNonzeroFloat64, line.drawcoords.y4+math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x1-math.SmallestNonzeroFloat64, line.drawcoords.y1-math.SmallestNonzeroFloat64)
c.ClosePath()
} else {
maybeDraw(line)
}
c.Fill()
}
return img
}
type fourcorners struct {
x1, y1, x2, y2, x3, y3, x4, y4 float64
}
func drawUnbalancedLineSegment(c *gg.Context,
x1, y1, x2, y2 float64,
t1, t2 float64,
cap1, cap2 bool,
circleSegment bool,
) *fourcorners {
if x1 == x2 && y1 == y2 {
c.DrawCircle(x1, y1, t1/2)
return nil
}
a := lineAngle(x1, y1, x2, y2)
dx1, dy1 := destination(x1, y1, a-math.Pi/2, t1/2)
dx2, dy2 := destination(x1, y1, a+math.Pi/2, t1/2)
dx3, dy3 := destination(x2, y2, a+math.Pi/2, t2/2)
dx4, dy4 := destination(x2, y2, a-math.Pi/2, t2/2)
if circleSegment {
return &fourcorners{dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4}
}
const cubicCorner = 1.0 / 3 * 2 // ~0.667 cap control-point offset; the exact cubic circle constant would be 0.552284749831
if cap1 && t1 < 2 {
cap1 = false
}
if cap2 && t2 < 2 {
cap2 = false
}
c.MoveTo(dx1, dy1)
if cap1 {
ax1, ay1 := destination(dx1, dy1, a-math.Pi*2, t1*cubicCorner)
ax2, ay2 := destination(dx2, dy2, a-math.Pi*2, t1*cubicCorner) | c.LineTo(dx2, dy2)
}
c.LineTo(dx3, dy3)
if cap2 {
ax1, ay1 := destination(dx3, dy3, a-math.Pi*2, -t2*cubicCorner)
ax2, ay2 := destination(dx4, dy4, a-math.Pi*2, -t2*cubicCorner)
c.CubicTo(ax1, ay1, ax2, ay2, dx4, dy4)
} else {
c.LineTo(dx4, dy4)
}
c.LineTo(dx1, dy1)
c.ClosePath()
return nil
}
func onscreen(w, h float64, x1, y1, x2, y2 float64) bool {
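// onscreen reports whether the segment's axis-aligned bounding box overlaps the w by h screen rectangle.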
amin := [2]float64{0, 0}
amax := [2]float64{w, h}
var bmin [2]float64
var bmax [2]float64
if x1 < x2 {
bmin[0], bmax[0] = x1, x2
} else {
bmin[0], bmax[0] = x2, x1
}
if y1 < y2 {
bmin[1], bmax[1] = y1, y2
} else {
bmin[1], bmax[1] = y2, y1
}
for i := 0; i < len(amin); i++ {
if !(bmin[i] <= amax[i] && bmax[i] >= amin[i]) {
return false
}
}
return true
}
func (p *Pinhole) LoadObj(r io.Reader) error {
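// LoadObj reads a Wavefront .obj stream: "v" records collect vertices, "f" records index them (1-based), and each face is drawn as a closed wireframe loop.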
var faces [][][3]float64
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
var verts [][3]float64
for ln, line := range strings.Split(string(data), "\n") {
for {
nline := strings.Replace(line, "  ", " ", -1) // collapse double spaces
if len(nline) < len(line) {
line = nline
continue
}
break
}
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "v ") {
parts := strings.Split(line[2:], " ")
if len(parts) >= 3 {
verts = append(verts, [3]float64{})
for j := 0; j < 3; j++ {
if verts[len(verts)-1][j], err = strconv.ParseFloat(parts[j], 64); err != nil {
return fmt.Errorf("line %d: %s", ln+1, err.Error())
}
}
}
} else if strings.HasPrefix(line, "f ") {
parts := strings.Split(line[2:], " ")
if len(parts) >= 3 {
faces = append(faces, [][3]float64{})
for _, part := range parts {
part = strings.Split(part, "/")[0]
idx, err := strconv.ParseUint(part, 10, 64)
if err != nil {
return fmt.Errorf("line %d: %s", ln+1, err.Error())
}
if int(idx) > len(verts) {
return fmt.Errorf("line %d: invalid vert index: %d", ln+1, idx)
}
faces[len(faces)-1] = append(faces[len(faces)-1], verts[idx-1])
}
}
}
}
for _, faces := range faces {
var fx, fy, fz float64
var lx, ly, lz float64
var i int
for _, face := range faces {
if i == 0 {
fx, fy, fz = face[0], face[1], face[2]
} else {
p.DrawLine(lx, ly, lz, face[0], face[1], face[2])
}
lx, ly, lz = face[0], face[1], face[2]
i++
}
if i > 1 {
p.DrawLine(lx, ly, lz, fx, fy, fz)
}
}
return nil
}
func (p *Pinhole) SavePNG(path string, width, height int, opts *ImageOptions) error {
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
return png.Encode(file, p.Image(width, height, opts))
}
// projectPoint projects a 3d cartesian point to 2d screen coords.
// Origin is the center
// X is left/right
// Y is down/up
// Z is near/far, the 0 position is focal distance away from lens.
func projectPoint(
x, y, z float64, // 3d point to project
w, h, f float64, // width, height, focal
scale float64, // scale
) (px, py float64) { // projected point
x, y, z = x*scale*f, y*scale*f, z*scale*f
zz := z + f
if zz == 0 {
zz = math.SmallestNonzeroFloat64
}
px = x*(f/zz) + w/2
py = y*(f/zz) - h/2
py *= -1
return
}
func lineWidthAtZ(z float64, f float64) float64 {
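// lineWidthAtZ scales stroke thickness by depth: z=-1 (nearest) yields f*0.04, z=+1 (farthest) yields zero.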
return ((z*-1 + 1) / 2) * f * 0.04
}
func lineAngle(x1, y1, x2, y2 float64) float64 {
return math.Atan2(y1-y2, x1-x2)
}
func destination(x, y, angle, distance float64) (dx, dy float64) {
dx = x + math.Cos(angle)*distance
dy = y + math.Sin(angle)*distance
return
}
// https://www.siggraph.org/education/materials/HyperGraph/modeling/mod_tran/3drota.htm
func rotate(x, y, z float64, q float64, which int) (dx, dy, dz float64) {
switch which {
case 0: // x
dy = y*math.Cos(q) - z*math.Sin(q)
dz = y*math.Sin(q) + z*math.Cos(q)
dx = x
case 1: // y
dz = z*math.Cos(q) - x*math.Sin(q)
dx = z*math.Sin(q) + x*math.Cos(q)
dy = y
case 2: // z
dx = x*math.Cos(q) - y*math.Sin(q)
dy = x*math.Sin(q) + y*math.Cos(q)
dz = z
}
return
}
type capItem struct {
point [3]float64
}
func (a *capItem) Less(v btree.Item) bool {
b := v.(*capItem)
for i := 2; i >= 0; i-- {
if a.point[i] < b.point[i] {
return true
}
if a.point[i] > b.point[i] {
return false
}
}
return false
}
// capTree is a really lazy structure: a btree set of 3d points used to dedupe end caps, so each unique endpoint is capped only once.
type capTree struct {
tr *btree.BTree
}
func newCapTree() *capTree {
return &capTree{
tr: btree.New(9),
}
}
func (tr *capTree) insert(x, y, z float64) bool {
if tr.has(x, y, z) {
return false
}
tr.tr.ReplaceOrInsert(&capItem{point: [3]float64{x, y, z}})
return true
}
func (tr *capTree) has(x, y, z float64) bool {
return tr.tr.Has(&capItem{point: [3]float64{x, y, z}})
} | c.CubicTo(ax1, ay1, ax2, ay2, dx2, dy2)
} else { | random_line_split |
pinhole.go | package pinhole
import (
"fmt"
"image"
"image/color"
"image/png"
"io"
"io/ioutil"
"math"
"os"
"sort"
"strconv"
"strings"
"golang.org/x/image/font/gofont/goregular"
"github.com/fogleman/gg"
"github.com/golang/freetype/truetype"
"github.com/google/btree"
)
const circleSteps = 45
var gof = func() *truetype.Font {
gof, err := truetype.Parse(goregular.TTF)
if err != nil {
panic(err)
}
return gof
}()
type line struct {
x1, y1, z1 float64
x2, y2, z2 float64
nocaps bool
color color.Color
str string
scale float64
circle bool
cfirst *line
cprev *line
cnext *line
drawcoords *fourcorners
}
func (l *line) Rect() (min, max [3]float64) {
if l.x1 < l.x2 {
min[0], max[0] = l.x1, l.x2
} else {
min[0], max[0] = l.x2, l.x1
}
if l.y1 < l.y2 {
min[1], max[1] = l.y1, l.y2
} else {
min[1], max[1] = l.y2, l.y1
}
if l.z1 < l.z2 {
min[2], max[2] = l.z1, l.z2
} else {
min[2], max[2] = l.z2, l.z1
}
return
}
func (l *line) Center() []float64 |
type Pinhole struct {
lines []*line
stack []int
}
func New() *Pinhole {
return &Pinhole{}
}
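// Begin/End delimit a scope: Rotate, Translate, Scale, Colorize and Center act only on lines added since the most recent unmatched Begin.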
func (p *Pinhole) Begin() {
p.stack = append(p.stack, len(p.lines))
}
func (p *Pinhole) End() {
if len(p.stack) > 0 {
p.stack = p.stack[:len(p.stack)-1]
}
}
func (p *Pinhole) Rotate(x, y, z float64) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
l := p.lines[i]
if x != 0 {
l.x1, l.y1, l.z1 = rotate(l.x1, l.y1, l.z1, x, 0)
l.x2, l.y2, l.z2 = rotate(l.x2, l.y2, l.z2, x, 0)
}
if y != 0 {
l.x1, l.y1, l.z1 = rotate(l.x1, l.y1, l.z1, y, 1)
l.x2, l.y2, l.z2 = rotate(l.x2, l.y2, l.z2, y, 1)
}
if z != 0 {
l.x1, l.y1, l.z1 = rotate(l.x1, l.y1, l.z1, z, 2)
l.x2, l.y2, l.z2 = rotate(l.x2, l.y2, l.z2, z, 2)
}
p.lines[i] = l
}
}
func (p *Pinhole) Translate(x, y, z float64) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
p.lines[i].x1 += x
p.lines[i].y1 += y
p.lines[i].z1 += z
p.lines[i].x2 += x
p.lines[i].y2 += y
p.lines[i].z2 += z
}
}
func (p *Pinhole) Scale(x, y, z float64) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
p.lines[i].x1 *= x
p.lines[i].y1 *= y
p.lines[i].z1 *= z
p.lines[i].x2 *= x
p.lines[i].y2 *= y
p.lines[i].z2 *= z
if len(p.lines[i].str) > 0 {
p.lines[i].scale *= math.Min(x, y)
}
}
}
func (p *Pinhole) Colorize(color color.Color) {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
for ; i < len(p.lines); i++ {
p.lines[i].color = color
}
}
func (p *Pinhole) Center() {
var i int
if len(p.stack) > 0 {
i = p.stack[len(p.stack)-1]
}
minx, miny, minz := math.Inf(+1), math.Inf(+1), math.Inf(+1)
maxx, maxy, maxz := math.Inf(-1), math.Inf(-1), math.Inf(-1)
for ; i < len(p.lines); i++ {
if p.lines[i].x1 < minx {
minx = p.lines[i].x1
}
if p.lines[i].x1 > maxx {
maxx = p.lines[i].x1
}
if p.lines[i].y1 < miny {
miny = p.lines[i].y1
}
if p.lines[i].y1 > maxy {
maxy = p.lines[i].y1
}
if p.lines[i].z1 < minz {
minz = p.lines[i].z1
}
if p.lines[i].z1 > maxz {
maxz = p.lines[i].z1
}
if p.lines[i].x2 < minx {
minx = p.lines[i].x2
}
if p.lines[i].x2 > maxx {
maxx = p.lines[i].x2
}
if p.lines[i].y2 < miny {
miny = p.lines[i].y2
}
if p.lines[i].y2 > maxy {
maxy = p.lines[i].y2
}
if p.lines[i].z2 < minz {
minz = p.lines[i].z2
}
if p.lines[i].z2 > maxz {
maxz = p.lines[i].z2
}
}
x := (maxx + minx) / 2
y := (maxy + miny) / 2
z := (maxz + minz) / 2
p.Translate(-x, -y, -z)
}
func (p *Pinhole) DrawString(x, y, z float64, s string) {
if s != "" {
p.DrawLine(x, y, z, x, y, z)
//p.lines[len(p.lines)-1].scale = 10 / 0.1 * radius
p.lines[len(p.lines)-1].str = s
}
}
func (p *Pinhole) DrawRect(minx, miny, maxx, maxy, z float64) {
p.DrawLine(minx, maxy, z, maxx, maxy, z)
p.DrawLine(maxx, maxy, z, maxx, miny, z)
p.DrawLine(maxx, miny, z, minx, miny, z)
p.DrawLine(minx, miny, z, minx, maxy, z)
}
func (p *Pinhole) DrawCube(minx, miny, minz, maxx, maxy, maxz float64) {
p.DrawLine(minx, maxy, minz, maxx, maxy, minz)
p.DrawLine(maxx, maxy, minz, maxx, miny, minz)
p.DrawLine(maxx, miny, minz, minx, miny, minz)
p.DrawLine(minx, miny, minz, minx, maxy, minz)
p.DrawLine(minx, maxy, maxz, maxx, maxy, maxz)
p.DrawLine(maxx, maxy, maxz, maxx, miny, maxz)
p.DrawLine(maxx, miny, maxz, minx, miny, maxz)
p.DrawLine(minx, miny, maxz, minx, maxy, maxz)
p.DrawLine(minx, maxy, minz, minx, maxy, maxz)
p.DrawLine(maxx, maxy, minz, maxx, maxy, maxz)
p.DrawLine(maxx, miny, minz, maxx, miny, maxz)
p.DrawLine(minx, miny, minz, minx, miny, maxz)
}
func (p *Pinhole) DrawDot(x, y, z float64, radius float64) {
p.DrawLine(x, y, z, x, y, z)
p.lines[len(p.lines)-1].scale = 10 / 0.1 * radius
}
func (p *Pinhole) DrawLine(x1, y1, z1, x2, y2, z2 float64) {
l := &line{
x1: x1, y1: y1, z1: z1,
x2: x2, y2: y2, z2: z2,
color: color.Black,
scale: 1,
}
p.lines = append(p.lines, l)
}
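// DrawCircle approximates a circle at height z with circleSteps chords, linking the segments (cfirst/cprev/cnext) so Image can weld their corners into a smooth ring.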
func (p *Pinhole) DrawCircle(x, y, z float64, radius float64) {
var fx, fy, fz float64
var lx, ly, lz float64
var first, prev *line
// we go one beyond the steps because we need to join at the end
for i := float64(0); i <= circleSteps; i++ {
var dx, dy, dz float64
dx, dy = destination(x, y, (math.Pi*2)/circleSteps*i, radius)
dz = z
if i > 0 {
if i == circleSteps {
p.DrawLine(lx, ly, lz, fx, fy, fz)
} else {
p.DrawLine(lx, ly, lz, dx, dy, dz)
}
line := p.lines[len(p.lines)-1]
line.nocaps = true
line.circle = true
if first == nil {
first = line
}
line.cfirst = first
line.cprev = prev
if prev != nil {
prev.cnext = line
}
prev = line
} else {
fx, fy, fz = dx, dy, dz
}
lx, ly, lz = dx, dy, dz
}
}
type ImageOptions struct {
BGColor color.Color
LineWidth float64
Scale float64
}
var DefaultImageOptions = &ImageOptions{
BGColor: color.White,
LineWidth: 1,
Scale: 1,
}
type byDistance []*line
func (a byDistance) Len() int {
return len(a)
}
func (a byDistance) Less(i, j int) bool {
imin, imax := a[i].Rect()
jmin, jmax := a[j].Rect()
for i := 2; i >= 0; i-- {
if imax[i] > jmax[i] {
return i == 2
}
if imax[i] < jmax[i] {
return i != 2
}
if imin[i] > jmin[i] {
return i == 2
}
if imin[i] < jmin[i] {
return i != 2
}
}
return false
}
func (a byDistance) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
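// Image renders the scene: lines are sorted far-to-near, projected through a pinhole at the image center (focal length min(w,h)/2), and filled as tapered quads grouped by color.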
func (p *Pinhole) Image(width, height int, opts *ImageOptions) *image.RGBA {
if opts == nil {
opts = DefaultImageOptions
}
sort.Sort(byDistance(p.lines))
for _, line := range p.lines {
line.drawcoords = nil
}
img := image.NewRGBA(image.Rect(0, 0, width, height))
c := gg.NewContextForRGBA(img)
if opts.BGColor != nil {
c.SetColor(opts.BGColor)
c.DrawRectangle(0, 0, float64(width), float64(height))
c.Fill()
}
capsMap := make(map[color.Color]*capTree)
var ccolor color.Color
var caps *capTree
fwidth, fheight := float64(width), float64(height)
focal := math.Min(fwidth, fheight) / 2
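// maybeDraw projects one line and draws it (text labels, dots, or tapered quads); only circle segments return their corner points instead of being drawn directly.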
maybeDraw := func(line *line) *fourcorners {
x1, y1, z1 := line.x1, line.y1, line.z1
x2, y2, z2 := line.x2, line.y2, line.z2
px1, py1 := projectPoint(x1, y1, z1, fwidth, fheight, focal, opts.Scale)
px2, py2 := projectPoint(x2, y2, z2, fwidth, fheight, focal, opts.Scale)
if !onscreen(fwidth, fheight, px1, py1, px2, py2) && !line.circle && line.str == "" {
return nil
}
t1 := lineWidthAtZ(z1, focal) * opts.LineWidth * line.scale
t2 := lineWidthAtZ(z2, focal) * opts.LineWidth * line.scale
if line.str != "" {
sz := 10 * t1
c.SetFontFace(truetype.NewFace(gof, &truetype.Options{Size: sz}))
w, h := c.MeasureString(line.str)
c.DrawString(line.str, px1-w/2, py1+h*.4)
return nil
}
var cap1, cap2 bool
if !line.nocaps {
cap1 = caps.insert(x1, y1, z1)
cap2 = caps.insert(x2, y2, z2)
}
return drawUnbalancedLineSegment(c,
px1, py1, px2, py2,
t1, t2,
cap1, cap2,
line.circle,
)
}
for _, line := range p.lines {
if line.color != ccolor {
ccolor = line.color
caps = capsMap[ccolor]
if caps == nil {
caps = newCapTree()
capsMap[ccolor] = caps
}
c.SetColor(ccolor)
}
if line.circle {
if line.drawcoords == nil {
// need to process the coords for all segments belonging to
// the current circle segment.
// first get the basic estimates
var coords []*fourcorners
seg := line.cfirst
for seg != nil {
seg.drawcoords = maybeDraw(seg)
if seg.drawcoords == nil {
panic("nil!")
}
coords = append(coords, seg.drawcoords)
seg = seg.cnext
}
// next reprocess to join the midpoints
for i := 0; i < len(coords); i++ {
var line1, line2 *fourcorners
if i == 0 {
line1 = coords[len(coords)-1]
} else {
line1 = coords[i-1]
}
line2 = coords[i]
midx1 := (line2.x1 + line1.x4) / 2
midy1 := (line2.y1 + line1.y4) / 2
midx2 := (line2.x2 + line1.x3) / 2
midy2 := (line2.y2 + line1.y3) / 2
line2.x1 = midx1
line2.y1 = midy1
line1.x4 = midx1
line1.y4 = midy1
line2.x2 = midx2
line2.y2 = midy2
line1.x3 = midx2
line1.y3 = midy2
}
}
// draw the cached coords
c.MoveTo(line.drawcoords.x1-math.SmallestNonzeroFloat64, line.drawcoords.y1-math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x2-math.SmallestNonzeroFloat64, line.drawcoords.y2-math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x3+math.SmallestNonzeroFloat64, line.drawcoords.y3+math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x4+math.SmallestNonzeroFloat64, line.drawcoords.y4+math.SmallestNonzeroFloat64)
c.LineTo(line.drawcoords.x1-math.SmallestNonzeroFloat64, line.drawcoords.y1-math.SmallestNonzeroFloat64)
c.ClosePath()
} else {
maybeDraw(line)
}
c.Fill()
}
return img
}
type fourcorners struct {
x1, y1, x2, y2, x3, y3, x4, y4 float64
}
func drawUnbalancedLineSegment(c *gg.Context,
x1, y1, x2, y2 float64,
t1, t2 float64,
cap1, cap2 bool,
circleSegment bool,
) *fourcorners {
if x1 == x2 && y1 == y2 {
c.DrawCircle(x1, y1, t1/2)
return nil
}
a := lineAngle(x1, y1, x2, y2)
dx1, dy1 := destination(x1, y1, a-math.Pi/2, t1/2)
dx2, dy2 := destination(x1, y1, a+math.Pi/2, t1/2)
dx3, dy3 := destination(x2, y2, a+math.Pi/2, t2/2)
dx4, dy4 := destination(x2, y2, a-math.Pi/2, t2/2)
if circleSegment {
return &fourcorners{dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4}
}
const cubicCorner = 1.0 / 3 * 2 // ~0.667 cap control-point offset; the exact cubic circle constant would be 0.552284749831
if cap1 && t1 < 2 {
cap1 = false
}
if cap2 && t2 < 2 {
cap2 = false
}
c.MoveTo(dx1, dy1)
if cap1 {
ax1, ay1 := destination(dx1, dy1, a-math.Pi*2, t1*cubicCorner)
ax2, ay2 := destination(dx2, dy2, a-math.Pi*2, t1*cubicCorner)
c.CubicTo(ax1, ay1, ax2, ay2, dx2, dy2)
} else {
c.LineTo(dx2, dy2)
}
c.LineTo(dx3, dy3)
if cap2 {
ax1, ay1 := destination(dx3, dy3, a-math.Pi*2, -t2*cubicCorner)
ax2, ay2 := destination(dx4, dy4, a-math.Pi*2, -t2*cubicCorner)
c.CubicTo(ax1, ay1, ax2, ay2, dx4, dy4)
} else {
c.LineTo(dx4, dy4)
}
c.LineTo(dx1, dy1)
c.ClosePath()
return nil
}
func onscreen(w, h float64, x1, y1, x2, y2 float64) bool {
amin := [2]float64{0, 0}
amax := [2]float64{w, h}
var bmin [2]float64
var bmax [2]float64
if x1 < x2 {
bmin[0], bmax[0] = x1, x2
} else {
bmin[0], bmax[0] = x2, x1
}
if y1 < y2 {
bmin[1], bmax[1] = y1, y2
} else {
bmin[1], bmax[1] = y2, y1
}
for i := 0; i < len(amin); i++ {
if !(bmin[i] <= amax[i] && bmax[i] >= amin[i]) {
return false
}
}
return true
}
func (p *Pinhole) LoadObj(r io.Reader) error {
var faces [][][3]float64
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
var verts [][3]float64
for ln, line := range strings.Split(string(data), "\n") {
for {
nline := strings.Replace(line, "  ", " ", -1) // collapse double spaces
if len(nline) < len(line) {
line = nline
continue
}
break
}
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "v ") {
parts := strings.Split(line[2:], " ")
if len(parts) >= 3 {
verts = append(verts, [3]float64{})
for j := 0; j < 3; j++ {
if verts[len(verts)-1][j], err = strconv.ParseFloat(parts[j], 64); err != nil {
return fmt.Errorf("line %d: %s", ln+1, err.Error())
}
}
}
} else if strings.HasPrefix(line, "f ") {
parts := strings.Split(line[2:], " ")
if len(parts) >= 3 {
faces = append(faces, [][3]float64{})
for _, part := range parts {
part = strings.Split(part, "/")[0]
idx, err := strconv.ParseUint(part, 10, 64)
if err != nil {
return fmt.Errorf("line %d: %s", ln+1, err.Error())
}
if int(idx) > len(verts) {
return fmt.Errorf("line %d: invalid vert index: %d", ln+1, idx)
}
faces[len(faces)-1] = append(faces[len(faces)-1], verts[idx-1])
}
}
}
}
for _, faces := range faces {
var fx, fy, fz float64
var lx, ly, lz float64
var i int
for _, face := range faces {
if i == 0 {
fx, fy, fz = face[0], face[1], face[2]
} else {
p.DrawLine(lx, ly, lz, face[0], face[1], face[2])
}
lx, ly, lz = face[0], face[1], face[2]
i++
}
if i > 1 {
p.DrawLine(lx, ly, lz, fx, fy, fz)
}
}
return nil
}
func (p *Pinhole) SavePNG(path string, width, height int, opts *ImageOptions) error {
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
return png.Encode(file, p.Image(width, height, opts))
}
// projectPoint projects a 3d cartesian point to 2d screen coords.
// Origin is the center
// X is left/right
// Y is down/up
// Z is near/far, the 0 position is focal distance away from lens.
func projectPoint(
x, y, z float64, // 3d point to project
w, h, f float64, // width, height, focal
scale float64, // scale
) (px, py float64) { // projected point
x, y, z = x*scale*f, y*scale*f, z*scale*f
zz := z + f
if zz == 0 {
zz = math.SmallestNonzeroFloat64
}
px = x*(f/zz) + w/2
py = y*(f/zz) - h/2
py *= -1
return
}
func lineWidthAtZ(z float64, f float64) float64 {
return ((z*-1 + 1) / 2) * f * 0.04
}
func lineAngle(x1, y1, x2, y2 float64) float64 {
return math.Atan2(y1-y2, x1-x2)
}
func destination(x, y, angle, distance float64) (dx, dy float64) {
dx = x + math.Cos(angle)*distance
dy = y + math.Sin(angle)*distance
return
}
// https://www.siggraph.org/education/materials/HyperGraph/modeling/mod_tran/3drota.htm
func rotate(x, y, z float64, q float64, which int) (dx, dy, dz float64) {
switch which {
case 0: // x
dy = y*math.Cos(q) - z*math.Sin(q)
dz = y*math.Sin(q) + z*math.Cos(q)
dx = x
case 1: // y
dz = z*math.Cos(q) - x*math.Sin(q)
dx = z*math.Sin(q) + x*math.Cos(q)
dy = y
case 2: // z
dx = x*math.Cos(q) - y*math.Sin(q)
dy = x*math.Sin(q) + y*math.Cos(q)
dz = z
}
return
}
type capItem struct {
point [3]float64
}
func (a *capItem) Less(v btree.Item) bool {
b := v.(*capItem)
for i := 2; i >= 0; i-- {
if a.point[i] < b.point[i] {
return true
}
if a.point[i] > b.point[i] {
return false
}
}
return false
}
// capTree is a really lazy structure: a btree set of 3d points used to dedupe end caps, so each unique endpoint is capped only once.
type capTree struct {
tr *btree.BTree
}
func newCapTree() *capTree {
return &capTree{
tr: btree.New(9),
}
}
func (tr *capTree) insert(x, y, z float64) bool {
if tr.has(x, y, z) {
return false
}
tr.tr.ReplaceOrInsert(&capItem{point: [3]float64{x, y, z}})
return true
}
func (tr *capTree) has(x, y, z float64) bool {
return tr.tr.Has(&capItem{point: [3]float64{x, y, z}})
}
| {
min, max := l.Rect()
return []float64{
(max[0] + min[0]) / 2,
(max[1] + min[1]) / 2,
(max[2] + min[2]) / 2,
}
} | identifier_body |
Tools.py | """
Created on Wed Jun 21 09:50:15 2017
@author: cwvanmierlo
"""
import time;
import sys;
import os;
def resource_path(relative_path):
#This function is needed for PyInstaller; without it the wrong base
#path is resolved when running from the compiled .EXE
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
def splitGroups(fileName, errorFile):
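    #Split a formatted import file into <name>_new.txt and <name>_existing.txt,
    #routing each row by whether its line number appears in errorFile.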
f = open(fileName, "r");
lines = f.readlines();
f.close();
f = open(errorFile, "r");
errors = f.readlines();
f.close();
newUsers = [];
existingUsers = [];
for i in range(1, len(lines)):
addToExisting = False;
for j in range(len(errors)):
seperator = " ";
if "\t" in errors[j]:
seperator = "\t";
if (i + 1)==int(errors[j].split(seperator)[0]):
addToExisting = True;
break;
if addToExisting:
existingUsers.append(lines[i]);
else:
newUsers.append(lines[i]);
newFile = fileName[0:fileName.rfind(".")].split("_formatresult")[0];
fNew = open(newFile + "_new" + ".txt", "w");
fNew.write(lines[0]);
for newUser in newUsers:
fNew.write(newUser);
fNew.close();
sortOrder = lines[0];
exVarIndex1 = sortOrder.split(";").index("username");
exVarIndex2 = sortOrder.split(";").index("grouphand");
fExisting = open(newFile + "_existing" + ".txt", "w");
fExisting.write(lines[0].split(";")[exVarIndex1] + ";" + lines[0].split(";")[exVarIndex2] + "\n");
for existingUser in existingUsers:
split = existingUser.split(";");
fExisting.write(split[exVarIndex1] + ";" + split[exVarIndex2] + "\n");
#TODO: (minor) delete last \n from file
fExisting.close();
def getGroups(course_id, fileName, seperator, write):
password = "Geheim12345!";
f = open(fileName, "r", encoding="utf-8");
lines = f.readlines();
f.close();
cleanFileName = fileName[0:fileName.rfind(".")];
sortOrder = [item.lower() for item in lines.pop(0).split("\n")[0].split(seperator)];
result = [];
#TODO: fix formatting errors once and for all
import unicodedata;
for line in lines:
editedLine = u"".join([c for c in unicodedata.normalize('NFKD', line) if not unicodedata.combining(c)]).replace("ł", "l").replace("Ł", "L");
editedLine = editedLine.split("\n")[0];
result.append(editedLine);
if "-" in course_id:
course_id_firstPart = course_id.split("-")[0];
else:
course_id_firstPart = course_id;
groupList = [];
fileLength = len(result);
if write:
f = open(cleanFileName + "_formatresult" + ".txt", "w");
#first line
f.write("firstname;lastname;grouphand;role;status;username;password;email;syncstatus\n");
for i in range(fileLength):
line = result[i].split(seperator);
if True: #TODO add exclusion lists
if not (line[sortOrder.index("groupname")] == ""):
groupName = course_id_firstPart + "-" + line[sortOrder.index("groupname")];
groupName = groupName.replace(" ", "-");
if not (groupName in groupList):
groupList.append(groupName);
currentUsername = "tudelft_" + line[sortOrder.index("username")];
if not "@" in currentUsername:
currentUsername += "@tudelft.nl";
currentFirstname = line[sortOrder.index("firstname")]
currentLastname = line[sortOrder.index("lastname")]
f.write(currentFirstname + ";" + currentLastname + ";" + groupName + ";" + "group_student,overall_filemanager" +
";" + "active" + ";" + currentUsername +
";" + password + ";" + line[sortOrder.index("email")] + ";" + "notsynced")
if not (i == (fileLength - 1)):
f.write("\n")
f.close();
else:
for i in range(fileLength):
line = result[i].split(seperator);
groupName = line[sortOrder.index("grouphand")];
groupName = groupName.replace(" ", "-");
if not (groupName in groupList):
groupList.append(groupName);
return groupList[0:len(groupList)];
def errorChecker(fileName):
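    #Scan fileName for characters with ord > 255 (likely encoding problems)
    #and return one possibleError per offending line.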
class possibleError(object):
ordNumber = 0;
ordCharacter = "";
lineOccurence = 0;
colOccurence = 0;
sourceLine = "";
def __init__(self, ordNumber, ordCharacter, lineOccurence, colOccurence, sourceLine):
self.ordNumber = ordNumber;
self.ordCharacter = ordCharacter;
self.lineOccurence = lineOccurence;
self.colOccurence = colOccurence;
self.sourceLine = sourceLine;
def getOrdNumber(self):
return self.ordNumber;
def getOrdCharacter(self):
return self.ordCharacter;
def getLineOccurence(self):
return self.lineOccurence;
def getColOccurence(self):
return self.colOccurence;
def getSourceLine(self):
re |
def __repr__(self):
return "ord:%d\t|\tchr:%s\t|\tline:%d\t|\tcolumn:%d\t\n" % (self.ordNumber, self.ordCharacter, self.lineOccurence, self.colOccurence);
f = open(fileName, "r");
lines = f.readlines();
f.close();
errorArray = []
for i in range(len(lines)):
numberedLine = []
for j in range(len(lines[i])):
numberedLine.append(ord(lines[i][j]));
if (max(numberedLine) > 255):
errorArray.append(possibleError(max(numberedLine), chr(max(numberedLine)), i, numberedLine.index(max(numberedLine)), lines[i]));
errorArray = errorArray[0:];
return errorArray;
def createScorionGroups(subGroupNames, courseId):
from selenium import webdriver;
chrome_path = "chromedriver.exe";
chromedriver = resource_path(chrome_path);
driver = webdriver.Chrome(executable_path = chromedriver);
driver.implicitly_wait(10);
driver.get("https://scorion3.parantion.nl");
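    #NOTE: this is only a nested helper named __del__, not a real destructor;
    #it is never invoked automatically and must be called explicitly to quit the driver.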
def __del__():
driver.quit();
def waitForGroupsLoaded():
while len(driver.find_elements_by_xpath(
"//*[@class='jstree-icon glyphicon glyphicon-group']")) <= 1:
if not checkTree():
openTree();
time.sleep(0.1);
return;
def waitForOptionsLoaded(n_options):
while len(driver.find_elements_by_tag_name("option")) <= n_options:
time.sleep(0.1);
return;
def waitForSubGroupOk(startOpts):
while len(driver.find_elements_by_tag_name("option")) > startOpts:
time.sleep(0.1);
return;
def selectOptionFromString(text):
options = driver.find_elements_by_tag_name("option");
for i in range(len(options)):
if text in options[i].get_attribute("outerText"):
options[i].click();
break;
return;
def goToGroups():
selected = driver.find_elements_by_class_name("selected ")[0]
correctLink = driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-user_group_1']")[-1].find_element_by_xpath(
'..');
if not (selected == correctLink):
correctLink.click();
return;
def inCourseId():
if len(driver.find_elements_by_class_name("jstree-clicked")) == 0:
return False;
return (courseId + " ") in driver.find_elements_by_class_name("jstree-clicked")[0].get_attribute("innerText");
def openTree():
elem = driver.find_elements_by_tag_name("ins")[0];
if not checkTree():
elem.click();
def checkTree():
elem = driver.find_elements_by_tag_name("ins")[0];
checkstr = elem.find_element_by_xpath("..").get_attribute("outerHTML");
return "jstree-open" in checkstr or "jstree-loading" in checkstr;
def selectCourseId():
if inCourseId():
return;
groups = driver.find_elements_by_xpath(
"//*[@class='jstree-icon glyphicon glyphicon-group']")
courseIdExists = False;
for i in range(len(groups)):
if (courseId + " ") in groups[i].find_element_by_xpath('..').get_attribute(
"outerText"):
groups[i].find_element_by_xpath('..').click();
courseIdExists = True;
break;
if not courseIdExists:
createCourse();
waitForGroupsLoaded();
selectCourseId();
return;
def waitForInCourseId():
while not inCourseId():
time.sleep(0.1);
return;
def createSubGroup(subGroupName):
startOpts = len(driver.find_elements_by_tag_name("option"));
driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-add_user_group_1']")[-1].find_element_by_xpath(
'..').send_keys(webdriver.common.keys.Keys.RETURN);
waitForOptionsLoaded(startOpts);
driver.find_element_by_id("label").send_keys(subGroupName);
selectOptionFromString("Groep (Handmatig)");
waitForOptionsLoaded(12 + startOpts);
selectOptionFromString(courseId);
driver.find_element_by_id("PopupAjaxOkButton1").click();
waitForSubGroupOk(startOpts);
def createCourse():
startOpts = len(driver.find_elements_by_tag_name("option"));
driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-add_user_group_1']")[-1].find_element_by_xpath(
'..').click();
waitForOptionsLoaded(startOpts);
driver.find_element_by_id("label").send_keys(courseId);
selectOptionFromString("Cursus (handmatig)");
time.sleep(1);
driver.find_element_by_id("PopupAjaxOkButton1").click();
waitForSubGroupOk(startOpts);
driver.find_element_by_id("username").send_keys("tudpeeadmin");
driver.find_element_by_id("password").send_keys("rtgeh678");
driver.find_element_by_id("loginsubmit").click();
########################## go to correct group ################################
goToGroups();
waitForGroupsLoaded();
selectCourseId();
waitForInCourseId();
time.sleep(1);
waitForGroupsLoaded();
############################## create subgroups ################################
for i in range(len(subGroupNames)):
createSubGroup(subGroupNames[i]);
waitForGroupsLoaded();
return driver;
| turn self.sourceLine;
| identifier_body |
Tools.py | """
Created on Wed Jun 21 09:50:15 2017
@author: cwvanmierlo
"""
import time;
import sys;
import os;
def resource_path(relative_path):
#This function is needed for PyInstaller; without it the wrong base
#path is resolved when running from the compiled .EXE
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
def splitGroups(fileName, errorFile):
f = open(fileName, "r");
lines = f.readlines();
f.close();
f = open(errorFile, "r");
errors = f.readlines();
f.close();
newUsers = [];
existingUsers = [];
for i in range(1, len(lines)):
addToExisting = False;
for j in range(len(errors)):
seperator = " ";
if "\t" in errors[j]:
seperator = "\t";
if (i + 1)==int(errors[j].split(seperator)[0]):
addToExisting = True;
break;
if addToExisting:
existingUsers.append(lines[i]);
else:
newUsers.append(lines[i]);
newFile = fileName[0:fileName.rfind(".")].split("_formatresult")[0];
fNew = open(newFile + "_new" + ".txt", "w");
fNew.write(lines[0]);
for newUser in newUsers:
fNew.write(newUser);
fNew.close();
sortOrder = lines[0];
exVarIndex1 = sortOrder.split(";").index("username");
exVarIndex2 = sortOrder.split(";").index("grouphand");
fExisting = open(newFile + "_existing" + ".txt", "w");
fExisting.write(lines[0].split(";")[exVarIndex1] + ";" + lines[0].split(";")[exVarIndex2] + "\n");
for existingUser in existingUsers:
split = existingUser.split(";");
fExisting.write(split[exVarIndex1] + ";" + split[exVarIndex2] + "\n");
#TODO: (minor) delete last \n from file
fExisting.close();
def getGroups(course_id, fileName, seperator, write):
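    #Parse an exported student list; returns the group names and, when write
    #is True, also writes a <fileName>_formatresult.txt import file.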
password = "Geheim12345!";
f = open(fileName, "r", encoding="utf-8");
lines = f.readlines();
f.close();
cleanFileName = fileName[0:fileName.rfind(".")];
sortOrder = [item.lower() for item in lines.pop(0).split("\n")[0].split(seperator)];
result = [];
#TODO: fix formatting errors once and for all
import unicodedata;
for line in lines:
editedLine = u"".join([c for c in unicodedata.normalize('NFKD', line) if not unicodedata.combining(c)]).replace("ł", "l").replace("Ł", "L");
editedLine = editedLine.split("\n")[0];
result.append(editedLine);
if "-" in course_id:
course_id_firstPart = course_id.split("-")[0];
else:
course_id_firstPart = course_id;
groupList = [];
fileLength = len(result);
if write:
f = open(cleanFileName + "_formatresult" + ".txt", "w");
#first line
f.write("firstname;lastname;grouphand;role;status;username;password;email;syncstatus\n");
for i in range(fileLength):
line = result[i].split(seperator);
if True: #TODO add exclusion lists
if not (line[sortOrder.index("groupname")] == ""):
groupName = course_id_firstPart + "-" + line[sortOrder.index("groupname")];
groupName = groupName.replace(" ", "-");
if not (groupName in groupList):
groupList.append(groupName);
currentUsername = "tudelft_" + line[sortOrder.index("username")];
if not "@" in currentUsername:
currentUsername += "@tudelft.nl";
currentFirstname = line[sortOrder.index("firstname")]
currentLastname = line[sortOrder.index("lastname")]
f.write(currentFirstname + ";" + currentLastname + ";" + groupName + ";" + "group_student,overall_filemanager" +
";" + "active" + ";" + currentUsername +
";" + password + ";" + line[sortOrder.index("email")] + ";" + "notsynced")
if not (i == (fileLength - 1)):
f.write("\n")
f.close();
else:
for i in range(fileLength):
line = result[i].split(seperator);
groupName = line[sortOrder.index("grouphand")];
groupName = groupName.replace(" ", "-");
if not (groupName in groupList):
groupList.append(groupName);
return groupList[0:len(groupList)];
def errorChecker(fileName):
class possibleError(object):
ordNumber = 0;
ordCharacter = "";
lineOccurence = 0;
colOccurence = 0;
sourceLine = "";
def __init__(self, ordNumber, ordCharacter, lineOccurence, colOccurence, sourceLine):
self.ordNumber = ordNumber;
self.ordCharacter = ordCharacter;
self.lineOccurence = lineOccurence;
self.colOccurence = colOccurence;
self.sourceLine = sourceLine;
def ge | elf):
return self.ordNumber;
def getOrdCharacter(self):
return self.ordCharacter;
def getLineOccurence(self):
return self.lineOccurence;
def getColOccurence(self):
return self.colOccurence;
def getSourceLine(self):
return self.sourceLine;
def __repr__(self):
return "ord:%d\t|\tchr:%s\t|\tline:%d\t|\tcolumn:%d\t\n" % (self.ordNumber, self.ordCharacter, self.lineOccurence, self.colOccurence);
f = open(fileName, "r");
lines = f.readlines();
f.close();
errorArray = []
for i in range(len(lines)):
numberedLine = []
for j in range(len(lines[i])):
numberedLine.append(ord(lines[i][j]));
if (max(numberedLine) > 255):
errorArray.append(possibleError(max(numberedLine), chr(max(numberedLine)), i, numberedLine.index(max(numberedLine)), lines[i]));
errorArray = errorArray[0:];
return errorArray;
def createScorionGroups(subGroupNames, courseId):
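    #Drive the Scorion3 web UI with Selenium: log in, select (or create) the
    #course group, create each subgroup, and return the live webdriver session.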
from selenium import webdriver;
chrome_path = "chromedriver.exe";
chromedriver = resource_path(chrome_path);
driver = webdriver.Chrome(executable_path = chromedriver);
driver.implicitly_wait(10);
driver.get("https://scorion3.parantion.nl");
def __del__():
driver.quit();
def waitForGroupsLoaded():
while len(driver.find_elements_by_xpath(
"//*[@class='jstree-icon glyphicon glyphicon-group']")) <= 1:
if not checkTree():
openTree();
time.sleep(0.1);
return;
def waitForOptionsLoaded(n_options):
while len(driver.find_elements_by_tag_name("option")) <= n_options:
time.sleep(0.1);
return;
def waitForSubGroupOk(startOpts):
while len(driver.find_elements_by_tag_name("option")) > startOpts:
time.sleep(0.1);
return;
def selectOptionFromString(text):
options = driver.find_elements_by_tag_name("option");
for i in range(len(options)):
if text in options[i].get_attribute("outerText"):
options[i].click();
break;
return;
def goToGroups():
selected = driver.find_elements_by_class_name("selected ")[0]
correctLink = driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-user_group_1']")[-1].find_element_by_xpath(
'..');
if not (selected == correctLink):
correctLink.click();
return;
def inCourseId():
if len(driver.find_elements_by_class_name("jstree-clicked")) == 0:
return False;
return (courseId + " ") in driver.find_elements_by_class_name("jstree-clicked")[0].get_attribute("innerText");
def openTree():
elem = driver.find_elements_by_tag_name("ins")[0];
if not checkTree():
elem.click();
def checkTree():
elem = driver.find_elements_by_tag_name("ins")[0];
checkstr = elem.find_element_by_xpath("..").get_attribute("outerHTML");
return "jstree-open" in checkstr or "jstree-loading" in checkstr;
def selectCourseId():
if inCourseId():
return;
groups = driver.find_elements_by_xpath(
"//*[@class='jstree-icon glyphicon glyphicon-group']")
courseIdExists = False;
for i in range(len(groups)):
if (courseId + " ") in groups[i].find_element_by_xpath('..').get_attribute(
"outerText"):
groups[i].find_element_by_xpath('..').click();
courseIdExists = True;
break;
if not courseIdExists:
createCourse();
waitForGroupsLoaded();
selectCourseId();
return;
def waitForInCourseId():
while not inCourseId():
time.sleep(0.1);
return;
def createSubGroup(subGroupName):
startOpts = len(driver.find_elements_by_tag_name("option"));
driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-add_user_group_1']")[-1].find_element_by_xpath(
'..').send_keys(webdriver.common.keys.Keys.RETURN);
waitForOptionsLoaded(startOpts);
driver.find_element_by_id("label").send_keys(subGroupName);
selectOptionFromString("Groep (Handmatig)");
waitForOptionsLoaded(12 + startOpts);
selectOptionFromString(courseId);
driver.find_element_by_id("PopupAjaxOkButton1").click();
waitForSubGroupOk(startOpts);
def createCourse():
startOpts = len(driver.find_elements_by_tag_name("option"));
driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-add_user_group_1']")[-1].find_element_by_xpath(
'..').click();
waitForOptionsLoaded(startOpts);
driver.find_element_by_id("label").send_keys(courseId);
selectOptionFromString("Cursus (handmatig)");
time.sleep(1);
driver.find_element_by_id("PopupAjaxOkButton1").click();
waitForSubGroupOk(startOpts);
driver.find_element_by_id("username").send_keys("tudpeeadmin");
driver.find_element_by_id("password").send_keys("rtgeh678");
driver.find_element_by_id("loginsubmit").click();
########################## go to correct group ################################
goToGroups();
waitForGroupsLoaded();
selectCourseId();
waitForInCourseId();
time.sleep(1);
waitForGroupsLoaded();
############################## create subgroups ################################
for i in range(len(subGroupNames)):
createSubGroup(subGroupNames[i]);
waitForGroupsLoaded();
return driver;
| tOrdNumber(s | identifier_name |
Tools.py | """
Created on Wed Jun 21 09:50:15 2017
@author: cwvanmierlo
"""
import time;
import sys;
import os;
def resource_path(relative_path):
#This function is needed for PyInstaller; without it the wrong base
#path is resolved when running from the compiled .EXE
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
def splitGroups(fileName, errorFile):
f = open(fileName, "r");
lines = f.readlines();
f.close();
f = open(errorFile, "r");
errors = f.readlines();
f.close();
newUsers = [];
existingUsers = [];
for i in range(1, len(lines)):
addToExisting = False;
for j in range(len(errors)):
seperator = " ";
if "\t" in errors[j]:
seperator = "\t";
if (i + 1)==int(errors[j].split(seperator)[0]):
addToExisting = True;
break;
if addToExisting:
existingUsers.append(lines[i]);
else:
newUsers.append(lines[i]);
newFile = fileName[0:fileName.rfind(".")].split("_formatresult")[0];
fNew = open(newFile + "_new" + ".txt", "w");
fNew.write(lines[0]);
for newUser in newUsers:
|
fNew.close();
sortOrder = lines[0];
exVarIndex1 = sortOrder.split(";").index("username");
exVarIndex2 = sortOrder.split(";").index("grouphand");
fExisting = open(newFile + "_existing" + ".txt", "w");
fExisting.write(lines[0].split(";")[exVarIndex1] + ";" + lines[0].split(";")[exVarIndex2] + "\n");
for existingUser in existingUsers:
split = existingUser.split(";");
fExisting.write(split[exVarIndex1] + ";" + split[exVarIndex2] + "\n");
#TODO: (minor) delete last \n from file
fExisting.close();
def getGroups(course_id, fileName, seperator, write):
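    #The fixed password below is assigned to every account in the generated import file.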
password = "Geheim12345!";
f = open(fileName, "r", encoding="utf-8");
lines = f.readlines();
f.close();
cleanFileName = fileName[0:fileName.rfind(".")];
sortOrder = [item.lower() for item in lines.pop(0).split("\n")[0].split(seperator)];
result = [];
#TODO: fix formatting errors once and for all
import unicodedata;
for line in lines:
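        #Strip accents/diacritics via NFKD decomposition; ł and Ł do not decompose, so they are mapped by hand.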
editedLine = u"".join([c for c in unicodedata.normalize('NFKD', line) if not unicodedata.combining(c)]).replace("ł", "l").replace("Ł", "L");
editedLine = editedLine.split("\n")[0];
result.append(editedLine);
if "-" in course_id:
course_id_firstPart = course_id.split("-")[0];
else:
course_id_firstPart = course_id;
groupList = [];
fileLength = len(result);
if write:
f = open(cleanFileName + "_formatresult" + ".txt", "w");
#first line
f.write("firstname;lastname;grouphand;role;status;username;password;email;syncstatus\n");
for i in range(fileLength):
line = result[i].split(seperator);
if True: #TODO add exclusion lists
if not (line[sortOrder.index("groupname")] == ""):
groupName = course_id_firstPart + "-" + line[sortOrder.index("groupname")];
groupName = groupName.replace(" ", "-");
if not (groupName in groupList):
groupList.append(groupName);
currentUsername = "tudelft_" + line[sortOrder.index("username")];
if not "@" in currentUsername:
currentUsername += "@tudelft.nl";
currentFirstname = line[sortOrder.index("firstname")]
currentLastname = line[sortOrder.index("lastname")]
f.write(currentFirstname + ";" + currentLastname + ";" + groupName + ";" + "group_student,overall_filemanager" +
";" + "active" + ";" + currentUsername +
";" + password + ";" + line[sortOrder.index("email")] + ";" + "notsynced")
if not (i == (fileLength - 1)):
f.write("\n")
f.close();
else:
for i in range(fileLength):
line = result[i].split(seperator);
groupName = line[sortOrder.index("grouphand")];
groupName = groupName.replace(" ", "-");
if not (groupName in groupList):
groupList.append(groupName);
return groupList[0:len(groupList)];
def errorChecker(fileName):
class possibleError(object):
ordNumber = 0;
ordCharacter = "";
lineOccurence = 0;
colOccurence = 0;
sourceLine = "";
def __init__(self, ordNumber, ordCharacter, lineOccurence, colOccurence, sourceLine):
self.ordNumber = ordNumber;
self.ordCharacter = ordCharacter;
self.lineOccurence = lineOccurence;
self.colOccurence = colOccurence;
self.sourceLine = sourceLine;
def getOrdNumber(self):
return self.ordNumber;
def getOrdCharacter(self):
return self.ordCharacter;
def getLineOccurence(self):
return self.lineOccurence;
def getColOccurence(self):
return self.colOccurence;
def getSourceLine(self):
return self.sourceLine;
def __repr__(self):
return "ord:%d\t|\tchr:%s\t|\tline:%d\t|\tcolumn:%d\t\n" % (self.ordNumber, self.ordCharacter, self.lineOccurence, self.colOccurence);
f = open(fileName, "r");
lines = f.readlines();
f.close();
errorArray = []
for i in range(len(lines)):
numberedLine = []
for j in range(len(lines[i])):
numberedLine.append(ord(lines[i][j]));
if (max(numberedLine) > 255):
errorArray.append(possibleError(max(numberedLine), chr(max(numberedLine)), i, numberedLine.index(max(numberedLine)), lines[i]));
errorArray = errorArray[0:];
return errorArray;
def createScorionGroups(subGroupNames, courseId):
from selenium import webdriver;
chrome_path = "chromedriver.exe";
chromedriver = resource_path(chrome_path);
driver = webdriver.Chrome(executable_path = chromedriver);
driver.implicitly_wait(10);
driver.get("https://scorion3.parantion.nl");
def __del__():
driver.quit();
def waitForGroupsLoaded():
while len(driver.find_elements_by_xpath(
"//*[@class='jstree-icon glyphicon glyphicon-group']")) <= 1:
if not checkTree():
openTree();
time.sleep(0.1);
return;
def waitForOptionsLoaded(n_options):
while len(driver.find_elements_by_tag_name("option")) <= n_options:
time.sleep(0.1);
return;
def waitForSubGroupOk(startOpts):
while len(driver.find_elements_by_tag_name("option")) > startOpts:
time.sleep(0.1);
return;
def selectOptionFromString(text):
options = driver.find_elements_by_tag_name("option");
for i in range(len(options)):
if text in options[i].get_attribute("outerText"):
options[i].click();
break;
return;
def goToGroups():
selected = driver.find_elements_by_class_name("selected ")[0]
correctLink = driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-user_group_1']")[-1].find_element_by_xpath(
'..');
if not (selected == correctLink):
correctLink.click();
return;
def inCourseId():
if len(driver.find_elements_by_class_name("jstree-clicked")) == 0:
return False;
return (courseId + " ") in driver.find_elements_by_class_name("jstree-clicked")[0].get_attribute("innerText");
def openTree():
elem = driver.find_elements_by_tag_name("ins")[0];
if not checkTree():
elem.click();
def checkTree():
elem = driver.find_elements_by_tag_name("ins")[0];
checkstr = elem.find_element_by_xpath("..").get_attribute("outerHTML");
return "jstree-open" in checkstr or "jstree-loading" in checkstr;
def selectCourseId():
if inCourseId():
return;
groups = driver.find_elements_by_xpath(
"//*[@class='jstree-icon glyphicon glyphicon-group']")
courseIdExists = False;
for i in range(len(groups)):
if (courseId + " ") in groups[i].find_element_by_xpath('..').get_attribute(
"outerText"):
groups[i].find_element_by_xpath('..').click();
courseIdExists = True;
break;
if not courseIdExists:
createCourse();
waitForGroupsLoaded();
selectCourseId();
return;
def waitForInCourseId():
while not inCourseId():
time.sleep(0.1);
return;
def createSubGroup(subGroupName):
startOpts = len(driver.find_elements_by_tag_name("option"));
driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-add_user_group_1']")[-1].find_element_by_xpath(
'..').send_keys(webdriver.common.keys.Keys.RETURN);
waitForOptionsLoaded(startOpts);
driver.find_element_by_id("label").send_keys(subGroupName);
selectOptionFromString("Groep (Handmatig)");
waitForOptionsLoaded(12 + startOpts);
selectOptionFromString(courseId);
driver.find_element_by_id("PopupAjaxOkButton1").click();
waitForSubGroupOk(startOpts);
def createCourse():
startOpts = len(driver.find_elements_by_tag_name("option"));
driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-add_user_group_1']")[-1].find_element_by_xpath(
'..').click();
waitForOptionsLoaded(startOpts);
driver.find_element_by_id("label").send_keys(courseId);
selectOptionFromString("Cursus (handmatig)");
time.sleep(1);
driver.find_element_by_id("PopupAjaxOkButton1").click();
waitForSubGroupOk(startOpts);
driver.find_element_by_id("username").send_keys("tudpeeadmin");
driver.find_element_by_id("password").send_keys("rtgeh678");
driver.find_element_by_id("loginsubmit").click();
########################## go to correct group ################################
goToGroups();
waitForGroupsLoaded();
selectCourseId();
waitForInCourseId();
time.sleep(1);
waitForGroupsLoaded();
############################## create subgroups ################################
for i in range(len(subGroupNames)):
createSubGroup(subGroupNames[i]);
waitForGroupsLoaded();
return driver;
Tools.py
"""
Created on Wed Jun 21 09:50:15 2017
@author: cwvanmierlo
"""
import time;
import sys;
import os;
def resource_path(relative_path):
    # This function is needed for PyInstaller; without it, the wrong path
    # would be resolved when the script is bundled into an .EXE.
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
def splitGroups(fileName, errorFile):
f = open(fileName, "r");
lines = f.readlines();
f.close();
f = open(errorFile, "r");
errors = f.readlines();
f.close();
newUsers = [];
existingUsers = [];
for i in range(1, len(lines)):
addToExisting = False;
for j in range(len(errors)):
seperator = " ";
if "\t" in errors[j]:
seperator = "\t";
if (i + 1)==int(errors[j].split(seperator)[0]):
addToExisting = True;
break;
if addToExisting:
existingUsers.append(lines[i]);
else:
newUsers.append(lines[i]);
newFile = fileName[0:fileName.rfind(".")].split("_formatresult")[0];
fNew = open(newFile + "_new" + ".txt", "w");
fNew.write(lines[0]);
for newUser in newUsers:
fNew.write(newUser);
fNew.close();
sortOrder = lines[0];
exVarIndex1 = sortOrder.split(";").index("username");
exVarIndex2 = sortOrder.split(";").index("grouphand");
fExisting = open(newFile + "_existing" + ".txt", "w");
fExisting.write(lines[0].split(";")[exVarIndex1] + ";" + lines[0].split(";")[exVarIndex2] + "\n");
for existingUser in existingUsers:
split = existingUser.split(";");
fExisting.write(split[exVarIndex1] + ";" + split[exVarIndex2] + "\n");
#TODO: (minor) delete last \n from file
fExisting.close();
def getGroups(course_id, fileName, seperator, write):
password = "Geheim12345!";
f = open(fileName, "r", encoding="utf-8");
lines = f.readlines();
f.close();
cleanFileName = fileName[0:fileName.rfind(".")];
sortOrder = [item.lower() for item in lines.pop(0).split("\n")[0].split(seperator)];
result = [];
#TODO: fix formatting errors once and for all
import unicodedata;
for line in lines:
editedLine = u"".join([c for c in unicodedata.normalize('NFKD', line) if not unicodedata.combining(c)]).replace("ł", "l").replace("Ł", "L");
editedLine = editedLine.split("\n")[0];
result.append(editedLine);
if "-" in course_id:
course_id_firstPart = course_id.split("-")[0];
else:
course_id_firstPart = course_id;
groupList = [];
fileLength = len(result);
if write:
f = open(cleanFileName + "_formatresult" + ".txt", "w");
#first line
f.write("firstname;lastname;grouphand;role;status;username;password;email;syncstatus\n");
for i in range(fileLength):
line = result[i].split(seperator);
if True: #TODO add exclusion lists
if not (line[sortOrder.index("groupname")] == ""):
groupName = course_id_firstPart + "-" + line[sortOrder.index("groupname")];
groupName = groupName.replace(" ", "-");
if not (groupName in groupList):
groupList.append(groupName);
currentUsername = "tudelft_" + line[sortOrder.index("username")];
if not "@" in currentUsername:
currentUsername += "@tudelft.nl";
currentFirstname = line[sortOrder.index("firstname")]
currentLastname = line[sortOrder.index("lastname")]
f.write(currentFirstname + ";" + currentLastname + ";" + groupName + ";" + "group_student,overall_filemanager" +
";" + "active" + ";" + currentUsername +
";" + password + ";" + line[sortOrder.index("email")] + ";" + "notsynced")
if not (i == (fileLength - 1)):
f.write("\n")
f.close();
else:
for i in range(fileLength):
line = result[i].split(seperator);
groupName = line[sortOrder.index("grouphand")];
groupName = groupName.replace(" ", "-");
if not (groupName in groupList):
groupList.append(groupName);
return groupList[0:len(groupList)];
def errorChecker(fileName):
class possibleError(object):
ordNumber = 0;
ordCharacter = "";
lineOccurence = 0;
colOccurence = 0;
sourceLine = "";
def __init__(self, ordNumber, ordCharacter, lineOccurence, colOccurence, sourceLine):
self.ordNumber = ordNumber;
self.ordCharacter = ordCharacter;
self.lineOccurence = lineOccurence;
self.colOccurence = colOccurence;
self.sourceLine = sourceLine;
def getOrdNumber(self):
return self.ordNumber;
def getOrdCharacter(self):
return self.ordCharacter;
def getLineOccurence(self):
return self.lineOccurence;
def getColOccurence(self):
return self.colOccurence;
def getSourceLine(self):
return self.sourceLine;
def __repr__(self):
return "ord:%d\t|\tchr:%s\t|\tline:%d\t|\tcolumn:%d\t\n" % (self.ordNumber, self.ordCharacter, self.lineOccurence, self.colOccurence);
f = open(fileName, "r");
lines = f.readlines();
f.close();
errorArray = []
for i in range(len(lines)):
numberedLine = []
for j in range(len(lines[i])):
numberedLine.append(ord(lines[i][j]));
if (max(numberedLine) > 255):
errorArray.append(possibleError(max(numberedLine), chr(max(numberedLine)), i, numberedLine.index(max(numberedLine)), lines[i]));
errorArray = errorArray[0:];
return errorArray;
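# Illustrative usage sketch (the file name is an assumption): print every line
# that contains a character above ord 255, which usually points at an encoding
# problem in the export.
#   for err in errorChecker("students_formatresult.txt"):
#       print(err)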
def createScorionGroups(subGroupNames, courseId):
from selenium import webdriver;
chrome_path = "chromedriver.exe";
chromedriver = resource_path(chrome_path);
driver = webdriver.Chrome(executable_path = chromedriver);
driver.implicitly_wait(10);
driver.get("https://scorion3.parantion.nl");
def __del__():
driver.quit();
def waitForGroupsLoaded():
while len(driver.find_elements_by_xpath(
"//*[@class='jstree-icon glyphicon glyphicon-group']")) <= 1:
if not checkTree():
openTree();
time.sleep(0.1);
return;
def waitForOptionsLoaded(n_options):
while len(driver.find_elements_by_tag_name("option")) <= n_options:
time.sleep(0.1);
return;
def waitForSubGroupOk(startOpts):
while len(driver.find_elements_by_tag_name("option")) > startOpts:
time.sleep(0.1);
return;
def selectOptionFromString(text):
options = driver.find_elements_by_tag_name("option");
for i in range(len(options)):
if text in options[i].get_attribute("outerText"):
options[i].click();
break;
return;
def goToGroups():
selected = driver.find_elements_by_class_name("selected ")[0]
correctLink = driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-user_group_1']")[-1].find_element_by_xpath(
'..');
if not (selected == correctLink):
correctLink.click();
return;
def inCourseId():
if len(driver.find_elements_by_class_name("jstree-clicked")) == 0:
return False;
return (courseId + " ") in driver.find_elements_by_class_name("jstree-clicked")[0].get_attribute("innerText");
def openTree():
elem = driver.find_elements_by_tag_name("ins")[0];
if not checkTree():
elem.click();
def checkTree():
elem = driver.find_elements_by_tag_name("ins")[0];
checkstr = elem.find_element_by_xpath("..").get_attribute("outerHTML");
return "jstree-open" in checkstr or "jstree-loading" in checkstr;
def selectCourseId():
if inCourseId():
return;
groups = driver.find_elements_by_xpath(
"//*[@class='jstree-icon glyphicon glyphicon-group']")
courseIdExists = False;
for i in range(len(groups)):
if (courseId + " ") in groups[i].find_element_by_xpath('..').get_attribute(
"outerText"):
groups[i].find_element_by_xpath('..').click();
courseIdExists = True;
break;
if not courseIdExists:
createCourse();
waitForGroupsLoaded();
selectCourseId();
return;
def waitForInCourseId():
while not inCourseId():
time.sleep(0.1);
return;
def createSubGroup(subGroupName):
startOpts = len(driver.find_elements_by_tag_name("option"));
driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-add_user_group_1']")[-1].find_element_by_xpath(
'..').send_keys(webdriver.common.keys.Keys.RETURN);
waitForOptionsLoaded(startOpts);
driver.find_element_by_id("label").send_keys(subGroupName);
selectOptionFromString("Groep (Handmatig)");
waitForOptionsLoaded(12 + startOpts);
selectOptionFromString(courseId);
driver.find_element_by_id("PopupAjaxOkButton1").click();
waitForSubGroupOk(startOpts);
def createCourse():
startOpts = len(driver.find_elements_by_tag_name("option"));
driver.find_elements_by_xpath(
"//*[@class='glyphicon glyphicon-add_user_group_1']")[-1].find_element_by_xpath(
'..').click();
waitForOptionsLoaded(startOpts);
driver.find_element_by_id("label").send_keys(courseId);
selectOptionFromString("Cursus (handmatig)");
time.sleep(1);
driver.find_element_by_id("PopupAjaxOkButton1").click();
waitForSubGroupOk(startOpts);
driver.find_element_by_id("username").send_keys("tudpeeadmin");
driver.find_element_by_id("password").send_keys("rtgeh678");
driver.find_element_by_id("loginsubmit").click();
########################## go to correct group ################################
goToGroups();
waitForGroupsLoaded();
    selectCourseId();
    waitForInCourseId();
time.sleep(1);
waitForGroupsLoaded();
############################## create subgroups ################################
for i in range(len(subGroupNames)):
createSubGroup(subGroupNames[i]);
waitForGroupsLoaded();
    return driver;
traits.rs
/// One of the great discoveries in programming is that it’s possible to write code that operates on
/// values of many different types, even types that haven’t been invented yet.
///
/// It’s called "polymorphism".
///
/// # Traits and Generics
///
/// Rust supports polymorphism with two related features: traits and generics. These concepts will
/// be familiar to many programmers, but Rust takes a fresh approach inspired by Haskell’s
/// typeclasses.
///
/// Generics and traits are closely related. For example, you can write a function to compare two
/// values and find the smaller one. The function signature would look like this:
///
/// fn min<T: Ord>(value1: T, value2: T) -> T
///
/// This function works with any type T that implements the Ord trait.
///
/// # Using Traits
///
/// A trait is a feature that any given type may or may not support. Most often, a trait represents
/// a "capability": something a type can do.
///
/// A value that implements std::io::Write can write out bytes.
///
/// A value that implements std::iter::Iterator can produce a sequence of values.
///
/// A value that implements std::clone::Clone can make clones of itself in memory.
///
/// A value that implements std::fmt::Debug can be printed using println!() with the
/// {:?} format specifier.
///
/// There is one unusual rule about trait methods: the trait itself must be in scope. Otherwise,
/// all its methods are hidden.
///
/// Rust has this rule because, as we’ll see later in this chapter, you can use traits to add new
/// methods to any type—even standard library types like u32 and str. Third-party crates can do the
/// same thing. Clearly, this could lead to naming conflicts! But since Rust makes you import the
/// traits you plan to use, crates are free to take advantage of this superpower, and conflicts are
/// rare in practice.
///
/// The reason Clone and Iterator methods work without any special imports is that they’re always
/// in scope by default: they’re part of the standard prelude, names that Rust automatically
/// imports into every module. In fact, the prelude is mostly a carefully chosen selection of
/// traits.
///
///
/// ## when to use which (trait objects vs generic functions)
///
/// * dynamic dispatch: trait objects
/// * static dispatch: generic functions with trait bounds
///
/// How to understand "trait object"? Trait objects are very similar to how Java does dynamic
/// dispatch, i.e., "polymorphism". In Java, you can have references that point to various subtypes of
/// an interface. When you call methods on the reference, depending on the concrete subtype, a
/// different implementation may get invoked. That's called "dynamic dispatch". Trait objects are
/// equivalent to those references in Java and you can use "trait objects" to do dynamic dispatch.
///
/// Both features are based on traits. They have a lot in common but there are subtle differences.
///
/// 1. Trait objects are the right choice when you need a collection of values of mixed types, all together.
///
/// trait Vegetable {...}
///
/// struct Salad<V: Vegetable> {
/// veggies: Vec<V>
/// }
///
/// This works but each such salad consists entirely of a single type of vegetable.
///
/// struct Salad {
/// veggies: Vec<Vegetable> // error: `Vegetable` does not have
/// // a constant size
/// }
///
///
/// struct Salad {
/// veggies: Vec<Box<Vegetable>>
/// }
///
/// This code works because each Box<Vegetable> can own any type of vegetable, but the box itself
/// has a constant size—two pointers—suitable for storing in a vector.
///
/// 2. Another possible reason to use trait objects is to reduce the total amount of compiled code.
/// Rust may have to compile a generic function many times, once for each type it’s used with.
/// This could make the binary large, a phenomenon called code bloat in C++ circles.
///
/// ### when to use generic functions
///
/// Generics have two important advantages over trait objects, with the result that in Rust,
/// generics are the more common choice.
///
/// 1. The first advantage is speed. Each time the Rust compiler generates machine code for a
/// generic function, it knows which types it’s working with, so it knows at that time which
/// write method to call. This is called "static dispatch", in contrast to "dynamic dispatch".
///
/// Compare that to the behavior with trait objects. Rust never knows what type of value a trait
/// object points to until run time.
///
/// 2. The second advantage of generics is that not every trait can support trait objects. Traits
/// support several features, such as static methods, that work only with generics: they rule out
/// trait objects entirely.
///
/// You can only make "object-safe traits" into trait objects. Some complex rules govern all the
/// properties that make a trait object safe, but in practice, only two rules are relevant. A
/// trait is object safe if all the methods defined in the trait have the following properties:
///
/// * The return type isn’t Self.
/// * There are no generic type parameters.
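// A minimal sketch of the object-safety rules above (both traits are
// illustrative additions, not part of the original module): `ObjectSafe` can
// be made into a trait object, while `NotObjectSafe` cannot, because its
// method returns Self.
pub trait ObjectSafe {
    fn describe(&self) -> String;
}
pub trait NotObjectSafe {
    fn duplicate(&self) -> Self; // returning Self rules out trait objects
}
// let ok: Box<dyn ObjectSafe> = ...;     // fine
// let bad: Box<dyn NotObjectSafe> = ...; // error: the trait is not object safe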
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::io;
use std::io::Write;
use std::ops::Mul;
pub fn run() {
let tweet = Tweet {
username: String::from("horse_ebooks"),
content: String::from("of course, as you probably already know, people"),
reply: false,
retweet: false,
};
println!("1 new tweet: {}", tweet.summarize());
let article = NewsArticle {
headline: String::from("Make America Great Again"),
location: String::from("Washington DC"),
author: String::from("Trump"),
content: String::from("Make America Great Again"),
};
println!("1 news article: {}", article.summarize3());
notify(tweet);
notify2(article);
}
pub trait Summary {
fn summarize(&self) -> String;
// trait can have methods with default implementation
// this can be overridden by types that implement this trait
fn summarize2(&self) -> String {
String::from("(Read more...)")
}
// Default implementations can call other methods in the same trait, even if those other
// methods don’t have a default implementation. In this way, a trait can provide a lot of
// useful functionality and only require implementors to specify a small part of it.
// This is the "template pattern". The template itself is implemented in the trait while
// various hooks are implemented by the types themselves.
fn summarize3(&self) -> String {
format!("(Read more from {}...)", self.summarize_author())
}
fn summarize_author(&self) -> String;
}
pub struct NewsArticle {
pub headline: String,
pub location: String,
pub author: String,
pub content: String,
}
impl Summary for NewsArticle {
fn summarize(&self) -> String {
format!("{}, by {} ({})", self.headline, self.author, self.location)
}
fn summarize_author(&self) -> String {
format!("by {}", self.author)
}
}
impl NewsArticle {
// You can't define this function in the "impl Summary for NewsArticle" block
    // because it's not a method of the Summary trait!
pub fn get_headline(&self) -> &String {
&self.headline
}
}
pub struct Tweet {
pub username: String,
pub content: String,
pub reply: bool,
pub retweet: bool,
}
impl Summary for Tweet {
fn summarize(&self) -> String {
format!("{}: {}", self.username, self.content)
}
fn summarize_author(&self) -> String {
format!("@{}", self.username)
}
}
// traits as parameters
// this function can be called with any type that implements Summary
pub fn notify(item: impl Summary) {
println!("Breaking news! {}", item.summarize());
}
// "trait bound"
// this is equivalent to the function above, which is actually syntax sugar
pub fn notify2<T: Summary>(item: T) {
println!("Breaking news! {}", item.summarize());
}
pub trait Display {
fn show(&self) -> String;
}
// specify multiple traits using +
pub fn notify3<T: Summary + Display>(item: T) {
println!("Breaking news! {}", item.summarize());
println!("Show me the item: {}", item.show());
}
// "trait bound" using "where" clause between return type and open curly brace
// this is easier to read when you have many trait bounds
pub fn some_function<T, U>(_t: T, _u: U) -> i32
where
T: Display + Clone,
U: Clone + Summary,
{
99
}
// returning types that implement traits
pub fn returns_summarizable() -> impl Summary {
Tweet {
username: String::from("horse_ebooks"),
content: String::from("of course, as you probably already know, people"),
reply: false,
retweet: false,
}
}
// This is a plain function that takes a "trait object".
pub fn say_hello(out: &mut dyn Write) -> std::io::Result<()> {
out.write_all(b"hello world\n")?;
out.flush()
}
// In contrast, this is a generic function whose type parameter W is bound by "Write" trait.
pub fn say_hello2<W: Write>(out: &mut W) -> std::io::Result<()> {
out.write_all(b"hello world\n")?;
out.flush()
}
// Find the top occurring elements from a vector.
// This is how to specify a type parameter that implements multiple traits.
pub fn top_ten<T>(values: &[T]) -> Vec<&T>
where
T: Debug + Hash + Eq,
{
let mut map = HashMap::new();
for value in values {
let counter = map.entry(value).or_insert(0);
*counter += 1;
}
let mut map_vec: Vec<_> = map.into_iter().collect();
map_vec.sort_by(|a, b| b.1.cmp(&a.1));
map_vec.into_iter().map(|a| a.0).take(10).collect()
}
pub trait Mapper {}
pub trait Reducer {}
pub trait Serialize {}
pub struct DataSet {}
// Generic functions can have multiple type parameters: M and R.
pub fn run_query<M: Mapper + Serialize, R: Reducer + Serialize>(
_data: &DataSet,
_map: M,
_reduce: R,
) {
unimplemented!()
}
// Alternative syntax: bounds can be specified in the where clause
pub fn run_query2<M, R>(_data: &DataSet, _map: M, _reduce: R)
where
M: Mapper + Serialize,
R: Reducer + Serialize,
{
unimplemented!()
}
pub trait MeasureDistance {}
// A generic function can have both lifetime parameters and type parameters. Lifetime parameters
// come first.
pub fn nearest<'t, 'c, P>(_target: &'t P, _candidates: &'c [P]) -> &'c P
where
P: MeasureDistance,
{
unimplemented!()
}
/// This is a generic function. It works with parameters that implement the "Ord" trait.
/// The compiler generates custom machine code for each type T that you actually use.
pub fn min<T: Ord>(m: T, n: T) -> T {
if m < n {
m
} else {
n
}
}
/// Rust lets you implement any trait on any type, as long as either the trait or the type is
/// introduced in the current crate. This means that any time you want to add a method to any type,
/// you can use a trait to do it. This is called an "extension trait".
pub trait IsEmoji {
fn is_emoji(&self) -> bool;
}
impl IsEmoji for char {
fn is_emoji(&self) -> bool {
unimplemented!()
}
}
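// Illustrative call through the extension trait (a sketch; it would panic
// until is_emoji gets a real body): once IsEmoji is in scope, the method
// appears on plain char values.
// let crab: char = '🦀';
// assert!(crab.is_emoji());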
/// We said earlier that when you implement a trait, either the trait or the type must be new in
/// the current crate. This is called the "coherence rule". It helps Rust ensure that trait
/// implementations are unique. Your code can’t "impl Write for u8", because both Write and u8 are
/// defined in the standard library. If Rust let crates do that, there could be multiple
/// implementations of Write for u8, in different crates, and Rust would have no reasonable way to
/// decide which implementation to use for a given method call.
/// You can even use a generic impl block to add an extension trait to a whole family of types at once.
pub struct HtmlDocument {}
pub trait WriteHtml {
fn write_html(&mut self, html: &HtmlDocument) -> io::Result<()>;
}
/// You can write HTML to any std::io writer.
impl<W: Write> WriteHtml for W {
fn write_html(&mut self, _html: &HtmlDocument) -> io::Result<()> {
unimplemented!()
}
}
/// Self in traits
///
/// A trait can use the keyword Self as a type. It refers to whatever type ends up implementing the trait.
pub trait MyClone {
fn clone(&self) -> Self;
}
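// A minimal sketch of implementing the trait (Point is a hypothetical type,
// not part of the original module): inside the impl, Self resolves to Point.
#[derive(Debug, PartialEq)]
pub struct Point {
    x: i32,
    y: i32,
}
impl MyClone for Point {
    fn clone(&self) -> Self {
        Point { x: self.x, y: self.y }
    }
}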
/// Subtraits: we can declare that a trait is an extension of another trait.
/// This means that every type that implements Creature must also implement the Display trait.
pub trait Creature: Display {
fn position(&self) -> (i32, i32);
}
// impl Display for Broom {}
// impl Creature for Broom {}
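// A minimal sketch realizing the commented-out stubs above (Broom is a
// hypothetical type): implementing Creature forces us to implement the
// Display trait defined earlier in this module as well.
pub struct Broom;
impl Display for Broom {
    fn show(&self) -> String {
        String::from("broom")
    }
}
impl Creature for Broom {
    fn position(&self) -> (i32, i32) {
        (0, 0)
    }
}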
pub trait Animal {
// Instance methods
fn name(&self) -> &'static str;
fn noise(&self) -> &'static str;
// Traits can provide default implementation.
fn talk(&self) {
println!("{} says {}", self.name(), self.noise());
}
}
pub struct Sheep {
naked: bool,
name: &'static str,
}
impl Sheep {
fn is_naked(&self) -> bool {
self.naked
}
pub fn shear(&mut self) {
if self.is_naked() {
// You can call the trait method "name()" here because Sheep implements
// the Animal trait.
println!("{} is already naked...", self.name());
} else {
println!("{} gets a haircut!", self.name);
self.naked = true;
}
}
}
impl Animal for Sheep {
fn name(&self) -> &'static str {
self.name
}
fn noise(&self) -> &'static str {
if self.is_naked() {
"baaaaaa?"
} else {
"baaaaaa!"
}
}
// Default implementation can be overridden.
fn talk(&self) {
println!("{} pauses briefly... {}", self.name(), self.noise());
}
}
/// The compiler is capable of providing basic implementations for some traits via
/// the #[derive] attribute. The following is a list of derivable traits:
///
/// * Comparison traits: Eq, PartialEq, Ord, PartialOrd.
/// * Clone: to create T from &T via a copy.
/// * Copy: to give a type 'copy semantics' instead of 'move semantics'.
/// * Hash: to compute a hash from &T.
/// * Default: to create an empty instance of a data type.
/// * Debug: to format a value using the {:?} formatter.
/// Returning Traits with "dyn"
///
/// https://doc.rust-lang.org/edition-guide/rust-2018/trait-system/dyn-trait-for-trait-objects.html
///
/// The Rust compiler needs to know how much space every function's return type requires. This
/// means all your functions have to return a concrete type. Unlike other languages, if you have a
/// trait like Animal, you can't write a function that returns Animal, because its different
/// implementations will need different amounts of memory.
///
/// However, there's an easy workaround. Instead of returning a trait object directly, our
/// functions return a Box which contains some Animal. A box is just a reference to some memory in
/// the heap. Because a reference has a statically-known size, and the compiler can guarantee it
/// points to a heap-allocated Animal, we can return a trait from our function!
///
/// Rust tries to be as explicit as possible whenever it allocates memory on the heap. So if your
/// function returns a pointer-to-trait-on-heap in this way, you need to write the return type with
/// the dyn keyword, e.g. Box<dyn Animal>.
pub struct Cow {}
impl Animal for Cow {
    fn name(&self) -> &'static str {
"Dave"
}
fn noise(&self) -> &'static str {
"Moo"
}
}
pub fn random_animal(random_number: f64) -> Box<dyn Animal> {
if random_number < 0.5 {
Box::new(Sheep {
name: "Bob",
naked: true,
})
} else {
Box::new(Cow {})
}
}
/// Operator overloading
/// https://doc.rust-lang.org/core/ops/
/// In Rust, many of the operators can be overloaded via traits. That is, some operators can be used
/// to accomplish different tasks based on their input arguments. This is possible because operators
/// are syntactic sugar for method calls. For example, the + operator in a + b calls the add method
/// (as in a.add(b)). This add method is part of the Add trait. Hence, the + operator can be used by
/// any implementor of the Add trait.
pub struct Rectangle {
width: u32,
height: u32,
}
impl Mul<u32> for Rectangle {
type Output = Self;
fn mul(self, times: u32) -> Self::Output {
Rectangle {
width: self.width * times,
height: self.height * times,
}
}
}
/// impl Trait
/// If your function returns a type that implements MyTrait, you can write its return
/// type as -> impl MyTrait. This can help simplify your type signatures quite a lot!
pub fn combine_vecs(v: Vec<i32>, u: Vec<i32>) -> impl Iterator<Item = i32> {
// You could also write the following which is a lot more complicated.
// -> std::iter::Chain<std::vec::IntoIter<i32>, std::vec::IntoIter<i32>> {
v.into_iter().chain(u.into_iter())
}
/// More importantly, some Rust types can't be written out. For example, every
/// closure has its own unnamed concrete type. Before impl Trait syntax, you had
/// to allocate on the heap in order to return a closure. But now you can do it
/// all statically, like this:
pub fn make_adder(y: i32) -> impl Fn(i32) -> i32 {
move |x: i32| x + y
}
/// Polymorphism via trait objects
pub trait Draw {
fn draw(&self);
}
pub struct Screen {
pub components: Vec<Box<dyn Draw>>,
}
impl Default for Screen {
fn default() -> Self {
Screen { components: vec![] }
}
}
impl Screen {
pub fn run(&self) {
for component in self.components.iter() {
component.draw();
}
}
pub fn add_component(&mut self, draw: Box<dyn Draw>) -> &mut Self {
self.components.push(draw);
self
}
}
#[derive(Debug)]
pub struct Button {
pub width: u32,
pub height: u32,
pub label: String,
}
impl Draw for Button {
fn draw(&self) {
println!("Drawing a {:?}", self);
}
}
#[derive(Debug)]
struct SelectBox {
width: u32,
height: u32,
options: Vec<String>,
}
impl Draw for SelectBox {
fn draw(&self) {
println!("Drawing a {:?}", self);
}
}
/// Implement the "state pattern" via trait objects.
trait State {
// "self: Box<Self>" means that the method is only valid when called on a Box holding the type.
fn request_review(self: Box<Self>) -> Box<dyn State>;
fn approve(self: Box<Self>) -> Box<dyn State>;
fn reject(self: Box<Self>) -> Box<dyn State>;
fn content<'a>(&self, _post: &'a Post) -> &'a str {
""
}
}
struct Draft {}
/// Now we can start seeing the advantages of the state pattern: the request_review method on
/// Post is the same no matter its state value. Each state is responsible for its own rules.
impl State for Draft {
fn request_review(self: Box<Self>) -> Box<dyn State> {
Box::new(PendingReview {})
}
fn approve(self: Box<Self>) -> Box<dyn State> {
self
}
fn reject(self: Box<Self>) -> Box<dyn State> {
self
}
}
struct PendingReview {}
impl State for PendingReview {
fn request_review(self: Box<Self>) -> Box<dyn State> {
self
}
fn approve(self: Box<Self>) -> Box<dyn State> {
Box::new(Published {})
}
fn reject(self: Box<Self>) -> Box<dyn State> {
Box::new(Draft {})
}
}
struct Published {}
impl State for Published {
fn request_review(self: Box<Self>) -> Box<dyn State> {
self
}
fn approve(self: Box<Self>) -> Box<dyn State> {
self
}
fn reject(self: Box<Self>) -> Box<dyn State> {
self
}
fn content<'a>(&self, post: &'a Post) -> &'a str {
post.content.as_ref()
}
}
pub struct Post {
/// To consume the old state, the request_review method needs to take ownership of the state
/// value. This is where the Option in the state field of Post comes in: we call the take
/// method to take the Some value out of the state field and leave a None in its place,
/// because Rust doesn’t let us have unpopulated fields in structs. This lets us move the
/// state value out of Post rather than borrowing it. Then we’ll set the post’s state value
/// to the result of this operation.
state: Option<Box<dyn State>>,
content: String,
}
/// Post knows nothing about the various behaviors. It relies on various State objects to do
/// their jobs.
impl Default for Post {
fn default() -> Self {
Self {
state: Some(Box::new(Draft {})),
content: String::new(),
}
}
}
impl Post {
// This behavior doesn’t depend on the state the post is in, so it’s not part of the state
// pattern. The add_text method doesn’t interact with the state field at all, but it is part
// of the behavior we want to support.
pub fn add_text(&mut self, text: &str) {
self.content.push_str(text);
}
pub fn content(&self) -> &str {
match &self.state {
Some(s) => s.content(self),
None => "",
}
}
pub fn request_review(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.request_review());
}
}
pub fn reject(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.reject());
}
}
pub fn approve(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.approve());
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_min() {
assert_eq!(min(3, 5), 3);
assert_eq!(min(30, 5), 5);
}
#[test]
fn traits_need_to_be_in_scope() {
// The Write trait needs to be in scope. Otherwise, all its methods (such as "write_all")
// are hidden.
use std::io::Write;
let mut buf: Vec<u8> = vec![];
buf.write_all(b"hello").unwrap();
assert_eq!(5, buf.len());
// Note that only Vec<u8> implements the Write trait. So the code below doesn't work!
// let mut buf: Vec<String> = vec![];
// buf.write_all("hello world").unwrap();
// assert_eq!(11, buf.len());
}
#[test]
fn trait_objects() {
let mut buf: Vec<u8> = vec![];
// Rust doesn’t permit variables of type Write!
// This line doesn't compile because a variable's size has to be known at compile time and
// types that implement Write can be any size.
// let writer: Write = buf;
        // A reference to a trait type, like writer, is called a "trait object". Like any other
// reference, a trait object points to some value, it has a lifetime, and it can be either
// mut or shared. The size of a reference is fixed!
let _writer: &mut dyn Write = &mut buf;
// What makes a trait object different is that Rust usually doesn’t know the type of the
// referent at compile time. So a trait object includes a little extra information about
// the referent’s type. This is strictly for Rust’s own use behind the scenes: when you
// call writer.write(data), Rust needs the type information to dynamically call the right
// write method depending on the type of *writer. You can’t query the type information
// directly, and Rust does not support downcasting from the trait object &mut Write back to
// a concrete type like Vec<u8>. In other words, you can only work with the "generic type"
// of the trait itself.
//
// In memory, a trait object is a "fat pointer" consisting of two pointers:
// 1. data pointer: a pointer to the value, plus
// 2. vtable pointer: a pointer to a table representing that value's type.
// (Vec<u8> in this example)
//
// A vtable is essentially a struct of function pointers, pointing to the concrete piece of
// machine code for each method in the implementation. A method call like
// trait_object.method() will retrieve the correct pointer out of the vtable and then do a
// dynamic call of it.
// Rust automatically converts ordinary references into trait objects when needed. Let's
// say "say_hello" is a function that takes a "&mut Write", this works:
//
// let mut local_file: File = File::create("hello.txt")?;
// say_hello(&mut local_file)?; // Rust converts "&mut File" to "&mut Write"
// This kind of conversion is the only way to create a trait object. What the computer is
// actually doing here is very simple. At the point where the conversion happens, Rust
// knows the referent’s true type (in this case, File), so it just adds the address of the
// appropriate "vtable", turning the regular pointer into a fat pointer.
}
#[test]
fn test_generic_functions() {}
#[test]
fn test_top_ten() {
let names = vec![
String::from("Oakland"),
String::from("Oakland"),
String::from("Oakland"),
String::from("Alameda"),
String::from("San Francisco"),
String::from("San Francisco"),
];
let top10 = top_ten(&names);
assert_eq!(vec!["Oakland", "San Francisco", "Alameda"], top10);
}
#[test]
fn test_sheep() {
let mut sheep = Sheep {
naked: true,
name: "Dolly",
};
sheep.talk();
sheep.shear();
sheep.talk();
}
#[test]
fn return_trait_object() {
let animal = random_animal(0.3);
assert_eq!("baaaaaa?", animal.noise());
}
#[test]
fn operator_overloading() {
let rect = Rectangle {
width: 10,
height: 20,
};
let rect2 = rect * 10;
assert_eq!(100, rect2.width);
assert_eq!(200, rect2.height);
}
#[test]
fn test_combine_vecs() {
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5];
let mut v3 = combine_vecs(v1, v2);
assert_eq!(Some(1), v3.next());
assert_eq!(Some(2), v3.next());
assert_eq!(Some(3), v3.next());
assert_eq!(Some(4), v3.next());
assert_eq!(Some(5), v3.next());
assert_eq!(None, v3.next());
}
#[test]
fn test_make_adder() {
let add_one = make_adder(1);
assert_eq!(2, add_one(1));
}
#[test]
fn test_screen() {
let mut screen = Screen::default();
screen
.add_component(Box::new(Button {
width: 50,
height: 10,
label: String::from("OK"),
}))
.add_component(Box::new(SelectBox {
width: 75,
height: 10,
options: vec![
String::from("Yes"),
String::from("No"),
String::from("Maybe"),
],
}));
screen.run();
}
#[test]
fn test_post() {
let mut post = Post::default();
post.add_text("I ate a salad for lunch today");
assert_eq!("", post.content());
post.request_review();
assert_eq!("", post.content());
post.reject();
assert_eq!("", post.content());
post.request_review();
assert_eq!("", post.content());
post.approve();
assert_eq!("I ate a salad for lunch today", post.content());
}
}
traits.rs
/// One of the great discoveries in programming is that it’s possible to write code that operates on
/// values of many different types, even types that haven’t been invented yet.
///
/// It’s called "polymorphism".
///
/// # Traits and Generics
///
/// Rust supports polymorphism with two related features: traits and generics. These concepts will
/// be familiar to many programmers, but Rust takes a fresh approach inspired by Haskell’s
/// typeclasses.
///
/// Generics and traits are closely related. For example, you can write a function to compare two
/// values and find the smaller one. The function signature would look like this:
///
/// fn min<T: Ord>(value1: T, value2: T) -> T
///
/// This function works with any type T that implements the Ord trait.
///
/// # Using Traits
///
/// A trait is a feature that any given type may or may not support. Most often, a trait represents
/// a "capability": something a type can do.
///
/// A value that implements std::io::Write can write out bytes.
///
/// A value that implements std::iter::Iterator can produce a sequence of values.
///
/// A value that implements std::clone::Clone can make clones of itself in memory.
///
/// A value that implements std::fmt::Debug can be printed using println!() with the
/// {:?} format specifier.
///
/// There is one unusual rule about trait methods: the trait itself must be in scope. Otherwise,
/// all its methods are hidden.
///
/// Rust has this rule because, as we’ll see later in this chapter, you can use traits to add new
/// methods to any type—even standard library types like u32 and str. Third-party crates can do the
/// same thing. Clearly, this could lead to naming conflicts! But since Rust makes you import the
/// traits you plan to use, crates are free to take advantage of this superpower, and conflicts are
/// rare in practice.
///
/// The reason Clone and Iterator methods work without any special imports is that they’re always
/// in scope by default: they’re part of the standard prelude, names that Rust automatically
/// imports into every module. In fact, the prelude is mostly a carefully chosen selection of
/// traits.
///
///
/// ## when to use which (trait objects vs generic functions)
///
/// * dynamic dispatch: trait objects
/// * static dispatch: generic functions with trait bounds
///
/// How to understand "trait object"? Trait objects are very similar to how Java does dynamic
/// dispatch, i.e., "polymorphism". In Java, you can have references that point to various subtypes of
/// an interface. When you call methods on the reference, depending on the concrete subtype, a
/// different implementation may get invoked. That's called "dynamic dispatch". Trait objects are
/// equivalent to those references in Java and you can use "trait objects" to do dynamic dispatch.
///
/// Both features are based on traits. They have a lot in common but there are subtle differences.
///
/// 1. Trait objects are the right choice when you need a collection of values of mixed types, all together.
///
/// trait Vegetable {...}
///
/// struct Salad<V: Vegetable> {
/// veggies: Vec<V>
/// }
///
/// This works but each such salad consists entirely of a single type of vegetable.
///
/// struct Salad {
/// veggies: Vec<Vegetable> // error: `Vegetable` does not have
/// // a constant size
/// }
///
///
/// struct Salad {
/// veggies: Vec<Box<Vegetable>>
/// }
///
/// This code works because each Box<Vegetable> can own any type of vegetable, but the box itself
/// has a constant size—two pointers—suitable for storing in a vector.
///
/// 2. Another possible reason to use trait objects is to reduce the total amount of compiled code.
/// Rust may have to compile a generic function many times, once for each type it’s used with.
/// This could make the binary large, a phenomenon called code bloat in C++ circles.
///
/// ### when to use generic functions
///
/// Generics have two important advantages over trait objects, with the result that in Rust,
/// generics are the more common choice.
///
/// 1. The first advantage is speed. Each time the Rust compiler generates machine code for a
/// generic function, it knows which types it’s working with, so it knows at that time which
/// write method to call. This is called "static dispatch", in contrast to "dynamic dispatch".
///
/// Compare that to the behavior with trait objects. Rust never knows what type of value a trait
/// object points to until run time.
///
/// 2. The second advantage of generics is that not every trait can support trait objects. Traits
/// support several features, such as static methods, that work only with generics: they rule out
/// trait objects entirely.
///
/// You can only make "object-safe traits" into trait objects. Some complex rules govern all the
/// properties that make a trait object safe, but in practice, only two rules are relevant. A
/// trait is object safe if all the methods defined in the trait have the following properties:
///
/// * The return type isn’t Self.
/// * There are no generic type parameters.
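// A small sketch contrasting the two dispatch strategies described above (both
// helper functions are illustrative additions, not part of the original
// module): the generic version is monomorphized per concrete type, while the
// trait-object version resolves the method through a vtable at run time.
fn static_dispatch<T: std::fmt::Debug>(value: T) {
    println!("{:?}", value); // the compiler knows T's concrete type here
}
fn dynamic_dispatch(value: &dyn std::fmt::Debug) {
    println!("{:?}", value); // the method is looked up via the vtable
}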
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::io;
use std::io::Write;
use std::ops::Mul;
pub fn run() {
let tweet = Tweet {
username: String::from("horse_ebooks"),
content: String::from("of course, as you probably already know, people"),
reply: false,
retweet: false,
};
println!("1 new tweet: {}", tweet.summarize());
let article = NewsArticle {
headline: String::from("Make America Great Again"),
location: String::from("Washington DC"),
author: String::from("Trump"),
content: String::from("Make America Great Again"),
};
println!("1 news article: {}", article.summarize3());
notify(tweet);
notify2(article);
}
pub trait Summary {
fn summarize(&self) -> String;
// trait can have methods with default implementation
// this can be overridden by types that implement this trait
fn summarize2(&self) -> String {
String::from("(Read more...)")
}
// Default implementations can call other methods in the same trait, even if those other
// methods don’t have a default implementation. In this way, a trait can provide a lot of
// useful functionality and only require implementors to specify a small part of it.
// This is the "template pattern". The template itself is implemented in the trait while
// various hooks are implemented by the types themselves.
fn summarize3(&self) -> String {
format!("(Read more from {}...)", self.summarize_author())
}
fn summarize_author(&self) -> String;
}
pub struct NewsArticle {
pub headline: String,
pub location: String,
pub author: String,
pub content: String,
}
impl Summary for NewsArticle {
fn summarize(&self) -> String {
format!("{}, by {} ({})", self.headline, self.author, self.location)
}
fn summarize_author(&self) -> String {
format!("by {}", self.author)
}
}
impl NewsArticle {
// You can't define this function in the "impl Summary for NewsArticle" block
    // because it's not a method of the Summary trait!
pub fn get_headline(&self) -> &String {
&self.headline
}
}
pub struct Tweet {
pub username: String,
pub content: String,
pub reply: bool,
pub retweet: bool,
}
impl Summary for Tweet {
fn summarize(&self) -> String {
format!("{}: {}", self.username, self.content)
}
fn summarize_author(&self) -> String {
format!("@{}", self.username)
}
}
// traits as parameters
// this function can be called with any type that implements Summary
pub fn notify(item: impl Summary) {
println!("Breaking news! {}", item.summarize());
}
// "trait bound"
// this is equivalent to the function above, which is actually syntax sugar
pub fn notify2<T: Summary>(item: T) {
println!("Breaking news! {}", item.summarize());
}
pub trait Display {
fn show(&self) -> String;
}
// specify multiple traits using +
pub fn notify3<T: Summary + Display>(item: T) {
println!("Breaking news! {}", item.summarize());
println!("Show me the item: {}", item.show());
}
// "trait bound" using "where" clause between return type and open curly brace
// this is easier to read when you have many trait bounds
pub fn some_function<T, U>(_t: T, _u: U) -> i32
where
T: Display + Clone,
U: Clone + Summary,
{
99
}
// returning types that implement traits
pub fn returns_summarizable() -> impl Summary {
Tweet {
username: String::from("horse_ebooks"),
content: String::from("of course, as you probably already know, people"),
reply: false,
retweet: false,
}
}
// This is a plain function that takes a "trait object".
pub fn say_hello(out: &mut dyn Write) -> std::io::Result<()> {
out.write_all(b"hello world\n")?;
out.flush()
}
// In contrast, this is a generic function whose type parameter W is bound by "Write" trait.
pub fn say_hello2<W: Write>(out: &mut W) -> std::io::Result<()> {
out.write_all(b"hello world\n")?;
out.flush()
}
// Find the top occurring elements from a vector.
// This is how to specify a type parameter that implements multiple traits.
pub fn top_ten<T>(values: &[T]) -> Vec<&T>
where
T: Debug + Hash + Eq,
{
let mut map = HashMap::new();
for value in values {
let counter = map.entry(value).or_insert(0);
*counter += 1;
}
let mut map_vec: Vec<_> = map.into_iter().collect();
map_vec.sort_by(|a, b| b.1.cmp(&a.1));
map_vec.into_iter().map(|a| a.0).take(10).collect()
}
pub trait Mapper {}
pub trait Reducer {}
pub trait Serialize {}
pub struct DataSet {}
// Generic functions can have multiple type parameters: M and R.
pub fn run_query<M: Mapper + Serialize, R: Reducer + Serialize>(
_data: &DataSet,
_map: M,
_reduce: R,
) {
unimplemented!()
}
// Alternative syntax: bounds can be specified in the where clause
pub fn run_query2<M, R>(_data: &DataSet, _map: M, _reduce: R)
where
M: Mapper + Serialize,
R: Reducer + Serialize,
{
unimplemented!()
}
pub trait MeasureDistance {}
// A generic function can have both lifetime parameters and type parameters. Lifetime parameters
// come first.
pub fn nearest<'t, 'c, P>(_target: &'t P, _candidates: &'c [P]) -> &'c P
where
P: MeasureDistance,
{
unimplemented!()
}
/// This is a generic function. It works with parameters that implement the "Ord" trait.
/// The compiler generates custom machine code for each type T that you actually use.
pub fn min<T: Ord>(m: T, n: T) -> T {
if m < n {
m
} else {
n
}
}
/// Rust lets you implement any trait on any type, as long as either the trait or the type is
/// introduced in the current crate. This means that any time you want to add a method to any type,
/// you can use a trait to do it. This is called an "extension trait".
pub trait IsEmoji {
fn is_emoji(&self) -> bool;
}
impl IsEmoji for char {
fn is_emoji(&self) -> bool {
unimplemented!()
}
}
/// We said earlier that when you implement a trait, either the trait or the type must be new in
/// the current crate. This is called the "coherence rule". It helps Rust ensure that trait
/// implementations are unique. Your code can’t "impl Write for u8", because both Write and u8 are
/// defined in the standard library. If Rust let crates do that, there could be multiple
/// implementations of Write for u8, in different crates, and Rust would have no reasonable way to
/// decide which implementation to use for a given method call.
/// You can even use a generic impl block to add an extension trait to a whole family of types at once.
pub struct HtmlDocument {}
pub trait WriteHtml {
fn write_html(&mut self, html: &HtmlDocument) -> io::Result<()>;
}
/// You can write HTML to any std::io writer.
impl<W: Write> WriteHtml for W {
fn write_html(&mut self, _html: &HtmlDocument) -> io::Result<()> {
unimplemented!()
}
}
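/// A sketch of the usual workaround for the coherence rule described above
/// (`Wrapper` is a hypothetical type, not part of the original module): we
/// can't implement a foreign trait for a foreign type, but we can wrap the
/// foreign type in a local "newtype" and implement the trait on the wrapper.
/// The path is written out in full to avoid clashing with the local Display trait.
pub struct Wrapper(pub Vec<u8>);
impl std::fmt::Display for Wrapper {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{} bytes", self.0.len())
    }
}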
/// Self in traits
///
/// A trait can use the keyword Self as a type. It refers to whatever type ends up implementing the trait.
pub trait MyClone {
fn clone(&self) -> Self;
}
/// Subtraits: we can declare that a trait is an extension of another trait.
/// This means that every type that implements Creature must also implement the Display trait.
pub trait Creature: Display {
fn position(&self) -> (i32, i32);
}
// impl Display for Broom {}
// impl Creature for Broom {}
pub trait Animal {
// Instance methods
fn name(&self) -> &'static str;
fn noise(&self) -> &'static str;
// Traits can provide default implementation.
fn talk(&self) {
println!("{} says {}", self.name(), self.noise());
}
}
pub struct Sheep {
naked: bool,
name: &'static str,
}
impl Sheep {
fn is_naked(&self) -> bool {
self.naked
}
pub fn shear(&mut self) {
if self.is_naked() {
// You can call the trait method "name()" here because Sheep implements
// the Animal trait.
println!("{} is already naked...", self.name());
} else {
println!("{} gets a haircut!", self.name);
self.naked = true;
}
}
}
impl Animal for Sheep {
fn name(&self) -> &'static str {
self.name
}
fn noise(&self) -> &'static str {
if self.is_naked() {
"baaaaaa?"
} else {
"baaaaaa!"
}
}
// Default implementation can be overridden.
fn talk(&self) {
println!("{} pauses briefly... {}", self.name(), self.noise());
}
}
/// The compiler is capable of providing basic implementations for some traits via
/// the #[derive] attribute. The following is a list of derivable traits:
///
/// * Comparison traits: Eq, PartialEq, Ord, PartialOrd.
/// * Clone: to create T from &T via a copy.
/// * Copy: to give a type 'copy semantics' instead of 'move semantics'.
/// * Hash: to compute a hash from &T.
/// * Default: to create an empty instance of a data type.
/// * Debug: to format a value using the {:?} formatter.
/// Returning Traits with "dyn"
///
/// https://doc.rust-lang.org/edition-guide/rust-2018/trait-system/dyn-trait-for-trait-objects.html
///
/// The Rust compiler needs to know how much space every function's return type requires. This
/// means all your functions have to return a concrete type. Unlike other languages, if you have a
/// trait like Animal, you can't write a function that returns Animal, because its different
/// implementations will need different amounts of memory.
///
/// However, there's an easy workaround. Instead of returning a trait object directly, our
/// functions return a Box which contains some Animal. A box is just a reference to some memory in
/// the heap. Because a reference has a statically-known size, and the compiler can guarantee it
/// points to a heap-allocated Animal, we can return a trait from our function!
///
/// Rust tries to be as explicit as possible whenever it allocates memory on the heap. So if your
/// function returns a pointer-to-trait-on-heap in this way, you need to write the return type with
/// the dyn keyword, e.g. Box<dyn Animal>.
pub struct Cow {}
impl Animal for Cow {
fn name(&self) -> &'static str {
"Dave"
}
fn noise(&self) -> &'static str {
"Moo"
}
}
pub fn random_animal(random_number: f64) -> Box<dyn Animal> {
if random_number < 0.5 {
Box::new(Sheep {
name: "Bob",
naked: true,
})
} else {
Box::new(Cow {})
}
}
/// Operator overloading
/// https://doc.rust-lang.org/core/ops/
/// In Rust, many of the operators can be overloaded via traits. That is, some operators can be used
/// to accomplish different tasks based on their input arguments. This is possible because operators
/// are syntactic sugar for method calls. For example, the + operator in a + b calls the add method
/// (as in a.add(b)). This add method is part of the Add trait. Hence, the + operator can be used by
/// any implementor of the Add trait.
pub struct Rectangle {
width: u32,
height: u32,
}
impl Mul<u32> for Rectangle {
type Output = Self;
fn mul(self, times: u32) -> Self::Output {
Rectangle {
width: self.width * times,
height: self.height * times,
}
}
}
/// impl Trait
/// If your function returns a type that implements MyTrait, you can write its return
/// type as -> impl MyTrait. This can help simplify your type signatures quite a lot!
pub fn combine_vecs(v: Vec<i32>, u: Vec<i32>) -> impl Iterator<Item = i32> {
// You could also write the following which is a lot more complicated.
// -> std::iter::Chain<std::vec::IntoIter<i32>, std::vec::IntoIter<i32>> {
v.into_iter().chain(u.into_iter())
}
/// More importantly, some Rust types can't be written out. For example, every
/// closure has its own unnamed concrete type. Before impl Trait syntax, you had
/// to allocate on the heap in order to return a closure. But now you can do it
/// all statically, like this:
pub fn make_adder(y: i32) -> impl Fn(i32) -> i32 {
move |x: i32| x + y
}
/// Polymorphism via trait objects
pub trait Draw {
fn draw(&self);
}
pub struct Screen {
pub components: Vec<Box<dyn Draw>>,
}
impl Default for Screen {
fn default() -> Self {
Screen { components: vec![] }
}
}
impl Screen {
pub fn run(&self) {
for component in self.components.iter() {
component.draw();
}
}
pub fn add_component(&mut self, draw: Box<dyn Draw>) -> &mut Self {
self.components.push(draw);
self
}
}
#[derive(Debug)]
pub struct Button {
pub width: u32,
pub height: u32,
pub label: String,
}
impl Draw for Button {
fn draw(&self) {
println!("Drawing a {:?}", self);
}
}
#[derive(Debug)]
struct SelectBox {
width: u32,
height: u32,
options: Vec<String>,
}
impl Draw for SelectBox {
fn draw(&self) {
println!("Drawing a {:?}", self);
}
}
/// Implement the "state pattern" via trait objects.
trait State {
// "self: Box<Self>" means that the method is only valid when called on a Box holding the type.
fn request_review(self: Box<Self>) -> Box<dyn State>;
fn approve(self: Box<Self>) -> Box<dyn State>;
fn reject(self: Box<Self>) -> Box<dyn State>;
fn content<'a>(&self, _post: &'a Post) -> &'a str {
""
}
}
struct Draft {}
/// Now we can start seeing the advantages of the state pattern: the request_review method on
/// Post is the same no matter its state value. Each state is responsible for its own rules.
impl State for Draft {
fn request_review(self: Box<Self>) -> Box<dyn State> {
Box::new(PendingReview {})
}
fn approve(self: Box<Self>) -> Box<dyn State> {
self
}
fn reject(self: Box<Self>) -> Box<dyn State> {
self
}
}
struct PendingReview {}
impl State for PendingReview {
fn request_review(self: Box<Self>) -> Box<dyn State> {
self
}
fn approve(self: Box<Self>) -> Box<dyn State> {
Box::new(Published {})
}
fn reject(self: Box<Self>) -> Box<dyn State> {
Box::new(Draft {})
}
}
struct Published {}
impl State for Published {
fn request_review(self: Box<Self>) -> Box<dyn State> {
self
}
fn approve(self: Box<Self>) -> Box<dyn State> {
self
}
fn reject(self: Box<Self>) -> Box<dyn State> {
self
}
fn content<'a>(&self, post: &'a Post) -> &'a str {
post.content.as_ref()
}
}
pub struct Post {
/// To consume the old state, the request_review method needs to take ownership of the state
/// value. This is where the Option in the state field of Post comes in: we call the take
/// method to take the Some value out of the state field and leave a None in its place,
/// because Rust doesn’t let us have unpopulated fields in structs. This lets us move the
/// state value out of Post rather than borrowing it. Then we’ll set the post’s state value
/// to the result of this operation.
state: Option<Box<dyn State>>,
content: String,
}
/// Post knows nothing about the various behaviors. It relies on various State objects to do
/// their jobs.
impl Default for Post {
fn default() -> Self {
Self {
state: Some(Box::new(Draft {})),
content: String::new(),
}
}
}
impl Post {
// This behavior doesn’t depend on the state the post is in, so it’s not part of the state
// pattern. The add_text method doesn’t interact with the state field at all, but it is part
// of the behavior we want to support.
pub fn add_text(&mut self, text: &str) {
self.content.push_str(text);
}
pub fn content(&self) -> &str {
match &self.state {
Some(s) => s.content(self),
None => "",
}
}
pub fn request_review(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.request_review());
}
}
pub fn reject(&mut self) {
        if let Some(s) = self.state.take() {
self.state = Some(s.reject());
}
}
pub fn approve(&mut self) {
if let Some(s) = self.state.take() {
self.state = Some(s.approve());
}
}
}
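// A tiny added demo of the Option::take trick documented on the state field:
// take() moves the boxed state out and leaves None behind, so Post never has
// an uninitialized field, even mid-transition. (Illustrative only.)
#[allow(dead_code)]
fn demo_option_take() {
    let mut slot: Option<Box<dyn State>> = Some(Box::new(Draft {}));
    let owned = slot.take(); // slot is now None; we own the old state
    assert!(slot.is_none());
    assert!(owned.is_some());
}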
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_min() {
assert_eq!(min(3, 5), 3);
assert_eq!(min(30, 5), 5);
}
#[test]
fn traits_need_to_be_in_scope() {
// The Write trait needs to be in scope. Otherwise, all its methods (such as "write_all")
// are hidden.
use std::io::Write;
let mut buf: Vec<u8> = vec![];
buf.write_all(b"hello").unwrap();
assert_eq!(5, buf.len());
        // Note that Vec<u8> implements the Write trait but Vec<String> does not. So the code below doesn't compile!
// let mut buf: Vec<String> = vec![];
// buf.write_all("hello world").unwrap();
// assert_eq!(11, buf.len());
}
#[test]
fn trait_objects() {
let mut buf: Vec<u8> = vec![];
// Rust doesn’t permit variables of type Write!
// This line doesn't compile because a variable's size has to be known at compile time and
// types that implement Write can be any size.
// let writer: Write = buf;
        // A reference to a trait type, like writer, is called a "trait object". Like any other
// reference, a trait object points to some value, it has a lifetime, and it can be either
// mut or shared. The size of a reference is fixed!
let _writer: &mut dyn Write = &mut buf;
// What makes a trait object different is that Rust usually doesn’t know the type of the
// referent at compile time. So a trait object includes a little extra information about
// the referent’s type. This is strictly for Rust’s own use behind the scenes: when you
// call writer.write(data), Rust needs the type information to dynamically call the right
// write method depending on the type of *writer. You can’t query the type information
// directly, and Rust does not support downcasting from the trait object &mut Write back to
// a concrete type like Vec<u8>. In other words, you can only work with the "generic type"
// of the trait itself.
//
// In memory, a trait object is a "fat pointer" consisting of two pointers:
// 1. data pointer: a pointer to the value, plus
// 2. vtable pointer: a pointer to a table representing that value's type.
// (Vec<u8> in this example)
//
// A vtable is essentially a struct of function pointers, pointing to the concrete piece of
// machine code for each method in the implementation. A method call like
// trait_object.method() will retrieve the correct pointer out of the vtable and then do a
// dynamic call of it.
// Rust automatically converts ordinary references into trait objects when needed. Let's
// say "say_hello" is a function that takes a "&mut Write", this works:
//
// let mut local_file: File = File::create("hello.txt")?;
// say_hello(&mut local_file)?; // Rust converts "&mut File" to "&mut Write"
// This kind of conversion is the only way to create a trait object. What the computer is
// actually doing here is very simple. At the point where the conversion happens, Rust
// knows the referent’s true type (in this case, File), so it just adds the address of the
// appropriate "vtable", turning the regular pointer into a fat pointer.
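        // An added sanity check: a trait object reference really is a fat
        // pointer, i.e. two machine words (data pointer + vtable pointer).
        assert_eq!(
            std::mem::size_of::<&mut dyn Write>(),
            2 * std::mem::size_of::<usize>()
        );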
}
#[test]
fn test_generic_functions() {}
#[test]
fn test_top_ten() {
let names = vec![
String::from("Oakland"),
String::from("Oakland"),
String::from("Oakland"),
String::from("Alameda"),
String::from("San Francisco"),
String::from("San Francisco"),
];
let top10 = top_ten(&names);
assert_eq!(vec!["Oakland", "San Francisco", "Alameda"], top10);
}
#[test]
fn test_sheep() {
let mut sheep = Sheep {
naked: true,
name: "Dolly",
};
sheep.talk();
sheep.shear();
sheep.talk();
}
#[test]
fn return_trait_object() {
let animal = random_animal(0.3);
assert_eq!("baaaaaa?", animal.noise());
}
#[test]
fn operator_overloading() {
let rect = Rectangle {
width: 10,
height: 20,
};
let rect2 = rect * 10;
assert_eq!(100, rect2.width);
assert_eq!(200, rect2.height);
}
#[test]
fn test_combine_vecs() {
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5];
let mut v3 = combine_vecs(v1, v2);
assert_eq!(Some(1), v3.next());
assert_eq!(Some(2), v3.next());
assert_eq!(Some(3), v3.next());
assert_eq!(Some(4), v3.next());
assert_eq!(Some(5), v3.next());
assert_eq!(None, v3.next());
}
#[test]
fn test_make_adder() {
let add_one = make_adder(1);
assert_eq!(2, add_one(1));
}
#[test]
fn test_screen() {
let mut screen = Screen::default();
screen
.add_component(Box::new(Button {
width: 50,
height: 10,
label: String::from("OK"),
}))
.add_component(Box::new(SelectBox {
width: 75,
height: 10,
options: vec![
String::from("Yes"),
String::from("No"),
String::from("Maybe"),
],
}));
screen.run();
}
#[test]
fn test_post() {
let mut post = Post::default();
post.add_text("I ate a salad for lunch today");
assert_eq!("", post.content());
post.request_review();
assert_eq!("", post.content());
post.reject();
assert_eq!("", post.content());
post.request_review();
assert_eq!("", post.content());
post.approve();
assert_eq!("I ate a salad for lunch today", post.content());
}
}
message.rs | //! Definitions of network messages.
use std::error::Error;
use std::{net, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::block::{Block, BlockHeader, BlockHeaderHash};
use zebra_chain::{transaction::Transaction, types::BlockHeight};
use super::inv::InventoryHash;
use super::types::*;
use crate::meta_addr::MetaAddr;
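// Added illustration (not part of the original zebra-network source): the doc
// comment below treats Bitcoin's 12-byte ASCII command string as a
// serialization detail recoverable from the enum discriminant. A serializer
// could map variants back to command strings along these lines; the `command`
// helper and its name are hypothetical.
impl Message {
    #[allow(dead_code)]
    fn command(&self) -> &'static str {
        match self {
            Message::Version { .. } => "version",
            Message::Verack => "verack",
            Message::Ping(_) => "ping",
            Message::Pong(_) => "pong",
            Message::GetAddr => "getaddr",
            _ => todo!("one command string per remaining variant"),
        }
    }
}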
/// A Bitcoin-like network message for the Zcash protocol.
///
/// The Zcash network protocol is mostly inherited from Bitcoin, and a list of
/// Bitcoin network messages can be found [on the Bitcoin
/// wiki][btc_wiki_protocol].
///
/// That page describes the wire format of the messages, while this enum stores
/// an internal representation. The internal representation is unlinked from the
/// wire format, and the translation between the two happens only during
/// serialization and deserialization. For instance, Bitcoin identifies messages
/// by a 12-byte ascii command string; we consider this a serialization detail
/// and use the enum discriminant instead. (As a side benefit, this also means
/// that we have a clearly-defined validation boundary for network messages
/// during serialization).
///
/// [btc_wiki_protocol]: https://en.bitcoin.it/wiki/Protocol_documentation
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Message {
/// A `version` message.
///
/// Note that although this is called `version` in Bitcoin, its role is really
/// analogous to a `ClientHello` message in TLS, used to begin a handshake, and
/// is distinct from a simple version number.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#version)
Version {
/// The network version number supported by the sender.
version: Version,
/// The network services advertised by the sender.
services: PeerServices,
/// The time when the version message was sent.
timestamp: DateTime<Utc>,
/// The network address of the node receiving this message, and its
/// advertised network services.
///
/// Q: how does the handshake know the remote peer's services already?
address_recv: (PeerServices, net::SocketAddr),
/// The network address of the node sending this message, and its
/// advertised network services.
address_from: (PeerServices, net::SocketAddr),
/// Node random nonce, randomly generated every time a version
/// packet is sent. This nonce is used to detect connections
/// to self.
nonce: Nonce,
/// The Zcash user agent advertised by the sender.
user_agent: String,
/// The last block received by the emitting node.
start_height: BlockHeight,
/// Whether the remote peer should announce relayed
/// transactions or not, see [BIP 0037](https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki)
relay: bool,
},
/// A `verack` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#verack)
Verack,
/// A `ping` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#ping)
Ping(
/// A nonce unique to this [`Ping`] message.
Nonce,
),
/// A `pong` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#pong)
Pong(
/// The nonce from the [`Ping`] message this was in response to.
Nonce,
),
/// A `reject` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#reject)
Reject {
/// Type of message rejected.
// It's unclear if this is strictly limited to message command
// codes, so leaving it a String.
message: String,
/// RejectReason code relating to rejected message.
ccode: RejectReason,
/// Human-readable version of rejection reason.
reason: String,
/// Optional extra data provided for some errors.
// Currently, all errors which provide this field fill it with
// the TXID or block header hash of the object being rejected,
// so the field is 32 bytes.
//
// Q: can we tell Rust that this field is optional? Or just
// default its value to an empty array, I guess.
data: Option<[u8; 32]>,
},
/// An `addr` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#addr)
Addr(Vec<MetaAddr>),
/// A `getaddr` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getaddr)
GetAddr,
/// A `block` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#block)
Block(Arc<Block>),
/// A `getblocks` message.
///
/// Requests the list of blocks starting right after the last
/// known hash in `block_locator_hashes`, up to `hash_stop` or 500
/// blocks, whichever comes first.
///
/// You can send in fewer known hashes down to a minimum of just
/// one hash. However, the purpose of the block locator object is
/// to detect a wrong branch in the caller's main chain. If the
/// peer detects that you are off the main chain, it will send in
/// block hashes which are earlier than your last known block. So
/// if you just send in your last known hash and it is off the
/// main chain, the peer starts over at block #1.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getblocks)
// The locator hashes are processed by a node in the order as they
// appear in the message. If a block hash is found in the node's
// main chain, the list of its children is returned back via the
// inv message and the remaining locators are ignored, no matter
// if the requested limit was reached, or not.
//
// The 500 headers number is from the Bitcoin docs, we are not
// certain (yet) that other implementations of Zcash obey this
// restriction, or if they don't, what happens if we send them too
// many results.
GetBlocks {
/// Block locators, from newest back to genesis block.
block_locator_hashes: Vec<BlockHeaderHash>,
/// `BlockHeaderHash` of the last desired block.
///
/// Set to zero to get as many blocks as possible (500).
hash_stop: BlockHeaderHash,
},
/// A `headers` message.
///
/// Returns block headers in response to a getheaders packet.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#headers)
// Note that the block headers in this packet include a
// transaction count (a var_int, so there can be more than 81
// bytes per header) as opposed to the block headers that are
// hashed by miners.
Headers(Vec<BlockHeader>),
/// A `getheaders` message.
///
/// Requests a series of block headers starting right after the
/// last known hash in `block_locator_hashes`, up to `hash_stop`
/// or 2000 blocks, whichever comes first.
///
/// You can send in fewer known hashes down to a minimum of just
/// one hash. However, the purpose of the block locator object is
/// to detect a wrong branch in the caller's main chain. If the
/// peer detects that you are off the main chain, it will send in
/// block hashes which are earlier than your last known block. So
/// if you just send in your last known hash and it is off the
/// main chain, the peer starts over at block #1.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getheaders)
// The 2000 headers number is from the Bitcoin docs, we are not
// certain (yet) that other implementations of Zcash obey this
// restriction, or if they don't, what happens if we send them too
// many results.
GetHeaders {
/// Block locators, from newest back to genesis block.
block_locator_hashes: Vec<BlockHeaderHash>,
/// `BlockHeaderHash` of the last desired block header.
///
/// Set to zero to get as many block headers as possible (2000).
hash_stop: BlockHeaderHash,
},
/// An `inv` message.
///
/// Allows a node to advertise its knowledge of one or more
/// objects. It can be received unsolicited, or in reply to
/// `getblocks`.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#inv)
Inv(Vec<InventoryHash>),
/// A `getdata` message.
///
/// `getdata` is used in response to `inv`, to retrieve the
/// content of a specific object, and is usually sent after
/// receiving an `inv` packet, after filtering known elements.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#getdata)
GetData(Vec<InventoryHash>),
/// A `notfound` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#notfound)
// See note above on `Inventory`.
NotFound(Vec<InventoryHash>),
/// A `tx` message.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#tx)
Tx(Arc<Transaction>),
/// A `mempool` message.
///
/// This was defined in [BIP35], which is included in Zcash.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#mempool)
/// [BIP35]: https://github.com/bitcoin/bips/blob/master/bip-0035.mediawiki
Mempool,
/// A `filterload` message.
///
/// This was defined in [BIP37], which is included in Zcash.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#filterload.2C_filteradd.2C_filterclear.2C_merkleblock)
/// [BIP37]: https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
FilterLoad {
/// The filter itself is simply a bit field of arbitrary
/// byte-aligned size. The maximum size is 36,000 bytes.
filter: Filter,
/// The number of hash functions to use in this filter. The
/// maximum value allowed in this field is 50.
hash_functions_count: u32,
/// A random value to add to the seed value in the hash
/// function used by the bloom filter.
tweak: Tweak,
/// A set of flags that control how matched items are added to the filter.
flags: u8,
},
/// A `filteradd` message.
///
/// This was defined in [BIP37], which is included in Zcash.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#filterload.2C_filteradd.2C_filterclear.2C_merkleblock)
/// [BIP37]: https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
FilterAdd {
/// The data element to add to the current filter.
// The data field must be smaller than or equal to 520 bytes
// in size (the maximum size of any potentially matched
// object).
//
// A Vec instead of [u8; 520] because of needed traits.
data: Vec<u8>,
},
/// A `filterclear` message.
///
/// This was defined in [BIP37], which is included in Zcash.
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#filterload.2C_filteradd.2C_filterclear.2C_merkleblock)
/// [BIP37]: https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki
FilterClear,
}
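// Illustrative sketch only, not this crate's real serialization code: the
// doc comment above treats Bitcoin's 12-byte ASCII command string as a
// serialization detail. A serializer could recover it from the variant like
// this; the `command` helper and its mapping are assumptions for
// illustration and cover only a few variants.
#[allow(dead_code)]
impl Message {
    fn command(&self) -> &'static str {
        match self {
            Message::Version { .. } => "version",
            Message::Verack => "verack",
            Message::Ping(_) => "ping",
            Message::Pong(_) => "pong",
            Message::GetAddr => "getaddr",
            // ...the remaining variants map the same way.
            _ => unimplemented!("sketch covers only a few variants"),
        }
    }
}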
impl<E> From<E> for Message
where
E: Error,
{
    fn from(e: E) -> Self {
Message::Reject {
message: e.to_string(),
// The generic case, impls for specific error types should
// use specific varieties of `RejectReason`.
ccode: RejectReason::Other,
            reason: e.source().map(|s| s.to_string()).unwrap_or_default(),
// Allow this to be overridden but not populated by default, methinks.
data: None,
}
}
}
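// Illustrative sketch of the block-locator scheme documented on `GetBlocks`
// and `GetHeaders`: locators run from the newest block back towards genesis
// with exponentially growing gaps, keeping the list O(log n) in chain
// height. `locator_indices` is a hypothetical helper, not part of this
// module's API; it returns the block *heights* to take locator hashes from.
#[allow(dead_code)]
fn locator_indices(tip_height: usize) -> Vec<usize> {
    let mut indices = Vec::new();
    let (mut height, mut step) = (tip_height, 1);
    loop {
        indices.push(height);
        // After the first ten entries, double the gap each time.
        if indices.len() >= 10 {
            step *= 2;
        }
        if height < step {
            break;
        }
        height -= step;
    }
    if *indices.last().unwrap() != 0 {
        indices.push(0); // always anchor the locator at genesis
    }
    indices
}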
/// Reject Reason CCodes
///
/// [Bitcoin reference](https://en.bitcoin.it/wiki/Protocol_documentation#reject)
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u8)]
#[allow(missing_docs)]
pub enum RejectReason {
Malformed = 0x01,
Invalid = 0x10,
Obsolete = 0x11,
Duplicate = 0x12,
Nonstandard = 0x40,
Dust = 0x41,
InsufficientFee = 0x42,
Checkpoint = 0x43,
Other = 0x50,
}
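// Illustrative test sketch (an assumption of this write-up, not original
// code): checks the wire values of `RejectReason` and exercises the blanket
// `From<E: Error>` impl above with a `std::io::Error`.
#[cfg(test)]
mod tests {
    use super::*;
    use std::io;

    #[test]
    fn reject_reason_discriminants() {
        assert_eq!(RejectReason::Malformed as u8, 0x01);
        assert_eq!(RejectReason::Duplicate as u8, 0x12);
        assert_eq!(RejectReason::Other as u8, 0x50);
    }

    #[test]
    fn reject_from_error() {
        let err = io::Error::new(io::ErrorKind::Other, "boom");
        match Message::from(err) {
            Message::Reject { message, ccode, .. } => {
                assert_eq!(message, "boom");
                assert_eq!(ccode, RejectReason::Other);
            }
            other => panic!("expected Reject, got {:?}", other),
        }
    }
}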
openqabot.py
# -*- coding: utf-8 -*-
from collections import namedtuple
from datetime import date
import md5
from pprint import pformat
import re
from urllib2 import HTTPError
import requests
import osc.core
import ReviewBot
from osclib.comments import CommentAPI
from suse import SUSEUpdate
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
try:
import simplejson as json
except ImportError:
import json
QA_UNKNOWN = 0
QA_INPROGRESS = 1
QA_FAILED = 2
QA_PASSED = 3
Package = namedtuple('Package', ('name', 'version', 'release'))
pkgname_re = re.compile(r'(?P<name>.+)-(?P<version>[^-]+)-(?P<release>[^-]+)\.(?P<arch>[^.]+)\.rpm')
comment_marker_re = re.compile(
r'<!-- openqa state=(?P<state>done|seen)(?: result=(?P<result>accepted|declined|none))?(?: revision=(?P<revision>\d+))? -->')
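# Illustrative example (not executed anywhere in this file): the marker that
# add_comment() embeds in OBS comments round-trips through comment_marker_re
# like this:
#
#   m = comment_marker_re.match(
#       '<!-- openqa state=done result=accepted revision=42 -->')
#   assert m.group('state') == 'done'
#   assert m.group('result') == 'accepted'
#   assert m.group('revision') == '42'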
class OpenQABot(ReviewBot.ReviewBot):
""" check ABI of library packages
"""
def __init__(self, *args, **kwargs):
super(OpenQABot, self).__init__(*args, **kwargs)
self.tgt_repo = {}
self.project_settings = {}
self.api_map = {}
self.force = False
self.openqa = None
self.commentapi = CommentAPI(self.apiurl)
self.update_test_builds = {}
self.pending_target_repos = set()
self.openqa_jobs = {}
def gather_test_builds(self):
for prj, u in self.tgt_repo[self.openqa.baseurl].items():
buildnr = 0
cjob = 0
for j in self.jobs_for_target(u):
# avoid going backwards in job ID
if cjob > int(j['id']):
continue
buildnr = j['settings']['BUILD']
cjob = int(j['id'])
self.update_test_builds[prj] = buildnr
jobs = self.jobs_for_target(u, build=buildnr)
self.openqa_jobs[prj] = jobs
if self.calculate_qa_status(jobs) == QA_INPROGRESS:
self.pending_target_repos.add(prj)
    # reimplementation from base class
    def check_requests(self):
if self.ibs:
self.check_suse_incidents()
# first calculate the latest build number for current jobs
self.gather_test_builds()
started = []
# then check progress on running incidents
for req in self.requests:
jobs = self.request_get_openqa_jobs(req, incident=True, test_repo=True)
ret = self.calculate_qa_status(jobs)
if ret != QA_UNKNOWN:
started.append(req)
all_requests = self.requests
self.requests = started
self.logger.debug("check started requests")
super(OpenQABot, self).check_requests()
self.requests = all_requests
skipped_one = False
# now make sure the jobs are for current repo
for prj, u in self.tgt_repo[self.openqa.baseurl].items():
if prj in self.pending_target_repos:
skipped_one = True
continue
self.trigger_build_for_target(prj, u)
# do not schedule new incidents unless we finished
# last wave
if skipped_one:
return
self.logger.debug("Check all requests")
super(OpenQABot, self).check_requests()
# check a set of repos for their primary checksums
@staticmethod
def calculate_repo_hash(repos):
m = md5.new()
        # bump this value if you want to force a new hash (and a new build)
m.update('b')
for url in repos:
url += '/repodata/repomd.xml'
try:
root = ET.parse(osc.core.http_GET(url)).getroot()
except HTTPError:
raise
cs = root.find(
'.//{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum')
m.update(cs.text)
return m.hexdigest()
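    # Illustrative (trimmed) repomd.xml fragment that the XPath above walks;
    # only the <checksum> of the "primary" entry feeds the hash:
    #
    #   <repomd xmlns="http://linux.duke.edu/metadata/repo">
    #     <data type="primary">
    #       <checksum type="sha256">4aa1...</checksum>
    #     </data>
    #   </repomd>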
def is_incident_in_testing(self, incident):
# hard coded for now as we only run this code for SUSE Maintenance workflow
project = 'SUSE:Maintenance:{}'.format(incident)
xpath = "(state/@name='review') and (action/source/@project='{}' and action/@type='maintenance_release')".format(project)
res = osc.core.search(self.apiurl, request=xpath)['request']
# return the one and only (or None)
return res.find('request')
def calculate_incidents(self, incidents):
"""
get incident numbers from SUSE:Maintenance:Test project
returns dict with openQA var name : string with numbers
"""
self.logger.debug("calculate_incidents: {}".format(pformat(incidents)))
l_incidents = []
for kind, prj in incidents.items():
packages = osc.core.meta_get_packagelist(self.apiurl, prj)
incidents = []
# filter out incidents in staging
for incident in packages:
# remove patchinfo. prefix
incident = incident.replace('_', '.').split('.')[1]
req = self.is_incident_in_testing(incident)
# without release request it's in staging
if not req:
continue
# skip kgraft patches from aggregation
req_ = osc.core.Request()
req_.read(req)
src_prjs = {a.src_project for a in req_.actions}
if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
self.logger.debug("calculate_incidents: Incident is kgraft - {} ".format(incident))
continue
incidents.append(incident)
l_incidents.append((kind + '_TEST_ISSUES', ','.join(incidents)))
self.logger.debug("Calculate incidents:{}".format(pformat(l_incidents)))
return l_incidents
def jobs_for_target(self, data, build=None):
settings = data['settings'][0]
values = {
'distri': settings['DISTRI'],
'version': settings['VERSION'],
'arch': settings['ARCH'],
'flavor': settings['FLAVOR'],
'scope': 'relevant',
'latest': '1',
}
if build:
values['build'] = build
else:
values['test'] = data['test']
self.logger.debug("Get jobs: {}".format(pformat(values)))
return self.openqa.openqa_request('GET', 'jobs', values)['jobs']
# we don't know the current BUILD and querying all jobs is too expensive
# so we need to check for one known TEST first
# if that job doesn't contain the proper hash, we trigger a new one
# and then we know the build
def trigger_build_for_target(self, prj, data):
today = date.today().strftime("%Y%m%d")
try:
repohash = self.calculate_repo_hash(data['repos'])
except HTTPError as e:
self.logger.debug("REPOHAS not calculated with response {}".format(e))
return
buildnr = None
jobs = self.jobs_for_target(data)
for job in jobs:
if job['settings'].get('REPOHASH', '') == repohash:
# take the last in the row
buildnr = job['settings']['BUILD']
self.update_test_builds[prj] = buildnr
# ignore old build numbers, we want a fresh run every day
# to find regressions in the tests and to get data about
# randomly failing tests
if buildnr and buildnr.startswith(today):
return
buildnr = 0
# not found, then check for the next free build nr
for job in jobs:
build = job['settings']['BUILD']
if build and build.startswith(today):
try:
nr = int(build.split('-')[1])
if nr > buildnr:
buildnr = nr
except ValueError:
continue
buildnr = "{!s}-{:d}".format(today, buildnr + 1)
for s in data['settings']:
# now schedule it for real
if 'incidents' in data.keys():
for x, y in self.calculate_incidents(data['incidents']):
s[x] = y
s['BUILD'] = buildnr
s['REPOHASH'] = repohash
self.logger.debug("Prepared: {}".format(pformat(s)))
if not self.dryrun:
try:
self.logger.info("Openqa isos POST {}".format(pformat(s)))
self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
except Exception as e:
self.logger.error(e)
self.update_test_builds[prj] = buildnr
def request_get_openqa_jobs(self, req, incident=True, test_repo=False):
ret = None
types = {a.type for a in req.actions}
if 'maintenance_release' in types:
src_prjs = {a.src_project for a in req.actions}
if len(src_prjs) != 1:
raise Exception("can't handle maintenance_release from different incidents")
build = src_prjs.pop()
tgt_prjs = {a.tgt_project for a in req.actions}
ret = []
if incident:
ret += self.openqa_jobs[build]
for prj in sorted(tgt_prjs):
repo_settings = self.tgt_repo.get(self.openqa.baseurl, {})
if test_repo and prj in repo_settings:
repo_jobs = self.openqa_jobs[prj]
ret += repo_jobs
return ret
def calculate_qa_status(self, jobs=None):
if not jobs:
return QA_UNKNOWN
j = {}
has_failed = False
in_progress = False
for job in jobs:
if job['clone_id']:
continue
name = job['name']
if name in j and int(job['id']) < int(j[name]['id']):
continue
j[name] = job
if job['state'] not in ('cancelled', 'done'):
in_progress = True
else:
if job['result'] != 'passed' and job['result'] != 'softfailed':
has_failed = True
if not j:
return QA_UNKNOWN
if in_progress:
return QA_INPROGRESS
if has_failed:
return QA_FAILED
return QA_PASSED
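    # Illustrative example with minimal job dicts (mirroring openQA's /jobs
    # API): superseded jobs carry a clone_id and are skipped entirely.
    #
    #   jobs = [
    #       {'clone_id': 43, 'name': 'minimal', 'id': 42,
    #        'state': 'done', 'result': 'failed'},   # old run, was cloned
    #       {'clone_id': None, 'name': 'minimal', 'id': 43,
    #        'state': 'done', 'result': 'passed'},   # its clone
    #   ]
    #   bot.calculate_qa_status(jobs)  # == QA_PASSED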
def add_comment(self, msg, state, request_id=None, result=None):
if not self.do_comments:
return
comment = "<!-- openqa state={!s}{!s} -->\n".format(state, ' result={!s}'.format(result) if result else '')
comment += "\n" + msg
info = self.find_obs_request_comment(request_id=request_id)
comment_id = info.get('id', None)
if state == info.get('state', 'missing'):
lines_before = len(info['comment'].split('\n'))
lines_after = len(comment.split('\n'))
if lines_before == lines_after:
self.logger.info("not worth the update, previous comment %s is state %s", comment_id, info['state'])
return
self.logger.info("adding comment to %s, state %s result %s", request_id, state, result)
self.logger.info("message: %s", msg)
if not self.dryrun:
if comment_id:
self.commentapi.delete(comment_id)
self.commentapi.add_comment(request_id=request_id, comment=str(comment))
# escape markdown
@staticmethod
    def emd(text):
        return text.replace('_', r'\_')
@staticmethod
def get_step_url(testurl, modulename):
failurl = testurl + '/modules/{!s}/fails'.format(modulename)
fails = requests.get(failurl).json()
failed_step = fails.get('first_failed_step', 1)
return "[{!s}]({!s}#step/{!s}/{:d})".format(OpenQABot.emd(modulename), testurl, modulename, failed_step)
@staticmethod
def job_test_name(job):
return "{!s}@{!s}".format(OpenQABot.emd(job['settings']['TEST']), OpenQABot.emd(job['settings']['MACHINE']))
def summarize_one_openqa_job(self, job):
testurl = osc.core.makeurl(self.openqa.baseurl, ['tests', str(job['id'])])
        if job['result'] not in ['passed', 'failed', 'softfailed']:
rstring = job['result']
if rstring == 'none':
return None
return '\n- [{!s}]({!s}) is {!s}'.format(self.job_test_name(job), testurl, rstring)
modstrings = []
for module in job['modules']:
if module['result'] != 'failed':
continue
modstrings.append(self.get_step_url(testurl, module['name']))
if modstrings:
return '\n- [{!s}]({!s}) failed in {!s}'.format(self.job_test_name(job), testurl, ','.join(modstrings))
elif job['result'] == 'failed': # rare case: fail without module fails
return '\n- [{!s}]({!s}) failed'.format(self.job_test_name(job), testurl)
return ''
def summarize_openqa_jobs(self, jobs):
groups = {}
for job in jobs:
gl = "{!s}@{!s}".format(self.emd(job['group']), self.emd(job['settings']['FLAVOR']))
if gl not in groups:
groupurl = osc.core.makeurl(self.openqa.baseurl, ['tests', 'overview'],
{'version': job['settings']['VERSION'],
'groupid': job['group_id'],
'flavor': job['settings']['FLAVOR'],
'distri': job['settings']['DISTRI'],
'build': job['settings']['BUILD'],
})
groups[gl] = {'title': "__Group [{!s}]({!s})__\n".format(gl, groupurl),
'passed': 0, 'unfinished': 0, 'failed': []}
job_summary = self.summarize_one_openqa_job(job)
if job_summary is None:
groups[gl]['unfinished'] = groups[gl]['unfinished'] + 1
continue
# None vs ''
if not len(job_summary):
groups[gl]['passed'] = groups[gl]['passed'] + 1
continue
# if there is something to report, hold the request
# TODO: what is this ?
# qa_state = QA_FAILED
# gmsg = groups[gl]
groups[gl]['failed'].append(job_summary)
msg = ''
for group in sorted(groups.keys()):
msg += "\n\n" + groups[group]['title']
infos = []
if groups[group]['passed']:
infos.append("{:d} tests passed".format(groups[group]['passed']))
if len(groups[group]['failed']):
infos.append("{:d} tests failed".format(len(groups[group]['failed'])))
if groups[group]['unfinished']:
infos.append("{:d} unfinished tests".format(groups[group]['unfinished']))
msg += "(" + ', '.join(infos) + ")\n"
for fail in groups[group]['failed']:
msg += fail
return msg.rstrip('\n')
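    # Sketch of the Markdown this emits (URLs elided, values illustrative):
    #
    #   __Group [Maintenance@DVD](.../tests/overview?...)__
    #   (2 tests passed, 1 tests failed)
    #   - [minimal@64bit](.../tests/123) failed in [grub](.../tests/123#step/grub/1)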
def check_one_request(self, req):
ret = None
try:
jobs = self.request_get_openqa_jobs(req)
qa_state = self.calculate_qa_status(jobs)
self.logger.debug("request %s state %s", req.reqid, qa_state)
msg = None
if self.force or qa_state == QA_UNKNOWN:
ret = super(OpenQABot, self).check_one_request(req)
jobs = self.request_get_openqa_jobs(req)
if self.force:
# make sure to delete previous comments if we're forcing
info = self.find_obs_request_comment(request_id=req.reqid)
if 'id' in info:
self.logger.debug("deleting old comment %s", info['id'])
if not self.dryrun:
self.commentapi.delete(info['id'])
if jobs:
# no notification until the result is done
osc.core.change_review_state(self.apiurl, req.reqid, newstate='new',
by_group=self.review_group, by_user=self.review_user,
message='now testing in openQA')
else:
msg = "no openQA tests defined"
self.add_comment(msg, 'done', request_id=req.reqid, result='accepted')
ret = True
elif qa_state == QA_FAILED or qa_state == QA_PASSED:
# don't take test repo results into the calculation of total
# this is for humans to decide which incident broke the test repo
jobs += self.request_get_openqa_jobs(req, incident=False, test_repo=True)
if self.calculate_qa_status(jobs) == QA_INPROGRESS:
self.logger.info(
"incident tests for request %s are done, but need to wait for test repo", req.reqid)
return
if qa_state == QA_PASSED:
msg = "openQA tests passed\n"
result = 'accepted'
ret = True
else:
msg = "openQA tests problematic\n"
result = 'declined'
ret = False
msg += self.summarize_openqa_jobs(jobs)
self.add_comment(msg, 'done', result=result, request_id=req.reqid)
elif qa_state == QA_INPROGRESS:
self.logger.info("request %s still in progress", req.reqid)
else:
                raise Exception("unknown QA state %d" % qa_state)
except Exception:
import traceback
self.logger.error("unhandled exception in openQA Bot")
self.logger.error(traceback.format_exc())
ret = None
return ret
def find_obs_request_comment(self, request_id=None, project_name=None):
"""Return previous comments (should be one)."""
if self.do_comments:
comments = self.commentapi.get_comments(request_id=request_id, project_name=project_name)
for c in comments.values():
m = comment_marker_re.match(c['comment'])
if m:
return {
'id': c['id'],
'state': m.group('state'),
'result': m.group('result'),
'comment': c['comment'],
'revision': m.group('revision')}
return {}
def check_product(self, job, product_prefix):
pmap = self.api_map[product_prefix]
posts = []
for arch in pmap['archs']:
need = False
settings = {'VERSION': pmap['version'], 'ARCH': arch}
settings['DISTRI'] = 'sle' if 'distri' not in pmap else pmap['distri']
issues = pmap.get('issues', {})
issues['OS_TEST_ISSUES'] = issues.get('OS_TEST_ISSUES', product_prefix)
required_issue = pmap.get('required_issue', False)
for key, prefix in issues.items():
self.logger.debug("{} {}".format(key, prefix))
if prefix + arch in job['channels']:
settings[key] = str(job['id'])
need = True
if required_issue:
if required_issue not in settings:
need = False
if need:
update = self.project_settings[product_prefix + arch]
update.apiurl = self.apiurl
update.logger = self.logger
for j in update.settings(
update.maintenance_project + ':' + str(job['id']),
product_prefix + arch, []):
if not job.get('openqa_build'):
job['openqa_build'] = update.get_max_revision(job)
if not job.get('openqa_build'):
return []
j['BUILD'] += '.' + str(job['openqa_build'])
j.update(settings)
# kGraft jobs can have different version
if 'real_version' in j:
j['VERSION'] = j['real_version']
del j['real_version']
posts.append(j)
self.logger.debug("Pmap: {} Posts: {}".format(pmap, posts))
return posts
def incident_openqa_jobs(self, s):
return self.openqa.openqa_request(
'GET', 'jobs',
{
'distri': s['DISTRI'],
'version': s['VERSION'],
'arch': s['ARCH'],
'flavor': s['FLAVOR'],
'build': s['BUILD'],
'scope': 'relevant',
'latest': '1'
})['jobs']
def check_suse_incidents(self):
for inc in requests.get('https://maintenance.suse.de/api/incident/active/').json():
self.logger.info("Incident number: {}".format(inc))
job = requests.get('https://maintenance.suse.de/api/incident/' + inc).json()
if job['meta']['state'] in ['final', 'gone']:
continue
# required in job: project, id, channels
self.test_job(job['base'])
def test_job(self, job):
self.logger.debug("Called test_job with: {}".format(job))
incident_project = str(job['project'])
try:
comment_info = self.find_obs_request_comment(project_name=incident_project)
except HTTPError as e:
self.logger.debug("Couldn't loaadd comments - {}".format(e))
return
comment_id = comment_info.get('id', None)
comment_build = str(comment_info.get('revision', ''))
openqa_posts = []
for prod in self.api_map.keys():
self.logger.debug("{} -- product in apimap".format(prod))
openqa_posts += self.check_product(job, prod)
openqa_jobs = []
for s in openqa_posts:
jobs = self.incident_openqa_jobs(s)
# take the project comment as marker for not posting jobs
if not len(jobs) and comment_build != str(job['openqa_build']):
if self.dryrun:
self.logger.info('WOULD POST:{}'.format(pformat(json.dumps(s, sort_keys=True))))
else:
self.logger.info("Posted: {}".format(pformat(json.dumps(s, sort_keys=True))))
self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
openqa_jobs += self.incident_openqa_jobs(s)
else:
self.logger.info("{} got {}".format(pformat(s), len(jobs)))
openqa_jobs += jobs
self.openqa_jobs[incident_project] = openqa_jobs
if len(openqa_jobs) == 0:
self.logger.debug("No openqa jobs defined")
return
# print openqa_jobs
msg = self.summarize_openqa_jobs(openqa_jobs)
state = 'seen'
result = 'none'
qa_status = self.calculate_qa_status(openqa_jobs)
if qa_status == QA_PASSED:
result = 'accepted'
state = 'done'
if qa_status == QA_FAILED:
result = 'declined'
state = 'done'
comment = "<!-- openqa state={!s} result={!s} revision={!s} -->\n".format(
state, result, job.get('openqa_build'))
comment += msg
if comment_id and state != 'done':
self.logger.info("%s is already commented, wait until done", incident_project)
return
if comment_info.get('comment', '').rstrip('\n') == comment.rstrip('\n'):
self.logger.info("%s comment did not change", incident_project)
return
self.logger.info("adding comment to %s, state %s", incident_project, state)
if not self.dryrun:
if comment_id:
self.logger.debug("delete comment: {}".format(comment_id))
self.commentapi.delete(comment_id)
self.commentapi.add_comment(project_name=str(incident_project), comment=str(comment))
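# Minimal smoke-test sketch (illustrative addition, not part of the original
# tool): it exercises only the pure static helpers above and touches neither
# OBS nor openQA.
if __name__ == '__main__':
    job = {'settings': {'TEST': 'qam_minimal', 'MACHINE': '64bit'}}
    assert OpenQABot.emd('a_b') == r'a\_b'
    assert OpenQABot.job_test_name(job) == r'qam\_minimal@64bit'
    print('helper smoke checks passed')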
| check_requests | identifier_name |
openqabot.py | # -*- coding: utf-8 -*-
from collections import namedtuple
from datetime import date
import md5
from pprint import pformat
import re
from urllib2 import HTTPError
import requests
import osc.core
import ReviewBot
from osclib.comments import CommentAPI
from suse import SUSEUpdate
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
try:
import simplejson as json
except ImportError:
import json
QA_UNKNOWN = 0
QA_INPROGRESS = 1
QA_FAILED = 2
QA_PASSED = 3
Package = namedtuple('Package', ('name', 'version', 'release'))
pkgname_re = re.compile(r'(?P<name>.+)-(?P<version>[^-]+)-(?P<release>[^-]+)\.(?P<arch>[^.]+)\.rpm')
comment_marker_re = re.compile(
r'<!-- openqa state=(?P<state>done|seen)(?: result=(?P<result>accepted|declined|none))?(?: revision=(?P<revision>\d+))? -->')
class OpenQABot(ReviewBot.ReviewBot):
""" check ABI of library packages
"""
def __init__(self, *args, **kwargs):
super(OpenQABot, self).__init__(*args, **kwargs)
self.tgt_repo = {}
self.project_settings = {}
self.api_map = {}
self.force = False
self.openqa = None
self.commentapi = CommentAPI(self.apiurl)
self.update_test_builds = {}
self.pending_target_repos = set()
self.openqa_jobs = {}
def gather_test_builds(self):
for prj, u in self.tgt_repo[self.openqa.baseurl].items():
buildnr = 0
cjob = 0
for j in self.jobs_for_target(u):
# avoid going backwards in job ID
if cjob > int(j['id']):
continue
buildnr = j['settings']['BUILD']
cjob = int(j['id'])
self.update_test_builds[prj] = buildnr
jobs = self.jobs_for_target(u, build=buildnr)
self.openqa_jobs[prj] = jobs
if self.calculate_qa_status(jobs) == QA_INPROGRESS:
self.pending_target_repos.add(prj)
# reimplemention from baseclass
def check_requests(self):
if self.ibs:
self.check_suse_incidents()
# first calculate the latest build number for current jobs
self.gather_test_builds()
started = []
# then check progress on running incidents
for req in self.requests:
jobs = self.request_get_openqa_jobs(req, incident=True, test_repo=True)
ret = self.calculate_qa_status(jobs)
if ret != QA_UNKNOWN:
started.append(req)
all_requests = self.requests
self.requests = started
self.logger.debug("check started requests")
super(OpenQABot, self).check_requests()
self.requests = all_requests
skipped_one = False
# now make sure the jobs are for current repo
for prj, u in self.tgt_repo[self.openqa.baseurl].items():
if prj in self.pending_target_repos:
skipped_one = True
continue
self.trigger_build_for_target(prj, u)
# do not schedule new incidents unless we finished
# last wave
if skipped_one:
return
self.logger.debug("Check all requests")
super(OpenQABot, self).check_requests()
# check a set of repos for their primary checksums
@staticmethod
def calculate_repo_hash(repos):
m = md5.new()
# if you want to force it, increase this number
m.update('b')
for url in repos:
url += '/repodata/repomd.xml'
try:
root = ET.parse(osc.core.http_GET(url)).getroot()
except HTTPError:
raise
cs = root.find(
'.//{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum')
m.update(cs.text)
return m.hexdigest()
def is_incident_in_testing(self, incident):
# hard coded for now as we only run this code for SUSE Maintenance workflow
project = 'SUSE:Maintenance:{}'.format(incident)
xpath = "(state/@name='review') and (action/source/@project='{}' and action/@type='maintenance_release')".format(project)
res = osc.core.search(self.apiurl, request=xpath)['request']
# return the one and only (or None)
return res.find('request')
def calculate_incidents(self, incidents):
"""
get incident numbers from SUSE:Maintenance:Test project
returns dict with openQA var name : string with numbers
"""
self.logger.debug("calculate_incidents: {}".format(pformat(incidents)))
l_incidents = []
for kind, prj in incidents.items():
packages = osc.core.meta_get_packagelist(self.apiurl, prj)
incidents = []
# filter out incidents in staging
for incident in packages:
# remove patchinfo. prefix
incident = incident.replace('_', '.').split('.')[1]
req = self.is_incident_in_testing(incident)
# without release request it's in staging
if not req:
continue
# skip kgraft patches from aggregation
req_ = osc.core.Request()
req_.read(req)
src_prjs = {a.src_project for a in req_.actions}
if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
self.logger.debug("calculate_incidents: Incident is kgraft - {} ".format(incident))
continue
incidents.append(incident)
l_incidents.append((kind + '_TEST_ISSUES', ','.join(incidents)))
self.logger.debug("Calculate incidents:{}".format(pformat(l_incidents)))
return l_incidents
def jobs_for_target(self, data, build=None):
settings = data['settings'][0]
values = {
'distri': settings['DISTRI'],
'version': settings['VERSION'],
'arch': settings['ARCH'],
'flavor': settings['FLAVOR'],
'scope': 'relevant',
'latest': '1',
}
if build:
values['build'] = build
else:
values['test'] = data['test']
self.logger.debug("Get jobs: {}".format(pformat(values)))
return self.openqa.openqa_request('GET', 'jobs', values)['jobs']
# we don't know the current BUILD and querying all jobs is too expensive
# so we need to check for one known TEST first
# if that job doesn't contain the proper hash, we trigger a new one
# and then we know the build
def trigger_build_for_target(self, prj, data):
today = date.today().strftime("%Y%m%d")
try:
repohash = self.calculate_repo_hash(data['repos'])
except HTTPError as e:
self.logger.debug("REPOHAS not calculated with response {}".format(e))
return
buildnr = None
jobs = self.jobs_for_target(data)
for job in jobs:
if job['settings'].get('REPOHASH', '') == repohash:
# take the last in the row
buildnr = job['settings']['BUILD']
self.update_test_builds[prj] = buildnr
# ignore old build numbers, we want a fresh run every day
# to find regressions in the tests and to get data about
# randomly failing tests
if buildnr and buildnr.startswith(today):
return
buildnr = 0
# not found, then check for the next free build nr
for job in jobs:
build = job['settings']['BUILD']
if build and build.startswith(today):
try:
nr = int(build.split('-')[1])
if nr > buildnr:
buildnr = nr
except ValueError:
continue
buildnr = "{!s}-{:d}".format(today, buildnr + 1)
for s in data['settings']:
# now schedule it for real
if 'incidents' in data.keys():
for x, y in self.calculate_incidents(data['incidents']):
s[x] = y
s['BUILD'] = buildnr
s['REPOHASH'] = repohash
self.logger.debug("Prepared: {}".format(pformat(s)))
if not self.dryrun:
try:
self.logger.info("Openqa isos POST {}".format(pformat(s)))
self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
except Exception as e:
self.logger.error(e)
self.update_test_builds[prj] = buildnr
def request_get_openqa_jobs(self, req, incident=True, test_repo=False):
ret = None
types = {a.type for a in req.actions}
if 'maintenance_release' in types:
src_prjs = {a.src_project for a in req.actions}
if len(src_prjs) != 1:
raise Exception("can't handle maintenance_release from different incidents")
build = src_prjs.pop()
tgt_prjs = {a.tgt_project for a in req.actions}
ret = []
if incident:
ret += self.openqa_jobs[build]
for prj in sorted(tgt_prjs):
repo_settings = self.tgt_repo.get(self.openqa.baseurl, {})
if test_repo and prj in repo_settings:
repo_jobs = self.openqa_jobs[prj]
ret += repo_jobs
return ret
def calculate_qa_status(self, jobs=None):
if not jobs:
return QA_UNKNOWN
j = {}
has_failed = False
in_progress = False
for job in jobs:
if job['clone_id']:
continue
name = job['name']
if name in j and int(job['id']) < int(j[name]['id']):
continue
j[name] = job
if job['state'] not in ('cancelled', 'done'):
in_progress = True
else:
if job['result'] != 'passed' and job['result'] != 'softfailed':
has_failed = True
if not j:
return QA_UNKNOWN
if in_progress:
return QA_INPROGRESS
if has_failed:
return QA_FAILED
return QA_PASSED
def add_comment(self, msg, state, request_id=None, result=None):
if not self.do_comments:
return
comment = "<!-- openqa state={!s}{!s} -->\n".format(state, ' result={!s}'.format(result) if result else '')
comment += "\n" + msg
info = self.find_obs_request_comment(request_id=request_id)
comment_id = info.get('id', None)
if state == info.get('state', 'missing'):
lines_before = len(info['comment'].split('\n'))
lines_after = len(comment.split('\n'))
if lines_before == lines_after:
self.logger.info("not worth the update, previous comment %s is state %s", comment_id, info['state'])
return
self.logger.info("adding comment to %s, state %s result %s", request_id, state, result)
self.logger.info("message: %s", msg)
if not self.dryrun:
if comment_id:
self.commentapi.delete(comment_id)
self.commentapi.add_comment(request_id=request_id, comment=str(comment))
# escape markdown
@staticmethod
def emd(str):
return str.replace('_', r'\_')
@staticmethod
def get_step_url(testurl, modulename):
failurl = testurl + '/modules/{!s}/fails'.format(modulename)
fails = requests.get(failurl).json()
failed_step = fails.get('first_failed_step', 1)
return "[{!s}]({!s}#step/{!s}/{:d})".format(OpenQABot.emd(modulename), testurl, modulename, failed_step)
@staticmethod
def job_test_name(job):
return "{!s}@{!s}".format(OpenQABot.emd(job['settings']['TEST']), OpenQABot.emd(job['settings']['MACHINE']))
def summarize_one_openqa_job(self, job):
testurl = osc.core.makeurl(self.openqa.baseurl, ['tests', str(job['id'])])
if not job['result'] in ['passed', 'failed', 'softfailed']:
rstring = job['result']
if rstring == 'none':
return None
return '\n- [{!s}]({!s}) is {!s}'.format(self.job_test_name(job), testurl, rstring)
modstrings = []
for module in job['modules']:
if module['result'] != 'failed':
continue
modstrings.append(self.get_step_url(testurl, module['name']))
if modstrings:
return '\n- [{!s}]({!s}) failed in {!s}'.format(self.job_test_name(job), testurl, ','.join(modstrings))
elif job['result'] == 'failed': # rare case: fail without module fails
return '\n- [{!s}]({!s}) failed'.format(self.job_test_name(job), testurl)
return ''
def summarize_openqa_jobs(self, jobs):
groups = {}
for job in jobs:
gl = "{!s}@{!s}".format(self.emd(job['group']), self.emd(job['settings']['FLAVOR']))
if gl not in groups:
groupurl = osc.core.makeurl(self.openqa.baseurl, ['tests', 'overview'],
{'version': job['settings']['VERSION'],
'groupid': job['group_id'],
'flavor': job['settings']['FLAVOR'],
'distri': job['settings']['DISTRI'],
'build': job['settings']['BUILD'],
})
groups[gl] = {'title': "__Group [{!s}]({!s})__\n".format(gl, groupurl),
'passed': 0, 'unfinished': 0, 'failed': []}
job_summary = self.summarize_one_openqa_job(job)
if job_summary is None:
groups[gl]['unfinished'] = groups[gl]['unfinished'] + 1
continue
# None vs ''
if not len(job_summary):
|
# if there is something to report, hold the request
# TODO: what is this ?
# qa_state = QA_FAILED
# gmsg = groups[gl]
groups[gl]['failed'].append(job_summary)
msg = ''
for group in sorted(groups.keys()):
msg += "\n\n" + groups[group]['title']
infos = []
if groups[group]['passed']:
infos.append("{:d} tests passed".format(groups[group]['passed']))
if len(groups[group]['failed']):
infos.append("{:d} tests failed".format(len(groups[group]['failed'])))
if groups[group]['unfinished']:
infos.append("{:d} unfinished tests".format(groups[group]['unfinished']))
msg += "(" + ', '.join(infos) + ")\n"
for fail in groups[group]['failed']:
msg += fail
return msg.rstrip('\n')
def check_one_request(self, req):
ret = None
try:
jobs = self.request_get_openqa_jobs(req)
qa_state = self.calculate_qa_status(jobs)
self.logger.debug("request %s state %s", req.reqid, qa_state)
msg = None
if self.force or qa_state == QA_UNKNOWN:
ret = super(OpenQABot, self).check_one_request(req)
jobs = self.request_get_openqa_jobs(req)
if self.force:
# make sure to delete previous comments if we're forcing
info = self.find_obs_request_comment(request_id=req.reqid)
if 'id' in info:
self.logger.debug("deleting old comment %s", info['id'])
if not self.dryrun:
self.commentapi.delete(info['id'])
if jobs:
# no notification until the result is done
osc.core.change_review_state(self.apiurl, req.reqid, newstate='new',
by_group=self.review_group, by_user=self.review_user,
message='now testing in openQA')
else:
msg = "no openQA tests defined"
self.add_comment(msg, 'done', request_id=req.reqid, result='accepted')
ret = True
elif qa_state == QA_FAILED or qa_state == QA_PASSED:
# don't take test repo results into the calculation of total
# this is for humans to decide which incident broke the test repo
jobs += self.request_get_openqa_jobs(req, incident=False, test_repo=True)
if self.calculate_qa_status(jobs) == QA_INPROGRESS:
self.logger.info(
"incident tests for request %s are done, but need to wait for test repo", req.reqid)
return
if qa_state == QA_PASSED:
msg = "openQA tests passed\n"
result = 'accepted'
ret = True
else:
msg = "openQA tests problematic\n"
result = 'declined'
ret = False
msg += self.summarize_openqa_jobs(jobs)
self.add_comment(msg, 'done', result=result, request_id=req.reqid)
elif qa_state == QA_INPROGRESS:
self.logger.info("request %s still in progress", req.reqid)
else:
raise Exception("unknown QA state %d", qa_state)
except Exception:
import traceback
self.logger.error("unhandled exception in openQA Bot")
self.logger.error(traceback.format_exc())
ret = None
return ret
def find_obs_request_comment(self, request_id=None, project_name=None):
"""Return previous comments (should be one)."""
if self.do_comments:
comments = self.commentapi.get_comments(request_id=request_id, project_name=project_name)
for c in comments.values():
m = comment_marker_re.match(c['comment'])
if m:
return {
'id': c['id'],
'state': m.group('state'),
'result': m.group('result'),
'comment': c['comment'],
'revision': m.group('revision')}
return {}
def check_product(self, job, product_prefix):
pmap = self.api_map[product_prefix]
posts = []
for arch in pmap['archs']:
need = False
settings = {'VERSION': pmap['version'], 'ARCH': arch}
settings['DISTRI'] = 'sle' if 'distri' not in pmap else pmap['distri']
issues = pmap.get('issues', {})
issues['OS_TEST_ISSUES'] = issues.get('OS_TEST_ISSUES', product_prefix)
required_issue = pmap.get('required_issue', False)
for key, prefix in issues.items():
self.logger.debug("{} {}".format(key, prefix))
if prefix + arch in job['channels']:
settings[key] = str(job['id'])
need = True
if required_issue:
if required_issue not in settings:
need = False
if need:
update = self.project_settings[product_prefix + arch]
update.apiurl = self.apiurl
update.logger = self.logger
for j in update.settings(
update.maintenance_project + ':' + str(job['id']),
product_prefix + arch, []):
if not job.get('openqa_build'):
job['openqa_build'] = update.get_max_revision(job)
if not job.get('openqa_build'):
return []
j['BUILD'] += '.' + str(job['openqa_build'])
j.update(settings)
# kGraft jobs can have different version
if 'real_version' in j:
j['VERSION'] = j['real_version']
del j['real_version']
posts.append(j)
self.logger.debug("Pmap: {} Posts: {}".format(pmap, posts))
return posts
def incident_openqa_jobs(self, s):
return self.openqa.openqa_request(
'GET', 'jobs',
{
'distri': s['DISTRI'],
'version': s['VERSION'],
'arch': s['ARCH'],
'flavor': s['FLAVOR'],
'build': s['BUILD'],
'scope': 'relevant',
'latest': '1'
})['jobs']
def check_suse_incidents(self):
for inc in requests.get('https://maintenance.suse.de/api/incident/active/').json():
self.logger.info("Incident number: {}".format(inc))
job = requests.get('https://maintenance.suse.de/api/incident/' + inc).json()
if job['meta']['state'] in ['final', 'gone']:
continue
# required in job: project, id, channels
self.test_job(job['base'])
def test_job(self, job):
self.logger.debug("Called test_job with: {}".format(job))
incident_project = str(job['project'])
try:
comment_info = self.find_obs_request_comment(project_name=incident_project)
except HTTPError as e:
self.logger.debug("Couldn't loaadd comments - {}".format(e))
return
comment_id = comment_info.get('id', None)
comment_build = str(comment_info.get('revision', ''))
openqa_posts = []
for prod in self.api_map.keys():
self.logger.debug("{} -- product in apimap".format(prod))
openqa_posts += self.check_product(job, prod)
openqa_jobs = []
for s in openqa_posts:
jobs = self.incident_openqa_jobs(s)
# take the project comment as marker for not posting jobs
if not len(jobs) and comment_build != str(job['openqa_build']):
if self.dryrun:
self.logger.info('WOULD POST:{}'.format(pformat(json.dumps(s, sort_keys=True))))
else:
self.logger.info("Posted: {}".format(pformat(json.dumps(s, sort_keys=True))))
self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
openqa_jobs += self.incident_openqa_jobs(s)
else:
self.logger.info("{} got {}".format(pformat(s), len(jobs)))
openqa_jobs += jobs
self.openqa_jobs[incident_project] = openqa_jobs
if len(openqa_jobs) == 0:
self.logger.debug("No openqa jobs defined")
return
# print openqa_jobs
msg = self.summarize_openqa_jobs(openqa_jobs)
state = 'seen'
result = 'none'
qa_status = self.calculate_qa_status(openqa_jobs)
if qa_status == QA_PASSED:
result = 'accepted'
state = 'done'
if qa_status == QA_FAILED:
result = 'declined'
state = 'done'
comment = "<!-- openqa state={!s} result={!s} revision={!s} -->\n".format(
state, result, job.get('openqa_build'))
comment += msg
if comment_id and state != 'done':
self.logger.info("%s is already commented, wait until done", incident_project)
return
if comment_info.get('comment', '').rstrip('\n') == comment.rstrip('\n'):
self.logger.info("%s comment did not change", incident_project)
return
self.logger.info("adding comment to %s, state %s", incident_project, state)
if not self.dryrun:
if comment_id:
self.logger.debug("delete comment: {}".format(comment_id))
self.commentapi.delete(comment_id)
self.commentapi.add_comment(project_name=str(incident_project), comment=str(comment))
| groups[gl]['passed'] = groups[gl]['passed'] + 1
continue | conditional_block |
openqabot.py | # -*- coding: utf-8 -*-
from collections import namedtuple
from datetime import date
import md5
from pprint import pformat
import re
from urllib2 import HTTPError
import requests
import osc.core
import ReviewBot
from osclib.comments import CommentAPI
from suse import SUSEUpdate
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
try:
import simplejson as json
except ImportError:
import json
QA_UNKNOWN = 0
QA_INPROGRESS = 1
QA_FAILED = 2
QA_PASSED = 3
Package = namedtuple('Package', ('name', 'version', 'release'))
pkgname_re = re.compile(r'(?P<name>.+)-(?P<version>[^-]+)-(?P<release>[^-]+)\.(?P<arch>[^.]+)\.rpm')
comment_marker_re = re.compile(
r'<!-- openqa state=(?P<state>done|seen)(?: result=(?P<result>accepted|declined|none))?(?: revision=(?P<revision>\d+))? -->')
class OpenQABot(ReviewBot.ReviewBot):
""" check ABI of library packages
"""
def __init__(self, *args, **kwargs):
super(OpenQABot, self).__init__(*args, **kwargs)
self.tgt_repo = {}
self.project_settings = {}
self.api_map = {}
self.force = False
self.openqa = None
self.commentapi = CommentAPI(self.apiurl)
self.update_test_builds = {}
self.pending_target_repos = set()
self.openqa_jobs = {}
def gather_test_builds(self):
for prj, u in self.tgt_repo[self.openqa.baseurl].items():
buildnr = 0
cjob = 0
for j in self.jobs_for_target(u):
# avoid going backwards in job ID
if cjob > int(j['id']):
continue
buildnr = j['settings']['BUILD']
cjob = int(j['id'])
self.update_test_builds[prj] = buildnr
jobs = self.jobs_for_target(u, build=buildnr)
self.openqa_jobs[prj] = jobs
if self.calculate_qa_status(jobs) == QA_INPROGRESS:
self.pending_target_repos.add(prj)
    # reimplementation from the base class
def check_requests(self):
if self.ibs:
self.check_suse_incidents()
# first calculate the latest build number for current jobs
self.gather_test_builds()
started = []
# then check progress on running incidents
for req in self.requests:
jobs = self.request_get_openqa_jobs(req, incident=True, test_repo=True)
ret = self.calculate_qa_status(jobs)
if ret != QA_UNKNOWN:
started.append(req)
all_requests = self.requests
self.requests = started
self.logger.debug("check started requests")
super(OpenQABot, self).check_requests()
self.requests = all_requests
skipped_one = False
# now make sure the jobs are for current repo
for prj, u in self.tgt_repo[self.openqa.baseurl].items():
if prj in self.pending_target_repos:
skipped_one = True
continue
self.trigger_build_for_target(prj, u)
# do not schedule new incidents unless we finished
# last wave
if skipped_one:
return
self.logger.debug("Check all requests")
super(OpenQABot, self).check_requests()
# check a set of repos for their primary checksums
@staticmethod
def calculate_repo_hash(repos):
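        # The digest covers the primary-metadata checksum of every repo, so it
        # changes exactly when some package content in some repo changes and
        # thus identifies the combined state of all repos.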
m = md5.new()
        # version salt: change this value to force a new repo hash (and thus fresh builds)
m.update('b')
for url in repos:
url += '/repodata/repomd.xml'
try:
root = ET.parse(osc.core.http_GET(url)).getroot()
except HTTPError:
raise
cs = root.find(
'.//{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum')
m.update(cs.text)
return m.hexdigest()
def is_incident_in_testing(self, incident):
# hard coded for now as we only run this code for SUSE Maintenance workflow
|
def calculate_incidents(self, incidents):
"""
get incident numbers from SUSE:Maintenance:Test project
returns dict with openQA var name : string with numbers
"""
self.logger.debug("calculate_incidents: {}".format(pformat(incidents)))
l_incidents = []
for kind, prj in incidents.items():
packages = osc.core.meta_get_packagelist(self.apiurl, prj)
incidents = []
# filter out incidents in staging
for incident in packages:
# remove patchinfo. prefix
incident = incident.replace('_', '.').split('.')[1]
req = self.is_incident_in_testing(incident)
# without release request it's in staging
if not req:
continue
# skip kgraft patches from aggregation
req_ = osc.core.Request()
req_.read(req)
src_prjs = {a.src_project for a in req_.actions}
if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
self.logger.debug("calculate_incidents: Incident is kgraft - {} ".format(incident))
continue
incidents.append(incident)
l_incidents.append((kind + '_TEST_ISSUES', ','.join(incidents)))
self.logger.debug("Calculate incidents:{}".format(pformat(l_incidents)))
return l_incidents
def jobs_for_target(self, data, build=None):
settings = data['settings'][0]
values = {
'distri': settings['DISTRI'],
'version': settings['VERSION'],
'arch': settings['ARCH'],
'flavor': settings['FLAVOR'],
'scope': 'relevant',
'latest': '1',
}
if build:
values['build'] = build
else:
values['test'] = data['test']
self.logger.debug("Get jobs: {}".format(pformat(values)))
return self.openqa.openqa_request('GET', 'jobs', values)['jobs']
# we don't know the current BUILD and querying all jobs is too expensive
# so we need to check for one known TEST first
# if that job doesn't contain the proper hash, we trigger a new one
# and then we know the build
def trigger_build_for_target(self, prj, data):
today = date.today().strftime("%Y%m%d")
try:
repohash = self.calculate_repo_hash(data['repos'])
except HTTPError as e:
self.logger.debug("REPOHAS not calculated with response {}".format(e))
return
buildnr = None
jobs = self.jobs_for_target(data)
for job in jobs:
if job['settings'].get('REPOHASH', '') == repohash:
# take the last in the row
buildnr = job['settings']['BUILD']
self.update_test_builds[prj] = buildnr
# ignore old build numbers, we want a fresh run every day
# to find regressions in the tests and to get data about
# randomly failing tests
if buildnr and buildnr.startswith(today):
return
buildnr = 0
# not found, then check for the next free build nr
for job in jobs:
build = job['settings']['BUILD']
if build and build.startswith(today):
try:
nr = int(build.split('-')[1])
if nr > buildnr:
buildnr = nr
except ValueError:
continue
buildnr = "{!s}-{:d}".format(today, buildnr + 1)
for s in data['settings']:
# now schedule it for real
if 'incidents' in data.keys():
for x, y in self.calculate_incidents(data['incidents']):
s[x] = y
s['BUILD'] = buildnr
s['REPOHASH'] = repohash
self.logger.debug("Prepared: {}".format(pformat(s)))
if not self.dryrun:
try:
self.logger.info("Openqa isos POST {}".format(pformat(s)))
self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
except Exception as e:
self.logger.error(e)
self.update_test_builds[prj] = buildnr
def request_get_openqa_jobs(self, req, incident=True, test_repo=False):
ret = None
types = {a.type for a in req.actions}
if 'maintenance_release' in types:
src_prjs = {a.src_project for a in req.actions}
if len(src_prjs) != 1:
raise Exception("can't handle maintenance_release from different incidents")
build = src_prjs.pop()
tgt_prjs = {a.tgt_project for a in req.actions}
ret = []
if incident:
ret += self.openqa_jobs[build]
for prj in sorted(tgt_prjs):
repo_settings = self.tgt_repo.get(self.openqa.baseurl, {})
if test_repo and prj in repo_settings:
repo_jobs = self.openqa_jobs[prj]
ret += repo_jobs
return ret
def calculate_qa_status(self, jobs=None):
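        # Aggregation rule: only the newest instance of each job name counts
        # (clones supersede originals); any unfinished job -> QA_INPROGRESS,
        # any result other than passed/softfailed -> QA_FAILED, else QA_PASSED.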
if not jobs:
return QA_UNKNOWN
j = {}
has_failed = False
in_progress = False
for job in jobs:
if job['clone_id']:
continue
name = job['name']
if name in j and int(job['id']) < int(j[name]['id']):
continue
j[name] = job
if job['state'] not in ('cancelled', 'done'):
in_progress = True
else:
if job['result'] != 'passed' and job['result'] != 'softfailed':
has_failed = True
if not j:
return QA_UNKNOWN
if in_progress:
return QA_INPROGRESS
if has_failed:
return QA_FAILED
return QA_PASSED
def add_comment(self, msg, state, request_id=None, result=None):
if not self.do_comments:
return
comment = "<!-- openqa state={!s}{!s} -->\n".format(state, ' result={!s}'.format(result) if result else '')
comment += "\n" + msg
info = self.find_obs_request_comment(request_id=request_id)
comment_id = info.get('id', None)
if state == info.get('state', 'missing'):
lines_before = len(info['comment'].split('\n'))
lines_after = len(comment.split('\n'))
if lines_before == lines_after:
self.logger.info("not worth the update, previous comment %s is state %s", comment_id, info['state'])
return
self.logger.info("adding comment to %s, state %s result %s", request_id, state, result)
self.logger.info("message: %s", msg)
if not self.dryrun:
if comment_id:
self.commentapi.delete(comment_id)
self.commentapi.add_comment(request_id=request_id, comment=str(comment))
# escape markdown
@staticmethod
    def emd(text):
        return text.replace('_', r'\_')
@staticmethod
def get_step_url(testurl, modulename):
failurl = testurl + '/modules/{!s}/fails'.format(modulename)
fails = requests.get(failurl).json()
failed_step = fails.get('first_failed_step', 1)
return "[{!s}]({!s}#step/{!s}/{:d})".format(OpenQABot.emd(modulename), testurl, modulename, failed_step)
@staticmethod
def job_test_name(job):
return "{!s}@{!s}".format(OpenQABot.emd(job['settings']['TEST']), OpenQABot.emd(job['settings']['MACHINE']))
def summarize_one_openqa_job(self, job):
testurl = osc.core.makeurl(self.openqa.baseurl, ['tests', str(job['id'])])
        if job['result'] not in ['passed', 'failed', 'softfailed']:
rstring = job['result']
if rstring == 'none':
return None
return '\n- [{!s}]({!s}) is {!s}'.format(self.job_test_name(job), testurl, rstring)
modstrings = []
for module in job['modules']:
if module['result'] != 'failed':
continue
modstrings.append(self.get_step_url(testurl, module['name']))
if modstrings:
return '\n- [{!s}]({!s}) failed in {!s}'.format(self.job_test_name(job), testurl, ','.join(modstrings))
elif job['result'] == 'failed': # rare case: fail without module fails
return '\n- [{!s}]({!s}) failed'.format(self.job_test_name(job), testurl)
return ''
def summarize_openqa_jobs(self, jobs):
groups = {}
for job in jobs:
gl = "{!s}@{!s}".format(self.emd(job['group']), self.emd(job['settings']['FLAVOR']))
if gl not in groups:
groupurl = osc.core.makeurl(self.openqa.baseurl, ['tests', 'overview'],
{'version': job['settings']['VERSION'],
'groupid': job['group_id'],
'flavor': job['settings']['FLAVOR'],
'distri': job['settings']['DISTRI'],
'build': job['settings']['BUILD'],
})
groups[gl] = {'title': "__Group [{!s}]({!s})__\n".format(gl, groupurl),
'passed': 0, 'unfinished': 0, 'failed': []}
job_summary = self.summarize_one_openqa_job(job)
if job_summary is None:
groups[gl]['unfinished'] = groups[gl]['unfinished'] + 1
continue
# None vs ''
if not len(job_summary):
groups[gl]['passed'] = groups[gl]['passed'] + 1
continue
# if there is something to report, hold the request
# TODO: what is this ?
# qa_state = QA_FAILED
# gmsg = groups[gl]
groups[gl]['failed'].append(job_summary)
msg = ''
for group in sorted(groups.keys()):
msg += "\n\n" + groups[group]['title']
infos = []
if groups[group]['passed']:
infos.append("{:d} tests passed".format(groups[group]['passed']))
if len(groups[group]['failed']):
infos.append("{:d} tests failed".format(len(groups[group]['failed'])))
if groups[group]['unfinished']:
infos.append("{:d} unfinished tests".format(groups[group]['unfinished']))
msg += "(" + ', '.join(infos) + ")\n"
for fail in groups[group]['failed']:
msg += fail
return msg.rstrip('\n')
def check_one_request(self, req):
ret = None
try:
jobs = self.request_get_openqa_jobs(req)
qa_state = self.calculate_qa_status(jobs)
self.logger.debug("request %s state %s", req.reqid, qa_state)
msg = None
if self.force or qa_state == QA_UNKNOWN:
ret = super(OpenQABot, self).check_one_request(req)
jobs = self.request_get_openqa_jobs(req)
if self.force:
# make sure to delete previous comments if we're forcing
info = self.find_obs_request_comment(request_id=req.reqid)
if 'id' in info:
self.logger.debug("deleting old comment %s", info['id'])
if not self.dryrun:
self.commentapi.delete(info['id'])
if jobs:
# no notification until the result is done
osc.core.change_review_state(self.apiurl, req.reqid, newstate='new',
by_group=self.review_group, by_user=self.review_user,
message='now testing in openQA')
else:
msg = "no openQA tests defined"
self.add_comment(msg, 'done', request_id=req.reqid, result='accepted')
ret = True
elif qa_state == QA_FAILED or qa_state == QA_PASSED:
# don't take test repo results into the calculation of total
# this is for humans to decide which incident broke the test repo
jobs += self.request_get_openqa_jobs(req, incident=False, test_repo=True)
if self.calculate_qa_status(jobs) == QA_INPROGRESS:
self.logger.info(
"incident tests for request %s are done, but need to wait for test repo", req.reqid)
return
if qa_state == QA_PASSED:
msg = "openQA tests passed\n"
result = 'accepted'
ret = True
else:
msg = "openQA tests problematic\n"
result = 'declined'
ret = False
msg += self.summarize_openqa_jobs(jobs)
self.add_comment(msg, 'done', result=result, request_id=req.reqid)
elif qa_state == QA_INPROGRESS:
self.logger.info("request %s still in progress", req.reqid)
else:
raise Exception("unknown QA state %d", qa_state)
except Exception:
import traceback
self.logger.error("unhandled exception in openQA Bot")
self.logger.error(traceback.format_exc())
ret = None
return ret
def find_obs_request_comment(self, request_id=None, project_name=None):
"""Return previous comments (should be one)."""
if self.do_comments:
comments = self.commentapi.get_comments(request_id=request_id, project_name=project_name)
for c in comments.values():
m = comment_marker_re.match(c['comment'])
if m:
return {
'id': c['id'],
'state': m.group('state'),
'result': m.group('result'),
'comment': c['comment'],
'revision': m.group('revision')}
return {}
def check_product(self, job, product_prefix):
pmap = self.api_map[product_prefix]
posts = []
for arch in pmap['archs']:
need = False
settings = {'VERSION': pmap['version'], 'ARCH': arch}
settings['DISTRI'] = 'sle' if 'distri' not in pmap else pmap['distri']
issues = pmap.get('issues', {})
issues['OS_TEST_ISSUES'] = issues.get('OS_TEST_ISSUES', product_prefix)
required_issue = pmap.get('required_issue', False)
for key, prefix in issues.items():
self.logger.debug("{} {}".format(key, prefix))
if prefix + arch in job['channels']:
settings[key] = str(job['id'])
need = True
if required_issue:
if required_issue not in settings:
need = False
if need:
update = self.project_settings[product_prefix + arch]
update.apiurl = self.apiurl
update.logger = self.logger
for j in update.settings(
update.maintenance_project + ':' + str(job['id']),
product_prefix + arch, []):
if not job.get('openqa_build'):
job['openqa_build'] = update.get_max_revision(job)
if not job.get('openqa_build'):
return []
j['BUILD'] += '.' + str(job['openqa_build'])
j.update(settings)
# kGraft jobs can have different version
if 'real_version' in j:
j['VERSION'] = j['real_version']
del j['real_version']
posts.append(j)
self.logger.debug("Pmap: {} Posts: {}".format(pmap, posts))
return posts
def incident_openqa_jobs(self, s):
return self.openqa.openqa_request(
'GET', 'jobs',
{
'distri': s['DISTRI'],
'version': s['VERSION'],
'arch': s['ARCH'],
'flavor': s['FLAVOR'],
'build': s['BUILD'],
'scope': 'relevant',
'latest': '1'
})['jobs']
def check_suse_incidents(self):
for inc in requests.get('https://maintenance.suse.de/api/incident/active/').json():
self.logger.info("Incident number: {}".format(inc))
job = requests.get('https://maintenance.suse.de/api/incident/' + inc).json()
if job['meta']['state'] in ['final', 'gone']:
continue
# required in job: project, id, channels
self.test_job(job['base'])
def test_job(self, job):
self.logger.debug("Called test_job with: {}".format(job))
incident_project = str(job['project'])
try:
comment_info = self.find_obs_request_comment(project_name=incident_project)
except HTTPError as e:
self.logger.debug("Couldn't loaadd comments - {}".format(e))
return
comment_id = comment_info.get('id', None)
comment_build = str(comment_info.get('revision', ''))
openqa_posts = []
for prod in self.api_map.keys():
self.logger.debug("{} -- product in apimap".format(prod))
openqa_posts += self.check_product(job, prod)
openqa_jobs = []
for s in openqa_posts:
jobs = self.incident_openqa_jobs(s)
# take the project comment as marker for not posting jobs
if not len(jobs) and comment_build != str(job['openqa_build']):
if self.dryrun:
self.logger.info('WOULD POST:{}'.format(pformat(json.dumps(s, sort_keys=True))))
else:
self.logger.info("Posted: {}".format(pformat(json.dumps(s, sort_keys=True))))
self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
openqa_jobs += self.incident_openqa_jobs(s)
else:
self.logger.info("{} got {}".format(pformat(s), len(jobs)))
openqa_jobs += jobs
self.openqa_jobs[incident_project] = openqa_jobs
if len(openqa_jobs) == 0:
self.logger.debug("No openqa jobs defined")
return
# print openqa_jobs
msg = self.summarize_openqa_jobs(openqa_jobs)
state = 'seen'
result = 'none'
qa_status = self.calculate_qa_status(openqa_jobs)
if qa_status == QA_PASSED:
result = 'accepted'
state = 'done'
if qa_status == QA_FAILED:
result = 'declined'
state = 'done'
comment = "<!-- openqa state={!s} result={!s} revision={!s} -->\n".format(
state, result, job.get('openqa_build'))
comment += msg
if comment_id and state != 'done':
self.logger.info("%s is already commented, wait until done", incident_project)
return
if comment_info.get('comment', '').rstrip('\n') == comment.rstrip('\n'):
self.logger.info("%s comment did not change", incident_project)
return
self.logger.info("adding comment to %s, state %s", incident_project, state)
if not self.dryrun:
if comment_id:
self.logger.debug("delete comment: {}".format(comment_id))
self.commentapi.delete(comment_id)
self.commentapi.add_comment(project_name=str(incident_project), comment=str(comment))
| project = 'SUSE:Maintenance:{}'.format(incident)
xpath = "(state/@name='review') and (action/source/@project='{}' and action/@type='maintenance_release')".format(project)
res = osc.core.search(self.apiurl, request=xpath)['request']
# return the one and only (or None)
return res.find('request') | identifier_body |
openqabot.py | # -*- coding: utf-8 -*-
from collections import namedtuple
from datetime import date
import md5
from pprint import pformat
import re
from urllib2 import HTTPError
import requests
import osc.core
import ReviewBot
from osclib.comments import CommentAPI
from suse import SUSEUpdate
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
try:
import simplejson as json
except ImportError:
import json
QA_UNKNOWN = 0
QA_INPROGRESS = 1
QA_FAILED = 2
QA_PASSED = 3
Package = namedtuple('Package', ('name', 'version', 'release'))
pkgname_re = re.compile(r'(?P<name>.+)-(?P<version>[^-]+)-(?P<release>[^-]+)\.(?P<arch>[^.]+)\.rpm')
comment_marker_re = re.compile(
r'<!-- openqa state=(?P<state>done|seen)(?: result=(?P<result>accepted|declined|none))?(?: revision=(?P<revision>\d+))? -->')
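# Marker the bot embeds in its own comments, e.g.
# '<!-- openqa state=done result=accepted revision=42 -->' (revision value is
# illustrative); parsing it back tells the bot what it last posted for a
# request or project.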
class OpenQABot(ReviewBot.ReviewBot):
""" check ABI of library packages
"""
def __init__(self, *args, **kwargs):
super(OpenQABot, self).__init__(*args, **kwargs)
self.tgt_repo = {}
self.project_settings = {}
self.api_map = {}
self.force = False
self.openqa = None
self.commentapi = CommentAPI(self.apiurl)
self.update_test_builds = {}
self.pending_target_repos = set()
self.openqa_jobs = {}
def gather_test_builds(self):
for prj, u in self.tgt_repo[self.openqa.baseurl].items():
buildnr = 0
cjob = 0
for j in self.jobs_for_target(u):
# avoid going backwards in job ID
if cjob > int(j['id']):
continue
buildnr = j['settings']['BUILD']
cjob = int(j['id'])
self.update_test_builds[prj] = buildnr
jobs = self.jobs_for_target(u, build=buildnr)
self.openqa_jobs[prj] = jobs
if self.calculate_qa_status(jobs) == QA_INPROGRESS:
self.pending_target_repos.add(prj)
    # reimplementation from the base class
def check_requests(self):
if self.ibs:
self.check_suse_incidents()
# first calculate the latest build number for current jobs
self.gather_test_builds()
started = []
# then check progress on running incidents
for req in self.requests:
jobs = self.request_get_openqa_jobs(req, incident=True, test_repo=True)
ret = self.calculate_qa_status(jobs)
if ret != QA_UNKNOWN:
started.append(req)
all_requests = self.requests
self.requests = started
self.logger.debug("check started requests")
super(OpenQABot, self).check_requests()
self.requests = all_requests
skipped_one = False
# now make sure the jobs are for current repo
for prj, u in self.tgt_repo[self.openqa.baseurl].items():
if prj in self.pending_target_repos:
skipped_one = True
continue
self.trigger_build_for_target(prj, u)
# do not schedule new incidents unless we finished
# last wave
if skipped_one:
return
self.logger.debug("Check all requests")
super(OpenQABot, self).check_requests()
# check a set of repos for their primary checksums
@staticmethod
def calculate_repo_hash(repos):
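        # The digest covers the primary-metadata checksum of every repo, so it
        # changes exactly when some package content in some repo changes and
        # thus identifies the combined state of all repos.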
m = md5.new()
        # version salt: change this value to force a new repo hash (and thus fresh builds)
m.update('b')
for url in repos:
url += '/repodata/repomd.xml'
try:
root = ET.parse(osc.core.http_GET(url)).getroot()
except HTTPError:
raise
cs = root.find(
'.//{http://linux.duke.edu/metadata/repo}data[@type="primary"]/{http://linux.duke.edu/metadata/repo}checksum')
m.update(cs.text)
return m.hexdigest()
def is_incident_in_testing(self, incident):
# hard coded for now as we only run this code for SUSE Maintenance workflow
project = 'SUSE:Maintenance:{}'.format(incident)
xpath = "(state/@name='review') and (action/source/@project='{}' and action/@type='maintenance_release')".format(project)
res = osc.core.search(self.apiurl, request=xpath)['request']
# return the one and only (or None)
return res.find('request')
def calculate_incidents(self, incidents):
"""
get incident numbers from SUSE:Maintenance:Test project
returns dict with openQA var name : string with numbers
"""
self.logger.debug("calculate_incidents: {}".format(pformat(incidents)))
l_incidents = []
for kind, prj in incidents.items():
packages = osc.core.meta_get_packagelist(self.apiurl, prj)
incidents = []
# filter out incidents in staging
for incident in packages:
# remove patchinfo. prefix
incident = incident.replace('_', '.').split('.')[1]
req = self.is_incident_in_testing(incident)
# without release request it's in staging
if not req:
continue
# skip kgraft patches from aggregation
req_ = osc.core.Request()
req_.read(req)
src_prjs = {a.src_project for a in req_.actions}
if SUSEUpdate.kgraft_target(self.apiurl, src_prjs.pop()):
self.logger.debug("calculate_incidents: Incident is kgraft - {} ".format(incident))
continue
incidents.append(incident)
l_incidents.append((kind + '_TEST_ISSUES', ','.join(incidents)))
self.logger.debug("Calculate incidents:{}".format(pformat(l_incidents)))
return l_incidents
def jobs_for_target(self, data, build=None):
settings = data['settings'][0]
values = {
'distri': settings['DISTRI'],
'version': settings['VERSION'],
'arch': settings['ARCH'],
'flavor': settings['FLAVOR'],
'scope': 'relevant',
'latest': '1',
}
if build:
values['build'] = build
else:
values['test'] = data['test']
self.logger.debug("Get jobs: {}".format(pformat(values)))
return self.openqa.openqa_request('GET', 'jobs', values)['jobs']
# we don't know the current BUILD and querying all jobs is too expensive
# so we need to check for one known TEST first
# if that job doesn't contain the proper hash, we trigger a new one
# and then we know the build
def trigger_build_for_target(self, prj, data):
today = date.today().strftime("%Y%m%d")
try:
repohash = self.calculate_repo_hash(data['repos'])
except HTTPError as e:
self.logger.debug("REPOHAS not calculated with response {}".format(e))
return
buildnr = None
jobs = self.jobs_for_target(data)
for job in jobs:
if job['settings'].get('REPOHASH', '') == repohash:
# take the last in the row
buildnr = job['settings']['BUILD']
self.update_test_builds[prj] = buildnr
# ignore old build numbers, we want a fresh run every day
# to find regressions in the tests and to get data about
# randomly failing tests
if buildnr and buildnr.startswith(today):
return
buildnr = 0
# not found, then check for the next free build nr
for job in jobs:
build = job['settings']['BUILD']
if build and build.startswith(today):
try:
nr = int(build.split('-')[1])
if nr > buildnr:
buildnr = nr
except ValueError:
continue
buildnr = "{!s}-{:d}".format(today, buildnr + 1)
for s in data['settings']:
# now schedule it for real
if 'incidents' in data.keys():
for x, y in self.calculate_incidents(data['incidents']):
s[x] = y
s['BUILD'] = buildnr
s['REPOHASH'] = repohash
self.logger.debug("Prepared: {}".format(pformat(s)))
if not self.dryrun:
try:
self.logger.info("Openqa isos POST {}".format(pformat(s)))
self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
except Exception as e:
self.logger.error(e)
self.update_test_builds[prj] = buildnr
| types = {a.type for a in req.actions}
if 'maintenance_release' in types:
src_prjs = {a.src_project for a in req.actions}
if len(src_prjs) != 1:
raise Exception("can't handle maintenance_release from different incidents")
build = src_prjs.pop()
tgt_prjs = {a.tgt_project for a in req.actions}
ret = []
if incident:
ret += self.openqa_jobs[build]
for prj in sorted(tgt_prjs):
repo_settings = self.tgt_repo.get(self.openqa.baseurl, {})
if test_repo and prj in repo_settings:
repo_jobs = self.openqa_jobs[prj]
ret += repo_jobs
return ret
def calculate_qa_status(self, jobs=None):
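        # Aggregation rule: only the newest instance of each job name counts
        # (clones supersede originals); any unfinished job -> QA_INPROGRESS,
        # any result other than passed/softfailed -> QA_FAILED, else QA_PASSED.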
if not jobs:
return QA_UNKNOWN
j = {}
has_failed = False
in_progress = False
for job in jobs:
if job['clone_id']:
continue
name = job['name']
if name in j and int(job['id']) < int(j[name]['id']):
continue
j[name] = job
if job['state'] not in ('cancelled', 'done'):
in_progress = True
else:
if job['result'] != 'passed' and job['result'] != 'softfailed':
has_failed = True
if not j:
return QA_UNKNOWN
if in_progress:
return QA_INPROGRESS
if has_failed:
return QA_FAILED
return QA_PASSED
def add_comment(self, msg, state, request_id=None, result=None):
if not self.do_comments:
return
comment = "<!-- openqa state={!s}{!s} -->\n".format(state, ' result={!s}'.format(result) if result else '')
comment += "\n" + msg
info = self.find_obs_request_comment(request_id=request_id)
comment_id = info.get('id', None)
if state == info.get('state', 'missing'):
lines_before = len(info['comment'].split('\n'))
lines_after = len(comment.split('\n'))
if lines_before == lines_after:
self.logger.info("not worth the update, previous comment %s is state %s", comment_id, info['state'])
return
self.logger.info("adding comment to %s, state %s result %s", request_id, state, result)
self.logger.info("message: %s", msg)
if not self.dryrun:
if comment_id:
self.commentapi.delete(comment_id)
self.commentapi.add_comment(request_id=request_id, comment=str(comment))
# escape markdown
@staticmethod
    def emd(text):
        return text.replace('_', r'\_')
@staticmethod
def get_step_url(testurl, modulename):
failurl = testurl + '/modules/{!s}/fails'.format(modulename)
fails = requests.get(failurl).json()
failed_step = fails.get('first_failed_step', 1)
return "[{!s}]({!s}#step/{!s}/{:d})".format(OpenQABot.emd(modulename), testurl, modulename, failed_step)
@staticmethod
def job_test_name(job):
return "{!s}@{!s}".format(OpenQABot.emd(job['settings']['TEST']), OpenQABot.emd(job['settings']['MACHINE']))
def summarize_one_openqa_job(self, job):
testurl = osc.core.makeurl(self.openqa.baseurl, ['tests', str(job['id'])])
        if job['result'] not in ['passed', 'failed', 'softfailed']:
rstring = job['result']
if rstring == 'none':
return None
return '\n- [{!s}]({!s}) is {!s}'.format(self.job_test_name(job), testurl, rstring)
modstrings = []
for module in job['modules']:
if module['result'] != 'failed':
continue
modstrings.append(self.get_step_url(testurl, module['name']))
if modstrings:
return '\n- [{!s}]({!s}) failed in {!s}'.format(self.job_test_name(job), testurl, ','.join(modstrings))
elif job['result'] == 'failed': # rare case: fail without module fails
return '\n- [{!s}]({!s}) failed'.format(self.job_test_name(job), testurl)
return ''
def summarize_openqa_jobs(self, jobs):
groups = {}
for job in jobs:
gl = "{!s}@{!s}".format(self.emd(job['group']), self.emd(job['settings']['FLAVOR']))
if gl not in groups:
groupurl = osc.core.makeurl(self.openqa.baseurl, ['tests', 'overview'],
{'version': job['settings']['VERSION'],
'groupid': job['group_id'],
'flavor': job['settings']['FLAVOR'],
'distri': job['settings']['DISTRI'],
'build': job['settings']['BUILD'],
})
groups[gl] = {'title': "__Group [{!s}]({!s})__\n".format(gl, groupurl),
'passed': 0, 'unfinished': 0, 'failed': []}
job_summary = self.summarize_one_openqa_job(job)
if job_summary is None:
groups[gl]['unfinished'] = groups[gl]['unfinished'] + 1
continue
# None vs ''
if not len(job_summary):
groups[gl]['passed'] = groups[gl]['passed'] + 1
continue
# if there is something to report, hold the request
# TODO: what is this ?
# qa_state = QA_FAILED
# gmsg = groups[gl]
groups[gl]['failed'].append(job_summary)
msg = ''
for group in sorted(groups.keys()):
msg += "\n\n" + groups[group]['title']
infos = []
if groups[group]['passed']:
infos.append("{:d} tests passed".format(groups[group]['passed']))
if len(groups[group]['failed']):
infos.append("{:d} tests failed".format(len(groups[group]['failed'])))
if groups[group]['unfinished']:
infos.append("{:d} unfinished tests".format(groups[group]['unfinished']))
msg += "(" + ', '.join(infos) + ")\n"
for fail in groups[group]['failed']:
msg += fail
return msg.rstrip('\n')
def check_one_request(self, req):
ret = None
try:
jobs = self.request_get_openqa_jobs(req)
qa_state = self.calculate_qa_status(jobs)
self.logger.debug("request %s state %s", req.reqid, qa_state)
msg = None
if self.force or qa_state == QA_UNKNOWN:
ret = super(OpenQABot, self).check_one_request(req)
jobs = self.request_get_openqa_jobs(req)
if self.force:
# make sure to delete previous comments if we're forcing
info = self.find_obs_request_comment(request_id=req.reqid)
if 'id' in info:
self.logger.debug("deleting old comment %s", info['id'])
if not self.dryrun:
self.commentapi.delete(info['id'])
if jobs:
# no notification until the result is done
osc.core.change_review_state(self.apiurl, req.reqid, newstate='new',
by_group=self.review_group, by_user=self.review_user,
message='now testing in openQA')
else:
msg = "no openQA tests defined"
self.add_comment(msg, 'done', request_id=req.reqid, result='accepted')
ret = True
elif qa_state == QA_FAILED or qa_state == QA_PASSED:
# don't take test repo results into the calculation of total
# this is for humans to decide which incident broke the test repo
jobs += self.request_get_openqa_jobs(req, incident=False, test_repo=True)
if self.calculate_qa_status(jobs) == QA_INPROGRESS:
self.logger.info(
"incident tests for request %s are done, but need to wait for test repo", req.reqid)
return
if qa_state == QA_PASSED:
msg = "openQA tests passed\n"
result = 'accepted'
ret = True
else:
msg = "openQA tests problematic\n"
result = 'declined'
ret = False
msg += self.summarize_openqa_jobs(jobs)
self.add_comment(msg, 'done', result=result, request_id=req.reqid)
elif qa_state == QA_INPROGRESS:
self.logger.info("request %s still in progress", req.reqid)
else:
raise Exception("unknown QA state %d", qa_state)
except Exception:
import traceback
self.logger.error("unhandled exception in openQA Bot")
self.logger.error(traceback.format_exc())
ret = None
return ret
def find_obs_request_comment(self, request_id=None, project_name=None):
"""Return previous comments (should be one)."""
if self.do_comments:
comments = self.commentapi.get_comments(request_id=request_id, project_name=project_name)
for c in comments.values():
m = comment_marker_re.match(c['comment'])
if m:
return {
'id': c['id'],
'state': m.group('state'),
'result': m.group('result'),
'comment': c['comment'],
'revision': m.group('revision')}
return {}
def check_product(self, job, product_prefix):
pmap = self.api_map[product_prefix]
posts = []
for arch in pmap['archs']:
need = False
settings = {'VERSION': pmap['version'], 'ARCH': arch}
settings['DISTRI'] = 'sle' if 'distri' not in pmap else pmap['distri']
issues = pmap.get('issues', {})
issues['OS_TEST_ISSUES'] = issues.get('OS_TEST_ISSUES', product_prefix)
required_issue = pmap.get('required_issue', False)
for key, prefix in issues.items():
self.logger.debug("{} {}".format(key, prefix))
if prefix + arch in job['channels']:
settings[key] = str(job['id'])
need = True
if required_issue:
if required_issue not in settings:
need = False
if need:
update = self.project_settings[product_prefix + arch]
update.apiurl = self.apiurl
update.logger = self.logger
for j in update.settings(
update.maintenance_project + ':' + str(job['id']),
product_prefix + arch, []):
if not job.get('openqa_build'):
job['openqa_build'] = update.get_max_revision(job)
if not job.get('openqa_build'):
return []
j['BUILD'] += '.' + str(job['openqa_build'])
j.update(settings)
# kGraft jobs can have different version
if 'real_version' in j:
j['VERSION'] = j['real_version']
del j['real_version']
posts.append(j)
self.logger.debug("Pmap: {} Posts: {}".format(pmap, posts))
return posts
def incident_openqa_jobs(self, s):
return self.openqa.openqa_request(
'GET', 'jobs',
{
'distri': s['DISTRI'],
'version': s['VERSION'],
'arch': s['ARCH'],
'flavor': s['FLAVOR'],
'build': s['BUILD'],
'scope': 'relevant',
'latest': '1'
})['jobs']
def check_suse_incidents(self):
for inc in requests.get('https://maintenance.suse.de/api/incident/active/').json():
self.logger.info("Incident number: {}".format(inc))
job = requests.get('https://maintenance.suse.de/api/incident/' + inc).json()
if job['meta']['state'] in ['final', 'gone']:
continue
# required in job: project, id, channels
self.test_job(job['base'])
def test_job(self, job):
self.logger.debug("Called test_job with: {}".format(job))
incident_project = str(job['project'])
try:
comment_info = self.find_obs_request_comment(project_name=incident_project)
except HTTPError as e:
self.logger.debug("Couldn't loaadd comments - {}".format(e))
return
comment_id = comment_info.get('id', None)
comment_build = str(comment_info.get('revision', ''))
openqa_posts = []
for prod in self.api_map.keys():
self.logger.debug("{} -- product in apimap".format(prod))
openqa_posts += self.check_product(job, prod)
openqa_jobs = []
for s in openqa_posts:
jobs = self.incident_openqa_jobs(s)
# take the project comment as marker for not posting jobs
if not len(jobs) and comment_build != str(job['openqa_build']):
if self.dryrun:
self.logger.info('WOULD POST:{}'.format(pformat(json.dumps(s, sort_keys=True))))
else:
self.logger.info("Posted: {}".format(pformat(json.dumps(s, sort_keys=True))))
self.openqa.openqa_request('POST', 'isos', data=s, retries=1)
openqa_jobs += self.incident_openqa_jobs(s)
else:
self.logger.info("{} got {}".format(pformat(s), len(jobs)))
openqa_jobs += jobs
self.openqa_jobs[incident_project] = openqa_jobs
if len(openqa_jobs) == 0:
self.logger.debug("No openqa jobs defined")
return
# print openqa_jobs
msg = self.summarize_openqa_jobs(openqa_jobs)
state = 'seen'
result = 'none'
qa_status = self.calculate_qa_status(openqa_jobs)
if qa_status == QA_PASSED:
result = 'accepted'
state = 'done'
if qa_status == QA_FAILED:
result = 'declined'
state = 'done'
comment = "<!-- openqa state={!s} result={!s} revision={!s} -->\n".format(
state, result, job.get('openqa_build'))
comment += msg
if comment_id and state != 'done':
self.logger.info("%s is already commented, wait until done", incident_project)
return
if comment_info.get('comment', '').rstrip('\n') == comment.rstrip('\n'):
self.logger.info("%s comment did not change", incident_project)
return
self.logger.info("adding comment to %s, state %s", incident_project, state)
if not self.dryrun:
if comment_id:
self.logger.debug("delete comment: {}".format(comment_id))
self.commentapi.delete(comment_id)
self.commentapi.add_comment(project_name=str(incident_project), comment=str(comment)) | def request_get_openqa_jobs(self, req, incident=True, test_repo=False):
ret = None | random_line_split |
all.js |
//bullet: class (constructor function)
function Bullet() {
    //properties:
    this.ele = document.createElement("div");
    //id of this bullet inside the gameEngine.bullets object
    this.id = parseInt(Math.random()*100000) + "";
    //methods:
    //initialization method init
    this.init = function() {
        this.ele.className = "bullet";
        gameEngine.ele.appendChild(this.ele); //append to the main game area
        gameEngine.bullets[this.id] = this; //register the bullet object in gameEngine.bullets
        //console.log(gameEngine.bullets);
        //position
        var left = myPlane.ele.offsetLeft + myPlane.ele.offsetWidth/2 - this.ele.offsetWidth/2;
        this.ele.style.left = left + "px";
        this.ele.style.top = myPlane.ele.offsetTop - this.ele.offsetHeight + "px";
        return this;
    },
    //move
    this.move = function() {
        var self = this;
        //move the bullet upwards
        this.timer = setInterval(function(){
            //once the bullet passes the top edge of the game area, remove it and stop the timer
            if (self.ele.offsetTop < -18) {
                clearInterval(self.timer); //stop the timer
                gameEngine.ele.removeChild(self.ele); //remove the bullet
                delete gameEngine.bullets[self.id]; //drop the bullet object from gameEngine.bullets
            }
            else {
                self.ele.style.top = self.ele.offsetTop - 10 + "px";
            }
        }, 30);
    }
    //explode
    this.boom = function() {
        //stop the timer started in move() first, so the bullet stops moving
        clearInterval(this.timer);
        this.ele.className = "bullet-die";
        //explosion animation
        var self = this;
        var index = 0;
        var dieImgs = ["images/die1.png", "images/die2.png"];
        var dieTimer = setInterval(function(){
            if (index >= 2) {
                clearInterval(dieTimer); //stop the timer
                gameEngine.ele.removeChild(self.ele); //remove the bullet
            }
            else {
                self.ele.style.background = "url("+ dieImgs[index] +") no-repeat";
                index++;
            }
        }, 50);
    }
}
/*
 * collision detection
 */
function isCrash(obj1, obj2){
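    // Center-point test equivalent to an AABB overlap check: obj1's center is
    // compared against obj2's box expanded by half of obj1's size on each side.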
if(obj1 && obj2){
var leftSide = obj2.offsetLeft-obj1.offsetWidth/2;
var rightSide = obj2.offsetLeft+obj2.offsetWidth+obj1.offsetWidth/2;
var upSide = obj2.offsetTop - obj1.offsetHeight/2;
var downSide = obj2.offsetTop + obj2.offsetHeight + obj1.offsetHeight/2;
var x = obj1.offsetLeft+obj1.offsetWidth/2;
var y = obj1.offsetTop + obj1.offsetHeight/2;
if(x > leftSide && x < rightSide && y > upSide && y < downSide){
return true;
}
}
return false;
}
//enemy plane: class (constructor function)
function Enemy(type) {
    //properties:
    this.ele = document.createElement("div");
    this.hp = 0; //hit points
    this.speed = 0; //speed
    this.dieImgs = []; //frames of the explosion animation
    //id of this enemy inside the gameEngine.enemys object
    this.id = parseInt(Math.random()*100000) + "";
    this.score = 0; //score awarded for destroying it
    //methods:
    this.init = function() {
        switch(type) {
            //large plane
            case this.Enemy_Type_Large:
                this.ele.className = "enemy-large"; //css class
                this.hp = this.Enemy_HP_Large; //hit points
                this.speed = this.Enemy_Speed_Large; //speed
                this.dieImgs = ["images/plane3_die1.png", "images/plane3_die2.png", "images/plane3_die3.png", "images/plane3_die4.png", "images/plane3_die5.png", "images/plane3_die6.png"];
                this.score = 30;
                break;
            //medium plane
            case this.Enemy_Type_Middle:
                this.ele.className = "enemy-middle"; //css class
                this.hp = this.Enemy_HP_Middle; //hit points
                this.speed = this.Enemy_Speed_Middle; //speed
                this.dieImgs = ["images/plane2_die1.png", "images/plane2_die2.png", "images/plane2_die3.png", "images/plane2_die4.png"];
                this.score = 20;
                break;
            //small plane
            case this.Enemy_Type_Small:
                this.ele.className = "enemy-small"; //css class
                this.hp = this.Enemy_HP_Small; //hit points
                this.speed = this.Enemy_Speed_Small; //speed
                this.dieImgs = ["images/plane1_die1.png", "images/plane1_die2.png", "images/plane1_die3.png"];
                this.score = 10;
                break;
        }
        //append the enemy to the game area
        gameEngine.ele.appendChild(this.ele);
        gameEngine.enemys[this.id] = this; //register the enemy object in gameEngine.enemys
        //console.log(gameEngine.enemys);
        //position
        var left = Math.random() * (gameEngine.ele.offsetWidth - this.ele.offsetWidth);
        this.ele.style.left = left + "px";
        this.ele.style.top = -this.ele.offsetHeight + "px";
        return this;
    }
    //move
    this.move = function() {
        var self = this;
        //start a timer that moves the enemy downwards
        this.timer = setInterval(function() {
            //if the enemy passes the bottom edge, stop the timer and remove it
            if (self.ele.offsetTop > gameEngine.ele.offsetHeight) {
                clearInterval(self.timer); //stop the timer
                gameEngine.ele.removeChild(self.ele); //remove the enemy
                delete gameEngine.enemys[self.id]; //drop this enemy object from gameEngine.enemys
            }
            else {
                //move
                self.ele.style.top = self.ele.offsetTop + self.speed + "px";
            }
        }, 30);
    }
    //take one point of damage
    this.hurt = function() {
        this.hp--; //lose one hit point
        if (this.hp == 0) { //when HP reaches 0
            this.boom(); //explode
            //add the score
            gameEngine.scoreNode.innerHTML = (gameEngine.scoreNode.innerHTML-0) + this.score;
        }
    }
    //explode
    this.boom = function() {
        clearInterval(this.timer); //stop the timer from move() so the enemy stops moving
        //explosion animation
        var self = this;
        var index = 0;
        var dieTimer = setInterval(function(){
            if (index >= self.dieImgs.length) {
                clearInterval(dieTimer); //stop the timer
                gameEngine.ele.removeChild(self.ele); //remove the enemy
                delete gameEngine.enemys[self.id]; //drop this enemy object from gameEngine.enemys
            }
            else {
                self.ele.style.background = "url(" + self.dieImgs[index++] + ") no-repeat";
            }
        }, 50);
    }
}
Enemy.prototype = {
    Enemy_Type_Large: 1, //large plane
    Enemy_Type_Middle: 2, //medium plane
    Enemy_Type_Small: 3, //small plane
    Enemy_HP_Large: 8, //hit points of a large plane
    Enemy_HP_Middle: 4, //hit points of a medium plane
    Enemy_HP_Small: 1, //hit points of a small plane
    Enemy_Speed_Large: 2, //speed of a large plane
    Enemy_Speed_Middle: 4, //speed of a medium plane
    Enemy_Speed_Small: 8 //speed of a small plane
}
//game engine (object)
/*
 * starts and loads the game, enters the main screen,
 * creates enemies, moves my plane, runs collision detection...
 */
var gameEngine = {
    //ele: the main game screen (play area)
    ele: null,
    bullets: {}, //all bullets currently shown in the play area
    enemys: {}, //all enemy planes currently shown in the play area
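    // Plain objects keyed by random ids serve as lookup tables: entries can be
    // added and deleted in O(1) and iterated with for-in during collision checks.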
    isCrashMyPlane: false, //whether an enemy has collided with my plane
    scoreNode: null, //DOM node that displays the score
    //methods:
    //initialization method init
    init: function(){
        this.ele = document.getElementById("main_body");
        return this;
    },
    //start the game
    start: function(){
        //load the game
        gameEngine.loading(function(){
            //loading has finished,
            //so the actual game can start now
            console.log("starting the actual game");
            //1. show my plane and start firing bullets
            myPlane.init().fire();
            //2. start listening to the keyboard
            gameEngine.keyListening();
            //3. create enemy planes
            gameEngine.createEnemy();
            //4. collision detection
            gameEngine.crashListening();
            //5. show the score
            gameEngine.showScore();
            //6. scroll the background image
            gameEngine.move();
        });
    },
    //load the game
    loading: function(loadCallBack) {
        //show the logo
        var logo = document.createElement("div");
        logo.className = "logo";
        gameEngine.ele.appendChild(logo);
        //show the loading-animation image
        var load = document.createElement("div");
        load.className = "loading";
        gameEngine.ele.appendChild(load);
        //start the loading animation
        var index = 0;
        var loadImgs = ["images/loading1.png", "images/loading2.png", "images/loading3.png"];
        var timer = setInterval(function(){
            //once index reaches the last frame, loading is finished
            if (index >= 2) {
                clearInterval(timer); //stop the timer
                //remove the images (logo, load)
                gameEngine.ele.removeChild(logo);
                gameEngine.ele.removeChild(load);
                //invoke the callback
                loadCallBack();
            }
            else {
                //switch to the next image
                index++;
                load.style.background = "url(" + loadImgs[index%3] + ") no-repeat";
            }
        }, 500);
    },
    //start listening to the keyboard
    keyListening: function(){
        var speed = 0; //speed
        //listen to the keyboard
        window.onkeydown = function(evt){
            var oEvent = evt || event;
            var keycode = oEvent.keyCode; //key code
            console.log(keycode);
            //moving via the auto-repeat of a held-down key:
            /*
            //left
            if (keycode == 37) {
                myPlane.ele.style.left = myPlane.ele.offsetLeft - 10 + "px";
            }
            //right
            else if (keycode == 39) {
                myPlane.ele.style.left = myPlane.ele.offsetLeft + 10 + "px";
            }
            */
            //left
            if (keycode == 37) {
                speed = -10;
            }
            //right
            else if (keycode == 39){
                speed = 10;
            }
        }
        //key released
        window.onkeyup = function() {
            speed = 0;
        }
        //move the plane at a constant rate given by speed
        setInterval(function(){
            var x = myPlane.ele.offsetLeft + speed;
            if (x < 0) { //clamp to the left edge at most
                x = 0;
            }
            //clamp to the right edge at most
            else if (x > gameEngine.ele.offsetWidth-myPlane.ele.offsetWidth) {
                x = gameEngine.ele.offsetWidth-myPlane.ele.offsetWidth;
            }
            myPlane.ele.style.left = x + "px";
        }, 30);
    },
    //create enemy planes
    createEnemy: function() {
        //randomly create large enemies
        setInterval(createBig, 6000);
        function createBig() {
            var flag = Math.random() > 0.5 ? true : false; //50% chance to create one
            if (flag) {
                var bigEnemy = new Enemy(Enemy.prototype.Enemy_Type_Large); //create a large enemy object
                bigEnemy.init().move(); //initialize it and start moving down
            }
        }
        //randomly create medium planes
        setInterval(createMiddle, 1000);
        function createMiddle() {
            var flag = Math.random() > 0.7 ? true : false; //30% chance to create one
            if (flag) {
                var middleEnemy = new Enemy(Enemy.prototype.Enemy_Type_Middle); //create a medium enemy object
                middleEnemy.init().move(); //initialize it and start moving down
            }
        }
        //randomly create small planes
        setInterval(createSmall, 500);
        function createSmall() {
            var flag = Math.random() > 0.5 ? true : false; //50% chance to create one
            if (flag) {
                var smallEnemy = new Enemy(Enemy.prototype.Enemy_Type_Small); //create a small enemy object
                smallEnemy.init().move(); //initialize it and start moving down
            }
        }
    },
    //collision detection
    crashListening: function() {
        /*
        var a = [1,2,3,4,5]
        var b = ["d",4,"g","t"]
        for (var i=0; i<a.length; i++) {
            var m = a[i];
            for (var j=0; j<b.length; j++) {
                if (m == b[j]) {
                }
            }
        }
        */
        //run a timer that checks for collisions every 30ms
        setInterval(function(){
            //iterate over all enemy and bullet objects and test every pair for a collision (intersection)
            for (var i in gameEngine.enemys) { //all enemies
                for (var j in gameEngine.bullets) { //all bullets
                    //on collision
                    if (isCrash(gameEngine.enemys[i].ele, gameEngine.bullets[j].ele)) {
                        console.log("collision detected");
                        //let the bullet explode and remove it from gameEngine.bullets
                        gameEngine.bullets[j].boom();
                        delete gameEngine.bullets[j];
                        //deal one point of damage to the enemy
                        gameEngine.enemys[i].hurt();
                    }
                }
                //check whether my plane collided with an enemy
                if (!gameEngine.isCrashMyPlane && isCrash(gameEngine.enemys[i].ele, myPlane.ele)) {
                    gameEngine.isCrashMyPlane = true; //set the flag so this only triggers once (the original wrote to 'self', which is undeclared here and fell back to window.self)
                    //blow up my plane
                    myPlane.boom(function(){
                        console.log("Game Over!");
| .ele.appendChild(this.scoreNode);
},
    //scroll the background image
move: function() {
var y = 0;
setInterval(function(){
gameEngine.ele.style.backgroundPositionY = y++ + "px";
}, 30);
}
}
//my plane: (object)
var myPlane = {
    //ele: the div node of my plane
    ele: null,
    fireInterval: 80, //bullet firing interval
    //methods:
    //initialization method init:
    init: function() {
        this.ele = document.createElement("div");
        this.ele.className = "myplane";
        gameEngine.ele.appendChild(this.ele); //append to the main game area
        //position
        var left = (gameEngine.ele.offsetWidth - this.ele.offsetWidth) / 2;
        this.ele.style.left = left + "px";
        this.ele.style.bottom = 0;
        //dragging the plane is possible from now on
        this.startDrag();
        return this;
    },
    //fire bullets
    fire: function(){
        //run a timer that creates and fires bullets
        this.timer = setInterval(function(){
            //create a bullet and let it move
            var bullet = new Bullet(); //create a bullet object
            bullet.init().move(); //initialize and fire the bullet
        }, this.fireInterval);
    },
    //make the plane draggable
    startDrag: function() {
        //onmousedown
        this.ele.onmousedown = function(evt) {
            var oEvent = evt || event;
            var disX = oEvent.offsetX;
            var disY = oEvent.offsetY;
            //onmousemove
            document.onmousemove = function(evt) {
                var oEvent = evt || event;
                var x = oEvent.clientX - gameEngine.ele.offsetLeft - disX;
                var y = oEvent.clientY - disY;
                if (x < 0) { //clamp to the left edge at most
                    x = 0;
                }
                //clamp to the right edge at most
                else if (x > gameEngine.ele.offsetWidth - myPlane.ele.offsetWidth) {
                    x = gameEngine.ele.offsetWidth - myPlane.ele.offsetWidth;
                }
                myPlane.ele.style.left = x + "px";
                myPlane.ele.style.top = y + "px";
            }
            //onmouseup
            document.onmouseup = function(){
                document.onmousemove = null;
                document.onmouseup = null;
            }
        }
    },
    //explode
    boom: function(callBack) {
        clearInterval(this.timer); //stop the timer so no more bullets are fired
        var dieImgs = ["images/me_die1.png", "images/me_die2.png", "images/me_die3.png", "images/me_die4.png"]
        var index = 0;
        var dieTimer = setInterval(function(){
            if (index >= dieImgs.length) {
                clearInterval(dieTimer); //stop the timer
                gameEngine.ele.removeChild(myPlane.ele); //remove my plane
                callBack(); //invoke the callback
            }
            else {
                myPlane.ele.style.background = "url(" + dieImgs[index++] + ") no-repeat";
            }
        }, 50);
    }
}
| alert("Game Over!");
location.reload();
});
}
}
}, 30);
},
    //show the score
showScore: function() {
this.scoreNode = document.createElement("div");
this.scoreNode.className = "score";
this.scoreNode.innerHTML = "0";
gameEngine | identifier_body |
all.js |
//bullet: class (constructor function)
function Bullet() {
    //properties:
    this.ele = document.createElement("div");
    //id of this bullet inside the gameEngine.bullets object
    this.id = parseInt(Math.random()*100000) + "";
    //methods:
    //initialization method init
    this.init = function() {
        this.ele.className = "bullet";
        gameEngine.ele.appendChild(this.ele); //append to the main game area
        gameEngine.bullets[this.id] = this; //register the bullet object in gameEngine.bullets
        //console.log(gameEngine.bullets);
        //position
        var left = myPlane.ele.offsetLeft + myPlane.ele.offsetWidth/2 - this.ele.offsetWidth/2;
        this.ele.style.left = left + "px";
        this.ele.style.top = myPlane.ele.offsetTop - this.ele.offsetHeight + "px";
        return this;
    },
    //move
    this.move = function() {
        var self = this;
        //move the bullet upwards
        this.timer = setInterval(function(){
            //once the bullet passes the top edge of the game area, remove it and stop the timer
            if (self.ele.offsetTop < -18) {
                clearInterval(self.timer); //stop the timer
                gameEngine.ele.removeChild(self.ele); //remove the bullet
                delete gameEngine.bullets[self.id]; //drop the bullet object from gameEngine.bullets
            }
            else {
                self.ele.style.top = self.ele.offsetTop - 10 + "px";
            }
        }, 30);
    }
    //explode
    this.boom = function() {
        //stop the timer started in move() first, so the bullet stops moving
        clearInterval(this.timer);
        this.ele.className = "bullet-die";
        //explosion animation
        var self = this;
        var index = 0;
        var dieImgs = ["images/die1.png", "images/die2.png"];
        var dieTimer = setInterval(function(){
            if (index >= 2) {
                clearInterval(dieTimer); //stop the timer
                gameEngine.ele.removeChild(self.ele); //remove the bullet
            }
            else {
                self.ele.style.background = "url("+ dieImgs[index] +") no-repeat";
                index++;
            }
        }, 50);
    }
}
/*
 * collision detection
 */
function isCrash(obj1, obj2){
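    // Center-point test equivalent to an AABB overlap check: obj1's center is
    // compared against obj2's box expanded by half of obj1's size on each side.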
if(obj1 && obj2){
var leftSide = obj2.offsetLeft-obj1.offsetWidth/2;
var righ | - obj1.offsetHeight/2;
var downSide = obj2.offsetTop + obj2.offsetHeight + obj1.offsetHeight/2;
var x = obj1.offsetLeft+obj1.offsetWidth/2;
var y = obj1.offsetTop + obj1.offsetHeight/2;
if(x > leftSide && x < rightSide && y > upSide && y < downSide){
return true;
}
}
return false;
}
//enemy plane: class (constructor function)
function Enemy(type) {
    //properties:
    this.ele = document.createElement("div");
    this.hp = 0; //hit points
    this.speed = 0; //speed
    this.dieImgs = []; //frames of the explosion animation
    //id of this enemy inside the gameEngine.enemys object
    this.id = parseInt(Math.random()*100000) + "";
    this.score = 0; //score awarded for destroying it
    //methods:
    this.init = function() {
        switch(type) {
            //large plane
            case this.Enemy_Type_Large:
                this.ele.className = "enemy-large"; //css class
                this.hp = this.Enemy_HP_Large; //hit points
                this.speed = this.Enemy_Speed_Large; //speed
                this.dieImgs = ["images/plane3_die1.png", "images/plane3_die2.png", "images/plane3_die3.png", "images/plane3_die4.png", "images/plane3_die5.png", "images/plane3_die6.png"];
                this.score = 30;
                break;
            //medium plane
            case this.Enemy_Type_Middle:
                this.ele.className = "enemy-middle"; //css class
                this.hp = this.Enemy_HP_Middle; //hit points
                this.speed = this.Enemy_Speed_Middle; //speed
                this.dieImgs = ["images/plane2_die1.png", "images/plane2_die2.png", "images/plane2_die3.png", "images/plane2_die4.png"];
                this.score = 20;
                break;
            //small plane
            case this.Enemy_Type_Small:
                this.ele.className = "enemy-small"; //css class
                this.hp = this.Enemy_HP_Small; //hit points
                this.speed = this.Enemy_Speed_Small; //speed
                this.dieImgs = ["images/plane1_die1.png", "images/plane1_die2.png", "images/plane1_die3.png"];
                this.score = 10;
                break;
        }
        //append the enemy to the game area
        gameEngine.ele.appendChild(this.ele);
        gameEngine.enemys[this.id] = this; //register the enemy object in gameEngine.enemys
        //console.log(gameEngine.enemys);
        //position
        var left = Math.random() * (gameEngine.ele.offsetWidth - this.ele.offsetWidth);
        this.ele.style.left = left + "px";
        this.ele.style.top = -this.ele.offsetHeight + "px";
        return this;
    }
    //move
    this.move = function() {
        var self = this;
        //start a timer that moves the enemy downwards
        this.timer = setInterval(function() {
            //if the enemy passes the bottom edge, stop the timer and remove it
            if (self.ele.offsetTop > gameEngine.ele.offsetHeight) {
                clearInterval(self.timer); //stop the timer
                gameEngine.ele.removeChild(self.ele); //remove the enemy
                delete gameEngine.enemys[self.id]; //drop this enemy object from gameEngine.enemys
            }
            else {
                //move
                self.ele.style.top = self.ele.offsetTop + self.speed + "px";
            }
        }, 30);
    }
    //take one point of damage
    this.hurt = function() {
        this.hp--; //lose one hit point
        if (this.hp == 0) { //when HP reaches 0
            this.boom(); //explode
            //add the score
            gameEngine.scoreNode.innerHTML = (gameEngine.scoreNode.innerHTML-0) + this.score;
        }
    }
    //explode
    this.boom = function() {
        clearInterval(this.timer); //stop the timer from move() so the enemy stops moving
        //explosion animation
        var self = this;
        var index = 0;
        var dieTimer = setInterval(function(){
            if (index >= self.dieImgs.length) {
                clearInterval(dieTimer); //stop the timer
                gameEngine.ele.removeChild(self.ele); //remove the enemy
                delete gameEngine.enemys[self.id]; //drop this enemy object from gameEngine.enemys
            }
            else {
                self.ele.style.background = "url(" + self.dieImgs[index++] + ") no-repeat";
            }
        }, 50);
    }
}
Enemy.prototype = {
    Enemy_Type_Large: 1, //large plane
    Enemy_Type_Middle: 2, //medium plane
    Enemy_Type_Small: 3, //small plane
    Enemy_HP_Large: 8, //hit points of a large plane
    Enemy_HP_Middle: 4, //hit points of a medium plane
    Enemy_HP_Small: 1, //hit points of a small plane
    Enemy_Speed_Large: 2, //speed of a large plane
    Enemy_Speed_Middle: 4, //speed of a medium plane
    Enemy_Speed_Small: 8 //speed of a small plane
}
//game engine (object)
/*
 * starts and loads the game, enters the main screen,
 * creates enemies, moves my plane, runs collision detection...
 */
var gameEngine = {
    //ele: the main game screen (play area)
    ele: null,
    bullets: {}, //all bullets currently shown in the play area
    enemys: {}, //all enemy planes currently shown in the play area
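    // Plain objects keyed by random ids serve as lookup tables: entries can be
    // added and deleted in O(1) and iterated with for-in during collision checks.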
    isCrashMyPlane: false, //whether an enemy has collided with my plane
    scoreNode: null, //DOM node that displays the score
    //methods:
    //initialization method init
    init: function(){
        this.ele = document.getElementById("main_body");
        return this;
    },
    //start the game
    start: function(){
        //load the game
        gameEngine.loading(function(){
            //loading has finished,
            //so the actual game can start now
            console.log("starting the actual game");
            //1. show my plane and start firing bullets
            myPlane.init().fire();
            //2. start listening to the keyboard
            gameEngine.keyListening();
            //3. create enemy planes
            gameEngine.createEnemy();
            //4. collision detection
            gameEngine.crashListening();
            //5. show the score
            gameEngine.showScore();
            //6. scroll the background image
            gameEngine.move();
        });
    },
    //load the game
    loading: function(loadCallBack) {
        //show the logo
        var logo = document.createElement("div");
        logo.className = "logo";
        gameEngine.ele.appendChild(logo);
        //show the loading-animation image
        var load = document.createElement("div");
        load.className = "loading";
        gameEngine.ele.appendChild(load);
        //start the loading animation
        var index = 0;
        var loadImgs = ["images/loading1.png", "images/loading2.png", "images/loading3.png"];
        var timer = setInterval(function(){
            //once index reaches the last frame, loading is finished
            if (index >= 2) {
                clearInterval(timer); //stop the timer
                //remove the images (logo, load)
                gameEngine.ele.removeChild(logo);
                gameEngine.ele.removeChild(load);
                //invoke the callback
                loadCallBack();
            }
            else {
                //switch to the next image
                index++;
                load.style.background = "url(" + loadImgs[index%3] + ") no-repeat";
            }
        }, 500);
    },
    //start listening to the keyboard
    keyListening: function(){
        var speed = 0; //speed
        //listen to the keyboard
        window.onkeydown = function(evt){
            var oEvent = evt || event;
            var keycode = oEvent.keyCode; //key code
            console.log(keycode);
            //moving via the auto-repeat of a held-down key:
            /*
            //left
            if (keycode == 37) {
                myPlane.ele.style.left = myPlane.ele.offsetLeft - 10 + "px";
            }
            //right
            else if (keycode == 39) {
                myPlane.ele.style.left = myPlane.ele.offsetLeft + 10 + "px";
            }
            */
            //left
            if (keycode == 37) {
                speed = -10;
            }
            //right
            else if (keycode == 39){
                speed = 10;
            }
        }
        //key released
        window.onkeyup = function() {
            speed = 0;
        }
        //move the plane at a constant rate given by speed
        setInterval(function(){
            var x = myPlane.ele.offsetLeft + speed;
            if (x < 0) { //clamp to the left edge at most
                x = 0;
            }
            //clamp to the right edge at most
            else if (x > gameEngine.ele.offsetWidth-myPlane.ele.offsetWidth) {
                x = gameEngine.ele.offsetWidth-myPlane.ele.offsetWidth;
            }
            myPlane.ele.style.left = x + "px";
        }, 30);
    },
    //create enemy planes
    createEnemy: function() {
        //randomly create large enemies
        setInterval(createBig, 6000);
        function createBig() {
            var flag = Math.random() > 0.5 ? true : false; //50% chance to create one
            if (flag) {
                var bigEnemy = new Enemy(Enemy.prototype.Enemy_Type_Large); //create a large enemy object
                bigEnemy.init().move(); //initialize it and start moving down
            }
        }
        //randomly create medium planes
        setInterval(createMiddle, 1000);
        function createMiddle() {
            var flag = Math.random() > 0.7 ? true : false; //30% chance to create one
            if (flag) {
                var middleEnemy = new Enemy(Enemy.prototype.Enemy_Type_Middle); //create a medium enemy object
                middleEnemy.init().move(); //initialize it and start moving down
            }
        }
        //randomly create small planes
        setInterval(createSmall, 500);
        function createSmall() {
            var flag = Math.random() > 0.5 ? true : false; //50% chance to create one
            if (flag) {
                var smallEnemy = new Enemy(Enemy.prototype.Enemy_Type_Small); //create a small enemy object
                smallEnemy.init().move(); //initialize it and start moving down
            }
        }
    },
    //collision detection
    crashListening: function() {
        /*
        var a = [1,2,3,4,5]
        var b = ["d",4,"g","t"]
        for (var i=0; i<a.length; i++) {
            var m = a[i];
            for (var j=0; j<b.length; j++) {
                if (m == b[j]) {
                }
            }
        }
        */
        //run a timer that checks for collisions every 30ms
        setInterval(function(){
            //iterate over all enemy and bullet objects and test every pair for a collision (intersection)
            for (var i in gameEngine.enemys) { //all enemies
                for (var j in gameEngine.bullets) { //all bullets
                    //on collision
                    if (isCrash(gameEngine.enemys[i].ele, gameEngine.bullets[j].ele)) {
                        console.log("collision detected");
                        //let the bullet explode and remove it from gameEngine.bullets
                        gameEngine.bullets[j].boom();
                        delete gameEngine.bullets[j];
                        //deal one point of damage to the enemy
                        gameEngine.enemys[i].hurt();
                    }
                }
                //check whether my plane collided with an enemy
                if (!gameEngine.isCrashMyPlane && isCrash(gameEngine.enemys[i].ele, myPlane.ele)) {
                    gameEngine.isCrashMyPlane = true; //set the flag so this only triggers once (the original wrote to 'self', which is undeclared here and fell back to window.self)
                    //blow up my plane
                    myPlane.boom(function(){
                        console.log("Game Over!");
                        alert("Game Over!");
                        location.reload();
                    });
                }
            }
        }, 30);
    },
    //show the score
    showScore: function() {
        this.scoreNode = document.createElement("div");
        this.scoreNode.className = "score";
        this.scoreNode.innerHTML = "0";
        gameEngine.ele.appendChild(this.scoreNode);
    },
    //scroll the background image
    move: function() {
        var y = 0;
        setInterval(function(){
            gameEngine.ele.style.backgroundPositionY = y++ + "px";
        }, 30);
    }
}
//my plane: (object)
var myPlane = {
    //ele: the div node of my plane
    ele: null,
    fireInterval: 80, //bullet firing interval
    //methods:
    //initialization method init:
    init: function() {
        this.ele = document.createElement("div");
        this.ele.className = "myplane";
        gameEngine.ele.appendChild(this.ele); //append to the main game area
        //position
        var left = (gameEngine.ele.offsetWidth - this.ele.offsetWidth) / 2;
        this.ele.style.left = left + "px";
        this.ele.style.bottom = 0;
        //dragging the plane is possible from now on
        this.startDrag();
        return this;
    },
    //fire bullets
    fire: function(){
        //run a timer that creates and fires bullets
        this.timer = setInterval(function(){
            //create a bullet and let it move
            var bullet = new Bullet(); //create a bullet object
            bullet.init().move(); //initialize and fire the bullet
        }, this.fireInterval);
    },
    //make the plane draggable
    startDrag: function() {
        //onmousedown
        this.ele.onmousedown = function(evt) {
            var oEvent = evt || event;
            var disX = oEvent.offsetX;
            var disY = oEvent.offsetY;
            //onmousemove
            document.onmousemove = function(evt) {
                var oEvent = evt || event;
                var x = oEvent.clientX - gameEngine.ele.offsetLeft - disX;
                var y = oEvent.clientY - disY;
                if (x < 0) { //clamp to the left edge at most
                    x = 0;
                }
                //clamp to the right edge at most
                else if (x > gameEngine.ele.offsetWidth - myPlane.ele.offsetWidth) {
                    x = gameEngine.ele.offsetWidth - myPlane.ele.offsetWidth;
                }
                myPlane.ele.style.left = x + "px";
                myPlane.ele.style.top = y + "px";
            }
            //onmouseup
            document.onmouseup = function(){
                document.onmousemove = null;
                document.onmouseup = null;
            }
        }
    },
    //explode
    boom: function(callBack) {
        clearInterval(this.timer); //stop the timer so no more bullets are fired
        var dieImgs = ["images/me_die1.png", "images/me_die2.png", "images/me_die3.png", "images/me_die4.png"]
        var index = 0;
        var dieTimer = setInterval(function(){
            if (index >= dieImgs.length) {
                clearInterval(dieTimer); //stop the timer
                gameEngine.ele.removeChild(myPlane.ele); //remove my plane
                callBack(); //invoke the callback
            }
            else {
                myPlane.ele.style.background = "url(" + dieImgs[index++] + ") no-repeat";
            }
        }, 50);
    }
}
| tSide = obj2.offsetLeft+obj2.offsetWidth+obj1.offsetWidth/2;
var upSide = obj2.offsetTop | conditional_block |
all.js | //子弹: 类(构造函数)
function Bullet() {
    // Properties:
    this.ele = document.createElement("div");
    // id of this bullet inside the gameEngine.bullets map
    this.id = parseInt(Math.random()*100000) + "";
    // Methods:
    // init: initialize the bullet
    this.init = function() {
        this.ele.className = "bullet";
        gameEngine.ele.appendChild(this.ele); // append to the main game area
        gameEngine.bullets[this.id] = this; // register this bullet in gameEngine.bullets
        //console.log(gameEngine.bullets);
        // Position: centered above the player's plane
        var left = myPlane.ele.offsetLeft + myPlane.ele.offsetWidth/2 - this.ele.offsetWidth/2;
        this.ele.style.left = left + "px";
        this.ele.style.top = myPlane.ele.offsetTop - this.ele.offsetHeight + "px";
        return this;
    };
    // Move
    this.move = function() {
        var self = this;
        // move the bullet upward
        this.timer = setInterval(function(){
            // once the bullet leaves the top of the game area, remove it and stop the timer
            if (self.ele.offsetTop < -18) {
                clearInterval(self.timer); // stop the timer
                gameEngine.ele.removeChild(self.ele); // remove the bullet element
                delete gameEngine.bullets[self.id]; // unregister from gameEngine.bullets
            }
            else {
                self.ele.style.top = self.ele.offsetTop - 10 + "px";
            }
        }, 30);
    }
    // Explode
    this.boom = function() {
        // stop the move() timer first so the bullet stops moving
        clearInterval(this.timer);
        this.ele.className = "bullet-die";
        // explosion animation
        var self = this;
        var index = 0;
        var dieImgs = ["images/die1.png", "images/die2.png"];
        var dieTimer = setInterval(function(){
            if (index >= 2) {
                clearInterval(dieTimer); // stop the timer
                gameEngine.ele.removeChild(self.ele); // remove the bullet element
            }
            else {
                self.ele.style.background = "url("+ dieImgs[index] +") no-repeat";
                index++;
            }
        }, 50);
    }
}
/*
 * Collision detection: treats obj1 as a point at its center and expands
 * obj2's box by half of obj1's size on each side.
 */
function isCrash(obj1, obj2){
    if(obj1 && obj2){
        var leftSide = obj2.offsetLeft - obj1.offsetWidth/2;
        var rightSide = obj2.offsetLeft + obj2.offsetWidth + obj1.offsetWidth/2;
        var upSide = obj2.offsetTop - obj1.offsetHeight/2;
        var downSide = obj2.offsetTop + obj2.offsetHeight + obj1.offsetHeight/2;
        var x = obj1.offsetLeft + obj1.offsetWidth/2;
        var y = obj1.offsetTop + obj1.offsetHeight/2;
        if(x > leftSide && x < rightSide && y > upSide && y < downSide){
            return true;
        }
    }
    return false;
}
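// Example (added sketch, not from the original file): the test above is
// equivalent to an axis-aligned bounding-box overlap check. The plain objects
// below are hypothetical stand-ins for DOM nodes; offsetLeft/offsetTop/
// offsetWidth/offsetHeight are the only properties isCrash reads.
// var a = { offsetLeft: 0, offsetTop: 0, offsetWidth: 10, offsetHeight: 10 };
// var b = { offsetLeft: 8, offsetTop: 8, offsetWidth: 10, offsetHeight: 10 };
// isCrash(a, b); // true: a's center (5,5) lies inside b's expanded box (3..23, 3..23)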
// Enemy plane: class (constructor function)
function Enemy(type) {
    // Properties:
    this.ele = document.createElement("div");
    this.hp = 0; // hit points
    this.speed = 0; // downward speed
    this.dieImgs = []; // frames for the explosion animation
    // id of this enemy inside the gameEngine.enemys map
    this.id = parseInt(Math.random()*100000) + "";
    this.score = 0; // score awarded when destroyed
    // Methods:
    this.init = function() {
        switch(type) {
            // large plane
            case this.Enemy_Type_Large:
                this.ele.className = "enemy-large"; // CSS class
                this.hp = this.Enemy_HP_Large; // hit points
                this.speed = this.Enemy_Speed_Large; // speed
                this.dieImgs = ["images/plane3_die1.png", "images/plane3_die2.png", "images/plane3_die3.png", "images/plane3_die4.png", "images/plane3_die5.png", "images/plane3_die6.png"];
                this.score = 30;
                break;
            // medium plane
            case this.Enemy_Type_Middle:
                this.ele.className = "enemy-middle"; // CSS class
                this.hp = this.Enemy_HP_Middle; // hit points
                this.speed = this.Enemy_Speed_Middle; // speed
                this.dieImgs = ["images/plane2_die1.png", "images/plane2_die2.png", "images/plane2_die3.png", "images/plane2_die4.png"];
                this.score = 20;
                break;
            // small plane
            case this.Enemy_Type_Small:
                this.ele.className = "enemy-small"; // CSS class
                this.hp = this.Enemy_HP_Small; // hit points
                this.speed = this.Enemy_Speed_Small; // speed
                this.dieImgs = ["images/plane1_die1.png", "images/plane1_die2.png", "images/plane1_die3.png"];
                this.score = 10;
                break;
        }
        // append the enemy to the game area
        gameEngine.ele.appendChild(this.ele);
        gameEngine.enemys[this.id] = this; // register in gameEngine.enemys
        //console.log(gameEngine.enemys);
        // Position: random column, just above the top edge
        var left = Math.random() * (gameEngine.ele.offsetWidth - this.ele.offsetWidth);
        this.ele.style.left = left + "px";
        this.ele.style.top = -this.ele.offsetHeight + "px";
        return this;
    }
    // Move
    this.move = function() {
        var self = this;
        // start a timer that moves the enemy downward
        this.timer = setInterval(function() {
            // if the enemy passes the bottom edge, stop the timer and remove it
            if (self.ele.offsetTop > gameEngine.ele.offsetHeight) {
                clearInterval(self.timer); // stop the timer
                gameEngine.ele.removeChild(self.ele); // remove the enemy element
                delete gameEngine.enemys[self.id]; // unregister from gameEngine.enemys
            }
            else {
                // move down
                self.ele.style.top = self.ele.offsetTop + self.speed + "px";
            }
        }, 30);
    }
    // take one point of damage
    this.hurt = function() {
        this.hp--; // lose one hit point
        if (this.hp == 0) { // when hit points reach 0
            this.boom(); // explode
            // add this enemy's score (coerce innerHTML to a number first)
            gameEngine.scoreNode.innerHTML = (gameEngine.scoreNode.innerHTML-0) + this.score;
        }
    }
    // explode
    this.boom = function() {
        clearInterval(this.timer); // stop the move() timer so the enemy stops moving
        // explosion animation
        var self = this;
        var index = 0;
        var dieTimer = setInterval(function(){
            if (index >= self.dieImgs.length) {
                clearInterval(dieTimer); // stop the timer
                gameEngine.ele.removeChild(self.ele); // remove the enemy element
                delete gameEngine.enemys[self.id]; // unregister from gameEngine.enemys
            }
            else {
                self.ele.style.background = "url(" + self.dieImgs[index++] + ") no-repeat";
            }
        }, 50);
    }
}
Enemy.prototype = {
    Enemy_Type_Large: 1, // marks a large plane
    Enemy_Type_Middle: 2, // marks a medium plane
    Enemy_Type_Small: 3, // marks a small plane
    Enemy_HP_Large: 8, // hit points of a large plane
    Enemy_HP_Middle: 4, // hit points of a medium plane
    Enemy_HP_Small: 1, // hit points of a small plane
    Enemy_Speed_Large: 2, // speed of a large plane
    Enemy_Speed_Middle: 4, // speed of a medium plane
    Enemy_Speed_Small: 8 // speed of a small plane
}
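// Usage sketch (assumption, mirroring gameEngine.createEnemy below): the
// constants shared on Enemy.prototype select the plane type, e.g.
// new Enemy(Enemy.prototype.Enemy_Type_Small).init().move();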
// Game engine (object)
/*
 * Starts and loads the game and enters the main screen;
 * spawns enemies, moves the player's plane, runs collision detection, ...
 */
var gameEngine = {
    // ele: the main game area (the playing field)
    ele: null,
    bullets: {}, // all bullets currently shown in the game area
    enemys: {}, // all enemies currently shown in the game area
    isCrashMyPlane: false, // whether an enemy has hit the player's plane
    scoreNode: null, // DOM node that displays the score
    // Methods:
    // init: initialization
    init: function(){
        this.ele = document.getElementById("main_body");
        return this;
    },
    // start the game
    start: function(){
        // load the game
        gameEngine.loading(function(){
            // loading is finished; the game proper can begin
            console.log("starting the game");
            // 1. show the player's plane and start firing
            myPlane.init().fire();
            // 2. enable keyboard controls
            gameEngine.keyListening();
            // 3. spawn enemies
            gameEngine.createEnemy();
            // 4. collision detection
            gameEngine.crashListening();
            // 5. show the score
            gameEngine.showScore();
            // 6. scroll the background
            gameEngine.move();
        });
    },
    // load the game
    loading: function(loadCallBack) {
        // show the logo
        var logo = document.createElement("div");
        logo.className = "logo";
        gameEngine.ele.appendChild(logo);
        // show the loading-animation sprite
        var load = document.createElement("div");
        load.className = "loading";
        gameEngine.ele.appendChild(load);
        // run the loading animation
        var index = 0;
        var loadImgs = ["images/loading1.png", "images/loading2.png", "images/loading3.png"];
        var timer = setInterval(function(){
            // after the last frame (index >= 2), loading is finished
            if (index >= 2) {
                clearInterval(timer); // stop the timer
                // remove the logo and the loading sprite
                gameEngine.ele.removeChild(logo);
                gameEngine.ele.removeChild(load);
                // invoke the callback
                loadCallBack();
            }
            else {
                // advance to the next frame
                index++;
                load.style.background = "url(" + loadImgs[index%3] + ") no-repeat";
            }
        }, 500);
    },
    // enable keyboard controls
    keyListening: function(){
        var speed = 0; // horizontal speed
        // keyboard listener
        window.onkeydown = function(evt){
            var oEvent = evt || event;
            var keycode = oEvent.keyCode; // key code
            console.log(keycode);
            // (alternative: move on the key-repeat events fired while a key stays pressed)
            /*
            // left
            if (keycode == 37) {
                myPlane.ele.style.left = myPlane.ele.offsetLeft - 10 + "px";
            }
            // right
            else if (keycode == 39) {
                myPlane.ele.style.left = myPlane.ele.offsetLeft + 10 + "px";
            }
            */
            // left
            if (keycode == 37) {
                speed = -10;
            }
            // right
            else if (keycode == 39){
                speed = 10;
            }
        }
        // key released
        window.onkeyup = function() {
            speed = 0;
        }
        // move the plane at a constant speed
        setInterval(function(){
            var x = myPlane.ele.offsetLeft + speed;
            if (x < 0) { // clamp to the left edge
                x = 0;
            }
            // clamp to the right edge
            else if (x > gameEngine.ele.offsetWidth-myPlane.ele.offsetWidth) {
                x = gameEngine.ele.offsetWidth-myPlane.ele.offsetWidth;
            }
            myPlane.ele.style.left = x + "px";
        }, 30);
    },
    // spawn enemies
    createEnemy: function() {
        // randomly spawn large enemies
        setInterval(createBig, 6000);
        function createBig() {
            var flag = Math.random() > 0.5; // 50% chance to spawn one
            if (flag) {
                var bigEnemy = new Enemy(Enemy.prototype.Enemy_Type_Large); // create a large enemy
                bigEnemy.init().move(); // initialize it and start it moving down
            }
        }
        // randomly spawn medium enemies
        setInterval(createMiddle, 1000);
        function createMiddle() {
            var flag = Math.random() > 0.7; // ~30% chance to spawn one
            if (flag) {
                var middleEnemy = new Enemy(Enemy.prototype.Enemy_Type_Middle); // create a medium enemy
                middleEnemy.init().move(); // initialize it and start it moving down
            }
        }
        // randomly spawn small enemies
        setInterval(createSmall, 500);
        function createSmall() {
            var flag = Math.random() > 0.5; // 50% chance to spawn one
            if (flag) {
                var smallEnemy = new Enemy(Enemy.prototype.Enemy_Type_Small); // create a small enemy
                smallEnemy.init().move(); // initialize it and start it moving down
            }
        }
    },
    // collision detection
    crashListening: function() {
        /*
        var a = [1,2,3,4,5]
        var b = ["d",4,"g","t"]
        for (var i=0; i<a.length; i++) {
            var m = a[i];
            for (var j=0; j<b.length; j++) {
                if (m == b[j]) {
                }
            }
        }
        */
        // every 30 ms, check for collisions
        setInterval(function(){
            // for every enemy and every bullet, test whether the two intersect
            for (var i in gameEngine.enemys) { // all enemies
                for (var j in gameEngine.bullets) { // all bullets
                    // on collision
                    if (isCrash(gameEngine.enemys[i].ele, gameEngine.bullets[j].ele)) {
                        console.log("collision detected");
                        // explode the bullet and remove it from gameEngine.bullets
                        gameEngine.bullets[j].boom();
                        delete gameEngine.bullets[j];
                        // deal one point of damage to the enemy
                        gameEngine.enemys[i].hurt();
                    }
                }
                // check whether this enemy hit the player's plane
                if (!gameEngine.isCrashMyPlane && isCrash(gameEngine.enemys[i].ele, myPlane.ele)) {
                    gameEngine.isCrashMyPlane = true; // set isCrashMyPlane to true
                    // explode the player's plane
                    myPlane.boom(function(){
                        console.log("Game Over!");
                        alert("Game Over!");
                        location.reload();
                    });
                }
            }
        }, 30);
    },
    // show the score
    showScore: function() {
        this.scoreNode = document.createElement("div");
        this.scoreNode.className = "score";
        this.scoreNode.innerHTML = "0";
        gameEngine.ele.appendChild(this.scoreNode);
    },
    // scroll the background image
    move: function() {
        var y = 0;
        setInterval(function(){
            gameEngine.ele.style.backgroundPositionY = y++ + "px";
        }, 30);
    }
}
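// Bootstrapping sketch (not shown in this excerpt): the page is assumed to
// contain an element with id="main_body"; a plausible entry point would be
// window.onload = function() {
//     gameEngine.init().start();
// };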
// Player's plane (object)
var myPlane = {
    // ele: the plane's div element
    ele: null,
    fireInterval: 80, // firing interval in milliseconds
    // Methods:
    // init: initialization
    init: function() {
        this.ele = document.createElement("div");
        this.ele.className = "myplane";
        gameEngine.ele.appendChild(this.ele); // append to the main game area
        // Position: centered at the bottom
        var left = (gameEngine.ele.offsetWidth - this.ele.offsetWidth) / 2;
        this.ele.style.left = left + "px";
        this.ele.style.bottom = 0;
        // dragging can start now
        this.startDrag();
        return this;
    },
    // fire bullets
    fire: function(){
        // on a timer, create and launch bullets
        this.timer = setInterval(function(){
            // create a bullet and set it moving
            var bullet = new Bullet(); // create the bullet object
            bullet.init().move(); // initialize and launch it
        }, this.fireInterval);
    },
    // drag support
    startDrag: function() {
        // mousedown
        this.ele.onmousedown = function(evt) {
            var oEvent = evt || event;
            var disX = oEvent.offsetX;
            var disY = oEvent.offsetY;
            // mousemove
            document.onmousemove = function(evt) {
                var oEvent = evt || event;
                var x = oEvent.clientX - gameEngine.ele.offsetLeft - disX;
                var y = oEvent.clientY - disY;
                if (x < 0) { // clamp to the left edge
                    x = 0;
                }
                // clamp to the right edge
                else if (x > gameEngine.ele.offsetWidth - myPlane.ele.offsetWidth) {
                    x = gameEngine.ele.offsetWidth - myPlane.ele.offsetWidth;
                }
                myPlane.ele.style.left = x + "px";
                myPlane.ele.style.top = y + "px";
            }
            // mouseup
            document.onmouseup = function(){
                document.onmousemove = null;
                document.onmouseup = null;
            }
        }
    },
    // explode
    boom: function(callBack) {
        clearInterval(this.timer); // stop the timer so no more bullets are fired
        var dieImgs = ["images/me_die1.png", "images/me_die2.png", "images/me_die3.png", "images/me_die4.png"];
        var index = 0;
        var dieTimer = setInterval(function(){
            if (index >= dieImgs.length) {
                clearInterval(dieTimer); // stop the timer
                gameEngine.ele.removeChild(myPlane.ele); // remove the player's plane
                callBack(); // invoke the callback
            }
            else {
                myPlane.ele.style.background = "url(" + dieImgs[index++] + ") no-repeat";
            }
        }, 50);
    }
}
main.rs | use random_fast_rng::{FastRng, Random};
use rusqlite::{params, Connection, DropBehavior};
use std::fs;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{mpsc, Arc, RwLock};
use std::thread;
use std::time::{Duration, Instant};
const ITER_SECS: u64 = 5;
const USE_RWLOCK: bool = false;
const SEED_COUNT: usize = 20;
const NEW_ITEM_SIZE: usize = 40 * 1024;
const PRINT_VALUES: bool = false;
/// SQLite's approach to concurrency requires waiting/backing off in case of
/// readers/writers conflict. This sets a max duration before failing.
const DB_TIMEOUT: Duration = Duration::from_secs(6);
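// Note: `Connection::busy_timeout` (applied in `Database::open` below) makes
// SQLite retry with backoff for up to this duration when the database is
// locked, instead of failing immediately with SQLITE_BUSY.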
struct Database {
conn: rusqlite::Connection,
}
#[derive(Copy, Clone, Debug)]
struct DbOptions {
wal: bool,
shared_cache: bool,
}
impl DbOptions {
fn db_flags(&self) -> rusqlite::OpenFlags {
use rusqlite::OpenFlags;
let mut flags = OpenFlags::empty();
flags.set(OpenFlags::SQLITE_OPEN_CREATE, true);
flags.set(OpenFlags::SQLITE_OPEN_READ_WRITE, true);
flags.set(OpenFlags::SQLITE_OPEN_SHARED_CACHE, self.shared_cache);
flags
}
}
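// Illustrative sketch (not part of the original benchmark): how `DbOptions`
// turns into an on-disk database. The "demo.db" path is a made-up example.
#[allow(dead_code)]
fn open_demo_db() -> Database {
    let options = DbOptions { wal: true, shared_cache: false };
    // `Database::create` removes any stale file, opens with the flags from
    // `db_flags()`, and creates the `kv` table.
    Database::create("demo.db", &options)
}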
impl Database {
pub fn create<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let path: &Path = path.as_ref();
if path.exists() {
fs::remove_file(path).expect("Could not delete existing database file");
}
let mut db = Self::open(path, options);
db.create_tables(options);
db
}
pub fn open<P: AsRef<Path>>(path: P, options: &DbOptions) -> Self {
let conn = Connection::open_with_flags(path, options.db_flags())
.expect("Could not create SQLite connection");
conn.busy_timeout(DB_TIMEOUT)
.expect("Error setting the database timeout");
Database { conn }
}
fn create_tables(&mut self, options: &DbOptions) {
if options.wal {
self.conn
.pragma_update(None, "journal_mode", &"WAL".to_owned())
.expect("Error applying WAL journal_mode");
}
self.conn
.execute(
r#"
CREATE TABLE "kv" (
"key" INTEGER NOT NULL,
"value" BLOB NOT NULL,
PRIMARY KEY("key")
) WITHOUT ROWID;
"#,
[],
)
.expect("Error creating tables");
}
pub fn seed(&mut self) -> std::io::Result<Vec<u16>> {
let mut transaction = self
.conn
.transaction()
.expect("Could not open DB transaction");
transaction.set_drop_behavior(DropBehavior::Commit);
let mut query = transaction
.prepare(
r#"
INSERT INTO "kv" VALUES (?1, ?2);
"#,
)
.expect("Failed to prepare insert query");
let mut keys = Vec::new();
let mut rng = FastRng::new();
for _ in 0..SEED_COUNT {
let (key, value) = (rng.get_u16(), rng.get_u16());
keys.push(key);
query
.execute(params![key, value])
.expect("Insertion failure seeding database!");
}
Ok(keys)
}
}
fn read_loop(
db: Database,
keys: &[u16],
stop: Arc<AtomicBool>,
rwlock: Arc<RwLock<()>>,
) -> (i32, Vec<i64>) {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
SELECT "value" FROM "kv"
WHERE "key" = ?1
LIMIT 1;"#,
)
.expect("Failed to prepare query statement");
let mut reads = 0;
let mut rng = FastRng::new();
while !stop.load(Ordering::Relaxed) {
let key_index = rng.get_usize() % keys.len();
let key = &keys[key_index as usize];
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
_guard = rwlock.read().expect("Cannot unlock for read!");
}
let value: Result<String, _> = query.query_row(&[key], |result| result.get(0));
reads += 1;
let elapsed = timer.elapsed();
if PRINT_VALUES {
if let Ok(value) = value {
println!("{}: {}", key, value);
}
}
times.push(elapsed.as_nanos() as i64);
}
(reads, times)
}
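// Note: `Ordering::Relaxed` is enough for the `stop` flag here because it is a
// pure shutdown signal: no other data is published through it, and the loops
// only need to observe the store eventually.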
fn write_loop(db: Database, stop: Arc<AtomicBool>, rwlock: Arc<RwLock<()>>) -> Vec<i64> {
let mut times = Vec::new();
let mut query = db
.conn
.prepare(
r#"
INSERT OR IGNORE INTO "kv" ("key", "value")
VALUES (?1, ?2)
"#,
)
.expect("Failed to prepare update statement");
let mut rng = FastRng::new();
let mut value = Vec::new();
value.resize(NEW_ITEM_SIZE, 0u8);
rng.fill_bytes(&mut value);
while !stop.load(Ordering::Relaxed) {
let key = rng.get_u16();
let timer = Instant::now();
let _guard;
if USE_RWLOCK {
_guard = rwlock.write().expect("Cannot unlock for read!");
}
let rows_updated = query
.execute(params![key, value])
.expect("Failed to issue update query!");
let elapsed = timer.elapsed();
if PRINT_VALUES && rows_updated > 0 {
println!("{} set", key);
}
times.push(elapsed.as_nanos() as i64);
}
times
}
fn average(nums: &[i64]) -> f64 {
let sum: i128 = nums.iter().map(|n| *n as i128).sum();
sum as f64 / (nums.len() as f64)
}
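// Sketch of the percentile lookup used in `main` below (an added helper, not
// in the original): assumes `sorted_nanos` is sorted ascending and non-empty;
// the index math matches expressions like
// `read_times[(0.95 * (read_times.len() as f64)) as usize]`, with a bounds
// guard added.
#[allow(dead_code)]
fn percentile_ms(sorted_nanos: &[i64], p: f64) -> f64 {
    let idx = (p * (sorted_nanos.len() as f64)) as usize;
    sorted_nanos[idx.min(sorted_nanos.len() - 1)] as f64 / 1_000_000f64
}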
struct PerfRecord {
config: String,
readers: i32,
writers: i32,
reads_per_sec: f64,
writes_per_sec: f64,
read_p95: f64,
read_p99: f64,
read_p999: f64,
write_p95: Option<f64>,
write_p99: Option<f64>,
write_p999: Option<f64>,
}
fn main() {
let mut perf_vec = Vec::new();
for options in [
DbOptions { shared_cache: false, wal: false },
DbOptions { shared_cache: false, wal: true },
// Shared cache w/out wal requires unlock_notify to work
DbOptions { shared_cache: true, wal: false },
DbOptions { shared_cache: true, wal: true },
] {
println!("## {:?}", options);
println!("");
let keys = {
let mut db = Database::create("test.db", &options);
db.seed().expect("Error seeding database!")
};
for writers in 0..4 {
let done = Arc::new(AtomicBool::new(false));
let rwlock = Arc::new(RwLock::new(()));
let options = Arc::new(options);
{
let done = done.clone();
thread::spawn(move || {
thread::sleep(Duration::from_secs(ITER_SECS));
done.store(true, Ordering::Release);
});
}
let db = Database::open("test.db", &options);
let (write_counts_send, write_counts_recv) = mpsc::channel();
for _ in 0..writers {
let done = done.clone();
let sender = write_counts_send.clone();
let rwlock = rwlock.clone();
let options = options.clone();
thread::spawn(move || {
let write_db = Database::open("test.db", &options);
let write_times = write_loop(write_db, done, rwlock);
sender
.send(write_times)
.expect("Could not send write count!");
});
}
drop(write_counts_send);
let (total_reads, mut read_times) = read_loop(db, &keys, done.clone(), rwlock.clone());
read_times.sort();
let mut total_writes = 0;
let mut write_times = Vec::new();
for _ in 0..writers {
let mut writes = write_counts_recv
.recv()
.expect("Failed to receive write counts!");
total_writes += writes.len();
write_times.append(&mut writes);
}
write_times.sort();
println!("{} writers:", writers);
println!("- Read {} values from the database.", read_times.len());
println!("- Wrote {} values to the database.", total_writes);
println!(
"- Mean read time: {:.5} ms",
average(&read_times) / 1_000_000f64
);
let p95_nanos = read_times[(0.95 * (read_times.len() as f64)) as usize];
let p95_millis = p95_nanos as f64 / 1_000_000f64;
println!("- P95: {} ms", p95_millis);
let p99_nanos = read_times[(0.99 * (read_times.len() as f64)) as usize];
let p99_millis = p99_nanos as f64 / 1_000_000f64;
println!("- P99: {} ms", p99_millis);
let p99_9_nanos = read_times[(0.999 * (read_times.len() as f64)) as usize];
let p99_9_millis = p99_9_nanos as f64 / 1_000_000f64;
println!("- P99.9: {} ms", p99_9_millis);
println!("");
fn not_str(v: bool) -> &'static str {
if v { "" } else { "!" }
}
perf_vec.push(PerfRecord {
config: format!("{}wal, {}shared_cache", not_str(options.wal), not_str(options.shared_cache)),
readers: 1,
writers,
reads_per_sec: total_reads as f64 / ITER_SECS as f64,
writes_per_sec: total_writes as f64 / ITER_SECS as f64,
read_p95: p95_millis,
read_p99: p99_millis,
read_p999: p99_9_millis,
write_p95: if !write_times.is_empty() { Some(write_times[(0.95 * (write_times.len() as f64)) as usize] as f64 / 1_000_000f64) } else { None },
write_p99: if !write_times.is_empty() { Some(write_times[(0.99 * (write_times.len() as f64)) as usize] as f64 / 1_000_000f64) } else { None },
write_p999: if !write_times.is_empty() { Some(write_times[(0.999 * (write_times.len() as f64)) as usize] as f64 / 1_000_000f64) } else { None },
});
}
}
fn print_or<T: std::fmt::Display>(v: Option<T>, o: &str) -> String {
v.map(|v| v.to_string())
.unwrap_or(o.to_owned())
}
let title_width = perf_vec.iter().map(|r| r.config.len()).max().unwrap();
println!("---------------------------------");
println!("");
println!("| configuration | readers | writers | reads/sec | writes/sec | read p95 (ms) | read p99 | read p99.9 | write p95 | write p99 | write p99.9 |");
println!("| ------------- | ------- | ------- | --------- | ---------- | ------------- | -------- | ---------- | --------- | --------- | ----------- |");
for row in perf_vec {
println!("| {:w0$} | {:2} | {:2} | {} | {} | {} | {} | {} | {} | {} | {} |",
row.config, row.readers, row.writers, row.reads_per_sec, row.writes_per_sec,
row.read_p95, row.read_p99, row.read_p999,
print_or(row.write_p95, "N/A"), print_or(row.write_p99, "N/A"), print_or(row.write_p999, "N/A"),
w0 = title_width,
);
}
}
lib.rs | //! Extensions for [glutin](https://crates.io/crates/glutin) to initialize & update old school
//! [gfx](https://crates.io/crates/gfx). _An alternative to gfx_window_glutin_.
//!
//! # Example
//! ```no_run
//! type ColorFormat = gfx::format::Srgba8;
//! type DepthFormat = gfx::format::DepthStencil;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let event_loop = winit::event_loop::EventLoop::new();
//! let window_builder = winit::window::WindowBuilder::new();
//!
//! // Initialise winit window, glutin context & gfx views
//! let old_school_gfx_glutin_ext::Init {
//! // winit window
//! window,
//! // glutin bits
//! gl_config,
//! gl_surface,
//! gl_context,
//! // gfx bits
//! mut device,
//! mut factory,
//! mut color_view,
//! mut depth_view,
//! ..
//! } = old_school_gfx_glutin_ext::window_builder(&event_loop, window_builder)
//! .build::<ColorFormat, DepthFormat>()?;
//!
//! # let new_size = winit::dpi::PhysicalSize::new(1, 1);
//! // Update gfx views, e.g. after a window resize
//! old_school_gfx_glutin_ext::resize_views(new_size, &mut color_view, &mut depth_view);
//! # Ok(()) }
//! ```
use gfx_core::{
format::{ChannelType, DepthFormat, Format, RenderFormat},
handle::{DepthStencilView, RawDepthStencilView, RawRenderTargetView, RenderTargetView},
memory::Typed,
texture,
};
use gfx_device_gl::Resources as R;
use glutin::{
config::{ColorBufferType, ConfigTemplateBuilder},
context::ContextAttributesBuilder,
display::GetGlDisplay,
prelude::{GlConfig, GlDisplay, NotCurrentGlContextSurfaceAccessor},
surface::{SurfaceAttributesBuilder, WindowSurface},
};
use glutin_winit::GlWindow;
use raw_window_handle::HasRawWindowHandle;
use std::{error::Error, ffi::CString};
/// Returns a builder for initialising a winit window, glutin context & gfx views.
pub fn window_builder<T: 'static>(
event_loop: &winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
) -> Builder<'_, T> {
Builder {
event_loop,
winit,
surface_attrs: <_>::default(),
ctx_attrs: <_>::default(),
config_attrs: <_>::default(),
sample_number_pref: <_>::default(),
}
}
/// Builder for initialising a winit window, glutin context & gfx views.
#[derive(Debug, Clone)]
pub struct Builder<'a, T: 'static> {
event_loop: &'a winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
surface_attrs: Option<SurfaceAttributesBuilder<WindowSurface>>,
ctx_attrs: ContextAttributesBuilder,
config_attrs: ConfigTemplateBuilder,
sample_number_pref: NumberOfSamples,
}
impl<T> Builder<'_, T> {
/// Configure surface attributes.
///
/// If not called, glutin default settings are used.
pub fn surface_attributes(
mut self,
surface_attrs: SurfaceAttributesBuilder<WindowSurface>,
) -> Self {
self.surface_attrs = Some(surface_attrs);
self
}
/// Configure context attributes.
///
/// If not called, glutin default settings are used.
pub fn context_attributes(mut self, ctx_attrs: ContextAttributesBuilder) -> Self {
self.ctx_attrs = ctx_attrs;
self
}
/// Configure [`ConfigTemplateBuilder`].
pub fn config_template(mut self, conf: ConfigTemplateBuilder) -> Self {
self.config_attrs = conf;
self
}
/// Configure [`NumberOfSamples`] preference.
///
/// Default `0` / no samples.
pub fn number_of_samples(mut self, pref: impl Into<NumberOfSamples>) -> Self {
self.sample_number_pref = pref.into();
self
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build<Color, Depth>(self) -> Result<Init<Color, Depth>, Box<dyn Error>>
where
Color: RenderFormat,
Depth: DepthFormat,
{
self.build_raw(Color::get_format(), Depth::get_format())
.map(|i| i.into_typed())
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build_raw(
self,
color_format: Format,
depth_format: Format,
) -> Result<RawInit, Box<dyn Error>> {
let Format(color_surface, color_channel) = color_format;
let color_total_bits = color_surface.get_total_bits();
let alpha_bits = color_surface.get_alpha_stencil_bits();
let depth_total_bits = depth_format.0.get_total_bits();
let stencil_bits = depth_format.0.get_alpha_stencil_bits();
let srgb = color_channel == ChannelType::Srgb;
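// e.g. for `Srgba8` the surface carries 32 total bits of which 8 are alpha
// (24 RGB bits), and for `DepthStencil` (D24S8) the 32 bits split into
// 24 depth + 8 stencil.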
let surface_attrs = self
.surface_attrs
.unwrap_or_else(|| SurfaceAttributesBuilder::new().with_srgb(srgb.then_some(true)));
let config_attrs = self
.config_attrs
.with_alpha_size(alpha_bits)
.with_depth_size(depth_total_bits - stencil_bits)
.with_stencil_size(stencil_bits);
let mut no_suitable_config = false;
let (window, gl_config) = glutin_winit::DisplayBuilder::new()
.with_window_builder(Some(self.winit))
.build(self.event_loop, config_attrs, |configs| {
let mut configs: Vec<_> = configs.collect();
assert!(!configs.is_empty(), "no gl configs?");
let best = self
.sample_number_pref
.find(configs.iter().enumerate().filter(|(_, c)| {
let color_bits = match c.color_buffer_type() {
None => 0,
Some(ColorBufferType::Luminance(s)) => s,
Some(ColorBufferType::Rgb {
r_size,
g_size,
b_size,
}) => r_size + g_size + b_size,
};
(!srgb || c.srgb_capable())
&& color_bits == color_total_bits - alpha_bits
&& c.alpha_size() == alpha_bits
&& c.depth_size() == depth_total_bits - stencil_bits
&& c.stencil_size() == stencil_bits
}));
match best {
Some((idx, _)) => configs.swap_remove(idx),
None => {
no_suitable_config = true;
configs.swap_remove(0)
}
}
})?;
if no_suitable_config {
return Err("no suitable gl config found, color+depth not supported?".into());
}
let window = window.unwrap(); // set in display builder
let raw_window_handle = window.raw_window_handle();
let gl_display = gl_config.display();
let (gl_surface, gl_context) = {
let ctx_attrs = self.ctx_attrs.build(Some(raw_window_handle));
let surface_attrs = window.build_surface_attributes(surface_attrs);
let surface = unsafe { gl_display.create_window_surface(&gl_config, &surface_attrs)? };
let context = unsafe { gl_display.create_context(&gl_config, &ctx_attrs)? }
.make_current(&surface)?;
(surface, context)
};
let (device, factory) =
gfx_device_gl::create(|s| gl_display.get_proc_address(&CString::new(s).unwrap()) as _);
let window_size = window.inner_size();
let tex_dimensions = (
window_size.width as _,
window_size.height as _,
1,
gl_config.num_samples().into(),
);
let (color_view, depth_view) =
gfx_device_gl::create_main_targets_raw(tex_dimensions, color_surface, depth_format.0);
Ok(RawInit {
window,
gl_config,
gl_surface,
gl_context,
device,
factory,
color_view,
depth_view,
})
}
}
/// Initialised winit, glutin & gfx state.
#[non_exhaustive]
pub struct InitState<ColorView, DepthView> {
// winit
pub window: winit::window::Window,
// glutin
pub gl_config: glutin::config::Config,
pub gl_surface: glutin::surface::Surface<WindowSurface>,
pub gl_context: glutin::context::PossiblyCurrentContext,
// gfx
pub device: gfx_device_gl::Device,
pub factory: gfx_device_gl::Factory,
pub color_view: ColorView,
pub depth_view: DepthView,
}
/// "Raw" initialised winit, glutin & gfx state.
pub type RawInit = InitState<RawRenderTargetView<R>, RawDepthStencilView<R>>;
/// Initialised winit, glutin & gfx state.
pub type Init<Color, Depth> = InitState<RenderTargetView<R, Color>, DepthStencilView<R, Depth>>;
impl RawInit {
fn into_typed<Color: RenderFormat, Depth: DepthFormat>(self) -> Init<Color, Depth> |
}
/// Recreate and replace gfx views if the dimensions have changed.
pub fn resize_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &mut RenderTargetView<R, Color>,
depth_view: &mut DepthStencilView<R, Depth>,
) {
if let Some((cv, dv)) = resized_views(new_size, color_view, depth_view) {
*color_view = cv;
*depth_view = dv;
}
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &RenderTargetView<R, Color>,
depth_view: &DepthStencilView<R, Depth>,
) -> Option<(RenderTargetView<R, Color>, DepthStencilView<R, Depth>)> {
let old_dimensions = color_view.get_dimensions();
debug_assert_eq!(old_dimensions, depth_view.get_dimensions());
let (cv, dv) = resized_views_raw(
new_size,
old_dimensions,
Color::get_format(),
Depth::get_format(),
)?;
Some((Typed::new(cv), Typed::new(dv)))
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views_raw(
new_size: winit::dpi::PhysicalSize<u32>,
old_dimensions: texture::Dimensions,
color_fmt: Format,
ds_fmt: Format,
) -> Option<(RawRenderTargetView<R>, RawDepthStencilView<R>)> {
let new_dimensions = (
new_size.width as _,
new_size.height as _,
old_dimensions.2,
old_dimensions.3,
);
if old_dimensions == new_dimensions {
return None;
}
Some(gfx_device_gl::create_main_targets_raw(
new_dimensions,
color_fmt.0,
ds_fmt.0,
))
}
/// Preference for picking [`glutin::config::GlConfig::num_samples`].
#[derive(Debug, Clone, Copy)]
pub enum NumberOfSamples {
/// Pick a config with the highest number of samples.
Max,
/// Pick a config with a specific number of samples.
///
/// E.g. `Specific(0)` means no multisampling.
Specific(u8),
}
impl Default for NumberOfSamples {
fn default() -> Self {
Self::Specific(0)
}
}
impl From<u8> for NumberOfSamples {
fn from(val: u8) -> Self {
Self::Specific(val)
}
}
impl NumberOfSamples {
fn find<'a>(
self,
mut configs: impl Iterator<Item = (usize, &'a glutin::config::Config)>,
) -> Option<(usize, &'a glutin::config::Config)> {
match self {
Self::Max => configs.max_by_key(|(_, c)| c.num_samples()),
Self::Specific(n) => configs.find(|(_, c)| c.num_samples() == n),
}
}
}
| {
Init {
window: self.window,
gl_config: self.gl_config,
gl_surface: self.gl_surface,
gl_context: self.gl_context,
device: self.device,
factory: self.factory,
color_view: Typed::new(self.color_view),
depth_view: Typed::new(self.depth_view),
}
} | identifier_body |
lib.rs | //! Extensions for [glutin](https://crates.io/crates/glutin) to initialize & update old school
//! [gfx](https://crates.io/crates/gfx). _An alternative to gfx_window_glutin_.
//!
//! # Example
//! ```no_run
//! type ColorFormat = gfx::format::Srgba8;
//! type DepthFormat = gfx::format::DepthStencil;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let event_loop = winit::event_loop::EventLoop::new();
//! let window_builder = winit::window::WindowBuilder::new();
//!
//! // Initialise winit window, glutin context & gfx views
//! let old_school_gfx_glutin_ext::Init {
//! // winit window
//! window,
//! // glutin bits
//! gl_config,
//! gl_surface,
//! gl_context,
//! // gfx bits
//! mut device,
//! mut factory,
//! mut color_view,
//! mut depth_view,
//! ..
//! } = old_school_gfx_glutin_ext::window_builder(&event_loop, window_builder)
//! .build::<ColorFormat, DepthFormat>()?;
//!
//! # let new_size = winit::dpi::PhysicalSize::new(1, 1);
//! // Update gfx views, e.g. after a window resize
//! old_school_gfx_glutin_ext::resize_views(new_size, &mut color_view, &mut depth_view);
//! # Ok(()) }
//! ```
use gfx_core::{
format::{ChannelType, DepthFormat, Format, RenderFormat},
handle::{DepthStencilView, RawDepthStencilView, RawRenderTargetView, RenderTargetView},
memory::Typed,
texture,
};
use gfx_device_gl::Resources as R;
use glutin::{
config::{ColorBufferType, ConfigTemplateBuilder},
context::ContextAttributesBuilder,
display::GetGlDisplay,
prelude::{GlConfig, GlDisplay, NotCurrentGlContextSurfaceAccessor},
surface::{SurfaceAttributesBuilder, WindowSurface},
};
use glutin_winit::GlWindow;
use raw_window_handle::HasRawWindowHandle;
use std::{error::Error, ffi::CString};
/// Returns a builder for initialising a winit window, glutin context & gfx views.
pub fn window_builder<T: 'static>(
event_loop: &winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
) -> Builder<'_, T> {
Builder {
event_loop,
winit,
surface_attrs: <_>::default(),
ctx_attrs: <_>::default(),
config_attrs: <_>::default(),
sample_number_pref: <_>::default(),
}
}
/// Builder for initialising a winit window, glutin context & gfx views.
#[derive(Debug, Clone)]
pub struct Builder<'a, T: 'static> {
event_loop: &'a winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
surface_attrs: Option<SurfaceAttributesBuilder<WindowSurface>>,
ctx_attrs: ContextAttributesBuilder,
config_attrs: ConfigTemplateBuilder,
sample_number_pref: NumberOfSamples,
}
impl<T> Builder<'_, T> {
/// Configure surface attributes.
///
/// If not called, glutin default settings are used.
pub fn surface_attributes(
mut self,
surface_attrs: SurfaceAttributesBuilder<WindowSurface>,
) -> Self {
self.surface_attrs = Some(surface_attrs);
self
}
/// Configure context attributes.
///
/// If not called, glutin default settings are used.
pub fn context_attributes(mut self, ctx_attrs: ContextAttributesBuilder) -> Self {
self.ctx_attrs = ctx_attrs;
self
}
/// Configure [`ConfigTemplateBuilder`].
pub fn config_template(mut self, conf: ConfigTemplateBuilder) -> Self {
self.config_attrs = conf;
self
}
/// Configure [`NumberOfSamples`] preference.
///
/// Default `0` / no samples.
pub fn number_of_samples(mut self, pref: impl Into<NumberOfSamples>) -> Self {
self.sample_number_pref = pref.into();
self
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build<Color, Depth>(self) -> Result<Init<Color, Depth>, Box<dyn Error>>
where
Color: RenderFormat,
Depth: DepthFormat,
{
self.build_raw(Color::get_format(), Depth::get_format())
.map(|i| i.into_typed())
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build_raw(
self,
color_format: Format,
depth_format: Format,
) -> Result<RawInit, Box<dyn Error>> {
let Format(color_surface, color_channel) = color_format;
let color_total_bits = color_surface.get_total_bits();
let alpha_bits = color_surface.get_alpha_stencil_bits();
let depth_total_bits = depth_format.0.get_total_bits();
let stencil_bits = depth_format.0.get_alpha_stencil_bits();
let srgb = color_channel == ChannelType::Srgb;
let surface_attrs = self
.surface_attrs
.unwrap_or_else(|| SurfaceAttributesBuilder::new().with_srgb(srgb.then_some(true)));
let config_attrs = self
.config_attrs
.with_alpha_size(alpha_bits)
.with_depth_size(depth_total_bits - stencil_bits)
.with_stencil_size(stencil_bits);
let mut no_suitable_config = false;
let (window, gl_config) = glutin_winit::DisplayBuilder::new()
.with_window_builder(Some(self.winit))
.build(self.event_loop, config_attrs, |configs| {
let mut configs: Vec<_> = configs.collect();
assert!(!configs.is_empty(), "no gl configs?");
let best = self
.sample_number_pref
.find(configs.iter().enumerate().filter(|(_, c)| {
let color_bits = match c.color_buffer_type() {
None => 0,
Some(ColorBufferType::Luminance(s)) => s,
Some(ColorBufferType::Rgb {
r_size,
g_size,
b_size,
}) => r_size + g_size + b_size,
};
(!srgb || c.srgb_capable())
&& color_bits == color_total_bits - alpha_bits
&& c.alpha_size() == alpha_bits
&& c.depth_size() == depth_total_bits - stencil_bits
&& c.stencil_size() == stencil_bits
}));
match best {
Some((idx, _)) => configs.swap_remove(idx),
None => {
no_suitable_config = true;
configs.swap_remove(0)
}
}
})?;
if no_suitable_config {
return Err("no suitable gl config found, color+depth not supported?".into());
}
let window = window.unwrap(); // set in display builder
let raw_window_handle = window.raw_window_handle();
let gl_display = gl_config.display();
let (gl_surface, gl_context) = {
let ctx_attrs = self.ctx_attrs.build(Some(raw_window_handle));
let surface_attrs = window.build_surface_attributes(surface_attrs);
let surface = unsafe { gl_display.create_window_surface(&gl_config, &surface_attrs)? };
let context = unsafe { gl_display.create_context(&gl_config, &ctx_attrs)? }
.make_current(&surface)?;
(surface, context)
};
let (device, factory) =
gfx_device_gl::create(|s| gl_display.get_proc_address(&CString::new(s).unwrap()) as _);
let window_size = window.inner_size();
let tex_dimensions = (
window_size.width as _,
window_size.height as _,
1,
gl_config.num_samples().into(),
);
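// gfx texture dimensions: (width, height, array layers, AA mode); the AA
// mode here is derived from the sample count of the chosen gl config.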
let (color_view, depth_view) =
gfx_device_gl::create_main_targets_raw(tex_dimensions, color_surface, depth_format.0);
Ok(RawInit {
window,
gl_config,
gl_surface,
gl_context,
device,
factory,
color_view,
depth_view,
})
}
}
/// Initialised winit, glutin & gfx state.
#[non_exhaustive]
pub struct InitState<ColorView, DepthView> {
// winit
pub window: winit::window::Window,
// glutin
pub gl_config: glutin::config::Config,
pub gl_surface: glutin::surface::Surface<WindowSurface>,
pub gl_context: glutin::context::PossiblyCurrentContext,
// gfx
pub device: gfx_device_gl::Device,
pub factory: gfx_device_gl::Factory,
pub color_view: ColorView,
pub depth_view: DepthView,
}
/// "Raw" initialised winit, glutin & gfx state.
pub type RawInit = InitState<RawRenderTargetView<R>, RawDepthStencilView<R>>;
/// Initialised winit, glutin & gfx state.
pub type Init<Color, Depth> = InitState<RenderTargetView<R, Color>, DepthStencilView<R, Depth>>;
impl RawInit {
fn into_typed<Color: RenderFormat, Depth: DepthFormat>(self) -> Init<Color, Depth> {
Init {
window: self.window,
gl_config: self.gl_config,
gl_surface: self.gl_surface,
gl_context: self.gl_context,
device: self.device,
factory: self.factory,
color_view: Typed::new(self.color_view),
depth_view: Typed::new(self.depth_view),
}
}
}
/// Recreate and replace gfx views if the dimensions have changed.
pub fn resize_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &mut RenderTargetView<R, Color>,
depth_view: &mut DepthStencilView<R, Depth>,
) {
if let Some((cv, dv)) = resized_views(new_size, color_view, depth_view) {
*color_view = cv;
*depth_view = dv;
}
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &RenderTargetView<R, Color>,
depth_view: &DepthStencilView<R, Depth>,
) -> Option<(RenderTargetView<R, Color>, DepthStencilView<R, Depth>)> {
let old_dimensions = color_view.get_dimensions();
debug_assert_eq!(old_dimensions, depth_view.get_dimensions());
let (cv, dv) = resized_views_raw(
new_size,
old_dimensions,
Color::get_format(),
Depth::get_format(),
)?;
Some((Typed::new(cv), Typed::new(dv)))
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views_raw(
new_size: winit::dpi::PhysicalSize<u32>,
old_dimensions: texture::Dimensions,
color_fmt: Format,
ds_fmt: Format,
) -> Option<(RawRenderTargetView<R>, RawDepthStencilView<R>)> {
let new_dimensions = (
new_size.width as _,
new_size.height as _,
old_dimensions.2,
old_dimensions.3,
);
if old_dimensions == new_dimensions {
return None;
}
Some(gfx_device_gl::create_main_targets_raw(
new_dimensions,
color_fmt.0,
ds_fmt.0,
))
}
/// Preference for picking [`glutin::config::GlConfig::num_samples`].
#[derive(Debug, Clone, Copy)]
pub enum NumberOfSamples {
/// Pick a config with the highest number of samples.
Max,
/// Pick a config with a specific number of samples.
///
/// E.g. `Specific(0)` means no multisampling.
Specific(u8),
}
impl Default for NumberOfSamples {
fn | () -> Self {
Self::Specific(0)
}
}
impl From<u8> for NumberOfSamples {
fn from(val: u8) -> Self {
Self::Specific(val)
}
}
impl NumberOfSamples {
fn find<'a>(
self,
mut configs: impl Iterator<Item = (usize, &'a glutin::config::Config)>,
) -> Option<(usize, &'a glutin::config::Config)> {
match self {
Self::Max => configs.max_by_key(|(_, c)| c.num_samples()),
Self::Specific(n) => configs.find(|(_, c)| c.num_samples() == n),
}
}
}
| default | identifier_name |
lib.rs | //! Extensions for [glutin](https://crates.io/crates/glutin) to initialize & update old school
//! [gfx](https://crates.io/crates/gfx). _An alternative to gfx_window_glutin_.
//!
//! # Example
//! ```no_run
//! type ColorFormat = gfx::format::Srgba8;
//! type DepthFormat = gfx::format::DepthStencil;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let event_loop = winit::event_loop::EventLoop::new();
//! let window_builder = winit::window::WindowBuilder::new();
//!
//! // Initialise winit window, glutin context & gfx views
//! let old_school_gfx_glutin_ext::Init {
//! // winit window
//! window,
//! // glutin bits
//! gl_config,
//! gl_surface,
//! gl_context,
//! // gfx bits
//! mut device,
//! mut factory,
//! mut color_view,
//! mut depth_view,
//! ..
//! } = old_school_gfx_glutin_ext::window_builder(&event_loop, window_builder)
//! .build::<ColorFormat, DepthFormat>()?;
//!
//! # let new_size = winit::dpi::PhysicalSize::new(1, 1);
//! // Update gfx views, e.g. after a window resize
//! old_school_gfx_glutin_ext::resize_views(new_size, &mut color_view, &mut depth_view);
//! # Ok(()) }
//! ```
use gfx_core::{
format::{ChannelType, DepthFormat, Format, RenderFormat},
handle::{DepthStencilView, RawDepthStencilView, RawRenderTargetView, RenderTargetView},
memory::Typed,
texture,
};
use gfx_device_gl::Resources as R;
use glutin::{
config::{ColorBufferType, ConfigTemplateBuilder},
context::ContextAttributesBuilder,
display::GetGlDisplay,
prelude::{GlConfig, GlDisplay, NotCurrentGlContextSurfaceAccessor},
surface::{SurfaceAttributesBuilder, WindowSurface},
};
use glutin_winit::GlWindow;
use raw_window_handle::HasRawWindowHandle;
use std::{error::Error, ffi::CString};
/// Returns a builder for initialising a winit window, glutin context & gfx views.
pub fn window_builder<T: 'static>(
event_loop: &winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
) -> Builder<'_, T> {
Builder {
event_loop,
winit,
surface_attrs: <_>::default(),
ctx_attrs: <_>::default(),
config_attrs: <_>::default(),
sample_number_pref: <_>::default(),
}
}
/// Builder for initialising a winit window, glutin context & gfx views.
#[derive(Debug, Clone)]
pub struct Builder<'a, T: 'static> {
event_loop: &'a winit::event_loop::EventLoop<T>,
winit: winit::window::WindowBuilder,
surface_attrs: Option<SurfaceAttributesBuilder<WindowSurface>>,
ctx_attrs: ContextAttributesBuilder,
config_attrs: ConfigTemplateBuilder,
sample_number_pref: NumberOfSamples,
}
impl<T> Builder<'_, T> {
/// Configure surface attributes.
///
/// If not called, glutin default settings are used.
pub fn surface_attributes(
mut self,
surface_attrs: SurfaceAttributesBuilder<WindowSurface>,
) -> Self {
self.surface_attrs = Some(surface_attrs);
self
}
/// Configure context attributes.
///
/// If not called, glutin default settings are used.
pub fn context_attributes(mut self, ctx_attrs: ContextAttributesBuilder) -> Self {
self.ctx_attrs = ctx_attrs;
self
}
/// Configure [`ConfigTemplateBuilder`].
pub fn config_template(mut self, conf: ConfigTemplateBuilder) -> Self {
self.config_attrs = conf;
self
}
/// Configure [`NumberOfSamples`] preference.
///
/// Default `0` / no samples.
pub fn number_of_samples(mut self, pref: impl Into<NumberOfSamples>) -> Self {
self.sample_number_pref = pref.into();
self
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build<Color, Depth>(self) -> Result<Init<Color, Depth>, Box<dyn Error>>
where
Color: RenderFormat,
Depth: DepthFormat,
{
self.build_raw(Color::get_format(), Depth::get_format())
.map(|i| i.into_typed())
}
/// Initialise a winit window, glutin context & gfx views.
pub fn build_raw(
self,
color_format: Format,
depth_format: Format,
) -> Result<RawInit, Box<dyn Error>> {
let Format(color_surface, color_channel) = color_format;
let color_total_bits = color_surface.get_total_bits();
let alpha_bits = color_surface.get_alpha_stencil_bits();
let depth_total_bits = depth_format.0.get_total_bits();
let stencil_bits = depth_format.0.get_alpha_stencil_bits();
let srgb = color_channel == ChannelType::Srgb;
let surface_attrs = self
.surface_attrs
.unwrap_or_else(|| SurfaceAttributesBuilder::new().with_srgb(srgb.then_some(true)));
let config_attrs = self | .with_stencil_size(stencil_bits);
let mut no_suitable_config = false;
let (window, gl_config) = glutin_winit::DisplayBuilder::new()
.with_window_builder(Some(self.winit))
.build(self.event_loop, config_attrs, |configs| {
let mut configs: Vec<_> = configs.collect();
assert!(!configs.is_empty(), "no gl configs?");
let best = self
.sample_number_pref
.find(configs.iter().enumerate().filter(|(_, c)| {
let color_bits = match c.color_buffer_type() {
None => 0,
Some(ColorBufferType::Luminance(s)) => s,
Some(ColorBufferType::Rgb {
r_size,
g_size,
b_size,
}) => r_size + g_size + b_size,
};
(!srgb || c.srgb_capable())
&& color_bits == color_total_bits - alpha_bits
&& c.alpha_size() == alpha_bits
&& c.depth_size() == depth_total_bits - stencil_bits
&& c.stencil_size() == stencil_bits
}));
match best {
Some((idx, _)) => configs.swap_remove(idx),
None => {
no_suitable_config = true;
configs.swap_remove(0)
}
}
})?;
if no_suitable_config {
return Err("no suitable gl config found, color+depth not supported?".into());
}
let window = window.unwrap(); // set in display builder
let raw_window_handle = window.raw_window_handle();
let gl_display = gl_config.display();
let (gl_surface, gl_context) = {
let ctx_attrs = self.ctx_attrs.build(Some(raw_window_handle));
let surface_attrs = window.build_surface_attributes(surface_attrs);
let surface = unsafe { gl_display.create_window_surface(&gl_config, &surface_attrs)? };
let context = unsafe { gl_display.create_context(&gl_config, &ctx_attrs)? }
.make_current(&surface)?;
(surface, context)
};
let (device, factory) =
gfx_device_gl::create(|s| gl_display.get_proc_address(&CString::new(s).unwrap()) as _);
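// `gfx_device_gl::create` resolves GL function pointers through this
// closure, which is why the context was made current just above.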
let window_size = window.inner_size();
let tex_dimensions = (
window_size.width as _,
window_size.height as _,
1,
gl_config.num_samples().into(),
);
let (color_view, depth_view) =
gfx_device_gl::create_main_targets_raw(tex_dimensions, color_surface, depth_format.0);
Ok(RawInit {
window,
gl_config,
gl_surface,
gl_context,
device,
factory,
color_view,
depth_view,
})
}
}
/// Initialised winit, glutin & gfx state.
#[non_exhaustive]
pub struct InitState<ColorView, DepthView> {
// winit
pub window: winit::window::Window,
// glutin
pub gl_config: glutin::config::Config,
pub gl_surface: glutin::surface::Surface<WindowSurface>,
pub gl_context: glutin::context::PossiblyCurrentContext,
// gfx
pub device: gfx_device_gl::Device,
pub factory: gfx_device_gl::Factory,
pub color_view: ColorView,
pub depth_view: DepthView,
}
/// "Raw" initialised winit, glutin & gfx state.
pub type RawInit = InitState<RawRenderTargetView<R>, RawDepthStencilView<R>>;
/// Initialised winit, glutin & gfx state.
pub type Init<Color, Depth> = InitState<RenderTargetView<R, Color>, DepthStencilView<R, Depth>>;
impl RawInit {
fn into_typed<Color: RenderFormat, Depth: DepthFormat>(self) -> Init<Color, Depth> {
Init {
window: self.window,
gl_config: self.gl_config,
gl_surface: self.gl_surface,
gl_context: self.gl_context,
device: self.device,
factory: self.factory,
color_view: Typed::new(self.color_view),
depth_view: Typed::new(self.depth_view),
}
}
}
/// Recreate and replace gfx views if the dimensions have changed.
pub fn resize_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &mut RenderTargetView<R, Color>,
depth_view: &mut DepthStencilView<R, Depth>,
) {
if let Some((cv, dv)) = resized_views(new_size, color_view, depth_view) {
*color_view = cv;
*depth_view = dv;
}
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views<Color: RenderFormat, Depth: DepthFormat>(
new_size: winit::dpi::PhysicalSize<u32>,
color_view: &RenderTargetView<R, Color>,
depth_view: &DepthStencilView<R, Depth>,
) -> Option<(RenderTargetView<R, Color>, DepthStencilView<R, Depth>)> {
let old_dimensions = color_view.get_dimensions();
debug_assert_eq!(old_dimensions, depth_view.get_dimensions());
let (cv, dv) = resized_views_raw(
new_size,
old_dimensions,
Color::get_format(),
Depth::get_format(),
)?;
Some((Typed::new(cv), Typed::new(dv)))
}
/// Return new gfx views if the dimensions have changed.
#[must_use]
pub fn resized_views_raw(
new_size: winit::dpi::PhysicalSize<u32>,
old_dimensions: texture::Dimensions,
color_fmt: Format,
ds_fmt: Format,
) -> Option<(RawRenderTargetView<R>, RawDepthStencilView<R>)> {
let new_dimensions = (
new_size.width as _,
new_size.height as _,
old_dimensions.2,
old_dimensions.3,
);
if old_dimensions == new_dimensions {
return None;
}
Some(gfx_device_gl::create_main_targets_raw(
new_dimensions,
color_fmt.0,
ds_fmt.0,
))
}
/// Preference for picking [`glutin::config::GlConfig::num_samples`].
#[derive(Debug, Clone, Copy)]
pub enum NumberOfSamples {
/// Pick a config with the highest number of samples.
Max,
/// Pick a config with a specific number of samples.
///
/// E.g. `Specific(0)` means no multisampling.
Specific(u8),
}
impl Default for NumberOfSamples {
fn default() -> Self {
Self::Specific(0)
}
}
impl From<u8> for NumberOfSamples {
fn from(val: u8) -> Self {
Self::Specific(val)
}
}
impl NumberOfSamples {
fn find<'a>(
self,
mut configs: impl Iterator<Item = (usize, &'a glutin::config::Config)>,
) -> Option<(usize, &'a glutin::config::Config)> {
match self {
Self::Max => configs.max_by_key(|(_, c)| c.num_samples()),
Self::Specific(n) => configs.find(|(_, c)| c.num_samples() == n),
}
}
} | .config_attrs
.with_alpha_size(alpha_bits)
.with_depth_size(depth_total_bits - stencil_bits) | random_line_split |
__init___.py | # -*- coding: utf-8 -*-
import shutil
import os
import datetime
from config import BaseConfig
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from form import LoginForm, SignUpForm
from flask.ext.mail import Mail, Message
from werkzeug import secure_filename
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from collections import OrderedDict
from flask import url_for, redirect, flash, Flask, render_template, request, \
jsonify
from flask.ext.login import LoginManager, login_user, \
login_required, logout_user, current_user
app = Flask(__name__)
app.config.from_object(BaseConfig)
db = SQLAlchemy(app)
# Import database models with app context
with app.app_context():
from models import *
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
mail = Mail(app)
login_manager = LoginManager()
login_manager.init_app(app)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@app.errorhandler(401)
def unau(e):
return "Acceso no autorizado, favor de iniciar sesión.".decode('utf8'), 401
@login_manager.user_loader
def load_user(user_id):
return User.query.get(unicode(user_id))
@app.route('/inicio/<success>')
@login_required
def inicio(success):
user = current_user
status = False
if user.status == 'Listo':
status = True
files = {'Acta': user.acta, 'Credencial': user.cred, 'Foto': user.foto}
files_status = {'acta': user.status_acta, 'cred': user.status_credencial,
'foto': user.status_foto}
# return str(files2)
return render_template('docs.html', file_uploaded=success, datos=files,
status=status, files_status=files_status)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in BaseConfig.ALLOWED_EXTENSIONS
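# e.g. allowed_file('acta.pdf') -> True, assuming 'pdf' is listed in
# BaseConfig.ALLOWED_EXTENSIONS (the extension is matched case-sensitively)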
@app.route('/order', methods=['POST'])
def order():
engine = create_engine("sqlite:///"+os.path.
abspath(os.path.dirname(__file__))+"/app.db")
if "criteria" in request.json:
with engine.connect() as connection:
query = engine.execute("select * from users where admin=0 order\
by status = '"+request.json["criteria"]+"'").fetchall()
users = {"users": [list(user) for user in query]}
return jsonify(users)
elif "nombre" in request.json:
with engine.connect() as connection:
query = engine.execute("select * from users where name LIKE '"+request.json["nombre"]+"%'").fetchall()
users = {"users": [list(user) for user in query]}
return jsonify(users)
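# response shape (as built by the queries above): {"users": [[col1, col2, ...], ...]},
# i.e. one flat list of column values per matching row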
@app.route('/files', methods=['POST'])
@login_required
def files():
# if the user is an administrator, redirect (an admin cannot enter
# pages meant for a regular user)
if request.method == 'POST':
if current_user.admin == 1:
return redirect(url_for('admin'))
file_uploaded = False
engine = create_engine("sqlite:///"+os.path.abspath(os.path.dirname(__file__))+"/app.db")
user = current_user
folder = BaseConfig.UPLOAD_FOLDER + "/" + user.email
# Iterate over the uploaded files
if len(request.files.items()):
for key, archivo in request.files.items():
filename = secure_filename(archivo.filename)
if filename != '' and allowed_file(filename):
with engine.connect() as connection:
a = engine.execute("select "+key+" from users where email='"+user.email+"'")
row = a.fetchone()
# If a file was already uploaded, replace it
if row[key] != '':
os.remove(folder+"/"+row[key].split('/')[2])
with engine.connect() as connection:
engine.execute("update users set "+key+"='"+'static/'+\
user.email+'/'+filename+"' where email='"+user.email+"'")
file_path = os.path.join(folder, filename)
archivo.save(file_path)
file_uploaded = True
if file_uploaded:
with engine.connect() as connection:
a = engine.execute("select acta, cred, foto from users where email='"+user.email+"'")
row = a.fetchone()
if row[0] != '' and row[1] != '' and row[2] != '':
query = "update users set status='Espera' where email='"+user.email+"'"
else:
query = "update users set status='Enviando' where email='"+user.email+"'"
with engine.connect() as connection:
engine.execute(query)
return redirect(url_for('inicio', success=file_uploaded))
@app.route('/registro', methods=['POST'])
def registro():
sign_form = SignUpForm(prefix="sign_form")
log_form = LoginForm()
if sign_form.validate_on_submit() and request.method == 'POST':
if User.query.filter_by(email=sign_form.correo.data).first():
return "<script type=\"text/javascript\">\
alert(\"El correo que introdujiste ya esta en uso. Utiliza otro correo para continuar.\");\
window.location.href = '/'\
</script>"
u = User()
u.name = str(sign_form.nombre.data).upper()
u.apellidos = str(sign_form.apellidos.data).upper()
u.email = sign_form.correo.data
u.curp = str(sign_form.curp.data).upper()
u.edad = sign_form.edad.data
u.escuela = sign_form.escuela.data
u.ciudad = sign_form.ciudad.data
u.concursos = ", ".join(sign_form.concursos.data)
u.password = sign_form.password.data
u.admin = 0
u.status = 'Registrado'
u.fecha = datetime.datetime.now()
folder = BaseConfig.UPLOAD_FOLDER + "/" + u.email
if os.path.exists(folder):
shutil.rmtree(folder)
os.mkdir(folder)
os.chmod(folder, 0o777)
db.session.add(u)
db.session.commit()
mensaje = "Has quedado registrado en el portal del concurso regional de física y matemáticas<br>Inicia sesión en el portal para empezar a subir los archivos necesarios. Una vez que hayas subido todos tus documentos el comite organizador se encargara de revisarlos y aprobarlos. En caso de que todo este correcto, recibiras un correo en el transcurso de unos días indicando que haz quedado inscrito al concurso.<br><br>Tus datos de ingreso al portal son:<br><b>Correo: </b>%s<br><b>Contraseña:</b> %s<br><b>Nombre: </b>%s<br><b>Apellidos: </b>%s<br><b>CURP: </b>%s<br><b>Edad: </b>%s<br><b>Escuela: </b>%s<br><b>Ciudad: </b>%s<br><b>Concursos: </b>%s<br><br><p align='center'>Gracias por participar.<br>Atentamente:<br>Universidad de Sonora</p><br><br>Dudas: adrianvo@hotmail.com".decode('utf8') % (u.email, sign_form.password.data, u.name, u.apellidos, u.curp, u.edad, u.escuela, u.ciudad, u.concursos)
msg = Message("Registro concurso regional de física y matemáticas".decode('utf8'), sender = "noreply@mat.uson.mx", recipients=[u.email, "adrianvo@hotmail.com"])
msg.html = mensaje
mail.send(msg)
return "<script type=\"text/javascript\">\
alert(\"Registro exitoso. Se han enviado tus datos al correo que proporcionaste en el registro.\");\
window.location.href = '/'\
</script>"
return render_template('index.html', form_login=log_form, sign_form=sign_form)
@app.route('/admin', methods=['GET'])
@login_required
def admin(): | if current_user.admin != 1:
return redirect(url_for('index'))
users = User.query.filter_by(admin=0).all()
return render_template('lista.html', usuarios=users, admin=1)
@app.route('/datos/<estudiante>', methods=['GET'])
@login_required
def datos(estudiante):
if current_user.admin != 1:
return redirect(url_for('index'))
user = User.query.filter_by(email=estudiante).first()
return render_template('estudiante.html', user=user, admin=1)
@app.route('/calificar/<estudiante>', methods=['post'])
@login_required
def calificar(estudiante):
if current_user.admin != 1:
return redirect(url_for('index'))
if len(request.form.items()) == 0:
return "<script type=\"text/javascript\">\
window.location.href = '/admin'\
</script>"
u = User.query.filter_by(email=estudiante).first()
revisados = []
rechazados = []
aceptados = []
engine = create_engine("sqlite:///"+os.path.abspath(os.path.dirname(__file__))+"/app.db")
folder = BaseConfig.UPLOAD_FOLDER + "/" + u.email
for item in request.form.items():
doc = item[0].split('_')[1]
revisados.append(doc.title())
if item[1] == "1":
aceptados.append(doc)
with engine.connect() as connection:
engine.execute("update users set status_"+doc+"=1 where email ='"+u.email+"'")
else:
rechazados.append(doc)
with engine.connect() as connection:
engine.execute("update users set status_"+doc+"=3 where email ='"+u.email+"'")
a = engine.execute("select "+doc[:4]+" from users where email='"+u.email+"'")
row = a.fetchone()
if row[0] != '':
os.remove(folder+"/"+row[0].split('/')[2])
engine.execute("update users set "+doc[:4]+"='' where email ='"+u.email+"'")
row = engine.execute("select status_acta, status_credencial, status_foto from users where email='"+u.email+"'")
estados = tuple(row.fetchone())
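# per-document status codes used below: 1 = accepted, 3 = rejected;
# 0 is assumed to be the not-yet-reviewed default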
# return "<script type=\"text/javascript\">\
# alert(\""+str(estados)+"\");\
# window.location.href = '/admin'\
# </script>"
if len(revisados) != 0:
mensaje = "Estimado estudiante, el comité del Concuros Regional de Física y Matemáticas reviso tus documentos: \
"+", ".join(revisados)+" y estas fueron las observaciónes:<br>Documentos aceptados: "+", ".join(aceptados)+"\
<br>Documentos rechazados: "+", ".join(rechazados)+"<br>".decode('utf8')
with engine.connect() as connection:
engine.execute("update users set revisor='"+current_user.email+"' where email ='"+u.email+"'")
if 0 in estados or 3 in estados:
with engine.connect() as connection:
engine.execute("update users set status='Revisado' where email ='"+u.email+"'")
mensaje = mensaje + "Aún tienes documentos pendientes por enviar o rechazados.\
Sube tus documentos para que no te quedes fuera!".decode('utf8')
else:
with engine.connect() as connection:
engine.execute("update users set status='Listo' where email ='"+u.email+"'")
conc = engine.execute("select concursos from users where email ='"+u.email+"'").first()
iniciales = "".join(map(lambda x: x[0], conc.values()[0].split(" ", 2)))
folio = engine.execute("select folio from users").fetchall()[-1].values()[0]
if folio == '':
    # no folio assigned yet: start the sequence at 10000
    engine.execute("update users set folio='CRFYM-"+iniciales+"-10000' where email ='"+u.email+"'")
else:
    # increment the numeric tail of the last assigned folio
    folio_int = int(folio.split("-")[-1])
    engine.execute("update users set folio='CRFYM-"+iniciales+"-"+str(folio_int+1)+"' where email ='"+u.email+"'")
mensaje = mensaje + "Has completado el registro al concurso, exito!!"
msg = Message("Registro concurso regional de física y matemáticas".decode('utf8'), sender = "noreply@mat.uson.mx", recipients=[u.email, "adrianvo@hotmail.com"])
msg.html = mensaje
mail.send(msg)
return "<script type=\"text/javascript\">\
alert(\"Datos revisados. El alumno recibirá un correo con las observaciónes.\");\
window.location.href = '/admin'\
</script>"
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
logout_user()
form_login = LoginForm(prefix="form_login")
sign_form = SignUpForm(prefix="sign_form")
if form_login.validate_on_submit() and request.method == 'POST':
user = User.query.filter_by(email=form_login.email.data).first()
if user is not None and user.verify_password(form_login.password.data):
if user.admin == 1:
login_user(user)
return redirect(request.args.get('next') or url_for('admin'))
login_user(user)
return redirect(request.args.get('next') or url_for('inicio', success=False))
# flash("Correo o contrasena invalido", category='error')
return "<script type=\"text/javascript\">\
alert(\"Correo o contraseña inválido.\");\
window.location.href = '/'\
</script>"
login_inc = False
if not form_login.validate_on_submit() and request.method == 'POST':
login_inc = True
return render_template('index.html', login_inc=login_inc, form_login=form_login, sign_form=sign_form)
if __name__ == '__main__':
manager.run()
| identifier_name |
|
__init___.py | # -*- coding: utf-8 -*-
import shutil
import os
import datetime
from config import BaseConfig
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from form import LoginForm, SignUpForm
from flask.ext.mail import Mail, Message
from werkzeug import secure_filename
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from collections import OrderedDict
from flask import url_for, redirect, flash, Flask, render_template, request, \
jsonify
from flask.ext.login import LoginManager, login_user, \
login_required, logout_user, current_user
app = Flask(__name__)
app.config.from_object(BaseConfig)
db = SQLAlchemy(app)
# Import database models with app context
with app.app_context():
from models import *
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
mail = Mail(app)
login_manager = LoginManager()
login_manager.init_app(app)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@app.errorhandler(401)
def unau(e):
return "Acceso no autorizado, favor de iniciar sesión.".decode('utf8'), 401
@login_manager.user_loader
def load_user(user_id):
return User.query.get(unicode(user_id))
@app.route('/inicio/<success>')
@login_required
def inicio(success):
user = current_user
status = False
if user.status == 'Listo':
status = True
files = {'Acta': user.acta, 'Credencial': user.cred, 'Foto': user.foto}
files_status = {'acta': user.status_acta, 'cred': user.status_credencial,
'foto': user.status_foto}
# return str(files2)
return render_template('docs.html', file_uploaded=success, datos=files,
status=status, files_status=files_status)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in BaseConfig.ALLOWED_EXTENSIONS
@app.route('/order', methods=['POST'])
def order():
engine = create_engine("sqlite:///"+os.path.
abspath(os.path.dirname(__file__))+"/app.db")
if "criteria" in request.json:
with engine.connect() as connection:
query = engine.execute("select * from users where admin=0 order\
by status = '"+request.json["criteria"]+"'").fetchall()
users = {"users": [list(user) for user in query]}
return jsonify(users)
elif "nombre" in request.json:
with engine.connect() as connection:
query = engine.execute("select * from users where name LIKE '"+request.json["nombre"]+"%'").fetchall()
users = {"users": [list(user) for user in query]}
return jsonify(users)
@app.route('/files', methods=['POST'])
@login_required
def files():
# if the user is an administrator, redirect (an admin cannot enter
# pages meant for a regular user)
if request.method == 'POST':
if current_user.admin == 1:
return redirect(url_for('admin'))
file_uploaded = False
engine = create_engine("sqlite:///"+os.path.abspath(os.path.dirname(__file__))+"/app.db")
user = current_user
folder = BaseConfig.UPLOAD_FOLDER + "/" + user.email
# Iterate over the uploaded files
if len(request.files.items()):
for key, archivo in request.files.items():
filename = secure_filename(archivo.filename)
if filename != '' and allowed_file(filename):
with engine.connect() as connection:
a = engine.execute("select "+key+" from users where email='"+user.email+"'")
row = a.fetchone()
# If a file was already uploaded, replace it
if row[key] != '':
os.remove(folder+"/"+row[key].split('/')[2])
with engine.connect() as connection:
engine.execute("update users set "+key+"='"+'static/'+\
user.email+'/'+filename+"' where email='"+user.email+"'")
file_path = os.path.join(folder, filename)
archivo.save(file_path)
file_uploaded = True
if file_uploaded:
with engine.connect() as connection:
a = engine.execute("select acta, cred, foto from users where email='"+user.email+"'")
row = a.fetchone()
if row[0] != '' and row[1] != '' and row[2] != '':
query = "update users set status='Espera' where email='"+user.email+"'"
else:
query = "update users set status='Enviando' where email='"+user.email+"'"
with engine.connect() as connection:
engine.execute(query)
return redirect(url_for('inicio', success=file_uploaded))
@app.route('/registro', methods=['POST'])
def registro():
sign_form = SignUpForm(prefix="sign_form")
log_form = LoginForm()
if sign_form.validate_on_submit() and request.method == 'POST':
if User.query.filter_by(email=sign_form.correo.data).first():
return "<script type=\"text/javascript\">\
alert(\"El correo que introdujiste ya esta en uso. Utiliza otro correo para continuar.\");\
window.location.href = '/'\
</script>"
u = User()
u.name = str(sign_form.nombre.data).upper()
u.apellidos = str(sign_form.apellidos.data).upper()
u.email = sign_form.correo.data
u.curp = str(sign_form.curp.data).upper()
u.edad = sign_form.edad.data
u.escuela = sign_form.escuela.data
u.ciudad = sign_form.ciudad.data
u.concursos = ", ".join(sign_form.concursos.data)
u.password = sign_form.password.data
u.admin = 0
u.status = 'Registrado'
u.fecha = datetime.datetime.now()
folder = BaseConfig.UPLOAD_FOLDER + "/" + u.email
if os.path.exists(folder):
shutil.rmtree(folder)
os.mkdir(folder)
os.chmod(folder, 0o777)
db.session.add(u)
db.session.commit()
mensaje = "Has quedado registrado en el portal del concurso regional de física y matemáticas<br>Inicia sesión en el portal para empezar a subir los archivos necesarios. Una vez que hayas subido todos tus documentos el comite organizador se encargara de revisarlos y aprobarlos. En caso de que todo este correcto, recibiras un correo en el transcurso de unos días indicando que haz quedado inscrito al concurso.<br><br>Tus datos de ingreso al portal son:<br><b>Correo: </b>%s<br><b>Contraseña:</b> %s<br><b>Nombre: </b>%s<br><b>Apellidos: </b>%s<br><b>CURP: </b>%s<br><b>Edad: </b>%s<br><b>Escuela: </b>%s<br><b>Ciudad: </b>%s<br><b>Concursos: </b>%s<br><br><p align='center'>Gracias por participar.<br>Atentamente:<br>Universidad de Sonora</p><br><br>Dudas: adrianvo@hotmail.com".decode('utf8') % (u.email, sign_form.password.data, u.name, u.apellidos, u.curp, u.edad, u.escuela, u.ciudad, u.concursos)
msg = Message("Registro concurso regional de física y matemáticas".decode('utf8'), sender = "noreply@mat.uson.mx", recipients=[u.email, "adrianvo@hotmail.com"])
msg.html = mensaje
mail.send(msg)
return "<script type=\"text/javascript\">\
alert(\"Registro exitoso. Se han enviado tus datos al correo que proporcionaste en el registro.\");\
window.location.href = '/'\
</script>"
return render_template('index.html', form_login=log_form, sign_form=sign_form)
@app.route('/admin', methods=['GET'])
@login_required
def admin():
if current_user.admin != 1:
return redirect(url_for('index'))
users = User.query.filter_by(admin=0).all()
return render_template('lista.html', usuarios=users, admin=1)
@app.route('/datos/<estudiante>', methods=['GET'])
@login_required
def datos(estudiante):
if current_user.admin != 1:
return redirect(url_for('index'))
user = User.query.filter_by(email=estudiante).first()
return render_template('estudiante.html', user=user, admin=1)
@app.route('/calificar/<estudiante>', methods=['post'])
@login_required
def calificar(estudiante):
if current_user.admin != 1:
return redirect(url_for('index'))
if len(request.form.items()) == 0:
return "<script type=\"text/javascript\">\
window.location.href = '/admin'\
</script>"
u = User.query.filter_by(email=estudiante).first()
revisados = []
rechazados = []
aceptados = []
engine = create_engine("sqlite:///"+os.path.abspath(os.path.dirname(__file__))+"/app.db")
folder = BaseConfig.UPLOAD_FOLDER + "/" + u.email
for item in request.form.items(): | if item[1] == "1":
aceptados.append(doc)
with engine.connect() as connection:
engine.execute("update users set status_"+doc+"=1 where email ='"+u.email+"'")
else:
rechazados.append(doc)
with engine.connect() as connection:
engine.execute("update users set status_"+doc+"=3 where email ='"+u.email+"'")
a = engine.execute("select "+doc[:4]+" from users where email='"+u.email+"'")
row = a.fetchone()
if row[0] != '':
os.remove(folder+"/"+row[0].split('/')[2])
engine.execute("update users set "+doc[:4]+"='' where email ='"+u.email+"'")
row = engine.execute("select status_acta, status_credencial, status_foto from users where email='"+u.email+"'")
estados = tuple(row.fetchone())
# return "<script type=\"text/javascript\">\
# alert(\""+str(estados)+"\");\
# window.location.href = '/admin'\
# </script>"
if len(revisados) != 0:
mensaje = "Estimado estudiante, el comité del Concuros Regional de Física y Matemáticas reviso tus documentos: \
"+", ".join(revisados)+" y estas fueron las observaciónes:<br>Documentos aceptados: "+", ".join(aceptados)+"\
<br>Documentos rechazados: "+", ".join(rechazados)+"<br>".decode('utf8')
with engine.connect() as connection:
engine.execute("update users set revisor='"+current_user.email+"' where email ='"+u.email+"'")
if 0 in estados or 3 in estados:
with engine.connect() as connection:
engine.execute("update users set status='Revisado' where email ='"+u.email+"'")
mensaje = mensaje + "Aún tienes documentos pendientes por enviar o rechazados.\
Sube tus documentos para que no te quedes fuera!".decode('utf8')
else:
with engine.connect() as connection:
engine.execute("update users set status='Listo' where email ='"+u.email+"'")
conc = engine.execute("select concursos from users where email ='"+u.email+"'").first()
iniciales = "".join(map(lambda x: x[0], conc.values()[0].split(" ", 2)))
folio = engine.execute("select folio from users").fetchall()[-1].values()[0]
if folio == '':
    # no folio assigned yet: start the sequence at 10000
    engine.execute("update users set folio='CRFYM-"+iniciales+"-10000' where email ='"+u.email+"'")
else:
    # increment the numeric tail of the last assigned folio
    folio_int = int(folio.split("-")[-1])
    engine.execute("update users set folio='CRFYM-"+iniciales+"-"+str(folio_int+1)+"' where email ='"+u.email+"'")
mensaje = mensaje + "Has completado el registro al concurso, exito!!"
msg = Message("Registro concurso regional de física y matemáticas".decode('utf8'), sender = "noreply@mat.uson.mx", recipients=[u.email, "adrianvo@hotmail.com"])
msg.html = mensaje
mail.send(msg)
return "<script type=\"text/javascript\">\
alert(\"Datos revisados. El alumno recibirá un correo con las observaciónes.\");\
window.location.href = '/admin'\
</script>"
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
logout_user()
form_login = LoginForm(prefix="form_login")
sign_form = SignUpForm(prefix="sign_form")
if form_login.validate_on_submit() and request.method == 'POST':
user = User.query.filter_by(email=form_login.email.data).first()
if user is not None and user.verify_password(form_login.password.data):
if user.admin == 1:
login_user(user)
return redirect(request.args.get('next') or url_for('admin'))
login_user(user)
return redirect(request.args.get('next') or url_for('inicio', success=False))
# flash("Correo o contrasena invalido", category='error')
return "<script type=\"text/javascript\">\
alert(\"Correo o contraseña inválido.\");\
window.location.href = '/'\
</script>"
login_inc = False
if not form_login.validate_on_submit() and request.method == 'POST':
login_inc = True
return render_template('index.html', login_inc=login_inc, form_login=form_login, sign_form=sign_form)
if __name__ == '__main__':
manager.run() | doc = item[0].split('_')[1]
revisados.append(doc.title()) | random_line_split |
__init___.py | # -*- coding: utf-8 -*-
import shutil
import os
import datetime
from config import BaseConfig
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from form import LoginForm, SignUpForm
from flask.ext.mail import Mail, Message
from werkzeug import secure_filename
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from collections import OrderedDict
from flask import url_for, redirect, flash, Flask, render_template, request, \
jsonify
from flask.ext.login import LoginManager, login_user, \
login_required, logout_user, current_user
app = Flask(__name__)
app.config.from_object(BaseConfig)
db = SQLAlchemy(app)
# Import database models with app context
with app.app_context():
from models import *
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
mail = Mail(app)
login_manager = LoginManager()
login_manager.init_app(app)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@app.errorhandler(401)
def unau(e):
|
@login_manager.user_loader
def load_user(user_id):
return User.query.get(unicode(user_id))
@app.route('/inicio/<success>')
@login_required
def inicio(success):
user = current_user
status = False
if user.status == 'Listo':
status = True
files = {'Acta': user.acta, 'Credencial': user.cred, 'Foto': user.foto}
files_status = {'acta': user.status_acta, 'cred': user.status_credencial,
'foto': user.status_foto}
# return str(files2)
return render_template('docs.html', file_uploaded=success, datos=files,
status=status, files_status=files_status)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in BaseConfig.ALLOWED_EXTENSIONS
@app.route('/order', methods=['POST'])
def order():
engine = create_engine("sqlite:///"+os.path.
abspath(os.path.dirname(__file__))+"/app.db")
if "criteria" in request.json:
with engine.connect() as connection:
query = engine.execute("select * from users where admin=0 order\
by status = '"+request.json["criteria"]+"'").fetchall()
users = {"users": [list(user) for user in query]}
return jsonify(users)
elif "nombre" in request.json:
with engine.connect() as connection:
query = engine.execute("select * from users where name LIKE '"+request.json["nombre"]+"%'").fetchall()
users = {"users": [list(user) for user in query]}
return jsonify(users)
@app.route('/files', methods=['POST'])
@login_required
def files():
# if the user is an administrator, redirect (an admin cannot enter
# pages meant for a regular user)
if request.method == 'POST':
if current_user.admin == 1:
return redirect(url_for('admin'))
file_uploaded = False
engine = create_engine("sqlite:///"+os.path.abspath(os.path.dirname(__file__))+"/app.db")
user = current_user
folder = BaseConfig.UPLOAD_FOLDER + "/" + user.email
# Iterate over the uploaded files
if len(request.files.items()):
for key, archivo in request.files.items():
filename = secure_filename(archivo.filename)
if filename != '' and allowed_file(filename):
with engine.connect() as connection:
a = engine.execute("select "+key+" from users where email='"+user.email+"'")
row = a.fetchone()
# If a file was already uploaded, replace it
if row[key] != '':
os.remove(folder+"/"+row[key].split('/')[2])
with engine.connect() as connection:
engine.execute("update users set "+key+"='"+'static/'+\
user.email+'/'+filename+"' where email='"+user.email+"'")
file_path = os.path.join(folder, filename)
archivo.save(file_path)
file_uploaded = True
if file_uploaded:
with engine.connect() as connection:
a = engine.execute("select acta, cred, foto from users where email='"+user.email+"'")
row = a.fetchone()
if row[0] != '' and row[1] != '' and row[2] != '':
query = "update users set status='Espera' where email='"+user.email+"'"
else:
query = "update users set status='Enviando' where email='"+user.email+"'"
with engine.connect() as connection:
engine.execute(query)
return redirect(url_for('inicio', success=file_uploaded))
@app.route('/registro', methods=['POST'])
def registro():
sign_form = SignUpForm(prefix="sign_form")
log_form = LoginForm()
if sign_form.validate_on_submit() and request.method == 'POST':
if User.query.filter_by(email=sign_form.correo.data).first():
return "<script type=\"text/javascript\">\
alert(\"El correo que introdujiste ya esta en uso. Utiliza otro correo para continuar.\");\
window.location.href = '/'\
</script>"
u = User()
u.name = str(sign_form.nombre.data).upper()
u.apellidos = str(sign_form.apellidos.data).upper()
u.email = sign_form.correo.data
u.curp = str(sign_form.curp.data).upper()
u.edad = sign_form.edad.data
u.escuela = sign_form.escuela.data
u.ciudad = sign_form.ciudad.data
u.concursos = ", ".join(sign_form.concursos.data)
u.password = sign_form.password.data
u.admin = 0
u.status = 'Registrado'
u.fecha = datetime.datetime.now()
folder = BaseConfig.UPLOAD_FOLDER + "/" + u.email
if os.path.exists(folder):
shutil.rmtree(folder)
os.mkdir(folder)
os.chmod(folder, 0o777)
db.session.add(u)
db.session.commit()
mensaje = "Has quedado registrado en el portal del concurso regional de física y matemáticas<br>Inicia sesión en el portal para empezar a subir los archivos necesarios. Una vez que hayas subido todos tus documentos el comite organizador se encargara de revisarlos y aprobarlos. En caso de que todo este correcto, recibiras un correo en el transcurso de unos días indicando que haz quedado inscrito al concurso.<br><br>Tus datos de ingreso al portal son:<br><b>Correo: </b>%s<br><b>Contraseña:</b> %s<br><b>Nombre: </b>%s<br><b>Apellidos: </b>%s<br><b>CURP: </b>%s<br><b>Edad: </b>%s<br><b>Escuela: </b>%s<br><b>Ciudad: </b>%s<br><b>Concursos: </b>%s<br><br><p align='center'>Gracias por participar.<br>Atentamente:<br>Universidad de Sonora</p><br><br>Dudas: adrianvo@hotmail.com".decode('utf8') % (u.email, sign_form.password.data, u.name, u.apellidos, u.curp, u.edad, u.escuela, u.ciudad, u.concursos)
msg = Message("Registro concurso regional de física y matemáticas".decode('utf8'), sender = "noreply@mat.uson.mx", recipients=[u.email, "adrianvo@hotmail.com"])
msg.html = mensaje
mail.send(msg)
return "<script type=\"text/javascript\">\
alert(\"Registro exitoso. Se han enviado tus datos al correo que proporcionaste en el registro.\");\
window.location.href = '/'\
</script>"
return render_template('index.html', form_login=log_form, sign_form=sign_form)
@app.route('/admin', methods=['GET'])
@login_required
def admin():
if current_user.admin != 1:
return redirect(url_for('index'))
users = User.query.filter_by(admin=0).all()
return render_template('lista.html', usuarios=users, admin=1)
@app.route('/datos/<estudiante>', methods=['GET'])
@login_required
def datos(estudiante):
if current_user.admin != 1:
return redirect(url_for('index'))
user = User.query.filter_by(email=estudiante).first()
return render_template('estudiante.html', user=user, admin=1)
@app.route('/calificar/<estudiante>', methods=['post'])
@login_required
def calificar(estudiante):
if current_user.admin != 1:
return redirect(url_for('index'))
if len(request.form.items()) == 0:
return "<script type=\"text/javascript\">\
window.location.href = '/admin'\
</script>"
u = User.query.filter_by(email=estudiante).first()
revisados = []
rechazados = []
aceptados = []
engine = create_engine("sqlite:///"+os.path.abspath(os.path.dirname(__file__))+"/app.db")
folder = BaseConfig.UPLOAD_FOLDER + "/" + u.email
for item in request.form.items():
doc = item[0].split('_')[1]
revisados.append(doc.title())
if item[1] == "1":
aceptados.append(doc)
with engine.connect() as connection:
engine.execute("update users set status_"+doc+"=1 where email ='"+u.email+"'")
else:
rechazados.append(doc)
with engine.connect() as connection:
engine.execute("update users set status_"+doc+"=3 where email ='"+u.email+"'")
a = engine.execute("select "+doc[:4]+" from users where email='"+u.email+"'")
row = a.fetchone()
if row[0] != '':
os.remove(folder+"/"+row[0].split('/')[2])
engine.execute("update users set "+doc[:4]+"='' where email ='"+u.email+"'")
row = engine.execute("select status_acta, status_credencial, status_foto from users where email='"+u.email+"'")
estados = tuple(row.fetchone())
# return "<script type=\"text/javascript\">\
# alert(\""+str(estados)+"\");\
# window.location.href = '/admin'\
# </script>"
if len(revisados) != 0:
        mensaje = "Estimado estudiante, el comité del Concurso Regional de Física y Matemáticas revisó tus documentos: \
"+", ".join(revisados)+" y estas fueron las observaciones:<br>Documentos aceptados: "+", ".join(aceptados)+"\
<br>Documentos rechazados: "+", ".join(rechazados)+"<br>".decode('utf8')
with engine.connect() as connection:
engine.execute("update users set revisor='"+current_user.email+"' where email ='"+u.email+"'")
if 0 in estados or 3 in estados:
with engine.connect() as connection:
engine.execute("update users set status='Revisado' where email ='"+u.email+"'")
mensaje = mensaje + "Aún tienes documentos pendientes por enviar o rechazados.\
Sube tus documentos para que no te quedes fuera!".decode('utf8')
else:
with engine.connect() as connection:
engine.execute("update users set status='Listo' where email ='"+u.email+"'")
conc = engine.execute("select concursos from users where email ='"+u.email+"'").first()
iniciales = "".join(map(lambda x: x[0], conc.values()[0].split(" ", 2)))
folio = engine.execute("select folio from users").fetchall()[-1].values()[0]
            if folio == '':
                engine.execute("update users set folio='CRFYM-"+iniciales+"-10000' where email ='"+u.email+"'")
            else:
                folio_int = int(folio.split("-")[-1])
                engine.execute("update users set folio='CRFYM-"+iniciales+"-"+str(folio_int+1)+"' where email ='"+u.email+"'")
mensaje = mensaje + "Has completado el registro al concurso, exito!!"
msg = Message("Registro concurso regional de física y matemáticas".decode('utf8'), sender = "noreply@mat.uson.mx", recipients=[u.email, "adrianvo@hotmail.com"])
msg.html = mensaje
mail.send(msg)
return "<script type=\"text/javascript\">\
    alert(\"Datos revisados. El alumno recibirá un correo con las observaciones.\");\
window.location.href = '/admin'\
</script>"
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
logout_user()
form_login = LoginForm(prefix="form_login")
sign_form = SignUpForm(prefix="sign_form")
if form_login.validate_on_submit() and request.method == 'POST':
user = User.query.filter_by(email=form_login.email.data).first()
if user is not None and user.verify_password(form_login.password.data):
if user.admin == 1:
login_user(user)
return redirect(request.args.get('next') or url_for('admin'))
login_user(user)
return redirect(request.args.get('next') or url_for('inicio', success=False))
# flash("Correo o contrasena invalido", category='error')
return "<script type=\"text/javascript\">\
alert(\"Correo o contraseña inválido.\");\
window.location.href = '/'\
</script>"
login_inc = False
if form_login.validate_on_submit() == False and request.method == 'POST':
login_inc = True
return render_template('index.html', login_inc=login_inc, form_login=form_login, sign_form=sign_form)
if __name__ == '__main__':
manager.run()
__init__.py
# -*- coding: utf-8 -*-
import shutil
import os
import datetime
from config import BaseConfig
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from form import LoginForm, SignUpForm
from flask.ext.mail import Mail, Message
from werkzeug import secure_filename
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from collections import OrderedDict
from flask import url_for, redirect, flash, Flask, render_template, request, \
jsonify
from flask.ext.login import LoginManager, login_user, \
login_required, logout_user, current_user
app = Flask(__name__)
app.config.from_object(BaseConfig)
db = SQLAlchemy(app)
# Import database models with app context
with app.app_context():
from models import *
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
mail = Mail(app)
login_manager = LoginManager()
login_manager.init_app(app)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@app.errorhandler(401)
def unau(e):
return "Acceso no autorizado, favor de iniciar sesión.".decode('utf8'), 401
@login_manager.user_loader
def load_user(user_id):
return User.query.get(unicode(user_id))
@app.route('/inicio/<success>')
@login_required
def inicio(success):
user = current_user
status = False
if user.status == 'Listo':
status = True
files = {'Acta': user.acta, 'Credencial': user.cred, 'Foto': user.foto}
files_status = {'acta': user.status_acta, 'cred': user.status_credencial,
'foto': user.status_foto}
# return str(files2)
return render_template('docs.html', file_uploaded=success, datos=files,
status=status, files_status=files_status)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in BaseConfig.ALLOWED_EXTENSIONS
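# Quick check of the rule above (illustrative; assumes 'pdf' is in ALLOWED_EXTENSIONS):
#   allowed_file('acta.pdf')      -> True
#   allowed_file('script.exe')    -> False  (extension not whitelisted)
#   allowed_file('sin_extension') -> False  (no '.' in the name)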
@app.route('/order', methods=['POST'])
def order():
engine = create_engine("sqlite:///"+os.path.
abspath(os.path.dirname(__file__))+"/app.db")
if "criteria" in request.json:
with engine.connect() as connection:
query = engine.execute("select * from users where admin=0 order\
by status = '"+request.json["criteria"]+"'").fetchall()
users = {"users": [list(user) for user in query]}
return jsonify(users)
elif "nombre" in request.json:
with engine.connect() as connection:
query = engine.execute("select * from users where name LIKE '"+request.json["nombre"]+"%'").fetchall()
users = {"users": [list(user) for user in query]}
return jsonify(users)
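# Example exchange for /order (payloads assumed from the key checks above):
#   POST {"criteria": "Espera"} -> {"users": [[...row values...], ...]} ordered by status
#   POST {"nombre": "JUAN"}     -> {"users": [...]} filtered by name prefix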
@app.route('/files', methods=['POST'])
@login_required
def files():
    # If the user is an administrator, redirect (an admin cannot enter
    # pages that belong to a regular user)
if request.method == 'POST':
if current_user.admin == 1:
return redirect(url_for('admin'))
file_uploaded = False
engine = create_engine("sqlite:///"+os.path.abspath(os.path.dirname(__file__))+"/app.db")
user = current_user
folder = BaseConfig.UPLOAD_FOLDER + "/" + user.email
    # Iterate over the uploaded files
if len(request.files.items()):
for key, archivo in request.files.items():
filename = secure_filename(archivo.filename)
if filename != '' and allowed_file(filename):
with engine.connect() as connection:
a = engine.execute("select "+key+" from users where email='"+user.email+"'")
row = a.fetchone()
                # If a file had already been uploaded, replace it
if row[key] != '':
os.remove(folder+"/"+row[key].split('/')[2])
with engine.connect() as connection:
engine.execute("update users set "+key+"='"+'static/'+\
user.email+'/'+filename+"' where email='"+user.email+"'")
file_path = os.path.join(folder, filename)
archivo.save(file_path)
file_uploaded = True
if file_uploaded:
with engine.connect() as connection:
a = engine.execute("select acta, cred, foto from users where email='"+user.email+"'")
row = a.fetchone()
if row[0] != '' and row[1] != '' and row[2] != '':
query = "update users set status='Espera' where email='"+user.email+"'"
else:
query = "update users set status='Enviando' where email='"+user.email+"'"
with engine.connect() as connection:
engine.execute(query)
return redirect(url_for('inicio', success=file_uploaded))
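# NOTE (editor's sketch, not part of the original flow): the queries above build SQL
# by string concatenation, which is open to SQL injection. A safer equivalent using
# SQLAlchemy bound parameters (column names would still need whitelisting) is:
#   from sqlalchemy import text
#   engine.execute(text("update users set status=:st where email=:em"),
#                  st='Espera', em=user.email)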
@app.route('/registro', methods=['POST'])
def registro():
sign_form = SignUpForm(prefix="sign_form")
log_form = LoginForm()
if sign_form.validate_on_submit() and request.method == 'POST':
if User.query.filter_by(email=sign_form.correo.data).first():
return "<script type=\"text/javascript\">\
alert(\"El correo que introdujiste ya esta en uso. Utiliza otro correo para continuar.\");\
window.location.href = '/'\
</script>"
u = User()
u.name = str(sign_form.nombre.data).upper()
u.apellidos = str(sign_form.apellidos.data).upper()
u.email = sign_form.correo.data
u.curp = str(sign_form.curp.data).upper()
u.edad = sign_form.edad.data
u.escuela = sign_form.escuela.data
u.ciudad = sign_form.ciudad.data
u.concursos = ", ".join(sign_form.concursos.data)
u.password = sign_form.password.data
u.admin = 0
u.status = 'Registrado'
u.fecha = datetime.datetime.now()
folder = BaseConfig.UPLOAD_FOLDER + "/" + u.email
if os.path.exists(folder):
shutil.rmtree(folder)
os.mkdir(folder)
os.chmod(folder, 0o777)
db.session.add(u)
db.session.commit()
        mensaje = "Has quedado registrado en el portal del concurso regional de física y matemáticas<br>Inicia sesión en el portal para empezar a subir los archivos necesarios. Una vez que hayas subido todos tus documentos el comite organizador se encargara de revisarlos y aprobarlos. En caso de que todo este correcto, recibiras un correo en el transcurso de unos días indicando que has quedado inscrito al concurso.<br><br>Tus datos de ingreso al portal son:<br><b>Correo: </b>%s<br><b>Contraseña:</b> %s<br><b>Nombre: </b>%s<br><b>Apellidos: </b>%s<br><b>CURP: </b>%s<br><b>Edad: </b>%s<br><b>Escuela: </b>%s<br><b>Ciudad: </b>%s<br><b>Concursos: </b>%s<br><br><p align='center'>Gracias por participar.<br>Atentamente:<br>Universidad de Sonora</p><br><br>Dudas: adrianvo@hotmail.com".decode('utf8') % (u.email, sign_form.password.data, u.name, u.apellidos, u.curp, u.edad, u.escuela, u.ciudad, u.concursos)
msg = Message("Registro concurso regional de física y matemáticas".decode('utf8'), sender = "noreply@mat.uson.mx", recipients=[u.email, "adrianvo@hotmail.com"])
msg.html = mensaje
mail.send(msg)
return "<script type=\"text/javascript\">\
alert(\"Registro exitoso. Se han enviado tus datos al correo que proporcionaste en el registro.\");\
window.location.href = '/'\
</script>"
return render_template('index.html', form_login=log_form, sign_form=sign_form)
@app.route('/admin', methods=['GET'])
@login_required
def admin():
if current_user.admin != 1:
return redirect(url_for('index'))
users = User.query.filter_by(admin=0).all()
return render_template('lista.html', usuarios=users, admin=1)
@app.route('/datos/<estudiante>', methods=['GET'])
@login_required
def datos(estudiante):
if current_user.admin != 1:
return redirect(url_for('index'))
user = User.query.filter_by(email=estudiante).first()
return render_template('estudiante.html', user=user, admin=1)
@app.route('/calificar/<estudiante>', methods=['post'])
@login_required
def calificar(estudiante):
if current_user.admin != 1:
return redirect(url_for('index'))
if len(request.form.items()) == 0:
return "<script type=\"text/javascript\">\
window.location.href = '/admin'\
</script>"
u = User.query.filter_by(email=estudiante).first()
revisados = []
rechazados = []
aceptados = []
engine = create_engine("sqlite:///"+os.path.abspath(os.path.dirname(__file__))+"/app.db")
folder = BaseConfig.UPLOAD_FOLDER + "/" + u.email
for item in request.form.items():
        doc = item[0].split('_')[1]
        revisados.append(doc.title())
        if item[1] == "1":
            aceptados.append(doc)
            with engine.connect() as connection:
                engine.execute("update users set status_"+doc+"=1 where email ='"+u.email+"'")
        else:
            rechazados.append(doc)
            with engine.connect() as connection:
                engine.execute("update users set status_"+doc+"=3 where email ='"+u.email+"'")
            a = engine.execute("select "+doc[:4]+" from users where email='"+u.email+"'")
            row = a.fetchone()
            if row[0] != '':
                os.remove(folder+"/"+row[0].split('/')[2])
            engine.execute("update users set "+doc[:4]+"='' where email ='"+u.email+"'")
    row = engine.execute("select status_acta, status_credencial, status_foto from users where email='"+u.email+"'")
estados = tuple(row.fetchone())
# return "<script type=\"text/javascript\">\
# alert(\""+str(estados)+"\");\
# window.location.href = '/admin'\
# </script>"
if len(revisados) != 0:
        mensaje = "Estimado estudiante, el comité del Concurso Regional de Física y Matemáticas revisó tus documentos: \
"+", ".join(revisados)+" y estas fueron las observaciones:<br>Documentos aceptados: "+", ".join(aceptados)+"\
<br>Documentos rechazados: "+", ".join(rechazados)+"<br>".decode('utf8')
with engine.connect() as connection:
engine.execute("update users set revisor='"+current_user.email+"' where email ='"+u.email+"'")
if 0 in estados or 3 in estados:
with engine.connect() as connection:
engine.execute("update users set status='Revisado' where email ='"+u.email+"'")
mensaje = mensaje + "Aún tienes documentos pendientes por enviar o rechazados.\
Sube tus documentos para que no te quedes fuera!".decode('utf8')
else:
with engine.connect() as connection:
engine.execute("update users set status='Listo' where email ='"+u.email+"'")
conc = engine.execute("select concursos from users where email ='"+u.email+"'").first()
iniciales = "".join(map(lambda x: x[0], conc.values()[0].split(" ", 2)))
folio = engine.execute("select folio from users").fetchall()[-1].values()[0]
            if folio == '':
                engine.execute("update users set folio='CRFYM-"+iniciales+"-10000' where email ='"+u.email+"'")
            else:
                folio_int = int(folio.split("-")[-1])
                engine.execute("update users set folio='CRFYM-"+iniciales+"-"+str(folio_int+1)+"' where email ='"+u.email+"'")
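            # Folio format produced above (illustration): the first approved user
            # gets 'CRFYM-<iniciales>-10000' and each later one increments the tail,
            # e.g. 'CRFYM-FM-10001' when concursos is 'Fisica Matematicas'.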
mensaje = mensaje + "Has completado el registro al concurso, exito!!"
msg = Message("Registro concurso regional de física y matemáticas".decode('utf8'), sender = "noreply@mat.uson.mx", recipients=[u.email, "adrianvo@hotmail.com"])
msg.html = mensaje
mail.send(msg)
return "<script type=\"text/javascript\">\
    alert(\"Datos revisados. El alumno recibirá un correo con las observaciones.\");\
window.location.href = '/admin'\
</script>"
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
logout_user()
form_login = LoginForm(prefix="form_login")
sign_form = SignUpForm(prefix="sign_form")
if form_login.validate_on_submit() and request.method == 'POST':
user = User.query.filter_by(email=form_login.email.data).first()
if user is not None and user.verify_password(form_login.password.data):
if user.admin == 1:
login_user(user)
return redirect(request.args.get('next') or url_for('admin'))
login_user(user)
return redirect(request.args.get('next') or url_for('inicio', success=False))
# flash("Correo o contrasena invalido", category='error')
return "<script type=\"text/javascript\">\
alert(\"Correo o contraseña inválido.\");\
window.location.href = '/'\
</script>"
login_inc = False
if form_login.validate_on_submit() == False and request.method == 'POST':
login_inc = True
return render_template('index.html', login_inc=login_inc, form_login=form_login, sign_form=sign_form)
if __name__ == '__main__':
manager.run()
ply_loader.rs
use std::io::{Read, Seek, BufReader, BufRead, SeekFrom};
use std::error;
use std::fmt;
use crate::model::ply::{PlyFileHeader, PlyElementDescriptor, standard_formats, PlyPropertyDescriptor, PlyScalar, PlyDatatype};
use std::str::{SplitAsciiWhitespace, FromStr};
use byteorder::{LittleEndian, ByteOrder};
use num::{self, NumCast};
use std::marker::PhantomData;
pub struct PlyMeshLoader<'r, R: Read + Seek> {
reader: &'r mut R,
// file_header: Option<PlyFileHeader>,
// parse_state: Option<FileParseState>,
}
impl<'r, R: Read + Seek> PlyMeshLoader<'r, R> {
pub fn parse_header(self) -> Result<PlyDataPuller<'r, R>, Box<dyn error::Error>> {
fn ply_err<T>(message: &'static str) -> Result<T, Box<dyn error::Error>> {
Err(Box::from(PlyError::new(message)))
}
// if let None = self.file_header {
// Make buf reader
let mut buf_reader = BufReader::new(self.reader);
// Read file header
let mut lines = (&mut buf_reader).lines();
let mut element_vec: Vec<PlyElementDescriptor> = Vec::new();
let mut current_element: Option<PlyElementDescriptor> = None;
let mut i = 0;
// let mut k = 0;
'header_loop: loop {
let line = if let Some(l) = lines.next() {
if let Ok(l) = l {
l
}
else {
return Err(Box::from(l.unwrap_err()));
}
}
else {
return ply_err("Header missing required fields or has no 'end_header' line");
};
// // DEBUG:
// println!("DEBUG: line: {}", line);
// if k > 40 {
// break;
// }
// k += 1;
// Ignore comment lines
if line.starts_with("comment") {
continue 'header_loop;
}
// End of header
if line.as_str().eq("end_header") {
break 'header_loop;
}
// Magic number
if i == 0 {
if !line.as_str().eq("ply") {
return ply_err("Header missing ply fingerprint");
}
i = 1;
}
// Format and version
else if i == 1 {
if !line.starts_with("format") {
return ply_err("Header missing ply format line")
}
if !line.as_str().eq("format ascii 1.0") {
return ply_err("Unknown or invalid ply format (only ascii 1.0 is currently supported)");
}
i = 2;
}
// Element descriptor
else if line.starts_with("element") {
// Put previous descriptor into list if we have one
if let Some(elem) = current_element.take() {
// elem.recalc_full_element_size();
element_vec.push(elem);
}
// Read element line
let mut split_line = line.split_ascii_whitespace();
let _ = split_line.next(); // Skip 'element' token
let elem_name = String::from({
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid element descriptor");
}
a.unwrap()
});
let num_entries = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid element descriptor");
}
let a = a.unwrap();
                    let a = a.parse::<u32>();
                    if a.is_err() {
                        return ply_err("Invalid element descriptor");
                    }
                    a.unwrap()
};
// Make new descriptor
let elem_index = element_vec.len() as u32;
current_element = Some(PlyElementDescriptor::new(elem_index, elem_name, num_entries));
}
// Property descriptor
else if line.starts_with("property") {
// Check that we are actually in an element
if let None = current_element {
return ply_err("Misplaced property line outside of element descriptor");
}
// Read element line
let mut split_line = line.split_ascii_whitespace();
let _ = split_line.next(); // Skip 'property' token
let prop_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor");
}
let a = a.unwrap();
if a.eq("list") {
let list_index_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor: Cannot read list index type");
}
match PlyScalar::from_str(a.unwrap()) {
Some(s) => s,
None => return ply_err("Invalid property descriptor: Unknown list index type"),
}
};
let list_data_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor: Cannot read list data type");
}
match PlyScalar::from_str(a.unwrap()) {
Some(s) => s,
None => return ply_err("Invalid property descriptor: Unknown list data type"),
}
};
PlyDatatype::List {
index: list_index_type,
element: list_data_type,
}
}
else {
match PlyScalar::from_str(a) {
Some(s) => PlyDatatype::Scalar(s),
                        None => return ply_err("Unknown type in property descriptor")
}
}
};
let prop_name = {
let a = split_line.next();
let a = if let Some(a) = a {
String::from(a)
}
else {
return ply_err("Invalid property descriptor: Invalid name");
};
a
};
// Create property descriptor
let property_descriptor = PlyPropertyDescriptor {
name: prop_name,
datatype: prop_type,
};
// Add to current element
current_element.as_mut().unwrap().properties.push(property_descriptor);
}
}
// Put last descriptor into list
if let Some(elem) = current_element.take() {
// elem.recalc_full_element_size();
element_vec.push(elem);
}
// Create file header
let file_header = PlyFileHeader {
format: standard_formats::ASCII_10,
elements: element_vec,
};
// Get back our file at the proper position
let real_seek_pos = buf_reader.seek(SeekFrom::Current(0)).map_err(|_| PlyError::new("Failed to seek file pos after header (this is probably a bug)"))?;
let reader = buf_reader.into_inner();
reader.seek(SeekFrom::Start(real_seek_pos))?;
// Make puller
let puller = PlyDataPuller {
buf_reader: BufReader::new(reader),
file_header,
parse_state: None,
_phantom: PhantomData,
};
return Ok(puller);
// }
// else {
// return ply_err("Cannot parse header more than once");
// }
}
pub fn new(source: &'r mut R) -> PlyMeshLoader<'r, R> {
PlyMeshLoader {
reader: source,
}
}
}
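// Usage sketch (editor's illustration; assumes any `Read + Seek` source, e.g. a File):
//
//     let mut file = std::fs::File::open("mesh.ply")?;
//     let puller = PlyMeshLoader::new(&mut file).parse_header()?;
//     dump_ply_header(puller.header());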
pub struct PlyDataPuller<'r, R: Read + Seek> {
buf_reader: BufReader<&'r mut R>,
file_header: PlyFileHeader,
parse_state: Option<FileParseState>,
_phantom: PhantomData<()>
}
impl<'r, R: Read + Seek> PlyDataPuller<'r, R> {
pub fn next_event<'a>(&'a mut self) -> PullEvent<'a, 'r, R> {
return if self.parse_state.is_none() {
if self.file_header.elements.len() <= 0 {
return PullEvent::End
}
// Create initial parse state
self.parse_state = Some(FileParseState {
current_element_index: 0,
// entries_left: self.file_header.elements.first().unwrap().num_entries,
});
let parser = PlyElementParser::new(&mut self.buf_reader, self.file_header.elements.first().unwrap(), self.parse_state.as_mut().unwrap());
PullEvent::Element(parser)
}
else {
// If we still have elements left update index
let state = self.parse_state.as_mut().unwrap();
if state.current_element_index < self.file_header.elements.len().saturating_sub(1) as u32 {
state.current_element_index += 1;
let parser = PlyElementParser::new(&mut self.buf_reader, self.file_header.elements.get(state.current_element_index as usize).unwrap(), self.parse_state.as_mut().unwrap());
PullEvent::Element(parser)
}
else {
PullEvent::End
}
}
}
pub fn header(&self) -> &PlyFileHeader {
&self.file_header
}
}
struct FileParseState {
current_element_index: u32
}
pub enum PullEvent<'a, 'r: 'a, R: Read + Seek> {
Element(PlyElementParser<'a, 'r, R>),
End,
}
impl<'a, 'r: 'a, R: Read + Seek> PullEvent<'a, 'r, R> {
}
pub struct PlyElementParser<'a, 'r, R: Read + Seek> {
buf_reader: &'a mut BufReader<&'r mut R>,
// parse_state: &'a mut FileParseState,
element_descriptor: &'a PlyElementDescriptor,
// full_element_size: u32,
entries_left: u32,
}
impl<'a, 'r: 'a, R: Read + Seek> PlyElementParser<'a, 'r, R> {
pub fn read_entry(&mut self, buffer: &mut [u8]) -> Result<(), PlyReadError> {
// fn ply_err<T>(message: &'static str) -> Result<T, Box<dyn error::Error>> {
// Err(Box::from(PlyError::new(message)))
// }
// Return appropriate error if no more lines are left
if self.entries_left <= 0 {
return Err(PlyReadError::NoMoreEntries);
}
// Get initial stream pos so we can rewind later when the given buffer is
// too small.
        // NOTE: This discards the internal buffer of the buffered reader, which is
        // wasteful, but short of a hand-rolled reader there is no other way to rewind.
let initial_stream_pos = match self.buf_reader.seek(SeekFrom::Current(0)) {
Ok(pos) => pos,
Err(err) => return Err(PlyReadError::Other(Box::new(err))),
};
let mut lines = self.buf_reader.lines();
let mut buffer_pos = 0usize;
let mut only_measuring_size = false;
// Get line
let line = lines.next();
let line = if let Some(l) = line {
if let Ok(l) = l {
l
} else {
return Err(PlyReadError::Other(Box::new(PlyError::new("Unexpected line"))));
}
} else {
            return Err(PlyReadError::Other(Box::new(PlyError::new("Unexpectedly no more lines left"))));
};
// Split line at whitespace
let mut split_line = line.split_ascii_whitespace();
// Read entry line
for p in &self.element_descriptor.properties {
fn write_value<T: NumCast>(scalar_type: PlyScalar, value: T, data_size: usize, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) {
// Buffer is too small, eventually return a TooSmall error but
// for now only set the flag so we can continue calculating the
// actually needed buffer size
let final_pos = *buffer_pos + data_size;
if buffer.len() < final_pos {
*only_measure = true;
}
if *only_measure {
*buffer_pos += data_size; // Increment anyway so we know what the final needed buffer size is
}
else {
// Get offset buffer slice
let slice = &mut buffer[*buffer_pos..final_pos];
match scalar_type {
S::uchar => slice[0] = num::cast::<_, u8>(value).unwrap(),
S::uint => LittleEndian::write_u32(slice, num::cast::<_, u32>(value).unwrap()),
S::float => LittleEndian::write_f32(slice, num::cast::<_, f32>(value).unwrap()),
_ => unimplemented!("DEBUG: Datatype not implemented yet"),
}
// Increment buffer pos
*buffer_pos += data_size;
}
}
fn process_value<T: Copy + FromStr + NumCast>(scalar_type: PlyScalar, split_line: &mut SplitAsciiWhitespace, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) -> Result<T, PlyReadError> {
let value_str = if let Some(s) = split_line.next() {
s
} else {
return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid entry line: Missing property value"))));
};
let val: T = match value_str.parse::<T>() {
Ok(val) => val,
Err(_err) => return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid entry line: Failed to parse value")))),
};
// Write the value into the buffer
write_value::<T>(scalar_type, val, std::mem::size_of::<T>(), buffer, buffer_pos, only_measure);
                Ok(val)
}
fn process_scalar_uncast(scalar_type: PlyScalar, split_line: &mut SplitAsciiWhitespace, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) -> Result<(), PlyReadError> {
match scalar_type {
S::uchar => process_value::<u8>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
S::uint => process_value::<u32>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
S::float => process_value::<f32>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
_ => unimplemented!("DEBUG: Datatype not implemented yet"),
}
}
use PlyScalar as S;
match p.datatype {
PlyDatatype::Scalar(scalar) => {
process_scalar_uncast(scalar, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)?;
}
PlyDatatype::List {index, element} => {
let num_elements = match index {
S::uchar => process_value::<u8>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
S::ushort => process_value::<u16>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
S::uint => process_value::<u32>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
_ => return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid list index datatype: Only uchar, ushort and uint are valid")))),
};
for _ in 0..num_elements {
process_scalar_uncast(element, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)?;
}
}
}
}
if only_measuring_size {
// Rewind reader
if let Err(e) = self.buf_reader.seek(SeekFrom::Start(initial_stream_pos)) {
return Err(PlyReadError::Other(Box::new(e)));
}
// Return the min buffer size based on the final offset (since we still go over all elements even if the buffer is too small)
Err(PlyReadError::BufferTooSmall {min_buffer_size: buffer_pos})
}
else {
self.entries_left -= 1;
Ok(())
}
}
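    // Recovery pattern for `BufferTooSmall` (editor's sketch; `buf` is a Vec<u8>):
    //
    //     match parser.read_entry(&mut buf) {
    //         Err(PlyReadError::BufferTooSmall { min_buffer_size }) => {
    //             buf.resize(min_buffer_size, 0);
    //             parser.read_entry(&mut buf)?;
    //         }
    //         other => other?,
    //     };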
pub fn element_descriptor(&self) -> &'a PlyElementDescriptor {
self.element_descriptor
}
fn new(reader: &'a mut BufReader<&'r mut R>, element_descriptor: &'a PlyElementDescriptor, _parse_state: &'a mut FileParseState) -> PlyElementParser<'a, 'r, R> {
// // Calc full element size
// let mut full_element_size = 0u32;
// for p in &element_descriptor.properties {
// full_element_size += p.datatype.byte_size();
// }
let entries_left = element_descriptor.num_entries;
PlyElementParser {
buf_reader: reader,
element_descriptor,
// full_element_size,
// parse_state,
entries_left,
}
}
}
//mod generic_byteorder {
// use byteorder::{WriteBytesExt, LittleEndian, ByteOrder};
//
// pub trait GenericByteOrder<E: ByteOrder> {
// fn write_into_slice(self, buffer: &mut [u8]);
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for f32 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// E::write_f32(buffer, self)
// }
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for u8 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// buffer[0] = self
// }
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for u32 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// E::write_u32(buffer, self)
// }
// }
//}
pub enum PlyReadError {
NoMoreEntries,
BufferTooSmall {
min_buffer_size: usize,
},
Other(Box<dyn error::Error>),
}
impl error::Error for PlyReadError {}
impl fmt::Display for PlyReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
use PlyReadError as E;
match self {
E::NoMoreEntries => write!(f, "PlyReadError: No more entries"),
E::BufferTooSmall {min_buffer_size} => write!(f, "PlyReadError: Buffer too small: min size = {}", min_buffer_size),
E::Other(error) => <Box<dyn error::Error> as fmt::Display>::fmt(error, f)
}
}
}
impl fmt::Debug for PlyReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
<Self as fmt::Display>::fmt(self, f)
}
}
pub struct PlyError {
message: &'static str,
}
impl PlyError {
pub fn new(message: &'static str) -> PlyError {
PlyError {
message
}
}
}
impl error::Error for PlyError {}
impl fmt::Display for PlyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "PlyError: {}", self.message)
}
}
impl fmt::Debug for PlyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
<Self as fmt::Display>::fmt(self, f)
}
}
pub fn dump_ply_header(header: &PlyFileHeader) {
for element in &header.elements {
println!("element '{}' {}", element.name, element.num_entries);
for property in &element.properties {
println!(" property '{}' {:?}", property.name, property.datatype)
}
}
}
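// Typical output of dump_ply_header (illustrative values):
//     element 'vertex' 34834
//      property 'x' Scalar(float)
//      property 'y' Scalar(float)
//     element 'face' 69451
//      property 'vertex_indices' List { index: uchar, element: uint }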
/*
pub fn test() -> Result<(), Box<dyn error::Error>> {
let mut file = OpenOptions::new().read(true).open(r"C:\Users\Jan\Desktop\Lee Head\Lee Head.ply")?;
let loader = PlyMeshLoader::new(&mut file);
let mut puller = loader.parse_header()?;
dump_ply_header(&puller.file_header);
// let mut puller = RefCell::new(puller);
loop {
// let mut borrowed_puller = puller.borrow_mut();
match puller.next_event() {
PullEvent::Element(mut parser) => {
let mut buffer = [0u8; 32];
let res = parser.read_entry(&mut buffer);
if let Err(PlyReadError::BufferTooSmall {min_buffer_size}) = res {
println!("Buffer too small! (min {})", min_buffer_size);
return Ok(());
}
else if let Ok(_) = res {
let mut pos = 0;
for p in parser.element_descriptor.properties() {
match p.datatype {
PlyDatatype::Scalar(scalar) => {
let final_pos = pos + scalar.byte_size();
match scalar {
PlyScalar::float => {
let val = LittleEndian::read_f32(&buffer[(pos as usize)..(final_pos as usize)]);
println!("f32({})", val);
},
_ => unimplemented!()
}
pos = final_pos;
},
PlyDatatype::List {index, element} => {
}
}
}
}
}
PullEvent::End => break,
}
break;
}
Ok(())
}
*/
ply_loader.rs
use std::io::{Read, Seek, BufReader, BufRead, SeekFrom};
use std::error;
use std::fmt;
use crate::model::ply::{PlyFileHeader, PlyElementDescriptor, standard_formats, PlyPropertyDescriptor, PlyScalar, PlyDatatype};
use std::str::{SplitAsciiWhitespace, FromStr};
use byteorder::{LittleEndian, ByteOrder};
use num::{self, NumCast};
use std::marker::PhantomData;
pub struct PlyMeshLoader<'r, R: Read + Seek> {
reader: &'r mut R,
// file_header: Option<PlyFileHeader>,
// parse_state: Option<FileParseState>,
}
impl<'r, R: Read + Seek> PlyMeshLoader<'r, R> {
pub fn parse_header(self) -> Result<PlyDataPuller<'r, R>, Box<dyn error::Error>> {
fn ply_err<T>(message: &'static str) -> Result<T, Box<dyn error::Error>> {
Err(Box::from(PlyError::new(message)))
}
// if let None = self.file_header {
// Make buf reader
let mut buf_reader = BufReader::new(self.reader);
// Read file header
let mut lines = (&mut buf_reader).lines();
let mut element_vec: Vec<PlyElementDescriptor> = Vec::new();
let mut current_element: Option<PlyElementDescriptor> = None;
let mut i = 0;
// let mut k = 0;
'header_loop: loop {
let line = if let Some(l) = lines.next() {
if let Ok(l) = l {
l
}
else {
return Err(Box::from(l.unwrap_err()));
}
}
else {
return ply_err("Header missing required fields or has no 'end_header' line");
};
// // DEBUG:
// println!("DEBUG: line: {}", line);
// if k > 40 {
// break;
// }
// k += 1;
// Ignore comment lines
if line.starts_with("comment") {
continue 'header_loop;
}
// End of header
if line.as_str().eq("end_header") {
break 'header_loop;
}
// Magic number
if i == 0 {
if !line.as_str().eq("ply") {
return ply_err("Header missing ply fingerprint");
}
i = 1;
}
// Format and version
else if i == 1 {
if !line.starts_with("format") {
return ply_err("Header missing ply format line")
}
if !line.as_str().eq("format ascii 1.0") {
return ply_err("Unknown or invalid ply format (only ascii 1.0 is currently supported)");
}
i = 2;
}
// Element descriptor
else if line.starts_with("element") {
// Put previous descriptor into list if we have one
if let Some(elem) = current_element.take() {
// elem.recalc_full_element_size();
element_vec.push(elem);
}
// Read element line
let mut split_line = line.split_ascii_whitespace();
let _ = split_line.next(); // Skip 'element' token
let elem_name = String::from({
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid element descriptor");
}
a.unwrap()
});
let num_entries = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid element descriptor");
}
let a = a.unwrap();
let a = a.parse::<u32>();
if a.is_err() {
return ply_err("Invalid element descriptor");
}
a.unwrap()
};
// Make new descriptor
let elem_index = element_vec.len() as u32;
current_element = Some(PlyElementDescriptor::new(elem_index, elem_name, num_entries));
}
// Property descriptor
else if line.starts_with("property") {
// Check that we are actually in an element
if let None = current_element {
return ply_err("Misplaced property line outside of element descriptor");
}
// Read element line
let mut split_line = line.split_ascii_whitespace();
let _ = split_line.next(); // Skip 'property' token
let prop_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor");
}
let a = a.unwrap();
if a.eq("list") {
let list_index_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor: Cannot read list index type");
}
match PlyScalar::from_str(a.unwrap()) {
Some(s) => s,
None => return ply_err("Invalid property descriptor: Unknown list index type"),
}
};
let list_data_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor: Cannot read list data type");
}
match PlyScalar::from_str(a.unwrap()) {
Some(s) => s,
None => return ply_err("Invalid property descriptor: Unknown list data type"),
}
};
PlyDatatype::List {
index: list_index_type,
element: list_data_type,
}
}
else {
match PlyScalar::from_str(a) {
Some(s) => PlyDatatype::Scalar(s),
                        None => return ply_err("Unknown type in property descriptor")
}
}
};
let prop_name = {
let a = split_line.next();
let a = if let Some(a) = a {
String::from(a)
}
else {
return ply_err("Invalid property descriptor: Invalid name");
};
a
};
// Create property descriptor
let property_descriptor = PlyPropertyDescriptor {
name: prop_name,
datatype: prop_type,
};
// Add to current element
current_element.as_mut().unwrap().properties.push(property_descriptor);
}
}
// Put last descriptor into list
if let Some(elem) = current_element.take() {
// elem.recalc_full_element_size();
element_vec.push(elem);
}
// Create file header
let file_header = PlyFileHeader {
format: standard_formats::ASCII_10,
elements: element_vec,
};
// Get back our file at the proper position
let real_seek_pos = buf_reader.seek(SeekFrom::Current(0)).map_err(|_| PlyError::new("Failed to seek file pos after header (this is probably a bug)"))?;
let reader = buf_reader.into_inner();
reader.seek(SeekFrom::Start(real_seek_pos))?;
// Make puller
let puller = PlyDataPuller {
buf_reader: BufReader::new(reader),
file_header,
parse_state: None,
_phantom: PhantomData,
};
return Ok(puller);
// }
// else {
// return ply_err("Cannot parse header more than once");
// }
}
pub fn new(source: &'r mut R) -> PlyMeshLoader<'r, R> {
PlyMeshLoader {
reader: source,
}
}
}
pub struct PlyDataPuller<'r, R: Read + Seek> {
buf_reader: BufReader<&'r mut R>,
file_header: PlyFileHeader,
parse_state: Option<FileParseState>,
_phantom: PhantomData<()>
}
impl<'r, R: Read + Seek> PlyDataPuller<'r, R> {
pub fn next_event<'a>(&'a mut self) -> PullEvent<'a, 'r, R> {
return if self.parse_state.is_none() {
if self.file_header.elements.len() <= 0 {
return PullEvent::End
}
// Create initial parse state
self.parse_state = Some(FileParseState {
current_element_index: 0,
// entries_left: self.file_header.elements.first().unwrap().num_entries,
});
let parser = PlyElementParser::new(&mut self.buf_reader, self.file_header.elements.first().unwrap(), self.parse_state.as_mut().unwrap());
PullEvent::Element(parser)
}
else {
// If we still have elements left update index
let state = self.parse_state.as_mut().unwrap();
if state.current_element_index < self.file_header.elements.len().saturating_sub(1) as u32 {
state.current_element_index += 1;
let parser = PlyElementParser::new(&mut self.buf_reader, self.file_header.elements.get(state.current_element_index as usize).unwrap(), self.parse_state.as_mut().unwrap());
PullEvent::Element(parser)
}
else {
PullEvent::End
}
}
}
pub fn header(&self) -> &PlyFileHeader {
&self.file_header
}
}
struct FileParseState {
current_element_index: u32
}
pub enum PullEvent<'a, 'r: 'a, R: Read + Seek> {
Element(PlyElementParser<'a, 'r, R>),
End,
}
impl<'a, 'r: 'a, R: Read + Seek> PullEvent<'a, 'r, R> {
}
pub struct PlyElementParser<'a, 'r, R: Read + Seek> {
buf_reader: &'a mut BufReader<&'r mut R>,
// parse_state: &'a mut FileParseState,
element_descriptor: &'a PlyElementDescriptor,
// full_element_size: u32,
entries_left: u32,
}
impl<'a, 'r: 'a, R: Read + Seek> PlyElementParser<'a, 'r, R> {
pub fn read_entry(&mut self, buffer: &mut [u8]) -> Result<(), PlyReadError> {
// fn ply_err<T>(message: &'static str) -> Result<T, Box<dyn error::Error>> {
// Err(Box::from(PlyError::new(message)))
// }
// Return appropriate error if no more lines are left
if self.entries_left <= 0 {
return Err(PlyReadError::NoMoreEntries);
}
// Get initial stream pos so we can rewind later when the given buffer is
// too small.
        // NOTE: This discards the internal buffer of the buffered reader, which is
        // wasteful, but short of a hand-rolled reader there is no other way to rewind.
let initial_stream_pos = match self.buf_reader.seek(SeekFrom::Current(0)) {
Ok(pos) => pos,
Err(err) => return Err(PlyReadError::Other(Box::new(err))),
};
let mut lines = self.buf_reader.lines();
let mut buffer_pos = 0usize;
let mut only_measuring_size = false;
// Get line
let line = lines.next();
let line = if let Some(l) = line {
if let Ok(l) = l {
l
} else {
return Err(PlyReadError::Other(Box::new(PlyError::new("Unexpected line"))));
}
} else {
            return Err(PlyReadError::Other(Box::new(PlyError::new("Unexpectedly no more lines left"))));
};
// Split line at whitespace
let mut split_line = line.split_ascii_whitespace();
// Read entry line
for p in &self.element_descriptor.properties {
fn write_value<T: NumCast>(scalar_type: PlyScalar, value: T, data_size: usize, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) {
// Buffer is too small, eventually return a TooSmall error but
// for now only set the flag so we can continue calculating the
// actually needed buffer size
let final_pos = *buffer_pos + data_size;
if buffer.len() < final_pos {
*only_measure = true;
}
if *only_measure {
*buffer_pos += data_size; // Increment anyway so we know what the final needed buffer size is
}
else {
// Get offset buffer slice
let slice = &mut buffer[*buffer_pos..final_pos];
match scalar_type {
S::uchar => slice[0] = num::cast::<_, u8>(value).unwrap(),
S::uint => LittleEndian::write_u32(slice, num::cast::<_, u32>(value).unwrap()),
S::float => LittleEndian::write_f32(slice, num::cast::<_, f32>(value).unwrap()),
_ => unimplemented!("DEBUG: Datatype not implemented yet"),
}
// Increment buffer pos
*buffer_pos += data_size;
}
}
fn process_value<T: Copy + FromStr + NumCast>(scalar_type: PlyScalar, split_line: &mut SplitAsciiWhitespace, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) -> Result<T, PlyReadError> {
let value_str = if let Some(s) = split_line.next() {
s
} else {
return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid entry line: Missing property value"))));
};
let val: T = match value_str.parse::<T>() {
Ok(val) => val,
Err(_err) => return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid entry line: Failed to parse value")))),
};
// Write the value into the buffer
write_value::<T>(scalar_type, val, std::mem::size_of::<T>(), buffer, buffer_pos, only_measure);
                Ok(val)
}
fn process_scalar_uncast(scalar_type: PlyScalar, split_line: &mut SplitAsciiWhitespace, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) -> Result<(), PlyReadError> {
match scalar_type {
S::uchar => process_value::<u8>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
S::uint => process_value::<u32>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
S::float => process_value::<f32>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
_ => unimplemented!("DEBUG: Datatype not implemented yet"),
}
}
use PlyScalar as S;
match p.datatype {
PlyDatatype::Scalar(scalar) => {
process_scalar_uncast(scalar, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)?;
}
PlyDatatype::List {index, element} => {
let num_elements = match index {
S::uchar => process_value::<u8>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
S::ushort => process_value::<u16>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
S::uint => process_value::<u32>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
_ => return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid list index datatype: Only uchar, ushort and uint are valid")))),
};
for _ in 0..num_elements {
process_scalar_uncast(element, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)?;
}
}
}
}
if only_measuring_size {
// Rewind reader
if let Err(e) = self.buf_reader.seek(SeekFrom::Start(initial_stream_pos)) {
return Err(PlyReadError::Other(Box::new(e)));
}
// Return the min buffer size based on the final offset (since we still go over all elements even if the buffer is too small)
Err(PlyReadError::BufferTooSmall {min_buffer_size: buffer_pos})
}
else {
self.entries_left -= 1;
Ok(())
}
}
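    // Worked example of the layout read_entry writes (editor's note): for
    // `property list uchar uint vertex_indices`, the ASCII entry `3 0 1 2`
    // becomes 1 count byte plus 3 * 4 little-endian u32 bytes, so
    // `min_buffer_size` for that entry would be 13.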
pub fn element_descriptor(&self) -> &'a PlyElementDescriptor {
self.element_descriptor
}
fn new(reader: &'a mut BufReader<&'r mut R>, element_descriptor: &'a PlyElementDescriptor, _parse_state: &'a mut FileParseState) -> PlyElementParser<'a, 'r, R> {
// // Calc full element size
// let mut full_element_size = 0u32;
// for p in &element_descriptor.properties {
// full_element_size += p.datatype.byte_size();
// }
let entries_left = element_descriptor.num_entries;
PlyElementParser {
buf_reader: reader,
element_descriptor,
// full_element_size,
// parse_state,
entries_left,
}
}
}
//mod generic_byteorder {
// use byteorder::{WriteBytesExt, LittleEndian, ByteOrder};
//
// pub trait GenericByteOrder<E: ByteOrder> {
// fn write_into_slice(self, buffer: &mut [u8]);
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for f32 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// E::write_f32(buffer, self)
// }
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for u8 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// buffer[0] = self
// }
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for u32 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// E::write_u32(buffer, self)
// }
// }
//}
pub enum PlyReadError {
NoMoreEntries,
BufferTooSmall {
min_buffer_size: usize,
},
Other(Box<dyn error::Error>),
}
impl error::Error for PlyReadError {}
impl fmt::Display for PlyReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
use PlyReadError as E;
match self {
E::NoMoreEntries => write!(f, "PlyReadError: No more entries"),
E::BufferTooSmall {min_buffer_size} => write!(f, "PlyReadError: Buffer too small: min size = {}", min_buffer_size),
E::Other(error) => <Box<dyn error::Error> as fmt::Display>::fmt(error, f)
}
}
}
impl fmt::Debug for PlyReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
<Self as fmt::Display>::fmt(self, f)
}
}
pub struct PlyError {
message: &'static str,
}
impl PlyError {
pub fn new(message: &'static str) -> PlyError {
PlyError {
message
}
}
}
impl error::Error for PlyError {}
impl fmt::Display for PlyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "PlyError: {}", self.message)
}
}
impl fmt::Debug for PlyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
<Self as fmt::Display>::fmt(self, f)
}
}
pub fn dump_ply_header(header: &PlyFileHeader) {
for element in &header.elements {
println!("element '{}' {}", element.name, element.num_entries);
for property in &element.properties {
println!(" property '{}' {:?}", property.name, property.datatype)
}
}
}
/*
pub fn test() -> Result<(), Box<dyn error::Error>> {
let mut file = OpenOptions::new().read(true).open(r"C:\Users\Jan\Desktop\Lee Head\Lee Head.ply")?;
let loader = PlyMeshLoader::new(&mut file);
let mut puller = loader.parse_header()?;
dump_ply_header(&puller.file_header);
// let mut puller = RefCell::new(puller);
loop {
// let mut borrowed_puller = puller.borrow_mut();
match puller.next_event() {
PullEvent::Element(mut parser) => {
let mut buffer = [0u8; 32];
let res = parser.read_entry(&mut buffer);
if let Err(PlyReadError::BufferTooSmall {min_buffer_size}) = res {
println!("Buffer too small! (min {})", min_buffer_size);
return Ok(());
}
else if let Ok(_) = res {
let mut pos = 0;
for p in parser.element_descriptor.properties() {
match p.datatype {
PlyDatatype::Scalar(scalar) => {
let final_pos = pos + scalar.byte_size();
match scalar {
PlyScalar::float => {
let val = LittleEndian::read_f32(&buffer[(pos as usize)..(final_pos as usize)]);
println!("f32({})", val);
},
_ => unimplemented!()
}
pos = final_pos;
},
PlyDatatype::List {index, element} => {
}
}
}
}
}
PullEvent::End => break,
}
break;
}
Ok(())
}
*/
| fmt | identifier_name |
ply_loader.rs
use std::io::{Read, Seek, BufReader, BufRead, SeekFrom};
use std::error;
use std::fmt;
use crate::model::ply::{PlyFileHeader, PlyElementDescriptor, standard_formats, PlyPropertyDescriptor, PlyScalar, PlyDatatype};
use std::str::{SplitAsciiWhitespace, FromStr};
use byteorder::{LittleEndian, ByteOrder};
use num::{self, NumCast};
use std::marker::PhantomData;
pub struct PlyMeshLoader<'r, R: Read + Seek> {
reader: &'r mut R,
// file_header: Option<PlyFileHeader>,
// parse_state: Option<FileParseState>,
}
impl<'r, R: Read + Seek> PlyMeshLoader<'r, R> {
pub fn parse_header(self) -> Result<PlyDataPuller<'r, R>, Box<dyn error::Error>> {
fn ply_err<T>(message: &'static str) -> Result<T, Box<dyn error::Error>> {
Err(Box::from(PlyError::new(message)))
}
// if let None = self.file_header {
// Make buf reader
let mut buf_reader = BufReader::new(self.reader);
// Read file header
let mut lines = (&mut buf_reader).lines();
let mut element_vec: Vec<PlyElementDescriptor> = Vec::new();
let mut current_element: Option<PlyElementDescriptor> = None;
let mut i = 0;
// let mut k = 0;
'header_loop: loop {
let line = if let Some(l) = lines.next() {
if let Ok(l) = l {
l
}
else {
return Err(Box::from(l.unwrap_err()));
}
}
else {
return ply_err("Header missing required fields or has no 'end_header' line");
};
// // DEBUG:
// println!("DEBUG: line: {}", line);
// if k > 40 {
// break;
// }
// k += 1;
// Ignore comment lines
if line.starts_with("comment") {
continue 'header_loop;
}
// End of header
if line.as_str().eq("end_header") {
break 'header_loop;
}
// Magic number
if i == 0 {
if !line.as_str().eq("ply") {
return ply_err("Header missing ply fingerprint");
}
i = 1;
}
// Format and version
else if i == 1 {
if !line.starts_with("format") {
return ply_err("Header missing ply format line")
}
if !line.as_str().eq("format ascii 1.0") {
return ply_err("Unknown or invalid ply format (only ascii 1.0 is currently supported)");
}
i = 2;
}
// Element descriptor
else if line.starts_with("element") {
// Put previous descriptor into list if we have one
if let Some(elem) = current_element.take() {
// elem.recalc_full_element_size();
element_vec.push(elem);
}
// Read element line
let mut split_line = line.split_ascii_whitespace();
let _ = split_line.next(); // Skip 'element' token
let elem_name = String::from({
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid element descriptor");
}
a.unwrap()
});
let num_entries = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid element descriptor");
}
let a = a.unwrap();
let a = a.parse::<u32>();
if a.is_err() {
return ply_err("Invalid element descriptor");
}
a.unwrap()
};
// Make new descriptor
let elem_index = element_vec.len() as u32;
current_element = Some(PlyElementDescriptor::new(elem_index, elem_name, num_entries));
}
// Property descriptor
else if line.starts_with("property") {
// Check that we are actually in an element
if let None = current_element {
return ply_err("Misplaced property line outside of element descriptor");
}
// Read element line
let mut split_line = line.split_ascii_whitespace();
let _ = split_line.next(); // Skip 'property' token
let prop_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor");
}
let a = a.unwrap();
if a.eq("list") {
let list_index_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor: Cannot read list index type");
}
match PlyScalar::from_str(a.unwrap()) {
Some(s) => s,
None => return ply_err("Invalid property descriptor: Unknown list index type"),
}
};
let list_data_type = {
let a = split_line.next();
if a.is_none() {
return ply_err("Invalid property descriptor: Cannot read list data type");
}
match PlyScalar::from_str(a.unwrap()) {
Some(s) => s,
None => return ply_err("Invalid property descriptor: Unknown list data type"),
}
};
PlyDatatype::List {
index: list_index_type,
element: list_data_type,
}
}
else {
match PlyScalar::from_str(a) {
Some(s) => PlyDatatype::Scalar(s),
                        None => return ply_err("Unknown type in property descriptor")
}
}
};
let prop_name = {
let a = split_line.next();
let a = if let Some(a) = a {
String::from(a)
}
else {
return ply_err("Invalid property descriptor: Invalid name");
};
a
};
// Create property descriptor
let property_descriptor = PlyPropertyDescriptor {
name: prop_name,
datatype: prop_type,
};
// Add to current element
current_element.as_mut().unwrap().properties.push(property_descriptor);
}
}
// Put last descriptor into list
if let Some(elem) = current_element.take() {
// elem.recalc_full_element_size();
element_vec.push(elem);
}
// Create file header
let file_header = PlyFileHeader {
format: standard_formats::ASCII_10,
elements: element_vec,
};
// Get back our file at the proper position
let real_seek_pos = buf_reader.seek(SeekFrom::Current(0)).map_err(|_| PlyError::new("Failed to seek file pos after header (this is probably a bug)"))?;
let reader = buf_reader.into_inner();
reader.seek(SeekFrom::Start(real_seek_pos))?;
// Make puller
let puller = PlyDataPuller {
buf_reader: BufReader::new(reader),
file_header,
parse_state: None,
_phantom: PhantomData,
};
return Ok(puller);
// }
// else {
// return ply_err("Cannot parse header more than once");
// }
}
pub fn new(source: &'r mut R) -> PlyMeshLoader<'r, R> {
PlyMeshLoader {
reader: source,
}
}
}
pub struct PlyDataPuller<'r, R: Read + Seek> {
buf_reader: BufReader<&'r mut R>,
file_header: PlyFileHeader,
parse_state: Option<FileParseState>,
_phantom: PhantomData<()>
}
impl<'r, R: Read + Seek> PlyDataPuller<'r, R> {
pub fn next_event<'a>(&'a mut self) -> PullEvent<'a, 'r, R> {
return if self.parse_state.is_none() {
if self.file_header.elements.len() <= 0 {
return PullEvent::End
}
// Create initial parse state
self.parse_state = Some(FileParseState {
current_element_index: 0,
// entries_left: self.file_header.elements.first().unwrap().num_entries,
});
let parser = PlyElementParser::new(&mut self.buf_reader, self.file_header.elements.first().unwrap(), self.parse_state.as_mut().unwrap());
PullEvent::Element(parser)
}
else {
// If we still have elements left update index
let state = self.parse_state.as_mut().unwrap();
if state.current_element_index < self.file_header.elements.len().saturating_sub(1) as u32 {
state.current_element_index += 1;
let parser = PlyElementParser::new(&mut self.buf_reader, self.file_header.elements.get(state.current_element_index as usize).unwrap(), self.parse_state.as_mut().unwrap());
PullEvent::Element(parser)
}
else {
PullEvent::End
}
}
}
pub fn header(&self) -> &PlyFileHeader {
&self.file_header
}
}
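// Pull-style consumption of the events above (editor's sketch):
//
//     loop {
//         match puller.next_event() {
//             PullEvent::Element(mut parser) => { /* call read_entry per entry */ }
//             PullEvent::End => break,
//         }
//     }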
struct FileParseState {
current_element_index: u32
}
pub enum PullEvent<'a, 'r: 'a, R: Read + Seek> {
Element(PlyElementParser<'a, 'r, R>),
End,
}
impl<'a, 'r: 'a, R: Read + Seek> PullEvent<'a, 'r, R> {
}
pub struct PlyElementParser<'a, 'r, R: Read + Seek> {
buf_reader: &'a mut BufReader<&'r mut R>,
// parse_state: &'a mut FileParseState,
element_descriptor: &'a PlyElementDescriptor,
// full_element_size: u32,
entries_left: u32,
}
impl<'a, 'r: 'a, R: Read + Seek> PlyElementParser<'a, 'r, R> {
pub fn read_entry(&mut self, buffer: &mut [u8]) -> Result<(), PlyReadError> {
// fn ply_err<T>(message: &'static str) -> Result<T, Box<dyn error::Error>> {
// Err(Box::from(PlyError::new(message)))
// }
// Return appropriate error if no more lines are left
if self.entries_left <= 0 {
return Err(PlyReadError::NoMoreEntries);
}
// Get initial stream pos so we can rewind later when the given buffer is
// too small.
        // NOTE: This discards the internal buffer of the buffered reader, which is
        // wasteful, but short of a hand-rolled reader there is no other way to rewind.
let initial_stream_pos = match self.buf_reader.seek(SeekFrom::Current(0)) {
Ok(pos) => pos,
Err(err) => return Err(PlyReadError::Other(Box::new(err))),
};
let mut lines = self.buf_reader.lines();
let mut buffer_pos = 0usize;
let mut only_measuring_size = false;
// Get line
let line = lines.next();
let line = if let Some(l) = line {
if let Ok(l) = l {
l
} else {
return Err(PlyReadError::Other(Box::new(PlyError::new("Unexpected line"))));
}
} else {
            return Err(PlyReadError::Other(Box::new(PlyError::new("Unexpectedly no more lines left"))));
};
// Split line at whitespace
let mut split_line = line.split_ascii_whitespace();
// Read entry line
for p in &self.element_descriptor.properties {
fn write_value<T: NumCast>(scalar_type: PlyScalar, value: T, data_size: usize, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) {
// Buffer is too small, eventually return a TooSmall error but
// for now only set the flag so we can continue calculating the
// actually needed buffer size
let final_pos = *buffer_pos + data_size;
if buffer.len() < final_pos {
*only_measure = true;
}
if *only_measure {
*buffer_pos += data_size; // Increment anyway so we know what the final needed buffer size is
}
else {
// Get offset buffer slice
let slice = &mut buffer[*buffer_pos..final_pos];
match scalar_type {
S::uchar => slice[0] = num::cast::<_, u8>(value).unwrap(),
S::uint => LittleEndian::write_u32(slice, num::cast::<_, u32>(value).unwrap()),
S::float => LittleEndian::write_f32(slice, num::cast::<_, f32>(value).unwrap()),
_ => unimplemented!("DEBUG: Datatype not implemented yet"),
}
// Increment buffer pos
*buffer_pos += data_size;
}
}
fn process_value<T: Copy + FromStr + NumCast>(scalar_type: PlyScalar, split_line: &mut SplitAsciiWhitespace, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) -> Result<T, PlyReadError> {
let value_str = if let Some(s) = split_line.next() {
s
} else {
return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid entry line: Missing property value"))));
};
let val: T = match value_str.parse::<T>() {
Ok(val) => val,
Err(_err) => return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid entry line: Failed to parse value")))),
};
// Write the value into the buffer
write_value::<T>(scalar_type, val, std::mem::size_of::<T>(), buffer, buffer_pos, only_measure);
Ok(val)
}
fn process_scalar_uncast(scalar_type: PlyScalar, split_line: &mut SplitAsciiWhitespace, buffer: &mut [u8], buffer_pos: &mut usize, only_measure: &mut bool) -> Result<(), PlyReadError> {
match scalar_type {
S::uchar => process_value::<u8>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
S::uint => process_value::<u32>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
S::float => process_value::<f32>(scalar_type, split_line, buffer, buffer_pos, only_measure).map(|_| ()),
_ => unimplemented!("DEBUG: Datatype not implemented yet"),
}
}
use PlyScalar as S;
match p.datatype {
PlyDatatype::Scalar(scalar) => {
process_scalar_uncast(scalar, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)?;
}
PlyDatatype::List {index, element} => {
let num_elements = match index {
S::uchar => process_value::<u8>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
S::ushort => process_value::<u16>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
S::uint => process_value::<u32>(index, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)? as u64,
_ => return Err(PlyReadError::Other(Box::new(PlyError::new("Invalid list index datatype: Only uchar, ushort and uint are valid")))),
};
for _ in 0..num_elements {
process_scalar_uncast(element, &mut split_line, buffer, &mut buffer_pos, &mut only_measuring_size)?;
}
}
}
}
if only_measuring_size {
// Rewind reader
if let Err(e) = self.buf_reader.seek(SeekFrom::Start(initial_stream_pos)) {
return Err(PlyReadError::Other(Box::new(e)));
}
// Return the min buffer size based on the final offset (since we still go over all elements even if the buffer is too small)
Err(PlyReadError::BufferTooSmall {min_buffer_size: buffer_pos})
}
else {
self.entries_left -= 1;
Ok(())
}
}
pub fn element_descriptor(&self) -> &'a PlyElementDescriptor {
self.element_descriptor
}
fn new(reader: &'a mut BufReader<&'r mut R>, element_descriptor: &'a PlyElementDescriptor, _parse_state: &'a mut FileParseState) -> PlyElementParser<'a, 'r, R> {
// // Calc full element size
// let mut full_element_size = 0u32;
// for p in &element_descriptor.properties {
// full_element_size += p.datatype.byte_size();
// }
let entries_left = element_descriptor.num_entries;
PlyElementParser {
buf_reader: reader,
element_descriptor,
// full_element_size,
// parse_state,
entries_left,
}
}
}
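// Illustrative calling convention (an added sketch, not part of the original
// API): `read_entry` rewinds the reader before reporting `BufferTooSmall`,
// so a caller can grow its buffer to the reported minimum and retry the same
// entry. The helper name below is an assumption made for this sketch.
#[allow(dead_code)]
fn read_entry_growing<'a, 'r: 'a, R: Read + Seek>(
    parser: &mut PlyElementParser<'a, 'r, R>,
    buffer: &mut Vec<u8>,
) -> Result<(), PlyReadError> {
    loop {
        match parser.read_entry(buffer) {
            Err(PlyReadError::BufferTooSmall { min_buffer_size }) => {
                // Grow to the reported minimum and retry the same entry
                buffer.resize(min_buffer_size, 0);
            }
            other => return other,
        }
    }
}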
//mod generic_byteorder {
// use byteorder::{WriteBytesExt, LittleEndian, ByteOrder};
//
// pub trait GenericByteOrder<E: ByteOrder> {
// fn write_into_slice(self, buffer: &mut [u8]);
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for f32 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// E::write_f32(buffer, self)
// }
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for u8 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// buffer[0] = self
// }
// }
//
// impl<E: ByteOrder> GenericByteOrder<E> for u32 {
// fn write_into_slice(self, buffer: &mut [u8]) {
// E::write_u32(buffer, self)
// }
// }
//}
pub enum PlyReadError {
NoMoreEntries,
BufferTooSmall {
min_buffer_size: usize,
},
Other(Box<dyn error::Error>),
}
impl error::Error for PlyReadError {}
impl fmt::Display for PlyReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
use PlyReadError as E;
match self {
E::NoMoreEntries => write!(f, "PlyReadError: No more entries"),
E::BufferTooSmall {min_buffer_size} => write!(f, "PlyReadError: Buffer too small: min size = {}", min_buffer_size),
E::Other(error) => <Box<dyn error::Error> as fmt::Display>::fmt(error, f)
}
}
}
impl fmt::Debug for PlyReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
<Self as fmt::Display>::fmt(self, f)
}
}
pub struct PlyError {
message: &'static str,
}
impl PlyError {
pub fn new(message: &'static str) -> PlyError {
PlyError {
message
}
}
}
impl error::Error for PlyError {}
impl fmt::Display for PlyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "PlyError: {}", self.message)
}
}
impl fmt::Debug for PlyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
<Self as fmt::Display>::fmt(self, f)
}
}
pub fn dump_ply_header(header: &PlyFileHeader) {
for element in &header.elements {
println!("element '{}' {}", element.name, element.num_entries);
for property in &element.properties {
println!(" property '{}' {:?}", property.name, property.datatype)
}
}
}
/*
pub fn test() -> Result<(), Box<dyn error::Error>> {
let mut file = OpenOptions::new().read(true).open(r"C:\Users\Jan\Desktop\Lee Head\Lee Head.ply")?;
let loader = PlyMeshLoader::new(&mut file);
let mut puller = loader.parse_header()?;
dump_ply_header(&puller.file_header);
// let mut puller = RefCell::new(puller);
loop {
// let mut borrowed_puller = puller.borrow_mut();
match puller.next_event() {
PullEvent::Element(mut parser) => {
let mut buffer = [0u8; 32];
let res = parser.read_entry(&mut buffer);
if let Err(PlyReadError::BufferTooSmall {min_buffer_size}) = res {
println!("Buffer too small! (min {})", min_buffer_size);
return Ok(());
}
else if let Ok(_) = res {
let mut pos = 0;
for p in parser.element_descriptor.properties() {
match p.datatype {
PlyDatatype::Scalar(scalar) => {
let final_pos = pos + scalar.byte_size();
match scalar {
PlyScalar::float => {
let val = LittleEndian::read_f32(&buffer[(pos as usize)..(final_pos as usize)]);
println!("f32({})", val);
},
_ => unimplemented!()
}
pos = final_pos;
},
PlyDatatype::List {index, element} => {
}
}
}
}
}
PullEvent::End => break,
}
break;
}
Ok(())
}
*/
| {
s
} | conditional_block |
main.rs
mod xcb_util;
use log::debug;
use crate::xcb_util::{
geometry::*,
window::WindowExt,
};
use std::str;
use anyhow::{
anyhow,
Error,
};
use structopt::StructOpt;
use xcb::{
base as xbase,
randr as xrandr,
xproto,
};
#[derive(StructOpt)]
struct GlobalOptions {}
#[derive(StructOpt)]
struct Fract {
num: f32,
denom: f32,
}
impl Fract {
fn value(&self) -> f32 { self.num / self.denom }
}
impl std::str::FromStr for Fract {
type Err = Error;
fn from_str(s: &str) -> Result<Fract, Error> {
let parts = s.split('/').collect::<Vec<_>>();
if parts.len() != 2 {
return Err(anyhow!("Expected a fraction of the form `num/denom`"));
}
Ok(Fract {
num: f32::from_str(parts[0])?,
denom: f32::from_str(parts[1])?,
})
}
}
struct Geometry<'a> {
pub setup: xproto::Setup<'a>,
pub root_win: xproto::Window,
pub root_win_frame: ScreenRect,
pub srs: xrandr::GetScreenResourcesCurrentReply,
pub display_frames: Vec<ScreenRect>,
pub work_areas: Vec<ScreenRect>,
pub active_window: xproto::Window,
pub active_window_frame: ScreenRect,
pub active_window_insets: ScreenInsets,
}
fn get_geometry(conn: &xbase::Connection) -> Result<Geometry, Error> {
let setup = conn.get_setup();
let screen = setup
.roots()
.next()
.ok_or_else(|| anyhow!("Couldn't unwrap screen 0"))?;
let root_window = screen.root();
let root_window_rect = root_window.get_geometry(&conn)?.as_rect();
let srs = root_window.get_screen_resources_current(&conn)?;
let timestamp = srs.config_timestamp();
let display_frames = srs
.outputs()
.iter()
.filter_map(|o| {
let info = xrandr::get_output_info(&conn, *o, timestamp)
.get_reply()
.ok()?;
match info.connection() as u32 {
xrandr::CONNECTION_CONNECTED => {
let crtc = xrandr::get_crtc_info(&conn, info.crtc(), timestamp)
.get_reply()
.ok()?;
Some(crtc.as_rect())
}
_ => None,
}
})
.collect();
debug!("display_frames: {:?}", display_frames);
let gvec: Vec<i32> =
root_window.get_property(&conn, "_NET_WORKAREA", xproto::ATOM_CARDINAL, 8)?;
debug!("gvec: {:?}", gvec);
let work_area = gvec
.as_slice()
.chunks(4)
.map(|slc| {
ScreenRect::new(
ScreenPoint::new(slc[0] as i32, slc[1] as i32),
ScreenSize::new(slc[2] as i32, slc[3] as i32),
)
})
.collect::<Vec<ScreenRect>>();
debug!("Work area: {:?}", work_area);
use xcb_util::geometry::*;
let active_window: xproto::Window =
root_window.get_property(&conn, "_NET_ACTIVE_WINDOW", xproto::ATOM_WINDOW, 1)?[0];
let mut active_window_frame = active_window.get_geometry(&conn)?.as_rect();
let translated =
xproto::translate_coordinates(&conn, active_window, root_window, 0, 0).get_reply()?;
active_window_frame.origin.x = translated.dst_x() as i32;
active_window_frame.origin.y = translated.dst_y() as i32;
let insets = active_window.get_property(&conn, "_NET_FRAME_EXTENTS", xproto::ATOM_CARDINAL, 4)?;
let insets = if let [left, right, top, bottom] = insets.as_slice() {
ScreenInsets::new(*top, *right, *bottom, *left)
} else {
ScreenInsets::zero()
};
Ok(Geometry {
setup,
root_win: root_window,
root_win_frame: root_window_rect,
srs,
display_frames,
work_areas: work_area,
active_window,
active_window_frame,
active_window_insets: insets,
})
}
#[derive(StructOpt)]
struct MoveWindowOnOutput {
x: Fract,
y: Fract,
w: Fract,
h: Fract,
}
fn inset_frame_by_struts(conn: &xbase::Connection, mut frame: ScreenRect, root_window: xproto::Window) -> Result<ScreenRect, Error> {
let mut queue = vec![root_window];
while let Some(w) = queue.pop() {
let strut: Vec<i32> =
w.get_property(conn, "_NET_WM_STRUT_PARTIAL", xproto::ATOM_CARDINAL, 12)?;
if !strut.is_empty() {
#[derive(Debug)]
struct Strut {
left: i32,
right: i32,
top: i32,
bottom: i32,
left_start_y: i32,
left_end_y: i32,
right_start_y: i32,
right_end_y: i32,
top_start_x: i32,
top_end_x: i32,
bottom_start_x: i32,
bottom_end_x: i32,
}
let strut = Strut {
left: strut[0],
right: strut[1],
top: strut[2],
bottom: strut[3],
left_start_y: strut[4],
left_end_y: strut[5],
right_start_y: strut[6],
right_end_y: strut[7],
top_start_x: strut[8],
top_end_x: strut[9],
bottom_start_x: strut[10],
bottom_end_x: strut[11],
};
// TODO:
// - Check if the strut-lines (NOT the whole rect) are contained within the
// target display frame
// - IF so, adjust the display frame
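// Worked example (illustrative numbers): for a 1920x1080 display frame at
// (0, 0) and a full-width top panel reserving 30px, `strut.top = 30` lies
// inside the frame and its x-span is contained, so the frame is adjusted
// to origin (0, 30) with height 1050 -- the panel area is carved out.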
if strut.top > frame.origin.y
&& strut.top < frame.origin.y + frame.size.height
&& strut.top_start_x >= frame.origin.x
&& strut.top_end_x <= frame.origin.x + frame.size.width
{
let overlap = strut.top - frame.origin.y;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.origin.y += overlap;
frame.size.height -= overlap;
}
if strut.left > frame.origin.x
&& strut.left < frame.origin.x + frame.size.width
&& strut.left_start_y >= frame.origin.y
&& strut.left_end_y <= frame.origin.y + frame.size.height
{
let overlap = strut.left - frame.origin.x;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.origin.x += overlap;
frame.size.width -= overlap;
}
if strut.bottom < frame.origin.y + frame.size.height
&& strut.bottom > frame.origin.y
&& strut.bottom_start_x >= frame.origin.x
&& strut.bottom_end_x <= frame.origin.x + frame.size.width
{
let overlap = frame.origin.y + frame.size.height - strut.bottom;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.size.height -= overlap;
}
if strut.right < frame.origin.x + frame.size.width
&& strut.right > frame.origin.x
&& strut.right_start_y >= frame.origin.y
&& strut.right_end_y <= frame.origin.y + frame.size.height
{
let overlap = frame.origin.x + frame.size.width - strut.right;
debug!("Found strut (overlap: {}): {:#?}", overlap, strut);
frame.size.width -= overlap;
}
}
let mut children = xproto::query_tree(conn, w).get_reply()?.children().to_vec();
queue.append(&mut children);
}
Ok(frame)
}
// TODO (alaroldai):
// Compute "output dimensions" by:
// - Getting the rects of connected outputs
// - Finding all windows that set the _NET_STRUT_PARTIAL
// - FOR EACH, Inset the rect of the containing output if necessary
// - Return the inset outputs.
fn get_output_available_rect(conn: &xbase::Connection) -> Result<ScreenRect, Error> {
let setup = conn.get_setup();
let screen = setup
.roots()
.next()
.ok_or_else(|| anyhow!("Couldn't unwrap screen 0"))?;
let root_window = screen.root();
let active_window: xproto::Window =
root_window.get_property(&conn, "_NET_ACTIVE_WINDOW", xproto::ATOM_WINDOW, 1)?[0];
let mut active_window_frame = dbg!(active_window.get_geometry(&conn)?.as_rect());
let translated =
xproto::translate_coordinates(&conn, active_window, root_window, 0, 0).get_reply()?;
active_window_frame.origin.x = translated.dst_x() as i32;
active_window_frame.origin.y = translated.dst_y() as i32;
let srs = root_window.get_screen_resources_current(&conn)?;
let timestamp = srs.config_timestamp();
let mut display_frame = srs
.outputs()
.iter()
.filter_map(|o| {
let info = xrandr::get_output_info(&conn, *o, timestamp)
.get_reply()
.ok()?;
match info.connection() as u32 {
xrandr::CONNECTION_CONNECTED => {
let crtc = xrandr::get_crtc_info(&conn, info.crtc(), timestamp)
.get_reply()
.ok()?;
Some(crtc.as_rect())
}
_ => None,
}
})
.fold(None, |init: Option<ScreenRect>, frame| {
let new = frame.intersection(&active_window_frame);
debug!(
"{}: {} intersection with {}",
frame,
if new.is_some() { "Some" } else { "No" },
active_window_frame
);
match (new, init) {
(Some(new), Some(old)) if new.area() > old.area() => Some(frame),
(Some(_), None) => Some(frame),
_ => init,
}
})
.unwrap();
display_frame = inset_frame_by_struts(conn, display_frame, root_window)?;
Ok(display_frame)
}
impl MoveWindowOnOutput {
fn run(self, _: GlobalOptions) -> Result<(), Error> {
let (conn, _) = xbase::Connection::connect(None)?;
let display_frame = get_output_available_rect(&conn)?;
let geom = get_geometry(&conn)?;
let pct = DisplayPercentageSpaceRect::new(
DisplayPercentageSpacePoint::new(self.x.value(), self.y.value()),
DisplayPercentageSpaceSize::new(self.w.value(), self.h.value()),
);
let new_rect = pct
.to_rect(display_frame)
.inner_rect(geom.active_window_insets);
dbg!(&new_rect);
// NOTE: Some window managers (Kwin and XFWM, for example) may refuse to
// position windows as requested if they are in a "tiled" or "maximised"
// state. In the case of Kwin, this can be fixed by using a window rule to
// force the "ignore requested geometry" flag to `false`.
geom
.root_win
.move_resize(&conn, geom.active_window, new_rect)?;
Ok(())
}
}
#[derive(StructOpt)]
enum Direction {
North,
South,
East,
West,
}
impl std::str::FromStr for Direction {
type Err = Error;
fn from_str(s: &str) -> Result<Direction, Error> {
match s {
"h" => Ok(Direction::West),
"j" => Ok(Direction::South),
"k" => Ok(Direction::North),
"l" => Ok(Direction::East),
_ => Err(anyhow!("Not a known direction - use hjkl")),
}
}
}
#[derive(StructOpt)]
struct MoveWindowToOutput {
direction: Direction,
}
impl MoveWindowToOutput {
fn run(self, _: GlobalOptions) -> Result<(), Error> {
let (conn, _) = xbase::Connection::connect(None)?;
let mut geom = get_geometry(&conn)?;
let (x, y) = match self.direction {
Direction::West => (-1.0, 0.0),
Direction::South => (0.0, 1.0),
Direction::North => (0.0, -1.0),
Direction::East => (1.0, 0.0),
};
let direction: euclid::Vector2D<f32, ScreenSpace> = euclid::Vector2D::new(x as f32, y as f32);
let current_output_frame = geom
.display_frames
.iter()
.fold(None, |init: Option<ScreenRect>, frame| {
let new = frame.intersection(&geom.active_window_frame);
println!("Found intersection: {:#?}", new);
match (new, init) {
(Some(new), Some(old)) if new.area() > old.area() => Some(*frame),
(Some(_), None) => Some(*frame),
_ => init,
}
})
.and_then(|frame| inset_frame_by_struts(&conn, frame, geom.root_win).ok())
.unwrap();
let new_output_frame = geom
.display_frames
.iter()
.fold(None, |init: Option<ScreenRect>, frame| {
let vec: euclid::Vector2D<f32, ScreenSpace> =
(frame.center() - current_output_frame.center()).cast::<f32>();
let old: Option<euclid::Vector2D<f32, ScreenSpace>> =
init.map(|init| (init.center() - current_output_frame.center()).cast::<f32>());
let projection = vec.dot(direction);
match old {
None if projection > 0.0 => {
println!(
"Starting with output {:#?} / projection {:#?}",
frame, projection
);
Some(*frame)
}
Some(old) if projection < old.dot(direction) && projection > 0.0 => {
println!(
"Replacing projection {} ({}) with {} ({})",
init.unwrap(),
old.dot(direction),
frame,
projection
);
Some(*frame)
}
_ => {
println!(
"Ignoring output {:#?} with projection {:#?}",
frame, projection
);
init
}
}
})
.unwrap();
let new_output_frame = inset_frame_by_struts(&conn, new_output_frame, geom.root_win)?;
dbg!(&geom.active_window_frame);
// geom.active_window_frame = geom.active_window_frame.inner_rect(geom.active_window_insets);
dbg!(&geom.active_window_insets);
dbg!(&current_output_frame);
dbg!(&new_output_frame);
let decorated_source_frame = geom.active_window_frame.outer_rect(geom.active_window_insets);
let pct_rect = decorated_source_frame.as_dps(current_output_frame);
dbg!(&pct_rect);
let decorated_dest_frame = pct_rect.to_rect(new_output_frame);
let bare_dest_frame = decorated_dest_frame.inner_rect(geom.active_window_insets);
dbg!(&bare_dest_frame);
geom
.root_win
.move_resize(&conn, geom.active_window, bare_dest_frame)
}
}
fn main() -> Result<(), Error> {
env_logger::init();
#[derive(StructOpt)]
enum Action {
MoveWindowOnOutput(MoveWindowOnOutput),
MoveWindowToOutput(MoveWindowToOutput),
}
#[derive(StructOpt)]
struct App {
#[structopt(flatten)]
options: GlobalOptions,
#[structopt(subcommand)]
action: Action,
}
impl App {
fn run(self) -> Result<(), Error> {
match self.action {
Action::MoveWindowOnOutput(opts) => opts.run(self.options),
Action::MoveWindowToOutput(opts) => opts.run(self.options),
}
}
}
App::from_args().run()
}
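// Illustrative test for the `Fract` parser above (an added sketch; the
// original file ships no tests).
#[cfg(test)]
mod fract_tests {
    use super::Fract;

    #[test]
    fn parses_slash_separated_fraction() {
        let f: Fract = "3/4".parse().unwrap();
        assert!((f.value() - 0.75).abs() < f32::EPSILON);
    }
}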
lib.rs
// Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod bitvec_serde;
pub mod rleplus;
pub use bitvec;
use bitvec::prelude::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use fnv::FnvHashSet;
use std::iter::FromIterator;
type BitVec = bitvec::prelude::BitVec<Lsb0, u8>;
type Result<T> = std::result::Result<T, &'static str>;
/// Represents a bitfield that tracks bits set at indexes in the range of `u64`.
#[derive(Debug, Clone)]
pub enum BitField {
Encoded {
bv: BitVec,
set: FnvHashSet<u64>,
unset: FnvHashSet<u64>,
},
// TODO would be beneficial in future to only keep encoded bitvec in memory, but comes at a cost
Decoded(BitVec),
}
impl Default for BitField {
fn default() -> Self {
Self::Decoded(BitVec::new())
}
}
impl BitField {
pub fn new() -> Self {
Self::default()
}
/// Generates a new bitfield with a slice of all indexes to set.
pub fn new_from_set(set_bits: &[u64]) -> Self {
let mut vec = match set_bits.iter().max() {
Some(&max) => bitvec![_, u8; 0; max as usize + 1],
None => return Self::new(),
};
// Set all bits in bitfield
for b in set_bits {
vec.set(*b as usize, true);
}
Self::Decoded(vec)
}
/// Sets bit at bit index provided
pub fn set(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
unset.remove(&bit);
set.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
bv.resize(index + 1, false);
}
bv.set(index, true);
}
}
}
/// Removes bit at bit index provided
pub fn unset(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
set.remove(&bit);
unset.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
return;
}
bv.set(index, false);
}
}
}
/// Gets the bit at the given index.
// TODO this probably should not require mut self and RLE decode bits
pub fn get(&mut self, index: u64) -> Result<bool> {
match self {
BitField::Encoded { set, unset, .. } => {
if set.contains(&index) {
return Ok(true);
}
if unset.contains(&index) {
return Ok(false);
}
// Check in encoded for the given bit
// This can be changed to not flush changes
if let Some(true) = self.as_mut_flushed()?.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
BitField::Decoded(bv) => {
if let Some(true) = bv.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
}
}
/// Retrieves the index of the first set bit, and error if invalid encoding or no bits set.
pub fn first(&mut self) -> Result<u64> {
for (i, b) in (0..).zip(self.as_mut_flushed()?.iter()) {
if b == &true {
return Ok(i);
}
}
// No set bit was found; return an error
Err("Bitfield has no set bits")
}
fn retrieve_set_indices<B: FromIterator<u64>>(&mut self, max: usize) -> Result<B> {
let flushed = self.as_mut_flushed()?;
if flushed.count_ones() > max {
return Err("Bits set exceeds max in retrieval");
}
Ok((0..)
.zip(flushed.iter())
.filter_map(|(i, b)| if b == &true { Some(i) } else { None })
.collect())
}
/// Returns a vector of indexes of all set bits
pub fn all(&mut self, max: usize) -> Result<Vec<u64>> {
self.retrieve_set_indices(max)
}
/// Returns a Hash set of indexes of all set bits
pub fn all_set(&mut self, max: usize) -> Result<FnvHashSet<u64>> {
self.retrieve_set_indices(max)
}
pub fn for_each<F>(&mut self, mut callback: F) -> std::result::Result<(), String>
where
F: FnMut(u64) -> std::result::Result<(), String>,
{
let flushed = self.as_mut_flushed()?;
for (i, &b) in (0..).zip(flushed.iter()) {
if b {
callback(i)?;
}
}
Ok(())
}
/// Returns true if there are no bits set, false if any bit is set.
pub fn is_empty(&mut self) -> Result<bool> {
for b in self.as_mut_flushed()?.iter() {
if b == &true {
return Ok(false);
}
}
Ok(true)
}
/// Returns a slice of the bitfield with the start index of set bits
/// and number of bits to include in slice.
pub fn slice(&mut self, start: u64, count: u64) -> Result<BitField> {
if count == 0 {
return Ok(BitField::default());
}
// These conversions aren't ideal, but we aren't supporting 32 bit targets
let mut start = start as usize;
let mut count = count as usize;
let bitvec = self.as_mut_flushed()?;
let mut start_idx: usize = 0;
let mut range: usize = 0;
if start != 0 {
for (i, v) in bitvec.iter().enumerate() {
if v == &true {
start -= 1;
if start == 0 {
start_idx = i + 1;
break;
}
}
}
}
for (i, v) in bitvec[start_idx..].iter().enumerate() {
if v == &true {
count -= 1;
if count == 0 {
range = i + 1;
break;
}
}
}
if count > 0 {
return Err("Not enough bits to index the slice");
}
let mut slice = BitVec::with_capacity(start_idx + range);
slice.resize(start_idx, false);
slice.extend_from_slice(&bitvec[start_idx..start_idx + range]);
Ok(BitField::Decoded(slice))
}
/// Retrieves number of set bits in the bitfield
///
/// This function requires a mutable reference for now to be able to handle the cached
/// changes in the case of an RLE encoded bitfield.
pub fn count(&mut self) -> Result<usize> {
Ok(self.as_mut_flushed()?.count_ones())
}
fn flush(&mut self) -> Result<()> {
if let BitField::Encoded { bv, set, unset } = self {
*self = BitField::Decoded(decode_and_apply_cache(bv, set, unset)?);
}
Ok(())
}
fn into_flushed(mut self) -> Result<BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
fn as_mut_flushed(&mut self) -> Result<&mut BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
/// Merges two bitfields together (equivalent of the bitwise OR `|` operator)
pub fn merge(mut self, other: &Self) -> Result<Self> {
self.merge_assign(other)?;
Ok(self)
}
/// Merges another bitfield into `self` (equivalent of the bitwise OR `|` operator)
pub fn merge_assign(&mut self, other: &Self) -> Result<()> {
let a = self.as_mut_flushed()?;
match other {
BitField::Encoded { bv, set, unset } => {
let v = decode_and_apply_cache(bv, set, unset)?;
bit_or(a, v.into_iter())
}
BitField::Decoded(bv) => bit_or(a, bv.iter().copied()),
}
Ok(())
}
/// Intersection of two bitfields (equivalent of bit AND `&`)
pub fn intersect(mut self, other: &Self) -> Result<Self> {
self.intersect_assign(other)?;
Ok(self)
}
/// Intersection of two bitfields and assigns to self (equivalent of bit AND `&`)
pub fn intersect_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied(),
}
Ok(())
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract(mut self, other: &Self) -> Result<Self> {
self.subtract_assign(other)?;
Ok(self)
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= !decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied().map(|b| !b),
}
Ok(())
}
/// Creates a bitfield which is a union of a vector of bitfields.
pub fn union<'a>(bit_fields: impl IntoIterator<Item = &'a Self>) -> Result<Self> {
let mut ret = Self::default();
for bf in bit_fields.into_iter() {
ret.merge_assign(bf)?;
}
Ok(ret)
}
/// Returns true if BitFields have any overlapping bits.
pub fn contains_any(&mut self, other: &mut BitField) -> Result<bool> {
for (&a, &b) in self
.as_mut_flushed()?
.iter()
.zip(other.as_mut_flushed()?.iter())
{
if a && b {
return Ok(true);
}
}
Ok(false)
}
/// Returns true if the self `BitField` has all the bits set in the other `BitField`.
pub fn contains_all(&mut self, other: &mut BitField) -> Result<bool> {
let a_bf = self.as_mut_flushed()?;
let b_bf = other.as_mut_flushed()?;
// Checking lengths should be sufficient in most cases, but does not take into account
// decoded bitfields with extra 0 bits. This makes sure there are no extra bits in the
// extension.
if b_bf.len() > a_bf.len() && b_bf[a_bf.len()..].count_ones() > 0 {
return Ok(false);
}
for (a, b) in a_bf.iter().zip(b_bf.iter()) {
if *b && !a {
return Ok(false);
}
}
Ok(true)
}
}
fn bit_or<I>(a: &mut BitVec, mut b: I)
where
I: Iterator<Item = bool>,
{
for mut a_i in a.iter_mut() {
match b.next() {
Some(true) => *a_i = true,
Some(false) => (),
None => return,
}
}
a.extend(b);
}
fn decode_and_apply_cache(
bit_vec: &BitVec,
set: &FnvHashSet<u64>,
unset: &FnvHashSet<u64>,
) -> Result<BitVec> {
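// Note (added comment): `set` is applied before `unset` below, but the two
// caches are kept disjoint by `BitField::set`/`BitField::unset`, so the
// relative order is not observable through the public API.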
let mut decoded = rleplus::decode(bit_vec)?;
// Resize before setting any values
if let Some(&max) = set.iter().max() {
let max = max as usize;
if max >= decoded.len() {
decoded.resize(max + 1, false);
}
};
// Set all values in the cache
for &b in set.iter() {
decoded.set(b as usize, true);
}
// Unset all values from the encoded cache
for &b in unset.iter() {
decoded.set(b as usize, false);
}
Ok(decoded)
}
impl AsRef<BitField> for BitField {
fn as_ref(&self) -> &Self {
self
}
}
impl From<BitVec> for BitField {
fn from(b: BitVec) -> Self {
Self::Decoded(b)
}
}
impl<B> BitOr<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: B) -> Self {
self.merge(rhs.as_ref()).unwrap()
}
}
impl<B> BitOrAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitor_assign(&mut self, rhs: B) {
self.merge_assign(rhs.as_ref()).unwrap()
}
}
impl<B> BitAnd<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: B) -> Self::Output {
self.intersect(rhs.as_ref()).unwrap()
}
}
impl<B> BitAndAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitand_assign(&mut self, rhs: B) {
self.intersect_assign(rhs.as_ref()).unwrap()
}
}
impl Not for BitField {
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self::Decoded(!self.into_flushed().unwrap())
}
}
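// Minimal sanity checks (added as an illustration; not from the original
// source). They exercise only the documented set/unset/merge semantics.
#[cfg(test)]
mod example_tests {
    use super::*;

    #[test]
    fn set_unset_roundtrip() {
        let mut bf = BitField::new_from_set(&[1, 3]);
        assert!(bf.get(3).unwrap());
        bf.unset(3);
        assert!(!bf.get(3).unwrap());
        assert_eq!(bf.count().unwrap(), 1);
    }

    #[test]
    fn merge_is_bitwise_or() {
        let a = BitField::new_from_set(&[0, 2]);
        let b = BitField::new_from_set(&[1, 2]);
        let mut merged = a.merge(&b).unwrap();
        assert_eq!(merged.all(8).unwrap(), vec![0, 1, 2]);
    }
}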
lib.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod bitvec_serde;
pub mod rleplus;
pub use bitvec;
use bitvec::prelude::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use fnv::FnvHashSet;
use std::iter::FromIterator;
type BitVec = bitvec::prelude::BitVec<Lsb0, u8>;
type Result<T> = std::result::Result<T, &'static str>;
/// Represents a bitfield to track bits set at indexes in the range of `u64`.
#[derive(Debug, Clone)]
pub enum BitField {
Encoded {
bv: BitVec,
set: FnvHashSet<u64>,
unset: FnvHashSet<u64>,
},
// TODO would be beneficial in future to only keep encoded bitvec in memory, but comes at a cost
Decoded(BitVec),
}
impl Default for BitField {
fn | () -> Self {
Self::Decoded(BitVec::new())
}
}
impl BitField {
pub fn new() -> Self {
Self::default()
}
/// Generates a new bitfield with a slice of all indexes to set.
pub fn new_from_set(set_bits: &[u64]) -> Self {
let mut vec = match set_bits.iter().max() {
Some(&max) => bitvec![_, u8; 0; max as usize + 1],
None => return Self::new(),
};
// Set all bits in bitfield
for b in set_bits {
vec.set(*b as usize, true);
}
Self::Decoded(vec)
}
/// Sets bit at bit index provided
pub fn set(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
unset.remove(&bit);
set.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
bv.resize(index + 1, false);
}
bv.set(index, true);
}
}
}
/// Removes bit at bit index provided
pub fn unset(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
set.remove(&bit);
unset.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
return;
}
bv.set(index, false);
}
}
}
/// Gets the bit at the given index.
// TODO this probably should not require mut self and RLE decode bits
pub fn get(&mut self, index: u64) -> Result<bool> {
match self {
BitField::Encoded { set, unset, .. } => {
if set.contains(&index) {
return Ok(true);
}
if unset.contains(&index) {
return Ok(false);
}
// Check in encoded for the given bit
// This can be changed to not flush changes
if let Some(true) = self.as_mut_flushed()?.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
BitField::Decoded(bv) => {
if let Some(true) = bv.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
}
}
/// Retrieves the index of the first set bit, and error if invalid encoding or no bits set.
pub fn first(&mut self) -> Result<u64> {
for (i, b) in (0..).zip(self.as_mut_flushed()?.iter()) {
if b == &true {
return Ok(i);
}
}
// Return error if none found, not ideal but no reason not to match
Err("Bitfield has no set bits")
}
fn retrieve_set_indices<B: FromIterator<u64>>(&mut self, max: usize) -> Result<B> {
let flushed = self.as_mut_flushed()?;
if flushed.count_ones() > max {
return Err("Bits set exceeds max in retrieval");
}
Ok((0..)
.zip(flushed.iter())
.filter_map(|(i, b)| if b == &true { Some(i) } else { None })
.collect())
}
/// Returns a vector of indexes of all set bits
pub fn all(&mut self, max: usize) -> Result<Vec<u64>> {
self.retrieve_set_indices(max)
}
/// Returns a Hash set of indexes of all set bits
pub fn all_set(&mut self, max: usize) -> Result<FnvHashSet<u64>> {
self.retrieve_set_indices(max)
}
pub fn for_each<F>(&mut self, mut callback: F) -> std::result::Result<(), String>
where
F: FnMut(u64) -> std::result::Result<(), String>,
{
let flushed = self.as_mut_flushed()?;
for (i, &b) in (0..).zip(flushed.iter()) {
if b {
callback(i)?;
}
}
Ok(())
}
/// Returns true if there are no bits set, false if the bitfield is empty.
pub fn is_empty(&mut self) -> Result<bool> {
for b in self.as_mut_flushed()?.iter() {
if b == &true {
return Ok(false);
}
}
Ok(true)
}
/// Returns a slice of the bitfield with the start index of set bits
/// and number of bits to include in slice.
pub fn slice(&mut self, start: u64, count: u64) -> Result<BitField> {
if count == 0 {
return Ok(BitField::default());
}
// These conversions aren't ideal, but we aren't supporting 32 bit targets
let mut start = start as usize;
let mut count = count as usize;
let bitvec = self.as_mut_flushed()?;
let mut start_idx: usize = 0;
let mut range: usize = 0;
if start != 0 {
for (i, v) in bitvec.iter().enumerate() {
if v == &true {
start -= 1;
if start == 0 {
start_idx = i + 1;
break;
}
}
}
}
for (i, v) in bitvec[start_idx..].iter().enumerate() {
if v == &true {
count -= 1;
if count == 0 {
range = i + 1;
break;
}
}
}
if count > 0 {
return Err("Not enough bits to index the slice");
}
let mut slice = BitVec::with_capacity(start_idx + range);
slice.resize(start_idx, false);
slice.extend_from_slice(&bitvec[start_idx..start_idx + range]);
Ok(BitField::Decoded(slice))
}
/// Retrieves number of set bits in the bitfield
///
/// This function requires a mutable reference for now to be able to handle the cached
/// changes in the case of an RLE encoded bitfield.
pub fn count(&mut self) -> Result<usize> {
Ok(self.as_mut_flushed()?.count_ones())
}
fn flush(&mut self) -> Result<()> {
if let BitField::Encoded { bv, set, unset } = self {
*self = BitField::Decoded(decode_and_apply_cache(bv, set, unset)?);
}
Ok(())
}
fn into_flushed(mut self) -> Result<BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
fn as_mut_flushed(&mut self) -> Result<&mut BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
/// Merges to bitfields together (equivalent of bitwise OR `|` operator)
pub fn merge(mut self, other: &Self) -> Result<Self> {
self.merge_assign(other)?;
Ok(self)
}
/// Merges to bitfields into `self` (equivalent of bitwise OR `|` operator)
pub fn merge_assign(&mut self, other: &Self) -> Result<()> {
let a = self.as_mut_flushed()?;
match other {
BitField::Encoded { bv, set, unset } => {
let v = decode_and_apply_cache(bv, set, unset)?;
bit_or(a, v.into_iter())
}
BitField::Decoded(bv) => bit_or(a, bv.iter().copied()),
}
Ok(())
}
/// Intersection of two bitfields (equivalent of bit AND `&`)
pub fn intersect(mut self, other: &Self) -> Result<Self> {
self.intersect_assign(other)?;
Ok(self)
}
/// Intersection of two bitfields and assigns to self (equivalent of bit AND `&`)
pub fn intersect_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied(),
}
Ok(())
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract(mut self, other: &Self) -> Result<Self> {
self.subtract_assign(other)?;
Ok(self)
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= !decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied().map(|b| !b),
}
Ok(())
}
/// Creates a bitfield which is a union of a vector of bitfields.
pub fn union<'a>(bit_fields: impl IntoIterator<Item = &'a Self>) -> Result<Self> {
let mut ret = Self::default();
for bf in bit_fields.into_iter() {
ret.merge_assign(bf)?;
}
Ok(ret)
}
/// Returns true if BitFields have any overlapping bits.
pub fn contains_any(&mut self, other: &mut BitField) -> Result<bool> {
for (&a, &b) in self
.as_mut_flushed()?
.iter()
.zip(other.as_mut_flushed()?.iter())
{
if a && b {
return Ok(true);
}
}
Ok(false)
}
/// Returns true if the self `BitField` has all the bits set in the other `BitField`.
pub fn contains_all(&mut self, other: &mut BitField) -> Result<bool> {
let a_bf = self.as_mut_flushed()?;
let b_bf = other.as_mut_flushed()?;
// Checking lengths should be sufficient in most cases, but does not take into account
// decoded bitfields with extra 0 bits. This makes sure there are no extra bits in the
// extension.
if b_bf.len() > a_bf.len() && b_bf[a_bf.len()..].count_ones() > 0 {
return Ok(false);
}
for (a, b) in a_bf.iter().zip(b_bf.iter()) {
if *b && !a {
return Ok(false);
}
}
Ok(true)
}
}
fn bit_or<I>(a: &mut BitVec, mut b: I)
where
I: Iterator<Item = bool>,
{
for mut a_i in a.iter_mut() {
match b.next() {
Some(true) => *a_i = true,
Some(false) => (),
None => return,
}
}
a.extend(b);
}
fn decode_and_apply_cache(
bit_vec: &BitVec,
set: &FnvHashSet<u64>,
unset: &FnvHashSet<u64>,
) -> Result<BitVec> {
let mut decoded = rleplus::decode(bit_vec)?;
// Resize before setting any values
if let Some(&max) = set.iter().max() {
let max = max as usize;
if max >= bit_vec.len() {
decoded.resize(max + 1, false);
}
};
// Set all values in the cache
for &b in set.iter() {
decoded.set(b as usize, true);
}
// Unset all values from the encoded cache
for &b in unset.iter() {
decoded.set(b as usize, false);
}
Ok(decoded)
}
impl AsRef<BitField> for BitField {
fn as_ref(&self) -> &Self {
self
}
}
impl From<BitVec> for BitField {
fn from(b: BitVec) -> Self {
Self::Decoded(b)
}
}
impl<B> BitOr<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: B) -> Self {
self.merge(rhs.as_ref()).unwrap()
}
}
impl<B> BitOrAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitor_assign(&mut self, rhs: B) {
self.merge_assign(rhs.as_ref()).unwrap()
}
}
impl<B> BitAnd<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: B) -> Self::Output {
self.intersect(rhs.as_ref()).unwrap()
}
}
impl<B> BitAndAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitand_assign(&mut self, rhs: B) {
self.intersect_assign(rhs.as_ref()).unwrap()
}
}
impl Not for BitField {
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self::Decoded(!self.into_flushed().unwrap())
}
}
| default | identifier_name |
lib.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod bitvec_serde;
pub mod rleplus;
pub use bitvec;
use bitvec::prelude::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use fnv::FnvHashSet;
use std::iter::FromIterator;
type BitVec = bitvec::prelude::BitVec<Lsb0, u8>;
type Result<T> = std::result::Result<T, &'static str>;
/// Represents a bitfield to track bits set at indexes in the range of `u64`.
#[derive(Debug, Clone)]
pub enum BitField {
Encoded {
bv: BitVec,
set: FnvHashSet<u64>,
unset: FnvHashSet<u64>,
},
// TODO would be beneficial in future to only keep encoded bitvec in memory, but comes at a cost
Decoded(BitVec),
}
impl Default for BitField {
fn default() -> Self {
Self::Decoded(BitVec::new())
}
}
impl BitField {
pub fn new() -> Self {
Self::default()
}
/// Generates a new bitfield with a slice of all indexes to set.
pub fn new_from_set(set_bits: &[u64]) -> Self {
let mut vec = match set_bits.iter().max() {
Some(&max) => bitvec![_, u8; 0; max as usize + 1],
None => return Self::new(),
};
// Set all bits in bitfield
for b in set_bits {
vec.set(*b as usize, true);
}
Self::Decoded(vec)
}
/// Sets bit at bit index provided
pub fn set(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
unset.remove(&bit);
set.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
bv.resize(index + 1, false);
}
bv.set(index, true);
}
}
}
/// Removes bit at bit index provided
pub fn unset(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
set.remove(&bit);
unset.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
return;
}
bv.set(index, false);
}
}
}
/// Gets the bit at the given index. | // TODO this probably should not require mut self and RLE decode bits
pub fn get(&mut self, index: u64) -> Result<bool> {
match self {
BitField::Encoded { set, unset, .. } => {
if set.contains(&index) {
return Ok(true);
}
if unset.contains(&index) {
return Ok(false);
}
// Check in encoded for the given bit
// This can be changed to not flush changes
if let Some(true) = self.as_mut_flushed()?.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
BitField::Decoded(bv) => {
if let Some(true) = bv.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
}
}
/// Retrieves the index of the first set bit, and error if invalid encoding or no bits set.
pub fn first(&mut self) -> Result<u64> {
for (i, b) in (0..).zip(self.as_mut_flushed()?.iter()) {
if b == &true {
return Ok(i);
}
}
// Return error if none found, not ideal but no reason not to match
Err("Bitfield has no set bits")
}
fn retrieve_set_indices<B: FromIterator<u64>>(&mut self, max: usize) -> Result<B> {
let flushed = self.as_mut_flushed()?;
if flushed.count_ones() > max {
return Err("Bits set exceeds max in retrieval");
}
Ok((0..)
.zip(flushed.iter())
.filter_map(|(i, b)| if b == &true { Some(i) } else { None })
.collect())
}
/// Returns a vector of indexes of all set bits
pub fn all(&mut self, max: usize) -> Result<Vec<u64>> {
self.retrieve_set_indices(max)
}
/// Returns a Hash set of indexes of all set bits
pub fn all_set(&mut self, max: usize) -> Result<FnvHashSet<u64>> {
self.retrieve_set_indices(max)
}
pub fn for_each<F>(&mut self, mut callback: F) -> std::result::Result<(), String>
where
F: FnMut(u64) -> std::result::Result<(), String>,
{
let flushed = self.as_mut_flushed()?;
for (i, &b) in (0..).zip(flushed.iter()) {
if b {
callback(i)?;
}
}
Ok(())
}
/// Returns true if there are no bits set, false if the bitfield is empty.
pub fn is_empty(&mut self) -> Result<bool> {
for b in self.as_mut_flushed()?.iter() {
if b == &true {
return Ok(false);
}
}
Ok(true)
}
/// Returns a slice of the bitfield with the start index of set bits
/// and number of bits to include in slice.
pub fn slice(&mut self, start: u64, count: u64) -> Result<BitField> {
if count == 0 {
return Ok(BitField::default());
}
// These conversions aren't ideal, but we aren't supporting 32 bit targets
let mut start = start as usize;
let mut count = count as usize;
let bitvec = self.as_mut_flushed()?;
let mut start_idx: usize = 0;
let mut range: usize = 0;
if start != 0 {
for (i, v) in bitvec.iter().enumerate() {
if v == &true {
start -= 1;
if start == 0 {
start_idx = i + 1;
break;
}
}
}
}
for (i, v) in bitvec[start_idx..].iter().enumerate() {
if v == &true {
count -= 1;
if count == 0 {
range = i + 1;
break;
}
}
}
if count > 0 {
return Err("Not enough bits to index the slice");
}
let mut slice = BitVec::with_capacity(start_idx + range);
slice.resize(start_idx, false);
slice.extend_from_slice(&bitvec[start_idx..start_idx + range]);
Ok(BitField::Decoded(slice))
}
/// Retrieves number of set bits in the bitfield
///
/// This function requires a mutable reference for now to be able to handle the cached
/// changes in the case of an RLE encoded bitfield.
pub fn count(&mut self) -> Result<usize> {
Ok(self.as_mut_flushed()?.count_ones())
}
fn flush(&mut self) -> Result<()> {
if let BitField::Encoded { bv, set, unset } = self {
*self = BitField::Decoded(decode_and_apply_cache(bv, set, unset)?);
}
Ok(())
}
fn into_flushed(mut self) -> Result<BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
fn as_mut_flushed(&mut self) -> Result<&mut BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
/// Merges to bitfields together (equivalent of bitwise OR `|` operator)
pub fn merge(mut self, other: &Self) -> Result<Self> {
self.merge_assign(other)?;
Ok(self)
}
/// Merges to bitfields into `self` (equivalent of bitwise OR `|` operator)
pub fn merge_assign(&mut self, other: &Self) -> Result<()> {
let a = self.as_mut_flushed()?;
match other {
BitField::Encoded { bv, set, unset } => {
let v = decode_and_apply_cache(bv, set, unset)?;
bit_or(a, v.into_iter())
}
BitField::Decoded(bv) => bit_or(a, bv.iter().copied()),
}
Ok(())
}
/// Intersection of two bitfields (equivalent of bit AND `&`)
pub fn intersect(mut self, other: &Self) -> Result<Self> {
self.intersect_assign(other)?;
Ok(self)
}
/// Intersection of two bitfields and assigns to self (equivalent of bit AND `&`)
pub fn intersect_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied(),
}
Ok(())
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract(mut self, other: &Self) -> Result<Self> {
self.subtract_assign(other)?;
Ok(self)
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= !decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied().map(|b| !b),
}
Ok(())
}
/// Creates a bitfield which is a union of a vector of bitfields.
pub fn union<'a>(bit_fields: impl IntoIterator<Item = &'a Self>) -> Result<Self> {
let mut ret = Self::default();
for bf in bit_fields.into_iter() {
ret.merge_assign(bf)?;
}
Ok(ret)
}
/// Returns true if BitFields have any overlapping bits.
pub fn contains_any(&mut self, other: &mut BitField) -> Result<bool> {
for (&a, &b) in self
.as_mut_flushed()?
.iter()
.zip(other.as_mut_flushed()?.iter())
{
if a && b {
return Ok(true);
}
}
Ok(false)
}
/// Returns true if the self `BitField` has all the bits set in the other `BitField`.
pub fn contains_all(&mut self, other: &mut BitField) -> Result<bool> {
let a_bf = self.as_mut_flushed()?;
let b_bf = other.as_mut_flushed()?;
// Checking lengths should be sufficient in most cases, but does not take into account
// decoded bitfields with extra 0 bits. This makes sure there are no extra bits in the
// extension.
if b_bf.len() > a_bf.len() && b_bf[a_bf.len()..].count_ones() > 0 {
return Ok(false);
}
for (a, b) in a_bf.iter().zip(b_bf.iter()) {
if *b && !a {
return Ok(false);
}
}
Ok(true)
}
}
fn bit_or<I>(a: &mut BitVec, mut b: I)
where
I: Iterator<Item = bool>,
{
for mut a_i in a.iter_mut() {
match b.next() {
Some(true) => *a_i = true,
Some(false) => (),
None => return,
}
}
a.extend(b);
}
fn decode_and_apply_cache(
bit_vec: &BitVec,
set: &FnvHashSet<u64>,
unset: &FnvHashSet<u64>,
) -> Result<BitVec> {
let mut decoded = rleplus::decode(bit_vec)?;
// Resize before setting any values
if let Some(&max) = set.iter().max() {
let max = max as usize;
        if max >= decoded.len() {
            // Compare against the decoded length (not the encoded `bit_vec`),
            // so the buffer always grows enough to hold the cached set bits
            decoded.resize(max + 1, false);
}
};
// Set all values in the cache
for &b in set.iter() {
decoded.set(b as usize, true);
}
// Unset all values from the encoded cache
for &b in unset.iter() {
decoded.set(b as usize, false);
}
Ok(decoded)
}
impl AsRef<BitField> for BitField {
fn as_ref(&self) -> &Self {
self
}
}
impl From<BitVec> for BitField {
fn from(b: BitVec) -> Self {
Self::Decoded(b)
}
}
impl<B> BitOr<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: B) -> Self {
self.merge(rhs.as_ref()).unwrap()
}
}
impl<B> BitOrAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitor_assign(&mut self, rhs: B) {
self.merge_assign(rhs.as_ref()).unwrap()
}
}
impl<B> BitAnd<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: B) -> Self::Output {
self.intersect(rhs.as_ref()).unwrap()
}
}
impl<B> BitAndAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitand_assign(&mut self, rhs: B) {
self.intersect_assign(rhs.as_ref()).unwrap()
}
}
impl Not for BitField {
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self::Decoded(!self.into_flushed().unwrap())
}
} | random_line_split |
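The Encoded variant above batches set/unset updates in hash sets and only applies them on flush. As an illustration of that cache-apply step (Python is used for all sketches in this dump; apply_cache is a hypothetical helper standing in for decode_and_apply_cache, with a plain list of bools in place of the RLE+-decoded BitVec):

def apply_cache(decoded, set_bits, unset_bits):
    # Grow the buffer first so every cached set index is addressable
    if set_bits:
        top = max(set_bits)
        if top >= len(decoded):
            decoded = decoded + [False] * (top + 1 - len(decoded))
    # Apply the cached updates in the same order as the Rust code: set, then unset
    for b in set_bits:
        decoded[b] = True
    for b in unset_bits:
        decoded[b] = False
    return decoded

assert apply_cache([True, False], {3}, {0}) == [False, False, False, True]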
|
lib.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod bitvec_serde;
pub mod rleplus;
pub use bitvec;
use bitvec::prelude::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use fnv::FnvHashSet;
use std::iter::FromIterator;
type BitVec = bitvec::prelude::BitVec<Lsb0, u8>;
type Result<T> = std::result::Result<T, &'static str>;
/// Represents a bitfield to track bits set at indexes in the range of `u64`.
#[derive(Debug, Clone)]
pub enum BitField {
Encoded {
bv: BitVec,
set: FnvHashSet<u64>,
unset: FnvHashSet<u64>,
},
// TODO would be beneficial in future to only keep encoded bitvec in memory, but comes at a cost
Decoded(BitVec),
}
impl Default for BitField {
fn default() -> Self {
Self::Decoded(BitVec::new())
}
}
impl BitField {
pub fn new() -> Self {
Self::default()
}
    /// Generates a new bitfield from a slice of indexes to set.
pub fn new_from_set(set_bits: &[u64]) -> Self {
let mut vec = match set_bits.iter().max() {
Some(&max) => bitvec![_, u8; 0; max as usize + 1],
None => return Self::new(),
};
// Set all bits in bitfield
for b in set_bits {
vec.set(*b as usize, true);
}
Self::Decoded(vec)
}
/// Sets bit at bit index provided
pub fn set(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
unset.remove(&bit);
set.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
bv.resize(index + 1, false);
}
bv.set(index, true);
}
}
}
/// Removes bit at bit index provided
pub fn unset(&mut self, bit: u64) {
match self {
BitField::Encoded { set, unset, .. } => {
set.remove(&bit);
unset.insert(bit);
}
BitField::Decoded(bv) => {
let index = bit as usize;
if bv.len() <= index {
return;
}
bv.set(index, false);
}
}
}
/// Gets the bit at the given index.
// TODO this probably should not require mut self and RLE decode bits
pub fn get(&mut self, index: u64) -> Result<bool> {
match self {
BitField::Encoded { set, unset, .. } => {
if set.contains(&index) {
return Ok(true);
}
if unset.contains(&index) {
return Ok(false);
}
// Check in encoded for the given bit
// This can be changed to not flush changes
if let Some(true) = self.as_mut_flushed()?.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
BitField::Decoded(bv) => {
if let Some(true) = bv.get(index as usize) {
Ok(true)
} else {
Ok(false)
}
}
}
}
    /// Retrieves the index of the first set bit; errors if the encoding is invalid or no bits are set.
pub fn first(&mut self) -> Result<u64> {
for (i, b) in (0..).zip(self.as_mut_flushed()?.iter()) {
if b == &true {
return Ok(i);
}
}
        // Return an error if no set bit is found (not ideal, but callers can match on it)
Err("Bitfield has no set bits")
}
fn retrieve_set_indices<B: FromIterator<u64>>(&mut self, max: usize) -> Result<B> {
let flushed = self.as_mut_flushed()?;
if flushed.count_ones() > max {
return Err("Bits set exceeds max in retrieval");
}
Ok((0..)
.zip(flushed.iter())
.filter_map(|(i, b)| if b == &true { Some(i) } else { None })
.collect())
}
/// Returns a vector of indexes of all set bits
pub fn all(&mut self, max: usize) -> Result<Vec<u64>> {
self.retrieve_set_indices(max)
}
/// Returns a Hash set of indexes of all set bits
pub fn all_set(&mut self, max: usize) -> Result<FnvHashSet<u64>> {
self.retrieve_set_indices(max)
}
pub fn for_each<F>(&mut self, mut callback: F) -> std::result::Result<(), String>
where
F: FnMut(u64) -> std::result::Result<(), String>,
{
let flushed = self.as_mut_flushed()?;
for (i, &b) in (0..).zip(flushed.iter()) {
if b {
callback(i)?;
}
}
Ok(())
}
    /// Returns true if there are no bits set, false otherwise.
pub fn is_empty(&mut self) -> Result<bool> {
for b in self.as_mut_flushed()?.iter() {
if b == &true {
return Ok(false);
}
}
Ok(true)
}
    /// Returns a slice of the bitfield, skipping the first `start` set bits
    /// and spanning the next `count` set bits.
pub fn slice(&mut self, start: u64, count: u64) -> Result<BitField> {
if count == 0 {
return Ok(BitField::default());
}
// These conversions aren't ideal, but we aren't supporting 32 bit targets
let mut start = start as usize;
let mut count = count as usize;
let bitvec = self.as_mut_flushed()?;
let mut start_idx: usize = 0;
let mut range: usize = 0;
if start != 0 {
for (i, v) in bitvec.iter().enumerate() {
if v == &true {
start -= 1;
if start == 0 {
start_idx = i + 1;
break;
}
}
}
}
for (i, v) in bitvec[start_idx..].iter().enumerate() {
if v == &true {
count -= 1;
if count == 0 {
range = i + 1;
break;
}
}
}
if count > 0 {
return Err("Not enough bits to index the slice");
}
let mut slice = BitVec::with_capacity(start_idx + range);
slice.resize(start_idx, false);
slice.extend_from_slice(&bitvec[start_idx..start_idx + range]);
Ok(BitField::Decoded(slice))
}
/// Retrieves number of set bits in the bitfield
///
/// This function requires a mutable reference for now to be able to handle the cached
/// changes in the case of an RLE encoded bitfield.
pub fn count(&mut self) -> Result<usize> {
Ok(self.as_mut_flushed()?.count_ones())
}
fn flush(&mut self) -> Result<()> {
if let BitField::Encoded { bv, set, unset } = self {
*self = BitField::Decoded(decode_and_apply_cache(bv, set, unset)?);
}
Ok(())
}
fn into_flushed(mut self) -> Result<BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
fn as_mut_flushed(&mut self) -> Result<&mut BitVec> {
self.flush()?;
match self {
BitField::Decoded(bv) => Ok(bv),
// Unreachable because flushed before this.
_ => unreachable!(),
}
}
    /// Merges two bitfields together (equivalent of bitwise OR `|` operator)
pub fn merge(mut self, other: &Self) -> Result<Self> {
self.merge_assign(other)?;
Ok(self)
}
    /// Merges another bitfield into `self` (equivalent of bitwise OR `|` operator)
pub fn merge_assign(&mut self, other: &Self) -> Result<()> {
let a = self.as_mut_flushed()?;
match other {
BitField::Encoded { bv, set, unset } => {
let v = decode_and_apply_cache(bv, set, unset)?;
bit_or(a, v.into_iter())
}
BitField::Decoded(bv) => bit_or(a, bv.iter().copied()),
}
Ok(())
}
/// Intersection of two bitfields (equivalent of bit AND `&`)
pub fn intersect(mut self, other: &Self) -> Result<Self> |
/// Intersection of two bitfields and assigns to self (equivalent of bit AND `&`)
pub fn intersect_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied(),
}
Ok(())
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract(mut self, other: &Self) -> Result<Self> {
self.subtract_assign(other)?;
Ok(self)
}
/// Subtract other bitfield from self (equivalent of `a & !b`)
pub fn subtract_assign(&mut self, other: &Self) -> Result<()> {
match other {
BitField::Encoded { bv, set, unset } => {
*self.as_mut_flushed()? &= !decode_and_apply_cache(bv, set, unset)?
}
BitField::Decoded(bv) => *self.as_mut_flushed()? &= bv.iter().copied().map(|b| !b),
}
Ok(())
}
    /// Creates a bitfield which is the union of an iterator of bitfields.
pub fn union<'a>(bit_fields: impl IntoIterator<Item = &'a Self>) -> Result<Self> {
let mut ret = Self::default();
for bf in bit_fields.into_iter() {
ret.merge_assign(bf)?;
}
Ok(ret)
}
/// Returns true if BitFields have any overlapping bits.
pub fn contains_any(&mut self, other: &mut BitField) -> Result<bool> {
for (&a, &b) in self
.as_mut_flushed()?
.iter()
.zip(other.as_mut_flushed()?.iter())
{
if a && b {
return Ok(true);
}
}
Ok(false)
}
/// Returns true if the self `BitField` has all the bits set in the other `BitField`.
pub fn contains_all(&mut self, other: &mut BitField) -> Result<bool> {
let a_bf = self.as_mut_flushed()?;
let b_bf = other.as_mut_flushed()?;
// Checking lengths should be sufficient in most cases, but does not take into account
// decoded bitfields with extra 0 bits. This makes sure there are no extra bits in the
// extension.
if b_bf.len() > a_bf.len() && b_bf[a_bf.len()..].count_ones() > 0 {
return Ok(false);
}
for (a, b) in a_bf.iter().zip(b_bf.iter()) {
if *b && !a {
return Ok(false);
}
}
Ok(true)
}
}
fn bit_or<I>(a: &mut BitVec, mut b: I)
where
I: Iterator<Item = bool>,
{
for mut a_i in a.iter_mut() {
match b.next() {
Some(true) => *a_i = true,
Some(false) => (),
None => return,
}
}
a.extend(b);
}
fn decode_and_apply_cache(
bit_vec: &BitVec,
set: &FnvHashSet<u64>,
unset: &FnvHashSet<u64>,
) -> Result<BitVec> {
let mut decoded = rleplus::decode(bit_vec)?;
// Resize before setting any values
if let Some(&max) = set.iter().max() {
let max = max as usize;
        if max >= decoded.len() {
            // Compare against the decoded length (not the encoded `bit_vec`),
            // so the buffer always grows enough to hold the cached set bits
            decoded.resize(max + 1, false);
}
};
// Set all values in the cache
for &b in set.iter() {
decoded.set(b as usize, true);
}
// Unset all values from the encoded cache
for &b in unset.iter() {
decoded.set(b as usize, false);
}
Ok(decoded)
}
impl AsRef<BitField> for BitField {
fn as_ref(&self) -> &Self {
self
}
}
impl From<BitVec> for BitField {
fn from(b: BitVec) -> Self {
Self::Decoded(b)
}
}
impl<B> BitOr<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitor(self, rhs: B) -> Self {
self.merge(rhs.as_ref()).unwrap()
}
}
impl<B> BitOrAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitor_assign(&mut self, rhs: B) {
self.merge_assign(rhs.as_ref()).unwrap()
}
}
impl<B> BitAnd<B> for BitField
where
B: AsRef<Self>,
{
type Output = Self;
#[inline]
fn bitand(self, rhs: B) -> Self::Output {
self.intersect(rhs.as_ref()).unwrap()
}
}
impl<B> BitAndAssign<B> for BitField
where
B: AsRef<Self>,
{
#[inline]
fn bitand_assign(&mut self, rhs: B) {
self.intersect_assign(rhs.as_ref()).unwrap()
}
}
impl Not for BitField {
type Output = Self;
#[inline]
fn not(self) -> Self::Output {
Self::Decoded(!self.into_flushed().unwrap())
}
}
| {
self.intersect_assign(other)?;
Ok(self)
} | identifier_body |
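Note how bit_or above never truncates: the shared prefix is OR-ed in place and whatever remains of the second operand is appended. A rough Python model of that semantics (hypothetical bit_or over bool lists, not the Rust implementation):

def bit_or(a, b):
    # OR the overlapping positions, then keep the tail of the longer operand
    out = [x or y for x, y in zip(a, b)]
    longer = a if len(a) >= len(b) else b
    return out + longer[len(out):]

assert bit_or([True, False], [False, True, True]) == [True, True, True]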
pymines.py | #!/usr/bin/env python3
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
__all__ = ['Mines']
class _CoordsFormatter():
"""
Formats coordinates in the interactive plot mode
"""
def __init__(self, width, height):
self.width = width
self.height = height
def __call__(self, x, y):
string = ''
try:
i = int(round(y))
j = int(round(x))
if i >= 0 and i < self.height and j >= 0 and j < self.width:
string = ' i = {}, j = {}'.format(i, j)
except Exception:
pass
return string
class Mines:
"""
Minesweeper
Parameters
----------
width : int
Width of minefield
height : int
Height of minefield
n_mines : int
Number of mines
show : bool (optional)
If True, displays game when initialized
"""
# Colormap object used for showing wrong cells
cmap_reds_alpha = LinearSegmentedColormap.from_list(name='Reds_alpha',
colors=[[0, 0, 0, 0], [.9, 0, 0, 1]])
# Figure dimensions (min width and height in inches and scale factor)
figsize = {'minw': 4, 'minh': 3, 'scale': .7}
    # Color dictionary for coloring the revealed cells according to the number
# of mines in the neighboring cells
color_dict = {1: [0, 0, 1], 2: [0, 1, 0], 3: [1, 0, 0], 4: [0, 0, .5],
5: [.5, 0, 0], 6: [0, 0, .66], 7: [0, 0, .33], 8: [0, 0, 0]}
# Pre-defined levels (level: [width, height, mines])
levels = {0: [8, 8, 10], 1: [16, 16, 40], 2: [30, 16, 99]}
# Aliases for the levels
level_aliases = {**dict.fromkeys(['beginner', 'b', '0', 0], 0),
**dict.fromkeys(['intermediate', 'i', '1', 1], 1),
**dict.fromkeys(['expert', 'e', '2', 2], 2)}
def __init__(self, width, height, n_mines, show=True):
self.width = width
self.height = height
self.n = self.width*self.height
self.n_mines = n_mines
if self.n_mines >= self.n:
raise Exception('n_mines must be < width*height')
self.n_not_mines = self.n - self.n_mines
self.ii, self.jj = np.mgrid[:self.height, :self.width]
self.i, self.j = self.ii.ravel(), self.jj.ravel()
self.mines = np.full((self.height, self.width), False, dtype=bool) # boolean, mine or not
# number of mines in the neighboring cells
self.mines_count = np.full((self.height, self.width), 0, dtype=int)
self.flags = np.full((self.height, self.width), False, dtype=bool) # mine flags
self.revealed = np.full((self.height, self.width), False, dtype=bool) # revealed cells
self.wrong = np.full((self.height, self.width), False, dtype=bool) # wrong guesses
        self.mines_pts = None # once initialized, list of Line2D objects (ax.plot return)
        self.flags_pts = None # Line2D object
self.mines_count_txt = np.full((self.height, self.width), None,
dtype=object) # 2D array of Text objects
self.revealed_img = None # AxesImage object
self.wrong_img = None # AxesImage object
self.title_txt = None # Text object
self.is_initialized = False # if game is initialized
self.is_game_over = False
# Connection ids of mouse click and key press events
self.cid_mouse = None
self.cid_key = None
self.fig, self.ax = plt.subplots(figsize=(max(self.width*self.figsize['scale'],
self.figsize['minw']),
max(self.height*self.figsize['scale'],
self.figsize['minh'])))
self.fig.canvas.manager.set_window_title(
u'pymines {} × {} ({} mines)'.format(self.width, self.height, self.n_mines))
self.draw_minefield()
if show:
plt.show()
def refresh_canvas(self):
"""
Updates minefield
"""
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def draw_minefield(self):
"""
Draws initial empty minefield board
"""
# Resets member variables to initial values
self.is_initialized = False
self.is_game_over = False
self.mines[:, :] = False
self.mines_count[:, :] = 0
self.flags[:, :] = False
self.revealed[:, :] = False
# Clears plot, sets limits
self.ax.clear()
self.ax.set_aspect('equal')
self.ax.axis('off')
self.ax.set_xlim(-.6, self.width - .4)
self.ax.set_ylim(-.6, self.height - .4)
# Draws grid lines
for j in np.arange(-.5, self.width):
self.ax.plot([j, j], [-.5, self.height-.5], lw=1, color='k')
for i in np.arange(-.5, self.height):
self.ax.plot([-.5, self.width-.5], [i, i], lw=1, color='k')
# Connects mouse click and key press event handlers and coordinates formatter
if self.cid_mouse is None:
self.cid_mouse = self.fig.canvas.mpl_connect('button_press_event', self.on_mouse_click)
self.cid_key = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
self.ax.format_coord = _CoordsFormatter(self.width, self.height)
# Title text: number of flags/total mines
self.title_txt = self.ax.set_title(
'{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def initialize(self, i, j):
"""
Initializes new game. This function is called after first click
in order to prevent the first click being straight over a mine
"""
population = set(range(self.n))
population.remove(i*self.width + j) # removes initial click
        # random.sample needs a sequence, not a set (sets are rejected on Python 3.11+)
        idx = random.sample(sorted(population), self.n_mines) # choose mines
# Sets mines
self.mines[self.i[idx], self.j[idx]] = True
# Sets neighbor mines counter
for i, j in zip(self.i, self.j):
self.mines_count[i, j] = self.count_neighbor_mines(i, j)
# Sets wrong guesses
self.wrong = ~self.mines & self.flags
# Initializes plot objects
self.flags_pts, = self.ax.plot([], [], 'k>', ms=8)
self.revealed_img = self.ax.imshow(self.revealed, vmin=0, vmax=4, cmap='gray_r')
self.wrong_img = self.ax.imshow(self.wrong, vmin=0, vmax=1, cmap=self.cmap_reds_alpha)
# Initializes text objects of neighbor mines counter. They're
# initially set as non visible. As the cells are revealed, their
# status is changed to visible
p_count = self.mines_count > 0
for i, j, count in zip(self.ii[p_count], self.jj[p_count], self.mines_count[p_count]):
self.mines_count_txt[i, j] = self.ax.text(j, i, str(count), fontweight='bold',
color=self.color_dict[count], ha='center',
va='center', visible=False)
self.is_initialized = True
self.refresh_canvas()
def get_ij_neighbors(self, i, j):
"""
Gets the i, j coordinates (i is row, y coordinate, j is column,
x coordinate) of the neighboring cells
"""
ii, jj = np.mgrid[i-1:i+2, j-1:j+2]
ii, jj = ii.ravel(), jj.ravel()
filtr = (ii >= 0) & (ii < self.height) & (jj >= 0) & (jj < self.width)
ij_neighbors = set(zip(ii[filtr], jj[filtr]))
ij_neighbors.remove((i, j))
return ij_neighbors
def count_neighbor_mines(self, i, j):
" |
def count_neighbor_flags(self, i, j):
"""
Counts the number of flags in the neighboring cells
"""
return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
def update_revealed(self, i, j):
"""
        Updates revealed cells by checking the i, j cell and, recursively,
the contiguous cells without mines
"""
if not self.revealed[i, j]:
# If not revealed cell
if self.mines_count[i, j] < 0:
                # If wrong guess, game is over
self.wrong = ~self.mines & self.flags
self.wrong[i, j] = True
self.game_over()
else:
# If guess is correct
self.revealed[i, j] = True
if self.mines_count[i, j] == 0:
# Recursively looks for contiguous cells without mines
for _i, _j in self.get_ij_neighbors(i, j):
if self.mines_count[_i, _j] >= 0 and not self.revealed[_i, _j]:
self.flags[_i, _j] = False
self.update_revealed(_i, _j)
elif self.mines_count[i, j] > 0:
# The line below only makes sense when it's in the middle of the
# recursion. For instance, a cell is flagged, but it is part of a
# big blob that's going to be revealed. The game doesn't punish
# the player in this scenario. This behavior has been copied
# from gnome-mines
self.flags[i, j] = False
# Reveals mine count
self.mines_count_txt[i, j].set_visible(True)
elif self.mines_count[i, j] == self.count_neighbor_flags(i, j):
# If cell that's already revealed is clicked and the number of
# neighboring flags is the same as the number of neighboring
            # mines, then the hidden neighbor cells are recursively
# revealed. Evidently, if any flag guess is wrong, the game is
# over.
for _i, _j in self.get_ij_neighbors(i, j):
if not self.flags[_i, _j] and not self.revealed[_i, _j]:
self.update_revealed(_i, _j)
def reveal(self, i, j):
"""
Reveals clicked cell and contiguous cells without mines
"""
if not self.is_game_over:
if not self.flags[i, j]:
# Game is initialized after first click in order to prevent
# the first click being straight over a mine
if not self.is_initialized:
self.initialize(i, j)
self.update_revealed(i, j)
self.revealed_img.set_data(self.revealed)
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.refresh_canvas()
if np.count_nonzero(self.revealed) == self.n_not_mines:
self.game_over(True)
def flag(self, i, j):
"""
Flags i, j cell
"""
# Does not allow starting a game with a flag
if not self.is_game_over and self.is_initialized:
if not self.revealed[i, j]:
self.flags[i, j] = not self.flags[i, j]
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def game_over(self, win=False):
"""
Callback when game is over
"""
self.is_game_over = True
if win:
self.flags_pts.set_data(*np.where(self.mines)[::-1]) # shows mines marked with flags
self.title_txt.set_text('You win! Press F2 to start a new game')
else:
self.wrong_img.set_data(self.wrong) # wrong guesses
self.mines_pts = self.ax.plot(self.jj[self.mines & ~self.flags],
self.ii[self.mines & ~self.flags],
'kX', ms=10) # shows mines
self.title_txt.set_text('You lose! Press F2 to start a new game')
self.refresh_canvas()
def on_mouse_click(self, event):
"""
Callback when mouse is clicked
"""
if not self.is_game_over:
try:
# i, j coordinates of the click event
i = int(round(event.ydata))
j = int(round(event.xdata))
                # Left or middle button
if event.button == 1 or event.button == 2:
self.reveal(i, j)
# Right button
elif event.button == 3:
self.flag(i, j)
except (TypeError, IndexError):
pass
def on_key_press(self, event):
"""
Callback when key is pressed
"""
# F2 for starting new game
if event.key == 'f2':
self.draw_minefield()
@staticmethod
def new_game(*args, level='beginner', show=True):
"""
Static method for initializing the game with custom settings or in pre-defined levels
(beginner, intermediate, expert)
"""
if len(args) == 3:
minefield = args
else:
minefield = Mines.levels[Mines.level_aliases[level]]
return Mines(*minefield, show)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', metavar='level (b, i, e)', default='beginner', help='level, i.e., '
'beginner (8 x 8, 10 mines), intermediate (16 x 16, 40 mines), expert (30 '
'x 16, 99 mines)')
parser.add_argument('-c', metavar=('width', 'height', 'mines'), default=[], type=int, nargs=3,
help='custom game, provided width, height, and number of mines')
args = parser.parse_args()
game = Mines.new_game(*args.c, level=args.l)
| ""
Counts the number of mines in the neighboring cells
"""
n_neighbor_mines = -1
if not self.mines[i, j]:
n_neighbor_mines = np.count_nonzero(
self.mines[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
return n_neighbor_mines
| identifier_body |
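The slicing in count_neighbor_mines clamps only the lower bound: `(i-1 if i > 0 else 0)` prevents a negative index, which numpy would otherwise interpret as wrapping from the end, while an upper bound of `i+2` past the edge is already safe. A small standalone demo (the grid values are arbitrary):

import numpy as np

mines = np.array([[True, False, False],
                  [False, False, True],
                  [False, False, False]])
i, j = 0, 0  # corner cell: the window must not wrap to the last row/column
window = mines[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2]
print(np.count_nonzero(window))  # 1 -- counts the corner mine itself,
                                 # which is why mine cells return -1 above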
pymines.py | #!/usr/bin/env python3
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
__all__ = ['Mines']
class _CoordsFormatter():
"""
Formats coordinates in the interactive plot mode
"""
def __init__(self, width, height):
self.width = width
self.height = height
def __call__(self, x, y):
string = ''
try:
i = int(round(y))
j = int(round(x))
if i >= 0 and i < self.height and j >= 0 and j < self.width:
string = ' i = {}, j = {}'.format(i, j)
except Exception:
pass
return string
class Mines:
"""
Minesweeper
Parameters
----------
width : int
Width of minefield
height : int
Height of minefield
n_mines : int
Number of mines
show : bool (optional)
If True, displays game when initialized
"""
# Colormap object used for showing wrong cells
cmap_reds_alpha = LinearSegmentedColormap.from_list(name='Reds_alpha',
colors=[[0, 0, 0, 0], [.9, 0, 0, 1]])
# Figure dimensions (min width and height in inches and scale factor)
figsize = {'minw': 4, 'minh': 3, 'scale': .7}
    # Color dictionary for coloring the revealed cells according to the number
# of mines in the neighboring cells
color_dict = {1: [0, 0, 1], 2: [0, 1, 0], 3: [1, 0, 0], 4: [0, 0, .5],
5: [.5, 0, 0], 6: [0, 0, .66], 7: [0, 0, .33], 8: [0, 0, 0]}
# Pre-defined levels (level: [width, height, mines])
levels = {0: [8, 8, 10], 1: [16, 16, 40], 2: [30, 16, 99]}
# Aliases for the levels
level_aliases = {**dict.fromkeys(['beginner', 'b', '0', 0], 0),
**dict.fromkeys(['intermediate', 'i', '1', 1], 1),
**dict.fromkeys(['expert', 'e', '2', 2], 2)}
def __init__(self, width, height, n_mines, show=True):
self.width = width
self.height = height
self.n = self.width*self.height
self.n_mines = n_mines
if self.n_mines >= self.n:
raise Exception('n_mines must be < width*height')
self.n_not_mines = self.n - self.n_mines
self.ii, self.jj = np.mgrid[:self.height, :self.width]
self.i, self.j = self.ii.ravel(), self.jj.ravel()
self.mines = np.full((self.height, self.width), False, dtype=bool) # boolean, mine or not
# number of mines in the neighboring cells
self.mines_count = np.full((self.height, self.width), 0, dtype=int)
self.flags = np.full((self.height, self.width), False, dtype=bool) # mine flags
self.revealed = np.full((self.height, self.width), False, dtype=bool) # revealed cells
self.wrong = np.full((self.height, self.width), False, dtype=bool) # wrong guesses
        self.mines_pts = None # once initialized, list of Line2D objects (ax.plot return)
        self.flags_pts = None # Line2D object
self.mines_count_txt = np.full((self.height, self.width), None,
dtype=object) # 2D array of Text objects
self.revealed_img = None # AxesImage object
self.wrong_img = None # AxesImage object
self.title_txt = None # Text object
self.is_initialized = False # if game is initialized
self.is_game_over = False
# Connection ids of mouse click and key press events
self.cid_mouse = None
self.cid_key = None
self.fig, self.ax = plt.subplots(figsize=(max(self.width*self.figsize['scale'],
self.figsize['minw']),
max(self.height*self.figsize['scale'],
self.figsize['minh'])))
self.fig.canvas.manager.set_window_title(
u'pymines {} × {} ({} mines)'.format(self.width, self.height, self.n_mines))
self.draw_minefield()
if show:
plt.show()
def refresh_canvas(self):
"""
Updates minefield
"""
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def draw_minefield(self):
"""
Draws initial empty minefield board
"""
# Resets member variables to initial values
self.is_initialized = False
self.is_game_over = False
self.mines[:, :] = False
self.mines_count[:, :] = 0
self.flags[:, :] = False
self.revealed[:, :] = False
# Clears plot, sets limits
self.ax.clear()
self.ax.set_aspect('equal')
self.ax.axis('off')
self.ax.set_xlim(-.6, self.width - .4)
self.ax.set_ylim(-.6, self.height - .4)
# Draws grid lines
for j in np.arange(-.5, self.width):
self.ax.plot([j, j], [-.5, self.height-.5], lw=1, color='k')
for i in np.arange(-.5, self.height):
self.ax.plot([-.5, self.width-.5], [i, i], lw=1, color='k')
# Connects mouse click and key press event handlers and coordinates formatter
if self.cid_mouse is None:
self.cid_mouse = self.fig.canvas.mpl_connect('button_press_event', self.on_mouse_click)
self.cid_key = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
self.ax.format_coord = _CoordsFormatter(self.width, self.height)
# Title text: number of flags/total mines
self.title_txt = self.ax.set_title(
'{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def initialize(self, i, j):
"""
Initializes new game. This function is called after first click
in order to prevent the first click being straight over a mine
"""
population = set(range(self.n))
population.remove(i*self.width + j) # removes initial click
        # random.sample needs a sequence, not a set (sets are rejected on Python 3.11+)
        idx = random.sample(sorted(population), self.n_mines) # choose mines
# Sets mines
self.mines[self.i[idx], self.j[idx]] = True
# Sets neighbor mines counter
for i, j in zip(self.i, self.j):
self.mines_count[i, j] = self.count_neighbor_mines(i, j)
# Sets wrong guesses
self.wrong = ~self.mines & self.flags
# Initializes plot objects
self.flags_pts, = self.ax.plot([], [], 'k>', ms=8)
self.revealed_img = self.ax.imshow(self.revealed, vmin=0, vmax=4, cmap='gray_r')
self.wrong_img = self.ax.imshow(self.wrong, vmin=0, vmax=1, cmap=self.cmap_reds_alpha)
# Initializes text objects of neighbor mines counter. They're
# initially set as non visible. As the cells are revealed, their
# status is changed to visible
p_count = self.mines_count > 0
for i, j, count in zip(self.ii[p_count], self.jj[p_count], self.mines_count[p_count]):
self.mines_count_txt[i, j] = self.ax.text(j, i, str(count), fontweight='bold',
color=self.color_dict[count], ha='center',
va='center', visible=False)
self.is_initialized = True
self.refresh_canvas()
def get_ij_neighbors(self, i, j):
"""
Gets the i, j coordinates (i is row, y coordinate, j is column,
x coordinate) of the neighboring cells
"""
ii, jj = np.mgrid[i-1:i+2, j-1:j+2]
ii, jj = ii.ravel(), jj.ravel()
filtr = (ii >= 0) & (ii < self.height) & (jj >= 0) & (jj < self.width)
ij_neighbors = set(zip(ii[filtr], jj[filtr]))
ij_neighbors.remove((i, j))
return ij_neighbors
def count_neighbor_mines(self, i, j):
"""
Counts the number of mines in the neighboring cells
"""
n_neighbor_mines = -1
if not self.mines[i, j]:
n_neighbor_mines = np.count_nonzero(
self.mines[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
return n_neighbor_mines
def count_neighbor_flags(self, i, j):
"""
Counts the number of flags in the neighboring cells
"""
return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
def update_revealed(self, i, j):
"""
        Updates revealed cells by checking the i, j cell and, recursively,
the contiguous cells without mines
"""
if not self.revealed[i, j]:
# If not revealed cell
if self.mines_count[i, j] < 0:
                # If wrong guess, game is over
self.wrong = ~self.mines & self.flags
self.wrong[i, j] = True
self.game_over()
else:
# If guess is correct
self.revealed[i, j] = True
if self.mines_count[i, j] == 0:
# Recursively looks for contiguous cells without mines
for _i, _j in self.get_ij_neighbors(i, j):
if self.mines_count[_i, _j] >= 0 and not self.revealed[_i, _j]:
self.flags[_i, _j] = False
self.update_revealed(_i, _j)
elif self.mines_count[i, j] > 0:
# The line below only makes sense when it's in the middle of the
# recursion. For instance, a cell is flagged, but it is part of a
# big blob that's going to be revealed. The game doesn't punish
# the player in this scenario. This behavior has been copied
# from gnome-mines
self.flags[i, j] = False
# Reveals mine count
self.mines_count_txt[i, j].set_visible(True)
elif self.mines_count[i, j] == self.count_neighbor_flags(i, j):
# If cell that's already revealed is clicked and the number of
# neighboring flags is the same as the number of neighboring
            # mines, then the hidden neighbor cells are recursively
# revealed. Evidently, if any flag guess is wrong, the game is
# over.
for _i, _j in self.get_ij_neighbors(i, j):
if not self.flags[_i, _j] and not self.revealed[_i, _j]:
self.update_revealed(_i, _j)
def reveal(self, i, j):
"""
Reveals clicked cell and contiguous cells without mines
"""
if not self.is_game_over:
if not self.flags[i, j]:
# Game is initialized after first click in order to prevent
# the first click being straight over a mine
if not self.is_initialized:
self.initialize(i, j)
self.update_revealed(i, j)
self.revealed_img.set_data(self.revealed)
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.refresh_canvas()
if np.count_nonzero(self.revealed) == self.n_not_mines:
self.game_over(True)
def flag(self, i, j):
"""
Flags i, j cell
"""
# Does not allow starting a game with a flag
if not self.is_game_over and self.is_initialized:
if not self.revealed[i, j]:
self.flags[i, j] = not self.flags[i, j]
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def game_over(self, win=False):
"""
Callback when game is over
"""
self.is_game_over = True
if win:
self.flags_pts.set_data(*np.where(self.mines)[::-1]) # shows mines marked with flags
self.title_txt.set_text('You win! Press F2 to start a new game')
else:
self.wrong_img.set_data(self.wrong) # wrong guesses
self.mines_pts = self.ax.plot(self.jj[self.mines & ~self.flags],
self.ii[self.mines & ~self.flags],
'kX', ms=10) # shows mines
self.title_txt.set_text('You lose! Press F2 to start a new game')
self.refresh_canvas()
def on_mouse_click(self, event):
"""
Callback when mouse is clicked
"""
if not self.is_game_over:
try:
# i, j coordinates of the click event
i = int(round(event.ydata))
j = int(round(event.xdata))
                # Left or middle button
if event.button == 1 or event.button == 2:
self.reveal(i, j)
# Right button
elif event.button == 3:
self.flag(i, j)
except (TypeError, IndexError):
pass
def on_key_press(self, event):
"""
Callback when key is pressed
"""
# F2 for starting new game
if event.key == 'f2':
self.draw_minefield()
@staticmethod
def new_game(*args, level='beginner', show=True):
"""
Static method for initializing the game with custom settings or in pre-defined levels
(beginner, intermediate, expert)
"""
if len(args) == 3:
minefield = args
else:
minefield = Mines.levels[Mines.level_aliases[level]]
return Mines(*minefield, show)
if __name__ == '__main__':
i | mport argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', metavar='level (b, i, e)', default='beginner', help='level, i.e., '
'beginner (8 x 8, 10 mines), intermediate (16 x 16, 40 mines), expert (30 '
'x 16, 99 mines)')
parser.add_argument('-c', metavar=('width', 'height', 'mines'), default=[], type=int, nargs=3,
help='custom game, provided width, height, and number of mines')
args = parser.parse_args()
game = Mines.new_game(*args.c, level=args.l)
| conditional_block |
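The `__main__` block only wires up the CLI; the same entry point works programmatically. A possible headless session (coordinates are arbitrary examples and a working matplotlib backend is assumed):

game = Mines.new_game(level='i', show=False)  # 16 x 16 board, 40 mines
game.reveal(8, 8)   # first click places the mines, so it is never a mine
game.flag(0, 0)     # same effect as a right-click
print(game.revealed.sum(), 'cells revealed')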
|
pymines.py | #!/usr/bin/env python3
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
__all__ = ['Mines']
class _CoordsFormatter():
"""
Formats coordinates in the interactive plot mode
"""
def __init__(self, width, height):
self.width = width
self.height = height
def __call__(self, x, y):
string = ''
try:
i = int(round(y))
j = int(round(x))
if i >= 0 and i < self.height and j >= 0 and j < self.width:
string = ' i = {}, j = {}'.format(i, j)
except Exception:
pass
return string
class | :
"""
Minesweeper
Parameters
----------
width : int
Width of minefield
height : int
Height of minefield
n_mines : int
Number of mines
show : bool (optional)
If True, displays game when initialized
"""
# Colormap object used for showing wrong cells
cmap_reds_alpha = LinearSegmentedColormap.from_list(name='Reds_alpha',
colors=[[0, 0, 0, 0], [.9, 0, 0, 1]])
# Figure dimensions (min width and height in inches and scale factor)
figsize = {'minw': 4, 'minh': 3, 'scale': .7}
    # Color dictionary for coloring the revealed cells according to the number
# of mines in the neighboring cells
color_dict = {1: [0, 0, 1], 2: [0, 1, 0], 3: [1, 0, 0], 4: [0, 0, .5],
5: [.5, 0, 0], 6: [0, 0, .66], 7: [0, 0, .33], 8: [0, 0, 0]}
# Pre-defined levels (level: [width, height, mines])
levels = {0: [8, 8, 10], 1: [16, 16, 40], 2: [30, 16, 99]}
# Aliases for the levels
level_aliases = {**dict.fromkeys(['beginner', 'b', '0', 0], 0),
**dict.fromkeys(['intermediate', 'i', '1', 1], 1),
**dict.fromkeys(['expert', 'e', '2', 2], 2)}
def __init__(self, width, height, n_mines, show=True):
self.width = width
self.height = height
self.n = self.width*self.height
self.n_mines = n_mines
if self.n_mines >= self.n:
raise Exception('n_mines must be < width*height')
self.n_not_mines = self.n - self.n_mines
self.ii, self.jj = np.mgrid[:self.height, :self.width]
self.i, self.j = self.ii.ravel(), self.jj.ravel()
self.mines = np.full((self.height, self.width), False, dtype=bool) # boolean, mine or not
# number of mines in the neighboring cells
self.mines_count = np.full((self.height, self.width), 0, dtype=int)
self.flags = np.full((self.height, self.width), False, dtype=bool) # mine flags
self.revealed = np.full((self.height, self.width), False, dtype=bool) # revealed cells
self.wrong = np.full((self.height, self.width), False, dtype=bool) # wrong guesses
        self.mines_pts = None # once initialized, list of Line2D objects (ax.plot return)
        self.flags_pts = None # Line2D object
self.mines_count_txt = np.full((self.height, self.width), None,
dtype=object) # 2D array of Text objects
self.revealed_img = None # AxesImage object
self.wrong_img = None # AxesImage object
self.title_txt = None # Text object
self.is_initialized = False # if game is initialized
self.is_game_over = False
# Connection ids of mouse click and key press events
self.cid_mouse = None
self.cid_key = None
self.fig, self.ax = plt.subplots(figsize=(max(self.width*self.figsize['scale'],
self.figsize['minw']),
max(self.height*self.figsize['scale'],
self.figsize['minh'])))
self.fig.canvas.manager.set_window_title(
u'pymines {} × {} ({} mines)'.format(self.width, self.height, self.n_mines))
self.draw_minefield()
if show:
plt.show()
def refresh_canvas(self):
"""
Updates minefield
"""
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def draw_minefield(self):
"""
Draws initial empty minefield board
"""
# Resets member variables to initial values
self.is_initialized = False
self.is_game_over = False
self.mines[:, :] = False
self.mines_count[:, :] = 0
self.flags[:, :] = False
self.revealed[:, :] = False
# Clears plot, sets limits
self.ax.clear()
self.ax.set_aspect('equal')
self.ax.axis('off')
self.ax.set_xlim(-.6, self.width - .4)
self.ax.set_ylim(-.6, self.height - .4)
# Draws grid lines
for j in np.arange(-.5, self.width):
self.ax.plot([j, j], [-.5, self.height-.5], lw=1, color='k')
for i in np.arange(-.5, self.height):
self.ax.plot([-.5, self.width-.5], [i, i], lw=1, color='k')
# Connects mouse click and key press event handlers and coordinates formatter
if self.cid_mouse is None:
self.cid_mouse = self.fig.canvas.mpl_connect('button_press_event', self.on_mouse_click)
self.cid_key = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
self.ax.format_coord = _CoordsFormatter(self.width, self.height)
# Title text: number of flags/total mines
self.title_txt = self.ax.set_title(
'{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def initialize(self, i, j):
"""
Initializes new game. This function is called after first click
in order to prevent the first click being straight over a mine
"""
population = set(range(self.n))
population.remove(i*self.width + j) # removes initial click
        # random.sample needs a sequence, not a set (sets are rejected on Python 3.11+)
        idx = random.sample(sorted(population), self.n_mines) # choose mines
# Sets mines
self.mines[self.i[idx], self.j[idx]] = True
# Sets neighbor mines counter
for i, j in zip(self.i, self.j):
self.mines_count[i, j] = self.count_neighbor_mines(i, j)
# Sets wrong guesses
self.wrong = ~self.mines & self.flags
# Initializes plot objects
self.flags_pts, = self.ax.plot([], [], 'k>', ms=8)
self.revealed_img = self.ax.imshow(self.revealed, vmin=0, vmax=4, cmap='gray_r')
self.wrong_img = self.ax.imshow(self.wrong, vmin=0, vmax=1, cmap=self.cmap_reds_alpha)
# Initializes text objects of neighbor mines counter. They're
# initially set as non visible. As the cells are revealed, their
# status is changed to visible
p_count = self.mines_count > 0
for i, j, count in zip(self.ii[p_count], self.jj[p_count], self.mines_count[p_count]):
self.mines_count_txt[i, j] = self.ax.text(j, i, str(count), fontweight='bold',
color=self.color_dict[count], ha='center',
va='center', visible=False)
self.is_initialized = True
self.refresh_canvas()
def get_ij_neighbors(self, i, j):
"""
Gets the i, j coordinates (i is row, y coordinate, j is column,
x coordinate) of the neighboring cells
"""
ii, jj = np.mgrid[i-1:i+2, j-1:j+2]
ii, jj = ii.ravel(), jj.ravel()
filtr = (ii >= 0) & (ii < self.height) & (jj >= 0) & (jj < self.width)
ij_neighbors = set(zip(ii[filtr], jj[filtr]))
ij_neighbors.remove((i, j))
return ij_neighbors
def count_neighbor_mines(self, i, j):
"""
Counts the number of mines in the neighboring cells
"""
n_neighbor_mines = -1
if not self.mines[i, j]:
n_neighbor_mines = np.count_nonzero(
self.mines[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
return n_neighbor_mines
def count_neighbor_flags(self, i, j):
"""
Counts the number of flags in the neighboring cells
"""
return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
def update_revealed(self, i, j):
"""
        Updates revealed cells by checking the i, j cell and, recursively,
the contiguous cells without mines
"""
if not self.revealed[i, j]:
# If not revealed cell
if self.mines_count[i, j] < 0:
                # If wrong guess, game is over
self.wrong = ~self.mines & self.flags
self.wrong[i, j] = True
self.game_over()
else:
# If guess is correct
self.revealed[i, j] = True
if self.mines_count[i, j] == 0:
# Recursively looks for contiguous cells without mines
for _i, _j in self.get_ij_neighbors(i, j):
if self.mines_count[_i, _j] >= 0 and not self.revealed[_i, _j]:
self.flags[_i, _j] = False
self.update_revealed(_i, _j)
elif self.mines_count[i, j] > 0:
# The line below only makes sense when it's in the middle of the
# recursion. For instance, a cell is flagged, but it is part of a
# big blob that's going to be revealed. The game doesn't punish
# the player in this scenario. This behavior has been copied
# from gnome-mines
self.flags[i, j] = False
# Reveals mine count
self.mines_count_txt[i, j].set_visible(True)
elif self.mines_count[i, j] == self.count_neighbor_flags(i, j):
# If cell that's already revealed is clicked and the number of
# neighboring flags is the same as the number of neighboring
            # mines, then the hidden neighbor cells are recursively
# revealed. Evidently, if any flag guess is wrong, the game is
# over.
for _i, _j in self.get_ij_neighbors(i, j):
if not self.flags[_i, _j] and not self.revealed[_i, _j]:
self.update_revealed(_i, _j)
def reveal(self, i, j):
"""
Reveals clicked cell and contiguous cells without mines
"""
if not self.is_game_over:
if not self.flags[i, j]:
# Game is initialized after first click in order to prevent
# the first click being straight over a mine
if not self.is_initialized:
self.initialize(i, j)
self.update_revealed(i, j)
self.revealed_img.set_data(self.revealed)
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.refresh_canvas()
if np.count_nonzero(self.revealed) == self.n_not_mines:
self.game_over(True)
def flag(self, i, j):
"""
Flags i, j cell
"""
# Does not allow starting a game with a flag
if not self.is_game_over and self.is_initialized:
if not self.revealed[i, j]:
self.flags[i, j] = not self.flags[i, j]
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def game_over(self, win=False):
"""
Callback when game is over
"""
self.is_game_over = True
if win:
self.flags_pts.set_data(*np.where(self.mines)[::-1]) # shows mines marked with flags
self.title_txt.set_text('You win! Press F2 to start a new game')
else:
self.wrong_img.set_data(self.wrong) # wrong guesses
self.mines_pts = self.ax.plot(self.jj[self.mines & ~self.flags],
self.ii[self.mines & ~self.flags],
'kX', ms=10) # shows mines
self.title_txt.set_text('You lose! Press F2 to start a new game')
self.refresh_canvas()
def on_mouse_click(self, event):
"""
Callback when mouse is clicked
"""
if not self.is_game_over:
try:
# i, j coordinates of the click event
i = int(round(event.ydata))
j = int(round(event.xdata))
                # Left or middle button
if event.button == 1 or event.button == 2:
self.reveal(i, j)
# Right button
elif event.button == 3:
self.flag(i, j)
except (TypeError, IndexError):
pass
def on_key_press(self, event):
"""
Callback when key is pressed
"""
# F2 for starting new game
if event.key == 'f2':
self.draw_minefield()
@staticmethod
def new_game(*args, level='beginner', show=True):
"""
Static method for initializing the game with custom settings or in pre-defined levels
(beginner, intermediate, expert)
"""
if len(args) == 3:
minefield = args
else:
minefield = Mines.levels[Mines.level_aliases[level]]
return Mines(*minefield, show)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', metavar='level (b, i, e)', default='beginner', help='level, i.e., '
'beginner (8 x 8, 10 mines), intermediate (16 x 16, 40 mines), expert (30 '
'x 16, 99 mines)')
parser.add_argument('-c', metavar=('width', 'height', 'mines'), default=[], type=int, nargs=3,
help='custom game, provided width, height, and number of mines')
args = parser.parse_args()
game = Mines.new_game(*args.c, level=args.l)
| Mines | identifier_name |
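get_ij_neighbors clips the 3 x 3 neighborhood at the board edges rather than wrapping, so a corner cell has exactly three neighbors. For example:

game = Mines.new_game(level='b', show=False)   # 8 x 8 board
print(sorted(game.get_ij_neighbors(0, 0)))     # [(0, 1), (1, 0), (1, 1)]
print(len(game.get_ij_neighbors(4, 4)))        # 8 for an interior cell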
pymines.py | #!/usr/bin/env python3
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
__all__ = ['Mines']
class _CoordsFormatter():
"""
Formats coordinates in the interactive plot mode
"""
def __init__(self, width, height):
self.width = width
self.height = height
def __call__(self, x, y):
string = ''
try:
i = int(round(y))
j = int(round(x))
if i >= 0 and i < self.height and j >= 0 and j < self.width:
string = ' i = {}, j = {}'.format(i, j)
except Exception:
pass
return string
class Mines:
"""
Minesweeper
Parameters
----------
width : int
Width of minefield
height : int
Height of minefield
n_mines : int
Number of mines
show : bool (optional)
If True, displays game when initialized
"""
# Colormap object used for showing wrong cells
cmap_reds_alpha = LinearSegmentedColormap.from_list(name='Reds_alpha',
colors=[[0, 0, 0, 0], [.9, 0, 0, 1]])
# Figure dimensions (min width and height in inches and scale factor)
figsize = {'minw': 4, 'minh': 3, 'scale': .7}
    # Color dictionary for coloring the revealed cells according to the number
# of mines in the neighboring cells
color_dict = {1: [0, 0, 1], 2: [0, 1, 0], 3: [1, 0, 0], 4: [0, 0, .5],
5: [.5, 0, 0], 6: [0, 0, .66], 7: [0, 0, .33], 8: [0, 0, 0]}
# Pre-defined levels (level: [width, height, mines])
levels = {0: [8, 8, 10], 1: [16, 16, 40], 2: [30, 16, 99]}
# Aliases for the levels
level_aliases = {**dict.fromkeys(['beginner', 'b', '0', 0], 0),
**dict.fromkeys(['intermediate', 'i', '1', 1], 1),
**dict.fromkeys(['expert', 'e', '2', 2], 2)}
def __init__(self, width, height, n_mines, show=True):
self.width = width
self.height = height
self.n = self.width*self.height
self.n_mines = n_mines
if self.n_mines >= self.n:
raise Exception('n_mines must be < width*height')
self.n_not_mines = self.n - self.n_mines
self.ii, self.jj = np.mgrid[:self.height, :self.width]
self.i, self.j = self.ii.ravel(), self.jj.ravel() | # number of mines in the neighboring cells
self.mines_count = np.full((self.height, self.width), 0, dtype=int)
self.flags = np.full((self.height, self.width), False, dtype=bool) # mine flags
self.revealed = np.full((self.height, self.width), False, dtype=bool) # revealed cells
self.wrong = np.full((self.height, self.width), False, dtype=bool) # wrong guesses
        self.mines_pts = None # once initialized, list of Line2D objects (ax.plot return)
        self.flags_pts = None # Line2D object
self.mines_count_txt = np.full((self.height, self.width), None,
dtype=object) # 2D array of Text objects
self.revealed_img = None # AxesImage object
self.wrong_img = None # AxesImage object
self.title_txt = None # Text object
self.is_initialized = False # if game is initialized
self.is_game_over = False
# Connection ids of mouse click and key press events
self.cid_mouse = None
self.cid_key = None
self.fig, self.ax = plt.subplots(figsize=(max(self.width*self.figsize['scale'],
self.figsize['minw']),
max(self.height*self.figsize['scale'],
self.figsize['minh'])))
self.fig.canvas.manager.set_window_title(
u'pymines {} × {} ({} mines)'.format(self.width, self.height, self.n_mines))
self.draw_minefield()
if show:
plt.show()
def refresh_canvas(self):
"""
Updates minefield
"""
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def draw_minefield(self):
"""
Draws initial empty minefield board
"""
# Resets member variables to initial values
self.is_initialized = False
self.is_game_over = False
self.mines[:, :] = False
self.mines_count[:, :] = 0
self.flags[:, :] = False
self.revealed[:, :] = False
# Clears plot, sets limits
self.ax.clear()
self.ax.set_aspect('equal')
self.ax.axis('off')
self.ax.set_xlim(-.6, self.width - .4)
self.ax.set_ylim(-.6, self.height - .4)
# Draws grid lines
for j in np.arange(-.5, self.width):
self.ax.plot([j, j], [-.5, self.height-.5], lw=1, color='k')
for i in np.arange(-.5, self.height):
self.ax.plot([-.5, self.width-.5], [i, i], lw=1, color='k')
# Connects mouse click and key press event handlers and coordinates formatter
if self.cid_mouse is None:
self.cid_mouse = self.fig.canvas.mpl_connect('button_press_event', self.on_mouse_click)
self.cid_key = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
self.ax.format_coord = _CoordsFormatter(self.width, self.height)
# Title text: number of flags/total mines
self.title_txt = self.ax.set_title(
'{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def initialize(self, i, j):
"""
Initializes new game. This function is called after first click
in order to prevent the first click being straight over a mine
"""
population = set(range(self.n))
population.remove(i*self.width + j) # removes initial click
        # random.sample needs a sequence, not a set (sets are rejected on Python 3.11+)
        idx = random.sample(sorted(population), self.n_mines) # choose mines
# Sets mines
self.mines[self.i[idx], self.j[idx]] = True
# Sets neighbor mines counter
for i, j in zip(self.i, self.j):
self.mines_count[i, j] = self.count_neighbor_mines(i, j)
# Sets wrong guesses
self.wrong = ~self.mines & self.flags
# Initializes plot objects
self.flags_pts, = self.ax.plot([], [], 'k>', ms=8)
self.revealed_img = self.ax.imshow(self.revealed, vmin=0, vmax=4, cmap='gray_r')
self.wrong_img = self.ax.imshow(self.wrong, vmin=0, vmax=1, cmap=self.cmap_reds_alpha)
# Initializes text objects of neighbor mines counter. They're
# initially set as non visible. As the cells are revealed, their
# status is changed to visible
p_count = self.mines_count > 0
for i, j, count in zip(self.ii[p_count], self.jj[p_count], self.mines_count[p_count]):
self.mines_count_txt[i, j] = self.ax.text(j, i, str(count), fontweight='bold',
color=self.color_dict[count], ha='center',
va='center', visible=False)
self.is_initialized = True
self.refresh_canvas()
def get_ij_neighbors(self, i, j):
"""
Gets the i, j coordinates (i is row, y coordinate, j is column,
x coordinate) of the neighboring cells
"""
ii, jj = np.mgrid[i-1:i+2, j-1:j+2]
ii, jj = ii.ravel(), jj.ravel()
filtr = (ii >= 0) & (ii < self.height) & (jj >= 0) & (jj < self.width)
ij_neighbors = set(zip(ii[filtr], jj[filtr]))
ij_neighbors.remove((i, j))
return ij_neighbors
def count_neighbor_mines(self, i, j):
"""
Counts the number of mines in the neighboring cells
"""
n_neighbor_mines = -1
if not self.mines[i, j]:
n_neighbor_mines = np.count_nonzero(
self.mines[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
return n_neighbor_mines
def count_neighbor_flags(self, i, j):
"""
Counts the number of flags in the neighboring cells
"""
return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])
def update_revealed(self, i, j):
"""
        Updates revealed cells by checking the i, j cell and, recursively,
the contiguous cells without mines
"""
if not self.revealed[i, j]:
# If not revealed cell
if self.mines_count[i, j] < 0:
                # If wrong guess, game is over
self.wrong = ~self.mines & self.flags
self.wrong[i, j] = True
self.game_over()
else:
# If guess is correct
self.revealed[i, j] = True
if self.mines_count[i, j] == 0:
# Recursively looks for contiguous cells without mines
for _i, _j in self.get_ij_neighbors(i, j):
if self.mines_count[_i, _j] >= 0 and not self.revealed[_i, _j]:
self.flags[_i, _j] = False
self.update_revealed(_i, _j)
elif self.mines_count[i, j] > 0:
# The line below only makes sense when it's in the middle of the
# recursion. For instance, a cell is flagged, but it is part of a
# big blob that's going to be revealed. The game doesn't punish
# the player in this scenario. This behavior has been copied
# from gnome-mines
self.flags[i, j] = False
# Reveals mine count
self.mines_count_txt[i, j].set_visible(True)
elif self.mines_count[i, j] == self.count_neighbor_flags(i, j):
# If cell that's already revealed is clicked and the number of
# neighboring flags is the same as the number of neighboring
            # mines, then the hidden neighbor cells are recursively
# revealed. Evidently, if any flag guess is wrong, the game is
# over.
for _i, _j in self.get_ij_neighbors(i, j):
if not self.flags[_i, _j] and not self.revealed[_i, _j]:
self.update_revealed(_i, _j)
def reveal(self, i, j):
"""
Reveals clicked cell and contiguous cells without mines
"""
if not self.is_game_over:
if not self.flags[i, j]:
# Game is initialized after first click in order to prevent
# the first click being straight over a mine
if not self.is_initialized:
self.initialize(i, j)
self.update_revealed(i, j)
self.revealed_img.set_data(self.revealed)
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.refresh_canvas()
if np.count_nonzero(self.revealed) == self.n_not_mines:
self.game_over(True)
def flag(self, i, j):
"""
Flags i, j cell
"""
# Does not allow starting a game with a flag
if not self.is_game_over and self.is_initialized:
if not self.revealed[i, j]:
self.flags[i, j] = not self.flags[i, j]
self.flags_pts.set_data(*np.where(self.flags)[::-1])
self.title_txt.set_text('{}/{}'.format(np.count_nonzero(self.flags), self.n_mines))
self.refresh_canvas()
def game_over(self, win=False):
"""
Callback when game is over
"""
self.is_game_over = True
if win:
self.flags_pts.set_data(*np.where(self.mines)[::-1]) # shows mines marked with flags
self.title_txt.set_text('You win! Press F2 to start a new game')
else:
self.wrong_img.set_data(self.wrong) # wrong guesses
self.mines_pts = self.ax.plot(self.jj[self.mines & ~self.flags],
self.ii[self.mines & ~self.flags],
'kX', ms=10) # shows mines
self.title_txt.set_text('You lose! Press F2 to start a new game')
self.refresh_canvas()
def on_mouse_click(self, event):
"""
Callback when mouse is clicked
"""
if not self.is_game_over:
try:
# i, j coordinates of the click event
i = int(round(event.ydata))
j = int(round(event.xdata))
                # Left or middle button
if event.button == 1 or event.button == 2:
self.reveal(i, j)
# Right button
elif event.button == 3:
self.flag(i, j)
except (TypeError, IndexError):
pass
def on_key_press(self, event):
"""
Callback when key is pressed
"""
# F2 for starting new game
if event.key == 'f2':
self.draw_minefield()
@staticmethod
def new_game(*args, level='beginner', show=True):
"""
Static method for initializing the game with custom settings or in pre-defined levels
(beginner, intermediate, expert)
"""
if len(args) == 3:
minefield = args
else:
minefield = Mines.levels[Mines.level_aliases[level]]
return Mines(*minefield, show)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', metavar='level (b, i, e)', default='beginner', help='level, i.e., '
'beginner (8 x 8, 10 mines), intermediate (16 x 16, 40 mines), expert (30 '
'x 16, 99 mines)')
parser.add_argument('-c', metavar=('width', 'height', 'mines'), default=[], type=int, nargs=3,
help='custom game, provided width, height, and number of mines')
args = parser.parse_args()
game = Mines.new_game(*args.c, level=args.l) |
self.mines = np.full((self.height, self.width), False, dtype=bool) # boolean, mine or not | random_line_split |
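Several spellings resolve to the same preset through level_aliases, and a three-argument call bypasses the presets entirely:

assert Mines.level_aliases['e'] == Mines.level_aliases['expert'] == 2
assert Mines.levels[Mines.level_aliases['i']] == [16, 16, 40]
game = Mines.new_game(20, 12, 30, show=False)  # custom width, height, mines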