text (string, length 78 to 104k) | code_tokens (sequence) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 487)
---|---|---|---|
def _update_progress(self, percentage, **kwargs):
"""
Update the progress with a percentage, including updating the progressbar as well as calling the progress
callback.
:param float percentage: Percentage of the progressbar. from 0.0 to 100.0.
:param kwargs: Other parameters that will be passed to the progress_callback handler.
:return: None
"""
if self._show_progressbar:
if self._progressbar is None:
self._initialize_progressbar()
self._progressbar.update(percentage * 10000)
if self._progress_callback is not None:
self._progress_callback(percentage, **kwargs) | [
"def",
"_update_progress",
"(",
"self",
",",
"percentage",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_show_progressbar",
":",
"if",
"self",
".",
"_progressbar",
"is",
"None",
":",
"self",
".",
"_initialize_progressbar",
"(",
")",
"self",
".",
"_progressbar",
".",
"update",
"(",
"percentage",
"*",
"10000",
")",
"if",
"self",
".",
"_progress_callback",
"is",
"not",
"None",
":",
"self",
".",
"_progress_callback",
"(",
"percentage",
",",
"*",
"*",
"kwargs",
")"
] | 38.555556 | 24.888889 |
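The percentage is documented as 0.0 to 100.0, and the `update(percentage * 10000)` call implies the underlying bar counts up to 1,000,000; that widget setup is not shown in this row, so the following is only a self-contained sketch of the same pattern, with the bar replaced by a print.

# Hedged sketch of the same callback pattern (all names here are illustrative,
# not the class's real initialization).
class ProgressSketch:
    def __init__(self, progress_callback=None):
        self._progress_callback = progress_callback

    def update(self, percentage, **kwargs):
        scaled = int(percentage * 10000)   # implies a bar whose maximum is 1,000,000
        print("bar value:", scaled)
        if self._progress_callback is not None:
            self._progress_callback(percentage, **kwargs)

ProgressSketch(lambda p, **kw: print("callback:", p, kw)).update(42.5, stage="analysis")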
def _parse_response(self, result_page):
"""
Takes a result page of sending the sms, returns an extracted tuple:
('numeric_err_code', '<sent_queued_message_id>', '<smsglobalmsgid>')
Returns None if unable to extract info from result_page, it should be
safe to assume that it was either a failed result or worse, the interface
contract has changed.
"""
# Sample result_page, single line -> "OK: 0; Sent queued message ID: 2063619577732703 SMSGlobalMsgID:6171799108850954"
resultline = result_page.splitlines()[0] # get result line
if resultline.startswith('ERROR:'):
raise Exception(resultline.replace('ERROR: ', ''))
patt = re.compile(r'^.+?:\s*(.+?)\s*;\s*Sent queued message ID:\s*(.+?)\s*SMSGlobalMsgID:(.+?)$', re.IGNORECASE)
m = patt.match(resultline)
if m:
return (m.group(1), m.group(2), m.group(3))
return None | [
"def",
"_parse_response",
"(",
"self",
",",
"result_page",
")",
":",
"# Sample result_page, single line -> \"OK: 0; Sent queued message ID: 2063619577732703 SMSGlobalMsgID:6171799108850954\"",
"resultline",
"=",
"result_page",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"# get result line",
"if",
"resultline",
".",
"startswith",
"(",
"'ERROR:'",
")",
":",
"raise",
"Exception",
"(",
"resultline",
".",
"replace",
"(",
"'ERROR: '",
",",
"''",
")",
")",
"patt",
"=",
"re",
".",
"compile",
"(",
"r'^.+?:\\s*(.+?)\\s*;\\s*Sent queued message ID:\\s*(.+?)\\s*SMSGlobalMsgID:(.+?)$'",
",",
"re",
".",
"IGNORECASE",
")",
"m",
"=",
"patt",
".",
"match",
"(",
"resultline",
")",
"if",
"m",
":",
"return",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
"m",
".",
"group",
"(",
"2",
")",
",",
"m",
".",
"group",
"(",
"3",
")",
")",
"return",
"None"
] | 55.411765 | 26.588235 |
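A quick check of how that regular expression splits the sample line quoted in the comment; only the standard-library `re` module is needed, and the group contents follow directly from the pattern.

import re

line = "OK: 0; Sent queued message ID: 2063619577732703 SMSGlobalMsgID:6171799108850954"
patt = re.compile(r'^.+?:\s*(.+?)\s*;\s*Sent queued message ID:\s*(.+?)\s*SMSGlobalMsgID:(.+?)$',
                  re.IGNORECASE)
m = patt.match(line)
print(m.groups())   # ('0', '2063619577732703', '6171799108850954')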
def parse_args(args):
"""Uses python argparse to collect positional args"""
Log.info("Input args: %r" % args)
parser = argparse.ArgumentParser()
parser.add_argument("--shard", type=int, required=True)
parser.add_argument("--topology-name", required=True)
parser.add_argument("--topology-id", required=True)
parser.add_argument("--topology-defn-file", required=True)
parser.add_argument("--state-manager-connection", required=True)
parser.add_argument("--state-manager-root", required=True)
parser.add_argument("--state-manager-config-file", required=True)
parser.add_argument("--tmaster-binary", required=True)
parser.add_argument("--stmgr-binary", required=True)
parser.add_argument("--metrics-manager-classpath", required=True)
parser.add_argument("--instance-jvm-opts", required=True)
parser.add_argument("--classpath", required=True)
parser.add_argument("--master-port", required=True)
parser.add_argument("--tmaster-controller-port", required=True)
parser.add_argument("--tmaster-stats-port", required=True)
parser.add_argument("--heron-internals-config-file", required=True)
parser.add_argument("--override-config-file", required=True)
parser.add_argument("--component-ram-map", required=True)
parser.add_argument("--component-jvm-opts", required=True)
parser.add_argument("--pkg-type", required=True)
parser.add_argument("--topology-binary-file", required=True)
parser.add_argument("--heron-java-home", required=True)
parser.add_argument("--shell-port", required=True)
parser.add_argument("--heron-shell-binary", required=True)
parser.add_argument("--metrics-manager-port", required=True)
parser.add_argument("--cluster", required=True)
parser.add_argument("--role", required=True)
parser.add_argument("--environment", required=True)
parser.add_argument("--instance-classpath", required=True)
parser.add_argument("--metrics-sinks-config-file", required=True)
parser.add_argument("--scheduler-classpath", required=True)
parser.add_argument("--scheduler-port", required=True)
parser.add_argument("--python-instance-binary", required=True)
parser.add_argument("--cpp-instance-binary", required=True)
parser.add_argument("--metricscache-manager-classpath", required=True)
parser.add_argument("--metricscache-manager-master-port", required=True)
parser.add_argument("--metricscache-manager-stats-port", required=True)
parser.add_argument("--metricscache-manager-mode", required=False)
parser.add_argument("--is-stateful", required=True)
parser.add_argument("--checkpoint-manager-classpath", required=True)
parser.add_argument("--checkpoint-manager-port", required=True)
parser.add_argument("--checkpoint-manager-ram", type=long, required=True)
parser.add_argument("--stateful-config-file", required=True)
parser.add_argument("--health-manager-mode", required=True)
parser.add_argument("--health-manager-classpath", required=True)
parser.add_argument("--jvm-remote-debugger-ports", required=False,
help="ports to be used by a remote debugger for JVM instances")
parsed_args, unknown_args = parser.parse_known_args(args[1:])
if unknown_args:
Log.error('Unknown argument: %s' % unknown_args[0])
parser.print_help()
sys.exit(1)
return parsed_args | [
"def",
"parse_args",
"(",
"args",
")",
":",
"Log",
".",
"info",
"(",
"\"Input args: %r\"",
"%",
"args",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"--shard\"",
",",
"type",
"=",
"int",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--topology-name\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--topology-id\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--topology-defn-file\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--state-manager-connection\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--state-manager-root\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--state-manager-config-file\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--tmaster-binary\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--stmgr-binary\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--metrics-manager-classpath\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--instance-jvm-opts\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--classpath\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--master-port\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--tmaster-controller-port\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--tmaster-stats-port\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--heron-internals-config-file\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--override-config-file\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--component-ram-map\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--component-jvm-opts\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--pkg-type\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--topology-binary-file\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--heron-java-home\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--shell-port\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--heron-shell-binary\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--metrics-manager-port\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--cluster\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--role\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--environment\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--instance-classpath\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--metrics-sinks-config-file\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--scheduler-classpath\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--scheduler-port\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--python-instance-binary\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--cpp-instance-binary\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--metricscache-manager-classpath\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--metricscache-manager-master-port\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--metricscache-manager-stats-port\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--metricscache-manager-mode\"",
",",
"required",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"\"--is-stateful\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--checkpoint-manager-classpath\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--checkpoint-manager-port\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--checkpoint-manager-ram\"",
",",
"type",
"=",
"long",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--stateful-config-file\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--health-manager-mode\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--health-manager-classpath\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"\"--jvm-remote-debugger-ports\"",
",",
"required",
"=",
"False",
",",
"help",
"=",
"\"ports to be used by a remote debugger for JVM instances\"",
")",
"parsed_args",
",",
"unknown_args",
"=",
"parser",
".",
"parse_known_args",
"(",
"args",
"[",
"1",
":",
"]",
")",
"if",
"unknown_args",
":",
"Log",
".",
"error",
"(",
"'Unknown argument: %s'",
"%",
"unknown_args",
"[",
"0",
"]",
")",
"parser",
".",
"print_help",
"(",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"parsed_args"
] | 53.83871 | 23.241935 |
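One portability note: `type=long` exists only on Python 2 (Python 3 would use `int`). Otherwise this is plain `argparse`; the sketch below shows how `parse_known_args` splits recognized flags from everything else, which is what drives the error branch at the end (the flag values are made up).

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--shard", type=int, required=True)
# args[1:] above skips argv[0]; here we pass a hand-built list instead
parsed, unknown = parser.parse_known_args(["--shard", "3", "--bogus", "x"])
print(parsed.shard)   # 3
print(unknown)        # ['--bogus', 'x']  -> would trigger the error path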
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask | [
"def",
"polygon_to_mask",
"(",
"coords",
",",
"dims",
",",
"z",
"=",
"None",
")",
":",
"bounds",
"=",
"array",
"(",
"coords",
")",
".",
"astype",
"(",
"'int'",
")",
"path",
"=",
"Path",
"(",
"bounds",
")",
"grid",
"=",
"meshgrid",
"(",
"range",
"(",
"dims",
"[",
"1",
"]",
")",
",",
"range",
"(",
"dims",
"[",
"0",
"]",
")",
")",
"grid_flat",
"=",
"zip",
"(",
"grid",
"[",
"0",
"]",
".",
"ravel",
"(",
")",
",",
"grid",
"[",
"1",
"]",
".",
"ravel",
"(",
")",
")",
"mask",
"=",
"path",
".",
"contains_points",
"(",
"grid_flat",
")",
".",
"reshape",
"(",
"dims",
"[",
"0",
":",
"2",
"]",
")",
".",
"astype",
"(",
"'int'",
")",
"if",
"z",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"dims",
")",
"<",
"3",
":",
"raise",
"Exception",
"(",
"'Dims must have three-dimensions for embedding z-index'",
")",
"if",
"z",
">=",
"dims",
"[",
"2",
"]",
":",
"raise",
"Exception",
"(",
"'Z-index %g exceeds third dimension %g'",
"%",
"(",
"z",
",",
"dims",
"[",
"2",
"]",
")",
")",
"tmp",
"=",
"zeros",
"(",
"dims",
")",
"tmp",
"[",
":",
",",
":",
",",
"z",
"]",
"=",
"mask",
"mask",
"=",
"tmp",
"return",
"mask"
] | 31.666667 | 22.916667 |
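A hedged usage sketch of the same rasterization, assuming the imports the bare names imply (`array`, `meshgrid`, `zeros` from NumPy and `Path` from `matplotlib.path`). On Python 3, `zip()` is lazy and `Path.contains_points` needs a concrete sequence, so the grid coordinates are wrapped in `list()` here.

import numpy as np
from matplotlib.path import Path

def polygon_to_mask_sketch(coords, dims):
    bounds = np.array(coords).astype('int')
    path = Path(bounds)
    grid = np.meshgrid(range(dims[1]), range(dims[0]))
    grid_flat = list(zip(grid[0].ravel(), grid[1].ravel()))
    return path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')

square = [(1, 1), (1, 4), (4, 4), (4, 1)]
mask = polygon_to_mask_sketch(square, (6, 6))
print(mask.sum())   # number of grid points inside the square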
def apply_spectral_norm(x):
"""Normalizes x using the spectral norm.
The implementation follows Algorithm 1 of
https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
reshaped such that the number of channels (last-dimension) is the same.
Args:
x: Tensor with the last dimension equal to the number of filters.
Returns:
x: Tensor with the same shape as x normalized by the spectral norm.
assign_op: Op to be run after every step to update the vector "u".
"""
weights_shape = shape_list(x)
other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]
# Reshape into a 2-D matrix with outer size num_filters.
weights_2d = tf.reshape(x, (other, num_filters))
# v = Wu / ||W u||
with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
u = tf.get_variable(
"u", [num_filters, 1],
initializer=tf.truncated_normal_initializer(),
trainable=False)
v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))
# u_new = vW / ||v W||
u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))
# s = v*W*u
spectral_norm = tf.squeeze(
tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))
# set u equal to u_new in the next iteration.
assign_op = tf.assign(u, tf.transpose(u_new))
return tf.divide(x, spectral_norm), assign_op | [
"def",
"apply_spectral_norm",
"(",
"x",
")",
":",
"weights_shape",
"=",
"shape_list",
"(",
"x",
")",
"other",
",",
"num_filters",
"=",
"tf",
".",
"reduce_prod",
"(",
"weights_shape",
"[",
":",
"-",
"1",
"]",
")",
",",
"weights_shape",
"[",
"-",
"1",
"]",
"# Reshape into a 2-D matrix with outer size num_filters.",
"weights_2d",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"(",
"other",
",",
"num_filters",
")",
")",
"# v = Wu / ||W u||",
"with",
"tf",
".",
"variable_scope",
"(",
"\"u\"",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"u",
"=",
"tf",
".",
"get_variable",
"(",
"\"u\"",
",",
"[",
"num_filters",
",",
"1",
"]",
",",
"initializer",
"=",
"tf",
".",
"truncated_normal_initializer",
"(",
")",
",",
"trainable",
"=",
"False",
")",
"v",
"=",
"tf",
".",
"nn",
".",
"l2_normalize",
"(",
"tf",
".",
"matmul",
"(",
"weights_2d",
",",
"u",
")",
")",
"# u_new = vW / ||v W||",
"u_new",
"=",
"tf",
".",
"nn",
".",
"l2_normalize",
"(",
"tf",
".",
"matmul",
"(",
"tf",
".",
"transpose",
"(",
"v",
")",
",",
"weights_2d",
")",
")",
"# s = v*W*u",
"spectral_norm",
"=",
"tf",
".",
"squeeze",
"(",
"tf",
".",
"matmul",
"(",
"tf",
".",
"transpose",
"(",
"v",
")",
",",
"tf",
".",
"matmul",
"(",
"weights_2d",
",",
"tf",
".",
"transpose",
"(",
"u_new",
")",
")",
")",
")",
"# set u equal to u_new in the next iteration.",
"assign_op",
"=",
"tf",
".",
"assign",
"(",
"u",
",",
"tf",
".",
"transpose",
"(",
"u_new",
")",
")",
"return",
"tf",
".",
"divide",
"(",
"x",
",",
"spectral_norm",
")",
",",
"assign_op"
] | 34.447368 | 22.736842 |
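The body is one step of the power iteration from Miyato et al. (arXiv:1802.05957). The same update is easy to verify in plain NumPy, independent of any TensorFlow session; the sketch below iterates the step several times and compares the estimate against the true largest singular value.

import numpy as np

rng = np.random.default_rng(0)
W = rng.standard_normal((64, 8))    # stands in for "weights_2d" (other x num_filters)
u = rng.standard_normal((8, 1))     # persistent vector, a tf.Variable in the original

for _ in range(50):                 # the TF code performs one such step per call
    v = W @ u
    v /= np.linalg.norm(v)          # v = Wu / ||Wu||
    u = (v.T @ W).T
    u /= np.linalg.norm(u)          # u_new = vW / ||vW||

sigma = (v.T @ W @ u).item()        # s = v * W * u_new
print(sigma, np.linalg.svd(W, compute_uv=False)[0])   # the two should nearly agree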
def update_progressbar(self):
"""
Updates the progressbar by re-calculating the label.
It is not required to manually call this method since setting any of the
properties of this class will automatically trigger a re-calculation.
"""
n,nmin,nmax = self.wprogressbar.n,self.wprogressbar.nmin,self.wprogressbar.nmax
if (nmax-nmin)==0:
percent = 0 # prevents ZeroDivisionError
else:
percent = max(min((n-nmin)/(nmax-nmin),1.),0.)*100
dat = {"value":round(n,4),"n":round(n,4),"nmin":round(nmin,4),"nmax":round(nmax,4),"percent":round(percent,4),"p":round(percent,4)}
txt = self._label_progressbar.format(**dat)
self.wprogresslabel.label = txt | [
"def",
"update_progressbar",
"(",
"self",
")",
":",
"n",
",",
"nmin",
",",
"nmax",
"=",
"self",
".",
"wprogressbar",
".",
"n",
",",
"self",
".",
"wprogressbar",
".",
"nmin",
",",
"self",
".",
"wprogressbar",
".",
"nmax",
"if",
"(",
"nmax",
"-",
"nmin",
")",
"==",
"0",
":",
"percent",
"=",
"0",
"# prevents ZeroDivisionError",
"else",
":",
"percent",
"=",
"max",
"(",
"min",
"(",
"(",
"n",
"-",
"nmin",
")",
"/",
"(",
"nmax",
"-",
"nmin",
")",
",",
"1.",
")",
",",
"0.",
")",
"*",
"100",
"dat",
"=",
"{",
"\"value\"",
":",
"round",
"(",
"n",
",",
"4",
")",
",",
"\"n\"",
":",
"round",
"(",
"n",
",",
"4",
")",
",",
"\"nmin\"",
":",
"round",
"(",
"nmin",
",",
"4",
")",
",",
"\"nmax\"",
":",
"round",
"(",
"nmax",
",",
"4",
")",
",",
"\"percent\"",
":",
"round",
"(",
"percent",
",",
"4",
")",
",",
"\"p\"",
":",
"round",
"(",
"percent",
",",
"4",
")",
"}",
"txt",
"=",
"self",
".",
"_label_progressbar",
".",
"format",
"(",
"*",
"*",
"dat",
")",
"self",
".",
"wprogresslabel",
".",
"label",
"=",
"txt"
] | 49.666667 | 24.866667 |
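The label at the end is an ordinary `str.format` template filled from the `dat` dictionary, so a template may reference any of `value`, `n`, `nmin`, `nmax`, `percent`, or `p`. A tiny illustration (the template string itself is made up):

dat = {"value": 7.0, "n": 7.0, "nmin": 0.0, "nmax": 10.0, "percent": 70.0, "p": 70.0}
label = "{n}/{nmax} ({percent:.1f} %)".format(**dat)
print(label)   # 7.0/10.0 (70.0 %)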
def remove(id_):
"""
Remove the callback and its schedule
"""
with LOCK:
thread = REGISTRY.pop(id_, None)
if thread is not None:
thread.cancel()
return thread | [
"def",
"remove",
"(",
"id_",
")",
":",
"with",
"LOCK",
":",
"thread",
"=",
"REGISTRY",
".",
"pop",
"(",
"id_",
",",
"None",
")",
"if",
"thread",
"is",
"not",
"None",
":",
"thread",
".",
"cancel",
"(",
")",
"return",
"thread"
] | 19.8 | 13.6 |
def block(self, hash):
"""
Retrieves a json representation of **block**
:param hash: Hash of block to return representation for
:type hash: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.block(
... hash="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"account": "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000",
"work": "0000000000000000",
"source": "FA5B51D063BADDF345EFD7EF0D3C5FB115C85B1EF4CDE89D8B7DF3EAF60A04A4",
"representative": "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000",
"signature": "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"type": "open"
}
"""
hash = self._process_value(hash, 'block')
payload = {"hash": hash}
resp = self.call('block', payload)
return json.loads(resp['contents']) | [
"def",
"block",
"(",
"self",
",",
"hash",
")",
":",
"hash",
"=",
"self",
".",
"_process_value",
"(",
"hash",
",",
"'block'",
")",
"payload",
"=",
"{",
"\"hash\"",
":",
"hash",
"}",
"resp",
"=",
"self",
".",
"call",
"(",
"'block'",
",",
"payload",
")",
"return",
"json",
".",
"loads",
"(",
"resp",
"[",
"'contents'",
"]",
")"
] | 34.433333 | 28.7 |
def _add_addon_views(self):
"""
Registers declared addon's
"""
for addon in self._addon_managers:
addon_class = dynamic_class_import(addon)
if addon_class:
# Instantiate manager with appbuilder (self)
addon_class = addon_class(self)
try:
addon_class.pre_process()
addon_class.register_views()
addon_class.post_process()
self.addon_managers[addon] = addon_class
log.info(LOGMSG_INF_FAB_ADDON_ADDED.format(str(addon)))
except Exception as e:
log.exception(e)
log.error(LOGMSG_ERR_FAB_ADDON_PROCESS.format(addon, e)) | [
"def",
"_add_addon_views",
"(",
"self",
")",
":",
"for",
"addon",
"in",
"self",
".",
"_addon_managers",
":",
"addon_class",
"=",
"dynamic_class_import",
"(",
"addon",
")",
"if",
"addon_class",
":",
"# Instantiate manager with appbuilder (self)",
"addon_class",
"=",
"addon_class",
"(",
"self",
")",
"try",
":",
"addon_class",
".",
"pre_process",
"(",
")",
"addon_class",
".",
"register_views",
"(",
")",
"addon_class",
".",
"post_process",
"(",
")",
"self",
".",
"addon_managers",
"[",
"addon",
"]",
"=",
"addon_class",
"log",
".",
"info",
"(",
"LOGMSG_INF_FAB_ADDON_ADDED",
".",
"format",
"(",
"str",
"(",
"addon",
")",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"exception",
"(",
"e",
")",
"log",
".",
"error",
"(",
"LOGMSG_ERR_FAB_ADDON_PROCESS",
".",
"format",
"(",
"addon",
",",
"e",
")",
")"
] | 42.222222 | 11.444444 |
def _updateCallSetIds(self, variantFile):
"""
Updates the call set IDs based on the specified variant file.
"""
if len(self._callSetIdMap) == 0:
for sample in variantFile.header.samples:
self.addCallSetFromName(sample) | [
"def",
"_updateCallSetIds",
"(",
"self",
",",
"variantFile",
")",
":",
"if",
"len",
"(",
"self",
".",
"_callSetIdMap",
")",
"==",
"0",
":",
"for",
"sample",
"in",
"variantFile",
".",
"header",
".",
"samples",
":",
"self",
".",
"addCallSetFromName",
"(",
"sample",
")"
] | 38.857143 | 7.142857 |
def clear(self):
"""
Cleans up the manager. The manager can't be used after this method has
been called
"""
self._lock = None
self._ipopo_instance = None
self._context = None
self.requirement = None
self._value = None
self._field = None | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"_lock",
"=",
"None",
"self",
".",
"_ipopo_instance",
"=",
"None",
"self",
".",
"_context",
"=",
"None",
"self",
".",
"requirement",
"=",
"None",
"self",
".",
"_value",
"=",
"None",
"self",
".",
"_field",
"=",
"None"
] | 27.818182 | 13.818182 |
def from_xx(cls, xx):
"""
Create a new Language instance from a ISO639 string
:param xx: ISO639 as string
:return: Language instance with instance.xx() == xx if xx is valid else instance of UnknownLanguage
"""
xx = str(xx).lower()
if xx is 'unknown':
return UnknownLanguage(xx)
try:
return cls._from_xyz('ISO639', xx)
except NotALanguageException:
log.warning('Unknown ISO639: {}'.format(xx))
return UnknownLanguage(xx) | [
"def",
"from_xx",
"(",
"cls",
",",
"xx",
")",
":",
"xx",
"=",
"str",
"(",
"xx",
")",
".",
"lower",
"(",
")",
"if",
"xx",
"is",
"'unknown'",
":",
"return",
"UnknownLanguage",
"(",
"xx",
")",
"try",
":",
"return",
"cls",
".",
"_from_xyz",
"(",
"'ISO639'",
",",
"xx",
")",
"except",
"NotALanguageException",
":",
"log",
".",
"warning",
"(",
"'Unknown ISO639: {}'",
".",
"format",
"(",
"xx",
")",
")",
"return",
"UnknownLanguage",
"(",
"xx",
")"
] | 37.571429 | 13.714286 |
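One detail worth flagging: `xx is 'unknown'` compares object identity rather than equality, so it only succeeds when the interpreter happens to reuse the interned literal; an equality test is the safe spelling. A minimal check of just that point:

xx = str("Unknown").lower()
# Identity ("is") may evaluate False here even though the text matches,
# because .lower() returns a new, non-interned string object.
print(xx == 'unknown')   # True - the comparison the code intends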
def clean_response(response):
""" Cleans string quoting in response. """
response = re.sub("^['\"]", "", response)
response = re.sub("['\"]$", "", response)
return response | [
"def",
"clean_response",
"(",
"response",
")",
":",
"response",
"=",
"re",
".",
"sub",
"(",
"\"^['\\\"]\"",
",",
"\"\"",
",",
"response",
")",
"response",
"=",
"re",
".",
"sub",
"(",
"\"['\\\"]$\"",
",",
"\"\"",
",",
"response",
")",
"return",
"response"
] | 36.8 | 8.4 |
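The two substitutions each strip at most one quote character, one from the front and one from the back; a quick self-contained check:

import re

def clean_response(response):
    response = re.sub("^['\"]", "", response)
    response = re.sub("['\"]$", "", response)
    return response

print(clean_response('"hello"'))   # hello
print(clean_response("'partial"))  # partial  (only a matching edge quote is removed)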
def clear_cache(backend=None):
'''
.. versionadded:: 2015.5.0
Clear the fileserver cache from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). Executing this runner with no arguments will
clear the cache for all enabled VCS fileserver backends, but this
can be narrowed using the ``backend`` argument.
backend
Only clear the update lock for the specified backend(s). If all passed
backends start with a minus sign (``-``), then these backends will be
excluded from the enabled backends. However, if there is a mix of
backends with and without a minus sign (ex: ``backend=-roots,git``)
then the ones starting with a minus sign will be disregarded.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_cache
salt-run fileserver.clear_cache backend=git,hg
salt-run fileserver.clear_cache hg
salt-run fileserver.clear_cache -roots
'''
fileserver = salt.fileserver.Fileserver(__opts__)
cleared, errors = fileserver.clear_cache(back=backend)
ret = {}
if cleared:
ret['cleared'] = cleared
if errors:
ret['errors'] = errors
if not ret:
return 'No cache was cleared'
return ret | [
"def",
"clear_cache",
"(",
"backend",
"=",
"None",
")",
":",
"fileserver",
"=",
"salt",
".",
"fileserver",
".",
"Fileserver",
"(",
"__opts__",
")",
"cleared",
",",
"errors",
"=",
"fileserver",
".",
"clear_cache",
"(",
"back",
"=",
"backend",
")",
"ret",
"=",
"{",
"}",
"if",
"cleared",
":",
"ret",
"[",
"'cleared'",
"]",
"=",
"cleared",
"if",
"errors",
":",
"ret",
"[",
"'errors'",
"]",
"=",
"errors",
"if",
"not",
"ret",
":",
"return",
"'No cache was cleared'",
"return",
"ret"
] | 36 | 24.166667 |
def optimize_skyline(self, n_points=20, stiffness=2.0, method = 'SLSQP',
tol=0.03, regularization=10.0, **kwarks):
'''
optimize the trajectory of the merger rate 1./T_c to maximize the
coalescent likelihood.
parameters:
n_points -- number of pivots of the Tc interpolation object
stiffness -- penalty for rapid changes in log(Tc)
methods -- method used to optimize
tol -- optimization tolerance
regularization -- cost of moving logTc outsize of the range [-100,0]
merger rate is measured in branch length units, no
plausible rates should never be outside this window
'''
self.logger("Coalescent:optimize_skyline:... current LH: %f"%self.total_LH(),2)
from scipy.optimize import minimize
initial_Tc = self.Tc
tvals = np.linspace(self.tree_events[0,0], self.tree_events[-1,0], n_points)
def cost(logTc):
# cap log Tc to avoid under or overflow and nan in logs
self.set_Tc(np.exp(np.maximum(-200,np.minimum(100,logTc))), tvals)
neglogLH = -self.total_LH() + stiffness*np.sum(np.diff(logTc)**2) \
+ np.sum((logTc>0)*logTc*regularization)\
- np.sum((logTc<-100)*logTc*regularization)
return neglogLH
sol = minimize(cost, np.ones_like(tvals)*np.log(self.Tc.y.mean()), method=method, tol=tol)
if "success" in sol and sol["success"]:
dlogTc = 0.1
opt_logTc = sol['x']
dcost = []
for ii in range(len(opt_logTc)):
tmp = opt_logTc.copy()
tmp[ii]+=dlogTc
cost_plus = cost(tmp)
tmp[ii]-=2*dlogTc
cost_minus = cost(tmp)
dcost.append([cost_minus, cost_plus])
dcost = np.array(dcost)
optimal_cost = cost(opt_logTc)
self.confidence = -dlogTc/(2*optimal_cost - dcost[:,0] - dcost[:,1])
self.logger("Coalescent:optimize_skyline:...done. new LH: %f"%self.total_LH(),2)
else:
self.set_Tc(initial_Tc.y, T=initial_Tc.x)
self.logger("Coalescent:optimize_skyline:...failed:"+str(sol),0, warn=True) | [
"def",
"optimize_skyline",
"(",
"self",
",",
"n_points",
"=",
"20",
",",
"stiffness",
"=",
"2.0",
",",
"method",
"=",
"'SLSQP'",
",",
"tol",
"=",
"0.03",
",",
"regularization",
"=",
"10.0",
",",
"*",
"*",
"kwarks",
")",
":",
"self",
".",
"logger",
"(",
"\"Coalescent:optimize_skyline:... current LH: %f\"",
"%",
"self",
".",
"total_LH",
"(",
")",
",",
"2",
")",
"from",
"scipy",
".",
"optimize",
"import",
"minimize",
"initial_Tc",
"=",
"self",
".",
"Tc",
"tvals",
"=",
"np",
".",
"linspace",
"(",
"self",
".",
"tree_events",
"[",
"0",
",",
"0",
"]",
",",
"self",
".",
"tree_events",
"[",
"-",
"1",
",",
"0",
"]",
",",
"n_points",
")",
"def",
"cost",
"(",
"logTc",
")",
":",
"# cap log Tc to avoid under or overflow and nan in logs",
"self",
".",
"set_Tc",
"(",
"np",
".",
"exp",
"(",
"np",
".",
"maximum",
"(",
"-",
"200",
",",
"np",
".",
"minimum",
"(",
"100",
",",
"logTc",
")",
")",
")",
",",
"tvals",
")",
"neglogLH",
"=",
"-",
"self",
".",
"total_LH",
"(",
")",
"+",
"stiffness",
"*",
"np",
".",
"sum",
"(",
"np",
".",
"diff",
"(",
"logTc",
")",
"**",
"2",
")",
"+",
"np",
".",
"sum",
"(",
"(",
"logTc",
">",
"0",
")",
"*",
"logTc",
"*",
"regularization",
")",
"-",
"np",
".",
"sum",
"(",
"(",
"logTc",
"<",
"-",
"100",
")",
"*",
"logTc",
"*",
"regularization",
")",
"return",
"neglogLH",
"sol",
"=",
"minimize",
"(",
"cost",
",",
"np",
".",
"ones_like",
"(",
"tvals",
")",
"*",
"np",
".",
"log",
"(",
"self",
".",
"Tc",
".",
"y",
".",
"mean",
"(",
")",
")",
",",
"method",
"=",
"method",
",",
"tol",
"=",
"tol",
")",
"if",
"\"success\"",
"in",
"sol",
"and",
"sol",
"[",
"\"success\"",
"]",
":",
"dlogTc",
"=",
"0.1",
"opt_logTc",
"=",
"sol",
"[",
"'x'",
"]",
"dcost",
"=",
"[",
"]",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"opt_logTc",
")",
")",
":",
"tmp",
"=",
"opt_logTc",
".",
"copy",
"(",
")",
"tmp",
"[",
"ii",
"]",
"+=",
"dlogTc",
"cost_plus",
"=",
"cost",
"(",
"tmp",
")",
"tmp",
"[",
"ii",
"]",
"-=",
"2",
"*",
"dlogTc",
"cost_minus",
"=",
"cost",
"(",
"tmp",
")",
"dcost",
".",
"append",
"(",
"[",
"cost_minus",
",",
"cost_plus",
"]",
")",
"dcost",
"=",
"np",
".",
"array",
"(",
"dcost",
")",
"optimal_cost",
"=",
"cost",
"(",
"opt_logTc",
")",
"self",
".",
"confidence",
"=",
"-",
"dlogTc",
"/",
"(",
"2",
"*",
"optimal_cost",
"-",
"dcost",
"[",
":",
",",
"0",
"]",
"-",
"dcost",
"[",
":",
",",
"1",
"]",
")",
"self",
".",
"logger",
"(",
"\"Coalescent:optimize_skyline:...done. new LH: %f\"",
"%",
"self",
".",
"total_LH",
"(",
")",
",",
"2",
")",
"else",
":",
"self",
".",
"set_Tc",
"(",
"initial_Tc",
".",
"y",
",",
"T",
"=",
"initial_Tc",
".",
"x",
")",
"self",
".",
"logger",
"(",
"\"Coalescent:optimize_skyline:...failed:\"",
"+",
"str",
"(",
"sol",
")",
",",
"0",
",",
"warn",
"=",
"True",
")"
] | 50.26087 | 23.608696 |
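Restated as a formula (taken directly from the nested `cost` above), the optimizer minimizes, over the log merger rates $x_i = \log T_{c,i}$ at the $n$ pivot times,

$$C(x) = -\log \mathrm{LH}(T_c) + s \sum_i \left(x_{i+1} - x_i\right)^2 + r \sum_i x_i\,[x_i > 0] - r \sum_i x_i\,[x_i < -100],$$

with $s$ = `stiffness` and $r$ = `regularization`, so values of $\log T_c$ outside $[-100, 0]$ are penalized linearly while rapid changes between pivots are penalized quadratically.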
def make_factor(var, e, bn):
"""Return the factor for var in bn's joint distribution given e.
That is, bn's full joint distribution, projected to accord with e,
is the pointwise product of these factors for bn's variables."""
node = bn.variable_node(var)
vars = [X for X in [var] + node.parents if X not in e]
cpt = dict((event_values(e1, vars), node.p(e1[var], e1))
for e1 in all_events(vars, bn, e))
return Factor(vars, cpt) | [
"def",
"make_factor",
"(",
"var",
",",
"e",
",",
"bn",
")",
":",
"node",
"=",
"bn",
".",
"variable_node",
"(",
"var",
")",
"vars",
"=",
"[",
"X",
"for",
"X",
"in",
"[",
"var",
"]",
"+",
"node",
".",
"parents",
"if",
"X",
"not",
"in",
"e",
"]",
"cpt",
"=",
"dict",
"(",
"(",
"event_values",
"(",
"e1",
",",
"vars",
")",
",",
"node",
".",
"p",
"(",
"e1",
"[",
"var",
"]",
",",
"e1",
")",
")",
"for",
"e1",
"in",
"all_events",
"(",
"vars",
",",
"bn",
",",
"e",
")",
")",
"return",
"Factor",
"(",
"vars",
",",
"cpt",
")"
] | 51.222222 | 12.111111 |
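A hedged, self-contained illustration of the factor this builds: for the textbook burglary network, calling it with `var='Alarm'` and evidence `e={'Earthquake': False}` yields a factor over `['Alarm', 'Burglary']` whose CPT is the alarm table with the Earthquake column fixed. The probabilities below are the usual AIMA values, reproduced only for illustration; the real function would obtain them through `node.p` and `all_events`.

# P(Alarm=True | Burglary, Earthquake) for the classic burglary network
p_alarm = {(True, True): 0.95, (True, False): 0.94,
           (False, True): 0.29, (False, False): 0.001}

e = {'Earthquake': False}
factor_vars = ['Alarm', 'Burglary']          # 'Earthquake' is fixed by e, so it drops out
cpt = {}
for alarm in (True, False):
    for burglary in (True, False):
        p_true = p_alarm[(burglary, e['Earthquake'])]
        cpt[(alarm, burglary)] = p_true if alarm else 1.0 - p_true

print(factor_vars)
print(cpt[(True, True)])   # 0.94 == P(Alarm | Burglary, not Earthquake)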
def delete_user(self, username, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html>`_
:arg username: username
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
"""
if username in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'username'.")
return self.transport.perform_request(
"DELETE", _make_path("_security", "user", username), params=params
) | [
"def",
"delete_user",
"(",
"self",
",",
"username",
",",
"params",
"=",
"None",
")",
":",
"if",
"username",
"in",
"SKIP_IN_PATH",
":",
"raise",
"ValueError",
"(",
"\"Empty value passed for a required argument 'username'.\"",
")",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"\"DELETE\"",
",",
"_make_path",
"(",
"\"_security\"",
",",
"\"user\"",
",",
"username",
")",
",",
"params",
"=",
"params",
")"
] | 49.625 | 26 |
def bulk_launch(self, jobs=None, filter=None, all=False): # pylint: disable=redefined-builtin
"""Bulk launch a set of jobs.
:param jobs: :class:`jobs.Job <jobs.Job>` list
:param filter: (optional) Filters to apply as a string list.
:param all: (optional) Apply to all if bool `True`.
"""
json = None
if jobs is not None:
schema = JobSchema(exclude=('id', 'status', 'package_name', 'config_name', 'device_name', 'result_id', 'user_id', 'created', 'updated', 'automatic'))
jobs_json = self.service.encode(schema, jobs, many=True)
json = {self.RESOURCE: jobs_json}
schema = JobSchema()
resp = self.service.post(self.base,
params={'bulk': 'launch', 'filter': filter, 'all': all}, json=json)
return self.service.decode(schema, resp, many=True) | [
"def",
"bulk_launch",
"(",
"self",
",",
"jobs",
"=",
"None",
",",
"filter",
"=",
"None",
",",
"all",
"=",
"False",
")",
":",
"# pylint: disable=redefined-builtin",
"json",
"=",
"None",
"if",
"jobs",
"is",
"not",
"None",
":",
"schema",
"=",
"JobSchema",
"(",
"exclude",
"=",
"(",
"'id'",
",",
"'status'",
",",
"'package_name'",
",",
"'config_name'",
",",
"'device_name'",
",",
"'result_id'",
",",
"'user_id'",
",",
"'created'",
",",
"'updated'",
",",
"'automatic'",
")",
")",
"jobs_json",
"=",
"self",
".",
"service",
".",
"encode",
"(",
"schema",
",",
"jobs",
",",
"many",
"=",
"True",
")",
"json",
"=",
"{",
"self",
".",
"RESOURCE",
":",
"jobs_json",
"}",
"schema",
"=",
"JobSchema",
"(",
")",
"resp",
"=",
"self",
".",
"service",
".",
"post",
"(",
"self",
".",
"base",
",",
"params",
"=",
"{",
"'bulk'",
":",
"'launch'",
",",
"'filter'",
":",
"filter",
",",
"'all'",
":",
"all",
"}",
",",
"json",
"=",
"json",
")",
"return",
"self",
".",
"service",
".",
"decode",
"(",
"schema",
",",
"resp",
",",
"many",
"=",
"True",
")"
] | 51.352941 | 27.941176 |
def _max_weight_state(states: Iterable[TensorProductState]) -> Union[None, TensorProductState]:
"""Construct a TensorProductState by taking the single-qubit state at each
qubit position.
This function will return ``None`` if the input states are not compatible
For example, the max_weight_state of ["(+X, q0)", "(-Z, q1)"] is "(+X, q0; -Z q1)". Asking for
the max weight state of something like ["(+X, q0)", "(+Z, q0)"] will return None.
"""
mapping = dict() # type: Dict[int, _OneQState]
for state in states:
for oneq_state in state.states:
if oneq_state.qubit in mapping:
if mapping[oneq_state.qubit] != oneq_state:
return None
else:
mapping[oneq_state.qubit] = oneq_state
return TensorProductState(list(mapping.values())) | [
"def",
"_max_weight_state",
"(",
"states",
":",
"Iterable",
"[",
"TensorProductState",
"]",
")",
"->",
"Union",
"[",
"None",
",",
"TensorProductState",
"]",
":",
"mapping",
"=",
"dict",
"(",
")",
"# type: Dict[int, _OneQState]",
"for",
"state",
"in",
"states",
":",
"for",
"oneq_state",
"in",
"state",
".",
"states",
":",
"if",
"oneq_state",
".",
"qubit",
"in",
"mapping",
":",
"if",
"mapping",
"[",
"oneq_state",
".",
"qubit",
"]",
"!=",
"oneq_state",
":",
"return",
"None",
"else",
":",
"mapping",
"[",
"oneq_state",
".",
"qubit",
"]",
"=",
"oneq_state",
"return",
"TensorProductState",
"(",
"list",
"(",
"mapping",
".",
"values",
"(",
")",
")",
")"
] | 46.111111 | 22.5 |
def _dist_corr(dist, phi1, phi2, phi3):
"""
Generic distance-decaying correlation function
:param dist: Distance between catchment centrolds in km
:type dist: float
:param phi1: Decay function parameters 1
:type phi1: float
:param phi2: Decay function parameters 2
:type phi2: float
:param phi3: Decay function parameters 3
:type phi3: float
:return: Correlation coefficient, r
:rtype: float
"""
return phi1 * exp(-phi2 * dist) + (1 - phi1) * exp(-phi3 * dist) | [
"def",
"_dist_corr",
"(",
"dist",
",",
"phi1",
",",
"phi2",
",",
"phi3",
")",
":",
"return",
"phi1",
"*",
"exp",
"(",
"-",
"phi2",
"*",
"dist",
")",
"+",
"(",
"1",
"-",
"phi1",
")",
"*",
"exp",
"(",
"-",
"phi3",
"*",
"dist",
")"
] | 34.875 | 13.5 |
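Written out, this evaluates

$$r(d) = \phi_1\, e^{-\phi_2 d} + (1 - \phi_1)\, e^{-\phi_3 d},$$

so the correlation equals 1 at zero distance and decays as a mixture of two exponentials with inter-centroid distance $d$ in km.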
def update_from_delta(self, delta, *args):
"""Apply the changes described in the dict ``delta``."""
for (node, extant) in delta.get('nodes', {}).items():
if extant:
if node in delta.get('node_val', {}) \
and 'location' in delta['node_val'][node]\
and node not in self.pawn:
self.add_pawn(node)
elif node not in self.spot:
self.add_spot(node)
spot = self.spot[node]
if '_x' not in spot.place or '_y' not in spot.place:
self.spots_unposd.append(spot)
else:
if node in self.pawn:
self.rm_pawn(node)
if node in self.spot:
self.rm_spot(node)
for (node, stats) in delta.get('node_val', {}).items():
if node in self.spot:
spot = self.spot[node]
x = stats.get('_x')
y = stats.get('_y')
if x is not None:
spot.x = x * self.width
if y is not None:
spot.y = y * self.height
if '_image_paths' in stats:
spot.paths = stats['_image_paths'] or spot.default_image_paths
elif node in self.pawn:
pawn = self.pawn[node]
if 'location' in stats:
pawn.loc_name = stats['location']
if '_image_paths' in stats:
pawn.paths = stats['_image_paths'] or pawn.default_image_paths
else:
Logger.warning(
"Board: diff tried to change stats of node {} "
"but I don't have a widget for it".format(node)
)
for (orig, dests) in delta.get('edges', {}).items():
for (dest, extant) in dests.items():
if extant and (orig not in self.arrow or dest not in self.arrow[orig]):
self.add_arrow(orig, dest)
elif not extant and orig in self.arrow and dest in self.arrow[orig]:
self.rm_arrow(orig, dest) | [
"def",
"update_from_delta",
"(",
"self",
",",
"delta",
",",
"*",
"args",
")",
":",
"for",
"(",
"node",
",",
"extant",
")",
"in",
"delta",
".",
"get",
"(",
"'nodes'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"if",
"extant",
":",
"if",
"node",
"in",
"delta",
".",
"get",
"(",
"'node_val'",
",",
"{",
"}",
")",
"and",
"'location'",
"in",
"delta",
"[",
"'node_val'",
"]",
"[",
"node",
"]",
"and",
"node",
"not",
"in",
"self",
".",
"pawn",
":",
"self",
".",
"add_pawn",
"(",
"node",
")",
"elif",
"node",
"not",
"in",
"self",
".",
"spot",
":",
"self",
".",
"add_spot",
"(",
"node",
")",
"spot",
"=",
"self",
".",
"spot",
"[",
"node",
"]",
"if",
"'_x'",
"not",
"in",
"spot",
".",
"place",
"or",
"'_y'",
"not",
"in",
"spot",
".",
"place",
":",
"self",
".",
"spots_unposd",
".",
"append",
"(",
"spot",
")",
"else",
":",
"if",
"node",
"in",
"self",
".",
"pawn",
":",
"self",
".",
"rm_pawn",
"(",
"node",
")",
"if",
"node",
"in",
"self",
".",
"spot",
":",
"self",
".",
"rm_spot",
"(",
"node",
")",
"for",
"(",
"node",
",",
"stats",
")",
"in",
"delta",
".",
"get",
"(",
"'node_val'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"if",
"node",
"in",
"self",
".",
"spot",
":",
"spot",
"=",
"self",
".",
"spot",
"[",
"node",
"]",
"x",
"=",
"stats",
".",
"get",
"(",
"'_x'",
")",
"y",
"=",
"stats",
".",
"get",
"(",
"'_y'",
")",
"if",
"x",
"is",
"not",
"None",
":",
"spot",
".",
"x",
"=",
"x",
"*",
"self",
".",
"width",
"if",
"y",
"is",
"not",
"None",
":",
"spot",
".",
"y",
"=",
"y",
"*",
"self",
".",
"height",
"if",
"'_image_paths'",
"in",
"stats",
":",
"spot",
".",
"paths",
"=",
"stats",
"[",
"'_image_paths'",
"]",
"or",
"spot",
".",
"default_image_paths",
"elif",
"node",
"in",
"self",
".",
"pawn",
":",
"pawn",
"=",
"self",
".",
"pawn",
"[",
"node",
"]",
"if",
"'location'",
"in",
"stats",
":",
"pawn",
".",
"loc_name",
"=",
"stats",
"[",
"'location'",
"]",
"if",
"'_image_paths'",
"in",
"stats",
":",
"pawn",
".",
"paths",
"=",
"stats",
"[",
"'_image_paths'",
"]",
"or",
"pawn",
".",
"default_image_paths",
"else",
":",
"Logger",
".",
"warning",
"(",
"\"Board: diff tried to change stats of node {} \"",
"\"but I don't have a widget for it\"",
".",
"format",
"(",
"node",
")",
")",
"for",
"(",
"orig",
",",
"dests",
")",
"in",
"delta",
".",
"get",
"(",
"'edges'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"for",
"(",
"dest",
",",
"extant",
")",
"in",
"dests",
".",
"items",
"(",
")",
":",
"if",
"extant",
"and",
"(",
"orig",
"not",
"in",
"self",
".",
"arrow",
"or",
"dest",
"not",
"in",
"self",
".",
"arrow",
"[",
"orig",
"]",
")",
":",
"self",
".",
"add_arrow",
"(",
"orig",
",",
"dest",
")",
"elif",
"not",
"extant",
"and",
"orig",
"in",
"self",
".",
"arrow",
"and",
"dest",
"in",
"self",
".",
"arrow",
"[",
"orig",
"]",
":",
"self",
".",
"rm_arrow",
"(",
"orig",
",",
"dest",
")"
] | 46.869565 | 12.826087 |
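For orientation, the delta consumed here is a plain nested dict; the keys the method actually reads suggest a shape like the following (all names and values are purely illustrative):

delta = {
    'nodes':    {'kobold': True, 'old_place': False},            # add / remove widgets
    'node_val': {'kobold': {'location': 'cave', '_image_paths': None},
                 'cave':   {'_x': 0.25, '_y': 0.75}},            # per-node stat updates
    'edges':    {'cave': {'village': True}},                     # add / remove arrows
}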
def _generate_ndarray_function_code(handle, name, func_name, signature_only=False):
"""Generate function for ndarray op by handle and function name."""
real_name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
key_var_num_args = ctypes.c_char_p()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
handle, ctypes.byref(real_name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(key_var_num_args),
ctypes.byref(ret_type)))
narg = int(num_args.value)
arg_names = [py_str(arg_names[i]) for i in range(narg)]
arg_types = [py_str(arg_types[i]) for i in range(narg)]
key_var_num_args = py_str(key_var_num_args.value)
ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
doc_str = _build_doc(name,
py_str(desc.value),
arg_names,
arg_types,
[py_str(arg_descs[i]) for i in range(narg)],
key_var_num_args,
ret_type)
dtype_name = None
arr_name = None
ndsignature = []
signature = []
ndarg_names = []
kwarg_names = []
for i in range(narg):
name, atype = arg_names[i], arg_types[i]
if name == 'dtype':
dtype_name = name
signature.append('%s=_Null'%name)
elif atype.startswith('NDArray') or atype.startswith('Symbol'):
assert not arr_name, \
"Op can only have one argument with variable " \
"size and it must be the last argument."
if atype.endswith('[]'):
ndsignature.append('*%s'%name)
arr_name = name
else:
ndsignature.append('%s=None'%name)
ndarg_names.append(name)
else:
signature.append('%s=_Null'%name)
kwarg_names.append(name)
signature.append('out=None')
signature.append('name=None')
signature.append('**kwargs')
signature = ndsignature + signature
code = []
if arr_name:
code.append("""
def %s(*%s, **kwargs):"""%(func_name, arr_name))
if not signature_only:
code.append("""
ndargs = []
for i in {}:
assert isinstance(i, NDArrayBase), \\
"Positional arguments must have NDArray type, " \\
"but got %s"%str(i)
ndargs.append(i)""".format(arr_name))
if dtype_name is not None:
code.append("""
if '%s' in kwargs:
kwargs['%s'] = _np.dtype(kwargs['%s']).name"""%(
dtype_name, dtype_name, dtype_name))
code.append("""
_ = kwargs.pop('name', None)
out = kwargs.pop('out', None)
keys = list(kwargs.keys())
vals = list(kwargs.values())""")
else:
code.append("""
def %s(%s):"""%(func_name, ', '.join(signature)))
if not signature_only:
code.append("""
ndargs = []
keys = list(kwargs.keys())
vals = list(kwargs.values())""")
# NDArray args
for name in ndarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if {name} is not None:
assert isinstance({name}, NDArrayBase), \\
"Argument {name} must have NDArray type, but got %s"%str({name})
ndargs.append({name})""".format(name=name))
# kwargs
for name in kwarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(%s)"""%(name, name, name))
# dtype
if dtype_name is not None:
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(_np.dtype(%s).name)"""%(dtype_name, dtype_name, dtype_name))
if not signature_only:
code.append("""
return _imperative_invoke(%d, ndargs, keys, vals, out)"""%(
handle.value))
else:
code.append("""
return (0,)""")
doc_str_lines = _os.linesep+''.join([' '+s if s.strip() else s
for s in 'r"""{doc_str}"""'.format(doc_str=doc_str)
.splitlines(True)])
code.insert(1, doc_str_lines)
return ''.join(code), doc_str | [
"def",
"_generate_ndarray_function_code",
"(",
"handle",
",",
"name",
",",
"func_name",
",",
"signature_only",
"=",
"False",
")",
":",
"real_name",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"desc",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"num_args",
"=",
"mx_uint",
"(",
")",
"arg_names",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"arg_types",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"arg_descs",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"key_var_num_args",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"ret_type",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXSymbolGetAtomicSymbolInfo",
"(",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"real_name",
")",
",",
"ctypes",
".",
"byref",
"(",
"desc",
")",
",",
"ctypes",
".",
"byref",
"(",
"num_args",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_names",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_types",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_descs",
")",
",",
"ctypes",
".",
"byref",
"(",
"key_var_num_args",
")",
",",
"ctypes",
".",
"byref",
"(",
"ret_type",
")",
")",
")",
"narg",
"=",
"int",
"(",
"num_args",
".",
"value",
")",
"arg_names",
"=",
"[",
"py_str",
"(",
"arg_names",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"narg",
")",
"]",
"arg_types",
"=",
"[",
"py_str",
"(",
"arg_types",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"narg",
")",
"]",
"key_var_num_args",
"=",
"py_str",
"(",
"key_var_num_args",
".",
"value",
")",
"ret_type",
"=",
"py_str",
"(",
"ret_type",
".",
"value",
")",
"if",
"ret_type",
".",
"value",
"is",
"not",
"None",
"else",
"''",
"doc_str",
"=",
"_build_doc",
"(",
"name",
",",
"py_str",
"(",
"desc",
".",
"value",
")",
",",
"arg_names",
",",
"arg_types",
",",
"[",
"py_str",
"(",
"arg_descs",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"narg",
")",
"]",
",",
"key_var_num_args",
",",
"ret_type",
")",
"dtype_name",
"=",
"None",
"arr_name",
"=",
"None",
"ndsignature",
"=",
"[",
"]",
"signature",
"=",
"[",
"]",
"ndarg_names",
"=",
"[",
"]",
"kwarg_names",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"narg",
")",
":",
"name",
",",
"atype",
"=",
"arg_names",
"[",
"i",
"]",
",",
"arg_types",
"[",
"i",
"]",
"if",
"name",
"==",
"'dtype'",
":",
"dtype_name",
"=",
"name",
"signature",
".",
"append",
"(",
"'%s=_Null'",
"%",
"name",
")",
"elif",
"atype",
".",
"startswith",
"(",
"'NDArray'",
")",
"or",
"atype",
".",
"startswith",
"(",
"'Symbol'",
")",
":",
"assert",
"not",
"arr_name",
",",
"\"Op can only have one argument with variable \"",
"\"size and it must be the last argument.\"",
"if",
"atype",
".",
"endswith",
"(",
"'[]'",
")",
":",
"ndsignature",
".",
"append",
"(",
"'*%s'",
"%",
"name",
")",
"arr_name",
"=",
"name",
"else",
":",
"ndsignature",
".",
"append",
"(",
"'%s=None'",
"%",
"name",
")",
"ndarg_names",
".",
"append",
"(",
"name",
")",
"else",
":",
"signature",
".",
"append",
"(",
"'%s=_Null'",
"%",
"name",
")",
"kwarg_names",
".",
"append",
"(",
"name",
")",
"signature",
".",
"append",
"(",
"'out=None'",
")",
"signature",
".",
"append",
"(",
"'name=None'",
")",
"signature",
".",
"append",
"(",
"'**kwargs'",
")",
"signature",
"=",
"ndsignature",
"+",
"signature",
"code",
"=",
"[",
"]",
"if",
"arr_name",
":",
"code",
".",
"append",
"(",
"\"\"\"\ndef %s(*%s, **kwargs):\"\"\"",
"%",
"(",
"func_name",
",",
"arr_name",
")",
")",
"if",
"not",
"signature_only",
":",
"code",
".",
"append",
"(",
"\"\"\"\n ndargs = []\n for i in {}:\n assert isinstance(i, NDArrayBase), \\\\\n \"Positional arguments must have NDArray type, \" \\\\\n \"but got %s\"%str(i)\n ndargs.append(i)\"\"\"",
".",
"format",
"(",
"arr_name",
")",
")",
"if",
"dtype_name",
"is",
"not",
"None",
":",
"code",
".",
"append",
"(",
"\"\"\"\n if '%s' in kwargs:\n kwargs['%s'] = _np.dtype(kwargs['%s']).name\"\"\"",
"%",
"(",
"dtype_name",
",",
"dtype_name",
",",
"dtype_name",
")",
")",
"code",
".",
"append",
"(",
"\"\"\"\n _ = kwargs.pop('name', None)\n out = kwargs.pop('out', None)\n keys = list(kwargs.keys())\n vals = list(kwargs.values())\"\"\"",
")",
"else",
":",
"code",
".",
"append",
"(",
"\"\"\"\ndef %s(%s):\"\"\"",
"%",
"(",
"func_name",
",",
"', '",
".",
"join",
"(",
"signature",
")",
")",
")",
"if",
"not",
"signature_only",
":",
"code",
".",
"append",
"(",
"\"\"\"\n ndargs = []\n keys = list(kwargs.keys())\n vals = list(kwargs.values())\"\"\"",
")",
"# NDArray args",
"for",
"name",
"in",
"ndarg_names",
":",
"# pylint: disable=redefined-argument-from-local",
"code",
".",
"append",
"(",
"\"\"\"\n if {name} is not None:\n assert isinstance({name}, NDArrayBase), \\\\\n \"Argument {name} must have NDArray type, but got %s\"%str({name})\n ndargs.append({name})\"\"\"",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"# kwargs",
"for",
"name",
"in",
"kwarg_names",
":",
"# pylint: disable=redefined-argument-from-local",
"code",
".",
"append",
"(",
"\"\"\"\n if %s is not _Null:\n keys.append('%s')\n vals.append(%s)\"\"\"",
"%",
"(",
"name",
",",
"name",
",",
"name",
")",
")",
"# dtype",
"if",
"dtype_name",
"is",
"not",
"None",
":",
"code",
".",
"append",
"(",
"\"\"\"\n if %s is not _Null:\n keys.append('%s')\n vals.append(_np.dtype(%s).name)\"\"\"",
"%",
"(",
"dtype_name",
",",
"dtype_name",
",",
"dtype_name",
")",
")",
"if",
"not",
"signature_only",
":",
"code",
".",
"append",
"(",
"\"\"\"\n return _imperative_invoke(%d, ndargs, keys, vals, out)\"\"\"",
"%",
"(",
"handle",
".",
"value",
")",
")",
"else",
":",
"code",
".",
"append",
"(",
"\"\"\"\n return (0,)\"\"\"",
")",
"doc_str_lines",
"=",
"_os",
".",
"linesep",
"+",
"''",
".",
"join",
"(",
"[",
"' '",
"+",
"s",
"if",
"s",
".",
"strip",
"(",
")",
"else",
"s",
"for",
"s",
"in",
"'r\"\"\"{doc_str}\"\"\"'",
".",
"format",
"(",
"doc_str",
"=",
"doc_str",
")",
".",
"splitlines",
"(",
"True",
")",
"]",
")",
"code",
".",
"insert",
"(",
"1",
",",
"doc_str_lines",
")",
"return",
"''",
".",
"join",
"(",
"code",
")",
",",
"doc_str"
] | 36.290323 | 12.685484 |
async def create_signing_key(self, seed: str = None, metadata: dict = None) -> KeyInfo:
"""
Create a new signing key pair.
Raise WalletState if wallet is closed, ExtantRecord if verification key already exists.
:param seed: optional seed allowing deterministic key creation
:param metadata: optional metadata to store with key pair
:return: KeyInfo for new key pair
"""
LOGGER.debug('Wallet.create_signing_key >>> seed: [SEED], metadata: %s', metadata)
if not self.handle:
LOGGER.debug('Wallet.create_signing_key <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
try:
verkey = await crypto.create_key(self.handle, json.dumps({'seed': seed} if seed else {}))
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.WalletItemAlreadyExists:
LOGGER.debug('Wallet.create_signing_key <!< Verification key already present in wallet %s', self.name)
raise ExtantRecord('Verification key already present in wallet {}'.format(self.name))
LOGGER.debug('Wallet.create_signing_key <!< indy-sdk raised error %s', x_indy.error_code)
raise
await crypto.set_key_metadata(self.handle, verkey, json.dumps(metadata or {})) # coerce None to empty
rv = KeyInfo(verkey, metadata or {})
LOGGER.debug('Wallet.create_signing_key <<< %s', rv)
return rv | [
"async",
"def",
"create_signing_key",
"(",
"self",
",",
"seed",
":",
"str",
"=",
"None",
",",
"metadata",
":",
"dict",
"=",
"None",
")",
"->",
"KeyInfo",
":",
"LOGGER",
".",
"debug",
"(",
"'Wallet.create_signing_key >>> seed: [SEED], metadata: %s'",
",",
"metadata",
")",
"if",
"not",
"self",
".",
"handle",
":",
"LOGGER",
".",
"debug",
"(",
"'Wallet.create_signing_key <!< Wallet %s is closed'",
",",
"self",
".",
"name",
")",
"raise",
"WalletState",
"(",
"'Wallet {} is closed'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"try",
":",
"verkey",
"=",
"await",
"crypto",
".",
"create_key",
"(",
"self",
".",
"handle",
",",
"json",
".",
"dumps",
"(",
"{",
"'seed'",
":",
"seed",
"}",
"if",
"seed",
"else",
"{",
"}",
")",
")",
"except",
"IndyError",
"as",
"x_indy",
":",
"if",
"x_indy",
".",
"error_code",
"==",
"ErrorCode",
".",
"WalletItemAlreadyExists",
":",
"LOGGER",
".",
"debug",
"(",
"'Wallet.create_signing_key <!< Verification key already present in wallet %s'",
",",
"self",
".",
"name",
")",
"raise",
"ExtantRecord",
"(",
"'Verification key already present in wallet {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"LOGGER",
".",
"debug",
"(",
"'Wallet.create_signing_key <!< indy-sdk raised error %s'",
",",
"x_indy",
".",
"error_code",
")",
"raise",
"await",
"crypto",
".",
"set_key_metadata",
"(",
"self",
".",
"handle",
",",
"verkey",
",",
"json",
".",
"dumps",
"(",
"metadata",
"or",
"{",
"}",
")",
")",
"# coerce None to empty",
"rv",
"=",
"KeyInfo",
"(",
"verkey",
",",
"metadata",
"or",
"{",
"}",
")",
"LOGGER",
".",
"debug",
"(",
"'Wallet.create_signing_key <<< %s'",
",",
"rv",
")",
"return",
"rv"
] | 47.709677 | 33.709677 |
def only_once(action=None):
"""
.. deprecated:: 0.5.0
Use :func:`when_not` in combination with :func:`set_state` instead. This
handler is deprecated because it might actually be
`called multiple times <https://github.com/juju-solutions/charms.reactive/issues/22>`_.
Register the decorated function to be run once, and only once.
This decorator will never cause arguments to be passed to the handler.
"""
if action is None:
# allow to be used as @only_once or @only_once()
return only_once
action_id = _action_id(action)
handler = Handler.get(action)
handler.add_predicate(lambda: not was_invoked(action_id))
handler.add_post_callback(partial(mark_invoked, action_id))
return action | [
"def",
"only_once",
"(",
"action",
"=",
"None",
")",
":",
"if",
"action",
"is",
"None",
":",
"# allow to be used as @only_once or @only_once()",
"return",
"only_once",
"action_id",
"=",
"_action_id",
"(",
"action",
")",
"handler",
"=",
"Handler",
".",
"get",
"(",
"action",
")",
"handler",
".",
"add_predicate",
"(",
"lambda",
":",
"not",
"was_invoked",
"(",
"action_id",
")",
")",
"handler",
".",
"add_post_callback",
"(",
"partial",
"(",
"mark_invoked",
",",
"action_id",
")",
")",
"return",
"action"
] | 37.3 | 22.4 |
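The deprecation note points at `when_not` plus `set_state`; a minimal sketch of that replacement, assuming both helpers are importable from `charms.reactive` as the note implies (the flag name is made up):

from charms.reactive import when_not, set_state

@when_not('example.installed')      # hypothetical flag name
def install():
    # ... perform the one-time work here ...
    set_state('example.installed')  # prevents this handler from firing again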
def selectInvert( self ):
"""
Inverts the currently selected items in the scene.
"""
currLayer = self._currentLayer
for item in self.items():
layer = item.layer()
if ( layer == currLayer or not layer ):
item.setSelected(not item.isSelected()) | [
"def",
"selectInvert",
"(",
"self",
")",
":",
"currLayer",
"=",
"self",
".",
"_currentLayer",
"for",
"item",
"in",
"self",
".",
"items",
"(",
")",
":",
"layer",
"=",
"item",
".",
"layer",
"(",
")",
"if",
"(",
"layer",
"==",
"currLayer",
"or",
"not",
"layer",
")",
":",
"item",
".",
"setSelected",
"(",
"not",
"item",
".",
"isSelected",
"(",
")",
")"
] | 34.888889 | 8.444444 |
def temp45(msg):
"""Static air temperature.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
float: tmeperature in Celsius degree
"""
d = hex2bin(data(msg))
sign = int(d[16])
value = bin2int(d[17:26])
if sign:
value = value - 512
temp = value * 0.25 # celsius
temp = round(temp, 1)
return temp | [
"def",
"temp45",
"(",
"msg",
")",
":",
"d",
"=",
"hex2bin",
"(",
"data",
"(",
"msg",
")",
")",
"sign",
"=",
"int",
"(",
"d",
"[",
"16",
"]",
")",
"value",
"=",
"bin2int",
"(",
"d",
"[",
"17",
":",
"26",
"]",
")",
"if",
"sign",
":",
"value",
"=",
"value",
"-",
"512",
"temp",
"=",
"value",
"*",
"0.25",
"# celsius",
"temp",
"=",
"round",
"(",
"temp",
",",
"1",
")",
"return",
"temp"
] | 16.590909 | 23.363636 |
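A worked decode of the arithmetic (the bit patterns are made up): the bit at index 16 of the data field is the sign, the nine bits after it are the magnitude, and the result is in quarter-degree steps.

# Worked arithmetic matching the body above, no real ADS-B message needed
sign, value = 0, 60          # nine value bits = 000111100
print(value * 0.25)          # 15.0 degrees Celsius

sign, value = 1, 448         # sign bit set: subtract 512 before scaling
print((value - 512) * 0.25)  # -16.0 degrees Celsius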
def from_ssl_socket(cls, ssl_socket):
"""Get certificate data from an SSL socket.
"""
try:
data = ssl_socket.getpeercert(True)
except AttributeError:
# PyPy doesn't have .getpeercert
data = None
if not data:
logger.debug("No certificate infromation")
return cls()
result = cls.from_der_data(data)
result.validated = bool(ssl_socket.getpeercert())
return result | [
"def",
"from_ssl_socket",
"(",
"cls",
",",
"ssl_socket",
")",
":",
"try",
":",
"data",
"=",
"ssl_socket",
".",
"getpeercert",
"(",
"True",
")",
"except",
"AttributeError",
":",
"# PyPy doesn't have .getpeercert",
"data",
"=",
"None",
"if",
"not",
"data",
":",
"logger",
".",
"debug",
"(",
"\"No certificate infromation\"",
")",
"return",
"cls",
"(",
")",
"result",
"=",
"cls",
".",
"from_der_data",
"(",
"data",
")",
"result",
".",
"validated",
"=",
"bool",
"(",
"ssl_socket",
".",
"getpeercert",
"(",
")",
")",
"return",
"result"
] | 33.642857 | 11.071429 |
def delete_password(self, service, username):
"""Delete the password for the username of the service.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path)
try:
if not config.remove_option(service, username):
raise PasswordDeleteError("Password not found")
except configparser.NoSectionError:
raise PasswordDeleteError("Password not found")
# update the file
with open(self.file_path, 'w') as config_file:
config.write(config_file) | [
"def",
"delete_password",
"(",
"self",
",",
"service",
",",
"username",
")",
":",
"service",
"=",
"escape_for_ini",
"(",
"service",
")",
"username",
"=",
"escape_for_ini",
"(",
"username",
")",
"config",
"=",
"configparser",
".",
"RawConfigParser",
"(",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"file_path",
")",
":",
"config",
".",
"read",
"(",
"self",
".",
"file_path",
")",
"try",
":",
"if",
"not",
"config",
".",
"remove_option",
"(",
"service",
",",
"username",
")",
":",
"raise",
"PasswordDeleteError",
"(",
"\"Password not found\"",
")",
"except",
"configparser",
".",
"NoSectionError",
":",
"raise",
"PasswordDeleteError",
"(",
"\"Password not found\"",
")",
"# update the file",
"with",
"open",
"(",
"self",
".",
"file_path",
",",
"'w'",
")",
"as",
"config_file",
":",
"config",
".",
"write",
"(",
"config_file",
")"
] | 42.6875 | 8.9375 |
def two_qubit_state_tomography(sampler: sim.Sampler,
first_qubit: devices.GridQubit,
second_qubit: devices.GridQubit,
circuit: circuits.Circuit,
repetitions: int = 1000) -> TomographyResult:
r"""Two-qubit state tomography.
To measure the density matrix of the output state of a two-qubit circuit,
different combinations of I, X/2 and Y/2 operations are applied to the
two qubits before measurements in the z-basis to determine the state
probabilities P_00, P_01, P_10.
The density matrix rho is decomposed into an operator-sum representation
\sum_{i, j} c_ij * sigma_i \bigotimes sigma_j, where i, j = 0, 1, 2,
3 and sigma_0 = I, sigma_1 = sigma_x, sigma_2 = sigma_y, sigma_3 =
sigma_z are the single-qubit Identity and Pauli matrices.
Based on the measured probabilities probs and the transformations of the
measurement operator by different basis rotations, one can build an
overdetermined set of linear equations.
As an example, if the identity operation (I) is applied to both qubits,
the measurement operators are (I +/- sigma_z) \bigotimes (I +/- sigma_z).
The state probabilities P_00, P_01, P_10 thus obtained contribute to the
following linear equations (setting c_00 = 1):
c_03 + c_30 + c_33 = 4*P_00 - 1
-c_03 + c_30 - c_33 = 4*P_01 - 1
c_03 - c_30 - c_33 = 4*P_10 - 1
And if a Y/2 rotation is applied to the first qubit and a X/2 rotation
is applied to the second qubit before measurement, the measurement
operators are (I -/+ sigma_x) \bigotimes (I +/- sigma_y). The probabilities
obtained instead contribute to the following linear equations:
c_02 - c_10 - c_12 = 4*P_00 - 1
-c_02 - c_10 + c_12 = 4*P_01 - 1
c_02 + c_10 + c_12 = 4*P_10 - 1
Note that this set of equations has the same form as the first set under
the transformation c_03 <-> c_02, c_30 <-> -c_10 and c_33 <-> -c_12.
Since there are 9 possible combinations of rotations (each producing 3
independent probabilities) and a total of 15 unknown coefficients c_ij,
one can cast all the measurement results into a overdetermined set of
linear equations numpy.dot(mat, c) = probs. Here c is of length 15 and
contains all the c_ij's (except c_00 which is set to 1), and mat is a 27
by 15 matrix having three non-zero elements in each row that are either
1 or -1.
The least-square solution to the above set of linear equations is then
used to construct the density matrix rho.
See Vandersypen and Chuang, Rev. Mod. Phys. 76, 1037 for details and
Steffen et al, Science 313, 1423 for a related experiment.
Args:
sampler: The quantum engine or simulator to run the circuits.
first_qubit: The first qubit under test.
second_qubit: The second qubit under test.
circuit: The circuit to execute on the qubits before tomography.
repetitions: The number of measurements for each basis rotation.
Returns:
A TomographyResult object that stores and plots the density matrix.
"""
# The size of the system of linear equations to be solved.
num_rows = 27
num_cols = 15
def _measurement(two_qubit_circuit: circuits.Circuit) -> np.ndarray:
two_qubit_circuit.append(ops.measure(first_qubit, second_qubit,
key='z'))
results = sampler.run(two_qubit_circuit, repetitions=repetitions)
results_hist = results.histogram(key='z')
prob_list = [results_hist[0], results_hist[1], results_hist[2]]
return np.asarray(prob_list) / repetitions
sigma_0 = np.eye(2) / 2.0
sigma_1 = np.array([[0.0, 1.0], [1.0, 0.0]]) / 2.0
sigma_2 = np.array([[0.0, -1.0j], [1.0j, 0.0]]) / 2.0
sigma_3 = np.array([[1.0, 0.0], [0.0, -1.0]]) / 2.0
sigmas = [sigma_0, sigma_1, sigma_2, sigma_3]
# Stores all 27 measured probabilities (P_00, P_01, P_10 after 9
# different basis rotations).
probs = np.array([])
rots = [ops.X ** 0, ops.X ** 0.5, ops.Y ** 0.5]
# Represents the coefficients in front of the c_ij's (-1, 0 or 1) in the
# system of 27 linear equations.
mat = np.zeros((num_rows, num_cols))
# Represents the relative signs between the linear equations for P_00,
# P_01, and P_10.
s = np.array([[1.0, 1.0, 1.0], [-1.0, 1.0, -1.0], [1.0, -1.0, -1.0]])
for i, rot_1 in enumerate(rots):
for j, rot_2 in enumerate(rots):
m_idx, indices, signs = _indices_after_basis_rot(i, j)
mat[m_idx: (m_idx + 3), indices] = s * np.tile(signs, (3, 1))
test_circuit = circuit + circuits.Circuit.from_ops(rot_1(
second_qubit))
test_circuit.append(rot_2(first_qubit))
probs = np.concatenate((probs, _measurement(test_circuit)))
c, _, _, _ = np.linalg.lstsq(mat, 4.0 * probs - 1.0, rcond=-1)
c = np.concatenate(([1.0], c))
c = c.reshape(4, 4)
rho = np.zeros((4, 4))
for i in range(4):
for j in range(4):
rho = rho + c[i, j] * np.kron(sigmas[i], sigmas[j])
return TomographyResult(rho) | [
"def",
"two_qubit_state_tomography",
"(",
"sampler",
":",
"sim",
".",
"Sampler",
",",
"first_qubit",
":",
"devices",
".",
"GridQubit",
",",
"second_qubit",
":",
"devices",
".",
"GridQubit",
",",
"circuit",
":",
"circuits",
".",
"Circuit",
",",
"repetitions",
":",
"int",
"=",
"1000",
")",
"->",
"TomographyResult",
":",
"# The size of the system of linear equations to be solved.",
"num_rows",
"=",
"27",
"num_cols",
"=",
"15",
"def",
"_measurement",
"(",
"two_qubit_circuit",
":",
"circuits",
".",
"Circuit",
")",
"->",
"np",
".",
"ndarray",
":",
"two_qubit_circuit",
".",
"append",
"(",
"ops",
".",
"measure",
"(",
"first_qubit",
",",
"second_qubit",
",",
"key",
"=",
"'z'",
")",
")",
"results",
"=",
"sampler",
".",
"run",
"(",
"two_qubit_circuit",
",",
"repetitions",
"=",
"repetitions",
")",
"results_hist",
"=",
"results",
".",
"histogram",
"(",
"key",
"=",
"'z'",
")",
"prob_list",
"=",
"[",
"results_hist",
"[",
"0",
"]",
",",
"results_hist",
"[",
"1",
"]",
",",
"results_hist",
"[",
"2",
"]",
"]",
"return",
"np",
".",
"asarray",
"(",
"prob_list",
")",
"/",
"repetitions",
"sigma_0",
"=",
"np",
".",
"eye",
"(",
"2",
")",
"/",
"2.0",
"sigma_1",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0.0",
",",
"1.0",
"]",
",",
"[",
"1.0",
",",
"0.0",
"]",
"]",
")",
"/",
"2.0",
"sigma_2",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0.0",
",",
"-",
"1.0j",
"]",
",",
"[",
"1.0j",
",",
"0.0",
"]",
"]",
")",
"/",
"2.0",
"sigma_3",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1.0",
",",
"0.0",
"]",
",",
"[",
"0.0",
",",
"-",
"1.0",
"]",
"]",
")",
"/",
"2.0",
"sigmas",
"=",
"[",
"sigma_0",
",",
"sigma_1",
",",
"sigma_2",
",",
"sigma_3",
"]",
"# Stores all 27 measured probabilities (P_00, P_01, P_10 after 9",
"# different basis rotations).",
"probs",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
"rots",
"=",
"[",
"ops",
".",
"X",
"**",
"0",
",",
"ops",
".",
"X",
"**",
"0.5",
",",
"ops",
".",
"Y",
"**",
"0.5",
"]",
"# Represents the coefficients in front of the c_ij's (-1, 0 or 1) in the",
"# system of 27 linear equations.",
"mat",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_rows",
",",
"num_cols",
")",
")",
"# Represents the relative signs between the linear equations for P_00,",
"# P_01, and P_10.",
"s",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1.0",
",",
"1.0",
",",
"1.0",
"]",
",",
"[",
"-",
"1.0",
",",
"1.0",
",",
"-",
"1.0",
"]",
",",
"[",
"1.0",
",",
"-",
"1.0",
",",
"-",
"1.0",
"]",
"]",
")",
"for",
"i",
",",
"rot_1",
"in",
"enumerate",
"(",
"rots",
")",
":",
"for",
"j",
",",
"rot_2",
"in",
"enumerate",
"(",
"rots",
")",
":",
"m_idx",
",",
"indices",
",",
"signs",
"=",
"_indices_after_basis_rot",
"(",
"i",
",",
"j",
")",
"mat",
"[",
"m_idx",
":",
"(",
"m_idx",
"+",
"3",
")",
",",
"indices",
"]",
"=",
"s",
"*",
"np",
".",
"tile",
"(",
"signs",
",",
"(",
"3",
",",
"1",
")",
")",
"test_circuit",
"=",
"circuit",
"+",
"circuits",
".",
"Circuit",
".",
"from_ops",
"(",
"rot_1",
"(",
"second_qubit",
")",
")",
"test_circuit",
".",
"append",
"(",
"rot_2",
"(",
"first_qubit",
")",
")",
"probs",
"=",
"np",
".",
"concatenate",
"(",
"(",
"probs",
",",
"_measurement",
"(",
"test_circuit",
")",
")",
")",
"c",
",",
"_",
",",
"_",
",",
"_",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"mat",
",",
"4.0",
"*",
"probs",
"-",
"1.0",
",",
"rcond",
"=",
"-",
"1",
")",
"c",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"1.0",
"]",
",",
"c",
")",
")",
"c",
"=",
"c",
".",
"reshape",
"(",
"4",
",",
"4",
")",
"rho",
"=",
"np",
".",
"zeros",
"(",
"(",
"4",
",",
"4",
")",
")",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"for",
"j",
"in",
"range",
"(",
"4",
")",
":",
"rho",
"=",
"rho",
"+",
"c",
"[",
"i",
",",
"j",
"]",
"*",
"np",
".",
"kron",
"(",
"sigmas",
"[",
"i",
"]",
",",
"sigmas",
"[",
"j",
"]",
")",
"return",
"TomographyResult",
"(",
"rho",
")"
] | 43.82906 | 25.247863 |
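The numerical core of the tomography row above is a least-squares solve of an overdetermined linear system. A self-contained sketch of just that step, using a toy 4-equation/2-unknown system rather than the real 27-by-15 tomography matrix:

import numpy as np

# Toy stand-in for np.linalg.lstsq(mat, 4.0 * probs - 1.0, rcond=-1):
# four equations constraining two unknown coefficients.
mat = np.array([[1.0, 1.0],
                [1.0, -1.0],
                [2.0, 1.0],
                [0.0, 1.0]])
rhs = np.array([3.0, 1.0, 5.0, 1.0])

c, residuals, rank, _ = np.linalg.lstsq(mat, rhs, rcond=None)
print(c)  # least-squares estimate, approximately [2.0, 1.0]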
def set_position(self, position):
"""Sets the if-statement position."""
if position < 0:
position += len(self.checkdefs)
while position >= len(self.checkdefs):
self.checkdefs.append(([], []))
self.position = position | [
"def",
"set_position",
"(",
"self",
",",
"position",
")",
":",
"if",
"position",
"<",
"0",
":",
"position",
"+=",
"len",
"(",
"self",
".",
"checkdefs",
")",
"while",
"position",
">=",
"len",
"(",
"self",
".",
"checkdefs",
")",
":",
"self",
".",
"checkdefs",
".",
"append",
"(",
"(",
"[",
"]",
",",
"[",
"]",
")",
")",
"self",
".",
"position",
"=",
"position"
] | 38 | 6.142857 |
def median(timeseries, segmentlength, noverlap=None, window=None, plan=None):
"""Calculate a PSD of this `TimeSeries` using a median average method
The median average is similar to Welch's method, using a median average
rather than mean.
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
input `TimeSeries` data.
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
window : `tuple`, `str`, optional
window parameters to apply to timeseries prior to FFT
plan : `REAL8FFTPlan`, optional
LAL FFT plan to use when generating average spectrum
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
See also
--------
lal.REAL8AverageSpectrumMedian
"""
return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap,
method='median', window=window, plan=plan) | [
"def",
"median",
"(",
"timeseries",
",",
"segmentlength",
",",
"noverlap",
"=",
"None",
",",
"window",
"=",
"None",
",",
"plan",
"=",
"None",
")",
":",
"return",
"_lal_spectrum",
"(",
"timeseries",
",",
"segmentlength",
",",
"noverlap",
"=",
"noverlap",
",",
"method",
"=",
"'median'",
",",
"window",
"=",
"window",
",",
"plan",
"=",
"plan",
")"
] | 30.058824 | 23.176471 |
def get_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
"""Get the RBF basis SOAP output for the given positions in a finite system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
Hpos: Positions at which to calculate SOAP
alp: Alphas
bet: Betas
rCut: Radial cutoff.
nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given, the output is calculated only for the
given species and is ordered by atomic number.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given positions.
"""
rCutHard = rCut + 5
assert Lmax <= 9, "l cannot exceed 9. Lmax={}".format(Lmax)
assert Lmax >= 0, "l cannot be negative.Lmax={}".format(Lmax)
assert rCutHard < 17.0001, "hard radius cutoff cannot be larger than 17 Angs. rCut={}".format(rCutHard)
assert rCutHard > 1.999, "hard radius cutoff cannot be lower than 1 Ang. rCut={}".format(rCutHard)
assert nMax >= 2, "number of basis functions cannot be lower than 2. nMax={}".format(nMax)
assert nMax <= 13, "number of basis functions cannot exceed 12. nMax={}".format(nMax)
assert eta >= 0.0001, "Eta cannot be zero or negative. nMax={}".format(eta)
# get clusgeo internal format for c-code
Apos, typeNs, py_Ntypes, atomtype_lst, totalAN = _format_ase2clusgeo(obj, all_atomtypes)
Hpos = np.array(Hpos)
py_Hsize = Hpos.shape[0]
# flatten arrays
Hpos = Hpos.flatten()
alp = alp.flatten()
bet = bet.flatten()
# convert int to c_int
lMax = c_int(Lmax)
Hsize = c_int(py_Hsize)
Ntypes = c_int(py_Ntypes)
totalAN = c_int(totalAN)
rCutHard = c_double(rCutHard)
Nsize = c_int(nMax)
c_eta = c_double(eta)
#convert int array to c_int array
typeNs = (c_int * len(typeNs))(*typeNs)
# convert to c_double arrays
# alphas
alphas = (c_double * len(alp))(*alp.tolist())
# betas
betas = (c_double * len(bet))(*bet.tolist())
#Apos
axyz = (c_double * len(Apos))(*Apos.tolist())
#Hpos
hxyz = (c_double * len(Hpos))(*Hpos.tolist())
### START SOAP###
#path_to_so = os.path.dirname(os.path.abspath(__file__))
_PATH_TO_SOAPLITE_SO = os.path.dirname(os.path.abspath(__file__))
_SOAPLITE_SOFILES = glob.glob( "".join([ _PATH_TO_SOAPLITE_SO, "/../lib/libsoap*.*so"]) ) ## NOT SURE ABOUT THIS
if py_Ntypes == 1 or (not crossOver):
substring = "lib/libsoapPySig."
libsoap = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None))
libsoap.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double]
libsoap.soap.restype = POINTER (c_double)
c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes*py_Hsize))()
libsoap.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta)
else:
substring = "lib/libsoapGTO."
libsoapGTO = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None))
libsoapGTO.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double]
libsoapGTO.soap.restype = POINTER (c_double)
c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*int((py_Ntypes*(py_Ntypes +1))/2)*py_Hsize))()
libsoapGTO.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta)
# return c;
if crossOver:
crosTypes = int((py_Ntypes*(py_Ntypes+1))/2)
shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*crosTypes)
else:
shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes)
a = np.ctypeslib.as_array(c)
a = a.reshape(shape)
return a | [
"def",
"get_soap_locals",
"(",
"obj",
",",
"Hpos",
",",
"alp",
",",
"bet",
",",
"rCut",
"=",
"5.0",
",",
"nMax",
"=",
"5",
",",
"Lmax",
"=",
"5",
",",
"crossOver",
"=",
"True",
",",
"all_atomtypes",
"=",
"None",
",",
"eta",
"=",
"1.0",
")",
":",
"rCutHard",
"=",
"rCut",
"+",
"5",
"assert",
"Lmax",
"<=",
"9",
",",
"\"l cannot exceed 9. Lmax={}\"",
".",
"format",
"(",
"Lmax",
")",
"assert",
"Lmax",
">=",
"0",
",",
"\"l cannot be negative.Lmax={}\"",
".",
"format",
"(",
"Lmax",
")",
"assert",
"rCutHard",
"<",
"17.0001",
",",
"\"hard radius cuttof cannot be larger than 17 Angs. rCut={}\"",
".",
"format",
"(",
"rCutHard",
")",
"assert",
"rCutHard",
">",
"1.999",
",",
"\"hard redius cuttof cannot be lower than 1 Ang. rCut={}\"",
".",
"format",
"(",
"rCutHard",
")",
"assert",
"nMax",
">=",
"2",
",",
"\"number of basis functions cannot be lower than 2. nMax={}\"",
".",
"format",
"(",
"nMax",
")",
"assert",
"nMax",
"<=",
"13",
",",
"\"number of basis functions cannot exceed 12. nMax={}\"",
".",
"format",
"(",
"nMax",
")",
"assert",
"eta",
">=",
"0.0001",
",",
"\"Eta cannot be zero or negative. nMax={}\"",
".",
"format",
"(",
"eta",
")",
"# get clusgeo internal format for c-code",
"Apos",
",",
"typeNs",
",",
"py_Ntypes",
",",
"atomtype_lst",
",",
"totalAN",
"=",
"_format_ase2clusgeo",
"(",
"obj",
",",
"all_atomtypes",
")",
"Hpos",
"=",
"np",
".",
"array",
"(",
"Hpos",
")",
"py_Hsize",
"=",
"Hpos",
".",
"shape",
"[",
"0",
"]",
"# flatten arrays",
"Hpos",
"=",
"Hpos",
".",
"flatten",
"(",
")",
"alp",
"=",
"alp",
".",
"flatten",
"(",
")",
"bet",
"=",
"bet",
".",
"flatten",
"(",
")",
"# convert int to c_int",
"lMax",
"=",
"c_int",
"(",
"Lmax",
")",
"Hsize",
"=",
"c_int",
"(",
"py_Hsize",
")",
"Ntypes",
"=",
"c_int",
"(",
"py_Ntypes",
")",
"totalAN",
"=",
"c_int",
"(",
"totalAN",
")",
"rCutHard",
"=",
"c_double",
"(",
"rCutHard",
")",
"Nsize",
"=",
"c_int",
"(",
"nMax",
")",
"c_eta",
"=",
"c_double",
"(",
"eta",
")",
"#convert int array to c_int array",
"typeNs",
"=",
"(",
"c_int",
"*",
"len",
"(",
"typeNs",
")",
")",
"(",
"*",
"typeNs",
")",
"# convert to c_double arrays",
"# alphas",
"alphas",
"=",
"(",
"c_double",
"*",
"len",
"(",
"alp",
")",
")",
"(",
"*",
"alp",
".",
"tolist",
"(",
")",
")",
"# betas",
"betas",
"=",
"(",
"c_double",
"*",
"len",
"(",
"bet",
")",
")",
"(",
"*",
"bet",
".",
"tolist",
"(",
")",
")",
"#Apos",
"axyz",
"=",
"(",
"c_double",
"*",
"len",
"(",
"Apos",
")",
")",
"(",
"*",
"Apos",
".",
"tolist",
"(",
")",
")",
"#Hpos",
"hxyz",
"=",
"(",
"c_double",
"*",
"len",
"(",
"Hpos",
")",
")",
"(",
"*",
"Hpos",
".",
"tolist",
"(",
")",
")",
"### START SOAP###",
"#path_to_so = os.path.dirname(os.path.abspath(__file__))",
"_PATH_TO_SOAPLITE_SO",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"_SOAPLITE_SOFILES",
"=",
"glob",
".",
"glob",
"(",
"\"\"",
".",
"join",
"(",
"[",
"_PATH_TO_SOAPLITE_SO",
",",
"\"/../lib/libsoap*.*so\"",
"]",
")",
")",
"## NOT SURE ABOUT THIS",
"if",
"py_Ntypes",
"==",
"1",
"or",
"(",
"not",
"crossOver",
")",
":",
"substring",
"=",
"\"lib/libsoapPySig.\"",
"libsoap",
"=",
"CDLL",
"(",
"next",
"(",
"(",
"s",
"for",
"s",
"in",
"_SOAPLITE_SOFILES",
"if",
"substring",
"in",
"s",
")",
",",
"None",
")",
")",
"libsoap",
".",
"soap",
".",
"argtypes",
"=",
"[",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"c_double",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_double",
"]",
"libsoap",
".",
"soap",
".",
"restype",
"=",
"POINTER",
"(",
"c_double",
")",
"c",
"=",
"(",
"c_double",
"*",
"(",
"int",
"(",
"(",
"nMax",
"*",
"(",
"nMax",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"(",
"Lmax",
"+",
"1",
")",
"*",
"py_Ntypes",
"*",
"py_Hsize",
")",
")",
"(",
")",
"libsoap",
".",
"soap",
"(",
"c",
",",
"axyz",
",",
"hxyz",
",",
"alphas",
",",
"betas",
",",
"typeNs",
",",
"rCutHard",
",",
"totalAN",
",",
"Ntypes",
",",
"Nsize",
",",
"lMax",
",",
"Hsize",
",",
"c_eta",
")",
"else",
":",
"substring",
"=",
"\"lib/libsoapGTO.\"",
"libsoapGTO",
"=",
"CDLL",
"(",
"next",
"(",
"(",
"s",
"for",
"s",
"in",
"_SOAPLITE_SOFILES",
"if",
"substring",
"in",
"s",
")",
",",
"None",
")",
")",
"libsoapGTO",
".",
"soap",
".",
"argtypes",
"=",
"[",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_double",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"c_double",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_int",
",",
"c_double",
"]",
"libsoapGTO",
".",
"soap",
".",
"restype",
"=",
"POINTER",
"(",
"c_double",
")",
"c",
"=",
"(",
"c_double",
"*",
"(",
"int",
"(",
"(",
"nMax",
"*",
"(",
"nMax",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"(",
"Lmax",
"+",
"1",
")",
"*",
"int",
"(",
"(",
"py_Ntypes",
"*",
"(",
"py_Ntypes",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"py_Hsize",
")",
")",
"(",
")",
"libsoapGTO",
".",
"soap",
"(",
"c",
",",
"axyz",
",",
"hxyz",
",",
"alphas",
",",
"betas",
",",
"typeNs",
",",
"rCutHard",
",",
"totalAN",
",",
"Ntypes",
",",
"Nsize",
",",
"lMax",
",",
"Hsize",
",",
"c_eta",
")",
"# return c;",
"if",
"crossOver",
":",
"crosTypes",
"=",
"int",
"(",
"(",
"py_Ntypes",
"*",
"(",
"py_Ntypes",
"+",
"1",
")",
")",
"/",
"2",
")",
"shape",
"=",
"(",
"py_Hsize",
",",
"int",
"(",
"(",
"nMax",
"*",
"(",
"nMax",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"(",
"Lmax",
"+",
"1",
")",
"*",
"crosTypes",
")",
"else",
":",
"shape",
"=",
"(",
"py_Hsize",
",",
"int",
"(",
"(",
"nMax",
"*",
"(",
"nMax",
"+",
"1",
")",
")",
"/",
"2",
")",
"*",
"(",
"Lmax",
"+",
"1",
")",
"*",
"py_Ntypes",
")",
"a",
"=",
"np",
".",
"ctypeslib",
".",
"as_array",
"(",
"c",
")",
"a",
"=",
"a",
".",
"reshape",
"(",
"shape",
")",
"return",
"a"
] | 45.4 | 28.566667 |
def computeGeneralExpectations(self, A_in, u_ln, state_list, compute_uncertainty=True,
uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False):
"""Compute the expectations of multiple observables of phase space functions on multiple states.
Compute the expectations of multiple observables of phase
space functions. [A_0(x),A_1(x),...,A_i(x)] along with the
covariances of their estimates at multiple states.
We calculate all observables at all states that are specified by the choice of state list.
Generally, it will be called in specific conditions.
Parameters
----------
A_in : np.ndarray, float, shape=(I, N)
A_in[i,n] = A_i(x_n), the value of phase observable i for configuration n
u_ln : np.ndarray, float, shape=(L, N)
u_n[l,n] is the reduced potential of configuration n at state l
if u_ln = None, we use self.u_kn
state_list : np.ndarray, int, shape (NS,2), where NS is the
total number of states of interest. It will be
of the form [[0,0],[1,1],[2,1]], which would
indicate we want to output three observables in
total: the first property A[0] at the 0th state,
the 2nd property A[1] at the 1st state, and the
3rd property A[2] at the 1st state. This allows
us to tailor this to a number of different situations.
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method, or None to use default
See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
Returns
-------
A_i : np.ndarray, float, shape = (I)
A_i[i] is the estimate for the expectation of A_state_list[i](x) at the state specified by u_n[state_list[i],:]
d2A_ik : np.ndarray, float, shape = (I, J)
d2A_ij[i,j] is the COVARIANCE in the estimates of observables A_i and A_j (as determined by the state list)
(* not the square root *)
General cases this will be used for.
single observable, multiple states (replacement for computeExpectations)
multiple observables, single state (replacement for computeMultipleExpectations)
diagonal cases of multiple states, single states.
Examples
--------
update this example to be more general
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> A_in = np.array([x_n,x_n**2,x_n**3])
>>> u_n = u_kn[:2,:]
>>> state_list = np.array([[0,0],[1,0],[2,0],[2,1]],int)
>>> [A_i, d2A_ij] = mbar.computeGeneralExpectations(A_in, u_n, state_list)
"""
# Retrieve N and K for convenience.
S = len(state_list) # number of computed expectations examined
K = self.K
N = self.N # N is total number of samples
# make observables all positive
A_list = np.unique(state_list[:,0])
I = len(A_list) # number of observables used
A_min = np.zeros([I], dtype=np.float64)
for i in A_list: # only need to zero the ones we will use. May be some repetition here.
A_min[i] = np.min(A_in[i, :]) #find the minimum
A_in[i, :] = A_in[i,:] - (A_min[i] - 1) #all values now positive so that we can work in logarithmic scale
# Augment W_nk, N_k, and c_k for q_A(x) for the observables, with one
# row for the specified state and I rows for the observable at that
# state.
# log weight matrix
sizea = K + 2*S # augmented size
Log_W_nk = np.zeros([N, sizea], np.float64) # log weight matrix
N_k = np.zeros([sizea], np.int32) # counts
f_k = np.zeros([sizea], np.float64) # free energies
# Fill in first section of matrix with existing q_k(x) from states.
Log_W_nk[:, 0:K] = self.Log_W_nk
N_k[0:K] = self.N_k
f_k[0:K] = self.f_k
# Compute row of W matrix for the extra states corresponding to u_ln according to the state list
for s in range(S):
l = state_list[s,1]
la = K+s #l augmented
Log_W_nk[:, la] = self._computeUnnormalizedLogWeights(u_ln[l,:])
f_k[la] = -_logsum(Log_W_nk[:, la])
Log_W_nk[:, la] += f_k[l]
# Compute the remaining rows/columns of W_nk and c_k for the
# observables.
for s in range(S):
sa = K+S+s # augmented s
i = state_list[s,0]
l = state_list[s,1]
Log_W_nk[:, sa] = np.log(A_in[i, :]) + Log_W_nk[:, K+l]
f_k[sa] = -_logsum(Log_W_nk[:, sa])
Log_W_nk[:, sa] += f_k[sa] # normalize this row
# Compute estimates.
A_i = np.zeros([S], np.float64)
for s in range(S):
A_i[s] = np.exp(-f_k[K + S + s])
if compute_uncertainty or return_theta:
# Compute augmented asymptotic covariance matrix.
W_nk = np.exp(Log_W_nk)
Theta_ij = self._computeAsymptoticCovarianceMatrix(
W_nk, N_k, method=uncertainty_method)
if compute_uncertainty:
# Compute estimates of statistical covariance
# these variances will be the same whether or not we subtract a different constant from each A_i
# todo: vectorize
# compute the covariance component without doing the double loop
d2A_ij = np.zeros([S, S], np.float64)
for i in range(S):
si = K+S+i
li = K+state_list[i,1]
for j in range(S):
sj = K+S+j
lj = K+state_list[j,1]
d2A_ij[i, j] = A_i[i] * A_i[j] * (
Theta_ij[si, sj] - Theta_ij[si, li] - Theta_ij[lj, sj] + Theta_ij[li, lj])
# Now that covariances are computed, add the constants back to A_i that
# were required to enforce positivity
for s in range(S):
A_i[s] += (A_min[state_list[s,0]] - 1)
# these values may be used outside the routine, so copy back.
for i in A_list:
A_in[i, :] = A_in[i,:] + (A_min[i] - 1)
returns = []
returns.append(A_i)
if compute_uncertainty:
returns.append(d2A_ij)
if return_theta:
returns.append(Theta_ij)
# Return expectations and uncertainties.
return returns | [
"def",
"computeGeneralExpectations",
"(",
"self",
",",
"A_in",
",",
"u_ln",
",",
"state_list",
",",
"compute_uncertainty",
"=",
"True",
",",
"uncertainty_method",
"=",
"None",
",",
"warning_cutoff",
"=",
"1.0e-10",
",",
"return_theta",
"=",
"False",
")",
":",
"# Retrieve N and K for convenience.",
"S",
"=",
"len",
"(",
"state_list",
")",
"# number of computed expectations examined",
"K",
"=",
"self",
".",
"K",
"N",
"=",
"self",
".",
"N",
"# N is total number of samples",
"# make observables all positive",
"A_list",
"=",
"np",
".",
"unique",
"(",
"state_list",
"[",
":",
",",
"0",
"]",
")",
"I",
"=",
"len",
"(",
"A_list",
")",
"# number of observables used",
"A_min",
"=",
"np",
".",
"zeros",
"(",
"[",
"I",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"for",
"i",
"in",
"A_list",
":",
"# only need to zero the ones we will use. May be some repetition here.",
"A_min",
"[",
"i",
"]",
"=",
"np",
".",
"min",
"(",
"A_in",
"[",
"i",
",",
":",
"]",
")",
"#find the minimum",
"A_in",
"[",
"i",
",",
":",
"]",
"=",
"A_in",
"[",
"i",
",",
":",
"]",
"-",
"(",
"A_min",
"[",
"i",
"]",
"-",
"1",
")",
"#all values now positive so that we can work in logarithmic scale",
"# Augment W_nk, N_k, and c_k for q_A(x) for the observables, with one",
"# row for the specified state and I rows for the observable at that",
"# state.",
"# log weight matrix",
"sizea",
"=",
"K",
"+",
"2",
"*",
"S",
"# augmented size",
"Log_W_nk",
"=",
"np",
".",
"zeros",
"(",
"[",
"N",
",",
"sizea",
"]",
",",
"np",
".",
"float64",
")",
"# log weight matrix",
"N_k",
"=",
"np",
".",
"zeros",
"(",
"[",
"sizea",
"]",
",",
"np",
".",
"int32",
")",
"# counts",
"f_k",
"=",
"np",
".",
"zeros",
"(",
"[",
"sizea",
"]",
",",
"np",
".",
"float64",
")",
"# free energies",
"# Fill in first section of matrix with existing q_k(x) from states.",
"Log_W_nk",
"[",
":",
",",
"0",
":",
"K",
"]",
"=",
"self",
".",
"Log_W_nk",
"N_k",
"[",
"0",
":",
"K",
"]",
"=",
"self",
".",
"N_k",
"f_k",
"[",
"0",
":",
"K",
"]",
"=",
"self",
".",
"f_k",
"# Compute row of W matrix for the extra states corresponding to u_ln according to the state list",
"for",
"s",
"in",
"range",
"(",
"S",
")",
":",
"l",
"=",
"state_list",
"[",
"s",
",",
"1",
"]",
"la",
"=",
"K",
"+",
"s",
"#l augmented",
"Log_W_nk",
"[",
":",
",",
"la",
"]",
"=",
"self",
".",
"_computeUnnormalizedLogWeights",
"(",
"u_ln",
"[",
"l",
",",
":",
"]",
")",
"f_k",
"[",
"la",
"]",
"=",
"-",
"_logsum",
"(",
"Log_W_nk",
"[",
":",
",",
"la",
"]",
")",
"Log_W_nk",
"[",
":",
",",
"la",
"]",
"+=",
"f_k",
"[",
"l",
"]",
"# Compute the remaining rows/columns of W_nk and c_k for the",
"# observables.",
"for",
"s",
"in",
"range",
"(",
"S",
")",
":",
"sa",
"=",
"K",
"+",
"S",
"+",
"s",
"# augmented s",
"i",
"=",
"state_list",
"[",
"s",
",",
"0",
"]",
"l",
"=",
"state_list",
"[",
"s",
",",
"1",
"]",
"Log_W_nk",
"[",
":",
",",
"sa",
"]",
"=",
"np",
".",
"log",
"(",
"A_in",
"[",
"i",
",",
":",
"]",
")",
"+",
"Log_W_nk",
"[",
":",
",",
"K",
"+",
"l",
"]",
"f_k",
"[",
"sa",
"]",
"=",
"-",
"_logsum",
"(",
"Log_W_nk",
"[",
":",
",",
"sa",
"]",
")",
"Log_W_nk",
"[",
":",
",",
"sa",
"]",
"+=",
"f_k",
"[",
"sa",
"]",
"# normalize this row",
"# Compute estimates.",
"A_i",
"=",
"np",
".",
"zeros",
"(",
"[",
"S",
"]",
",",
"np",
".",
"float64",
")",
"for",
"s",
"in",
"range",
"(",
"S",
")",
":",
"A_i",
"[",
"s",
"]",
"=",
"np",
".",
"exp",
"(",
"-",
"f_k",
"[",
"K",
"+",
"S",
"+",
"s",
"]",
")",
"if",
"compute_uncertainty",
"or",
"return_theta",
":",
"# Compute augmented asymptotic covariance matrix.",
"W_nk",
"=",
"np",
".",
"exp",
"(",
"Log_W_nk",
")",
"Theta_ij",
"=",
"self",
".",
"_computeAsymptoticCovarianceMatrix",
"(",
"W_nk",
",",
"N_k",
",",
"method",
"=",
"uncertainty_method",
")",
"if",
"compute_uncertainty",
":",
"# Compute estimates of statistical covariance",
"# these variances will be the same whether or not we subtract a different constant from each A_i",
"# todo: vectorize",
"# compute the covariance component without doing the double loop",
"d2A_ij",
"=",
"np",
".",
"zeros",
"(",
"[",
"S",
",",
"S",
"]",
",",
"np",
".",
"float64",
")",
"for",
"i",
"in",
"range",
"(",
"S",
")",
":",
"si",
"=",
"K",
"+",
"S",
"+",
"i",
"li",
"=",
"K",
"+",
"state_list",
"[",
"i",
",",
"1",
"]",
"for",
"j",
"in",
"range",
"(",
"S",
")",
":",
"sj",
"=",
"K",
"+",
"S",
"+",
"j",
"lj",
"=",
"K",
"+",
"state_list",
"[",
"j",
",",
"1",
"]",
"d2A_ij",
"[",
"i",
",",
"j",
"]",
"=",
"A_i",
"[",
"i",
"]",
"*",
"A_i",
"[",
"j",
"]",
"*",
"(",
"Theta_ij",
"[",
"si",
",",
"sj",
"]",
"-",
"Theta_ij",
"[",
"si",
",",
"li",
"]",
"-",
"Theta_ij",
"[",
"lj",
",",
"sj",
"]",
"+",
"Theta_ij",
"[",
"li",
",",
"lj",
"]",
")",
"# Now that covariances are computed, add the constants back to A_i that",
"# were required to enforce positivity",
"for",
"s",
"in",
"range",
"(",
"S",
")",
":",
"A_i",
"[",
"s",
"]",
"+=",
"(",
"A_min",
"[",
"state_list",
"[",
"s",
",",
"0",
"]",
"]",
"-",
"1",
")",
"# these values may be used outside the routine, so copy back.",
"for",
"i",
"in",
"A_list",
":",
"A_in",
"[",
"i",
",",
":",
"]",
"=",
"A_in",
"[",
"i",
",",
":",
"]",
"+",
"(",
"A_min",
"[",
"i",
"]",
"-",
"1",
")",
"returns",
"=",
"[",
"]",
"returns",
".",
"append",
"(",
"A_i",
")",
"if",
"compute_uncertainty",
":",
"returns",
".",
"append",
"(",
"d2A_ij",
")",
"if",
"return_theta",
":",
"returns",
".",
"append",
"(",
"Theta_ij",
")",
"# Return expectations and uncertainties.",
"return",
"returns"
] | 43.683544 | 25.06962 |
def by_publications(self):
"""
The Creators who have been most-read, ordered by number of read
publications (ignoring whether any of those publications have been read
multiple times).
Each Creator will have a `num_publications` attribute.
"""
if not spectator_apps.is_enabled('reading'):
raise ImproperlyConfigured("To use the CreatorManager.by_publications() method, 'spectator.reading' must be in INSTALLED_APPS.")
qs = self.get_queryset()
qs = qs.exclude(publications__reading__isnull=True) \
.annotate(num_publications=Count('publications')) \
.order_by('-num_publications', 'name_sort')
return qs | [
"def",
"by_publications",
"(",
"self",
")",
":",
"if",
"not",
"spectator_apps",
".",
"is_enabled",
"(",
"'reading'",
")",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"To use the CreatorManager.by_publications() method, 'spectator.reading' must by in INSTALLED_APPS.\"",
")",
"qs",
"=",
"self",
".",
"get_queryset",
"(",
")",
"qs",
"=",
"qs",
".",
"exclude",
"(",
"publications__reading__isnull",
"=",
"True",
")",
".",
"annotate",
"(",
"num_publications",
"=",
"Count",
"(",
"'publications'",
")",
")",
".",
"order_by",
"(",
"'-num_publications'",
",",
"'name_sort'",
")",
"return",
"qs"
] | 39.722222 | 27.5 |
def processPrePrepare(self, pre_prepare: PrePrepare, sender: str):
"""
Validate and process provided PRE-PREPARE, create and
broadcast PREPARE for it.
:param pre_prepare: message
:param sender: name of the node that sent this message
"""
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
self.logger.debug("{} received PRE-PREPARE{} from {}".format(self, key, sender))
# TODO: should we still do it?
# Converting each req_idrs from list to tuple
req_idrs = {f.REQ_IDR.nm: [key for key in pre_prepare.reqIdr]}
pre_prepare = updateNamedTuple(pre_prepare, **req_idrs)
def report_suspicious(reason):
ex = SuspiciousNode(sender, reason, pre_prepare)
self.report_suspicious_node(ex)
why_not = self._can_process_pre_prepare(pre_prepare, sender)
if why_not is None:
why_not_applied = \
self._process_valid_preprepare(pre_prepare, sender)
if why_not_applied is not None:
if why_not_applied == PP_APPLY_REJECT_WRONG:
report_suspicious(Suspicions.PPR_REJECT_WRONG)
elif why_not_applied == PP_APPLY_WRONG_DIGEST:
report_suspicious(Suspicions.PPR_DIGEST_WRONG)
elif why_not_applied == PP_APPLY_WRONG_STATE:
report_suspicious(Suspicions.PPR_STATE_WRONG)
elif why_not_applied == PP_APPLY_ROOT_HASH_MISMATCH:
report_suspicious(Suspicions.PPR_TXN_WRONG)
elif why_not_applied == PP_APPLY_HOOK_ERROR:
report_suspicious(Suspicions.PPR_PLUGIN_EXCEPTION)
elif why_not_applied == PP_SUB_SEQ_NO_WRONG:
report_suspicious(Suspicions.PPR_SUB_SEQ_NO_WRONG)
elif why_not_applied == PP_NOT_FINAL:
# this is fine, just wait for another
return
elif why_not_applied == PP_APPLY_AUDIT_HASH_MISMATCH:
report_suspicious(Suspicions.PPR_AUDIT_TXN_ROOT_HASH_WRONG)
elif why_not_applied == PP_REQUEST_ALREADY_ORDERED:
report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)
elif why_not == PP_CHECK_NOT_FROM_PRIMARY:
report_suspicious(Suspicions.PPR_FRM_NON_PRIMARY)
elif why_not == PP_CHECK_TO_PRIMARY:
report_suspicious(Suspicions.PPR_TO_PRIMARY)
elif why_not == PP_CHECK_DUPLICATE:
report_suspicious(Suspicions.DUPLICATE_PPR_SENT)
elif why_not == PP_CHECK_INCORRECT_POOL_STATE_ROOT:
report_suspicious(Suspicions.PPR_POOL_STATE_ROOT_HASH_WRONG)
elif why_not == PP_CHECK_OLD:
self.logger.info("PRE-PREPARE {} has ppSeqNo lower "
"then the latest one - ignoring it".format(key))
elif why_not == PP_CHECK_REQUEST_NOT_FINALIZED:
absents = set()
non_fin = set()
non_fin_payload = set()
for key in pre_prepare.reqIdr:
req = self.requests.get(key)
if req is None:
absents.add(key)
elif not req.finalised:
non_fin.add(key)
non_fin_payload.add(req.request.payload_digest)
absent_str = ', '.join(str(key) for key in absents)
non_fin_str = ', '.join(
'{} ({} : {})'.format(str(key),
str(len(self.requests[key].propagates)),
', '.join(self.requests[key].propagates.keys())) for key in non_fin)
self.logger.warning(
"{} found requests in the incoming pp, of {} ledger, that are not finalized. "
"{} of them don't have propagates: {}."
"{} of them don't have enough propagates: {}.".format(self, pre_prepare.ledgerId,
len(absents), absent_str,
len(non_fin), non_fin_str))
def signal_suspicious(req):
self.logger.info("Request digest {} already ordered. Discard {} "
"from {}".format(req, pre_prepare, sender))
report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)
# checking for payload digest is more effective
for payload_key in non_fin_payload:
if self.node.seqNoDB.get_by_payload_digest(payload_key) != (None, None):
signal_suspicious(payload_key)
return
# for absents we can only check full digest
for full_key in absents:
if self.node.seqNoDB.get_by_full_digest(full_key) is not None:
signal_suspicious(full_key)
return
bad_reqs = absents | non_fin
self.enqueue_pre_prepare(pre_prepare, sender, bad_reqs)
# TODO: An optimisation might be to not request PROPAGATEs
# if some PROPAGATEs are present or a client request is
# present and sufficient PREPAREs and PRE-PREPARE are present,
# then the digest can be compared but this is expensive as the
# PREPARE and PRE-PREPARE contain a combined digest
self._schedule(partial(self.request_propagates_if_needed, bad_reqs, pre_prepare),
self.config.PROPAGATE_REQUEST_DELAY)
elif why_not == PP_CHECK_NOT_NEXT:
pp_view_no = pre_prepare.viewNo
pp_seq_no = pre_prepare.ppSeqNo
last_pp_view_no, last_pp_seq_no = self.__last_pp_3pc
if pp_view_no >= last_pp_view_no and (
self.isMaster or self.last_ordered_3pc[1] != 0):
seq_frm = last_pp_seq_no + 1 if pp_view_no == last_pp_view_no else 1
seq_to = pp_seq_no - 1
if seq_to >= seq_frm >= pp_seq_no - CHK_FREQ + 1:
self.logger.warning(
"{} missing PRE-PREPAREs from {} to {}, "
"going to request".format(self, seq_frm, seq_to))
self._request_missing_three_phase_messages(
pp_view_no, seq_frm, seq_to)
self.enqueue_pre_prepare(pre_prepare, sender)
self._setup_last_ordered_for_non_master()
elif why_not == PP_CHECK_WRONG_TIME:
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
item = (pre_prepare, sender, False)
self.pre_prepares_stashed_for_incorrect_time[key] = item
report_suspicious(Suspicions.PPR_TIME_WRONG)
elif why_not == BlsBftReplica.PPR_BLS_MULTISIG_WRONG:
report_suspicious(Suspicions.PPR_BLS_MULTISIG_WRONG)
else:
self.logger.warning("Unknown PRE-PREPARE check status: {}".format(why_not)) | [
"def",
"processPrePrepare",
"(",
"self",
",",
"pre_prepare",
":",
"PrePrepare",
",",
"sender",
":",
"str",
")",
":",
"key",
"=",
"(",
"pre_prepare",
".",
"viewNo",
",",
"pre_prepare",
".",
"ppSeqNo",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"{} received PRE-PREPARE{} from {}\"",
".",
"format",
"(",
"self",
",",
"key",
",",
"sender",
")",
")",
"# TODO: should we still do it?",
"# Converting each req_idrs from list to tuple",
"req_idrs",
"=",
"{",
"f",
".",
"REQ_IDR",
".",
"nm",
":",
"[",
"key",
"for",
"key",
"in",
"pre_prepare",
".",
"reqIdr",
"]",
"}",
"pre_prepare",
"=",
"updateNamedTuple",
"(",
"pre_prepare",
",",
"*",
"*",
"req_idrs",
")",
"def",
"report_suspicious",
"(",
"reason",
")",
":",
"ex",
"=",
"SuspiciousNode",
"(",
"sender",
",",
"reason",
",",
"pre_prepare",
")",
"self",
".",
"report_suspicious_node",
"(",
"ex",
")",
"why_not",
"=",
"self",
".",
"_can_process_pre_prepare",
"(",
"pre_prepare",
",",
"sender",
")",
"if",
"why_not",
"is",
"None",
":",
"why_not_applied",
"=",
"self",
".",
"_process_valid_preprepare",
"(",
"pre_prepare",
",",
"sender",
")",
"if",
"why_not_applied",
"is",
"not",
"None",
":",
"if",
"why_not_applied",
"==",
"PP_APPLY_REJECT_WRONG",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_REJECT_WRONG",
")",
"elif",
"why_not_applied",
"==",
"PP_APPLY_WRONG_DIGEST",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_DIGEST_WRONG",
")",
"elif",
"why_not_applied",
"==",
"PP_APPLY_WRONG_STATE",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_STATE_WRONG",
")",
"elif",
"why_not_applied",
"==",
"PP_APPLY_ROOT_HASH_MISMATCH",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_TXN_WRONG",
")",
"elif",
"why_not_applied",
"==",
"PP_APPLY_HOOK_ERROR",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_PLUGIN_EXCEPTION",
")",
"elif",
"why_not_applied",
"==",
"PP_SUB_SEQ_NO_WRONG",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_SUB_SEQ_NO_WRONG",
")",
"elif",
"why_not_applied",
"==",
"PP_NOT_FINAL",
":",
"# this is fine, just wait for another",
"return",
"elif",
"why_not_applied",
"==",
"PP_APPLY_AUDIT_HASH_MISMATCH",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_AUDIT_TXN_ROOT_HASH_WRONG",
")",
"elif",
"why_not_applied",
"==",
"PP_REQUEST_ALREADY_ORDERED",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_WITH_ORDERED_REQUEST",
")",
"elif",
"why_not",
"==",
"PP_CHECK_NOT_FROM_PRIMARY",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_FRM_NON_PRIMARY",
")",
"elif",
"why_not",
"==",
"PP_CHECK_TO_PRIMARY",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_TO_PRIMARY",
")",
"elif",
"why_not",
"==",
"PP_CHECK_DUPLICATE",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"DUPLICATE_PPR_SENT",
")",
"elif",
"why_not",
"==",
"PP_CHECK_INCORRECT_POOL_STATE_ROOT",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_POOL_STATE_ROOT_HASH_WRONG",
")",
"elif",
"why_not",
"==",
"PP_CHECK_OLD",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"PRE-PREPARE {} has ppSeqNo lower \"",
"\"then the latest one - ignoring it\"",
".",
"format",
"(",
"key",
")",
")",
"elif",
"why_not",
"==",
"PP_CHECK_REQUEST_NOT_FINALIZED",
":",
"absents",
"=",
"set",
"(",
")",
"non_fin",
"=",
"set",
"(",
")",
"non_fin_payload",
"=",
"set",
"(",
")",
"for",
"key",
"in",
"pre_prepare",
".",
"reqIdr",
":",
"req",
"=",
"self",
".",
"requests",
".",
"get",
"(",
"key",
")",
"if",
"req",
"is",
"None",
":",
"absents",
".",
"add",
"(",
"key",
")",
"elif",
"not",
"req",
".",
"finalised",
":",
"non_fin",
".",
"add",
"(",
"key",
")",
"non_fin_payload",
".",
"add",
"(",
"req",
".",
"request",
".",
"payload_digest",
")",
"absent_str",
"=",
"', '",
".",
"join",
"(",
"str",
"(",
"key",
")",
"for",
"key",
"in",
"absents",
")",
"non_fin_str",
"=",
"', '",
".",
"join",
"(",
"'{} ({} : {})'",
".",
"format",
"(",
"str",
"(",
"key",
")",
",",
"str",
"(",
"len",
"(",
"self",
".",
"requests",
"[",
"key",
"]",
".",
"propagates",
")",
")",
",",
"', '",
".",
"join",
"(",
"self",
".",
"requests",
"[",
"key",
"]",
".",
"propagates",
".",
"keys",
"(",
")",
")",
")",
"for",
"key",
"in",
"non_fin",
")",
"self",
".",
"logger",
".",
"warning",
"(",
"\"{} found requests in the incoming pp, of {} ledger, that are not finalized. \"",
"\"{} of them don't have propagates: {}.\"",
"\"{} of them don't have enough propagates: {}.\"",
".",
"format",
"(",
"self",
",",
"pre_prepare",
".",
"ledgerId",
",",
"len",
"(",
"absents",
")",
",",
"absent_str",
",",
"len",
"(",
"non_fin",
")",
",",
"non_fin_str",
")",
")",
"def",
"signal_suspicious",
"(",
"req",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Request digest {} already ordered. Discard {} \"",
"\"from {}\"",
".",
"format",
"(",
"req",
",",
"pre_prepare",
",",
"sender",
")",
")",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_WITH_ORDERED_REQUEST",
")",
"# checking for payload digest is more effective",
"for",
"payload_key",
"in",
"non_fin_payload",
":",
"if",
"self",
".",
"node",
".",
"seqNoDB",
".",
"get_by_payload_digest",
"(",
"payload_key",
")",
"!=",
"(",
"None",
",",
"None",
")",
":",
"signal_suspicious",
"(",
"payload_key",
")",
"return",
"# for absents we can only check full digest",
"for",
"full_key",
"in",
"absents",
":",
"if",
"self",
".",
"node",
".",
"seqNoDB",
".",
"get_by_full_digest",
"(",
"full_key",
")",
"is",
"not",
"None",
":",
"signal_suspicious",
"(",
"full_key",
")",
"return",
"bad_reqs",
"=",
"absents",
"|",
"non_fin",
"self",
".",
"enqueue_pre_prepare",
"(",
"pre_prepare",
",",
"sender",
",",
"bad_reqs",
")",
"# TODO: An optimisation might be to not request PROPAGATEs",
"# if some PROPAGATEs are present or a client request is",
"# present and sufficient PREPAREs and PRE-PREPARE are present,",
"# then the digest can be compared but this is expensive as the",
"# PREPARE and PRE-PREPARE contain a combined digest",
"self",
".",
"_schedule",
"(",
"partial",
"(",
"self",
".",
"request_propagates_if_needed",
",",
"bad_reqs",
",",
"pre_prepare",
")",
",",
"self",
".",
"config",
".",
"PROPAGATE_REQUEST_DELAY",
")",
"elif",
"why_not",
"==",
"PP_CHECK_NOT_NEXT",
":",
"pp_view_no",
"=",
"pre_prepare",
".",
"viewNo",
"pp_seq_no",
"=",
"pre_prepare",
".",
"ppSeqNo",
"last_pp_view_no",
",",
"last_pp_seq_no",
"=",
"self",
".",
"__last_pp_3pc",
"if",
"pp_view_no",
">=",
"last_pp_view_no",
"and",
"(",
"self",
".",
"isMaster",
"or",
"self",
".",
"last_ordered_3pc",
"[",
"1",
"]",
"!=",
"0",
")",
":",
"seq_frm",
"=",
"last_pp_seq_no",
"+",
"1",
"if",
"pp_view_no",
"==",
"last_pp_view_no",
"else",
"1",
"seq_to",
"=",
"pp_seq_no",
"-",
"1",
"if",
"seq_to",
">=",
"seq_frm",
">=",
"pp_seq_no",
"-",
"CHK_FREQ",
"+",
"1",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"{} missing PRE-PREPAREs from {} to {}, \"",
"\"going to request\"",
".",
"format",
"(",
"self",
",",
"seq_frm",
",",
"seq_to",
")",
")",
"self",
".",
"_request_missing_three_phase_messages",
"(",
"pp_view_no",
",",
"seq_frm",
",",
"seq_to",
")",
"self",
".",
"enqueue_pre_prepare",
"(",
"pre_prepare",
",",
"sender",
")",
"self",
".",
"_setup_last_ordered_for_non_master",
"(",
")",
"elif",
"why_not",
"==",
"PP_CHECK_WRONG_TIME",
":",
"key",
"=",
"(",
"pre_prepare",
".",
"viewNo",
",",
"pre_prepare",
".",
"ppSeqNo",
")",
"item",
"=",
"(",
"pre_prepare",
",",
"sender",
",",
"False",
")",
"self",
".",
"pre_prepares_stashed_for_incorrect_time",
"[",
"key",
"]",
"=",
"item",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_TIME_WRONG",
")",
"elif",
"why_not",
"==",
"BlsBftReplica",
".",
"PPR_BLS_MULTISIG_WRONG",
":",
"report_suspicious",
"(",
"Suspicions",
".",
"PPR_BLS_MULTISIG_WRONG",
")",
"else",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"Unknown PRE-PREPARE check status: {}\"",
".",
"format",
"(",
"why_not",
")",
")"
] | 53.248062 | 21.418605 |
def impute_dataframe_range(df_impute, col_to_max, col_to_min, col_to_median):
"""
Columnwise replaces all ``NaNs``, ``-inf`` and ``+inf`` from the DataFrame `df_impute` with average/extreme values
from the provided dictionaries.
This is done as follows: Each occurring ``inf`` or ``NaN`` in `df_impute` is replaced by
* ``-inf`` -> by value in col_to_min
* ``+inf`` -> by value in col_to_max
* ``NaN`` -> by value in col_to_median
If a column of df_impute is not found in one of the dictionaries, this method will raise a ValueError.
Also, if one of the values to replace is not finite, a ValueError is raised.
This function modifies `df_impute` in place. Afterwards df_impute is
guaranteed to not contain any non-finite values.
Also, all columns will be guaranteed to be of type ``np.float64``.
:param df_impute: DataFrame to impute
:type df_impute: pandas.DataFrame
:param col_to_max: Dictionary mapping column names to max values
:type col_to_max: dict
:param col_to_min: Dictionary mapping column names to min values
:type col_to_min: dict
:param col_to_median: Dictionary mapping column names to median values
:type col_to_median: dict
:return df_impute: imputed DataFrame
:rtype df_impute: pandas.DataFrame
:raise ValueError: if a column of df_impute is missing in col_to_max, col_to_min or col_to_median or a value
to replace is non finite
"""
columns = df_impute.columns
# Making sure col_to_median, col_to_max and col_to_min have entries for every column
if not set(columns) <= set(col_to_median.keys()) or \
not set(columns) <= set(col_to_max.keys()) or \
not set(columns) <= set(col_to_min.keys()):
raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains more or less keys "
"than the column names in df")
# check if there are non finite values for the replacement
if np.any(~np.isfinite(list(col_to_median.values()))) or \
np.any(~np.isfinite(list(col_to_min.values()))) or \
np.any(~np.isfinite(list(col_to_max.values()))):
raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains non finite values "
"to replace")
# Make the replacement dataframes as large as the real one
col_to_max = pd.DataFrame([col_to_max]*len(df_impute), index=df_impute.index)
col_to_min = pd.DataFrame([col_to_min]*len(df_impute), index=df_impute.index)
col_to_median = pd.DataFrame([col_to_median]*len(df_impute), index=df_impute.index)
df_impute.where(df_impute.values != np.PINF, other=col_to_max, inplace=True)
df_impute.where(df_impute.values != np.NINF, other=col_to_min, inplace=True)
df_impute.where(~np.isnan(df_impute.values), other=col_to_median, inplace=True)
df_impute.astype(np.float64, copy=False)
return df_impute | [
"def",
"impute_dataframe_range",
"(",
"df_impute",
",",
"col_to_max",
",",
"col_to_min",
",",
"col_to_median",
")",
":",
"columns",
"=",
"df_impute",
".",
"columns",
"# Making sure col_to_median, col_to_max and col_to_min have entries for every column",
"if",
"not",
"set",
"(",
"columns",
")",
"<=",
"set",
"(",
"col_to_median",
".",
"keys",
"(",
")",
")",
"or",
"not",
"set",
"(",
"columns",
")",
"<=",
"set",
"(",
"col_to_max",
".",
"keys",
"(",
")",
")",
"or",
"not",
"set",
"(",
"columns",
")",
"<=",
"set",
"(",
"col_to_min",
".",
"keys",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Some of the dictionaries col_to_median, col_to_max, col_to_min contains more or less keys \"",
"\"than the column names in df\"",
")",
"# check if there are non finite values for the replacement",
"if",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"list",
"(",
"col_to_median",
".",
"values",
"(",
")",
")",
")",
")",
"or",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"list",
"(",
"col_to_min",
".",
"values",
"(",
")",
")",
")",
")",
"or",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"list",
"(",
"col_to_max",
".",
"values",
"(",
")",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Some of the dictionaries col_to_median, col_to_max, col_to_min contains non finite values \"",
"\"to replace\"",
")",
"# Make the replacement dataframes as large as the real one",
"col_to_max",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"col_to_max",
"]",
"*",
"len",
"(",
"df_impute",
")",
",",
"index",
"=",
"df_impute",
".",
"index",
")",
"col_to_min",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"col_to_min",
"]",
"*",
"len",
"(",
"df_impute",
")",
",",
"index",
"=",
"df_impute",
".",
"index",
")",
"col_to_median",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"col_to_median",
"]",
"*",
"len",
"(",
"df_impute",
")",
",",
"index",
"=",
"df_impute",
".",
"index",
")",
"df_impute",
".",
"where",
"(",
"df_impute",
".",
"values",
"!=",
"np",
".",
"PINF",
",",
"other",
"=",
"col_to_max",
",",
"inplace",
"=",
"True",
")",
"df_impute",
".",
"where",
"(",
"df_impute",
".",
"values",
"!=",
"np",
".",
"NINF",
",",
"other",
"=",
"col_to_min",
",",
"inplace",
"=",
"True",
")",
"df_impute",
".",
"where",
"(",
"~",
"np",
".",
"isnan",
"(",
"df_impute",
".",
"values",
")",
",",
"other",
"=",
"col_to_median",
",",
"inplace",
"=",
"True",
")",
"df_impute",
".",
"astype",
"(",
"np",
".",
"float64",
",",
"copy",
"=",
"False",
")",
"return",
"df_impute"
] | 49.830508 | 28.677966 |
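A minimal sketch of the replacement rules described in the impute_dataframe_range row above (-inf to the per-column min, +inf to the per-column max, NaN to the per-column median), on a small made-up frame and without tsfresh itself:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.inf, np.nan, -np.inf]})
col_to_max = {"a": 10.0}
col_to_min = {"a": -10.0}
col_to_median = {"a": 1.0}

# Apply the three substitution rules one column at a time.
df["a"] = df["a"].replace(np.inf, col_to_max["a"])
df["a"] = df["a"].replace(-np.inf, col_to_min["a"])
df["a"] = df["a"].fillna(col_to_median["a"])
print(df["a"].tolist())  # [1.0, 10.0, 1.0, -10.0]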
def vprjpi(vin, projpl, invpl):
"""
Find the vector in a specified plane that maps to a specified
vector in another plane under orthogonal projection.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vprjpi_c.html
:param vin: The projected vector.
:type vin: 3-Element Array of floats
:param projpl: Plane containing vin.
:type projpl: spiceypy.utils.support_types.Plane
:param invpl: Plane containing inverse image of vin.
:type invpl: spiceypy.utils.support_types.Plane
:return: Inverse projection of vin.
:rtype: list
"""
vin = stypes.toDoubleVector(vin)
vout = stypes.emptyDoubleVector(3)
found = ctypes.c_int()
libspice.vprjpi_c(vin, ctypes.byref(projpl), ctypes.byref(invpl), vout,
ctypes.byref(found))
return stypes.cVectorToPython(vout), bool(found.value) | [
"def",
"vprjpi",
"(",
"vin",
",",
"projpl",
",",
"invpl",
")",
":",
"vin",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"vin",
")",
"vout",
"=",
"stypes",
".",
"emptyDoubleVector",
"(",
"3",
")",
"found",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"libspice",
".",
"vprjpi_c",
"(",
"vin",
",",
"ctypes",
".",
"byref",
"(",
"projpl",
")",
",",
"ctypes",
".",
"byref",
"(",
"invpl",
")",
",",
"vout",
",",
"ctypes",
".",
"byref",
"(",
"found",
")",
")",
"return",
"stypes",
".",
"cVectorToPython",
"(",
"vout",
")",
",",
"bool",
"(",
"found",
".",
"value",
")"
] | 38.545455 | 13.909091 |
def find_duplicate_metabolites_in_compartments(model):
"""
Return list of metabolites with duplicates in the same compartment.
This function identifies duplicate metabolites in each compartment by
determining if any two metabolites have identical InChI-key annotations.
For instance, this function would find compounds with IDs ATP1 and ATP2 in
the cytosolic compartment, with both having the same InChI annotations.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
list
A list of tuples of duplicate metabolites.
"""
unique_identifiers = ["inchikey", "inchi"]
duplicates = []
for met_1, met_2 in combinations(model.metabolites, 2):
if met_1.compartment == met_2.compartment:
for key in unique_identifiers:
if key in met_1.annotation and key in met_2.annotation:
if met_1.annotation[key] == met_2.annotation[key]:
duplicates.append((met_1.id, met_2.id))
break
return duplicates | [
"def",
"find_duplicate_metabolites_in_compartments",
"(",
"model",
")",
":",
"unique_identifiers",
"=",
"[",
"\"inchikey\"",
",",
"\"inchi\"",
"]",
"duplicates",
"=",
"[",
"]",
"for",
"met_1",
",",
"met_2",
"in",
"combinations",
"(",
"model",
".",
"metabolites",
",",
"2",
")",
":",
"if",
"met_1",
".",
"compartment",
"==",
"met_2",
".",
"compartment",
":",
"for",
"key",
"in",
"unique_identifiers",
":",
"if",
"key",
"in",
"met_1",
".",
"annotation",
"and",
"key",
"in",
"met_2",
".",
"annotation",
":",
"if",
"met_1",
".",
"annotation",
"[",
"key",
"]",
"==",
"met_2",
".",
"annotation",
"[",
"key",
"]",
":",
"duplicates",
".",
"append",
"(",
"(",
"met_1",
".",
"id",
",",
"met_2",
".",
"id",
")",
")",
"break",
"return",
"duplicates"
] | 36.333333 | 23.2 |
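The pairwise test in the row above only needs compartments and annotation dictionaries, so the idea can be sketched without cobra installed. The metabolite tuples and key strings below are made up for illustration:

from itertools import combinations

# (id, compartment, annotation) stand-ins for cobra metabolites
metabolites = [
    ("atp_c1", "c", {"inchikey": "KEY-A"}),
    ("atp_c2", "c", {"inchikey": "KEY-A"}),
    ("adp_c", "c", {"inchikey": "KEY-B"}),
]

duplicates = []
for (id1, comp1, ann1), (id2, comp2, ann2) in combinations(metabolites, 2):
    # Two metabolites are duplicates if they share a compartment and an
    # identical unique-identifier annotation.
    if comp1 == comp2 and "inchikey" in ann1 and "inchikey" in ann2 \
            and ann1["inchikey"] == ann2["inchikey"]:
        duplicates.append((id1, id2))

print(duplicates)  # [('atp_c1', 'atp_c2')]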
def peak_memory_mb() -> float:
"""
Get peak memory usage for this process, as measured by
max-resident-set size:
https://unix.stackexchange.com/questions/30940/getrusage-system-call-what-is-maximum-resident-set-size
Only works on OSX and Linux, returns 0.0 otherwise.
"""
if resource is None or sys.platform not in ('linux', 'darwin'):
return 0.0
# TODO(joelgrus): For whatever, our pinned version 0.521 of mypy does not like
# next line, but later versions (e.g. 0.530) are fine with it. Once we get that
# figured out, remove the type: ignore.
peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # type: ignore
if sys.platform == 'darwin':
# On OSX the result is in bytes.
return peak / 1_000_000
else:
# On Linux the result is in kilobytes.
return peak / 1_000 | [
"def",
"peak_memory_mb",
"(",
")",
"->",
"float",
":",
"if",
"resource",
"is",
"None",
"or",
"sys",
".",
"platform",
"not",
"in",
"(",
"'linux'",
",",
"'darwin'",
")",
":",
"return",
"0.0",
"# TODO(joelgrus): For whatever, our pinned version 0.521 of mypy does not like",
"# next line, but later versions (e.g. 0.530) are fine with it. Once we get that",
"# figured out, remove the type: ignore.",
"peak",
"=",
"resource",
".",
"getrusage",
"(",
"resource",
".",
"RUSAGE_SELF",
")",
".",
"ru_maxrss",
"# type: ignore",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"# On OSX the result is in bytes.",
"return",
"peak",
"/",
"1_000_000",
"else",
":",
"# On Linux the result is in kilobytes.",
"return",
"peak",
"/",
"1_000"
] | 35.166667 | 23.5 |
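A small self-contained check of the unit difference noted in the docstring above (ru_maxrss is reported in bytes on macOS but kilobytes on Linux), using only the standard library; it assumes a Unix platform where the resource module is available:

import resource
import sys

peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
divisor = 1_000_000 if sys.platform == "darwin" else 1_000
print(f"peak resident set size so far: {peak / divisor:.1f} MB")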
def createObjectMachine(machineType, **kwargs):
"""
Return an object machine of the appropriate type.
@param machineType (str) A supported ObjectMachine type
@param kwargs (dict) Constructor argument for the class that will be
instantiated. Keyword parameters specific to each
model type should be passed in here.
"""
if machineType not in ObjectMachineTypes.getTypes():
raise RuntimeError("Unknown model type: " + machineType)
return getattr(ObjectMachineTypes, machineType)(**kwargs) | [
"def",
"createObjectMachine",
"(",
"machineType",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"machineType",
"not",
"in",
"ObjectMachineTypes",
".",
"getTypes",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Unknown model type: \"",
"+",
"machineType",
")",
"return",
"getattr",
"(",
"ObjectMachineTypes",
",",
"machineType",
")",
"(",
"*",
"*",
"kwargs",
")"
] | 37 | 23 |
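The factory in the row above dispatches by looking the class up on a registry with getattr. A tiny sketch of the same pattern against a hypothetical registry (not the real ObjectMachineTypes):

class _Types:
    class simple:
        def __init__(self, **kwargs):
            self.kwargs = kwargs

    @classmethod
    def getTypes(cls):
        return ["simple"]

def create(machine_type, **kwargs):
    # Validate the name, then resolve the class by attribute lookup,
    # mirroring getattr(ObjectMachineTypes, machineType)(**kwargs).
    if machine_type not in _Types.getTypes():
        raise RuntimeError("Unknown model type: " + machine_type)
    return getattr(_Types, machine_type)(**kwargs)

print(create("simple", numInputBits=20).kwargs)  # {'numInputBits': 20}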
def thin_samples_for_writing(fp, samples, parameters, last_iteration):
"""Thins samples for writing to disk.
The thinning interval to use is determined by the given file handler's
``thinned_by`` attribute. If that attribute is 1, just returns the samples.
Parameters
----------
fp : MCMCMetadataIO instance
The file the samples will be written to. Needed to determine the
thin interval used on disk.
samples : dict
Dictionary mapping parameter names to arrays of (unthinned) samples.
The arrays are thinned along their last dimension.
parameters : list of str
The parameters to thin in ``samples`` before writing. All listed
parameters must be in ``samples``.
last_iteration : int
The iteration that the last sample in ``samples`` occurred at. This is
needed to figure out where to start the thinning in ``samples``, such
that the interval between the last sample on disk and the first new
sample is the same as all of the other samples.
Returns
-------
dict :
Dictionary of the thinned samples to write.
"""
if fp.thinned_by > 1:
if last_iteration is None:
raise ValueError("File's thinned_by attribute is > 1 ({}), "
"but last_iteration not provided."
.format(fp.thinned_by))
thinned_samples = {}
for param in parameters:
data = samples[param]
nsamples = data.shape[-1]
# To figure out where to start:
# the last iteration in the file + the file's thinning interval
# gives the iteration of the next sample that should be written;
# last_iteration - nsamples gives the iteration of the first
# sample in samples. Subtracting the latter from the former - 1
# (-1 to convert from iteration to index) therefore gives the index
# in the samples data to start using samples.
thin_start = fp.last_iteration(param) + fp.thinned_by \
- (last_iteration - nsamples) - 1
thinned_samples[param] = data[..., thin_start::fp.thinned_by]
else:
thinned_samples = samples
return thinned_samples | [
"def",
"thin_samples_for_writing",
"(",
"fp",
",",
"samples",
",",
"parameters",
",",
"last_iteration",
")",
":",
"if",
"fp",
".",
"thinned_by",
">",
"1",
":",
"if",
"last_iteration",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"File's thinned_by attribute is > 1 ({}), \"",
"\"but last_iteration not provided.\"",
".",
"format",
"(",
"fp",
".",
"thinned_by",
")",
")",
"thinned_samples",
"=",
"{",
"}",
"for",
"param",
"in",
"parameters",
":",
"data",
"=",
"samples",
"[",
"param",
"]",
"nsamples",
"=",
"data",
".",
"shape",
"[",
"-",
"1",
"]",
"# To figure out where to start:",
"# the last iteration in the file + the file's thinning interval",
"# gives the iteration of the next sample that should be written;",
"# last_iteration - nsamples gives the iteration of the first",
"# sample in samples. Subtracting the latter from the former - 1",
"# (-1 to convert from iteration to index) therefore gives the index",
"# in the samples data to start using samples.",
"thin_start",
"=",
"fp",
".",
"last_iteration",
"(",
"param",
")",
"+",
"fp",
".",
"thinned_by",
"-",
"(",
"last_iteration",
"-",
"nsamples",
")",
"-",
"1",
"thinned_samples",
"[",
"param",
"]",
"=",
"data",
"[",
"...",
",",
"thin_start",
":",
":",
"fp",
".",
"thinned_by",
"]",
"else",
":",
"thinned_samples",
"=",
"samples",
"return",
"thinned_samples"
] | 44.68 | 22.36 |
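
A worked example of the thin_start bookkeeping described in the comments above, with invented numbers for the file state and the in-memory buffer:

# Invented state: the file thins by 4 and its last stored sample is from iteration 12;
# the in-memory buffer holds 10 new samples, the last of which is from iteration 22.
thinned_by = 4
file_last_iteration = 12
last_iteration = 22
nsamples = 10

# The first buffered sample is from iteration 13; the next sample due on disk is 12 + 4 = 16,
# which sits at index 3 of the buffer.
thin_start = file_last_iteration + thinned_by - (last_iteration - nsamples) - 1
buffered_iterations = list(range(last_iteration - nsamples + 1, last_iteration + 1))
print(thin_start)                                   # -> 3
print(buffered_iterations[thin_start::thinned_by])  # -> [16, 20]
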
def _control_resp(self, data, state):
""" Handle a control response.
:param data: Payload.
:param state: Requested state.
:returns: Acknowledged state.
"""
if _is_control_response(data):
ack_state = bytes([data[22]])
if state == ack_state:
_LOGGER.debug("Received state ack from %s, state: %s",
self.host, ord(ack_state))
return ack_state | [
"def",
"_control_resp",
"(",
"self",
",",
"data",
",",
"state",
")",
":",
"if",
"_is_control_response",
"(",
"data",
")",
":",
"ack_state",
"=",
"bytes",
"(",
"[",
"data",
"[",
"22",
"]",
"]",
")",
"if",
"state",
"==",
"ack_state",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Received state ack from %s, state: %s\"",
",",
"self",
".",
"host",
",",
"ord",
"(",
"ack_state",
")",
")",
"return",
"ack_state"
] | 35.461538 | 9.384615 |
def _hash_filter_fn(self, filter_fn, **kwargs):
""" Construct string representing state of filter_fn
Used to cache filtered variants or effects uniquely depending on filter fn values
"""
filter_fn_name = self._get_function_name(filter_fn, default="filter-none")
logger.debug("Computing hash for filter_fn: {} with kwargs {}".format(filter_fn_name, str(dict(**kwargs))))
# hash function source code
fn_source = str(dill.source.getsource(filter_fn))
pickled_fn_source = pickle.dumps(fn_source) ## encode as byte string
hashed_fn_source = int(hashlib.sha1(pickled_fn_source).hexdigest(), 16) % (10 ** 11)
# hash kwarg values
kw_dict = dict(**kwargs)
kw_hash = list()
if not kw_dict:
kw_hash = ["default"]
else:
[kw_hash.append("{}-{}".format(key, h)) for (key, h) in sorted(kw_dict.items())]
# hash closure vars - for case where filter_fn is defined within closure of filter_fn
closure = []
nonlocals = inspect.getclosurevars(filter_fn).nonlocals
for (key, val) in nonlocals.items():
## capture hash for any function within closure
if inspect.isfunction(val):
closure.append(self._hash_filter_fn(val))
closure.sort() # Sorted for file name consistency
closure_str = "null" if len(closure) == 0 else "-".join(closure)
# construct final string comprising hashed components
hashed_fn = ".".join(["-".join([filter_fn_name,
str(hashed_fn_source)]),
".".join(kw_hash),
closure_str]
)
return hashed_fn | [
"def",
"_hash_filter_fn",
"(",
"self",
",",
"filter_fn",
",",
"*",
"*",
"kwargs",
")",
":",
"filter_fn_name",
"=",
"self",
".",
"_get_function_name",
"(",
"filter_fn",
",",
"default",
"=",
"\"filter-none\"",
")",
"logger",
".",
"debug",
"(",
"\"Computing hash for filter_fn: {} with kwargs {}\"",
".",
"format",
"(",
"filter_fn_name",
",",
"str",
"(",
"dict",
"(",
"*",
"*",
"kwargs",
")",
")",
")",
")",
"# hash function source code",
"fn_source",
"=",
"str",
"(",
"dill",
".",
"source",
".",
"getsource",
"(",
"filter_fn",
")",
")",
"pickled_fn_source",
"=",
"pickle",
".",
"dumps",
"(",
"fn_source",
")",
"## encode as byte string",
"hashed_fn_source",
"=",
"int",
"(",
"hashlib",
".",
"sha1",
"(",
"pickled_fn_source",
")",
".",
"hexdigest",
"(",
")",
",",
"16",
")",
"%",
"(",
"10",
"**",
"11",
")",
"# hash kwarg values",
"kw_dict",
"=",
"dict",
"(",
"*",
"*",
"kwargs",
")",
"kw_hash",
"=",
"list",
"(",
")",
"if",
"not",
"kw_dict",
":",
"kw_hash",
"=",
"[",
"\"default\"",
"]",
"else",
":",
"[",
"kw_hash",
".",
"append",
"(",
"\"{}-{}\"",
".",
"format",
"(",
"key",
",",
"h",
")",
")",
"for",
"(",
"key",
",",
"h",
")",
"in",
"sorted",
"(",
"kw_dict",
".",
"items",
"(",
")",
")",
"]",
"# hash closure vars - for case where filter_fn is defined within closure of filter_fn",
"closure",
"=",
"[",
"]",
"nonlocals",
"=",
"inspect",
".",
"getclosurevars",
"(",
"filter_fn",
")",
".",
"nonlocals",
"for",
"(",
"key",
",",
"val",
")",
"in",
"nonlocals",
".",
"items",
"(",
")",
":",
"## capture hash for any function within closure",
"if",
"inspect",
".",
"isfunction",
"(",
"val",
")",
":",
"closure",
".",
"append",
"(",
"self",
".",
"_hash_filter_fn",
"(",
"val",
")",
")",
"closure",
".",
"sort",
"(",
")",
"# Sorted for file name consistency",
"closure_str",
"=",
"\"null\"",
"if",
"len",
"(",
"closure",
")",
"==",
"0",
"else",
"\"-\"",
".",
"join",
"(",
"closure",
")",
"# construct final string comprising hashed components",
"hashed_fn",
"=",
"\".\"",
".",
"join",
"(",
"[",
"\"-\"",
".",
"join",
"(",
"[",
"filter_fn_name",
",",
"str",
"(",
"hashed_fn_source",
")",
"]",
")",
",",
"\".\"",
".",
"join",
"(",
"kw_hash",
")",
",",
"closure_str",
"]",
")",
"return",
"hashed_fn"
] | 52.69697 | 21.515152 |
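
The core trick in the record above is hashing a function's source text with dill and hashlib; a minimal sketch of just that piece (the helper name and sample function are made up):

import hashlib
import pickle

import dill.source

def hash_function_source(fn):
    # Serialize the function's source text and reduce the SHA-1 digest to at most
    # 11 digits, mirroring the truncation used in the record above.
    source = str(dill.source.getsource(fn))
    pickled = pickle.dumps(source)
    return int(hashlib.sha1(pickled).hexdigest(), 16) % (10 ** 11)

def keep_positive(x):
    return x > 0

print(hash_function_source(keep_positive))
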
def _ExtractGoogleDocsSearchQuery(self, url):
"""Extracts a search query from a Google docs URL.
Google Docs: https://docs.google.com/.*/u/0/?q=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
"""
if 'q=' not in url:
return None
line = self._GetBetweenQEqualsAndAmpersand(url)
if not line:
return None
return line.replace('+', ' ') | [
"def",
"_ExtractGoogleDocsSearchQuery",
"(",
"self",
",",
"url",
")",
":",
"if",
"'q='",
"not",
"in",
"url",
":",
"return",
"None",
"line",
"=",
"self",
".",
"_GetBetweenQEqualsAndAmpersand",
"(",
"url",
")",
"if",
"not",
"line",
":",
"return",
"None",
"return",
"line",
".",
"replace",
"(",
"'+'",
",",
"' '",
")"
] | 21.894737 | 22.052632 |
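
The record depends on a _GetBetweenQEqualsAndAmpersand helper that is not shown. For comparison, a standard-library sketch of the same q= extraction (the URL is invented):

from urllib.parse import parse_qs, urlsplit

def extract_docs_search_query(url):
    # parse_qs already treats '+' as a space, matching the replace('+', ' ') step above.
    values = parse_qs(urlsplit(url).query).get('q')
    return values[0] if values else None

print(extract_docs_search_query('https://docs.google.com/document/u/0/?q=quarterly+report'))
# -> 'quarterly report'
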
def getOverlayTransformAbsolute(self, ulOverlayHandle):
"""Gets the transform if it is absolute. Returns an error if the transform is some other type."""
fn = self.function_table.getOverlayTransformAbsolute
peTrackingOrigin = ETrackingUniverseOrigin()
pmatTrackingOriginToOverlayTransform = HmdMatrix34_t()
result = fn(ulOverlayHandle, byref(peTrackingOrigin), byref(pmatTrackingOriginToOverlayTransform))
return result, peTrackingOrigin, pmatTrackingOriginToOverlayTransform | [
"def",
"getOverlayTransformAbsolute",
"(",
"self",
",",
"ulOverlayHandle",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"getOverlayTransformAbsolute",
"peTrackingOrigin",
"=",
"ETrackingUniverseOrigin",
"(",
")",
"pmatTrackingOriginToOverlayTransform",
"=",
"HmdMatrix34_t",
"(",
")",
"result",
"=",
"fn",
"(",
"ulOverlayHandle",
",",
"byref",
"(",
"peTrackingOrigin",
")",
",",
"byref",
"(",
"pmatTrackingOriginToOverlayTransform",
")",
")",
"return",
"result",
",",
"peTrackingOrigin",
",",
"pmatTrackingOriginToOverlayTransform"
] | 64.625 | 26.5 |
def get_pac(self):
"""
Search for, download, and parse PAC file if it hasn't already been done.
This method is called upon the first use of :meth:`request`,
but can also be called manually beforehand if desired.
Subsequent calls to this method will only return the obtained PAC file, if any.
:returns: The obtained PAC file, if any.
:rtype: PACFile|None
:raises MalformedPacError: If something that claims to be a PAC file was downloaded but could not be parsed.
"""
if self._tried_get_pac:
return self._proxy_resolver.pac if self._proxy_resolver else None
if not self.pac_enabled:
return
pac = get_pac()
self._tried_get_pac = True
if pac:
self._proxy_resolver = self._get_proxy_resolver(pac)
return pac | [
"def",
"get_pac",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tried_get_pac",
":",
"return",
"self",
".",
"_proxy_resolver",
".",
"pac",
"if",
"self",
".",
"_proxy_resolver",
"else",
"None",
"if",
"not",
"self",
".",
"pac_enabled",
":",
"return",
"pac",
"=",
"get_pac",
"(",
")",
"self",
".",
"_tried_get_pac",
"=",
"True",
"if",
"pac",
":",
"self",
".",
"_proxy_resolver",
"=",
"self",
".",
"_get_proxy_resolver",
"(",
"pac",
")",
"return",
"pac"
] | 39.181818 | 24.636364 |
def get_inline_views_from_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
inline_views = []
for _, opts in fieldsets or ():
if 'fieldsets' in opts:
inline_views += get_inline_views_from_fieldsets(opts.get('fieldsets'))
elif 'inline_view' in opts:
inline_views.append(opts.get('inline_view'))
return inline_views | [
"def",
"get_inline_views_from_fieldsets",
"(",
"fieldsets",
")",
":",
"inline_views",
"=",
"[",
"]",
"for",
"_",
",",
"opts",
"in",
"fieldsets",
"or",
"(",
")",
":",
"if",
"'fieldsets'",
"in",
"opts",
":",
"inline_views",
"+=",
"get_inline_views_from_fieldsets",
"(",
"opts",
".",
"get",
"(",
"'fieldsets'",
")",
")",
"elif",
"'inline_view'",
"in",
"opts",
":",
"inline_views",
".",
"append",
"(",
"opts",
".",
"get",
"(",
"'inline_view'",
")",
")",
"return",
"inline_views"
] | 44.888889 | 13.333333 |
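
A small usage sketch of the recursive walk above, repeating the record's function verbatim so the snippet runs on its own; the fieldsets structure and inline view names are invented:

def get_inline_views_from_fieldsets(fieldsets):
    inline_views = []
    for _, opts in fieldsets or ():
        if 'fieldsets' in opts:
            inline_views += get_inline_views_from_fieldsets(opts.get('fieldsets'))
        elif 'inline_view' in opts:
            inline_views.append(opts.get('inline_view'))
    return inline_views

fieldsets = (
    ('General', {'fields': ('name',)}),
    ('Related', {'fieldsets': (
        ('Comments', {'inline_view': 'CommentInlineView'}),
        ('Attachments', {'inline_view': 'AttachmentInlineView'}),
    )}),
)
print(get_inline_views_from_fieldsets(fieldsets))
# -> ['CommentInlineView', 'AttachmentInlineView']
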
def all(self, page=None, per_page=None, include_totals=False, extra_params=None):
"""Retrieves all grants.
Args:
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
extra_params (dictionary, optional): The extra parameters to add to
the request. The page, per_page, and include_totals values
specified as parameters take precedence over the ones defined here.
See: https://auth0.com/docs/api/management/v2#!/Grants/get_grants
"""
params = extra_params or {}
params.update({
'page': page,
'per_page': per_page,
'include_totals': str(include_totals).lower()
})
return self.client.get(self._url(), params=params) | [
"def",
"all",
"(",
"self",
",",
"page",
"=",
"None",
",",
"per_page",
"=",
"None",
",",
"include_totals",
"=",
"False",
",",
"extra_params",
"=",
"None",
")",
":",
"params",
"=",
"extra_params",
"or",
"{",
"}",
"params",
".",
"update",
"(",
"{",
"'page'",
":",
"page",
",",
"'per_page'",
":",
"per_page",
",",
"'include_totals'",
":",
"str",
"(",
"include_totals",
")",
".",
"lower",
"(",
")",
"}",
")",
"return",
"self",
".",
"client",
".",
"get",
"(",
"self",
".",
"_url",
"(",
")",
",",
"params",
"=",
"params",
")"
] | 38.72 | 26.52 |
def mount_control_encode(self, target_system, target_component, input_a, input_b, input_c, save_position):
'''
Message to control a camera mount, directional antenna, etc.
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
input_a : pitch(deg*100) or lat, depending on mount mode (int32_t)
input_b : roll(deg*100) or lon depending on mount mode (int32_t)
input_c : yaw(deg*100) or alt (in cm) depending on mount mode (int32_t)
save_position : if "1" it will save current trimmed position on EEPROM (just valid for NEUTRAL and LANDING) (uint8_t)
'''
return MAVLink_mount_control_message(target_system, target_component, input_a, input_b, input_c, save_position) | [
"def",
"mount_control_encode",
"(",
"self",
",",
"target_system",
",",
"target_component",
",",
"input_a",
",",
"input_b",
",",
"input_c",
",",
"save_position",
")",
":",
"return",
"MAVLink_mount_control_message",
"(",
"target_system",
",",
"target_component",
",",
"input_a",
",",
"input_b",
",",
"input_c",
",",
"save_position",
")"
] | 71.076923 | 49.846154 |
def complete(self):
"""is the game over?"""
if None not in [v for v in self.squares]:
return True
if self.winner() is not None:
return True
return False | [
"def",
"complete",
"(",
"self",
")",
":",
"if",
"None",
"not",
"in",
"[",
"v",
"for",
"v",
"in",
"self",
".",
"squares",
"]",
":",
"return",
"True",
"if",
"self",
".",
"winner",
"(",
")",
"is",
"not",
"None",
":",
"return",
"True",
"return",
"False"
] | 28.857143 | 12.428571 |
def select(self, column, agg=None, _as=None, distinct=False):
"""
        What columns to select in the query.
        :column should be a column name or equation to produce column **not** aggregated
        :agg should be a valid aggregate method used to produce the figure
:_as should be a string used to represent the column. Required when agg present
"""
if agg and not _as:
raise ValueError("Aggregate colunns require `_as` to be specified")
if column is False:
self._selects = {}
else:
self._selects.setdefault((_as or column), (column, agg, _as, distinct)) | [
"def",
"select",
"(",
"self",
",",
"column",
",",
"agg",
"=",
"None",
",",
"_as",
"=",
"None",
",",
"distinct",
"=",
"False",
")",
":",
"if",
"agg",
"and",
"not",
"_as",
":",
"raise",
"ValueError",
"(",
"\"Aggregate colunns require `_as` to be specified\"",
")",
"if",
"column",
"is",
"False",
":",
"self",
".",
"_selects",
"=",
"{",
"}",
"else",
":",
"self",
".",
"_selects",
".",
"setdefault",
"(",
"(",
"_as",
"or",
"column",
")",
",",
"(",
"column",
",",
"agg",
",",
"_as",
",",
"distinct",
")",
")"
] | 48.692308 | 22.846154 |
def human(value):
"If val>=1000 return val/1024+KiB, etc."
if value >= 1073741824000:
return '{:.1f} T'.format(value / 1099511627776.0)
if value >= 1048576000:
return '{:.1f} G'.format(value / 1073741824.0)
if value >= 1024000:
return '{:.1f} M'.format(value / 1048576.0)
if value >= 1000:
return '{:.1f} K'.format(value / 1024.0)
return '{} B'.format(value) | [
"def",
"human",
"(",
"value",
")",
":",
"if",
"value",
">=",
"1073741824000",
":",
"return",
"'{:.1f} T'",
".",
"format",
"(",
"value",
"/",
"1099511627776.0",
")",
"if",
"value",
">=",
"1048576000",
":",
"return",
"'{:.1f} G'",
".",
"format",
"(",
"value",
"/",
"1073741824.0",
")",
"if",
"value",
">=",
"1024000",
":",
"return",
"'{:.1f} M'",
".",
"format",
"(",
"value",
"/",
"1048576.0",
")",
"if",
"value",
">=",
"1000",
":",
"return",
"'{:.1f} K'",
".",
"format",
"(",
"value",
"/",
"1024.0",
")",
"return",
"'{} B'",
".",
"format",
"(",
"value",
")"
] | 36.909091 | 12.909091 |
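
Sample outputs for the formatter above, assuming the function is in scope. The cut-offs are decimal (1000, 1024000, ...) while the divisors are binary, so a value sitting exactly on a threshold rounds to 1.0 of the next unit:

for value in (512, 1000, 1024000, 1048576000, 1073741824000):
    print(value, '->', human(value))
# 512 -> 512 B
# 1000 -> 1.0 K   (1000 / 1024.0 = 0.9765..., shown with one decimal)
# 1024000 -> 1.0 M
# 1048576000 -> 1.0 G
# 1073741824000 -> 1.0 T
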
def wait_for_import_to_complete(self, import_id, region='us-east-1'):
"""
Monitors the status of aws import, waiting for it to complete, or error out
:param import_id: id of import task to monitor
"""
task_running = True
while task_running:
import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id)
res = subprocess.check_output(shlex.split(import_status_cmd))
print "Current status: {}".format(res)
res_json = json.loads(res)
task_running, image_id = self.check_task_status_and_id(res_json) | [
"def",
"wait_for_import_to_complete",
"(",
"self",
",",
"import_id",
",",
"region",
"=",
"'us-east-1'",
")",
":",
"task_running",
"=",
"True",
"while",
"task_running",
":",
"import_status_cmd",
"=",
"\"aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}\"",
".",
"format",
"(",
"self",
".",
"aws_project",
",",
"region",
",",
"import_id",
")",
"res",
"=",
"subprocess",
".",
"check_output",
"(",
"shlex",
".",
"split",
"(",
"import_status_cmd",
")",
")",
"print",
"\"Current status: {}\"",
".",
"format",
"(",
"res",
")",
"res_json",
"=",
"json",
".",
"loads",
"(",
"res",
")",
"task_running",
",",
"image_id",
"=",
"self",
".",
"check_task_status_and_id",
"(",
"res_json",
")"
] | 58.083333 | 27.583333 |
def GetParentFileEntry(self):
"""Retrieves the parent file entry.
Returns:
APFSFileEntry: parent file entry or None if not available.
"""
parent_location = None
location = getattr(self.path_spec, 'location', None)
if location is not None:
parent_location = self._file_system.DirnamePath(location)
if parent_location == '':
parent_location = self._file_system.PATH_SEPARATOR
parent_identifier = self._fsapfs_file_entry.parent_identifier
if parent_identifier is None:
return None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = apfs_path_spec.APFSPathSpec(
location=parent_location, identifier=parent_identifier,
parent=parent_path_spec)
is_root = bool(
parent_location == self._file_system.LOCATION_ROOT or
parent_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER)
return APFSFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root) | [
"def",
"GetParentFileEntry",
"(",
"self",
")",
":",
"parent_location",
"=",
"None",
"location",
"=",
"getattr",
"(",
"self",
".",
"path_spec",
",",
"'location'",
",",
"None",
")",
"if",
"location",
"is",
"not",
"None",
":",
"parent_location",
"=",
"self",
".",
"_file_system",
".",
"DirnamePath",
"(",
"location",
")",
"if",
"parent_location",
"==",
"''",
":",
"parent_location",
"=",
"self",
".",
"_file_system",
".",
"PATH_SEPARATOR",
"parent_identifier",
"=",
"self",
".",
"_fsapfs_file_entry",
".",
"parent_identifier",
"if",
"parent_identifier",
"is",
"None",
":",
"return",
"None",
"parent_path_spec",
"=",
"getattr",
"(",
"self",
".",
"path_spec",
",",
"'parent'",
",",
"None",
")",
"path_spec",
"=",
"apfs_path_spec",
".",
"APFSPathSpec",
"(",
"location",
"=",
"parent_location",
",",
"identifier",
"=",
"parent_identifier",
",",
"parent",
"=",
"parent_path_spec",
")",
"is_root",
"=",
"bool",
"(",
"parent_location",
"==",
"self",
".",
"_file_system",
".",
"LOCATION_ROOT",
"or",
"parent_identifier",
"==",
"self",
".",
"_file_system",
".",
"ROOT_DIRECTORY_IDENTIFIER",
")",
"return",
"APFSFileEntry",
"(",
"self",
".",
"_resolver_context",
",",
"self",
".",
"_file_system",
",",
"path_spec",
",",
"is_root",
"=",
"is_root",
")"
] | 33.965517 | 21.896552 |
def build_common_all_meta_df(common_meta_dfs, fields_to_remove, remove_all_metadata_fields):
"""
concatenate the entries in common_meta_dfs, removing columns selectively (fields_to_remove) or entirely (
remove_all_metadata_fields=True; in this case, effectively just merges all the indexes in common_meta_dfs).
Returns 2 dataframes (in a tuple): the first has duplicates removed, the second does not.
Args:
common_meta_dfs: collection of pandas DataFrames containing the metadata in the "common" direction of the
concatenation operation
fields_to_remove: columns to be removed (if present) from the common_meta_dfs
remove_all_metadata_fields: boolean indicating that all metadata fields should be removed from the
common_meta_dfs; overrides fields_to_remove if present
Returns:
tuple containing
        all_meta_df: pandas dataframe that is the concatenation of the dataframes in common_meta_dfs,
            with duplicate rows removed
        all_meta_df_with_dups: the same concatenation with duplicate rows retained
"""
if remove_all_metadata_fields:
trimmed_common_meta_dfs = [pd.DataFrame(index=df.index) for df in common_meta_dfs]
else:
shared_column_headers = sorted(set.intersection(*[set(df.columns) for df in common_meta_dfs]))
logger.debug("shared_column_headers: {}".format(shared_column_headers))
trimmed_common_meta_dfs = [df[shared_column_headers] for df in common_meta_dfs]
# Remove any column headers that will prevent dfs from being identical
for df in trimmed_common_meta_dfs:
df.drop(fields_to_remove, axis=1, errors="ignore", inplace=True)
# Concatenate all dfs and then remove duplicate rows
all_meta_df_with_dups = pd.concat(trimmed_common_meta_dfs, axis=0)
logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
logger.debug("all_meta_df_with_dups.columns: {}".format(all_meta_df_with_dups.columns))
logger.debug("all_meta_df_with_dups.index: {}".format(all_meta_df_with_dups.index))
# If all metadata dfs were empty, df will be empty
if all_meta_df_with_dups.empty:
# Simply return unique ids
all_meta_df = pd.DataFrame(index=all_meta_df_with_dups.index.unique())
else:
all_meta_df_with_dups["concat_column_for_index"] = all_meta_df_with_dups.index
all_meta_df = all_meta_df_with_dups.copy(deep=True).drop_duplicates()
all_meta_df.drop("concat_column_for_index", axis=1, inplace=True)
all_meta_df_with_dups.drop("concat_column_for_index", axis=1, inplace=True)
logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
logger.debug("all_meta_df.shape: {}".format(all_meta_df.shape))
return (all_meta_df, all_meta_df_with_dups) | [
"def",
"build_common_all_meta_df",
"(",
"common_meta_dfs",
",",
"fields_to_remove",
",",
"remove_all_metadata_fields",
")",
":",
"if",
"remove_all_metadata_fields",
":",
"trimmed_common_meta_dfs",
"=",
"[",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"df",
".",
"index",
")",
"for",
"df",
"in",
"common_meta_dfs",
"]",
"else",
":",
"shared_column_headers",
"=",
"sorted",
"(",
"set",
".",
"intersection",
"(",
"*",
"[",
"set",
"(",
"df",
".",
"columns",
")",
"for",
"df",
"in",
"common_meta_dfs",
"]",
")",
")",
"logger",
".",
"debug",
"(",
"\"shared_column_headers: {}\"",
".",
"format",
"(",
"shared_column_headers",
")",
")",
"trimmed_common_meta_dfs",
"=",
"[",
"df",
"[",
"shared_column_headers",
"]",
"for",
"df",
"in",
"common_meta_dfs",
"]",
"# Remove any column headers that will prevent dfs from being identical",
"for",
"df",
"in",
"trimmed_common_meta_dfs",
":",
"df",
".",
"drop",
"(",
"fields_to_remove",
",",
"axis",
"=",
"1",
",",
"errors",
"=",
"\"ignore\"",
",",
"inplace",
"=",
"True",
")",
"# Concatenate all dfs and then remove duplicate rows",
"all_meta_df_with_dups",
"=",
"pd",
".",
"concat",
"(",
"trimmed_common_meta_dfs",
",",
"axis",
"=",
"0",
")",
"logger",
".",
"debug",
"(",
"\"all_meta_df_with_dups.shape: {}\"",
".",
"format",
"(",
"all_meta_df_with_dups",
".",
"shape",
")",
")",
"logger",
".",
"debug",
"(",
"\"all_meta_df_with_dups.columns: {}\"",
".",
"format",
"(",
"all_meta_df_with_dups",
".",
"columns",
")",
")",
"logger",
".",
"debug",
"(",
"\"all_meta_df_with_dups.index: {}\"",
".",
"format",
"(",
"all_meta_df_with_dups",
".",
"index",
")",
")",
"# If all metadata dfs were empty, df will be empty",
"if",
"all_meta_df_with_dups",
".",
"empty",
":",
"# Simply return unique ids",
"all_meta_df",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"all_meta_df_with_dups",
".",
"index",
".",
"unique",
"(",
")",
")",
"else",
":",
"all_meta_df_with_dups",
"[",
"\"concat_column_for_index\"",
"]",
"=",
"all_meta_df_with_dups",
".",
"index",
"all_meta_df",
"=",
"all_meta_df_with_dups",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
".",
"drop_duplicates",
"(",
")",
"all_meta_df",
".",
"drop",
"(",
"\"concat_column_for_index\"",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"all_meta_df_with_dups",
".",
"drop",
"(",
"\"concat_column_for_index\"",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"logger",
".",
"debug",
"(",
"\"all_meta_df_with_dups.shape: {}\"",
".",
"format",
"(",
"all_meta_df_with_dups",
".",
"shape",
")",
")",
"logger",
".",
"debug",
"(",
"\"all_meta_df.shape: {}\"",
".",
"format",
"(",
"all_meta_df",
".",
"shape",
")",
")",
"return",
"(",
"all_meta_df",
",",
"all_meta_df_with_dups",
")"
] | 51.698113 | 35.773585 |
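
The column-intersection step is the crux of the record above. A self-contained pandas sketch of just that step, using two invented metadata frames:

import pandas as pd

df_a = pd.DataFrame({'pert_type': ['trt', 'ctl'], 'dose': [10, 0]}, index=['s1', 's2'])
df_b = pd.DataFrame({'pert_type': ['trt'], 'batch': ['b2']}, index=['s3'])

# Keep only the columns shared by every frame, then stack the rows.
shared_columns = sorted(set.intersection(*[set(df.columns) for df in (df_a, df_b)]))
stacked = pd.concat([df[shared_columns] for df in (df_a, df_b)], axis=0)

print(shared_columns)  # -> ['pert_type']
print(stacked)         # three rows indexed s1, s2, s3 with the single shared column
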
def to_dict(self, include_null=True):
"""
Convert to dict.
"""
if include_null:
return dict(self.items())
else:
return {
attr: value
for attr, value in self.__dict__.items()
if not attr.startswith("_sa_")
} | [
"def",
"to_dict",
"(",
"self",
",",
"include_null",
"=",
"True",
")",
":",
"if",
"include_null",
":",
"return",
"dict",
"(",
"self",
".",
"items",
"(",
")",
")",
"else",
":",
"return",
"{",
"attr",
":",
"value",
"for",
"attr",
",",
"value",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"not",
"attr",
".",
"startswith",
"(",
"\"_sa_\"",
")",
"}"
] | 26.583333 | 12.25 |
def _set_trap(self, v, load=False):
"""
Setter method for trap, mapped from YANG variable /snmp_server/enable/trap (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_trap is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trap() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=trap.trap, is_container='container', presence=False, yang_name="trap", rest_name="trap", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable the traps.', u'callpoint': u'snmptraps'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """trap must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=trap.trap, is_container='container', presence=False, yang_name="trap", rest_name="trap", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable the traps.', u'callpoint': u'snmptraps'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='container', is_config=True)""",
})
self.__trap = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_trap",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"trap",
".",
"trap",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"trap\"",
",",
"rest_name",
"=",
"\"trap\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Enable/Disable the traps.'",
",",
"u'callpoint'",
":",
"u'snmptraps'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-snmp'",
",",
"defining_module",
"=",
"'brocade-snmp'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"trap must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=trap.trap, is_container='container', presence=False, yang_name=\"trap\", rest_name=\"trap\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable the traps.', u'callpoint': u'snmptraps'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__trap",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 70.954545 | 33 |
def paintEvent(self, event):
"""
Reimplements the :meth:`QWidget.paintEvent` method.
:param event: Event.
:type event: QEvent
"""
def __set_bold(state):
"""
Sets the current painter font bold state.
            :return: Definition success.
:rtype: bool
"""
font = painter.font()
font.setBold(state)
painter.setFont(font)
return True
painter = QPainter(self)
painter.fillRect(event.rect(), self.__background_color)
pen = QPen(QBrush(), self.__separator_width)
pen.setColor(self.__separator_color)
painter.setPen(pen)
top_right_corner = event.rect().topRight()
bottom_right_corner = event.rect().bottomRight()
painter.drawLine(top_right_corner.x(), top_right_corner.y(), bottom_right_corner.x(), bottom_right_corner.y())
painter.setPen(self.__color)
viewport_height = self.__editor.viewport().height()
metrics = QFontMetrics(self.__editor.document().defaultFont())
current_block = self.__editor.document().findBlock(
self.__editor.textCursor().position())
block = self.__editor.firstVisibleBlock()
block_number = block.blockNumber()
painter.setFont(self.__editor.document().defaultFont())
while block.isValid():
block_number += 1
position = self.__editor.blockBoundingGeometry(block).topLeft() + self.__editor.contentOffset()
if position.y() > viewport_height:
break
if not block.isVisible():
continue
block == current_block and __set_bold(True) or __set_bold(False)
painter.drawText(
self.width() - metrics.width(foundations.strings.to_string(block_number)) - self.__margin / 3,
round(position.y() + metrics.ascent() + metrics.descent() -
(self.__editor.blockBoundingRect(block).height() * 8.0 / 100)),
foundations.strings.to_string(block_number))
block = block.next()
painter.end()
QWidget.paintEvent(self, event) | [
"def",
"paintEvent",
"(",
"self",
",",
"event",
")",
":",
"def",
"__set_bold",
"(",
"state",
")",
":",
"\"\"\"\n Sets the current painter font bold state.\n\n :return: Definiton success.\n :rtype: bool\n \"\"\"",
"font",
"=",
"painter",
".",
"font",
"(",
")",
"font",
".",
"setBold",
"(",
"state",
")",
"painter",
".",
"setFont",
"(",
"font",
")",
"return",
"True",
"painter",
"=",
"QPainter",
"(",
"self",
")",
"painter",
".",
"fillRect",
"(",
"event",
".",
"rect",
"(",
")",
",",
"self",
".",
"__background_color",
")",
"pen",
"=",
"QPen",
"(",
"QBrush",
"(",
")",
",",
"self",
".",
"__separator_width",
")",
"pen",
".",
"setColor",
"(",
"self",
".",
"__separator_color",
")",
"painter",
".",
"setPen",
"(",
"pen",
")",
"top_right_corner",
"=",
"event",
".",
"rect",
"(",
")",
".",
"topRight",
"(",
")",
"bottom_right_corner",
"=",
"event",
".",
"rect",
"(",
")",
".",
"bottomRight",
"(",
")",
"painter",
".",
"drawLine",
"(",
"top_right_corner",
".",
"x",
"(",
")",
",",
"top_right_corner",
".",
"y",
"(",
")",
",",
"bottom_right_corner",
".",
"x",
"(",
")",
",",
"bottom_right_corner",
".",
"y",
"(",
")",
")",
"painter",
".",
"setPen",
"(",
"self",
".",
"__color",
")",
"viewport_height",
"=",
"self",
".",
"__editor",
".",
"viewport",
"(",
")",
".",
"height",
"(",
")",
"metrics",
"=",
"QFontMetrics",
"(",
"self",
".",
"__editor",
".",
"document",
"(",
")",
".",
"defaultFont",
"(",
")",
")",
"current_block",
"=",
"self",
".",
"__editor",
".",
"document",
"(",
")",
".",
"findBlock",
"(",
"self",
".",
"__editor",
".",
"textCursor",
"(",
")",
".",
"position",
"(",
")",
")",
"block",
"=",
"self",
".",
"__editor",
".",
"firstVisibleBlock",
"(",
")",
"block_number",
"=",
"block",
".",
"blockNumber",
"(",
")",
"painter",
".",
"setFont",
"(",
"self",
".",
"__editor",
".",
"document",
"(",
")",
".",
"defaultFont",
"(",
")",
")",
"while",
"block",
".",
"isValid",
"(",
")",
":",
"block_number",
"+=",
"1",
"position",
"=",
"self",
".",
"__editor",
".",
"blockBoundingGeometry",
"(",
"block",
")",
".",
"topLeft",
"(",
")",
"+",
"self",
".",
"__editor",
".",
"contentOffset",
"(",
")",
"if",
"position",
".",
"y",
"(",
")",
">",
"viewport_height",
":",
"break",
"if",
"not",
"block",
".",
"isVisible",
"(",
")",
":",
"continue",
"block",
"==",
"current_block",
"and",
"__set_bold",
"(",
"True",
")",
"or",
"__set_bold",
"(",
"False",
")",
"painter",
".",
"drawText",
"(",
"self",
".",
"width",
"(",
")",
"-",
"metrics",
".",
"width",
"(",
"foundations",
".",
"strings",
".",
"to_string",
"(",
"block_number",
")",
")",
"-",
"self",
".",
"__margin",
"/",
"3",
",",
"round",
"(",
"position",
".",
"y",
"(",
")",
"+",
"metrics",
".",
"ascent",
"(",
")",
"+",
"metrics",
".",
"descent",
"(",
")",
"-",
"(",
"self",
".",
"__editor",
".",
"blockBoundingRect",
"(",
"block",
")",
".",
"height",
"(",
")",
"*",
"8.0",
"/",
"100",
")",
")",
",",
"foundations",
".",
"strings",
".",
"to_string",
"(",
"block_number",
")",
")",
"block",
"=",
"block",
".",
"next",
"(",
")",
"painter",
".",
"end",
"(",
")",
"QWidget",
".",
"paintEvent",
"(",
"self",
",",
"event",
")"
] | 35.85 | 21.216667 |
def channels_voice_greeting_category_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/voice-api/greetings#show-greeting-category"
api_path = "/api/v2/channels/voice/greeting_categories/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | [
"def",
"channels_voice_greeting_category_show",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/channels/voice/greeting_categories/{id}.json\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"id",
"=",
"id",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"*",
"*",
"kwargs",
")"
] | 63.2 | 23.2 |
def dump(self, filename):
"""Dump the registered fields to a file
Argument:
| ``filename`` -- the file to write to
"""
with open(filename, "w") as f:
for name in sorted(self._fields):
self._fields[name].dump(f, name) | [
"def",
"dump",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"for",
"name",
"in",
"sorted",
"(",
"self",
".",
"_fields",
")",
":",
"self",
".",
"_fields",
"[",
"name",
"]",
".",
"dump",
"(",
"f",
",",
"name",
")"
] | 31.777778 | 11.333333 |
def _onSelectItem(self, selection, previousSelection):
'''Handle selection of item in listing.'''
self._acceptButton.setEnabled(True)
del self._selected[:]
item = self._filesystemWidget.model().item(selection)
self._selected.append(item.path) | [
"def",
"_onSelectItem",
"(",
"self",
",",
"selection",
",",
"previousSelection",
")",
":",
"self",
".",
"_acceptButton",
".",
"setEnabled",
"(",
"True",
")",
"del",
"self",
".",
"_selected",
"[",
":",
"]",
"item",
"=",
"self",
".",
"_filesystemWidget",
".",
"model",
"(",
")",
".",
"item",
"(",
"selection",
")",
"self",
".",
"_selected",
".",
"append",
"(",
"item",
".",
"path",
")"
] | 46.166667 | 9.833333 |
def parsed_file(config_file):
"""Parse an ini-style config file."""
parser = ConfigParser(allow_no_value=True)
parser.readfp(config_file)
return parser | [
"def",
"parsed_file",
"(",
"config_file",
")",
":",
"parser",
"=",
"ConfigParser",
"(",
"allow_no_value",
"=",
"True",
")",
"parser",
".",
"readfp",
"(",
"config_file",
")",
"return",
"parser"
] | 32.6 | 10 |
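
One caveat next to the record above: ConfigParser.readfp was deprecated in Python 3.2 and removed in 3.12, so on current interpreters the equivalent call is read_file. A minimal sketch with an in-memory file:

from configparser import ConfigParser
from io import StringIO

config_file = StringIO("[server]\nhost = localhost\nport = 8080\n")

parser = ConfigParser(allow_no_value=True)
parser.read_file(config_file)  # read_file() is the modern spelling of readfp()
print(parser.get('server', 'host'))  # -> localhost
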
def set_bool_param(params, name, value):
"""
Set a boolean parameter if applicable.
:param dict params: A dict containing API call parameters.
:param str name: The name of the parameter to set.
:param bool value:
The value of the parameter. If ``None``, the field will not be set. If
``True`` or ``False``, the relevant field in ``params`` will be set to
``'true'`` or ``'false'``. Any other value will raise a `ValueError`.
:returns: ``None``
"""
if value is None:
return
if value is True:
params[name] = 'true'
elif value is False:
params[name] = 'false'
else:
raise ValueError("Parameter '%s' must be boolean or None, got %r." % (
name, value)) | [
"def",
"set_bool_param",
"(",
"params",
",",
"name",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"if",
"value",
"is",
"True",
":",
"params",
"[",
"name",
"]",
"=",
"'true'",
"elif",
"value",
"is",
"False",
":",
"params",
"[",
"name",
"]",
"=",
"'false'",
"else",
":",
"raise",
"ValueError",
"(",
"\"Parameter '%s' must be boolean or None, got %r.\"",
"%",
"(",
"name",
",",
"value",
")",
")"
] | 29.6 | 22.88 |
def segwit_addr_decode(addr, hrp=bech32_prefix):
"""
Decode a segwit address.
Returns (version, hash_bin) on success
Returns (None, None) on error
"""
hrpgot, data = bech32_decode(addr)
if hrpgot != hrp:
return (None, None)
decoded = convertbits(data[1:], 5, 8, False)
if decoded is None or len(decoded) < 2 or len(decoded) > 40:
return (None, None)
if data[0] > 16:
return (None, None)
if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:
return (None, None)
return (data[0], ''.join([chr(x) for x in decoded])) | [
"def",
"segwit_addr_decode",
"(",
"addr",
",",
"hrp",
"=",
"bech32_prefix",
")",
":",
"hrpgot",
",",
"data",
"=",
"bech32_decode",
"(",
"addr",
")",
"if",
"hrpgot",
"!=",
"hrp",
":",
"return",
"(",
"None",
",",
"None",
")",
"decoded",
"=",
"convertbits",
"(",
"data",
"[",
"1",
":",
"]",
",",
"5",
",",
"8",
",",
"False",
")",
"if",
"decoded",
"is",
"None",
"or",
"len",
"(",
"decoded",
")",
"<",
"2",
"or",
"len",
"(",
"decoded",
")",
">",
"40",
":",
"return",
"(",
"None",
",",
"None",
")",
"if",
"data",
"[",
"0",
"]",
">",
"16",
":",
"return",
"(",
"None",
",",
"None",
")",
"if",
"data",
"[",
"0",
"]",
"==",
"0",
"and",
"len",
"(",
"decoded",
")",
"!=",
"20",
"and",
"len",
"(",
"decoded",
")",
"!=",
"32",
":",
"return",
"(",
"None",
",",
"None",
")",
"return",
"(",
"data",
"[",
"0",
"]",
",",
"''",
".",
"join",
"(",
"[",
"chr",
"(",
"x",
")",
"for",
"x",
"in",
"decoded",
"]",
")",
")"
] | 34.470588 | 11.529412 |
def uniform_grid(min_pt, max_pt, shape, nodes_on_bdry=True):
"""Return a grid from sampling an implicit interval product uniformly.
Parameters
----------
min_pt : float or sequence of float
Vectors of lower ends of the intervals in the product.
max_pt : float or sequence of float
Vectors of upper ends of the intervals in the product.
shape : int or sequence of ints
Number of nodes per axis. Entries corresponding to degenerate axes
must be equal to 1.
nodes_on_bdry : bool or sequence, optional
If a sequence is provided, it determines per axis whether to
place the last grid point on the boundary (``True``) or shift it
by half a cell size into the interior (``False``). In each axis,
an entry may consist in a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
sequence must be ``array.ndim``.
A single boolean is interpreted as a global choice for all
boundaries.
Returns
-------
uniform_grid : `RectGrid`
The resulting uniform grid.
See Also
--------
uniform_grid_fromintv :
sample a given interval product
odl.discr.partition.uniform_partition :
divide implicitly defined interval product into equally
sized subsets
Examples
--------
By default, the min/max points are included in the grid:
>>> grid = odl.uniform_grid([-1.5, 2], [-0.5, 3], (3, 3))
>>> grid.coord_vectors
(array([-1.5, -1. , -0.5]), array([ 2. , 2.5, 3. ]))
If ``shape`` is supposed to refer to small subvolumes, and the grid
should be their centers, use the option ``nodes_on_bdry=False``:
>>> grid = odl.uniform_grid([-1.5, 2], [-0.5, 3], (2, 2),
... nodes_on_bdry=False)
>>> grid.coord_vectors
(array([-1.25, -0.75]), array([ 2.25, 2.75]))
In 1D, we don't need sequences:
>>> grid = odl.uniform_grid(0, 1, 3)
>>> grid.coord_vectors
(array([ 0. , 0.5, 1. ]),)
"""
return uniform_grid_fromintv(IntervalProd(min_pt, max_pt), shape,
nodes_on_bdry=nodes_on_bdry) | [
"def",
"uniform_grid",
"(",
"min_pt",
",",
"max_pt",
",",
"shape",
",",
"nodes_on_bdry",
"=",
"True",
")",
":",
"return",
"uniform_grid_fromintv",
"(",
"IntervalProd",
"(",
"min_pt",
",",
"max_pt",
")",
",",
"shape",
",",
"nodes_on_bdry",
"=",
"nodes_on_bdry",
")"
] | 36.245902 | 21.377049 |
def apply_diff(text, diff, reverse=False, verify=True):
"""
SOME EXAMPLES OF diff
#@@ -1 +1 @@
#-before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
#+before china goes live (end January developer release, June general audience release) , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -0,0 +1,3 @@
+before china goes live, the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+
+kward has the details.
@@ -1 +1 @@
-before china goes live (end January developer release, June general audience release), the content team will have to manually update the settings for the china-ready apps currently in marketplace.
+before china goes live , the content team will have to manually update the settings for the china-ready apps currently in marketplace.
@@ -3 +3 ,6 @@
-kward has the details.+kward has the details.
+
+Target Release Dates :
+https://mana.mozilla.org/wiki/display/PM/Firefox+OS+Wave+Launch+Cross+Functional+View
+
+Content Team Engagement & Tasks : https://appreview.etherpad.mozilla.org/40
"""
if not diff:
return text
output = text
hunks = [
(new_diff[start_hunk], new_diff[start_hunk+1:end_hunk])
for new_diff in [[d.lstrip() for d in diff if d.lstrip() and d != "\\ No newline at end of file"] + ["@@"]] # ANOTHER REPAIR
for start_hunk, end_hunk in pairwise(i for i, l in enumerate(new_diff) if l.startswith('@@'))
]
for header, hunk_body in (reversed(hunks) if reverse else hunks):
matches = DIFF_PREFIX.match(header.strip())
if not matches:
if not _Log:
_late_import()
_Log.error("Can not handle \n---\n{{diff}}\n---\n", diff=diff)
removes = tuple(int(i.strip()) for i in matches.group(1).split(",")) # EXPECTING start_line, length TO REMOVE
remove = Data(start=removes[0], length=1 if len(removes) == 1 else removes[1]) # ASSUME FIRST LINE
adds = tuple(int(i.strip()) for i in matches.group(2).split(",")) # EXPECTING start_line, length TO ADD
add = Data(start=adds[0], length=1 if len(adds) == 1 else adds[1])
if add.length == 0 and add.start == 0:
add.start = remove.start
def repair_hunk(hunk_body):
# THE LAST DELETED LINE MAY MISS A "\n" MEANING THE FIRST
# ADDED LINE WILL BE APPENDED TO THE LAST DELETED LINE
# EXAMPLE: -kward has the details.+kward has the details.
# DETECT THIS PROBLEM FOR THIS HUNK AND FIX THE DIFF
if reverse:
last_lines = [
o
for b, o in zip(reversed(hunk_body), reversed(output))
if b != "+" + o
]
if not last_lines:
return hunk_body
last_line = last_lines[0]
for problem_index, problem_line in enumerate(hunk_body):
if problem_line.startswith('-') and problem_line.endswith('+' + last_line):
split_point = len(problem_line) - (len(last_line) + 1)
break
elif problem_line.startswith('+' + last_line + "-"):
split_point = len(last_line) + 1
break
else:
return hunk_body
else:
if not output:
return hunk_body
last_line = output[-1]
for problem_index, problem_line in enumerate(hunk_body):
if problem_line.startswith('+') and problem_line.endswith('-' + last_line):
split_point = len(problem_line) - (len(last_line) + 1)
break
elif problem_line.startswith('-' + last_line + "+"):
split_point = len(last_line) + 1
break
else:
return hunk_body
new_hunk_body = (
hunk_body[:problem_index] +
[problem_line[:split_point], problem_line[split_point:]] +
hunk_body[problem_index + 1:]
)
return new_hunk_body
hunk_body = repair_hunk(hunk_body)
if reverse:
new_output = (
output[:add.start - 1] +
[d[1:] for d in hunk_body if d and d[0] == '-'] +
output[add.start + add.length - 1:]
)
else:
new_output = (
output[:add.start - 1] +
[d[1:] for d in hunk_body if d and d[0] == '+'] +
output[add.start + remove.length - 1:]
)
output = new_output
if verify:
original = apply_diff(output, diff, not reverse, False)
if set(text) != set(original): # bugzilla-etl diffs are a jumble
for t, o in zip_longest(text, original):
if t in ['reports: https://goo.gl/70o6w6\r']:
break # KNOWN INCONSISTENCIES
if t != o:
if not _Log:
_late_import()
_Log.error("logical verification check failed")
break
return output | [
"def",
"apply_diff",
"(",
"text",
",",
"diff",
",",
"reverse",
"=",
"False",
",",
"verify",
"=",
"True",
")",
":",
"if",
"not",
"diff",
":",
"return",
"text",
"output",
"=",
"text",
"hunks",
"=",
"[",
"(",
"new_diff",
"[",
"start_hunk",
"]",
",",
"new_diff",
"[",
"start_hunk",
"+",
"1",
":",
"end_hunk",
"]",
")",
"for",
"new_diff",
"in",
"[",
"[",
"d",
".",
"lstrip",
"(",
")",
"for",
"d",
"in",
"diff",
"if",
"d",
".",
"lstrip",
"(",
")",
"and",
"d",
"!=",
"\"\\\\ No newline at end of file\"",
"]",
"+",
"[",
"\"@@\"",
"]",
"]",
"# ANOTHER REPAIR",
"for",
"start_hunk",
",",
"end_hunk",
"in",
"pairwise",
"(",
"i",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"new_diff",
")",
"if",
"l",
".",
"startswith",
"(",
"'@@'",
")",
")",
"]",
"for",
"header",
",",
"hunk_body",
"in",
"(",
"reversed",
"(",
"hunks",
")",
"if",
"reverse",
"else",
"hunks",
")",
":",
"matches",
"=",
"DIFF_PREFIX",
".",
"match",
"(",
"header",
".",
"strip",
"(",
")",
")",
"if",
"not",
"matches",
":",
"if",
"not",
"_Log",
":",
"_late_import",
"(",
")",
"_Log",
".",
"error",
"(",
"\"Can not handle \\n---\\n{{diff}}\\n---\\n\"",
",",
"diff",
"=",
"diff",
")",
"removes",
"=",
"tuple",
"(",
"int",
"(",
"i",
".",
"strip",
"(",
")",
")",
"for",
"i",
"in",
"matches",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"\",\"",
")",
")",
"# EXPECTING start_line, length TO REMOVE",
"remove",
"=",
"Data",
"(",
"start",
"=",
"removes",
"[",
"0",
"]",
",",
"length",
"=",
"1",
"if",
"len",
"(",
"removes",
")",
"==",
"1",
"else",
"removes",
"[",
"1",
"]",
")",
"# ASSUME FIRST LINE",
"adds",
"=",
"tuple",
"(",
"int",
"(",
"i",
".",
"strip",
"(",
")",
")",
"for",
"i",
"in",
"matches",
".",
"group",
"(",
"2",
")",
".",
"split",
"(",
"\",\"",
")",
")",
"# EXPECTING start_line, length TO ADD",
"add",
"=",
"Data",
"(",
"start",
"=",
"adds",
"[",
"0",
"]",
",",
"length",
"=",
"1",
"if",
"len",
"(",
"adds",
")",
"==",
"1",
"else",
"adds",
"[",
"1",
"]",
")",
"if",
"add",
".",
"length",
"==",
"0",
"and",
"add",
".",
"start",
"==",
"0",
":",
"add",
".",
"start",
"=",
"remove",
".",
"start",
"def",
"repair_hunk",
"(",
"hunk_body",
")",
":",
"# THE LAST DELETED LINE MAY MISS A \"\\n\" MEANING THE FIRST",
"# ADDED LINE WILL BE APPENDED TO THE LAST DELETED LINE",
"# EXAMPLE: -kward has the details.+kward has the details.",
"# DETECT THIS PROBLEM FOR THIS HUNK AND FIX THE DIFF",
"if",
"reverse",
":",
"last_lines",
"=",
"[",
"o",
"for",
"b",
",",
"o",
"in",
"zip",
"(",
"reversed",
"(",
"hunk_body",
")",
",",
"reversed",
"(",
"output",
")",
")",
"if",
"b",
"!=",
"\"+\"",
"+",
"o",
"]",
"if",
"not",
"last_lines",
":",
"return",
"hunk_body",
"last_line",
"=",
"last_lines",
"[",
"0",
"]",
"for",
"problem_index",
",",
"problem_line",
"in",
"enumerate",
"(",
"hunk_body",
")",
":",
"if",
"problem_line",
".",
"startswith",
"(",
"'-'",
")",
"and",
"problem_line",
".",
"endswith",
"(",
"'+'",
"+",
"last_line",
")",
":",
"split_point",
"=",
"len",
"(",
"problem_line",
")",
"-",
"(",
"len",
"(",
"last_line",
")",
"+",
"1",
")",
"break",
"elif",
"problem_line",
".",
"startswith",
"(",
"'+'",
"+",
"last_line",
"+",
"\"-\"",
")",
":",
"split_point",
"=",
"len",
"(",
"last_line",
")",
"+",
"1",
"break",
"else",
":",
"return",
"hunk_body",
"else",
":",
"if",
"not",
"output",
":",
"return",
"hunk_body",
"last_line",
"=",
"output",
"[",
"-",
"1",
"]",
"for",
"problem_index",
",",
"problem_line",
"in",
"enumerate",
"(",
"hunk_body",
")",
":",
"if",
"problem_line",
".",
"startswith",
"(",
"'+'",
")",
"and",
"problem_line",
".",
"endswith",
"(",
"'-'",
"+",
"last_line",
")",
":",
"split_point",
"=",
"len",
"(",
"problem_line",
")",
"-",
"(",
"len",
"(",
"last_line",
")",
"+",
"1",
")",
"break",
"elif",
"problem_line",
".",
"startswith",
"(",
"'-'",
"+",
"last_line",
"+",
"\"+\"",
")",
":",
"split_point",
"=",
"len",
"(",
"last_line",
")",
"+",
"1",
"break",
"else",
":",
"return",
"hunk_body",
"new_hunk_body",
"=",
"(",
"hunk_body",
"[",
":",
"problem_index",
"]",
"+",
"[",
"problem_line",
"[",
":",
"split_point",
"]",
",",
"problem_line",
"[",
"split_point",
":",
"]",
"]",
"+",
"hunk_body",
"[",
"problem_index",
"+",
"1",
":",
"]",
")",
"return",
"new_hunk_body",
"hunk_body",
"=",
"repair_hunk",
"(",
"hunk_body",
")",
"if",
"reverse",
":",
"new_output",
"=",
"(",
"output",
"[",
":",
"add",
".",
"start",
"-",
"1",
"]",
"+",
"[",
"d",
"[",
"1",
":",
"]",
"for",
"d",
"in",
"hunk_body",
"if",
"d",
"and",
"d",
"[",
"0",
"]",
"==",
"'-'",
"]",
"+",
"output",
"[",
"add",
".",
"start",
"+",
"add",
".",
"length",
"-",
"1",
":",
"]",
")",
"else",
":",
"new_output",
"=",
"(",
"output",
"[",
":",
"add",
".",
"start",
"-",
"1",
"]",
"+",
"[",
"d",
"[",
"1",
":",
"]",
"for",
"d",
"in",
"hunk_body",
"if",
"d",
"and",
"d",
"[",
"0",
"]",
"==",
"'+'",
"]",
"+",
"output",
"[",
"add",
".",
"start",
"+",
"remove",
".",
"length",
"-",
"1",
":",
"]",
")",
"output",
"=",
"new_output",
"if",
"verify",
":",
"original",
"=",
"apply_diff",
"(",
"output",
",",
"diff",
",",
"not",
"reverse",
",",
"False",
")",
"if",
"set",
"(",
"text",
")",
"!=",
"set",
"(",
"original",
")",
":",
"# bugzilla-etl diffs are a jumble",
"for",
"t",
",",
"o",
"in",
"zip_longest",
"(",
"text",
",",
"original",
")",
":",
"if",
"t",
"in",
"[",
"'reports: https://goo.gl/70o6w6\\r'",
"]",
":",
"break",
"# KNOWN INCONSISTENCIES",
"if",
"t",
"!=",
"o",
":",
"if",
"not",
"_Log",
":",
"_late_import",
"(",
")",
"_Log",
".",
"error",
"(",
"\"logical verification check failed\"",
")",
"break",
"return",
"output"
] | 44.683333 | 27.733333 |
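
The record leans on a DIFF_PREFIX regex that is not shown. As an illustration of the start/length bookkeeping it feeds, including the default length of 1 when the count is omitted, a conventional unified-diff hunk header can be parsed like this (illustrative pattern only):

import re

# Illustrative pattern; the record's actual DIFF_PREFIX regex is not included above.
HUNK_HEADER = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')

def parse_hunk_header(line):
    match = HUNK_HEADER.match(line)
    old_start, old_length, new_start, new_length = match.groups()
    # A missing count means the hunk covers a single line, matching the record's defaults.
    return (int(old_start), int(old_length or 1), int(new_start), int(new_length or 1))

print(parse_hunk_header('@@ -1 +1 @@'))      # -> (1, 1, 1, 1)
print(parse_hunk_header('@@ -0,0 +1,3 @@'))  # -> (0, 0, 1, 3)
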
def get_jvm_options(self):
"""Return the options to run this JVM with.
    These are options to the JVM itself, such as -Dfoo=bar, -Xmx1g, -XX:-UseParallelGC and so on.
Thus named because get_options() already exists (and returns this object's Pants options).
"""
ret = []
for opt in self.get_options().options:
ret.extend(safe_shlex_split(opt))
if (self.get_options().debug or
self.get_options().is_flagged('debug_port') or
self.get_options().is_flagged('debug_args')):
debug_port = self.get_options().debug_port
ret.extend(arg.format(debug_port=debug_port) for arg in self.get_options().debug_args)
return ret | [
"def",
"get_jvm_options",
"(",
"self",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"opt",
"in",
"self",
".",
"get_options",
"(",
")",
".",
"options",
":",
"ret",
".",
"extend",
"(",
"safe_shlex_split",
"(",
"opt",
")",
")",
"if",
"(",
"self",
".",
"get_options",
"(",
")",
".",
"debug",
"or",
"self",
".",
"get_options",
"(",
")",
".",
"is_flagged",
"(",
"'debug_port'",
")",
"or",
"self",
".",
"get_options",
"(",
")",
".",
"is_flagged",
"(",
"'debug_args'",
")",
")",
":",
"debug_port",
"=",
"self",
".",
"get_options",
"(",
")",
".",
"debug_port",
"ret",
".",
"extend",
"(",
"arg",
".",
"format",
"(",
"debug_port",
"=",
"debug_port",
")",
"for",
"arg",
"in",
"self",
".",
"get_options",
"(",
")",
".",
"debug_args",
")",
"return",
"ret"
] | 38.882353 | 23.235294 |
def from_text(cls, text: str):
""" Construct an AnalysisGraph object from text, using Eidos to perform
machine reading. """
eidosProcessor = process_text(text)
return cls.from_statements(eidosProcessor.statements) | [
"def",
"from_text",
"(",
"cls",
",",
"text",
":",
"str",
")",
":",
"eidosProcessor",
"=",
"process_text",
"(",
"text",
")",
"return",
"cls",
".",
"from_statements",
"(",
"eidosProcessor",
".",
"statements",
")"
] | 40.166667 | 12.333333 |
def geocode(
self,
query,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
candidates=None, # TODO: change default value to `1` in geopy 2.0
):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param int candidates: An integer between 1 and 10 indicating the max
number of candidate addresses to return if a valid address
could be found. Defaults to `1`.
.. versionadded:: 1.19.0
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if candidates is None:
candidates = self.candidates
if candidates is None:
candidates = 1 # TODO: move to default args in geopy 2.0.
if candidates:
if not (1 <= candidates <= 10):
raise ValueError('candidates must be between 1 and 10')
query = {
'auth-id': self.auth_id,
'auth-token': self.auth_token,
'street': self.format_string % query,
'candidates': candidates,
}
url = '{url}?{query}'.format(url=self.api, query=urlencode(query))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(self._call_geocoder(url, timeout=timeout),
exactly_one) | [
"def",
"geocode",
"(",
"self",
",",
"query",
",",
"exactly_one",
"=",
"True",
",",
"timeout",
"=",
"DEFAULT_SENTINEL",
",",
"candidates",
"=",
"None",
",",
"# TODO: change default value to `1` in geopy 2.0",
")",
":",
"if",
"candidates",
"is",
"None",
":",
"candidates",
"=",
"self",
".",
"candidates",
"if",
"candidates",
"is",
"None",
":",
"candidates",
"=",
"1",
"# TODO: move to default args in geopy 2.0.",
"if",
"candidates",
":",
"if",
"not",
"(",
"1",
"<=",
"candidates",
"<=",
"10",
")",
":",
"raise",
"ValueError",
"(",
"'candidates must be between 1 and 10'",
")",
"query",
"=",
"{",
"'auth-id'",
":",
"self",
".",
"auth_id",
",",
"'auth-token'",
":",
"self",
".",
"auth_token",
",",
"'street'",
":",
"self",
".",
"format_string",
"%",
"query",
",",
"'candidates'",
":",
"candidates",
",",
"}",
"url",
"=",
"'{url}?{query}'",
".",
"format",
"(",
"url",
"=",
"self",
".",
"api",
",",
"query",
"=",
"urlencode",
"(",
"query",
")",
")",
"logger",
".",
"debug",
"(",
"\"%s.geocode: %s\"",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"url",
")",
"return",
"self",
".",
"_parse_json",
"(",
"self",
".",
"_call_geocoder",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")",
",",
"exactly_one",
")"
] | 35.686275 | 23.803922 |
def _get_action_name(self, items=None, past=False):
"""Retreive action name based on the number of items and `past` flag.
:param items:
A list or tuple of items (or container with a __len__ method) to
count the number of concerned items for which this method is
called.
When this method is called for a single item (by the BatchAction
itself), this parameter can be omitted and the number of items
will be considered as "one".
If we want to evaluate to "zero" this parameter must not be omitted
(and should be an empty container).
:param past:
Boolean flag indicating if the action took place in the past.
By default a present action is considered.
"""
action_type = "past" if past else "present"
if items is None:
# Called without items parameter (by a single instance.)
count = 1
else:
count = len(items)
action_attr = getattr(self, "action_%s" % action_type)(count)
if isinstance(action_attr, (six.string_types, Promise)):
action = action_attr
else:
toggle_selection = getattr(self, "current_%s_action" % action_type)
action = action_attr[toggle_selection]
return action | [
"def",
"_get_action_name",
"(",
"self",
",",
"items",
"=",
"None",
",",
"past",
"=",
"False",
")",
":",
"action_type",
"=",
"\"past\"",
"if",
"past",
"else",
"\"present\"",
"if",
"items",
"is",
"None",
":",
"# Called without items parameter (by a single instance.)",
"count",
"=",
"1",
"else",
":",
"count",
"=",
"len",
"(",
"items",
")",
"action_attr",
"=",
"getattr",
"(",
"self",
",",
"\"action_%s\"",
"%",
"action_type",
")",
"(",
"count",
")",
"if",
"isinstance",
"(",
"action_attr",
",",
"(",
"six",
".",
"string_types",
",",
"Promise",
")",
")",
":",
"action",
"=",
"action_attr",
"else",
":",
"toggle_selection",
"=",
"getattr",
"(",
"self",
",",
"\"current_%s_action\"",
"%",
"action_type",
")",
"action",
"=",
"action_attr",
"[",
"toggle_selection",
"]",
"return",
"action"
] | 39 | 23.764706 |
def exists():
"""Verify that the ENV defined NVMe device exists"""
if env():
cij.err("cij.nvm.exists: Invalid NVMe ENV.")
return 1
nvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)
cmd = ['[[ -b "%s" ]]' % nvm["DEV_PATH"]]
rcode, _, _ = cij.ssh.command(cmd, shell=True, echo=False)
return rcode | [
"def",
"exists",
"(",
")",
":",
"if",
"env",
"(",
")",
":",
"cij",
".",
"err",
"(",
"\"cij.nvm.exists: Invalid NVMe ENV.\"",
")",
"return",
"1",
"nvm",
"=",
"cij",
".",
"env_to_dict",
"(",
"PREFIX",
",",
"EXPORTED",
"+",
"REQUIRED",
")",
"cmd",
"=",
"[",
"'[[ -b \"%s\" ]]'",
"%",
"nvm",
"[",
"\"DEV_PATH\"",
"]",
"]",
"rcode",
",",
"_",
",",
"_",
"=",
"cij",
".",
"ssh",
".",
"command",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"echo",
"=",
"False",
")",
"return",
"rcode"
] | 25.153846 | 24.230769 |
def parse(code, mode='exec', **exception_kwargs):
"""Parse an expression into AST"""
try:
return _ast_util.parse(code, '<unknown>', mode)
except Exception:
raise exceptions.SyntaxException(
"(%s) %s (%r)" % (
compat.exception_as().__class__.__name__,
compat.exception_as(),
code[0:50]
), **exception_kwargs) | [
"def",
"parse",
"(",
"code",
",",
"mode",
"=",
"'exec'",
",",
"*",
"*",
"exception_kwargs",
")",
":",
"try",
":",
"return",
"_ast_util",
".",
"parse",
"(",
"code",
",",
"'<unknown>'",
",",
"mode",
")",
"except",
"Exception",
":",
"raise",
"exceptions",
".",
"SyntaxException",
"(",
"\"(%s) %s (%r)\"",
"%",
"(",
"compat",
".",
"exception_as",
"(",
")",
".",
"__class__",
".",
"__name__",
",",
"compat",
".",
"exception_as",
"(",
")",
",",
"code",
"[",
"0",
":",
"50",
"]",
")",
",",
"*",
"*",
"exception_kwargs",
")"
] | 36.416667 | 13.083333 |
def read_boolean_option(self, section, option):
"""Read a boolean option."""
if self.has_option(section, option):
self.config[option] = self.getboolean(section, option) | [
"def",
"read_boolean_option",
"(",
"self",
",",
"section",
",",
"option",
")",
":",
"if",
"self",
".",
"has_option",
"(",
"section",
",",
"option",
")",
":",
"self",
".",
"config",
"[",
"option",
"]",
"=",
"self",
".",
"getboolean",
"(",
"section",
",",
"option",
")"
] | 48.25 | 9.25 |
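A runnable sketch of the pattern with stdlib `configparser`, assuming the surrounding class keeps a plain `config` dict as in the row above; the `Settings` name and the sample section are invented for the example.

import configparser

class Settings(configparser.ConfigParser):
    # Copy a boolean option into a plain dict only when it is present.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.config = {}

    def read_boolean_option(self, section, option):
        if self.has_option(section, option):
            self.config[option] = self.getboolean(section, option)

settings = Settings()
settings.read_string("[general]\nverbose = yes\n")
settings.read_boolean_option("general", "verbose")
settings.read_boolean_option("general", "missing")  # silently skipped
print(settings.config)  # {'verbose': True}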
def quast_predicted_genes_barplot(self):
"""
Make a bar plot showing the number and length of predicted genes
for each assembly
"""
# Prep the data
# extract the ranges given to quast with "--gene-thresholds"
prefix = '# predicted genes (>= '
suffix = ' bp)'
all_thresholds = sorted(list(set([
int(key[len(prefix):-len(suffix)])
for _, d in self.quast_data.items()
for key in d.keys()
if key.startswith(prefix)
])))
data = {}
ourpat = '>= {}{} bp'
theirpat = prefix+"{}"+suffix
for s_name, d in self.quast_data.items():
thresholds = sorted(list(set([
int(key[len(prefix):-len(suffix)])
for _, x in self.quast_data.items()
for key in x.keys()
if key.startswith(prefix)
])))
if len(thresholds)<2: continue
p = dict()
try:
p = { ourpat.format(thresholds[-1],""): d[theirpat.format(thresholds[-1])] }
for low,high in zip(thresholds[:-1], thresholds[1:]):
p[ourpat.format(low,-high)] = d[theirpat.format(low)] - d[theirpat.format(high)]
assert sum(p.values()) == d[theirpat.format(0)]
except AssertionError:
log.warning("Predicted gene counts didn't add up properly for \"{}\"".format(s_name))
except KeyError:
log.warning("Not all predicted gene thresholds available for \"{}\"".format(s_name))
data[s_name] = p
cats = [ ourpat.format(low,-high if high else "")
for low,high in zip(all_thresholds, all_thresholds[1:]+[None]) ]
if len(cats) > 0:
return bargraph.plot(data, cats, {'id': 'quast_predicted_genes', 'title': 'QUAST: Number of predicted genes', 'ylab': 'Number of predicted genes'})
else:
return None | [
"def",
"quast_predicted_genes_barplot",
"(",
"self",
")",
":",
"# Prep the data",
"# extract the ranges given to quast with \"--gene-thresholds\"",
"prefix",
"=",
"'# predicted genes (>= '",
"suffix",
"=",
"' bp)'",
"all_thresholds",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"[",
"int",
"(",
"key",
"[",
"len",
"(",
"prefix",
")",
":",
"-",
"len",
"(",
"suffix",
")",
"]",
")",
"for",
"_",
",",
"d",
"in",
"self",
".",
"quast_data",
".",
"items",
"(",
")",
"for",
"key",
"in",
"d",
".",
"keys",
"(",
")",
"if",
"key",
".",
"startswith",
"(",
"prefix",
")",
"]",
")",
")",
")",
"data",
"=",
"{",
"}",
"ourpat",
"=",
"'>= {}{} bp'",
"theirpat",
"=",
"prefix",
"+",
"\"{}\"",
"+",
"suffix",
"for",
"s_name",
",",
"d",
"in",
"self",
".",
"quast_data",
".",
"items",
"(",
")",
":",
"thresholds",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"[",
"int",
"(",
"key",
"[",
"len",
"(",
"prefix",
")",
":",
"-",
"len",
"(",
"suffix",
")",
"]",
")",
"for",
"_",
",",
"x",
"in",
"self",
".",
"quast_data",
".",
"items",
"(",
")",
"for",
"key",
"in",
"x",
".",
"keys",
"(",
")",
"if",
"key",
".",
"startswith",
"(",
"prefix",
")",
"]",
")",
")",
")",
"if",
"len",
"(",
"thresholds",
")",
"<",
"2",
":",
"continue",
"p",
"=",
"dict",
"(",
")",
"try",
":",
"p",
"=",
"{",
"ourpat",
".",
"format",
"(",
"thresholds",
"[",
"-",
"1",
"]",
",",
"\"\"",
")",
":",
"d",
"[",
"theirpat",
".",
"format",
"(",
"thresholds",
"[",
"-",
"1",
"]",
")",
"]",
"}",
"for",
"low",
",",
"high",
"in",
"zip",
"(",
"thresholds",
"[",
":",
"-",
"1",
"]",
",",
"thresholds",
"[",
"1",
":",
"]",
")",
":",
"p",
"[",
"ourpat",
".",
"format",
"(",
"low",
",",
"-",
"high",
")",
"]",
"=",
"d",
"[",
"theirpat",
".",
"format",
"(",
"low",
")",
"]",
"-",
"d",
"[",
"theirpat",
".",
"format",
"(",
"high",
")",
"]",
"assert",
"sum",
"(",
"p",
".",
"values",
"(",
")",
")",
"==",
"d",
"[",
"theirpat",
".",
"format",
"(",
"0",
")",
"]",
"except",
"AssertionError",
":",
"log",
".",
"warning",
"(",
"\"Predicted gene counts didn't add up properly for \\\"{}\\\"\"",
".",
"format",
"(",
"s_name",
")",
")",
"except",
"KeyError",
":",
"log",
".",
"warning",
"(",
"\"Not all predicted gene thresholds available for \\\"{}\\\"\"",
".",
"format",
"(",
"s_name",
")",
")",
"data",
"[",
"s_name",
"]",
"=",
"p",
"cats",
"=",
"[",
"ourpat",
".",
"format",
"(",
"low",
",",
"-",
"high",
"if",
"high",
"else",
"\"\"",
")",
"for",
"low",
",",
"high",
"in",
"zip",
"(",
"all_thresholds",
",",
"all_thresholds",
"[",
"1",
":",
"]",
"+",
"[",
"None",
"]",
")",
"]",
"if",
"len",
"(",
"cats",
")",
">",
"0",
":",
"return",
"bargraph",
".",
"plot",
"(",
"data",
",",
"cats",
",",
"{",
"'id'",
":",
"'quast_predicted_genes'",
",",
"'title'",
":",
"'QUAST: Number of predicted genes'",
",",
"'ylab'",
":",
"'Number of predicted genes'",
"}",
")",
"else",
":",
"return",
"None"
] | 39.857143 | 22.346939 |
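The core arithmetic above — turning QUAST's cumulative "# predicted genes (>= N bp)" counts into disjoint bins — in isolation, with made-up counts; only the binning logic is reproduced, not the MultiQC plotting.

# Hypothetical cumulative counts keyed by the ">= N bp" threshold.
counts = {0: 120, 300: 80, 1500: 25, 3000: 5}
thresholds = sorted(counts)

# Last bin is open-ended; each other bin is the difference of neighbouring
# cumulative counts, so the bins partition the ">= 0 bp" total.
bins = {">= %d bp" % thresholds[-1]: counts[thresholds[-1]]}
for low, high in zip(thresholds[:-1], thresholds[1:]):
    bins[">= %d-%d bp" % (low, high)] = counts[low] - counts[high]

assert sum(bins.values()) == counts[0]
print(bins)
# {'>= 3000 bp': 5, '>= 0-300 bp': 40, '>= 300-1500 bp': 55, '>= 1500-3000 bp': 20}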
def match_to_dict(match):
"""Convert a match object into a dict.
Values are:
indent: amount of indentation of this [sub]account
parent: the parent dict (None)
account_fragment: account name fragment
balance: decimal.Decimal balance
children: sub-accounts ([])
"""
balance, indent, account_fragment = match.group(1, 2, 3)
return {
'balance': decimal.Decimal(balance),
'indent': len(indent),
'account_fragment': account_fragment,
'parent': None,
'children': [],
} | [
"def",
"match_to_dict",
"(",
"match",
")",
":",
"balance",
",",
"indent",
",",
"account_fragment",
"=",
"match",
".",
"group",
"(",
"1",
",",
"2",
",",
"3",
")",
"return",
"{",
"'balance'",
":",
"decimal",
".",
"Decimal",
"(",
"balance",
")",
",",
"'indent'",
":",
"len",
"(",
"indent",
")",
",",
"'account_fragment'",
":",
"account_fragment",
",",
"'parent'",
":",
"None",
",",
"'children'",
":",
"[",
"]",
",",
"}"
] | 30.5 | 13.777778 |
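The regex feeding this helper is not part of the row; a plausible ledger-style pattern (hypothetical) and a round trip through the function, to show what the three groups are expected to capture:

import decimal
import re

# Hypothetical pattern for balance-report lines such as "12.50    Expenses:Food":
# group 1 is the balance, two literal spaces separate it from group 2 (extra
# indentation marking sub-account depth), and group 3 is the account fragment.
LINE_RE = re.compile(r"^\s*(-?\d+(?:\.\d+)?)  ( *)(.+)$")

def match_to_dict(match):
    balance, indent, account_fragment = match.group(1, 2, 3)
    return {
        'balance': decimal.Decimal(balance),
        'indent': len(indent),
        'account_fragment': account_fragment,
        'parent': None,
        'children': [],
    }

m = LINE_RE.match("12.50    Expenses:Food")
print(match_to_dict(m))
# {'balance': Decimal('12.50'), 'indent': 2, 'account_fragment': 'Expenses:Food',
#  'parent': None, 'children': []}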
def explode(col):
"""
Returns a new row for each element in the given array or map.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc) | [
"def",
"explode",
"(",
"col",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"jc",
"=",
"sc",
".",
"_jvm",
".",
"functions",
".",
"explode",
"(",
"_to_java_column",
"(",
"col",
")",
")",
"return",
"Column",
"(",
"jc",
")"
] | 34.904762 | 22.047619 |
def extract_keyhandle(path, filepath):
"""extract keyhandle value from the path"""
keyhandle = filepath.lstrip(path)
keyhandle = keyhandle.split("/")
return keyhandle[0] | [
"def",
"extract_keyhandle",
"(",
"path",
",",
"filepath",
")",
":",
"keyhandle",
"=",
"filepath",
".",
"lstrip",
"(",
"path",
")",
"keyhandle",
"=",
"keyhandle",
".",
"split",
"(",
"\"/\"",
")",
"return",
"keyhandle",
"[",
"0",
"]"
] | 30.166667 | 11 |
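Worth noting when reading this row: `str.lstrip(path)` strips any leading *characters* found in `path`, not the prefix string, so handles that begin with one of those characters get truncated. A small sketch of the behaviour and a prefix-safe variant (paths and the `_safe` name are invented for illustration):

def extract_keyhandle(path, filepath):
    keyhandle = filepath.lstrip(path)
    keyhandle = keyhandle.split("/")
    return keyhandle[0]

print(extract_keyhandle("/keys/", "/keys/abc123/record"))  # 'abc123'
# 'key99' starts with characters in the set "/keys/", so it gets eaten too:
print(extract_keyhandle("/keys/", "/keys/key99/record"))   # '99', not 'key99'

def extract_keyhandle_safe(path, filepath):
    # Prefix-safe variant: drop the directory prefix, then take the first segment.
    if filepath.startswith(path):
        filepath = filepath[len(path):]
    return filepath.split("/")[0]

print(extract_keyhandle_safe("/keys/", "/keys/key99/record"))  # 'key99'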
def set_app_args(self, *args):
"""Sets ``sys.argv`` for python apps.
Examples:
* pyargv="one two three" will set ``sys.argv`` to ``('one', 'two', 'three')``.
:param args:
"""
if args:
self._set('pyargv', ' '.join(args))
return self._section | [
"def",
"set_app_args",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"args",
":",
"self",
".",
"_set",
"(",
"'pyargv'",
",",
"' '",
".",
"join",
"(",
"args",
")",
")",
"return",
"self",
".",
"_section"
] | 25.333333 | 22.166667 |
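A tiny self-contained sketch of what the method stores, using a stand-in for the section object; `Section`, its `options` dict, and `_set` are invented here and only mimic the fragment above.

class Section:
    # Minimal stand-in: collect options in a dict instead of emitting uWSGI config.
    def __init__(self):
        self._section = self
        self.options = {}

    def _set(self, key, value):
        self.options[key] = value

    def set_app_args(self, *args):
        if args:
            self._set('pyargv', ' '.join(args))
        return self._section

section = Section().set_app_args('one', 'two', 'three')
print(section.options)  # {'pyargv': 'one two three'}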