text (string, lengths 94 to 87.1k) | code_tokens (sequence) | avg_line_len (float64, 7.91 to 668) | score (sequence) |
---|---|---|---|
def make_country_nationality_list(cts, ct_file):
"""Combine list of countries and list of nationalities"""
countries = pd.read_csv(ct_file)
nationality = dict(zip(countries.nationality,countries.alpha_3_code))
both_codes = {**nationality, **cts}
return both_codes | [
"def",
"make_country_nationality_list",
"(",
"cts",
",",
"ct_file",
")",
":",
"countries",
"=",
"pd",
".",
"read_csv",
"(",
"ct_file",
")",
"nationality",
"=",
"dict",
"(",
"zip",
"(",
"countries",
".",
"nationality",
",",
"countries",
".",
"alpha_3_code",
")",
")",
"both_codes",
"=",
"{",
"*",
"*",
"nationality",
",",
"*",
"*",
"cts",
"}",
"return",
"both_codes"
] | 46.333333 | [
0.020833333333333332,
0.03278688524590164,
0.05555555555555555,
0.0410958904109589,
0.05128205128205128,
0.09523809523809523
] |
def available_migrations():
'''
List available migrations for udata and enabled plugins
Each row is a tuple with the following signature:
(plugin, package, filename)
'''
migrations = []
for filename in resource_listdir('udata', 'migrations'):
if filename.endswith('.js'):
migrations.append(('udata', 'udata', filename))
plugins = entrypoints.get_enabled('udata.models', current_app)
for plugin, module in plugins.items():
if resource_isdir(module.__name__, 'migrations'):
for filename in resource_listdir(module.__name__, 'migrations'):
if filename.endswith('.js'):
migrations.append((plugin, module.__name__, filename))
return sorted(migrations, key=lambda r: r[2]) | [
"def",
"available_migrations",
"(",
")",
":",
"migrations",
"=",
"[",
"]",
"for",
"filename",
"in",
"resource_listdir",
"(",
"'udata'",
",",
"'migrations'",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.js'",
")",
":",
"migrations",
".",
"append",
"(",
"(",
"'udata'",
",",
"'udata'",
",",
"filename",
")",
")",
"plugins",
"=",
"entrypoints",
".",
"get_enabled",
"(",
"'udata.models'",
",",
"current_app",
")",
"for",
"plugin",
",",
"module",
"in",
"plugins",
".",
"items",
"(",
")",
":",
"if",
"resource_isdir",
"(",
"module",
".",
"__name__",
",",
"'migrations'",
")",
":",
"for",
"filename",
"in",
"resource_listdir",
"(",
"module",
".",
"__name__",
",",
"'migrations'",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.js'",
")",
":",
"migrations",
".",
"append",
"(",
"(",
"plugin",
",",
"module",
".",
"__name__",
",",
"filename",
")",
")",
"return",
"sorted",
"(",
"migrations",
",",
"key",
"=",
"lambda",
"r",
":",
"r",
"[",
"2",
"]",
")"
] | 38.2 | [
0.037037037037037035,
0.2857142857142857,
0.03389830508474576,
0,
0.0425531914893617,
0,
0.05714285714285714,
0.2857142857142857,
0.10526315789473684,
0.03333333333333333,
0.05555555555555555,
0.03389830508474576,
0,
0.030303030303030304,
0.047619047619047616,
0.03508771929824561,
0.02631578947368421,
0.045454545454545456,
0.02702702702702703,
0.04081632653061224
] |
def dispose(self):
"""Disposes of this events writer manager, making it no longer usable.
Call this method when this object is done being used in order to clean up
resources and handlers. This method should only ever be called once.
"""
self._lock.acquire()
self._events_writer.Close()
self._events_writer = None
self._lock.release() | [
"def",
"dispose",
"(",
"self",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"self",
".",
"_events_writer",
".",
"Close",
"(",
")",
"self",
".",
"_events_writer",
"=",
"None",
"self",
".",
"_lock",
".",
"release",
"(",
")"
] | 35.7 | [
0.05555555555555555,
0.02702702702702703,
0,
0.025974025974025976,
0.027777777777777776,
0.2857142857142857,
0.08333333333333333,
0.06451612903225806,
0.06666666666666667,
0.08333333333333333
] |
def get_package_manager(self, target=None):
"""Returns package manager for target argument or global config."""
package_manager = None
if target:
target_package_manager_field = target.payload.get_field('package_manager')
if target_package_manager_field:
package_manager = target_package_manager_field.value
return self.node_distribution.get_package_manager(package_manager=package_manager) | [
"def",
"get_package_manager",
"(",
"self",
",",
"target",
"=",
"None",
")",
":",
"package_manager",
"=",
"None",
"if",
"target",
":",
"target_package_manager_field",
"=",
"target",
".",
"payload",
".",
"get_field",
"(",
"'package_manager'",
")",
"if",
"target_package_manager_field",
":",
"package_manager",
"=",
"target_package_manager_field",
".",
"value",
"return",
"self",
".",
"node_distribution",
".",
"get_package_manager",
"(",
"package_manager",
"=",
"package_manager",
")"
] | 52.25 | [
0.023255813953488372,
0.028169014084507043,
0.07692307692307693,
0.14285714285714285,
0.05,
0.07894736842105263,
0.03333333333333333,
0.03488372093023256
] |
def detect_infinitive_phrase(sentence):
"""Given a string, return true if it is an infinitive phrase fragment"""
# eliminate sentences without to
if not 'to' in sentence.lower():
return False
doc = nlp(sentence)
prev_word = None
for w in doc:
# if statement will execute exactly once
if prev_word == 'to':
if w.dep_ == 'ROOT' and w.tag_.startswith('VB'):
return True # this is quite likely to be an infinitive phrase
else:
return False
prev_word = w.text.lower() | [
"def",
"detect_infinitive_phrase",
"(",
"sentence",
")",
":",
"# eliminate sentences without to",
"if",
"not",
"'to'",
"in",
"sentence",
".",
"lower",
"(",
")",
":",
"return",
"False",
"doc",
"=",
"nlp",
"(",
"sentence",
")",
"prev_word",
"=",
"None",
"for",
"w",
"in",
"doc",
":",
"# if statement will execute exactly once",
"if",
"prev_word",
"==",
"'to'",
":",
"if",
"w",
".",
"dep_",
"==",
"'ROOT'",
"and",
"w",
".",
"tag_",
".",
"startswith",
"(",
"'VB'",
")",
":",
"return",
"True",
"# this is quite likely to be an infinitive phrase",
"else",
":",
"return",
"False",
"prev_word",
"=",
"w",
".",
"text",
".",
"lower",
"(",
")"
] | 33 | [
0.02564102564102564,
0.02631578947368421,
0,
0.05555555555555555,
0.08333333333333333,
0.1,
0,
0.08695652173913043,
0.1,
0.11764705882352941,
0.041666666666666664,
0.06896551724137931,
0.04918032786885246,
0.03896103896103896,
0.11764705882352941,
0.07142857142857142,
0.058823529411764705
] |
def drop_schema(self, schema, cascade=False):
"""Drop specified schema
"""
if schema in self.schemas:
sql = "DROP SCHEMA " + schema
if cascade:
sql = sql + " CASCADE"
self.execute(sql) | [
"def",
"drop_schema",
"(",
"self",
",",
"schema",
",",
"cascade",
"=",
"False",
")",
":",
"if",
"schema",
"in",
"self",
".",
"schemas",
":",
"sql",
"=",
"\"DROP SCHEMA \"",
"+",
"schema",
"if",
"cascade",
":",
"sql",
"=",
"sql",
"+",
"\" CASCADE\"",
"self",
".",
"execute",
"(",
"sql",
")"
] | 31.625 | [
0.022222222222222223,
0.0625,
0.18181818181818182,
0.058823529411764705,
0.04878048780487805,
0.08695652173913043,
0.05263157894736842,
0.06896551724137931
] |
def get(self, identifier):
"""get provider by id"""
for provider in self._providers:
if provider.identifier == identifier:
return provider
return None | [
"def",
"get",
"(",
"self",
",",
"identifier",
")",
":",
"for",
"provider",
"in",
"self",
".",
"_providers",
":",
"if",
"provider",
".",
"identifier",
"==",
"identifier",
":",
"return",
"provider",
"return",
"None"
] | 32.833333 | [
0.038461538461538464,
0.0625,
0.05,
0.04081632653061224,
0.06451612903225806,
0.10526315789473684
] |
def run_command(command, env):
"""Run command in sub process.
Runs the command in a sub process with the variables from `env`
added to the current environment variables.
Parameters
----------
command: List[str]
The command and its parameters
env: Dict
The additional environment variables
Returns
-------
int
The return code of the command
"""
# copy the current environment variables and add the values from
# `env`
cmd_env = os.environ.copy()
cmd_env.update(env)
p = Popen(command,
universal_newlines=True,
bufsize=0,
shell=False,
env=cmd_env)
_, _ = p.communicate()
return p.returncode | [
"def",
"run_command",
"(",
"command",
",",
"env",
")",
":",
"# copy the current environment variables and add the vales from",
"# `env`",
"cmd_env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"cmd_env",
".",
"update",
"(",
"env",
")",
"p",
"=",
"Popen",
"(",
"command",
",",
"universal_newlines",
"=",
"True",
",",
"bufsize",
"=",
"0",
",",
"shell",
"=",
"False",
",",
"env",
"=",
"cmd_env",
")",
"_",
",",
"_",
"=",
"p",
".",
"communicate",
"(",
")",
"return",
"p",
".",
"returncode"
] | 22.34375 | [
0.03333333333333333,
0.058823529411764705,
0,
0.04477611940298507,
0.0425531914893617,
0,
0.14285714285714285,
0.14285714285714285,
0.09090909090909091,
0.05128205128205128,
0.15384615384615385,
0.045454545454545456,
0,
0.18181818181818182,
0.18181818181818182,
0.2857142857142857,
0.05263157894736842,
0,
0.2857142857142857,
0.029850746268656716,
0.18181818181818182,
0.06451612903225806,
0.08695652173913043,
0,
0.13636363636363635,
0.10526315789473684,
0.16666666666666666,
0.15384615384615385,
0.19230769230769232,
0.07692307692307693,
0,
0.08695652173913043
] |
def write_real (self, url_data):
"""Write url_data.url."""
self.writeln("<tr><td>"+self.part("realurl")+u"</td><td>"+
u'<a target="top" href="'+url_data.url+
u'">'+cgi.escape(url_data.url)+u"</a></td></tr>") | [
"def",
"write_real",
"(",
"self",
",",
"url_data",
")",
":",
"self",
".",
"writeln",
"(",
"\"<tr><td>\"",
"+",
"self",
".",
"part",
"(",
"\"realurl\"",
")",
"+",
"u\"</td><td>\"",
"+",
"u'<a target=\"top\" href=\"'",
"+",
"url_data",
".",
"url",
"+",
"u'\">'",
"+",
"cgi",
".",
"escape",
"(",
"url_data",
".",
"url",
")",
"+",
"u\"</a></td></tr>\"",
")"
] | 52.2 | [
0.0625,
0.06060606060606061,
0.045454545454545456,
0.05,
0.05714285714285714
] |
def find_prop_overlap(rdf, prop1, prop2):
"""Generate (subject,object) pairs connected by two properties."""
for s, o in sorted(rdf.subject_objects(prop1)):
if (s, prop2, o) in rdf:
yield (s, o) | [
"def",
"find_prop_overlap",
"(",
"rdf",
",",
"prop1",
",",
"prop2",
")",
":",
"for",
"s",
",",
"o",
"in",
"sorted",
"(",
"rdf",
".",
"subject_objects",
"(",
"prop1",
")",
")",
":",
"if",
"(",
"s",
",",
"prop2",
",",
"o",
")",
"in",
"rdf",
":",
"yield",
"(",
"s",
",",
"o",
")"
] | 43.6 | [
0.024390243902439025,
0.02857142857142857,
0.0392156862745098,
0.0625,
0.08333333333333333
] |
def _get_total_read_size(self):
"""How much event data to process at once."""
if self.read_size:
read_size = EVENT_SIZE * self.read_size
else:
read_size = EVENT_SIZE
return read_size | [
"def",
"_get_total_read_size",
"(",
"self",
")",
":",
"if",
"self",
".",
"read_size",
":",
"read_size",
"=",
"EVENT_SIZE",
"*",
"self",
".",
"read_size",
"else",
":",
"read_size",
"=",
"EVENT_SIZE",
"return",
"read_size"
] | 33.142857 | [
0.03225806451612903,
0.03773584905660377,
0.07692307692307693,
0.0392156862745098,
0.15384615384615385,
0.058823529411764705,
0.08333333333333333
] |
def get(which):
"DEPRECATED; see :func:`~skyfield.data.hipparcos.load_dataframe() instead."
if isinstance(which, str):
pattern = ('H| %6s' % which).encode('ascii')
for star in load(lambda line: line.startswith(pattern)):
return star
else:
patterns = set(id.encode('ascii').rjust(6) for id in which)
return list(load(lambda line: line[8:14] in patterns)) | [
"def",
"get",
"(",
"which",
")",
":",
"if",
"isinstance",
"(",
"which",
",",
"str",
")",
":",
"pattern",
"=",
"(",
"'H| %6s'",
"%",
"which",
")",
".",
"encode",
"(",
"'ascii'",
")",
"for",
"star",
"in",
"load",
"(",
"lambda",
"line",
":",
"line",
".",
"startswith",
"(",
"pattern",
")",
")",
":",
"return",
"star",
"else",
":",
"patterns",
"=",
"set",
"(",
"id",
".",
"encode",
"(",
"'ascii'",
")",
".",
"rjust",
"(",
"6",
")",
"for",
"id",
"in",
"which",
")",
"return",
"list",
"(",
"load",
"(",
"lambda",
"line",
":",
"line",
"[",
"8",
":",
"14",
"]",
"in",
"patterns",
")",
")"
] | 45.111111 | [
0.06666666666666667,
0.02531645569620253,
0.06666666666666667,
0.03508771929824561,
0.03125,
0.08695652173913043,
0.2222222222222222,
0.029850746268656716,
0.03225806451612903
] |
def _compress_content(self, content):
"""Gzip a given string."""
zbuf = StringIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(content.read())
zfile.close()
content.file = zbuf
return content | [
"def",
"_compress_content",
"(",
"self",
",",
"content",
")",
":",
"zbuf",
"=",
"StringIO",
"(",
")",
"zfile",
"=",
"GzipFile",
"(",
"mode",
"=",
"'wb'",
",",
"compresslevel",
"=",
"6",
",",
"fileobj",
"=",
"zbuf",
")",
"zfile",
".",
"write",
"(",
"content",
".",
"read",
"(",
")",
")",
"zfile",
".",
"close",
"(",
")",
"content",
".",
"file",
"=",
"zbuf",
"return",
"content"
] | 33.375 | [
0.02702702702702703,
0.058823529411764705,
0.08,
0.030303030303030304,
0.05714285714285714,
0.09523809523809523,
0.07407407407407407,
0.09090909090909091
] |
def dirpaths(self):
"""Split the dirname into individual directory names
An absolute path starts with an empty string, a relative path does not
>>> p = DotPath(u'/path/to/x.py')
>>> p.paths == p.dirpaths()
True
"""
parts = self.parts()
result = [DotPath(parts[0] or '/')]
for name in parts[1:]:
result.append(result[-1] / name)
return result | [
"def",
"dirpaths",
"(",
"self",
")",
":",
"parts",
"=",
"self",
".",
"parts",
"(",
")",
"result",
"=",
"[",
"DotPath",
"(",
"parts",
"[",
"0",
"]",
"or",
"'/'",
")",
"]",
"for",
"name",
"in",
"parts",
"[",
"1",
":",
"]",
":",
"result",
".",
"append",
"(",
"result",
"[",
"-",
"1",
"]",
"/",
"name",
")",
"return",
"result"
] | 30.142857 | [
0.05263157894736842,
0.03333333333333333,
0,
0.02564102564102564,
0,
0.07317073170731707,
0.08571428571428572,
0.16666666666666666,
0.18181818181818182,
0.07142857142857142,
0.046511627906976744,
0.06666666666666667,
0.045454545454545456,
0.09523809523809523
] |
def _get_unique_function_name(function_type, functions):
'''Get a unique function name.
Args:
function_type(str): Name of Function. Ex) Convolution, Affine
functions (OrderedDict of (str, Function))
Returns: str
A unique function name
'''
function_name = function_name_base = function_type
count = 2
while function_name in functions:
function_name = '{}_{}'.format(function_name_base, count)
count += 1
return function_name | [
"def",
"_get_unique_function_name",
"(",
"function_type",
",",
"functions",
")",
":",
"function_name",
"=",
"function_name_base",
"=",
"function_type",
"count",
"=",
"2",
"while",
"function_name",
"in",
"functions",
":",
"function_name",
"=",
"'{}_{}'",
".",
"format",
"(",
"function_name_base",
",",
"count",
")",
"count",
"+=",
"1",
"return",
"function_name"
] | 30 | [
0.017857142857142856,
0.058823529411764705,
0,
0.2222222222222222,
0.043478260869565216,
0.08333333333333333,
0,
0.125,
0.06666666666666667,
0.2857142857142857,
0.037037037037037035,
0.15384615384615385,
0.05405405405405406,
0.03076923076923077,
0.1111111111111111,
0.08333333333333333
] |
def add_handlers(web_app, config):
"""Add the appropriate handlers to the web app.
"""
base_url = web_app.settings['base_url']
url = ujoin(base_url, config.page_url)
assets_dir = config.assets_dir
package_file = os.path.join(assets_dir, 'package.json')
with open(package_file) as fid:
data = json.load(fid)
config.version = config.version or data['version']
config.name = config.name or data['name']
handlers = [
# TODO Redirect to /tree
(url + r'/?', NAppHandler, {'config': config, 'page': 'tree'}),
(url + r"/tree%s" % path_regex, NAppHandler, {'config': config, 'page': 'tree'}),
(url + r"/edit%s" % path_regex, NAppHandler, {'config': config, 'page': 'edit'}),
(url + r"/view%s" % path_regex, NAppHandler, {'config': config, 'page': 'view'}),
(url + r"/static/(.*)", FileFindHandler, {'path': assets_dir}),
]
web_app.add_handlers(".*$", handlers) | [
"def",
"add_handlers",
"(",
"web_app",
",",
"config",
")",
":",
"base_url",
"=",
"web_app",
".",
"settings",
"[",
"'base_url'",
"]",
"url",
"=",
"ujoin",
"(",
"base_url",
",",
"config",
".",
"page_url",
")",
"assets_dir",
"=",
"config",
".",
"assets_dir",
"package_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"assets_dir",
",",
"'package.json'",
")",
"with",
"open",
"(",
"package_file",
")",
"as",
"fid",
":",
"data",
"=",
"json",
".",
"load",
"(",
"fid",
")",
"config",
".",
"version",
"=",
"config",
".",
"version",
"or",
"data",
"[",
"'version'",
"]",
"config",
".",
"name",
"=",
"config",
".",
"name",
"or",
"data",
"[",
"'name'",
"]",
"handlers",
"=",
"[",
"# TODO Redirect to /tree",
"(",
"url",
"+",
"r'/?'",
",",
"NAppHandler",
",",
"{",
"'config'",
":",
"config",
",",
"'page'",
":",
"'tree'",
"}",
")",
",",
"(",
"url",
"+",
"r\"/tree%s\"",
"%",
"path_regex",
",",
"NAppHandler",
",",
"{",
"'config'",
":",
"config",
",",
"'page'",
":",
"'tree'",
"}",
")",
",",
"(",
"url",
"+",
"r\"/edit%s\"",
"%",
"path_regex",
",",
"NAppHandler",
",",
"{",
"'config'",
":",
"config",
",",
"'page'",
":",
"'edit'",
"}",
")",
",",
"(",
"url",
"+",
"r\"/view%s\"",
"%",
"path_regex",
",",
"NAppHandler",
",",
"{",
"'config'",
":",
"config",
",",
"'page'",
":",
"'view'",
"}",
")",
",",
"(",
"url",
"+",
"r\"/static/(.*)\"",
",",
"FileFindHandler",
",",
"{",
"'path'",
":",
"assets_dir",
"}",
")",
",",
"]",
"web_app",
".",
"add_handlers",
"(",
"\".*$\"",
",",
"handlers",
")"
] | 39 | [
0.029411764705882353,
0.0392156862745098,
0.2857142857142857,
0.046511627906976744,
0.047619047619047616,
0.058823529411764705,
0,
0.03389830508474576,
0.05714285714285714,
0.06896551724137931,
0,
0.037037037037037035,
0.044444444444444446,
0,
0.1875,
0.0625,
0.028169014084507043,
0.033707865168539325,
0.033707865168539325,
0.033707865168539325,
0.028169014084507043,
0.6,
0,
0.04878048780487805
] |
def last_valid_lockset(self):
"highest valid lockset on height"
for r in self.rounds:
ls = self.rounds[r].lockset
if ls.is_valid:
return ls
return None | [
"def",
"last_valid_lockset",
"(",
"self",
")",
":",
"for",
"r",
"in",
"self",
".",
"rounds",
":",
"ls",
"=",
"self",
".",
"rounds",
"[",
"r",
"]",
".",
"lockset",
"if",
"ls",
".",
"is_valid",
":",
"return",
"ls",
"return",
"None"
] | 29.857143 | [
0.034482758620689655,
0.04878048780487805,
0.06896551724137931,
0.05128205128205128,
0.07407407407407407,
0.08,
0.10526315789473684
] |
def parse(self):
"""
Iterate over each line of the log, running each parser against it.
Stream lines from the gzip file and run each parser against it,
building the ``artifact`` as we go.
"""
with make_request(self.url, stream=True) as response:
download_size_in_bytes = int(response.headers.get('Content-Length', -1))
# Temporary annotation of log size to help set thresholds in bug 1295997.
newrelic.agent.add_custom_parameter(
'unstructured_log_size',
download_size_in_bytes
)
newrelic.agent.add_custom_parameter(
'unstructured_log_encoding',
response.headers.get('Content-Encoding', 'None')
)
if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
raise LogSizeException('Download size of %i bytes exceeds limit' % download_size_in_bytes)
# Lines must be explicitly decoded since `iter_lines()`` returns bytes by default
# and we cannot use its `decode_unicode=True` mode, since otherwise Unicode newline
# characters such as `\u0085` (which can appear in test output) are treated the same
# as `\n` or `\r`, and so split into unwanted additional lines by `iter_lines()`.
for line in response.iter_lines():
for builder in self.builders:
# Using `replace` to prevent malformed unicode (which might possibly exist
# in test message output) from breaking parsing of the rest of the log.
builder.parse_line(line.decode('utf-8', 'replace'))
# gather the artifacts from all builders
for builder in self.builders:
# Run end-of-parsing actions for this parser,
# in case the artifact needs clean-up/summarising.
builder.finish_parse()
name = builder.name
artifact = builder.get_artifact()
if name == 'performance_data' and not artifact[name]:
continue
self.artifacts[name] = artifact | [
"def",
"parse",
"(",
"self",
")",
":",
"with",
"make_request",
"(",
"self",
".",
"url",
",",
"stream",
"=",
"True",
")",
"as",
"response",
":",
"download_size_in_bytes",
"=",
"int",
"(",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Length'",
",",
"-",
"1",
")",
")",
"# Temporary annotation of log size to help set thresholds in bug 1295997.",
"newrelic",
".",
"agent",
".",
"add_custom_parameter",
"(",
"'unstructured_log_size'",
",",
"download_size_in_bytes",
")",
"newrelic",
".",
"agent",
".",
"add_custom_parameter",
"(",
"'unstructured_log_encoding'",
",",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Encoding'",
",",
"'None'",
")",
")",
"if",
"download_size_in_bytes",
">",
"MAX_DOWNLOAD_SIZE_IN_BYTES",
":",
"raise",
"LogSizeException",
"(",
"'Download size of %i bytes exceeds limit'",
"%",
"download_size_in_bytes",
")",
"# Lines must be explicitly decoded since `iter_lines()`` returns bytes by default",
"# and we cannot use its `decode_unicode=True` mode, since otherwise Unicode newline",
"# characters such as `\\u0085` (which can appear in test output) are treated the same",
"# as `\\n` or `\\r`, and so split into unwanted additional lines by `iter_lines()`.",
"for",
"line",
"in",
"response",
".",
"iter_lines",
"(",
")",
":",
"for",
"builder",
"in",
"self",
".",
"builders",
":",
"# Using `replace` to prevent malformed unicode (which might possibly exist",
"# in test message output) from breaking parsing of the rest of the log.",
"builder",
".",
"parse_line",
"(",
"line",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
"# gather the artifacts from all builders",
"for",
"builder",
"in",
"self",
".",
"builders",
":",
"# Run end-of-parsing actions for this parser,",
"# in case the artifact needs clean-up/summarising.",
"builder",
".",
"finish_parse",
"(",
")",
"name",
"=",
"builder",
".",
"name",
"artifact",
"=",
"builder",
".",
"get_artifact",
"(",
")",
"if",
"name",
"==",
"'performance_data'",
"and",
"not",
"artifact",
"[",
"name",
"]",
":",
"continue",
"self",
".",
"artifacts",
"[",
"name",
"]",
"=",
"artifact"
] | 49 | [
0.0625,
0.18181818181818182,
0.02702702702702703,
0,
0.028169014084507043,
0.06976744186046512,
0.18181818181818182,
0.03278688524590164,
0.03571428571428571,
0,
0.03529411764705882,
0.0625,
0.05,
0.05263157894736842,
0.23076923076923078,
0.0625,
0.045454545454545456,
0.03125,
0.23076923076923078,
0,
0.029850746268656716,
0.02830188679245283,
0,
0.03225806451612903,
0.031578947368421054,
0.03125,
0.03225806451612903,
0.043478260869565216,
0.044444444444444446,
0.031914893617021274,
0.03296703296703297,
0.028169014084507043,
0,
0.041666666666666664,
0.05405405405405406,
0.03508771929824561,
0.03225806451612903,
0.058823529411764705,
0.06451612903225806,
0.044444444444444446,
0.03076923076923077,
0.08333333333333333,
0.046511627906976744
] |
def decode_seq(self, inputs, states, valid_length=None):
"""Decode the decoder inputs. This function is only used for training.
Parameters
----------
inputs : NDArray, Shape (batch_size, length, C_in)
states : list of NDArrays or None
Initial states. The list of decoder states
valid_length : NDArray or None
Valid lengths of each sequence. This is usually used when part of the sequence has
been padded. Shape (batch_size,)
Returns
-------
output : NDArray, Shape (batch_size, length, C_out)
states : list
The decoder states, includes:
- mem_value : NDArray
- mem_masks : NDArray, optional
additional_outputs : list of list
Either be an empty list or contains the attention weights in this step.
The attention weights will have shape (batch_size, length, mem_length) or
(batch_size, num_heads, length, mem_length)
"""
batch_size = inputs.shape[0]
length = inputs.shape[1]
length_array = mx.nd.arange(length, ctx=inputs.context, dtype=inputs.dtype)
mask = mx.nd.broadcast_lesser_equal(
length_array.reshape((1, -1)),
length_array.reshape((-1, 1)))
if valid_length is not None:
arange = mx.nd.arange(length, ctx=valid_length.context, dtype=valid_length.dtype)
batch_mask = mx.nd.broadcast_lesser(
arange.reshape((1, -1)),
valid_length.reshape((-1, 1)))
mask = mx.nd.broadcast_mul(mx.nd.expand_dims(batch_mask, -1),
mx.nd.expand_dims(mask, 0))
else:
mask = mx.nd.broadcast_axes(mx.nd.expand_dims(mask, axis=0), axis=0, size=batch_size)
states = [None] + states
output, states, additional_outputs = self.forward(inputs, states, mask)
states = states[1:]
if valid_length is not None:
output = mx.nd.SequenceMask(output,
sequence_length=valid_length,
use_sequence_length=True,
axis=1)
return output, states, additional_outputs | [
"def",
"decode_seq",
"(",
"self",
",",
"inputs",
",",
"states",
",",
"valid_length",
"=",
"None",
")",
":",
"batch_size",
"=",
"inputs",
".",
"shape",
"[",
"0",
"]",
"length",
"=",
"inputs",
".",
"shape",
"[",
"1",
"]",
"length_array",
"=",
"mx",
".",
"nd",
".",
"arange",
"(",
"length",
",",
"ctx",
"=",
"inputs",
".",
"context",
",",
"dtype",
"=",
"inputs",
".",
"dtype",
")",
"mask",
"=",
"mx",
".",
"nd",
".",
"broadcast_lesser_equal",
"(",
"length_array",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
",",
"length_array",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
"if",
"valid_length",
"is",
"not",
"None",
":",
"arange",
"=",
"mx",
".",
"nd",
".",
"arange",
"(",
"length",
",",
"ctx",
"=",
"valid_length",
".",
"context",
",",
"dtype",
"=",
"valid_length",
".",
"dtype",
")",
"batch_mask",
"=",
"mx",
".",
"nd",
".",
"broadcast_lesser",
"(",
"arange",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
",",
"valid_length",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
"mask",
"=",
"mx",
".",
"nd",
".",
"broadcast_mul",
"(",
"mx",
".",
"nd",
".",
"expand_dims",
"(",
"batch_mask",
",",
"-",
"1",
")",
",",
"mx",
".",
"nd",
".",
"expand_dims",
"(",
"mask",
",",
"0",
")",
")",
"else",
":",
"mask",
"=",
"mx",
".",
"nd",
".",
"broadcast_axes",
"(",
"mx",
".",
"nd",
".",
"expand_dims",
"(",
"mask",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"0",
",",
"size",
"=",
"batch_size",
")",
"states",
"=",
"[",
"None",
"]",
"+",
"states",
"output",
",",
"states",
",",
"additional_outputs",
"=",
"self",
".",
"forward",
"(",
"inputs",
",",
"states",
",",
"mask",
")",
"states",
"=",
"states",
"[",
"1",
":",
"]",
"if",
"valid_length",
"is",
"not",
"None",
":",
"output",
"=",
"mx",
".",
"nd",
".",
"SequenceMask",
"(",
"output",
",",
"sequence_length",
"=",
"valid_length",
",",
"use_sequence_length",
"=",
"True",
",",
"axis",
"=",
"1",
")",
"return",
"output",
",",
"states",
",",
"additional_outputs"
] | 45.693878 | [
0.017857142857142856,
0.02564102564102564,
0,
0.1111111111111111,
0.1111111111111111,
0.06896551724137931,
0.07317073170731707,
0.037037037037037035,
0.07894736842105263,
0.03333333333333333,
0.06818181818181818,
0,
0.13333333333333333,
0.13333333333333333,
0.06779661016949153,
0.14285714285714285,
0.04878048780487805,
0,
0.09090909090909091,
0.06976744186046512,
0.07317073170731707,
0.03614457831325301,
0.047058823529411764,
0.03636363636363636,
0.18181818181818182,
0.05555555555555555,
0.0625,
0.03614457831325301,
0.06818181818181818,
0.047619047619047616,
0.07142857142857142,
0.05555555555555555,
0.03225806451612903,
0.0625,
0.05,
0.06521739130434782,
0.0410958904109589,
0.06060606060606061,
0.15384615384615385,
0.030927835051546393,
0.0625,
0.02531645569620253,
0.07407407407407407,
0.05555555555555555,
0.06382978723404255,
0.043478260869565216,
0.046153846153846156,
0.0851063829787234,
0.04081632653061224
] |
def tem(fEM, off, freq, time, signal, ft, ftarg, conv=True):
r"""Return the time-domain response of the frequency-domain response fEM.
This function is called from one of the above modelling routines. No
input-check is carried out here. See the main description of :mod:`model`
for information regarding input and output parameters.
This function can be directly used if you are sure the provided input is in
the correct format. This is useful for inversion routines and similar, as
it can speed up the calculation by omitting input-checks.
"""
# 1. Scale frequencies if switch-on/off response
# Step function for causal times is like a unit fct, therefore an impulse
# in frequency domain
if signal in [-1, 1]:
# Divide by signal/(2j*pi*f) to obtain step response
fact = signal/(2j*np.pi*freq)
else:
fact = 1
# 2. f->t transform
tEM = np.zeros((time.size, off.size))
for i in range(off.size):
out = getattr(transform, ft)(fEM[:, i]*fact, time, freq, ftarg)
tEM[:, i] += out[0]
conv *= out[1]
return tEM*2/np.pi, conv | [
"def",
"tem",
"(",
"fEM",
",",
"off",
",",
"freq",
",",
"time",
",",
"signal",
",",
"ft",
",",
"ftarg",
",",
"conv",
"=",
"True",
")",
":",
"# 1. Scale frequencies if switch-on/off response",
"# Step function for causal times is like a unit fct, therefore an impulse",
"# in frequency domain",
"if",
"signal",
"in",
"[",
"-",
"1",
",",
"1",
"]",
":",
"# Divide by signal/(2j*pi*f) to obtain step response",
"fact",
"=",
"signal",
"/",
"(",
"2j",
"*",
"np",
".",
"pi",
"*",
"freq",
")",
"else",
":",
"fact",
"=",
"1",
"# 2. f->t transform",
"tEM",
"=",
"np",
".",
"zeros",
"(",
"(",
"time",
".",
"size",
",",
"off",
".",
"size",
")",
")",
"for",
"i",
"in",
"range",
"(",
"off",
".",
"size",
")",
":",
"out",
"=",
"getattr",
"(",
"transform",
",",
"ft",
")",
"(",
"fEM",
"[",
":",
",",
"i",
"]",
"*",
"fact",
",",
"time",
",",
"freq",
",",
"ftarg",
")",
"tEM",
"[",
":",
",",
"i",
"]",
"+=",
"out",
"[",
"0",
"]",
"conv",
"*=",
"out",
"[",
"1",
"]",
"return",
"tEM",
"*",
"2",
"/",
"np",
".",
"pi",
",",
"conv"
] | 38.275862 | [
0.016666666666666666,
0.025974025974025976,
0,
0.027777777777777776,
0.07792207792207792,
0.034482758620689655,
0,
0.02531645569620253,
0.025974025974025976,
0.03278688524590164,
0,
0.2857142857142857,
0.038461538461538464,
0.025974025974025976,
0.08,
0.08,
0.03333333333333333,
0.05405405405405406,
0.2222222222222222,
0.125,
0,
0.08695652173913043,
0.04878048780487805,
0.06896551724137931,
0.028169014084507043,
0.07407407407407407,
0.09090909090909091,
0,
0.07142857142857142
] |
def task_estimates(channel, states):
"""
Estimate remaining time for all tasks in this channel.
:param channel: txkoji.channel.Channel
:param list states: list of task_states ints, eg [task_states.OPEN]
:returns: deferred that when fired returns a list of
(task, est_remaining) tuples
"""
for state in states:
if state != task_states.OPEN:
raise NotImplementedError('only estimate OPEN tasks')
tasks = yield channel.tasks(state=states)
# Estimate all the unique packages.
packages = set([task.package for task in tasks])
print('checking avg build duration for %i packages:' % len(packages))
packages = list(packages)
durations = yield average_build_durations(channel.connection, packages)
avg_package_durations = dict(zip(packages, durations))
# pprint(avg_package_durations)
# Determine estimates for all our tasks.
results = []
utcnow = datetime.utcnow()
for task in tasks:
avg_duration = avg_package_durations[task.package]
est_complete = task.started + avg_duration
est_remaining = est_complete - utcnow
result = (task, est_remaining)
results.append(result)
defer.returnValue(results) | [
"def",
"task_estimates",
"(",
"channel",
",",
"states",
")",
":",
"for",
"state",
"in",
"states",
":",
"if",
"state",
"!=",
"task_states",
".",
"OPEN",
":",
"raise",
"NotImplementedError",
"(",
"'only estimate OPEN tasks'",
")",
"tasks",
"=",
"yield",
"channel",
".",
"tasks",
"(",
"state",
"=",
"states",
")",
"# Estimate all the unique packages.",
"packages",
"=",
"set",
"(",
"[",
"task",
".",
"package",
"for",
"task",
"in",
"tasks",
"]",
")",
"print",
"(",
"'checking avg build duration for %i packages:'",
"%",
"len",
"(",
"packages",
")",
")",
"packages",
"=",
"list",
"(",
"packages",
")",
"durations",
"=",
"yield",
"average_build_durations",
"(",
"channel",
".",
"connection",
",",
"packages",
")",
"avg_package_durations",
"=",
"dict",
"(",
"zip",
"(",
"packages",
",",
"durations",
")",
")",
"# pprint(avg_package_durations)",
"# Determine estimates for all our tasks.",
"results",
"=",
"[",
"]",
"utcnow",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"for",
"task",
"in",
"tasks",
":",
"avg_duration",
"=",
"avg_package_durations",
"[",
"task",
".",
"package",
"]",
"est_complete",
"=",
"task",
".",
"started",
"+",
"avg_duration",
"est_remaining",
"=",
"est_complete",
"-",
"utcnow",
"result",
"=",
"(",
"task",
",",
"est_remaining",
")",
"results",
".",
"append",
"(",
"result",
")",
"defer",
".",
"returnValue",
"(",
"results",
")"
] | 40.466667 | [
0.027777777777777776,
0.2857142857142857,
0.034482758620689655,
0,
0.07142857142857142,
0.056338028169014086,
0.05357142857142857,
0.07142857142857142,
0.2857142857142857,
0.08333333333333333,
0.05405405405405406,
0.03076923076923077,
0.044444444444444446,
0.05128205128205128,
0.038461538461538464,
0.0273972602739726,
0.06896551724137931,
0.02666666666666667,
0.034482758620689655,
0.05714285714285714,
0.045454545454545456,
0.125,
0.06666666666666667,
0.09090909090909091,
0.034482758620689655,
0.04,
0.044444444444444446,
0.05263157894736842,
0.06666666666666667,
0.06666666666666667
] |
def export_element(bpmn_graph, export_elements, node, nodes_classification, order=0, prefix="", condition="",
who="", add_join=False):
"""
Export a node with "Element" classification (task, subprocess or gateway)
:param bpmn_graph: an instance of BpmnDiagramGraph class,
:param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that
will be used in exported CSV document,
:param node: networkx.Node object,
:param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels,
:param order: the order param of exported node,
:param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify
the branch
:param condition: the condition param of exported node,
:param who: the who param of exported node,
:param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV.
:return: None or the next node object if the exported node was a gateway join.
"""
node_type = node[1][consts.Consts.type]
node_classification = nodes_classification[node[0]]
outgoing_flows = node[1].get(consts.Consts.outgoing_flow)
if node_type != consts.Consts.parallel_gateway and consts.Consts.default in node[1] \
and node[1][consts.Consts.default] is not None:
default_flow_id = node[1][consts.Consts.default]
else:
default_flow_id = None
if BpmnDiagramGraphCsvExport.classification_join in node_classification and not add_join:
# If the node is a join, then retract the recursion back to the split.
# In case of activity - return current node. In case of gateway - return outgoing node
# (we are making assumption that join has only one outgoing node)
if node_type == consts.Consts.task or node_type == consts.Consts.subprocess:
return node
else:
outgoing_flow_id = outgoing_flows[0]
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
return outgoing_node
else:
if node_type == consts.Consts.task:
export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name],
"Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""})
elif node_type == consts.Consts.subprocess:
export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name],
"Condition": condition, "Who": who, "Subprocess": "yes", "Terminated": ""})
if BpmnDiagramGraphCsvExport.classification_split in node_classification:
next_node = None
alphabet_suffix_index = 0
for outgoing_flow_id in outgoing_flows:
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
# This will work only up to 26 outgoing flows
suffix = string.ascii_lowercase[alphabet_suffix_index]
next_prefix = prefix + str(order) + suffix
alphabet_suffix_index += 1
# parallel gateway does not use conditions
if node_type != consts.Consts.parallel_gateway and consts.Consts.name in outgoing_flow[2] \
and outgoing_flow[2][consts.Consts.name] is not None:
condition = outgoing_flow[2][consts.Consts.name]
else:
condition = ""
if BpmnDiagramGraphCsvExport.classification_join in nodes_classification[outgoing_node[0]]:
export_elements.append(
{"Order": next_prefix + str(1), "Activity": "goto " + prefix + str(order + 1),
"Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""})
elif outgoing_flow_id == default_flow_id:
tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, 1, next_prefix, "else",
who)
if tmp_next_node is not None:
next_node = tmp_next_node
else:
tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, 1, next_prefix,
condition, who)
if tmp_next_node is not None:
next_node = tmp_next_node
if next_node is not None:
return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, next_node,
nodes_classification, order=(order + 1), prefix=prefix,
who=who, add_join=True)
elif len(outgoing_flows) == 1:
outgoing_flow_id = outgoing_flows[0]
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, order=(order + 1), prefix=prefix,
who=who)
else:
return None | [
"def",
"export_element",
"(",
"bpmn_graph",
",",
"export_elements",
",",
"node",
",",
"nodes_classification",
",",
"order",
"=",
"0",
",",
"prefix",
"=",
"\"\"",
",",
"condition",
"=",
"\"\"",
",",
"who",
"=",
"\"\"",
",",
"add_join",
"=",
"False",
")",
":",
"node_type",
"=",
"node",
"[",
"1",
"]",
"[",
"consts",
".",
"Consts",
".",
"type",
"]",
"node_classification",
"=",
"nodes_classification",
"[",
"node",
"[",
"0",
"]",
"]",
"outgoing_flows",
"=",
"node",
"[",
"1",
"]",
".",
"get",
"(",
"consts",
".",
"Consts",
".",
"outgoing_flow",
")",
"if",
"node_type",
"!=",
"consts",
".",
"Consts",
".",
"parallel_gateway",
"and",
"consts",
".",
"Consts",
".",
"default",
"in",
"node",
"[",
"1",
"]",
"and",
"node",
"[",
"1",
"]",
"[",
"consts",
".",
"Consts",
".",
"default",
"]",
"is",
"not",
"None",
":",
"default_flow_id",
"=",
"node",
"[",
"1",
"]",
"[",
"consts",
".",
"Consts",
".",
"default",
"]",
"else",
":",
"default_flow_id",
"=",
"None",
"if",
"BpmnDiagramGraphCsvExport",
".",
"classification_join",
"in",
"node_classification",
"and",
"not",
"add_join",
":",
"# If the node is a join, then retract the recursion back to the split.",
"# In case of activity - return current node. In case of gateway - return outgoing node",
"# (we are making assumption that join has only one outgoing node)",
"if",
"node_type",
"==",
"consts",
".",
"Consts",
".",
"task",
"or",
"node_type",
"==",
"consts",
".",
"Consts",
".",
"subprocess",
":",
"return",
"node",
"else",
":",
"outgoing_flow_id",
"=",
"outgoing_flows",
"[",
"0",
"]",
"outgoing_flow",
"=",
"bpmn_graph",
".",
"get_flow_by_id",
"(",
"outgoing_flow_id",
")",
"outgoing_node",
"=",
"bpmn_graph",
".",
"get_node_by_id",
"(",
"outgoing_flow",
"[",
"2",
"]",
"[",
"consts",
".",
"Consts",
".",
"target_ref",
"]",
")",
"return",
"outgoing_node",
"else",
":",
"if",
"node_type",
"==",
"consts",
".",
"Consts",
".",
"task",
":",
"export_elements",
".",
"append",
"(",
"{",
"\"Order\"",
":",
"prefix",
"+",
"str",
"(",
"order",
")",
",",
"\"Activity\"",
":",
"node",
"[",
"1",
"]",
"[",
"consts",
".",
"Consts",
".",
"node_name",
"]",
",",
"\"Condition\"",
":",
"condition",
",",
"\"Who\"",
":",
"who",
",",
"\"Subprocess\"",
":",
"\"\"",
",",
"\"Terminated\"",
":",
"\"\"",
"}",
")",
"elif",
"node_type",
"==",
"consts",
".",
"Consts",
".",
"subprocess",
":",
"export_elements",
".",
"append",
"(",
"{",
"\"Order\"",
":",
"prefix",
"+",
"str",
"(",
"order",
")",
",",
"\"Activity\"",
":",
"node",
"[",
"1",
"]",
"[",
"consts",
".",
"Consts",
".",
"node_name",
"]",
",",
"\"Condition\"",
":",
"condition",
",",
"\"Who\"",
":",
"who",
",",
"\"Subprocess\"",
":",
"\"yes\"",
",",
"\"Terminated\"",
":",
"\"\"",
"}",
")",
"if",
"BpmnDiagramGraphCsvExport",
".",
"classification_split",
"in",
"node_classification",
":",
"next_node",
"=",
"None",
"alphabet_suffix_index",
"=",
"0",
"for",
"outgoing_flow_id",
"in",
"outgoing_flows",
":",
"outgoing_flow",
"=",
"bpmn_graph",
".",
"get_flow_by_id",
"(",
"outgoing_flow_id",
")",
"outgoing_node",
"=",
"bpmn_graph",
".",
"get_node_by_id",
"(",
"outgoing_flow",
"[",
"2",
"]",
"[",
"consts",
".",
"Consts",
".",
"target_ref",
"]",
")",
"# This will work only up to 26 outgoing flows",
"suffix",
"=",
"string",
".",
"ascii_lowercase",
"[",
"alphabet_suffix_index",
"]",
"next_prefix",
"=",
"prefix",
"+",
"str",
"(",
"order",
")",
"+",
"suffix",
"alphabet_suffix_index",
"+=",
"1",
"# parallel gateway does not uses conditions",
"if",
"node_type",
"!=",
"consts",
".",
"Consts",
".",
"parallel_gateway",
"and",
"consts",
".",
"Consts",
".",
"name",
"in",
"outgoing_flow",
"[",
"2",
"]",
"and",
"outgoing_flow",
"[",
"2",
"]",
"[",
"consts",
".",
"Consts",
".",
"name",
"]",
"is",
"not",
"None",
":",
"condition",
"=",
"outgoing_flow",
"[",
"2",
"]",
"[",
"consts",
".",
"Consts",
".",
"name",
"]",
"else",
":",
"condition",
"=",
"\"\"",
"if",
"BpmnDiagramGraphCsvExport",
".",
"classification_join",
"in",
"nodes_classification",
"[",
"outgoing_node",
"[",
"0",
"]",
"]",
":",
"export_elements",
".",
"append",
"(",
"{",
"\"Order\"",
":",
"next_prefix",
"+",
"str",
"(",
"1",
")",
",",
"\"Activity\"",
":",
"\"goto \"",
"+",
"prefix",
"+",
"str",
"(",
"order",
"+",
"1",
")",
",",
"\"Condition\"",
":",
"condition",
",",
"\"Who\"",
":",
"who",
",",
"\"Subprocess\"",
":",
"\"\"",
",",
"\"Terminated\"",
":",
"\"\"",
"}",
")",
"elif",
"outgoing_flow_id",
"==",
"default_flow_id",
":",
"tmp_next_node",
"=",
"BpmnDiagramGraphCsvExport",
".",
"export_node",
"(",
"bpmn_graph",
",",
"export_elements",
",",
"outgoing_node",
",",
"nodes_classification",
",",
"1",
",",
"next_prefix",
",",
"\"else\"",
",",
"who",
")",
"if",
"tmp_next_node",
"is",
"not",
"None",
":",
"next_node",
"=",
"tmp_next_node",
"else",
":",
"tmp_next_node",
"=",
"BpmnDiagramGraphCsvExport",
".",
"export_node",
"(",
"bpmn_graph",
",",
"export_elements",
",",
"outgoing_node",
",",
"nodes_classification",
",",
"1",
",",
"next_prefix",
",",
"condition",
",",
"who",
")",
"if",
"tmp_next_node",
"is",
"not",
"None",
":",
"next_node",
"=",
"tmp_next_node",
"if",
"next_node",
"is",
"not",
"None",
":",
"return",
"BpmnDiagramGraphCsvExport",
".",
"export_node",
"(",
"bpmn_graph",
",",
"export_elements",
",",
"next_node",
",",
"nodes_classification",
",",
"order",
"=",
"(",
"order",
"+",
"1",
")",
",",
"prefix",
"=",
"prefix",
",",
"who",
"=",
"who",
",",
"add_join",
"=",
"True",
")",
"elif",
"len",
"(",
"outgoing_flows",
")",
"==",
"1",
":",
"outgoing_flow_id",
"=",
"outgoing_flows",
"[",
"0",
"]",
"outgoing_flow",
"=",
"bpmn_graph",
".",
"get_flow_by_id",
"(",
"outgoing_flow_id",
")",
"outgoing_node",
"=",
"bpmn_graph",
".",
"get_node_by_id",
"(",
"outgoing_flow",
"[",
"2",
"]",
"[",
"consts",
".",
"Consts",
".",
"target_ref",
"]",
")",
"return",
"BpmnDiagramGraphCsvExport",
".",
"export_node",
"(",
"bpmn_graph",
",",
"export_elements",
",",
"outgoing_node",
",",
"nodes_classification",
",",
"order",
"=",
"(",
"order",
"+",
"1",
")",
",",
"prefix",
"=",
"prefix",
",",
"who",
"=",
"who",
")",
"else",
":",
"return",
"None"
] | 62.833333 | [
0.027522935779816515,
0.1276595744680851,
0.18181818181818182,
0.04938271604938271,
0,
0.046153846153846156,
0.034782608695652174,
0.05660377358490566,
0.07142857142857142,
0.03508771929824561,
0.05454545454545454,
0.03418803418803419,
0.12,
0.047619047619047616,
0.05263157894736842,
0.041237113402061855,
0.046511627906976744,
0.18181818181818182,
0.0425531914893617,
0.03389830508474576,
0,
0.03076923076923077,
0.03225806451612903,
0.031746031746031744,
0.03333333333333333,
0.15384615384615385,
0.058823529411764705,
0,
0.030927835051546393,
0.036585365853658534,
0.030612244897959183,
0.025974025974025976,
0.03409090909090909,
0.07407407407407407,
0.11764705882352941,
0.038461538461538464,
0.02666666666666667,
0.0297029702970297,
0.05555555555555555,
0.15384615384615385,
0.0425531914893617,
0.034782608695652174,
0.03571428571428571,
0.03636363636363636,
0.034782608695652174,
0.034782608695652174,
0,
0.037037037037037035,
0.07142857142857142,
0.05405405405405406,
0.0392156862745098,
0.02666666666666667,
0.0297029702970297,
0,
0.03278688524590164,
0.02857142857142857,
0.034482758620689655,
0.047619047619047616,
0.03389830508474576,
0.028037383177570093,
0.025974025974025976,
0.029411764705882353,
0.09523809523809523,
0.058823529411764705,
0,
0.028037383177570093,
0.06976744186046512,
0.0392156862745098,
0.05154639175257732,
0.03508771929824561,
0.03418803418803419,
0.03361344537815126,
0.05128205128205128,
0.04081632653061224,
0.04081632653061224,
0.09523809523809523,
0.03418803418803419,
0.036036036036036036,
0.056179775280898875,
0.04081632653061224,
0.04081632653061224,
0,
0.05405405405405406,
0.04,
0.05172413793103448,
0.08333333333333333,
0,
0.05263157894736842,
0.041666666666666664,
0.028169014084507043,
0.030927835051546393,
0.04,
0.05357142857142857,
0.07692307692307693,
0.15384615384615385,
0.08695652173913043
] |
def get_input(problem):
"""" Returns the specified problem answer in the form
problem: problem id
Returns string, or bytes if a file is loaded
"""
input_data = load_input()
pbsplit = problem.split(":")
problem_input = input_data['input'][pbsplit[0]]
if isinstance(problem_input, dict) and "filename" in problem_input and "value" in problem_input:
if len(pbsplit) > 1 and pbsplit[1] == 'filename':
return problem_input["filename"]
else:
return open(problem_input["value"], 'rb').read()
else:
return problem_input | [
"def",
"get_input",
"(",
"problem",
")",
":",
"input_data",
"=",
"load_input",
"(",
")",
"pbsplit",
"=",
"problem",
".",
"split",
"(",
"\":\"",
")",
"problem_input",
"=",
"input_data",
"[",
"'input'",
"]",
"[",
"pbsplit",
"[",
"0",
"]",
"]",
"if",
"isinstance",
"(",
"problem_input",
",",
"dict",
")",
"and",
"\"filename\"",
"in",
"problem_input",
"and",
"\"value\"",
"in",
"problem_input",
":",
"if",
"len",
"(",
"pbsplit",
")",
">",
"1",
"and",
"pbsplit",
"[",
"1",
"]",
"==",
"'filename'",
":",
"return",
"problem_input",
"[",
"\"filename\"",
"]",
"else",
":",
"return",
"open",
"(",
"problem_input",
"[",
"\"value\"",
"]",
",",
"'rb'",
")",
".",
"read",
"(",
")",
"else",
":",
"return",
"problem_input"
] | 39.466667 | [
0.043478260869565216,
0.05172413793103448,
0.10714285714285714,
0.05660377358490566,
0.2857142857142857,
0.06896551724137931,
0.0625,
0.0392156862745098,
0.03,
0.03508771929824561,
0.045454545454545456,
0.15384615384615385,
0.03333333333333333,
0.2222222222222222,
0.07142857142857142
] |
def search_all(self):
'''a "list all" search that doesn't require a query. Here we return to
the user all objects that have custom properties value type set to
container, which is set when the image is pushed.
IMPORTANT: the upload function adds this metadata. For a container to
be found by the client, it must have the properties value with type
as container. It also should have a "uri" in properties to show the
user, otherwise the user will have to query / download based on the id
'''
results = self._list_containers()
matches = []
bot.info("[drive://%s] Containers" %self._base)
rows = []
for i in results:
# Fallback to the image name without the extension
uri = i['name'].replace('.simg','')
# However the properties should include the uri
if 'properties' in i:
if 'uri' in i['properties']:
uri = i['properties']['uri']
rows.append([i['id'],uri])
# Give the user back a uri
i['uri'] = uri
matches.append(i)
bot.custom(prefix=" [drive://%s]" %self._base,
message="\t\t[id]\t[uri]",
color="PURPLE")
bot.table(rows)
return matches | [
"def",
"search_all",
"(",
"self",
")",
":",
"results",
"=",
"self",
".",
"_list_containers",
"(",
")",
"matches",
"=",
"[",
"]",
"bot",
".",
"info",
"(",
"\"[drive://%s] Containers\"",
"%",
"self",
".",
"_base",
")",
"rows",
"=",
"[",
"]",
"for",
"i",
"in",
"results",
":",
"# Fallback to the image name without the extension",
"uri",
"=",
"i",
"[",
"'name'",
"]",
".",
"replace",
"(",
"'.simg'",
",",
"''",
")",
"# However the properties should include the uri",
"if",
"'properties'",
"in",
"i",
":",
"if",
"'uri'",
"in",
"i",
"[",
"'properties'",
"]",
":",
"uri",
"=",
"i",
"[",
"'properties'",
"]",
"[",
"'uri'",
"]",
"rows",
".",
"append",
"(",
"[",
"i",
"[",
"'id'",
"]",
",",
"uri",
"]",
")",
"# Give the user back a uri",
"i",
"[",
"'uri'",
"]",
"=",
"uri",
"matches",
".",
"append",
"(",
"i",
")",
"bot",
".",
"custom",
"(",
"prefix",
"=",
"\" [drive://%s]\"",
"%",
"self",
".",
"_base",
",",
"message",
"=",
"\"\\t\\t[id]\\t[uri]\"",
",",
"color",
"=",
"\"PURPLE\"",
")",
"bot",
".",
"table",
"(",
"rows",
")",
"return",
"matches"
] | 32.078947 | [
0.047619047619047616,
0.02702702702702703,
0.0410958904109589,
0.07017543859649122,
0,
0.039473684210526314,
0.04054054054054054,
0.05333333333333334,
0.03896103896103896,
0.2857142857142857,
2,
0.05405405405405406,
0.125,
0,
0.058823529411764705,
0,
0.15384615384615385,
0.09523809523809523,
0,
0.034482758620689655,
0.06976744186046512,
0,
0.03636363636363636,
0.06896551724137931,
0.05,
0.045454545454545456,
0.08823529411764706,
0,
0.058823529411764705,
0.09090909090909091,
0.08,
0,
0.09433962264150944,
0.11904761904761904,
0.16666666666666666,
0,
0.10526315789473684,
0.1111111111111111
] |
def parents(self, vertex):
"""
Return the list of immediate parents of this vertex.
"""
return [self.tail(edge) for edge in self.in_edges(vertex)] | [
"def",
"parents",
"(",
"self",
",",
"vertex",
")",
":",
"return",
"[",
"self",
".",
"tail",
"(",
"edge",
")",
"for",
"edge",
"in",
"self",
".",
"in_edges",
"(",
"vertex",
")",
"]"
] | 29 | [
0.038461538461538464,
0.18181818181818182,
0.03333333333333333,
0,
0.18181818181818182,
0.030303030303030304
] |
def create_oqhazardlib_source(self, tom, mesh_spacing, use_defaults=False):
"""
Returns an instance of the :class:
`openquake.hazardlib.source.simple_fault.SimpleFaultSource`
:param tom:
Temporal occurrence model
:param float mesh_spacing:
Mesh spacing
"""
if not self.mfd:
raise ValueError("Cannot write to hazardlib without MFD")
return SimpleFaultSource(
self.id,
self.name,
self.trt,
self.mfd,
mesh_spacing,
conv.mag_scale_rel_to_hazardlib(self.mag_scale_rel, use_defaults),
conv.render_aspect_ratio(self.rupt_aspect_ratio, use_defaults),
tom,
self.upper_depth,
self.lower_depth,
self.fault_trace,
self.dip,
self.rake) | [
"def",
"create_oqhazardlib_source",
"(",
"self",
",",
"tom",
",",
"mesh_spacing",
",",
"use_defaults",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"mfd",
":",
"raise",
"ValueError",
"(",
"\"Cannot write to hazardlib without MFD\"",
")",
"return",
"SimpleFaultSource",
"(",
"self",
".",
"id",
",",
"self",
".",
"name",
",",
"self",
".",
"trt",
",",
"self",
".",
"mfd",
",",
"mesh_spacing",
",",
"conv",
".",
"mag_scale_rel_to_hazardlib",
"(",
"self",
".",
"mag_scale_rel",
",",
"use_defaults",
")",
",",
"conv",
".",
"render_aspect_ratio",
"(",
"self",
".",
"rupt_aspect_ratio",
",",
"use_defaults",
")",
",",
"tom",
",",
"self",
".",
"upper_depth",
",",
"self",
".",
"lower_depth",
",",
"self",
".",
"fault_trace",
",",
"self",
".",
"dip",
",",
"self",
".",
"rake",
")"
] | 32.923077 | [
0.013333333333333334,
0.18181818181818182,
0.09523809523809523,
0.04477611940298507,
0,
0.15789473684210525,
0.07894736842105263,
0.08823529411764706,
0.12,
0.18181818181818182,
0.08333333333333333,
0.028985507246376812,
0.09090909090909091,
0.1,
0.09090909090909091,
0.09523809523809523,
0.09523809523809523,
0.08,
0.02564102564102564,
0.02666666666666667,
0.125,
0.06896551724137931,
0.06896551724137931,
0.06896551724137931,
0.09523809523809523,
0.13636363636363635
] |
def setup(self, config_file=None, aws_config=None, gpg_config=None,
decrypt_gpg=True, decrypt_kms=True):
"""Make setup easier by providing a constructor method.
Move to config_file
File can be located with a filename only, relative path, or absolute path.
If only name or relative path is provided, look in this order:
1. current directory
2. `~/.config/<file_name>`
3. `/etc/<file_name>`
It is a good idea to include your __package__ in the file name.
For example, `cfg = Config(os.path.join(__package__, 'config.yaml'))`.
This way it will look for your_package/config.yaml,
~/.config/your_package/config.yaml, and /etc/your_package/config.yaml.
"""
if aws_config is not None:
self.aws_config = aws_config
if gpg_config is not None:
self.gpg_config = gpg_config
if decrypt_kms is not None:
self.decrypt_kms = decrypt_kms
if decrypt_gpg is not None:
self.decrypt_gpg = decrypt_gpg
# Again, load the file last so that it can rely on other properties.
if config_file is not None:
self.config_file = config_file
return self | [
"def",
"setup",
"(",
"self",
",",
"config_file",
"=",
"None",
",",
"aws_config",
"=",
"None",
",",
"gpg_config",
"=",
"None",
",",
"decrypt_gpg",
"=",
"True",
",",
"decrypt_kms",
"=",
"True",
")",
":",
"if",
"aws_config",
"is",
"not",
"None",
":",
"self",
".",
"aws_config",
"=",
"aws_config",
"if",
"gpg_config",
"is",
"not",
"None",
":",
"self",
".",
"gpg_config",
"=",
"gpg_config",
"if",
"decrypt_kms",
"is",
"not",
"None",
":",
"self",
".",
"decrypt_kms",
"=",
"decrypt_kms",
"if",
"decrypt_gpg",
"is",
"not",
"None",
":",
"self",
".",
"decrypt_gpg",
"=",
"decrypt_gpg",
"# Again, load the file last so that it can rely on other properties.",
"if",
"config_file",
"is",
"not",
"None",
":",
"self",
".",
"config_file",
"=",
"config_file",
"return",
"self"
] | 42.068966 | [
0.029850746268656716,
0.12,
0.031746031746031744,
0,
0.07407407407407407,
0.036585365853658534,
0.02857142857142857,
0,
0.07142857142857142,
0.11764705882352941,
0.1724137931034483,
0,
0.02857142857142857,
0.038461538461538464,
0.03389830508474576,
0.02564102564102564,
0.18181818181818182,
0.058823529411764705,
0.05,
0.058823529411764705,
0.05,
0.05714285714285714,
0.047619047619047616,
0.05714285714285714,
0.047619047619047616,
0.02631578947368421,
0.05714285714285714,
0.047619047619047616,
0.10526315789473684
] |
def objectgetter(model, attr_name='pk', field_name='pk'):
"""
Helper that returns a function suitable for use as the ``fn`` argument
to the ``permission_required`` decorator. Internally uses
``get_object_or_404``, so keep in mind that this may raise ``Http404``.
``model`` can be a model class, manager or queryset.
``attr_name`` is the name of the view attribute.
``field_name`` is the model's field name by which the lookup is made, eg.
"id", "slug", etc.
"""
def _getter(request, *view_args, **view_kwargs):
if attr_name not in view_kwargs:
raise ImproperlyConfigured(
'Argument {0} is not available. Given arguments: [{1}]'
.format(attr_name, ', '.join(view_kwargs.keys())))
try:
return get_object_or_404(model, **{field_name: view_kwargs[attr_name]})
except FieldError:
raise ImproperlyConfigured(
'Model {0} has no field named {1}'
.format(model, field_name))
return _getter | [
"def",
"objectgetter",
"(",
"model",
",",
"attr_name",
"=",
"'pk'",
",",
"field_name",
"=",
"'pk'",
")",
":",
"def",
"_getter",
"(",
"request",
",",
"*",
"view_args",
",",
"*",
"*",
"view_kwargs",
")",
":",
"if",
"attr_name",
"not",
"in",
"view_kwargs",
":",
"raise",
"ImproperlyConfigured",
"(",
"'Argument {0} is not available. Given arguments: [{1}]'",
".",
"format",
"(",
"attr_name",
",",
"', '",
".",
"join",
"(",
"view_kwargs",
".",
"keys",
"(",
")",
")",
")",
")",
"try",
":",
"return",
"get_object_or_404",
"(",
"model",
",",
"*",
"*",
"{",
"field_name",
":",
"view_kwargs",
"[",
"attr_name",
"]",
"}",
")",
"except",
"FieldError",
":",
"raise",
"ImproperlyConfigured",
"(",
"'Model {0} has no field named {1}'",
".",
"format",
"(",
"model",
",",
"field_name",
")",
")",
"return",
"_getter"
] | 41.08 | [
0.017543859649122806,
0.2857142857142857,
0.04054054054054054,
0.04918032786885246,
0.04,
0,
0.05357142857142857,
0,
0.057692307692307696,
0,
0.03896103896103896,
0.09090909090909091,
0.2857142857142857,
0.038461538461538464,
0.05,
0.07692307692307693,
0.028169014084507043,
0.045454545454545456,
0.16666666666666666,
0.03614457831325301,
0.07692307692307693,
0.07692307692307693,
0.04,
0.06976744186046512,
0.1111111111111111
] |
def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10, use_stored_folds=None, n_cores=1,
train_global_model=False):
"""Train the Concise model in cross-validation.
Args:
X_feat: See :py:func:`concise.Concise.train`
X_seq: See :py:func:`concise.Concise.train`
y: See :py:func:`concise.Concise.train`
id_vec: List of character ids used to differentiate the training samples. Returned by :py:func:`concise.prepare_data`.
n_folds (int): Number of CV-folds to use.
use_stored_folds (chr or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated.
n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.
train_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`).
"""
# TODO: input check - dimensions
self._use_stored_folds = use_stored_folds
self._n_folds = n_folds
self._n_rows = X_feat.shape[0]
# TODO: - fix the get_cv_accuracy
# save:
# - each model
# - each model's performance
# - each model's predictions
# - globally:
# - mean perfomance
# - sd performance
# - predictions
self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds)
cv_obj = {}
if id_vec is None:
id_vec = np.arange(1, self._n_rows + 1)
best_val_acc_epoch_l = []
for fold, train, test in self._kf:
X_feat_train = X_feat[train]
X_seq_train = X_seq[train]
y_train = y[train]
X_feat_test = X_feat[test]
X_seq_test = X_seq[test]
y_test = y[test]
id_vec_test = id_vec[test]
print(fold, "/", n_folds)
# copy the object
dc = copy.deepcopy(self._concise_model)
dc.train(X_feat_train, X_seq_train, y_train,
X_feat_test, X_seq_test, y_test,
n_cores=n_cores
)
dc._test(X_feat_test, X_seq_test, y_test, id_vec_test)
cv_obj[fold] = dc
best_val_acc_epoch_l.append(dc.get_accuracy()["best_val_acc_epoch"])
self._cv_model = cv_obj
    # additionally train the global model
if train_global_model:
dc = copy.deepcopy(self._concise_model)
# overwrite n_epochs with the best average number of best epochs
dc._param["n_epochs"] = int(np.array(best_val_acc_epoch_l).mean())
print("tranining global model with n_epochs = " + str(dc._param["n_epochs"]))
dc.train(X_feat, X_seq, y,
n_cores=n_cores
)
dc._test(X_feat, X_seq, y, id_vec)
self._concise_global_model = dc | [
"def",
"train",
"(",
"self",
",",
"X_feat",
",",
"X_seq",
",",
"y",
",",
"id_vec",
"=",
"None",
",",
"n_folds",
"=",
"10",
",",
"use_stored_folds",
"=",
"None",
",",
"n_cores",
"=",
"1",
",",
"train_global_model",
"=",
"False",
")",
":",
"# TODO: input check - dimensions",
"self",
".",
"_use_stored_folds",
"=",
"use_stored_folds",
"self",
".",
"_n_folds",
"=",
"n_folds",
"self",
".",
"_n_rows",
"=",
"X_feat",
".",
"shape",
"[",
"0",
"]",
"# TODO: - fix the get_cv_accuracy",
"# save:",
"# - each model",
"# - each model's performance",
"# - each model's predictions",
"# - globally:",
"# - mean perfomance",
"# - sd performance",
"# - predictions",
"self",
".",
"_kf",
"=",
"self",
".",
"_get_folds",
"(",
"self",
".",
"_n_rows",
",",
"self",
".",
"_n_folds",
",",
"self",
".",
"_use_stored_folds",
")",
"cv_obj",
"=",
"{",
"}",
"if",
"id_vec",
"is",
"None",
":",
"id_vec",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"self",
".",
"_n_rows",
"+",
"1",
")",
"best_val_acc_epoch_l",
"=",
"[",
"]",
"for",
"fold",
",",
"train",
",",
"test",
"in",
"self",
".",
"_kf",
":",
"X_feat_train",
"=",
"X_feat",
"[",
"train",
"]",
"X_seq_train",
"=",
"X_seq",
"[",
"train",
"]",
"y_train",
"=",
"y",
"[",
"train",
"]",
"X_feat_test",
"=",
"X_feat",
"[",
"test",
"]",
"X_seq_test",
"=",
"X_seq",
"[",
"test",
"]",
"y_test",
"=",
"y",
"[",
"test",
"]",
"id_vec_test",
"=",
"id_vec",
"[",
"test",
"]",
"print",
"(",
"fold",
",",
"\"/\"",
",",
"n_folds",
")",
"# copy the object",
"dc",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_concise_model",
")",
"dc",
".",
"train",
"(",
"X_feat_train",
",",
"X_seq_train",
",",
"y_train",
",",
"X_feat_test",
",",
"X_seq_test",
",",
"y_test",
",",
"n_cores",
"=",
"n_cores",
")",
"dc",
".",
"_test",
"(",
"X_feat_test",
",",
"X_seq_test",
",",
"y_test",
",",
"id_vec_test",
")",
"cv_obj",
"[",
"fold",
"]",
"=",
"dc",
"best_val_acc_epoch_l",
".",
"append",
"(",
"dc",
".",
"get_accuracy",
"(",
")",
"[",
"\"best_val_acc_epoch\"",
"]",
")",
"self",
".",
"_cv_model",
"=",
"cv_obj",
"# additionaly train the global model",
"if",
"train_global_model",
":",
"dc",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_concise_model",
")",
"# overwrite n_epochs with the best average number of best epochs",
"dc",
".",
"_param",
"[",
"\"n_epochs\"",
"]",
"=",
"int",
"(",
"np",
".",
"array",
"(",
"best_val_acc_epoch_l",
")",
".",
"mean",
"(",
")",
")",
"print",
"(",
"\"tranining global model with n_epochs = \"",
"+",
"str",
"(",
"dc",
".",
"_param",
"[",
"\"n_epochs\"",
"]",
")",
")",
"dc",
".",
"train",
"(",
"X_feat",
",",
"X_seq",
",",
"y",
",",
"n_cores",
"=",
"n_cores",
")",
"dc",
".",
"_test",
"(",
"X_feat",
",",
"X_seq",
",",
"y",
",",
"id_vec",
")",
"self",
".",
"_concise_global_model",
"=",
"dc"
] | 41.013699 | [
0.03260869565217391,
0.125,
0.03636363636363636,
0,
0.15384615384615385,
0.125,
0.12727272727272726,
0.13725490196078433,
0.06153846153846154,
0.05660377358490566,
0.052083333333333336,
0.029850746268656716,
0.05405405405405406,
0.18181818181818182,
0.05,
0.04081632653061224,
0.06451612903225806,
0.05263157894736842,
0,
0.05405405405405406,
0.18181818181818182,
0.1111111111111111,
0.0625,
0.0625,
0.11764705882352941,
0.07692307692307693,
0.08,
0.09090909090909091,
0,
0.034482758620689655,
0,
0.10526315789473684,
0,
0.07692307692307693,
0.0392156862745098,
0,
0.06060606060606061,
0.047619047619047616,
0.05,
0.05263157894736842,
0.06666666666666667,
0,
0.05263157894736842,
0.05555555555555555,
0.07142857142857142,
0.05263157894736842,
0.05405405405405406,
0,
0.06896551724137931,
0.0392156862745098,
0.05357142857142857,
0.05660377358490566,
0.1111111111111111,
0.18181818181818182,
0,
0.030303030303030304,
0.06896551724137931,
0.0375,
0.06451612903225806,
0,
0.045454545454545456,
0.06666666666666667,
0.0392156862745098,
0,
0.02631578947368421,
0.02564102564102564,
0.033707865168539325,
0,
0.07894736842105263,
0.1111111111111111,
0.18181818181818182,
0.043478260869565216,
0.046511627906976744
] |
def launch(program, sock, stderr=True, cwd=None, env=None):
"""
A static method for launching a process that is connected to a given
socket. Same rules from the Process constructor apply.
"""
if stderr is True:
err = sock # redirect to socket
elif stderr is False:
err = open(os.devnull, 'wb') # hide
elif stderr is None:
err = None # redirect to console
p = subprocess.Popen(program,
shell=type(program) not in (list, tuple),
stdin=sock, stdout=sock, stderr=err,
cwd=cwd, env=env,
close_fds=True)
sock.close()
return p | [
"def",
"launch",
"(",
"program",
",",
"sock",
",",
"stderr",
"=",
"True",
",",
"cwd",
"=",
"None",
",",
"env",
"=",
"None",
")",
":",
"if",
"stderr",
"is",
"True",
":",
"err",
"=",
"sock",
"# redirect to socket",
"elif",
"stderr",
"is",
"False",
":",
"err",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'wb'",
")",
"# hide",
"elif",
"stderr",
"is",
"None",
":",
"err",
"=",
"None",
"# redirect to console",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"program",
",",
"shell",
"=",
"type",
"(",
"program",
")",
"not",
"in",
"(",
"list",
",",
"tuple",
")",
",",
"stdin",
"=",
"sock",
",",
"stdout",
"=",
"sock",
",",
"stderr",
"=",
"err",
",",
"cwd",
"=",
"cwd",
",",
"env",
"=",
"env",
",",
"close_fds",
"=",
"True",
")",
"sock",
".",
"close",
"(",
")",
"return",
"p"
] | 34.1 | [
0.01694915254237288,
0.18181818181818182,
0.02631578947368421,
0.03225806451612903,
0.18181818181818182,
0.07692307692307693,
0.06976744186046512,
0.06896551724137931,
0.06382978723404255,
0.07142857142857142,
0.06818181818181818,
0,
0.08108108108108109,
0.05263157894736842,
0.09615384615384616,
0.12121212121212122,
0.12903225806451613,
0,
0.1,
0.125
] |
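A minimal usage sketch for the launch helper above on a Unix-like system, assuming it is reachable as a plain function; the cat command and the socketpair wiring are illustrative only:

import socket

# One end goes to the child as stdin/stdout/stderr, the other stays with us.
parent_sock, child_sock = socket.socketpair()
proc = launch(['cat'], child_sock, stderr=True)

parent_sock.sendall(b'hello\n')
print(parent_sock.recv(1024))  # b'hello\n' echoed back by cat
parent_sock.close()            # EOF on the child's stdin, so cat exits
proc.wait()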
def _get_normal_peptides(job, mhc_df, iars, peplen):
"""
Get the corresponding normal peptides for the tumor peptides that have already been subjected to
mhc:peptide binding prediction.
:param pandas.DataFrame mhc_df: The dataframe of mhc:peptide binding results
:param dict iars: The dict of lists of tumor and normal peptide iar sequences
:param str peplen: Length of the peptides to consider.
:return: normal peptides and the updated results containing the normal peptides
:rtype: tuple(pandas.DataFrame, list)
"""
peplen = int(peplen)
normal_peptides = []
for pred in mhc_df.itertuples():
containing_iars = [i for i, sl in iars.items() if pred.pept in sl[0]]
assert len(containing_iars) != 0, "No IARS contained the peptide"
if len(iars[containing_iars[0]]) == 1:
# This is a fusion and has no corresponding normal
normal_peptides.append('N' * peplen)
else:
# If there are multiple IARs, they all or none of them have to have a corresponding
# normal.
if len(set([len(y) for x, y in iars.items() if x in containing_iars])) != 1:
job.fileStore.logToMaster('Some IARS were found to contain the substring but were'
'inconsistent with the presence of a corresponding '
'normal.')
normal_peptides.append('N' * peplen)
else:
tum, norm = iars[containing_iars[0]]
pos = tum.find(pred.pept)
temp_normal_pept = norm[pos:pos + peplen]
ndiff = pept_diff(pred.pept, temp_normal_pept)
assert ndiff != 0
if ndiff == 1:
normal_peptides.append(norm[pos:pos + peplen])
else:
if len(tum) == len(norm):
# Too (2+) many single nucleotide changes to warrant having a normal
# counterpart. This might be an artifact
normal_peptides.append('N' * peplen)
else:
# There is an indel in play. The difference cannot be in the last AA as that
# would have come out properly in the first case. There is a possibility
# that the indel was in the first AA causing a shift. We can handle that by
# looking at the suffix.
pos = norm.find(pred.pept[1:])
if pos != -1:
# The suffix was found,
normal_peptides.append(norm[pos-1:pos + peplen])
else:
# The indel was too large to warrant having a normal counterpart
normal_peptides.append('N' * peplen)
mhc_df['normal_pept'] = normal_peptides
return mhc_df, normal_peptides | [
"def",
"_get_normal_peptides",
"(",
"job",
",",
"mhc_df",
",",
"iars",
",",
"peplen",
")",
":",
"peplen",
"=",
"int",
"(",
"peplen",
")",
"normal_peptides",
"=",
"[",
"]",
"for",
"pred",
"in",
"mhc_df",
".",
"itertuples",
"(",
")",
":",
"containing_iars",
"=",
"[",
"i",
"for",
"i",
",",
"sl",
"in",
"iars",
".",
"items",
"(",
")",
"if",
"pred",
".",
"pept",
"in",
"sl",
"[",
"0",
"]",
"]",
"assert",
"len",
"(",
"containing_iars",
")",
"!=",
"0",
",",
"\"No IARS contained the peptide\"",
"if",
"len",
"(",
"iars",
"[",
"containing_iars",
"[",
"0",
"]",
"]",
")",
"==",
"1",
":",
"# This is a fusion and has no corresponding normal",
"normal_peptides",
".",
"append",
"(",
"'N'",
"*",
"peplen",
")",
"else",
":",
"# If there are multiple IARs, they all or none of them have to have a corresponding",
"# normal.",
"if",
"len",
"(",
"set",
"(",
"[",
"len",
"(",
"y",
")",
"for",
"x",
",",
"y",
"in",
"iars",
".",
"items",
"(",
")",
"if",
"x",
"in",
"containing_iars",
"]",
")",
")",
"!=",
"1",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Some IARS were found to contain the substring but were'",
"'inconsistent with the presence of a corresponding '",
"'normal.'",
")",
"normal_peptides",
".",
"append",
"(",
"'N'",
"*",
"peplen",
")",
"else",
":",
"tum",
",",
"norm",
"=",
"iars",
"[",
"containing_iars",
"[",
"0",
"]",
"]",
"pos",
"=",
"tum",
".",
"find",
"(",
"pred",
".",
"pept",
")",
"temp_normal_pept",
"=",
"norm",
"[",
"pos",
":",
"pos",
"+",
"peplen",
"]",
"ndiff",
"=",
"pept_diff",
"(",
"pred",
".",
"pept",
",",
"temp_normal_pept",
")",
"assert",
"ndiff",
"!=",
"0",
"if",
"ndiff",
"==",
"1",
":",
"normal_peptides",
".",
"append",
"(",
"norm",
"[",
"pos",
":",
"pos",
"+",
"peplen",
"]",
")",
"else",
":",
"if",
"len",
"(",
"tum",
")",
"==",
"len",
"(",
"norm",
")",
":",
"# Too (2+) many single nucleotide changes to warrant having a normal",
"# counterpart. This might be an artifact",
"normal_peptides",
".",
"append",
"(",
"'N'",
"*",
"peplen",
")",
"else",
":",
"# There is an indel in play. The difference cannot be in the last AA as that",
"# would have come out properly in the first case. There is a possibility",
"# that the indel was in the first AA causing a shift. We can handle that by",
"# looking at the suffix.",
"pos",
"=",
"norm",
".",
"find",
"(",
"pred",
".",
"pept",
"[",
"1",
":",
"]",
")",
"if",
"pos",
"!=",
"-",
"1",
":",
"# The suffix was found,",
"normal_peptides",
".",
"append",
"(",
"norm",
"[",
"pos",
"-",
"1",
":",
"pos",
"+",
"peplen",
"]",
")",
"else",
":",
"# The indel was too large to warrant having a normal counterpart",
"normal_peptides",
".",
"append",
"(",
"'N'",
"*",
"peplen",
")",
"mhc_df",
"[",
"'normal_pept'",
"]",
"=",
"normal_peptides",
"return",
"mhc_df",
",",
"normal_peptides"
] | 53.363636 | [
0.019230769230769232,
0.2857142857142857,
0.03,
0.08571428571428572,
0,
0.0625,
0.04938271604938271,
0.05172413793103448,
0.04819277108433735,
0.07317073170731707,
0.2857142857142857,
0.08333333333333333,
0.08333333333333333,
0.05555555555555555,
0.025974025974025976,
0.0273972602739726,
0.043478260869565216,
0.03225806451612903,
0.041666666666666664,
0.15384615384615385,
0.031578947368421054,
0.09523809523809523,
0.03409090909090909,
0.04081632653061224,
0.0425531914893617,
0.07692307692307693,
0.038461538461538464,
0.11764705882352941,
0.038461538461538464,
0.04878048780487805,
0.03508771929824561,
0.03225806451612903,
0.06060606060606061,
0.06666666666666667,
0.030303030303030304,
0.09523809523809523,
0.044444444444444446,
0.03260869565217391,
0.03125,
0.03333333333333333,
0.08,
0.03,
0.03125,
0.030303030303030304,
0.041666666666666664,
0.037037037037037035,
0.05405405405405406,
0.0392156862745098,
0.02631578947368421,
0.06896551724137931,
0.03260869565217391,
0.03125,
0,
0.046511627906976744,
0.058823529411764705
] |
def get_index_from_alias(alias_name, index_client=None):
"""Retrieve the base index name from an alias
Args:
alias_name (str) Name of the alias
index_client (Elasticsearch.IndicesClient) an Elasticsearch index
client. Optional, will create one if not given
Returns: (str) Name of index
"""
index_client = index_client or indices_client()
if not index_client.exists_alias(name=alias_name):
return None
return list(index_client.get_alias(name=alias_name).keys())[0] | [
"def",
"get_index_from_alias",
"(",
"alias_name",
",",
"index_client",
"=",
"None",
")",
":",
"index_client",
"=",
"index_client",
"or",
"indices_client",
"(",
")",
"if",
"not",
"index_client",
".",
"exists_alias",
"(",
"name",
"=",
"alias_name",
")",
":",
"return",
"None",
"return",
"list",
"(",
"index_client",
".",
"get_alias",
"(",
"name",
"=",
"alias_name",
")",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]"
] | 36.857143 | [
0.017857142857142856,
0.04081632653061224,
0,
0.2222222222222222,
0.07142857142857142,
0.0410958904109589,
0.034482758620689655,
0,
0.0625,
0.2857142857142857,
0.0392156862745098,
0.037037037037037035,
0.10526315789473684,
0.030303030303030304
] |
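A hedged usage sketch for get_index_from_alias above, assuming the elasticsearch-py client; the alias name and host URL are illustrative:

from elasticsearch import Elasticsearch

# Elasticsearch(...).indices is an IndicesClient, the type the helper expects.
index_client = Elasticsearch('http://localhost:9200').indices
index_name = get_index_from_alias('jobs_current', index_client=index_client)
if index_name is None:
    print('alias does not exist yet')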
def header(self):
"""
Return the BAM/SAM header
Returns
-------
generator
Each line of the header
"""
cmd = [self.__samtools, 'view', '-H', self.__bam]
stdout = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
for l in stdout:
yield l.decode('utf-8').strip()
stdout.close() | [
"def",
"header",
"(",
"self",
")",
":",
"cmd",
"=",
"[",
"self",
".",
"__samtools",
",",
"'view'",
",",
"'-H'",
",",
"self",
".",
"__bam",
"]",
"stdout",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"stdout",
"for",
"l",
"in",
"stdout",
":",
"yield",
"l",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")",
"stdout",
".",
"close",
"(",
")"
] | 21.388889 | [
0.058823529411764705,
0.18181818181818182,
0.06060606060606061,
0.25,
0.13333333333333333,
0.13333333333333333,
0.11764705882352941,
0.05714285714285714,
0.18181818181818182,
0.25,
0.03508771929824561,
0,
0.028985507246376812,
0,
0.125,
0.046511627906976744,
0,
0.09090909090909091
] |
def _raise_error_network(option, expected):
'''
Log and raise an error with a logical formatted message.
'''
msg = _error_msg_network(option, expected)
log.error(msg)
raise AttributeError(msg) | [
"def",
"_raise_error_network",
"(",
"option",
",",
"expected",
")",
":",
"msg",
"=",
"_error_msg_network",
"(",
"option",
",",
"expected",
")",
"log",
".",
"error",
"(",
"msg",
")",
"raise",
"AttributeError",
"(",
"msg",
")"
] | 30 | [
0.023255813953488372,
0.2857142857142857,
0.03333333333333333,
0.2857142857142857,
0.043478260869565216,
0.1111111111111111,
0.06896551724137931
] |
def forward(self, images, features, targets=None):
"""
Arguments:
images (ImageList): images for which we want to compute the predictions
features (list[Tensor]): features computed from the images that are
used for computing the predictions. Each tensor in the list
correspond to different feature levels
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per
image.
losses (dict[Tensor]): the losses for the model during training. During
testing, it is an empty dict.
"""
box_cls, box_regression = self.head(features)
anchors = self.anchor_generator(images, features)
if self.training:
return self._forward_train(anchors, box_cls, box_regression, targets)
else:
return self._forward_test(anchors, box_cls, box_regression) | [
"def",
"forward",
"(",
"self",
",",
"images",
",",
"features",
",",
"targets",
"=",
"None",
")",
":",
"box_cls",
",",
"box_regression",
"=",
"self",
".",
"head",
"(",
"features",
")",
"anchors",
"=",
"self",
".",
"anchor_generator",
"(",
"images",
",",
"features",
")",
"if",
"self",
".",
"training",
":",
"return",
"self",
".",
"_forward_train",
"(",
"anchors",
",",
"box_cls",
",",
"box_regression",
",",
"targets",
")",
"else",
":",
"return",
"self",
".",
"_forward_test",
"(",
"anchors",
",",
"box_cls",
",",
"box_regression",
")"
] | 46.272727 | [
0.02,
0.18181818181818182,
0.1111111111111111,
0.04819277108433735,
0.0379746835443038,
0.02666666666666667,
0.037037037037037035,
0.06976744186046512,
0,
0.125,
0.047619047619047616,
0.09090909090909091,
0.04819277108433735,
0.044444444444444446,
0.18181818181818182,
0.03773584905660377,
0.03508771929824561,
2,
0.08,
0.037037037037037035,
0.15384615384615385,
0.028169014084507043
] |
def apply(self, docs, split=0, clear=True, parallelism=None, progress_bar=True):
"""Run the CandidateExtractor.
:Example: To extract candidates from a set of training documents using
4 cores::
candidate_extractor.apply(train_docs, split=0, parallelism=4)
:param docs: Set of documents to extract from.
:param split: Which split to assign the extracted Candidates to.
:type split: int
:param clear: Whether or not to clear the existing Candidates
beforehand.
:type clear: bool
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the
CandidateExtractor if it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
"""
super(CandidateExtractor, self).apply(
docs,
split=split,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
) | [
"def",
"apply",
"(",
"self",
",",
"docs",
",",
"split",
"=",
"0",
",",
"clear",
"=",
"True",
",",
"parallelism",
"=",
"None",
",",
"progress_bar",
"=",
"True",
")",
":",
"super",
"(",
"CandidateExtractor",
",",
"self",
")",
".",
"apply",
"(",
"docs",
",",
"split",
"=",
"split",
",",
"clear",
"=",
"clear",
",",
"parallelism",
"=",
"parallelism",
",",
"progress_bar",
"=",
"progress_bar",
",",
")"
] | 39.413793 | [
0.025,
0.05263157894736842,
0,
0.038461538461538464,
0.14285714285714285,
0,
0.025974025974025976,
0,
0.05555555555555555,
0.041666666666666664,
0.125,
0.043478260869565216,
0.08695652173913043,
0.12,
0.03896103896103896,
0.03076923076923077,
0.04081632653061224,
0.1,
0.04054054054054054,
0.04,
0.09375,
0.18181818181818182,
0.06521739130434782,
0.11764705882352941,
0.125,
0.125,
0.08333333333333333,
0.07894736842105263,
0.3333333333333333
] |
def waitForOSState(rh, userid, desiredState, maxQueries=90, sleepSecs=5):
"""
Wait for the virtual OS to go into the indicated state.
Input:
Request Handle
userid whose state is to be monitored
Desired state, 'up' or 'down', case sensitive
Maximum attempts to wait for desired state before giving up
Sleep duration between waits
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - RC returned from execCmdThruIUCV if overallRC = 0.
rs - RS returned from execCmdThruIUCV if overallRC = 0.
errno - Errno returned from execCmdThruIUCV if overallRC = 0.
response - Updated with an error message if wait times out.
Note:
"""
rh.printSysLog("Enter vmUtils.waitForOSState, userid: " + userid +
" state: " + desiredState +
" maxWait: " + str(maxQueries) +
" sleepSecs: " + str(sleepSecs))
results = {}
strCmd = "echo 'ping'"
stateFnd = False
for i in range(1, maxQueries + 1):
results = execCmdThruIUCV(rh, rh.userid, strCmd)
if results['overallRC'] == 0:
if desiredState == 'up':
stateFnd = True
break
else:
if desiredState == 'down':
stateFnd = True
break
if i < maxQueries:
time.sleep(sleepSecs)
if stateFnd is True:
results = {
'overallRC': 0,
'rc': 0,
'rs': 0,
}
else:
maxWait = maxQueries * sleepSecs
rh.printLn("ES", msgs.msg['0413'][1] % (modId, userid,
desiredState, maxWait))
results = msgs.msg['0413'][0]
rh.printSysLog("Exit vmUtils.waitForOSState, rc: " +
str(results['overallRC']))
return results | [
"def",
"waitForOSState",
"(",
"rh",
",",
"userid",
",",
"desiredState",
",",
"maxQueries",
"=",
"90",
",",
"sleepSecs",
"=",
"5",
")",
":",
"rh",
".",
"printSysLog",
"(",
"\"Enter vmUtils.waitForOSState, userid: \"",
"+",
"userid",
"+",
"\" state: \"",
"+",
"desiredState",
"+",
"\" maxWait: \"",
"+",
"str",
"(",
"maxQueries",
")",
"+",
"\" sleepSecs: \"",
"+",
"str",
"(",
"sleepSecs",
")",
")",
"results",
"=",
"{",
"}",
"strCmd",
"=",
"\"echo 'ping'\"",
"stateFnd",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"maxQueries",
"+",
"1",
")",
":",
"results",
"=",
"execCmdThruIUCV",
"(",
"rh",
",",
"rh",
".",
"userid",
",",
"strCmd",
")",
"if",
"results",
"[",
"'overallRC'",
"]",
"==",
"0",
":",
"if",
"desiredState",
"==",
"'up'",
":",
"stateFnd",
"=",
"True",
"break",
"else",
":",
"if",
"desiredState",
"==",
"'down'",
":",
"stateFnd",
"=",
"True",
"break",
"if",
"i",
"<",
"maxQueries",
":",
"time",
".",
"sleep",
"(",
"sleepSecs",
")",
"if",
"stateFnd",
"is",
"True",
":",
"results",
"=",
"{",
"'overallRC'",
":",
"0",
",",
"'rc'",
":",
"0",
",",
"'rs'",
":",
"0",
",",
"}",
"else",
":",
"maxWait",
"=",
"maxQueries",
"*",
"sleepSecs",
"rh",
".",
"printLn",
"(",
"\"ES\"",
",",
"msgs",
".",
"msg",
"[",
"'0413'",
"]",
"[",
"1",
"]",
"%",
"(",
"modId",
",",
"userid",
",",
"desiredState",
",",
"maxWait",
")",
")",
"results",
"=",
"msgs",
".",
"msg",
"[",
"'0413'",
"]",
"[",
"0",
"]",
"rh",
".",
"printSysLog",
"(",
"\"Exit vmUtils.waitForOSState, rc: \"",
"+",
"str",
"(",
"results",
"[",
"'overallRC'",
"]",
")",
")",
"return",
"results"
] | 30.790323 | [
0.0136986301369863,
0.2857142857142857,
0.03389830508474576,
0,
0.2,
0.14285714285714285,
0.06818181818181818,
0.057692307692307696,
0.045454545454545456,
0.08571428571428572,
0,
0.18181818181818182,
0.06976744186046512,
0.041666666666666664,
0.05555555555555555,
0.05555555555555555,
0.05333333333333334,
0.05714285714285714,
0,
0.2222222222222222,
0,
0.2857142857142857,
0,
0.04285714285714286,
0.05555555555555555,
0.05084745762711865,
0.06779661016949153,
0,
0.125,
0,
0.07692307692307693,
0.1,
0,
0.05263157894736842,
0.03571428571428571,
0.05405405405405406,
0.05555555555555555,
0.06451612903225806,
0.09523809523809523,
0.15384615384615385,
0.05263157894736842,
0.06451612903225806,
0.09523809523809523,
0,
0.07692307692307693,
0.06060606060606061,
0,
0.08333333333333333,
0.15789473684210525,
0.06451612903225806,
0.08333333333333333,
0.08333333333333333,
0.23076923076923078,
0.2222222222222222,
0.05,
0.04838709677419355,
0.08571428571428572,
0.05405405405405406,
0,
0.05357142857142857,
0.08823529411764706,
0.1111111111111111
] |
def write_to_console(self, message: str):
"""
Writes the specified message to the console stdout without including
it in the notebook display.
"""
if not self._step:
raise ValueError(
'Cannot write to the console stdout on an uninitialized step'
)
interceptor = self._step.report.stdout_interceptor
interceptor.write_source('{}'.format(message)) | [
"def",
"write_to_console",
"(",
"self",
",",
"message",
":",
"str",
")",
":",
"if",
"not",
"self",
".",
"_step",
":",
"raise",
"ValueError",
"(",
"'Cannot write to the console stdout on an uninitialized step'",
")",
"interceptor",
"=",
"self",
".",
"_step",
".",
"report",
".",
"stdout_interceptor",
"interceptor",
".",
"write_source",
"(",
"'{}'",
".",
"format",
"(",
"message",
")",
")"
] | 39.181818 | [
0.024390243902439025,
0.18181818181818182,
0.02631578947368421,
0.05714285714285714,
0.18181818181818182,
0.07692307692307693,
0.10344827586206896,
0.025974025974025976,
0.23076923076923078,
0.034482758620689655,
0.037037037037037035
] |
def LSR_C(value, amount, width):
"""
The ARM LSR_C (logical shift right with carry) operation.
:param value: Value to shift
:type value: int or long or BitVec
:param int amount: How many bits to shift it.
:param int width: Width of the value
:return: Resultant value and carry result
:rtype tuple
"""
assert amount > 0
result = GetNBits(value >> amount, width)
carry = Bit(value >> (amount - 1), 0)
return (result, carry) | [
"def",
"LSR_C",
"(",
"value",
",",
"amount",
",",
"width",
")",
":",
"assert",
"amount",
">",
"0",
"result",
"=",
"GetNBits",
"(",
"value",
">>",
"amount",
",",
"width",
")",
"carry",
"=",
"Bit",
"(",
"value",
">>",
"(",
"amount",
"-",
"1",
")",
",",
"0",
")",
"return",
"(",
"result",
",",
"carry",
")"
] | 30.666667 | [
0.03125,
0.2857142857142857,
0.04918032786885246,
0,
0.09375,
0.07894736842105263,
0.061224489795918366,
0.075,
0.06666666666666667,
0.1875,
0.2857142857142857,
0.09523809523809523,
0.044444444444444446,
0.04878048780487805,
0.07692307692307693
] |
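A worked example of LSR_C above, assuming LSR_C from that row is in scope. GetNBits and Bit come from the same module and are not shown, so the integer-only stand-ins below are assumptions:

# Stand-in helpers (assumptions); the originals may also accept BitVec values.
def GetNBits(value, width):
    return value & ((1 << width) - 1)  # keep only the low `width` bits

def Bit(value, position):
    return (value >> position) & 1     # extract a single bit

# LSR_C(0b10110, amount=2, width=5):
#   result = GetNBits(0b10110 >> 2, 5) = 0b00101
#   carry  = Bit(0b10110 >> 1, 0)      = 1   (the last bit shifted out)
assert LSR_C(0b10110, 2, 5) == (0b00101, 1)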
def _extract_translations(self, domains):
"""Extract the translations into `.pot` files"""
for domain, options in domains.items():
# Create the extractor
extractor = babel_frontend.extract_messages()
extractor.initialize_options()
# The temporary location to write the `.pot` file
extractor.output_file = options['pot']
# Add the comments marked with 'tn:' to the translation file for translators to read. Strip the marker.
extractor.add_comments = ['tn:']
extractor.strip_comments = True
# The directory where the sources for this domain are located
extractor.input_paths = [options['source']]
# Pass the metadata to the translator
extractor.msgid_bugs_address = self.manager.args.contact
extractor.copyright_holder = self.manager.args.copyright
extractor.version = self.manager.args.version
extractor.project = self.manager.args.project
extractor.finalize_options()
# Add keywords for lazy translation functions, based on their non-lazy variants
extractor.keywords.update({
'gettext_lazy': extractor.keywords['gettext'],
'ngettext_lazy': extractor.keywords['ngettext'],
'__': extractor.keywords['gettext'], # double underscore for lazy
})
# Do the extraction
_run_babel_command(extractor) | [
"def",
"_extract_translations",
"(",
"self",
",",
"domains",
")",
":",
"for",
"domain",
",",
"options",
"in",
"domains",
".",
"items",
"(",
")",
":",
"# Create the extractor",
"extractor",
"=",
"babel_frontend",
".",
"extract_messages",
"(",
")",
"extractor",
".",
"initialize_options",
"(",
")",
"# The temporary location to write the `.pot` file",
"extractor",
".",
"output_file",
"=",
"options",
"[",
"'pot'",
"]",
"# Add the comments marked with 'tn:' to the translation file for translators to read. Strip the marker.",
"extractor",
".",
"add_comments",
"=",
"[",
"'tn:'",
"]",
"extractor",
".",
"strip_comments",
"=",
"True",
"# The directory where the sources for this domain are located",
"extractor",
".",
"input_paths",
"=",
"[",
"options",
"[",
"'source'",
"]",
"]",
"# Pass the metadata to the translator",
"extractor",
".",
"msgid_bugs_address",
"=",
"self",
".",
"manager",
".",
"args",
".",
"contact",
"extractor",
".",
"copyright_holder",
"=",
"self",
".",
"manager",
".",
"args",
".",
"copyright",
"extractor",
".",
"version",
"=",
"self",
".",
"manager",
".",
"args",
".",
"version",
"extractor",
".",
"project",
"=",
"self",
".",
"manager",
".",
"args",
".",
"project",
"extractor",
".",
"finalize_options",
"(",
")",
"# Add keywords for lazy translation functions, based on their non-lazy variants",
"extractor",
".",
"keywords",
".",
"update",
"(",
"{",
"'gettext_lazy'",
":",
"extractor",
".",
"keywords",
"[",
"'gettext'",
"]",
",",
"'ngettext_lazy'",
":",
"extractor",
".",
"keywords",
"[",
"'ngettext'",
"]",
",",
"'__'",
":",
"extractor",
".",
"keywords",
"[",
"'gettext'",
"]",
",",
"# double underscore for lazy",
"}",
")",
"# Do the extraction",
"_run_babel_command",
"(",
"extractor",
")"
] | 54.851852 | [
0.024390243902439025,
0.03571428571428571,
0.0425531914893617,
0.058823529411764705,
0.03508771929824561,
0.047619047619047616,
0.03278688524590164,
0.04,
0.02608695652173913,
0.045454545454545456,
0.046511627906976744,
0.0273972602739726,
0.03636363636363636,
0.04081632653061224,
0.029411764705882353,
0.029411764705882353,
0.03508771929824561,
0.03508771929824561,
0.05,
0.03296703296703297,
0.07692307692307693,
0.03225806451612903,
0.03125,
0.036585365853658534,
0.21428571428571427,
0.06451612903225806,
0.04878048780487805
] |
def is_long_year(self):
"""
Determines if the instance is a long year
See link `https://en.wikipedia.org/wiki/ISO_8601#Week_dates`_
:rtype: bool
"""
return (
pendulum.datetime(self.year, 12, 28, 0, 0, 0, tz=self.tz).isocalendar()[1]
== 53
) | [
"def",
"is_long_year",
"(",
"self",
")",
":",
"return",
"(",
"pendulum",
".",
"datetime",
"(",
"self",
".",
"year",
",",
"12",
",",
"28",
",",
"0",
",",
"0",
",",
"0",
",",
"tz",
"=",
"self",
".",
"tz",
")",
".",
"isocalendar",
"(",
")",
"[",
"1",
"]",
"==",
"53",
")"
] | 25.916667 | [
0.043478260869565216,
0.18181818181818182,
0.04081632653061224,
0,
0.08695652173913043,
0,
0.15,
0.18181818181818182,
0.1875,
0.03488372093023256,
0.11764705882352941,
0.3333333333333333
] |
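The is_long_year method above behaves like pendulum's; a hedged check, assuming the pendulum library is installed (2015 has 53 ISO weeks, 2016 has 52):

import pendulum

assert pendulum.datetime(2015, 6, 1).is_long_year()
assert not pendulum.datetime(2016, 6, 1).is_long_year()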
def list_all_products(cls, **kwargs):
"""List Products
Return a list of Products
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_products(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Product]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_products_with_http_info(**kwargs)
else:
(data) = cls._list_all_products_with_http_info(**kwargs)
return data | [
"def",
"list_all_products",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_list_all_products_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_list_all_products_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 36.173913 | [
0.02702702702702703,
0.08333333333333333,
0,
0.06060606060606061,
0.02666666666666667,
0.07017543859649122,
0.07407407407407407,
0.09090909090909091,
0,
0.16,
0.08333333333333333,
0.08823529411764706,
0.08571428571428572,
0.1,
0.05357142857142857,
0.06818181818181818,
0.18181818181818182,
0.0425531914893617,
0.06451612903225806,
0.030303030303030304,
0.15384615384615385,
0.029411764705882353,
0.08695652173913043
] |
def get_area_def(self, dsid):
"""Get the area definition of the band."""
cfac = np.int32(self.mda['cfac'])
lfac = np.int32(self.mda['lfac'])
coff = np.float32(self.mda['coff'])
loff = np.float32(self.mda['loff'])
a = self.mda['projection_parameters']['a']
b = self.mda['projection_parameters']['b']
h = self.mda['projection_parameters']['h']
lon_0 = self.mda['projection_parameters']['SSP_longitude']
nlines = int(self.mda['number_of_lines'])
ncols = int(self.mda['number_of_columns'])
area_extent = self.get_area_extent((nlines, ncols),
(loff, coff),
(lfac, cfac),
h)
proj_dict = {'a': float(a),
'b': float(b),
'lon_0': float(lon_0),
'h': float(h),
'proj': 'geos',
'units': 'm'}
area = geometry.AreaDefinition(
'some_area_name',
"On-the-fly area",
'geosmsg',
proj_dict,
ncols,
nlines,
area_extent)
self.area = area
return area | [
"def",
"get_area_def",
"(",
"self",
",",
"dsid",
")",
":",
"cfac",
"=",
"np",
".",
"int32",
"(",
"self",
".",
"mda",
"[",
"'cfac'",
"]",
")",
"lfac",
"=",
"np",
".",
"int32",
"(",
"self",
".",
"mda",
"[",
"'lfac'",
"]",
")",
"coff",
"=",
"np",
".",
"float32",
"(",
"self",
".",
"mda",
"[",
"'coff'",
"]",
")",
"loff",
"=",
"np",
".",
"float32",
"(",
"self",
".",
"mda",
"[",
"'loff'",
"]",
")",
"a",
"=",
"self",
".",
"mda",
"[",
"'projection_parameters'",
"]",
"[",
"'a'",
"]",
"b",
"=",
"self",
".",
"mda",
"[",
"'projection_parameters'",
"]",
"[",
"'b'",
"]",
"h",
"=",
"self",
".",
"mda",
"[",
"'projection_parameters'",
"]",
"[",
"'h'",
"]",
"lon_0",
"=",
"self",
".",
"mda",
"[",
"'projection_parameters'",
"]",
"[",
"'SSP_longitude'",
"]",
"nlines",
"=",
"int",
"(",
"self",
".",
"mda",
"[",
"'number_of_lines'",
"]",
")",
"ncols",
"=",
"int",
"(",
"self",
".",
"mda",
"[",
"'number_of_columns'",
"]",
")",
"area_extent",
"=",
"self",
".",
"get_area_extent",
"(",
"(",
"nlines",
",",
"ncols",
")",
",",
"(",
"loff",
",",
"coff",
")",
",",
"(",
"lfac",
",",
"cfac",
")",
",",
"h",
")",
"proj_dict",
"=",
"{",
"'a'",
":",
"float",
"(",
"a",
")",
",",
"'b'",
":",
"float",
"(",
"b",
")",
",",
"'lon_0'",
":",
"float",
"(",
"lon_0",
")",
",",
"'h'",
":",
"float",
"(",
"h",
")",
",",
"'proj'",
":",
"'geos'",
",",
"'units'",
":",
"'m'",
"}",
"area",
"=",
"geometry",
".",
"AreaDefinition",
"(",
"'some_area_name'",
",",
"\"On-the-fly area\"",
",",
"'geosmsg'",
",",
"proj_dict",
",",
"ncols",
",",
"nlines",
",",
"area_extent",
")",
"self",
".",
"area",
"=",
"area",
"return",
"area"
] | 33.567568 | [
0.034482758620689655,
0.04,
0.04878048780487805,
0.04878048780487805,
0.046511627906976744,
0.046511627906976744,
0,
0.04,
0.04,
0.04,
0.030303030303030304,
0.04081632653061224,
0.04,
0,
0.05084745762711865,
0.05357142857142857,
0.05357142857142857,
0.08888888888888889,
0,
0.08571428571428572,
0.08571428571428572,
0.06976744186046512,
0.08571428571428572,
0.08333333333333333,
0.11764705882352941,
0,
0.07692307692307693,
0.06896551724137931,
0.06666666666666667,
0.09090909090909091,
0.09090909090909091,
0.1111111111111111,
0.10526315789473684,
0.125,
0,
0.08333333333333333,
0.10526315789473684
] |
def lerp_quat(from_quat, to_quat, percent):
"""Return linear interpolation of two quaternions."""
# Check if signs need to be reversed.
if dot_quat(from_quat, to_quat) < 0.0:
to_sign = -1
else:
to_sign = 1
# Simple linear interpolation
percent_from = 1.0 - percent
percent_to = percent
result = Quat(
percent_from * from_quat.x + to_sign * percent_to * to_quat.x,
percent_from * from_quat.y + to_sign * percent_to * to_quat.y,
percent_from * from_quat.z + to_sign * percent_to * to_quat.z,
percent_from * from_quat.w + to_sign * percent_to * to_quat.w)
return result | [
"def",
"lerp_quat",
"(",
"from_quat",
",",
"to_quat",
",",
"percent",
")",
":",
"# Check if signs need to be reversed.",
"if",
"dot_quat",
"(",
"from_quat",
",",
"to_quat",
")",
"<",
"0.0",
":",
"to_sign",
"=",
"-",
"1",
"else",
":",
"to_sign",
"=",
"1",
"# Simple linear interpolation",
"percent_from",
"=",
"1.0",
"-",
"percent",
"percent_to",
"=",
"percent",
"result",
"=",
"Quat",
"(",
"percent_from",
"*",
"from_quat",
".",
"x",
"+",
"to_sign",
"*",
"percent_to",
"*",
"to_quat",
".",
"x",
",",
"percent_from",
"*",
"from_quat",
".",
"y",
"+",
"to_sign",
"*",
"percent_to",
"*",
"to_quat",
".",
"y",
",",
"percent_from",
"*",
"from_quat",
".",
"z",
"+",
"to_sign",
"*",
"percent_to",
"*",
"to_quat",
".",
"z",
",",
"percent_from",
"*",
"from_quat",
".",
"w",
"+",
"to_sign",
"*",
"percent_to",
"*",
"to_quat",
".",
"w",
")",
"return",
"result"
] | 31.75 | [
0.023255813953488372,
0.03508771929824561,
0,
0.04878048780487805,
0.047619047619047616,
0.1,
0.2222222222222222,
0.10526315789473684,
0,
0.06060606060606061,
0.0625,
0.08333333333333333,
0,
0.16666666666666666,
0.02857142857142857,
0.02857142857142857,
0.02857142857142857,
0.04285714285714286,
0,
0.11764705882352941
] |
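A usage sketch for lerp_quat above. Quat and dot_quat live elsewhere in the same module, so the stand-ins below are assumptions (a namedtuple and a plain 4-component dot product):

from collections import namedtuple

Quat = namedtuple('Quat', 'x y z w')  # assumed field order

def dot_quat(a, b):
    return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w

identity = Quat(0.0, 0.0, 0.0, 1.0)
quarter_z = Quat(0.0, 0.0, 0.7071, 0.7071)  # roughly 90 degrees about Z

halfway = lerp_quat(identity, quarter_z, 0.5)
print(halfway)  # roughly Quat(0.0, 0.0, 0.354, 0.854)
# Plain lerp is not slerp: the result is not unit length and is normally
# renormalized afterwards.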
def _get_per_location_glob(tasks, outputs, regexes):
"""
Builds a glob listing existing output paths.
Esoteric reverse engineering, but worth it given that (compared to an
equivalent contiguousness guarantee by naive complete() checks)
requests to the filesystem are cut by orders of magnitude, and users
don't even have to retrofit existing tasks anyhow.
"""
paths = [o.path for o in outputs]
# naive, because some matches could be confused by numbers earlier
# in path, e.g. /foo/fifa2000k/bar/2000-12-31/00
matches = [r.search(p) for r, p in zip(regexes, paths)]
for m, p, t in zip(matches, paths, tasks):
if m is None:
raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))
n_groups = len(matches[0].groups())
# the most common position of every group is likely
# to be conclusive hit or miss
positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)]
glob = list(paths[0]) # FIXME sanity check that it's the same for all paths
for start, end in positions:
glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]
# chop off the last path item
# (wouldn't need to if `hadoop fs -ls -d` equivalent were available)
return ''.join(glob).rsplit('/', 1)[0] | [
"def",
"_get_per_location_glob",
"(",
"tasks",
",",
"outputs",
",",
"regexes",
")",
":",
"paths",
"=",
"[",
"o",
".",
"path",
"for",
"o",
"in",
"outputs",
"]",
"# naive, because some matches could be confused by numbers earlier",
"# in path, e.g. /foo/fifa2000k/bar/2000-12-31/00",
"matches",
"=",
"[",
"r",
".",
"search",
"(",
"p",
")",
"for",
"r",
",",
"p",
"in",
"zip",
"(",
"regexes",
",",
"paths",
")",
"]",
"for",
"m",
",",
"p",
",",
"t",
"in",
"zip",
"(",
"matches",
",",
"paths",
",",
"tasks",
")",
":",
"if",
"m",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Couldn't deduce datehour representation in output path %r of task %s\"",
"%",
"(",
"p",
",",
"t",
")",
")",
"n_groups",
"=",
"len",
"(",
"matches",
"[",
"0",
"]",
".",
"groups",
"(",
")",
")",
"# the most common position of every group is likely",
"# to be conclusive hit or miss",
"positions",
"=",
"[",
"most_common",
"(",
"(",
"m",
".",
"start",
"(",
"i",
")",
",",
"m",
".",
"end",
"(",
"i",
")",
")",
"for",
"m",
"in",
"matches",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n_groups",
"+",
"1",
")",
"]",
"glob",
"=",
"list",
"(",
"paths",
"[",
"0",
"]",
")",
"# FIXME sanity check that it's the same for all paths",
"for",
"start",
",",
"end",
"in",
"positions",
":",
"glob",
"=",
"glob",
"[",
":",
"start",
"]",
"+",
"[",
"'[0-9]'",
"]",
"*",
"(",
"end",
"-",
"start",
")",
"+",
"glob",
"[",
"end",
":",
"]",
"# chop off the last path item",
"# (wouldn't need to if `hadoop fs -ls -d` equivalent were available)",
"return",
"''",
".",
"join",
"(",
"glob",
")",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"0",
"]"
] | 46.310345 | [
0.019230769230769232,
0.2857142857142857,
0.041666666666666664,
0,
0.0547945205479452,
0.04477611940298507,
0.027777777777777776,
0.037037037037037035,
0.2857142857142857,
0.05405405405405406,
0.02857142857142857,
0.038461538461538464,
0.03389830508474576,
0,
0.043478260869565216,
0.09523809523809523,
0.025423728813559324,
0,
0.05128205128205128,
0.03636363636363636,
0.058823529411764705,
0.02857142857142857,
0,
0.0375,
0.0625,
0.029411764705882353,
0.06060606060606061,
0.027777777777777776,
0.047619047619047616
] |
def read_crn(filename):
"""
Read NOAA USCRN [1]_ [2]_ fixed-width file into pandas dataframe.
Parameters
----------
filename: str
filepath or url to read for the fixed-width file.
Returns
-------
data: Dataframe
A dataframe with DatetimeIndex and all of the variables in the
file.
Notes
-----
CRN files contain 5 minute averages labeled by the interval ending
time. Here, missing data is flagged as NaN, rather than the lowest
possible integer for a field (e.g. -999 or -99). Air temperature in
deg C. Wind speed in m/s at a height of 1.5 m above ground level.
Variables corresponding to standard pvlib variables are renamed,
e.g. `SOLAR_RADIATION` becomes `ghi`. See the
`pvlib.iotools.crn.VARIABLE_MAP` dict for the complete mapping.
References
----------
.. [1] U.S. Climate Reference Network
`https://www.ncdc.noaa.gov/crn/qcdatasets.html
<https://www.ncdc.noaa.gov/crn/qcdatasets.html>`_
.. [2] Diamond, H. J. et. al., 2013: U.S. Climate Reference Network
after one decade of operations: status and assessment. Bull.
Amer. Meteor. Soc., 94, 489-498. :doi:`10.1175/BAMS-D-12-00170.1`
"""
# read in data
data = pd.read_fwf(filename, header=None, names=HEADERS.split(' '),
widths=WIDTHS)
# loop here because dtype kwarg not supported in read_fwf until 0.20
for (col, _dtype) in zip(data.columns, DTYPES):
data[col] = data[col].astype(_dtype)
# set index
# UTC_TIME does not have leading 0s, so must zfill(4) to comply
# with %H%M format
dts = data[['UTC_DATE', 'UTC_TIME']].astype(str)
dtindex = pd.to_datetime(dts['UTC_DATE'] + dts['UTC_TIME'].str.zfill(4),
format='%Y%m%d%H%M', utc=True)
data = data.set_index(dtindex)
try:
# to_datetime(utc=True) does not work in older versions of pandas
data = data.tz_localize('UTC')
except TypeError:
pass
# set nans
for val in [-99, -999, -9999]:
data = data.where(data != val, np.nan)
data = data.rename(columns=VARIABLE_MAP)
return data | [
"def",
"read_crn",
"(",
"filename",
")",
":",
"# read in data",
"data",
"=",
"pd",
".",
"read_fwf",
"(",
"filename",
",",
"header",
"=",
"None",
",",
"names",
"=",
"HEADERS",
".",
"split",
"(",
"' '",
")",
",",
"widths",
"=",
"WIDTHS",
")",
"# loop here because dtype kwarg not supported in read_fwf until 0.20",
"for",
"(",
"col",
",",
"_dtype",
")",
"in",
"zip",
"(",
"data",
".",
"columns",
",",
"DTYPES",
")",
":",
"data",
"[",
"col",
"]",
"=",
"data",
"[",
"col",
"]",
".",
"astype",
"(",
"_dtype",
")",
"# set index",
"# UTC_TIME does not have leading 0s, so must zfill(4) to comply",
"# with %H%M format",
"dts",
"=",
"data",
"[",
"[",
"'UTC_DATE'",
",",
"'UTC_TIME'",
"]",
"]",
".",
"astype",
"(",
"str",
")",
"dtindex",
"=",
"pd",
".",
"to_datetime",
"(",
"dts",
"[",
"'UTC_DATE'",
"]",
"+",
"dts",
"[",
"'UTC_TIME'",
"]",
".",
"str",
".",
"zfill",
"(",
"4",
")",
",",
"format",
"=",
"'%Y%m%d%H%M'",
",",
"utc",
"=",
"True",
")",
"data",
"=",
"data",
".",
"set_index",
"(",
"dtindex",
")",
"try",
":",
"# to_datetime(utc=True) does not work in older versions of pandas",
"data",
"=",
"data",
".",
"tz_localize",
"(",
"'UTC'",
")",
"except",
"TypeError",
":",
"pass",
"# set nans",
"for",
"val",
"in",
"[",
"-",
"99",
",",
"-",
"999",
",",
"-",
"9999",
"]",
":",
"data",
"=",
"data",
".",
"where",
"(",
"data",
"!=",
"val",
",",
"np",
".",
"nan",
")",
"data",
"=",
"data",
".",
"rename",
"(",
"columns",
"=",
"VARIABLE_MAP",
")",
"return",
"data"
] | 33.203125 | [
0.043478260869565216,
0.2857142857142857,
0.057971014492753624,
0,
0.14285714285714285,
0.14285714285714285,
0.11764705882352941,
0.03508771929824561,
0,
0.18181818181818182,
0.18181818181818182,
0.10526315789473684,
0.02857142857142857,
0.15384615384615385,
0,
0.2222222222222222,
0.2222222222222222,
0.02857142857142857,
0.02857142857142857,
0.04225352112676056,
0.028985507246376812,
0,
0.029411764705882353,
0.061224489795918366,
0.04477611940298507,
0,
0.14285714285714285,
0.14285714285714285,
0.04878048780487805,
0.09433962264150944,
0.10714285714285714,
0,
0.028169014084507043,
0.04477611940298507,
0.09722222222222222,
0.2857142857142857,
0,
0.1111111111111111,
0.04225352112676056,
0.13513513513513514,
0.027777777777777776,
0.0392156862745098,
0.045454545454545456,
0,
0.13333333333333333,
0.029850746268656716,
0.09090909090909091,
0.038461538461538464,
0.039473684210526314,
0.1016949152542373,
0.058823529411764705,
0.25,
0.0273972602739726,
0.05263157894736842,
0.09523809523809523,
0.16666666666666666,
0,
0.14285714285714285,
0.058823529411764705,
0.043478260869565216,
0,
0.045454545454545456,
0,
0.13333333333333333
] |
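A hedged usage sketch for read_crn above; it mirrors pvlib.iotools.read_crn, and the file name below is illustrative. Only the SOLAR_RADIATION -> ghi rename is stated in the docstring, so no other column names are assumed:

from pvlib.iotools import read_crn

data = read_crn('CRNS0101-05-2019-AZ_Tucson_11_W.txt')
print(data.index[:3])          # UTC DatetimeIndex built from UTC_DATE/UTC_TIME
print(data['ghi'].describe())  # SOLAR_RADIATION renamed via VARIABLE_MAP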
def _roll_negative_time_fields(year, month, day, hour, minute, second):
"""
Fix date/time fields which have nonsense negative values for any field
except for year by rolling the overall date/time value backwards, treating
negative values as relative offsets of the next higher unit.
For example minute=5, second=-63 becomes minute=3, second=57 (5 minutes
less 63 seconds)
This is very unsophisticated handling of negative values which we would
ideally do with `dateutil.relativedelta` but cannot because that class does
not support arbitrary dates, especially not negative years which is the
only case where these nonsense values are likely to occur anyway.
NOTE: To greatly simplify the logic we assume all months are 30 days long.
"""
if second < 0:
minute += int(second / 60.0) # Adjust by whole minute in secs
minute -= 1 # Subtract 1 for negative second
second %= 60 # Convert negative second to positive remainder
if minute < 0:
hour += int(minute / 60.0) # Adjust by whole hour in minutes
hour -= 1 # Subtract 1 for negative minutes
minute %= 60 # Convert negative minute to positive remainder
if hour < 0:
day += int(hour / 24.0) # Adjust by whole day in hours
day -= 1 # Subtract 1 for negative minutes
hour %= 24 # Convert negative hour to positive remainder
if day < 0:
month += int(day / 30.0) # Adjust by whole month in days (assume 30)
month -= 1 # Subtract 1 for negative minutes
day %= 30 # Convert negative day to positive remainder
if month < 0:
year += int(month / 12.0) # Adjust by whole year in months
year -= 1 # Subtract 1 for negative minutes
month %= 12 # Convert negative month to positive remainder
return (year, month, day, hour, minute, second) | [
"def",
"_roll_negative_time_fields",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
")",
":",
"if",
"second",
"<",
"0",
":",
"minute",
"+=",
"int",
"(",
"second",
"/",
"60.0",
")",
"# Adjust by whole minute in secs",
"minute",
"-=",
"1",
"# Subtract 1 for negative second",
"second",
"%=",
"60",
"# Convert negative second to positive remainder",
"if",
"minute",
"<",
"0",
":",
"hour",
"+=",
"int",
"(",
"minute",
"/",
"60.0",
")",
"# Adjust by whole hour in minutes",
"hour",
"-=",
"1",
"# Subtract 1 for negative minutes",
"minute",
"%=",
"60",
"# Convert negative minute to positive remainder",
"if",
"hour",
"<",
"0",
":",
"day",
"+=",
"int",
"(",
"hour",
"/",
"24.0",
")",
"# Adjust by whole day in hours",
"day",
"-=",
"1",
"# Subtract 1 for negative minutes",
"hour",
"%=",
"24",
"# Convert negative hour to positive remainder",
"if",
"day",
"<",
"0",
":",
"month",
"+=",
"int",
"(",
"day",
"/",
"30.0",
")",
"# Adjust by whole month in days (assume 30)",
"month",
"-=",
"1",
"# Subtract 1 for negative minutes",
"day",
"%=",
"30",
"# Convert negative day to positive remainder",
"if",
"month",
"<",
"0",
":",
"year",
"+=",
"int",
"(",
"month",
"/",
"12.0",
")",
"# Adjust by whole year in months",
"year",
"-=",
"1",
"# Subtract 1 for negative minutes",
"month",
"%=",
"12",
"# Convert negative month to positive remainder",
"return",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
")"
] | 49.918919 | [
0.014084507042253521,
0.2857142857142857,
0.02702702702702703,
0.02564102564102564,
0.03125,
0,
0.09333333333333334,
0.15,
0,
0.02666666666666667,
0.0379746835443038,
0.02666666666666667,
0.028985507246376812,
0,
0.02564102564102564,
0.2857142857142857,
0.1111111111111111,
0.02857142857142857,
0.03773584905660377,
0.028985507246376812,
0.1111111111111111,
0.028985507246376812,
0.038461538461538464,
0.028985507246376812,
0.125,
0.031746031746031744,
0.0392156862745098,
0.03076923076923077,
0.13333333333333333,
0.025974025974025976,
0.03773584905660377,
0.031746031746031744,
0.11764705882352941,
0.029850746268656716,
0.038461538461538464,
0.029850746268656716,
0.0392156862745098
] |
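The docstring example for _roll_negative_time_fields above, traced through the code; the year/month/day values are arbitrary placeholders:

# second=-63: minute += int(-63 / 60.0) == -1, then minute -= 1, second %= 60,
# so minute 5 becomes 3 and second -63 becomes 57, as the docstring states.
assert _roll_negative_time_fields(2000, 1, 1, 0, 5, -63) == (2000, 1, 1, 0, 3, 57)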
def merge(self, cluster_ids, to=None):
"""Merge several clusters to a new cluster.
Parameters
----------
cluster_ids : array-like
List of clusters to merge.
to : integer or None
The id of the new cluster. By default, this is `new_cluster_id()`.
Returns
-------
up : UpdateInfo instance
"""
if not _is_array_like(cluster_ids):
raise ValueError("The first argument should be a list or "
"an array.")
cluster_ids = sorted(cluster_ids)
if not set(cluster_ids) <= set(self.cluster_ids):
raise ValueError("Some clusters do not exist.")
# Find the new cluster number.
if to is None:
to = self.new_cluster_id()
if to < self.new_cluster_id():
raise ValueError("The new cluster numbers should be higher than "
"{0}.".format(self.new_cluster_id()))
# NOTE: we could have called self.assign() here, but we don't.
# We circumvent self.assign() for performance reasons.
# assign() is a relatively costly operation, whereas merging is a much
# cheaper operation.
# Find all spikes in the specified clusters.
spike_ids = _spikes_in_clusters(self.spike_clusters, cluster_ids)
up = self._do_merge(spike_ids, cluster_ids, to)
undo_state = self.emit('request_undo_state', up)
# Add to stack.
self._undo_stack.add((spike_ids, [to], undo_state))
self.emit('cluster', up)
return up | [
"def",
"merge",
"(",
"self",
",",
"cluster_ids",
",",
"to",
"=",
"None",
")",
":",
"if",
"not",
"_is_array_like",
"(",
"cluster_ids",
")",
":",
"raise",
"ValueError",
"(",
"\"The first argument should be a list or \"",
"\"an array.\"",
")",
"cluster_ids",
"=",
"sorted",
"(",
"cluster_ids",
")",
"if",
"not",
"set",
"(",
"cluster_ids",
")",
"<=",
"set",
"(",
"self",
".",
"cluster_ids",
")",
":",
"raise",
"ValueError",
"(",
"\"Some clusters do not exist.\"",
")",
"# Find the new cluster number.",
"if",
"to",
"is",
"None",
":",
"to",
"=",
"self",
".",
"new_cluster_id",
"(",
")",
"if",
"to",
"<",
"self",
".",
"new_cluster_id",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"The new cluster numbers should be higher than \"",
"\"{0}.\"",
".",
"format",
"(",
"self",
".",
"new_cluster_id",
"(",
")",
")",
")",
"# NOTE: we could have called self.assign() here, but we don't.",
"# We circumvent self.assign() for performance reasons.",
"# assign() is a relatively costly operation, whereas merging is a much",
"# cheaper operation.",
"# Find all spikes in the specified clusters.",
"spike_ids",
"=",
"_spikes_in_clusters",
"(",
"self",
".",
"spike_clusters",
",",
"cluster_ids",
")",
"up",
"=",
"self",
".",
"_do_merge",
"(",
"spike_ids",
",",
"cluster_ids",
",",
"to",
")",
"undo_state",
"=",
"self",
".",
"emit",
"(",
"'request_undo_state'",
",",
"up",
")",
"# Add to stack.",
"self",
".",
"_undo_stack",
".",
"add",
"(",
"(",
"spike_ids",
",",
"[",
"to",
"]",
",",
"undo_state",
")",
")",
"self",
".",
"emit",
"(",
"'cluster'",
",",
"up",
")",
"return",
"up"
] | 32.020408 | [
0.02631578947368421,
0.0392156862745098,
0,
0.1111111111111111,
0.1111111111111111,
0,
0.09375,
0.05263157894736842,
0.10714285714285714,
0.038461538461538464,
0,
0.13333333333333333,
0.13333333333333333,
0,
0.09375,
0,
0.18181818181818182,
0,
0.046511627906976744,
0.04285714285714286,
0.0975609756097561,
0,
0.04878048780487805,
0.03508771929824561,
0.03389830508474576,
0,
0.05263157894736842,
0.09090909090909091,
0.05263157894736842,
0.05263157894736842,
0.03896103896103896,
0.06060606060606061,
0,
0.02857142857142857,
0.03225806451612903,
0.02564102564102564,
0.07142857142857142,
0,
0.038461538461538464,
0.0273972602739726,
0,
0.03636363636363636,
0.03571428571428571,
0,
0.08695652173913043,
0.03389830508474576,
0,
0.0625,
0.11764705882352941
] |
def mark_experimental(fn):
# type: (FunctionType) -> FunctionType
""" Mark function as experimental.
Args:
fn (FunctionType):
The command function to decorate.
"""
@wraps(fn)
def wrapper(*args, **kw): # pylint: disable=missing-docstring
from peltak.core import shell
if shell.is_tty:
warnings.warn("This command is has experimental status. The "
"interface is not yet stable and might change "
"without notice within with a patch version update. "
"Use at your own risk")
return fn(*args, **kw)
return wrapper | [
"def",
"mark_experimental",
"(",
"fn",
")",
":",
"# type: (FunctionType) -> FunctionType",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"# pylint: disable=missing-docstring",
"from",
"peltak",
".",
"core",
"import",
"shell",
"if",
"shell",
".",
"is_tty",
":",
"warnings",
".",
"warn",
"(",
"\"This command is has experimental status. The \"",
"\"interface is not yet stable and might change \"",
"\"without notice within with a patch version update. \"",
"\"Use at your own risk\"",
")",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"wrapper"
] | 32.85 | [
0.038461538461538464,
0.047619047619047616,
0.05263157894736842,
0,
0.2222222222222222,
0.11538461538461539,
0.044444444444444446,
0.2857142857142857,
0.14285714285714285,
0.029850746268656716,
0.05405405405405406,
0,
0.08333333333333333,
0.0410958904109589,
0.0410958904109589,
0.0379746835443038,
0.08163265306122448,
0.06666666666666667,
0,
0.1111111111111111
] |
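A minimal sketch of decorating a function with mark_experimental above, assuming peltak is installed; the command body is hypothetical, and the warning only fires when shell.is_tty is true (an interactive terminal):

@mark_experimental
def lint(exclude=None):
    """ Hypothetical peltak command body. """
    print('linting, excluding', exclude)

lint(exclude=['build/'])  # may print the "experimental status" warning first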
def delete_record(self, identifier=None, rtype=None, name=None, content=None, **kwargs):
"""
Delete an existing record.
If record does not exist, do nothing.
If an identifier is specified, use it, otherwise do a lookup using type, name and content.
"""
if not rtype and kwargs.get('type'):
warnings.warn('Parameter "type" is deprecated, use "rtype" instead.',
DeprecationWarning)
rtype = kwargs.get('type')
return self._delete_record(identifier=identifier, rtype=rtype, name=name, content=content) | [
"def",
"delete_record",
"(",
"self",
",",
"identifier",
"=",
"None",
",",
"rtype",
"=",
"None",
",",
"name",
"=",
"None",
",",
"content",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"rtype",
"and",
"kwargs",
".",
"get",
"(",
"'type'",
")",
":",
"warnings",
".",
"warn",
"(",
"'Parameter \"type\" is deprecated, use \"rtype\" instead.'",
",",
"DeprecationWarning",
")",
"rtype",
"=",
"kwargs",
".",
"get",
"(",
"'type'",
")",
"return",
"self",
".",
"_delete_record",
"(",
"identifier",
"=",
"identifier",
",",
"rtype",
"=",
"rtype",
",",
"name",
"=",
"name",
",",
"content",
"=",
"content",
")"
] | 49.416667 | [
0.022727272727272728,
0.18181818181818182,
0.058823529411764705,
0.044444444444444446,
0.030612244897959183,
0.18181818181818182,
0.045454545454545456,
0.04938271604938271,
0.08888888888888889,
0.05263157894736842,
0,
0.030612244897959183
] |
def checkIfRemoteIsNewer(self, localfile):
"""
Need to figure out how biothings records releases,
for now if the file exists we will assume it is
a fully downloaded cache
:param localfile: str file path
:return: boolean True if remote file is newer else False
"""
is_remote_newer = False
if localfile.exists() and localfile.stat().st_size > 0:
LOG.info("File exists locally, using cache")
else:
is_remote_newer = True
LOG.info("No cache file, fetching entries")
return is_remote_newer | [
"def",
"checkIfRemoteIsNewer",
"(",
"self",
",",
"localfile",
")",
":",
"is_remote_newer",
"=",
"False",
"if",
"localfile",
".",
"exists",
"(",
")",
"and",
"localfile",
".",
"stat",
"(",
")",
".",
"st_size",
">",
"0",
":",
"LOG",
".",
"info",
"(",
"\"File exists locally, using cache\"",
")",
"else",
":",
"is_remote_newer",
"=",
"True",
"LOG",
".",
"info",
"(",
"\"No cache file, fetching entries\"",
")",
"return",
"is_remote_newer"
] | 39.6 | [
0.023809523809523808,
0.18181818181818182,
0.034482758620689655,
0.03636363636363636,
0.0625,
0.07692307692307693,
0.046875,
0.18181818181818182,
0.06451612903225806,
0.031746031746031744,
0.03571428571428571,
0.15384615384615385,
0.058823529411764705,
0.03636363636363636,
0.06666666666666667
] |
def locate(self, store=current_store):
"""Gets the URL of the image from the ``store``.
:param store: the storage which contains the image.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the url of the image
:rtype: :class:`str`
"""
if not isinstance(store, Store):
raise TypeError('store must be an instance of '
'sqlalchemy_imageattach.store.Store, not ' +
repr(store))
return store.locate(self) | [
"def",
"locate",
"(",
"self",
",",
"store",
"=",
"current_store",
")",
":",
"if",
"not",
"isinstance",
"(",
"store",
",",
"Store",
")",
":",
"raise",
"TypeError",
"(",
"'store must be an instance of '",
"'sqlalchemy_imageattach.store.Store, not '",
"+",
"repr",
"(",
"store",
")",
")",
"return",
"store",
".",
"locate",
"(",
"self",
")"
] | 40.375 | [
0.02631578947368421,
0.03571428571428571,
0,
0.05084745762711865,
0.08,
0.09375,
0.1076923076923077,
0.07894736842105263,
0.25,
0,
0.18181818181818182,
0.05,
0.05084745762711865,
0.027777777777777776,
0.075,
0.06060606060606061
] |
def Introspect(self, object_path, connection):
'''Return XML description of this object's interfaces, methods and signals.
This wraps dbus-python's Introspect() method to include the dynamic
methods and properties.
'''
# temporarily add our dynamic methods
cls = self.__class__.__module__ + '.' + self.__class__.__name__
orig_interfaces = self._dbus_class_table[cls]
mock_interfaces = orig_interfaces.copy()
for interface, methods in self.methods.items():
for method in methods:
mock_interfaces.setdefault(interface, {})[method] = self.methods[interface][method][3]
self._dbus_class_table[cls] = mock_interfaces
xml = dbus.service.Object.Introspect(self, object_path, connection)
tree = ElementTree.fromstring(xml)
for name in self.props:
# We might have properties for new interfaces we don't know about
# yet. Try to find an existing <interface> node named after our
# interface to append to, and create one if we can't.
interface = tree.find(".//interface[@name='%s']" % name)
if interface is None:
interface = ElementTree.Element("interface", {"name": name})
tree.append(interface)
for prop, val in self.props[name].items():
if val is None:
# can't guess type from None, skip
continue
elem = ElementTree.Element("property", {
"name": prop,
# We don't store the signature anywhere, so guess it.
"type": dbus.lowlevel.Message.guess_signature(val),
"access": "readwrite"})
interface.append(elem)
xml = ElementTree.tostring(tree, encoding='utf8', method='xml').decode('utf8')
# restore original class table
self._dbus_class_table[cls] = orig_interfaces
return xml | [
"def",
"Introspect",
"(",
"self",
",",
"object_path",
",",
"connection",
")",
":",
"# temporarily add our dynamic methods",
"cls",
"=",
"self",
".",
"__class__",
".",
"__module__",
"+",
"'.'",
"+",
"self",
".",
"__class__",
".",
"__name__",
"orig_interfaces",
"=",
"self",
".",
"_dbus_class_table",
"[",
"cls",
"]",
"mock_interfaces",
"=",
"orig_interfaces",
".",
"copy",
"(",
")",
"for",
"interface",
",",
"methods",
"in",
"self",
".",
"methods",
".",
"items",
"(",
")",
":",
"for",
"method",
"in",
"methods",
":",
"mock_interfaces",
".",
"setdefault",
"(",
"interface",
",",
"{",
"}",
")",
"[",
"method",
"]",
"=",
"self",
".",
"methods",
"[",
"interface",
"]",
"[",
"method",
"]",
"[",
"3",
"]",
"self",
".",
"_dbus_class_table",
"[",
"cls",
"]",
"=",
"mock_interfaces",
"xml",
"=",
"dbus",
".",
"service",
".",
"Object",
".",
"Introspect",
"(",
"self",
",",
"object_path",
",",
"connection",
")",
"tree",
"=",
"ElementTree",
".",
"fromstring",
"(",
"xml",
")",
"for",
"name",
"in",
"self",
".",
"props",
":",
"# We might have properties for new interfaces we don't know about",
"# yet. Try to find an existing <interface> node named after our",
"# interface to append to, and create one if we can't.",
"interface",
"=",
"tree",
".",
"find",
"(",
"\".//interface[@name='%s']\"",
"%",
"name",
")",
"if",
"interface",
"is",
"None",
":",
"interface",
"=",
"ElementTree",
".",
"Element",
"(",
"\"interface\"",
",",
"{",
"\"name\"",
":",
"name",
"}",
")",
"tree",
".",
"append",
"(",
"interface",
")",
"for",
"prop",
",",
"val",
"in",
"self",
".",
"props",
"[",
"name",
"]",
".",
"items",
"(",
")",
":",
"if",
"val",
"is",
"None",
":",
"# can't guess type from None, skip",
"continue",
"elem",
"=",
"ElementTree",
".",
"Element",
"(",
"\"property\"",
",",
"{",
"\"name\"",
":",
"prop",
",",
"# We don't store the signature anywhere, so guess it.",
"\"type\"",
":",
"dbus",
".",
"lowlevel",
".",
"Message",
".",
"guess_signature",
"(",
"val",
")",
",",
"\"access\"",
":",
"\"readwrite\"",
"}",
")",
"interface",
".",
"append",
"(",
"elem",
")",
"xml",
"=",
"ElementTree",
".",
"tostring",
"(",
"tree",
",",
"encoding",
"=",
"'utf8'",
",",
"method",
"=",
"'xml'",
")",
".",
"decode",
"(",
"'utf8'",
")",
"# restore original class table",
"self",
".",
"_dbus_class_table",
"[",
"cls",
"]",
"=",
"orig_interfaces",
"return",
"xml"
] | 41.765957 | [
0.021739130434782608,
0.03614457831325301,
0,
0.02666666666666667,
0.06451612903225806,
0.18181818181818182,
0.044444444444444446,
0.028169014084507043,
0.03773584905660377,
0,
0.041666666666666664,
0.03636363636363636,
0.058823529411764705,
0.029411764705882353,
0.03773584905660377,
0,
0.02666666666666667,
0,
0.047619047619047616,
0,
0.06451612903225806,
0.025974025974025976,
0.02666666666666667,
0.03076923076923077,
0.029411764705882353,
0.06060606060606061,
0.02631578947368421,
0.05263157894736842,
0,
0.037037037037037035,
0.06451612903225806,
0.037037037037037035,
0.07142857142857142,
0.05357142857142857,
0.06060606060606061,
0.0273972602739726,
0.028169014084507043,
0.06976744186046512,
0,
0.05263157894736842,
0,
0.03488372093023256,
0,
0.05263157894736842,
0.03773584905660377,
0,
0.1111111111111111
] |
def finalize_content(self):
""" Finalize the additions """
self.write_closed = True
body = self.raw_body.decode(self.encoding)
self._init_xml(body)
self._form_output() | [
"def",
"finalize_content",
"(",
"self",
")",
":",
"self",
".",
"write_closed",
"=",
"True",
"body",
"=",
"self",
".",
"raw_body",
".",
"decode",
"(",
"self",
".",
"encoding",
")",
"self",
".",
"_init_xml",
"(",
"body",
")",
"self",
".",
"_form_output",
"(",
")"
] | 33.5 | [
0.037037037037037035,
0.05405405405405406,
0.0625,
0.04,
0.07142857142857142,
0.07407407407407407
] |
def tokenize(self: object, untokenized_string: str, include_blanks=False):
"""Tokenize lines by '\n'.
:type untokenized_string: str
:param untokenized_string: A string containing one or more sentences.
:param include_blanks: Boolean; If True, blanks will be preserved by "" in returned list of strings; Default is False.
:rtype : list of strings
"""
# load tokenizer
assert isinstance(untokenized_string, str), 'Incoming argument must be a string.'
# make list of tokenized sentences
if include_blanks:
tokenized_lines = untokenized_string.splitlines()
else:
tokenized_lines = [line for line in untokenized_string.splitlines() if line != '']
return tokenized_lines | [
"def",
"tokenize",
"(",
"self",
":",
"object",
",",
"untokenized_string",
":",
"str",
",",
"include_blanks",
"=",
"False",
")",
":",
"# load tokenizer",
"assert",
"isinstance",
"(",
"untokenized_string",
",",
"str",
")",
",",
"'Incoming argument must be a string.'",
"# make list of tokenized sentences",
"if",
"include_blanks",
":",
"tokenized_lines",
"=",
"untokenized_string",
".",
"splitlines",
"(",
")",
"else",
":",
"tokenized_lines",
"=",
"[",
"line",
"for",
"line",
"in",
"untokenized_string",
".",
"splitlines",
"(",
")",
"if",
"line",
"!=",
"''",
"]",
"return",
"tokenized_lines"
] | 45.764706 | [
0.013513513513513514,
0.058823529411764705,
0.08108108108108109,
0.03896103896103896,
0.047619047619047616,
0.125,
0.18181818181818182,
0.25,
0.08333333333333333,
0.033707865168539325,
0,
0.047619047619047616,
0.07692307692307693,
0.03278688524590164,
0.15384615384615385,
0.031914893617021274,
0.06666666666666667
] |
def extract_lzma(archive, compression, cmd, verbosity, interactive, outdir):
"""Extract an LZMA archive with the lzma Python module."""
return _extract(archive, compression, cmd, 'alone', verbosity, outdir) | [
"def",
"extract_lzma",
"(",
"archive",
",",
"compression",
",",
"cmd",
",",
"verbosity",
",",
"interactive",
",",
"outdir",
")",
":",
"return",
"_extract",
"(",
"archive",
",",
"compression",
",",
"cmd",
",",
"'alone'",
",",
"verbosity",
",",
"outdir",
")"
] | 70.666667 | [
0.013157894736842105,
0.03225806451612903,
0.02702702702702703
] |
def _write_mflist_ins(ins_filename,df,prefix):
""" write an instruction file for a MODFLOW list file
Parameters
----------
ins_filename : str
name of the instruction file to write
df : pandas.DataFrame
the dataframe of list file entries
prefix : str
the prefix to add to the column names to form
observation names
"""
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
name_len = 11 - (len(prefix)+1)
with open(ins_filename,'w') as f:
f.write('pif ~\nl1\n')
for dt in dt_str:
f.write("l1 ")
for col in df.columns:
obsnme = "{0}_{1}_{2}".format(prefix,col[:name_len],dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n") | [
"def",
"_write_mflist_ins",
"(",
"ins_filename",
",",
"df",
",",
"prefix",
")",
":",
"dt_str",
"=",
"df",
".",
"index",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
")",
"name_len",
"=",
"11",
"-",
"(",
"len",
"(",
"prefix",
")",
"+",
"1",
")",
"with",
"open",
"(",
"ins_filename",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'pif ~\\nl1\\n'",
")",
"for",
"dt",
"in",
"dt_str",
":",
"f",
".",
"write",
"(",
"\"l1 \"",
")",
"for",
"col",
"in",
"df",
".",
"columns",
":",
"obsnme",
"=",
"\"{0}_{1}_{2}\"",
".",
"format",
"(",
"prefix",
",",
"col",
"[",
":",
"name_len",
"]",
",",
"dt",
")",
"f",
".",
"write",
"(",
"\" w !{0}!\"",
".",
"format",
"(",
"obsnme",
")",
")",
"f",
".",
"write",
"(",
"\"\\n\"",
")"
] | 29.115385 | [
0.06521739130434782,
0.03508771929824561,
0,
0.14285714285714285,
0.14285714285714285,
0.13636363636363635,
0.044444444444444446,
0.12,
0.047619047619047616,
0.1875,
0.03773584905660377,
0.07692307692307693,
0,
0.2857142857142857,
0,
0.03508771929824561,
0.05714285714285714,
0.08108108108108109,
0.06666666666666667,
0,
0.08,
0.07692307692307693,
0.058823529411764705,
0.056338028169014086,
0.04,
0.08
] |
def get_figure(new_fig=True, subplot='111', params=None):
"""
Function to be used for viewing - plotting,
to initialize the matplotlib figure - axes.
Args:
new_fig(bool): Defines if a new figure will be created; if False, the current figure is used
subplot (tuple or matplotlib subplot specifier string): Create axes with these parameters
params (dict): extra options passed to add_subplot()
Returns:
Matplotlib Figure and Axes
"""
_get_plt()
if new_fig:
fig = plt.figure()
else:
fig = plt.gcf()
params = dict_if_none(params)
if isinstance(subplot, (tuple, list)):
ax = fig.add_subplot(*subplot, **params)
else:
ax = fig.add_subplot(subplot, **params)
return fig, ax | [
"def",
"get_figure",
"(",
"new_fig",
"=",
"True",
",",
"subplot",
"=",
"'111'",
",",
"params",
"=",
"None",
")",
":",
"_get_plt",
"(",
")",
"if",
"new_fig",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"else",
":",
"fig",
"=",
"plt",
".",
"gcf",
"(",
")",
"params",
"=",
"dict_if_none",
"(",
"params",
")",
"if",
"isinstance",
"(",
"subplot",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"*",
"subplot",
",",
"*",
"*",
"params",
")",
"else",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"subplot",
",",
"*",
"*",
"params",
")",
"return",
"fig",
",",
"ax"
] | 26.964286 | [
0.017543859649122806,
0.2857142857142857,
0.0425531914893617,
0.0425531914893617,
0,
0.2222222222222222,
0.031578947368421054,
0.041666666666666664,
0.05,
0,
0.16666666666666666,
0.058823529411764705,
0.2857142857142857,
0.14285714285714285,
0,
0.13333333333333333,
0.07692307692307693,
0.2222222222222222,
0.08695652173913043,
0,
0.06060606060606061,
0,
0.047619047619047616,
0.041666666666666664,
0.2222222222222222,
0.0425531914893617,
0,
0.1111111111111111
] |
def _load_next(self):
"""Load the next day's data (or file) without incrementing the date.
Repeated calls will not advance date/file and will produce the same data
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
next_date = self.date + pds.DateOffset(days=1)
return self._load_data(date=next_date)
else:
return self._load_data(fid=self._fid+1) | [
"def",
"_load_next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_load_by_date",
":",
"next_date",
"=",
"self",
".",
"date",
"+",
"pds",
".",
"DateOffset",
"(",
"days",
"=",
"1",
")",
"return",
"self",
".",
"_load_data",
"(",
"date",
"=",
"next_date",
")",
"else",
":",
"return",
"self",
".",
"_load_data",
"(",
"fid",
"=",
"self",
".",
"_fid",
"+",
"1",
")"
] | 40.538462 | [
0.047619047619047616,
0.02666666666666667,
0.0375,
0.25,
0.046153846153846156,
0.05357142857142857,
0.2222222222222222,
0.18181818181818182,
0.06666666666666667,
0.034482758620689655,
0.04,
0.15384615384615385,
0.0392156862745098
] |
def pksign(self, conn):
"""Sign a message digest using a private EC key."""
log.debug('signing %r digest (algo #%s)', self.digest, self.algo)
identity = self.get_identity(keygrip=self.keygrip)
r, s = self.client.sign(identity=identity,
digest=binascii.unhexlify(self.digest))
result = sig_encode(r, s)
log.debug('result: %r', result)
keyring.sendline(conn, b'D ' + result) | [
"def",
"pksign",
"(",
"self",
",",
"conn",
")",
":",
"log",
".",
"debug",
"(",
"'signing %r digest (algo #%s)'",
",",
"self",
".",
"digest",
",",
"self",
".",
"algo",
")",
"identity",
"=",
"self",
".",
"get_identity",
"(",
"keygrip",
"=",
"self",
".",
"keygrip",
")",
"r",
",",
"s",
"=",
"self",
".",
"client",
".",
"sign",
"(",
"identity",
"=",
"identity",
",",
"digest",
"=",
"binascii",
".",
"unhexlify",
"(",
"self",
".",
"digest",
")",
")",
"result",
"=",
"sig_encode",
"(",
"r",
",",
"s",
")",
"log",
".",
"debug",
"(",
"'result: %r'",
",",
"result",
")",
"keyring",
".",
"sendline",
"(",
"conn",
",",
"b'D '",
"+",
"result",
")"
] | 50.222222 | [
0.043478260869565216,
0.03389830508474576,
0.0273972602739726,
0.034482758620689655,
0.06,
0.056338028169014086,
0.06060606060606061,
0.05128205128205128,
0.043478260869565216
] |
def parse_fileshare_or_file_snapshot_parameter(url):
# type: (str) -> Tuple[str, str]
"""Checks if the fileshare or file is a snapshot
:param url str: file url
:rtype: tuple
:return: (url, snapshot)
"""
if is_not_empty(url):
if '?sharesnapshot=' in url:
try:
tmp = url.split('?sharesnapshot=')
if len(tmp) == 2:
dateutil.parser.parse(tmp[1])
return tmp[0], tmp[1]
except (ValueError, OverflowError):
pass
elif '?snapshot=' in url:
try:
tmp = url.split('?snapshot=')
if len(tmp) == 2:
dateutil.parser.parse(tmp[1])
return tmp[0], tmp[1]
except (ValueError, OverflowError):
pass
return url, None | [
"def",
"parse_fileshare_or_file_snapshot_parameter",
"(",
"url",
")",
":",
"# type: (str) -> Tuple[str, str]",
"if",
"is_not_empty",
"(",
"url",
")",
":",
"if",
"'?sharesnapshot='",
"in",
"url",
":",
"try",
":",
"tmp",
"=",
"url",
".",
"split",
"(",
"'?sharesnapshot='",
")",
"if",
"len",
"(",
"tmp",
")",
"==",
"2",
":",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"tmp",
"[",
"1",
"]",
")",
"return",
"tmp",
"[",
"0",
"]",
",",
"tmp",
"[",
"1",
"]",
"except",
"(",
"ValueError",
",",
"OverflowError",
")",
":",
"pass",
"elif",
"'?snapshot='",
"in",
"url",
":",
"try",
":",
"tmp",
"=",
"url",
".",
"split",
"(",
"'?snapshot='",
")",
"if",
"len",
"(",
"tmp",
")",
"==",
"2",
":",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"tmp",
"[",
"1",
"]",
")",
"return",
"tmp",
"[",
"0",
"]",
",",
"tmp",
"[",
"1",
"]",
"except",
"(",
"ValueError",
",",
"OverflowError",
")",
":",
"pass",
"return",
"url",
",",
"None"
] | 33.64 | [
0.019230769230769232,
0.05555555555555555,
0.038461538461538464,
0.10714285714285714,
0.17647058823529413,
0.10714285714285714,
0.2857142857142857,
0.08,
0.05555555555555555,
0.125,
0.04,
0.06060606060606061,
0.04081632653061224,
0.04878048780487805,
0.0425531914893617,
0.1,
0.06060606060606061,
0.125,
0.044444444444444446,
0.06060606060606061,
0.04081632653061224,
0.04878048780487805,
0.0425531914893617,
0.1,
0.1
] |
def plot_autocorr(data, var_names=None, max_lag=100, combined=False, figsize=None, textsize=None):
"""Bar plot of the autocorrelation function for a sequence of data.
Useful in particular for posteriors from MCMC samples which may display correlation.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
var_names : list of variable names, optional
Variables to be plotted; if None, all variables are plotted.
Vector-valued stochastics are handled automatically.
max_lag : int, optional
Maximum lag to calculate autocorrelation. Defaults to 100.
combined : bool
Flag for combining multiple chains into a single chain. If False (default), chains will be
plotted separately.
figsize : tuple
Figure size. If None it will be defined automatically.
Note this is not used if ax is supplied.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
Returns
-------
axes : matplotlib axes
Examples
--------
Plot default autocorrelation
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_autocorr(data)
Plot subset variables by specifying variable name exactly
.. plot::
:context: close-figs
>>> az.plot_autocorr(data, var_names=['mu', 'tau'] )
Combine chains collapsing by variable
.. plot::
:context: close-figs
>>> az.plot_autocorr(data, var_names=['mu', 'tau'], combined=True)
Specify maximum lag (x axis bound)
.. plot::
:context: close-figs
>>> az.plot_autocorr(data, var_names=['mu', 'tau'], max_lag=200, combined=True)
"""
data = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, data)
plotters = list(xarray_var_iter(data, var_names, combined))
length_plotters = len(plotters)
rows, cols = default_grid(length_plotters)
figsize, _, titlesize, xt_labelsize, linewidth, _ = _scale_fig_size(
figsize, textsize, rows, cols
)
_, axes = _create_axes_grid(
length_plotters, rows, cols, figsize=figsize, squeeze=False, sharex=True, sharey=True
)
axes = np.atleast_2d(axes) # in case of only 1 plot
for (var_name, selection, x), ax in zip(plotters, axes.flatten()):
x_prime = x
if combined:
x_prime = x.flatten()
y = autocorr(x_prime)
ax.vlines(x=np.arange(0, max_lag), ymin=0, ymax=y[0:max_lag], lw=linewidth)
ax.hlines(0, 0, max_lag, "steelblue")
ax.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)
ax.tick_params(labelsize=xt_labelsize)
if axes.size > 0:
axes[0, 0].set_xlim(0, max_lag)
axes[0, 0].set_ylim(-1, 1)
return axes | [
"def",
"plot_autocorr",
"(",
"data",
",",
"var_names",
"=",
"None",
",",
"max_lag",
"=",
"100",
",",
"combined",
"=",
"False",
",",
"figsize",
"=",
"None",
",",
"textsize",
"=",
"None",
")",
":",
"data",
"=",
"convert_to_dataset",
"(",
"data",
",",
"group",
"=",
"\"posterior\"",
")",
"var_names",
"=",
"_var_names",
"(",
"var_names",
",",
"data",
")",
"plotters",
"=",
"list",
"(",
"xarray_var_iter",
"(",
"data",
",",
"var_names",
",",
"combined",
")",
")",
"length_plotters",
"=",
"len",
"(",
"plotters",
")",
"rows",
",",
"cols",
"=",
"default_grid",
"(",
"length_plotters",
")",
"figsize",
",",
"_",
",",
"titlesize",
",",
"xt_labelsize",
",",
"linewidth",
",",
"_",
"=",
"_scale_fig_size",
"(",
"figsize",
",",
"textsize",
",",
"rows",
",",
"cols",
")",
"_",
",",
"axes",
"=",
"_create_axes_grid",
"(",
"length_plotters",
",",
"rows",
",",
"cols",
",",
"figsize",
"=",
"figsize",
",",
"squeeze",
"=",
"False",
",",
"sharex",
"=",
"True",
",",
"sharey",
"=",
"True",
")",
"axes",
"=",
"np",
".",
"atleast_2d",
"(",
"axes",
")",
"# in case of only 1 plot",
"for",
"(",
"var_name",
",",
"selection",
",",
"x",
")",
",",
"ax",
"in",
"zip",
"(",
"plotters",
",",
"axes",
".",
"flatten",
"(",
")",
")",
":",
"x_prime",
"=",
"x",
"if",
"combined",
":",
"x_prime",
"=",
"x",
".",
"flatten",
"(",
")",
"y",
"=",
"autocorr",
"(",
"x_prime",
")",
"ax",
".",
"vlines",
"(",
"x",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"max_lag",
")",
",",
"ymin",
"=",
"0",
",",
"ymax",
"=",
"y",
"[",
"0",
":",
"max_lag",
"]",
",",
"lw",
"=",
"linewidth",
")",
"ax",
".",
"hlines",
"(",
"0",
",",
"0",
",",
"max_lag",
",",
"\"steelblue\"",
")",
"ax",
".",
"set_title",
"(",
"make_label",
"(",
"var_name",
",",
"selection",
")",
",",
"fontsize",
"=",
"titlesize",
",",
"wrap",
"=",
"True",
")",
"ax",
".",
"tick_params",
"(",
"labelsize",
"=",
"xt_labelsize",
")",
"if",
"axes",
".",
"size",
">",
"0",
":",
"axes",
"[",
"0",
",",
"0",
"]",
".",
"set_xlim",
"(",
"0",
",",
"max_lag",
")",
"axes",
"[",
"0",
",",
"0",
"]",
".",
"set_ylim",
"(",
"-",
"1",
",",
"1",
")",
"return",
"axes"
] | 30.360825 | [
0.02040816326530612,
0.028169014084507043,
0,
0.03409090909090909,
0,
0.14285714285714285,
0.14285714285714285,
0.21428571428571427,
0.02857142857142857,
0.029850746268656716,
0.0625,
0.030303030303030304,
0.03389830508474576,
0.1111111111111111,
0.030303030303030304,
0.15789473684210525,
0.030612244897959183,
0.07407407407407407,
0.15789473684210525,
0.03225806451612903,
0.041666666666666664,
0.10526315789473684,
0.030612244897959183,
0.10526315789473684,
0,
0.18181818181818182,
0.18181818181818182,
0.11538461538461539,
0,
0.16666666666666666,
0.16666666666666666,
0.0625,
0,
0.23076923076923078,
0.10714285714285714,
0,
0.1,
0.05454545454545454,
0.08823529411764706,
0,
0.03278688524590164,
0,
0.23076923076923078,
0.10714285714285714,
0,
0.06666666666666667,
0,
0,
0.04878048780487805,
0,
0.23076923076923078,
0.10714285714285714,
0,
0.04054054054054054,
0,
0,
0.07894736842105263,
0,
0.23076923076923078,
0.10714285714285714,
0,
0.04597701149425287,
0.2857142857142857,
0.037037037037037035,
0.046511627906976744,
0,
0.031746031746031744,
0.05714285714285714,
0.043478260869565216,
0,
0.041666666666666664,
0.05405405405405406,
0.6,
0,
0.09375,
0.07526881720430108,
0.6,
0,
0.03571428571428571,
0.02857142857142857,
0.10526315789473684,
0,
0.1,
0.06060606060606061,
0,
0.06896551724137931,
0,
0.03614457831325301,
0.044444444444444446,
0.03571428571428571,
0.043478260869565216,
0,
0.09523809523809523,
0.05128205128205128,
0.058823529411764705,
0,
0.13333333333333333
] |
def failed_update(self, exception):
"""Update cluster state given a failed MetadataRequest."""
f = None
with self._lock:
if self._future:
f = self._future
self._future = None
if f:
f.failure(exception)
self._last_refresh_ms = time.time() * 1000 | [
"def",
"failed_update",
"(",
"self",
",",
"exception",
")",
":",
"f",
"=",
"None",
"with",
"self",
".",
"_lock",
":",
"if",
"self",
".",
"_future",
":",
"f",
"=",
"self",
".",
"_future",
"self",
".",
"_future",
"=",
"None",
"if",
"f",
":",
"f",
".",
"failure",
"(",
"exception",
")",
"self",
".",
"_last_refresh_ms",
"=",
"time",
".",
"time",
"(",
")",
"*",
"1000"
] | 33.1 | [
0.02857142857142857,
0.030303030303030304,
0.125,
0.08333333333333333,
0.07142857142857142,
0.0625,
0.05714285714285714,
0.15384615384615385,
0.0625,
0.04
] |
def data_group_type(self, group_data):
"""Return dict representation of group data.
Args:
group_data (dict|obj): The group data dict or object.
Returns:
dict: The group data in dict format.
"""
if isinstance(group_data, dict):
# process file content
file_content = group_data.pop('fileContent', None)
if file_content is not None:
self._files[group_data.get('xid')] = {
'fileContent': file_content,
'type': group_data.get('type'),
}
else:
GROUPS_STRINGS_WITH_FILE_CONTENTS = ['Document', 'Report']
# process file content
if group_data.data.get('type') in GROUPS_STRINGS_WITH_FILE_CONTENTS:
self._files[group_data.data.get('xid')] = group_data.file_data
group_data = group_data.data
return group_data | [
"def",
"data_group_type",
"(",
"self",
",",
"group_data",
")",
":",
"if",
"isinstance",
"(",
"group_data",
",",
"dict",
")",
":",
"# process file content",
"file_content",
"=",
"group_data",
".",
"pop",
"(",
"'fileContent'",
",",
"None",
")",
"if",
"file_content",
"is",
"not",
"None",
":",
"self",
".",
"_files",
"[",
"group_data",
".",
"get",
"(",
"'xid'",
")",
"]",
"=",
"{",
"'fileContent'",
":",
"file_content",
",",
"'type'",
":",
"group_data",
".",
"get",
"(",
"'type'",
")",
",",
"}",
"else",
":",
"GROUPS_STRINGS_WITH_FILE_CONTENTS",
"=",
"[",
"'Document'",
",",
"'Report'",
"]",
"# process file content",
"if",
"group_data",
".",
"data",
".",
"get",
"(",
"'type'",
")",
"in",
"GROUPS_STRINGS_WITH_FILE_CONTENTS",
":",
"self",
".",
"_files",
"[",
"group_data",
".",
"data",
".",
"get",
"(",
"'xid'",
")",
"]",
"=",
"group_data",
".",
"file_data",
"group_data",
"=",
"group_data",
".",
"data",
"return",
"group_data"
] | 38.708333 | [
0.02631578947368421,
0.038461538461538464,
0,
0.15384615384615385,
0.06153846153846154,
0,
0.125,
0.041666666666666664,
0.18181818181818182,
0.05,
0.058823529411764705,
0.03225806451612903,
0.05,
0.05555555555555555,
0.041666666666666664,
0.0392156862745098,
0.17647058823529413,
0.15384615384615385,
0.02857142857142857,
0.058823529411764705,
0.0375,
0.02564102564102564,
0.05,
0.08
] |
def autoencoder_residual_discrete_big():
"""Residual discrete autoencoder model, big version."""
hparams = autoencoder_residual_discrete()
hparams.hidden_size = 128
hparams.max_hidden_size = 4096
hparams.bottleneck_noise = 0.1
hparams.residual_dropout = 0.4
return hparams | [
"def",
"autoencoder_residual_discrete_big",
"(",
")",
":",
"hparams",
"=",
"autoencoder_residual_discrete",
"(",
")",
"hparams",
".",
"hidden_size",
"=",
"128",
"hparams",
".",
"max_hidden_size",
"=",
"4096",
"hparams",
".",
"bottleneck_noise",
"=",
"0.1",
"hparams",
".",
"residual_dropout",
"=",
"0.4",
"return",
"hparams"
] | 34.875 | [
0.025,
0.05263157894736842,
0.06976744186046512,
0.1111111111111111,
0.09375,
0.09375,
0.09375,
0.1875
] |
def to_json(self):
"""
Serialize object to json dict
:return: dict
"""
res = dict()
res['Handle'] = ''
res['Name'] = self.name
res['ImageUrl'] = self.url
res['Description'] = self.desc
res["EntityOptions"] = self.options
return res | [
"def",
"to_json",
"(",
"self",
")",
":",
"res",
"=",
"dict",
"(",
")",
"res",
"[",
"'Handle'",
"]",
"=",
"''",
"res",
"[",
"'Name'",
"]",
"=",
"self",
".",
"name",
"res",
"[",
"'ImageUrl'",
"]",
"=",
"self",
".",
"url",
"res",
"[",
"'Description'",
"]",
"=",
"self",
".",
"desc",
"res",
"[",
"\"EntityOptions\"",
"]",
"=",
"self",
".",
"options",
"return",
"res"
] | 23.692308 | [
0.05555555555555555,
0.18181818181818182,
0.05405405405405406,
0,
0.14285714285714285,
0.18181818181818182,
0.1,
0.07692307692307693,
0.06451612903225806,
0.058823529411764705,
0.05263157894736842,
0.046511627906976744,
0.1111111111111111
] |
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator | [
"def",
"view",
"(",
"tpl_name",
",",
"*",
"*",
"defaults",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"result",
",",
"(",
"dict",
",",
"DictMixin",
")",
")",
":",
"tplvars",
"=",
"defaults",
".",
"copy",
"(",
")",
"tplvars",
".",
"update",
"(",
"result",
")",
"return",
"template",
"(",
"tpl_name",
",",
"*",
"*",
"tplvars",
")",
"elif",
"result",
"is",
"None",
":",
"return",
"template",
"(",
"tpl_name",
",",
"defaults",
")",
"return",
"result",
"return",
"wrapper",
"return",
"decorator"
] | 41.608696 | [
0.03225806451612903,
0.038461538461538464,
0.03636363636363636,
0,
0.04477611940298507,
0.038461538461538464,
0.04285714285714286,
0.03125,
0.030303030303030304,
0.2857142857142857,
0.08333333333333333,
0.06666666666666667,
0.05405405405405406,
0.047619047619047616,
0.03773584905660377,
0.04878048780487805,
0.05263157894736842,
0.038461538461538464,
0.0625,
0.0392156862745098,
0.08,
0.09090909090909091,
0.1
] |
def random(self, count=1, **kwargs):
"""
Retrieve a single random photo, given optional filters.
Note: If supplying multiple category IDs,
the resulting photos will be those that
match all of the given categories, not ones that match any category.
Note: You can’t use the collections and query parameters in the same request
Note: When supplying a count parameter
- and only then - the response will be an array of photos,
even if the value of count is 1.
All parameters are optional, and can be combined to narrow
the pool of photos from which a random one will be chosen.
:param count [integer]: The number of photos to return. (Default: 1; max: 30)
:param category: Category ID(‘s) to filter selection. If multiple, comma-separated. (deprecated)
:param collections: Public collection ID(‘s) to filter selection. If multiple, comma-separated
:param featured: Limit selection to featured photos.
:param username: Limit selection to a single user.
:param query: Limit selection to photos matching a search term.
:param w: Image width in pixels.
:param h: Image height in pixels.
:param orientation: Filter search results by photo orientation.
Valid values are landscape, portrait, and squarish.
:return: [Array] or [Photo]: A single page of the curated Photo list or The Unsplash Photo.
:raise UnsplashError: If the given orientation is not in the default orientation values.
"""
kwargs.update({"count": count})
orientation = kwargs.get("orientation", None)
if orientation and orientation not in self.orientation_values:
raise Exception()
url = "/photos/random"
result = self._get(url, params=kwargs)
return PhotoModel.parse_list(result) | [
"def",
"random",
"(",
"self",
",",
"count",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"{",
"\"count\"",
":",
"count",
"}",
")",
"orientation",
"=",
"kwargs",
".",
"get",
"(",
"\"orientation\"",
",",
"None",
")",
"if",
"orientation",
"and",
"orientation",
"not",
"in",
"self",
".",
"orientation_values",
":",
"raise",
"Exception",
"(",
")",
"url",
"=",
"\"/photos/random\"",
"result",
"=",
"self",
".",
"_get",
"(",
"url",
",",
"params",
"=",
"kwargs",
")",
"return",
"PhotoModel",
".",
"parse_list",
"(",
"result",
")"
] | 50.324324 | [
0.027777777777777776,
0.18181818181818182,
0.031746031746031744,
0,
0.04,
0.0425531914893617,
0.02631578947368421,
0,
0.03571428571428571,
0,
0.043478260869565216,
0.030303030303030304,
0.05,
0,
0.030303030303030304,
0.030303030303030304,
0,
0.07058823529411765,
0.038461538461538464,
0.0392156862745098,
0.05,
0.05084745762711865,
0.04225352112676056,
0.075,
0.07317073170731707,
0.04225352112676056,
0.03389830508474576,
0.039603960396039604,
0.041666666666666664,
0.18181818181818182,
0.05128205128205128,
0.03773584905660377,
0.02857142857142857,
0.06896551724137931,
0.06666666666666667,
0.043478260869565216,
0.045454545454545456
] |
def _get_fans(shape):
r"""Returns the size of input dimension and output dimension, given `shape`.
Args:
shape: A list of integers.
Returns:
fan_in: An int. The value of input dimension.
fan_out: An int. The value of output dimension.
"""
if len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
elif len(shape) == 4 or len(shape) == 5:
# assuming convolution kernels (2D or 3D).
kernel_size = np.prod(shape[:2])
fan_in = shape[-2] * kernel_size
fan_out = shape[-1] * kernel_size
else:
# no specific assumptions
fan_in = np.sqrt(np.prod(shape))
fan_out = np.sqrt(np.prod(shape))
return fan_in, fan_out | [
"def",
"_get_fans",
"(",
"shape",
")",
":",
"if",
"len",
"(",
"shape",
")",
"==",
"2",
":",
"fan_in",
"=",
"shape",
"[",
"0",
"]",
"fan_out",
"=",
"shape",
"[",
"1",
"]",
"elif",
"len",
"(",
"shape",
")",
"==",
"4",
"or",
"len",
"(",
"shape",
")",
"==",
"5",
":",
"# assuming convolution kernels (2D or 3D).",
"kernel_size",
"=",
"np",
".",
"prod",
"(",
"shape",
"[",
":",
"2",
"]",
")",
"fan_in",
"=",
"shape",
"[",
"-",
"2",
"]",
"*",
"kernel_size",
"fan_out",
"=",
"shape",
"[",
"-",
"1",
"]",
"*",
"kernel_size",
"else",
":",
"# no specific assumptions",
"fan_in",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"prod",
"(",
"shape",
")",
")",
"fan_out",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"prod",
"(",
"shape",
")",
")",
"return",
"fan_in",
",",
"fan_out"
] | 30.913043 | [
0.047619047619047616,
0.0375,
0.5,
0.2222222222222222,
0.09375,
0.5,
0.16666666666666666,
0.058823529411764705,
0.05660377358490566,
0.2857142857142857,
0.08695652173913043,
0.08,
0.07692307692307693,
0.045454545454545456,
0.04,
0.05,
0.05,
0.04878048780487805,
0.2222222222222222,
0.06060606060606061,
0.05,
0.04878048780487805,
0.07692307692307693
] |
def check_name(name, safe_chars):
'''
Check whether the specified name contains invalid characters
'''
regexp = re.compile('[^{0}]'.format(safe_chars))
if regexp.search(name):
raise SaltCloudException(
'{0} contains characters not supported by this cloud provider. '
'Valid characters are: {1}'.format(
name, safe_chars
)
) | [
"def",
"check_name",
"(",
"name",
",",
"safe_chars",
")",
":",
"regexp",
"=",
"re",
".",
"compile",
"(",
"'[^{0}]'",
".",
"format",
"(",
"safe_chars",
")",
")",
"if",
"regexp",
".",
"search",
"(",
"name",
")",
":",
"raise",
"SaltCloudException",
"(",
"'{0} contains characters not supported by this cloud provider. '",
"'Valid characters are: {1}'",
".",
"format",
"(",
"name",
",",
"safe_chars",
")",
")"
] | 33.333333 | [
0.030303030303030304,
0.2857142857142857,
0.03125,
0.2857142857142857,
0.038461538461538464,
0.07407407407407407,
0.09090909090909091,
0.02631578947368421,
0.06382978723404255,
0.0625,
0.23076923076923078,
0.3333333333333333
] |
def metrics(*metrics):
"""
Given a list of metrics, provides a builder that in turn computes metrics from a column.
See the documentation of [[Summarizer]] for an example.
The following metrics are accepted (case sensitive):
- mean: a vector that contains the coefficient-wise mean.
- variance: a vector that contains the coefficient-wise variance.
- count: the count of all vectors seen.
- numNonzeros: a vector with the number of non-zeros for each coefficients
- max: the maximum for each coefficient.
- min: the minimum for each coefficient.
- normL2: the Euclidean norm for each coefficient.
- normL1: the L1 norm of each coefficient (sum of the absolute values).
:param metrics:
metrics that can be provided.
:return:
an object of :py:class:`pyspark.ml.stat.SummaryBuilder`
Note: Currently, the performance of this interface is about 2x~3x slower than using the RDD
interface.
"""
sc = SparkContext._active_spark_context
js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics",
_to_seq(sc, metrics))
return SummaryBuilder(js) | [
"def",
"metrics",
"(",
"*",
"metrics",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"js",
"=",
"JavaWrapper",
".",
"_new_java_obj",
"(",
"\"org.apache.spark.ml.stat.Summarizer.metrics\"",
",",
"_to_seq",
"(",
"sc",
",",
"metrics",
")",
")",
"return",
"SummaryBuilder",
"(",
"js",
")"
] | 44.785714 | [
0.045454545454545456,
0.18181818181818182,
0.030927835051546393,
0,
0.047619047619047616,
0,
0.05,
0.045454545454545456,
0.0410958904109589,
0.0625,
0.04819277108433735,
0.061224489795918366,
0.061224489795918366,
0.05084745762711865,
0.0625,
0,
0.13043478260869565,
0.07894736842105263,
0.1875,
0.125,
0,
0.030303030303030304,
0.1111111111111111,
0.18181818181818182,
0.0425531914893617,
0.047058823529411764,
0.06666666666666667,
0.06060606060606061
] |
def accepts(**schemas):
"""Create a decorator for validating function parameters.
Example::
@accepts(a="number", body={"+field_ids": [int], "is_ok": bool})
def f(a, body):
print (a, body["field_ids"], body.get("is_ok"))
:param schemas: The schema for validating a given parameter.
"""
validate = parse(schemas).validate
@decorator
def validating(func, *args, **kwargs):
validate(inspect.getcallargs(func, *args, **kwargs), adapt=False)
return func(*args, **kwargs)
return validating | [
"def",
"accepts",
"(",
"*",
"*",
"schemas",
")",
":",
"validate",
"=",
"parse",
"(",
"schemas",
")",
".",
"validate",
"@",
"decorator",
"def",
"validating",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"validate",
"(",
"inspect",
".",
"getcallargs",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"adapt",
"=",
"False",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"validating"
] | 30.277778 | [
0.043478260869565216,
0.03278688524590164,
0,
0.23076923076923078,
0,
0.028169014084507043,
0.08695652173913043,
0.05084745762711865,
0,
0.046875,
0.2857142857142857,
0.05263157894736842,
0,
0.14285714285714285,
0.047619047619047616,
0.0273972602739726,
0.05555555555555555,
0.09523809523809523
] |
def get_tool_dir():
"""
Get the directory of Visual Studio
from environment variables.
"""
def _is_comntools(name):
return re.match(r'vs\d+comntools', name.lower())
def _get_version_from_name(name):
return (re.search(r'\d+', name).group(0), name)
names = [name for name in os.environ if _is_comntools(name)]
logging.debug(_('found vscomntools: %s'), names)
versions = [_get_version_from_name(name) for name in names]
logging.debug(_('extracted versions: %s'), versions)
try:
version = max(versions)
except ValueError:
raise OSError(_('Failed to find the VSCOMNTOOLS. '
'Have you installed Visual Studio?'))
else:
logging.info(_('using version: %s'), version)
vscomntools = os.environ[version[1]]
logging.info(_('using vscomntools: %s'), vscomntools)
return vscomntools | [
"def",
"get_tool_dir",
"(",
")",
":",
"def",
"_is_comntools",
"(",
"name",
")",
":",
"return",
"re",
".",
"match",
"(",
"r'vs\\d+comntools'",
",",
"name",
".",
"lower",
"(",
")",
")",
"def",
"_get_version_from_name",
"(",
"name",
")",
":",
"return",
"(",
"re",
".",
"search",
"(",
"r'\\d+'",
",",
"name",
")",
".",
"group",
"(",
"0",
")",
",",
"name",
")",
"names",
"=",
"[",
"name",
"for",
"name",
"in",
"os",
".",
"environ",
"if",
"_is_comntools",
"(",
"name",
")",
"]",
"logging",
".",
"debug",
"(",
"_",
"(",
"'found vscomntools: %s'",
")",
",",
"names",
")",
"versions",
"=",
"[",
"_get_version_from_name",
"(",
"name",
")",
"for",
"name",
"in",
"names",
"]",
"logging",
".",
"debug",
"(",
"_",
"(",
"'extracted versions: %s'",
")",
",",
"versions",
")",
"try",
":",
"version",
"=",
"max",
"(",
"versions",
")",
"except",
"ValueError",
":",
"raise",
"OSError",
"(",
"_",
"(",
"'Failed to find the VSCOMNTOOLS. '",
"'Have you installed Visual Studio?'",
")",
")",
"else",
":",
"logging",
".",
"info",
"(",
"_",
"(",
"'using version: %s'",
")",
",",
"version",
")",
"vscomntools",
"=",
"os",
".",
"environ",
"[",
"version",
"[",
"1",
"]",
"]",
"logging",
".",
"info",
"(",
"_",
"(",
"'using vscomntools: %s'",
")",
",",
"vscomntools",
")",
"return",
"vscomntools"
] | 38.96 | [
0.05263157894736842,
0.18181818181818182,
0.047619047619047616,
0.05714285714285714,
0.18181818181818182,
0.0625,
0.03333333333333333,
0,
0.04878048780487805,
0.03389830508474576,
0,
0.029411764705882353,
0.03571428571428571,
0.029850746268656716,
0.03333333333333333,
0.16666666666666666,
0.05714285714285714,
0.07692307692307693,
0.04838709677419355,
0.046153846153846156,
0.15384615384615385,
0.03508771929824561,
0.041666666666666664,
0.03076923076923077,
0.06666666666666667
] |
def publish(self, key, data):
'''
Publishes the data to the event stream.
'''
publish_data = {key: data}
pub = salt.utils.json.dumps(publish_data) + str('\n\n') # future lint: disable=blacklisted-function
self.handler.write_message(pub) | [
"def",
"publish",
"(",
"self",
",",
"key",
",",
"data",
")",
":",
"publish_data",
"=",
"{",
"key",
":",
"data",
"}",
"pub",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"dumps",
"(",
"publish_data",
")",
"+",
"str",
"(",
"'\\n\\n'",
")",
"# future lint: disable=blacklisted-function",
"self",
".",
"handler",
".",
"write_message",
"(",
"pub",
")"
] | 39.857143 | [
0.034482758620689655,
0.18181818181818182,
0.0425531914893617,
0.18181818181818182,
0.058823529411764705,
0.027777777777777776,
0.05128205128205128
] |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'out_of_vocabulary_words'
) and self.out_of_vocabulary_words is not None:
_dict['out_of_vocabulary_words'] = self.out_of_vocabulary_words
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'error') and self.error is not None:
_dict['error'] = self.error
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'name'",
")",
"and",
"self",
".",
"name",
"is",
"not",
"None",
":",
"_dict",
"[",
"'name'",
"]",
"=",
"self",
".",
"name",
"if",
"hasattr",
"(",
"self",
",",
"'out_of_vocabulary_words'",
")",
"and",
"self",
".",
"out_of_vocabulary_words",
"is",
"not",
"None",
":",
"_dict",
"[",
"'out_of_vocabulary_words'",
"]",
"=",
"self",
".",
"out_of_vocabulary_words",
"if",
"hasattr",
"(",
"self",
",",
"'status'",
")",
"and",
"self",
".",
"status",
"is",
"not",
"None",
":",
"_dict",
"[",
"'status'",
"]",
"=",
"self",
".",
"status",
"if",
"hasattr",
"(",
"self",
",",
"'error'",
")",
"and",
"self",
".",
"error",
"is",
"not",
"None",
":",
"_dict",
"[",
"'error'",
"]",
"=",
"self",
".",
"error",
"return",
"_dict"
] | 46.923077 | [
0.05263157894736842,
0.031746031746031744,
0.1111111111111111,
0.03389830508474576,
0.05405405405405406,
0.06,
0.06153846153846154,
0.02666666666666667,
0.031746031746031744,
0.04878048780487805,
0.03278688524590164,
0.05128205128205128,
0.1
] |
def cancel_pending_requests(self):
'''Cancel all pending requests.'''
exception = CancelledError()
for _request, event in self._requests.values():
event.result = exception
event.set()
self._requests.clear() | [
"def",
"cancel_pending_requests",
"(",
"self",
")",
":",
"exception",
"=",
"CancelledError",
"(",
")",
"for",
"_request",
",",
"event",
"in",
"self",
".",
"_requests",
".",
"values",
"(",
")",
":",
"event",
".",
"result",
"=",
"exception",
"event",
".",
"set",
"(",
")",
"self",
".",
"_requests",
".",
"clear",
"(",
")"
] | 36.571429 | [
0.029411764705882353,
0.047619047619047616,
0.05555555555555555,
0.03636363636363636,
0.05555555555555555,
0.08695652173913043,
0.06666666666666667
] |
def add_record_check(self, actions, table, func):
# emitted after query
# table: 'table_name'
# column: ('table_name', 'column_name')
assert isinstance(table, str), '`table` must be table name'
for i in actions:
assert i not in (A.QUERY, A.CREATE), "meaningless action check with record: [%s]" % i
self.record_checks.append([table, actions, func])
"""def func(ability, user, action, record: DataRecord, available_columns: list):
pass
""" | [
"def",
"add_record_check",
"(",
"self",
",",
"actions",
",",
"table",
",",
"func",
")",
":",
"# emitted after query",
"# table: 'table_name'",
"# column: ('table_name', 'column_name')",
"assert",
"isinstance",
"(",
"table",
",",
"str",
")",
",",
"'`table` must be table name'",
"for",
"i",
"in",
"actions",
":",
"assert",
"i",
"not",
"in",
"(",
"A",
".",
"QUERY",
",",
"A",
".",
"CREATE",
")",
",",
"\"meaningless action check with record: [%s]\"",
"%",
"i",
"self",
".",
"record_checks",
".",
"append",
"(",
"[",
"table",
",",
"actions",
",",
"func",
"]",
")"
] | 39.615385 | [
0.02040816326530612,
0.06896551724137931,
0.06896551724137931,
0.0425531914893617,
0.029850746268656716,
0.08,
0.030927835051546393,
0,
0.03508771929824561,
0,
0.03409090909090909,
0.125,
0.18181818181818182
] |
def convert_advanced_relu(builder, layer, input_names, output_names, keras_layer):
"""
Convert a ReLU layer with maximum value from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
if keras_layer.max_value is None:
builder.add_activation(layer, 'RELU', input_name, output_name)
return
# No direct support of RELU with max-activation value - use negate and
# clip layers
relu_output_name = output_name + '_relu'
builder.add_activation(layer, 'RELU', input_name, relu_output_name)
# negate it
neg_output_name = relu_output_name + '_neg'
builder.add_activation(layer+'__neg__', 'LINEAR', relu_output_name,
neg_output_name,[-1.0, 0])
# apply threshold
clip_output_name = relu_output_name + '_clip'
builder.add_unary(layer+'__clip__', neg_output_name, clip_output_name,
'threshold', alpha = -keras_layer.max_value)
# negate it back
builder.add_activation(layer+'_neg2', 'LINEAR', clip_output_name,
output_name,[-1.0, 0]) | [
"def",
"convert_advanced_relu",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"if",
"keras_layer",
".",
"max_value",
"is",
"None",
":",
"builder",
".",
"add_activation",
"(",
"layer",
",",
"'RELU'",
",",
"input_name",
",",
"output_name",
")",
"return",
"# No direct support of RELU with max-activation value - use negate and",
"# clip layers",
"relu_output_name",
"=",
"output_name",
"+",
"'_relu'",
"builder",
".",
"add_activation",
"(",
"layer",
",",
"'RELU'",
",",
"input_name",
",",
"relu_output_name",
")",
"# negate it",
"neg_output_name",
"=",
"relu_output_name",
"+",
"'_neg'",
"builder",
".",
"add_activation",
"(",
"layer",
"+",
"'__neg__'",
",",
"'LINEAR'",
",",
"relu_output_name",
",",
"neg_output_name",
",",
"[",
"-",
"1.0",
",",
"0",
"]",
")",
"# apply threshold",
"clip_output_name",
"=",
"relu_output_name",
"+",
"'_clip'",
"builder",
".",
"add_unary",
"(",
"layer",
"+",
"'__clip__'",
",",
"neg_output_name",
",",
"clip_output_name",
",",
"'threshold'",
",",
"alpha",
"=",
"-",
"keras_layer",
".",
"max_value",
")",
"# negate it back",
"builder",
".",
"add_activation",
"(",
"layer",
"+",
"'_neg2'",
",",
"'LINEAR'",
",",
"clip_output_name",
",",
"output_name",
",",
"[",
"-",
"1.0",
",",
"0",
"]",
")"
] | 36.176471 | [
0.024390243902439025,
0.2857142857142857,
0.030303030303030304,
0,
0.14285714285714285,
0.14285714285714285,
0.09090909090909091,
0.06896551724137931,
0,
0.06060606060606061,
0.05,
0.2857142857142857,
0.0625,
0.031746031746031744,
0,
0.05405405405405406,
0.02857142857142857,
0.14285714285714285,
0,
0.02702702702702703,
0.11764705882352941,
0.045454545454545456,
0.028169014084507043,
0.13333333333333333,
0.0425531914893617,
0.04225352112676056,
0.10526315789473684,
0.09523809523809523,
0.04081632653061224,
0.04054054054054054,
0.05357142857142857,
0.1,
0.043478260869565216,
0.11764705882352941
] |
def __get_url(self, endpoint):
""" Get URL for requests """
url = self.url
api = "wc-api"
if url.endswith("/") is False:
url = "%s/" % url
if self.wp_api:
api = "wp-json"
return "%s%s/%s/%s" % (url, api, self.version, endpoint) | [
"def",
"__get_url",
"(",
"self",
",",
"endpoint",
")",
":",
"url",
"=",
"self",
".",
"url",
"api",
"=",
"\"wc-api\"",
"if",
"url",
".",
"endswith",
"(",
"\"/\"",
")",
"is",
"False",
":",
"url",
"=",
"\"%s/\"",
"%",
"url",
"if",
"self",
".",
"wp_api",
":",
"api",
"=",
"\"wp-json\"",
"return",
"\"%s%s/%s/%s\"",
"%",
"(",
"url",
",",
"api",
",",
"self",
".",
"version",
",",
"endpoint",
")"
] | 24.25 | [
0.03333333333333333,
0.05555555555555555,
0.09090909090909091,
0.09090909090909091,
0,
0.05263157894736842,
0.06896551724137931,
0,
0.08695652173913043,
0.07407407407407407,
0,
0.03125
] |
def line_model(freq, data, tref, amp=1, phi=0):
""" Simple time-domain model for a frequency line.
Parameters
----------
freq: float
Frequency of the line.
data: pycbc.types.TimeSeries
Reference data, to get delta_t, start_time, duration and sample_times.
tref: float
Reference time for the line model.
amp: {1., float}, optional
Amplitude of the frequency line.
phi: {0. float}, optional
Phase of the frequency line (radians).
Returns
-------
freq_line: pycbc.types.TimeSeries
A timeseries of the line model with frequency 'freq'. The returned
data are complex to allow measuring the amplitude and phase of the
corresponding frequency line in the strain data. For extraction, use
only the real part of the data.
"""
freq_line = TimeSeries(zeros(len(data)), delta_t=data.delta_t,
epoch=data.start_time)
times = data.sample_times - float(tref)
alpha = 2 * numpy.pi * freq * times + phi
freq_line.data = amp * numpy.exp(1.j * alpha)
return freq_line | [
"def",
"line_model",
"(",
"freq",
",",
"data",
",",
"tref",
",",
"amp",
"=",
"1",
",",
"phi",
"=",
"0",
")",
":",
"freq_line",
"=",
"TimeSeries",
"(",
"zeros",
"(",
"len",
"(",
"data",
")",
")",
",",
"delta_t",
"=",
"data",
".",
"delta_t",
",",
"epoch",
"=",
"data",
".",
"start_time",
")",
"times",
"=",
"data",
".",
"sample_times",
"-",
"float",
"(",
"tref",
")",
"alpha",
"=",
"2",
"*",
"numpy",
".",
"pi",
"*",
"freq",
"*",
"times",
"+",
"phi",
"freq_line",
".",
"data",
"=",
"amp",
"*",
"numpy",
".",
"exp",
"(",
"1.j",
"*",
"alpha",
")",
"return",
"freq_line"
] | 33.96875 | [
0.02127659574468085,
0.037037037037037035,
0,
0.14285714285714285,
0.14285714285714285,
0.13333333333333333,
0.06666666666666667,
0.0625,
0.02564102564102564,
0.13333333333333333,
0.047619047619047616,
0.06666666666666667,
0.05,
0.06896551724137931,
0.06521739130434782,
0,
0.18181818181818182,
0.18181818181818182,
0.05405405405405406,
0.02702702702702703,
0.02702702702702703,
0.02631578947368421,
0.05128205128205128,
0.2857142857142857,
0.045454545454545456,
0.10204081632653061,
0,
0.046511627906976744,
0.044444444444444446,
0.04081632653061224,
0,
0.1
] |
def all(self):
"""Returns a tuple containing all elements of the object
This method returns all elements of the path in the form of a tuple.
e.g.: `(abs_path, drive_letter, path_only, rootname, extension,
filesize, time_in_seconds)`.
Returns
-------
tuple
All elements of the path associated with this object as a tuple.
Notes
-----
If path points to a non-existent file, the size and datetime will
be returned as None (NoneType).
"""
return (self._full, self._driv, self._path, self._name, self._ext, self._size, self._time) | [
"def",
"all",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"_full",
",",
"self",
".",
"_driv",
",",
"self",
".",
"_path",
",",
"self",
".",
"_name",
",",
"self",
".",
"_ext",
",",
"self",
".",
"_size",
",",
"self",
".",
"_time",
")"
] | 33.157895 | [
0.07142857142857142,
0.03125,
0,
0.02631578947368421,
0.0821917808219178,
0.13513513513513514,
0,
0.13333333333333333,
0.13333333333333333,
0.15384615384615385,
0.02631578947368421,
0,
0.15384615384615385,
0.15384615384615385,
0.0273972602739726,
0.05128205128205128,
0.18181818181818182,
0,
0.030612244897959183
] |
def stan(file=None, model_name="anon_model", model_code=None, fit=None,
data=None, pars=None, chains=4, iter=2000, warmup=None, thin=1,
init="random", seed=None, algorithm=None, control=None, sample_file=None,
diagnostic_file=None, verbose=False, boost_lib=None, eigen_lib=None,
include_paths=None, n_jobs=-1, **kwargs):
"""Fit a model using Stan.
The `pystan.stan` function was deprecated in version 2.17 and will be
removed in version 3.0. Compiling and using a Stan Program (e.g., for
drawing samples) should be done in separate steps.
Parameters
----------
file : string {'filename', file-like object}
Model code must found via one of the following parameters: `file` or
`model_code`.
If `file` is a filename, the string passed as an argument is expected
to be a filename containing the Stan model specification.
If `file` is a file object, the object passed must have a 'read' method
(file-like object) that is called to fetch the Stan model specification.
charset : string, optional
If bytes or files are provided, this charset is used to decode. 'utf-8'
by default.
model_code : string
A string containing the Stan model specification. Alternatively,
the model may be provided with the parameter `file`.
model_name: string, optional
A string naming the model. If none is provided 'anon_model' is
the default. However, if `file` is a filename, then the filename
will be used to provide a name. 'anon_model' by default.
fit : StanFit instance
An instance of StanFit derived from a previous fit, None by
default. If `fit` is not None, the compiled model associated
with a previous fit is reused and recompilation is avoided.
data : dict
A Python dictionary providing the data for the model. Variables
for Stan are stored in the dictionary as expected. Variable
names are the keys and the values are their associated values.
Stan only accepts certain kinds of values; see Notes.
pars : list of string, optional
A list of strings indicating parameters of interest. By default
all parameters specified in the model will be stored.
chains : int, optional
Positive integer specifying number of chains. 4 by default.
iter : int, 2000 by default
Positive integer specifying how many iterations for each chain
including warmup.
warmup : int, iter//2 by default
Positive integer specifying number of warmup (aka burn-in) iterations.
As `warmup` also specifies the number of iterations used for stepsize
adaptation, warmup samples should not be used for inference.
thin : int, optional
Positive integer specifying the period for saving samples.
Default is 1.
init : {0, '0', 'random', function returning dict, list of dict}, optional
Specifies how initial parameter values are chosen:
- 0 or '0' initializes all to be zero on the unconstrained support.
- 'random' generates random initial values. An optional parameter
`init_r` controls the range of randomly generated initial values
for parameters in terms of their unconstrained support;
- list of size equal to the number of chains (`chains`), where the
list contains a dict with initial parameter values;
- function returning a dict with initial parameter values. The
function may take an optional argument `chain_id`.
seed : int or np.random.RandomState, optional
The seed, a positive integer for random number generation. Only
one seed is needed when multiple chains are used, as the other
chain's seeds are generated from the first chain's to prevent
dependency among random number streams. By default, seed is
``random.randint(0, MAX_UINT)``.
algorithm : {"NUTS", "HMC", "Fixed_param"}, optional
One of the algorithms that are implemented in Stan such as the No-U-Turn
sampler (NUTS, Hoffman and Gelman 2011) and static HMC.
sample_file : string, optional
File name specifying where samples for *all* parameters and other
saved quantities will be written. If not provided, no samples
will be written. If the folder given is not writable, a temporary
directory will be used. When there are multiple chains, an underscore
and chain number are appended to the file name. By default do not
write samples to file.
diagnostic_file : string, optional
File name specifying where diagnostic information should be written.
By default no diagnostic information is recorded.
boost_lib : string, optional
The path to a version of the Boost C++ library to use instead of
the one supplied with PyStan.
eigen_lib : string, optional
The path to a version of the Eigen C++ library to use instead of
the one in the supplied with PyStan.
include_paths : list of strings, optional
Paths for #include files defined in Stan code.
verbose : boolean, optional
Indicates whether intermediate output should be piped to the console.
This output may be useful for debugging. False by default.
control : dict, optional
A dictionary of parameters to control the sampler's behavior. Default
values are used if control is not specified. The following are
adaptation parameters for sampling algorithms.
These are parameters used in Stan with similar names:
- `adapt_engaged` : bool
- `adapt_gamma` : float, positive, default 0.05
- `adapt_delta` : float, between 0 and 1, default 0.8
- `adapt_kappa` : float, between default 0.75
- `adapt_t0` : float, positive, default 10
- `adapt_init_buffer` : int, positive, defaults to 75
- `adapt_term_buffer` : int, positive, defaults to 50
- `adapt_window` : int, positive, defaults to 25
In addition, the algorithm HMC (called 'static HMC' in Stan) and NUTS
share the following parameters:
- `stepsize`: float, positive
- `stepsize_jitter`: float, between 0 and 1
- `metric` : str, {"unit_e", "diag_e", "dense_e"}
In addition, depending on which algorithm is used, different parameters
can be set as in Stan for sampling. For the algorithm HMC we can set
- `int_time`: float, positive
For algorithm NUTS, we can set
- `max_treedepth` : int, positive
n_jobs : int, optional
Sample in parallel. If -1 all CPUs are used. If 1, no parallel
computing code is used at all, which is useful for debugging.
Returns
-------
fit : StanFit instance
Other parameters
----------------
chain_id : int, optional
`chain_id` can be a vector to specify the chain_id for all chains or
an integer. For the former case, they should be unique. For the latter,
the sequence of integers starting from the given `chain_id` are used
for all chains.
init_r : float, optional
`init_r` is only valid if `init` == "random". In this case, the initial
values are simulated from [-`init_r`, `init_r`] rather than using the
default interval (see the manual of (Cmd)Stan).
test_grad: bool, optional
If `test_grad` is ``True``, Stan will not do any sampling. Instead,
the gradient calculation is tested and printed out and the fitted
StanFit4Model object is in test gradient mode. By default, it is
``False``.
append_samples`: bool, optional
refresh`: int, optional
Argument `refresh` can be used to control how to indicate the progress
during sampling (i.e. show the progress every `refresh` iterations).
By default, `refresh` is `max(iter/10, 1)`.
obfuscate_model_name : boolean, optional
`obfuscate_model_name` is only valid if `fit` is None. True by default.
If False the model name in the generated C++ code will not be made
unique by the insertion of randomly generated characters.
Generally it is recommended that this parameter be left as True.
Examples
--------
>>> from pystan import stan
>>> import numpy as np
>>> model_code = '''
... parameters {
... real y[2];
... }
... model {
... y[1] ~ normal(0, 1);
... y[2] ~ double_exponential(0, 2);
... }'''
>>> fit1 = stan(model_code=model_code, iter=10)
>>> print(fit1)
>>> excode = '''
... transformed data {
... real y[20];
... y[1] = 0.5796; y[2] = 0.2276; y[3] = -0.2959;
... y[4] = -0.3742; y[5] = 0.3885; y[6] = -2.1585;
... y[7] = 0.7111; y[8] = 1.4424; y[9] = 2.5430;
... y[10] = 0.3746; y[11] = 0.4773; y[12] = 0.1803;
... y[13] = 0.5215; y[14] = -1.6044; y[15] = -0.6703;
... y[16] = 0.9459; y[17] = -0.382; y[18] = 0.7619;
... y[19] = 0.1006; y[20] = -1.7461;
... }
... parameters {
... real mu;
... real<lower=0, upper=10> sigma;
... vector[2] z[3];
... real<lower=0> alpha;
... }
... model {
... y ~ normal(mu, sigma);
... for (i in 1:3)
... z[i] ~ normal(0, 1);
... alpha ~ exponential(2);
... }'''
>>>
>>> def initfun1():
... return dict(mu=1, sigma=4, z=np.random.normal(size=(3, 2)), alpha=1)
>>> exfit0 = stan(model_code=excode, init=initfun1)
>>> def initfun2(chain_id=1):
... return dict(mu=1, sigma=4, z=np.random.normal(size=(3, 2)), alpha=1 + chain_id)
>>> exfit1 = stan(model_code=excode, init=initfun2)
"""
logger.warning('DeprecationWarning: pystan.stan was deprecated in version 2.17 and will be removed in version 3.0. '
'Compile and use a Stan program in separate steps.')
# NOTE: this is a thin wrapper for other functions. Error handling occurs
# elsewhere.
if data is None:
data = {}
if warmup is None:
warmup = int(iter // 2)
obfuscate_model_name = kwargs.pop("obfuscate_model_name", True)
if fit is not None:
m = fit.stanmodel
else:
m = StanModel(file=file, model_name=model_name, model_code=model_code,
boost_lib=boost_lib, eigen_lib=eigen_lib,
include_paths=include_paths,
obfuscate_model_name=obfuscate_model_name, verbose=verbose)
# check that arguments in kwargs are valid
valid_args = {"chain_id", "init_r", "test_grad", "append_samples", "enable_random_init",
"refresh", "control"}
for arg in kwargs:
if arg not in valid_args:
raise ValueError("Parameter `{}` is not recognized.".format(arg))
fit = m.sampling(data, pars=pars, chains=chains, iter=iter,
warmup=warmup, thin=thin, seed=seed, init=init,
sample_file=sample_file, diagnostic_file=diagnostic_file,
verbose=verbose, algorithm=algorithm, control=control,
n_jobs=n_jobs, **kwargs)
return fit | [
"def",
"stan",
"(",
"file",
"=",
"None",
",",
"model_name",
"=",
"\"anon_model\"",
",",
"model_code",
"=",
"None",
",",
"fit",
"=",
"None",
",",
"data",
"=",
"None",
",",
"pars",
"=",
"None",
",",
"chains",
"=",
"4",
",",
"iter",
"=",
"2000",
",",
"warmup",
"=",
"None",
",",
"thin",
"=",
"1",
",",
"init",
"=",
"\"random\"",
",",
"seed",
"=",
"None",
",",
"algorithm",
"=",
"None",
",",
"control",
"=",
"None",
",",
"sample_file",
"=",
"None",
",",
"diagnostic_file",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"boost_lib",
"=",
"None",
",",
"eigen_lib",
"=",
"None",
",",
"include_paths",
"=",
"None",
",",
"n_jobs",
"=",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"warning",
"(",
"'DeprecationWarning: pystan.stan was deprecated in version 2.17 and will be removed in version 3.0. '",
"'Compile and use a Stan program in separate steps.'",
")",
"# NOTE: this is a thin wrapper for other functions. Error handling occurs",
"# elsewhere.",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"{",
"}",
"if",
"warmup",
"is",
"None",
":",
"warmup",
"=",
"int",
"(",
"iter",
"//",
"2",
")",
"obfuscate_model_name",
"=",
"kwargs",
".",
"pop",
"(",
"\"obfuscate_model_name\"",
",",
"True",
")",
"if",
"fit",
"is",
"not",
"None",
":",
"m",
"=",
"fit",
".",
"stanmodel",
"else",
":",
"m",
"=",
"StanModel",
"(",
"file",
"=",
"file",
",",
"model_name",
"=",
"model_name",
",",
"model_code",
"=",
"model_code",
",",
"boost_lib",
"=",
"boost_lib",
",",
"eigen_lib",
"=",
"eigen_lib",
",",
"include_paths",
"=",
"include_paths",
",",
"obfuscate_model_name",
"=",
"obfuscate_model_name",
",",
"verbose",
"=",
"verbose",
")",
"# check that arguments in kwargs are valid",
"valid_args",
"=",
"{",
"\"chain_id\"",
",",
"\"init_r\"",
",",
"\"test_grad\"",
",",
"\"append_samples\"",
",",
"\"enable_random_init\"",
",",
"\"refresh\"",
",",
"\"control\"",
"}",
"for",
"arg",
"in",
"kwargs",
":",
"if",
"arg",
"not",
"in",
"valid_args",
":",
"raise",
"ValueError",
"(",
"\"Parameter `{}` is not recognized.\"",
".",
"format",
"(",
"arg",
")",
")",
"fit",
"=",
"m",
".",
"sampling",
"(",
"data",
",",
"pars",
"=",
"pars",
",",
"chains",
"=",
"chains",
",",
"iter",
"=",
"iter",
",",
"warmup",
"=",
"warmup",
",",
"thin",
"=",
"thin",
",",
"seed",
"=",
"seed",
",",
"init",
"=",
"init",
",",
"sample_file",
"=",
"sample_file",
",",
"diagnostic_file",
"=",
"diagnostic_file",
",",
"verbose",
"=",
"verbose",
",",
"algorithm",
"=",
"algorithm",
",",
"control",
"=",
"control",
",",
"n_jobs",
"=",
"n_jobs",
",",
"*",
"*",
"kwargs",
")",
"return",
"fit"
] | 41.709434 | [
0.028169014084507043,
0.125,
0.10975609756097561,
0.09090909090909091,
0.12,
0.06666666666666667,
0,
0.0410958904109589,
0.0684931506849315,
0.05555555555555555,
0,
0.14285714285714285,
0.14285714285714285,
0,
0.0625,
0.039473684210526314,
0.14285714285714285,
0,
0.03896103896103896,
0.03076923076923077,
0,
0.0379746835443038,
0.0375,
0,
0.1,
0.02531645569620253,
0.10526315789473684,
0,
0.13043478260869565,
0.027777777777777776,
0.05,
0,
0.0625,
0.02857142857142857,
0.041666666666666664,
0.03125,
0,
0.11538461538461539,
0.029850746268656716,
0.04411764705882353,
0.029850746268656716,
0,
0.2,
0.028169014084507043,
0.029850746268656716,
0.02857142857142857,
0.04918032786885246,
0,
0.08571428571428572,
0.028169014084507043,
0.03278688524590164,
0,
0.11538461538461539,
0.029850746268656716,
0,
0.0967741935483871,
0.02857142857142857,
0.08,
0,
0.08333333333333333,
0.039473684210526314,
0.03896103896103896,
0.030303030303030304,
0,
0.125,
0.030303030303030304,
0.09523809523809523,
0,
0.038461538461538464,
0.034482758620689655,
0.02666666666666667,
0.0273972602739726,
0.039473684210526314,
0.04477611940298507,
0.05405405405405406,
0.047619047619047616,
0.02857142857142857,
0.04838709677419355,
0,
0.061224489795918366,
0.028169014084507043,
0.02857142857142857,
0.028985507246376812,
0.029850746268656716,
0.075,
0,
0.05357142857142857,
0.0375,
0.047619047619047616,
0,
0.08823529411764706,
0.0410958904109589,
0.028985507246376812,
0.0273972602739726,
0.025974025974025976,
0.0273972602739726,
0.06666666666666667,
0,
0.07894736842105263,
0.02631578947368421,
0.03508771929824561,
0,
0.09375,
0.027777777777777776,
0.05405405405405406,
0,
0.09375,
0.027777777777777776,
0.045454545454545456,
0,
0.06666666666666667,
0.07407407407407407,
0,
0.0967741935483871,
0.025974025974025976,
0.030303030303030304,
0,
0.10714285714285714,
0.025974025974025976,
0.028169014084507043,
0.037037037037037035,
0,
0.03278688524590164,
0,
0.125,
0.07272727272727272,
0.06557377049180328,
0.07547169811320754,
0.07547169811320754,
0.06557377049180328,
0.06557377049180328,
0.07142857142857142,
0,
0.03896103896103896,
0.05128205128205128,
0,
0.08108108108108109,
0.058823529411764705,
0.07017543859649122,
0,
0.02531645569620253,
0.02631578947368421,
0,
0.08108108108108109,
0,
0.05263157894736842,
0,
0.0975609756097561,
0,
0.11538461538461539,
0.04285714285714286,
0.028985507246376812,
0,
0.18181818181818182,
0.18181818181818182,
0,
0.11538461538461539,
0,
0.1,
0.1,
0,
0.10714285714285714,
0.039473684210526314,
0.02531645569620253,
0.039473684210526314,
0.08695652173913043,
0,
0.10714285714285714,
0.038461538461538464,
0.03896103896103896,
0.07272727272727272,
0,
0.06896551724137931,
0.04,
0.0273972602739726,
0.0273972602739726,
0.16666666666666666,
0,
0.08571428571428572,
0,
0.1111111111111111,
0.038461538461538464,
0.04938271604938271,
0.058823529411764705,
0,
0.06818181818181818,
0.0379746835443038,
0.02702702702702703,
0.03076923076923077,
0.027777777777777776,
0,
0.16666666666666666,
0.16666666666666666,
0.0967741935483871,
0.11538461538461539,
0.16666666666666666,
0.15,
0.15,
0.4444444444444444,
0.2,
0.1,
0.07142857142857142,
0.3333333333333333,
0.058823529411764705,
0.15789473684210525,
0.2,
0.11538461538461539,
0.13043478260869565,
0.09836065573770492,
0.09836065573770492,
0.1,
0.08196721311475409,
0.08064516129032258,
0.08196721311475409,
0.09090909090909091,
0.4444444444444444,
0.15,
0.15,
0.16666666666666666,
0.1111111111111111,
0.1875,
0.4444444444444444,
0.2,
0.08823529411764706,
0.15384615384615385,
0.09375,
0.08571428571428572,
0.3333333333333333,
0.42857142857142855,
0.13043478260869565,
0.05,
0.05454545454545454,
0.09090909090909091,
0.04395604395604396,
0.05454545454545454,
0.2857142857142857,
0.03333333333333333,
0.05714285714285714,
0.025974025974025976,
0.125,
0.1,
0.11764705882352941,
0.09090909090909091,
0.06451612903225806,
0.029850746268656716,
0.08695652173913043,
0.08,
0.2222222222222222,
0.038461538461538464,
0.07936507936507936,
0.08,
0.08641975308641975,
0.043478260869565216,
0.043478260869565216,
0.10256410256410256,
0.09090909090909091,
0.06060606060606061,
0.025974025974025976,
0,
0.047619047619047616,
0.10294117647058823,
0.0641025641025641,
0.08,
0.1111111111111111,
0.14285714285714285
] |
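The deprecation notice in the wrapper above points users at a two-step workflow; the following is a minimal sketch of that replacement using the same PyStan 2.x calls the wrapper itself makes (the model code and sampling settings are made up for illustration).

import pystan

model_code = 'parameters {real y;} model {y ~ normal(0, 1);}'

# Step 1: compile once (the expensive part).
sm = pystan.StanModel(model_code=model_code)

# Step 2: sample from the compiled model as often as needed,
# mirroring the arguments the stan() wrapper forwards to sampling().
fit = sm.sampling(data={}, chains=4, iter=2000, warmup=1000, seed=1)
print(fit)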
def _join_url_dir(cls, url, *args):
"""
Join a URL with multiple directories
"""
for path in args:
url = url.rstrip('/') + '/'
url = urljoin(url, path.lstrip('/'))
return url | [
"def",
"_join_url_dir",
"(",
"cls",
",",
"url",
",",
"*",
"args",
")",
":",
"for",
"path",
"in",
"args",
":",
"url",
"=",
"url",
".",
"rstrip",
"(",
"'/'",
")",
"+",
"'/'",
"url",
"=",
"urljoin",
"(",
"url",
",",
"path",
".",
"lstrip",
"(",
"'/'",
")",
")",
"return",
"url"
] | 23.1 | [
0.02857142857142857,
0.18181818181818182,
0.045454545454545456,
0.18181818181818182,
0,
0.08,
0.05128205128205128,
0.041666666666666664,
0,
0.1111111111111111
] |
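For illustration, here is the same joining behaviour as a standalone function (urljoin is assumed to come from urllib.parse; the original module may import it differently), together with an example of how trailing and leading slashes are normalised.

from urllib.parse import urljoin

def join_url_dir(url, *args):
    # Standalone copy of the classmethod body above.
    for path in args:
        url = url.rstrip('/') + '/'
        url = urljoin(url, path.lstrip('/'))
    return url

print(join_url_dir('http://example.com/api/', 'v1', '/items/'))
# -> http://example.com/api/v1/items/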
def _is_valid_string(self, inpt, metadata):
"""Checks if input is a valid string"""
if not is_string(inpt):
return False
if metadata.get_minimum_string_length() and len(inpt) < metadata.get_minimum_string_length():
return False
elif metadata.get_maximum_string_length() and len(inpt) > metadata.get_maximum_string_length():
return False
if metadata.get_string_set() and inpt not in metadata.get_string_set():
return False
else:
return True | [
"def",
"_is_valid_string",
"(",
"self",
",",
"inpt",
",",
"metadata",
")",
":",
"if",
"not",
"is_string",
"(",
"inpt",
")",
":",
"return",
"False",
"if",
"metadata",
".",
"get_minimum_string_length",
"(",
")",
"and",
"len",
"(",
"inpt",
")",
"<",
"metadata",
".",
"get_minimum_string_length",
"(",
")",
":",
"return",
"False",
"elif",
"metadata",
".",
"get_maximum_string_length",
"(",
")",
"and",
"len",
"(",
"inpt",
")",
">",
"metadata",
".",
"get_maximum_string_length",
"(",
")",
":",
"return",
"False",
"if",
"metadata",
".",
"get_string_set",
"(",
")",
"and",
"inpt",
"not",
"in",
"metadata",
".",
"get_string_set",
"(",
")",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | 44.666667 | [
0.023255813953488372,
0.0425531914893617,
0.06451612903225806,
0.08333333333333333,
0.0297029702970297,
0.08333333333333333,
0.02912621359223301,
0.08333333333333333,
0.02531645569620253,
0.08333333333333333,
0.15384615384615385,
0.08695652173913043
] |
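A standalone restatement of the same checks, with plain keyword arguments standing in for the metadata accessors (the real metadata object is not shown in this entry), just to illustrate the accept/reject behaviour.

def is_valid_string(inpt, min_len=None, max_len=None, string_set=None):
    # Hypothetical stand-in for the metadata-driven validator above.
    if not isinstance(inpt, str):
        return False
    if min_len and len(inpt) < min_len:
        return False
    if max_len and len(inpt) > max_len:
        return False
    if string_set and inpt not in string_set:
        return False
    return True

print(is_valid_string("hello", min_len=3, max_len=10))       # True
print(is_valid_string("hi", min_len=3))                      # False
print(is_valid_string("blue", string_set={"red", "green"}))  # False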
def datafind_keep_unique_backups(backup_outs, orig_outs):
"""This function will take a list of backup datafind files, presumably
    obtained by querying a remote datafind server, e.g. CIT, and compare
these against a list of original datafind files, presumably obtained by
querying the local datafind server. Only the datafind files in the backup
list that do not appear in the original list are returned. This allows us
to use only files that are missing from the local cluster.
Parameters
-----------
backup_outs : FileList
List of datafind files from the remote datafind server.
orig_outs : FileList
List of datafind files from the local datafind server.
Returns
--------
FileList
List of datafind files in backup_outs and not in orig_outs.
"""
# NOTE: This function is not optimized and could be made considerably
    # quicker if speed becomes an issue. With 4s frame files this might
# be slow, but for >1000s files I don't foresee any issue, so I keep
# this simple.
return_list = FileList([])
# We compare the LFNs to determine uniqueness
# Is there a way to associate two paths with one LFN??
orig_names = [f.name for f in orig_outs]
for file in backup_outs:
if file.name not in orig_names:
return_list.append(file)
else:
index_num = orig_names.index(file.name)
orig_out = orig_outs[index_num]
pfns = list(file.pfns)
# This shouldn't happen, but catch if it does
assert(len(pfns) == 1)
orig_out.PFN(pfns[0].url, site='notlocal')
return return_list | [
"def",
"datafind_keep_unique_backups",
"(",
"backup_outs",
",",
"orig_outs",
")",
":",
"# NOTE: This function is not optimized and could be made considerably",
"# quicker if speed becomes in issue. With 4s frame files this might",
"# be slow, but for >1000s files I don't foresee any issue, so I keep",
"# this simple.",
"return_list",
"=",
"FileList",
"(",
"[",
"]",
")",
"# We compare the LFNs to determine uniqueness",
"# Is there a way to associate two paths with one LFN??",
"orig_names",
"=",
"[",
"f",
".",
"name",
"for",
"f",
"in",
"orig_outs",
"]",
"for",
"file",
"in",
"backup_outs",
":",
"if",
"file",
".",
"name",
"not",
"in",
"orig_names",
":",
"return_list",
".",
"append",
"(",
"file",
")",
"else",
":",
"index_num",
"=",
"orig_names",
".",
"index",
"(",
"file",
".",
"name",
")",
"orig_out",
"=",
"orig_outs",
"[",
"index_num",
"]",
"pfns",
"=",
"list",
"(",
"file",
".",
"pfns",
")",
"# This shouldn't happen, but catch if it does",
"assert",
"(",
"len",
"(",
"pfns",
")",
"==",
"1",
")",
"orig_out",
".",
"PFN",
"(",
"pfns",
"[",
"0",
"]",
".",
"url",
",",
"site",
"=",
"'notlocal'",
")",
"return",
"return_list"
] | 41.3 | [
0.017543859649122806,
0.02702702702702703,
0.0273972602739726,
0.02666666666666667,
0.025974025974025976,
0.03896103896103896,
0.03225806451612903,
0,
0.14285714285714285,
0.13333333333333333,
0.11538461538461539,
0.031746031746031744,
0.125,
0.03225806451612903,
0,
0.18181818181818182,
0.16666666666666666,
0.16666666666666666,
0.029850746268656716,
0.2857142857142857,
0.0273972602739726,
0.025974025974025976,
0.02564102564102564,
0.08333333333333333,
0.06666666666666667,
0.04081632653061224,
0.034482758620689655,
0.045454545454545456,
0.07142857142857142,
0.05128205128205128,
0.05555555555555555,
0.15384615384615385,
0.0392156862745098,
0.046511627906976744,
0.058823529411764705,
0.03508771929824561,
0.058823529411764705,
0.037037037037037035,
0,
0.09090909090909091
] |
def _canonicalize_name(prefix, qvm_type, noisy):
"""Take the output of _parse_name to create a canonical name.
"""
if noisy:
noise_suffix = '-noisy'
else:
noise_suffix = ''
if qvm_type is None:
qvm_suffix = ''
elif qvm_type == 'qvm':
qvm_suffix = '-qvm'
elif qvm_type == 'pyqvm':
qvm_suffix = '-pyqvm'
else:
raise ValueError(f"Unknown qvm_type {qvm_type}")
name = f'{prefix}{noise_suffix}{qvm_suffix}'
return name | [
"def",
"_canonicalize_name",
"(",
"prefix",
",",
"qvm_type",
",",
"noisy",
")",
":",
"if",
"noisy",
":",
"noise_suffix",
"=",
"'-noisy'",
"else",
":",
"noise_suffix",
"=",
"''",
"if",
"qvm_type",
"is",
"None",
":",
"qvm_suffix",
"=",
"''",
"elif",
"qvm_type",
"==",
"'qvm'",
":",
"qvm_suffix",
"=",
"'-qvm'",
"elif",
"qvm_type",
"==",
"'pyqvm'",
":",
"qvm_suffix",
"=",
"'-pyqvm'",
"else",
":",
"raise",
"ValueError",
"(",
"f\"Unknown qvm_type {qvm_type}\"",
")",
"name",
"=",
"f'{prefix}{noise_suffix}{qvm_suffix}'",
"return",
"name"
] | 25.526316 | [
0.020833333333333332,
0.03076923076923077,
0.2857142857142857,
0.15384615384615385,
0.06451612903225806,
0.2222222222222222,
0.08,
0,
0.08333333333333333,
0.08695652173913043,
0.07407407407407407,
0.07407407407407407,
0.06896551724137931,
0.06896551724137931,
0.2222222222222222,
0.03571428571428571,
0,
0.041666666666666664,
0.13333333333333333
] |
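The name construction is a pure string operation; a standalone copy plus two sample inputs show the expected outputs (the prefix values are illustrative only).

def canonicalize_name(prefix, qvm_type, noisy):
    # Standalone copy of the helper above, for illustration.
    noise_suffix = '-noisy' if noisy else ''
    if qvm_type is None:
        qvm_suffix = ''
    elif qvm_type == 'qvm':
        qvm_suffix = '-qvm'
    elif qvm_type == 'pyqvm':
        qvm_suffix = '-pyqvm'
    else:
        raise ValueError(f"Unknown qvm_type {qvm_type}")
    return f'{prefix}{noise_suffix}{qvm_suffix}'

print(canonicalize_name('9q-square', 'qvm', noisy=True))   # 9q-square-noisy-qvm
print(canonicalize_name('Aspen-1', None, noisy=False))     # Aspen-1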
def gridsearch(self, X, y, exposure=None, weights=None,
return_scores=False, keep_best=True, objective='auto',
**param_grids):
"""
performs a grid search over a space of parameters for a given objective
NOTE:
gridsearch method is lazy and will not remove useless combinations
from the search space, eg.
>>> n_splines=np.arange(5,10), fit_splines=[True, False]
will result in 10 loops, of which 5 are equivalent because
n_splines has no effect when fit_splines==False
it is not recommended to search over a grid that alternates
between known scales and unknown scales, as the scores of the
candidate models will not be comparable.
Parameters
----------
X : array
input data of shape (n_samples, m_features)
y : array
label data of shape (n_samples,)
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
return_scores : boolean, default False
whether to return the hyperparameters
and score for each element in the grid
keep_best : boolean
whether to keep the best GAM as self.
default: True
objective : string, default: 'auto'
metric to optimize. must be in ['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with unknown
scale and UBRE for models with known scale.
**param_grids : dict, default {'lam': np.logspace(-3, 3, 11)}
pairs of parameters and iterables of floats, or
parameters and iterables of iterables of floats.
if iterable of iterables of floats, the outer iterable must have
length m_features.
the method will make a grid of all the combinations of the parameters
and fit a GAM to each combination.
Returns
-------
if return_scores == True:
model_scores : dict
Contains each fitted model as keys and corresponding
objective scores as values
else:
self, ie possibly the newly fitted model
"""
y, weights = self._exposure_to_weights(y, exposure, weights)
return super(PoissonGAM, self).gridsearch(X, y,
weights=weights,
return_scores=return_scores,
keep_best=keep_best,
objective=objective,
**param_grids) | [
"def",
"gridsearch",
"(",
"self",
",",
"X",
",",
"y",
",",
"exposure",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"return_scores",
"=",
"False",
",",
"keep_best",
"=",
"True",
",",
"objective",
"=",
"'auto'",
",",
"*",
"*",
"param_grids",
")",
":",
"y",
",",
"weights",
"=",
"self",
".",
"_exposure_to_weights",
"(",
"y",
",",
"exposure",
",",
"weights",
")",
"return",
"super",
"(",
"PoissonGAM",
",",
"self",
")",
".",
"gridsearch",
"(",
"X",
",",
"y",
",",
"weights",
"=",
"weights",
",",
"return_scores",
"=",
"return_scores",
",",
"keep_best",
"=",
"keep_best",
",",
"objective",
"=",
"objective",
",",
"*",
"*",
"param_grids",
")"
] | 37.826667 | [
0.03636363636363636,
0.0821917808219178,
0.11764705882352941,
0.18181818181818182,
0.02531645569620253,
0,
0.15384615384615385,
0.02702702702702703,
0.058823529411764705,
0,
0.09375,
0,
0.030303030303030304,
0.10526315789473684,
0,
0.029850746268656716,
0.028985507246376812,
0.041666666666666664,
0,
0.1111111111111111,
0.1111111111111111,
0.17647058823529413,
0.07547169811320754,
0,
0.17647058823529413,
0.09523809523809523,
0,
0.056338028169014086,
0.0625,
0.043478260869565216,
0,
0.05714285714285714,
0.05405405405405406,
0.043478260869565216,
0,
0.06521739130434782,
0.06666666666666667,
0.0625,
0,
0.1111111111111111,
0.06382978723404255,
0.13043478260869565,
0,
0.06976744186046512,
0.0379746835443038,
0.0379746835443038,
0.05660377358490566,
0,
0.046875,
0.05263157894736842,
0.05172413793103448,
0,
0.04054054054054054,
0.10714285714285714,
0,
0.0379746835443038,
0.06818181818181818,
0,
0,
0.13333333333333333,
0.13333333333333333,
0.09090909090909091,
0.0967741935483871,
0.029411764705882353,
0.047619047619047616,
0.15384615384615385,
0.038461538461538464,
0.18181818181818182,
0.029411764705882353,
0.05454545454545454,
0.06060606060606061,
0.05128205128205128,
0.05714285714285714,
0.05714285714285714,
0.0625
] |
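A hedged usage sketch of the exposure-aware grid search above, assuming the pyGAM-style API the method belongs to; the data, shapes, and lambda grid are made up and mirror the documented default.

import numpy as np
from pygam import PoissonGAM   # assumed: the library this method belongs to

X = np.random.rand(100, 3)
y = np.random.poisson(lam=2.0, size=100)
exposure = np.ones(100)

# Search a lambda grid; by default the best model is kept on the GAM object.
gam = PoissonGAM().gridsearch(X, y, exposure=exposure,
                              lam=np.logspace(-3, 3, 11))
gam.summary()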
def bust_self(self, obj):
"""Remove the value that is being stored on `obj` for this
:class:`.cached_property`
object.
:param obj: The instance on which to bust the cache.
"""
if self.func.__name__ in obj.__dict__:
delattr(obj, self.func.__name__) | [
"def",
"bust_self",
"(",
"self",
",",
"obj",
")",
":",
"if",
"self",
".",
"func",
".",
"__name__",
"in",
"obj",
".",
"__dict__",
":",
"delattr",
"(",
"obj",
",",
"self",
".",
"func",
".",
"__name__",
")"
] | 33.333333 | [
0.04,
0.030303030303030304,
0.15151515151515152,
0.13333333333333333,
0,
0.05,
0.18181818181818182,
0.043478260869565216,
0.045454545454545456
] |
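To make the cache-busting behaviour concrete, here is a self-contained sketch with a simplified cached_property descriptor (the real class is richer); it shows the value being cached on the instance and recomputed after bust_self removes it.

class cached_property:
    # Simplified stand-in: store the computed value on the instance
    # under the wrapped function's name, which is exactly what
    # bust_self removes again.
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.func.__name__] = value
        return value

    def bust_self(self, obj):
        if self.func.__name__ in obj.__dict__:
            delattr(obj, self.func.__name__)

class Report:
    @cached_property
    def total(self):
        print("computing...")
        return 42

r = Report()
r.total                    # prints "computing...", caches 42
r.total                    # served from the instance dict, no recompute
Report.total.bust_self(r)  # bust the cache
r.total                    # prints "computing..." again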
async def execute(self, raise_on_error=True):
"Execute all the commands in the current pipeline"
stack = self.command_stack
if not stack:
return []
if self.scripts:
await self.load_scripts()
if self.transaction or self.explicit_transaction:
exec = self._execute_transaction
else:
exec = self._execute_pipeline
conn = self.connection
if not conn:
conn = self.connection_pool.get_connection()
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
try:
return await exec(conn, stack, raise_on_error)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
if not conn.retry_on_timeout and isinstance(e, TimeoutError):
raise
# if we were watching a variable, the watch is no longer valid
# since this connection has died. raise a WatchError, which
# indicates the user should retry his transaction. If this is more
# than a temporary failure, the WATCH that the user next issues
            # will fail, propagating the real ConnectionError
if self.watching:
raise WatchError("A ConnectionError occured on while watching "
"one or more keys")
# otherwise, it's safe to retry since the transaction isn't
# predicated on any state
return await exec(conn, stack, raise_on_error)
finally:
await self.reset() | [
"async",
"def",
"execute",
"(",
"self",
",",
"raise_on_error",
"=",
"True",
")",
":",
"stack",
"=",
"self",
".",
"command_stack",
"if",
"not",
"stack",
":",
"return",
"[",
"]",
"if",
"self",
".",
"scripts",
":",
"await",
"self",
".",
"load_scripts",
"(",
")",
"if",
"self",
".",
"transaction",
"or",
"self",
".",
"explicit_transaction",
":",
"exec",
"=",
"self",
".",
"_execute_transaction",
"else",
":",
"exec",
"=",
"self",
".",
"_execute_pipeline",
"conn",
"=",
"self",
".",
"connection",
"if",
"not",
"conn",
":",
"conn",
"=",
"self",
".",
"connection_pool",
".",
"get_connection",
"(",
")",
"# assign to self.connection so reset() releases the connection",
"# back to the pool after we're done",
"self",
".",
"connection",
"=",
"conn",
"try",
":",
"return",
"await",
"exec",
"(",
"conn",
",",
"stack",
",",
"raise_on_error",
")",
"except",
"(",
"ConnectionError",
",",
"TimeoutError",
")",
"as",
"e",
":",
"conn",
".",
"disconnect",
"(",
")",
"if",
"not",
"conn",
".",
"retry_on_timeout",
"and",
"isinstance",
"(",
"e",
",",
"TimeoutError",
")",
":",
"raise",
"# if we were watching a variable, the watch is no longer valid",
"# since this connection has died. raise a WatchError, which",
"# indicates the user should retry his transaction. If this is more",
"# than a temporary failure, the WATCH that the user next issues",
"# will fail, propegating the real ConnectionError",
"if",
"self",
".",
"watching",
":",
"raise",
"WatchError",
"(",
"\"A ConnectionError occured on while watching \"",
"\"one or more keys\"",
")",
"# otherwise, it's safe to retry since the transaction isn't",
"# predicated on any state",
"return",
"await",
"exec",
"(",
"conn",
",",
"stack",
",",
"raise_on_error",
")",
"finally",
":",
"await",
"self",
".",
"reset",
"(",
")"
] | 42.947368 | [
0.022222222222222223,
0.034482758620689655,
0.058823529411764705,
0.09523809523809523,
0.09523809523809523,
0.08333333333333333,
0.05405405405405406,
0.03508771929824561,
0.045454545454545456,
0.15384615384615385,
0.04878048780487805,
0,
0.06666666666666667,
0.1,
0.03571428571428571,
0.02702702702702703,
0.0425531914893617,
0.058823529411764705,
0,
0.16666666666666666,
0.034482758620689655,
0.038461538461538464,
0.06896551724137931,
0.0273972602739726,
0.09523809523809523,
0.02702702702702703,
0.028169014084507043,
0.02564102564102564,
0.02666666666666667,
0.03278688524590164,
0.06896551724137931,
0.0379746835443038,
0.07692307692307693,
0.028169014084507043,
0.05405405405405406,
0.034482758620689655,
0.125,
0.06666666666666667
] |
def adapter_type(self, adapter_type):
"""
Sets the adapter type for this VMware VM instance.
:param adapter_type: adapter type (string)
"""
self._adapter_type = adapter_type
log.info("VMware VM '{name}' [{id}]: adapter type changed to {adapter_type}".format(name=self.name,
id=self.id,
adapter_type=adapter_type)) | [
"def",
"adapter_type",
"(",
"self",
",",
"adapter_type",
")",
":",
"self",
".",
"_adapter_type",
"=",
"adapter_type",
"log",
".",
"info",
"(",
"\"VMware VM '{name}' [{id}]: adapter type changed to {adapter_type}\"",
".",
"format",
"(",
"name",
"=",
"self",
".",
"name",
",",
"id",
"=",
"self",
".",
"id",
",",
"adapter_type",
"=",
"adapter_type",
")",
")"
] | 48.818182 | [
0.02702702702702703,
0.18181818181818182,
0.034482758620689655,
0,
0.08,
0.18181818181818182,
0,
0.04878048780487805,
0.037383177570093455,
0.038834951456310676,
0.04201680672268908
] |
def get(cls, filename):
"""
Get the absolute path of a file added through C{SparkContext.addFile()}.
"""
path = os.path.join(SparkFiles.getRootDirectory(), filename)
return os.path.abspath(path) | [
"def",
"get",
"(",
"cls",
",",
"filename",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"SparkFiles",
".",
"getRootDirectory",
"(",
")",
",",
"filename",
")",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")"
] | 38.166667 | [
0.043478260869565216,
0.18181818181818182,
0.0375,
0.18181818181818182,
0.029411764705882353,
0.05555555555555555
] |
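Typical usage pairs this with SparkContext.addFile; a short sketch assuming a local PySpark session and a placeholder file path.

from pyspark import SparkContext, SparkFiles

sc = SparkContext(appName="sparkfiles-demo")
sc.addFile("/path/to/lookup.csv")   # placeholder path, distributed to executors

def count_lines(_):
    # On any worker, resolve the local copy by file name alone.
    with open(SparkFiles.get("lookup.csv")) as fh:
        return sum(1 for _ in fh)

print(sc.parallelize([0]).map(count_lines).collect())
sc.stop()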
def make_logger(scraper):
""" Create two log handlers, one to output info-level ouput to the
console, the other to store all logging in a JSON file which will
later be used to generate reports. """
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
json_handler = logging.FileHandler(log_path(scraper))
json_handler.setLevel(logging.DEBUG)
json_formatter = jsonlogger.JsonFormatter(make_json_format())
json_handler.setFormatter(json_formatter)
logger.addHandler(json_handler)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
fmt = '%(name)s [%(levelname)-8s]: %(message)s'
formatter = logging.Formatter(fmt)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger = logging.getLogger(scraper.name)
logger = TaskAdapter(logger, scraper)
return logger | [
"def",
"make_logger",
"(",
"scraper",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"''",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"requests_log",
"=",
"logging",
".",
"getLogger",
"(",
"\"requests\"",
")",
"requests_log",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")",
"json_handler",
"=",
"logging",
".",
"FileHandler",
"(",
"log_path",
"(",
"scraper",
")",
")",
"json_handler",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"json_formatter",
"=",
"jsonlogger",
".",
"JsonFormatter",
"(",
"make_json_format",
"(",
")",
")",
"json_handler",
".",
"setFormatter",
"(",
"json_formatter",
")",
"logger",
".",
"addHandler",
"(",
"json_handler",
")",
"console_handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"console_handler",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"fmt",
"=",
"'%(name)s [%(levelname)-8s]: %(message)s'",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"fmt",
")",
"console_handler",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
".",
"addHandler",
"(",
"console_handler",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"scraper",
".",
"name",
")",
"logger",
"=",
"TaskAdapter",
"(",
"logger",
",",
"scraper",
")",
"return",
"logger"
] | 35.740741 | [
0.04,
0.02857142857142857,
0.028985507246376812,
0.07142857142857142,
0,
0.058823529411764705,
0.058823529411764705,
0,
0.041666666666666664,
0.047619047619047616,
0,
0.03508771929824561,
0.05,
0.03076923076923077,
0.044444444444444446,
0.05714285714285714,
0,
0.044444444444444446,
0.047619047619047616,
0.0392156862745098,
0.05263157894736842,
0.046511627906976744,
0.05263157894736842,
0,
0.045454545454545456,
0.04878048780487805,
0.11764705882352941
] |
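The dual-handler setup above can be reproduced in isolation; this sketch assumes the python-json-logger package for the JSON formatter and swaps the scraper-specific log path for a plain file name.

import logging
from pythonjsonlogger import jsonlogger   # assumed: python-json-logger package

root = logging.getLogger('')
root.setLevel(logging.DEBUG)

# Everything, as JSON, for later report generation.
json_handler = logging.FileHandler('scraper.log.json')
json_handler.setLevel(logging.DEBUG)
json_handler.setFormatter(jsonlogger.JsonFormatter())
root.addHandler(json_handler)

# Info level and above on the console.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(name)s [%(levelname)-8s]: %(message)s'))
root.addHandler(console)

logging.getLogger('demo').info('this record reaches both handlers')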
def exec_command(self, command, bufsize=-1, get_pty=False):
"""
Execute a command in the connection
@param command: command to execute
@type command: str
@param bufsize: buffer size
@type bufsize: int
@param get_pty: get pty
@type get_pty: bool
@return: the stdin, stdout, and stderr of the executing command
@rtype: tuple(L{paramiko.ChannelFile}, L{paramiko.ChannelFile},
L{paramiko.ChannelFile})
@raise SSHException: if the server fails to execute the command
"""
self.last_command = command
return self.cli.exec_command(command, bufsize, get_pty=get_pty) | [
"def",
"exec_command",
"(",
"self",
",",
"command",
",",
"bufsize",
"=",
"-",
"1",
",",
"get_pty",
"=",
"False",
")",
":",
"self",
".",
"last_command",
"=",
"command",
"return",
"self",
".",
"cli",
".",
"exec_command",
"(",
"command",
",",
"bufsize",
",",
"get_pty",
"=",
"get_pty",
")"
] | 32.190476 | [
0.01694915254237288,
0.18181818181818182,
0.046511627906976744,
0,
0.047619047619047616,
0.07692307692307693,
0,
0.05714285714285714,
0.07692307692307693,
0,
0.06451612903225806,
0.07407407407407407,
0,
0.028169014084507043,
0.04225352112676056,
0.08695652173913043,
0,
0.028169014084507043,
0.18181818181818182,
0.05714285714285714,
0.028169014084507043
] |
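The wrapper above delegates to paramiko's SSHClient.exec_command; a hedged sketch of how the returned channel files are typically consumed (host and credentials are placeholders).

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('example.com', username='user', password='secret')  # placeholders

stdin, stdout, stderr = client.exec_command('uname -a')
print(stdout.read().decode())
print(stderr.read().decode())
client.close()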
def update_asset(self, name, label=""):
"""
Update asset metadata.
:rtype: github.GitReleaseAsset.GitReleaseAsset
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(label, (str, unicode)), label
post_parameters = {
"name": name,
"label": label
}
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
return GitReleaseAsset(self._requester, headers, data, completed=True) | [
"def",
"update_asset",
"(",
"self",
",",
"name",
",",
"label",
"=",
"\"\"",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"(",
"str",
",",
"unicode",
")",
")",
",",
"name",
"assert",
"isinstance",
"(",
"label",
",",
"(",
"str",
",",
"unicode",
")",
")",
",",
"label",
"post_parameters",
"=",
"{",
"\"name\"",
":",
"name",
",",
"\"label\"",
":",
"label",
"}",
"headers",
",",
"data",
"=",
"self",
".",
"_requester",
".",
"requestJsonAndCheck",
"(",
"\"PATCH\"",
",",
"self",
".",
"url",
",",
"input",
"=",
"post_parameters",
")",
"return",
"GitReleaseAsset",
"(",
"self",
".",
"_requester",
",",
"headers",
",",
"data",
",",
"completed",
"=",
"True",
")"
] | 33 | [
0.02564102564102564,
0.18181818181818182,
0.06666666666666667,
0.05555555555555555,
0.18181818181818182,
0.03773584905660377,
0.03636363636363636,
0.1111111111111111,
0.08,
0.07692307692307693,
0.3333333333333333,
0.05,
0.1,
0.09523809523809523,
0.09090909090909091,
0.3333333333333333,
0.02564102564102564
] |
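A hedged PyGithub usage sketch for the asset-renaming call above; the token, repository, tag, and file names are placeholders.

from github import Github

gh = Github("access-token")             # placeholder token
repo = gh.get_repo("owner/project")     # placeholder repository
release = repo.get_release("v1.0.0")    # placeholder tag
for asset in release.get_assets():
    if asset.name == "build.zip":
        asset.update_asset("project-1.0.0.zip", label="Release archive")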
def success(self, buf, newline=True):
""" Same as `write`, but adds success coloring if enabled.
`buf`
Data buffer to write.
`newline`
Append newline character to buffer before writing.
"""
if self._colored:
buf = self.ESCAPE_GREEN + buf + self.ESCAPE_CLEAR
self.write(buf, newline) | [
"def",
"success",
"(",
"self",
",",
"buf",
",",
"newline",
"=",
"True",
")",
":",
"if",
"self",
".",
"_colored",
":",
"buf",
"=",
"self",
".",
"ESCAPE_GREEN",
"+",
"buf",
"+",
"self",
".",
"ESCAPE_CLEAR",
"self",
".",
"write",
"(",
"buf",
",",
"newline",
")"
] | 29 | [
0.02702702702702703,
0.030303030303030304,
0,
0.17647058823529413,
0.05405405405405406,
0.14285714285714285,
0.030303030303030304,
0.13333333333333333,
0,
0.08,
0.03278688524590164,
0,
0.0625
] |
def filter_pages(pages, pagenum, pagename):
""" Choices pages by pagenum and pagename """
if pagenum:
try:
pages = [list(pages)[pagenum - 1]]
except IndexError:
raise IndexError('Invalid page number: %d' % pagenum)
if pagename:
pages = [page for page in pages if page.name == pagename]
if pages == []:
raise IndexError('Page not found: pagename=%s' % pagename)
return pages | [
"def",
"filter_pages",
"(",
"pages",
",",
"pagenum",
",",
"pagename",
")",
":",
"if",
"pagenum",
":",
"try",
":",
"pages",
"=",
"[",
"list",
"(",
"pages",
")",
"[",
"pagenum",
"-",
"1",
"]",
"]",
"except",
"IndexError",
":",
"raise",
"IndexError",
"(",
"'Invalid page number: %d'",
"%",
"pagenum",
")",
"if",
"pagename",
":",
"pages",
"=",
"[",
"page",
"for",
"page",
"in",
"pages",
"if",
"page",
".",
"name",
"==",
"pagename",
"]",
"if",
"pages",
"==",
"[",
"]",
":",
"raise",
"IndexError",
"(",
"'Page not found: pagename=%s'",
"%",
"pagename",
")",
"return",
"pages"
] | 31.857143 | [
0.023255813953488372,
0.04081632653061224,
0.13333333333333333,
0.16666666666666666,
0.043478260869565216,
0.07692307692307693,
0.03076923076923077,
0,
0.125,
0.03076923076923077,
0.08695652173913043,
0.02857142857142857,
0,
0.125
] |
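A small usage sketch, assuming filter_pages is importable from its module; the page objects are hypothetical stand-ins exposing only the name attribute the function reads.

from collections import namedtuple

Page = namedtuple('Page', 'name')   # hypothetical minimal page object
pages = [Page('cover'), Page('intro'), Page('summary')]

print(filter_pages(pages, pagenum=2, pagename=None))        # [Page(name='intro')]
print(filter_pages(pages, pagenum=None, pagename='cover'))  # [Page(name='cover')]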
def query_ssos(self, target_name, lunation_count=None):
"""Send a query to the SSOS web service, looking for available observations using the given track.
:param target_name: name of target to query against SSOIS db
:param lunation_count: ignored
:rtype: SSOSData
"""
# we observe ~ a week either side of new moon
# but we don't know when in the dark run the discovery happened
# so be generous with the search boundaries, add extra 2 weeks
# current date just has to be the night of the triplet,
from mp_ephem import horizons
search_start_date = Time('1999-01-01', scale='utc')
search_end_date = Time(datetime.datetime.now().strftime('%Y-%m-%d'), scale='utc')
logger.info("Sending query to SSOS start_date: {} end_data: {}\n".format(search_start_date, search_end_date))
query = Query(target_name,
search_start_date=search_start_date,
search_end_date=search_end_date)
logger.debug("Parsing query results...")
tracks_data = self.ssos_parser.parse(query.get())
tracks_data.mpc_observations = {}
start_time = Time(search_start_date)
stop_time = Time(search_end_date)
step_size = 5 * units.hour
self.orbit = horizons.Body(target_name, start_time, stop_time, step_size)
ref_sky_coord = None
for source in tracks_data.get_sources():
astrom_observations = tracks_data.observations
source_readings = source.get_readings()
for idx in range(len(source_readings)):
source_reading = source_readings[idx]
assert isinstance(source_reading, SourceReading)
if ref_sky_coord is None or source_reading.sky_coord.separation(ref_sky_coord) > 40 * units.arcsec:
ref_sky_coord = source_reading.sky_coord
source_reading.reference_sky_coord = ref_sky_coord
astrom_observation = astrom_observations[idx]
self.orbit.predict(Time(astrom_observation.mjd, format='mjd', scale='utc'))
source_reading.pa = self.orbit.pa
# why are these being recorded just in pixels? Because the error ellipse is drawn in pixels.
# TODO: Modify error ellipse drawing routine to use WCS but be sure
# that this does not cause trouble with the use of dra/ddec for cutout computer
source_reading.dx = self.orbit.dra
source_reading.dy = self.orbit.ddec
logger.debug("Sending back set of observations that might contain the target: {}".format(tracks_data))
return tracks_data | [
"def",
"query_ssos",
"(",
"self",
",",
"target_name",
",",
"lunation_count",
"=",
"None",
")",
":",
"# we observe ~ a week either side of new moon",
"# but we don't know when in the dark run the discovery happened",
"# so be generous with the search boundaries, add extra 2 weeks",
"# current date just has to be the night of the triplet,",
"from",
"mp_ephem",
"import",
"horizons",
"search_start_date",
"=",
"Time",
"(",
"'1999-01-01'",
",",
"scale",
"=",
"'utc'",
")",
"search_end_date",
"=",
"Time",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%Y-%m-%d'",
")",
",",
"scale",
"=",
"'utc'",
")",
"logger",
".",
"info",
"(",
"\"Sending query to SSOS start_date: {} end_data: {}\\n\"",
".",
"format",
"(",
"search_start_date",
",",
"search_end_date",
")",
")",
"query",
"=",
"Query",
"(",
"target_name",
",",
"search_start_date",
"=",
"search_start_date",
",",
"search_end_date",
"=",
"search_end_date",
")",
"logger",
".",
"debug",
"(",
"\"Parsing query results...\"",
")",
"tracks_data",
"=",
"self",
".",
"ssos_parser",
".",
"parse",
"(",
"query",
".",
"get",
"(",
")",
")",
"tracks_data",
".",
"mpc_observations",
"=",
"{",
"}",
"start_time",
"=",
"Time",
"(",
"search_start_date",
")",
"stop_time",
"=",
"Time",
"(",
"search_end_date",
")",
"step_size",
"=",
"5",
"*",
"units",
".",
"hour",
"self",
".",
"orbit",
"=",
"horizons",
".",
"Body",
"(",
"target_name",
",",
"start_time",
",",
"stop_time",
",",
"step_size",
")",
"ref_sky_coord",
"=",
"None",
"for",
"source",
"in",
"tracks_data",
".",
"get_sources",
"(",
")",
":",
"astrom_observations",
"=",
"tracks_data",
".",
"observations",
"source_readings",
"=",
"source",
".",
"get_readings",
"(",
")",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"source_readings",
")",
")",
":",
"source_reading",
"=",
"source_readings",
"[",
"idx",
"]",
"assert",
"isinstance",
"(",
"source_reading",
",",
"SourceReading",
")",
"if",
"ref_sky_coord",
"is",
"None",
"or",
"source_reading",
".",
"sky_coord",
".",
"separation",
"(",
"ref_sky_coord",
")",
">",
"40",
"*",
"units",
".",
"arcsec",
":",
"ref_sky_coord",
"=",
"source_reading",
".",
"sky_coord",
"source_reading",
".",
"reference_sky_coord",
"=",
"ref_sky_coord",
"astrom_observation",
"=",
"astrom_observations",
"[",
"idx",
"]",
"self",
".",
"orbit",
".",
"predict",
"(",
"Time",
"(",
"astrom_observation",
".",
"mjd",
",",
"format",
"=",
"'mjd'",
",",
"scale",
"=",
"'utc'",
")",
")",
"source_reading",
".",
"pa",
"=",
"self",
".",
"orbit",
".",
"pa",
"# why are these being recorded just in pixels? Because the error ellipse is drawn in pixels.",
"# TODO: Modify error ellipse drawing routine to use WCS but be sure",
"# that this does not cause trouble with the use of dra/ddec for cutout computer",
"source_reading",
".",
"dx",
"=",
"self",
".",
"orbit",
".",
"dra",
"source_reading",
".",
"dy",
"=",
"self",
".",
"orbit",
".",
"ddec",
"logger",
".",
"debug",
"(",
"\"Sending back set of observations that might contain the target: {}\"",
".",
"format",
"(",
"tracks_data",
")",
")",
"return",
"tracks_data"
] | 52.392157 | [
0.01818181818181818,
0.02830188679245283,
0,
0.04411764705882353,
0.07894736842105263,
0.125,
0.18181818181818182,
0,
0.03773584905660377,
0.028169014084507043,
0.02857142857142857,
0.031746031746031744,
0.05405405405405406,
0.03389830508474576,
0.033707865168539325,
0.02564102564102564,
0.08823529411764706,
0.06896551724137931,
0.09259259259259259,
0,
0.041666666666666664,
0.03508771929824561,
0,
0.04878048780487805,
0,
0.045454545454545456,
0.04878048780487805,
0.058823529411764705,
0.037037037037037035,
0,
0.07142857142857142,
0,
0.041666666666666664,
0.034482758620689655,
0.0392156862745098,
0.0392156862745098,
0.03773584905660377,
0.03125,
0.02608695652173913,
0.03333333333333333,
0.030303030303030304,
0.03278688524590164,
0.03296703296703297,
0.04081632653061224,
0.027522935779816515,
0.03614457831325301,
0.031578947368421054,
0.04,
0.0392156862745098,
0.02727272727272727,
0.07692307692307693
] |
def loadItems(self, excludeRead=False, loadLimit=20, since=None, until=None):
"""
        Load items and call itemsLoadedDone to transform data into objects
"""
self.clearItems()
self.loadtLoadOk = False
self.lastLoadLength = 0
self._itemsLoadedDone(self._getContent(excludeRead, None, loadLimit, since, until)) | [
"def",
"loadItems",
"(",
"self",
",",
"excludeRead",
"=",
"False",
",",
"loadLimit",
"=",
"20",
",",
"since",
"=",
"None",
",",
"until",
"=",
"None",
")",
":",
"self",
".",
"clearItems",
"(",
")",
"self",
".",
"loadtLoadOk",
"=",
"False",
"self",
".",
"lastLoadLength",
"=",
"0",
"self",
".",
"_itemsLoadedDone",
"(",
"self",
".",
"_getContent",
"(",
"excludeRead",
",",
"None",
",",
"loadLimit",
",",
"since",
",",
"until",
")",
")"
] | 44.125 | [
0.012987012987012988,
0.18181818181818182,
0.027777777777777776,
0.18181818181818182,
0.08,
0.08571428571428572,
0.06451612903225806,
0.03296703296703297
] |
def _build_request(request):
"""Build message to transfer over the socket from a request."""
msg = bytes([request['cmd']])
if 'dest' in request:
msg += bytes([request['dest']])
else:
msg += b'\0'
if 'sha' in request:
msg += request['sha']
else:
for dummy in range(64):
msg += b'0'
logging.debug("Request (%d): %s", len(msg), msg)
return msg | [
"def",
"_build_request",
"(",
"request",
")",
":",
"msg",
"=",
"bytes",
"(",
"[",
"request",
"[",
"'cmd'",
"]",
"]",
")",
"if",
"'dest'",
"in",
"request",
":",
"msg",
"+=",
"bytes",
"(",
"[",
"request",
"[",
"'dest'",
"]",
"]",
")",
"else",
":",
"msg",
"+=",
"b'\\0'",
"if",
"'sha'",
"in",
"request",
":",
"msg",
"+=",
"request",
"[",
"'sha'",
"]",
"else",
":",
"for",
"dummy",
"in",
"range",
"(",
"64",
")",
":",
"msg",
"+=",
"b'0'",
"logging",
".",
"debug",
"(",
"\"Request (%d): %s\"",
",",
"len",
"(",
"msg",
")",
",",
"msg",
")",
"return",
"msg"
] | 28.785714 | [
0.03571428571428571,
0.029850746268656716,
0.06060606060606061,
0.08,
0.05128205128205128,
0.2222222222222222,
0.1,
0.08333333333333333,
0.06896551724137931,
0.2222222222222222,
0.06451612903225806,
0.08695652173913043,
0.038461538461538464,
0.14285714285714285
] |
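The wire format built above is fixed-size: one command byte, one destination byte (or a NUL), then either the 64-byte SHA or a 64-character placeholder run. A quick sketch, assuming the helper is importable from its module:

msg = _build_request({'cmd': 1, 'dest': 3})
print(len(msg))   # 66 = 1 cmd byte + 1 dest byte + 64 placeholder '0' bytes
print(msg[:2])    # b'\x01\x03'

# With a 'sha' entry, the 64-byte digest replaces the placeholder run.
msg = _build_request({'cmd': 2, 'sha': b'a' * 64})
print(len(msg))   # 66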
def build_template(self, template, template_file, package):
"""
Compile the cheetah template in src into a python file in build
"""
try:
from Cheetah.Compiler import Compiler
except ImportError:
self.announce("unable to import Cheetah.Compiler, build failed")
raise
else:
comp = Compiler(file=template_file, moduleName=template)
# load configuration if it exists
conf_fn = DEFAULT_CONFIG
if exists(conf_fn):
with open(conf_fn, "rt") as config:
comp.updateSettingsFromConfigFileObj(config)
# and just why can't I configure these?
comp.setShBang("")
comp.addModuleHeader("pylint: disable=C,W,R,F")
outfd = join(self.build_lib, *package.split("."))
outfn = join(outfd, template + ".py")
if not exists(outfd):
makedirs(outfd)
if newer(template_file, outfn):
self.announce("compiling %s -> %s" % (template_file, outfd), 2)
with open(outfn, "w") as output:
output.write(str(comp)) | [
"def",
"build_template",
"(",
"self",
",",
"template",
",",
"template_file",
",",
"package",
")",
":",
"try",
":",
"from",
"Cheetah",
".",
"Compiler",
"import",
"Compiler",
"except",
"ImportError",
":",
"self",
".",
"announce",
"(",
"\"unable to import Cheetah.Compiler, build failed\"",
")",
"raise",
"else",
":",
"comp",
"=",
"Compiler",
"(",
"file",
"=",
"template_file",
",",
"moduleName",
"=",
"template",
")",
"# load configuration if it exists",
"conf_fn",
"=",
"DEFAULT_CONFIG",
"if",
"exists",
"(",
"conf_fn",
")",
":",
"with",
"open",
"(",
"conf_fn",
",",
"\"rt\"",
")",
"as",
"config",
":",
"comp",
".",
"updateSettingsFromConfigFileObj",
"(",
"config",
")",
"# and just why can't I configure these?",
"comp",
".",
"setShBang",
"(",
"\"\"",
")",
"comp",
".",
"addModuleHeader",
"(",
"\"pylint: disable=C,W,R,F\"",
")",
"outfd",
"=",
"join",
"(",
"self",
".",
"build_lib",
",",
"*",
"package",
".",
"split",
"(",
"\".\"",
")",
")",
"outfn",
"=",
"join",
"(",
"outfd",
",",
"template",
"+",
"\".py\"",
")",
"if",
"not",
"exists",
"(",
"outfd",
")",
":",
"makedirs",
"(",
"outfd",
")",
"if",
"newer",
"(",
"template_file",
",",
"outfn",
")",
":",
"self",
".",
"announce",
"(",
"\"compiling %s -> %s\"",
"%",
"(",
"template_file",
",",
"outfd",
")",
",",
"2",
")",
"with",
"open",
"(",
"outfn",
",",
"\"w\"",
")",
"as",
"output",
":",
"output",
".",
"write",
"(",
"str",
"(",
"comp",
")",
")"
] | 33.454545 | [
0.01694915254237288,
0.18181818181818182,
0.028169014084507043,
0.18181818181818182,
0,
0.16666666666666666,
0.04081632653061224,
0.07407407407407407,
0.02631578947368421,
0.11764705882352941,
0.15384615384615385,
0.029411764705882353,
0,
0.04878048780487805,
0.0625,
0.07407407407407407,
0.0425531914893617,
0.03333333333333333,
0,
0.0425531914893617,
0.07692307692307693,
0.03636363636363636,
0,
0.03508771929824561,
0.044444444444444446,
0,
0.06896551724137931,
0.07407407407407407,
0,
0.05128205128205128,
0.02666666666666667,
0.045454545454545456,
0.05128205128205128
] |