def tag_create(self, label, instances=None, domains=None, nodebalancers=None,
               volumes=None, entities=None):
    """
    Creates a new Tag and optionally applies it to the given entities.

    :param label: The label for the new Tag
    :type label: str
    :param entities: A list of objects to apply this Tag to upon creation.
                     May only be taggable types (Linode Instances, Domains,
                     NodeBalancers, or Volumes).  These are applied *in addition
                     to* any IDs specified with ``instances``, ``domains``,
                     ``nodebalancers``, or ``volumes``, and this parameter is a
                     convenience for sending multiple entity types without
                     sorting them yourself.
    :type entities: list of Instance, Domain, NodeBalancer, and/or Volume
    :param instances: A list of Linode Instances to apply this Tag to upon
                      creation
    :type instances: list of Instance or list of int
    :param domains: A list of Domains to apply this Tag to upon creation
    :type domains: list of Domain or list of int
    :param nodebalancers: A list of NodeBalancers to apply this Tag to upon
                          creation
    :type nodebalancers: list of NodeBalancer or list of int
    :param volumes: A list of Volumes to apply this Tag to upon creation
    :type volumes: list of Volume or list of int

    :returns: The new Tag
    :rtype: Tag
    """
    # avoid a mutable default argument
    if entities is None:
        entities = []

    linode_ids, nodebalancer_ids, domain_ids, volume_ids = [], [], [], []

    # filter input into lists of ids
    sorter = zip((linode_ids, nodebalancer_ids, domain_ids, volume_ids),
                 (instances, nodebalancers, domains, volumes))

    for id_list, input_list in sorter:
        # if we got something, we need to find its ID
        if input_list is not None:
            for cur in input_list:
                if isinstance(cur, int):
                    id_list.append(cur)
                else:
                    id_list.append(cur.id)

    # filter entities into id lists too
    type_map = {
        Instance: linode_ids,
        NodeBalancer: nodebalancer_ids,
        Domain: domain_ids,
        Volume: volume_ids,
    }

    for e in entities:
        if type(e) in type_map:
            type_map[type(e)].append(e.id)
        else:
            raise ValueError('Unsupported entity type {}'.format(type(e)))

    # finally, omit all id lists that are empty
    params = {
        'label': label,
        'linodes': linode_ids or None,
        'nodebalancers': nodebalancer_ids or None,
        'domains': domain_ids or None,
        'volumes': volume_ids or None,
    }

    result = self.post('/tags', data=params)

    if 'label' not in result:
        raise UnexpectedResponseError('Unexpected response when creating Tag!', json=result)

    return Tag(self, result['label'], result)

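# A minimal usage sketch (hypothetical): `client` is assumed to be an
# authenticated Linode-style API client exposing tag_create(), and `inst`
# an existing Instance object; the ID and label below are illustrative only.
tag = client.tag_create('production', instances=[12345], entities=[inst])
print(tag.label)
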
"def",
"tag_create",
"(",
"self",
",",
"label",
",",
"instances",
"=",
"None",
",",
"domains",
"=",
"None",
",",
"nodebalancers",
"=",
"None",
",",
"volumes",
"=",
"None",
",",
"entities",
"=",
"[",
"]",
")",
":",
"linode_ids",
",",
"nodebalancer_ids",
",",
"domain_ids",
",",
"volume_ids",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"# filter input into lists of ids",
"sorter",
"=",
"zip",
"(",
"(",
"linode_ids",
",",
"nodebalancer_ids",
",",
"domain_ids",
",",
"volume_ids",
")",
",",
"(",
"instances",
",",
"nodebalancers",
",",
"domains",
",",
"volumes",
")",
")",
"for",
"id_list",
",",
"input_list",
"in",
"sorter",
":",
"# if we got something, we need to find its ID",
"if",
"input_list",
"is",
"not",
"None",
":",
"for",
"cur",
"in",
"input_list",
":",
"if",
"isinstance",
"(",
"cur",
",",
"int",
")",
":",
"id_list",
".",
"append",
"(",
"cur",
")",
"else",
":",
"id_list",
".",
"append",
"(",
"cur",
".",
"id",
")",
"# filter entities into id lists too",
"type_map",
"=",
"{",
"Instance",
":",
"linode_ids",
",",
"NodeBalancer",
":",
"nodebalancer_ids",
",",
"Domain",
":",
"domain_ids",
",",
"Volume",
":",
"volume_ids",
",",
"}",
"for",
"e",
"in",
"entities",
":",
"if",
"type",
"(",
"e",
")",
"in",
"type_map",
":",
"type_map",
"[",
"type",
"(",
"e",
")",
"]",
".",
"append",
"(",
"e",
".",
"id",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported entity type {}'",
".",
"format",
"(",
"type",
"(",
"e",
")",
")",
")",
"# finally, omit all id lists that are empty",
"params",
"=",
"{",
"'label'",
":",
"label",
",",
"'linodes'",
":",
"linode_ids",
"or",
"None",
",",
"'nodebalancers'",
":",
"nodebalancer_ids",
"or",
"None",
",",
"'domains'",
":",
"domain_ids",
"or",
"None",
",",
"'volumes'",
":",
"volume_ids",
"or",
"None",
",",
"}",
"result",
"=",
"self",
".",
"post",
"(",
"'/tags'",
",",
"data",
"=",
"params",
")",
"if",
"not",
"'label'",
"in",
"result",
":",
"raise",
"UnexpectedResponseError",
"(",
"'Unexpected response when creating Tag!'",
",",
"json",
"=",
"result",
")",
"t",
"=",
"Tag",
"(",
"self",
",",
"result",
"[",
"'label'",
"]",
",",
"result",
")",
"return",
"t"
] | 40.789474 | [
0.025974025974025976,
0.13043478260869565,
0.18181818181818182,
0.02702702702702703,
0,
0.06382978723404255,
0.125,
0.038461538461538464,
0.06329113924050633,
0.07142857142857142,
0.06329113924050633,
0.0625,
0.0379746835443038,
0.08823529411764706,
0.06493506493506493,
0.038461538461538464,
0.0625,
0.05357142857142857,
0.04477611940298507,
0.0625,
0.057692307692307696,
0.0379746835443038,
0.0625,
0.046875,
0.04477611940298507,
0.0625,
0.05660377358490566,
0,
0.10344827586206896,
0.15789473684210525,
0.18181818181818182,
0.025974025974025976,
0,
0.05,
0.039473684210526314,
0.06060606060606061,
0,
0.047619047619047616,
0.03508771929824561,
0.05263157894736842,
0.05263157894736842,
0.045454545454545456,
0.046511627906976744,
0.08,
0.043478260869565216,
0,
0.046511627906976744,
0.15,
0.06060606060606061,
0.046511627906976744,
0.06451612903225806,
0.06451612903225806,
0.3333333333333333,
0,
0.07692307692307693,
0.05714285714285714,
0.043478260869565216,
0.11764705882352941,
0.02564102564102564,
0,
0.0392156862745098,
0.16666666666666666,
0.07407407407407407,
0.047619047619047616,
0.037037037037037035,
0.047619047619047616,
0.047619047619047616,
0.3333333333333333,
0,
0.041666666666666664,
0,
0.09090909090909091,
0.03125,
0,
0.043478260869565216,
0.125
] |
def is_valid_rgb_color(value):
    """Checks whether the value is a valid rgb or rgba color string.

    Valid colors consist of:
    - rgb(255, 255, 255)
    - rgba(23, 34, 45, .5)
    """
    if not value:
        return False
    regex = re.compile(RGB_COLOR_REGEX)
    return bool(regex.match(value))

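# A runnable sketch with a hypothetical RGB_COLOR_REGEX; the real pattern in
# the surrounding module may differ.
import re

RGB_COLOR_REGEX = (r'^rgba?\(\s*\d{1,3}\s*,\s*\d{1,3}\s*,\s*\d{1,3}'
                   r'\s*(,\s*(0?\.\d+|0|1))?\s*\)$')

assert is_valid_rgb_color('rgb(255, 255, 255)')
assert is_valid_rgb_color('rgba(23, 34, 45, .5)')
assert not is_valid_rgb_color('')
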
"def",
"is_valid_rgb_color",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"False",
"regex",
"=",
"re",
".",
"compile",
"(",
"RGB_COLOR_REGEX",
")",
"return",
"bool",
"(",
"regex",
".",
"match",
"(",
"value",
")",
")"
] | 22.615385 | [
0.03333333333333333,
0.029411764705882353,
0,
0.07142857142857142,
0,
0.08333333333333333,
0.07692307692307693,
0.2857142857142857,
0.11764705882352941,
0.1,
0,
0.05128205128205128,
0.05714285714285714
] |
def choose_labels(alternatives):
    """
    Prompt the user to select several labels from the provided alternatives.

    At least one label must be selected.

    :param list alternatives: Sequence of options that are available to select from
    :return: Several selected labels
    """
    if not alternatives:
        raise ValueError
    if not isinstance(alternatives, list):
        raise TypeError

    choice_map = OrderedDict(
        ('{}'.format(i), value) for i, value in enumerate(alternatives, 1)
    )
    # prepend a termination option
    input_terminator = '0'
    choice_map.update({input_terminator: '<done>'})
    choice_map.move_to_end('0', last=False)

    choice_indexes = choice_map.keys()

    choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
    prompt = '\n'.join(
        (
            'Select labels:',
            '\n'.join(choice_lines),
            'Choose from {}'.format(', '.join(choice_indexes)),
        )
    )

    user_choices = set()
    user_choice = None

    while user_choice != input_terminator:
        if user_choices:
            note('Selected labels: [{}]'.format(', '.join(user_choices)))

        user_choice = click.prompt(
            prompt, type=click.Choice(choice_indexes), default=input_terminator
        )
        done = user_choice == input_terminator
        new_selection = user_choice not in user_choices
        nothing_selected = not user_choices

        if not done and new_selection:
            user_choices.add(choice_map[user_choice])

        if done and nothing_selected:
            error('Please select at least one label')
            user_choice = None

    return user_choices

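# A minimal interactive usage sketch (hypothetical console helpers): `note`
# and `error` are assumed to be simple print-style functions in the
# surrounding module.
labels = choose_labels(['bug', 'enhancement', 'documentation'])
print('Selected:', ', '.join(sorted(labels)))
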
"def",
"choose_labels",
"(",
"alternatives",
")",
":",
"if",
"not",
"alternatives",
":",
"raise",
"ValueError",
"if",
"not",
"isinstance",
"(",
"alternatives",
",",
"list",
")",
":",
"raise",
"TypeError",
"choice_map",
"=",
"OrderedDict",
"(",
"(",
"'{}'",
".",
"format",
"(",
"i",
")",
",",
"value",
")",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"alternatives",
",",
"1",
")",
")",
"# prepend a termination option",
"input_terminator",
"=",
"'0'",
"choice_map",
".",
"update",
"(",
"{",
"input_terminator",
":",
"'<done>'",
"}",
")",
"choice_map",
".",
"move_to_end",
"(",
"'0'",
",",
"last",
"=",
"False",
")",
"choice_indexes",
"=",
"choice_map",
".",
"keys",
"(",
")",
"choice_lines",
"=",
"[",
"'{} - {}'",
".",
"format",
"(",
"*",
"c",
")",
"for",
"c",
"in",
"choice_map",
".",
"items",
"(",
")",
"]",
"prompt",
"=",
"'\\n'",
".",
"join",
"(",
"(",
"'Select labels:'",
",",
"'\\n'",
".",
"join",
"(",
"choice_lines",
")",
",",
"'Choose from {}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"choice_indexes",
")",
")",
",",
")",
")",
"user_choices",
"=",
"set",
"(",
")",
"user_choice",
"=",
"None",
"while",
"not",
"user_choice",
"==",
"input_terminator",
":",
"if",
"user_choices",
":",
"note",
"(",
"'Selected labels: [{}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"user_choices",
")",
")",
")",
"user_choice",
"=",
"click",
".",
"prompt",
"(",
"prompt",
",",
"type",
"=",
"click",
".",
"Choice",
"(",
"choice_indexes",
")",
",",
"default",
"=",
"input_terminator",
")",
"done",
"=",
"user_choice",
"==",
"input_terminator",
"new_selection",
"=",
"user_choice",
"not",
"in",
"user_choices",
"nothing_selected",
"=",
"not",
"user_choices",
"if",
"not",
"done",
"and",
"new_selection",
":",
"user_choices",
".",
"add",
"(",
"choice_map",
"[",
"user_choice",
"]",
")",
"if",
"done",
"and",
"nothing_selected",
":",
"error",
"(",
"'Please select at least one label'",
")",
"user_choice",
"=",
"None",
"return",
"user_choices"
] | 29 | [
0.03125,
0.2857142857142857,
0.0273972602739726,
0,
0.05,
0,
0.04819277108433735,
0.08333333333333333,
0.2857142857142857,
0.08333333333333333,
0.08333333333333333,
0,
0.047619047619047616,
0.08695652173913043,
0,
0.10344827586206896,
0.02702702702702703,
0.6,
0.058823529411764705,
0.07692307692307693,
0.0392156862745098,
0.046511627906976744,
0,
0.05263157894736842,
0,
0.028985507246376812,
0.13043478260869565,
0.3333333333333333,
0.06896551724137931,
0.05555555555555555,
0.031746031746031744,
0.3333333333333333,
0.6,
0,
0.08333333333333333,
0.09090909090909091,
0,
0.043478260869565216,
0.08333333333333333,
0.0273972602739726,
0,
0.08571428571428572,
0.05063291139240506,
0.3333333333333333,
0.043478260869565216,
0.03636363636363636,
0.046511627906976744,
0,
0.05263157894736842,
0.03773584905660377,
0,
0.05405405405405406,
0.03773584905660377,
0.06666666666666667,
0,
0.08695652173913043
] |
def get_price_target(self, **kwargs):
    """Price Target

    Provides the latest avg, high, and low analyst price target for a
    symbol.

    Reference: https://iexcloud.io/docs/api/#price-target

    Data Weighting: ``500`` per symbol

    .. warning:: This endpoint is only available using IEX Cloud. See
        :ref:`Migrating` for more information.

    Returns
    -------
    dict or pandas.DataFrame
        Latest average, high, and low price targets for a symbol
    """
    def fmt_p(out):
        if len(self.symbols) == 1:
            return pd.DataFrame(out, index=self.symbols[0])
        return pd.DataFrame(out)

    return self._get_endpoint('price-target', params=kwargs)

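# A minimal usage sketch (hypothetical iexfinance-style reader): the method
# is assumed to hang off a Stock-like object with `symbols` set; the token
# below is a placeholder.
aapl = Stock('AAPL', token='<IEX_TOKEN>')
print(aapl.get_price_target())
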
"def",
"get_price_target",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"fmt_p",
"(",
"out",
")",
":",
"if",
"len",
"(",
"self",
".",
"symbols",
")",
"==",
"1",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"out",
",",
"index",
"=",
"self",
".",
"symbols",
"[",
"0",
"]",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"out",
")",
"return",
"self",
".",
"_get_endpoint",
"(",
"'price-target'",
",",
"params",
"=",
"kwargs",
")"
] | 31.958333 | [
0.02631578947368421,
0.041666666666666664,
1,
0.013513513513513514,
0.0625,
1,
0.06451612903225806,
1,
0.046511627906976744,
1,
0.02702702702702703,
0.08928571428571429,
1,
0.0625,
0.0625,
0.030303030303030304,
0.014492753623188406,
0.08333333333333333,
0.08333333333333333,
0.07692307692307693,
0.015625,
0.02702702702702703,
1,
0.03125
] |
def set_docstring(self, loc, tokens):
    """Set the docstring."""
    internal_assert(len(tokens) == 2, "invalid docstring tokens", tokens)
    self.docstring = self.reformat(tokens[0]) + "\n\n"
    return tokens[1]

"def",
"set_docstring",
"(",
"self",
",",
"loc",
",",
"tokens",
")",
":",
"internal_assert",
"(",
"len",
"(",
"tokens",
")",
"==",
"2",
",",
"\"invalid docstring tokens\"",
",",
"tokens",
")",
"self",
".",
"docstring",
"=",
"self",
".",
"reformat",
"(",
"tokens",
"[",
"0",
"]",
")",
"+",
"\"\\n\\n\"",
"return",
"tokens",
"[",
"1",
"]"
] | 45.6 | [
0.02702702702702703,
0.0625,
0.025974025974025976,
0.034482758620689655,
0.08333333333333333
] |
def __make_request(self, request, opt, cache_key=None, method=None):
    """ Check the cache, deal with expiration and etag, make the request and
    return the response or the cached response.

    :param request: the pyswagger.io.Request object to prepare the request
    :param opt: options, see pyswagger/blob/master/pyswagger/io.py#L144
    :param cache_key: the cache key used for the cache stuff.
    :param method: [default:None] allows forcing the method, especially
        useful if you want to make a HEAD request.
        Default value will use the endpoint method
    """
    # check expiration and etags
    opt_headers = {}
    cached_response = self.cache.get(cache_key, None)
    if cached_response is not None:
        # if we have expires cached, and still valid
        expires = cached_response.headers.get('expires', None)
        if expires is not None:
            cache_timeout = get_cache_time_left(
                cached_response.headers['expires']
            )
            if cache_timeout >= 0:
                return cached_response

        # if we have etags, add the header to use them
        etag = cached_response.headers.get('etag', None)
        if etag is not None:
            opt_headers['If-None-Match'] = etag

        # if nothing makes us use the cache, invalidate everything
        if (expires is None or cache_timeout < 0) and etag is None:
            self.cache.invalidate(cache_key)

    # apply request-related options before preparation.
    request.prepare(
        scheme=self.prepare_schemes(request).pop(),
        handle_files=False
    )
    request._patch(opt)

    # prepare the request and make it.
    method = method or request.method.upper()
    request.header.update(opt_headers)
    prepared_request = self._session.prepare_request(
        Request(
            method=method,
            url=request.url,
            params=request.query,
            data=request.data,
            headers=request.header
        )
    )

    start_api_call = time.time()
    try:
        res = self._session.send(
            prepared_request,
            timeout=self.timeout,
        )
    except (RequestsConnectionError, Timeout) as exc:
        # timeout issue, generate a fake response to finish the process
        # as a normal error 500
        res = CachedResponse(
            status_code=500,
            headers={},
            content=('{"error": "%s"}' % str(exc)).encode('latin-1'),
            url=prepared_request.url
        )

    # event for api call stats
    self.signal_api_call_stats.send(
        url=res.url,
        status_code=res.status_code,
        elapsed_time=time.time() - start_api_call,
        message=res.content if res.status_code != 200 else None
    )

    # if we have HTTP 304 (content didn't change), return the cached
    # response updated with the new headers
    if res.status_code == 304 and cached_response is not None:
        cached_response.headers['Expires'] = res.headers.get('Expires')
        cached_response.headers['Date'] = res.headers.get('Date')
        return cached_response
    return res

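# A hedged sketch of the expires-header helper the method above depends on;
# the real get_cache_time_left() may be implemented differently.
import time
from email.utils import parsedate_to_datetime

def get_cache_time_left(expires_header):
    # Seconds until the RFC 1123 'Expires' timestamp; negative once expired.
    expires = parsedate_to_datetime(expires_header)
    return expires.timestamp() - time.time()
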
"def",
"__make_request",
"(",
"self",
",",
"request",
",",
"opt",
",",
"cache_key",
"=",
"None",
",",
"method",
"=",
"None",
")",
":",
"# check expiration and etags\r",
"opt_headers",
"=",
"{",
"}",
"cached_response",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"cache_key",
",",
"None",
")",
"if",
"cached_response",
"is",
"not",
"None",
":",
"# if we have expires cached, and still validd\r",
"expires",
"=",
"cached_response",
".",
"headers",
".",
"get",
"(",
"'expires'",
",",
"None",
")",
"if",
"expires",
"is",
"not",
"None",
":",
"cache_timeout",
"=",
"get_cache_time_left",
"(",
"cached_response",
".",
"headers",
"[",
"'expires'",
"]",
")",
"if",
"cache_timeout",
">=",
"0",
":",
"return",
"cached_response",
"# if we have etags, add the header to use them\r",
"etag",
"=",
"cached_response",
".",
"headers",
".",
"get",
"(",
"'etag'",
",",
"None",
")",
"if",
"etag",
"is",
"not",
"None",
":",
"opt_headers",
"[",
"'If-None-Match'",
"]",
"=",
"etag",
"# if nothing makes us use the cache, invalidate everything\r",
"if",
"(",
"expires",
"is",
"None",
"or",
"cache_timeout",
"<",
"0",
")",
"and",
"etag",
"is",
"None",
":",
"self",
".",
"cache",
".",
"invalidate",
"(",
"cache_key",
")",
"# apply request-related options before preparation.\r",
"request",
".",
"prepare",
"(",
"scheme",
"=",
"self",
".",
"prepare_schemes",
"(",
"request",
")",
".",
"pop",
"(",
")",
",",
"handle_files",
"=",
"False",
")",
"request",
".",
"_patch",
"(",
"opt",
")",
"# prepare the request and make it.\r",
"method",
"=",
"method",
"or",
"request",
".",
"method",
".",
"upper",
"(",
")",
"request",
".",
"header",
".",
"update",
"(",
"opt_headers",
")",
"prepared_request",
"=",
"self",
".",
"_session",
".",
"prepare_request",
"(",
"Request",
"(",
"method",
"=",
"method",
",",
"url",
"=",
"request",
".",
"url",
",",
"params",
"=",
"request",
".",
"query",
",",
"data",
"=",
"request",
".",
"data",
",",
"headers",
"=",
"request",
".",
"header",
")",
")",
"start_api_call",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"res",
"=",
"self",
".",
"_session",
".",
"send",
"(",
"prepared_request",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
")",
"except",
"(",
"RequestsConnectionError",
",",
"Timeout",
")",
"as",
"exc",
":",
"# timeout issue, generate a fake response to finish the process\r",
"# as a normal error 500\r",
"res",
"=",
"CachedResponse",
"(",
"status_code",
"=",
"500",
",",
"headers",
"=",
"{",
"}",
",",
"content",
"=",
"(",
"'{\"error\": \"%s\"}'",
"%",
"str",
"(",
"exc",
")",
")",
".",
"encode",
"(",
"'latin-1'",
")",
",",
"url",
"=",
"prepared_request",
".",
"url",
")",
"# event for api call stats\r",
"self",
".",
"signal_api_call_stats",
".",
"send",
"(",
"url",
"=",
"res",
".",
"url",
",",
"status_code",
"=",
"res",
".",
"status_code",
",",
"elapsed_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_api_call",
",",
"message",
"=",
"res",
".",
"content",
"if",
"res",
".",
"status_code",
"!=",
"200",
"else",
"None",
")",
"# if we have HTTP 304 (content didn't change), return the cached\r",
"# response updated with the new headers\r",
"if",
"res",
".",
"status_code",
"==",
"304",
"and",
"cached_response",
"is",
"not",
"None",
":",
"cached_response",
".",
"headers",
"[",
"'Expires'",
"]",
"=",
"res",
".",
"headers",
".",
"get",
"(",
"'Expires'",
")",
"cached_response",
".",
"headers",
"[",
"'Date'",
"]",
"=",
"res",
".",
"headers",
".",
"get",
"(",
"'Date'",
")",
"return",
"cached_response",
"return",
"res"
] | 39.546512 | [
0.014492753623188406,
0.012987012987012988,
0.020833333333333332,
1,
0.02531645569620253,
0.05263157894736842,
0.030303030303030304,
0.025974025974025976,
0.01818181818181818,
0.0196078431372549,
1,
0.08333333333333333,
0.02702702702702703,
0.04,
0.017241379310344827,
0.075,
0.017241379310344827,
0.014925373134328358,
0.08333333333333333,
0.03773584905660377,
0.01818181818181818,
0.1111111111111111,
0.07692307692307693,
0.023255813953488372,
1,
0.01694915254237288,
0.01639344262295082,
0.09090909090909091,
0.019230769230769232,
1,
0.014084507042253521,
0.041666666666666664,
0.02040816326530612,
1,
0.016666666666666666,
0.08,
0.05357142857142857,
0.06451612903225806,
0.2,
0.03571428571428571,
1,
0.023255813953488372,
0.02,
0.023255813953488372,
0.034482758620689655,
0.09523809523809523,
0.0967741935483871,
0.09090909090909091,
0.07894736842105263,
0.08571428571428572,
0.05128205128205128,
0.14285714285714285,
0.2,
0.02702702702702703,
1,
0.23076923076923078,
0.05263157894736842,
0.058823529411764705,
0.07894736842105263,
0.14285714285714285,
1,
0.05172413793103448,
0.013157894736842105,
0.027777777777777776,
0.058823529411764705,
0.09090909090909091,
0.10714285714285714,
0.04054054054054054,
0.04878048780487805,
0.14285714285714285,
1,
0.02857142857142857,
0.04878048780487805,
0.12,
0.07317073170731707,
0.05454545454545454,
0.029411764705882353,
0.2,
1,
0.0136986301369863,
0.020833333333333332,
0.04477611940298507,
0.013157894736842105,
0.014285714285714285,
0.02857142857142857,
0.1111111111111111
] |
def get_referenced_object(referring_object, fieldname):
    """
    Get an object referred to by a field in another object.

    For example an object of type Construction has fields for each layer, each
    of which refers to a Material. This function allows the object
    representing a Material to be fetched using the name of the layer.

    Returns the first item found since if there is more than one matching item,
    it is a malformed IDF.

    Parameters
    ----------
    referring_object : EpBunch
        The object which contains a reference to another object.
    fieldname : str
        The name of the field in the referring object which contains the
        reference to another object.

    Returns
    -------
    EpBunch

    """
    idf = referring_object.theidf
    object_list = referring_object.getfieldidd_item(fieldname, u'object-list')
    for obj_type in idf.idfobjects:
        for obj in idf.idfobjects[obj_type]:
            valid_object_lists = obj.getfieldidd_item("Name", u'reference')
            if set(object_list).intersection(set(valid_object_lists)):
                referenced_obj_name = referring_object[fieldname]
                if obj.Name == referenced_obj_name:
                    return obj

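# A minimal usage sketch (hypothetical eppy-style IDF): fetch the Material
# object behind a Construction's outside layer via the layer field name.
construction = idf.idfobjects['CONSTRUCTION'][0]
material = get_referenced_object(construction, 'Outside_Layer')
print(material.Name)
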
"def",
"get_referenced_object",
"(",
"referring_object",
",",
"fieldname",
")",
":",
"idf",
"=",
"referring_object",
".",
"theidf",
"object_list",
"=",
"referring_object",
".",
"getfieldidd_item",
"(",
"fieldname",
",",
"u'object-list'",
")",
"for",
"obj_type",
"in",
"idf",
".",
"idfobjects",
":",
"for",
"obj",
"in",
"idf",
".",
"idfobjects",
"[",
"obj_type",
"]",
":",
"valid_object_lists",
"=",
"obj",
".",
"getfieldidd_item",
"(",
"\"Name\"",
",",
"u'reference'",
")",
"if",
"set",
"(",
"object_list",
")",
".",
"intersection",
"(",
"set",
"(",
"valid_object_lists",
")",
")",
":",
"referenced_obj_name",
"=",
"referring_object",
"[",
"fieldname",
"]",
"if",
"obj",
".",
"Name",
"==",
"referenced_obj_name",
":",
"return",
"obj"
] | 36.69697 | [
0.01818181818181818,
0.2857142857142857,
0.03389830508474576,
0,
0.02564102564102564,
0.029850746268656716,
0.02857142857142857,
0,
0.02531645569620253,
0.07692307692307693,
0,
0.14285714285714285,
0.14285714285714285,
0.1,
0.03125,
0.15789473684210525,
0.027777777777777776,
0.05555555555555555,
0,
0.18181818181818182,
0.18181818181818182,
0.18181818181818182,
0,
0.2857142857142857,
0.06060606060606061,
0.02564102564102564,
0.05714285714285714,
0.045454545454545456,
0.02666666666666667,
0.02857142857142857,
0.03076923076923077,
0.0392156862745098,
0.06666666666666667
] |
def get_player_verify(self, tag: crtag, apikey: str, timeout=None):
    """Check the API Key of a player.

    This endpoint has been **restricted** to
    certain members of the community

    Raises BadRequest if the apikey is invalid

    Parameters
    ----------
    tag: str
        A valid player tag. Minimum length: 3
        Valid characters: 0289PYLQGRJCUV
    apikey: str
        The API Key in the player's settings
    timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    url = self.api.PLAYER + '/' + tag + '/verifytoken'
    return self._get_model(url, FullPlayer, timeout=timeout, method='POST', json={'token': apikey})

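# A minimal usage sketch (hypothetical Client instance); the tag and key
# below are placeholders, not real credentials.
player = client.get_player_verify('#2PP00', apikey='key-from-game-settings')
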
"def",
"get_player_verify",
"(",
"self",
",",
"tag",
":",
"crtag",
",",
"apikey",
":",
"str",
",",
"timeout",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"api",
".",
"PLAYER",
"+",
"'/'",
"+",
"tag",
"+",
"'/verifytoken'",
"return",
"self",
".",
"_get_model",
"(",
"url",
",",
"FullPlayer",
",",
"timeout",
"=",
"timeout",
",",
"method",
"=",
"'POST'",
",",
"json",
"=",
"{",
"'token'",
":",
"apikey",
"}",
")"
] | 38.315789 | [
0.014925373134328358,
0.04878048780487805,
0.08333333333333333,
0.05,
0,
0.04,
0,
0.1111111111111111,
0.1111111111111111,
0.125,
0.03773584905660377,
0.045454545454545456,
0.10526315789473684,
0.041666666666666664,
0.05405405405405406,
0.03508771929824561,
0.18181818181818182,
0.034482758620689655,
0.02912621359223301
] |
def cls_register(cls, frameset, new_class, init_args, name=None):
    """ Register a new FrameSet or FrameSet subclass as a member/attribute
    of a class.

    Returns the new FrameSet or FrameSet subclass.

    Arguments:
        frameset  : An existing FrameSet, or an iterable of strings.
        init_args : A list of properties from the `frameset` to try to use
                    for initializing the new FrameSet.
        new_class : The class type to initialize.
        name      : New name for the FrameSet, also used as the
                    class's attribute name.
                    If the `frameset` object has no `name` attribute,
                    this argument is required. It must not be empty
                    when given.
    """
    name = name or getattr(frameset, 'name', None)
    if name is None:
        raise ValueError(
            '`name` is needed when the `frameset` has no name attribute.'
        )

    kwargs = {'name': name}
    for initarg in init_args:
        kwargs[initarg] = getattr(frameset, initarg, None)

    newframeset = new_class(frameset, **kwargs)
    # Mark this FrameSet/BarSet as a registered item (not basic/original).
    newframeset._registered = True

    setattr(cls, name, newframeset)
    return newframeset

"def",
"cls_register",
"(",
"cls",
",",
"frameset",
",",
"new_class",
",",
"init_args",
",",
"name",
"=",
"None",
")",
":",
"name",
"=",
"name",
"or",
"getattr",
"(",
"frameset",
",",
"'name'",
",",
"None",
")",
"if",
"name",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'`name` is needed when the `frameset` has no name attribute.'",
")",
"kwargs",
"=",
"{",
"'name'",
":",
"name",
"}",
"for",
"initarg",
"in",
"init_args",
":",
"kwargs",
"[",
"initarg",
"]",
"=",
"getattr",
"(",
"frameset",
",",
"initarg",
",",
"None",
")",
"newframeset",
"=",
"new_class",
"(",
"frameset",
",",
"*",
"*",
"kwargs",
")",
"# Mark this FrameSet/BarSet as a registered item (not basic/original).",
"newframeset",
".",
"_registered",
"=",
"True",
"setattr",
"(",
"cls",
",",
"name",
",",
"newframeset",
")",
"return",
"newframeset"
] | 44.655172 | [
0.015384615384615385,
0.02702702702702703,
0.10526315789473684,
0.037037037037037035,
0.1111111111111111,
0.041666666666666664,
0.05128205128205128,
0.034482758620689655,
0.05660377358490566,
0.04477611940298507,
0.0425531914893617,
0.04054054054054054,
0.028169014084507043,
0.05714285714285714,
0.2857142857142857,
0.04,
0.1,
0.12,
0.0273972602739726,
0.3333333333333333,
0.07407407407407407,
0.06896551724137931,
0.034482758620689655,
0,
0.0425531914893617,
0.02702702702702703,
0.058823529411764705,
0.05714285714285714,
0.09090909090909091
] |
def decode_kempressed(bytestring):
    """Returns a subvol, not a bytestring, since the numpy conversion is done inside the fpzip extension."""
    subvol = fpzip.decompress(bytestring, order='F')
    return np.swapaxes(subvol, 3, 2) - 2.0

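# A hedged round-trip sketch: the matching encoder presumably swaps the same
# axes and adds the 2.0 offset before fpzip compression.
import numpy as np
import fpzip

vol = np.random.rand(16, 16, 16, 1).astype(np.float32)
encoded = fpzip.compress(np.swapaxes(vol + 2.0, 3, 2), order='F')
assert np.allclose(decode_kempressed(encoded), vol)
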
"def",
"decode_kempressed",
"(",
"bytestring",
")",
":",
"subvol",
"=",
"fpzip",
".",
"decompress",
"(",
"bytestring",
",",
"order",
"=",
"'F'",
")",
"return",
"np",
".",
"swapaxes",
"(",
"subvol",
",",
"3",
",",
"2",
")",
"-",
"2.0"
] | 51.75 | [
0.029411764705882353,
0.047619047619047616,
0.06,
0.10256410256410256
] |
def fix_ipython_startup(fn):
    """
    Attempt to fix IPython startup to not print (Bool_t)1
    """
    BADSTR = 'TPython::Exec( "" )'
    GOODSTR = 'TPython::Exec( "" );'
    if sys.version_info[0] < 3:
        consts = fn.im_func.func_code.co_consts
    else:
        consts = fn.__code__.co_consts
    if BADSTR not in consts:
        return
    idx = consts.index(BADSTR)

    orig_refcount = sys.getrefcount(consts)
    del consts

    PyTuple_SetItem = ctypes.pythonapi.PyTuple_SetItem
    PyTuple_SetItem.argtypes = (ctypes.py_object,
                                ctypes.c_size_t,
                                ctypes.py_object)
    if sys.version_info[0] < 3:
        consts = ctypes.py_object(fn.im_func.func_code.co_consts)
    else:
        # plain functions have no im_func wrapper on Python 3
        consts = ctypes.py_object(fn.__code__.co_consts)
    for _ in range(orig_refcount - 2):
        ctypes.pythonapi.Py_DecRef(consts)
    try:
        ctypes.pythonapi.Py_IncRef(GOODSTR)
        PyTuple_SetItem(consts, idx, GOODSTR)
    finally:
        for _ in range(orig_refcount - 2):
            ctypes.pythonapi.Py_IncRef(consts)

"def",
"fix_ipython_startup",
"(",
"fn",
")",
":",
"BADSTR",
"=",
"'TPython::Exec( \"\" )'",
"GOODSTR",
"=",
"'TPython::Exec( \"\" );'",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"consts",
"=",
"fn",
".",
"im_func",
".",
"func_code",
".",
"co_consts",
"else",
":",
"consts",
"=",
"fn",
".",
"__code__",
".",
"co_consts",
"if",
"BADSTR",
"not",
"in",
"consts",
":",
"return",
"idx",
"=",
"consts",
".",
"index",
"(",
"BADSTR",
")",
"orig_refcount",
"=",
"sys",
".",
"getrefcount",
"(",
"consts",
")",
"del",
"consts",
"PyTuple_SetItem",
"=",
"ctypes",
".",
"pythonapi",
".",
"PyTuple_SetItem",
"PyTuple_SetItem",
".",
"argtypes",
"=",
"(",
"ctypes",
".",
"py_object",
",",
"ctypes",
".",
"c_size_t",
",",
"ctypes",
".",
"py_object",
")",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"consts",
"=",
"ctypes",
".",
"py_object",
"(",
"fn",
".",
"im_func",
".",
"func_code",
".",
"co_consts",
")",
"else",
":",
"consts",
"=",
"ctypes",
".",
"py_object",
"(",
"fn",
".",
"im_func",
".",
"__code__",
".",
"co_consts",
")",
"for",
"_",
"in",
"range",
"(",
"orig_refcount",
"-",
"2",
")",
":",
"ctypes",
".",
"pythonapi",
".",
"Py_DecRef",
"(",
"consts",
")",
"try",
":",
"ctypes",
".",
"pythonapi",
".",
"Py_IncRef",
"(",
"GOODSTR",
")",
"PyTuple_SetItem",
"(",
"consts",
",",
"idx",
",",
"GOODSTR",
")",
"finally",
":",
"for",
"_",
"in",
"range",
"(",
"orig_refcount",
"-",
"2",
")",
":",
"ctypes",
".",
"pythonapi",
".",
"Py_IncRef",
"(",
"consts",
")"
] | 31.411765 | [
0.03571428571428571,
0.2857142857142857,
0.05263157894736842,
0.2857142857142857,
0.058823529411764705,
0.05555555555555555,
0.06451612903225806,
0.0425531914893617,
0.2222222222222222,
0.05263157894736842,
0.07142857142857142,
0.14285714285714285,
0.06666666666666667,
0.046511627906976744,
0.14285714285714285,
0,
0.037037037037037035,
0.061224489795918366,
0.041666666666666664,
0.061224489795918366,
0,
0.06451612903225806,
0.03076923076923077,
0.2222222222222222,
0.03125,
0,
0.05263157894736842,
0.047619047619047616,
0.25,
0.046511627906976744,
0.044444444444444446,
0.16666666666666666,
0.047619047619047616,
0.043478260869565216
] |
def get_stack_var(name, depth=0):
    '''This function may fiddle with the locals of the calling function,
    to make it the root function of the fiber. If called from a short-lived
    function be sure to use a bigger frame depth.
    Returns the fiber state or None.'''
    base_frame = _get_base_frame(depth)
    if not base_frame:
        # Frame not found
        raise RuntimeError("Base frame not found")

    # Look up the frame stack starting at the base frame for the fiber state
    level = 0
    frame = base_frame
    while frame:
        locals = frame.f_locals
        value = locals.get(name)
        if value is not None:
            if level > 0:
                # Copy a reference of the fiber state in the base frame
                base_frame.f_locals[name] = value
            return value
        if locals.get(SECTION_BOUNDARY_TAG):
            return None
        frame = frame.f_back
        level += 1
    return None

"def",
"get_stack_var",
"(",
"name",
",",
"depth",
"=",
"0",
")",
":",
"base_frame",
"=",
"_get_base_frame",
"(",
"depth",
")",
"if",
"not",
"base_frame",
":",
"# Frame not found",
"raise",
"RuntimeError",
"(",
"\"Base frame not found\"",
")",
"# Lookup up the frame stack starting at the base frame for the fiber state",
"level",
"=",
"0",
"frame",
"=",
"base_frame",
"while",
"frame",
":",
"locals",
"=",
"frame",
".",
"f_locals",
"value",
"=",
"locals",
".",
"get",
"(",
"name",
")",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"level",
">",
"0",
":",
"# Copy a reference of the fiber state in the base frame",
"base_frame",
".",
"f_locals",
"[",
"name",
"]",
"=",
"value",
"return",
"value",
"if",
"locals",
".",
"get",
"(",
"SECTION_BOUNDARY_TAG",
")",
":",
"return",
"None",
"frame",
"=",
"frame",
".",
"f_back",
"level",
"+=",
"1",
"return",
"None"
] | 34.148148 | [
0.030303030303030304,
0.027777777777777776,
0.02666666666666667,
0.04081632653061224,
0.07692307692307693,
0,
0.05128205128205128,
0.09090909090909091,
0.08,
0.04,
0,
0.02564102564102564,
0.15384615384615385,
0.09090909090909091,
0.125,
0.06451612903225806,
0.0625,
0.06896551724137931,
0.08,
0.028169014084507043,
0.04081632653061224,
0.08333333333333333,
0.045454545454545456,
0.08695652173913043,
0.07142857142857142,
0.1111111111111111,
0.13333333333333333
] |
def cardInfo(self, resource_id):
    """Return card info.

    :param resource_id: Resource id.
    """
    # TODO: add referer to headers (futweb)
    base_id = baseId(resource_id)
    if base_id in self.players:
        return self.players[base_id]
    else:  # not a player?
        url = '{0}{1}.json'.format(card_info_url, base_id)
        return requests.get(url, timeout=self.timeout).json()

"def",
"cardInfo",
"(",
"self",
",",
"resource_id",
")",
":",
"# TODO: add referer to headers (futweb)",
"base_id",
"=",
"baseId",
"(",
"resource_id",
")",
"if",
"base_id",
"in",
"self",
".",
"players",
":",
"return",
"self",
".",
"players",
"[",
"base_id",
"]",
"else",
":",
"# not a player?",
"url",
"=",
"'{0}{1}.json'",
".",
"format",
"(",
"card_info_url",
",",
"base_id",
")",
"return",
"requests",
".",
"get",
"(",
"url",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
".",
"json",
"(",
")"
] | 35.666667 | [
0.03125,
0.07142857142857142,
0,
0.07317073170731707,
0.18181818181818182,
0.0425531914893617,
0.05405405405405406,
0.05714285714285714,
0.05,
0.06666666666666667,
0.03225806451612903,
0.03076923076923077
] |
def register(self, event_type, callback,
             args=None, kwargs=None, details_filter=None,
             weak=False):
    """Register a callback to be called when an event of a given type occurs.

    Callback will be called with provided ``args`` and ``kwargs`` when
    the event type occurs (or on any event if ``event_type`` equals
    :attr:`.ANY`). It will also get an additional keyword argument,
    ``details``, that will hold event details provided to the
    :meth:`.notify` method (if a details filter callback is provided then
    the target callback will *only* be triggered if the details filter
    callback returns a truthy value).

    :param event_type: event type to get triggered on
    :param callback: function callback to be registered.
    :param args: non-keyworded arguments
    :type args: list
    :param kwargs: key-value pair arguments
    :type kwargs: dictionary
    :param weak: if the callback retained should be referenced via
                 a weak reference or a strong reference (defaults to
                 holding a strong reference)
    :type weak: bool

    :returns: the listener that was registered
    :rtype: :py:class:`~.Listener`
    """
    if not six.callable(callback):
        raise ValueError("Event callback must be callable")
    if details_filter is not None:
        if not six.callable(details_filter):
            raise ValueError("Details filter must be callable")
    if not self.can_be_registered(event_type):
        raise ValueError("Disallowed event type '%s' can not have a"
                         " callback registered" % event_type)
    if kwargs:
        for k in self.RESERVED_KEYS:
            if k in kwargs:
                raise KeyError("Reserved key '%s' not allowed in "
                               "kwargs" % k)
    with self._lock:
        if self.is_registered(event_type, callback,
                              details_filter=details_filter):
            raise ValueError("Event callback already registered with"
                             " equivalent details filter")
        listener = Listener(_make_ref(callback, weak=weak),
                            args=args, kwargs=kwargs,
                            details_filter=details_filter,
                            weak=weak)
        listeners = self._topics.setdefault(event_type, [])
        listeners.append(listener)
        return listener

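# A minimal usage sketch (hypothetical notifier instance): callbacks receive
# the event type plus the `details` supplied to notify().
def on_update(event_type, details):
    print(event_type, details)

notifier.register('update', on_update)
notifier.notify('update', {'progress': 0.5})
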
"def",
"register",
"(",
"self",
",",
"event_type",
",",
"callback",
",",
"args",
"=",
"None",
",",
"kwargs",
"=",
"None",
",",
"details_filter",
"=",
"None",
",",
"weak",
"=",
"False",
")",
":",
"if",
"not",
"six",
".",
"callable",
"(",
"callback",
")",
":",
"raise",
"ValueError",
"(",
"\"Event callback must be callable\"",
")",
"if",
"details_filter",
"is",
"not",
"None",
":",
"if",
"not",
"six",
".",
"callable",
"(",
"details_filter",
")",
":",
"raise",
"ValueError",
"(",
"\"Details filter must be callable\"",
")",
"if",
"not",
"self",
".",
"can_be_registered",
"(",
"event_type",
")",
":",
"raise",
"ValueError",
"(",
"\"Disallowed event type '%s' can not have a\"",
"\" callback registered\"",
"%",
"event_type",
")",
"if",
"kwargs",
":",
"for",
"k",
"in",
"self",
".",
"RESERVED_KEYS",
":",
"if",
"k",
"in",
"kwargs",
":",
"raise",
"KeyError",
"(",
"\"Reserved key '%s' not allowed in \"",
"\"kwargs\"",
"%",
"k",
")",
"with",
"self",
".",
"_lock",
":",
"if",
"self",
".",
"is_registered",
"(",
"event_type",
",",
"callback",
",",
"details_filter",
"=",
"details_filter",
")",
":",
"raise",
"ValueError",
"(",
"\"Event callback already registered with\"",
"\" equivalent details filter\"",
")",
"listener",
"=",
"Listener",
"(",
"_make_ref",
"(",
"callback",
",",
"weak",
"=",
"weak",
")",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
",",
"details_filter",
"=",
"details_filter",
",",
"weak",
"=",
"weak",
")",
"listeners",
"=",
"self",
".",
"_topics",
".",
"setdefault",
"(",
"event_type",
",",
"[",
"]",
")",
"listeners",
".",
"append",
"(",
"listener",
")",
"return",
"listener"
] | 48.788462 | [
0.05,
0.09836065573770492,
0.1724137931034483,
0.02564102564102564,
0,
0.0410958904109589,
0.08,
0.08823529411764706,
0.046153846153846156,
0.09090909090909091,
0.05405405405405406,
0.07317073170731707,
0,
0.05263157894736842,
0.05,
0.06818181818181818,
0.125,
0.06382978723404255,
0.09375,
0.04285714285714286,
0.06944444444444445,
0.08333333333333333,
0.125,
0,
0.06,
0.21052631578947367,
0.18181818181818182,
0.05263157894736842,
0.031746031746031744,
0.05263157894736842,
0.041666666666666664,
0.029850746268656716,
0.04,
0.041666666666666664,
0.06153846153846154,
0.1111111111111111,
0.05,
0.06451612903225806,
0.04285714285714286,
0.08333333333333333,
0.08333333333333333,
0.05454545454545454,
0.07692307692307693,
0.0410958904109589,
0.06451612903225806,
0.047619047619047616,
0.07017543859649122,
0.04838709677419355,
0.09523809523809523,
0.031746031746031744,
0.05263157894736842,
0.07407407407407407
] |
async def handler(event):
    """#client or #msg query: Looks for the given attribute in RTD."""
    await event.delete()
    await event.respond(
        get_docs_message(kind=event.pattern_match.group(1),
                         query=event.pattern_match.group(2)),
        reply_to=event.reply_to_msg_id
    )

"async",
"def",
"handler",
"(",
"event",
")",
":",
"await",
"event",
".",
"delete",
"(",
")",
"await",
"event",
".",
"respond",
"(",
"get_docs_message",
"(",
"kind",
"=",
"event",
".",
"pattern_match",
".",
"group",
"(",
"1",
")",
",",
"query",
"=",
"event",
".",
"pattern_match",
".",
"group",
"(",
"2",
")",
")",
",",
"reply_to",
"=",
"event",
".",
"reply_to_msg_id",
")"
] | 34 | [
0.04,
0.02857142857142857,
0.08333333333333333,
0,
0.125,
0.05084745762711865,
0.08196721311475409,
0.07894736842105263,
0.6
] |
def updateContextImage(self, contextpar):
    """ Reset the name of the context image to `None` if parameter
    ``context`` is `False`.
    """
    self.createContext = contextpar
    if not contextpar:
        log.info('No context image will be created for %s' %
                 self._filename)
        self.outputNames['outContext'] = None

"def",
"updateContextImage",
"(",
"self",
",",
"contextpar",
")",
":",
"self",
".",
"createContext",
"=",
"contextpar",
"if",
"not",
"contextpar",
":",
"log",
".",
"info",
"(",
"'No context image will be created for %s'",
"%",
"self",
".",
"_filename",
")",
"self",
".",
"outputNames",
"[",
"'outContext'",
"]",
"=",
"None"
] | 36.7 | [
0.024390243902439025,
0.02857142857142857,
0.0967741935483871,
0,
0.18181818181818182,
0.05128205128205128,
0.07692307692307693,
0.046875,
0.1111111111111111,
0.04081632653061224
] |
def _get_url(self, url):
    """
    Returns normalized url. If no scheme is given, falls back to the
    filesystem (``file:///``) scheme.
    """
    url = str(url)
    if url != 'default' and '://' not in url:
        url = "file:" + urllib.pathname2url(url)
    return url

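# A quick illustration (hypothetical repo object): scheme-less paths are
# routed through urllib.pathname2url, explicit schemes pass through as-is.
assert repo._get_url('http://example.com/repo') == 'http://example.com/repo'
assert repo._get_url('/tmp/repo').startswith('file:')
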
"def",
"_get_url",
"(",
"self",
",",
"url",
")",
":",
"url",
"=",
"str",
"(",
"url",
")",
"if",
"url",
"!=",
"'default'",
"and",
"not",
"'://'",
"in",
"url",
":",
"url",
"=",
"\"file:\"",
"+",
"urllib",
".",
"pathname2url",
"(",
"url",
")",
"return",
"url"
] | 30.4 | [
0.041666666666666664,
0.18181818181818182,
0.030303030303030304,
0.09523809523809523,
0.13333333333333333,
0.18181818181818182,
0.09090909090909091,
0.061224489795918366,
0.038461538461538464,
0.1111111111111111
] |
def selected_value(self, layer_purpose_key):
    """Obtain selected hazard or exposure.

    :param layer_purpose_key: A layer purpose key, can be hazard or
        exposure.
    :type layer_purpose_key: str

    :returns: A selected hazard or exposure definition.
    :rtype: dict
    """
    if layer_purpose_key == layer_purpose_exposure['key']:
        role = RoleExposure
    elif layer_purpose_key == layer_purpose_hazard['key']:
        role = RoleHazard
    else:
        return None

    selected = self.tblFunctions1.selectedItems()
    if len(selected) != 1:
        return None
    try:
        return selected[0].data(role)
    except (AttributeError, NameError):
        return None

"def",
"selected_value",
"(",
"self",
",",
"layer_purpose_key",
")",
":",
"if",
"layer_purpose_key",
"==",
"layer_purpose_exposure",
"[",
"'key'",
"]",
":",
"role",
"=",
"RoleExposure",
"elif",
"layer_purpose_key",
"==",
"layer_purpose_hazard",
"[",
"'key'",
"]",
":",
"role",
"=",
"RoleHazard",
"else",
":",
"return",
"None",
"selected",
"=",
"self",
".",
"tblFunctions1",
".",
"selectedItems",
"(",
")",
"if",
"len",
"(",
"selected",
")",
"!=",
"1",
":",
"return",
"None",
"try",
":",
"return",
"selected",
"[",
"0",
"]",
".",
"data",
"(",
"role",
")",
"except",
"(",
"AttributeError",
",",
"NameError",
")",
":",
"return",
"None"
] | 31.375 | [
0.022727272727272728,
0.043478260869565216,
0,
0.04225352112676056,
0.09523809523809523,
0.08333333333333333,
0,
0.05084745762711865,
0.15,
0.18181818181818182,
0.03225806451612903,
0.06451612903225806,
0.03225806451612903,
0.06896551724137931,
0.15384615384615385,
0.08695652173913043,
0,
0.03773584905660377,
0.06666666666666667,
0.08695652173913043,
0.16666666666666666,
0.04878048780487805,
0.046511627906976744,
0.08695652173913043
] |
def _get_valid_mount_strings(self):
    """Return a tuple of potential mount strings.

    Casper Admin seems to mount in a number of ways:
        - hostname/share
        - fqdn/share
    Plus, there's the possibility of:
        - IPAddress/share
    Then factor in the possibility that the port is included too!
    This gives us a total of up to six valid addresses for mount
    to report.
    """
    results = set()
    join = os.path.join
    url = self.connection["url"]
    share_name = urllib.quote(self.connection["share_name"],
                              safe="~()*!.'")
    port = self.connection["port"]

    # URL from python-jss form:
    results.add(join(url, share_name))
    results.add(join("%s:%s" % (url, port), share_name))

    # IP Address form:
    # socket.gethostbyname() will return an IP address whether
    # an IP address, FQDN, or .local name is provided.
    ip_address = socket.gethostbyname(url)
    results.add(join(ip_address, share_name))
    results.add(join("%s:%s" % (ip_address, port), share_name))

    # Domain name only form:
    domain_name = url.split(".")[0]
    results.add(join(domain_name, share_name))
    results.add(join("%s:%s" % (domain_name, port), share_name))

    # FQDN form using getfqdn:
    # socket.getfqdn() could just resolve back to the ip
    # or be the same as the initial URL so only add it if it's
    # different than both.
    fqdn = socket.getfqdn(ip_address)
    results.add(join(fqdn, share_name))
    results.add(join("%s:%s" % (fqdn, port), share_name))

    return tuple(results)

"def",
"_get_valid_mount_strings",
"(",
"self",
")",
":",
"results",
"=",
"set",
"(",
")",
"join",
"=",
"os",
".",
"path",
".",
"join",
"url",
"=",
"self",
".",
"connection",
"[",
"\"url\"",
"]",
"share_name",
"=",
"urllib",
".",
"quote",
"(",
"self",
".",
"connection",
"[",
"\"share_name\"",
"]",
",",
"safe",
"=",
"\"~()*!.'\"",
")",
"port",
"=",
"self",
".",
"connection",
"[",
"\"port\"",
"]",
"# URL from python-jss form:",
"results",
".",
"add",
"(",
"join",
"(",
"url",
",",
"share_name",
")",
")",
"results",
".",
"add",
"(",
"join",
"(",
"\"%s:%s\"",
"%",
"(",
"url",
",",
"port",
")",
",",
"share_name",
")",
")",
"# IP Address form:",
"# socket.gethostbyname() will return an IP address whether",
"# an IP address, FQDN, or .local name is provided.",
"ip_address",
"=",
"socket",
".",
"gethostbyname",
"(",
"url",
")",
"results",
".",
"add",
"(",
"join",
"(",
"ip_address",
",",
"share_name",
")",
")",
"results",
".",
"add",
"(",
"join",
"(",
"\"%s:%s\"",
"%",
"(",
"ip_address",
",",
"port",
")",
",",
"share_name",
")",
")",
"# Domain name only form:",
"domain_name",
"=",
"url",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"results",
".",
"add",
"(",
"join",
"(",
"domain_name",
",",
"share_name",
")",
")",
"results",
".",
"add",
"(",
"join",
"(",
"\"%s:%s\"",
"%",
"(",
"domain_name",
",",
"port",
")",
",",
"share_name",
")",
")",
"# FQDN form using getfqdn:",
"# socket.getfqdn() could just resolve back to the ip",
"# or be the same as the initial URL so only add it if it's",
"# different than both.",
"fqdn",
"=",
"socket",
".",
"getfqdn",
"(",
"ip_address",
")",
"results",
".",
"add",
"(",
"join",
"(",
"fqdn",
",",
"share_name",
")",
")",
"results",
".",
"add",
"(",
"join",
"(",
"\"%s:%s\"",
"%",
"(",
"fqdn",
",",
"port",
")",
",",
"share_name",
")",
")",
"return",
"tuple",
"(",
"results",
")"
] | 37.977273 | [
0.02857142857142857,
0.03773584905660377,
0,
0.03571428571428571,
0.07142857142857142,
0.08333333333333333,
0.04878048780487805,
0.06896551724137931,
0.028985507246376812,
0.029411764705882353,
0.1111111111111111,
0.18181818181818182,
0.08695652173913043,
0.07407407407407407,
0.05555555555555555,
0.046875,
0.10204081632653061,
0.05263157894736842,
0,
0.05714285714285714,
0.047619047619047616,
0.03333333333333333,
0,
0.07692307692307693,
0.030303030303030304,
0.034482758620689655,
0.043478260869565216,
0.04081632653061224,
0.029850746268656716,
0,
0.0625,
0.05128205128205128,
0.04,
0.029411764705882353,
0,
0.058823529411764705,
0.03333333333333333,
0.030303030303030304,
0.06666666666666667,
0.04878048780487805,
0.046511627906976744,
0.03278688524590164,
0,
0.06896551724137931
] |
def _set_detail(self, v, load=False):
    """
    Setter method for detail, mapped from YANG variable /openflow_state/detail (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_detail is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_detail() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=detail.detail, is_container='container', presence=False, yang_name="detail", rest_name="detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-detail', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """detail must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=detail.detail, is_container='container', presence=False, yang_name="detail", rest_name="detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-detail', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)""",
        })

    self.__detail = t
    if hasattr(self, '_set'):
        self._set()

"def",
"_set_detail",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"detail",
".",
"detail",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"detail\"",
",",
"rest_name",
"=",
"\"detail\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'callpoint'",
":",
"u'openflow-detail'",
",",
"u'cli-suppress-show-path'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-openflow-operational'",
",",
"defining_module",
"=",
"'brocade-openflow-operational'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"False",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"detail must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=detail.detail, is_container='container', presence=False, yang_name=\"detail\", rest_name=\"detail\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-detail', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)\"\"\"",
",",
"}",
")",
"self",
".",
"__detail",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 75.136364 | [
0.02702702702702703,
0.2857142857142857,
0.05555555555555555,
0.05357142857142857,
0.03076923076923077,
0.03278688524590164,
0.03773584905660377,
0.2857142857142857,
0.07142857142857142,
0.14285714285714285,
0.25,
0.010638297872340425,
0.05714285714285714,
0.16666666666666666,
0.04819277108433735,
0.07894736842105263,
0.008113590263691683,
0.3,
0,
0.09523809523809523,
0.06896551724137931,
0.17647058823529413
] |
def contraction_conical(Di1, Di2, fd=None, l=None, angle=None,
                        Re=None, roughness=0.0, method='Rennels'):
    r'''Returns the loss coefficient for any conical pipe contraction.
    This calculation has five methods available. The 'Idelchik' [2]_ and
    'Blevins' [3]_ methods use interpolation among tables of values; 'Miller'
    uses a 2d spline representation of a graph; and the
    'Rennels' [1]_, 'Crane' [4]_, and 'Swamee' [5]_ methods use formulas for
    their calculations.

    The 'Rennels' [1]_ formulas are:

    .. math::
        K_2 = K_{fr,2} + K_{conv,2}

    .. math::
        K_{fr,2} = \frac{f_d\left(1 - \beta^4\right)}{8\sin(\theta/2)}

    .. math::
        K_{conv,2} = 0.0696[1+C_B(\sin(\alpha/2)-1)](1-\beta^5)\lambda^2
        + (\lambda-1)^2

    .. math::
        \lambda = 1 + 0.622(\alpha/180)^{0.8}(1-0.215\beta^2-0.785\beta^5)

    .. math::
        \beta = d_2/d_1

    The 'Swamee' [5]_ formula is:

    .. math::
        K = 0.315 \theta^{1/3}

    .. figure:: fittings/contraction_conical.png
       :scale: 30 %
       :alt: contraction conical; after [1]_

    Parameters
    ----------
    Di1 : float
        Inside pipe diameter of the larger, upstream, pipe, [m]
    Di2 : float
        Inside pipe diameter of the smaller, downstream, pipe, [m]
    fd : float, optional
        Darcy friction factor; used only in the Rennels method and will be
        calculated if not given, [-]
    l : float, optional
        Length of the contraction, optional [m]
    angle : float, optional
        Angle of contraction (180 = sharp, 0 = infinitely long contraction),
        optional [degrees]
    Re : float, optional
        Reynolds number of the pipe (used in Rennels method only if no
        friction factor given), [-]
    roughness : float, optional
        Roughness of the wall (used in Rennels method if no friction factor
        given), [m]
    method : str, optional
        The method to use for the calculation; one of 'Rennels', 'Idelchik',
        'Crane', 'Swamee' or 'Blevins', [-]

    Returns
    -------
    K : float
        Loss coefficient in terms of the following pipe [-]

    Notes
    -----
    Cheap and has substantial impact on pressure drop.

    The 'Idelchik' method includes two tabular interpolations; its friction
    term is limited to angles between 2 and 20 degrees and area ratios 0.05 to
    0.6, while its main term is limited to length over diameter ratios 0.025 to
    0.6. This seems to give it high results for angles < 25 degrees.

    The 'Blevins' method is based on Idelchik data; it should not be used,
    because its data jumps around and is limited to area ratios .1 to
    0.83, and length over diameter ratios 0 to 0.6. The 'Miller' method jumps
    around as well. Unlike most of Miller's methods, there is no correction for
    Reynolds number.

    There is quite a bit of variance in the predictions of the methods, as
    demonstrated by the following figure.

    .. plot:: plots/contraction_conical.py

    Examples
    --------
    >>> contraction_conical(Di1=0.1, Di2=0.04, l=0.04, Re=1E6)
    0.15639885880609544

    References
    ----------
    .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
    .. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of
       Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
       Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
       Treniya). National Technical Information Service, 1966.
    .. [3] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.:
       Van Nostrand Reinhold Co., 1984.
    .. [4] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
       2009.
    .. [5] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
       Pipe Networks. John Wiley & Sons, 2008.
    .. [6] Miller, Donald S. Internal Flow Systems: Design and Performance
       Prediction. Gulf Publishing Company, 1990.
    '''
    beta = Di2/Di1
    if angle is not None:
        angle_rad = radians(angle)
        l = (Di1 - Di2)/(2.0*tan(0.5*angle_rad))
    elif l is not None:
        try:
            angle_rad = 2.0*atan((Di1-Di2)/(2.0*l))
            angle = degrees(angle_rad)
        except ZeroDivisionError:
            angle_rad = pi
            angle = 180.0
    else:
        raise Exception('Either l or angle is required')

    if method is None:
        method = 'Rennels'
    if method == 'Rennels':
        if fd is None:
            if Re is None:
                raise ValueError("The `Rennels` method requires either a "
                                 "specified friction factor or `Re`")
            fd = Colebrook(Re=Re, eD=roughness/Di2, tol=-1)

        beta2 = beta*beta
        beta4 = beta2*beta2
        beta5 = beta4*beta
        lbd = 1.0 + 0.622*(angle_rad/pi)**0.8*(1.0 - 0.215*beta2 - 0.785*beta5)
        sin_half_angle = sin(0.5*angle_rad)
        K_fr2 = fd*(1.0 - beta4)/(8.0*sin_half_angle)
        K_conv2 = 0.0696*sin_half_angle*(1.0 - beta5)*lbd*lbd + (lbd - 1.0)**2
        return K_fr2 + K_conv2
    elif method == 'Crane':
        return contraction_conical_Crane(Di1=Di1, Di2=Di2, l=l, angle=angle)
    elif method == 'Swamee':
        return 0.315*angle_rad**(1.0/3.0)
    elif method == 'Idelchik':
        # Diagram 3-6; already digitized for beveled entrance
        K0 = float(entrance_beveled_Idelchik_obj(angle, l/Di2))

        # Angles 2 to 20, area ratios 0.05 to 0.6
        if angle > 20.0:
            angle_fric = 20.0
        elif angle < 2.0:
            angle_fric = 2.0
        else:
            angle_fric = angle

        A_ratio = A_ratio_fric = Di2*Di2/(Di1*Di1)
        if A_ratio_fric < 0.05:
            A_ratio_fric = 0.05
        elif A_ratio_fric > 0.6:
            A_ratio_fric = 0.6

        K_fr = float(contraction_conical_frction_Idelchik_obj(angle_fric, A_ratio_fric))
        return K0*(1.0 - A_ratio) + K_fr
    elif method == 'Blevins':
        A_ratio = Di1*Di1/(Di2*Di2)
        if A_ratio < 1.2:
            A_ratio = 1.2
        elif A_ratio > 10.0:
            A_ratio = 10.0

        l_ratio = l/Di2
        if l_ratio > 0.6:
            l_ratio = 0.6
        return float(contraction_conical_Blevins_obj(l_ratio, A_ratio))
    elif method == 'Miller':
        A_ratio = Di1*Di1/(Di2*Di2)
        if A_ratio > 4.0:
            A_ratio = 4.0
        elif A_ratio < 1.1:
            A_ratio = 1.1

        l_ratio = l/(Di2*0.5)
        if l_ratio < 0.1:
            l_ratio = 0.1
        elif l_ratio > 10.0:
            l_ratio = 10.0
        # Turning on or off the limits - little difference in plot
        return contraction_conical_Miller_obj(l_ratio, A_ratio)
    else:
        raise ValueError('Specified method not recognized; methods are %s'
                         % (contraction_conical_methods))

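# The methods can disagree noticeably, as the Notes warn; a quick comparison
# sketch over the docstring's example geometry:
for m in ('Rennels', 'Crane', 'Swamee', 'Idelchik', 'Blevins', 'Miller'):
    print(m, contraction_conical(Di1=0.1, Di2=0.04, l=0.04, Re=1E6, method=m))
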
"def",
"contraction_conical",
"(",
"Di1",
",",
"Di2",
",",
"fd",
"=",
"None",
",",
"l",
"=",
"None",
",",
"angle",
"=",
"None",
",",
"Re",
"=",
"None",
",",
"roughness",
"=",
"0.0",
",",
"method",
"=",
"'Rennels'",
")",
":",
"beta",
"=",
"Di2",
"/",
"Di1",
"if",
"angle",
"is",
"not",
"None",
":",
"angle_rad",
"=",
"radians",
"(",
"angle",
")",
"l",
"=",
"(",
"Di1",
"-",
"Di2",
")",
"/",
"(",
"2.0",
"*",
"tan",
"(",
"0.5",
"*",
"angle_rad",
")",
")",
"elif",
"l",
"is",
"not",
"None",
":",
"try",
":",
"angle_rad",
"=",
"2.0",
"*",
"atan",
"(",
"(",
"Di1",
"-",
"Di2",
")",
"/",
"(",
"2.0",
"*",
"l",
")",
")",
"angle",
"=",
"degrees",
"(",
"angle_rad",
")",
"except",
"ZeroDivisionError",
":",
"angle_rad",
"=",
"pi",
"angle",
"=",
"180.0",
"else",
":",
"raise",
"Exception",
"(",
"'Either l or angle is required'",
")",
"if",
"method",
"is",
"None",
":",
"method",
"==",
"'Rennels'",
"if",
"method",
"==",
"'Rennels'",
":",
"if",
"fd",
"is",
"None",
":",
"if",
"Re",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"The `Rennels` method requires either a \"",
"\"specified friction factor or `Re`\"",
")",
"fd",
"=",
"Colebrook",
"(",
"Re",
"=",
"Re",
",",
"eD",
"=",
"roughness",
"/",
"Di2",
",",
"tol",
"=",
"-",
"1",
")",
"beta2",
"=",
"beta",
"*",
"beta",
"beta4",
"=",
"beta2",
"*",
"beta2",
"beta5",
"=",
"beta4",
"*",
"beta",
"lbd",
"=",
"1.0",
"+",
"0.622",
"*",
"(",
"angle_rad",
"/",
"pi",
")",
"**",
"0.8",
"*",
"(",
"1.0",
"-",
"0.215",
"*",
"beta2",
"-",
"0.785",
"*",
"beta5",
")",
"sin_half_angle",
"=",
"sin",
"(",
"0.5",
"*",
"angle_rad",
")",
"K_fr2",
"=",
"fd",
"*",
"(",
"1.0",
"-",
"beta4",
")",
"/",
"(",
"8.0",
"*",
"sin_half_angle",
")",
"K_conv2",
"=",
"0.0696",
"*",
"sin_half_angle",
"*",
"(",
"1.0",
"-",
"beta5",
")",
"*",
"lbd",
"*",
"lbd",
"+",
"(",
"lbd",
"-",
"1.0",
")",
"**",
"2",
"return",
"K_fr2",
"+",
"K_conv2",
"elif",
"method",
"==",
"'Crane'",
":",
"return",
"contraction_conical_Crane",
"(",
"Di1",
"=",
"Di1",
",",
"Di2",
"=",
"Di2",
",",
"l",
"=",
"l",
",",
"angle",
"=",
"angle",
")",
"elif",
"method",
"==",
"'Swamee'",
":",
"return",
"0.315",
"*",
"angle_rad",
"**",
"(",
"1.0",
"/",
"3.0",
")",
"elif",
"method",
"==",
"'Idelchik'",
":",
"# Diagram 3-6; already digitized for beveled entrance",
"K0",
"=",
"float",
"(",
"entrance_beveled_Idelchik_obj",
"(",
"angle",
",",
"l",
"/",
"Di2",
")",
")",
"# Angles 0 to 20, ratios 0.05 to 0.06",
"if",
"angle",
">",
"20.0",
":",
"angle_fric",
"=",
"20.0",
"elif",
"angle",
"<",
"2.0",
":",
"angle_fric",
"=",
"2.0",
"else",
":",
"angle_fric",
"=",
"angle",
"A_ratio",
"=",
"A_ratio_fric",
"=",
"Di2",
"*",
"Di2",
"/",
"(",
"Di1",
"*",
"Di1",
")",
"if",
"A_ratio_fric",
"<",
"0.05",
":",
"A_ratio_fric",
"=",
"0.05",
"elif",
"A_ratio_fric",
">",
"0.6",
":",
"A_ratio_fric",
"=",
"0.6",
"K_fr",
"=",
"float",
"(",
"contraction_conical_frction_Idelchik_obj",
"(",
"angle_fric",
",",
"A_ratio_fric",
")",
")",
"return",
"K0",
"*",
"(",
"1.0",
"-",
"A_ratio",
")",
"+",
"K_fr",
"elif",
"method",
"==",
"'Blevins'",
":",
"A_ratio",
"=",
"Di1",
"*",
"Di1",
"/",
"(",
"Di2",
"*",
"Di2",
")",
"if",
"A_ratio",
"<",
"1.2",
":",
"A_ratio",
"=",
"1.2",
"elif",
"A_ratio",
">",
"10.0",
":",
"A_ratio",
"=",
"10.0",
"l_ratio",
"=",
"l",
"/",
"Di2",
"if",
"l_ratio",
">",
"0.6",
":",
"l_ratio",
"=",
"0.6",
"return",
"float",
"(",
"contraction_conical_Blevins_obj",
"(",
"l_ratio",
",",
"A_ratio",
")",
")",
"elif",
"method",
"==",
"'Miller'",
":",
"A_ratio",
"=",
"Di1",
"*",
"Di1",
"/",
"(",
"Di2",
"*",
"Di2",
")",
"if",
"A_ratio",
">",
"4.0",
":",
"A_ratio",
"=",
"4.0",
"elif",
"A_ratio",
"<",
"1.1",
":",
"A_ratio",
"=",
"1.1",
"l_ratio",
"=",
"l",
"/",
"(",
"Di2",
"*",
"0.5",
")",
"if",
"l_ratio",
"<",
"0.1",
":",
"l_ratio",
"=",
"0.1",
"elif",
"l_ratio",
">",
"10.0",
":",
"l_ratio",
"=",
"10.0",
"# Turning on ofr off the limits - little difference in plot",
"return",
"contraction_conical_Miller_obj",
"(",
"l_ratio",
",",
"A_ratio",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Specified method not recognized; methods are %s'",
"%",
"(",
"contraction_conical_methods",
")",
")"
] | 36.328042 | [
0.04838709677419355,
0.09090909090909091,
0.02857142857142857,
0.0410958904109589,
0.05128205128205128,
0.07142857142857142,
0.02631578947368421,
0.08695652173913043,
0.5,
0.05555555555555555,
0,
0.23076923076923078,
0.11428571428571428,
0.25,
0.23076923076923078,
0.06779661016949153,
0.25,
0.23076923076923078,
0.09090909090909091,
0,
0.23076923076923078,
0.06756756756756757,
0,
0.23076923076923078,
0.08695652173913043,
0.5,
0.09090909090909091,
0.5,
0.23076923076923078,
0.1,
0,
0.0625,
0.21052631578947367,
0.13636363636363635,
0,
0.14285714285714285,
0.14285714285714285,
0.2,
0.04477611940298507,
0.2,
0.04285714285714286,
0.125,
0.05333333333333334,
0.05555555555555555,
0.13043478260869565,
0.06382978723404255,
0.1111111111111111,
0.09210526315789473,
0.11538461538461539,
0.125,
0.05063291139240506,
0.11538461538461539,
0.0967741935483871,
0.06578947368421052,
0.18181818181818182,
0.11538461538461539,
0.039473684210526314,
0.046511627906976744,
0,
0.18181818181818182,
0.18181818181818182,
0.23076923076923078,
0.05084745762711865,
0,
0.2222222222222222,
0.2222222222222222,
0.037037037037037035,
0.5,
0.04,
0.02564102564102564,
0.02531645569620253,
0.029411764705882353,
0.5,
0.05333333333333334,
0.02564102564102564,
0.025974025974025976,
0.0379746835443038,
0.1,
0,
0.5,
0.04,
0.04878048780487805,
0,
0.07142857142857142,
0,
0.16666666666666666,
0.16666666666666666,
0.04838709677419355,
0.08695652173913043,
0,
0.14285714285714285,
0.14285714285714285,
0.02666666666666667,
0.04225352112676056,
0.038461538461538464,
0.08333333333333333,
0.03896103896103896,
0.06451612903225806,
0.0379746835443038,
0.07692307692307693,
0.02564102564102564,
0.25,
0.039473684210526314,
0.06521739130434782,
0.02702702702702703,
0.061224489795918366,
0.2857142857142857,
0.1111111111111111,
0.08,
0.058823529411764705,
0.08163265306122448,
0.08695652173913043,
0.16666666666666666,
0.0392156862745098,
0.05263157894736842,
0.06060606060606061,
0.07692307692307693,
0.08,
0.2222222222222222,
0.03571428571428571,
0.09090909090909091,
0.07407407407407407,
0,
0.07407407407407407,
0.09090909090909091,
0.07692307692307693,
0.04054054054054054,
0.057971014492753624,
0.03389830508474576,
0.16666666666666666,
0.08,
0.07407407407407407,
0.07692307692307693,
0.02531645569620253,
0.046511627906976744,
0.03773584905660377,
0.02564102564102564,
0.06666666666666667,
0.07407407407407407,
0.02631578947368421,
0.07142857142857142,
0.04878048780487805,
0.06666666666666667,
0.03278688524590164,
0.031746031746031744,
0.25,
0.044444444444444446,
0.08333333333333333,
0.06896551724137931,
0.08,
0.07142857142857142,
0.15384615384615385,
0.06666666666666667,
0.16666666666666666,
0.04,
0.06451612903225806,
0.06451612903225806,
0.0625,
0.06666666666666667,
0.16666666666666666,
0.03409090909090909,
0.05,
0.06896551724137931,
0.05714285714285714,
0.08,
0.08,
0.07142857142857142,
0.07692307692307693,
0.25,
0.08695652173913043,
0.08,
0.125,
0.028169014084507043,
0.07142857142857142,
0.05714285714285714,
0.08,
0.08,
0.07407407407407407,
0.08,
0.06896551724137931,
0.08,
0.08,
0.07142857142857142,
0.07692307692307693,
0.029850746268656716,
0.031746031746031744,
0.2222222222222222,
0.04054054054054054,
0.07142857142857142
] |
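A minimal usage sketch for the contraction_conical record above, assuming the signature recovered in its token list (Di1, Di2, fd=None, l=None, angle=None, Re=None, roughness=0.0, method='Rennels'); the pipe dimensions and Reynolds number are illustrative, not from the source:
# Loss coefficient for a 0.1 m -> 0.04 m conical contraction, 0.04 m long
K = contraction_conical(Di1=0.1, Di2=0.04, l=0.04, Re=1e5, method='Rennels')
# The geometry may instead be given as an included angle in degrees
K2 = contraction_conical(Di1=0.1, Di2=0.04, angle=60.0, method='Swamee')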
def translator(self):
"""Get a valid translator object from one or several languages names."""
if self._translator is None:
languages = self.lang
if not languages:
return gettext.NullTranslations()
if not isinstance(languages, list):
languages = [languages]
translator = gettext.NullTranslations()
for name, i18n_dir in [
(
'biryani',
os.path.join(pkg_resources.get_distribution('biryani').location, 'biryani', 'i18n'),
),
(
conf['country_package'].replace('_', '-'),
os.path.join(pkg_resources.get_distribution(conf['country_package']).location,
conf['country_package'], 'i18n'),
),
]:
if i18n_dir is not None:
translator = new_translator(name, i18n_dir, languages, fallback = translator)
translator = new_translator(conf['package_name'], conf['i18n_dir'], languages, fallback = translator)
self._translator = translator
return self._translator | [
"def",
"translator",
"(",
"self",
")",
":",
"if",
"self",
".",
"_translator",
"is",
"None",
":",
"languages",
"=",
"self",
".",
"lang",
"if",
"not",
"languages",
":",
"return",
"gettext",
".",
"NullTranslations",
"(",
")",
"if",
"not",
"isinstance",
"(",
"languages",
",",
"list",
")",
":",
"languages",
"=",
"[",
"languages",
"]",
"translator",
"=",
"gettext",
".",
"NullTranslations",
"(",
")",
"for",
"name",
",",
"i18n_dir",
"in",
"[",
"(",
"'biryani'",
",",
"os",
".",
"path",
".",
"join",
"(",
"pkg_resources",
".",
"get_distribution",
"(",
"'biryani'",
")",
".",
"location",
",",
"'biryani'",
",",
"'i18n'",
")",
",",
")",
",",
"(",
"conf",
"[",
"'country_package'",
"]",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"pkg_resources",
".",
"get_distribution",
"(",
"conf",
"[",
"'country_package'",
"]",
")",
".",
"location",
",",
"conf",
"[",
"'country_package'",
"]",
",",
"'i18n'",
")",
",",
")",
",",
"]",
":",
"if",
"i18n_dir",
"is",
"not",
"None",
":",
"translator",
"=",
"new_translator",
"(",
"name",
",",
"i18n_dir",
",",
"languages",
",",
"fallback",
"=",
"translator",
")",
"translator",
"=",
"new_translator",
"(",
"conf",
"[",
"'package_name'",
"]",
",",
"conf",
"[",
"'i18n_dir'",
"]",
",",
"languages",
",",
"fallback",
"=",
"translator",
")",
"self",
".",
"_translator",
"=",
"translator",
"return",
"self",
".",
"_translator"
] | 49.16 | [
0.047619047619047616,
0.0375,
0.05555555555555555,
0.06060606060606061,
0.06896551724137931,
0.04081632653061224,
0.0425531914893617,
0.05128205128205128,
0.0392156862745098,
0.08571428571428572,
0.14285714285714285,
0.058823529411764705,
0.027777777777777776,
0.11538461538461539,
0.14285714285714285,
0.030303030303030304,
0.0392156862745098,
0.04918032786885246,
0.11538461538461539,
0.13636363636363635,
0.05,
0.05154639175257732,
0.04424778761061947,
0.04878048780487805,
0.06451612903225806
] |
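The translator record above chains gettext catalogs so that more specific packages fall back to more general ones; a self-contained sketch of the same pattern (the domain name 'mypkg' and the 'i18n' directory are made up):
import gettext
base = gettext.NullTranslations()
# A message missing from the specific catalog falls back to `base`.
specific = gettext.translation('mypkg', 'i18n', languages=['fr'], fallback=True)
specific.add_fallback(base)
print(specific.gettext('Hello'))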
def scalar_leaf_only(operator):
"""Ensure the filter function is only applied to scalar leaf types."""
def decorator(f):
"""Decorate the supplied function with the "scalar_leaf_only" logic."""
@wraps(f)
def wrapper(filter_operation_info, context, parameters, *args, **kwargs):
"""Check that the type on which the operator operates is a scalar leaf type."""
if 'operator' in kwargs:
current_operator = kwargs['operator']
else:
# Because "operator" is from an enclosing scope, it is immutable in Python 2.x.
current_operator = operator
if not is_leaf_type(filter_operation_info.field_type):
raise GraphQLCompilationError(u'Cannot apply "{}" filter to non-leaf type'
u'{}'.format(current_operator, filter_operation_info))
return f(filter_operation_info, context, parameters, *args, **kwargs)
return wrapper
return decorator | [
"def",
"scalar_leaf_only",
"(",
"operator",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"\"\"\"Decorate the supplied function with the \"scalar_leaf_only\" logic.\"\"\"",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"filter_operation_info",
",",
"context",
",",
"parameters",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Check that the type on which the operator operates is a scalar leaf type.\"\"\"",
"if",
"'operator'",
"in",
"kwargs",
":",
"current_operator",
"=",
"kwargs",
"[",
"'operator'",
"]",
"else",
":",
"# Because \"operator\" is from an enclosing scope, it is immutable in Python 2.x.",
"current_operator",
"=",
"operator",
"if",
"not",
"is_leaf_type",
"(",
"filter_operation_info",
".",
"field_type",
")",
":",
"raise",
"GraphQLCompilationError",
"(",
"u'Cannot apply \"{}\" filter to non-leaf type'",
"u'{}'",
".",
"format",
"(",
"current_operator",
",",
"filter_operation_info",
")",
")",
"return",
"f",
"(",
"filter_operation_info",
",",
"context",
",",
"parameters",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"decorator"
] | 48.428571 | [
0.03225806451612903,
0.02702702702702703,
0.09523809523809523,
0.02531645569620253,
0.11764705882352941,
0.037037037037037035,
0.03296703296703297,
0.05555555555555555,
0.03773584905660377,
0.11764705882352941,
0.031578947368421054,
0.046511627906976744,
0,
0.030303030303030304,
0.044444444444444446,
0.05,
0.037037037037037035,
0,
0.09090909090909091,
0,
0.1
] |
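The scalar_leaf_only record above is a parameterized decorator; a hedged sketch of applying it (the operator string and the function name are hypothetical):
@scalar_leaf_only('=')
def process_equals_filter(filter_operation_info, context, parameters):
    # Only reached when filter_operation_info.field_type is a scalar leaf type;
    # otherwise the wrapper raises GraphQLCompilationError first.
    ...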
def _request(self, http_method, relative_url='', **kwargs):
"""Does actual HTTP request using requests library."""
# It could be possible to call api.resource.get('/index')
# but it would be non-intuitive that the path would resolve
# to root of domain
relative_url = self._remove_leading_slash(relative_url)
# Add default kwargs with possible custom kwargs returned by
# before_request
new_kwargs = self.default_kwargs().copy()
custom_kwargs = self.before_request(
http_method,
relative_url,
kwargs.copy()
)
new_kwargs.update(custom_kwargs)
response = requests.request(
http_method,
self._join_url(relative_url),
**new_kwargs
)
return self.after_request(response) | [
"def",
"_request",
"(",
"self",
",",
"http_method",
",",
"relative_url",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
":",
"# It could be possible to call api.resource.get('/index')",
"# but it would be non-intuitive that the path would resolve",
"# to root of domain",
"relative_url",
"=",
"self",
".",
"_remove_leading_slash",
"(",
"relative_url",
")",
"# Add default kwargs with possible custom kwargs returned by",
"# before_request",
"new_kwargs",
"=",
"self",
".",
"default_kwargs",
"(",
")",
".",
"copy",
"(",
")",
"custom_kwargs",
"=",
"self",
".",
"before_request",
"(",
"http_method",
",",
"relative_url",
",",
"kwargs",
".",
"copy",
"(",
")",
")",
"new_kwargs",
".",
"update",
"(",
"custom_kwargs",
")",
"response",
"=",
"requests",
".",
"request",
"(",
"http_method",
",",
"self",
".",
"_join_url",
"(",
"relative_url",
")",
",",
"*",
"*",
"new_kwargs",
")",
"return",
"self",
".",
"after_request",
"(",
"response",
")"
] | 34.5 | [
0.01694915254237288,
0.03225806451612903,
0.03076923076923077,
0.029850746268656716,
0.07407407407407407,
0.031746031746031744,
0,
0.029411764705882353,
0.08333333333333333,
0.04081632653061224,
0.06818181818181818,
0.08333333333333333,
0.08,
0.08,
0.3333333333333333,
0.05,
0,
0.08333333333333333,
0.08333333333333333,
0.04878048780487805,
0.08333333333333333,
0.3333333333333333,
0,
0.046511627906976744
] |
def value(self):
"""The current value of the mean."""
return self._sum / tf.cast(self._count, self._dtype) | [
"def",
"value",
"(",
"self",
")",
":",
"return",
"self",
".",
"_sum",
"/",
"tf",
".",
"cast",
"(",
"self",
".",
"_count",
",",
"self",
".",
"_dtype",
")"
] | 37.333333 | [
0.0625,
0.05,
0.03571428571428571
] |
def register_connection(self, alias, api_key, base_url, timeout=5):
"""
Create and register a new connection.
:param alias: The alias of the connection. If not changed with `switch_connection`,
the connection with default 'alias' is used by the resources.
:param api_key: The private api key.
:param base_url: The api url including protocol, host, port (optional) and location.
:param timeout: The time in seconds to wait for 'connect' and 'read' respectively.
Use a tuple to set these values separately or None to wait forever.
:return:
"""
if not base_url.endswith('/'):
base_url += '/'
self._connections[alias] = Connection(api_key, base_url, timeout) | [
"def",
"register_connection",
"(",
"self",
",",
"alias",
",",
"api_key",
",",
"base_url",
",",
"timeout",
"=",
"5",
")",
":",
"if",
"not",
"base_url",
".",
"endswith",
"(",
"'/'",
")",
":",
"base_url",
"+=",
"'/'",
"self",
".",
"_connections",
"[",
"alias",
"]",
"=",
"Connection",
"(",
"api_key",
",",
"base_url",
",",
"timeout",
")"
] | 48.9375 | [
0.014925373134328358,
0.18181818181818182,
0.044444444444444446,
0,
0.053763440860215055,
0.03529411764705882,
0.06818181818181818,
0.05434782608695652,
0.044444444444444446,
0.03296703296703297,
0.1875,
0.18181818181818182,
0.05263157894736842,
0.07407407407407407,
0,
0.0273972602739726
] |
def from_file_obj(cls, fp):
"""
Init a new object from a file-like object.
Not for Outlook msg.
Args:
fp (file-like object): file-like object of raw email
Returns:
Instance of MailParser
"""
log.debug("Parsing email from file object")
try:
fp.seek(0)
except IOError:
# When stdout is a TTY it's a character device
# and it's not seekable, you cannot seek in a TTY.
pass
finally:
s = fp.read()
return cls.from_string(s) | [
"def",
"from_file_obj",
"(",
"cls",
",",
"fp",
")",
":",
"log",
".",
"debug",
"(",
"\"Parsing email from file object\"",
")",
"try",
":",
"fp",
".",
"seek",
"(",
"0",
")",
"except",
"IOError",
":",
"# When stdout is a TTY it's a character device",
"# and it's not seekable, you cannot seek in a TTY.",
"pass",
"finally",
":",
"s",
"=",
"fp",
".",
"read",
"(",
")",
"return",
"cls",
".",
"from_string",
"(",
"s",
")"
] | 26 | [
0.037037037037037035,
0.18181818181818182,
0.04,
0.07142857142857142,
0,
0.15384615384615385,
0.046875,
0,
0.125,
0.058823529411764705,
0.18181818181818182,
0.0392156862745098,
0.16666666666666666,
0.09090909090909091,
0.08695652173913043,
0.034482758620689655,
0.03225806451612903,
0.125,
0.125,
0.08,
0,
0.06060606060606061
] |
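A usage sketch for the from_file_obj record above, assuming the class accepts any seekable file-like object (the message content is made up):
import io
raw = 'From: a@example.com\nSubject: hi\n\nhello world'
mail = MailParser.from_file_obj(io.StringIO(raw))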
def punsubscribe(self, pattern, *patterns):
"""Unsubscribe from specific patterns.
Arguments can be instances of :class:`~aioredis.Channel`.
"""
conn = self._pool_or_conn
return conn.execute_pubsub(b'PUNSUBSCRIBE', pattern, *patterns) | [
"def",
"punsubscribe",
"(",
"self",
",",
"pattern",
",",
"*",
"patterns",
")",
":",
"conn",
"=",
"self",
".",
"_pool_or_conn",
"return",
"conn",
".",
"execute_pubsub",
"(",
"b'PUNSUBSCRIBE'",
",",
"pattern",
",",
"*",
"patterns",
")"
] | 38.428571 | [
0.023255813953488372,
0.043478260869565216,
0,
0.09230769230769231,
0.18181818181818182,
0.06060606060606061,
0.028169014084507043
] |
def get_or_create_data_group(self, title: str) -> DataGroup:
"""Get (or create) a data group.
:param title: The title of the data group.
:return: The new :py:class:`nion.swift.Facade.DataGroup` object.
:rtype: :py:class:`nion.swift.Facade.DataGroup`
.. versionadded:: 1.0
Scriptable: Yes
"""
return DataGroup(self.__document_model.get_or_create_data_group(title)) | [
"def",
"get_or_create_data_group",
"(",
"self",
",",
"title",
":",
"str",
")",
"->",
"DataGroup",
":",
"return",
"DataGroup",
"(",
"self",
".",
"__document_model",
".",
"get_or_create_data_group",
"(",
"title",
")",
")"
] | 34.916667 | [
0.016666666666666666,
0.05,
0,
0.06,
0.1111111111111111,
0.14545454545454545,
0,
0.10344827586206896,
0,
0.08695652173913043,
0.18181818181818182,
0.02531645569620253
] |
def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i, sentence in enumerate(sentences):
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences | [
"def",
"pad_sentences",
"(",
"sentences",
",",
"padding_word",
"=",
"\"</s>\"",
")",
":",
"sequence_length",
"=",
"max",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"sentences",
")",
"padded_sentences",
"=",
"[",
"]",
"for",
"i",
",",
"sentence",
"in",
"enumerate",
"(",
"sentences",
")",
":",
"num_padding",
"=",
"sequence_length",
"-",
"len",
"(",
"sentence",
")",
"new_sentence",
"=",
"sentence",
"+",
"[",
"padding_word",
"]",
"*",
"num_padding",
"padded_sentences",
".",
"append",
"(",
"new_sentence",
")",
"return",
"padded_sentences"
] | 44.181818 | [
0.02,
0.03260869565217391,
0.06896551724137931,
0.2857142857142857,
0.038461538461538464,
0.08,
0.045454545454545456,
0.03773584905660377,
0.03225806451612903,
0.044444444444444446,
0.07407407407407407
] |
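The pad_sentences record above is easy to verify by hand; a small example consistent with its code:
sentences = [['the', 'cat', 'sat'], ['hello']]
padded = pad_sentences(sentences)
# padded == [['the', 'cat', 'sat'], ['hello', '</s>', '</s>']]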
def collapse(self):
"""
Collapse the collection items into a single element (dict or list)
:return: A new Collection instance with collapsed items
:rtype: Collection
"""
results = []
if isinstance(self._items, dict):
items = self._items.values()
else:
items = self._items
for values in items:
if isinstance(values, Collection):
values = values.all()
results += values
return Collection(results) | [
"def",
"collapse",
"(",
"self",
")",
":",
"results",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"self",
".",
"_items",
",",
"dict",
")",
":",
"items",
"=",
"self",
".",
"_items",
".",
"values",
"(",
")",
"for",
"values",
"in",
"items",
":",
"if",
"isinstance",
"(",
"values",
",",
"Collection",
")",
":",
"values",
"=",
"values",
".",
"all",
"(",
")",
"results",
"+=",
"values",
"return",
"Collection",
"(",
"results",
")"
] | 25.210526 | [
0.05263157894736842,
0.18181818181818182,
0.04054054054054054,
0,
0.047619047619047616,
0.11538461538461539,
0.18181818181818182,
0.1,
0,
0.04878048780487805,
0.05,
0,
0.07142857142857142,
0.043478260869565216,
0.05405405405405406,
0,
0.06896551724137931,
0,
0.058823529411764705
] |
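A sketch for the collapse record above, assuming Collection(items) stores the given list and all() returns it, as the method body implies:
c = Collection([[1, 2], Collection([3, 4])])
flat = c.collapse()
# flat.all() == [1, 2, 3, 4]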
def set_(key, value, service=None, profile=None): # pylint: disable=W0613
'''
Set a key/value pair in the cache service
'''
key, profile = _parse_key(key, profile)
cache = salt.cache.Cache(__opts__)
cache.store(profile['bank'], key, value)
return get(key, service, profile) | [
"def",
"set_",
"(",
"key",
",",
"value",
",",
"service",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"# pylint: disable=W0613",
"key",
",",
"profile",
"=",
"_parse_key",
"(",
"key",
",",
"profile",
")",
"cache",
"=",
"salt",
".",
"cache",
".",
"Cache",
"(",
"__opts__",
")",
"cache",
".",
"store",
"(",
"profile",
"[",
"'bank'",
"]",
",",
"key",
",",
"value",
")",
"return",
"get",
"(",
"key",
",",
"service",
",",
"profile",
")"
] | 36.875 | [
0.013513513513513514,
0.2857142857142857,
0.044444444444444446,
0.2857142857142857,
0.046511627906976744,
0.05263157894736842,
0.045454545454545456,
0.05405405405405406
] |
def flag_values_dict(self):
"""Returns a dictionary that maps flag names to flag values."""
return {name: flag.value for name, flag in six.iteritems(self._flags())} | [
"def",
"flag_values_dict",
"(",
"self",
")",
":",
"return",
"{",
"name",
":",
"flag",
".",
"value",
"for",
"name",
",",
"flag",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_flags",
"(",
")",
")",
"}"
] | 56.666667 | [
0.037037037037037035,
0.029850746268656716,
0.02631578947368421
] |
def to_iris(dataarray):
""" Convert a DataArray into a Iris Cube
"""
# Iris not a hard dependency
import iris
from iris.fileformats.netcdf import parse_cell_methods
dim_coords = []
aux_coords = []
for coord_name in dataarray.coords:
coord = encode(dataarray.coords[coord_name])
coord_args = _get_iris_args(coord.attrs)
coord_args['var_name'] = coord_name
axis = None
if coord.dims:
axis = dataarray.get_axis_num(coord.dims)
if coord_name in dataarray.dims:
try:
iris_coord = iris.coords.DimCoord(coord.values, **coord_args)
dim_coords.append((iris_coord, axis))
except ValueError:
iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
aux_coords.append((iris_coord, axis))
else:
iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
aux_coords.append((iris_coord, axis))
args = _get_iris_args(dataarray.attrs)
args['var_name'] = dataarray.name
args['dim_coords_and_dims'] = dim_coords
args['aux_coords_and_dims'] = aux_coords
if 'cell_methods' in dataarray.attrs:
args['cell_methods'] = \
parse_cell_methods(dataarray.attrs['cell_methods'])
masked_data = duck_array_ops.masked_invalid(dataarray.data)
cube = iris.cube.Cube(masked_data, **args)
return cube | [
"def",
"to_iris",
"(",
"dataarray",
")",
":",
"# Iris not a hard dependency",
"import",
"iris",
"from",
"iris",
".",
"fileformats",
".",
"netcdf",
"import",
"parse_cell_methods",
"dim_coords",
"=",
"[",
"]",
"aux_coords",
"=",
"[",
"]",
"for",
"coord_name",
"in",
"dataarray",
".",
"coords",
":",
"coord",
"=",
"encode",
"(",
"dataarray",
".",
"coords",
"[",
"coord_name",
"]",
")",
"coord_args",
"=",
"_get_iris_args",
"(",
"coord",
".",
"attrs",
")",
"coord_args",
"[",
"'var_name'",
"]",
"=",
"coord_name",
"axis",
"=",
"None",
"if",
"coord",
".",
"dims",
":",
"axis",
"=",
"dataarray",
".",
"get_axis_num",
"(",
"coord",
".",
"dims",
")",
"if",
"coord_name",
"in",
"dataarray",
".",
"dims",
":",
"try",
":",
"iris_coord",
"=",
"iris",
".",
"coords",
".",
"DimCoord",
"(",
"coord",
".",
"values",
",",
"*",
"*",
"coord_args",
")",
"dim_coords",
".",
"append",
"(",
"(",
"iris_coord",
",",
"axis",
")",
")",
"except",
"ValueError",
":",
"iris_coord",
"=",
"iris",
".",
"coords",
".",
"AuxCoord",
"(",
"coord",
".",
"values",
",",
"*",
"*",
"coord_args",
")",
"aux_coords",
".",
"append",
"(",
"(",
"iris_coord",
",",
"axis",
")",
")",
"else",
":",
"iris_coord",
"=",
"iris",
".",
"coords",
".",
"AuxCoord",
"(",
"coord",
".",
"values",
",",
"*",
"*",
"coord_args",
")",
"aux_coords",
".",
"append",
"(",
"(",
"iris_coord",
",",
"axis",
")",
")",
"args",
"=",
"_get_iris_args",
"(",
"dataarray",
".",
"attrs",
")",
"args",
"[",
"'var_name'",
"]",
"=",
"dataarray",
".",
"name",
"args",
"[",
"'dim_coords_and_dims'",
"]",
"=",
"dim_coords",
"args",
"[",
"'aux_coords_and_dims'",
"]",
"=",
"aux_coords",
"if",
"'cell_methods'",
"in",
"dataarray",
".",
"attrs",
":",
"args",
"[",
"'cell_methods'",
"]",
"=",
"parse_cell_methods",
"(",
"dataarray",
".",
"attrs",
"[",
"'cell_methods'",
"]",
")",
"masked_data",
"=",
"duck_array_ops",
".",
"masked_invalid",
"(",
"dataarray",
".",
"data",
")",
"cube",
"=",
"iris",
".",
"cube",
".",
"Cube",
"(",
"masked_data",
",",
"*",
"*",
"args",
")",
"return",
"cube"
] | 35.025 | [
0.043478260869565216,
0.045454545454545456,
0.2857142857142857,
0.0625,
0.13333333333333333,
0.034482758620689655,
0,
0.10526315789473684,
0.10526315789473684,
0,
0.05128205128205128,
0.038461538461538464,
0.041666666666666664,
0.046511627906976744,
0.10526315789473684,
0.09090909090909091,
0.03773584905660377,
0.05,
0.125,
0.025974025974025976,
0.03773584905660377,
0.06666666666666667,
0.025974025974025976,
0.03773584905660377,
0.15384615384615385,
0.0273972602739726,
0.04081632653061224,
0,
0.047619047619047616,
0.05405405405405406,
0.045454545454545456,
0.045454545454545456,
0.04878048780487805,
0.0625,
0.031746031746031744,
0,
0.031746031746031744,
0.043478260869565216,
0,
0.13333333333333333
] |
def reset_annotations(self):
"""Resets the builder's state to allow building new annotations."""
# FIXME: this state does not make sense
self.annotation_date_set = False
self.annotation_comment_set = False
self.annotation_type_set = False
self.annotation_spdx_id_set = False | [
"def",
"reset_annotations",
"(",
"self",
")",
":",
"# FIXME: this state does not make sense",
"self",
".",
"annotation_date_set",
"=",
"False",
"self",
".",
"annotation_comment_set",
"=",
"False",
"self",
".",
"annotation_type_set",
"=",
"False",
"self",
".",
"annotation_spdx_id_set",
"=",
"False"
] | 45.142857 | [
0.03571428571428571,
0.02666666666666667,
0.0425531914893617,
0.05,
0.046511627906976744,
0.05,
0.046511627906976744
] |
def get_avatar(self):
"""Gets the asset.
return: (osid.repository.Asset) - the asset
raise: IllegalState - ``has_avatar()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not bool(self._my_map['avatarId']):
raise errors.IllegalState('this Resource has no avatar')
mgr = self._get_provider_manager('REPOSITORY')
if not mgr.supports_asset_lookup():
raise errors.OperationFailed('Repository does not support Asset lookup')
lookup_session = mgr.get_asset_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_repository_view()
osid_object = lookup_session.get_asset(self.get_avatar_id())
return osid_object | [
"def",
"get_avatar",
"(",
"self",
")",
":",
"# Implemented from template for osid.resource.Resource.get_avatar_template",
"if",
"not",
"bool",
"(",
"self",
".",
"_my_map",
"[",
"'avatarId'",
"]",
")",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
"'this Resource has no avatar'",
")",
"mgr",
"=",
"self",
".",
"_get_provider_manager",
"(",
"'REPOSITORY'",
")",
"if",
"not",
"mgr",
".",
"supports_asset_lookup",
"(",
")",
":",
"raise",
"errors",
".",
"OperationFailed",
"(",
"'Repository does not support Asset lookup'",
")",
"lookup_session",
"=",
"mgr",
".",
"get_asset_lookup_session",
"(",
"proxy",
"=",
"getattr",
"(",
"self",
",",
"\"_proxy\"",
",",
"None",
")",
")",
"lookup_session",
".",
"use_federated_repository_view",
"(",
")",
"osid_object",
"=",
"lookup_session",
".",
"get_asset",
"(",
"self",
".",
"get_avatar_id",
"(",
")",
")",
"return",
"osid_object"
] | 47.947368 | [
0.047619047619047616,
0.07692307692307693,
0,
0.0392156862745098,
0.05,
0.03333333333333333,
0.04477611940298507,
0,
0.18181818181818182,
0.036585365853658534,
0.043478260869565216,
0.029411764705882353,
0.037037037037037035,
0.046511627906976744,
0.03571428571428571,
0.03333333333333333,
0.037037037037037035,
0.029411764705882353,
0.07692307692307693
] |
def build_schema(self, fields):
"""
Build the schema from fields.
:param fields: A dictionary mapping field names to fields in the index
:returns: a tuple of the content field name and a list of dictionaries,
one per column; each dictionary has the keys
'field_name': the name of the field index
'type': the type of the field
'multi_valued': whether it allows more than one value
'column': a number identifying the column
"""
content_field_name = ''
schema_fields = [
{'field_name': ID,
'type': 'text',
'multi_valued': 'false',
'column': 0},
{'field_name': DJANGO_ID,
'type': 'integer',
'multi_valued': 'false',
'column': 1},
{'field_name': DJANGO_CT,
'type': 'text',
'multi_valued': 'false',
'column': 2},
]
self._columns[ID] = 0
self._columns[DJANGO_ID] = 1
self._columns[DJANGO_CT] = 2
column = len(schema_fields)
for field_name, field_class in sorted(list(fields.items()), key=lambda n: n[0]):
if field_class.document is True:
content_field_name = field_class.index_fieldname
if field_class.indexed is True:
field_data = {
'field_name': field_class.index_fieldname,
'type': 'text',
'multi_valued': 'false',
'column': column,
}
if field_class.field_type == 'date':
field_data['type'] = 'date'
elif field_class.field_type == 'datetime':
field_data['type'] = 'datetime'
elif field_class.field_type == 'integer':
field_data['type'] = 'integer'
elif field_class.field_type == 'float':
field_data['type'] = 'float'
elif field_class.field_type == 'boolean':
field_data['type'] = 'boolean'
elif field_class.field_type == 'ngram':
field_data['type'] = 'ngram'
elif field_class.field_type == 'edge_ngram':
field_data['type'] = 'edge_ngram'
if field_class.is_multivalued:
field_data['multi_valued'] = 'true'
schema_fields.append(field_data)
self._columns[field_data['field_name']] = column
column += 1
return content_field_name, schema_fields | [
"def",
"build_schema",
"(",
"self",
",",
"fields",
")",
":",
"content_field_name",
"=",
"''",
"schema_fields",
"=",
"[",
"{",
"'field_name'",
":",
"ID",
",",
"'type'",
":",
"'text'",
",",
"'multi_valued'",
":",
"'false'",
",",
"'column'",
":",
"0",
"}",
",",
"{",
"'field_name'",
":",
"DJANGO_ID",
",",
"'type'",
":",
"'integer'",
",",
"'multi_valued'",
":",
"'false'",
",",
"'column'",
":",
"1",
"}",
",",
"{",
"'field_name'",
":",
"DJANGO_CT",
",",
"'type'",
":",
"'text'",
",",
"'multi_valued'",
":",
"'false'",
",",
"'column'",
":",
"2",
"}",
",",
"]",
"self",
".",
"_columns",
"[",
"ID",
"]",
"=",
"0",
"self",
".",
"_columns",
"[",
"DJANGO_ID",
"]",
"=",
"1",
"self",
".",
"_columns",
"[",
"DJANGO_CT",
"]",
"=",
"2",
"column",
"=",
"len",
"(",
"schema_fields",
")",
"for",
"field_name",
",",
"field_class",
"in",
"sorted",
"(",
"list",
"(",
"fields",
".",
"items",
"(",
")",
")",
",",
"key",
"=",
"lambda",
"n",
":",
"n",
"[",
"0",
"]",
")",
":",
"if",
"field_class",
".",
"document",
"is",
"True",
":",
"content_field_name",
"=",
"field_class",
".",
"index_fieldname",
"if",
"field_class",
".",
"indexed",
"is",
"True",
":",
"field_data",
"=",
"{",
"'field_name'",
":",
"field_class",
".",
"index_fieldname",
",",
"'type'",
":",
"'text'",
",",
"'multi_valued'",
":",
"'false'",
",",
"'column'",
":",
"column",
",",
"}",
"if",
"field_class",
".",
"field_type",
"==",
"'date'",
":",
"field_data",
"[",
"'type'",
"]",
"=",
"'date'",
"elif",
"field_class",
".",
"field_type",
"==",
"'datetime'",
":",
"field_data",
"[",
"'type'",
"]",
"=",
"'datetime'",
"elif",
"field_class",
".",
"field_type",
"==",
"'integer'",
":",
"field_data",
"[",
"'type'",
"]",
"=",
"'integer'",
"elif",
"field_class",
".",
"field_type",
"==",
"'float'",
":",
"field_data",
"[",
"'type'",
"]",
"=",
"'float'",
"elif",
"field_class",
".",
"field_type",
"==",
"'boolean'",
":",
"field_data",
"[",
"'type'",
"]",
"=",
"'boolean'",
"elif",
"field_class",
".",
"field_type",
"==",
"'ngram'",
":",
"field_data",
"[",
"'type'",
"]",
"=",
"'ngram'",
"elif",
"field_class",
".",
"field_type",
"==",
"'edge_ngram'",
":",
"field_data",
"[",
"'type'",
"]",
"=",
"'edge_ngram'",
"if",
"field_class",
".",
"is_multivalued",
":",
"field_data",
"[",
"'multi_valued'",
"]",
"=",
"'true'",
"schema_fields",
".",
"append",
"(",
"field_data",
")",
"self",
".",
"_columns",
"[",
"field_data",
"[",
"'field_name'",
"]",
"]",
"=",
"column",
"column",
"+=",
"1",
"return",
"content_field_name",
",",
"schema_fields"
] | 36.056338 | [
0.03225806451612903,
0.18181818181818182,
0.05405405405405406,
0,
0.057692307692307696,
0.07894736842105263,
0,
0.05555555555555555,
0.0625,
0.07692307692307693,
0.05263157894736842,
0.07142857142857142,
0.07894736842105263,
0.08695652173913043,
0.18181818181818182,
0.06451612903225806,
0.12,
0.1,
0.10714285714285714,
0.08108108108108109,
0.15384615384615385,
0.08108108108108109,
0.0967741935483871,
0.08108108108108109,
0.15384615384615385,
0.08108108108108109,
0.10714285714285714,
0.08108108108108109,
0.15384615384615385,
0.3333333333333333,
0.06896551724137931,
0.05555555555555555,
0.05555555555555555,
0,
0.05714285714285714,
0,
0.03409090909090909,
0.045454545454545456,
0.03125,
0,
0.046511627906976744,
0.1,
0.03225806451612903,
0.05714285714285714,
0.045454545454545456,
0.05405405405405406,
0.17647058823529413,
0,
0.038461538461538464,
0.0425531914893617,
0.034482758620689655,
0.0392156862745098,
0.03508771929824561,
0.04,
0.03636363636363636,
0.041666666666666664,
0.03508771929824561,
0.04,
0.03636363636363636,
0.041666666666666664,
0.03333333333333333,
0.03773584905660377,
0,
0.043478260869565216,
0.03636363636363636,
0,
0.041666666666666664,
0.03125,
0.07407407407407407,
0,
0.041666666666666664
] |
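A hedged sketch of calling the build_schema record above; `backend` stands for an instance of the surrounding class and `fields` for a Haystack-style mapping of field names to field objects (both hypothetical here):
content_field, schema = backend.build_schema(fields)
# The schema always begins with the three reserved columns, e.g.:
# {'field_name': 'id', 'type': 'text', 'multi_valued': 'false', 'column': 0}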
def data_input_and_res_time_analysis(self):
"""
Loads the data into Data() - renumbers the residues and imports the mol file in rdkit.
If there are trajectories to analyse, the residues that will be plotted are determined
from Residence_time() analysis.
"""
self.topol_data = Data()
self.topol_data.load_data(self.topology,self.mol_file,self.ligand,self.offset)
if len(self.trajectory) == 0:
self.topol_data.analyse_topology(self.topology,self.cutoff)
else:
self.res_time = Residence_time(self.topol_data,self.trajectory, self.start, self.end, self.skip,self.topology, self.ligand,self.offset)
self.res_time.measure_residence_time(self.cutoff)
self.res_time.define_residues_for_plotting_traj(self.analysis_cutoff)
self.topol_data.find_the_closest_atoms(self.topology) | [
"def",
"data_input_and_res_time_analysis",
"(",
"self",
")",
":",
"self",
".",
"topol_data",
"=",
"Data",
"(",
")",
"self",
".",
"topol_data",
".",
"load_data",
"(",
"self",
".",
"topology",
",",
"self",
".",
"mol_file",
",",
"self",
".",
"ligand",
",",
"self",
".",
"offset",
")",
"if",
"len",
"(",
"self",
".",
"trajectory",
")",
"==",
"0",
":",
"self",
".",
"topol_data",
".",
"analyse_topology",
"(",
"self",
".",
"topology",
",",
"self",
".",
"cutoff",
")",
"else",
":",
"self",
".",
"res_time",
"=",
"Residence_time",
"(",
"self",
".",
"topol_data",
",",
"self",
".",
"trajectory",
",",
"self",
".",
"start",
",",
"self",
".",
"end",
",",
"self",
".",
"skip",
",",
"self",
".",
"topology",
",",
"self",
".",
"ligand",
",",
"self",
".",
"offset",
")",
"self",
".",
"res_time",
".",
"measure_residence_time",
"(",
"self",
".",
"cutoff",
")",
"self",
".",
"res_time",
".",
"define_residues_for_plotting_traj",
"(",
"self",
".",
"analysis_cutoff",
")",
"self",
".",
"topol_data",
".",
"find_the_closest_atoms",
"(",
"self",
".",
"topology",
")"
] | 58.533333 | [
0.023255813953488372,
0.18181818181818182,
0.034482758620689655,
0.031914893617021274,
0.05128205128205128,
0.18181818181818182,
0.0625,
0.06976744186046512,
0.05405405405405406,
0.04225352112676056,
0.15384615384615385,
0.04081632653061224,
0.03278688524590164,
0.037037037037037035,
0.03076923076923077
] |
def local_datetime(dt=None, utc_value=True):
""" Convert UTC datetime and/or datetime without timezone information to local datetime with timezone
information
:param dt: datetime in UTC to convert. If is None, then system datetime value is used
:param utc_value: whether dt is a datetime in UTC or in system timezone without timezone information
:return: datetime for system (local) timezone with tz set
"""
# TODO: rename utc_value to utc_tz or in_utc_tz
if dt is None:
return datetime.now(tz=local_tz())
result = dt
if result.utcoffset() is None:
if utc_value is False:
return result.replace(tzinfo=local_tz())
else:
result = result.replace(tzinfo=timezone.utc)
return result.astimezone(local_tz()) | [
"def",
"local_datetime",
"(",
"dt",
"=",
"None",
",",
"utc_value",
"=",
"True",
")",
":",
"# TODO: rename utc_value to utc_tz or in_utc_tz",
"if",
"dt",
"is",
"None",
":",
"return",
"datetime",
".",
"now",
"(",
"tz",
"=",
"local_tz",
"(",
")",
")",
"result",
"=",
"dt",
"if",
"result",
".",
"utcoffset",
"(",
")",
"is",
"None",
":",
"if",
"utc_value",
"is",
"False",
":",
"return",
"result",
".",
"replace",
"(",
"tzinfo",
"=",
"local_tz",
"(",
")",
")",
"else",
":",
"result",
"=",
"result",
".",
"replace",
"(",
"tzinfo",
"=",
"timezone",
".",
"utc",
")",
"return",
"result",
".",
"astimezone",
"(",
"local_tz",
"(",
")",
")"
] | 35.35 | [
0.022727272727272728,
0.0392156862745098,
0.25,
0,
0.05813953488372093,
0.04950495049504951,
0.08620689655172414,
0.75,
0.0625,
0.2,
0.08333333333333333,
0,
0.25,
0.0967741935483871,
0.125,
0.06976744186046512,
0.42857142857142855,
0.06382978723404255,
0,
0.08108108108108109
] |
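Two illustrative calls for the local_datetime record above, one per utc_value branch:
from datetime import datetime
naive = datetime(2020, 1, 1, 12, 0)
local_datetime(naive)                   # treat the naive value as UTC, convert to local tz
local_datetime(naive, utc_value=False)  # treat it as already local; only attach tzinfo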
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val) | [
"def",
"update_long",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"kwargs",
")",
":",
"self",
".",
"update_arg",
"(",
"key",
",",
"long",
"=",
"val",
")"
] | 31.228571 | [
0.03125,
0.18181818181818182,
0.04225352112676056,
0,
0.02564102564102564,
0.1016949152542373,
0,
0.1111111111111111,
0.1111111111111111,
0.15,
0.08695652173913043,
0.03896103896103896,
0.07692307692307693,
0,
0.125,
0.125,
0.17647058823529413,
0,
0.04285714285714286,
0,
0.08823529411764706,
0,
0.05357142857142857,
0.04838709677419355,
0,
0.06,
0,
0.046153846153846156,
0.04225352112676056,
0,
0.125,
0.125,
0.08108108108108109,
0.043478260869565216,
0.047619047619047616
] |
def get_tabledata(self, dataset_id, table_id,
max_results=None, selected_fields=None, page_token=None,
start_index=None):
"""
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if selected_fields:
optional_params['selectedFields'] = selected_fields
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (self.service.tabledata().list(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
**optional_params).execute(num_retries=self.num_retries)) | [
"def",
"get_tabledata",
"(",
"self",
",",
"dataset_id",
",",
"table_id",
",",
"max_results",
"=",
"None",
",",
"selected_fields",
"=",
"None",
",",
"page_token",
"=",
"None",
",",
"start_index",
"=",
"None",
")",
":",
"optional_params",
"=",
"{",
"}",
"if",
"max_results",
":",
"optional_params",
"[",
"'maxResults'",
"]",
"=",
"max_results",
"if",
"selected_fields",
":",
"optional_params",
"[",
"'selectedFields'",
"]",
"=",
"selected_fields",
"if",
"page_token",
":",
"optional_params",
"[",
"'pageToken'",
"]",
"=",
"page_token",
"if",
"start_index",
":",
"optional_params",
"[",
"'startIndex'",
"]",
"=",
"start_index",
"return",
"(",
"self",
".",
"service",
".",
"tabledata",
"(",
")",
".",
"list",
"(",
"projectId",
"=",
"self",
".",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"tableId",
"=",
"table_id",
",",
"*",
"*",
"optional_params",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
")"
] | 46.870968 | [
0.044444444444444446,
0.07692307692307693,
0.125,
0.18181818181818182,
0.03614457831325301,
0.038461538461538464,
0,
0.046153846153846156,
0.04918032786885246,
0.05172413793103448,
0.038461538461538464,
0.04081632653061224,
0.043478260869565216,
0.05128205128205128,
0.0410958904109589,
0.058823529411764705,
0.18181818181818182,
0.07142857142857142,
0.08695652173913043,
0.03636363636363636,
0.07407407407407407,
0.031746031746031744,
0.09090909090909091,
0.03773584905660377,
0.08695652173913043,
0.03636363636363636,
0.06521739130434782,
0.07894736842105263,
0.09090909090909091,
0.10344827586206896,
0.057971014492753624
] |
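An illustrative call for the get_tabledata record above; `hook` stands for an instance of the surrounding BigQuery hook class, and the dataset and table IDs are made up:
rows = hook.get_tabledata('my_dataset', 'my_table',
                          max_results=10,
                          selected_fields='name,value')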
def get_files(self, offset=None, limit=None):
"""
Retrieves files in this project.
:param offset: Pagination offset.
:param limit: Pagination limit.
:return: Collection object.
"""
params = {'project': self.id, 'offset': offset, 'limit': limit}
return self._api.files.query(api=self._api, **params) | [
"def",
"get_files",
"(",
"self",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'project'",
":",
"self",
".",
"id",
",",
"'offset'",
":",
"offset",
",",
"'limit'",
":",
"limit",
"}",
"return",
"self",
".",
"_api",
".",
"files",
".",
"query",
"(",
"api",
"=",
"self",
".",
"_api",
",",
"*",
"*",
"params",
")"
] | 39.333333 | [
0.022222222222222223,
0.18181818181818182,
0.05,
0.07317073170731707,
0.07692307692307693,
0.08571428571428572,
0.18181818181818182,
0.028169014084507043,
0.03278688524590164
] |
def set_position(self, pos, which='both'):
"""Identical to Axes.set_position (This docstring is overwritten)."""
self._polar.set_position(pos, which)
if self._overlay_axes is not None:
self._overlay_axes.set_position(pos, which)
LambertAxes.set_position(self, pos, which) | [
"def",
"set_position",
"(",
"self",
",",
"pos",
",",
"which",
"=",
"'both'",
")",
":",
"self",
".",
"_polar",
".",
"set_position",
"(",
"pos",
",",
"which",
")",
"if",
"self",
".",
"_overlay_axes",
"is",
"not",
"None",
":",
"self",
".",
"_overlay_axes",
".",
"set_position",
"(",
"pos",
",",
"which",
")",
"LambertAxes",
".",
"set_position",
"(",
"self",
",",
"pos",
",",
"which",
")"
] | 51.666667 | [
0.023809523809523808,
0.025974025974025976,
0.045454545454545456,
0.047619047619047616,
0.03636363636363636,
0.04
] |
def list_apps(self, cmd=None, embed_tasks=False, embed_counts=False,
embed_deployments=False, embed_readiness=False,
embed_last_task_failure=False, embed_failures=False,
embed_task_stats=False, app_id=None, label=None, **kwargs):
"""List all apps.
:param str cmd: if passed, only show apps with a matching `cmd`
:param bool embed_tasks: embed tasks in result
:param bool embed_counts: embed all task counts
:param bool embed_deployments: embed all deployment identifier
:param bool embed_readiness: embed all readiness check results
:param bool embed_last_task_failure: embeds the last task failure
:param bool embed_failures: shorthand for embed_last_task_failure
:param bool embed_task_stats: embed task stats in result
:param str app_id: if passed, only show apps with an 'id' that matches or contains this value
:param str label: if passed, only show apps with the selected labels
:param kwargs: arbitrary search filters
:returns: list of applications
:rtype: list[:class:`marathon.models.app.MarathonApp`]
"""
params = {}
if cmd:
params['cmd'] = cmd
if app_id:
params['id'] = app_id
if label:
params['label'] = label
embed_params = {
'app.tasks': embed_tasks,
'app.counts': embed_counts,
'app.deployments': embed_deployments,
'app.readiness': embed_readiness,
'app.lastTaskFailure': embed_last_task_failure,
'app.failures': embed_failures,
'app.taskStats': embed_task_stats
}
filtered_embed_params = [k for (k, v) in embed_params.items() if v]
if filtered_embed_params:
params['embed'] = filtered_embed_params
response = self._do_request('GET', '/v2/apps', params=params)
apps = self._parse_response(
response, MarathonApp, is_list=True, resource_name='apps')
for k, v in kwargs.items():
apps = [o for o in apps if getattr(o, k) == v]
return apps | [
"def",
"list_apps",
"(",
"self",
",",
"cmd",
"=",
"None",
",",
"embed_tasks",
"=",
"False",
",",
"embed_counts",
"=",
"False",
",",
"embed_deployments",
"=",
"False",
",",
"embed_readiness",
"=",
"False",
",",
"embed_last_task_failure",
"=",
"False",
",",
"embed_failures",
"=",
"False",
",",
"embed_task_stats",
"=",
"False",
",",
"app_id",
"=",
"None",
",",
"label",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"}",
"if",
"cmd",
":",
"params",
"[",
"'cmd'",
"]",
"=",
"cmd",
"if",
"app_id",
":",
"params",
"[",
"'id'",
"]",
"=",
"app_id",
"if",
"label",
":",
"params",
"[",
"'label'",
"]",
"=",
"label",
"embed_params",
"=",
"{",
"'app.tasks'",
":",
"embed_tasks",
",",
"'app.counts'",
":",
"embed_counts",
",",
"'app.deployments'",
":",
"embed_deployments",
",",
"'app.readiness'",
":",
"embed_readiness",
",",
"'app.lastTaskFailure'",
":",
"embed_last_task_failure",
",",
"'app.failures'",
":",
"embed_failures",
",",
"'app.taskStats'",
":",
"embed_task_stats",
"}",
"filtered_embed_params",
"=",
"[",
"k",
"for",
"(",
"k",
",",
"v",
")",
"in",
"embed_params",
".",
"items",
"(",
")",
"if",
"v",
"]",
"if",
"filtered_embed_params",
":",
"params",
"[",
"'embed'",
"]",
"=",
"filtered_embed_params",
"response",
"=",
"self",
".",
"_do_request",
"(",
"'GET'",
",",
"'/v2/apps'",
",",
"params",
"=",
"params",
")",
"apps",
"=",
"self",
".",
"_parse_response",
"(",
"response",
",",
"MarathonApp",
",",
"is_list",
"=",
"True",
",",
"resource_name",
"=",
"'apps'",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"apps",
"=",
"[",
"o",
"for",
"o",
"in",
"apps",
"if",
"getattr",
"(",
"o",
",",
"k",
")",
"==",
"v",
"]",
"return",
"apps"
] | 44.458333 | [
0.029411764705882353,
0.07692307692307693,
0.07142857142857142,
0.09090909090909091,
0.08,
0,
0.056338028169014086,
0.05555555555555555,
0.05454545454545454,
0.04285714285714286,
0.04285714285714286,
0.0410958904109589,
0.0410958904109589,
0.046875,
0.039603960396039604,
0.039473684210526314,
0.06382978723404255,
0,
0.07894736842105263,
0.06451612903225806,
0.18181818181818182,
0.10526315789473684,
0.13333333333333333,
0.06451612903225806,
0.1111111111111111,
0.06060606060606061,
0.11764705882352941,
0.05714285714285714,
0,
0.125,
0.05405405405405406,
0.05128205128205128,
0.04081632653061224,
0.044444444444444446,
0.03389830508474576,
0.046511627906976744,
0.044444444444444446,
0.3333333333333333,
0.02666666666666667,
0.06060606060606061,
0.0392156862745098,
0,
0.028985507246376812,
0.08333333333333333,
0.07142857142857142,
0.05714285714285714,
0.034482758620689655,
0.10526315789473684
] |
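An illustrative call for the list_apps record above; `client` stands for an instance of the surrounding Marathon client class (construction details omitted):
apps = client.list_apps(embed_tasks=True, label='env==staging')
app_ids = [app.id for app in apps]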
def close(self, *args, **kwargs):
"""
Write the closing tag of the MRV file and close the opened file
:param force: force closing of externally opened file or buffer
"""
if not self.__finalized:
self._file.write('</cml>')
self.__finalized = True
super().close(*args, **kwargs) | [
"def",
"close",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"__finalized",
":",
"self",
".",
"_file",
".",
"write",
"(",
"'</cml>'",
")",
"self",
".",
"__finalized",
"=",
"True",
"super",
"(",
")",
".",
"close",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 32.6 | [
0.030303030303030304,
0.18181818181818182,
0.03508771929824561,
0,
0.04225352112676056,
0.18181818181818182,
0.0625,
0.05263157894736842,
0.05714285714285714,
0.05263157894736842
] |
def parent_site(self, site):
"""
Filter to the given site, only give content relevant for that site.
"""
# Avoid auto filter if site is already set.
self._parent_site = site
if sharedcontent_appsettings.FLUENT_SHARED_CONTENT_ENABLE_CROSS_SITE:
# Allow content to be shared between all sites:
return self.filter(Q(parent_site=site) | Q(is_cross_site=True))
else:
return self.filter(parent_site=site) | [
"def",
"parent_site",
"(",
"self",
",",
"site",
")",
":",
"# Avoid auto filter if site is already set.",
"self",
".",
"_parent_site",
"=",
"site",
"if",
"sharedcontent_appsettings",
".",
"FLUENT_SHARED_CONTENT_ENABLE_CROSS_SITE",
":",
"# Allow content to be shared between all sites:",
"return",
"self",
".",
"filter",
"(",
"Q",
"(",
"parent_site",
"=",
"site",
")",
"|",
"Q",
"(",
"is_cross_site",
"=",
"True",
")",
")",
"else",
":",
"return",
"self",
".",
"filter",
"(",
"parent_site",
"=",
"site",
")"
] | 40 | [
0.03571428571428571,
0.18181818181818182,
0.02666666666666667,
0.18181818181818182,
0.0392156862745098,
0.0625,
0,
0.025974025974025976,
0.03389830508474576,
0.02666666666666667,
0.15384615384615385,
0.041666666666666664
] |
def resolve_polytomy(
self,
dist=1.0,
support=100,
recursive=True):
"""
Returns a copy of the tree with all polytomies randomly resolved.
Does not transform tree in-place.
"""
nself = self.copy()
nself.treenode.resolve_polytomy(
default_dist=dist,
default_support=support,
recursive=recursive)
nself._coords.update()
return nself | [
"def",
"resolve_polytomy",
"(",
"self",
",",
"dist",
"=",
"1.0",
",",
"support",
"=",
"100",
",",
"recursive",
"=",
"True",
")",
":",
"nself",
"=",
"self",
".",
"copy",
"(",
")",
"nself",
".",
"treenode",
".",
"resolve_polytomy",
"(",
"default_dist",
"=",
"dist",
",",
"default_support",
"=",
"support",
",",
"recursive",
"=",
"recursive",
")",
"nself",
".",
"_coords",
".",
"update",
"(",
")",
"return",
"nself"
] | 27.875 | [
0.09523809523809523,
0.15384615384615385,
0.17647058823529413,
0.15,
0.16666666666666666,
0.18181818181818182,
0.0273972602739726,
0.07317073170731707,
0.18181818181818182,
0.07407407407407407,
0.075,
0.1,
0.08333333333333333,
0.125,
0.06666666666666667,
0.1
] |
def invalidate_token(self, body, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html>`_
:arg body: The token to invalidate
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"DELETE", "/_security/oauth2/token", params=params, body=body
) | [
"def",
"invalidate_token",
"(",
"self",
",",
"body",
",",
"params",
"=",
"None",
")",
":",
"if",
"body",
"in",
"SKIP_IN_PATH",
":",
"raise",
"ValueError",
"(",
"\"Empty value passed for a required argument 'body'.\"",
")",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"\"DELETE\"",
",",
"\"/_security/oauth2/token\"",
",",
"params",
"=",
"params",
",",
"body",
"=",
"body",
")"
] | 42.090909 | [
0.021739130434782608,
0.18181818181818182,
0.05405405405405406,
0,
0.07142857142857142,
0.18181818181818182,
0.0625,
0.036585365853658534,
0.06521739130434782,
0.0547945205479452,
0.3333333333333333
] |
def parse(timestamp, utc=False, produce_naive=False):
'''
Parse an :RFC:`3339`-formatted timestamp and return a
`datetime.datetime`.
If the timestamp is presented in UTC, then the `tzinfo` parameter of the
returned `datetime` will be set to `pytz.utc`.
>>> parse('2009-01-01T10:01:02Z')
datetime.datetime(2009, 1, 1, 10, 1, 2, tzinfo=<UTC>)
Otherwise, a `tzinfo` instance is created with the appropriate offset, and
the `tzinfo` parameter of the returned `datetime` is set to that value.
>>> parse('2009-01-01T14:01:02-04:00')
datetime.datetime(2009, 1, 1, 14, 1, 2, tzinfo=<UTC-04:00>)
However, if `parse()` is called with `utc=True`, then the returned
`datetime` will be normalized to UTC (and its tzinfo parameter set to
`pytz.utc`), regardless of the input timezone.
>>> parse('2009-01-01T06:01:02-04:00', utc=True)
datetime.datetime(2009, 1, 1, 10, 1, 2, tzinfo=<UTC>)
The input is strictly required to conform to :RFC:`3339`, and appropriate
exceptions are thrown for invalid input.
>>> parse('2009-01-01T06:01:02')
Traceback (most recent call last):
...
ValueError: timestamp does not conform to RFC 3339
>>> parse('2009-01-01T25:01:02Z')
Traceback (most recent call last):
...
ValueError: hour must be in 0..23
'''
parse_re = re.compile(r'''^(?:(?:(?P<date_fullyear>[0-9]{4})\-(?P<date_month>[0-9]{2})\-(?P<date_mday>[0-9]{2}))T(?:(?:(?P<time_hour>[0-9]{2})\:(?P<time_minute>[0-9]{2})\:(?P<time_second>[0-9]{2})(?P<time_secfrac>(?:\.[0-9]{1,}))?)(?P<time_offset>(?:Z|(?P<time_numoffset>(?P<time_houroffset>(?:\+|\-)[0-9]{2})\:(?P<time_minuteoffset>[0-9]{2}))))))$''',
re.I | re.X)
match = parse_re.match(timestamp)
if match is not None:
if match.group('time_offset') in ["Z", "z", "+00:00", "-00:00"]:
if produce_naive is True:
tzinfo = None
else:
tzinfo = pytz.utc
else:
if produce_naive is True:
raise ValueError("cannot produce a naive datetime from " +
"a local timestamp")
else:
tzinfo = FixedOffset(int(match.group('time_houroffset')),
int(match.group('time_minuteoffset')))
secfrac = match.group('time_secfrac')
if secfrac is None:
microsecond = 0
else:
microsecond = int(round(float(secfrac) * 1000000))
dt_out = datetime(year=int(match.group('date_fullyear')),
month=int(match.group('date_month')),
day=int(match.group('date_mday')),
hour=int(match.group('time_hour')),
minute=int(match.group('time_minute')),
second=int(match.group('time_second')),
microsecond=microsecond,
tzinfo=tzinfo)
if utc:
dt_out = dt_out.astimezone(pytz.utc)
return dt_out
else:
raise ValueError("timestamp does not conform to RFC 3339") | [
"def",
"parse",
"(",
"timestamp",
",",
"utc",
"=",
"False",
",",
"produce_naive",
"=",
"False",
")",
":",
"parse_re",
"=",
"re",
".",
"compile",
"(",
"r'''^(?:(?:(?P<date_fullyear>[0-9]{4})\\-(?P<date_month>[0-9]{2})\\-(?P<date_mday>[0-9]{2}))T(?:(?:(?P<time_hour>[0-9]{2})\\:(?P<time_minute>[0-9]{2})\\:(?P<time_second>[0-9]{2})(?P<time_secfrac>(?:\\.[0-9]{1,}))?)(?P<time_offset>(?:Z|(?P<time_numoffset>(?P<time_houroffset>(?:\\+|\\-)[0-9]{2})\\:(?P<time_minuteoffset>[0-9]{2}))))))$'''",
",",
"re",
".",
"I",
"|",
"re",
".",
"X",
")",
"match",
"=",
"parse_re",
".",
"match",
"(",
"timestamp",
")",
"if",
"match",
"is",
"not",
"None",
":",
"if",
"match",
".",
"group",
"(",
"'time_offset'",
")",
"in",
"[",
"\"Z\"",
",",
"\"z\"",
",",
"\"+00:00\"",
",",
"\"-00:00\"",
"]",
":",
"if",
"produce_naive",
"is",
"True",
":",
"tzinfo",
"=",
"None",
"else",
":",
"tzinfo",
"=",
"pytz",
".",
"utc",
"else",
":",
"if",
"produce_naive",
"is",
"True",
":",
"raise",
"ValueError",
"(",
"\"cannot produce a naive datetime from \"",
"+",
"\"a local timestamp\"",
")",
"else",
":",
"tzinfo",
"=",
"FixedOffset",
"(",
"int",
"(",
"match",
".",
"group",
"(",
"'time_houroffset'",
")",
")",
",",
"int",
"(",
"match",
".",
"group",
"(",
"'time_minuteoffset'",
")",
")",
")",
"secfrac",
"=",
"match",
".",
"group",
"(",
"'time_secfrac'",
")",
"if",
"secfrac",
"is",
"None",
":",
"microsecond",
"=",
"0",
"else",
":",
"microsecond",
"=",
"int",
"(",
"round",
"(",
"float",
"(",
"secfrac",
")",
"*",
"1000000",
")",
")",
"dt_out",
"=",
"datetime",
"(",
"year",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'date_fullyear'",
")",
")",
",",
"month",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'date_month'",
")",
")",
",",
"day",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'date_mday'",
")",
")",
",",
"hour",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'time_hour'",
")",
")",
",",
"minute",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'time_minute'",
")",
")",
",",
"second",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'time_second'",
")",
")",
",",
"microsecond",
"=",
"microsecond",
",",
"tzinfo",
"=",
"tzinfo",
")",
"if",
"utc",
":",
"dt_out",
"=",
"dt_out",
".",
"astimezone",
"(",
"pytz",
".",
"utc",
")",
"return",
"dt_out",
"else",
":",
"raise",
"ValueError",
"(",
"\"timestamp does not conform to RFC 3339\"",
")"
] | 39.139241 | [
0.018867924528301886,
0.2857142857142857,
0.12280701754385964,
0.125,
0,
0.039473684210526314,
0.06,
0,
0.08108108108108109,
0.07017543859649122,
0,
0.038461538461538464,
0.04,
0,
0.07142857142857142,
0.07936507936507936,
0,
0.07042253521126761,
0.0821917808219178,
0.08,
0,
0.057692307692307696,
0.07017543859649122,
0,
0.07792207792207792,
0.045454545454545456,
0,
0.08333333333333333,
0.07894736842105263,
0.2857142857142857,
0.037037037037037035,
0,
0.08108108108108109,
0.07894736842105263,
0.2857142857142857,
0.05405405405405406,
0,
0.2857142857142857,
0,
0.011235955056179775,
0.10526315789473684,
0,
0.05405405405405406,
0,
0.08,
0.027777777777777776,
0.05405405405405406,
0.06896551724137931,
0.11764705882352941,
0.06060606060606061,
0.15384615384615385,
0.05405405405405406,
0.04054054054054054,
0.07547169811320754,
0.11764705882352941,
0.0410958904109589,
0.05333333333333334,
0,
0.044444444444444446,
0.07407407407407407,
0.07407407407407407,
0.15384615384615385,
0.03225806451612903,
0,
0.046153846153846156,
0.06349206349206349,
0.06666666666666667,
0.06557377049180328,
0.06153846153846154,
0.06153846153846154,
0.08,
0.125,
0,
0.13333333333333333,
0.041666666666666664,
0,
0.09523809523809523,
0.2222222222222222,
0.030303030303030304
] |
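Usage sketch for the parse() function above; the timestamp strings are illustrative and the behaviour follows the branches shown in the code:

dt = parse("2019-07-04T12:30:45.123456+02:00")    # aware datetime, FixedOffset(2, 0)
utc_dt = parse("2019-07-04T12:30:45Z", utc=True)  # "Z" offset, tzinfo=pytz.utc
naive = parse("2019-07-04T12:30:45Z", produce_naive=True)   # tzinfo is None
parse("2019-07-04 12:30:45")   # raises ValueError("timestamp does not conform to RFC 3339")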
def _get_redis_keys_opts():
'''
Build the key opts based on the user options.
'''
return {
'bank_prefix': __opts__.get('cache.redis.bank_prefix', _BANK_PREFIX),
'bank_keys_prefix': __opts__.get('cache.redis.bank_keys_prefix', _BANK_KEYS_PREFIX),
'key_prefix': __opts__.get('cache.redis.key_prefix', _KEY_PREFIX),
'separator': __opts__.get('cache.redis.separator', _SEPARATOR)
} | [
"def",
"_get_redis_keys_opts",
"(",
")",
":",
"return",
"{",
"'bank_prefix'",
":",
"__opts__",
".",
"get",
"(",
"'cache.redis.bank_prefix'",
",",
"_BANK_PREFIX",
")",
",",
"'bank_keys_prefix'",
":",
"__opts__",
".",
"get",
"(",
"'cache.redis.bank_keys_prefix'",
",",
"_BANK_KEYS_PREFIX",
")",
",",
"'key_prefix'",
":",
"__opts__",
".",
"get",
"(",
"'cache.redis.key_prefix'",
",",
"_KEY_PREFIX",
")",
",",
"'separator'",
":",
"__opts__",
".",
"get",
"(",
"'cache.redis.separator'",
",",
"_SEPARATOR",
")",
"}"
] | 42 | [
0.037037037037037035,
0.2857142857142857,
0.04081632653061224,
0.2857142857142857,
0.25,
0.025974025974025976,
0.03260869565217391,
0.02702702702702703,
0.02857142857142857,
0.6
] |
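A hypothetical illustration of _get_redis_keys_opts(); __opts__ is the Salt configuration dict injected into the module, and the _*_PREFIX constants are its module-level defaults:

__opts__ = {'cache.redis.bank_prefix': '$BANK', 'cache.redis.separator': '_'}
opts = _get_redis_keys_opts()
assert opts['bank_prefix'] == '$BANK'   # explicitly configured value wins
assert opts['separator'] == '_'
# keys absent from __opts__ fall back to _BANK_KEYS_PREFIX, _KEY_PREFIX, etc.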
def list_bgp_speaker_on_dragent(self, bgp_dragent, **_params):
"""Fetches a list of BGP speakers hosted by Dynamic Routing agent."""
return self.get((self.agent_path + self.BGP_DRINSTANCES)
% bgp_dragent, params=_params) | [
"def",
"list_bgp_speaker_on_dragent",
"(",
"self",
",",
"bgp_dragent",
",",
"*",
"*",
"_params",
")",
":",
"return",
"self",
".",
"get",
"(",
"(",
"self",
".",
"agent_path",
"+",
"self",
".",
"BGP_DRINSTANCES",
")",
"%",
"bgp_dragent",
",",
"params",
"=",
"_params",
")"
] | 64.25 | [
0.016129032258064516,
0.025974025974025976,
0.046875,
0.07407407407407407
] |
def save(self, *objects):
"""Add all the objects to the session and commit them.
This only needs to be done for networks and participants.
"""
if len(objects) > 0:
self.session.add_all(objects)
self.session.commit() | [
"def",
"save",
"(",
"self",
",",
"*",
"objects",
")",
":",
"if",
"len",
"(",
"objects",
")",
">",
"0",
":",
"self",
".",
"session",
".",
"add_all",
"(",
"objects",
")",
"self",
".",
"session",
".",
"commit",
"(",
")"
] | 29 | [
0.04,
0.03225806451612903,
0,
0.03076923076923077,
0,
0.18181818181818182,
0.07142857142857142,
0.04878048780487805,
0.06896551724137931
] |
def save_template(self, filename="{ScanningTemplate}leicacam.xml"):
"""Save scanning template to filename."""
cmd = [
('sys', '0'),
('cmd', 'save'),
('fil', str(filename))
]
self.send(cmd)
return self.wait_for(*cmd[0]) | [
"def",
"save_template",
"(",
"self",
",",
"filename",
"=",
"\"{ScanningTemplate}leicacam.xml\"",
")",
":",
"cmd",
"=",
"[",
"(",
"'sys'",
",",
"'0'",
")",
",",
"(",
"'cmd'",
",",
"'save'",
")",
",",
"(",
"'fil'",
",",
"str",
"(",
"filename",
")",
")",
"]",
"self",
".",
"send",
"(",
"cmd",
")",
"return",
"self",
".",
"wait_for",
"(",
"*",
"cmd",
"[",
"0",
"]",
")"
] | 31.777778 | [
0.014925373134328358,
0.04081632653061224,
0.2,
0.08,
0.07142857142857142,
0.058823529411764705,
0.3333333333333333,
0.09090909090909091,
0.05405405405405406
] |
def numchannels(samples:np.ndarray) -> int:
"""
return the number of channels present in samples
samples: a numpy array as returned by sndread
for multichannel audio, samples is always interleaved,
meaning that samples[n] returns always a frame, which
is either a single scalar for mono audio, or an array
for multichannel audio.
"""
if len(samples.shape) == 1:
return 1
else:
return samples.shape[1] | [
"def",
"numchannels",
"(",
"samples",
":",
"np",
".",
"ndarray",
")",
"->",
"int",
":",
"if",
"len",
"(",
"samples",
".",
"shape",
")",
"==",
"1",
":",
"return",
"1",
"else",
":",
"return",
"samples",
".",
"shape",
"[",
"1",
"]"
] | 29.6 | [
0.046511627906976744,
0.2857142857142857,
0.038461538461538464,
0,
0.04081632653061224,
0,
0.034482758620689655,
0.03508771929824561,
0.03508771929824561,
0.07407407407407407,
0.2857142857142857,
0.06451612903225806,
0.125,
0.2222222222222222,
0.06451612903225806
] |
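A quick check of numchannels() on mono and interleaved stereo buffers:

import numpy as np
mono = np.zeros(44100)           # 1-D: one scalar per frame
stereo = np.zeros((44100, 2))    # interleaved: each frame holds two channel values
assert numchannels(mono) == 1
assert numchannels(stereo) == 2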
def set_display_mode(self, zoom,layout='continuous'):
"""Set display mode in viewer
The "zoom" argument may be 'fullpage', 'fullwidth', 'real',
'default', or a number, interpreted as a percentage."""
if(zoom=='fullpage' or zoom=='fullwidth' or zoom=='real' or zoom=='default' or not isinstance(zoom,basestring)):
self.zoom_mode=zoom
else:
self.error('Incorrect zoom display mode: '+zoom)
if(layout=='single' or layout=='continuous' or layout=='two' or layout=='default'):
self.layout_mode=layout
else:
self.error('Incorrect layout display mode: '+layout) | [
"def",
"set_display_mode",
"(",
"self",
",",
"zoom",
",",
"layout",
"=",
"'continuous'",
")",
":",
"if",
"(",
"zoom",
"==",
"'fullpage'",
"or",
"zoom",
"==",
"'fullwidth'",
"or",
"zoom",
"==",
"'real'",
"or",
"zoom",
"==",
"'default'",
"or",
"not",
"isinstance",
"(",
"zoom",
",",
"basestring",
")",
")",
":",
"self",
".",
"zoom_mode",
"=",
"zoom",
"else",
":",
"self",
".",
"error",
"(",
"'Incorrect zoom display mode: '",
"+",
"zoom",
")",
"if",
"(",
"layout",
"==",
"'single'",
"or",
"layout",
"==",
"'continuous'",
"or",
"layout",
"==",
"'two'",
"or",
"layout",
"==",
"'default'",
")",
":",
"self",
".",
"layout_mode",
"=",
"layout",
"else",
":",
"self",
".",
"error",
"(",
"'Incorrect layout display mode: '",
"+",
"layout",
")"
] | 47.357143 | [
0.03773584905660377,
0.05405405405405406,
0.25,
0.029850746268656716,
0.047619047619047616,
0.25,
0.06666666666666667,
0.0967741935483871,
0.15384615384615385,
0.03333333333333333,
0.07692307692307693,
0.08571428571428572,
0.15384615384615385,
0.03125
] |
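Typical calls against set_display_mode(); pdf is assumed to be an instance of the (FPDF-style) class the method belongs to:

pdf.set_display_mode('fullwidth', layout='two')   # fit page width, two-page layout
pdf.set_display_mode(150)                         # numeric zoom, interpreted as a percentage
pdf.set_display_mode('sideways')                  # unrecognized string -> self.error(...)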
def run(connection):
"""
Parse arguments and start upload/download
"""
parser = argparse.ArgumentParser(description="""
Process database dumps.
    Either download or upload a dump file to the objectstore.
    Downloads the latest dump and uploads with environment and date
    into the given container destination
""")
parser.add_argument(
'location',
nargs=1,
default=f'{DUMPFOLDER}/database.{ENV}.dump',
help="Dump file location")
parser.add_argument(
'objectstore',
nargs=1,
default=f'{DUMPFOLDER}/database.{ENV}.dump',
help="Dump file objectstore location")
parser.add_argument(
'--download-db',
action='store_true',
dest='download',
default=False,
help='Download db')
parser.add_argument(
'--upload-db',
action='store_true',
dest='upload',
default=False,
help='Upload db')
parser.add_argument(
'--container',
action='store_true',
dest='container',
default=False,
help='Upload db')
parser.add_argument(
'--days',
type=int,
nargs=1,
dest='days',
default=0,
help='Days to keep database dumps')
args = parser.parse_args()
if args.days:
LOG.debug('Cleanup old dumps')
remove_old_dumps(
connection, args.objectstore[0], args.days[0])
elif args.download:
download_database(
connection, args.objectstore[0], args.location[0])
elif args.upload:
upload_database(
connection, args.objectstore[0], args.location[0])
else:
parser.print_help() | [
"def",
"run",
"(",
"connection",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"\"\"\n Process database dumps.\n\n Either download of upload a dump file to the objectstore.\n\n downloads the latest dump and uploads with envronment and date\n into given container destination\n \"\"\"",
")",
"parser",
".",
"add_argument",
"(",
"'location'",
",",
"nargs",
"=",
"1",
",",
"default",
"=",
"f'{DUMPFOLDER}/database.{ENV}.dump'",
",",
"help",
"=",
"\"Dump file location\"",
")",
"parser",
".",
"add_argument",
"(",
"'objectstore'",
",",
"nargs",
"=",
"1",
",",
"default",
"=",
"f'{DUMPFOLDER}/database.{ENV}.dump'",
",",
"help",
"=",
"\"Dump file objectstore location\"",
")",
"parser",
".",
"add_argument",
"(",
"'--download-db'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'download'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Download db'",
")",
"parser",
".",
"add_argument",
"(",
"'--upload-db'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'upload'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Upload db'",
")",
"parser",
".",
"add_argument",
"(",
"'--container'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'container'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Upload db'",
")",
"parser",
".",
"add_argument",
"(",
"'--days'",
",",
"type",
"=",
"int",
",",
"nargs",
"=",
"1",
",",
"dest",
"=",
"'days'",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'Days to keep database dumps'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"days",
":",
"LOG",
".",
"debug",
"(",
"'Cleanup old dumps'",
")",
"remove_old_dumps",
"(",
"connection",
",",
"args",
".",
"objectstore",
"[",
"0",
"]",
",",
"args",
".",
"days",
"[",
"0",
"]",
")",
"elif",
"args",
".",
"download",
":",
"download_database",
"(",
"connection",
",",
"args",
".",
"objectstore",
"[",
"0",
"]",
",",
"args",
".",
"location",
"[",
"0",
"]",
")",
"elif",
"args",
".",
"upload",
":",
"upload_database",
"(",
"connection",
",",
"args",
".",
"objectstore",
"[",
"0",
"]",
",",
"args",
".",
"location",
"[",
"0",
"]",
")",
"else",
":",
"parser",
".",
"print_help",
"(",
")"
] | 23.898551 | [
0.05,
0.2857142857142857,
0.044444444444444446,
0.2857142857142857,
0,
0.057692307692307696,
0.07407407407407407,
0,
0.03278688524590164,
0,
0.030303030303030304,
0.05555555555555555,
0.25,
0,
0.125,
0.10526315789473684,
0.1875,
0.057692307692307696,
0.11764705882352941,
0,
0.125,
0.09090909090909091,
0.1875,
0.057692307692307696,
0.08695652173913043,
0,
0.125,
0.08333333333333333,
0.10714285714285714,
0.125,
0.13636363636363635,
0.14814814814814814,
0,
0.125,
0.09090909090909091,
0.10714285714285714,
0.13636363636363635,
0.13636363636363635,
0.16,
0,
0.125,
0.09090909090909091,
0.10714285714285714,
0.12,
0.13636363636363635,
0.16,
0,
0.125,
0.11764705882352941,
0.17647058823529413,
0.1875,
0.15,
0.16666666666666666,
0.09302325581395349,
0,
0.06666666666666667,
0,
0.11764705882352941,
0.05263157894736842,
0.12,
0.05172413793103448,
0.08695652173913043,
0.11538461538461539,
0.04838709677419355,
0.09523809523809523,
0.125,
0.04838709677419355,
0.2222222222222222,
0.07407407407407407
] |
def generate_import_code(pipeline, operators, impute=False):
"""Generate all library import calls for use in TPOT.export().
Parameters
----------
pipeline: List
List of operators in the current optimized pipeline
operators:
        List of operator classes from the operator library
impute : bool
Whether to impute new values in the feature set.
Returns
-------
pipeline_text: String
The Python code that imports all required library used in the current
optimized pipeline
"""
def merge_imports(old_dict, new_dict):
# Key is a module name
for key in new_dict.keys():
if key in old_dict.keys():
# Union imports from the same module
old_dict[key] = set(old_dict[key]) | set(new_dict[key])
else:
old_dict[key] = set(new_dict[key])
operators_used = [x.name for x in pipeline if isinstance(x, deap.gp.Primitive)]
pipeline_text = 'import numpy as np\nimport pandas as pd\n'
pipeline_imports = _starting_imports(operators, operators_used)
    # Build dict of import requirements from list of operators
import_relations = {op.__name__: op.import_hash for op in operators}
# Add the imputer if necessary
if impute:
pipeline_imports['sklearn.preprocessing'] = ['Imputer']
# Build import dict from operators used
for op in operators_used:
try:
operator_import = import_relations[op]
merge_imports(pipeline_imports, operator_import)
except KeyError:
pass # Operator does not require imports
# Build import string
for key in sorted(pipeline_imports.keys()):
module_list = ', '.join(sorted(pipeline_imports[key]))
pipeline_text += 'from {} import {}\n'.format(key, module_list)
return pipeline_text | [
"def",
"generate_import_code",
"(",
"pipeline",
",",
"operators",
",",
"impute",
"=",
"False",
")",
":",
"def",
"merge_imports",
"(",
"old_dict",
",",
"new_dict",
")",
":",
"# Key is a module name",
"for",
"key",
"in",
"new_dict",
".",
"keys",
"(",
")",
":",
"if",
"key",
"in",
"old_dict",
".",
"keys",
"(",
")",
":",
"# Union imports from the same module",
"old_dict",
"[",
"key",
"]",
"=",
"set",
"(",
"old_dict",
"[",
"key",
"]",
")",
"|",
"set",
"(",
"new_dict",
"[",
"key",
"]",
")",
"else",
":",
"old_dict",
"[",
"key",
"]",
"=",
"set",
"(",
"new_dict",
"[",
"key",
"]",
")",
"operators_used",
"=",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"pipeline",
"if",
"isinstance",
"(",
"x",
",",
"deap",
".",
"gp",
".",
"Primitive",
")",
"]",
"pipeline_text",
"=",
"'import numpy as np\\nimport pandas as pd\\n'",
"pipeline_imports",
"=",
"_starting_imports",
"(",
"operators",
",",
"operators_used",
")",
"# Build dict of import requirments from list of operators",
"import_relations",
"=",
"{",
"op",
".",
"__name__",
":",
"op",
".",
"import_hash",
"for",
"op",
"in",
"operators",
"}",
"# Add the imputer if necessary",
"if",
"impute",
":",
"pipeline_imports",
"[",
"'sklearn.preprocessing'",
"]",
"=",
"[",
"'Imputer'",
"]",
"# Build import dict from operators used",
"for",
"op",
"in",
"operators_used",
":",
"try",
":",
"operator_import",
"=",
"import_relations",
"[",
"op",
"]",
"merge_imports",
"(",
"pipeline_imports",
",",
"operator_import",
")",
"except",
"KeyError",
":",
"pass",
"# Operator does not require imports",
"# Build import string",
"for",
"key",
"in",
"sorted",
"(",
"pipeline_imports",
".",
"keys",
"(",
")",
")",
":",
"module_list",
"=",
"', '",
".",
"join",
"(",
"sorted",
"(",
"pipeline_imports",
"[",
"key",
"]",
")",
")",
"pipeline_text",
"+=",
"'from {} import {}\\n'",
".",
"format",
"(",
"key",
",",
"module_list",
")",
"return",
"pipeline_text"
] | 34.320755 | [
0.016666666666666666,
0.030303030303030304,
0,
0.14285714285714285,
0.14285714285714285,
0.1111111111111111,
0.03389830508474576,
0.14285714285714285,
0.038461538461538464,
0.17647058823529413,
0.03571428571428571,
0,
0.18181818181818182,
0.18181818181818182,
0.08,
0.025974025974025976,
0.07692307692307693,
0,
0.2857142857142857,
0.047619047619047616,
0.06666666666666667,
0.05714285714285714,
0.05263157894736842,
0.038461538461538464,
0.028169014084507043,
0.11764705882352941,
0.04,
0,
0.03614457831325301,
0.031746031746031744,
0.029850746268656716,
0,
0.03278688524590164,
0.027777777777777776,
0,
0.058823529411764705,
0.14285714285714285,
0.031746031746031744,
0,
0.046511627906976744,
0.06896551724137931,
0.16666666666666666,
0.04,
0.03333333333333333,
0.08333333333333333,
0.03773584905660377,
0,
0.08,
0.0425531914893617,
0.03225806451612903,
0.028169014084507043,
0,
0.08333333333333333
] |
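A hypothetical walk-through of the inner merge_imports() helper (shown as if lifted to module scope); it unions the per-module name sets in place:

old = {'sklearn.svm': {'SVC'}}
new = {'sklearn.svm': ['LinearSVC'], 'numpy': ['inf']}
merge_imports(old, new)   # mutates old
assert old == {'sklearn.svm': {'SVC', 'LinearSVC'}, 'numpy': {'inf'}}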
def _find_channel(connection, name, ctype, dtype, sample_rate, unique=False):
"""Internal method to find a single channel
Parameters
----------
connection : `nds2.connection`, optional
open NDS2 connection to use for query
name : `str`
the name of the channel to find
ctype : `int`
the NDS2 channel type to match
dtype : `int`
the NDS2 data type to match
sample_rate : `tuple`
a pre-formatted rate tuple (see `find_channels`)
unique : `bool`, optional, default: `False`
require one (and only one) match per channel
Returns
-------
channels : `list` of `nds2.channel`
list of NDS2 channel objects, if `unique=True` is given the list
is guaranteed to have only one element.
See also
--------
nds2.connection.find_channels
for documentation on the underlying query method
"""
# parse channel type from name,
# e.g. 'L1:GDS-CALIB_STRAIN,reduced' -> 'L1:GDS-CALIB_STRAIN', 'reduced'
name, ctype = _strip_ctype(name, ctype, connection.get_protocol())
# query NDS2
found = connection.find_channels(name, ctype, dtype, *sample_rate)
# if don't care about defaults, just return now
if not unique:
return found
# if two results, remove 'online' copy (if present)
# (if no online channels present, this does nothing)
if len(found) == 2:
found = [c for c in found if
c.channel_type != Nds2ChannelType.ONLINE.value]
# if not unique result, panic
if len(found) != 1:
raise ValueError("unique NDS2 channel match not found for %r"
% name)
return found | [
"def",
"_find_channel",
"(",
"connection",
",",
"name",
",",
"ctype",
",",
"dtype",
",",
"sample_rate",
",",
"unique",
"=",
"False",
")",
":",
"# parse channel type from name,",
"# e.g. 'L1:GDS-CALIB_STRAIN,reduced' -> 'L1:GDS-CALIB_STRAIN', 'reduced'",
"name",
",",
"ctype",
"=",
"_strip_ctype",
"(",
"name",
",",
"ctype",
",",
"connection",
".",
"get_protocol",
"(",
")",
")",
"# query NDS2",
"found",
"=",
"connection",
".",
"find_channels",
"(",
"name",
",",
"ctype",
",",
"dtype",
",",
"*",
"sample_rate",
")",
"# if don't care about defaults, just return now",
"if",
"not",
"unique",
":",
"return",
"found",
"# if two results, remove 'online' copy (if present)",
"# (if no online channels present, this does nothing)",
"if",
"len",
"(",
"found",
")",
"==",
"2",
":",
"found",
"=",
"[",
"c",
"for",
"c",
"in",
"found",
"if",
"c",
".",
"channel_type",
"!=",
"Nds2ChannelType",
".",
"ONLINE",
".",
"value",
"]",
"# if not unique result, panic",
"if",
"len",
"(",
"found",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"unique NDS2 channel match not found for %r\"",
"%",
"name",
")",
"return",
"found"
] | 28.929825 | [
0.012987012987012988,
0.0425531914893617,
0,
0.14285714285714285,
0.14285714285714285,
0.09090909090909091,
0.044444444444444446,
0,
0.25,
0.05128205128205128,
0,
0.23529411764705882,
0.05263157894736842,
0,
0.23529411764705882,
0.05714285714285714,
0,
0.16,
0.07142857142857142,
0,
0.0851063829787234,
0.07692307692307693,
0,
0.18181818181818182,
0.18181818181818182,
0.10256410256410256,
0.05555555555555555,
0.0425531914893617,
0,
0.16666666666666666,
0.16666666666666666,
0.06060606060606061,
0.03571428571428571,
0.2857142857142857,
0.05714285714285714,
0.02631578947368421,
0.02857142857142857,
0,
0.125,
0.02857142857142857,
0,
0.0392156862745098,
0.1111111111111111,
0.1,
0,
0.03636363636363636,
0.03389830508474576,
0.08695652173913043,
0.08333333333333333,
0.0625,
0,
0.06060606060606061,
0.08695652173913043,
0.043478260869565216,
0.125,
0,
0.125
] |
def exchange(self, pubkey):
'''
        Perform an ECDH key exchange with a public key.
Args:
pubkey (PubKey): A PubKey to perform the ECDH with.
Returns:
bytes: The ECDH bytes. This is deterministic for a given pubkey
and private key.
'''
try:
return self.priv.exchange(c_ec.ECDH(), pubkey.publ)
except ValueError as e:
raise s_exc.BadEccExchange(mesg=str(e)) | [
"def",
"exchange",
"(",
"self",
",",
"pubkey",
")",
":",
"try",
":",
"return",
"self",
".",
"priv",
".",
"exchange",
"(",
"c_ec",
".",
"ECDH",
"(",
")",
",",
"pubkey",
".",
"publ",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"s_exc",
".",
"BadEccExchange",
"(",
"mesg",
"=",
"str",
"(",
"e",
")",
")"
] | 30.333333 | [
0.037037037037037035,
0.18181818181818182,
0.037037037037037035,
0,
0.15384615384615385,
0.047619047619047616,
0,
0.125,
0.02666666666666667,
0.07142857142857142,
0.18181818181818182,
0.16666666666666666,
0.031746031746031744,
0.06451612903225806,
0.0392156862745098
] |
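A sketch of the symmetry exchange() provides; PriKey.generate() and .public() are assumed wrapper APIs, not confirmed from this snippet:

alice, bob = PriKey.generate(), PriKey.generate()   # hypothetical constructors
s1 = alice.exchange(bob.public())                   # hypothetical PubKey accessor
s2 = bob.exchange(alice.public())
assert s1 == s2   # ECDH: both parties derive the same shared bytes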
def configure_from_environment(self, whitelist_keys=False, whitelist=None):
"""Configure from the entire set of available environment variables.
This is really a shorthand for grabbing ``os.environ`` and passing to
:meth:`_configure_from_mapping`.
As always, only uppercase keys are loaded.
Keyword Args:
whitelist_keys (bool):
Should we whitelist the keys by only pulling those that are
already present in the config? Useful for avoiding adding
things like ``LESSPIPE`` to your app config. If no whitelist is
provided, we use the current config keys as our whitelist.
whitelist (list[str]):
An explicit list of keys that should be allowed. If provided
and ``whitelist_keys`` is true, we will use that as our
whitelist instead of pre-existing app config keys.
Returns:
fleaker.base.BaseApplication:
Returns itself.
"""
self._configure_from_mapping(os.environ, whitelist_keys=whitelist_keys,
whitelist=whitelist)
return self | [
"def",
"configure_from_environment",
"(",
"self",
",",
"whitelist_keys",
"=",
"False",
",",
"whitelist",
"=",
"None",
")",
":",
"self",
".",
"_configure_from_mapping",
"(",
"os",
".",
"environ",
",",
"whitelist_keys",
"=",
"whitelist_keys",
",",
"whitelist",
"=",
"whitelist",
")",
"return",
"self"
] | 43.518519 | [
0.013333333333333334,
0.02631578947368421,
0,
0.03896103896103896,
0.125,
0,
0.04,
0,
0.09523809523809523,
0.08823529411764706,
0.02666666666666667,
0.0273972602739726,
0.0379746835443038,
0.02702702702702703,
0.08823529411764706,
0.02631578947368421,
0.04225352112676056,
0.030303030303030304,
0,
0.125,
0.04878048780487805,
0.06451612903225806,
0.18181818181818182,
0.0379746835443038,
0.08771929824561403,
0,
0.10526315789473684
] |
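A hypothetical usage of configure_from_environment(); app is assumed to be a fleaker application instance:

import os
os.environ['MYAPP_SECRET'] = 's3cr3t'   # only uppercase keys are loaded
app.configure_from_environment(whitelist_keys=True, whitelist=['MYAPP_SECRET'])
assert app.config['MYAPP_SECRET'] == 's3cr3t'
# without whitelisting, every uppercase env var (e.g. LESSPIPE) would end up in the config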
def format_commands(self, ctx, formatter):
"""Extra format methods for multi methods that adds all the commands
after the options.
"""
core_commands, inenv_commands = [], []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
# What is this, the tool lied about a command. Ignore it
if cmd is None:
continue
help_ = cmd.short_help or ''
if subcommand in self.sort_later:
inenv_commands.append((subcommand, help_))
else:
core_commands.append((subcommand, help_))
if core_commands:
with formatter.section('Commands'):
formatter.write_dl(core_commands)
if inenv_commands:
with formatter.section('Inenvs'):
formatter.write_dl(inenv_commands) | [
"def",
"format_commands",
"(",
"self",
",",
"ctx",
",",
"formatter",
")",
":",
"core_commands",
",",
"inenv_commands",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"subcommand",
"in",
"self",
".",
"list_commands",
"(",
"ctx",
")",
":",
"cmd",
"=",
"self",
".",
"get_command",
"(",
"ctx",
",",
"subcommand",
")",
"# What is this, the tool lied about a command. Ignore it",
"if",
"cmd",
"is",
"None",
":",
"continue",
"help_",
"=",
"cmd",
".",
"short_help",
"or",
"''",
"if",
"subcommand",
"in",
"self",
".",
"sort_later",
":",
"inenv_commands",
".",
"append",
"(",
"(",
"subcommand",
",",
"help_",
")",
")",
"else",
":",
"core_commands",
".",
"append",
"(",
"(",
"subcommand",
",",
"help_",
")",
")",
"if",
"core_commands",
":",
"with",
"formatter",
".",
"section",
"(",
"'Commands'",
")",
":",
"formatter",
".",
"write_dl",
"(",
"core_commands",
")",
"if",
"inenv_commands",
":",
"with",
"formatter",
".",
"section",
"(",
"'Inenvs'",
")",
":",
"formatter",
".",
"write_dl",
"(",
"inenv_commands",
")"
] | 36.708333 | [
0.023809523809523808,
0.02631578947368421,
0.07692307692307693,
0.18181818181818182,
0.043478260869565216,
0.04,
0.0392156862745098,
0.028985507246376812,
0.07407407407407407,
0.08333333333333333,
0,
0.05,
0.044444444444444446,
0.034482758620689655,
0.11764705882352941,
0.03508771929824561,
0,
0.08,
0.0425531914893617,
0.04081632653061224,
0,
0.07692307692307693,
0.044444444444444446,
0.04
] |
def execute_on_entries(self, entry_processor, predicate=None):
"""
Applies the user defined EntryProcessor to all the entries in the map or entries in the map which satisfies
the predicate if provided. Returns the results mapped by each key in the map.
:param entry_processor: (object), A stateful serializable object which represents the EntryProcessor defined on
server side.
    This object must have a serializable EntryProcessor counterpart registered on server side with the actual
``org.hazelcast.map.EntryProcessor`` implementation.
:param predicate: (Predicate), predicate for filtering the entries (optional).
:return: (Sequence), list of map entries which includes the keys and the results of the entry process.
.. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for more info about predicates.
"""
if predicate:
return self._encode_invoke(map_execute_with_predicate_codec, entry_processor=self._to_data(entry_processor),
predicate=self._to_data(predicate))
return self._encode_invoke(map_execute_on_all_keys_codec, entry_processor=self._to_data(entry_processor)) | [
"def",
"execute_on_entries",
"(",
"self",
",",
"entry_processor",
",",
"predicate",
"=",
"None",
")",
":",
"if",
"predicate",
":",
"return",
"self",
".",
"_encode_invoke",
"(",
"map_execute_with_predicate_codec",
",",
"entry_processor",
"=",
"self",
".",
"_to_data",
"(",
"entry_processor",
")",
",",
"predicate",
"=",
"self",
".",
"_to_data",
"(",
"predicate",
")",
")",
"return",
"self",
".",
"_encode_invoke",
"(",
"map_execute_on_all_keys_codec",
",",
"entry_processor",
"=",
"self",
".",
"_to_data",
"(",
"entry_processor",
")",
")"
] | 68.833333 | [
0.016129032258064516,
0.18181818181818182,
0.02608695652173913,
0.03529411764705882,
0,
0.03361344537815126,
0.08333333333333333,
0.025423728813559324,
0.046875,
0.05813953488372093,
0.03636363636363636,
0,
0.07547169811320754,
0.18181818181818182,
0.09523809523809523,
0.03333333333333333,
0.06756756756756757,
0.02654867256637168
] |
def from_env(cls, reactor=None, env=os.environ):
"""
Create a Vault client with configuration from the environment. Supports
a limited number of the available config options:
https://www.vaultproject.io/docs/commands/index.html#environment-variables
https://github.com/hashicorp/vault/blob/v0.11.3/api/client.go#L28-L40
Supported:
- ``VAULT_ADDR``
- ``VAULT_CACERT``
- ``VAULT_CLIENT_CERT``
- ``VAULT_CLIENT_KEY``
- ``VAULT_TLS_SERVER_NAME``
- ``VAULT_TOKEN``
Not currently supported:
- ``VAULT_CAPATH``
- ``VAULT_CLIENT_TIMEOUT``
- ``VAULT_MAX_RETRIES``
- ``VAULT_MFA``
- ``VAULT_RATE_LIMIT``
- ``VAULT_SKIP_VERIFY``
- ``VAULT_WRAP_TTL``
"""
address = env.get('VAULT_ADDR', 'https://127.0.0.1:8200')
# This seems to be what the Vault CLI defaults to
token = env.get('VAULT_TOKEN', 'TEST')
ca_cert = env.get('VAULT_CACERT')
tls_server_name = env.get('VAULT_TLS_SERVER_NAME')
client_cert = env.get('VAULT_CLIENT_CERT')
client_key = env.get('VAULT_CLIENT_KEY')
cf = ClientPolicyForHTTPS.from_pem_files(
caKey=ca_cert, privateKey=client_key, certKey=client_cert,
tls_server_name=tls_server_name
)
client, reactor = default_client(reactor, contextFactory=cf)
return cls(address, token, client=client, reactor=reactor) | [
"def",
"from_env",
"(",
"cls",
",",
"reactor",
"=",
"None",
",",
"env",
"=",
"os",
".",
"environ",
")",
":",
"address",
"=",
"env",
".",
"get",
"(",
"'VAULT_ADDR'",
",",
"'https://127.0.0.1:8200'",
")",
"# This seems to be what the Vault CLI defaults to",
"token",
"=",
"env",
".",
"get",
"(",
"'VAULT_TOKEN'",
",",
"'TEST'",
")",
"ca_cert",
"=",
"env",
".",
"get",
"(",
"'VAULT_CACERT'",
")",
"tls_server_name",
"=",
"env",
".",
"get",
"(",
"'VAULT_TLS_SERVER_NAME'",
")",
"client_cert",
"=",
"env",
".",
"get",
"(",
"'VAULT_CLIENT_CERT'",
")",
"client_key",
"=",
"env",
".",
"get",
"(",
"'VAULT_CLIENT_KEY'",
")",
"cf",
"=",
"ClientPolicyForHTTPS",
".",
"from_pem_files",
"(",
"caKey",
"=",
"ca_cert",
",",
"privateKey",
"=",
"client_key",
",",
"certKey",
"=",
"client_cert",
",",
"tls_server_name",
"=",
"tls_server_name",
")",
"client",
",",
"reactor",
"=",
"default_client",
"(",
"reactor",
",",
"contextFactory",
"=",
"cf",
")",
"return",
"cls",
"(",
"address",
",",
"token",
",",
"client",
"=",
"client",
",",
"reactor",
"=",
"reactor",
")"
] | 37.410256 | [
0.020833333333333332,
0.18181818181818182,
0.02531645569620253,
0.03508771929824561,
0.07317073170731707,
0.06493506493506493,
0,
0.1111111111111111,
0.125,
0.11538461538461539,
0.0967741935483871,
0.1,
0.08571428571428572,
0.12,
0,
0.0625,
0.11538461538461539,
0.08823529411764706,
0.0967741935483871,
0.13043478260869565,
0.1,
0.0967741935483871,
0.10714285714285714,
0.18181818181818182,
0.03076923076923077,
0.03508771929824561,
0.043478260869565216,
0,
0.04878048780487805,
0.034482758620689655,
0.04,
0.041666666666666664,
0.061224489795918366,
0.07142857142857142,
0.06976744186046512,
0.3333333333333333,
0.029411764705882353,
0,
0.030303030303030304
] |
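A sketch of building a client from an explicit environment mapping; the class name VaultClient is an assumption:

client = VaultClient.from_env(env={
    'VAULT_ADDR': 'https://vault.example.org:8200',
    'VAULT_TOKEN': 's.xxxxxxxx',
    'VAULT_CACERT': '/etc/ssl/vault-ca.pem',
})
# variables listed above as unsupported (e.g. VAULT_MAX_RETRIES) are simply ignored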
def _activate(self):
"""Activate the application (bringing menus and windows forward)."""
ra = AppKit.NSRunningApplication
app = ra.runningApplicationWithProcessIdentifier_(
self._getPid())
# NSApplicationActivateAllWindows | NSApplicationActivateIgnoringOtherApps
# == 3 - PyObjC in 10.6 does not expose these constants though so I have
# to use the int instead of the symbolic names
app.activateWithOptions_(3) | [
"def",
"_activate",
"(",
"self",
")",
":",
"ra",
"=",
"AppKit",
".",
"NSRunningApplication",
"app",
"=",
"ra",
".",
"runningApplicationWithProcessIdentifier_",
"(",
"self",
".",
"_getPid",
"(",
")",
")",
"# NSApplicationActivateAllWindows | NSApplicationActivateIgnoringOtherApps",
"# == 3 - PyObjC in 10.6 does not expose these constants though so I have",
"# to use the int instead of the symbolic names",
"app",
".",
"activateWithOptions_",
"(",
"3",
")"
] | 52.444444 | [
0.05,
0.02631578947368421,
0.05,
0.05172413793103448,
0.1111111111111111,
0.036585365853658534,
0.0375,
0.037037037037037035,
0.05714285714285714
] |
def build(ctx, max_revisions, targets, operators, archiver):
"""Build the wily cache."""
config = ctx.obj["CONFIG"]
from wily.commands.build import build
if max_revisions:
logger.debug(f"Fixing revisions to {max_revisions}")
config.max_revisions = max_revisions
if operators:
logger.debug(f"Fixing operators to {operators}")
config.operators = operators.strip().split(",")
if archiver:
logger.debug(f"Fixing archiver to {archiver}")
config.archiver = archiver
if targets:
logger.debug(f"Fixing targets to {targets}")
config.targets = targets
build(
config=config,
archiver=resolve_archiver(config.archiver),
operators=resolve_operators(config.operators),
)
logger.info(
"Completed building wily history, run `wily report <file>` or `wily index` to see more."
) | [
"def",
"build",
"(",
"ctx",
",",
"max_revisions",
",",
"targets",
",",
"operators",
",",
"archiver",
")",
":",
"config",
"=",
"ctx",
".",
"obj",
"[",
"\"CONFIG\"",
"]",
"from",
"wily",
".",
"commands",
".",
"build",
"import",
"build",
"if",
"max_revisions",
":",
"logger",
".",
"debug",
"(",
"f\"Fixing revisions to {max_revisions}\"",
")",
"config",
".",
"max_revisions",
"=",
"max_revisions",
"if",
"operators",
":",
"logger",
".",
"debug",
"(",
"f\"Fixing operators to {operators}\"",
")",
"config",
".",
"operators",
"=",
"operators",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\",\"",
")",
"if",
"archiver",
":",
"logger",
".",
"debug",
"(",
"f\"Fixing archiver to {archiver}\"",
")",
"config",
".",
"archiver",
"=",
"archiver",
"if",
"targets",
":",
"logger",
".",
"debug",
"(",
"f\"Fixing targets to {targets}\"",
")",
"config",
".",
"targets",
"=",
"targets",
"build",
"(",
"config",
"=",
"config",
",",
"archiver",
"=",
"resolve_archiver",
"(",
"config",
".",
"archiver",
")",
",",
"operators",
"=",
"resolve_operators",
"(",
"config",
".",
"operators",
")",
",",
")",
"logger",
".",
"info",
"(",
"\"Completed building wily history, run `wily report <file>` or `wily index` to see more.\"",
")"
] | 29.233333 | [
0.016666666666666666,
0.06451612903225806,
0.06666666666666667,
0,
0.04878048780487805,
0,
0.09523809523809523,
0.03333333333333333,
0.045454545454545456,
0,
0.11764705882352941,
0.03571428571428571,
0.03636363636363636,
0,
0.125,
0.037037037037037035,
0.058823529411764705,
0,
0.13333333333333333,
0.038461538461538464,
0.0625,
0,
0.3,
0.13636363636363635,
0.058823529411764705,
0.05555555555555555,
0.6,
0.1875,
0.03125,
0.6
] |
async def message(self, msg, msg_type=None, obj=None):
"""
Loads/dumps message
:param msg:
:param msg_type:
:param obj:
:return:
"""
elem_type = msg_type if msg_type is not None else msg.__class__
obj = collections.OrderedDict() if not x.has_elem(obj) else x.get_elem(obj)
if hasattr(elem_type, 'kv_serialize'):
msg = elem_type() if msg is None else msg
return await msg.kv_serialize(self, obj=obj)
fields = elem_type.f_specs()
for field in fields:
await self.message_field(msg=msg, field=field, obj=obj)
return obj if self.writing else msg | [
"async",
"def",
"message",
"(",
"self",
",",
"msg",
",",
"msg_type",
"=",
"None",
",",
"obj",
"=",
"None",
")",
":",
"elem_type",
"=",
"msg_type",
"if",
"msg_type",
"is",
"not",
"None",
"else",
"msg",
".",
"__class__",
"obj",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"if",
"not",
"x",
".",
"has_elem",
"(",
"obj",
")",
"else",
"x",
".",
"get_elem",
"(",
"obj",
")",
"if",
"hasattr",
"(",
"elem_type",
",",
"'kv_serialize'",
")",
":",
"msg",
"=",
"elem_type",
"(",
")",
"if",
"msg",
"is",
"None",
"else",
"msg",
"return",
"await",
"msg",
".",
"kv_serialize",
"(",
"self",
",",
"obj",
"=",
"obj",
")",
"fields",
"=",
"elem_type",
".",
"f_specs",
"(",
")",
"for",
"field",
"in",
"fields",
":",
"await",
"self",
".",
"message_field",
"(",
"msg",
"=",
"msg",
",",
"field",
"=",
"field",
",",
"obj",
"=",
"obj",
")",
"return",
"obj",
"if",
"self",
".",
"writing",
"else",
"msg"
] | 33.2 | [
0.018518518518518517,
0.18181818181818182,
0.07407407407407407,
0.15789473684210525,
0.125,
0.15789473684210525,
0.1875,
0.18181818181818182,
0.028169014084507043,
0.03614457831325301,
0,
0.043478260869565216,
0.03773584905660377,
0.03571428571428571,
0,
0.05555555555555555,
0.07142857142857142,
0.029850746268656716,
0,
0.046511627906976744
] |
def timings_out(reps,func,*args,**kw):
"""timings_out(reps,func,*args,**kw) -> (t_total,t_per_call,output)
Execute a function reps times, return a tuple with the elapsed total
CPU time in seconds, the time per call and the function's output.
Under Unix, the return value is the sum of user+system time consumed by
the process, computed via the resource module. This prevents problems
related to the wraparound effect which the time.clock() function has.
Under Windows the return value is in wall clock seconds. See the
documentation for the time module for more details."""
reps = int(reps)
assert reps >=1, 'reps must be >= 1'
if reps==1:
start = clock()
out = func(*args,**kw)
tot_time = clock()-start
else:
rng = xrange(reps-1) # the last time is executed separately to store output
start = clock()
for dummy in rng: func(*args,**kw)
out = func(*args,**kw) # one last time
tot_time = clock()-start
av_time = tot_time / reps
return tot_time,av_time,out | [
"def",
"timings_out",
"(",
"reps",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"reps",
"=",
"int",
"(",
"reps",
")",
"assert",
"reps",
">=",
"1",
",",
"'reps must be >= 1'",
"if",
"reps",
"==",
"1",
":",
"start",
"=",
"clock",
"(",
")",
"out",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"tot_time",
"=",
"clock",
"(",
")",
"-",
"start",
"else",
":",
"rng",
"=",
"xrange",
"(",
"reps",
"-",
"1",
")",
"# the last time is executed separately to store output",
"start",
"=",
"clock",
"(",
")",
"for",
"dummy",
"in",
"rng",
":",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"out",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"# one last time",
"tot_time",
"=",
"clock",
"(",
")",
"-",
"start",
"av_time",
"=",
"tot_time",
"/",
"reps",
"return",
"tot_time",
",",
"av_time",
",",
"out"
] | 39.037037 | [
0.10526315789473684,
0.028169014084507043,
0,
0.027777777777777776,
0.028985507246376812,
0,
0.02666666666666667,
0.02702702702702703,
0.0273972602739726,
0,
0.029411764705882353,
0.05172413793103448,
0,
0.1,
0.075,
0.2,
0.08695652173913043,
0.1,
0.0625,
0.2222222222222222,
0.04819277108433735,
0.08695652173913043,
0.09523809523809523,
0.06382978723404255,
0.0625,
0.06896551724137931,
0.12903225806451613
] |
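A usage sketch for timings_out(); note this is Python-2-era code (it relies on xrange when reps > 1):

tot, per_call, out = timings_out(100, sum, range(1000))
assert out == sum(range(1000))   # the final call's return value is passed through
assert per_call == tot / 100     # av_time is total time divided by reps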
async def fetch_ban(self, user):
"""|coro|
Retrieves the :class:`BanEntry` for a user, which is a namedtuple
with a ``user`` and ``reason`` field. See :meth:`bans` for more
information.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to get ban information from.
Raises
------
Forbidden
You do not have proper permissions to get the information.
NotFound
This user is not banned.
HTTPException
An error occurred while fetching the information.
Returns
-------
BanEntry
The BanEntry object for the specified user.
"""
data = await self._state.http.get_ban(user.id, self.id)
return BanEntry(
user=User(state=self._state, data=data['user']),
reason=data['reason']
) | [
"async",
"def",
"fetch_ban",
"(",
"self",
",",
"user",
")",
":",
"data",
"=",
"await",
"self",
".",
"_state",
".",
"http",
".",
"get_ban",
"(",
"user",
".",
"id",
",",
"self",
".",
"id",
")",
"return",
"BanEntry",
"(",
"user",
"=",
"User",
"(",
"state",
"=",
"self",
".",
"_state",
",",
"data",
"=",
"data",
"[",
"'user'",
"]",
")",
",",
"reason",
"=",
"data",
"[",
"'reason'",
"]",
")"
] | 29 | [
0.03125,
0.11764705882352941,
0,
0.0821917808219178,
0.11267605633802817,
0.1,
0,
0.08695652173913043,
0.0625,
0,
0.1111111111111111,
0.10526315789473684,
0.16666666666666666,
0.04081632653061224,
0,
0.14285714285714285,
0.14285714285714285,
0.11764705882352941,
0.02857142857142857,
0.125,
0.05555555555555555,
0.09523809523809523,
0.03278688524590164,
0,
0.13333333333333333,
0.13333333333333333,
0.125,
0.03636363636363636,
0.18181818181818182,
0.031746031746031744,
0.125,
0.05,
0.09090909090909091,
0.3333333333333333
] |
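A hypothetical coroutine body using fetch_ban(); guild is a Guild and user any abc.Snowflake:

ban = await guild.fetch_ban(user)    # needs the ban_members permission
print(ban.user, ban.reason)
# NotFound is raised if the user is not banned; Forbidden without permission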
def list_daemon_set_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_daemon_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1DaemonSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_daemon_set_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_daemon_set_for_all_namespaces_with_http_info(**kwargs)
return data | [
"def",
"list_daemon_set_for_all_namespaces",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"list_daemon_set_for_all_namespaces_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"list_daemon_set_for_all_namespaces_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 168.777778 | [
0.01818181818181818,
0.18181818181818182,
0.0425531914893617,
0.02666666666666667,
0.04918032786885246,
0.04,
0.09090909090909091,
0,
0.10344827586206896,
0.004780876494023904,
0.031496062992125984,
0.031496062992125984,
0.005035971223021582,
0.041666666666666664,
0.01312910284463895,
0.02702702702702703,
0.024096385542168676,
0.09375,
0.05357142857142857,
0.06818181818181818,
0.18181818181818182,
0.0425531914893617,
0.05714285714285714,
0.03614457831325301,
0.15384615384615385,
0.03529411764705882,
0.08695652173913043
] |
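A sketch with the Kubernetes Python client; which API class exposes this method (e.g. AppsV1Api) depends on the client version, so treat that name as an assumption:

from kubernetes import client, config
config.load_kube_config()            # or load_incluster_config() inside a pod
api = client.AppsV1Api()
for ds in api.list_daemon_set_for_all_namespaces(watch=False).items:
    print(ds.metadata.namespace, ds.metadata.name)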
def query_segdb(cls, flag, *args, **kwargs):
"""Query the initial LIGO segment database for the given flag
Parameters
----------
flag : `str`
The name of the flag for which to query
*args
Either, two `float`-like numbers indicating the
GPS [start, stop) interval, or a `SegmentList`
defining a number of summary segments
url : `str`, optional
URL of the segment database, defaults to
``$DEFAULT_SEGMENT_SERVER`` environment variable, or
``'https://segments.ligo.org'``
Returns
-------
flag : `DataQualityFlag`
A new `DataQualityFlag`, with the `known` and `active` lists
filled appropriately.
"""
warnings.warn("query_segdb is deprecated and will be removed in a "
"future release", DeprecationWarning)
# parse arguments
qsegs = _parse_query_segments(args, cls.query_segdb)
# process query
try:
flags = DataQualityDict.query_segdb([flag], qsegs, **kwargs)
except TypeError as exc:
if 'DataQualityDict' in str(exc):
raise TypeError(str(exc).replace('DataQualityDict',
cls.__name__))
else:
raise
if len(flags) > 1:
raise RuntimeError("Multiple flags returned for single query, "
"something went wrong:\n %s"
% '\n '.join(flags.keys()))
elif len(flags) == 0:
raise RuntimeError("No flags returned for single query, "
"something went wrong.")
return flags[flag] | [
"def",
"query_segdb",
"(",
"cls",
",",
"flag",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"query_segdb is deprecated and will be removed in a \"",
"\"future release\"",
",",
"DeprecationWarning",
")",
"# parse arguments",
"qsegs",
"=",
"_parse_query_segments",
"(",
"args",
",",
"cls",
".",
"query_segdb",
")",
"# process query",
"try",
":",
"flags",
"=",
"DataQualityDict",
".",
"query_segdb",
"(",
"[",
"flag",
"]",
",",
"qsegs",
",",
"*",
"*",
"kwargs",
")",
"except",
"TypeError",
"as",
"exc",
":",
"if",
"'DataQualityDict'",
"in",
"str",
"(",
"exc",
")",
":",
"raise",
"TypeError",
"(",
"str",
"(",
"exc",
")",
".",
"replace",
"(",
"'DataQualityDict'",
",",
"cls",
".",
"__name__",
")",
")",
"else",
":",
"raise",
"if",
"len",
"(",
"flags",
")",
">",
"1",
":",
"raise",
"RuntimeError",
"(",
"\"Multiple flags returned for single query, \"",
"\"something went wrong:\\n %s\"",
"%",
"'\\n '",
".",
"join",
"(",
"flags",
".",
"keys",
"(",
")",
")",
")",
"elif",
"len",
"(",
"flags",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"No flags returned for single query, \"",
"\"something went wrong.\"",
")",
"return",
"flags",
"[",
"flag",
"]"
] | 37 | [
0.022727272727272728,
0.028985507246376812,
0,
0.1111111111111111,
0.1111111111111111,
0.2,
0.0392156862745098,
0,
0.15384615384615385,
0.06779661016949153,
0.06896551724137931,
0.04081632653061224,
0,
0.13793103448275862,
0.038461538461538464,
0.046875,
0.06976744186046512,
0,
0.13333333333333333,
0.13333333333333333,
0.125,
0.041666666666666664,
0.06060606060606061,
0.18181818181818182,
0.04,
0.06779661016949153,
0,
0.08,
0.03333333333333333,
0,
0.08695652173913043,
0.16666666666666666,
0.027777777777777776,
0.0625,
0.044444444444444446,
0.04477611940298507,
0.06349206349206349,
0.11764705882352941,
0.09523809523809523,
0.07692307692307693,
0.04,
0.04838709677419355,
0.06557377049180328,
0.06896551724137931,
0.043478260869565216,
0.07272727272727272,
0.07692307692307693
] |
def make_anchor(file_path: pathlib.Path,
offset: int,
width: int,
context_width: int,
metadata,
encoding: str = 'utf-8',
handle=None):
"""Construct a new `Anchor`.
Args:
file_path: The absolute path to the target file for the anchor.
offset: The offset of the anchored text in codepoints in `file_path`'s
contents.
width: The width in codepoints of the anchored text.
context_width: The width in codepoints of context on either side of the
anchor.
        metadata: The metadata to attach to the anchor. Must be json-serializable.
encoding: The encoding of the contents of `file_path`.
handle: If not `None`, this is a file-like object the contents of which
are used to calculate the context of the anchor. If `None`, then
the file indicated by `file_path` is opened instead.
Raises:
ValueError: `width` characters can't be read at `offset`.
ValueError: `file_path` is not absolute.
"""
@contextmanager
def get_handle():
if handle is None:
with file_path.open(mode='rt', encoding=encoding) as fp:
yield fp
else:
yield handle
with get_handle() as fp:
context = _make_context(fp, offset, width, context_width)
return Anchor(
file_path=file_path,
encoding=encoding,
context=context,
metadata=metadata) | [
"def",
"make_anchor",
"(",
"file_path",
":",
"pathlib",
".",
"Path",
",",
"offset",
":",
"int",
",",
"width",
":",
"int",
",",
"context_width",
":",
"int",
",",
"metadata",
",",
"encoding",
":",
"str",
"=",
"'utf-8'",
",",
"handle",
"=",
"None",
")",
":",
"@",
"contextmanager",
"def",
"get_handle",
"(",
")",
":",
"if",
"handle",
"is",
"None",
":",
"with",
"file_path",
".",
"open",
"(",
"mode",
"=",
"'rt'",
",",
"encoding",
"=",
"encoding",
")",
"as",
"fp",
":",
"yield",
"fp",
"else",
":",
"yield",
"handle",
"with",
"get_handle",
"(",
")",
"as",
"fp",
":",
"context",
"=",
"_make_context",
"(",
"fp",
",",
"offset",
",",
"width",
",",
"context_width",
")",
"return",
"Anchor",
"(",
"file_path",
"=",
"file_path",
",",
"encoding",
"=",
"encoding",
",",
"context",
"=",
"context",
",",
"metadata",
"=",
"metadata",
")"
] | 34.045455 | [
0.05,
0.07142857142857142,
0.07407407407407407,
0.05714285714285714,
0.08,
0.05,
0.13793103448275862,
0.0625,
0,
0.2222222222222222,
0.028169014084507043,
0.038461538461538464,
0.09523809523809523,
0.03333333333333333,
0.02531645569620253,
0.10526315789473684,
0.03614457831325301,
0.04838709677419355,
0.0379746835443038,
0.039473684210526314,
0.046875,
0,
0.18181818181818182,
0.046153846153846156,
0.0625,
0,
0.2857142857142857,
0,
0.10526315789473684,
0.09523809523809523,
0.07692307692307693,
0.029411764705882353,
0.08333333333333333,
0.15384615384615385,
0.08333333333333333,
0,
0.07142857142857142,
0.03076923076923077,
0,
0.16666666666666666,
0.10714285714285714,
0.11538461538461539,
0.125,
0.15384615384615385
] |
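A minimal usage sketch for make_anchor(); the path must be absolute and the target file is read to compute the surrounding context:

import pathlib
anchor = make_anchor(
    pathlib.Path('/home/user/notes.txt'),   # hypothetical absolute path
    offset=10, width=5, context_width=20,
    metadata={'comment': 'check this'})     # must be json-serializable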
def main(name, klass):
""" OSPD Main function. """
# Common args parser.
parser = create_args_parser(name)
# Common args
cargs = get_common_args(parser)
logging.getLogger().setLevel(cargs['log_level'])
wrapper = klass(certfile=cargs['certfile'], keyfile=cargs['keyfile'],
cafile=cargs['cafile'])
if cargs['version']:
print_version(wrapper)
sys.exit()
if cargs['foreground']:
console = logging.StreamHandler()
console.setFormatter(
logging.Formatter(
'%(asctime)s %(name)s: %(levelname)s: %(message)s'))
logging.getLogger().addHandler(console)
elif cargs['log_file']:
logfile = logging.handlers.WatchedFileHandler(cargs['log_file'])
logfile.setFormatter(
logging.Formatter(
'%(asctime)s %(name)s: %(levelname)s: %(message)s'))
logging.getLogger().addHandler(logfile)
go_to_background()
else:
syslog = logging.handlers.SysLogHandler('/dev/log')
syslog.setFormatter(
logging.Formatter('%(name)s: %(levelname)s: %(message)s'))
logging.getLogger().addHandler(syslog)
        # Duplicate syslog's file descriptor to stdout/stderr.
syslog_fd = syslog.socket.fileno()
os.dup2(syslog_fd, 1)
os.dup2(syslog_fd, 2)
go_to_background()
if not wrapper.check():
return 1
return wrapper.run(cargs['address'], cargs['port'], cargs['unix_socket']) | [
"def",
"main",
"(",
"name",
",",
"klass",
")",
":",
"# Common args parser.",
"parser",
"=",
"create_args_parser",
"(",
"name",
")",
"# Common args",
"cargs",
"=",
"get_common_args",
"(",
"parser",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"setLevel",
"(",
"cargs",
"[",
"'log_level'",
"]",
")",
"wrapper",
"=",
"klass",
"(",
"certfile",
"=",
"cargs",
"[",
"'certfile'",
"]",
",",
"keyfile",
"=",
"cargs",
"[",
"'keyfile'",
"]",
",",
"cafile",
"=",
"cargs",
"[",
"'cafile'",
"]",
")",
"if",
"cargs",
"[",
"'version'",
"]",
":",
"print_version",
"(",
"wrapper",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"cargs",
"[",
"'foreground'",
"]",
":",
"console",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"console",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s %(name)s: %(levelname)s: %(message)s'",
")",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"addHandler",
"(",
"console",
")",
"elif",
"cargs",
"[",
"'log_file'",
"]",
":",
"logfile",
"=",
"logging",
".",
"handlers",
".",
"WatchedFileHandler",
"(",
"cargs",
"[",
"'log_file'",
"]",
")",
"logfile",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s %(name)s: %(levelname)s: %(message)s'",
")",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"addHandler",
"(",
"logfile",
")",
"go_to_background",
"(",
")",
"else",
":",
"syslog",
"=",
"logging",
".",
"handlers",
".",
"SysLogHandler",
"(",
"'/dev/log'",
")",
"syslog",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'%(name)s: %(levelname)s: %(message)s'",
")",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"addHandler",
"(",
"syslog",
")",
"# Duplicate syslog's file descriptor to stout/stderr.",
"syslog_fd",
"=",
"syslog",
".",
"socket",
".",
"fileno",
"(",
")",
"os",
".",
"dup2",
"(",
"syslog_fd",
",",
"1",
")",
"os",
".",
"dup2",
"(",
"syslog_fd",
",",
"2",
")",
"go_to_background",
"(",
")",
"if",
"not",
"wrapper",
".",
"check",
"(",
")",
":",
"return",
"1",
"return",
"wrapper",
".",
"run",
"(",
"cargs",
"[",
"'address'",
"]",
",",
"cargs",
"[",
"'port'",
"]",
",",
"cargs",
"[",
"'unix_socket'",
"]",
")"
] | 34.116279 | [
0.045454545454545456,
0.06451612903225806,
0,
0.08,
0.05405405405405406,
0,
0.11764705882352941,
0.05714285714285714,
0.038461538461538464,
0.0410958904109589,
0.09302325581395349,
0,
0.08333333333333333,
0.06666666666666667,
0.1111111111111111,
0,
0.07407407407407407,
0.04878048780487805,
0.10344827586206896,
0.1,
0.04411764705882353,
0.0425531914893617,
0.07407407407407407,
0.027777777777777776,
0.10344827586206896,
0.1,
0.04411764705882353,
0.0425531914893617,
0.07692307692307693,
0.2222222222222222,
0.03389830508474576,
0.10714285714285714,
0.04285714285714286,
0.043478260869565216,
0.03278688524590164,
0.047619047619047616,
0.06896551724137931,
0.06896551724137931,
0.07692307692307693,
0,
0.07407407407407407,
0.125,
0.025974025974025976
] |
def configure(self, config):
"""See base class method."""
super().configure(config)
self._line = config.getint(
'line{suffix}'.format(suffix=self._option_suffix),
fallback=self._line) | [
"def",
"configure",
"(",
"self",
",",
"config",
")",
":",
"super",
"(",
")",
".",
"configure",
"(",
"config",
")",
"self",
".",
"_line",
"=",
"config",
".",
"getint",
"(",
"'line{suffix}'",
".",
"format",
"(",
"suffix",
"=",
"self",
".",
"_option_suffix",
")",
",",
"fallback",
"=",
"self",
".",
"_line",
")"
] | 37.666667 | [
0.03571428571428571,
0.05555555555555555,
0.06060606060606061,
0.08571428571428572,
0.03225806451612903,
0.125
] |
def getActionHandle(self, pchActionName):
"""Returns a handle for an action. This handle is used for all performance-sensitive calls."""
fn = self.function_table.getActionHandle
pHandle = VRActionHandle_t()
result = fn(pchActionName, byref(pHandle))
return result, pHandle | [
"def",
"getActionHandle",
"(",
"self",
",",
"pchActionName",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"getActionHandle",
"pHandle",
"=",
"VRActionHandle_t",
"(",
")",
"result",
"=",
"fn",
"(",
"pchActionName",
",",
"byref",
"(",
"pHandle",
")",
")",
"return",
"result",
",",
"pHandle"
] | 43.857143 | [
0.024390243902439025,
0.029411764705882353,
0,
0.041666666666666664,
0.05555555555555555,
0.04,
0.06666666666666667
] |
def html(content, **kwargs):
"""HTML (Hypertext Markup Language)"""
if hasattr(content, 'read'):
return content
elif hasattr(content, 'render'):
return content.render().encode('utf8')
return str(content).encode('utf8') | [
"def",
"html",
"(",
"content",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"content",
",",
"'read'",
")",
":",
"return",
"content",
"elif",
"hasattr",
"(",
"content",
",",
"'render'",
")",
":",
"return",
"content",
".",
"render",
"(",
")",
".",
"encode",
"(",
"'utf8'",
")",
"return",
"str",
"(",
"content",
")",
".",
"encode",
"(",
"'utf8'",
")"
] | 30.5 | [
0.03571428571428571,
0.047619047619047616,
0.0625,
0.09090909090909091,
0.05555555555555555,
0.043478260869565216,
0,
0.05263157894736842
] |
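Quick examples of the three branches in html():

html('<p>hi</p>')               # plain object: str() then UTF-8 encode -> b'<p>hi</p>'
html(open('page.html', 'rb'))   # file-like (has .read): returned unchanged
# objects exposing .render() are rendered first, then UTF-8 encoded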
def make_full_ivar():
""" take the scatters and skylines and make final ivars """
# skylines come as an ivar
# don't use them for now, because I don't really trust them...
# skylines = np.load("%s/skylines.npz" %DATA_DIR)['arr_0']
ref_flux = np.load("%s/ref_flux_all.npz" %DATA_DIR)['arr_0']
ref_scat = np.load("%s/ref_spec_scat_all.npz" %DATA_DIR)['arr_0']
test_flux = np.load("%s/test_flux.npz" %DATA_DIR)['arr_0']
test_scat = np.load("%s/test_spec_scat.npz" %DATA_DIR)['arr_0']
ref_ivar = np.ones(ref_flux.shape) / ref_scat[:,None]**2
test_ivar = np.ones(test_flux.shape) / test_scat[:,None]**2
# ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines)
# test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines)
ref_bad = np.logical_or(ref_flux <= 0, ref_flux > 1.1)
test_bad = np.logical_or(test_flux <= 0, test_flux > 1.1)
SMALL = 1.0 / 1000000000.0
ref_ivar[ref_bad] = SMALL
test_ivar[test_bad] = SMALL
np.savez("%s/ref_ivar_corr.npz" %DATA_DIR, ref_ivar)
np.savez("%s/test_ivar_corr.npz" %DATA_DIR, test_ivar) | [
"def",
"make_full_ivar",
"(",
")",
":",
"# skylines come as an ivar",
"# don't use them for now, because I don't really trust them...",
"# skylines = np.load(\"%s/skylines.npz\" %DATA_DIR)['arr_0']",
"ref_flux",
"=",
"np",
".",
"load",
"(",
"\"%s/ref_flux_all.npz\"",
"%",
"DATA_DIR",
")",
"[",
"'arr_0'",
"]",
"ref_scat",
"=",
"np",
".",
"load",
"(",
"\"%s/ref_spec_scat_all.npz\"",
"%",
"DATA_DIR",
")",
"[",
"'arr_0'",
"]",
"test_flux",
"=",
"np",
".",
"load",
"(",
"\"%s/test_flux.npz\"",
"%",
"DATA_DIR",
")",
"[",
"'arr_0'",
"]",
"test_scat",
"=",
"np",
".",
"load",
"(",
"\"%s/test_spec_scat.npz\"",
"%",
"DATA_DIR",
")",
"[",
"'arr_0'",
"]",
"ref_ivar",
"=",
"np",
".",
"ones",
"(",
"ref_flux",
".",
"shape",
")",
"/",
"ref_scat",
"[",
":",
",",
"None",
"]",
"**",
"2",
"test_ivar",
"=",
"np",
".",
"ones",
"(",
"test_flux",
".",
"shape",
")",
"/",
"test_scat",
"[",
":",
",",
"None",
"]",
"**",
"2",
"# ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines)",
"# test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines)",
"ref_bad",
"=",
"np",
".",
"logical_or",
"(",
"ref_flux",
"<=",
"0",
",",
"ref_flux",
">",
"1.1",
")",
"test_bad",
"=",
"np",
".",
"logical_or",
"(",
"test_flux",
"<=",
"0",
",",
"test_flux",
">",
"1.1",
")",
"SMALL",
"=",
"1.0",
"/",
"1000000000.0",
"ref_ivar",
"[",
"ref_bad",
"]",
"=",
"SMALL",
"test_ivar",
"[",
"test_bad",
"]",
"=",
"SMALL",
"np",
".",
"savez",
"(",
"\"%s/ref_ivar_corr.npz\"",
"%",
"DATA_DIR",
",",
"ref_ivar",
")",
"np",
".",
"savez",
"(",
"\"%s/test_ivar_corr.npz\"",
"%",
"DATA_DIR",
",",
"test_ivar",
")"
] | 46.375 | [
0.047619047619047616,
0.031746031746031744,
0,
0.06666666666666667,
0.030303030303030304,
0.03225806451612903,
0,
0.046875,
0.043478260869565216,
0.04838709677419355,
0.04477611940298507,
0.05,
0.047619047619047616,
0,
0.0375,
0.03614457831325301,
0,
0.034482758620689655,
0.03278688524590164,
0.06666666666666667,
0.06896551724137931,
0.06451612903225806,
0.05357142857142857,
0.05172413793103448
] |
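
The core of `make_full_ivar` is two numpy idioms worth isolating: broadcasting one scatter value per spectrum into per-pixel inverse variances, and downweighting unphysical pixels by assigning them a near-zero ivar. A minimal sketch with synthetic arrays (the shapes and the 1.1 flux cutoff mirror the row above; the data are random stand-ins):

```python
import numpy as np

rng = np.random.default_rng(0)
flux = rng.normal(1.0, 0.05, size=(3, 10))   # 3 spectra x 10 pixels
scat = np.array([0.02, 0.05, 0.10])          # one scatter value per spectrum

# Broadcast: ivar = 1 / scatter^2, repeated across the pixel axis.
ivar = np.ones(flux.shape) / scat[:, None] ** 2

# Mask non-physical pixels with an effectively-zero inverse variance.
bad = np.logical_or(flux <= 0, flux > 1.1)
SMALL = 1e-9
ivar[bad] = SMALL
print(ivar.shape, bad.sum())
```
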
def get_email_addresses(self):
"""
        :returns: dict of type and email address list
:rtype: dict(str, list(str))
"""
email_dict = {}
for child in self.vcard.getChildren():
if child.name == "EMAIL":
type = helpers.list_to_string(
self._get_types_for_vcard_object(child, "internet"), ", ")
if type not in email_dict:
email_dict[type] = []
email_dict[type].append(child.value)
# sort email address lists
for email_list in email_dict.values():
email_list.sort()
return email_dict | [
"def",
"get_email_addresses",
"(",
"self",
")",
":",
"email_dict",
"=",
"{",
"}",
"for",
"child",
"in",
"self",
".",
"vcard",
".",
"getChildren",
"(",
")",
":",
"if",
"child",
".",
"name",
"==",
"\"EMAIL\"",
":",
"type",
"=",
"helpers",
".",
"list_to_string",
"(",
"self",
".",
"_get_types_for_vcard_object",
"(",
"child",
",",
"\"internet\"",
")",
",",
"\", \"",
")",
"if",
"type",
"not",
"in",
"email_dict",
":",
"email_dict",
"[",
"type",
"]",
"=",
"[",
"]",
"email_dict",
"[",
"type",
"]",
".",
"append",
"(",
"child",
".",
"value",
")",
"# sort email address lists",
"for",
"email_list",
"in",
"email_dict",
".",
"values",
"(",
")",
":",
"email_list",
".",
"sort",
"(",
")",
"return",
"email_dict"
] | 37.705882 | [
0.03333333333333333,
0.18181818181818182,
0.037037037037037035,
0.08333333333333333,
0.18181818181818182,
0.08695652173913043,
0.043478260869565216,
0.05405405405405406,
0.06521739130434782,
0.038461538461538464,
0.047619047619047616,
0.04878048780487805,
0.038461538461538464,
0.058823529411764705,
0.043478260869565216,
0.06896551724137931,
0.08
] |
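
The loop in `get_email_addresses` builds a dict-of-lists keyed by address type and then sorts each bucket; `collections.defaultdict` expresses the same grouping without the membership check. A standalone sketch with toy `(type, address)` pairs:

```python
from collections import defaultdict

entries = [("work", "a@example.com"), ("home", "b@example.com"),
           ("work", "c@example.com")]

email_dict = defaultdict(list)
for type_, address in entries:
    email_dict[type_].append(address)

for email_list in email_dict.values():
    email_list.sort()
print(dict(email_dict))  # {'work': ['a@...', 'c@...'], 'home': ['b@...']}
```
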
def update(self, items):
"""
Updates the dependencies in the inverse relationship format, i.e. from an iterable or dict that is structured
as `(item, dependent_items)`. The parent element `item` may occur multiple times.
:param items: Iterable or dictionary in the format `(item, dependent_items)`.
:type items: collections.Iterable
"""
for parent, sub_items in _iterate_dependencies(items):
for si in sub_items:
dep = self._deps[si]
if parent not in dep.parent:
dep.parent.append(parent) | [
"def",
"update",
"(",
"self",
",",
"items",
")",
":",
"for",
"parent",
",",
"sub_items",
"in",
"_iterate_dependencies",
"(",
"items",
")",
":",
"for",
"si",
"in",
"sub_items",
":",
"dep",
"=",
"self",
".",
"_deps",
"[",
"si",
"]",
"if",
"parent",
"not",
"in",
"dep",
".",
"parent",
":",
"dep",
".",
"parent",
".",
"append",
"(",
"parent",
")"
] | 45.923077 | [
0.041666666666666664,
0.18181818181818182,
0.02564102564102564,
0.0449438202247191,
0,
0.058823529411764705,
0.07317073170731707,
0.18181818181818182,
0.03225806451612903,
0.0625,
0.05555555555555555,
0.045454545454545456,
0.044444444444444446
] |
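
A standalone sketch of the inverse-relationship update that `update` performs: input arrives as `(item, dependent_items)` pairs, and each dependent item records `item` as one of its parents. The names here are illustrative, not the library's own (`deps` stands in for the internal `_deps` mapping):

```python
from collections import defaultdict

def update(deps, items):
    """deps maps each item to its list of parents."""
    for parent, sub_items in items.items():
        for si in sub_items:
            if parent not in deps[si]:
                deps[si].append(parent)

deps = defaultdict(list)
update(deps, {"app": ["lib", "cli"], "tests": ["lib"]})
print(dict(deps))  # {'lib': ['app', 'tests'], 'cli': ['app']}
```
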
def upsert(self, name, value=None, seq=None):
"""Add one name/value entry to the main context of the rolne, but
only if an entry with that name does not already exist.
        If an entry with the name exists, then the first entry found has its
        value changed.
NOTE: the upsert only updates the FIRST entry with the name found.
The method returns True if an insertion occurs, otherwise False.
Example of use:
>>> # setup an example rolne first
>>> my_var = rolne()
>>> my_var.upsert("item", "zing")
True
>>> my_var["item", "zing"].append("color", "blue")
>>> print my_var
%rolne:
item = zing
color = blue
<BLANKLINE>
>>> my_var.upsert("item", "zing")
False
>>> print my_var
%rolne:
item = zing
color = blue
<BLANKLINE>
>>> my_var.upsert("item", "broom")
False
>>> print my_var
%rolne:
item = broom
color = blue
<BLANKLINE>
.. versionadded:: 0.1.1
:param name:
The key name of the name/value pair.
:param value:
The key value of the name/value pair. If not passed, then the value
is assumed to be None.
:returns:
Returns True if the name/value was newly inserted. Otherwise, it
        returns False, indicating that an update was done instead.
"""
for ctr, entry in enumerate(self.data):
            if entry[TNAME] == name:
                new_tuple = (name, value, entry[TLIST], entry[TSEQ])
                self.data[ctr] = new_tuple
return False
new_tuple = (name, value, [], lib._seq(self, seq))
self.data.append(new_tuple)
return True | [
"def",
"upsert",
"(",
"self",
",",
"name",
",",
"value",
"=",
"None",
",",
"seq",
"=",
"None",
")",
":",
"for",
"ctr",
",",
"entry",
"in",
"enumerate",
"(",
"self",
".",
"data",
")",
":",
"if",
"entry",
"[",
"TNAME",
"]",
"==",
"name",
":",
"new_tuple",
"=",
"(",
"name",
",",
"value",
",",
"entry",
"[",
"TLIST",
"]",
",",
"entry",
"[",
"TSEQ",
"]",
")",
"self",
".",
"data",
"[",
"ctr",
"]",
"=",
"new_tuple",
"return",
"False",
"new_tuple",
"=",
"(",
"name",
",",
"value",
",",
"[",
"]",
",",
"lib",
".",
"_seq",
"(",
"self",
",",
"seq",
")",
")",
"self",
".",
"data",
".",
"append",
"(",
"new_tuple",
")",
"return",
"True"
] | 31.22807 | [
0.022222222222222223,
0.0273972602739726,
0.031746031746031744,
0,
0.025974025974025976,
0.09090909090909091,
0,
0.02702702702702703,
0,
0.027777777777777776,
0,
0.08695652173913043,
0,
0.09523809523809523,
0.10714285714285714,
0.07317073170731707,
0.16666666666666666,
0.05172413793103448,
0.125,
0.13333333333333333,
0.10526315789473684,
0.08333333333333333,
0.15789473684210525,
0.07317073170731707,
0.15384615384615385,
0.125,
0.13333333333333333,
0.10526315789473684,
0.08333333333333333,
0.15789473684210525,
0.07142857142857142,
0.15384615384615385,
0.125,
0.13333333333333333,
0.1,
0.08333333333333333,
0.15789473684210525,
0,
0.0967741935483871,
0.25,
0.15,
0.06382978723404255,
0.14285714285714285,
0.038461538461538464,
0.09090909090909091,
0.17647058823529413,
0.04,
0.04477611940298507,
0.18181818181818182,
0.0425531914893617,
0.08823529411764706,
0.029411764705882353,
0.075,
0.07142857142857142,
0.034482758620689655,
0.05714285714285714,
0.10526315789473684
] |
def get_transcript_lengths(ensembl, transcript_ids):
""" finds the protein length for ensembl transcript IDs for a gene
Args:
ensembl: EnsemblRequest object to request sequences and data
from the ensembl REST API
transcript_ids: list of transcript IDs for a single gene
Returns:
dictionary of lengths (in amino acids), indexed by transcript IDs
"""
transcripts = {}
for transcript_id in transcript_ids:
# get the transcript's protein sequence via the ensembl REST API
try:
seq = ensembl.get_protein_seq_for_transcript(transcript_id)
except ValueError:
continue
transcripts[transcript_id] = len(seq)
return transcripts | [
"def",
"get_transcript_lengths",
"(",
"ensembl",
",",
"transcript_ids",
")",
":",
"transcripts",
"=",
"{",
"}",
"for",
"transcript_id",
"in",
"transcript_ids",
":",
"# get the transcript's protein sequence via the ensembl REST API",
"try",
":",
"seq",
"=",
"ensembl",
".",
"get_protein_seq_for_transcript",
"(",
"transcript_id",
")",
"except",
"ValueError",
":",
"continue",
"transcripts",
"[",
"transcript_id",
"]",
"=",
"len",
"(",
"seq",
")",
"return",
"transcripts"
] | 32.347826 | [
0.019230769230769232,
0.02857142857142857,
0.5,
0.2222222222222222,
0.029411764705882353,
0.05405405405405406,
0.03125,
0.5,
0.16666666666666666,
0.0547945205479452,
0.2857142857142857,
0.5,
0.1,
0.05,
0.027777777777777776,
0.16666666666666666,
0.028169014084507043,
0.07692307692307693,
0.1,
0.25,
0.044444444444444446,
0.5,
0.09090909090909091
] |
def main(pyc_file, asm_path):
"""
Create Python bytecode from a Python assembly file.
ASM_PATH gives the input Python assembly file. We suggest ending the
    file in .pyasm
If --pyc-file is given, that indicates the path to write the
Python bytecode. The path should end in '.pyc'.
See https://github.com/rocky/python-xasm/blob/master/HOW-TO-USE.rst
for how to write a Python assembler file.
"""
if os.stat(asm_path).st_size == 0:
print("Size of assembly file %s is zero" % asm_path)
sys.exit(1)
asm = asm_file(asm_path)
if not pyc_file and asm_path.endswith('.pyasm'):
pyc_file = asm_path[:-len('.pyasm')] + '.pyc'
write_pycfile(pyc_file, asm) | [
"def",
"main",
"(",
"pyc_file",
",",
"asm_path",
")",
":",
"if",
"os",
".",
"stat",
"(",
"asm_path",
")",
".",
"st_size",
"==",
"0",
":",
"print",
"(",
"\"Size of assembly file %s is zero\"",
"%",
"asm_path",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"asm",
"=",
"asm_file",
"(",
"asm_path",
")",
"if",
"not",
"pyc_file",
"and",
"asm_path",
".",
"endswith",
"(",
"'.pyasm'",
")",
":",
"pyc_file",
"=",
"asm_path",
"[",
":",
"-",
"len",
"(",
"'.pyasm'",
")",
"]",
"+",
"'.pyc'",
"write_pycfile",
"(",
"pyc_file",
",",
"asm",
")"
] | 31.772727 | [
0.034482758620689655,
0.2857142857142857,
0.03636363636363636,
0,
0.027777777777777776,
0.125,
0,
0.046875,
0.0392156862745098,
0,
0.04225352112676056,
0.044444444444444446,
0.2857142857142857,
0.05263157894736842,
0.03333333333333333,
0.10526315789473684,
0.07142857142857142,
0,
0.038461538461538464,
0.03773584905660377,
0,
0.0625
] |
def as_condition(cls, obj):
"""Convert obj into :class:`Condition`"""
if isinstance(obj, cls):
return obj
else:
return cls(cmap=obj) | [
"def",
"as_condition",
"(",
"cls",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"cls",
")",
":",
"return",
"obj",
"else",
":",
"return",
"cls",
"(",
"cmap",
"=",
"obj",
")"
] | 29.166667 | [
0.037037037037037035,
0.04081632653061224,
0.0625,
0.09090909090909091,
0.15384615384615385,
0.0625
] |
def line(x1, y1, x2, y2):
"""Returns the coords of the line between (x1, y1), (x2, y2)
:param x1: x coordinate of the startpoint
:param y1: y coordinate of the startpoint
:param x2: x coordinate of the endpoint
:param y2: y coordinate of the endpoint
"""
x1 = normalize(x1)
y1 = normalize(y1)
x2 = normalize(x2)
y2 = normalize(y2)
xdiff = max(x1, x2) - min(x1, x2)
ydiff = max(y1, y2) - min(y1, y2)
xdir = 1 if x1 <= x2 else -1
ydir = 1 if y1 <= y2 else -1
r = max(xdiff, ydiff)
for i in range(r+1):
x = x1
y = y1
if ydiff:
y += (float(i) * ydiff) / r * ydir
if xdiff:
x += (float(i) * xdiff) / r * xdir
yield (x, y) | [
"def",
"line",
"(",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")",
":",
"x1",
"=",
"normalize",
"(",
"x1",
")",
"y1",
"=",
"normalize",
"(",
"y1",
")",
"x2",
"=",
"normalize",
"(",
"x2",
")",
"y2",
"=",
"normalize",
"(",
"y2",
")",
"xdiff",
"=",
"max",
"(",
"x1",
",",
"x2",
")",
"-",
"min",
"(",
"x1",
",",
"x2",
")",
"ydiff",
"=",
"max",
"(",
"y1",
",",
"y2",
")",
"-",
"min",
"(",
"y1",
",",
"y2",
")",
"xdir",
"=",
"1",
"if",
"x1",
"<=",
"x2",
"else",
"-",
"1",
"ydir",
"=",
"1",
"if",
"y1",
"<=",
"y2",
"else",
"-",
"1",
"r",
"=",
"max",
"(",
"xdiff",
",",
"ydiff",
")",
"for",
"i",
"in",
"range",
"(",
"r",
"+",
"1",
")",
":",
"x",
"=",
"x1",
"y",
"=",
"y1",
"if",
"ydiff",
":",
"y",
"+=",
"(",
"float",
"(",
"i",
")",
"*",
"ydiff",
")",
"/",
"r",
"*",
"ydir",
"if",
"xdiff",
":",
"x",
"+=",
"(",
"float",
"(",
"i",
")",
"*",
"xdiff",
")",
"/",
"r",
"*",
"xdir",
"yield",
"(",
"x",
",",
"y",
")"
] | 23.258065 | [
0.04,
0.03125,
0,
0.06666666666666667,
0.06666666666666667,
0.06976744186046512,
0.06976744186046512,
0.2857142857142857,
0,
0.09090909090909091,
0.09090909090909091,
0.09090909090909091,
0.09090909090909091,
0,
0.05405405405405406,
0.05405405405405406,
0.0625,
0.0625,
0,
0.08,
0,
0.08333333333333333,
0.14285714285714285,
0.14285714285714285,
0,
0.11764705882352941,
0.043478260869565216,
0.11764705882352941,
0.043478260869565216,
0,
0.1
] |
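
`line` yields evenly spaced points by scaling each axis's delta to the longer axis, a DDA-style interpolation. A standalone demo with `normalize` stubbed as rounding to int (an assumption; the real `normalize` may also clamp to the drawing surface), and the generator body repeated so the snippet runs on its own:

```python
def normalize(v):
    return int(round(v))  # stand-in for the real coordinate normalization

def line(x1, y1, x2, y2):
    x1, y1, x2, y2 = map(normalize, (x1, y1, x2, y2))
    xdiff = max(x1, x2) - min(x1, x2)
    ydiff = max(y1, y2) - min(y1, y2)
    xdir = 1 if x1 <= x2 else -1
    ydir = 1 if y1 <= y2 else -1
    r = max(xdiff, ydiff)
    for i in range(r + 1):
        x, y = x1, y1
        if ydiff:
            y += (float(i) * ydiff) / r * ydir
        if xdiff:
            x += (float(i) * xdiff) / r * xdir
        yield (x, y)

print(list(line(0, 0, 4, 2)))
# [(0.0, 0.0), (1.0, 0.5), (2.0, 1.0), (3.0, 1.5), (4.0, 2.0)]
```
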
def is_owner():
"""A :func:`.check` that checks if the person invoking this command is the
owner of the bot.
This is powered by :meth:`.Bot.is_owner`.
This check raises a special exception, :exc:`.NotOwner` that is derived
from :exc:`.CheckFailure`.
"""
async def predicate(ctx):
if not await ctx.bot.is_owner(ctx.author):
raise NotOwner('You do not own this bot.')
return True
return check(predicate) | [
"def",
"is_owner",
"(",
")",
":",
"async",
"def",
"predicate",
"(",
"ctx",
")",
":",
"if",
"not",
"await",
"ctx",
".",
"bot",
".",
"is_owner",
"(",
"ctx",
".",
"author",
")",
":",
"raise",
"NotOwner",
"(",
"'You do not own this bot.'",
")",
"return",
"True",
"return",
"check",
"(",
"predicate",
")"
] | 28.125 | [
0.06666666666666667,
0.02564102564102564,
0.09523809523809523,
0,
0.13333333333333333,
0,
0.06666666666666667,
0.2,
0.2857142857142857,
0,
0.06896551724137931,
0.04,
0.037037037037037035,
0.10526315789473684,
0,
0.07407407407407407
] |
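
In a discord.py-style framework, `is_owner` is a decorator factory whose predicate runs before the command body. A simplified, framework-free sketch of the same mechanics (`check` here is a stand-in for the library's real implementation, and `ctx` is reduced to a plain dict):

```python
import asyncio

class NotOwner(Exception):
    pass

def check(predicate):
    """Attach a predicate that must pass before the command body runs."""
    def decorator(func):
        async def wrapper(ctx, *args, **kwargs):
            if not await predicate(ctx):
                raise NotOwner
            return await func(ctx, *args, **kwargs)
        return wrapper
    return decorator

def is_owner():
    async def predicate(ctx):
        if not ctx["author_is_owner"]:
            raise NotOwner("You do not own this bot.")
        return True
    return check(predicate)

@is_owner()
async def shutdown(ctx):
    return "shutting down"

print(asyncio.run(shutdown({"author_is_owner": True})))
```
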
def add_directory(self, directory: PathLike) -> None:
"""
Adds *.gtb.cp4* tables from a directory. The relevant files are lazily
opened when the tablebase is actually probed.
"""
directory = os.path.abspath(directory)
if not os.path.isdir(directory):
raise IOError("not a directory: {!r}".format(directory))
for tbfile in fnmatch.filter(os.listdir(directory), "*.gtb.cp4"):
self.available_tables[os.path.basename(tbfile).replace(".gtb.cp4", "")] = os.path.join(directory, tbfile) | [
"def",
"add_directory",
"(",
"self",
",",
"directory",
":",
"PathLike",
")",
"->",
"None",
":",
"directory",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"directory",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
":",
"raise",
"IOError",
"(",
"\"not a directory: {!r}\"",
".",
"format",
"(",
"directory",
")",
")",
"for",
"tbfile",
"in",
"fnmatch",
".",
"filter",
"(",
"os",
".",
"listdir",
"(",
"directory",
")",
",",
"\"*.gtb.cp4\"",
")",
":",
"self",
".",
"available_tables",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"tbfile",
")",
".",
"replace",
"(",
"\".gtb.cp4\"",
",",
"\"\"",
")",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"tbfile",
")"
] | 50 | [
0.018867924528301886,
0.18181818181818182,
0.05128205128205128,
0.03773584905660377,
0.18181818181818182,
0.043478260869565216,
0.05,
0.029411764705882353,
0,
0.0273972602739726,
0.02564102564102564
] |
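
The discovery loop in `add_directory` is a plain `fnmatch` scan; the key detail is stripping the `.gtb.cp4` suffix to get the table name. The same transformation applied to a toy listing (directory path and file names are made up):

```python
import fnmatch
import os.path

listing = ["kqkr.gtb.cp4", "kpk.gtb.cp4", "README.txt"]
tables = {
    os.path.basename(f).replace(".gtb.cp4", ""): os.path.join("/tables", f)
    for f in fnmatch.filter(listing, "*.gtb.cp4")
}
print(tables)  # {'kqkr': '/tables/kqkr.gtb.cp4', 'kpk': '/tables/kpk.gtb.cp4'}
```
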
def _c_base_var(self):
"""Return the name of the module base variable."""
if self.opts.no_structs:
return self.name
return 'windll->{}.{}'.format(
self.name, self.opts.base
) | [
"def",
"_c_base_var",
"(",
"self",
")",
":",
"if",
"self",
".",
"opts",
".",
"no_structs",
":",
"return",
"self",
".",
"name",
"return",
"'windll->{}.{}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"self",
".",
"opts",
".",
"base",
")"
] | 32 | [
0.045454545454545456,
0.034482758620689655,
0.0625,
0.07142857142857142,
0.07894736842105263,
0.05405405405405406,
0.3333333333333333
] |
def CovInv(self):
"""
Inverse of the covariance matrix
Returns
-------
H : (np.array)
inverse of the covariance matrix.
"""
self.recurse(full_matrix=True)
return self.tree.root.cinv | [
"def",
"CovInv",
"(",
"self",
")",
":",
"self",
".",
"recurse",
"(",
"full_matrix",
"=",
"True",
")",
"return",
"self",
".",
"tree",
".",
"root",
".",
"cinv"
] | 20.75 | [
0.058823529411764705,
0.18181818181818182,
0.05,
0,
0.13333333333333333,
0.13333333333333333,
0,
0.17391304347826086,
0.044444444444444446,
0.18181818181818182,
0.05263157894736842,
0.058823529411764705
] |
def dangling(self, targets: sos_targets):
'''returns
        1. missing targets, from either the DAG or the provided target list
        2. existing targets from the provided target list that are not in the DAG
'''
existing = []
missing = []
if env.config['trace_existing']:
for x in self._all_depends_files.keys():
if x not in self._all_output_files:
if x.target_exists():
existing.append(x)
else:
missing.append(x)
else:
missing = [
x for x in self._all_depends_files.keys()
if x not in self._all_output_files and not x.target_exists()
]
for x in targets:
if x not in self._all_output_files:
if x.target_exists('target'):
existing.append(x)
else:
missing.append(x)
return missing, existing | [
"def",
"dangling",
"(",
"self",
",",
"targets",
":",
"sos_targets",
")",
":",
"existing",
"=",
"[",
"]",
"missing",
"=",
"[",
"]",
"if",
"env",
".",
"config",
"[",
"'trace_existing'",
"]",
":",
"for",
"x",
"in",
"self",
".",
"_all_depends_files",
".",
"keys",
"(",
")",
":",
"if",
"x",
"not",
"in",
"self",
".",
"_all_output_files",
":",
"if",
"x",
".",
"target_exists",
"(",
")",
":",
"existing",
".",
"append",
"(",
"x",
")",
"else",
":",
"missing",
".",
"append",
"(",
"x",
")",
"else",
":",
"missing",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"_all_depends_files",
".",
"keys",
"(",
")",
"if",
"x",
"not",
"in",
"self",
".",
"_all_output_files",
"and",
"not",
"x",
".",
"target_exists",
"(",
")",
"]",
"for",
"x",
"in",
"targets",
":",
"if",
"x",
"not",
"in",
"self",
".",
"_all_output_files",
":",
"if",
"x",
".",
"target_exists",
"(",
"'target'",
")",
":",
"existing",
".",
"append",
"(",
"x",
")",
"else",
":",
"missing",
".",
"append",
"(",
"x",
")",
"return",
"missing",
",",
"existing"
] | 37.692308 | [
0.024390243902439025,
0.1111111111111111,
0.034482758620689655,
0.031746031746031744,
0.18181818181818182,
0.09523809523809523,
0.1,
0.05,
0.038461538461538464,
0.0392156862745098,
0.04878048780487805,
0.047619047619047616,
0.08,
0.04878048780487805,
0.15384615384615385,
0.13043478260869565,
0.03508771929824561,
0.02631578947368421,
0.23076923076923078,
0.08,
0.0425531914893617,
0.044444444444444446,
0.05263157894736842,
0.09523809523809523,
0.05405405405405406,
0.0625
] |
def _delete_state(self, activity, agent, state_id=None, registration=None, etag=None):
"""Private method to delete a specified state from the LRS
:param activity: Activity object of state to be deleted
:type activity: :class:`tincan.activity.Activity`
:param agent: Agent object of state to be deleted
:type agent: :class:`tincan.agent.Agent`
:param state_id: UUID of state to be deleted
:type state_id: str | unicode
:param registration: registration UUID of state to be deleted
:type registration: str | unicode
:param etag: etag of state to be deleted
:type etag: str | unicode
:return: LRS Response object with deleted state as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
if not isinstance(activity, Activity):
activity = Activity(activity)
if not isinstance(agent, Agent):
agent = Agent(agent)
request = HTTPRequest(
method="DELETE",
resource="activities/state"
)
if etag is not None:
request.headers["If-Match"] = etag
request.query_params = {
"activityId": activity.id,
"agent": agent.to_json(self.version)
}
if state_id is not None:
request.query_params["stateId"] = state_id
if registration is not None:
request.query_params["registration"] = registration
lrs_response = self._send_request(request)
return lrs_response | [
"def",
"_delete_state",
"(",
"self",
",",
"activity",
",",
"agent",
",",
"state_id",
"=",
"None",
",",
"registration",
"=",
"None",
",",
"etag",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"activity",
",",
"Activity",
")",
":",
"activity",
"=",
"Activity",
"(",
"activity",
")",
"if",
"not",
"isinstance",
"(",
"agent",
",",
"Agent",
")",
":",
"agent",
"=",
"Agent",
"(",
"agent",
")",
"request",
"=",
"HTTPRequest",
"(",
"method",
"=",
"\"DELETE\"",
",",
"resource",
"=",
"\"activities/state\"",
")",
"if",
"etag",
"is",
"not",
"None",
":",
"request",
".",
"headers",
"[",
"\"If-Match\"",
"]",
"=",
"etag",
"request",
".",
"query_params",
"=",
"{",
"\"activityId\"",
":",
"activity",
".",
"id",
",",
"\"agent\"",
":",
"agent",
".",
"to_json",
"(",
"self",
".",
"version",
")",
"}",
"if",
"state_id",
"is",
"not",
"None",
":",
"request",
".",
"query_params",
"[",
"\"stateId\"",
"]",
"=",
"state_id",
"if",
"registration",
"is",
"not",
"None",
":",
"request",
".",
"query_params",
"[",
"\"registration\"",
"]",
"=",
"registration",
"lrs_response",
"=",
"self",
".",
"_send_request",
"(",
"request",
")",
"return",
"lrs_response"
] | 35.302326 | [
0.023255813953488372,
0.030303030303030304,
0,
0.047619047619047616,
0.12280701754385964,
0.05263157894736842,
0.14583333333333334,
0.057692307692307696,
0.08108108108108109,
0.043478260869565216,
0.07317073170731707,
0.0625,
0.09090909090909091,
0.045454545454545456,
0.125,
0.18181818181818182,
0.043478260869565216,
0.04878048780487805,
0,
0.05,
0.0625,
0,
0.1,
0.10714285714285714,
0.07692307692307693,
0.3333333333333333,
0,
0.07142857142857142,
0.043478260869565216,
0,
0.09375,
0.05263157894736842,
0.041666666666666664,
0.3333333333333333,
0.0625,
0.037037037037037035,
0,
0.05555555555555555,
0.031746031746031744,
0,
0.04,
0,
0.07407407407407407
] |
def health_percentage(self) -> Union[int, float]:
""" Does not include shields """
if self._proto.health_max == 0:
return 0
return self._proto.health / self._proto.health_max | [
"def",
"health_percentage",
"(",
"self",
")",
"->",
"Union",
"[",
"int",
",",
"float",
"]",
":",
"if",
"self",
".",
"_proto",
".",
"health_max",
"==",
"0",
":",
"return",
"0",
"return",
"self",
".",
"_proto",
".",
"health",
"/",
"self",
".",
"_proto",
".",
"health_max"
] | 41.2 | [
0.02040816326530612,
0.05,
0.05128205128205128,
0.1,
0.034482758620689655
] |
def update_contact(self, contact_id, email=None, name=None):
"""
        Update an existing contact
:param contact_id: contact id
:param email: user email
:param name: user name
"""
params = {}
if email is not None:
params['email'] = email
if name is not None:
params['name'] = name
url = self.CONTACTS_ID_URL % contact_id
connection = Connection(self.token)
connection.set_url(self.production, url)
connection.add_header('Content-Type', 'application/json')
connection.add_params(params)
return connection.patch_request() | [
"def",
"update_contact",
"(",
"self",
",",
"contact_id",
",",
"email",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"email",
"is",
"not",
"None",
":",
"params",
"[",
"'email'",
"]",
"=",
"email",
"if",
"name",
"is",
"not",
"None",
":",
"params",
"[",
"'name'",
"]",
"=",
"name",
"url",
"=",
"self",
".",
"CONTACTS_ID_URL",
"%",
"contact_id",
"connection",
"=",
"Connection",
"(",
"self",
".",
"token",
")",
"connection",
".",
"set_url",
"(",
"self",
".",
"production",
",",
"url",
")",
"connection",
".",
"add_header",
"(",
"'Content-Type'",
",",
"'application/json'",
")",
"connection",
".",
"add_params",
"(",
"params",
")",
"return",
"connection",
".",
"patch_request",
"(",
")"
] | 27.73913 | [
0.016666666666666666,
0.18181818181818182,
0.0625,
0.08108108108108109,
0.09375,
0.1,
0.18181818181818182,
0.10526315789473684,
0,
0.06896551724137931,
0.05714285714285714,
0,
0.07142857142857142,
0.06060606060606061,
0,
0.0425531914893617,
0,
0.046511627906976744,
0.041666666666666664,
0.03076923076923077,
0.05405405405405406,
0,
0.04878048780487805
] |
def read_fcs_text_segment(buf, begin, end, delim=None, supplemental=False):
"""
Read TEXT segment of FCS file.
Parameters
----------
buf : file-like object
Buffer containing data to interpret as TEXT segment.
begin : int
Offset (in bytes) to first byte of TEXT segment in `buf`.
end : int
Offset (in bytes) to last byte of TEXT segment in `buf`.
delim : str, optional
1-byte delimiter character which delimits key-value entries of
TEXT segment. If None and ``supplemental==False``, will extract
delimiter as first byte of TEXT segment.
supplemental : bool, optional
Flag specifying that segment is a supplemental TEXT segment (see
FCS3.0 and FCS3.1), in which case a delimiter (``delim``) must be
specified.
Returns
-------
text : dict
Dictionary of key-value entries extracted from TEXT segment.
delim : str or None
String containing delimiter or None if TEXT segment is empty.
Raises
------
ValueError
If supplemental TEXT segment (``supplemental==True``) but ``delim`` is
not specified.
ValueError
If primary TEXT segment (``supplemental==False``) does not start with
delimiter.
ValueError
If first keyword starts with delimiter (e.g. a primary TEXT segment
with the following contents: ///k1/v1/k2/v2/).
ValueError
If odd number of keys + values detected (indicating an unpaired key or
value).
ValueError
If TEXT segment is ill-formed (unable to be parsed according to the
FCS standards).
Notes
-----
ANALYSIS segments and supplemental TEXT segments are parsed the same way,
so this function can also be used to parse ANALYSIS segments.
This function does *not* automatically parse and accumulate additional
TEXT-like segments (e.g. supplemental TEXT segments or ANALYSIS segments)
referenced in the originally specified TEXT segment.
References
----------
.. [1] P.N. Dean, C.B. Bagwell, T. Lindmo, R.F. Murphy, G.C. Salzman,
"Data file standard for flow cytometry. Data File Standards
Committee of the Society for Analytical Cytology," Cytometry vol
11, pp 323-332, 1990, PMID 2340769.
.. [2] L.C. Seamer, C.B. Bagwell, L. Barden, D. Redelman, G.C. Salzman,
J.C. Wood, R.F. Murphy, "Proposed new data file standard for flow
cytometry, version FCS 3.0," Cytometry vol 28, pp 118-122, 1997,
PMID 9181300.
.. [3] J. Spidlen, et al, "Data File Standard for Flow Cytometry,
version FCS 3.1," Cytometry A vol 77A, pp 97-100, 2009, PMID
19937951.
"""
if delim is None:
if supplemental:
raise ValueError("must specify ``delim`` if reading supplemental"
+ " TEXT segment")
else:
buf.seek(begin)
delim = buf.read(1).decode(encoding)
# The offsets are inclusive (meaning they specify first and last byte
# WITHIN segment) and seeking is inclusive (read() after seek() reads the
# byte which was seeked to). This means the length of the segment is
# ((end+1) - begin).
buf.seek(begin)
raw = buf.read((end+1)-begin).decode(encoding)
# If segment is empty, return empty dictionary as text
if not raw:
return {}, None
if not supplemental:
# Check that the first character of the TEXT segment is equal to the
# delimiter.
if raw[0] != delim:
raise ValueError("primary TEXT segment should start with"
+ " delimiter")
# The FCS standards indicate that keyword values must be flanked by the
# delimiter character, but they do not require that the TEXT segment end
# with the delimiter. As such, look for the last instance of the delimiter
# in the segment and retain everything before it (potentially removing
# TEXT segment characters which occur after the last instance of the
# delimiter).
end_index = raw.rfind(delim)
if supplemental and end_index == -1:
# Delimiter was not found. This should only be permitted for an empty
# supplemental TEXT segment (primary TEXT segment should fail above
# if first character doesn't match delimiter).
return {}, delim
else:
raw = raw[:end_index]
pairs_list = raw.split(delim)
###
# Reconstruct Keys and Values By Aggregating Escaped Delimiters
###
# According to the FCS standards, delimiter characters are permitted in
# keywords and keyword values as long as they are escaped by being
# immediately repeated. Delimiter characters are not permitted as the
# first character of a keyword or keyword value, however. Null (zero
# length) keywords or keyword values are also not permitted. According to
# these restrictions, a delimiter character should manifest itself as an
# empty element in ``pairs_list``. As such, scan through ``pairs_list``
# looking for empty elements and reconstruct keywords or keyword values
# containing delimiters.
###
# Start scanning from the end of the list since the end of the list is
# well defined (i.e. doesn't depend on whether the TEXT segment is a
# primary segment, which MUST start with the delimiter, or a supplemental
# segment, which is not required to start with the delimiter).
reconstructed_KV_accumulator = []
idx = len(pairs_list) - 1
while idx >= 0:
if pairs_list[idx] == '':
# Count the number of consecutive empty elements to determine how
# many escaped delimiters exist and whether or not a true boundary
# delimiter exists.
num_empty_elements = 1
idx = idx - 1
            while idx >= 0 and pairs_list[idx] == '':
num_empty_elements = num_empty_elements + 1
idx = idx - 1
# Need to differentiate between rolling off the bottom of the list
# and hitting a non-empty element. Assessing pairs_list[-1] can
# still be evaluated (by wrapping around the list, which is not
# what we want to check) so check if we've finished scanning the
# list first.
if idx < 0:
# We rolled off the bottom of the list.
if num_empty_elements == 1:
# If we only hit 1 empty element before rolling off the
# list, then there were no escaped delimiters and the
# segment started with a delimiter.
break
elif (num_empty_elements % 2) == 0:
# Even number of empty elements.
#
# If this is a supplemental TEXT segment, this can be
# interpreted as *not* starting the TEXT segment with the
# delimiter (which is permitted for supplemental TEXT
# segments) and starting the first keyword with one or
# more delimiters, which is prohibited.
if supplemental:
raise ValueError("starting a TEXT segment keyword"
+ " with a delimiter is prohibited")
# If this is a primary TEXT segment, this is an ill-formed
# segment. Rationale: 1 empty element will always be
# consumed as the initial delimiter which a primary TEXT
# segment is required to start with. After that delimiter
# is accounted for, you now have either an unescaped
# delimiter, which is prohibited, or a boundary delimiter,
# which would imply that the entire first keyword was
# composed of delimiters, which is prohibited because
# keywords are not allowed to start with the delimiter).
raise ValueError("ill-formed TEXT segment")
else:
# Odd number of empty elements. This can be interpreted as
# starting the segment with a delimiter and then having
# one or more delimiters starting the first keyword, which
# is prohibited.
raise ValueError("starting a TEXT segment keyword with a"
+ " delimiter is prohibited")
else:
# We encountered a non-empty element. Calculate the number of
# escaped delimiters and whether or not a true boundary
# delimiter is present.
num_delim = (num_empty_elements+1)//2
boundary_delim = (num_empty_elements % 2) == 0
if boundary_delim:
# A boundary delimiter exists. We know that the boundary
# has to be on the right side (end of the sequence),
# because keywords and keyword values are prohibited from
# starting with a delimiter, which would be the case if the
# boundary delimiter was anywhere BUT the last character.
# This means we need to postpend the appropriate number of
# delim characters to the end of the non-empty list
# element that ``idx`` currently points to.
pairs_list[idx] = pairs_list[idx] + (num_delim*delim)
# We can now add the reconstructed keyword or keyword
# value to the accumulator and move on. It's possible that
# this keyword or keyword value is incompletely
# reconstructed (e.g. /key//1///value1/
# => {'key/1/':'value1'}; there are other delimiters in
# this keyword or keyword value), but that case is now
# handled independently of this case and just like any
# other instance of escaped delimiters *without* a
# boundary delimiter.
reconstructed_KV_accumulator.append(pairs_list[idx])
idx = idx - 1
else:
# No boundary delimiters exist, so we need to glue the
# list elements before and after this sequence of
# consecutive delimiters together with the appropriate
# number of delimiters.
if len(reconstructed_KV_accumulator) == 0:
# Edge Case: The accumulator should always have at
# least 1 element in it at this point. If it doesn't,
# the last value ends with the delimiter, which will
# result in two consecutive empty elements, which
# won't fall into this case. Only 1 empty element
# indicates an ill-formed TEXT segment with an
# unpaired non-boundary delimiter (e.g. /k1/v1//),
# which is not permitted. The ill-formed TEXT segment
# implied by 1 empty element is recoverable, though,
# and a use case which is known to exist, so throw a
# warning and ignore the 2nd copy of the delimiter.
warnings.warn("detected ill-formed TEXT segment (ends"
+ " with two delimiter characters)."
+ " Ignoring last delimiter character")
reconstructed_KV_accumulator.append(pairs_list[idx])
else:
reconstructed_KV_accumulator[-1] = pairs_list[idx] + \
(num_delim*delim) + reconstructed_KV_accumulator[-1]
idx = idx - 1
else:
# Non-empty element, just append
reconstructed_KV_accumulator.append(pairs_list[idx])
idx = idx - 1
pairs_list_reconstructed = list(reversed(reconstructed_KV_accumulator))
# List length should be even since all key-value entries should be pairs
if len(pairs_list_reconstructed) % 2 != 0:
raise ValueError("odd # of (keys + values); unpaired key or value")
text = dict(zip(pairs_list_reconstructed[0::2],
pairs_list_reconstructed[1::2]))
return text, delim | [
"def",
"read_fcs_text_segment",
"(",
"buf",
",",
"begin",
",",
"end",
",",
"delim",
"=",
"None",
",",
"supplemental",
"=",
"False",
")",
":",
"if",
"delim",
"is",
"None",
":",
"if",
"supplemental",
":",
"raise",
"ValueError",
"(",
"\"must specify ``delim`` if reading supplemental\"",
"+",
"\" TEXT segment\"",
")",
"else",
":",
"buf",
".",
"seek",
"(",
"begin",
")",
"delim",
"=",
"buf",
".",
"read",
"(",
"1",
")",
".",
"decode",
"(",
"encoding",
")",
"# The offsets are inclusive (meaning they specify first and last byte",
"# WITHIN segment) and seeking is inclusive (read() after seek() reads the",
"# byte which was seeked to). This means the length of the segment is",
"# ((end+1) - begin).",
"buf",
".",
"seek",
"(",
"begin",
")",
"raw",
"=",
"buf",
".",
"read",
"(",
"(",
"end",
"+",
"1",
")",
"-",
"begin",
")",
".",
"decode",
"(",
"encoding",
")",
"# If segment is empty, return empty dictionary as text",
"if",
"not",
"raw",
":",
"return",
"{",
"}",
",",
"None",
"if",
"not",
"supplemental",
":",
"# Check that the first character of the TEXT segment is equal to the",
"# delimiter.",
"if",
"raw",
"[",
"0",
"]",
"!=",
"delim",
":",
"raise",
"ValueError",
"(",
"\"primary TEXT segment should start with\"",
"+",
"\" delimiter\"",
")",
"# The FCS standards indicate that keyword values must be flanked by the",
"# delimiter character, but they do not require that the TEXT segment end",
"# with the delimiter. As such, look for the last instance of the delimiter",
"# in the segment and retain everything before it (potentially removing",
"# TEXT segment characters which occur after the last instance of the",
"# delimiter).",
"end_index",
"=",
"raw",
".",
"rfind",
"(",
"delim",
")",
"if",
"supplemental",
"and",
"end_index",
"==",
"-",
"1",
":",
"# Delimiter was not found. This should only be permitted for an empty",
"# supplemental TEXT segment (primary TEXT segment should fail above",
"# if first character doesn't match delimiter).",
"return",
"{",
"}",
",",
"delim",
"else",
":",
"raw",
"=",
"raw",
"[",
":",
"end_index",
"]",
"pairs_list",
"=",
"raw",
".",
"split",
"(",
"delim",
")",
"###",
"# Reconstruct Keys and Values By Aggregating Escaped Delimiters",
"###",
"# According to the FCS standards, delimiter characters are permitted in",
"# keywords and keyword values as long as they are escaped by being",
"# immediately repeated. Delimiter characters are not permitted as the",
"# first character of a keyword or keyword value, however. Null (zero",
"# length) keywords or keyword values are also not permitted. According to",
"# these restrictions, a delimiter character should manifest itself as an",
"# empty element in ``pairs_list``. As such, scan through ``pairs_list``",
"# looking for empty elements and reconstruct keywords or keyword values",
"# containing delimiters.",
"###",
"# Start scanning from the end of the list since the end of the list is",
"# well defined (i.e. doesn't depend on whether the TEXT segment is a",
"# primary segment, which MUST start with the delimiter, or a supplemental",
"# segment, which is not required to start with the delimiter).",
"reconstructed_KV_accumulator",
"=",
"[",
"]",
"idx",
"=",
"len",
"(",
"pairs_list",
")",
"-",
"1",
"while",
"idx",
">=",
"0",
":",
"if",
"pairs_list",
"[",
"idx",
"]",
"==",
"''",
":",
"# Count the number of consecutive empty elements to determine how",
"# many escaped delimiters exist and whether or not a true boundary",
"# delimiter exists.",
"num_empty_elements",
"=",
"1",
"idx",
"=",
"idx",
"-",
"1",
"while",
"idx",
">=",
"0",
"and",
"pairs_list",
"[",
"idx",
"]",
"==",
"''",
":",
"num_empty_elements",
"=",
"num_empty_elements",
"+",
"1",
"idx",
"=",
"idx",
"-",
"1",
"# Need to differentiate between rolling off the bottom of the list",
"# and hitting a non-empty element. Assessing pairs_list[-1] can",
"# still be evaluated (by wrapping around the list, which is not",
"# what we want to check) so check if we've finished scanning the",
"# list first.",
"if",
"idx",
"<",
"0",
":",
"# We rolled off the bottom of the list.",
"if",
"num_empty_elements",
"==",
"1",
":",
"# If we only hit 1 empty element before rolling off the",
"# list, then there were no escaped delimiters and the",
"# segment started with a delimiter.",
"break",
"elif",
"(",
"num_empty_elements",
"%",
"2",
")",
"==",
"0",
":",
"# Even number of empty elements.",
"#",
"# If this is a supplemental TEXT segment, this can be",
"# interpreted as *not* starting the TEXT segment with the",
"# delimiter (which is permitted for supplemental TEXT",
"# segments) and starting the first keyword with one or",
"# more delimiters, which is prohibited.",
"if",
"supplemental",
":",
"raise",
"ValueError",
"(",
"\"starting a TEXT segment keyword\"",
"+",
"\" with a delimiter is prohibited\"",
")",
"# If this is a primary TEXT segment, this is an ill-formed",
"# segment. Rationale: 1 empty element will always be",
"# consumed as the initial delimiter which a primary TEXT",
"# segment is required to start with. After that delimiter",
"# is accounted for, you now have either an unescaped",
"# delimiter, which is prohibited, or a boundary delimiter,",
"# which would imply that the entire first keyword was",
"# composed of delimiters, which is prohibited because",
"# keywords are not allowed to start with the delimiter).",
"raise",
"ValueError",
"(",
"\"ill-formed TEXT segment\"",
")",
"else",
":",
"# Odd number of empty elements. This can be interpreted as",
"# starting the segment with a delimiter and then having",
"# one or more delimiters starting the first keyword, which",
"# is prohibited.",
"raise",
"ValueError",
"(",
"\"starting a TEXT segment keyword with a\"",
"+",
"\" delimiter is prohibited\"",
")",
"else",
":",
"# We encountered a non-empty element. Calculate the number of",
"# escaped delimiters and whether or not a true boundary",
"# delimiter is present.",
"num_delim",
"=",
"(",
"num_empty_elements",
"+",
"1",
")",
"//",
"2",
"boundary_delim",
"=",
"(",
"num_empty_elements",
"%",
"2",
")",
"==",
"0",
"if",
"boundary_delim",
":",
"# A boundary delimiter exists. We know that the boundary",
"# has to be on the right side (end of the sequence),",
"# because keywords and keyword values are prohibited from",
"# starting with a delimiter, which would be the case if the",
"# boundary delimiter was anywhere BUT the last character.",
"# This means we need to postpend the appropriate number of",
"# delim characters to the end of the non-empty list",
"# element that ``idx`` currently points to.",
"pairs_list",
"[",
"idx",
"]",
"=",
"pairs_list",
"[",
"idx",
"]",
"+",
"(",
"num_delim",
"*",
"delim",
")",
"# We can now add the reconstructed keyword or keyword",
"# value to the accumulator and move on. It's possible that",
"# this keyword or keyword value is incompletely",
"# reconstructed (e.g. /key//1///value1/",
"# => {'key/1/':'value1'}; there are other delimiters in",
"# this keyword or keyword value), but that case is now",
"# handled independently of this case and just like any",
"# other instance of escaped delimiters *without* a",
"# boundary delimiter.",
"reconstructed_KV_accumulator",
".",
"append",
"(",
"pairs_list",
"[",
"idx",
"]",
")",
"idx",
"=",
"idx",
"-",
"1",
"else",
":",
"# No boundary delimiters exist, so we need to glue the",
"# list elements before and after this sequence of",
"# consecutive delimiters together with the appropriate",
"# number of delimiters.",
"if",
"len",
"(",
"reconstructed_KV_accumulator",
")",
"==",
"0",
":",
"# Edge Case: The accumulator should always have at",
"# least 1 element in it at this point. If it doesn't,",
"# the last value ends with the delimiter, which will",
"# result in two consecutive empty elements, which",
"# won't fall into this case. Only 1 empty element",
"# indicates an ill-formed TEXT segment with an",
"# unpaired non-boundary delimiter (e.g. /k1/v1//),",
"# which is not permitted. The ill-formed TEXT segment",
"# implied by 1 empty element is recoverable, though,",
"# and a use case which is known to exist, so throw a",
"# warning and ignore the 2nd copy of the delimiter.",
"warnings",
".",
"warn",
"(",
"\"detected ill-formed TEXT segment (ends\"",
"+",
"\" with two delimiter characters).\"",
"+",
"\" Ignoring last delimiter character\"",
")",
"reconstructed_KV_accumulator",
".",
"append",
"(",
"pairs_list",
"[",
"idx",
"]",
")",
"else",
":",
"reconstructed_KV_accumulator",
"[",
"-",
"1",
"]",
"=",
"pairs_list",
"[",
"idx",
"]",
"+",
"(",
"num_delim",
"*",
"delim",
")",
"+",
"reconstructed_KV_accumulator",
"[",
"-",
"1",
"]",
"idx",
"=",
"idx",
"-",
"1",
"else",
":",
"# Non-empty element, just append",
"reconstructed_KV_accumulator",
".",
"append",
"(",
"pairs_list",
"[",
"idx",
"]",
")",
"idx",
"=",
"idx",
"-",
"1",
"pairs_list_reconstructed",
"=",
"list",
"(",
"reversed",
"(",
"reconstructed_KV_accumulator",
")",
")",
"# List length should be even since all key-value entries should be pairs",
"if",
"len",
"(",
"pairs_list_reconstructed",
")",
"%",
"2",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"odd # of (keys + values); unpaired key or value\"",
")",
"text",
"=",
"dict",
"(",
"zip",
"(",
"pairs_list_reconstructed",
"[",
"0",
":",
":",
"2",
"]",
",",
"pairs_list_reconstructed",
"[",
"1",
":",
":",
"2",
"]",
")",
")",
"return",
"text",
",",
"delim"
] | 48.5 | [
0.013333333333333334,
0.2857142857142857,
0.058823529411764705,
0,
0.14285714285714285,
0.14285714285714285,
0.11538461538461539,
0.03333333333333333,
0.2,
0.07692307692307693,
0.23076923076923078,
0.078125,
0.12,
0.02857142857142857,
0.07042253521126761,
0.041666666666666664,
0.09090909090909091,
0.05555555555555555,
0.0684931506849315,
0.1111111111111111,
0,
0.18181818181818182,
0.18181818181818182,
0.2,
0.029411764705882353,
0.13043478260869565,
0.028985507246376812,
0,
0.2,
0.2,
0.14285714285714285,
0.07692307692307693,
0.09090909090909091,
0.14285714285714285,
0.07792207792207792,
0.1111111111111111,
0.14285714285714285,
0.05333333333333334,
0.1111111111111111,
0.14285714285714285,
0.05128205128205128,
0.2,
0.14285714285714285,
0.05333333333333334,
0.13043478260869565,
0,
0.2222222222222222,
0.2222222222222222,
0.025974025974025976,
0.03076923076923077,
0,
0.04054054054054054,
0.03896103896103896,
0.03571428571428571,
0,
0.14285714285714285,
0.14285714285714285,
0.0273972602739726,
0.045454545454545456,
0.056338028169014086,
0.07142857142857142,
0,
0.02666666666666667,
0.041666666666666664,
0.056338028169014086,
0.15,
0,
0.028985507246376812,
0.05970149253731343,
0.1875,
0,
0.2857142857142857,
0.09523809523809523,
0.08333333333333333,
0.03896103896103896,
0.0851063829787234,
0.15384615384615385,
0.07407407407407407,
0.041666666666666664,
0,
0.0273972602739726,
0.025974025974025976,
0.027777777777777776,
0.08333333333333333,
0.10526315789473684,
0.04,
0,
0.034482758620689655,
0.13333333333333333,
0.08695652173913043,
0,
0.08333333333333333,
0.02631578947368421,
0.1,
0.07407407407407407,
0.043478260869565216,
0.09090909090909091,
0,
0.02666666666666667,
0.02631578947368421,
0.02564102564102564,
0.02702702702702703,
0.027777777777777776,
0.11764705882352941,
0.0625,
0.05,
0.025974025974025976,
0.02666666666666667,
0.037037037037037035,
0.08333333333333333,
0.2222222222222222,
0.06896551724137931,
0,
0.06060606060606061,
0,
0.2857142857142857,
0.029850746268656716,
0.2857142857142857,
0.02666666666666667,
0.02857142857142857,
0.0273972602739726,
0.027777777777777776,
0.025974025974025976,
0.02631578947368421,
0.02666666666666667,
0.02666666666666667,
0.07142857142857142,
0.2857142857142857,
0,
0.02702702702702703,
0.027777777777777776,
0.025974025974025976,
0.030303030303030304,
0.05405405405405406,
0.06896551724137931,
0.10526315789473684,
0.06060606060606061,
0.025974025974025976,
0.02564102564102564,
0.06451612903225806,
0.058823529411764705,
0.08,
0.057692307692307696,
0.03389830508474576,
0.06896551724137931,
0.02564102564102564,
0.02666666666666667,
0.02666666666666667,
0.02631578947368421,
0.08,
0.08695652173913043,
0.03636363636363636,
0.046511627906976744,
0.02666666666666667,
0.0273972602739726,
0.03636363636363636,
0.08,
0.0392156862745098,
0.038461538461538464,
0.09523809523809523,
0.0273972602739726,
0.025974025974025976,
0.0273972602739726,
0.02702702702702703,
0.03389830508474576,
0.05555555555555555,
0.04054054054054054,
0.05194805194805195,
0.02564102564102564,
0.027777777777777776,
0.02631578947368421,
0.025974025974025976,
0.027777777777777776,
0.02564102564102564,
0.0273972602739726,
0.0273972602739726,
0.02631578947368421,
0.031746031746031744,
0.09523809523809523,
0.02564102564102564,
0.02666666666666667,
0.02564102564102564,
0.05555555555555555,
0.03896103896103896,
0.06060606060606061,
0.11764705882352941,
0.025974025974025976,
0.028169014084507043,
0.05128205128205128,
0.05172413793103448,
0.03225806451612903,
0,
0.058823529411764705,
0.02631578947368421,
0.027777777777777776,
0.025974025974025976,
0.02531645569620253,
0.025974025974025976,
0.02564102564102564,
0.028169014084507043,
0.031746031746031744,
0.0273972602739726,
0,
0.0273972602739726,
0.02564102564102564,
0.029850746268656716,
0.03389830508474576,
0.02666666666666667,
0.02702702702702703,
0.02702702702702703,
0.02857142857142857,
0.04878048780487805,
0.027777777777777776,
0.06060606060606061,
0.09523809523809523,
0.02702702702702703,
0.028985507246376812,
0.02702702702702703,
0.046511627906976744,
0.03225806451612903,
0.02702702702702703,
0.025974025974025976,
0.02631578947368421,
0.0273972602739726,
0.0273972602739726,
0.02857142857142857,
0.02702702702702703,
0.025974025974025976,
0.02631578947368421,
0.02631578947368421,
0.02666666666666667,
0.038461538461538464,
0.04054054054054054,
0.05194805194805195,
0.02631578947368421,
0.08,
0.02564102564102564,
0.0375,
0.06060606060606061,
0.15384615384615385,
0.045454545454545456,
0.03125,
0.08,
0,
0.02666666666666667,
0,
0.02631578947368421,
0.043478260869565216,
0.02666666666666667,
0,
0.058823529411764705,
0.057692307692307696,
0,
0.09090909090909091
] |
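
The trickiest part of the parser above is delimiter escaping: an escaped delimiter (`//` when the delimiter is `/`) shows up as an empty element after `split`, which the reconstruction pass scans for. A small demonstration of the raw split plus a compact sentinel-based reconstruction (the sentinel trick covers only the common even-escape case; the full implementation above also validates odd runs and boundary delimiters):

```python
delim = "/"
raw = "/key1/value1/key//2/value//2/"

# Each escaped delimiter ("//") becomes an empty element after splitting.
print(raw.split(delim))
# ['', 'key1', 'value1', 'key', '', '2', 'value', '', '2', '']

# Compact alternative view: protect "//" with a sentinel, split, restore.
SENTINEL = "\x00"
protected = raw.replace(delim * 2, SENTINEL)
fields = [f.replace(SENTINEL, delim) for f in protected.split(delim) if f]
print(dict(zip(fields[0::2], fields[1::2])))
# {'key1': 'value1', 'key/2': 'value/2'}
```
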
def get_internals(model):
"""
Return non-boundary reactions and their metabolites.
Boundary reactions are unbalanced by their nature. They are excluded here
and only the metabolites of the others are considered.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
biomass = set(find_biomass_reaction(model))
if len(biomass) == 0:
LOGGER.warning("No biomass reaction detected. Consistency test results "
"are unreliable if one exists.")
return set(model.reactions) - (set(model.boundary) | biomass) | [
"def",
"get_internals",
"(",
"model",
")",
":",
"biomass",
"=",
"set",
"(",
"find_biomass_reaction",
"(",
"model",
")",
")",
"if",
"len",
"(",
"biomass",
")",
"==",
"0",
":",
"LOGGER",
".",
"warning",
"(",
"\"No biomass reaction detected. Consistency test results \"",
"\"are unreliable if one exists.\"",
")",
"return",
"set",
"(",
"model",
".",
"reactions",
")",
"-",
"(",
"set",
"(",
"model",
".",
"boundary",
")",
"|",
"biomass",
")"
] | 33.388889 | [
0.04,
0.2857142857142857,
0.03571428571428571,
0,
0.025974025974025976,
0.034482758620689655,
0,
0.14285714285714285,
0.14285714285714285,
0.13043478260869565,
0.041666666666666664,
0,
0.2857142857142857,
0.0425531914893617,
0.08,
0.05,
0.07272727272727272,
0.03076923076923077
] |
def pwordfreq(view, fnames):
"""Parallel word frequency counter.
view - An IPython DirectView
fnames - The filenames containing the split data.
"""
assert len(fnames) == len(view.targets)
view.scatter('fname', fnames, flatten=True)
ar = view.apply(wordfreq, Reference('fname'))
freqs_list = ar.get()
word_set = set()
for f in freqs_list:
word_set.update(f.keys())
freqs = dict(zip(word_set, repeat(0)))
for f in freqs_list:
for word, count in f.iteritems():
freqs[word] += count
return freqs | [
"def",
"pwordfreq",
"(",
"view",
",",
"fnames",
")",
":",
"assert",
"len",
"(",
"fnames",
")",
"==",
"len",
"(",
"view",
".",
"targets",
")",
"view",
".",
"scatter",
"(",
"'fname'",
",",
"fnames",
",",
"flatten",
"=",
"True",
")",
"ar",
"=",
"view",
".",
"apply",
"(",
"wordfreq",
",",
"Reference",
"(",
"'fname'",
")",
")",
"freqs_list",
"=",
"ar",
".",
"get",
"(",
")",
"word_set",
"=",
"set",
"(",
")",
"for",
"f",
"in",
"freqs_list",
":",
"word_set",
".",
"update",
"(",
"f",
".",
"keys",
"(",
")",
")",
"freqs",
"=",
"dict",
"(",
"zip",
"(",
"word_set",
",",
"repeat",
"(",
"0",
")",
")",
")",
"for",
"f",
"in",
"freqs_list",
":",
"for",
"word",
",",
"count",
"in",
"f",
".",
"iteritems",
"(",
")",
":",
"freqs",
"[",
"word",
"]",
"+=",
"count",
"return",
"freqs"
] | 31.055556 | [
0.03571428571428571,
0.05128205128205128,
0.5,
0.0625,
0.03773584905660377,
0.2857142857142857,
0.046511627906976744,
0.0425531914893617,
0.04081632653061224,
0.08,
0.1,
0.08333333333333333,
0.06060606060606061,
0.047619047619047616,
0.08333333333333333,
0.04878048780487805,
0.0625,
0.125
] |
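
The serial reduction at the end of `pwordfreq` (union of keys, then a per-key sum) is equivalent to summing `collections.Counter` objects, which is a handy way to sanity-check the parallel result:

```python
from collections import Counter

freqs_list = [Counter("to be or not".split()),
              Counter("to be is to do".split())]
total = sum(freqs_list, Counter())
print(total)  # Counter({'to': 3, 'be': 2, 'or': 1, 'not': 1, 'is': 1, 'do': 1})
```
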
def _decode(s, encoding=None, errors=None):
"""Decodes *s*."""
if encoding is None:
encoding = ENCODING
if errors is None:
errors = ENCODING_ERRORS
return s if isinstance(s, unicode) else s.decode(encoding, errors) | [
"def",
"_decode",
"(",
"s",
",",
"encoding",
"=",
"None",
",",
"errors",
"=",
"None",
")",
":",
"if",
"encoding",
"is",
"None",
":",
"encoding",
"=",
"ENCODING",
"if",
"errors",
"is",
"None",
":",
"errors",
"=",
"ENCODING_ERRORS",
"return",
"s",
"if",
"isinstance",
"(",
"s",
",",
"unicode",
")",
"else",
"s",
".",
"decode",
"(",
"encoding",
",",
"errors",
")"
] | 34.285714 | [
0.023255813953488372,
0.09090909090909091,
0.08333333333333333,
0.07407407407407407,
0.09090909090909091,
0.0625,
0.02857142857142857
] |
def open_in_browser(self, outfile):
"""Open the given HTML file in a browser.
"""
if self.browser == 'default':
webbrowser.open('file://%s' % outfile)
else:
browser = webbrowser.get(self.browser)
browser.open('file://%s' % outfile) | [
"def",
"open_in_browser",
"(",
"self",
",",
"outfile",
")",
":",
"if",
"self",
".",
"browser",
"==",
"'default'",
":",
"webbrowser",
".",
"open",
"(",
"'file://%s'",
"%",
"outfile",
")",
"else",
":",
"browser",
"=",
"webbrowser",
".",
"get",
"(",
"self",
".",
"browser",
")",
"browser",
".",
"open",
"(",
"'file://%s'",
"%",
"outfile",
")"
] | 36.5 | [
0.02857142857142857,
0.04081632653061224,
0.18181818181818182,
0.05405405405405406,
0.04,
0.15384615384615385,
0.04,
0.0425531914893617
] |
def update(self, reseed):
"""
Update that trail!
:param reseed: Whether we are in the normal reseed cycle or not.
"""
if self._clear:
for i in range(0, 3):
self._screen.print_at(" ",
self._x,
self._screen.start_line + self._y + i)
self._maybe_reseed(reseed)
else:
for i in range(0, 3):
self._screen.print_at(chr(randint(32, 126)),
self._x,
self._screen.start_line + self._y + i,
Screen.COLOUR_GREEN)
for i in range(4, 6):
self._screen.print_at(chr(randint(32, 126)),
self._x,
self._screen.start_line + self._y + i,
Screen.COLOUR_GREEN,
Screen.A_BOLD)
self._maybe_reseed(reseed) | [
"def",
"update",
"(",
"self",
",",
"reseed",
")",
":",
"if",
"self",
".",
"_clear",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"3",
")",
":",
"self",
".",
"_screen",
".",
"print_at",
"(",
"\" \"",
",",
"self",
".",
"_x",
",",
"self",
".",
"_screen",
".",
"start_line",
"+",
"self",
".",
"_y",
"+",
"i",
")",
"self",
".",
"_maybe_reseed",
"(",
"reseed",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"3",
")",
":",
"self",
".",
"_screen",
".",
"print_at",
"(",
"chr",
"(",
"randint",
"(",
"32",
",",
"126",
")",
")",
",",
"self",
".",
"_x",
",",
"self",
".",
"_screen",
".",
"start_line",
"+",
"self",
".",
"_y",
"+",
"i",
",",
"Screen",
".",
"COLOUR_GREEN",
")",
"for",
"i",
"in",
"range",
"(",
"4",
",",
"6",
")",
":",
"self",
".",
"_screen",
".",
"print_at",
"(",
"chr",
"(",
"randint",
"(",
"32",
",",
"126",
")",
")",
",",
"self",
".",
"_x",
",",
"self",
".",
"_screen",
".",
"start_line",
"+",
"self",
".",
"_y",
"+",
"i",
",",
"Screen",
".",
"COLOUR_GREEN",
",",
"Screen",
".",
"A_BOLD",
")",
"self",
".",
"_maybe_reseed",
"(",
"reseed",
")"
] | 42.08 | [
0.04,
0.18181818181818182,
0.07692307692307693,
0,
0.041666666666666664,
0.18181818181818182,
0.08695652173913043,
0.06060606060606061,
0.07142857142857142,
0.06521739130434782,
0.05263157894736842,
0.05263157894736842,
0.15384615384615385,
0.06060606060606061,
0.05,
0.06521739130434782,
0.039473684210526314,
0.06896551724137931,
0.06060606060606061,
0.05,
0.06521739130434782,
0.039473684210526314,
0.05172413793103448,
0.07692307692307693,
0.05263157894736842
] |
def refresh_access_token(self, refresh_token, redirect_uri=None):
'''
Refresh access token.
'''
redirect = redirect_uri or self._redirect_uri
resp_text = _http('POST', 'https://graph.qq.com/oauth2.0/token',
refresh_token=refresh_token,
client_id=self._client_id, client_secret=self._client_secret,
redirect_uri=redirect, grant_type='refresh_token')
return self._parse_access_token(resp_text) | [
"def",
"refresh_access_token",
"(",
"self",
",",
"refresh_token",
",",
"redirect_uri",
"=",
"None",
")",
":",
"redirect",
"=",
"redirect_uri",
"or",
"self",
".",
"_redirect_uri",
"resp_text",
"=",
"_http",
"(",
"'POST'",
",",
"'https://graph.qq.com/oauth2.0/token'",
",",
"refresh_token",
"=",
"refresh_token",
",",
"client_id",
"=",
"self",
".",
"_client_id",
",",
"client_secret",
"=",
"self",
".",
"_client_secret",
",",
"redirect_uri",
"=",
"redirect",
",",
"grant_type",
"=",
"'refresh_token'",
")",
"return",
"self",
".",
"_parse_access_token",
"(",
"resp_text",
")"
] | 50.8 | [
0.015384615384615385,
0.18181818181818182,
0.06896551724137931,
0.18181818181818182,
0.03773584905660377,
0.041666666666666664,
0.07407407407407407,
0.06896551724137931,
0.07894736842105263,
0.04
] |
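
The request above is a standard OAuth 2.0 refresh-token exchange; stripped of the HTTP helper, the payload it assembles looks like the following (field names follow the OAuth spec, all values are placeholders, and no network call is made here):

```python
from urllib.parse import urlencode

payload = {
    "grant_type": "refresh_token",
    "refresh_token": "REFRESH_TOKEN_VALUE",    # placeholder
    "client_id": "APP_ID",                     # placeholder
    "client_secret": "APP_SECRET",             # placeholder
    "redirect_uri": "https://example.com/cb",  # placeholder
}
print(urlencode(payload))
```
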
def estimate(self, maxiter=250, convergence=1e-7):
"""
run EM algorithm until convergence, or until maxiter reached
"""
self.loglik = np.zeros(maxiter)
iter = 0
while iter < maxiter:
self.loglik[iter] = self.E_step()
if np.isnan(self.loglik[iter]):
print("undefined log-likelihood")
break
self.M_step()
if self.loglik[iter] - self.loglik[iter - 1] < 0 and iter > 0:
print("log-likelihood decreased by %f at iteration %d"
% (self.loglik[iter] - self.loglik[iter - 1],
iter))
elif self.loglik[iter] - self.loglik[iter - 1] < convergence \
and iter > 0:
print("convergence at iteration %d, loglik = %f" %
(iter, self.loglik[iter]))
self.loglik = self.loglik[self.loglik < 0]
break
iter += 1 | [
"def",
"estimate",
"(",
"self",
",",
"maxiter",
"=",
"250",
",",
"convergence",
"=",
"1e-7",
")",
":",
"self",
".",
"loglik",
"=",
"np",
".",
"zeros",
"(",
"maxiter",
")",
"iter",
"=",
"0",
"while",
"iter",
"<",
"maxiter",
":",
"self",
".",
"loglik",
"[",
"iter",
"]",
"=",
"self",
".",
"E_step",
"(",
")",
"if",
"np",
".",
"isnan",
"(",
"self",
".",
"loglik",
"[",
"iter",
"]",
")",
":",
"print",
"(",
"\"undefined log-likelihood\"",
")",
"break",
"self",
".",
"M_step",
"(",
")",
"if",
"self",
".",
"loglik",
"[",
"iter",
"]",
"-",
"self",
".",
"loglik",
"[",
"iter",
"-",
"1",
"]",
"<",
"0",
"and",
"iter",
">",
"0",
":",
"print",
"(",
"\"log-likelihood decreased by %f at iteration %d\"",
"%",
"(",
"self",
".",
"loglik",
"[",
"iter",
"]",
"-",
"self",
".",
"loglik",
"[",
"iter",
"-",
"1",
"]",
",",
"iter",
")",
")",
"elif",
"self",
".",
"loglik",
"[",
"iter",
"]",
"-",
"self",
".",
"loglik",
"[",
"iter",
"-",
"1",
"]",
"<",
"convergence",
"and",
"iter",
">",
"0",
":",
"print",
"(",
"\"convergence at iteration %d, loglik = %f\"",
"%",
"(",
"iter",
",",
"self",
".",
"loglik",
"[",
"iter",
"]",
")",
")",
"self",
".",
"loglik",
"=",
"self",
".",
"loglik",
"[",
"self",
".",
"loglik",
"<",
"0",
"]",
"break",
"iter",
"+=",
"1"
] | 33.566667 | [
0.02,
0,
0.18181818181818182,
0.029411764705882353,
0.18181818181818182,
0,
0.05128205128205128,
0,
0.125,
0,
0.06896551724137931,
0,
0.044444444444444446,
0.046511627906976744,
0.03773584905660377,
0.08,
0.08,
0,
0.02702702702702703,
0.04054054054054054,
0.056338028169014086,
0.11428571428571428,
0.02702702702702703,
0.06060606060606061,
0.04285714285714286,
0.07547169811320754,
0.03225806451612903,
0.08,
0,
0.09523809523809523
] |
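
The convergence test in `estimate` compares successive log-likelihoods against a threshold. The same skeleton, stripped of the model specifics and with toy E/M steps whose log-likelihood approaches 0 from below (a generic sketch, not the class's actual interface):

```python
import numpy as np

def run_em(e_step, m_step, maxiter=250, convergence=1e-7):
    """Generic EM driver: iterate until the log-likelihood gain is tiny."""
    loglik = np.zeros(maxiter)
    for it in range(maxiter):
        loglik[it] = e_step()
        m_step()
        if it > 0 and loglik[it] - loglik[it - 1] < convergence:
            return loglik[: it + 1]
    return loglik

# Toy steps: each M-step halves the (negative) log-likelihood.
state = {"ll": -10.0}
ll = run_em(lambda: state["ll"], lambda: state.update(ll=state["ll"] / 2))
print(len(ll), ll[-1])
```
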
def es_version_check(f):
"""Decorator to check Elasticsearch version."""
@wraps(f)
def inner(*args, **kwargs):
cluster_ver = current_search.cluster_version[0]
client_ver = ES_VERSION[0]
if cluster_ver != client_ver:
raise click.ClickException(
'Elasticsearch version mismatch. Invenio was installed with '
'Elasticsearch v{client_ver}.x support, but the cluster runs '
'Elasticsearch v{cluster_ver}.x.'.format(
client_ver=client_ver,
cluster_ver=cluster_ver,
))
return f(*args, **kwargs)
return inner | [
"def",
"es_version_check",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cluster_ver",
"=",
"current_search",
".",
"cluster_version",
"[",
"0",
"]",
"client_ver",
"=",
"ES_VERSION",
"[",
"0",
"]",
"if",
"cluster_ver",
"!=",
"client_ver",
":",
"raise",
"click",
".",
"ClickException",
"(",
"'Elasticsearch version mismatch. Invenio was installed with '",
"'Elasticsearch v{client_ver}.x support, but the cluster runs '",
"'Elasticsearch v{cluster_ver}.x.'",
".",
"format",
"(",
"client_ver",
"=",
"client_ver",
",",
"cluster_ver",
"=",
"cluster_ver",
",",
")",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"inner"
] | 40.5625 | [
0.041666666666666664,
0.0392156862745098,
0.15384615384615385,
0.06451612903225806,
0.03636363636363636,
0.058823529411764705,
0.05405405405405406,
0.07692307692307693,
0.025974025974025976,
0.02564102564102564,
0.05263157894736842,
0.07142857142857142,
0.06818181818181818,
0.16666666666666666,
0.06060606060606061,
0.125
] |
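
A version-gate decorator like `es_version_check` is applied so the compatibility check runs before the wrapped command body. A self-contained sketch with the version sources stubbed out as constants (the real function reads them from `current_search.cluster_version` and `ES_VERSION`, and raises a click exception rather than a RuntimeError):

```python
from functools import wraps

CLUSTER_MAJOR = 7   # stand-in for current_search.cluster_version[0]
CLIENT_MAJOR = 7    # stand-in for ES_VERSION[0]

def es_version_check(f):
    @wraps(f)
    def inner(*args, **kwargs):
        if CLUSTER_MAJOR != CLIENT_MAJOR:
            raise RuntimeError("Elasticsearch version mismatch.")
        return f(*args, **kwargs)
    return inner

@es_version_check
def init_index(name):
    return "created {}".format(name)

print(init_index("records"))
```
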