Dataset columns:
text: string, lengths 75 to 104k (function source code)
code_tokens: sequence (tokenized source)
avg_line_len: float64, range 7.91 to 980
score: float64, range 0 to 0.18
def try_run_setup(**kwargs):
    """ Fails gracefully when various install steps don't work. """
    try:
        run_setup(**kwargs)
    except Exception as e:
        print(str(e))
        if "xgboost" in str(e).lower():
            kwargs["test_xgboost"] = False
            print("Couldn't install XGBoost for testing!")
            try_run_setup(**kwargs)
        elif "lightgbm" in str(e).lower():
            kwargs["test_lightgbm"] = False
            print("Couldn't install LightGBM for testing!")
            try_run_setup(**kwargs)
        elif kwargs["with_binary"]:
            kwargs["with_binary"] = False
            print("WARNING: The C extension could not be compiled, sklearn tree models not supported.")
            try_run_setup(**kwargs)
        else:
            print("ERROR: Failed to build!")
[ "def", "try_run_setup", "(", "*", "*", "kwargs", ")", ":", "try", ":", "run_setup", "(", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "print", "(", "str", "(", "e", ")", ")", "if", "\"xgboost\"", "in", "str", "(", "e", ")", ".", "lower", "(", ")", ":", "kwargs", "[", "\"test_xgboost\"", "]", "=", "False", "print", "(", "\"Couldn't install XGBoost for testing!\"", ")", "try_run_setup", "(", "*", "*", "kwargs", ")", "elif", "\"lightgbm\"", "in", "str", "(", "e", ")", ".", "lower", "(", ")", ":", "kwargs", "[", "\"test_lightgbm\"", "]", "=", "False", "print", "(", "\"Couldn't install LightGBM for testing!\"", ")", "try_run_setup", "(", "*", "*", "kwargs", ")", "elif", "kwargs", "[", "\"with_binary\"", "]", ":", "kwargs", "[", "\"with_binary\"", "]", "=", "False", "print", "(", "\"WARNING: The C extension could not be compiled, sklearn tree models not supported.\"", ")", "try_run_setup", "(", "*", "*", "kwargs", ")", "else", ":", "print", "(", "\"ERROR: Failed to build!\"", ")" ]
36.545455
0.002424
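The function above recursively retries the build with one feature disabled per failure. A minimal standalone sketch of the same graceful-degradation pattern, using a hypothetical build() stub in place of run_setup:

def build(with_binary=True):
    # Hypothetical build step: fails unless the C extension is disabled.
    if with_binary:
        raise RuntimeError("compiler not found")
    print("built pure-python package")

def try_build(**kwargs):
    try:
        build(**kwargs)
    except Exception:
        if kwargs.get("with_binary"):
            kwargs["with_binary"] = False
            try_build(**kwargs)  # retry without the C extension
        else:
            print("ERROR: Failed to build!")

try_build(with_binary=True)  # prints "built pure-python package"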
def _api_delete(path, data, server=None):
    ''' Do a DELETE request to the API '''
    server = _get_server(server)
    response = requests.delete(
        url=_get_url(server['ssl'], server['url'], server['port'], path),
        auth=_get_auth(server['user'], server['password']),
        headers=_get_headers(),
        params=data,
        verify=False
    )
    return _api_response(response)
[ "def", "_api_delete", "(", "path", ",", "data", ",", "server", "=", "None", ")", ":", "server", "=", "_get_server", "(", "server", ")", "response", "=", "requests", ".", "delete", "(", "url", "=", "_get_url", "(", "server", "[", "'ssl'", "]", ",", "server", "[", "'url'", "]", ",", "server", "[", "'port'", "]", ",", "path", ")", ",", "auth", "=", "_get_auth", "(", "server", "[", "'user'", "]", ",", "server", "[", "'password'", "]", ")", ",", "headers", "=", "_get_headers", "(", ")", ",", "params", "=", "data", ",", "verify", "=", "False", ")", "return", "_api_response", "(", "response", ")" ]
31.846154
0.002347
def _update_index(self, axis, key, value):
    """Update the current axis index based on a given key or value

    This is an internal method designed to set the origin or step for
    an index, whilst updating existing Index arrays as appropriate

    Examples
    --------
    >>> self._update_index("x0", 0)
    >>> self._update_index("dx", 0)

    To actually set an index array, use `_set_index`
    """
    # delete current value if given None
    if value is None:
        return delattr(self, key)
    _key = "_{}".format(key)
    index = "{[0]}index".format(axis)
    unit = "{[0]}unit".format(axis)
    # convert float to Quantity
    if not isinstance(value, Quantity):
        try:
            value = Quantity(value, getattr(self, unit))
        except TypeError:
            value = Quantity(float(value), getattr(self, unit))
    # if value is changing, delete current index
    try:
        curr = getattr(self, _key)
    except AttributeError:
        delattr(self, index)
    else:
        if (
                value is None
                or getattr(self, key) is None
                or not value.unit.is_equivalent(curr.unit)
                or value != curr
        ):
            delattr(self, index)
    # set new value
    setattr(self, _key, value)
    return value
[ "def", "_update_index", "(", "self", ",", "axis", ",", "key", ",", "value", ")", ":", "# delete current value if given None", "if", "value", "is", "None", ":", "return", "delattr", "(", "self", ",", "key", ")", "_key", "=", "\"_{}\"", ".", "format", "(", "key", ")", "index", "=", "\"{[0]}index\"", ".", "format", "(", "axis", ")", "unit", "=", "\"{[0]}unit\"", ".", "format", "(", "axis", ")", "# convert float to Quantity", "if", "not", "isinstance", "(", "value", ",", "Quantity", ")", ":", "try", ":", "value", "=", "Quantity", "(", "value", ",", "getattr", "(", "self", ",", "unit", ")", ")", "except", "TypeError", ":", "value", "=", "Quantity", "(", "float", "(", "value", ")", ",", "getattr", "(", "self", ",", "unit", ")", ")", "# if value is changing, delete current index", "try", ":", "curr", "=", "getattr", "(", "self", ",", "_key", ")", "except", "AttributeError", ":", "delattr", "(", "self", ",", "index", ")", "else", ":", "if", "(", "value", "is", "None", "or", "getattr", "(", "self", ",", "key", ")", "is", "None", "or", "not", "value", ".", "unit", ".", "is_equivalent", "(", "curr", ".", "unit", ")", "or", "value", "!=", "curr", ")", ":", "delattr", "(", "self", ",", "index", ")", "# set new value", "setattr", "(", "self", ",", "_key", ",", "value", ")", "return", "value" ]
31.111111
0.001385
def per_installer_data(self):
    """
    Return download data by installer name and version.

    :return: dict of cache data; keys are datetime objects, values are
      dict of installer name/version (str) to count (int).
    :rtype: dict
    """
    ret = {}
    for cache_date in self.cache_dates:
        data = self._cache_get(cache_date)
        ret[cache_date] = {}
        for inst_name, inst_data in data['by_installer'].items():
            for inst_ver, count in inst_data.items():
                k = self._compound_column_value(
                    inst_name,
                    self._shorten_version(inst_ver)
                )
                ret[cache_date][k] = count
        if len(ret[cache_date]) == 0:
            ret[cache_date]['unknown'] = 0
    return ret
[ "def", "per_installer_data", "(", "self", ")", ":", "ret", "=", "{", "}", "for", "cache_date", "in", "self", ".", "cache_dates", ":", "data", "=", "self", ".", "_cache_get", "(", "cache_date", ")", "ret", "[", "cache_date", "]", "=", "{", "}", "for", "inst_name", ",", "inst_data", "in", "data", "[", "'by_installer'", "]", ".", "items", "(", ")", ":", "for", "inst_ver", ",", "count", "in", "inst_data", ".", "items", "(", ")", ":", "k", "=", "self", ".", "_compound_column_value", "(", "inst_name", ",", "self", ".", "_shorten_version", "(", "inst_ver", ")", ")", "ret", "[", "cache_date", "]", "[", "k", "]", "=", "count", "if", "len", "(", "ret", "[", "cache_date", "]", ")", "==", "0", ":", "ret", "[", "cache_date", "]", "[", "'unknown'", "]", "=", "0", "return", "ret" ]
38.272727
0.002317
def decrease_writes_in_percent(
        current_provisioning, percent, min_provisioned_writes, log_tag):
    """ Decrease the current_provisioning by percent %

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type percent: int
    :param percent: How many percent we should decrease by
    :type min_provisioned_writes: int
    :param min_provisioned_writes: Configured min provisioned writes
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    percent = float(percent)
    decrease = int(float(current_provisioning) * (float(percent) / 100))
    updated_provisioning = current_provisioning - decrease
    min_provisioned_writes = __get_min_writes(
        current_provisioning,
        min_provisioned_writes,
        log_tag)

    if updated_provisioning < min_provisioned_writes:
        logger.info(
            '{0} - Reached provisioned writes min limit: {1:d}'.format(
                log_tag,
                int(min_provisioned_writes)))
        return min_provisioned_writes

    logger.debug(
        '{0} - Write provisioning will be decreased to {1:d} units'.format(
            log_tag,
            int(updated_provisioning)))
    return updated_provisioning
[ "def", "decrease_writes_in_percent", "(", "current_provisioning", ",", "percent", ",", "min_provisioned_writes", ",", "log_tag", ")", ":", "percent", "=", "float", "(", "percent", ")", "decrease", "=", "int", "(", "float", "(", "current_provisioning", ")", "*", "(", "float", "(", "percent", ")", "/", "100", ")", ")", "updated_provisioning", "=", "current_provisioning", "-", "decrease", "min_provisioned_writes", "=", "__get_min_writes", "(", "current_provisioning", ",", "min_provisioned_writes", ",", "log_tag", ")", "if", "updated_provisioning", "<", "min_provisioned_writes", ":", "logger", ".", "info", "(", "'{0} - Reached provisioned writes min limit: {1:d}'", ".", "format", "(", "log_tag", ",", "int", "(", "min_provisioned_writes", ")", ")", ")", "return", "min_provisioned_writes", "logger", ".", "debug", "(", "'{0} - Write provisioning will be decreased to {1:d} units'", ".", "format", "(", "log_tag", ",", "int", "(", "updated_provisioning", ")", ")", ")", "return", "updated_provisioning" ]
34.888889
0.000775
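A worked example of the decrease arithmetic above (floor check omitted):

# Worked example: decreasing 100 write units by 30 percent.
current_provisioning = 100
percent = 30.0
decrease = int(float(current_provisioning) * (percent / 100))  # 30
updated_provisioning = current_provisioning - decrease         # 70
assert (decrease, updated_provisioning) == (30, 70)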
def init2(
        self,
        input_tube,      # Read task from the input tube.
        output_tubes,    # Send result on all the output tubes.
        num_workers,     # Total number of workers in the stage.
        disable_result,  # Whether to override any result with None.
        do_stop_task,    # Whether to call doTask() on "stop" request.
):
    """Create *num_workers* worker objects with *input_tube* and
    an iterable of *output_tubes*. The worker reads a task from
    *input_tube* and writes the result to *output_tubes*."""
    super(UnorderedWorker, self).__init__()
    self._tube_task_input = input_tube
    self._tubes_result_output = output_tubes
    self._num_workers = num_workers
    self._disable_result = disable_result
    self._do_stop_task = do_stop_task
[ "def", "init2", "(", "self", ",", "input_tube", ",", "# Read task from the input tube.", "output_tubes", ",", "# Send result on all the output tubes.", "num_workers", ",", "# Total number of workers in the stage.", "disable_result", ",", "# Whether to override any result with None.", "do_stop_task", ",", "# Whether to call doTask() on \"stop\" request.", ")", ":", "super", "(", "UnorderedWorker", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "_tube_task_input", "=", "input_tube", "self", ".", "_tubes_result_output", "=", "output_tubes", "self", ".", "_num_workers", "=", "num_workers", "self", ".", "_disable_result", "=", "disable_result", "self", ".", "_do_stop_task", "=", "do_stop_task" ]
45.5
0.007177
def parents_from_boolRule(self, rule):
    """ Determine parents based on boolean update rule.

    Returns list of parents.
    """
    rule_pa = rule.replace('(', '').replace(')', '').replace('or', '').replace('and', '').replace('not', '')
    rule_pa = rule_pa.split()
    # if there are no parents, return an empty list
    if not rule_pa:
        return []
    # check whether these are meaningful parents
    pa_old = []
    pa_delete = []
    for pa in rule_pa:
        if pa not in self.varNames.keys():
            settings.m(0, 'list of available variables:')
            settings.m(0, list(self.varNames.keys()))
            message = ('processing of rule "' + rule
                       + '" yields an invalid parent: ' + pa
                       + ' | check whether the syntax is correct: \n'
                       + 'only python expressions "(", ")", "or", "and", "not" '
                       + 'are allowed, variable names and expressions have to be separated '
                       + 'by white spaces')
            raise ValueError(message)
        if pa in pa_old:
            pa_delete.append(pa)
        else:
            pa_old.append(pa)  # remember seen parents so duplicates are caught
    for pa in pa_delete:
        rule_pa.remove(pa)
    return rule_pa
[ "def", "parents_from_boolRule", "(", "self", ",", "rule", ")", ":", "rule_pa", "=", "rule", ".", "replace", "(", "'('", ",", "''", ")", ".", "replace", "(", "')'", ",", "''", ")", ".", "replace", "(", "'or'", ",", "''", ")", ".", "replace", "(", "'and'", ",", "''", ")", ".", "replace", "(", "'not'", ",", "''", ")", "rule_pa", "=", "rule_pa", ".", "split", "(", ")", "# if there are no parents, continue", "if", "not", "rule_pa", ":", "return", "[", "]", "# check whether these are meaningful parents", "pa_old", "=", "[", "]", "pa_delete", "=", "[", "]", "for", "pa", "in", "rule_pa", ":", "if", "pa", "not", "in", "self", ".", "varNames", ".", "keys", "(", ")", ":", "settings", ".", "m", "(", "0", ",", "'list of available variables:'", ")", "settings", ".", "m", "(", "0", ",", "list", "(", "self", ".", "varNames", ".", "keys", "(", ")", ")", ")", "message", "=", "(", "'processing of rule \"'", "+", "rule", "+", "' yields an invalid parent: '", "+", "pa", "+", "' | check whether the syntax is correct: \\n'", "+", "'only python expressions \"(\",\")\",\"or\",\"and\",\"not\" '", "+", "'are allowed, variable names and expressions have to be separated '", "+", "'by white spaces'", ")", "raise", "ValueError", "(", "message", ")", "if", "pa", "in", "pa_old", ":", "pa_delete", ".", "append", "(", "pa", ")", "for", "pa", "in", "pa_delete", ":", "rule_pa", ".", "remove", "(", "pa", ")", "return", "rule_pa" ]
43.448276
0.01087
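A standalone trace of the token-stripping step above, assuming a hypothetical rule with a duplicated parent:

rule = '(A or B) and not A'
tokens = rule.replace('(', '').replace(')', '').replace('or', '') \
             .replace('and', '').replace('not', '').split()
# tokens == ['A', 'B', 'A']; after duplicate removal the parents are ['A', 'B']
# (note the plain substring replaces would also mangle variable names that
# contain "or", "and", or "not" as substrings)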
def download_artifact_bundle(self, id_or_uri, file_path):
    """
    Download the Artifact Bundle.

    Args:
        id_or_uri: ID or URI of the Artifact Bundle.
        file_path(str): Destination file path.

    Returns:
        bool: Successfully downloaded.
    """
    uri = self.DOWNLOAD_PATH + '/' + extract_id_from_uri(id_or_uri)
    return self._client.download(uri, file_path)
[ "def", "download_artifact_bundle", "(", "self", ",", "id_or_uri", ",", "file_path", ")", ":", "uri", "=", "self", ".", "DOWNLOAD_PATH", "+", "'/'", "+", "extract_id_from_uri", "(", "id_or_uri", ")", "return", "self", ".", "_client", ".", "download", "(", "uri", ",", "file_path", ")" ]
32
0.004673
def prior_omega_m(self, omega_m, omega_m_min=0, omega_m_max=1):
    """
    checks whether the parameter omega_m is within the given bounds

    :param omega_m: matter density parameter to check
    :param omega_m_min: lower bound on omega_m
    :param omega_m_max: upper bound on omega_m
    :return: (penalty, validity) tuple; (0, True) if within bounds
    """
    if omega_m < omega_m_min or omega_m > omega_m_max:
        penalty = -10**15
        return penalty, False
    else:
        return 0, True
[ "def", "prior_omega_m", "(", "self", ",", "omega_m", ",", "omega_m_min", "=", "0", ",", "omega_m_max", "=", "1", ")", ":", "if", "omega_m", "<", "omega_m_min", "or", "omega_m", ">", "omega_m_max", ":", "penalty", "=", "-", "10", "**", "15", "return", "penalty", ",", "False", "else", ":", "return", "0", ",", "True" ]
31.384615
0.004762
def chatToId(url):
    """
    Extract the conversation ID from a conversation URL.

    Matches addresses containing ``conversations/<chat>``.

    Args:
        url (str): Skype API URL

    Returns:
        str: extracted identifier
    """
    match = re.search(r"conversations/([0-9]+:[^/]+)", url)
    return match.group(1) if match else None
[ "def", "chatToId", "(", "url", ")", ":", "match", "=", "re", ".", "search", "(", "r\"conversations/([0-9]+:[^/]+)\"", ",", "url", ")", "return", "match", ".", "group", "(", "1", ")", "if", "match", "else", "None" ]
26.785714
0.005155
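A quick check of the extraction regex, using a made-up URL of the documented conversations/<chat> shape:

import re

url = "https://example.com/v1/users/ME/conversations/8:alice/messages"  # illustrative URL
match = re.search(r"conversations/([0-9]+:[^/]+)", url)
print(match.group(1))  # -> "8:alice"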
def listdir(self, dirname):
    """Returns a list of entries contained within a directory."""
    client = boto3.client("s3")
    bucket, path = self.bucket_and_path(dirname)
    p = client.get_paginator("list_objects")
    if not path.endswith("/"):
        path += "/"  # This will now only retrieve subdir content
    keys = []
    for r in p.paginate(Bucket=bucket, Prefix=path, Delimiter="/"):
        keys.extend(o["Prefix"][len(path):-1]
                    for o in r.get("CommonPrefixes", []))
        for o in r.get("Contents", []):
            key = o["Key"][len(path):]
            if key:  # Skip the base dir, which would add an empty string
                keys.append(key)
    return keys
[ "def", "listdir", "(", "self", ",", "dirname", ")", ":", "client", "=", "boto3", ".", "client", "(", "\"s3\"", ")", "bucket", ",", "path", "=", "self", ".", "bucket_and_path", "(", "dirname", ")", "p", "=", "client", ".", "get_paginator", "(", "\"list_objects\"", ")", "if", "not", "path", ".", "endswith", "(", "\"/\"", ")", ":", "path", "+=", "\"/\"", "# This will now only retrieve subdir content", "keys", "=", "[", "]", "for", "r", "in", "p", ".", "paginate", "(", "Bucket", "=", "bucket", ",", "Prefix", "=", "path", ",", "Delimiter", "=", "\"/\"", ")", ":", "keys", ".", "extend", "(", "o", "[", "\"Prefix\"", "]", "[", "len", "(", "path", ")", ":", "-", "1", "]", "for", "o", "in", "r", ".", "get", "(", "\"CommonPrefixes\"", ",", "[", "]", ")", ")", "for", "o", "in", "r", ".", "get", "(", "\"Contents\"", ",", "[", "]", ")", ":", "key", "=", "o", "[", "\"Key\"", "]", "[", "len", "(", "path", ")", ":", "]", "if", "key", ":", "# Skip the base dir, which would add an empty string", "keys", ".", "append", "(", "key", ")", "return", "keys" ]
48.4
0.004054
def double_exponential_moving_average(data, period):
    """
    Double Exponential Moving Average.

    Formula:
    DEMA = 2*EMA - EMA(EMA)
    """
    catch_errors.check_for_period_error(data, period)

    dema = (2 * ema(data, period)) - ema(ema(data, period), period)
    return dema
[ "def", "double_exponential_moving_average", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "dema", "=", "(", "2", "*", "ema", "(", "data", ",", "period", ")", ")", "-", "ema", "(", "ema", "(", "data", ",", "period", ")", ",", "period", ")", "return", "dema" ]
25.272727
0.003472
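A self-contained sketch of the DEMA formula, using pandas' span-based EMA as a stand-in for the library's ema helper (an assumption; the original helper may use a different smoothing convention):

import pandas as pd

def dema(data, period):
    # DEMA = 2*EMA - EMA(EMA), with pandas' exponentially weighted mean.
    s = pd.Series(data)
    e = s.ewm(span=period, adjust=False).mean()
    ee = e.ewm(span=period, adjust=False).mean()
    return 2 * e - ee

print(dema([1, 2, 3, 4, 5], period=3).iloc[-1])  # tracks the trend more closely than a plain EMA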
def _check_requirements(self):
    """
    Check if VPCS is available with the correct version.
    """
    path = self._vpcs_path()
    if not path:
        raise VPCSError("No path to a VPCS executable has been set")

    # This raises an error if ubridge is not available
    self.ubridge_path

    if not os.path.isfile(path):
        raise VPCSError("VPCS program '{}' is not accessible".format(path))

    if not os.access(path, os.X_OK):
        raise VPCSError("VPCS program '{}' is not executable".format(path))

    yield from self._check_vpcs_version()
[ "def", "_check_requirements", "(", "self", ")", ":", "path", "=", "self", ".", "_vpcs_path", "(", ")", "if", "not", "path", ":", "raise", "VPCSError", "(", "\"No path to a VPCS executable has been set\"", ")", "# This raise an error if ubridge is not available", "self", ".", "ubridge_path", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "raise", "VPCSError", "(", "\"VPCS program '{}' is not accessible\"", ".", "format", "(", "path", ")", ")", "if", "not", "os", ".", "access", "(", "path", ",", "os", ".", "X_OK", ")", ":", "raise", "VPCSError", "(", "\"VPCS program '{}' is not executable\"", ".", "format", "(", "path", ")", ")", "yield", "from", "self", ".", "_check_vpcs_version", "(", ")" ]
31.421053
0.003252
def numpy_to_texture(image):
    """Convert a NumPy image array to a vtk.vtkTexture"""
    if not isinstance(image, np.ndarray):
        raise TypeError('Unknown input type ({})'.format(type(image)))
    if image.ndim != 3 or image.shape[2] != 3:
        raise AssertionError('Input image must be nn by nm by RGB')
    grid = vtki.UniformGrid((image.shape[1], image.shape[0], 1))
    grid.point_arrays['Image'] = np.flip(image.swapaxes(0, 1), axis=1).reshape((-1, 3), order='F')
    grid.set_active_scalar('Image')
    return image_to_texture(grid)
[ "def", "numpy_to_texture", "(", "image", ")", ":", "if", "not", "isinstance", "(", "image", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "'Unknown input type ({})'", ".", "format", "(", "type", "(", "image", ")", ")", ")", "if", "image", ".", "ndim", "!=", "3", "or", "image", ".", "shape", "[", "2", "]", "!=", "3", ":", "raise", "AssertionError", "(", "'Input image must be nn by nm by RGB'", ")", "grid", "=", "vtki", ".", "UniformGrid", "(", "(", "image", ".", "shape", "[", "1", "]", ",", "image", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "grid", ".", "point_arrays", "[", "'Image'", "]", "=", "np", ".", "flip", "(", "image", ".", "swapaxes", "(", "0", ",", "1", ")", ",", "axis", "=", "1", ")", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ",", "order", "=", "'F'", ")", "grid", ".", "set_active_scalar", "(", "'Image'", ")", "return", "image_to_texture", "(", "grid", ")" ]
53.8
0.005484
def list_all_versions(pkg,
                      bin_env=None,
                      include_alpha=False,
                      include_beta=False,
                      include_rc=False,
                      user=None,
                      cwd=None,
                      index_url=None,
                      extra_index_url=None):
    '''
    .. versionadded:: 2017.7.3

    List all available versions of a pip package

    pkg
        The package to check

    bin_env
        Path to pip (or to a virtualenv). This can be used to specify the path
        to the pip to use when more than one Python release is installed (e.g.
        ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``). If a directory path is
        specified, it is assumed to be a virtualenv.

    include_alpha
        Include alpha versions in the list

    include_beta
        Include beta versions in the list

    include_rc
        Include release candidate versions in the list

    user
        The user under which to run pip

    cwd
        Directory from which to run pip

    index_url
        Base URL of Python Package Index
        .. versionadded:: 2019.2.0

    extra_index_url
        Additional URL of Python Package Index
        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' pip.list_all_versions <package name>
    '''
    cmd = _get_pip_bin(bin_env)
    cmd.extend(['install', '{0}==versions'.format(pkg)])

    if index_url:
        if not salt.utils.url.validate(index_url, VALID_PROTOS):
            raise CommandExecutionError(
                '\'{0}\' is not a valid URL'.format(index_url)
            )
        cmd.extend(['--index-url', index_url])

    if extra_index_url:
        if not salt.utils.url.validate(extra_index_url, VALID_PROTOS):
            raise CommandExecutionError(
                '\'{0}\' is not a valid URL'.format(extra_index_url)
            )
        cmd.extend(['--extra-index-url', extra_index_url])

    cmd_kwargs = dict(cwd=cwd, runas=user, output_loglevel='quiet',
                      redirect_stderr=True)
    if bin_env and os.path.isdir(bin_env):
        cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}

    result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)

    filtered = []
    if not include_alpha:
        filtered.append('a')
    if not include_beta:
        filtered.append('b')
    if not include_rc:
        filtered.append('rc')
    if filtered:
        excludes = re.compile(r'^((?!{0}).)*$'.format('|'.join(filtered)))
    else:
        excludes = re.compile(r'')

    versions = []
    for line in result['stdout'].splitlines():
        match = re.search(r'\s*Could not find a version.* \(from versions: (.*)\)', line)
        if match:
            versions = [v for v in match.group(1).split(', ')
                        if v and excludes.match(v)]
            versions.sort(key=pkg_resources.parse_version)
            break
    if not versions:
        return None
    return versions
[ "def", "list_all_versions", "(", "pkg", ",", "bin_env", "=", "None", ",", "include_alpha", "=", "False", ",", "include_beta", "=", "False", ",", "include_rc", "=", "False", ",", "user", "=", "None", ",", "cwd", "=", "None", ",", "index_url", "=", "None", ",", "extra_index_url", "=", "None", ")", ":", "cmd", "=", "_get_pip_bin", "(", "bin_env", ")", "cmd", ".", "extend", "(", "[", "'install'", ",", "'{0}==versions'", ".", "format", "(", "pkg", ")", "]", ")", "if", "index_url", ":", "if", "not", "salt", ".", "utils", ".", "url", ".", "validate", "(", "index_url", ",", "VALID_PROTOS", ")", ":", "raise", "CommandExecutionError", "(", "'\\'{0}\\' is not a valid URL'", ".", "format", "(", "index_url", ")", ")", "cmd", ".", "extend", "(", "[", "'--index-url'", ",", "index_url", "]", ")", "if", "extra_index_url", ":", "if", "not", "salt", ".", "utils", ".", "url", ".", "validate", "(", "extra_index_url", ",", "VALID_PROTOS", ")", ":", "raise", "CommandExecutionError", "(", "'\\'{0}\\' is not a valid URL'", ".", "format", "(", "extra_index_url", ")", ")", "cmd", ".", "extend", "(", "[", "'--extra-index-url'", ",", "extra_index_url", "]", ")", "cmd_kwargs", "=", "dict", "(", "cwd", "=", "cwd", ",", "runas", "=", "user", ",", "output_loglevel", "=", "'quiet'", ",", "redirect_stderr", "=", "True", ")", "if", "bin_env", "and", "os", ".", "path", ".", "isdir", "(", "bin_env", ")", ":", "cmd_kwargs", "[", "'env'", "]", "=", "{", "'VIRTUAL_ENV'", ":", "bin_env", "}", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "*", "*", "cmd_kwargs", ")", "filtered", "=", "[", "]", "if", "not", "include_alpha", ":", "filtered", ".", "append", "(", "'a'", ")", "if", "not", "include_beta", ":", "filtered", ".", "append", "(", "'b'", ")", "if", "not", "include_rc", ":", "filtered", ".", "append", "(", "'rc'", ")", "if", "filtered", ":", "excludes", "=", "re", ".", "compile", "(", "r'^((?!{0}).)*$'", ".", "format", "(", "'|'", ".", "join", "(", "filtered", ")", ")", ")", "else", ":", "excludes", "=", "re", ".", "compile", "(", "r''", ")", "versions", "=", "[", "]", "for", "line", "in", "result", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "match", "=", "re", ".", "search", "(", "r'\\s*Could not find a version.* \\(from versions: (.*)\\)'", ",", "line", ")", "if", "match", ":", "versions", "=", "[", "v", "for", "v", "in", "match", ".", "group", "(", "1", ")", ".", "split", "(", "', '", ")", "if", "v", "and", "excludes", ".", "match", "(", "v", ")", "]", "versions", ".", "sort", "(", "key", "=", "pkg_resources", ".", "parse_version", ")", "break", "if", "not", "versions", ":", "return", "None", "return", "versions" ]
28.989796
0.001361
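The version list above is scraped from pip's failure message for the deliberately unsatisfiable pin pkg==versions. A standalone check of that regex on a representative error line (exact wording varies across pip releases):

import re

line = ("  Could not find a version that satisfies the requirement foo==versions "
        "(from versions: 1.0, 2.0a1, 2.0b2, 2.0rc1, 2.0)")  # representative pip output
match = re.search(r'\s*Could not find a version.* \(from versions: (.*)\)', line)
excludes = re.compile(r'^((?!a|b|rc).)*$')  # drop alpha/beta/rc pre-releases
print([v for v in match.group(1).split(', ') if excludes.match(v)])  # ['1.0', '2.0']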
def add_copy_spec(self, copyspecs, sizelimit=None, tailit=True, pred=None):
    """Add a file or glob but limit it to sizelimit megabytes. If fname is
    a single file the file will be tailed to meet sizelimit. If the first
    file in a glob is too large it will be tailed to meet the sizelimit.
    """
    if not self.test_predicate(pred=pred):
        self._log_info("skipped copy spec '%s' due to predicate (%s)" %
                       (copyspecs, self.get_predicate(pred=pred)))
        return

    if sizelimit is None:
        sizelimit = self.get_option("log_size")

    if self.get_option('all_logs'):
        sizelimit = None

    if sizelimit:
        sizelimit *= 1024 * 1024  # convert sizelimit from MB to bytes

    if not copyspecs:
        return False

    if isinstance(copyspecs, six.string_types):
        copyspecs = [copyspecs]

    for copyspec in copyspecs:
        if not (copyspec and len(copyspec)):
            return False

        if self.use_sysroot():
            copyspec = self.join_sysroot(copyspec)

        files = self._expand_copy_spec(copyspec)

        if len(files) == 0:
            continue

        # Files should be sorted in most-recently-modified order, so that
        # we collect the newest data first before reaching the limit.
        def getmtime(path):
            try:
                return os.path.getmtime(path)
            except OSError:
                return 0

        files.sort(key=getmtime, reverse=True)
        current_size = 0
        limit_reached = False
        _file = None

        for _file in files:
            if self._is_forbidden_path(_file):
                self._log_debug("skipping forbidden path '%s'" % _file)
                continue
            try:
                current_size += os.stat(_file)[stat.ST_SIZE]
            except OSError:
                self._log_info("failed to stat '%s'" % _file)
            if sizelimit and current_size > sizelimit:
                limit_reached = True
                break
            self._add_copy_paths([_file])

        if limit_reached and tailit and not _file_is_compressed(_file):
            file_name = _file
            if file_name[0] == os.sep:
                file_name = file_name.lstrip(os.sep)
            strfile = file_name.replace(os.path.sep, ".") + ".tailed"
            self.add_string_as_file(tail(_file, sizelimit), strfile)
            rel_path = os.path.relpath('/', os.path.dirname(_file))
            link_path = os.path.join(rel_path, 'sos_strings',
                                     self.name(), strfile)
            self.archive.add_link(link_path, _file)
[ "def", "add_copy_spec", "(", "self", ",", "copyspecs", ",", "sizelimit", "=", "None", ",", "tailit", "=", "True", ",", "pred", "=", "None", ")", ":", "if", "not", "self", ".", "test_predicate", "(", "pred", "=", "pred", ")", ":", "self", ".", "_log_info", "(", "\"skipped copy spec '%s' due to predicate (%s)\"", "%", "(", "copyspecs", ",", "self", ".", "get_predicate", "(", "pred", "=", "pred", ")", ")", ")", "return", "if", "sizelimit", "is", "None", ":", "sizelimit", "=", "self", ".", "get_option", "(", "\"log_size\"", ")", "if", "self", ".", "get_option", "(", "'all_logs'", ")", ":", "sizelimit", "=", "None", "if", "sizelimit", ":", "sizelimit", "*=", "1024", "*", "1024", "# in MB", "if", "not", "copyspecs", ":", "return", "False", "if", "isinstance", "(", "copyspecs", ",", "six", ".", "string_types", ")", ":", "copyspecs", "=", "[", "copyspecs", "]", "for", "copyspec", "in", "copyspecs", ":", "if", "not", "(", "copyspec", "and", "len", "(", "copyspec", ")", ")", ":", "return", "False", "if", "self", ".", "use_sysroot", "(", ")", ":", "copyspec", "=", "self", ".", "join_sysroot", "(", "copyspec", ")", "files", "=", "self", ".", "_expand_copy_spec", "(", "copyspec", ")", "if", "len", "(", "files", ")", "==", "0", ":", "continue", "# Files hould be sorted in most-recently-modified order, so that", "# we collect the newest data first before reaching the limit.", "def", "getmtime", "(", "path", ")", ":", "try", ":", "return", "os", ".", "path", ".", "getmtime", "(", "path", ")", "except", "OSError", ":", "return", "0", "files", ".", "sort", "(", "key", "=", "getmtime", ",", "reverse", "=", "True", ")", "current_size", "=", "0", "limit_reached", "=", "False", "_file", "=", "None", "for", "_file", "in", "files", ":", "if", "self", ".", "_is_forbidden_path", "(", "_file", ")", ":", "self", ".", "_log_debug", "(", "\"skipping forbidden path '%s'\"", "%", "_file", ")", "continue", "try", ":", "current_size", "+=", "os", ".", "stat", "(", "_file", ")", "[", "stat", ".", "ST_SIZE", "]", "except", "OSError", ":", "self", ".", "_log_info", "(", "\"failed to stat '%s'\"", "%", "_file", ")", "if", "sizelimit", "and", "current_size", ">", "sizelimit", ":", "limit_reached", "=", "True", "break", "self", ".", "_add_copy_paths", "(", "[", "_file", "]", ")", "if", "limit_reached", "and", "tailit", "and", "not", "_file_is_compressed", "(", "_file", ")", ":", "file_name", "=", "_file", "if", "file_name", "[", "0", "]", "==", "os", ".", "sep", ":", "file_name", "=", "file_name", ".", "lstrip", "(", "os", ".", "sep", ")", "strfile", "=", "file_name", ".", "replace", "(", "os", ".", "path", ".", "sep", ",", "\".\"", ")", "+", "\".tailed\"", "self", ".", "add_string_as_file", "(", "tail", "(", "_file", ",", "sizelimit", ")", ",", "strfile", ")", "rel_path", "=", "os", ".", "path", ".", "relpath", "(", "'/'", ",", "os", ".", "path", ".", "dirname", "(", "_file", ")", ")", "link_path", "=", "os", ".", "path", ".", "join", "(", "rel_path", ",", "'sos_strings'", ",", "self", ".", "name", "(", ")", ",", "strfile", ")", "self", ".", "archive", ".", "add_link", "(", "link_path", ",", "_file", ")" ]
37.27027
0.000706
def status(self, value):
    """Set the workflow stage status."""
    # FIXME(BM) This is currently a hack because workflow stages
    # don't each have their own db entry.
    pb_key = SchedulingObject.get_key(PB_KEY, self._pb_id)
    stages = DB.get_hash_value(pb_key, 'workflow_stages')
    stages = ast.literal_eval(stages)
    stages[self._index]['status'] = value
    DB.set_hash_value(pb_key, 'workflow_stages', stages)
[ "def", "status", "(", "self", ",", "value", ")", ":", "# FIXME(BM) This is currently a hack because workflow stages", "# don't each have their own db entry.", "pb_key", "=", "SchedulingObject", ".", "get_key", "(", "PB_KEY", ",", "self", ".", "_pb_id", ")", "stages", "=", "DB", ".", "get_hash_value", "(", "pb_key", ",", "'workflow_stages'", ")", "stages", "=", "ast", ".", "literal_eval", "(", "stages", ")", "stages", "[", "self", ".", "_index", "]", "[", "'status'", "]", "=", "value", "DB", ".", "set_hash_value", "(", "pb_key", ",", "'workflow_stages'", ",", "stages", ")" ]
51.111111
0.004274
def download(source=None, username=None, directory='.', max_size='128m',
             quiet=None, debug=None):
    """
    Download public data from Open Humans.

    :param source: The data source to download from. Defaults to None.
    :param username: The username of the user. Defaults to None.
    :param directory: The target directory to which data is downloaded.
    :param max_size: The maximum file size. Defaults to 128m.
    :param quiet: Logging level flag. Defaults to None.
    :param debug: Logging level flag. Defaults to None.
    """
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    elif quiet:
        logging.basicConfig(level=logging.ERROR)
    else:
        logging.basicConfig(level=logging.INFO)

    logging.debug("Running with source: '{}'".format(source) +
                  " and username: '{}'".format(username) +
                  " and directory: '{}'".format(directory) +
                  " and max-size: '{}'".format(max_size))

    signal.signal(signal.SIGINT, signal_handler_cb)

    max_bytes = parse_size(max_size)

    options = {}
    if source:
        options['source'] = source
    if username:
        options['username'] = username
    page = '{}?{}'.format(BASE_URL_API, urlencode(options))
    results = []

    counter = 1
    logging.info('Retrieving metadata')
    while True:
        logging.info('Retrieving page {}'.format(counter))
        response = get_page(page)
        results = results + response['results']
        if response['next']:
            page = response['next']
        else:
            break
        counter += 1

    logging.info('Downloading {} files'.format(len(results)))
    download_url_partial = partial(download_url, directory=directory,
                                   max_bytes=max_bytes)
    with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
        for value in executor.map(download_url_partial, results):
            if value:
                logging.info(value)
[ "def", "download", "(", "source", "=", "None", ",", "username", "=", "None", ",", "directory", "=", "'.'", ",", "max_size", "=", "'128m'", ",", "quiet", "=", "None", ",", "debug", "=", "None", ")", ":", "if", "debug", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ")", "elif", "quiet", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "ERROR", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "logging", ".", "debug", "(", "\"Running with source: '{}'\"", ".", "format", "(", "source", ")", "+", "\" and username: '{}'\"", ".", "format", "(", "username", ")", "+", "\" and directory: '{}'\"", ".", "format", "(", "directory", ")", "+", "\" and max-size: '{}'\"", ".", "format", "(", "max_size", ")", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal_handler_cb", ")", "max_bytes", "=", "parse_size", "(", "max_size", ")", "options", "=", "{", "}", "if", "source", ":", "options", "[", "'source'", "]", "=", "source", "if", "username", ":", "options", "[", "'username'", "]", "=", "username", "page", "=", "'{}?{}'", ".", "format", "(", "BASE_URL_API", ",", "urlencode", "(", "options", ")", ")", "results", "=", "[", "]", "counter", "=", "1", "logging", ".", "info", "(", "'Retrieving metadata'", ")", "while", "True", ":", "logging", ".", "info", "(", "'Retrieving page {}'", ".", "format", "(", "counter", ")", ")", "response", "=", "get_page", "(", "page", ")", "results", "=", "results", "+", "response", "[", "'results'", "]", "if", "response", "[", "'next'", "]", ":", "page", "=", "response", "[", "'next'", "]", "else", ":", "break", "counter", "+=", "1", "logging", ".", "info", "(", "'Downloading {} files'", ".", "format", "(", "len", "(", "results", ")", ")", ")", "download_url_partial", "=", "partial", "(", "download_url", ",", "directory", "=", "directory", ",", "max_bytes", "=", "max_bytes", ")", "with", "concurrent", ".", "futures", ".", "ProcessPoolExecutor", "(", "max_workers", "=", "4", ")", "as", "executor", ":", "for", "value", "in", "executor", ".", "map", "(", "download_url_partial", ",", "results", ")", ":", "if", "value", ":", "logging", ".", "info", "(", "value", ")" ]
29.732394
0.000459
def cpf(numero):
    """Validates a CPF number. The number should be given as a string
    containing 11 numeric digits. If the given number is invalid, a
    :exc:`NumeroCPFError` exception will be raised.

    This validation implementation was carefully copied from
    `python-sped <http://git.io/vfuGW>`.
    """
    _digitos = [int(c) for c in numero if c.isdigit()]

    if len(_digitos) != 11 or len(numero) != 11:
        raise NumeroCPFError('Nao possui 11 digitos: {!r}'.format(numero))

    if numero == numero[0] * 11:
        raise NumeroCPFError('Todos os digitos iguais: {!r}'.format(numero))

    multiplicadores = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]

    soma1 = sum([_digitos[i] * multiplicadores[i+1] for i in range(9)])
    soma2 = sum([_digitos[i] * multiplicadores[i] for i in range(10)])

    digito1 = 11 - (soma1 % 11)
    digito2 = 11 - (soma2 % 11)

    if digito1 >= 10:
        digito1 = 0

    if digito2 >= 10:
        digito2 = 0

    if _digitos[9] != digito1 or _digitos[10] != digito2:
        raise NumeroCPFError('Digitos verificadores invalidos: {!r}'.format(numero))
[ "def", "cpf", "(", "numero", ")", ":", "_digitos", "=", "[", "int", "(", "c", ")", "for", "c", "in", "numero", "if", "c", ".", "isdigit", "(", ")", "]", "if", "len", "(", "_digitos", ")", "!=", "11", "or", "len", "(", "numero", ")", "!=", "11", ":", "raise", "NumeroCPFError", "(", "'Nao possui 11 digitos: {!r}'", ".", "format", "(", "numero", ")", ")", "if", "numero", "==", "numero", "[", "0", "]", "*", "11", ":", "raise", "NumeroCPFError", "(", "'Todos os digitos iguais: {!r}'", ".", "format", "(", "numero", ")", ")", "multiplicadores", "=", "[", "11", ",", "10", ",", "9", ",", "8", ",", "7", ",", "6", ",", "5", ",", "4", ",", "3", ",", "2", "]", "soma1", "=", "sum", "(", "[", "_digitos", "[", "i", "]", "*", "multiplicadores", "[", "i", "+", "1", "]", "for", "i", "in", "range", "(", "9", ")", "]", ")", "soma2", "=", "sum", "(", "[", "_digitos", "[", "i", "]", "*", "multiplicadores", "[", "i", "]", "for", "i", "in", "range", "(", "10", ")", "]", ")", "digito1", "=", "11", "-", "(", "soma1", "%", "11", ")", "digito2", "=", "11", "-", "(", "soma2", "%", "11", ")", "if", "digito1", ">=", "10", ":", "digito1", "=", "0", "if", "digito2", ">=", "10", ":", "digito2", "=", "0", "if", "_digitos", "[", "9", "]", "!=", "digito1", "or", "_digitos", "[", "10", "]", "!=", "digito2", ":", "raise", "NumeroCPFError", "(", "'Digitos verificadores invalidos: {!r}'", ".", "format", "(", "numero", ")", ")" ]
37.275862
0.001803
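A worked pass through the check-digit arithmetic, using '11144477735', a commonly cited valid test CPF:

digits = [int(c) for c in '11144477735']
mult = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
soma1 = sum(digits[i] * mult[i+1] for i in range(9))   # 162
soma2 = sum(digits[i] * mult[i] for i in range(10))    # 204
d1 = 11 - (soma1 % 11)   # 162 % 11 == 8, so d1 == 3
d2 = 11 - (soma2 % 11)   # 204 % 11 == 6, so d2 == 5
assert (digits[9], digits[10]) == (d1, d2)  # the check digits "35" verify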
def build_estimator(model_dir, model_type, model_column_fn, inter_op, intra_op, ctx):
    """Build an estimator appropriate for the given model type."""
    wide_columns, deep_columns = model_column_fn()
    hidden_units = [100, 75, 50, 25]

    # Create a tf.estimator.RunConfig to ensure the model is run on CPU, which
    # trains faster than GPU for this model.
    # Note: adding device_filter to fix: https://github.com/tensorflow/tensorflow/issues/21745
    run_config = tf.estimator.RunConfig().replace(
        session_config=tf.ConfigProto(
            device_count={'GPU': 0},
            device_filters=['/job:ps', '/job:%s/task:%d' % (ctx.job_name, ctx.task_index)],
            inter_op_parallelism_threads=inter_op,
            intra_op_parallelism_threads=intra_op))

    if model_type == 'wide':
        return tf.estimator.LinearClassifier(
            model_dir=model_dir,
            feature_columns=wide_columns,
            config=run_config)
    elif model_type == 'deep':
        return tf.estimator.DNNClassifier(
            model_dir=model_dir,
            feature_columns=deep_columns,
            hidden_units=hidden_units,
            config=run_config)
    else:
        return tf.estimator.DNNLinearCombinedClassifier(
            model_dir=model_dir,
            linear_feature_columns=wide_columns,
            dnn_feature_columns=deep_columns,
            dnn_hidden_units=hidden_units,
            config=run_config)
[ "def", "build_estimator", "(", "model_dir", ",", "model_type", ",", "model_column_fn", ",", "inter_op", ",", "intra_op", ",", "ctx", ")", ":", "wide_columns", ",", "deep_columns", "=", "model_column_fn", "(", ")", "hidden_units", "=", "[", "100", ",", "75", ",", "50", ",", "25", "]", "# Create a tf.estimator.RunConfig to ensure the model is run on CPU, which", "# trains faster than GPU for this model.", "# Note: adding device_filter to fix: https://github.com/tensorflow/tensorflow/issues/21745", "run_config", "=", "tf", ".", "estimator", ".", "RunConfig", "(", ")", ".", "replace", "(", "session_config", "=", "tf", ".", "ConfigProto", "(", "device_count", "=", "{", "'GPU'", ":", "0", "}", ",", "device_filters", "=", "[", "'/job:ps'", ",", "'/job:%s/task:%d'", "%", "(", "ctx", ".", "job_name", ",", "ctx", ".", "task_index", ")", "]", ",", "inter_op_parallelism_threads", "=", "inter_op", ",", "intra_op_parallelism_threads", "=", "intra_op", ")", ")", "if", "model_type", "==", "'wide'", ":", "return", "tf", ".", "estimator", ".", "LinearClassifier", "(", "model_dir", "=", "model_dir", ",", "feature_columns", "=", "wide_columns", ",", "config", "=", "run_config", ")", "elif", "model_type", "==", "'deep'", ":", "return", "tf", ".", "estimator", ".", "DNNClassifier", "(", "model_dir", "=", "model_dir", ",", "feature_columns", "=", "deep_columns", ",", "hidden_units", "=", "hidden_units", ",", "config", "=", "run_config", ")", "else", ":", "return", "tf", ".", "estimator", ".", "DNNLinearCombinedClassifier", "(", "model_dir", "=", "model_dir", ",", "linear_feature_columns", "=", "wide_columns", ",", "dnn_feature_columns", "=", "deep_columns", ",", "dnn_hidden_units", "=", "hidden_units", ",", "config", "=", "run_config", ")" ]
43.6875
0.009797
def fill_out_and_submit(self, data, prefix='', skip_reset=False):
    """
    Calls :py:meth:`~.Form.fill_out` and then :py:meth:`.submit`.
    """
    self.fill_out(data, prefix, skip_reset)
    self.submit()
[ "def", "fill_out_and_submit", "(", "self", ",", "data", ",", "prefix", "=", "''", ",", "skip_reset", "=", "False", ")", ":", "self", ".", "fill_out", "(", "data", ",", "prefix", ",", "skip_reset", ")", "self", ".", "submit", "(", ")" ]
37.333333
0.008734
def __ensure_gcloud():
    """The *NIX installer is not guaranteed to add the google cloud sdk to
    the user's PATH (the Windows installer does). This ensures that if the
    default directory for the executables exists, it is added to the PATH
    for the duration of this package's use."""
    if which('gcloud') is None:
        gcloud_path = os.path.join(os.path.expanduser('~'),
                                   'google-cloud-sdk', 'bin')
        env_path = os.getenv('PATH')
        if os.path.isdir(gcloud_path):
            if env_path is not None:
                os.environ['PATH'] = gcloud_path + os.pathsep + env_path
            else:
                os.environ['PATH'] = gcloud_path
[ "def", "__ensure_gcloud", "(", ")", ":", "if", "which", "(", "'gcloud'", ")", "is", "None", ":", "gcloud_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "'google-cloud-sdk'", ",", "'bin'", ")", "env_path", "=", "os", ".", "getenv", "(", "'PATH'", ")", "if", "os", ".", "path", ".", "isdir", "(", "gcloud_path", ")", ":", "if", "env_path", "is", "not", "None", ":", "os", ".", "environ", "[", "'PATH'", "]", "=", "gcloud_path", "+", "os", ".", "pathsep", "+", "env_path", "else", ":", "os", ".", "environ", "[", "'PATH'", "]", "=", "gcloud_path" ]
49.071429
0.001429
def voicing_measures(ref_voicing, est_voicing):
    """Compute the voicing recall and false alarm rates given two voicing
    indicator sequences, one as reference (truth) and the other as the
    estimate (prediction). The sequences must be of the same length.

    Examples
    --------
    >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
    >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
    >>> (ref_v, ref_c,
    ...  est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
    ...                                                  ref_freq,
    ...                                                  est_time,
    ...                                                  est_freq)
    >>> recall, false_alarm = mir_eval.melody.voicing_measures(ref_v,
    ...                                                        est_v)

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    est_voicing : np.ndarray
        Estimated boolean voicing array

    Returns
    -------
    vx_recall : float
        Voicing recall rate, the fraction of voiced frames in ref
        indicated as voiced in est
    vx_false_alarm : float
        Voicing false alarm rate, the fraction of unvoiced frames in ref
        indicated as voiced in est
    """
    validate_voicing(ref_voicing, est_voicing)
    ref_voicing = ref_voicing.astype(bool)
    est_voicing = est_voicing.astype(bool)
    # When input arrays are empty, return 0 by special case
    if ref_voicing.size == 0 or est_voicing.size == 0:
        return 0.

    # How voicing is computed
    #        | ref_v | !ref_v |
    # -------|-------|--------|
    # est_v  |  TP   |   FP   |
    # -------|-------|--------|
    # !est_v |  FN   |   TN   |
    # -------------------------
    TP = (ref_voicing * est_voicing).sum()
    FP = ((ref_voicing == 0) * est_voicing).sum()
    FN = (ref_voicing * (est_voicing == 0)).sum()
    TN = ((ref_voicing == 0) * (est_voicing == 0)).sum()

    # Voicing recall = fraction of voiced frames according to the reference
    # that are declared as voiced by the estimate
    if TP + FN == 0:
        vx_recall = 0.
    else:
        vx_recall = TP / float(TP + FN)

    # Voicing false alarm = fraction of unvoiced frames according to the
    # reference that are declared as voiced by the estimate
    if FP + TN == 0:
        vx_false_alm = 0.
    else:
        vx_false_alm = FP / float(FP + TN)
    return vx_recall, vx_false_alm
[ "def", "voicing_measures", "(", "ref_voicing", ",", "est_voicing", ")", ":", "validate_voicing", "(", "ref_voicing", ",", "est_voicing", ")", "ref_voicing", "=", "ref_voicing", ".", "astype", "(", "bool", ")", "est_voicing", "=", "est_voicing", ".", "astype", "(", "bool", ")", "# When input arrays are empty, return 0 by special case", "if", "ref_voicing", ".", "size", "==", "0", "or", "est_voicing", ".", "size", "==", "0", ":", "return", "0.", "# How voicing is computed", "# | ref_v | !ref_v |", "# -------|-------|--------|", "# est_v | TP | FP |", "# -------|-------|------- |", "# !est_v | FN | TN |", "# -------------------------", "TP", "=", "(", "ref_voicing", "*", "est_voicing", ")", ".", "sum", "(", ")", "FP", "=", "(", "(", "ref_voicing", "==", "0", ")", "*", "est_voicing", ")", ".", "sum", "(", ")", "FN", "=", "(", "ref_voicing", "*", "(", "est_voicing", "==", "0", ")", ")", ".", "sum", "(", ")", "TN", "=", "(", "(", "ref_voicing", "==", "0", ")", "*", "(", "est_voicing", "==", "0", ")", ")", ".", "sum", "(", ")", "# Voicing recall = fraction of voiced frames according the reference that", "# are declared as voiced by the estimate", "if", "TP", "+", "FN", "==", "0", ":", "vx_recall", "=", "0.", "else", ":", "vx_recall", "=", "TP", "/", "float", "(", "TP", "+", "FN", ")", "# Voicing false alarm = fraction of unvoiced frames according to the", "# reference that are declared as voiced by the estimate", "if", "FP", "+", "TN", "==", "0", ":", "vx_false_alm", "=", "0.", "else", ":", "vx_false_alm", "=", "FP", "/", "float", "(", "FP", "+", "TN", ")", "return", "vx_recall", ",", "vx_false_alm" ]
35.028986
0.000402
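A small numeric check of the recall and false-alarm formulas above, computed inline on toy boolean arrays:

import numpy as np

ref_v = np.array([1, 1, 1, 0, 0], dtype=bool)  # 3 voiced, 2 unvoiced frames
est_v = np.array([1, 0, 1, 1, 0], dtype=bool)
TP = (ref_v * est_v).sum()                 # 2
FN = (ref_v * (est_v == 0)).sum()          # 1
FP = ((ref_v == 0) * est_v).sum()          # 1
TN = ((ref_v == 0) * (est_v == 0)).sum()   # 1
print(TP / float(TP + FN), FP / float(FP + TN))  # recall 0.666..., false alarm 0.5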
def register_pickle():
    """The fastest serialization method, but restricts
    you to python clients."""
    import cPickle
    registry.register('pickle', cPickle.dumps, cPickle.loads,
                      content_type='application/x-python-serialize',
                      content_encoding='binary')
[ "def", "register_pickle", "(", ")", ":", "import", "cPickle", "registry", ".", "register", "(", "'pickle'", ",", "cPickle", ".", "dumps", ",", "cPickle", ".", "loads", ",", "content_type", "=", "'application/x-python-serialize'", ",", "content_encoding", "=", "'binary'", ")" ]
42.857143
0.003268
def split_utxos(self, wif, limit, fee=10000, max_outputs=100):
    """Split utxos of <wif> until <limit> or <max_outputs> reached."""
    key = deserialize.key(self.testnet, wif)
    limit = deserialize.positive_integer(limit)
    fee = deserialize.positive_integer(fee)
    max_outputs = deserialize.positive_integer(max_outputs)
    spendables = control.retrieve_utxos(self.service, [key.address()])
    txids = control.split_utxos(self.service, self.testnet, key, spendables,
                                limit, fee=fee, max_outputs=max_outputs,
                                publish=(not self.dryrun))
    return serialize.txids(txids)
[ "def", "split_utxos", "(", "self", ",", "wif", ",", "limit", ",", "fee", "=", "10000", ",", "max_outputs", "=", "100", ")", ":", "key", "=", "deserialize", ".", "key", "(", "self", ".", "testnet", ",", "wif", ")", "limit", "=", "deserialize", ".", "positive_integer", "(", "limit", ")", "fee", "=", "deserialize", ".", "positive_integer", "(", "fee", ")", "max_outputs", "=", "deserialize", ".", "positive_integer", "(", "max_outputs", ")", "spendables", "=", "control", ".", "retrieve_utxos", "(", "self", ".", "service", ",", "[", "key", ".", "address", "(", ")", "]", ")", "txids", "=", "control", ".", "split_utxos", "(", "self", ".", "service", ",", "self", ".", "testnet", ",", "key", ",", "spendables", ",", "limit", ",", "fee", "=", "fee", ",", "max_outputs", "=", "max_outputs", ",", "publish", "=", "(", "not", "self", ".", "dryrun", ")", ")", "return", "serialize", ".", "txids", "(", "txids", ")" ]
59.166667
0.002774
def get_agent(self):
    """Gets the ``Agent`` identified in this authentication credential.

    :return: the ``Agent``
    :rtype: ``osid.authentication.Agent``
    :raise: ``OperationFailed`` -- unable to complete request

    *compliance: mandatory -- This method must be implemented.*

    """
    agent_id = self.get_agent_id()
    return Agent(identifier=agent_id.identifier,
                 namespace=agent_id.namespace,
                 authority=agent_id.authority)
[ "def", "get_agent", "(", "self", ")", ":", "agent_id", "=", "self", ".", "get_agent_id", "(", ")", "return", "Agent", "(", "identifier", "=", "agent_id", ".", "identifier", ",", "namespace", "=", "agent_id", ".", "namespace", ",", "authority", "=", "agent_id", ".", "authority", ")" ]
35.928571
0.003876
def _iter_frequencies(self):
    """Iterate over the frequencies of this `QPlane`
    """
    # work out how many frequencies we need
    minf, maxf = self.frange
    fcum_mismatch = log(maxf / minf) * (2 + self.q**2)**(1/2.) / 2.
    nfreq = int(max(1, ceil(fcum_mismatch / self.deltam)))
    fstep = fcum_mismatch / nfreq
    fstepmin = 1 / self.duration
    # for each frequency, yield a QTile
    for i in xrange(nfreq):
        yield (minf *
               exp(2 / (2 + self.q**2)**(1/2.) * (i + .5) * fstep) //
               fstepmin * fstepmin)
[ "def", "_iter_frequencies", "(", "self", ")", ":", "# work out how many frequencies we need", "minf", ",", "maxf", "=", "self", ".", "frange", "fcum_mismatch", "=", "log", "(", "maxf", "/", "minf", ")", "*", "(", "2", "+", "self", ".", "q", "**", "2", ")", "**", "(", "1", "/", "2.", ")", "/", "2.", "nfreq", "=", "int", "(", "max", "(", "1", ",", "ceil", "(", "fcum_mismatch", "/", "self", ".", "deltam", ")", ")", ")", "fstep", "=", "fcum_mismatch", "/", "nfreq", "fstepmin", "=", "1", "/", "self", ".", "duration", "# for each frequency, yield a QTile", "for", "i", "in", "xrange", "(", "nfreq", ")", ":", "yield", "(", "minf", "*", "exp", "(", "2", "/", "(", "2", "+", "self", ".", "q", "**", "2", ")", "**", "(", "1", "/", "2.", ")", "*", "(", "i", "+", ".5", ")", "*", "fstep", ")", "//", "fstepmin", "*", "fstepmin", ")" ]
42.214286
0.003311
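A rough numeric walk-through of the tile-count formula above, with assumed illustration values q=10, frange=(10, 100) Hz and deltam=0.2 (the final fstepmin snapping is omitted):

from math import ceil, exp, log

q, minf, maxf, deltam = 10, 10., 100., 0.2  # assumed values for illustration
fcum_mismatch = log(maxf / minf) * (2 + q**2)**0.5 / 2.   # ~11.63
nfreq = int(max(1, ceil(fcum_mismatch / deltam)))         # 59 frequency tiles
fstep = fcum_mismatch / nfreq
first = minf * exp(2 / (2 + q**2)**0.5 * 0.5 * fstep)     # lowest tile frequency, ~10.2 Hz
print(nfreq, round(first, 3))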
def Process(self, parser_mediator, root_item=None, **kwargs):
    """Parses an OLECF file.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      root_item (Optional[pyolecf.item]): root item of the OLECF file.

    Raises:
      ValueError: If the root_item is not set.
    """
    # This will raise if unhandled keyword arguments are passed.
    super(AutomaticDestinationsOLECFPlugin, self).Process(
        parser_mediator, **kwargs)

    if not root_item:
        raise ValueError('Root item not set.')

    for item in root_item.sub_items:
        if item.name == 'DestList':
            self.ParseDestList(parser_mediator, item)

        elif self._RE_LNK_ITEM_NAME.match(item.name):
            display_name = parser_mediator.GetDisplayName()
            if display_name:
                display_name = '{0:s} # {1:s}'.format(display_name, item.name)
            else:
                display_name = '# {0:s}'.format(item.name)

            parser_mediator.AppendToParserChain(self._WINLNK_PARSER)
            try:
                item.seek(0, os.SEEK_SET)
                self._WINLNK_PARSER.ParseFileLNKFile(
                    parser_mediator, item, display_name)
            finally:
                parser_mediator.PopFromParserChain()
[ "def", "Process", "(", "self", ",", "parser_mediator", ",", "root_item", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# This will raise if unhandled keyword arguments are passed.", "super", "(", "AutomaticDestinationsOLECFPlugin", ",", "self", ")", ".", "Process", "(", "parser_mediator", ",", "*", "*", "kwargs", ")", "if", "not", "root_item", ":", "raise", "ValueError", "(", "'Root item not set.'", ")", "for", "item", "in", "root_item", ".", "sub_items", ":", "if", "item", ".", "name", "==", "'DestList'", ":", "self", ".", "ParseDestList", "(", "parser_mediator", ",", "item", ")", "elif", "self", ".", "_RE_LNK_ITEM_NAME", ".", "match", "(", "item", ".", "name", ")", ":", "display_name", "=", "parser_mediator", ".", "GetDisplayName", "(", ")", "if", "display_name", ":", "display_name", "=", "'{0:s} # {1:s}'", ".", "format", "(", "display_name", ",", "item", ".", "name", ")", "else", ":", "display_name", "=", "'# {0:s}'", ".", "format", "(", "item", ".", "name", ")", "parser_mediator", ".", "AppendToParserChain", "(", "self", ".", "_WINLNK_PARSER", ")", "try", ":", "item", ".", "seek", "(", "0", ",", "os", ".", "SEEK_SET", ")", "self", ".", "_WINLNK_PARSER", ".", "ParseFileLNKFile", "(", "parser_mediator", ",", "item", ",", "display_name", ")", "finally", ":", "parser_mediator", ".", "PopFromParserChain", "(", ")" ]
34.527778
0.007042
def loadFromFile(fileName):
    """
    load the configuration for the ReturnInfo from a file

    @param fileName: filename that contains the json configuration to use in the ReturnInfo
    """
    assert os.path.exists(fileName), "File " + fileName + " does not exist"
    conf = json.load(open(fileName))
    return ReturnInfo(
        articleInfo=ArticleInfoFlags(**conf.get("articleInfo", {})),
        eventInfo=EventInfoFlags(**conf.get("eventInfo", {})),
        sourceInfo=SourceInfoFlags(**conf.get("sourceInfo", {})),
        categoryInfo=CategoryInfoFlags(**conf.get("categoryInfo", {})),
        conceptInfo=ConceptInfoFlags(**conf.get("conceptInfo", {})),
        locationInfo=LocationInfoFlags(**conf.get("locationInfo", {})),
        storyInfo=StoryInfoFlags(**conf.get("storyInfo", {})),
        conceptClassInfo=ConceptClassInfoFlags(**conf.get("conceptClassInfo", {})),
        conceptFolderInfo=ConceptFolderInfoFlags(**conf.get("conceptFolderInfo", {}))
    )
[ "def", "loadFromFile", "(", "fileName", ")", ":", "assert", "os", ".", "path", ".", "exists", "(", "fileName", ")", ",", "\"File \"", "+", "fileName", "+", "\" does not exist\"", "conf", "=", "json", ".", "load", "(", "open", "(", "fileName", ")", ")", "return", "ReturnInfo", "(", "articleInfo", "=", "ArticleInfoFlags", "(", "*", "*", "conf", ".", "get", "(", "\"articleInfo\"", ",", "{", "}", ")", ")", ",", "eventInfo", "=", "EventInfoFlags", "(", "*", "*", "conf", ".", "get", "(", "\"eventInfo\"", ",", "{", "}", ")", ")", ",", "sourceInfo", "=", "SourceInfoFlags", "(", "*", "*", "conf", ".", "get", "(", "\"sourceInfo\"", ",", "{", "}", ")", ")", ",", "categoryInfo", "=", "CategoryInfoFlags", "(", "*", "*", "conf", ".", "get", "(", "\"categoryInfo\"", ",", "{", "}", ")", ")", ",", "conceptInfo", "=", "ConceptInfoFlags", "(", "*", "*", "conf", ".", "get", "(", "\"conceptInfo\"", ",", "{", "}", ")", ")", ",", "locationInfo", "=", "LocationInfoFlags", "(", "*", "*", "conf", ".", "get", "(", "\"locationInfo\"", ",", "{", "}", ")", ")", ",", "storyInfo", "=", "StoryInfoFlags", "(", "*", "*", "conf", ".", "get", "(", "\"storyInfo\"", ",", "{", "}", ")", ")", ",", "conceptClassInfo", "=", "ConceptClassInfoFlags", "(", "*", "*", "conf", ".", "get", "(", "\"conceptClassInfo\"", ",", "{", "}", ")", ")", ",", "conceptFolderInfo", "=", "ConceptFolderInfoFlags", "(", "*", "*", "conf", ".", "get", "(", "\"conceptFolderInfo\"", ",", "{", "}", ")", ")", ")" ]
57.444444
0.004757
def GroupsPost(self, parameters):
    """
    Create a group in CommonSense.
    If GroupsPost is successful, the group details, including its group_id,
    can be obtained by a call to getResponse(), and should be a json string.

    @param parameters (dictionary) - Dictionary containing the details of the
        group to be created.

    @return (bool) - Boolean indicating whether GroupsPost was successful.
    """
    if self.__SenseApiCall__('/groups.json', 'POST', parameters=parameters):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
[ "def", "GroupsPost", "(", "self", ",", "parameters", ")", ":", "if", "self", ".", "__SenseApiCall__", "(", "'/groups.json'", ",", "'POST'", ",", "parameters", "=", "parameters", ")", ":", "return", "True", "else", ":", "self", ".", "__error__", "=", "\"api call unsuccessful\"", "return", "False" ]
49.714286
0.015515
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
    """
    Makes a checksum address given a supported format.
    """
    norm_address = to_normalized_address(value)
    address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))

    checksum_address = add_0x_prefix(
        "".join(
            (
                norm_address[i].upper()
                if int(address_hash[i], 16) > 7
                else norm_address[i]
            )
            for i in range(2, 42)
        )
    )
    return ChecksumAddress(HexAddress(checksum_address))
[ "def", "to_checksum_address", "(", "value", ":", "AnyStr", ")", "->", "ChecksumAddress", ":", "norm_address", "=", "to_normalized_address", "(", "value", ")", "address_hash", "=", "encode_hex", "(", "keccak", "(", "text", "=", "remove_0x_prefix", "(", "norm_address", ")", ")", ")", "checksum_address", "=", "add_0x_prefix", "(", "\"\"", ".", "join", "(", "(", "norm_address", "[", "i", "]", ".", "upper", "(", ")", "if", "int", "(", "address_hash", "[", "i", "]", ",", "16", ")", ">", "7", "else", "norm_address", "[", "i", "]", ")", "for", "i", "in", "range", "(", "2", ",", "42", ")", ")", ")", "return", "ChecksumAddress", "(", "HexAddress", "(", "checksum_address", ")", ")" ]
30.611111
0.001761
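This is the EIP-55 mixed-case checksum scheme; a usage sketch against that spec's test vector, assuming the helper is exposed as in eth_utils:

from eth_utils import to_checksum_address  # assumed packaging of the helper above

addr = to_checksum_address('0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed')
assert addr == '0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed'  # EIP-55 test vector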
def extend_array(edges, binsz, lo, hi):
    """Extend an array to encompass lo and hi values."""
    numlo = int(np.ceil((edges[0] - lo) / binsz))
    numhi = int(np.ceil((hi - edges[-1]) / binsz))

    edges = copy.deepcopy(edges)
    if numlo > 0:
        edges_lo = np.linspace(edges[0] - numlo * binsz, edges[0], numlo + 1)
        edges = np.concatenate((edges_lo[:-1], edges))
    if numhi > 0:
        edges_hi = np.linspace(edges[-1], edges[-1] + numhi * binsz, numhi + 1)
        edges = np.concatenate((edges, edges_hi[1:]))
    return edges
[ "def", "extend_array", "(", "edges", ",", "binsz", ",", "lo", ",", "hi", ")", ":", "numlo", "=", "int", "(", "np", ".", "ceil", "(", "(", "edges", "[", "0", "]", "-", "lo", ")", "/", "binsz", ")", ")", "numhi", "=", "int", "(", "np", ".", "ceil", "(", "(", "hi", "-", "edges", "[", "-", "1", "]", ")", "/", "binsz", ")", ")", "edges", "=", "copy", ".", "deepcopy", "(", "edges", ")", "if", "numlo", ">", "0", ":", "edges_lo", "=", "np", ".", "linspace", "(", "edges", "[", "0", "]", "-", "numlo", "*", "binsz", ",", "edges", "[", "0", "]", ",", "numlo", "+", "1", ")", "edges", "=", "np", ".", "concatenate", "(", "(", "edges_lo", "[", ":", "-", "1", "]", ",", "edges", ")", ")", "if", "numhi", ">", "0", ":", "edges_hi", "=", "np", ".", "linspace", "(", "edges", "[", "-", "1", "]", ",", "edges", "[", "-", "1", "]", "+", "numhi", "*", "binsz", ",", "numhi", "+", "1", ")", "edges", "=", "np", ".", "concatenate", "(", "(", "edges", ",", "edges_hi", "[", "1", ":", "]", ")", ")", "return", "edges" ]
33.6875
0.001805
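A quick numeric check of the extend_array logic above (a sketch; numpy and copy are assumed imported as in the snippet):

import numpy as np

edges = np.linspace(1.0, 2.0, 5)                       # 1.0, 1.25, ..., 2.0
out = extend_array(edges, binsz=0.25, lo=0.5, hi=2.5)
# out now spans 0.5 ... 2.5 in 0.25 steps; `edges` itself is left untouched
# because the function deep-copies before concatenating.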
def false_positives(links_true, links_pred):
    """Count the number of False Positives.

    Returns the number of incorrect predictions of true non-links (true
    non-links, but predicted as links). This value is known as the number of
    False Positives (FP).

    Parameters
    ----------
    links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The true (or actual) links.
    links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The predicted links.

    Returns
    -------
    int
        The number of false positives.

    """

    links_true = _get_multiindex(links_true)
    links_pred = _get_multiindex(links_pred)

    return len(links_pred.difference(links_true))
[ "def", "false_positives", "(", "links_true", ",", "links_pred", ")", ":", "links_true", "=", "_get_multiindex", "(", "links_true", ")", "links_pred", "=", "_get_multiindex", "(", "links_pred", ")", "return", "len", "(", "links_pred", ".", "difference", "(", "links_true", ")", ")" ]
27.8
0.001391
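A minimal sketch of the FP count, assuming _get_multiindex passes MultiIndex input through unchanged (as in the recordlinkage toolkit):

import pandas as pd

links_true = pd.MultiIndex.from_tuples([(1, 10), (2, 20)])
links_pred = pd.MultiIndex.from_tuples([(1, 10), (3, 30)])
false_positives(links_true, links_pred)  # -> 1: (3, 30) is predicted but not a true link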
def api_user(request, userPk, key=None, hproPk=None):
    """Return information about a user"""

    if not check_api_key(request, key, hproPk):
        return HttpResponseForbidden()

    if settings.PIAPI_STANDALONE:
        if not settings.PIAPI_REALUSERS:
            user = generate_user(pk=userPk)

            if user is None:
                return HttpResponseNotFound()
        else:
            user = get_object_or_404(DUser, pk=userPk)
        hproject = None
    else:
        from users.models import TechUser
        user = get_object_or_404(TechUser, pk=userPk)

        (_, _, hproject) = getPlugItObject(hproPk)
        user.ebuio_member = hproject.isMemberRead(user)
        user.ebuio_admin = hproject.isMemberWrite(user)
        user.subscription_labels = _get_subscription_labels(user, hproject)

    retour = {}

    # Append properties for the user data
    for prop in settings.PIAPI_USERDATA:
        if hasattr(user, prop):
            retour[prop] = getattr(user, prop)

    retour['id'] = str(retour['pk'])

    # Append the users organisation and access levels
    orgas = {}
    if user:
        limitedOrgas = []
        if hproject and hproject.plugItLimitOrgaJoinable:
            # Get List of Plugit Available Orgas first
            projectOrgaIds = hproject.plugItOrgaJoinable.order_by('name').values_list('pk', flat=True)
            for (orga, isAdmin) in user.getOrgas(distinct=True):
                if orga.pk in projectOrgaIds:
                    limitedOrgas.append((orga, isAdmin))
        elif hasattr(user, 'getOrgas'):
            limitedOrgas = user.getOrgas(distinct=True)

        # Create List
        orgas = [{'id': orga.pk, 'name': orga.name, 'codops': orga.ebu_codops, 'is_admin': isAdmin} for (orga, isAdmin) in limitedOrgas]

    retour['orgas'] = orgas

    return HttpResponse(json.dumps(retour), content_type="application/json")
[ "def", "api_user", "(", "request", ",", "userPk", ",", "key", "=", "None", ",", "hproPk", "=", "None", ")", ":", "if", "not", "check_api_key", "(", "request", ",", "key", ",", "hproPk", ")", ":", "return", "HttpResponseForbidden", "if", "settings", ".", "PIAPI_STANDALONE", ":", "if", "not", "settings", ".", "PIAPI_REALUSERS", ":", "user", "=", "generate_user", "(", "pk", "=", "userPk", ")", "if", "user", "is", "None", ":", "return", "HttpResponseNotFound", "(", ")", "else", ":", "user", "=", "get_object_or_404", "(", "DUser", ",", "pk", "=", "userPk", ")", "hproject", "=", "None", "else", ":", "from", "users", ".", "models", "import", "TechUser", "user", "=", "get_object_or_404", "(", "TechUser", ",", "pk", "=", "userPk", ")", "(", "_", ",", "_", ",", "hproject", ")", "=", "getPlugItObject", "(", "hproPk", ")", "user", ".", "ebuio_member", "=", "hproject", ".", "isMemberRead", "(", "user", ")", "user", ".", "ebuio_admin", "=", "hproject", ".", "isMemberWrite", "(", "user", ")", "user", ".", "subscription_labels", "=", "_get_subscription_labels", "(", "user", ",", "hproject", ")", "retour", "=", "{", "}", "# Append properties for the user data", "for", "prop", "in", "settings", ".", "PIAPI_USERDATA", ":", "if", "hasattr", "(", "user", ",", "prop", ")", ":", "retour", "[", "prop", "]", "=", "getattr", "(", "user", ",", "prop", ")", "retour", "[", "'id'", "]", "=", "str", "(", "retour", "[", "'pk'", "]", ")", "# Append the users organisation and access levels", "orgas", "=", "{", "}", "if", "user", ":", "limitedOrgas", "=", "[", "]", "if", "hproject", "and", "hproject", ".", "plugItLimitOrgaJoinable", ":", "# Get List of Plugit Available Orgas first", "projectOrgaIds", "=", "hproject", ".", "plugItOrgaJoinable", ".", "order_by", "(", "'name'", ")", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", "for", "(", "orga", ",", "isAdmin", ")", "in", "user", ".", "getOrgas", "(", "distinct", "=", "True", ")", ":", "if", "orga", ".", "pk", "in", "projectOrgaIds", ":", "limitedOrgas", ".", "append", "(", "(", "orga", ",", "isAdmin", ")", ")", "elif", "hasattr", "(", "user", ",", "'getOrgas'", ")", ":", "limitedOrgas", "=", "user", ".", "getOrgas", "(", "distinct", "=", "True", ")", "# Create List", "orgas", "=", "[", "{", "'id'", ":", "orga", ".", "pk", ",", "'name'", ":", "orga", ".", "name", ",", "'codops'", ":", "orga", ".", "ebu_codops", ",", "'is_admin'", ":", "isAdmin", "}", "for", "(", "orga", ",", "isAdmin", ")", "in", "limitedOrgas", "]", "retour", "[", "'orgas'", "]", "=", "orgas", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "retour", ")", ",", "content_type", "=", "\"application/json\"", ")" ]
33.745455
0.001571
def fromDict(cls, _dict): """ Builds instance from dictionary of properties. """ obj = cls() obj.__dict__.update(_dict) return obj
[ "def", "fromDict", "(", "cls", ",", "_dict", ")", ":", "obj", "=", "cls", "(", ")", "obj", ".", "__dict__", ".", "update", "(", "_dict", ")", "return", "obj" ]
31.6
0.012346
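A sketch of the intended classmethod use of fromDict; Config is a hypothetical host class binding the helper above:

class Config(object):
    fromDict = classmethod(fromDict)  # hypothetical binding of the function above

cfg = Config.fromDict({"host": "localhost", "port": 8080})
assert cfg.port == 8080               # dictionary keys become instance attributes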
def _parse(self): """ Given self.resource, split information from the CTS API :return: None """ self.response = self.resource self.resource = self.resource.xpath("//ti:passage/tei:TEI", namespaces=XPATH_NAMESPACES)[0] self._prev_id, self._next_id = _SharedMethod.prevnext(self.response) if not self.citation.is_set() and len(self.resource.xpath("//ti:citation", namespaces=XPATH_NAMESPACES)): self.citation = CtsCollection.XmlCtsCitation.ingest( self.response, xpath=".//ti:citation[not(ancestor::ti:citation)]" )
[ "def", "_parse", "(", "self", ")", ":", "self", ".", "response", "=", "self", ".", "resource", "self", ".", "resource", "=", "self", ".", "resource", ".", "xpath", "(", "\"//ti:passage/tei:TEI\"", ",", "namespaces", "=", "XPATH_NAMESPACES", ")", "[", "0", "]", "self", ".", "_prev_id", ",", "self", ".", "_next_id", "=", "_SharedMethod", ".", "prevnext", "(", "self", ".", "response", ")", "if", "not", "self", ".", "citation", ".", "is_set", "(", ")", "and", "len", "(", "self", ".", "resource", ".", "xpath", "(", "\"//ti:citation\"", ",", "namespaces", "=", "XPATH_NAMESPACES", ")", ")", ":", "self", ".", "citation", "=", "CtsCollection", ".", "XmlCtsCitation", ".", "ingest", "(", "self", ".", "response", ",", "xpath", "=", "\".//ti:citation[not(ancestor::ti:citation)]\"", ")" ]
40.933333
0.006369
def _zone_expired(self, zone): """ Determine if a zone is expired or not. :param zone: zone number :type zone: int :returns: whether or not the zone is expired """ return (time.time() > self._zones[zone].timestamp + Zonetracker.EXPIRE) and self._zones[zone].expander is False
[ "def", "_zone_expired", "(", "self", ",", "zone", ")", ":", "return", "(", "time", ".", "time", "(", ")", ">", "self", ".", "_zones", "[", "zone", "]", ".", "timestamp", "+", "Zonetracker", ".", "EXPIRE", ")", "and", "self", ".", "_zones", "[", "zone", "]", ".", "expander", "is", "False" ]
32.4
0.009009
def fetch_request_ids(item_ids, cls, attr_name, verification_list=None): """Return a list of cls instances for all the ids provided in item_ids. :param item_ids: The list of ids to fetch objects for :param cls: The class to fetch the ids from :param attr_name: The name of the attribute for exception purposes :param verification_list: If provided, a list of acceptable instances Raise InvalidId exception using attr_name if any do not exist, or are not present in the verification_list. """ if not item_ids: return [] items = [] for item_id in item_ids: item = cls.fetch_by_id(item_id) if not item or (verification_list is not None and item not in verification_list): raise InvalidId(attr_name) items.append(item) return items
[ "def", "fetch_request_ids", "(", "item_ids", ",", "cls", ",", "attr_name", ",", "verification_list", "=", "None", ")", ":", "if", "not", "item_ids", ":", "return", "[", "]", "items", "=", "[", "]", "for", "item_id", "in", "item_ids", ":", "item", "=", "cls", ".", "fetch_by_id", "(", "item_id", ")", "if", "not", "item", "or", "(", "verification_list", "is", "not", "None", "and", "item", "not", "in", "verification_list", ")", ":", "raise", "InvalidId", "(", "attr_name", ")", "items", ".", "append", "(", "item", ")", "return", "items" ]
37.681818
0.001176
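A behavioural sketch of fetch_request_ids; Item here is a hypothetical class exposing the fetch_by_id interface the helper relies on:

class Item(object):
    _db = {}
    @classmethod
    def fetch_by_id(cls, item_id):
        return cls._db.get(item_id)

Item._db = {1: Item(), 2: Item()}
fetch_request_ids([1, 2], Item, 'item_ids')  # -> the two Item instances
fetch_request_ids([], Item, 'item_ids')      # -> [] (falsy input short-circuits)
fetch_request_ids([3], Item, 'item_ids')     # raises InvalidId('item_ids')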
def get_permissions(self): """ Permissions of the user. Returns: List of Permission objects. """ user_role = self.last_login_role() if self.last_login_role_key else self.role_set[0].role return user_role.get_permissions()
[ "def", "get_permissions", "(", "self", ")", ":", "user_role", "=", "self", ".", "last_login_role", "(", ")", "if", "self", ".", "last_login_role_key", "else", "self", ".", "role_set", "[", "0", "]", ".", "role", "return", "user_role", ".", "get_permissions", "(", ")" ]
30.444444
0.010638
def _create_merge_filelist(bam_files, base_file, config): """Create list of input files for merge, ensuring all files are valid. """ bam_file_list = "%s.list" % os.path.splitext(base_file)[0] samtools = config_utils.get_program("samtools", config) with open(bam_file_list, "w") as out_handle: for f in sorted(bam_files): do.run('{} quickcheck -v {}'.format(samtools, f), "Ensure integrity of input merge BAM files") out_handle.write("%s\n" % f) return bam_file_list
[ "def", "_create_merge_filelist", "(", "bam_files", ",", "base_file", ",", "config", ")", ":", "bam_file_list", "=", "\"%s.list\"", "%", "os", ".", "path", ".", "splitext", "(", "base_file", ")", "[", "0", "]", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "config", ")", "with", "open", "(", "bam_file_list", ",", "\"w\"", ")", "as", "out_handle", ":", "for", "f", "in", "sorted", "(", "bam_files", ")", ":", "do", ".", "run", "(", "'{} quickcheck -v {}'", ".", "format", "(", "samtools", ",", "f", ")", ",", "\"Ensure integrity of input merge BAM files\"", ")", "out_handle", ".", "write", "(", "\"%s\\n\"", "%", "f", ")", "return", "bam_file_list" ]
48.181818
0.001852
def process_command_line(argv): """ parses the arguments. removes our arguments from the command line """ setup = {} for handler in ACCEPTED_ARG_HANDLERS: setup[handler.arg_name] = handler.default_val setup['file'] = '' setup['qt-support'] = '' i = 0 del argv[0] while i < len(argv): handler = ARGV_REP_TO_HANDLER.get(argv[i]) if handler is not None: handler.handle_argv(argv, i, setup) elif argv[i].startswith('--qt-support'): # The --qt-support is special because we want to keep backward compatibility: # Previously, just passing '--qt-support' meant that we should use the auto-discovery mode # whereas now, if --qt-support is passed, it should be passed as --qt-support=<mode>, where # mode can be one of 'auto', 'none', 'pyqt5', 'pyqt4', 'pyside'. if argv[i] == '--qt-support': setup['qt-support'] = 'auto' elif argv[i].startswith('--qt-support='): qt_support = argv[i][len('--qt-support='):] valid_modes = ('none', 'auto', 'pyqt5', 'pyqt4', 'pyside') if qt_support not in valid_modes: raise ValueError("qt-support mode invalid: " + qt_support) if qt_support == 'none': # On none, actually set an empty string to evaluate to False. setup['qt-support'] = '' else: setup['qt-support'] = qt_support else: raise ValueError("Unexpected definition for qt-support flag: " + argv[i]) del argv[i] elif argv[i] == '--file': # --file is special because it's the last one (so, no handler for it). del argv[i] setup['file'] = argv[i] i = len(argv) # pop out, file is our last argument elif argv[i] == '--DEBUG': from pydevd import set_debug del argv[i] set_debug(setup) else: raise ValueError("Unexpected option: " + argv[i]) return setup
[ "def", "process_command_line", "(", "argv", ")", ":", "setup", "=", "{", "}", "for", "handler", "in", "ACCEPTED_ARG_HANDLERS", ":", "setup", "[", "handler", ".", "arg_name", "]", "=", "handler", ".", "default_val", "setup", "[", "'file'", "]", "=", "''", "setup", "[", "'qt-support'", "]", "=", "''", "i", "=", "0", "del", "argv", "[", "0", "]", "while", "i", "<", "len", "(", "argv", ")", ":", "handler", "=", "ARGV_REP_TO_HANDLER", ".", "get", "(", "argv", "[", "i", "]", ")", "if", "handler", "is", "not", "None", ":", "handler", ".", "handle_argv", "(", "argv", ",", "i", ",", "setup", ")", "elif", "argv", "[", "i", "]", ".", "startswith", "(", "'--qt-support'", ")", ":", "# The --qt-support is special because we want to keep backward compatibility:", "# Previously, just passing '--qt-support' meant that we should use the auto-discovery mode", "# whereas now, if --qt-support is passed, it should be passed as --qt-support=<mode>, where", "# mode can be one of 'auto', 'none', 'pyqt5', 'pyqt4', 'pyside'.", "if", "argv", "[", "i", "]", "==", "'--qt-support'", ":", "setup", "[", "'qt-support'", "]", "=", "'auto'", "elif", "argv", "[", "i", "]", ".", "startswith", "(", "'--qt-support='", ")", ":", "qt_support", "=", "argv", "[", "i", "]", "[", "len", "(", "'--qt-support='", ")", ":", "]", "valid_modes", "=", "(", "'none'", ",", "'auto'", ",", "'pyqt5'", ",", "'pyqt4'", ",", "'pyside'", ")", "if", "qt_support", "not", "in", "valid_modes", ":", "raise", "ValueError", "(", "\"qt-support mode invalid: \"", "+", "qt_support", ")", "if", "qt_support", "==", "'none'", ":", "# On none, actually set an empty string to evaluate to False.", "setup", "[", "'qt-support'", "]", "=", "''", "else", ":", "setup", "[", "'qt-support'", "]", "=", "qt_support", "else", ":", "raise", "ValueError", "(", "\"Unexpected definition for qt-support flag: \"", "+", "argv", "[", "i", "]", ")", "del", "argv", "[", "i", "]", "elif", "argv", "[", "i", "]", "==", "'--file'", ":", "# --file is special because it's the last one (so, no handler for it).", "del", "argv", "[", "i", "]", "setup", "[", "'file'", "]", "=", "argv", "[", "i", "]", "i", "=", "len", "(", "argv", ")", "# pop out, file is our last argument", "elif", "argv", "[", "i", "]", "==", "'--DEBUG'", ":", "from", "pydevd", "import", "set_debug", "del", "argv", "[", "i", "]", "set_debug", "(", "setup", ")", "else", ":", "raise", "ValueError", "(", "\"Unexpected option: \"", "+", "argv", "[", "i", "]", ")", "return", "setup" ]
39.283019
0.003749
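A trace of the argument stripping in process_command_line, with hypothetical values; the script path and anything after --file survive in argv for the debugged program:

argv = ['pydevd.py', '--qt-support=pyqt5', '--file', 'script.py', '--its-arg']
setup = process_command_line(argv)
# setup['qt-support'] == 'pyqt5' and setup['file'] == 'script.py';
# argv is now ['script.py', '--its-arg'] because --file ends our own parsing.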
def force_unicode(raw): '''Try really really hard to get a Unicode copy of a string. First try :class:`BeautifulSoup.UnicodeDammit` to try to force to Unicode; if that fails, assume UTF-8 encoding, and ignore all errors. :param str raw: string to coerce :return: Unicode approximation of `raw` :returntype: :class:`unicode` ''' converted = UnicodeDammit(raw, isHTML=True) if not converted.unicode: converted.unicode = unicode(raw, 'utf8', errors='ignore') encoding_m = encoding_re.match(converted.unicode) if encoding_m: converted.unicode = \ encoding_m.group('start_xml') + \ encoding_m.group('remainder') return converted.unicode
[ "def", "force_unicode", "(", "raw", ")", ":", "converted", "=", "UnicodeDammit", "(", "raw", ",", "isHTML", "=", "True", ")", "if", "not", "converted", ".", "unicode", ":", "converted", ".", "unicode", "=", "unicode", "(", "raw", ",", "'utf8'", ",", "errors", "=", "'ignore'", ")", "encoding_m", "=", "encoding_re", ".", "match", "(", "converted", ".", "unicode", ")", "if", "encoding_m", ":", "converted", ".", "unicode", "=", "encoding_m", ".", "group", "(", "'start_xml'", ")", "+", "encoding_m", ".", "group", "(", "'remainder'", ")", "return", "converted", ".", "unicode" ]
30.695652
0.001374
def _get_objects_by_path(self, paths): """Return a list of all bluez DBus objects from the provided list of paths. """ return map(lambda x: self._bus.get_object('org.bluez', x), paths)
[ "def", "_get_objects_by_path", "(", "self", ",", "paths", ")", ":", "return", "map", "(", "lambda", "x", ":", "self", ".", "_bus", ".", "get_object", "(", "'org.bluez'", ",", "x", ")", ",", "paths", ")" ]
51.25
0.014423
def fixup_scipy_ndimage_result(whatever_it_returned): """Convert a result from scipy.ndimage to a numpy array scipy.ndimage has the annoying habit of returning a single, bare value instead of an array if the indexes passed in are of length 1. For instance: scind.maximum(image, labels, [1]) returns a float but scind.maximum(image, labels, [1,2]) returns a list """ if getattr(whatever_it_returned,"__getitem__",False): return np.array(whatever_it_returned) else: return np.array([whatever_it_returned])
[ "def", "fixup_scipy_ndimage_result", "(", "whatever_it_returned", ")", ":", "if", "getattr", "(", "whatever_it_returned", ",", "\"__getitem__\"", ",", "False", ")", ":", "return", "np", ".", "array", "(", "whatever_it_returned", ")", "else", ":", "return", "np", ".", "array", "(", "[", "whatever_it_returned", "]", ")" ]
39.357143
0.007092
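A demonstration of the normalisation, assuming scipy.ndimage is imported as scind like in the docstring:

import numpy as np
import scipy.ndimage as scind

image = np.array([[0.0, 1.0], [2.0, 3.0]])
labels = np.array([[1, 1], [2, 2]])
fixup_scipy_ndimage_result(scind.maximum(image, labels, [1]))     # -> array([1.])
fixup_scipy_ndimage_result(scind.maximum(image, labels, [1, 2]))  # -> array([1., 3.])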
def and_evaluator(conditions, leaf_evaluator): """ Evaluates a list of conditions as if the evaluator had been applied to each entry and the results AND-ed together. Args: conditions: List of conditions ex: [operand_1, operand_2]. leaf_evaluator: Function which will be called to evaluate leaf condition values. Returns: Boolean: - True if all operands evaluate to True. - False if a single operand evaluates to False. None: if conditions couldn't be evaluated. """ saw_null_result = False for condition in conditions: result = evaluate(condition, leaf_evaluator) if result is False: return False if result is None: saw_null_result = True return None if saw_null_result else True
[ "def", "and_evaluator", "(", "conditions", ",", "leaf_evaluator", ")", ":", "saw_null_result", "=", "False", "for", "condition", "in", "conditions", ":", "result", "=", "evaluate", "(", "condition", ",", "leaf_evaluator", ")", "if", "result", "is", "False", ":", "return", "False", "if", "result", "is", "None", ":", "saw_null_result", "=", "True", "return", "None", "if", "saw_null_result", "else", "True" ]
30.333333
0.010652
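The resulting three-valued AND, sketched under the assumption that the companion evaluate() applies leaf_evaluator directly to non-list operands (as in Optimizely's condition tree evaluator):

ident = lambda leaf: leaf              # leaves are already True/False/None here
and_evaluator([True, True], ident)     # -> True
and_evaluator([True, False], ident)    # -> False: a definite False always wins
and_evaluator([True, None], ident)     # -> None:  unknown operand, nothing False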
def verify(self, msg, sig, key): """ Verify a message signature :param msg: The message :param sig: A signature :param key: A ec.EllipticCurvePublicKey to use for the verification. :raises: BadSignature if the signature can't be verified. :return: True """ if not isinstance(key, ec.EllipticCurvePublicKey): raise TypeError( "The public key must be an instance of " "ec.EllipticCurvePublicKey") self._cross_check(key) num_bits = key.curve.key_size num_bytes = (num_bits + 7) // 8 if len(sig) != 2 * num_bytes: raise ValueError('Invalid signature') try: # cryptography uses ASN.1-encoded signature data; split JWS # signature (r||s) and encode before verification (r, s) = self._split_raw_signature(sig) asn1sig = encode_dss_signature(r, s) key.verify(asn1sig, msg, ec.ECDSA(self.hash_algorithm())) except InvalidSignature as err: raise BadSignature(err) else: return True
[ "def", "verify", "(", "self", ",", "msg", ",", "sig", ",", "key", ")", ":", "if", "not", "isinstance", "(", "key", ",", "ec", ".", "EllipticCurvePublicKey", ")", ":", "raise", "TypeError", "(", "\"The public key must be an instance of \"", "\"ec.EllipticCurvePublicKey\"", ")", "self", ".", "_cross_check", "(", "key", ")", "num_bits", "=", "key", ".", "curve", ".", "key_size", "num_bytes", "=", "(", "num_bits", "+", "7", ")", "//", "8", "if", "len", "(", "sig", ")", "!=", "2", "*", "num_bytes", ":", "raise", "ValueError", "(", "'Invalid signature'", ")", "try", ":", "# cryptography uses ASN.1-encoded signature data; split JWS", "# signature (r||s) and encode before verification", "(", "r", ",", "s", ")", "=", "self", ".", "_split_raw_signature", "(", "sig", ")", "asn1sig", "=", "encode_dss_signature", "(", "r", ",", "s", ")", "key", ".", "verify", "(", "asn1sig", ",", "msg", ",", "ec", ".", "ECDSA", "(", "self", ".", "hash_algorithm", "(", ")", ")", ")", "except", "InvalidSignature", "as", "err", ":", "raise", "BadSignature", "(", "err", ")", "else", ":", "return", "True" ]
35.870968
0.001751
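A sign/verify round trip for the method above (a sketch: signer stands for an instance of the surrounding class configured for P-256/SHA-256, a recent cryptography release is assumed, and the raw r||s signature is built by decoding cryptography's ASN.1 output):

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import decode_dss_signature

priv = ec.generate_private_key(ec.SECP256R1())
msg = b"payload"
r, s = decode_dss_signature(priv.sign(msg, ec.ECDSA(hashes.SHA256())))
size = (priv.curve.key_size + 7) // 8                    # 32 bytes for P-256
sig = r.to_bytes(size, "big") + s.to_bytes(size, "big")  # JWS-style raw r||s
signer.verify(msg, sig, priv.public_key())               # -> True, else BadSignature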
def start(self, *args, **kwargs):#pylint:disable=unused-argument """ | Launch the consumer. | It can listen forever for messages or just wait for one. :param forever: If set, the consumer listens forever. Default to `True`. :type forever: bool :param timeout: If set, the consumer waits the specified seconds before quitting. :type timeout: None, int :rtype: None :raises socket.timeout: when no message has been received since `timeout`. """ forever = kwargs.get('forever', True) timeout = kwargs.get('timeout', None) if forever: return self.run(timeout=timeout) elif timeout: next((self.consume(timeout=timeout)), None) else: next((self.consume(limit=1, timeout=timeout)), None)
[ "def", "start", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#pylint:disable=unused-argument", "forever", "=", "kwargs", ".", "get", "(", "'forever'", ",", "True", ")", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ",", "None", ")", "if", "forever", ":", "return", "self", ".", "run", "(", "timeout", "=", "timeout", ")", "elif", "timeout", ":", "next", "(", "(", "self", ".", "consume", "(", "timeout", "=", "timeout", ")", ")", ",", "None", ")", "else", ":", "next", "(", "(", "self", ".", "consume", "(", "limit", "=", "1", ",", "timeout", "=", "timeout", ")", ")", ",", "None", ")" ]
40.9
0.008363
def append_text(self, content): """ Append text nodes into L{Content.data} @param content: The current content being unmarshalled. @type content: L{Content} """ if content.node.hasText(): content.text = content.node.getText()
[ "def", "append_text", "(", "self", ",", "content", ")", ":", "if", "content", ".", "node", ".", "hasText", "(", ")", ":", "content", ".", "text", "=", "content", ".", "node", ".", "getText", "(", ")" ]
34.75
0.007018
def sched(self): """ Yield CPU. This will choose another process from the running list and change current running process. May give the same cpu if only one running process. """ if len(self.procs) > 1: logger.debug("SCHED:") logger.debug(f"\tProcess: {self.procs!r}") logger.debug(f"\tRunning: {self.running!r}") logger.debug(f"\tRWait: {self.rwait!r}") logger.debug(f"\tTWait: {self.twait!r}") logger.debug(f"\tTimers: {self.timers!r}") logger.debug(f"\tCurrent clock: {self.clocks}") logger.debug(f"\tCurrent cpu: {self._current}") if len(self.running) == 0: logger.debug("None running checking if there is some process waiting for a timeout") if all([x is None for x in self.timers]): raise Deadlock() self.clocks = min(x for x in self.timers if x is not None) + 1 self.check_timers() assert len(self.running) != 0, "DEADLOCK!" self._current = self.running[0] return next_index = (self.running.index(self._current) + 1) % len(self.running) next_running_idx = self.running[next_index] if len(self.procs) > 1: logger.debug(f"\tTransfer control from process {self._current} to {next_running_idx}") self._current = next_running_idx
[ "def", "sched", "(", "self", ")", ":", "if", "len", "(", "self", ".", "procs", ")", ">", "1", ":", "logger", ".", "debug", "(", "\"SCHED:\"", ")", "logger", ".", "debug", "(", "f\"\\tProcess: {self.procs!r}\"", ")", "logger", ".", "debug", "(", "f\"\\tRunning: {self.running!r}\"", ")", "logger", ".", "debug", "(", "f\"\\tRWait: {self.rwait!r}\"", ")", "logger", ".", "debug", "(", "f\"\\tTWait: {self.twait!r}\"", ")", "logger", ".", "debug", "(", "f\"\\tTimers: {self.timers!r}\"", ")", "logger", ".", "debug", "(", "f\"\\tCurrent clock: {self.clocks}\"", ")", "logger", ".", "debug", "(", "f\"\\tCurrent cpu: {self._current}\"", ")", "if", "len", "(", "self", ".", "running", ")", "==", "0", ":", "logger", ".", "debug", "(", "\"None running checking if there is some process waiting for a timeout\"", ")", "if", "all", "(", "[", "x", "is", "None", "for", "x", "in", "self", ".", "timers", "]", ")", ":", "raise", "Deadlock", "(", ")", "self", ".", "clocks", "=", "min", "(", "x", "for", "x", "in", "self", ".", "timers", "if", "x", "is", "not", "None", ")", "+", "1", "self", ".", "check_timers", "(", ")", "assert", "len", "(", "self", ".", "running", ")", "!=", "0", ",", "\"DEADLOCK!\"", "self", ".", "_current", "=", "self", ".", "running", "[", "0", "]", "return", "next_index", "=", "(", "self", ".", "running", ".", "index", "(", "self", ".", "_current", ")", "+", "1", ")", "%", "len", "(", "self", ".", "running", ")", "next_running_idx", "=", "self", ".", "running", "[", "next_index", "]", "if", "len", "(", "self", ".", "procs", ")", ">", "1", ":", "logger", ".", "debug", "(", "f\"\\tTransfer control from process {self._current} to {next_running_idx}\"", ")", "self", ".", "_current", "=", "next_running_idx" ]
47
0.003475
def openOrder(self, orderId, contract, order, orderState): """ This wrapper is called to: * feed in open orders at startup; * feed in open orders or order updates from other clients and TWS if clientId=master id; * feed in manual orders and order updates from TWS if clientId=0; * handle openOrders and allOpenOrders responses. """ if order.whatIf: # response to whatIfOrder self._endReq(order.orderId, orderState) else: key = self.orderKey(order.clientId, order.orderId, order.permId) trade = self.trades.get(key) # ignore '?' values in the order d = {k: v for k, v in order.dict().items() if v != '?'} if trade: trade.order.update(**d) else: contract = Contract.create(**contract.dict()) order = Order(**d) orderStatus = OrderStatus(status=orderState.status) trade = Trade(contract, order, orderStatus, [], []) self.trades[key] = trade self._logger.info(f'openOrder: {trade}') results = self._results.get('openOrders') if results is None: self.ib.openOrderEvent.emit(trade) else: # response to reqOpenOrders or reqAllOpenOrders results.append(order)
[ "def", "openOrder", "(", "self", ",", "orderId", ",", "contract", ",", "order", ",", "orderState", ")", ":", "if", "order", ".", "whatIf", ":", "# response to whatIfOrder", "self", ".", "_endReq", "(", "order", ".", "orderId", ",", "orderState", ")", "else", ":", "key", "=", "self", ".", "orderKey", "(", "order", ".", "clientId", ",", "order", ".", "orderId", ",", "order", ".", "permId", ")", "trade", "=", "self", ".", "trades", ".", "get", "(", "key", ")", "# ignore '?' values in the order", "d", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "order", ".", "dict", "(", ")", ".", "items", "(", ")", "if", "v", "!=", "'?'", "}", "if", "trade", ":", "trade", ".", "order", ".", "update", "(", "*", "*", "d", ")", "else", ":", "contract", "=", "Contract", ".", "create", "(", "*", "*", "contract", ".", "dict", "(", ")", ")", "order", "=", "Order", "(", "*", "*", "d", ")", "orderStatus", "=", "OrderStatus", "(", "status", "=", "orderState", ".", "status", ")", "trade", "=", "Trade", "(", "contract", ",", "order", ",", "orderStatus", ",", "[", "]", ",", "[", "]", ")", "self", ".", "trades", "[", "key", "]", "=", "trade", "self", ".", "_logger", ".", "info", "(", "f'openOrder: {trade}'", ")", "results", "=", "self", ".", "_results", ".", "get", "(", "'openOrders'", ")", "if", "results", "is", "None", ":", "self", ".", "ib", ".", "openOrderEvent", ".", "emit", "(", "trade", ")", "else", ":", "# response to reqOpenOrders or reqAllOpenOrders", "results", ".", "append", "(", "order", ")" ]
42.242424
0.001403
def delete_alias(self, alias_name): """Delete the alias.""" for aliases in self.key_to_aliases.values(): if alias_name in aliases: aliases.remove(alias_name)
[ "def", "delete_alias", "(", "self", ",", "alias_name", ")", ":", "for", "aliases", "in", "self", ".", "key_to_aliases", ".", "values", "(", ")", ":", "if", "alias_name", "in", "aliases", ":", "aliases", ".", "remove", "(", "alias_name", ")" ]
39.4
0.00995
def _left_motion(self, event): """Function bound to move event for marker canvas""" iid = self.current_iid if iid is None: return marker = self._markers[iid] if marker["move"] is False: return delta = marker["finish"] - marker["start"] # Limit x to 0 x = max(self._timeline.canvasx(event.x), 0) # Check if the timeline needs to be extended limit = self.get_time_position(self._finish - delta) if self._extend is False: x = min(x, limit) elif x > limit: # self._extend is True self.configure(finish=(self.get_position_time(x) + (marker["finish"] - marker["start"])) * 1.1) # Get the new start value start = self.get_position_time(x) finish = start + (marker["finish"] - marker["start"]) rectangle_id, text_id = marker["rectangle_id"], marker["text_id"] if rectangle_id not in self._timeline.find_all(): return x1, y1, x2, y2 = self._timeline.coords(rectangle_id) # Overlap protection allow_overlap = marker["allow_overlap"] allow_overlap = self._marker_allow_overlap if allow_overlap == "default" else allow_overlap if allow_overlap is False: for marker_dict in self.markers.values(): if marker_dict["allow_overlap"] is True: continue if marker["iid"] != marker_dict["iid"] and marker["category"] == marker_dict["category"]: if marker_dict["start"] < start < marker_dict["finish"]: start = marker_dict["finish"] if start < marker_dict["finish"] else marker_dict["start"] finish = start + (marker["finish"] - marker["start"]) x = self.get_time_position(start) break if marker_dict["start"] < finish < marker_dict["finish"]: finish = marker_dict["finish"] if finish > marker_dict["finish"] else marker_dict["start"] start = finish - (marker_dict["finish"] - marker_dict["start"]) x = self.get_time_position(start) break # Vertical movement if marker["change_category"] is True or \ (marker["change_category"] == "default" and self._marker_change_category): y = max(self._timeline.canvasy(event.y), 0) category = min(self._rows.keys(), key=lambda category: abs(self._rows[category][0] - y)) marker["category"] = category y1, y2 = self._rows[category] # Snapping to ticks if marker["snap_to_ticks"] is True or (marker["snap_to_ticks"] == "default" and self._marker_snap_to_ticks): # Start is prioritized over finish for tick in self._ticks: tick = self.get_time_position(tick) # Start if abs(x - tick) < self._snap_margin: x = tick break # Finish x_finish = x + delta if abs(x_finish - tick) < self._snap_margin: delta = self.get_time_position(marker["finish"] - marker["start"]) x = tick - delta break rectangle_coords = (x, y1, x2 + (x - x1), y2) self._timeline.coords(rectangle_id, *rectangle_coords) if text_id is not None: text_x, text_y = TimeLine.calculate_text_coords(rectangle_coords) self._timeline.coords(text_id, text_x, text_y) if self._after_id is not None: self.after_cancel(self._after_id) args = (iid, (marker["start"], marker["finish"]), (start, finish)) self._after_id = self.after(10, self._after_handler(iid, "move_callback", args)) marker["start"] = start marker["finish"] = finish
[ "def", "_left_motion", "(", "self", ",", "event", ")", ":", "iid", "=", "self", ".", "current_iid", "if", "iid", "is", "None", ":", "return", "marker", "=", "self", ".", "_markers", "[", "iid", "]", "if", "marker", "[", "\"move\"", "]", "is", "False", ":", "return", "delta", "=", "marker", "[", "\"finish\"", "]", "-", "marker", "[", "\"start\"", "]", "# Limit x to 0", "x", "=", "max", "(", "self", ".", "_timeline", ".", "canvasx", "(", "event", ".", "x", ")", ",", "0", ")", "# Check if the timeline needs to be extended", "limit", "=", "self", ".", "get_time_position", "(", "self", ".", "_finish", "-", "delta", ")", "if", "self", ".", "_extend", "is", "False", ":", "x", "=", "min", "(", "x", ",", "limit", ")", "elif", "x", ">", "limit", ":", "# self._extend is True", "self", ".", "configure", "(", "finish", "=", "(", "self", ".", "get_position_time", "(", "x", ")", "+", "(", "marker", "[", "\"finish\"", "]", "-", "marker", "[", "\"start\"", "]", ")", ")", "*", "1.1", ")", "# Get the new start value", "start", "=", "self", ".", "get_position_time", "(", "x", ")", "finish", "=", "start", "+", "(", "marker", "[", "\"finish\"", "]", "-", "marker", "[", "\"start\"", "]", ")", "rectangle_id", ",", "text_id", "=", "marker", "[", "\"rectangle_id\"", "]", ",", "marker", "[", "\"text_id\"", "]", "if", "rectangle_id", "not", "in", "self", ".", "_timeline", ".", "find_all", "(", ")", ":", "return", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "self", ".", "_timeline", ".", "coords", "(", "rectangle_id", ")", "# Overlap protection", "allow_overlap", "=", "marker", "[", "\"allow_overlap\"", "]", "allow_overlap", "=", "self", ".", "_marker_allow_overlap", "if", "allow_overlap", "==", "\"default\"", "else", "allow_overlap", "if", "allow_overlap", "is", "False", ":", "for", "marker_dict", "in", "self", ".", "markers", ".", "values", "(", ")", ":", "if", "marker_dict", "[", "\"allow_overlap\"", "]", "is", "True", ":", "continue", "if", "marker", "[", "\"iid\"", "]", "!=", "marker_dict", "[", "\"iid\"", "]", "and", "marker", "[", "\"category\"", "]", "==", "marker_dict", "[", "\"category\"", "]", ":", "if", "marker_dict", "[", "\"start\"", "]", "<", "start", "<", "marker_dict", "[", "\"finish\"", "]", ":", "start", "=", "marker_dict", "[", "\"finish\"", "]", "if", "start", "<", "marker_dict", "[", "\"finish\"", "]", "else", "marker_dict", "[", "\"start\"", "]", "finish", "=", "start", "+", "(", "marker", "[", "\"finish\"", "]", "-", "marker", "[", "\"start\"", "]", ")", "x", "=", "self", ".", "get_time_position", "(", "start", ")", "break", "if", "marker_dict", "[", "\"start\"", "]", "<", "finish", "<", "marker_dict", "[", "\"finish\"", "]", ":", "finish", "=", "marker_dict", "[", "\"finish\"", "]", "if", "finish", ">", "marker_dict", "[", "\"finish\"", "]", "else", "marker_dict", "[", "\"start\"", "]", "start", "=", "finish", "-", "(", "marker_dict", "[", "\"finish\"", "]", "-", "marker_dict", "[", "\"start\"", "]", ")", "x", "=", "self", ".", "get_time_position", "(", "start", ")", "break", "# Vertical movement", "if", "marker", "[", "\"change_category\"", "]", "is", "True", "or", "(", "marker", "[", "\"change_category\"", "]", "==", "\"default\"", "and", "self", ".", "_marker_change_category", ")", ":", "y", "=", "max", "(", "self", ".", "_timeline", ".", "canvasy", "(", "event", ".", "y", ")", ",", "0", ")", "category", "=", "min", "(", "self", ".", "_rows", ".", "keys", "(", ")", ",", "key", "=", "lambda", "category", ":", "abs", "(", "self", ".", "_rows", "[", "category", "]", "[", "0", "]", "-", "y", 
")", ")", "marker", "[", "\"category\"", "]", "=", "category", "y1", ",", "y2", "=", "self", ".", "_rows", "[", "category", "]", "# Snapping to ticks", "if", "marker", "[", "\"snap_to_ticks\"", "]", "is", "True", "or", "(", "marker", "[", "\"snap_to_ticks\"", "]", "==", "\"default\"", "and", "self", ".", "_marker_snap_to_ticks", ")", ":", "# Start is prioritized over finish", "for", "tick", "in", "self", ".", "_ticks", ":", "tick", "=", "self", ".", "get_time_position", "(", "tick", ")", "# Start", "if", "abs", "(", "x", "-", "tick", ")", "<", "self", ".", "_snap_margin", ":", "x", "=", "tick", "break", "# Finish", "x_finish", "=", "x", "+", "delta", "if", "abs", "(", "x_finish", "-", "tick", ")", "<", "self", ".", "_snap_margin", ":", "delta", "=", "self", ".", "get_time_position", "(", "marker", "[", "\"finish\"", "]", "-", "marker", "[", "\"start\"", "]", ")", "x", "=", "tick", "-", "delta", "break", "rectangle_coords", "=", "(", "x", ",", "y1", ",", "x2", "+", "(", "x", "-", "x1", ")", ",", "y2", ")", "self", ".", "_timeline", ".", "coords", "(", "rectangle_id", ",", "*", "rectangle_coords", ")", "if", "text_id", "is", "not", "None", ":", "text_x", ",", "text_y", "=", "TimeLine", ".", "calculate_text_coords", "(", "rectangle_coords", ")", "self", ".", "_timeline", ".", "coords", "(", "text_id", ",", "text_x", ",", "text_y", ")", "if", "self", ".", "_after_id", "is", "not", "None", ":", "self", ".", "after_cancel", "(", "self", ".", "_after_id", ")", "args", "=", "(", "iid", ",", "(", "marker", "[", "\"start\"", "]", ",", "marker", "[", "\"finish\"", "]", ")", ",", "(", "start", ",", "finish", ")", ")", "self", ".", "_after_id", "=", "self", ".", "after", "(", "10", ",", "self", ".", "_after_handler", "(", "iid", ",", "\"move_callback\"", ",", "args", ")", ")", "marker", "[", "\"start\"", "]", "=", "start", "marker", "[", "\"finish\"", "]", "=", "finish" ]
51.64
0.003294
def federated_query(self, environment_id, filter=None, query=None, natural_language_query=None, passages=None, aggregation=None, count=None, return_fields=None, offset=None, sort=None, highlight=None, passages_fields=None, passages_count=None, passages_characters=None, deduplicate=None, deduplicate_field=None, collection_ids=None, similar=None, similar_document_ids=None, similar_fields=None, bias=None, logging_opt_out=None, **kwargs):
    """
    Long environment queries.

    Complex queries might be too long for a standard method query. By using this
    method, you can construct longer queries. However, these queries may take
    longer to complete than the standard method. For details, see the [Discovery
    service documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts).

    :param str environment_id: The ID of the environment.
    :param str filter: A cacheable query that excludes documents that don't
    mention the query content. Filter searches are better for metadata-type
    searches and for assessing the concepts in the data set.
    :param str query: A query search returns all documents in your data set with
    full enrichments and full text, but with the most relevant documents listed
    first. Use a query search when you want to find the most relevant search
    results. You cannot use **natural_language_query** and **query** at the same
    time.
    :param str natural_language_query: A natural language query that returns
    relevant documents by utilizing training data and natural language
    understanding. You cannot use **natural_language_query** and **query** at
    the same time.
    :param bool passages: A passages query that returns the most relevant
    passages from the results.
    :param str aggregation: An aggregation search that returns an exact answer
    by combining query search with filters. Useful for applications to build
    lists, tables, and time series. For a full list of possible aggregations,
    see the Query reference.
    :param int count: Number of results to return.
    :param str return_fields: A comma-separated list of the portion of the
    document hierarchy to return.
    :param int offset: The number of query results to skip at the beginning. For
    example, if the total number of results that are returned is 10 and the
    offset is 8, it returns the last two results.
    :param str sort: A comma-separated list of fields in the document to sort
    on. You can optionally specify a sort direction by prefixing the field with
    `-` for descending or `+` for ascending. Ascending is the default sort
    direction if no prefix is specified. This parameter cannot be used in the
    same query as the **bias** parameter.
    :param bool highlight: When true, a highlight field is returned for each
    result which contains the fields which match the query with `<em></em>`
    tags around the matching query terms.
    :param str passages_fields: A comma-separated list of fields that passages
    are drawn from. If this parameter is not specified, then all top-level
    fields are included.
    :param int passages_count: The maximum number of passages to return. The
    search returns fewer passages if the requested total is not found. The
    default is `10`. The maximum is `100`.
    :param int passages_characters: The approximate number of characters that
    any one passage will have.
    :param bool deduplicate: When `true`, and used with a Watson Discovery News
    collection, duplicate results (based on the contents of the **title** field)
    are removed. Duplicate comparison is limited to the current query only;
    **offset** is not considered.
This parameter is currently Beta functionality. :param str deduplicate_field: When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, **offset** is not considered. This parameter is currently Beta functionality. :param str collection_ids: A comma-separated list of collection IDs to be queried against. Required when querying multiple collections, invalid when performing a single collection query. :param bool similar: When `true`, results are returned based on their similarity to the document IDs specified in the **similar.document_ids** parameter. :param str similar_document_ids: A comma-separated list of document IDs to find similar documents. **Tip:** Include the **natural_language_query** parameter to expand the scope of the document similarity search with the natural language query. Other query parameters, such as **filter** and **query**, are subsequently applied and reduce the scope. :param str similar_fields: A comma-separated list of field names that are used as a basis for comparison to identify similar documents. If not specified, the entire document is used for comparison. :param str bias: Field which the returned results will be biased against. The specified field must be either a **date** or **number** format. When a **date** type field is specified returned results are biased towards field values closer to the current date. When a **number** type field is specified, returned results are biased towards higher field values. This parameter cannot be used in the same query as the **sort** parameter. :param bool logging_opt_out: If `true`, queries are not stored in the Discovery **Logs** endpoint. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if environment_id is None: raise ValueError('environment_id must be provided') headers = {'X-Watson-Logging-Opt-Out': logging_opt_out} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('discovery', 'V1', 'federated_query') headers.update(sdk_headers) params = {'version': self.version} data = { 'filter': filter, 'query': query, 'natural_language_query': natural_language_query, 'passages': passages, 'aggregation': aggregation, 'count': count, 'return': return_fields, 'offset': offset, 'sort': sort, 'highlight': highlight, 'passages.fields': passages_fields, 'passages.count': passages_count, 'passages.characters': passages_characters, 'deduplicate': deduplicate, 'deduplicate.field': deduplicate_field, 'collection_ids': collection_ids, 'similar': similar, 'similar.document_ids': similar_document_ids, 'similar.fields': similar_fields, 'bias': bias } url = '/v1/environments/{0}/query'.format( *self._encode_path_vars(environment_id)) response = self.request( method='POST', url=url, headers=headers, params=params, json=data, accept_json=True) return response
[ "def", "federated_query", "(", "self", ",", "environment_id", ",", "filter", "=", "None", ",", "query", "=", "None", ",", "natural_language_query", "=", "None", ",", "passages", "=", "None", ",", "aggregation", "=", "None", ",", "count", "=", "None", ",", "return_fields", "=", "None", ",", "offset", "=", "None", ",", "sort", "=", "None", ",", "highlight", "=", "None", ",", "passages_fields", "=", "None", ",", "passages_count", "=", "None", ",", "passages_characters", "=", "None", ",", "deduplicate", "=", "None", ",", "deduplicate_field", "=", "None", ",", "collection_ids", "=", "None", ",", "similar", "=", "None", ",", "similar_document_ids", "=", "None", ",", "similar_fields", "=", "None", ",", "bias", "=", "None", ",", "logging_opt_out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "environment_id", "is", "None", ":", "raise", "ValueError", "(", "'environment_id must be provided'", ")", "headers", "=", "{", "'X-Watson-Logging-Opt-Out'", ":", "logging_opt_out", "}", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "sdk_headers", "=", "get_sdk_headers", "(", "'discovery'", ",", "'V1'", ",", "'federated_query'", ")", "headers", ".", "update", "(", "sdk_headers", ")", "params", "=", "{", "'version'", ":", "self", ".", "version", "}", "data", "=", "{", "'filter'", ":", "filter", ",", "'query'", ":", "query", ",", "'natural_language_query'", ":", "natural_language_query", ",", "'passages'", ":", "passages", ",", "'aggregation'", ":", "aggregation", ",", "'count'", ":", "count", ",", "'return'", ":", "return_fields", ",", "'offset'", ":", "offset", ",", "'sort'", ":", "sort", ",", "'highlight'", ":", "highlight", ",", "'passages.fields'", ":", "passages_fields", ",", "'passages.count'", ":", "passages_count", ",", "'passages.characters'", ":", "passages_characters", ",", "'deduplicate'", ":", "deduplicate", ",", "'deduplicate.field'", ":", "deduplicate_field", ",", "'collection_ids'", ":", "collection_ids", ",", "'similar'", ":", "similar", ",", "'similar.document_ids'", ":", "similar_document_ids", ",", "'similar.fields'", ":", "similar_fields", ",", "'bias'", ":", "bias", "}", "url", "=", "'/v1/environments/{0}/query'", ".", "format", "(", "*", "self", ".", "_encode_path_vars", "(", "environment_id", ")", ")", "response", "=", "self", ".", "request", "(", "method", "=", "'POST'", ",", "url", "=", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "json", "=", "data", ",", "accept_json", "=", "True", ")", "return", "response" ]
53.4
0.009315
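A hypothetical multi-collection call of federated_query; discovery stands for an authenticated client instance, the ids are placeholders, and the get_result() accessor is assumed from the SDK's DetailedResponse:

response = discovery.federated_query(
    environment_id='my-env-id',
    collection_ids='colA,colB',             # required for multi-collection queries
    natural_language_query='ibm research',
    count=5)
print(response.get_result())                # the JSON body of the DetailedResponse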
def rec_edit(self, zone, record_type, record_id, name, content, ttl=1, service_mode=None,
             priority=None, service=None, service_name=None, protocol=None, weight=None, port=None,
             target=None):
    """
    Edit a DNS record for the given zone.

    :param zone: domain name
    :type zone: str
    :param record_type: Type of DNS record. Valid values are [A/CNAME/MX/TXT/SPF/AAAA/NS/SRV/LOC]
    :type record_type: str
    :param record_id: DNS Record ID. Available by using the rec_load_all call.
    :type record_id: int
    :param name: Name of the DNS record
    :type name: str
    :param content: The content of the DNS record, will depend on the type of record being added
    :type content: str
    :param ttl: TTL of record in seconds. 1 = Automatic; otherwise, the value must be between 120 and 4,294,967,295 seconds.
    :type ttl: int
    :param service_mode: [applies to A/AAAA/CNAME] Status of CloudFlare Proxy, 1 = orange cloud, 0 = grey cloud.
    :type service_mode: int
    :param priority: [applies to MX/SRV] MX record priority.
    :type priority: int
    :param service: Service for SRV record
    :type service: str
    :param service_name: Service Name for SRV record
    :type service_name: str
    :param protocol: Protocol for SRV record. Values are [_tcp/_udp/_tls].
    :type protocol: str
    :param weight: Weight for SRV record.
    :type weight: int
    :param port: Port for SRV record
    :type port: int
    :param target: Target for SRV record
    :type target: str

    :return:
    :rtype: dict
    """
    params = {
        'a': 'rec_edit',
        'z': zone,
        'type': record_type,
        'id': record_id,
        'name': name,
        'content': content,
        'ttl': ttl
    }
    if service_mode is not None:
        params['service_mode'] = service_mode
    if priority is not None:
        params['prio'] = priority
    if service is not None:
        params['service'] = service
    if service_name is not None:
        params['srvname'] = service_name
    if protocol is not None:
        params['protocol'] = protocol
    if weight is not None:
        params['weight'] = weight
    if port is not None:
        params['port'] = port
    if target is not None:
        params['target'] = target
    return self._request(params)
[ "def", "rec_edit", "(", "self", ",", "zone", ",", "record_type", ",", "record_id", ",", "name", ",", "content", ",", "ttl", "=", "1", ",", "service_mode", "=", "None", ",", "priority", "=", "None", ",", "service", "=", "None", ",", "service_name", "=", "None", ",", "protocol", "=", "None", ",", "weight", "=", "None", ",", "port", "=", "None", ",", "target", "=", "None", ")", ":", "params", "=", "{", "'a'", ":", "'rec_edit'", ",", "'z'", ":", "zone", ",", "'type'", ":", "record_type", ",", "'id'", ":", "record_id", ",", "'name'", ":", "name", ",", "'content'", ":", "content", ",", "'ttl'", ":", "ttl", "}", "if", "service_mode", "is", "not", "None", ":", "params", "[", "'service_mode'", "]", "=", "service_mode", "if", "priority", "is", "not", "None", ":", "params", "[", "'prio'", "]", "=", "priority", "if", "service", "is", "not", "None", ":", "params", "[", "'service'", "]", "=", "service", "if", "service_name", "is", "not", "None", ":", "params", "[", "'srvname'", "]", "=", "service_name", "if", "protocol", "is", "not", "None", ":", "params", "[", "'protocol'", "]", "=", "protocol", "if", "weight", "is", "not", "None", ":", "params", "[", "'weight'", "]", "=", "weight", "if", "port", "is", "not", "None", ":", "params", "[", "'port'", "]", "=", "port", "if", "target", "is", "not", "None", ":", "params", "[", "'target'", "]", "=", "target", "return", "self", ".", "_request", "(", "params", ")" ]
40.080645
0.003928
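A hypothetical rec_edit call: repoint a proxied A record (cf is an authenticated client instance, the record id is made up, and 203.0.113.7 is a documentation address):

cf.rec_edit('example.com', 'A', 9001, 'www', '203.0.113.7',
            ttl=1,            # 1 = automatic TTL
            service_mode=1)   # keep the orange-cloud proxy enabled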
def oauth2_auth_url(redirect_uri=None, client_id=None, base_url=OH_BASE_URL):
    """
    Returns an OAuth2 authorization URL for a project, given Client ID. This
    function constructs an authorization URL for a user to follow.
    The user will be redirected to authorize Open Humans data for our external
    application. An OAuth2 project on Open Humans is required for this to
    properly work. To learn more about Open Humans OAuth2 projects, go to:
    https://www.openhumans.org/direct-sharing/oauth2-features/

    :param redirect_uri: This field is set to `None` by default. However, if
        provided, it appends it in the URL returned.
    :param client_id: This field is also set to `None` by default; however, it
        is a mandatory field for the final URL to work. It uniquely identifies
        a given OAuth2 project.
    :param base_url: It is this URL `https://www.openhumans.org`.
    """
    if not client_id:
        client_id = os.getenv('OHAPI_CLIENT_ID')
        if not client_id:
            raise SettingsError(
                "Client ID not provided! Provide client_id as a parameter, "
                "or set OHAPI_CLIENT_ID in your environment.")
    params = OrderedDict([
        ('client_id', client_id),
        ('response_type', 'code'),
    ])
    if redirect_uri:
        params['redirect_uri'] = redirect_uri
    auth_url = urlparse.urljoin(
        base_url, '/direct-sharing/projects/oauth2/authorize/?{}'.format(
            urlparse.urlencode(params)))
    return auth_url
[ "def", "oauth2_auth_url", "(", "redirect_uri", "=", "None", ",", "client_id", "=", "None", ",", "base_url", "=", "OH_BASE_URL", ")", ":", "if", "not", "client_id", ":", "client_id", "=", "os", ".", "getenv", "(", "'OHAPI_CLIENT_ID'", ")", "if", "not", "client_id", ":", "raise", "SettingsError", "(", "\"Client ID not provided! Provide client_id as a parameter, \"", "\"or set OHAPI_CLIENT_ID in your environment.\"", ")", "params", "=", "OrderedDict", "(", "[", "(", "'client_id'", ",", "client_id", ")", ",", "(", "'response_type'", ",", "'code'", ")", ",", "]", ")", "if", "redirect_uri", ":", "params", "[", "'redirect_uri'", "]", "=", "redirect_uri", "auth_url", "=", "urlparse", ".", "urljoin", "(", "base_url", ",", "'/direct-sharing/projects/oauth2/authorize/?{}'", ".", "format", "(", "urlparse", ".", "urlencode", "(", "params", ")", ")", ")", "return", "auth_url" ]
43.764706
0.000657
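A usage sketch for oauth2_auth_url with placeholder credentials; the query-string order below follows the OrderedDict in the function:

url = oauth2_auth_url(redirect_uri='https://example.org/callback',
                      client_id='MY_CLIENT_ID')
# -> https://www.openhumans.org/direct-sharing/projects/oauth2/authorize/
#    ?client_id=MY_CLIENT_ID&response_type=code&redirect_uri=https%3A%2F%2F...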
def setEditorData(self, editor, index):
    """Sets the current data for the editor.

    The data displayed has the same value as `index.data(Qt.EditRole)`
    (the translated name of the datatype). Therefore, a lookup for all items
    of the combobox is made and the matching item is set as the currently
    displayed item. Signals emitted by the editor are blocked during
    execution of this method.

    Args:
        editor (QtGui.QComboBox): The current editor for the item. Should be
            a `QtGui.QComboBox` as defined in `createEditor`.
        index (QtCore.QModelIndex): The index of the current item.

    """
    editor.blockSignals(True)
    data = index.data()
    dataIndex = editor.findData(data)
    # dataIndex = editor.findData(data, role=Qt.EditRole)
    editor.setCurrentIndex(dataIndex)
    editor.blockSignals(False)
[ "def", "setEditorData", "(", "self", ",", "editor", ",", "index", ")", ":", "editor", ".", "blockSignals", "(", "True", ")", "data", "=", "index", ".", "data", "(", ")", "dataIndex", "=", "editor", ".", "findData", "(", "data", ")", "# dataIndex = editor.findData(data, role=Qt.EditRole)", "editor", ".", "setCurrentIndex", "(", "dataIndex", ")", "editor", ".", "blockSignals", "(", "False", ")" ]
40.727273
0.004362
def hostapi_info(index=None): """Return a generator with information about each host API. If index is given, only one dictionary for the given host API is returned. """ if index is None: return (hostapi_info(i) for i in range(_pa.Pa_GetHostApiCount())) else: info = _pa.Pa_GetHostApiInfo(index) if not info: raise RuntimeError("Invalid host API") assert info.structVersion == 1 return {'name': ffi.string(info.name).decode(errors='ignore'), 'default_input_device': info.defaultInputDevice, 'default_output_device': info.defaultOutputDevice}
[ "def", "hostapi_info", "(", "index", "=", "None", ")", ":", "if", "index", "is", "None", ":", "return", "(", "hostapi_info", "(", "i", ")", "for", "i", "in", "range", "(", "_pa", ".", "Pa_GetHostApiCount", "(", ")", ")", ")", "else", ":", "info", "=", "_pa", ".", "Pa_GetHostApiInfo", "(", "index", ")", "if", "not", "info", ":", "raise", "RuntimeError", "(", "\"Invalid host API\"", ")", "assert", "info", ".", "structVersion", "==", "1", "return", "{", "'name'", ":", "ffi", ".", "string", "(", "info", ".", "name", ")", ".", "decode", "(", "errors", "=", "'ignore'", ")", ",", "'default_input_device'", ":", "info", ".", "defaultInputDevice", ",", "'default_output_device'", ":", "info", ".", "defaultOutputDevice", "}" ]
37.294118
0.001538
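A small enumeration sketch for hostapi_info; the names returned depend entirely on the local PortAudio build:

for api in hostapi_info():               # generator over all host APIs
    print(api['name'], api['default_output_device'])

hostapi_info(0)['name']                  # e.g. 'ALSA' on a typical Linux box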
def _get_name_info(name_index, name_list): """Helper to get optional details about named references Returns the dereferenced name as both value and repr if the name list is defined. Otherwise returns the name index and its repr(). """ argval = name_index if (name_list is not None # PyPY seems to "optimize" out constant names, # so we need for that: and name_index < len(name_list)): argval = name_list[name_index] argrepr = argval else: argrepr = repr(argval) return argval, argrepr
[ "def", "_get_name_info", "(", "name_index", ",", "name_list", ")", ":", "argval", "=", "name_index", "if", "(", "name_list", "is", "not", "None", "# PyPY seems to \"optimize\" out constant names,", "# so we need for that:", "and", "name_index", "<", "len", "(", "name_list", ")", ")", ":", "argval", "=", "name_list", "[", "name_index", "]", "argrepr", "=", "argval", "else", ":", "argrepr", "=", "repr", "(", "argval", ")", "return", "argval", ",", "argrepr" ]
33.058824
0.00346
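The three lookup outcomes of _get_name_info, traced directly:

_get_name_info(1, ['x', 'y', 'z'])  # -> ('y', 'y'): index dereferenced to a name
_get_name_info(5, ['x', 'y', 'z'])  # -> (5, '5'):  out of range (PyPy case), raw index
_get_name_info(1, None)             # -> (1, '1'):  no name table available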
def reshape(self, data_shapes, label_shapes): """Reshape executors. Parameters ---------- data_shapes : list label_shapes : list """ if data_shapes == self.data_shapes and label_shapes == self.label_shapes: return if self._default_execs is None: self._default_execs = [i for i in self.execs] self.bind_exec(data_shapes, label_shapes, reshape=True)
[ "def", "reshape", "(", "self", ",", "data_shapes", ",", "label_shapes", ")", ":", "if", "data_shapes", "==", "self", ".", "data_shapes", "and", "label_shapes", "==", "self", ".", "label_shapes", ":", "return", "if", "self", ".", "_default_execs", "is", "None", ":", "self", ".", "_default_execs", "=", "[", "i", "for", "i", "in", "self", ".", "execs", "]", "self", ".", "bind_exec", "(", "data_shapes", ",", "label_shapes", ",", "reshape", "=", "True", ")" ]
33.230769
0.006757
def import_string(dotted_path): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImportError if the import failed. """ try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError: msg = "%s doesn't look like a module path" % dotted_path six.reraise(ImportError, ImportError(msg), sys.exc_info()[2]) module = import_module(module_path) try: return getattr(module, class_name) except AttributeError: msg = 'Module "%s" does not define a "%s" attribute/class' % ( dotted_path, class_name ) six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
[ "def", "import_string", "(", "dotted_path", ")", ":", "try", ":", "module_path", ",", "class_name", "=", "dotted_path", ".", "rsplit", "(", "'.'", ",", "1", ")", "except", "ValueError", ":", "msg", "=", "\"%s doesn't look like a module path\"", "%", "dotted_path", "six", ".", "reraise", "(", "ImportError", ",", "ImportError", "(", "msg", ")", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "module", "=", "import_module", "(", "module_path", ")", "try", ":", "return", "getattr", "(", "module", ",", "class_name", ")", "except", "AttributeError", ":", "msg", "=", "'Module \"%s\" does not define a \"%s\" attribute/class'", "%", "(", "dotted_path", ",", "class_name", ")", "six", ".", "reraise", "(", "ImportError", ",", "ImportError", "(", "msg", ")", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")" ]
35.6
0.001368
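Typical success and failure modes of import_string:

cls = import_string('collections.OrderedDict')  # -> <class 'collections.OrderedDict'>
import_string('collections')           # ImportError: doesn't look like a module path
import_string('collections.NoSuch')    # ImportError: no such attribute/class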
def current_frame(self): """ Compute the number of the current frame (0-indexed) """ if not self._pause_level: return ( int((self._clock() + self._offset) * self.frames_per_second) % len(self._frames) ) else: return self._paused_frame
[ "def", "current_frame", "(", "self", ")", ":", "if", "not", "self", ".", "_pause_level", ":", "return", "(", "int", "(", "(", "self", ".", "_clock", "(", ")", "+", "self", ".", "_offset", ")", "*", "self", ".", "frames_per_second", ")", "%", "len", "(", "self", ".", "_frames", ")", ")", "else", ":", "return", "self", ".", "_paused_frame" ]
30.181818
0.005848
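A worked example of the frame arithmetic, with hypothetical numbers: 1.25 s of clock time in a 10 fps, 4-frame animation wraps back around to frame 0.

    elapsed, fps, n_frames = 1.25, 10, 4
    frame = int(elapsed * fps) % n_frames  # int(12.5) % 4 == 12 % 4
    assert frame == 0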
def to_color(self): """Convert the grayscale image to a ColorImage. Returns ------- :obj:`ColorImage` A color image equivalent to the grayscale one. """ color_data = np.repeat(self.data[:,:,np.newaxis], 3, axis=2) return ColorImage(color_data, self._frame)
[ "def", "to_color", "(", "self", ")", ":", "color_data", "=", "np", ".", "repeat", "(", "self", ".", "data", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", ",", "3", ",", "axis", "=", "2", ")", "return", "ColorImage", "(", "color_data", ",", "self", ".", "_frame", ")" ]
31.6
0.012308
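A minimal sketch of the channel expansion to_color relies on: np.repeat turns an (H, W) grayscale array into (H, W, 3) by copying the single channel along a new trailing axis.

    import numpy as np

    gray = np.array([[0, 128], [255, 64]], dtype=np.uint8)
    color = np.repeat(gray[:, :, np.newaxis], 3, axis=2)
    assert color.shape == (2, 2, 3)
    assert (color[..., 0] == color[..., 2]).all()  # all three channels identical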
def get_latex(ink_filename):
    """Get the LaTeX string from a file by the *.ink filename."""
    tex_file = os.path.splitext(ink_filename)[0] + ".tex"
    with open(tex_file) as f:
        tex_content = f.read().strip()
    pattern = re.compile(r"\\begin\{displaymath\}(.*?)\\end\{displaymath\}",
                         re.DOTALL)
    matches = pattern.findall(tex_content)
    if len(matches) == 0:
        # The dollar signs must be escaped; a bare "$" is an anchor.
        pattern = re.compile(r"\$\$(.*?)\$\$", re.DOTALL)
        matches = pattern.findall(tex_content)
    if len(matches) != 1:
        raise Exception("%s: Expected exactly one match, but found %i: %s" %
                        (ink_filename, len(matches), matches))
    formula_in_latex = matches[0].strip()
    formula_in_latex = remove_matching_braces(formula_in_latex)
    # repl = []
    # for letter in string.letters:
    #     repl.append(('\mbox{%s}' % letter, letter))
    # for search, replace in repl:
    #     formula_in_latex = formula_in_latex.replace(search, replace)
    return formula_in_latex
[ "def", "get_latex", "(", "ink_filename", ")", ":", "tex_file", "=", "os", ".", "path", ".", "splitext", "(", "ink_filename", ")", "[", "0", "]", "+", "\".tex\"", "with", "open", "(", "tex_file", ")", "as", "f", ":", "tex_content", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "pattern", "=", "re", ".", "compile", "(", "r\"\\\\begin\\{displaymath\\}(.*?)\\\\end\\{displaymath\\}\"", ",", "re", ".", "DOTALL", ")", "matches", "=", "pattern", ".", "findall", "(", "tex_content", ")", "if", "len", "(", "matches", ")", "==", "0", ":", "pattern", "=", "re", ".", "compile", "(", "r\"$$(.*?)$$\"", ",", "re", ".", "DOTALL", ")", "matches", "=", "pattern", ".", "findall", "(", "tex_content", ")", "if", "len", "(", "matches", ")", "!=", "1", ":", "raise", "Exception", "(", "\"%s: Found not one match, but %i: %s\"", "%", "(", "ink_filename", ",", "len", "(", "matches", ")", ",", "matches", ")", ")", "formula_in_latex", "=", "matches", "[", "0", "]", ".", "strip", "(", ")", "formula_in_latex", "=", "remove_matching_braces", "(", "formula_in_latex", ")", "# repl = []", "# for letter in string.letters:", "# repl.append(('\\mbox{%s}' % letter, letter))", "# for search, replace in repl:", "# formula_in_latex = formula_in_latex.replace(search, replace)", "return", "formula_in_latex" ]
38.846154
0.000966
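A self-contained sketch of the two extraction patterns (file I/O omitted; the sample TeX below is illustrative). Note the fallback pattern only works with the dollar signs escaped, since a bare "$" is an end-of-string anchor in a regex.

    import re

    tex = r"preamble \begin{displaymath} a^2 + b^2 = c^2 \end{displaymath}"
    pattern = re.compile(r"\\begin\{displaymath\}(.*?)\\end\{displaymath\}", re.DOTALL)
    assert pattern.findall(tex) == [' a^2 + b^2 = c^2 ']

    # Escaped fallback for $$ ... $$ delimiters:
    assert re.findall(r"\$\$(.*?)\$\$", "$$x + y$$", re.DOTALL) == ['x + y']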
def reboot(name, call=None): ''' reboot a machine by name :param name: name given to the machine :param call: call value in this case is 'action' :return: true if successful CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name ''' node = get_node(name) ret = take_action(name=name, call=call, method='POST', command='my/machines/{0}'.format(node['id']), location=node['location'], data={'action': 'reboot'}) return ret[0] in VALID_RESPONSE_CODES
[ "def", "reboot", "(", "name", ",", "call", "=", "None", ")", ":", "node", "=", "get_node", "(", "name", ")", "ret", "=", "take_action", "(", "name", "=", "name", ",", "call", "=", "call", ",", "method", "=", "'POST'", ",", "command", "=", "'my/machines/{0}'", ".", "format", "(", "node", "[", "'id'", "]", ")", ",", "location", "=", "node", "[", "'location'", "]", ",", "data", "=", "{", "'action'", ":", "'reboot'", "}", ")", "return", "ret", "[", "0", "]", "in", "VALID_RESPONSE_CODES" ]
29.833333
0.001805
def call_actions_parallel_future(self, service_name, actions, **kwargs): """ This method is identical in signature and behavior to `call_actions_parallel`, except that it sends the requests and then immediately returns a `FutureResponse` instead of blocking waiting on responses and returning a generator. Just call `result(timeout=None)` on the future response to block for an available response (which will be a generator). Some of the possible exceptions may be raised when this method is called; others may be raised when the future is used. If argument `raise_job_errors` is supplied and is `False`, some items in the result list might be lists of job errors instead of individual `ActionResponse`s. Be sure to check for that if used in this manner. If argument `catch_transport_errors` is supplied and is `True`, some items in the result list might be instances of `Exception` instead of individual `ActionResponse`s. Be sure to check for that if used in this manner. :return: A generator of action responses that blocks waiting on responses once you begin iteration :rtype: Client.FutureResponse """ job_responses = self.call_jobs_parallel_future( jobs=({'service_name': service_name, 'actions': [action]} for action in actions), **kwargs ) def parse_results(results): for job in results: if isinstance(job, Exception): yield job elif job.errors: yield job.errors else: yield job.actions[0] return self.FutureResponse(lambda _timeout: (x for x in parse_results(job_responses.result(_timeout))))
[ "def", "call_actions_parallel_future", "(", "self", ",", "service_name", ",", "actions", ",", "*", "*", "kwargs", ")", ":", "job_responses", "=", "self", ".", "call_jobs_parallel_future", "(", "jobs", "=", "(", "{", "'service_name'", ":", "service_name", ",", "'actions'", ":", "[", "action", "]", "}", "for", "action", "in", "actions", ")", ",", "*", "*", "kwargs", ")", "def", "parse_results", "(", "results", ")", ":", "for", "job", "in", "results", ":", "if", "isinstance", "(", "job", ",", "Exception", ")", ":", "yield", "job", "elif", "job", ".", "errors", ":", "yield", "job", ".", "errors", "else", ":", "yield", "job", ".", "actions", "[", "0", "]", "return", "self", ".", "FutureResponse", "(", "lambda", "_timeout", ":", "(", "x", "for", "x", "in", "parse_results", "(", "job_responses", ".", "result", "(", "_timeout", ")", ")", ")", ")" ]
54.84375
0.007279
def norm(x, encoding="latin1"):
    "Convert accents encoded in ISO 8859-1 (or another encoding) to plain ASCII"
    if not isinstance(x, basestring):
        x = unicode(x)
    elif isinstance(x, str):
        x = x.decode(encoding, 'ignore')
    return unicodedata.normalize('NFKD', x).encode('ASCII', 'ignore')
[ "def", "norm", "(", "x", ",", "encoding", "=", "\"latin1\"", ")", ":", "if", "not", "isinstance", "(", "x", ",", "basestring", ")", ":", "x", "=", "unicode", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "str", ")", ":", "x", "=", "x", ".", "decode", "(", "encoding", ",", "'ignore'", ")", "return", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "x", ")", ".", "encode", "(", "'ASCII'", ",", "'ignore'", ")" ]
42.857143
0.003268
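The same NFKD trick in a self-contained Python 3 sketch (the function above is Python 2, hence basestring/unicode): decomposition splits 'é' into 'e' plus a combining accent, and the ASCII encode with errors='ignore' drops the accent.

    import unicodedata

    def norm3(text):
        # NFKD decomposition, then drop anything outside ASCII.
        return unicodedata.normalize('NFKD', text).encode('ASCII', 'ignore')

    assert norm3(u'caf\xe9') == b'cafe'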
def isa_from_graph(graph: nx.Graph, oneq_type='Xhalves', twoq_type='CZ') -> ISA: """ Generate an ISA object from a NetworkX graph. :param graph: The graph :param oneq_type: The type of 1-qubit gate. Currently 'Xhalves' :param twoq_type: The type of 2-qubit gate. One of 'CZ' or 'CPHASE'. """ all_qubits = list(range(max(graph.nodes) + 1)) qubits = [Qubit(i, type=oneq_type, dead=i not in graph.nodes) for i in all_qubits] edges = [Edge(sorted((a, b)), type=twoq_type, dead=False) for a, b in graph.edges] return ISA(qubits, edges)
[ "def", "isa_from_graph", "(", "graph", ":", "nx", ".", "Graph", ",", "oneq_type", "=", "'Xhalves'", ",", "twoq_type", "=", "'CZ'", ")", "->", "ISA", ":", "all_qubits", "=", "list", "(", "range", "(", "max", "(", "graph", ".", "nodes", ")", "+", "1", ")", ")", "qubits", "=", "[", "Qubit", "(", "i", ",", "type", "=", "oneq_type", ",", "dead", "=", "i", "not", "in", "graph", ".", "nodes", ")", "for", "i", "in", "all_qubits", "]", "edges", "=", "[", "Edge", "(", "sorted", "(", "(", "a", ",", "b", ")", ")", ",", "type", "=", "twoq_type", ",", "dead", "=", "False", ")", "for", "a", ",", "b", "in", "graph", ".", "edges", "]", "return", "ISA", "(", "qubits", ",", "edges", ")" ]
46.666667
0.007005
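A hedged usage sketch; it assumes the Qubit/Edge/ISA classes imported by the surrounding module expose dead as an attribute (namedtuple-style). A graph on nodes 0 and 2 leaves qubit 1 padded in but marked dead:

    import networkx as nx

    graph = nx.Graph([(0, 2)])
    isa = isa_from_graph(graph)
    assert len(isa.qubits) == 3   # padded up to max(graph.nodes) + 1
    assert isa.qubits[1].dead     # node 1 is absent from the graph
    assert len(isa.edges) == 1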
def serialize_model(model): """Serialize the HTK model into a file. :param model: Model to be serialized """ result = '' # First serialize the macros for macro in model['macros']: if macro.get('options', None): result += '~o ' for option in macro['options']['definition']: result += _serialize_option(option) elif macro.get('transition', None): result += '~t "{}"\n'.format(macro['transition']['name']) result += _serialize_transp(macro['transition']['definition']) elif macro.get('variance', None): result += '~v "{}"\n'.format(macro['variance']['name']) result += _serialize_variance(macro['variance']['definition']) elif macro.get('state', None): result += '~s "{}"\n'.format(macro['state']['name']) result += _serialize_stateinfo(macro['state']['definition']) elif macro.get('mean', None): result += '~u "{}"\n'.format(macro['mean']['name']) result += _serialize_mean(macro['mean']['definition']) elif macro.get('duration', None): result += '~d "{}"\n'.format(macro['duration']['name']) result += _serialize_duration(macro['duration']['definition']) else: raise NotImplementedError('Cannot serialize {}'.format(macro)) for hmm in model['hmms']: if hmm.get('name', None) is not None: result += '~h "{}"\n'.format(hmm['name']) result += _serialize_hmm(hmm['definition']) return result
[ "def", "serialize_model", "(", "model", ")", ":", "result", "=", "''", "# First serialize the macros", "for", "macro", "in", "model", "[", "'macros'", "]", ":", "if", "macro", ".", "get", "(", "'options'", ",", "None", ")", ":", "result", "+=", "'~o '", "for", "option", "in", "macro", "[", "'options'", "]", "[", "'definition'", "]", ":", "result", "+=", "_serialize_option", "(", "option", ")", "elif", "macro", ".", "get", "(", "'transition'", ",", "None", ")", ":", "result", "+=", "'~t \"{}\"\\n'", ".", "format", "(", "macro", "[", "'transition'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_transp", "(", "macro", "[", "'transition'", "]", "[", "'definition'", "]", ")", "elif", "macro", ".", "get", "(", "'variance'", ",", "None", ")", ":", "result", "+=", "'~v \"{}\"\\n'", ".", "format", "(", "macro", "[", "'variance'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_variance", "(", "macro", "[", "'variance'", "]", "[", "'definition'", "]", ")", "elif", "macro", ".", "get", "(", "'state'", ",", "None", ")", ":", "result", "+=", "'~s \"{}\"\\n'", ".", "format", "(", "macro", "[", "'state'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_stateinfo", "(", "macro", "[", "'state'", "]", "[", "'definition'", "]", ")", "elif", "macro", ".", "get", "(", "'mean'", ",", "None", ")", ":", "result", "+=", "'~u \"{}\"\\n'", ".", "format", "(", "macro", "[", "'mean'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_mean", "(", "macro", "[", "'mean'", "]", "[", "'definition'", "]", ")", "elif", "macro", ".", "get", "(", "'duration'", ",", "None", ")", ":", "result", "+=", "'~d \"{}\"\\n'", ".", "format", "(", "macro", "[", "'duration'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_duration", "(", "macro", "[", "'duration'", "]", "[", "'definition'", "]", ")", "else", ":", "raise", "NotImplementedError", "(", "'Cannot serialize {}'", ".", "format", "(", "macro", ")", ")", "for", "hmm", "in", "model", "[", "'hmms'", "]", ":", "if", "hmm", ".", "get", "(", "'name'", ",", "None", ")", "is", "not", "None", ":", "result", "+=", "'~h \"{}\"\\n'", ".", "format", "(", "hmm", "[", "'name'", "]", ")", "result", "+=", "_serialize_hmm", "(", "hmm", "[", "'definition'", "]", ")", "return", "result" ]
34.266667
0.000631
def transliterate(mode, string, ignore='', reverse=False):
    # @todo: arabtex and iso8859-6 need individual handling because in some cases using one-two mapping
    """
    encode & decode different romanization systems

    :param mode: name of the romanization system to apply
    :param string: the text to transliterate
    :param ignore: characters to leave untouched
    :param reverse: if True, map the romanization back to the original script
    :return: the transliterated string
    """
    if mode in available_transliterate_systems():
        MAPPING = ROMANIZATION_SYSTEMS_MAPPINGS[mode]
    else:
        print(mode + " not supported! \n")
        MAPPING = {}
    if reverse:
        mapping = {}
        for k, v in MAPPING.items():  # reverse the mapping buckwalter <-> unicode
            mapping[v] = k
    else:
        mapping = MAPPING
    result = ""
    for char in string:
        if char in mapping.keys() and char not in ignore:
            result += mapping[char]
        else:
            result += char
    return result
[ "def", "transliterate", "(", "mode", ",", "string", ",", "ignore", "=", "''", ",", "reverse", "=", "False", ")", ":", "# @todo: arabtex and iso8859-6 need individual handling because in some cases using one-two mapping", "if", "mode", "in", "available_transliterate_systems", "(", ")", ":", "MAPPING", "=", "ROMANIZATION_SYSTEMS_MAPPINGS", "[", "mode", "]", "else", ":", "print", "(", "mode", "+", "\" not supported! \\n\"", ")", "MAPPING", "=", "{", "}", "if", "reverse", ":", "mapping", "=", "{", "}", "for", "k", ",", "v", "in", "MAPPING", ".", "items", "(", ")", ":", "# reverse the mapping buckwalter <-> unicode", "mapping", "[", "v", "]", "=", "k", "else", ":", "mapping", "=", "MAPPING", "result", "=", "\"\"", "for", "char", "in", "string", ":", "if", "char", "in", "mapping", ".", "keys", "(", ")", "and", "char", "not", "in", "ignore", ":", "result", "+=", "mapping", "[", "char", "]", "else", ":", "result", "+=", "char", "return", "result" ]
26.84375
0.007865
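A self-contained sketch of the forward/reverse character mapping at the core of transliterate; the two-entry table below is illustrative, not a real romanization system.

    MAPPING = {u'\u0628': 'b', u'\u062a': 't'}

    def apply_mapping(text, mapping, reverse=False):
        if reverse:
            # invert the table, as transliterate does for reverse=True
            mapping = {v: k for k, v in mapping.items()}
        return ''.join(mapping.get(ch, ch) for ch in text)

    assert apply_mapping(u'\u0628\u062a', MAPPING) == 'bt'
    assert apply_mapping('bt', MAPPING, reverse=True) == u'\u0628\u062a'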
def make(self): """ Make the lock file. """ try: # Create the lock file self.mkfile(self.lock_file) except Exception as e: self.die('Failed to generate lock file: {}'.format(str(e)))
[ "def", "make", "(", "self", ")", ":", "try", ":", "# Create the lock file", "self", ".", "mkfile", "(", "self", ".", "lock_file", ")", "except", "Exception", "as", "e", ":", "self", ".", "die", "(", "'Failed to generate lock file: {}'", ".", "format", "(", "str", "(", "e", ")", ")", ")" ]
26.6
0.010909
def attach(self, lun_or_snap, skip_hlu_0=False):
    """ Attaches lun, snap or member snap of cg snap to host.

    Don't pass cg snapshot in as `lun_or_snap`.

    :param lun_or_snap: the lun, snap, or a member snap of cg snap
    :param skip_hlu_0: whether to skip hlu 0
    :return: the hlu number
    """
    # The `UnityResourceAlreadyAttachedError` check was removed because
    # there is a host cache in the Cinder driver. If the lun was attached
    # to the host and the info was stored in the cache, the wrong hlu
    # would be returned.
    # Attaching a lun to a host twice would succeed; if a Cinder retry
    # triggers another attachment of the same lun to the host, the cost
    # is one more rest request of `modifyLun` and one host instance
    # query.
    try:
        return self._attach_with_retry(lun_or_snap, skip_hlu_0)
    except ex.SystemAPINotSupported:
        # Attaching snap to host is not supported before 4.1.
        raise
    except ex.UnityAttachExceedLimitError:
        # The number of luns exceeds system limit
        raise
    except:  # noqa
        # other attach error, remove this lun if already attached
        self.detach(lun_or_snap)
        raise
[ "def", "attach", "(", "self", ",", "lun_or_snap", ",", "skip_hlu_0", "=", "False", ")", ":", "# `UnityResourceAlreadyAttachedError` check was removed due to there", "# is a host cache existing in Cinder driver. If the lun was attached to", "# the host and the info was stored in the cache, wrong hlu would be", "# returned.", "# And attaching a lun to a host twice would success, if Cinder retry", "# triggers another attachment of same lun to the host, the cost would", "# be one more rest request of `modifyLun` and one for host instance", "# query.", "try", ":", "return", "self", ".", "_attach_with_retry", "(", "lun_or_snap", ",", "skip_hlu_0", ")", "except", "ex", ".", "SystemAPINotSupported", ":", "# Attaching snap to host not support before 4.1.", "raise", "except", "ex", ".", "UnityAttachExceedLimitError", ":", "# The number of luns exceeds system limit", "raise", "except", ":", "# noqa", "# other attach error, remove this lun if already attached", "self", ".", "detach", "(", "lun_or_snap", ")", "raise" ]
41.096774
0.001534
def artist(self): """ :class:`Artist` object of album's artist """ if not self._artist: self._artist = Artist(self._artist_id, self._artist_name, self._connection) return self._artist
[ "def", "artist", "(", "self", ")", ":", "if", "not", "self", ".", "_artist", ":", "self", ".", "_artist", "=", "Artist", "(", "self", ".", "_artist_id", ",", "self", ".", "_artist_name", ",", "self", ".", "_connection", ")", "return", "self", ".", "_artist" ]
32.714286
0.012766
def _nullpager(stream, generator, color): """Simply print unformatted text. This is the ultimate fallback.""" for text in generator: if not color: text = strip_ansi(text) stream.write(text)
[ "def", "_nullpager", "(", "stream", ",", "generator", ",", "color", ")", ":", "for", "text", "in", "generator", ":", "if", "not", "color", ":", "text", "=", "strip_ansi", "(", "text", ")", "stream", ".", "write", "(", "text", ")" ]
36.833333
0.004425
def djng_locale_script(context, default_language='en'): """ Returns a script tag for including the proper locale script in any HTML page. This tag determines the current language with its locale. Usage: <script src="{% static 'node_modules/angular-i18n/' %}{% djng_locale_script %}"></script> or, if used with a default language: <script src="{% static 'node_modules/angular-i18n/' %}{% djng_locale_script 'de' %}"></script> """ language = get_language_from_request(context['request']) if not language: language = default_language return format_html('angular-locale_{}.js', language.lower())
[ "def", "djng_locale_script", "(", "context", ",", "default_language", "=", "'en'", ")", ":", "language", "=", "get_language_from_request", "(", "context", "[", "'request'", "]", ")", "if", "not", "language", ":", "language", "=", "default_language", "return", "format_html", "(", "'angular-locale_{}.js'", ",", "language", ".", "lower", "(", ")", ")" ]
45.642857
0.006135
def _report_profile(self, command, lock_name, elapsed_time, memory): """ Writes a string to self.pipeline_profile_file. """ message_raw = str(command) + "\t " + \ str(lock_name) + "\t" + \ str(datetime.timedelta(seconds = round(elapsed_time, 2))) + "\t " + \ str(memory) with open(self.pipeline_profile_file, "a") as myfile: myfile.write(message_raw + "\n")
[ "def", "_report_profile", "(", "self", ",", "command", ",", "lock_name", ",", "elapsed_time", ",", "memory", ")", ":", "message_raw", "=", "str", "(", "command", ")", "+", "\"\\t \"", "+", "str", "(", "lock_name", ")", "+", "\"\\t\"", "+", "str", "(", "datetime", ".", "timedelta", "(", "seconds", "=", "round", "(", "elapsed_time", ",", "2", ")", ")", ")", "+", "\"\\t \"", "+", "str", "(", "memory", ")", "with", "open", "(", "self", ".", "pipeline_profile_file", ",", "\"a\"", ")", "as", "myfile", ":", "myfile", ".", "write", "(", "message_raw", "+", "\"\\n\"", ")" ]
39.636364
0.011211
def getEdgeDirected(self, networkId, edgeId, verbose=None): """ Returns true if the edge specified by the `edgeId` and `networkId` parameters is directed. :param networkId: SUID of the network containing the edge :param edgeId: SUID of the edge :param verbose: print more :returns: 200: successful operation """ response=api(url=self.___url+'networks/'+str(networkId)+'/edges/'+str(edgeId)+'/isDirected', method="GET", verbose=verbose, parse_params=False) return response
[ "def", "getEdgeDirected", "(", "self", ",", "networkId", ",", "edgeId", ",", "verbose", "=", "None", ")", ":", "response", "=", "api", "(", "url", "=", "self", ".", "___url", "+", "'networks/'", "+", "str", "(", "networkId", ")", "+", "'/edges/'", "+", "str", "(", "edgeId", ")", "+", "'/isDirected'", ",", "method", "=", "\"GET\"", ",", "verbose", "=", "verbose", ",", "parse_params", "=", "False", ")", "return", "response" ]
41.076923
0.009158
def GetHTTPHeaders(self): """Returns the HTTP headers required for request authorization. Returns: A dictionary containing the required headers. """ http_headers = self._adwords_client.oauth2_client.CreateHttpHeader() if self.enable_compression: http_headers['accept-encoding'] = 'gzip' http_headers.update(self.custom_http_headers) return http_headers
[ "def", "GetHTTPHeaders", "(", "self", ")", ":", "http_headers", "=", "self", ".", "_adwords_client", ".", "oauth2_client", ".", "CreateHttpHeader", "(", ")", "if", "self", ".", "enable_compression", ":", "http_headers", "[", "'accept-encoding'", "]", "=", "'gzip'", "http_headers", ".", "update", "(", "self", ".", "custom_http_headers", ")", "return", "http_headers" ]
29.461538
0.005063
def gene_dir(self): """Gene folder""" if self.root_dir: return op.join(self.root_dir, self.id) else: return None
[ "def", "gene_dir", "(", "self", ")", ":", "if", "self", ".", "root_dir", ":", "return", "op", ".", "join", "(", "self", ".", "root_dir", ",", "self", ".", "id", ")", "else", ":", "return", "None" ]
25.833333
0.0125
def policy_map_clss_priority_mapping_table_imprt_cee(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") policy_map = ET.SubElement(config, "policy-map", xmlns="urn:brocade.com:mgmt:brocade-policer") po_name_key = ET.SubElement(policy_map, "po-name") po_name_key.text = kwargs.pop('po_name') clss = ET.SubElement(policy_map, "class") cl_name_key = ET.SubElement(clss, "cl-name") cl_name_key.text = kwargs.pop('cl_name') priority_mapping_table = ET.SubElement(clss, "priority-mapping-table") imprt = ET.SubElement(priority_mapping_table, "import") cee = ET.SubElement(imprt, "cee") cee.text = kwargs.pop('cee') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "policy_map_clss_priority_mapping_table_imprt_cee", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "policy_map", "=", "ET", ".", "SubElement", "(", "config", ",", "\"policy-map\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-policer\"", ")", "po_name_key", "=", "ET", ".", "SubElement", "(", "policy_map", ",", "\"po-name\"", ")", "po_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'po_name'", ")", "clss", "=", "ET", ".", "SubElement", "(", "policy_map", ",", "\"class\"", ")", "cl_name_key", "=", "ET", ".", "SubElement", "(", "clss", ",", "\"cl-name\"", ")", "cl_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'cl_name'", ")", "priority_mapping_table", "=", "ET", ".", "SubElement", "(", "clss", ",", "\"priority-mapping-table\"", ")", "imprt", "=", "ET", ".", "SubElement", "(", "priority_mapping_table", ",", "\"import\"", ")", "cee", "=", "ET", ".", "SubElement", "(", "imprt", ",", "\"cee\"", ")", "cee", ".", "text", "=", "kwargs", ".", "pop", "(", "'cee'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
47.647059
0.003632
def fulfill(self, agreement_id, message, account_address, signature, from_account):
    """
    Fulfill the sign condition.

    :param agreement_id: id of the agreement, hex str
    :param message: the message that was signed, hex str
    :param account_address: ethereum account address, hex str
    :param signature: signed agreement hash, hex str
    :param from_account: Account doing the transaction
    :return:
    """
    return self._fulfill(
        agreement_id,
        message,
        account_address,
        signature,
        transact={'from': from_account.address,
                  'passphrase': from_account.password}
    )
[ "def", "fulfill", "(", "self", ",", "agreement_id", ",", "message", ",", "account_address", ",", "signature", ",", "from_account", ")", ":", "return", "self", ".", "_fulfill", "(", "agreement_id", ",", "message", ",", "account_address", ",", "signature", ",", "transact", "=", "{", "'from'", ":", "from_account", ".", "address", ",", "'passphrase'", ":", "from_account", ".", "password", "}", ")" ]
34.526316
0.004451
def distances(self):
    """The matrix with the all-pairs shortest path lengths"""
    from molmod.ext import graphs_floyd_warshall
    distances = np.zeros((self.num_vertices,)*2, dtype=int)
    #distances[:] = -1 # set all -1, which is just a very big integer
    #distances.ravel()[::len(distances)+1] = 0 # set diagonal to zero
    for i, j in self.edges:  # set edges to one
        distances[i, j] = 1
        distances[j, i] = 1
    graphs_floyd_warshall(distances)
    return distances
[ "def", "distances", "(", "self", ")", ":", "from", "molmod", ".", "ext", "import", "graphs_floyd_warshall", "distances", "=", "np", ".", "zeros", "(", "(", "self", ".", "num_vertices", ",", ")", "*", "2", ",", "dtype", "=", "int", ")", "#distances[:] = -1 # set all -1, which is just a very big integer", "#distances.ravel()[::len(distances)+1] = 0 # set diagonal to zero", "for", "i", ",", "j", "in", "self", ".", "edges", ":", "# set edges to one", "distances", "[", "i", ",", "j", "]", "=", "1", "distances", "[", "j", ",", "i", "]", "=", "1", "graphs_floyd_warshall", "(", "distances", ")", "return", "distances" ]
47.454545
0.009398
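graphs_floyd_warshall is a C extension, but the pass it performs is the standard Floyd-Warshall relaxation; a pure-Python sketch follows, with an explicit infinity in place of the big-integer trick mentioned in the comments above.

    def floyd_warshall(dist):
        # Relax every pair (i, j) through every intermediate vertex k.
        n = len(dist)
        for k in range(n):
            for i in range(n):
                for j in range(n):
                    via_k = dist[i][k] + dist[k][j]
                    if via_k < dist[i][j]:
                        dist[i][j] = via_k

    INF = 10 ** 9                 # stand-in for "no path known yet"
    d = [[0, 1, INF],             # edges (0, 1) and (1, 2)
         [1, 0, 1],
         [INF, 1, 0]]
    floyd_warshall(d)
    assert d[0][2] == 2           # shortest path 0 -> 1 -> 2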
def find_match(self):
    """Try to find a pattern that matches the source and call a parser
    method to create Python objects.

    A callback that raises an IgnoredMatchException indicates that the
    given string data is ignored by the parser and no objects are created.

    If none of the patterns match, a NoMatchException is raised.
    """
    for pattern, callback in self.rules:
        match = pattern.match(self.source, pos=self.pos)
        if not match:
            continue
        try:
            node = callback(match)
        except IgnoredMatchException:
            pass
        else:
            self.seen.append(node)
        return match
    raise NoMatchException(
        'None of the known patterns match for {}'
        ''.format(self.source[self.pos:])
    )
[ "def", "find_match", "(", "self", ")", ":", "for", "pattern", ",", "callback", "in", "self", ".", "rules", ":", "match", "=", "pattern", ".", "match", "(", "self", ".", "source", ",", "pos", "=", "self", ".", "pos", ")", "if", "not", "match", ":", "continue", "try", ":", "node", "=", "callback", "(", "match", ")", "except", "IgnoredMatchException", ":", "pass", "else", ":", "self", ".", "seen", ".", "append", "(", "node", ")", "return", "match", "raise", "NoMatchException", "(", "'None of the known patterns match for {}'", "''", ".", "format", "(", "self", ".", "source", "[", "self", ".", "pos", ":", "]", ")", ")" ]
30.357143
0.002281
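A minimal sketch of the rules table this scanner iterates over: each rule pairs a compiled regex with a callback that builds a node (the token kinds below are illustrative).

    import re

    rules = [
        (re.compile(r'\d+'), lambda m: ('NUMBER', int(m.group()))),
        (re.compile(r'[a-z]+'), lambda m: ('NAME', m.group())),
    ]

    def first_match(source, pos=0):
        for pattern, callback in rules:
            match = pattern.match(source, pos)
            if match:
                return callback(match)
        raise ValueError('no pattern matches at position %d' % pos)

    assert first_match('42abc') == ('NUMBER', 42)
    assert first_match('42abc', pos=2) == ('NAME', 'abc')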
def update_relationship(self, relationship_form): """Updates an existing relationship. arg: relationship_form (osid.relationship.RelationshipForm): the form containing the elements to be updated raise: IllegalState - ``relationship_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``relationship_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``relationship_form`` did not originate from ``get_relationship_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.update_resource_template collection = JSONClientValidated('relationship', collection='Relationship', runtime=self._runtime) if not isinstance(relationship_form, ABCRelationshipForm): raise errors.InvalidArgument('argument type is not an RelationshipForm') if not relationship_form.is_for_update(): raise errors.InvalidArgument('the RelationshipForm is for update only, not create') try: if self._forms[relationship_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('relationship_form already used in an update transaction') except KeyError: raise errors.Unsupported('relationship_form did not originate from this session') if not relationship_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') collection.save(relationship_form._my_map) self._forms[relationship_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned: return objects.Relationship( osid_object_map=relationship_form._my_map, runtime=self._runtime, proxy=self._proxy)
[ "def", "update_relationship", "(", "self", ",", "relationship_form", ")", ":", "# Implemented from template for", "# osid.resource.ResourceAdminSession.update_resource_template", "collection", "=", "JSONClientValidated", "(", "'relationship'", ",", "collection", "=", "'Relationship'", ",", "runtime", "=", "self", ".", "_runtime", ")", "if", "not", "isinstance", "(", "relationship_form", ",", "ABCRelationshipForm", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'argument type is not an RelationshipForm'", ")", "if", "not", "relationship_form", ".", "is_for_update", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'the RelationshipForm is for update only, not create'", ")", "try", ":", "if", "self", ".", "_forms", "[", "relationship_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "==", "UPDATED", ":", "raise", "errors", ".", "IllegalState", "(", "'relationship_form already used in an update transaction'", ")", "except", "KeyError", ":", "raise", "errors", ".", "Unsupported", "(", "'relationship_form did not originate from this session'", ")", "if", "not", "relationship_form", ".", "is_valid", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'one or more of the form elements is invalid'", ")", "collection", ".", "save", "(", "relationship_form", ".", "_my_map", ")", "self", ".", "_forms", "[", "relationship_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "UPDATED", "# Note: this is out of spec. The OSIDs don't require an object to be returned:", "return", "objects", ".", "Relationship", "(", "osid_object_map", "=", "relationship_form", ".", "_my_map", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
53.170732
0.004054
def make_path(*sub_paths):
    """
    Create a path from a list of sub paths.

    :param sub_paths: a list of sub paths
    :return: the quoted sub paths joined into a single path with a leading slash
    """
    queued_params = [quote(c.encode('utf-8'), '')
                     for c in sub_paths if c not in NULL_VALUES]
    queued_params.insert(0, '')
    return '/'.join(queued_params)
[ "def", "make_path", "(", "*", "sub_paths", ")", ":", "queued_params", "=", "[", "quote", "(", "c", ".", "encode", "(", "'utf-8'", ")", ",", "''", ")", "for", "c", "in", "sub_paths", "if", "c", "not", "in", "NULL_VALUES", "]", "queued_params", ".", "insert", "(", "0", ",", "''", ")", "return", "'/'", ".", "join", "(", "queued_params", ")" ]
36.222222
0.008982
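A self-contained sketch of the quoting and joining, using the stdlib urllib.parse.quote as a stand-in for the module's own quote import: percent-encode each component with safe='' and join with a leading slash.

    from urllib.parse import quote  # stand-in for the module's quote import

    segments = ['people', 'Fernando Perez']
    path = '/' + '/'.join(quote(s.encode('utf-8'), '') for s in segments)
    assert path == '/people/Fernando%20Perez'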
def result(self): """Return the context result object pulled from the persistence_engine if it has been set. """ if not self._result: if not self._persistence_engine: return None self._result = self._persistence_engine.get_context_result(self) return self._result
[ "def", "result", "(", "self", ")", ":", "if", "not", "self", ".", "_result", ":", "if", "not", "self", ".", "_persistence_engine", ":", "return", "None", "self", ".", "_result", "=", "self", ".", "_persistence_engine", ".", "get_context_result", "(", "self", ")", "return", "self", ".", "_result" ]
30.454545
0.005797
def _add_match(self, match_key, match_value): """Adds a match key/value""" if match_key is None: raise errors.NullArgument() self._query_terms[match_key] = str(match_key) + '=' + str(match_value)
[ "def", "_add_match", "(", "self", ",", "match_key", ",", "match_value", ")", ":", "if", "match_key", "is", "None", ":", "raise", "errors", ".", "NullArgument", "(", ")", "self", ".", "_query_terms", "[", "match_key", "]", "=", "str", "(", "match_key", ")", "+", "'='", "+", "str", "(", "match_value", ")" ]
45.4
0.008658
def _buffer_iter_rows(self, start): """ Read in the buffer for iteration """ self._row_buffer = self[start:start+self._iter_row_buffer] # start back at the front of the buffer self._row_buffer_index = 0
[ "def", "_buffer_iter_rows", "(", "self", ",", "start", ")", ":", "self", ".", "_row_buffer", "=", "self", "[", "start", ":", "start", "+", "self", ".", "_iter_row_buffer", "]", "# start back at the front of the buffer", "self", ".", "_row_buffer_index", "=", "0" ]
30.5
0.007968
def grab_token(host, email, password): """Grab token from gateway. Press sync button before running.""" urllib3.disable_warnings() url = ('https://' + host + '/gwr/gop.php?cmd=GWRLogin&data=<gip><version>1</version><email>' + str(email) + '</email><password>' + str(password) + '</password></gip>&fmt=xml') response = requests.get(url, verify=False) if '<rc>404</rc>' in response.text: raise PermissionError('Not In Pairing Mode') parsed = xmltodict.parse(response.content) parsed = parsed['gip']['token'] return parsed
[ "def", "grab_token", "(", "host", ",", "email", ",", "password", ")", ":", "urllib3", ".", "disable_warnings", "(", ")", "url", "=", "(", "'https://'", "+", "host", "+", "'/gwr/gop.php?cmd=GWRLogin&data=<gip><version>1</version><email>'", "+", "str", "(", "email", ")", "+", "'</email><password>'", "+", "str", "(", "password", ")", "+", "'</password></gip>&fmt=xml'", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "verify", "=", "False", ")", "if", "'<rc>404</rc>'", "in", "response", ".", "text", ":", "raise", "PermissionError", "(", "'Not In Pairing Mode'", ")", "parsed", "=", "xmltodict", ".", "parse", "(", "response", ".", "content", ")", "parsed", "=", "parsed", "[", "'gip'", "]", "[", "'token'", "]", "return", "parsed" ]
55
0.003578
def _bbox_to_area_polygon(self, bbox): """Transforms bounding box into a polygon object in the area CRS. :param bbox: A bounding box :type bbox: BBox :return: A polygon :rtype: shapely.geometry.polygon.Polygon """ projected_bbox = bbox.transform(self.crs) return projected_bbox.geometry
[ "def", "_bbox_to_area_polygon", "(", "self", ",", "bbox", ")", ":", "projected_bbox", "=", "bbox", ".", "transform", "(", "self", ".", "crs", ")", "return", "projected_bbox", ".", "geometry" ]
34.2
0.005698
def get_symops(self, data): """ In order to generate symmetry equivalent positions, the symmetry operations are parsed. If the symops are not present, the space group symbol is parsed, and symops are generated. """ symops = [] for symmetry_label in ["_symmetry_equiv_pos_as_xyz", "_symmetry_equiv_pos_as_xyz_", "_space_group_symop_operation_xyz", "_space_group_symop_operation_xyz_"]: if data.data.get(symmetry_label): xyz = data.data.get(symmetry_label) if isinstance(xyz, str): msg = "A 1-line symmetry op P1 CIF is detected!" warnings.warn(msg) self.errors.append(msg) xyz = [xyz] try: symops = [SymmOp.from_xyz_string(s) for s in xyz] break except ValueError: continue if not symops: # Try to parse symbol for symmetry_label in ["_symmetry_space_group_name_H-M", "_symmetry_space_group_name_H_M", "_symmetry_space_group_name_H-M_", "_symmetry_space_group_name_H_M_", "_space_group_name_Hall", "_space_group_name_Hall_", "_space_group_name_H-M_alt", "_space_group_name_H-M_alt_", "_symmetry_space_group_name_hall", "_symmetry_space_group_name_hall_", "_symmetry_space_group_name_h-m", "_symmetry_space_group_name_h-m_"]: sg = data.data.get(symmetry_label) if sg: sg = sub_spgrp(sg) try: spg = space_groups.get(sg) if spg: symops = SpaceGroup(spg).symmetry_ops msg = "No _symmetry_equiv_pos_as_xyz type key found. " \ "Spacegroup from %s used." % symmetry_label warnings.warn(msg) self.errors.append(msg) break except ValueError: # Ignore any errors pass try: for d in _get_cod_data(): if sg == re.sub(r"\s+", "", d["hermann_mauguin"]): xyz = d["symops"] symops = [SymmOp.from_xyz_string(s) for s in xyz] msg = "No _symmetry_equiv_pos_as_xyz type key found. " \ "Spacegroup from %s used." % symmetry_label warnings.warn(msg) self.errors.append(msg) break except Exception as ex: continue if symops: break if not symops: # Try to parse International number for symmetry_label in ["_space_group_IT_number", "_space_group_IT_number_", "_symmetry_Int_Tables_number", "_symmetry_Int_Tables_number_"]: if data.data.get(symmetry_label): try: i = int(str2float(data.data.get(symmetry_label))) symops = SpaceGroup.from_int_number(i).symmetry_ops break except ValueError: continue if not symops: msg = "No _symmetry_equiv_pos_as_xyz type key found. " \ "Defaulting to P1." warnings.warn(msg) self.errors.append(msg) symops = [SymmOp.from_xyz_string(s) for s in ['x', 'y', 'z']] return symops
[ "def", "get_symops", "(", "self", ",", "data", ")", ":", "symops", "=", "[", "]", "for", "symmetry_label", "in", "[", "\"_symmetry_equiv_pos_as_xyz\"", ",", "\"_symmetry_equiv_pos_as_xyz_\"", ",", "\"_space_group_symop_operation_xyz\"", ",", "\"_space_group_symop_operation_xyz_\"", "]", ":", "if", "data", ".", "data", ".", "get", "(", "symmetry_label", ")", ":", "xyz", "=", "data", ".", "data", ".", "get", "(", "symmetry_label", ")", "if", "isinstance", "(", "xyz", ",", "str", ")", ":", "msg", "=", "\"A 1-line symmetry op P1 CIF is detected!\"", "warnings", ".", "warn", "(", "msg", ")", "self", ".", "errors", ".", "append", "(", "msg", ")", "xyz", "=", "[", "xyz", "]", "try", ":", "symops", "=", "[", "SymmOp", ".", "from_xyz_string", "(", "s", ")", "for", "s", "in", "xyz", "]", "break", "except", "ValueError", ":", "continue", "if", "not", "symops", ":", "# Try to parse symbol", "for", "symmetry_label", "in", "[", "\"_symmetry_space_group_name_H-M\"", ",", "\"_symmetry_space_group_name_H_M\"", ",", "\"_symmetry_space_group_name_H-M_\"", ",", "\"_symmetry_space_group_name_H_M_\"", ",", "\"_space_group_name_Hall\"", ",", "\"_space_group_name_Hall_\"", ",", "\"_space_group_name_H-M_alt\"", ",", "\"_space_group_name_H-M_alt_\"", ",", "\"_symmetry_space_group_name_hall\"", ",", "\"_symmetry_space_group_name_hall_\"", ",", "\"_symmetry_space_group_name_h-m\"", ",", "\"_symmetry_space_group_name_h-m_\"", "]", ":", "sg", "=", "data", ".", "data", ".", "get", "(", "symmetry_label", ")", "if", "sg", ":", "sg", "=", "sub_spgrp", "(", "sg", ")", "try", ":", "spg", "=", "space_groups", ".", "get", "(", "sg", ")", "if", "spg", ":", "symops", "=", "SpaceGroup", "(", "spg", ")", ".", "symmetry_ops", "msg", "=", "\"No _symmetry_equiv_pos_as_xyz type key found. \"", "\"Spacegroup from %s used.\"", "%", "symmetry_label", "warnings", ".", "warn", "(", "msg", ")", "self", ".", "errors", ".", "append", "(", "msg", ")", "break", "except", "ValueError", ":", "# Ignore any errors", "pass", "try", ":", "for", "d", "in", "_get_cod_data", "(", ")", ":", "if", "sg", "==", "re", ".", "sub", "(", "r\"\\s+\"", ",", "\"\"", ",", "d", "[", "\"hermann_mauguin\"", "]", ")", ":", "xyz", "=", "d", "[", "\"symops\"", "]", "symops", "=", "[", "SymmOp", ".", "from_xyz_string", "(", "s", ")", "for", "s", "in", "xyz", "]", "msg", "=", "\"No _symmetry_equiv_pos_as_xyz type key found. \"", "\"Spacegroup from %s used.\"", "%", "symmetry_label", "warnings", ".", "warn", "(", "msg", ")", "self", ".", "errors", ".", "append", "(", "msg", ")", "break", "except", "Exception", "as", "ex", ":", "continue", "if", "symops", ":", "break", "if", "not", "symops", ":", "# Try to parse International number", "for", "symmetry_label", "in", "[", "\"_space_group_IT_number\"", ",", "\"_space_group_IT_number_\"", ",", "\"_symmetry_Int_Tables_number\"", ",", "\"_symmetry_Int_Tables_number_\"", "]", ":", "if", "data", ".", "data", ".", "get", "(", "symmetry_label", ")", ":", "try", ":", "i", "=", "int", "(", "str2float", "(", "data", ".", "data", ".", "get", "(", "symmetry_label", ")", ")", ")", "symops", "=", "SpaceGroup", ".", "from_int_number", "(", "i", ")", ".", "symmetry_ops", "break", "except", "ValueError", ":", "continue", "if", "not", "symops", ":", "msg", "=", "\"No _symmetry_equiv_pos_as_xyz type key found. 
\"", "\"Defaulting to P1.\"", "warnings", ".", "warn", "(", "msg", ")", "self", ".", "errors", ".", "append", "(", "msg", ")", "symops", "=", "[", "SymmOp", ".", "from_xyz_string", "(", "s", ")", "for", "s", "in", "[", "'x'", ",", "'y'", ",", "'z'", "]", "]", "return", "symops" ]
45.957447
0.001586
def findAttr(self, svgNode, name):
    """Search an attribute with some name in some node or above.

    First the node is searched, then its style attribute, then the
    search continues in the node's parent node. If no such attribute
    is found, '' is returned.
    """
    # This needs also to lookup values like "url(#SomeName)"...
    if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
        if isinstance(svgNode, NodeTracker):
            svgNode.apply_rules(self.css_rules)
        else:
            ElementWrapper(svgNode).apply_rules(self.css_rules)
    attr_value = svgNode.attrib.get(name, '').strip()
    if attr_value and attr_value != "inherit":
        return attr_value
    elif svgNode.attrib.get("style"):
        styles = self.parseMultiAttributes(svgNode.attrib.get("style"))
        if name in styles:
            return styles[name]
    if svgNode.getparent() is not None:
        return self.findAttr(svgNode.getparent(), name)
    return ''
[ "def", "findAttr", "(", "self", ",", "svgNode", ",", "name", ")", ":", "# This needs also to lookup values like \"url(#SomeName)\"...", "if", "self", ".", "css_rules", "is", "not", "None", "and", "not", "svgNode", ".", "attrib", ".", "get", "(", "'__rules_applied'", ",", "False", ")", ":", "if", "isinstance", "(", "svgNode", ",", "NodeTracker", ")", ":", "svgNode", ".", "apply_rules", "(", "self", ".", "css_rules", ")", "else", ":", "ElementWrapper", "(", "svgNode", ")", ".", "apply_rules", "(", "self", ".", "css_rules", ")", "attr_value", "=", "svgNode", ".", "attrib", ".", "get", "(", "name", ",", "''", ")", ".", "strip", "(", ")", "if", "attr_value", "and", "attr_value", "!=", "\"inherit\"", ":", "return", "attr_value", "elif", "svgNode", ".", "attrib", ".", "get", "(", "\"style\"", ")", ":", "dict", "=", "self", ".", "parseMultiAttributes", "(", "svgNode", ".", "attrib", ".", "get", "(", "\"style\"", ")", ")", "if", "name", "in", "dict", ":", "return", "dict", "[", "name", "]", "if", "svgNode", ".", "getparent", "(", ")", "is", "not", "None", ":", "return", "self", ".", "findAttr", "(", "svgNode", ".", "getparent", "(", ")", ",", "name", ")", "return", "''" ]
40.730769
0.002768
def get_redirect_url(self, **kwargs):
    """
    Redirect to the request parameter 'next', or to the referrer if no
    url is defined.
    """
    if 'next' in self.request.REQUEST:
        return self.request.REQUEST.get('next')
    url = RedirectView.get_redirect_url(self, **kwargs)
    if url:
        return url
    return self.request.META.get('HTTP_REFERER')
[ "def", "get_redirect_url", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "request", ".", "REQUEST", ".", "has_key", "(", "'next'", ")", ":", "return", "self", ".", "request", ".", "REQUEST", ".", "get", "(", "'next'", ")", "url", "=", "RedirectView", ".", "get_redirect_url", "(", "self", ",", "*", "*", "kwargs", ")", "if", "url", ":", "return", "url", "return", "self", ".", "request", ".", "META", ".", "get", "(", "'HTTP_REFERER'", ")" ]
39
0.015038
def pseudo_bytes(num):
    """
    Returns num bytes of pseudo random data. Pseudo-random byte
    sequences generated by pseudo_bytes() will be unique if they are of
    sufficient length, but are not necessarily unpredictable. They can
    be used for non-cryptographic purposes and for certain purposes in
    cryptographic protocols, but usually not for key generation etc.
    """
    if num <= 0:
        raise ValueError("'num' should be > 0")
    buf = create_string_buffer(num)
    libcrypto.RAND_pseudo_bytes(buf, num)
    return buf.raw[:num]
[ "def", "pseudo_bytes", "(", "num", ")", ":", "if", "num", "<=", "0", ":", "raise", "ValueError", "(", "\"'num' should be > 0\"", ")", "buf", "=", "create_string_buffer", "(", "num", ")", "libcrypto", ".", "RAND_pseudo_bytes", "(", "buf", ",", "num", ")", "return", "buf", ".", "raw", "[", ":", "num", "]" ]
39
0.001789
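A hedged usage sketch; it assumes the module's libcrypto ctypes binding is loaded. The return value is exactly num raw bytes, and for key material a CSPRNG such as os.urandom is the safer choice, as the docstring warns.

    buf = pseudo_bytes(16)
    assert isinstance(buf, bytes) and len(buf) == 16

    import os
    key = os.urandom(32)  # prefer a CSPRNG for keys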
def get_active_lines(lines, comment_char="#"): """ Returns lines, or parts of lines, from content that are not commented out or completely empty. The resulting lines are all individually stripped. This is useful for parsing many config files such as ifcfg. Parameters: lines (list): List of strings to parse. comment_char (str): String indicating that all chars following are part of a comment and will be removed from the output. Returns: list: List of valid lines remaining in the input. Examples: >>> lines = [ ... 'First line', ... ' ', ... '# Comment line', ... 'Inline comment # comment', ... ' Whitespace ', ... 'Last line'] >>> get_active_lines(lines) ['First line', 'Inline comment', 'Whitespace', 'Last line'] """ return list(filter(None, (line.split(comment_char, 1)[0].strip() for line in lines)))
[ "def", "get_active_lines", "(", "lines", ",", "comment_char", "=", "\"#\"", ")", ":", "return", "list", "(", "filter", "(", "None", ",", "(", "line", ".", "split", "(", "comment_char", ",", "1", ")", "[", "0", "]", ".", "strip", "(", ")", "for", "line", "in", "lines", ")", ")", ")" ]
35.259259
0.002045
def load_data(self): """ Loads data from MRMS GRIB2 files and handles compression duties if files are compressed. """ data = [] loaded_dates = [] loaded_indices = [] for t, timestamp in enumerate(self.all_dates): date_str = timestamp.date().strftime("%Y%m%d") full_path = self.path_start + date_str + "/" if self.variable in os.listdir(full_path): full_path += self.variable + "/" data_files = sorted(os.listdir(full_path)) file_dates = pd.to_datetime([d.split("_")[-1][0:13] for d in data_files]) if timestamp in file_dates: data_file = data_files[np.where(timestamp==file_dates)[0][0]] print(full_path + data_file) if data_file[-2:] == "gz": subprocess.call(["gunzip", full_path + data_file]) file_obj = Nio.open_file(full_path + data_file[:-3]) else: file_obj = Nio.open_file(full_path + data_file) var_name = sorted(file_obj.variables.keys())[0] data.append(file_obj.variables[var_name][:]) if self.lon is None: self.lon = file_obj.variables["lon_0"][:] # Translates longitude values from 0:360 to -180:180 if np.count_nonzero(self.lon > 180) > 0: self.lon -= 360 self.lat = file_obj.variables["lat_0"][:] file_obj.close() if data_file[-2:] == "gz": subprocess.call(["gzip", full_path + data_file[:-3]]) else: subprocess.call(["gzip", full_path + data_file]) loaded_dates.append(timestamp) loaded_indices.append(t) if len(loaded_dates) > 0: self.loaded_dates = pd.DatetimeIndex(loaded_dates) self.data = np.ones((self.all_dates.shape[0], data[0].shape[0], data[0].shape[1])) * -9999 self.data[loaded_indices] = np.array(data)
[ "def", "load_data", "(", "self", ")", ":", "data", "=", "[", "]", "loaded_dates", "=", "[", "]", "loaded_indices", "=", "[", "]", "for", "t", ",", "timestamp", "in", "enumerate", "(", "self", ".", "all_dates", ")", ":", "date_str", "=", "timestamp", ".", "date", "(", ")", ".", "strftime", "(", "\"%Y%m%d\"", ")", "full_path", "=", "self", ".", "path_start", "+", "date_str", "+", "\"/\"", "if", "self", ".", "variable", "in", "os", ".", "listdir", "(", "full_path", ")", ":", "full_path", "+=", "self", ".", "variable", "+", "\"/\"", "data_files", "=", "sorted", "(", "os", ".", "listdir", "(", "full_path", ")", ")", "file_dates", "=", "pd", ".", "to_datetime", "(", "[", "d", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", "[", "0", ":", "13", "]", "for", "d", "in", "data_files", "]", ")", "if", "timestamp", "in", "file_dates", ":", "data_file", "=", "data_files", "[", "np", ".", "where", "(", "timestamp", "==", "file_dates", ")", "[", "0", "]", "[", "0", "]", "]", "print", "(", "full_path", "+", "data_file", ")", "if", "data_file", "[", "-", "2", ":", "]", "==", "\"gz\"", ":", "subprocess", ".", "call", "(", "[", "\"gunzip\"", ",", "full_path", "+", "data_file", "]", ")", "file_obj", "=", "Nio", ".", "open_file", "(", "full_path", "+", "data_file", "[", ":", "-", "3", "]", ")", "else", ":", "file_obj", "=", "Nio", ".", "open_file", "(", "full_path", "+", "data_file", ")", "var_name", "=", "sorted", "(", "file_obj", ".", "variables", ".", "keys", "(", ")", ")", "[", "0", "]", "data", ".", "append", "(", "file_obj", ".", "variables", "[", "var_name", "]", "[", ":", "]", ")", "if", "self", ".", "lon", "is", "None", ":", "self", ".", "lon", "=", "file_obj", ".", "variables", "[", "\"lon_0\"", "]", "[", ":", "]", "# Translates longitude values from 0:360 to -180:180", "if", "np", ".", "count_nonzero", "(", "self", ".", "lon", ">", "180", ")", ">", "0", ":", "self", ".", "lon", "-=", "360", "self", ".", "lat", "=", "file_obj", ".", "variables", "[", "\"lat_0\"", "]", "[", ":", "]", "file_obj", ".", "close", "(", ")", "if", "data_file", "[", "-", "2", ":", "]", "==", "\"gz\"", ":", "subprocess", ".", "call", "(", "[", "\"gzip\"", ",", "full_path", "+", "data_file", "[", ":", "-", "3", "]", "]", ")", "else", ":", "subprocess", ".", "call", "(", "[", "\"gzip\"", ",", "full_path", "+", "data_file", "]", ")", "loaded_dates", ".", "append", "(", "timestamp", ")", "loaded_indices", ".", "append", "(", "t", ")", "if", "len", "(", "loaded_dates", ")", ">", "0", ":", "self", ".", "loaded_dates", "=", "pd", ".", "DatetimeIndex", "(", "loaded_dates", ")", "self", ".", "data", "=", "np", ".", "ones", "(", "(", "self", ".", "all_dates", ".", "shape", "[", "0", "]", ",", "data", "[", "0", "]", ".", "shape", "[", "0", "]", ",", "data", "[", "0", "]", ".", "shape", "[", "1", "]", ")", ")", "*", "-", "9999", "self", ".", "data", "[", "loaded_indices", "]", "=", "np", ".", "array", "(", "data", ")" ]
53
0.003163
def hatchery():
    """ Main entry point for the hatchery program """
    args = docopt.docopt(__doc__)
    task_list = args['<task>']
    if not task_list or 'help' in task_list or args['--help']:
        print(__doc__.format(version=_version.__version__, config_files=config.CONFIG_LOCATIONS))
        return 0
    level_str = args['--log-level']
    try:
        level_const = getattr(logging, level_str.upper())
        logging.basicConfig(level=level_const)
        if level_const == logging.DEBUG:
            workdir.options.debug = True
    except LookupError:
        logging.basicConfig()
        logger.error('received invalid log level: ' + level_str)
        return 1
    for task in task_list:
        if task not in ORDERED_TASKS:
            logger.error('received invalid task: ' + task)
            return 1
    for task in CHECK_TASKS:
        if task in task_list:
            logger.info('starting task: check')
            task_check(args)
            break
    if 'package' in task_list and not args['--release-version']:
        logger.error('--release-version is required for the package task')
        return 1
    config_dict = _get_config_or_die(
        calling_task='hatchery',
        required_params=['auto_push_tag']
    )
    if config_dict['auto_push_tag'] and 'upload' in task_list:
        logger.info('adding task: tag (auto_push_tag==True)')
        task_list.append('tag')
    # all commands will raise a SystemExit if they fail
    # check will have already been run
    for task in ORDERED_TASKS:
        if task in task_list and task != 'check':
            logger.info('starting task: ' + task)
            globals()['task_' + task](args)
    logger.info("all's well that ends well...hatchery out")
    return 0
[ "def", "hatchery", "(", ")", ":", "args", "=", "docopt", ".", "docopt", "(", "__doc__", ")", "task_list", "=", "args", "[", "'<task>'", "]", "if", "not", "task_list", "or", "'help'", "in", "task_list", "or", "args", "[", "'--help'", "]", ":", "print", "(", "__doc__", ".", "format", "(", "version", "=", "_version", ".", "__version__", ",", "config_files", "=", "config", ".", "CONFIG_LOCATIONS", ")", ")", "return", "0", "level_str", "=", "args", "[", "'--log-level'", "]", "try", ":", "level_const", "=", "getattr", "(", "logging", ",", "level_str", ".", "upper", "(", ")", ")", "logging", ".", "basicConfig", "(", "level", "=", "level_const", ")", "if", "level_const", "==", "logging", ".", "DEBUG", ":", "workdir", ".", "options", ".", "debug", "=", "True", "except", "LookupError", ":", "logging", ".", "basicConfig", "(", ")", "logger", ".", "error", "(", "'received invalid log level: '", "+", "level_str", ")", "return", "1", "for", "task", "in", "task_list", ":", "if", "task", "not", "in", "ORDERED_TASKS", ":", "logger", ".", "info", "(", "'starting task: check'", ")", "logger", ".", "error", "(", "'received invalid task: '", "+", "task", ")", "return", "1", "for", "task", "in", "CHECK_TASKS", ":", "if", "task", "in", "task_list", ":", "task_check", "(", "args", ")", "break", "if", "'package'", "in", "task_list", "and", "not", "args", "[", "'--release-version'", "]", ":", "logger", ".", "error", "(", "'--release-version is required for the package task'", ")", "return", "1", "config_dict", "=", "_get_config_or_die", "(", "calling_task", "=", "'hatchery'", ",", "required_params", "=", "[", "'auto_push_tag'", "]", ")", "if", "config_dict", "[", "'auto_push_tag'", "]", "and", "'upload'", "in", "task_list", ":", "logger", ".", "info", "(", "'adding task: tag (auto_push_tag==True)'", ")", "task_list", ".", "append", "(", "'tag'", ")", "# all commands will raise a SystemExit if they fail", "# check will have already been run", "for", "task", "in", "ORDERED_TASKS", ":", "if", "task", "in", "task_list", "and", "task", "!=", "'check'", ":", "logger", ".", "info", "(", "'starting task: '", "+", "task", ")", "globals", "(", ")", "[", "'task_'", "+", "task", "]", "(", "args", ")", "logger", ".", "info", "(", "\"all's well that ends well...hatchery out\"", ")", "return", "0" ]
32.923077
0.001134
def get_precision(self): """ Get the current precision from the sensor. :returns: sensor resolution from 9-12 bits :rtype: int """ config_str = self.raw_sensor_strings[1].split()[4] # Byte 5 is the config register bit_base = int(config_str, 16) >> 5 # Bit 5-6 contains the resolution, cut off the rest return bit_base + 9
[ "def", "get_precision", "(", "self", ")", ":", "config_str", "=", "self", ".", "raw_sensor_strings", "[", "1", "]", ".", "split", "(", ")", "[", "4", "]", "# Byte 5 is the config register", "bit_base", "=", "int", "(", "config_str", ",", "16", ")", ">>", "5", "# Bit 5-6 contains the resolution, cut off the rest", "return", "bit_base", "+", "9" ]
39.1
0.01
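A worked example of the register arithmetic, using hypothetical config bytes: bits 5-6 of the byte select the resolution, so shifting right by 5 and adding the 9-bit base recovers it.

    assert (int('7f', 16) >> 5) + 9 == 12   # 0b011xxxxx -> 12-bit
    assert (int('1f', 16) >> 5) + 9 == 9    # 0b000xxxxx -> 9-bit (minimum)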
def url(self, url): """ Set API URL endpoint Args: url: the url of the API endpoint """ if url and url.endswith('/'): url = url[:-1] self._url = url
[ "def", "url", "(", "self", ",", "url", ")", ":", "if", "url", "and", "url", ".", "endswith", "(", "'/'", ")", ":", "url", "=", "url", "[", ":", "-", "1", "]", "self", ".", "_url", "=", "url" ]
21.3
0.009009
def flags(self, index):
    """Determines interaction allowed with table cells.

    See :qtdoc:`QAbstractItemModel<QAbstractItemModel.flags>`, and
    :qtdoc:`subclassing<qabstractitemmodel.subclassing>`
    """
    if index.isValid():
        if self.model.editableRow(index.row()) and index.column() < 4:
            return QtCore.Qt.ItemIsDragEnabled | \
                QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | \
                QtCore.Qt.ItemIsEditable
        else:
            return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
    else:
        print 'flags: index invalid'
[ "def", "flags", "(", "self", ",", "index", ")", ":", "if", "index", ".", "isValid", "(", ")", ":", "if", "self", ".", "model", ".", "editableRow", "(", "index", ".", "row", "(", ")", ")", "and", "index", ".", "column", "(", ")", "<", "4", ":", "return", "QtCore", ".", "Qt", ".", "ItemIsDragEnabled", "|", "QtCore", ".", "Qt", ".", "ItemIsEnabled", "|", "QtCore", ".", "Qt", ".", "ItemIsSelectable", "|", "QtCore", ".", "Qt", ".", "ItemIsEditable", "else", ":", "return", "QtCore", ".", "Qt", ".", "ItemIsSelectable", "|", "QtCore", ".", "Qt", ".", "ItemIsEnabled", "else", ":", "print", "'flags: index invalid'" ]
43.4
0.004511
def ensure_stacker_compat_config(config_filename): """Ensure config file can be loaded by Stacker.""" try: with open(config_filename, 'r') as stream: yaml.safe_load(stream) except yaml.constructor.ConstructorError as yaml_error: if yaml_error.problem.startswith( 'could not determine a constructor for the tag \'!'): LOGGER.error('"%s" appears to be a CloudFormation template, ' 'but is located in the top level of a module ' 'alongside the CloudFormation config files (i.e. ' 'the file or files indicating the stack names & ' 'parameters). Please move the template to a ' 'subdirectory.', config_filename) sys.exit(1)
[ "def", "ensure_stacker_compat_config", "(", "config_filename", ")", ":", "try", ":", "with", "open", "(", "config_filename", ",", "'r'", ")", "as", "stream", ":", "yaml", ".", "safe_load", "(", "stream", ")", "except", "yaml", ".", "constructor", ".", "ConstructorError", "as", "yaml_error", ":", "if", "yaml_error", ".", "problem", ".", "startswith", "(", "'could not determine a constructor for the tag \\'!'", ")", ":", "LOGGER", ".", "error", "(", "'\"%s\" appears to be a CloudFormation template, '", "'but is located in the top level of a module '", "'alongside the CloudFormation config files (i.e. '", "'the file or files indicating the stack names & '", "'parameters). Please move the template to a '", "'subdirectory.'", ",", "config_filename", ")", "sys", ".", "exit", "(", "1", ")" ]
52.0625
0.001179