Columns:
  text          - string (lengths 75 to 104k)
  code_tokens   - sequence (list of string tokens)
  avg_line_len  - float64 (7.91 to 980)
  score         - float64 (0 to 0.18)
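Each record below repeats these four fields in order: text, code_tokens, avg_line_len, score. As a minimal, hypothetical sketch of working with rows in this schema (the JSON-lines export, the file name, and the filter thresholds are all assumptions, not part of this dump), the columns could be loaded and filtered with pandas:

```python
import pandas as pd

# Assumption: the records were exported as JSON Lines, one object per row,
# with the four columns listed in the schema above (text, code_tokens,
# avg_line_len, score). The file name is hypothetical.
rows = pd.read_json("code_rows.jsonl", lines=True)

# Illustrative filter on the two numeric columns; the cutoffs are examples
# chosen inside the ranges shown in the schema, not values from the dump.
selected = rows[(rows["avg_line_len"] < 60.0) & (rows["score"] > 0.005)]

# Each surviving record still pairs the raw source string with its token list.
for text, tokens in zip(selected["text"], selected["code_tokens"]):
    first_line = text.splitlines()[0] if text else ""
    print(len(tokens), first_line)
```

The thresholds above are only illustrative; the observed ranges in the schema (avg_line_len 7.91 to 980, score 0 to 0.18) would guide any real cutoff.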
def drop_dose(self): """ Drop the maximum dose and related response values. """ doses = np.array(self.individual_doses) responses = np.array(self.responses) mask = doses != doses.max() self.individual_doses = doses[mask].tolist() self.responses = responses[mask].tolist() self.set_summary_data() self._validate()
[ "def", "drop_dose", "(", "self", ")", ":", "doses", "=", "np", ".", "array", "(", "self", ".", "individual_doses", ")", "responses", "=", "np", ".", "array", "(", "self", ".", "responses", ")", "mask", "=", "doses", "!=", "doses", ".", "max", "(", ")", "self", ".", "individual_doses", "=", "doses", "[", "mask", "]", ".", "tolist", "(", ")", "self", ".", "responses", "=", "responses", "[", "mask", "]", ".", "tolist", "(", ")", "self", ".", "set_summary_data", "(", ")", "self", ".", "_validate", "(", ")" ]
34.727273
0.005102
def select_view_indexes(self, indexes, flags=QItemSelectionModel.Select | QItemSelectionModel.Rows): """ Selects the View given indexes. :param view: View. :type view: QWidget :param indexes: Indexes to select. :type indexes: list :param flags: Selection flags. ( QItemSelectionModel.SelectionFlags ) :return: Definition success. :rtype: bool """ if self.selectionModel(): selection = QItemSelection() for index in indexes: selection.merge(QItemSelection(index, index), flags) self.selectionModel().select(selection, flags) return True
[ "def", "select_view_indexes", "(", "self", ",", "indexes", ",", "flags", "=", "QItemSelectionModel", ".", "Select", "|", "QItemSelectionModel", ".", "Rows", ")", ":", "if", "self", ".", "selectionModel", "(", ")", ":", "selection", "=", "QItemSelection", "(", ")", "for", "index", "in", "indexes", ":", "selection", ".", "merge", "(", "QItemSelection", "(", "index", ",", "index", ")", ",", "flags", ")", "self", ".", "selectionModel", "(", ")", ".", "select", "(", "selection", ",", "flags", ")", "return", "True" ]
35.105263
0.00438
def get_task_logs(self, taskId): """ :param taskId: Task identifier :type caseTaskLog: CaseTaskLog defined in models.py :return: TheHive logs :rtype: json """ req = self.url + "/api/case/task/{}/log".format(taskId) try: return requests.get(req, proxies=self.proxies, auth=self.auth, verify=self.cert) except requests.exceptions.RequestException as e: raise CaseTaskException("Case task logs search error: {}".format(e))
[ "def", "get_task_logs", "(", "self", ",", "taskId", ")", ":", "req", "=", "self", ".", "url", "+", "\"/api/case/task/{}/log\"", ".", "format", "(", "taskId", ")", "try", ":", "return", "requests", ".", "get", "(", "req", ",", "proxies", "=", "self", ".", "proxies", ",", "auth", "=", "self", ".", "auth", ",", "verify", "=", "self", ".", "cert", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "raise", "CaseTaskException", "(", "\"Case task logs search error: {}\"", ".", "format", "(", "e", ")", ")" ]
36
0.007737
def without_args(self, *args, **kwargs): """Set the last call to expect that certain arguments will not exist. This is the opposite of :func:`fudge.Fake.with_matching_args`. It will fail if any of the arguments are passed. .. doctest:: >>> import fudge >>> query = fudge.Fake('query').expects_call().without_args( ... 'http://example.com', name="Steve" ... ) >>> query('http://python.org', name="Joe") >>> query('http://example.com') Traceback (most recent call last): ... AssertionError: fake:query() was called unexpectedly with arg http://example.com >>> query("Joe", "Frank", "Bartholomew", "Steve") >>> query(name='Steve') Traceback (most recent call last): ... AssertionError: fake:query() was called unexpectedly with kwarg name=Steve >>> query('http://python.org', name='Steve') Traceback (most recent call last): ... AssertionError: fake:query() was called unexpectedly with kwarg name=Steve >>> query(city='Chicago', name='Steve') Traceback (most recent call last): ... AssertionError: fake:query() was called unexpectedly with kwarg name=Steve >>> query.expects_call().without_args('http://example2.com') fake:query >>> query('foobar') >>> query('foobar', 'http://example2.com') Traceback (most recent call last): ... AssertionError: fake:query() was called unexpectedly with arg http://example2.com >>> query.expects_call().without_args(name="Hieronymus") fake:query >>> query("Gottfried", "Hieronymus") >>> query(name="Wexter", other_name="Hieronymus") >>> query('asdf', name="Hieronymus") Traceback (most recent call last): ... AssertionError: fake:query() was called unexpectedly with kwarg name=Hieronymus >>> query(name="Hieronymus") Traceback (most recent call last): ... AssertionError: fake:query() was called unexpectedly with kwarg name=Hieronymus >>> query = fudge.Fake('query').expects_call().without_args( ... 'http://example.com', name="Steve" ... ).with_args('dog') >>> query('dog') >>> query('dog', 'http://example.com') Traceback (most recent call last): ... AssertionError: fake:query('dog') was called unexpectedly with args ('dog', 'http://example.com') >>> query() Traceback (most recent call last): ... AssertionError: fake:query('dog') was called unexpectedly with args () """ exp = self._get_current_call() if args: exp.unexpected_args = args if kwargs: exp.unexpected_kwargs = kwargs return self
[ "def", "without_args", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "exp", "=", "self", ".", "_get_current_call", "(", ")", "if", "args", ":", "exp", ".", "unexpected_args", "=", "args", "if", "kwargs", ":", "exp", ".", "unexpected_kwargs", "=", "kwargs", "return", "self" ]
41.219178
0.00357
def from_config(cls, cp, section, variable_args): """Returns a distribution based on a configuration file. The parameters for the distribution are retrieved from the section titled "[`section`-`variable_args`]" in the config file. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser A parsed configuration file that contains the distribution options. section : str Name of the section in the configuration file. variable_args : str The names of the parameters for this distribution, separated by `prior.VARARGS_DELIM`. These must appear in the "tag" part of the section header. Returns ------- Uniform A distribution instance from the pycbc.inference.prior module. """ return super(UniformPowerLaw, cls).from_config(cp, section, variable_args, bounds_required=True)
[ "def", "from_config", "(", "cls", ",", "cp", ",", "section", ",", "variable_args", ")", ":", "return", "super", "(", "UniformPowerLaw", ",", "cls", ")", ".", "from_config", "(", "cp", ",", "section", ",", "variable_args", ",", "bounds_required", "=", "True", ")" ]
42.08
0.001859
def _check_and_replace_parser_args(parser, section, option, rename_func, make_dirs=True): """ Searches for parser settings that define filenames. If such settings are found, they are renamed according to the wildcard rules. Moreover, it is also tried to create the corresponding folders. :param parser: A config parser :param section: A config section :param option: The section option :param rename_func: A function to rename found files :param make_dirs: If the directories of the file should be created. """ args = parser.get(section, option, raw=True) strings = get_strings(args) replace = False for string in strings: isfilename = any(x in string for x in FILENAME_INDICATORS) if isfilename: newstring = rename_func(string) if make_dirs: try_make_dirs(newstring) # To work with windows path specifications we need this replacement: raw_string = string.replace('\\', '\\\\') raw_newstring = newstring.replace('\\', '\\\\') args = args.replace(raw_string, raw_newstring) replace = True if replace: parser.set(section, option, args)
[ "def", "_check_and_replace_parser_args", "(", "parser", ",", "section", ",", "option", ",", "rename_func", ",", "make_dirs", "=", "True", ")", ":", "args", "=", "parser", ".", "get", "(", "section", ",", "option", ",", "raw", "=", "True", ")", "strings", "=", "get_strings", "(", "args", ")", "replace", "=", "False", "for", "string", "in", "strings", ":", "isfilename", "=", "any", "(", "x", "in", "string", "for", "x", "in", "FILENAME_INDICATORS", ")", "if", "isfilename", ":", "newstring", "=", "rename_func", "(", "string", ")", "if", "make_dirs", ":", "try_make_dirs", "(", "newstring", ")", "# To work with windows path specifications we need this replacement:", "raw_string", "=", "string", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "raw_newstring", "=", "newstring", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "args", "=", "args", ".", "replace", "(", "raw_string", ",", "raw_newstring", ")", "replace", "=", "True", "if", "replace", ":", "parser", ".", "set", "(", "section", ",", "option", ",", "args", ")" ]
44.482759
0.003035
def to_qubo(self): """Convert a binary quadratic model to QUBO format. If the binary quadratic model's vartype is not :class:`.Vartype.BINARY`, values are converted. Returns: tuple: 2-tuple of form (`biases`, `offset`), where `biases` is a dict in which keys are pairs of variables and values are the associated linear or quadratic bias and `offset` is a number that represents the constant offset of the binary quadratic model. Examples: This example converts a binary quadratic model with spin variables to QUBO format with binary variables. >>> import dimod >>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5}, ... {(0, 1): .5, (1, 2): 1.5}, ... 1.4, ... dimod.SPIN) >>> model.to_qubo() # doctest: +SKIP ({(0, 0): 1.0, (0, 1): 2.0, (1, 1): -6.0, (1, 2): 6.0, (2, 2): -2.0}, 2.9) """ qubo = dict(self.binary.quadratic) qubo.update(((v, v), bias) for v, bias in iteritems(self.binary.linear)) return qubo, self.binary.offset
[ "def", "to_qubo", "(", "self", ")", ":", "qubo", "=", "dict", "(", "self", ".", "binary", ".", "quadratic", ")", "qubo", ".", "update", "(", "(", "(", "v", ",", "v", ")", ",", "bias", ")", "for", "v", ",", "bias", "in", "iteritems", "(", "self", ".", "binary", ".", "linear", ")", ")", "return", "qubo", ",", "self", ".", "binary", ".", "offset" ]
44.5
0.00707
def deregister_listener(self, member_uuid, listener): """ Deregister listener for audio group changes of cast uuid.""" self._casts[str(member_uuid)]['listeners'].remove(listener)
[ "def", "deregister_listener", "(", "self", ",", "member_uuid", ",", "listener", ")", ":", "self", ".", "_casts", "[", "str", "(", "member_uuid", ")", "]", "[", "'listeners'", "]", ".", "remove", "(", "listener", ")" ]
64
0.010309
def cancel(): """HTTP endpoint for canceling tasks If an active task is cancelled, an inactive task with the same code and the smallest interval will be activated if it exists. """ task_id = request.form['id'] task = Task.query.get(task_id) if not task: return json.dumps({ 'status': 'success', 'id': None, }) task.delete() if task.active: current_app.scheduler.cancel(task_id) code = task.code other_task = Task.query.filter_by(code=code).order_by('interval').first() if other_task: other_task.active = True other_task.save() current_app.scheduler.schedule({ 'id': other_task.id, 'code': other_task.code, 'interval': other_task.interval }) return json.dumps({ 'status': 'success', 'id': task_id, })
[ "def", "cancel", "(", ")", ":", "task_id", "=", "request", ".", "form", "[", "'id'", "]", "task", "=", "Task", ".", "query", ".", "get", "(", "task_id", ")", "if", "not", "task", ":", "return", "json", ".", "dumps", "(", "{", "'status'", ":", "'success'", ",", "'id'", ":", "None", ",", "}", ")", "task", ".", "delete", "(", ")", "if", "task", ".", "active", ":", "current_app", ".", "scheduler", ".", "cancel", "(", "task_id", ")", "code", "=", "task", ".", "code", "other_task", "=", "Task", ".", "query", ".", "filter_by", "(", "code", "=", "code", ")", ".", "order_by", "(", "'interval'", ")", ".", "first", "(", ")", "if", "other_task", ":", "other_task", ".", "active", "=", "True", "other_task", ".", "save", "(", ")", "current_app", ".", "scheduler", ".", "schedule", "(", "{", "'id'", ":", "other_task", ".", "id", ",", "'code'", ":", "other_task", ".", "code", ",", "'interval'", ":", "other_task", ".", "interval", "}", ")", "return", "json", ".", "dumps", "(", "{", "'status'", ":", "'success'", ",", "'id'", ":", "task_id", ",", "}", ")" ]
22.428571
0.013431
def SLOAD(self, offset): """Load word from storage""" storage_address = self.address self._publish('will_evm_read_storage', storage_address, offset) value = self.world.get_storage_data(storage_address, offset) self._publish('did_evm_read_storage', storage_address, offset, value) return value
[ "def", "SLOAD", "(", "self", ",", "offset", ")", ":", "storage_address", "=", "self", ".", "address", "self", ".", "_publish", "(", "'will_evm_read_storage'", ",", "storage_address", ",", "offset", ")", "value", "=", "self", ".", "world", ".", "get_storage_data", "(", "storage_address", ",", "offset", ")", "self", ".", "_publish", "(", "'did_evm_read_storage'", ",", "storage_address", ",", "offset", ",", "value", ")", "return", "value" ]
47.714286
0.005882
def get_entity_propnames(entity): """ Get entity property names :param entity: Entity :type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta :returns: Set of entity property names :rtype: set """ ins = entity if isinstance(entity, InstanceState) else inspect(entity) return set( ins.mapper.column_attrs.keys() + # Columns ins.mapper.relationships.keys() # Relationships )
[ "def", "get_entity_propnames", "(", "entity", ")", ":", "ins", "=", "entity", "if", "isinstance", "(", "entity", ",", "InstanceState", ")", "else", "inspect", "(", "entity", ")", "return", "set", "(", "ins", ".", "mapper", ".", "column_attrs", ".", "keys", "(", ")", "+", "# Columns", "ins", ".", "mapper", ".", "relationships", ".", "keys", "(", ")", "# Relationships", ")" ]
33.538462
0.002232
def convert_to_ip(self): """Convert the Data Collection to IP units.""" self._values, self._header._unit = self._header.data_type.to_ip( self._values, self._header.unit)
[ "def", "convert_to_ip", "(", "self", ")", ":", "self", ".", "_values", ",", "self", ".", "_header", ".", "_unit", "=", "self", ".", "_header", ".", "data_type", ".", "to_ip", "(", "self", ".", "_values", ",", "self", ".", "_header", ".", "unit", ")" ]
49.5
0.00995
def application_information(self, application_id): """ The MapReduce application master information resource provides overall information about that mapreduce application master. This includes application id, time it was started, user, name, etc. :returns: API response object with JSON data :rtype: :py:class:`yarn_api_client.base.Response` """ path = '/proxy/{appid}/ws/v1/mapreduce/info'.format( appid=application_id) return self.request(path)
[ "def", "application_information", "(", "self", ",", "application_id", ")", ":", "path", "=", "'/proxy/{appid}/ws/v1/mapreduce/info'", ".", "format", "(", "appid", "=", "application_id", ")", "return", "self", ".", "request", "(", "path", ")" ]
40
0.003759
def map(self, *sequences): """call a function on each element of a sequence remotely. This should behave very much like the builtin map, but return an AsyncMapResult if self.block is False. """ # set _map as a flag for use inside self.__call__ self._map = True try: ret = self.__call__(*sequences) finally: del self._map return ret
[ "def", "map", "(", "self", ",", "*", "sequences", ")", ":", "# set _map as a flag for use inside self.__call__", "self", ".", "_map", "=", "True", "try", ":", "ret", "=", "self", ".", "__call__", "(", "*", "sequences", ")", "finally", ":", "del", "self", ".", "_map", "return", "ret" ]
34.666667
0.007026
def get_current_version_by_config_file() -> str: """ Get current version from the version variable defined in the configuration :return: A string with the current version number :raises ImproperConfigurationError: if version variable cannot be parsed """ debug('get_current_version_by_config_file') filename, variable = config.get('semantic_release', 'version_variable').split(':') variable = variable.strip() debug(filename, variable) with open(filename, 'r') as fd: parts = re.search( r'^{0}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(variable), fd.read(), re.MULTILINE ) if not parts: raise ImproperConfigurationError debug(parts) return parts.group(1)
[ "def", "get_current_version_by_config_file", "(", ")", "->", "str", ":", "debug", "(", "'get_current_version_by_config_file'", ")", "filename", ",", "variable", "=", "config", ".", "get", "(", "'semantic_release'", ",", "'version_variable'", ")", ".", "split", "(", "':'", ")", "variable", "=", "variable", ".", "strip", "(", ")", "debug", "(", "filename", ",", "variable", ")", "with", "open", "(", "filename", ",", "'r'", ")", "as", "fd", ":", "parts", "=", "re", ".", "search", "(", "r'^{0}\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]'", ".", "format", "(", "variable", ")", ",", "fd", ".", "read", "(", ")", ",", "re", ".", "MULTILINE", ")", "if", "not", "parts", ":", "raise", "ImproperConfigurationError", "debug", "(", "parts", ")", "return", "parts", ".", "group", "(", "1", ")" ]
35.909091
0.001233
def create_analysis(name, kernel, src_dir, scaffold_name): """Create analysis files.""" # analysis folder folder = os.path.join(os.getcwd(), 'analyses', name) if not os.path.exists(folder): os.makedirs(folder) else: log.warning('Analysis folder {} already exists.'.format(folder)) # copy all other files for f in os.listdir(src_dir): if f in ('__pycache__',) or \ any(f.endswith(ending) for ending in ('.pyc',)): continue copy_scaffold_file(os.path.join(src_dir, f), os.path.join(folder, f), name, scaffold_name)
[ "def", "create_analysis", "(", "name", ",", "kernel", ",", "src_dir", ",", "scaffold_name", ")", ":", "# analysis folder", "folder", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'analyses'", ",", "name", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "folder", ")", ":", "os", ".", "makedirs", "(", "folder", ")", "else", ":", "log", ".", "warning", "(", "'Analysis folder {} already exists.'", ".", "format", "(", "folder", ")", ")", "# copy all other files", "for", "f", "in", "os", ".", "listdir", "(", "src_dir", ")", ":", "if", "f", "in", "(", "'__pycache__'", ",", ")", "or", "any", "(", "f", ".", "endswith", "(", "ending", ")", "for", "ending", "in", "(", "'.pyc'", ",", ")", ")", ":", "continue", "copy_scaffold_file", "(", "os", ".", "path", ".", "join", "(", "src_dir", ",", "f", ")", ",", "os", ".", "path", ".", "join", "(", "folder", ",", "f", ")", ",", "name", ",", "scaffold_name", ")" ]
35.222222
0.001536
def download(self, overwrite=False): """ Download data files needed by this Genome instance. Parameters ---------- overwrite : bool, optional Download files regardless whether local copy already exists. """ self._set_local_paths(download_if_missing=True, overwrite=overwrite)
[ "def", "download", "(", "self", ",", "overwrite", "=", "False", ")", ":", "self", ".", "_set_local_paths", "(", "download_if_missing", "=", "True", ",", "overwrite", "=", "overwrite", ")" ]
33.5
0.005814
def get_config(self): """Return configurations of MaxBoltzmannQPolicy # Returns Dict of config """ config = super(MaxBoltzmannQPolicy, self).get_config() config['eps'] = self.eps config['tau'] = self.tau config['clip'] = self.clip return config
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "super", "(", "MaxBoltzmannQPolicy", ",", "self", ")", ".", "get_config", "(", ")", "config", "[", "'eps'", "]", "=", "self", ".", "eps", "config", "[", "'tau'", "]", "=", "self", ".", "tau", "config", "[", "'clip'", "]", "=", "self", ".", "clip", "return", "config" ]
28.272727
0.006231
def remove_formatting_codes(line, irc=False): """Remove girc control codes from the given line.""" if irc: line = escape(line) new_line = '' while len(line) > 0: try: if line[0] == '$': line = line[1:] if line[0] == '$': new_line += '$' line = line[1:] elif line[0] == 'c': line = line[1:] if line[0].isdigit(): line = line[1:] if line[0].isdigit(): line = line[1:] if line[0] == ',': line = line[1:] if line[0].isdigit(): line = line[1:] if line[0].isdigit(): line = line[1:] elif line[0] == ',': line = line[1:] if line[0].isdigit(): line = line[1:] if line[0].isdigit(): line = line[1:] if line[0] == '[': while line[0] != ']': line = line[1:] line = line[1:] elif line[0] == '{': if line[:3] == '{$}': new_line += '$' line = line[3:] continue while line[0] != '}': line = line[1:] line = line[1:] else: line = line[1:] else: new_line += line[0] line = line[1:] except IndexError: continue return new_line
[ "def", "remove_formatting_codes", "(", "line", ",", "irc", "=", "False", ")", ":", "if", "irc", ":", "line", "=", "escape", "(", "line", ")", "new_line", "=", "''", "while", "len", "(", "line", ")", ">", "0", ":", "try", ":", "if", "line", "[", "0", "]", "==", "'$'", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", "[", "0", "]", "==", "'$'", ":", "new_line", "+=", "'$'", "line", "=", "line", "[", "1", ":", "]", "elif", "line", "[", "0", "]", "==", "'c'", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", "[", "0", "]", ".", "isdigit", "(", ")", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", "[", "0", "]", ".", "isdigit", "(", ")", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", "[", "0", "]", "==", "','", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", "[", "0", "]", ".", "isdigit", "(", ")", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", "[", "0", "]", ".", "isdigit", "(", ")", ":", "line", "=", "line", "[", "1", ":", "]", "elif", "line", "[", "0", "]", "==", "','", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", "[", "0", "]", ".", "isdigit", "(", ")", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", "[", "0", "]", ".", "isdigit", "(", ")", ":", "line", "=", "line", "[", "1", ":", "]", "if", "line", "[", "0", "]", "==", "'['", ":", "while", "line", "[", "0", "]", "!=", "']'", ":", "line", "=", "line", "[", "1", ":", "]", "line", "=", "line", "[", "1", ":", "]", "elif", "line", "[", "0", "]", "==", "'{'", ":", "if", "line", "[", ":", "3", "]", "==", "'{$}'", ":", "new_line", "+=", "'$'", "line", "=", "line", "[", "3", ":", "]", "continue", "while", "line", "[", "0", "]", "!=", "'}'", ":", "line", "=", "line", "[", "1", ":", "]", "line", "=", "line", "[", "1", ":", "]", "else", ":", "line", "=", "line", "[", "1", ":", "]", "else", ":", "new_line", "+=", "line", "[", "0", "]", "line", "=", "line", "[", "1", ":", "]", "except", "IndexError", ":", "continue", "return", "new_line" ]
33.872727
0.000522
def do_map(*args, **kwargs): """Applies a filter on a sequence of objects or looks up an attribute. This is useful when dealing with lists of objects but you are really only interested in a certain value of it. The basic usage is mapping on an attribute. Imagine you have a list of users but you are only interested in a list of usernames: .. sourcecode:: jinja Users on this page: {{ users|map(attribute='username')|join(', ') }} Alternatively you can let it invoke a filter by passing the name of the filter and the arguments afterwards. A good example would be applying a text conversion filter on a sequence: .. sourcecode:: jinja Users on this page: {{ titles|map('lower')|join(', ') }} .. versionadded:: 2.7 """ seq, func = prepare_map(args, kwargs) if seq: for item in seq: yield func(item)
[ "def", "do_map", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "seq", ",", "func", "=", "prepare_map", "(", "args", ",", "kwargs", ")", "if", "seq", ":", "for", "item", "in", "seq", ":", "yield", "func", "(", "item", ")" ]
33.576923
0.001114
def noise_from_step_num(): """Quantization noise equal to (phi * (step_num + 1)) mod 1.0. Not using random_uniform here due to a problem on TPU in that random seeds are not respected, which may cause the parameters on different replicas to go out-of-sync. Returns: a float32 scalar """ step = tf.to_int32(tf.train.get_or_create_global_step()) + 1 phi = ((5 ** 0.5) - 1) / 2 # Naive computation tf.mod(phi * step, 1.0) in float32 would be disastrous # due to loss of precision when the step number gets large. # Computation in doubles does not work on TPU, so we use this complicated # alternative computation which does not suffer from these roundoff errors. ret = 0.0 for i in range(30): ret += (((phi * (2 ** i)) % 1.0) # double-precision computation in python * tf.to_float(tf.mod(step // (2 ** i), 2))) return tf.mod(ret, 1.0)
[ "def", "noise_from_step_num", "(", ")", ":", "step", "=", "tf", ".", "to_int32", "(", "tf", ".", "train", ".", "get_or_create_global_step", "(", ")", ")", "+", "1", "phi", "=", "(", "(", "5", "**", "0.5", ")", "-", "1", ")", "/", "2", "# Naive computation tf.mod(phi * step, 1.0) in float32 would be disastrous", "# due to loss of precision when the step number gets large.", "# Computation in doubles does not work on TPU, so we use this complicated", "# alternative computation which does not suffer from these roundoff errors.", "ret", "=", "0.0", "for", "i", "in", "range", "(", "30", ")", ":", "ret", "+=", "(", "(", "(", "phi", "*", "(", "2", "**", "i", ")", ")", "%", "1.0", ")", "# double-precision computation in python", "*", "tf", ".", "to_float", "(", "tf", ".", "mod", "(", "step", "//", "(", "2", "**", "i", ")", ",", "2", ")", ")", ")", "return", "tf", ".", "mod", "(", "ret", ",", "1.0", ")" ]
41.095238
0.012458
def generate_hazard_rates(n, d, timelines, constant=False, independent=0, n_binary=0, model="aalen"): """ n: the number of instances d: the number of covariates lifelines: the observational times constant: make the coeffients constant (not time dependent) n_binary: the number of binary covariates model: from ["aalen", "cox"] Returns:s hazard rates: (t,n) dataframe, coefficients: (t,d+1) dataframe of coefficients, covarites: (n,d) dataframe """ covariates = generate_covariates(n, d, n_binary=n_binary) if model == "aalen": coefficients = time_varying_coefficients(d + 1, timelines, independent=independent, constant=constant) hazard_rates = np.dot(covariates, coefficients.T) return (pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates)) if model == "cox": covariates = covariates[:, :-1] coefficients = constant_coefficients(d, timelines, independent) baseline = time_varying_coefficients(1, timelines) hazard_rates = np.exp(np.dot(covariates, coefficients.T)) * baseline[baseline.columns[0]].values coefficients["baseline: " + baseline.columns[0]] = baseline.values return (pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates)) raise Exception
[ "def", "generate_hazard_rates", "(", "n", ",", "d", ",", "timelines", ",", "constant", "=", "False", ",", "independent", "=", "0", ",", "n_binary", "=", "0", ",", "model", "=", "\"aalen\"", ")", ":", "covariates", "=", "generate_covariates", "(", "n", ",", "d", ",", "n_binary", "=", "n_binary", ")", "if", "model", "==", "\"aalen\"", ":", "coefficients", "=", "time_varying_coefficients", "(", "d", "+", "1", ",", "timelines", ",", "independent", "=", "independent", ",", "constant", "=", "constant", ")", "hazard_rates", "=", "np", ".", "dot", "(", "covariates", ",", "coefficients", ".", "T", ")", "return", "(", "pd", ".", "DataFrame", "(", "hazard_rates", ".", "T", ",", "index", "=", "timelines", ")", ",", "coefficients", ",", "pd", ".", "DataFrame", "(", "covariates", ")", ")", "if", "model", "==", "\"cox\"", ":", "covariates", "=", "covariates", "[", ":", ",", ":", "-", "1", "]", "coefficients", "=", "constant_coefficients", "(", "d", ",", "timelines", ",", "independent", ")", "baseline", "=", "time_varying_coefficients", "(", "1", ",", "timelines", ")", "hazard_rates", "=", "np", ".", "exp", "(", "np", ".", "dot", "(", "covariates", ",", "coefficients", ".", "T", ")", ")", "*", "baseline", "[", "baseline", ".", "columns", "[", "0", "]", "]", ".", "values", "coefficients", "[", "\"baseline: \"", "+", "baseline", ".", "columns", "[", "0", "]", "]", "=", "baseline", ".", "values", "return", "(", "pd", ".", "DataFrame", "(", "hazard_rates", ".", "T", ",", "index", "=", "timelines", ")", ",", "coefficients", ",", "pd", ".", "DataFrame", "(", "covariates", ")", ")", "raise", "Exception" ]
48
0.004376
def tf_loss_per_instance(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None): """ Creates the TensorFlow operations for calculating the loss per batch instance. Args: states: Dict of state tensors. internals: Dict of prior internal state tensors. actions: Dict of action tensors. terminal: Terminal boolean tensor. reward: Reward tensor. next_states: Dict of successor state tensors. next_internals: List of posterior internal state tensors. update: Boolean tensor indicating whether this call happens during an update. reference: Optional reference tensor(s), in case of a comparative loss. Returns: Loss per instance tensor. """ raise NotImplementedError
[ "def", "tf_loss_per_instance", "(", "self", ",", "states", ",", "internals", ",", "actions", ",", "terminal", ",", "reward", ",", "next_states", ",", "next_internals", ",", "update", ",", "reference", "=", "None", ")", ":", "raise", "NotImplementedError" ]
44.45
0.007709
def build_requirements(docs_path, package_name="yacms"): """ Updates the requirements file with yacms's version number. """ mezz_string = "yacms==" project_path = os.path.join(docs_path, "..") requirements_file = os.path.join(project_path, package_name, "project_template", "requirements.txt") with open(requirements_file, "r") as f: requirements = f.readlines() with open(requirements_file, "w") as f: f.write("yacms==%s\n" % __version__) for requirement in requirements: if requirement.strip() and not requirement.startswith(mezz_string): f.write(requirement)
[ "def", "build_requirements", "(", "docs_path", ",", "package_name", "=", "\"yacms\"", ")", ":", "mezz_string", "=", "\"yacms==\"", "project_path", "=", "os", ".", "path", ".", "join", "(", "docs_path", ",", "\"..\"", ")", "requirements_file", "=", "os", ".", "path", ".", "join", "(", "project_path", ",", "package_name", ",", "\"project_template\"", ",", "\"requirements.txt\"", ")", "with", "open", "(", "requirements_file", ",", "\"r\"", ")", "as", "f", ":", "requirements", "=", "f", ".", "readlines", "(", ")", "with", "open", "(", "requirements_file", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"yacms==%s\\n\"", "%", "__version__", ")", "for", "requirement", "in", "requirements", ":", "if", "requirement", ".", "strip", "(", ")", "and", "not", "requirement", ".", "startswith", "(", "mezz_string", ")", ":", "f", ".", "write", "(", "requirement", ")" ]
44.533333
0.001466
def max_width(self): """ The maximum width of all the Decors in the Legend. This is needed to scale a Legend or Striplog when plotting with widths turned on. """ try: maximum = max([row.width for row in self.__list if row.width is not None]) return maximum except: return 0
[ "def", "max_width", "(", "self", ")", ":", "try", ":", "maximum", "=", "max", "(", "[", "row", ".", "width", "for", "row", "in", "self", ".", "__list", "if", "row", ".", "width", "is", "not", "None", "]", ")", "return", "maximum", "except", ":", "return", "0" ]
34.8
0.011204
def text(files): '''Returns the whole transcribed text''' sentences = convert_timestamps(files) out = [] for s in sentences: out.append(' '.join([w[0] for w in s['words']])) return '\n'.join(out)
[ "def", "text", "(", "files", ")", ":", "sentences", "=", "convert_timestamps", "(", "files", ")", "out", "=", "[", "]", "for", "s", "in", "sentences", ":", "out", ".", "append", "(", "' '", ".", "join", "(", "[", "w", "[", "0", "]", "for", "w", "in", "s", "[", "'words'", "]", "]", ")", ")", "return", "'\\n'", ".", "join", "(", "out", ")" ]
31
0.004484
def QA_SU_save_stock_min(client=DATABASE, ui_log=None, ui_progress=None): """save stock_min Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ stock_list = QA_fetch_get_stock_list().code.unique().tolist() coll = client.stock_min coll.create_index( [ ('code', pymongo.ASCENDING), ('time_stamp', pymongo.ASCENDING), ('date_stamp', pymongo.ASCENDING) ] ) err = [] def __saving_work(code, coll): QA_util_log_info( '##JOB03 Now Saving STOCK_MIN ==== {}'.format(str(code)), ui_log=ui_log ) try: for type in ['1min', '5min', '15min', '30min', '60min']: ref_ = coll.find({'code': str(code)[0:6], 'type': type}) end_time = str(now_time())[0:19] if ref_.count() > 0: start_time = ref_[ref_.count() - 1]['datetime'] QA_util_log_info( '##JOB03.{} Now Saving {} from {} to {} =={} '.format( ['1min', '5min', '15min', '30min', '60min'].index(type), str(code), start_time, end_time, type ), ui_log=ui_log ) if start_time != end_time: __data = QA_fetch_get_stock_min( str(code), start_time, end_time, type ) if len(__data) > 1: coll.insert_many( QA_util_to_json_from_pandas(__data)[1::] ) else: start_time = '2015-01-01' QA_util_log_info( '##JOB03.{} Now Saving {} from {} to {} =={} '.format( ['1min', '5min', '15min', '30min', '60min'].index(type), str(code), start_time, end_time, type ), ui_log=ui_log ) if start_time != end_time: __data = QA_fetch_get_stock_min( str(code), start_time, end_time, type ) if len(__data) > 1: coll.insert_many( QA_util_to_json_from_pandas(__data) ) except Exception as e: QA_util_log_info(e, ui_log=ui_log) err.append(code) QA_util_log_info(err, ui_log=ui_log) executor = ThreadPoolExecutor(max_workers=4) # executor.map((__saving_work, stock_list[i_], coll),URLS) res = { executor.submit(__saving_work, stock_list[i_], coll) for i_ in range(len(stock_list)) } count = 0 for i_ in concurrent.futures.as_completed(res): QA_util_log_info( 'The {} of Total {}'.format(count, len(stock_list)), ui_log=ui_log ) strProgress = 'DOWNLOAD PROGRESS {} '.format( str(float(count / len(stock_list) * 100))[0:4] + '%' ) intProgress = int(count / len(stock_list) * 10000.0) QA_util_log_info( strProgress, ui_log, ui_progress=ui_progress, ui_progress_int_value=intProgress ) count = count + 1 if len(err) < 1: QA_util_log_info('SUCCESS', ui_log=ui_log) else: QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log) QA_util_log_info(err, ui_log=ui_log)
[ "def", "QA_SU_save_stock_min", "(", "client", "=", "DATABASE", ",", "ui_log", "=", "None", ",", "ui_progress", "=", "None", ")", ":", "stock_list", "=", "QA_fetch_get_stock_list", "(", ")", ".", "code", ".", "unique", "(", ")", ".", "tolist", "(", ")", "coll", "=", "client", ".", "stock_min", "coll", ".", "create_index", "(", "[", "(", "'code'", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "'time_stamp'", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "'date_stamp'", ",", "pymongo", ".", "ASCENDING", ")", "]", ")", "err", "=", "[", "]", "def", "__saving_work", "(", "code", ",", "coll", ")", ":", "QA_util_log_info", "(", "'##JOB03 Now Saving STOCK_MIN ==== {}'", ".", "format", "(", "str", "(", "code", ")", ")", ",", "ui_log", "=", "ui_log", ")", "try", ":", "for", "type", "in", "[", "'1min'", ",", "'5min'", ",", "'15min'", ",", "'30min'", ",", "'60min'", "]", ":", "ref_", "=", "coll", ".", "find", "(", "{", "'code'", ":", "str", "(", "code", ")", "[", "0", ":", "6", "]", ",", "'type'", ":", "type", "}", ")", "end_time", "=", "str", "(", "now_time", "(", ")", ")", "[", "0", ":", "19", "]", "if", "ref_", ".", "count", "(", ")", ">", "0", ":", "start_time", "=", "ref_", "[", "ref_", ".", "count", "(", ")", "-", "1", "]", "[", "'datetime'", "]", "QA_util_log_info", "(", "'##JOB03.{} Now Saving {} from {} to {} =={} '", ".", "format", "(", "[", "'1min'", ",", "'5min'", ",", "'15min'", ",", "'30min'", ",", "'60min'", "]", ".", "index", "(", "type", ")", ",", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", ",", "ui_log", "=", "ui_log", ")", "if", "start_time", "!=", "end_time", ":", "__data", "=", "QA_fetch_get_stock_min", "(", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", "if", "len", "(", "__data", ")", ">", "1", ":", "coll", ".", "insert_many", "(", "QA_util_to_json_from_pandas", "(", "__data", ")", "[", "1", ":", ":", "]", ")", "else", ":", "start_time", "=", "'2015-01-01'", "QA_util_log_info", "(", "'##JOB03.{} Now Saving {} from {} to {} =={} '", ".", "format", "(", "[", "'1min'", ",", "'5min'", ",", "'15min'", ",", "'30min'", ",", "'60min'", "]", ".", "index", "(", "type", ")", ",", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", ",", "ui_log", "=", "ui_log", ")", "if", "start_time", "!=", "end_time", ":", "__data", "=", "QA_fetch_get_stock_min", "(", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", "if", "len", "(", "__data", ")", ">", "1", ":", "coll", ".", "insert_many", "(", "QA_util_to_json_from_pandas", "(", "__data", ")", ")", "except", "Exception", "as", "e", ":", "QA_util_log_info", "(", "e", ",", "ui_log", "=", "ui_log", ")", "err", ".", "append", "(", "code", ")", "QA_util_log_info", "(", "err", ",", "ui_log", "=", "ui_log", ")", "executor", "=", "ThreadPoolExecutor", "(", "max_workers", "=", "4", ")", "# executor.map((__saving_work, stock_list[i_], coll),URLS)", "res", "=", "{", "executor", ".", "submit", "(", "__saving_work", ",", "stock_list", "[", "i_", "]", ",", "coll", ")", "for", "i_", "in", "range", "(", "len", "(", "stock_list", ")", ")", "}", "count", "=", "0", "for", "i_", "in", "concurrent", ".", "futures", ".", "as_completed", "(", "res", ")", ":", "QA_util_log_info", "(", "'The {} of Total {}'", ".", "format", "(", "count", ",", "len", "(", "stock_list", ")", ")", ",", "ui_log", "=", "ui_log", ")", "strProgress", "=", "'DOWNLOAD PROGRESS {} '", ".", "format", "(", "str", "(", "float", "(", "count", "/", "len", "(", "stock_list", ")", 
"*", "100", ")", ")", "[", "0", ":", "4", "]", "+", "'%'", ")", "intProgress", "=", "int", "(", "count", "/", "len", "(", "stock_list", ")", "*", "10000.0", ")", "QA_util_log_info", "(", "strProgress", ",", "ui_log", ",", "ui_progress", "=", "ui_progress", ",", "ui_progress_int_value", "=", "intProgress", ")", "count", "=", "count", "+", "1", "if", "len", "(", "err", ")", "<", "1", ":", "QA_util_log_info", "(", "'SUCCESS'", ",", "ui_log", "=", "ui_log", ")", "else", ":", "QA_util_log_info", "(", "' ERROR CODE \\n '", ",", "ui_log", "=", "ui_log", ")", "QA_util_log_info", "(", "err", ",", "ui_log", "=", "ui_log", ")" ]
34.704918
0.00023
def get_default_config(self): """ Returns the default collector settings """ config = super(TCPCollector, self).get_default_config() config.update({ 'path': 'tcp', 'allowed_names': 'ListenOverflows, ListenDrops, TCPLoss, ' + 'TCPTimeouts, TCPFastRetrans, TCPLostRetransmit, ' + 'TCPForwardRetrans, TCPSlowStartRetrans, CurrEstab, ' + 'TCPAbortOnMemory, TCPBacklogDrop, AttemptFails, ' + 'EstabResets, InErrs, ActiveOpens, PassiveOpens', 'gauges': 'CurrEstab, MaxConn', }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "TCPCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'tcp'", ",", "'allowed_names'", ":", "'ListenOverflows, ListenDrops, TCPLoss, '", "+", "'TCPTimeouts, TCPFastRetrans, TCPLostRetransmit, '", "+", "'TCPForwardRetrans, TCPSlowStartRetrans, CurrEstab, '", "+", "'TCPAbortOnMemory, TCPBacklogDrop, AttemptFails, '", "+", "'EstabResets, InErrs, ActiveOpens, PassiveOpens'", ",", "'gauges'", ":", "'CurrEstab, MaxConn'", ",", "}", ")", "return", "config" ]
40.125
0.003044
def get_length(self, length, trim=0, offset=0): """Return string at current position + length. If trim == true then get as much as possible before eos. """ if trim and not self.has_space(offset + length): return self.string[self.pos + offset:] elif self.has_space(offset + length): return self.string[self.pos + offset:self.pos + offset + length] else: return ''
[ "def", "get_length", "(", "self", ",", "length", ",", "trim", "=", "0", ",", "offset", "=", "0", ")", ":", "if", "trim", "and", "not", "self", ".", "has_space", "(", "offset", "+", "length", ")", ":", "return", "self", ".", "string", "[", "self", ".", "pos", "+", "offset", ":", "]", "elif", "self", ".", "has_space", "(", "offset", "+", "length", ")", ":", "return", "self", ".", "string", "[", "self", ".", "pos", "+", "offset", ":", "self", ".", "pos", "+", "offset", "+", "length", "]", "else", ":", "return", "''" ]
43.7
0.004484
def work_model_factory(*, validator=validators.is_work_model, **kwargs): """Generate a Work model. Expects ``data``, ``validator``, ``model_cls``, and ``ld_context`` as keyword arguments. Raises: :exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword argument is given. """ kwargs['ld_type'] = 'AbstractWork' return _model_factory(validator=validator, **kwargs)
[ "def", "work_model_factory", "(", "*", ",", "validator", "=", "validators", ".", "is_work_model", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'ld_type'", "]", "=", "'AbstractWork'", "return", "_model_factory", "(", "validator", "=", "validator", ",", "*", "*", "kwargs", ")" ]
34
0.002387
def format_character(char): """Returns the C-formatting of the character""" if \ char in string.ascii_letters \ or char in string.digits \ or char in [ '_', '.', ':', ';', ' ', '!', '?', '+', '-', '/', '=', '<', '>', '$', '(', ')', '@', '~', '`', '|', '#', '[', ']', '{', '}', '&', '*', '^', '%']: return char elif char in ['"', '\'', '\\']: return '\\{0}'.format(char) elif char == '\n': return '\\n' elif char == '\r': return '\\r' elif char == '\t': return '\\t' else: return '\\x{:02x}'.format(ord(char))
[ "def", "format_character", "(", "char", ")", ":", "if", "char", "in", "string", ".", "ascii_letters", "or", "char", "in", "string", ".", "digits", "or", "char", "in", "[", "'_'", ",", "'.'", ",", "':'", ",", "';'", ",", "' '", ",", "'!'", ",", "'?'", ",", "'+'", ",", "'-'", ",", "'/'", ",", "'='", ",", "'<'", ",", "'>'", ",", "'$'", ",", "'('", ",", "')'", ",", "'@'", ",", "'~'", ",", "'`'", ",", "'|'", ",", "'#'", ",", "'['", ",", "']'", ",", "'{'", ",", "'}'", ",", "'&'", ",", "'*'", ",", "'^'", ",", "'%'", "]", ":", "return", "char", "elif", "char", "in", "[", "'\"'", ",", "'\\''", ",", "'\\\\'", "]", ":", "return", "'\\\\{0}'", ".", "format", "(", "char", ")", "elif", "char", "==", "'\\n'", ":", "return", "'\\\\n'", "elif", "char", "==", "'\\r'", ":", "return", "'\\\\r'", "elif", "char", "==", "'\\t'", ":", "return", "'\\\\t'", "else", ":", "return", "'\\\\x{:02x}'", ".", "format", "(", "ord", "(", "char", ")", ")" ]
31.85
0.001524
def cmd_compassmot(self, args): '''do a compass/motor interference calibration''' mav = self.master print("compassmot starting") mav.mav.command_long_send(mav.target_system, mav.target_component, mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0, 0, 0, 0, 0, 0, 1, 0) self.compassmot_running = True self.empty_input_count = self.mpstate.empty_input_count
[ "def", "cmd_compassmot", "(", "self", ",", "args", ")", ":", "mav", "=", "self", ".", "master", "print", "(", "\"compassmot starting\"", ")", "mav", ".", "mav", ".", "command_long_send", "(", "mav", ".", "target_system", ",", "mav", ".", "target_component", ",", "mavutil", ".", "mavlink", ".", "MAV_CMD_PREFLIGHT_CALIBRATION", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "1", ",", "0", ")", "self", ".", "compassmot_running", "=", "True", "self", ".", "empty_input_count", "=", "self", ".", "mpstate", ".", "empty_input_count" ]
51.222222
0.006397
def init_logger(self): """Create configuration for the root logger.""" # All logs are comming to this logger self.logger.setLevel(logging.DEBUG) self.logger.propagate = False # Logging to console if self.min_log_level_to_print: level = self.min_log_level_to_print handler_class = logging.StreamHandler self._create_handler(handler_class, level) # Logging to file if self.min_log_level_to_save: level = self.min_log_level_to_save handler_class = logging.handlers.TimedRotatingFileHandler self._create_handler(handler_class, level) # Logging to syslog if self.min_log_level_to_syslog: level = self.min_log_level_to_syslog handler_class = logging.handlers.SysLogHandler self._create_handler(handler_class, level) # Logging to email if self.min_log_level_to_mail: level = self.min_log_level_to_mail handler_class = AlkiviEmailHandler self._create_handler(handler_class, level) return
[ "def", "init_logger", "(", "self", ")", ":", "# All logs are comming to this logger", "self", ".", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "self", ".", "logger", ".", "propagate", "=", "False", "# Logging to console", "if", "self", ".", "min_log_level_to_print", ":", "level", "=", "self", ".", "min_log_level_to_print", "handler_class", "=", "logging", ".", "StreamHandler", "self", ".", "_create_handler", "(", "handler_class", ",", "level", ")", "# Logging to file", "if", "self", ".", "min_log_level_to_save", ":", "level", "=", "self", ".", "min_log_level_to_save", "handler_class", "=", "logging", ".", "handlers", ".", "TimedRotatingFileHandler", "self", ".", "_create_handler", "(", "handler_class", ",", "level", ")", "# Logging to syslog", "if", "self", ".", "min_log_level_to_syslog", ":", "level", "=", "self", ".", "min_log_level_to_syslog", "handler_class", "=", "logging", ".", "handlers", ".", "SysLogHandler", "self", ".", "_create_handler", "(", "handler_class", ",", "level", ")", "# Logging to email", "if", "self", ".", "min_log_level_to_mail", ":", "level", "=", "self", ".", "min_log_level_to_mail", "handler_class", "=", "AlkiviEmailHandler", "self", ".", "_create_handler", "(", "handler_class", ",", "level", ")", "return" ]
35.548387
0.001767
def add_doc(self, doc, index_update=True, label_guesser_update=True): """ Add a document to the index """ if not self.index_writer and index_update: self.index_writer = self.index.writer() if not self.label_guesser_updater and label_guesser_update: self.label_guesser_updater = self.label_guesser.get_updater() logger.info("Indexing new doc: %s" % doc) if index_update: self._update_doc_in_index(self.index_writer, doc) if label_guesser_update: self.label_guesser_updater.add_doc(doc) if doc.docid not in self._docs_by_id: self._docs_by_id[doc.docid] = doc
[ "def", "add_doc", "(", "self", ",", "doc", ",", "index_update", "=", "True", ",", "label_guesser_update", "=", "True", ")", ":", "if", "not", "self", ".", "index_writer", "and", "index_update", ":", "self", ".", "index_writer", "=", "self", ".", "index", ".", "writer", "(", ")", "if", "not", "self", ".", "label_guesser_updater", "and", "label_guesser_update", ":", "self", ".", "label_guesser_updater", "=", "self", ".", "label_guesser", ".", "get_updater", "(", ")", "logger", ".", "info", "(", "\"Indexing new doc: %s\"", "%", "doc", ")", "if", "index_update", ":", "self", ".", "_update_doc_in_index", "(", "self", ".", "index_writer", ",", "doc", ")", "if", "label_guesser_update", ":", "self", ".", "label_guesser_updater", ".", "add_doc", "(", "doc", ")", "if", "doc", ".", "docid", "not", "in", "self", ".", "_docs_by_id", ":", "self", ".", "_docs_by_id", "[", "doc", ".", "docid", "]", "=", "doc" ]
44.933333
0.002907
def timestamp(self): "Return POSIX timestamp as float" if self._tzinfo is None: return _time.mktime((self.year, self.month, self.day, self.hour, self.minute, self.second, -1, -1, -1)) + self.microsecond / 1e6 else: return (self - _EPOCH).total_seconds()
[ "def", "timestamp", "(", "self", ")", ":", "if", "self", ".", "_tzinfo", "is", "None", ":", "return", "_time", ".", "mktime", "(", "(", "self", ".", "year", ",", "self", ".", "month", ",", "self", ".", "day", ",", "self", ".", "hour", ",", "self", ".", "minute", ",", "self", ".", "second", ",", "-", "1", ",", "-", "1", ",", "-", "1", ")", ")", "+", "self", ".", "microsecond", "/", "1e6", "else", ":", "return", "(", "self", "-", "_EPOCH", ")", ".", "total_seconds", "(", ")" ]
45
0.00545
def _generateModel0(numCategories): """ Generate the initial, first order, and second order transition probabilities for 'model0'. For this model, we generate the following set of sequences: 1-2-3 (4X) 1-2-4 (1X) 5-2-3 (1X) 5-2-4 (4X) Parameters: ---------------------------------------------------------------------- numCategories: Number of categories retval: (initProb, firstOrder, secondOrder, seqLen) initProb: Initial probability for each category. This is a vector of length len(categoryList). firstOrder: A dictionary of the 1st order probabilities. The key is the 1st element of the sequence, the value is the probability of each 2nd element given the first. secondOrder: A dictionary of the 2nd order probabilities. The key is the first 2 elements of the sequence, the value is the probability of each possible 3rd element given the first two. seqLen: Desired length of each sequence. The 1st element will be generated using the initProb, the 2nd element by the firstOrder table, and the 3rd and all successive elements by the secondOrder table. Here is an example of some return values: initProb: [0.7, 0.2, 0.1] firstOrder: {'[0]': [0.3, 0.3, 0.4], '[1]': [0.3, 0.3, 0.4], '[2]': [0.3, 0.3, 0.4]} secondOrder: {'[0,0]': [0.3, 0.3, 0.4], '[0,1]': [0.3, 0.3, 0.4], '[0,2]': [0.3, 0.3, 0.4], '[1,0]': [0.3, 0.3, 0.4], '[1,1]': [0.3, 0.3, 0.4], '[1,2]': [0.3, 0.3, 0.4], '[2,0]': [0.3, 0.3, 0.4], '[2,1]': [0.3, 0.3, 0.4], '[2,2]': [0.3, 0.3, 0.4]} """ # =============================================================== # Let's model the following: # a-b-c (4X) # a-b-d (1X) # e-b-c (1X) # e-b-d (4X) # -------------------------------------------------------------------- # Initial probabilities, 'a' and 'e' equally likely initProb = numpy.zeros(numCategories) initProb[0] = 0.5 initProb[4] = 0.5 # -------------------------------------------------------------------- # 1st order transitions # both 'a' and 'e' should lead to 'b' firstOrder = dict() for catIdx in range(numCategories): key = str([catIdx]) probs = numpy.ones(numCategories) / numCategories if catIdx == 0 or catIdx == 4: probs.fill(0) probs[1] = 1.0 # lead only to b firstOrder[key] = probs # -------------------------------------------------------------------- # 2nd order transitions # a-b should lead to c 80% and d 20% # e-b should lead to c 20% and d 80% secondOrder = dict() for firstIdx in range(numCategories): for secondIdx in range(numCategories): key = str([firstIdx, secondIdx]) probs = numpy.ones(numCategories) / numCategories if key == str([0,1]): probs.fill(0) probs[2] = 0.80 # 'ab' leads to 'c' 80% of the time probs[3] = 0.20 # 'ab' leads to 'd' 20% of the time elif key == str([4,1]): probs.fill(0) probs[2] = 0.20 # 'eb' leads to 'c' 20% of the time probs[3] = 0.80 # 'eb' leads to 'd' 80% of the time secondOrder[key] = probs return (initProb, firstOrder, secondOrder, 3)
[ "def", "_generateModel0", "(", "numCategories", ")", ":", "# ===============================================================", "# Let's model the following:", "# a-b-c (4X)", "# a-b-d (1X)", "# e-b-c (1X)", "# e-b-d (4X)", "# --------------------------------------------------------------------", "# Initial probabilities, 'a' and 'e' equally likely", "initProb", "=", "numpy", ".", "zeros", "(", "numCategories", ")", "initProb", "[", "0", "]", "=", "0.5", "initProb", "[", "4", "]", "=", "0.5", "# --------------------------------------------------------------------", "# 1st order transitions", "# both 'a' and 'e' should lead to 'b'", "firstOrder", "=", "dict", "(", ")", "for", "catIdx", "in", "range", "(", "numCategories", ")", ":", "key", "=", "str", "(", "[", "catIdx", "]", ")", "probs", "=", "numpy", ".", "ones", "(", "numCategories", ")", "/", "numCategories", "if", "catIdx", "==", "0", "or", "catIdx", "==", "4", ":", "probs", ".", "fill", "(", "0", ")", "probs", "[", "1", "]", "=", "1.0", "# lead only to b", "firstOrder", "[", "key", "]", "=", "probs", "# --------------------------------------------------------------------", "# 2nd order transitions", "# a-b should lead to c 80% and d 20%", "# e-b should lead to c 20% and d 80%", "secondOrder", "=", "dict", "(", ")", "for", "firstIdx", "in", "range", "(", "numCategories", ")", ":", "for", "secondIdx", "in", "range", "(", "numCategories", ")", ":", "key", "=", "str", "(", "[", "firstIdx", ",", "secondIdx", "]", ")", "probs", "=", "numpy", ".", "ones", "(", "numCategories", ")", "/", "numCategories", "if", "key", "==", "str", "(", "[", "0", ",", "1", "]", ")", ":", "probs", ".", "fill", "(", "0", ")", "probs", "[", "2", "]", "=", "0.80", "# 'ab' leads to 'c' 80% of the time", "probs", "[", "3", "]", "=", "0.20", "# 'ab' leads to 'd' 20% of the time", "elif", "key", "==", "str", "(", "[", "4", ",", "1", "]", ")", ":", "probs", ".", "fill", "(", "0", ")", "probs", "[", "2", "]", "=", "0.20", "# 'eb' leads to 'c' 20% of the time", "probs", "[", "3", "]", "=", "0.80", "# 'eb' leads to 'd' 80% of the time", "secondOrder", "[", "key", "]", "=", "probs", "return", "(", "initProb", ",", "firstOrder", ",", "secondOrder", ",", "3", ")" ]
37.333333
0.01495
def check_type(self): """Make sure each stochastic has a correct type, and identify discrete stochastics.""" self.isdiscrete = {} for stochastic in self.stochastics: if stochastic.dtype in integer_dtypes: self.isdiscrete[stochastic] = True elif stochastic.dtype in bool_dtypes: raise ValueError( 'Binary stochastics not supported by AdaptativeMetropolis.') else: self.isdiscrete[stochastic] = False
[ "def", "check_type", "(", "self", ")", ":", "self", ".", "isdiscrete", "=", "{", "}", "for", "stochastic", "in", "self", ".", "stochastics", ":", "if", "stochastic", ".", "dtype", "in", "integer_dtypes", ":", "self", ".", "isdiscrete", "[", "stochastic", "]", "=", "True", "elif", "stochastic", ".", "dtype", "in", "bool_dtypes", ":", "raise", "ValueError", "(", "'Binary stochastics not supported by AdaptativeMetropolis.'", ")", "else", ":", "self", ".", "isdiscrete", "[", "stochastic", "]", "=", "False" ]
46.909091
0.007605
def update_dropdown_list_slot(self): """Keep updating the dropdown list. Say, don't let the user choose USB devices if none is available """ self.dropdown_widget.clear() # this will trigger dropdown_changed_slot self.row_instance_by_index = [] for i, key in enumerate(self.row_instance_by_name.keys()): row_instance = self.row_instance_by_name[key] if (row_instance.isActive()): self.row_instance_by_index.append(row_instance) display_name = row_instance.getName() self.dropdown_widget.insertItem(i, display_name) row_instance.updateWidget()
[ "def", "update_dropdown_list_slot", "(", "self", ")", ":", "self", ".", "dropdown_widget", ".", "clear", "(", ")", "# this will trigger dropdown_changed_slot", "self", ".", "row_instance_by_index", "=", "[", "]", "for", "i", ",", "key", "in", "enumerate", "(", "self", ".", "row_instance_by_name", ".", "keys", "(", ")", ")", ":", "row_instance", "=", "self", ".", "row_instance_by_name", "[", "key", "]", "if", "(", "row_instance", ".", "isActive", "(", ")", ")", ":", "self", ".", "row_instance_by_index", ".", "append", "(", "row_instance", ")", "display_name", "=", "row_instance", ".", "getName", "(", ")", "self", ".", "dropdown_widget", ".", "insertItem", "(", "i", ",", "display_name", ")", "row_instance", ".", "updateWidget", "(", ")" ]
54.583333
0.006006
def _calc_inst_pmf(self): """Calculate the epsilon-greedy instrumental distribution""" # Easy vars t = self.t_ epsilon = self.epsilon alpha = self.alpha preds = self._preds_avg_in_strata weights = self.strata.weights_[:,np.newaxis] p1 = self._BB_model.theta_[:,np.newaxis] p0 = 1 - p1 if t==0: F = self._F_guess[self.opt_class] else: F = self._estimate[t - 1, self.opt_class] # Fill in non-finite estimates with the initial guess nonfinite = ~np.isfinite(F) F[nonfinite] = self._F_guess[self.opt_class][nonfinite] # Calculate optimal instrumental pmf sqrt_arg = np.sum(preds * (alpha**2 * F**2 * p0 + (1 - F)**2 * p1) + \ (1 - preds) * (1 - alpha)**2 * F**2 * p1, \ axis=1, keepdims=True) #: sum is over classifiers inst_pmf = weights * np.sqrt(sqrt_arg) # Normalize inst_pmf /= np.sum(inst_pmf) # Epsilon-greedy: (1 - epsilon) q + epsilon * p inst_pmf *= (1 - epsilon) inst_pmf += epsilon * weights if self.record_inst_hist: self._inst_pmf[:,t] = inst_pmf.ravel() else: self._inst_pmf = inst_pmf.ravel()
[ "def", "_calc_inst_pmf", "(", "self", ")", ":", "# Easy vars", "t", "=", "self", ".", "t_", "epsilon", "=", "self", ".", "epsilon", "alpha", "=", "self", ".", "alpha", "preds", "=", "self", ".", "_preds_avg_in_strata", "weights", "=", "self", ".", "strata", ".", "weights_", "[", ":", ",", "np", ".", "newaxis", "]", "p1", "=", "self", ".", "_BB_model", ".", "theta_", "[", ":", ",", "np", ".", "newaxis", "]", "p0", "=", "1", "-", "p1", "if", "t", "==", "0", ":", "F", "=", "self", ".", "_F_guess", "[", "self", ".", "opt_class", "]", "else", ":", "F", "=", "self", ".", "_estimate", "[", "t", "-", "1", ",", "self", ".", "opt_class", "]", "# Fill in non-finite estimates with the initial guess", "nonfinite", "=", "~", "np", ".", "isfinite", "(", "F", ")", "F", "[", "nonfinite", "]", "=", "self", ".", "_F_guess", "[", "self", ".", "opt_class", "]", "[", "nonfinite", "]", "# Calculate optimal instrumental pmf", "sqrt_arg", "=", "np", ".", "sum", "(", "preds", "*", "(", "alpha", "**", "2", "*", "F", "**", "2", "*", "p0", "+", "(", "1", "-", "F", ")", "**", "2", "*", "p1", ")", "+", "(", "1", "-", "preds", ")", "*", "(", "1", "-", "alpha", ")", "**", "2", "*", "F", "**", "2", "*", "p1", ",", "axis", "=", "1", ",", "keepdims", "=", "True", ")", "#: sum is over classifiers", "inst_pmf", "=", "weights", "*", "np", ".", "sqrt", "(", "sqrt_arg", ")", "# Normalize", "inst_pmf", "/=", "np", ".", "sum", "(", "inst_pmf", ")", "# Epsilon-greedy: (1 - epsilon) q + epsilon * p", "inst_pmf", "*=", "(", "1", "-", "epsilon", ")", "inst_pmf", "+=", "epsilon", "*", "weights", "if", "self", ".", "record_inst_hist", ":", "self", ".", "_inst_pmf", "[", ":", ",", "t", "]", "=", "inst_pmf", ".", "ravel", "(", ")", "else", ":", "self", ".", "_inst_pmf", "=", "inst_pmf", ".", "ravel", "(", ")" ]
38.727273
0.00687
def _dash_escape_text(text: str) -> str:
        """
        Add dash '-' (0x2D) and space ' ' (0x20) as prefix on each line
        :param text: Text to dash-escape
        :return: The dash-escaped text
        """
        dash_escaped_text = str()
        for line in text.splitlines(True):
            # add dash '-' (0x2D) and space ' ' (0x20) as prefix
            dash_escaped_text += DASH_ESCAPE_PREFIX + line
        return dash_escaped_text
[ "def", "_dash_escape_text", "(", "text", ":", "str", ")", "->", "str", ":", "dash_escaped_text", "=", "str", "(", ")", "for", "line", "in", "text", ".", "splitlines", "(", "True", ")", ":", "# add dash '-' (0x2D) and space ' ' (0x20) as prefix", "dash_escaped_text", "+=", "DASH_ESCAPE_PREFIX", "+", "line", "return", "dash_escaped_text" ]
29.857143
0.00464
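The DASH_ESCAPE_PREFIX constant referenced in the record above is defined elsewhere in its originating module; a minimal sketch of the behaviour, assuming the prefix is the dash-plus-space pair named in the docstring:

DASH_ESCAPE_PREFIX = "- "  # assumed value: dash (0x2D) followed by space (0x20)

def dash_escape_demo(text):
    escaped = ""
    for line in text.splitlines(True):  # True keeps the trailing newlines
        escaped += DASH_ESCAPE_PREFIX + line
    return escaped

print(dash_escape_demo("first line\nsecond line\n"))
# - first line
# - second line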
def _get_valid_with_defaults_modes(capabilities): """Reference: https://tools.ietf.org/html/rfc6243#section-4.3""" capability = capabilities[":with-defaults"] try: valid_modes = [capability.parameters["basic-mode"]] except KeyError: raise WithDefaultsError( "Invalid 'with-defaults' capability URI advertised by the server; " "missing 'basic-mode' parameter" ) try: also_supported = capability.parameters["also-supported"] except KeyError: return valid_modes valid_modes.extend(also_supported.split(",")) return valid_modes
[ "def", "_get_valid_with_defaults_modes", "(", "capabilities", ")", ":", "capability", "=", "capabilities", "[", "\":with-defaults\"", "]", "try", ":", "valid_modes", "=", "[", "capability", ".", "parameters", "[", "\"basic-mode\"", "]", "]", "except", "KeyError", ":", "raise", "WithDefaultsError", "(", "\"Invalid 'with-defaults' capability URI advertised by the server; \"", "\"missing 'basic-mode' parameter\"", ")", "try", ":", "also_supported", "=", "capability", ".", "parameters", "[", "\"also-supported\"", "]", "except", "KeyError", ":", "return", "valid_modes", "valid_modes", ".", "extend", "(", "also_supported", ".", "split", "(", "\",\"", ")", ")", "return", "valid_modes" ]
30.2
0.001605
def get_connection(self): """ Return a pair of socket object and string address. May block! """ conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: conn.bind(self._agent._get_filename()) conn.listen(1) (r, addr) = conn.accept() return r, addr except: raise
[ "def", "get_connection", "(", "self", ")", ":", "conn", "=", "socket", ".", "socket", "(", "socket", ".", "AF_UNIX", ",", "socket", ".", "SOCK_STREAM", ")", "try", ":", "conn", ".", "bind", "(", "self", ".", "_agent", ".", "_get_filename", "(", ")", ")", "conn", ".", "listen", "(", "1", ")", "(", "r", ",", "addr", ")", "=", "conn", ".", "accept", "(", ")", "return", "r", ",", "addr", "except", ":", "raise" ]
26.428571
0.007833
def _search_parents(initial_dir): """Search the initial and parent directories for a ``conf.py`` Sphinx configuration file that represents the root of a Sphinx project. Returns ------- root_dir : `pathlib.Path` Directory path containing a ``conf.py`` file. Raises ------ FileNotFoundError Raised if a ``conf.py`` file is not found in the initial directory or any parents. """ root_paths = ('.', '/') parent = pathlib.Path(initial_dir) while True: if _has_conf_py(parent): return parent if str(parent) in root_paths: break parent = parent.parent msg = ( "Cannot detect a conf.py Sphinx configuration file from {!s}. " "Are you inside a Sphinx documenation repository?" ).format(initial_dir) raise FileNotFoundError(msg)
[ "def", "_search_parents", "(", "initial_dir", ")", ":", "root_paths", "=", "(", "'.'", ",", "'/'", ")", "parent", "=", "pathlib", ".", "Path", "(", "initial_dir", ")", "while", "True", ":", "if", "_has_conf_py", "(", "parent", ")", ":", "return", "parent", "if", "str", "(", "parent", ")", "in", "root_paths", ":", "break", "parent", "=", "parent", ".", "parent", "msg", "=", "(", "\"Cannot detect a conf.py Sphinx configuration file from {!s}. \"", "\"Are you inside a Sphinx documenation repository?\"", ")", ".", "format", "(", "initial_dir", ")", "raise", "FileNotFoundError", "(", "msg", ")" ]
30.035714
0.001152
def pattern(self): """ Return the pattern used to check if a field name can be accepted by this dynamic field. Use a default one ('^fieldname_(.+)$') if not set when the field was initialized """ if self.dynamic_version_of is not None: return self.dynamic_version_of.pattern if not self._pattern: self._pattern = re.compile('^%s_(.+)$' % self.name) return self._pattern
[ "def", "pattern", "(", "self", ")", ":", "if", "self", ".", "dynamic_version_of", "is", "not", "None", ":", "return", "self", ".", "dynamic_version_of", ".", "pattern", "if", "not", "self", ".", "_pattern", ":", "self", ".", "_pattern", "=", "re", ".", "compile", "(", "'^%s_(.+)$'", "%", "self", ".", "name", ")", "return", "self", ".", "_pattern" ]
37.25
0.00655
def daily(target_coll, source_coll, interp_days=32, interp_method='linear'): """Generate daily ETa collection from ETo and ETf collections Parameters ---------- target_coll : ee.ImageCollection Source images will be interpolated to each target image time_start. Target images should have a daily time step. This will typically be the reference ET (ETr) collection. source_coll : ee.ImageCollection Images that will be interpolated to the target image collection. This will typically be the fraction of reference ET (ETrF) collection. interp_days : int, optional Number of days before and after each image date to include in the interpolation (the default is 32). interp_method : {'linear'}, optional Interpolation method (the default is 'linear'). Returns ------- ee.ImageCollection() of daily interpolated images Raises ------ ValueError If `interp_method` is not a supported method. """ # # DEADBEEF - This module is assuming that the time band is already in # # the source collection. # # Uncomment the following to add a time band here instead. # def add_utc0_time_band(image): # date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start'))) # return image.addBands([ # image.select([0]).double().multiply(0).add(date_0utc.millis())\ # .rename(['time'])]) # source_coll = ee.ImageCollection(source_coll.map(add_utc0_time_band)) if interp_method.lower() == 'linear': def _linear(image): """Linearly interpolate source images to target image time_start(s) Parameters ---------- image : ee.Image. The first band in the image will be used as the "target" image and will be returned with the output image. Returns ------- ee.Image of interpolated values with band name 'src' Notes ----- The source collection images must have a time band. This function is intended to be mapped over an image collection and can only take one input parameter. """ target_image = ee.Image(image).select(0).double() target_date = ee.Date(image.get('system:time_start')) # All filtering will be done based on 0 UTC dates utc0_date = utils.date_0utc(target_date) # utc0_time = target_date.update(hour=0, minute=0, second=0)\ # .millis().divide(1000).floor().multiply(1000) time_image = ee.Image.constant(utc0_date.millis()).double() # Build nodata images/masks that can be placed at the front/back of # of the qm image collections in case the collections are empty. bands = source_coll.first().bandNames() prev_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\ .double().rename(bands).updateMask(0)\ .set({ 'system:time_start': utc0_date.advance( -interp_days - 1, 'day').millis()}) next_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\ .double().rename(bands).updateMask(0)\ .set({ 'system:time_start': utc0_date.advance( interp_days + 2, 'day').millis()}) # Build separate collections for before and after the target date prev_qm_coll = source_coll.filterDate( utc0_date.advance(-interp_days, 'day'), utc0_date)\ .merge(ee.ImageCollection(prev_qm_mask)) next_qm_coll = source_coll.filterDate( utc0_date, utc0_date.advance(interp_days + 1, 'day'))\ .merge(ee.ImageCollection(next_qm_mask)) # Flatten the previous/next collections to single images # The closest image in time should be on "top" # CGM - Is the previous collection already sorted? 
# prev_qm_image = prev_qm_coll.mosaic() prev_qm_image = prev_qm_coll.sort('system:time_start', True).mosaic() next_qm_image = next_qm_coll.sort('system:time_start', False).mosaic() # DEADBEEF - It might be easier to interpolate all bands instead of # separating the value and time bands # prev_value_image = ee.Image(prev_qm_image).double() # next_value_image = ee.Image(next_qm_image).double() # Interpolate all bands except the "time" band prev_bands = prev_qm_image.bandNames()\ .filter(ee.Filter.notEquals('item', 'time')) next_bands = next_qm_image.bandNames() \ .filter(ee.Filter.notEquals('item', 'time')) prev_value_image = ee.Image(prev_qm_image.select(prev_bands)).double() next_value_image = ee.Image(next_qm_image.select(next_bands)).double() prev_time_image = ee.Image(prev_qm_image.select('time')).double() next_time_image = ee.Image(next_qm_image.select('time')).double() # Fill masked values with values from the opposite image # Something like this is needed to ensure there are always two # values to interpolate between # For data gaps, this will cause a flat line instead of a ramp prev_time_mosaic = ee.Image(ee.ImageCollection.fromImages([ next_time_image, prev_time_image]).mosaic()) next_time_mosaic = ee.Image(ee.ImageCollection.fromImages([ prev_time_image, next_time_image]).mosaic()) prev_value_mosaic = ee.Image(ee.ImageCollection.fromImages([ next_value_image, prev_value_image]).mosaic()) next_value_mosaic = ee.Image(ee.ImageCollection.fromImages([ prev_value_image, next_value_image]).mosaic()) # Calculate time ratio of the current image between other cloud free images time_ratio_image = time_image.subtract(prev_time_mosaic) \ .divide(next_time_mosaic.subtract(prev_time_mosaic)) # Interpolate values to the current image time interp_value_image = next_value_mosaic.subtract(prev_value_mosaic) \ .multiply(time_ratio_image).add(prev_value_mosaic) # CGM # Should/can the target image be mapped to the interpolated image? # Is there a clean way of computing ET here? return interp_value_image \ .addBands(target_image) \ .set({ 'system:index': image.get('system:index'), 'system:time_start': image.get('system:time_start'), # 'system:time_start': utc0_time, }) interp_coll = ee.ImageCollection(target_coll.map(_linear)) # elif interp_method.lower() == 'nearest': # interp_coll = ee.ImageCollection(target_coll.map(_nearest)) else: raise ValueError('invalid interpolation method: {}'.format(interp_method)) return interp_coll
[ "def", "daily", "(", "target_coll", ",", "source_coll", ",", "interp_days", "=", "32", ",", "interp_method", "=", "'linear'", ")", ":", "# # DEADBEEF - This module is assuming that the time band is already in", "# # the source collection.", "# # Uncomment the following to add a time band here instead.", "# def add_utc0_time_band(image):", "# date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start')))", "# return image.addBands([", "# image.select([0]).double().multiply(0).add(date_0utc.millis())\\", "# .rename(['time'])])", "# source_coll = ee.ImageCollection(source_coll.map(add_utc0_time_band))", "if", "interp_method", ".", "lower", "(", ")", "==", "'linear'", ":", "def", "_linear", "(", "image", ")", ":", "\"\"\"Linearly interpolate source images to target image time_start(s)\n\n Parameters\n ----------\n image : ee.Image.\n The first band in the image will be used as the \"target\" image\n and will be returned with the output image.\n\n Returns\n -------\n ee.Image of interpolated values with band name 'src'\n\n Notes\n -----\n The source collection images must have a time band.\n This function is intended to be mapped over an image collection and\n can only take one input parameter.\n\n \"\"\"", "target_image", "=", "ee", ".", "Image", "(", "image", ")", ".", "select", "(", "0", ")", ".", "double", "(", ")", "target_date", "=", "ee", ".", "Date", "(", "image", ".", "get", "(", "'system:time_start'", ")", ")", "# All filtering will be done based on 0 UTC dates", "utc0_date", "=", "utils", ".", "date_0utc", "(", "target_date", ")", "# utc0_time = target_date.update(hour=0, minute=0, second=0)\\", "# .millis().divide(1000).floor().multiply(1000)", "time_image", "=", "ee", ".", "Image", ".", "constant", "(", "utc0_date", ".", "millis", "(", ")", ")", ".", "double", "(", ")", "# Build nodata images/masks that can be placed at the front/back of", "# of the qm image collections in case the collections are empty.", "bands", "=", "source_coll", ".", "first", "(", ")", ".", "bandNames", "(", ")", "prev_qm_mask", "=", "ee", ".", "Image", ".", "constant", "(", "ee", ".", "List", ".", "repeat", "(", "1", ",", "bands", ".", "length", "(", ")", ")", ")", ".", "double", "(", ")", ".", "rename", "(", "bands", ")", ".", "updateMask", "(", "0", ")", ".", "set", "(", "{", "'system:time_start'", ":", "utc0_date", ".", "advance", "(", "-", "interp_days", "-", "1", ",", "'day'", ")", ".", "millis", "(", ")", "}", ")", "next_qm_mask", "=", "ee", ".", "Image", ".", "constant", "(", "ee", ".", "List", ".", "repeat", "(", "1", ",", "bands", ".", "length", "(", ")", ")", ")", ".", "double", "(", ")", ".", "rename", "(", "bands", ")", ".", "updateMask", "(", "0", ")", ".", "set", "(", "{", "'system:time_start'", ":", "utc0_date", ".", "advance", "(", "interp_days", "+", "2", ",", "'day'", ")", ".", "millis", "(", ")", "}", ")", "# Build separate collections for before and after the target date", "prev_qm_coll", "=", "source_coll", ".", "filterDate", "(", "utc0_date", ".", "advance", "(", "-", "interp_days", ",", "'day'", ")", ",", "utc0_date", ")", ".", "merge", "(", "ee", ".", "ImageCollection", "(", "prev_qm_mask", ")", ")", "next_qm_coll", "=", "source_coll", ".", "filterDate", "(", "utc0_date", ",", "utc0_date", ".", "advance", "(", "interp_days", "+", "1", ",", "'day'", ")", ")", ".", "merge", "(", "ee", ".", "ImageCollection", "(", "next_qm_mask", ")", ")", "# Flatten the previous/next collections to single images", "# The closest image in time should be on \"top\"", "# CGM - Is the previous 
collection already sorted?", "# prev_qm_image = prev_qm_coll.mosaic()", "prev_qm_image", "=", "prev_qm_coll", ".", "sort", "(", "'system:time_start'", ",", "True", ")", ".", "mosaic", "(", ")", "next_qm_image", "=", "next_qm_coll", ".", "sort", "(", "'system:time_start'", ",", "False", ")", ".", "mosaic", "(", ")", "# DEADBEEF - It might be easier to interpolate all bands instead of", "# separating the value and time bands", "# prev_value_image = ee.Image(prev_qm_image).double()", "# next_value_image = ee.Image(next_qm_image).double()", "# Interpolate all bands except the \"time\" band", "prev_bands", "=", "prev_qm_image", ".", "bandNames", "(", ")", ".", "filter", "(", "ee", ".", "Filter", ".", "notEquals", "(", "'item'", ",", "'time'", ")", ")", "next_bands", "=", "next_qm_image", ".", "bandNames", "(", ")", ".", "filter", "(", "ee", ".", "Filter", ".", "notEquals", "(", "'item'", ",", "'time'", ")", ")", "prev_value_image", "=", "ee", ".", "Image", "(", "prev_qm_image", ".", "select", "(", "prev_bands", ")", ")", ".", "double", "(", ")", "next_value_image", "=", "ee", ".", "Image", "(", "next_qm_image", ".", "select", "(", "next_bands", ")", ")", ".", "double", "(", ")", "prev_time_image", "=", "ee", ".", "Image", "(", "prev_qm_image", ".", "select", "(", "'time'", ")", ")", ".", "double", "(", ")", "next_time_image", "=", "ee", ".", "Image", "(", "next_qm_image", ".", "select", "(", "'time'", ")", ")", ".", "double", "(", ")", "# Fill masked values with values from the opposite image", "# Something like this is needed to ensure there are always two", "# values to interpolate between", "# For data gaps, this will cause a flat line instead of a ramp", "prev_time_mosaic", "=", "ee", ".", "Image", "(", "ee", ".", "ImageCollection", ".", "fromImages", "(", "[", "next_time_image", ",", "prev_time_image", "]", ")", ".", "mosaic", "(", ")", ")", "next_time_mosaic", "=", "ee", ".", "Image", "(", "ee", ".", "ImageCollection", ".", "fromImages", "(", "[", "prev_time_image", ",", "next_time_image", "]", ")", ".", "mosaic", "(", ")", ")", "prev_value_mosaic", "=", "ee", ".", "Image", "(", "ee", ".", "ImageCollection", ".", "fromImages", "(", "[", "next_value_image", ",", "prev_value_image", "]", ")", ".", "mosaic", "(", ")", ")", "next_value_mosaic", "=", "ee", ".", "Image", "(", "ee", ".", "ImageCollection", ".", "fromImages", "(", "[", "prev_value_image", ",", "next_value_image", "]", ")", ".", "mosaic", "(", ")", ")", "# Calculate time ratio of the current image between other cloud free images", "time_ratio_image", "=", "time_image", ".", "subtract", "(", "prev_time_mosaic", ")", ".", "divide", "(", "next_time_mosaic", ".", "subtract", "(", "prev_time_mosaic", ")", ")", "# Interpolate values to the current image time", "interp_value_image", "=", "next_value_mosaic", ".", "subtract", "(", "prev_value_mosaic", ")", ".", "multiply", "(", "time_ratio_image", ")", ".", "add", "(", "prev_value_mosaic", ")", "# CGM", "# Should/can the target image be mapped to the interpolated image?", "# Is there a clean way of computing ET here?", "return", "interp_value_image", ".", "addBands", "(", "target_image", ")", ".", "set", "(", "{", "'system:index'", ":", "image", ".", "get", "(", "'system:index'", ")", ",", "'system:time_start'", ":", "image", ".", "get", "(", "'system:time_start'", ")", ",", "# 'system:time_start': utc0_time,", "}", ")", "interp_coll", "=", "ee", ".", "ImageCollection", "(", "target_coll", ".", "map", "(", "_linear", ")", ")", "# elif interp_method.lower() == 'nearest':", "# interp_coll = 
ee.ImageCollection(target_coll.map(_nearest))", "else", ":", "raise", "ValueError", "(", "'invalid interpolation method: {}'", ".", "format", "(", "interp_method", ")", ")", "return", "interp_coll" ]
46.480263
0.001524
def subscribe(self, connection, destination): """ Subscribes a connection to the specified topic destination. @param connection: The client connection to subscribe. @type connection: L{coilmq.server.StompConnection} @param destination: The topic destination (e.g. '/topic/foo') @type destination: C{str} """ self.log.debug("Subscribing %s to %s" % (connection, destination)) self._topics[destination].add(connection)
[ "def", "subscribe", "(", "self", ",", "connection", ",", "destination", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Subscribing %s to %s\"", "%", "(", "connection", ",", "destination", ")", ")", "self", ".", "_topics", "[", "destination", "]", ".", "add", "(", "connection", ")" ]
40.083333
0.00813
def _headers(self, headers_dict): """ Convert dictionary of headers into twisted.web.client.Headers object. """ return Headers(dict((k,[v]) for (k,v) in headers_dict.items()))
[ "def", "_headers", "(", "self", ",", "headers_dict", ")", ":", "return", "Headers", "(", "dict", "(", "(", "k", ",", "[", "v", "]", ")", "for", "(", "k", ",", "v", ")", "in", "headers_dict", ".", "items", "(", ")", ")", ")" ]
40.6
0.019324
def get_login(self, use_session=True): """ Get an active login session @param use_session: Use a saved session file if available @type use_session: bool """ # Should we try and return an existing login session? if use_session and self._login.check(): self.cookiejar = self._login.cookiejar return self.cookiejar # Prompt the user for their login credentials username = click.prompt('IPS Username') password = click.prompt('IPS Password', hide_input=True) remember = click.confirm('Save login session?', True) # Process the login cookiejar = self._login.process(username, password, remember) if remember: self.cookiejar = cookiejar return cookiejar
[ "def", "get_login", "(", "self", ",", "use_session", "=", "True", ")", ":", "# Should we try and return an existing login session?", "if", "use_session", "and", "self", ".", "_login", ".", "check", "(", ")", ":", "self", ".", "cookiejar", "=", "self", ".", "_login", ".", "cookiejar", "return", "self", ".", "cookiejar", "# Prompt the user for their login credentials", "username", "=", "click", ".", "prompt", "(", "'IPS Username'", ")", "password", "=", "click", ".", "prompt", "(", "'IPS Password'", ",", "hide_input", "=", "True", ")", "remember", "=", "click", ".", "confirm", "(", "'Save login session?'", ",", "True", ")", "# Process the login", "cookiejar", "=", "self", ".", "_login", ".", "process", "(", "username", ",", "password", ",", "remember", ")", "if", "remember", ":", "self", ".", "cookiejar", "=", "cookiejar", "return", "cookiejar" ]
36.090909
0.002454
def astral(msg): """Does `msg` have characters outside the Basic Multilingual Plane?""" # Python2 narrow builds present astral characters as surrogate pairs. # By encoding as utf32, and decoding DWORDS, we can get at the real code # points. utf32 = msg.encode("utf32")[4:] # [4:] to drop the bom code_points = struct.unpack("%dI" % (len(utf32) / 4), utf32) return any(cp > 0xFFFF for cp in code_points)
[ "def", "astral", "(", "msg", ")", ":", "# Python2 narrow builds present astral characters as surrogate pairs.", "# By encoding as utf32, and decoding DWORDS, we can get at the real code", "# points.", "utf32", "=", "msg", ".", "encode", "(", "\"utf32\"", ")", "[", "4", ":", "]", "# [4:] to drop the bom", "code_points", "=", "struct", ".", "unpack", "(", "\"%dI\"", "%", "(", "len", "(", "utf32", ")", "/", "4", ")", ",", "utf32", ")", "return", "any", "(", "cp", ">", "0xFFFF", "for", "cp", "in", "code_points", ")" ]
53.875
0.002283
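A quick sanity check of the BMP test above, assuming astral() is imported from its module; the emoji literal is simply an arbitrary code point beyond U+FFFF:

print(astral(u"plain BMP text"))                 # False
print(astral(u"contains \U0001F600 (U+1F600)"))  # True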
def checkPortIsOpen(remoteServerHost=ServerHost, port = Port): ''' Checks if the specified port is open :param remoteServerHost: the host address :param port: port which needs to be checked :return: ``True`` if port is open, ``False`` otherwise ''' remoteServerIP = socket.gethostbyname(remoteServerHost) try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = sock.connect_ex((remoteServerIP, int(port))) if result == 0: return True else : return False sock.close() #FIXME: the above line is unreachable except KeyboardInterrupt: print("You pressed Ctrl+C") sys.exit() except socket.gaierror: print('Hostname could not be resolved. Exiting') sys.exit() except socket.error: print("Couldn't connect to server") sys.exit()
[ "def", "checkPortIsOpen", "(", "remoteServerHost", "=", "ServerHost", ",", "port", "=", "Port", ")", ":", "remoteServerIP", "=", "socket", ".", "gethostbyname", "(", "remoteServerHost", ")", "try", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "result", "=", "sock", ".", "connect_ex", "(", "(", "remoteServerIP", ",", "int", "(", "port", ")", ")", ")", "if", "result", "==", "0", ":", "return", "True", "else", ":", "return", "False", "sock", ".", "close", "(", ")", "#FIXME: the above line is unreachable", "except", "KeyboardInterrupt", ":", "print", "(", "\"You pressed Ctrl+C\"", ")", "sys", ".", "exit", "(", ")", "except", "socket", ".", "gaierror", ":", "print", "(", "'Hostname could not be resolved. Exiting'", ")", "sys", ".", "exit", "(", ")", "except", "socket", ".", "error", ":", "print", "(", "\"Couldn't connect to server\"", ")", "sys", ".", "exit", "(", ")" ]
30.206897
0.006637
def isStochastic(matrix): """Check that ``matrix`` is row stochastic. Returns ======= is_stochastic : bool ``True`` if ``matrix`` is row stochastic, ``False`` otherwise. """ try: absdiff = (_np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0]))) except AttributeError: matrix = _np.array(matrix) absdiff = (_np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0]))) return (absdiff.max() <= 10*_np.spacing(_np.float64(1)))
[ "def", "isStochastic", "(", "matrix", ")", ":", "try", ":", "absdiff", "=", "(", "_np", ".", "abs", "(", "matrix", ".", "sum", "(", "axis", "=", "1", ")", "-", "_np", ".", "ones", "(", "matrix", ".", "shape", "[", "0", "]", ")", ")", ")", "except", "AttributeError", ":", "matrix", "=", "_np", ".", "array", "(", "matrix", ")", "absdiff", "=", "(", "_np", ".", "abs", "(", "matrix", ".", "sum", "(", "axis", "=", "1", ")", "-", "_np", ".", "ones", "(", "matrix", ".", "shape", "[", "0", "]", ")", ")", ")", "return", "(", "absdiff", ".", "max", "(", ")", "<=", "10", "*", "_np", ".", "spacing", "(", "_np", ".", "float64", "(", "1", ")", ")", ")" ]
31.533333
0.002053
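For reference, a small check of the row-sum tolerance used by isStochastic above, keeping the same _np alias for NumPy as the record:

import numpy as _np

P = _np.array([[0.5, 0.5],
               [0.1, 0.9]])   # every row sums to 1
Q = _np.array([[0.5, 0.6],
               [0.1, 0.9]])   # first row sums to 1.1

print(isStochastic(P))  # True
print(isStochastic(Q))  # False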
def is_code(self): """Is this cell a code cell?""" if self.cell_type == 'code': return True if self.cell_type == 'raw' and 'active' in self.metadata: return True return False
[ "def", "is_code", "(", "self", ")", ":", "if", "self", ".", "cell_type", "==", "'code'", ":", "return", "True", "if", "self", ".", "cell_type", "==", "'raw'", "and", "'active'", "in", "self", ".", "metadata", ":", "return", "True", "return", "False" ]
32
0.008696
def smooth_ot_dual(a, b, M, reg, reg_type='l2', method="L-BFGS-B", stopThr=1e-9,
                   numItermax=500, verbose=False, log=False):
    r"""
    Solve the regularized OT problem in the dual and return the OT matrix

    The function solves the smooth relaxed dual formulation (7) in [17]_ :

    .. math::
        \max_{\alpha,\beta}\quad a^T\alpha+b^T\beta-\sum_j\delta_\Omega(\alpha+\beta_j-\mathbf{m}_j)

    where :

    - :math:`\mathbf{m}_j` is the jth column of the cost matrix
    - :math:`\delta_\Omega` is the convex conjugate of the regularization term :math:`\Omega`
    - a and b are source and target weights (sum to 1)

    The OT matrix is reconstructed from the gradient of :math:`\delta_\Omega`
    (See [17]_ Proposition 1).
    The optimization algorithm uses gradient descent (L-BFGS by default).

    Parameters
    ----------
    a : np.ndarray (ns,)
        sample weights in the source domain
    b : np.ndarray (nt,) or np.ndarray (nt,nbb)
        samples in the target domain, compute sinkhorn with multiple targets
        and fixed M if b is a matrix (return OT loss + dual variables in log)
    M : np.ndarray (ns,nt)
        loss matrix
    reg : float
        Regularization term >0
    reg_type : str
        Regularization type, can be the following (default ='l2'):

        - 'kl' : Kullback Leibler (~ Neg-entropy used in sinkhorn [2]_)

        - 'l2' : Squared Euclidean regularization
    method : str
        Solver to use for scipy.optimize.minimize
    numItermax : int, optional
        Max number of iterations
    stopThr : float, optional
        Stop threshold on error (>0)
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True

    Returns
    -------
    gamma : (ns x nt) ndarray
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary returned only if log==True in parameters

    References
    ----------

    .. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013

    .. [17] Blondel, M., Seguy, V., & Rolet, A. (2018). Smooth and Sparse Optimal Transport. Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics (AISTATS).

    See Also
    --------
    ot.lp.emd : Unregularized OT
    ot.sinkhorn : Entropic regularized OT
    ot.optim.cg : General regularized OT

    """

    if reg_type.lower() in ['l2', 'squaredl2']:
        regul = SquaredL2(gamma=reg)
    elif reg_type.lower() in ['entropic', 'negentropy', 'kl']:
        regul = NegEntropy(gamma=reg)
    else:
        raise NotImplementedError('Unknown regularization')

    # solve dual
    alpha, beta, res = solve_dual(a, b, M, regul, max_iter=numItermax,
                                  tol=stopThr, verbose=verbose)

    # reconstruct transport matrix
    G = get_plan_from_dual(alpha, beta, M, regul)

    if log:
        log = {'alpha': alpha, 'beta': beta, 'res': res}
        return G, log
    else:
        return G
[ "def", "smooth_ot_dual", "(", "a", ",", "b", ",", "M", ",", "reg", ",", "reg_type", "=", "'l2'", ",", "method", "=", "\"L-BFGS-B\"", ",", "stopThr", "=", "1e-9", ",", "numItermax", "=", "500", ",", "verbose", "=", "False", ",", "log", "=", "False", ")", ":", "if", "reg_type", ".", "lower", "(", ")", "in", "[", "'l2'", ",", "'squaredl2'", "]", ":", "regul", "=", "SquaredL2", "(", "gamma", "=", "reg", ")", "elif", "reg_type", ".", "lower", "(", ")", "in", "[", "'entropic'", ",", "'negentropy'", ",", "'kl'", "]", ":", "regul", "=", "NegEntropy", "(", "gamma", "=", "reg", ")", "else", ":", "raise", "NotImplementedError", "(", "'Unknown regularization'", ")", "# solve dual", "alpha", ",", "beta", ",", "res", "=", "solve_dual", "(", "a", ",", "b", ",", "M", ",", "regul", ",", "max_iter", "=", "numItermax", ",", "tol", "=", "stopThr", ",", "verbose", "=", "verbose", ")", "# reconstruct transport matrix", "G", "=", "get_plan_from_dual", "(", "alpha", ",", "beta", ",", "M", ",", "regul", ")", "if", "log", ":", "log", "=", "{", "'alpha'", ":", "alpha", ",", "'beta'", ":", "beta", ",", "'res'", ":", "res", "}", "return", "G", ",", "log", "else", ":", "return", "G" ]
33.644444
0.002246
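A hypothetical call of the solver above on two small histograms; the ot.smooth module path is an assumption (the function belongs to the POT package), and the cost matrix values are arbitrary:

import numpy as np
import ot  # POT - Python Optimal Transport (assumed to expose ot.smooth)

a = np.array([0.5, 0.5])                  # source weights, sum to 1
b = np.array([0.25, 0.25, 0.5])           # target weights, sum to 1
M = np.array([[0.0, 1.0, 2.0],
              [1.0, 0.0, 1.0]])           # ns x nt cost matrix

G = ot.smooth.smooth_ot_dual(a, b, M, reg=0.1, reg_type='l2')
print(G.shape)  # (2, 3) relaxed transport plan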
def verify(self): """ ## FOR DEBUGGING ONLY ## Checks the table to ensure that the invariants are held. """ if self.all_intervals: ## top_node.all_children() == self.all_intervals try: assert self.top_node.all_children() == self.all_intervals except AssertionError as e: print( 'Error: the tree and the membership set are out of sync!' ) tivs = set(self.top_node.all_children()) print('top_node.all_children() - all_intervals:') try: pprint except NameError: from pprint import pprint pprint(tivs - self.all_intervals) print('all_intervals - top_node.all_children():') pprint(self.all_intervals - tivs) raise e ## All members are Intervals for iv in self: assert isinstance(iv, Interval), ( "Error: Only Interval objects allowed in IntervalTree:" " {0}".format(iv) ) ## No null intervals for iv in self: assert not iv.is_null(), ( "Error: Null Interval objects not allowed in IntervalTree:" " {0}".format(iv) ) ## Reconstruct boundary_table bound_check = {} for iv in self: if iv.begin in bound_check: bound_check[iv.begin] += 1 else: bound_check[iv.begin] = 1 if iv.end in bound_check: bound_check[iv.end] += 1 else: bound_check[iv.end] = 1 ## Reconstructed boundary table (bound_check) ==? boundary_table assert set(self.boundary_table.keys()) == set(bound_check.keys()),\ 'Error: boundary_table is out of sync with ' \ 'the intervals in the tree!' # For efficiency reasons this should be iteritems in Py2, but we # don't care much for efficiency in debug methods anyway. for key, val in self.boundary_table.items(): assert bound_check[key] == val, \ 'Error: boundary_table[{0}] should be {1},' \ ' but is {2}!'.format( key, bound_check[key], val) ## Internal tree structure self.top_node.verify(set()) else: ## Verify empty tree assert not self.boundary_table, \ "Error: boundary table should be empty!" assert self.top_node is None, \ "Error: top_node isn't None!"
[ "def", "verify", "(", "self", ")", ":", "if", "self", ".", "all_intervals", ":", "## top_node.all_children() == self.all_intervals", "try", ":", "assert", "self", ".", "top_node", ".", "all_children", "(", ")", "==", "self", ".", "all_intervals", "except", "AssertionError", "as", "e", ":", "print", "(", "'Error: the tree and the membership set are out of sync!'", ")", "tivs", "=", "set", "(", "self", ".", "top_node", ".", "all_children", "(", ")", ")", "print", "(", "'top_node.all_children() - all_intervals:'", ")", "try", ":", "pprint", "except", "NameError", ":", "from", "pprint", "import", "pprint", "pprint", "(", "tivs", "-", "self", ".", "all_intervals", ")", "print", "(", "'all_intervals - top_node.all_children():'", ")", "pprint", "(", "self", ".", "all_intervals", "-", "tivs", ")", "raise", "e", "## All members are Intervals", "for", "iv", "in", "self", ":", "assert", "isinstance", "(", "iv", ",", "Interval", ")", ",", "(", "\"Error: Only Interval objects allowed in IntervalTree:\"", "\" {0}\"", ".", "format", "(", "iv", ")", ")", "## No null intervals", "for", "iv", "in", "self", ":", "assert", "not", "iv", ".", "is_null", "(", ")", ",", "(", "\"Error: Null Interval objects not allowed in IntervalTree:\"", "\" {0}\"", ".", "format", "(", "iv", ")", ")", "## Reconstruct boundary_table", "bound_check", "=", "{", "}", "for", "iv", "in", "self", ":", "if", "iv", ".", "begin", "in", "bound_check", ":", "bound_check", "[", "iv", ".", "begin", "]", "+=", "1", "else", ":", "bound_check", "[", "iv", ".", "begin", "]", "=", "1", "if", "iv", ".", "end", "in", "bound_check", ":", "bound_check", "[", "iv", ".", "end", "]", "+=", "1", "else", ":", "bound_check", "[", "iv", ".", "end", "]", "=", "1", "## Reconstructed boundary table (bound_check) ==? boundary_table", "assert", "set", "(", "self", ".", "boundary_table", ".", "keys", "(", ")", ")", "==", "set", "(", "bound_check", ".", "keys", "(", ")", ")", ",", "'Error: boundary_table is out of sync with '", "'the intervals in the tree!'", "# For efficiency reasons this should be iteritems in Py2, but we", "# don't care much for efficiency in debug methods anyway.", "for", "key", ",", "val", "in", "self", ".", "boundary_table", ".", "items", "(", ")", ":", "assert", "bound_check", "[", "key", "]", "==", "val", ",", "'Error: boundary_table[{0}] should be {1},'", "' but is {2}!'", ".", "format", "(", "key", ",", "bound_check", "[", "key", "]", ",", "val", ")", "## Internal tree structure", "self", ".", "top_node", ".", "verify", "(", "set", "(", ")", ")", "else", ":", "## Verify empty tree", "assert", "not", "self", ".", "boundary_table", ",", "\"Error: boundary table should be empty!\"", "assert", "self", ".", "top_node", "is", "None", ",", "\"Error: top_node isn't None!\"" ]
38.873239
0.00318
def add_locations(self, locations): """Add extra locations to AstralGeocoder. Extra locations can be * A single string containing one or more locations separated by a newline. * A list of strings * A list of lists/tuples that are passed to a :class:`Location` constructor """ if isinstance(locations, (str, ustr)): self._add_from_str(locations) elif isinstance(locations, (list, tuple)): self._add_from_list(locations)
[ "def", "add_locations", "(", "self", ",", "locations", ")", ":", "if", "isinstance", "(", "locations", ",", "(", "str", ",", "ustr", ")", ")", ":", "self", ".", "_add_from_str", "(", "locations", ")", "elif", "isinstance", "(", "locations", ",", "(", "list", ",", "tuple", ")", ")", ":", "self", ".", "_add_from_list", "(", "locations", ")" ]
35.428571
0.007859
def main( upload='usbasp', core='arduino', replace_existing=True, ): """install custom boards.""" def install(mcu, f_cpu, kbyte): board = AutoBunch() board.name = TEMPL_NAME.format(mcu=mcu, f_cpu=format_freq(f_cpu), upload=upload) board_id = TEMPL_ID.format(mcu=mcu, f_cpu=(f_cpu), upload=upload) board.upload.using = upload board.upload.maximum_size = kbyte * 1024 board.build.mcu = mcu board.build.f_cpu = str(f_cpu) + 'L' board.build.core = core # for 1.0 board.build.variant = 'standard' install_board(board_id, board, replace_existing=replace_existing) install('atmega8', 1000000, 8) install('atmega8', 8000000, 8) install('atmega8', 12000000, 8) install('atmega88', 1000000, 8) install('atmega88', 8000000, 8) install('atmega88', 12000000, 8) install('atmega88', 20000000, 8) install('atmega328p', 20000000, 32) install('atmega328p', 16000000, 32) install('atmega328p', 8000000, 32) install('atmega328p', 1000000, 32)
[ "def", "main", "(", "upload", "=", "'usbasp'", ",", "core", "=", "'arduino'", ",", "replace_existing", "=", "True", ",", ")", ":", "def", "install", "(", "mcu", ",", "f_cpu", ",", "kbyte", ")", ":", "board", "=", "AutoBunch", "(", ")", "board", ".", "name", "=", "TEMPL_NAME", ".", "format", "(", "mcu", "=", "mcu", ",", "f_cpu", "=", "format_freq", "(", "f_cpu", ")", ",", "upload", "=", "upload", ")", "board_id", "=", "TEMPL_ID", ".", "format", "(", "mcu", "=", "mcu", ",", "f_cpu", "=", "(", "f_cpu", ")", ",", "upload", "=", "upload", ")", "board", ".", "upload", ".", "using", "=", "upload", "board", ".", "upload", ".", "maximum_size", "=", "kbyte", "*", "1024", "board", ".", "build", ".", "mcu", "=", "mcu", "board", ".", "build", ".", "f_cpu", "=", "str", "(", "f_cpu", ")", "+", "'L'", "board", ".", "build", ".", "core", "=", "core", "# for 1.0", "board", ".", "build", ".", "variant", "=", "'standard'", "install_board", "(", "board_id", ",", "board", ",", "replace_existing", "=", "replace_existing", ")", "install", "(", "'atmega8'", ",", "1000000", ",", "8", ")", "install", "(", "'atmega8'", ",", "8000000", ",", "8", ")", "install", "(", "'atmega8'", ",", "12000000", ",", "8", ")", "install", "(", "'atmega88'", ",", "1000000", ",", "8", ")", "install", "(", "'atmega88'", ",", "8000000", ",", "8", ")", "install", "(", "'atmega88'", ",", "12000000", ",", "8", ")", "install", "(", "'atmega88'", ",", "20000000", ",", "8", ")", "install", "(", "'atmega328p'", ",", "20000000", ",", "32", ")", "install", "(", "'atmega328p'", ",", "16000000", ",", "32", ")", "install", "(", "'atmega328p'", ",", "8000000", ",", "32", ")", "install", "(", "'atmega328p'", ",", "1000000", ",", "32", ")" ]
30.538462
0.000814
def schedule_checksum_verification(frequency=None, batch_interval=None, max_count=None, max_size=None, files_query=None, checksum_kwargs=None): """Schedule a batch of files for checksum verification. The purpose of this task is to be periodically called through `celerybeat`, in order achieve a repeated verification cycle of all file checksums, while following a set of constraints in order to throttle the execution rate of the checks. :param dict frequency: Time period over which a full check of all files should be performed. The argument is a dictionary that will be passed as arguments to the `datetime.timedelta` class. Defaults to a month (30 days). :param dict batch_interval: How often a batch is sent. If not supplied, this information will be extracted, if possible, from the celery.conf['CELERYBEAT_SCHEDULE'] entry of this task. The argument is a dictionary that will be passed as arguments to the `datetime.timedelta` class. :param int max_count: Max count of files of a single batch. When set to `0` it's automatically calculated to be distributed equally through the number of total batches. :param int max_size: Max size of a single batch in bytes. When set to `0` it's automatically calculated to be distributed equally through the number of total batches. :param str files_query: Import path for a function returning a FileInstance query for files that should be checked. :param dict checksum_kwargs: Passed to ``FileInstance.verify_checksum``. """ assert max_count is not None or max_size is not None frequency = timedelta(**frequency) if frequency else timedelta(days=30) if batch_interval: batch_interval = timedelta(**batch_interval) else: celery_schedule = current_celery.conf.get('CELERYBEAT_SCHEDULE', {}) batch_interval = batch_interval or next( (v['schedule'] for v in celery_schedule.values() if v.get('task') == schedule_checksum_verification.name), None) if not batch_interval or not isinstance(batch_interval, timedelta): raise Exception(u'No "batch_interval" could be decided') total_batches = int( frequency.total_seconds() / batch_interval.total_seconds()) files = obj_or_import_string( files_query, default=default_checksum_verification_files_query)() files = files.order_by( sa.func.coalesce(FileInstance.last_check_at, date.min)) if max_count is not None: all_files_count = files.count() min_count = int(math.ceil(all_files_count / total_batches)) max_count = min_count if max_count == 0 else max_count if max_count < min_count: current_app.logger.warning( u'The "max_count" you specified ({0}) is smaller than the ' 'minimum batch file count required ({1}) in order to achieve ' 'the file checks over the specified period ({2}).' .format(max_count, min_count, frequency)) files = files.limit(max_count) if max_size is not None: all_files_size = db.session.query( sa.func.sum(FileInstance.size)).scalar() min_size = int(math.ceil(all_files_size / total_batches)) max_size = min_size if max_size == 0 else max_size if max_size < min_size: current_app.logger.warning( u'The "max_size" you specified ({0}) is smaller than the ' 'minimum batch total file size required ({1}) in order to ' 'achieve the file checks over the specified period ({2}).' 
.format(max_size, min_size, frequency)) files = files.yield_per(1000) scheduled_file_ids = [] total_size = 0 for f in files: # Add at least the first file, since it might be larger than "max_size" scheduled_file_ids.append(str(f.id)) total_size += f.size if max_size and max_size <= total_size: break group( verify_checksum.s( file_id, pessimistic=True, throws=False, checksum_kwargs=(checksum_kwargs or {})) for file_id in scheduled_file_ids ).apply_async()
[ "def", "schedule_checksum_verification", "(", "frequency", "=", "None", ",", "batch_interval", "=", "None", ",", "max_count", "=", "None", ",", "max_size", "=", "None", ",", "files_query", "=", "None", ",", "checksum_kwargs", "=", "None", ")", ":", "assert", "max_count", "is", "not", "None", "or", "max_size", "is", "not", "None", "frequency", "=", "timedelta", "(", "*", "*", "frequency", ")", "if", "frequency", "else", "timedelta", "(", "days", "=", "30", ")", "if", "batch_interval", ":", "batch_interval", "=", "timedelta", "(", "*", "*", "batch_interval", ")", "else", ":", "celery_schedule", "=", "current_celery", ".", "conf", ".", "get", "(", "'CELERYBEAT_SCHEDULE'", ",", "{", "}", ")", "batch_interval", "=", "batch_interval", "or", "next", "(", "(", "v", "[", "'schedule'", "]", "for", "v", "in", "celery_schedule", ".", "values", "(", ")", "if", "v", ".", "get", "(", "'task'", ")", "==", "schedule_checksum_verification", ".", "name", ")", ",", "None", ")", "if", "not", "batch_interval", "or", "not", "isinstance", "(", "batch_interval", ",", "timedelta", ")", ":", "raise", "Exception", "(", "u'No \"batch_interval\" could be decided'", ")", "total_batches", "=", "int", "(", "frequency", ".", "total_seconds", "(", ")", "/", "batch_interval", ".", "total_seconds", "(", ")", ")", "files", "=", "obj_or_import_string", "(", "files_query", ",", "default", "=", "default_checksum_verification_files_query", ")", "(", ")", "files", "=", "files", ".", "order_by", "(", "sa", ".", "func", ".", "coalesce", "(", "FileInstance", ".", "last_check_at", ",", "date", ".", "min", ")", ")", "if", "max_count", "is", "not", "None", ":", "all_files_count", "=", "files", ".", "count", "(", ")", "min_count", "=", "int", "(", "math", ".", "ceil", "(", "all_files_count", "/", "total_batches", ")", ")", "max_count", "=", "min_count", "if", "max_count", "==", "0", "else", "max_count", "if", "max_count", "<", "min_count", ":", "current_app", ".", "logger", ".", "warning", "(", "u'The \"max_count\" you specified ({0}) is smaller than the '", "'minimum batch file count required ({1}) in order to achieve '", "'the file checks over the specified period ({2}).'", ".", "format", "(", "max_count", ",", "min_count", ",", "frequency", ")", ")", "files", "=", "files", ".", "limit", "(", "max_count", ")", "if", "max_size", "is", "not", "None", ":", "all_files_size", "=", "db", ".", "session", ".", "query", "(", "sa", ".", "func", ".", "sum", "(", "FileInstance", ".", "size", ")", ")", ".", "scalar", "(", ")", "min_size", "=", "int", "(", "math", ".", "ceil", "(", "all_files_size", "/", "total_batches", ")", ")", "max_size", "=", "min_size", "if", "max_size", "==", "0", "else", "max_size", "if", "max_size", "<", "min_size", ":", "current_app", ".", "logger", ".", "warning", "(", "u'The \"max_size\" you specified ({0}) is smaller than the '", "'minimum batch total file size required ({1}) in order to '", "'achieve the file checks over the specified period ({2}).'", ".", "format", "(", "max_size", ",", "min_size", ",", "frequency", ")", ")", "files", "=", "files", ".", "yield_per", "(", "1000", ")", "scheduled_file_ids", "=", "[", "]", "total_size", "=", "0", "for", "f", "in", "files", ":", "# Add at least the first file, since it might be larger than \"max_size\"", "scheduled_file_ids", ".", "append", "(", "str", "(", "f", ".", "id", ")", ")", "total_size", "+=", "f", ".", "size", "if", "max_size", "and", "max_size", "<=", "total_size", ":", "break", "group", "(", "verify_checksum", ".", "s", "(", "file_id", ",", "pessimistic", 
"=", "True", ",", "throws", "=", "False", ",", "checksum_kwargs", "=", "(", "checksum_kwargs", "or", "{", "}", ")", ")", "for", "file_id", "in", "scheduled_file_ids", ")", ".", "apply_async", "(", ")" ]
48.123596
0.000229
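The batch sizing described in the docstring above boils down to two divisions; a worked sketch with made-up numbers (one-hour beat schedule, 30-day cycle, one million files):

import math
from datetime import timedelta

frequency = timedelta(days=30)        # full verification cycle
batch_interval = timedelta(hours=1)   # how often celerybeat fires the task
total_batches = int(frequency.total_seconds() / batch_interval.total_seconds())
print(total_batches)                  # 720 batches per cycle

all_files_count = 1000000
min_count = int(math.ceil(all_files_count / total_batches))  # true division assumed (Python 3)
print(min_count)                      # 1389 files per batch, at minimum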
async def main(loop): """Demonstrate functionality of PyVLX.""" pyvlx = PyVLX('pyvlx.yaml', loop=loop) # Alternative: # pyvlx = PyVLX(host="192.168.2.127", password="velux123", loop=loop) # Runing scenes: await pyvlx.load_scenes() await pyvlx.scenes["All Windows Closed"].run() # Changing position of windows: await pyvlx.load_nodes() await pyvlx.nodes['Bath'].open() await pyvlx.nodes['Bath'].close() await pyvlx.nodes['Bath'].set_position(Position(position_percent=45)) # Changing of on-off switches: # await pyvlx.nodes['CoffeeMaker'].set_on() # await pyvlx.nodes['CoffeeMaker'].set_off() # You can easily rename nodes: # await pyvlx.nodes["Window 10"].rename("Window 11") await pyvlx.disconnect()
[ "async", "def", "main", "(", "loop", ")", ":", "pyvlx", "=", "PyVLX", "(", "'pyvlx.yaml'", ",", "loop", "=", "loop", ")", "# Alternative:", "# pyvlx = PyVLX(host=\"192.168.2.127\", password=\"velux123\", loop=loop)", "# Runing scenes:", "await", "pyvlx", ".", "load_scenes", "(", ")", "await", "pyvlx", ".", "scenes", "[", "\"All Windows Closed\"", "]", ".", "run", "(", ")", "# Changing position of windows:", "await", "pyvlx", ".", "load_nodes", "(", ")", "await", "pyvlx", ".", "nodes", "[", "'Bath'", "]", ".", "open", "(", ")", "await", "pyvlx", ".", "nodes", "[", "'Bath'", "]", ".", "close", "(", ")", "await", "pyvlx", ".", "nodes", "[", "'Bath'", "]", ".", "set_position", "(", "Position", "(", "position_percent", "=", "45", ")", ")", "# Changing of on-off switches:", "# await pyvlx.nodes['CoffeeMaker'].set_on()", "# await pyvlx.nodes['CoffeeMaker'].set_off()", "# You can easily rename nodes:", "# await pyvlx.nodes[\"Window 10\"].rename(\"Window 11\")", "await", "pyvlx", ".", "disconnect", "(", ")" ]
31.416667
0.001287
def optimize_rfc(data, targets): """Apply Bayesian Optimization to Random Forest parameters.""" def rfc_crossval(n_estimators, min_samples_split, max_features): """Wrapper of RandomForest cross validation. Notice how we ensure n_estimators and min_samples_split are casted to integer before we pass them along. Moreover, to avoid max_features taking values outside the (0, 1) range, we also ensure it is capped accordingly. """ return rfc_cv( n_estimators=int(n_estimators), min_samples_split=int(min_samples_split), max_features=max(min(max_features, 0.999), 1e-3), data=data, targets=targets, ) optimizer = BayesianOptimization( f=rfc_crossval, pbounds={ "n_estimators": (10, 250), "min_samples_split": (2, 25), "max_features": (0.1, 0.999), }, random_state=1234, verbose=2 ) optimizer.maximize(n_iter=10) print("Final result:", optimizer.max)
[ "def", "optimize_rfc", "(", "data", ",", "targets", ")", ":", "def", "rfc_crossval", "(", "n_estimators", ",", "min_samples_split", ",", "max_features", ")", ":", "\"\"\"Wrapper of RandomForest cross validation.\n\n Notice how we ensure n_estimators and min_samples_split are casted\n to integer before we pass them along. Moreover, to avoid max_features\n taking values outside the (0, 1) range, we also ensure it is capped\n accordingly.\n \"\"\"", "return", "rfc_cv", "(", "n_estimators", "=", "int", "(", "n_estimators", ")", ",", "min_samples_split", "=", "int", "(", "min_samples_split", ")", ",", "max_features", "=", "max", "(", "min", "(", "max_features", ",", "0.999", ")", ",", "1e-3", ")", ",", "data", "=", "data", ",", "targets", "=", "targets", ",", ")", "optimizer", "=", "BayesianOptimization", "(", "f", "=", "rfc_crossval", ",", "pbounds", "=", "{", "\"n_estimators\"", ":", "(", "10", ",", "250", ")", ",", "\"min_samples_split\"", ":", "(", "2", ",", "25", ")", ",", "\"max_features\"", ":", "(", "0.1", ",", "0.999", ")", ",", "}", ",", "random_state", "=", "1234", ",", "verbose", "=", "2", ")", "optimizer", ".", "maximize", "(", "n_iter", "=", "10", ")", "print", "(", "\"Final result:\"", ",", "optimizer", ".", "max", ")" ]
33.612903
0.000933
def _rebuffer(self): """ (internal) refill the repeat buffer """ # collect a stride worth of results(result lists) or exceptions results = [] exceptions = [] for i in xrange(self.stride): try: results.append(self.iterable.next()) exceptions.append(False) except Exception, excp: results.append(excp) exceptions.append(True) # un-roll the result lists res_exc = [] for rep in xrange(self.n): flat_results = [] for i in xrange(self.stride): result_list, exception = results[i], exceptions[i] if not exception: flat_results.append(result_list[rep]) else: flat_results.append(result_list) res_exc.append((flat_results, exceptions)) # make an iterator (like repeat) self._repeat_buffer = iter(res_exc)
[ "def", "_rebuffer", "(", "self", ")", ":", "# collect a stride worth of results(result lists) or exceptions", "results", "=", "[", "]", "exceptions", "=", "[", "]", "for", "i", "in", "xrange", "(", "self", ".", "stride", ")", ":", "try", ":", "results", ".", "append", "(", "self", ".", "iterable", ".", "next", "(", ")", ")", "exceptions", ".", "append", "(", "False", ")", "except", "Exception", ",", "excp", ":", "results", ".", "append", "(", "excp", ")", "exceptions", ".", "append", "(", "True", ")", "# un-roll the result lists", "res_exc", "=", "[", "]", "for", "rep", "in", "xrange", "(", "self", ".", "n", ")", ":", "flat_results", "=", "[", "]", "for", "i", "in", "xrange", "(", "self", ".", "stride", ")", ":", "result_list", ",", "exception", "=", "results", "[", "i", "]", ",", "exceptions", "[", "i", "]", "if", "not", "exception", ":", "flat_results", ".", "append", "(", "result_list", "[", "rep", "]", ")", "else", ":", "flat_results", ".", "append", "(", "result_list", ")", "res_exc", ".", "append", "(", "(", "flat_results", ",", "exceptions", ")", ")", "# make an iterator (like repeat)", "self", ".", "_repeat_buffer", "=", "iter", "(", "res_exc", ")" ]
35.214286
0.002962
def collect_metrics(): """ Register the decorated function to run for the collect_metrics hook. """ def _register(action): handler = Handler.get(action) handler.add_predicate(partial(_restricted_hook, 'collect-metrics')) return action return _register
[ "def", "collect_metrics", "(", ")", ":", "def", "_register", "(", "action", ")", ":", "handler", "=", "Handler", ".", "get", "(", "action", ")", "handler", ".", "add_predicate", "(", "partial", "(", "_restricted_hook", ",", "'collect-metrics'", ")", ")", "return", "action", "return", "_register" ]
31.888889
0.00339
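Typical charm code would apply the decorator above to an action function; the import path is an assumption (charms.reactive style) and the body is a placeholder:

@collect_metrics()
def report_queue_depth():
    # only runs while the charm is executing its collect-metrics hook
    pass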
def _update_process_stats(self): """Updates the process stats with the information from the processes This method is called at the end of each static parsing of the nextflow trace file. It re-populates the :attr:`process_stats` dictionary with the new stat metrics. """ good_status = ["COMPLETED", "CACHED"] for process, vals in self.trace_info.items(): # Update submission status of tags for each process vals = self._update_tag_status(process, vals) # Update process resources self._update_process_resources(process, vals) self.process_stats[process] = {} inst = self.process_stats[process] # Get number of completed samples inst["completed"] = "{}".format( len([x for x in vals if x["status"] in good_status])) # Get average time try: time_array = [self._hms(x["realtime"]) for x in vals] mean_time = round(sum(time_array) / len(time_array), 1) mean_time_str = strftime('%H:%M:%S', gmtime(mean_time)) inst["realtime"] = mean_time_str # When the realtime column is not present except KeyError: inst["realtime"] = "-" # Get cumulative cpu/hours try: cpu_hours = [self._cpu_load_parser( x["cpus"], x["%cpu"], x["realtime"]) for x in vals] inst["cpuhour"] = round(sum(cpu_hours), 2) # When the realtime, cpus or %cpus column are not present except KeyError: inst["cpuhour"] = "-" # Assess resource warnings inst["cpu_warnings"], inst["mem_warnings"] = \ self._assess_resource_warnings(process, vals) # Get maximum memory try: rss_values = [self._size_coverter(x["rss"]) for x in vals if x["rss"] != "-"] if rss_values: max_rss = round(max(rss_values)) rss_str = self._size_compress(max_rss) else: rss_str = "-" inst["maxmem"] = rss_str except KeyError: inst["maxmem"] = "-" # Get read size try: rchar_values = [self._size_coverter(x["rchar"]) for x in vals if x["rchar"] != "-"] if rchar_values: avg_rchar = round(sum(rchar_values) / len(rchar_values)) rchar_str = self._size_compress(avg_rchar) else: rchar_str = "-" except KeyError: rchar_str = "-" inst["avgread"] = rchar_str # Get write size try: wchar_values = [self._size_coverter(x["wchar"]) for x in vals if x["wchar"] != "-"] if wchar_values: avg_wchar = round(sum(wchar_values) / len(wchar_values)) wchar_str = self._size_compress(avg_wchar) else: wchar_str = "-" except KeyError: wchar_str = "-" inst["avgwrite"] = wchar_str
[ "def", "_update_process_stats", "(", "self", ")", ":", "good_status", "=", "[", "\"COMPLETED\"", ",", "\"CACHED\"", "]", "for", "process", ",", "vals", "in", "self", ".", "trace_info", ".", "items", "(", ")", ":", "# Update submission status of tags for each process", "vals", "=", "self", ".", "_update_tag_status", "(", "process", ",", "vals", ")", "# Update process resources", "self", ".", "_update_process_resources", "(", "process", ",", "vals", ")", "self", ".", "process_stats", "[", "process", "]", "=", "{", "}", "inst", "=", "self", ".", "process_stats", "[", "process", "]", "# Get number of completed samples", "inst", "[", "\"completed\"", "]", "=", "\"{}\"", ".", "format", "(", "len", "(", "[", "x", "for", "x", "in", "vals", "if", "x", "[", "\"status\"", "]", "in", "good_status", "]", ")", ")", "# Get average time", "try", ":", "time_array", "=", "[", "self", ".", "_hms", "(", "x", "[", "\"realtime\"", "]", ")", "for", "x", "in", "vals", "]", "mean_time", "=", "round", "(", "sum", "(", "time_array", ")", "/", "len", "(", "time_array", ")", ",", "1", ")", "mean_time_str", "=", "strftime", "(", "'%H:%M:%S'", ",", "gmtime", "(", "mean_time", ")", ")", "inst", "[", "\"realtime\"", "]", "=", "mean_time_str", "# When the realtime column is not present", "except", "KeyError", ":", "inst", "[", "\"realtime\"", "]", "=", "\"-\"", "# Get cumulative cpu/hours", "try", ":", "cpu_hours", "=", "[", "self", ".", "_cpu_load_parser", "(", "x", "[", "\"cpus\"", "]", ",", "x", "[", "\"%cpu\"", "]", ",", "x", "[", "\"realtime\"", "]", ")", "for", "x", "in", "vals", "]", "inst", "[", "\"cpuhour\"", "]", "=", "round", "(", "sum", "(", "cpu_hours", ")", ",", "2", ")", "# When the realtime, cpus or %cpus column are not present", "except", "KeyError", ":", "inst", "[", "\"cpuhour\"", "]", "=", "\"-\"", "# Assess resource warnings", "inst", "[", "\"cpu_warnings\"", "]", ",", "inst", "[", "\"mem_warnings\"", "]", "=", "self", ".", "_assess_resource_warnings", "(", "process", ",", "vals", ")", "# Get maximum memory", "try", ":", "rss_values", "=", "[", "self", ".", "_size_coverter", "(", "x", "[", "\"rss\"", "]", ")", "for", "x", "in", "vals", "if", "x", "[", "\"rss\"", "]", "!=", "\"-\"", "]", "if", "rss_values", ":", "max_rss", "=", "round", "(", "max", "(", "rss_values", ")", ")", "rss_str", "=", "self", ".", "_size_compress", "(", "max_rss", ")", "else", ":", "rss_str", "=", "\"-\"", "inst", "[", "\"maxmem\"", "]", "=", "rss_str", "except", "KeyError", ":", "inst", "[", "\"maxmem\"", "]", "=", "\"-\"", "# Get read size", "try", ":", "rchar_values", "=", "[", "self", ".", "_size_coverter", "(", "x", "[", "\"rchar\"", "]", ")", "for", "x", "in", "vals", "if", "x", "[", "\"rchar\"", "]", "!=", "\"-\"", "]", "if", "rchar_values", ":", "avg_rchar", "=", "round", "(", "sum", "(", "rchar_values", ")", "/", "len", "(", "rchar_values", ")", ")", "rchar_str", "=", "self", ".", "_size_compress", "(", "avg_rchar", ")", "else", ":", "rchar_str", "=", "\"-\"", "except", "KeyError", ":", "rchar_str", "=", "\"-\"", "inst", "[", "\"avgread\"", "]", "=", "rchar_str", "# Get write size", "try", ":", "wchar_values", "=", "[", "self", ".", "_size_coverter", "(", "x", "[", "\"wchar\"", "]", ")", "for", "x", "in", "vals", "if", "x", "[", "\"wchar\"", "]", "!=", "\"-\"", "]", "if", "wchar_values", ":", "avg_wchar", "=", "round", "(", "sum", "(", "wchar_values", ")", "/", "len", "(", "wchar_values", ")", ")", "wchar_str", "=", "self", ".", "_size_compress", "(", "avg_wchar", ")", "else", ":", "wchar_str", "=", "\"-\"", "except", 
"KeyError", ":", "wchar_str", "=", "\"-\"", "inst", "[", "\"avgwrite\"", "]", "=", "wchar_str" ]
37.678161
0.000595
def get_all(cls): """ Returns an array with all databases :returns Database list """ api = Client.instance().api data = api.database.get() database_names = data['result'] databases = [] for name in database_names: db = Database(name=name, api=api) databases.append(db) return databases
[ "def", "get_all", "(", "cls", ")", ":", "api", "=", "Client", ".", "instance", "(", ")", ".", "api", "data", "=", "api", ".", "database", ".", "get", "(", ")", "database_names", "=", "data", "[", "'result'", "]", "databases", "=", "[", "]", "for", "name", "in", "database_names", ":", "db", "=", "Database", "(", "name", "=", "name", ",", "api", "=", "api", ")", "databases", ".", "append", "(", "db", ")", "return", "databases" ]
20.263158
0.004963
def get_ports_by_name(device_name): '''Returns all serial devices with a given name''' filtered_devices = filter( lambda device: device_name in device[1], list_ports.comports() ) device_ports = [device[0] for device in filtered_devices] return device_ports
[ "def", "get_ports_by_name", "(", "device_name", ")", ":", "filtered_devices", "=", "filter", "(", "lambda", "device", ":", "device_name", "in", "device", "[", "1", "]", ",", "list_ports", ".", "comports", "(", ")", ")", "device_ports", "=", "[", "device", "[", "0", "]", "for", "device", "in", "filtered_devices", "]", "return", "device_ports" ]
35.625
0.003425
def _create_http_session(self): """Create a http session and initialize the retry object.""" self.session = requests.Session() if self.headers: self.session.headers.update(self.headers) retries = urllib3.util.Retry(total=self.max_retries, connect=self.max_retries_on_connect, read=self.max_retries_on_read, redirect=self.max_retries_on_redirect, status=self.max_retries_on_status, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.sleep_time, raise_on_redirect=self.raise_on_redirect, raise_on_status=self.raise_on_status, respect_retry_after_header=self.respect_retry_after_header) self.session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries)) self.session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))
[ "def", "_create_http_session", "(", "self", ")", ":", "self", ".", "session", "=", "requests", ".", "Session", "(", ")", "if", "self", ".", "headers", ":", "self", ".", "session", ".", "headers", ".", "update", "(", "self", ".", "headers", ")", "retries", "=", "urllib3", ".", "util", ".", "Retry", "(", "total", "=", "self", ".", "max_retries", ",", "connect", "=", "self", ".", "max_retries_on_connect", ",", "read", "=", "self", ".", "max_retries_on_read", ",", "redirect", "=", "self", ".", "max_retries_on_redirect", ",", "status", "=", "self", ".", "max_retries_on_status", ",", "method_whitelist", "=", "self", ".", "method_whitelist", ",", "status_forcelist", "=", "self", ".", "status_forcelist", ",", "backoff_factor", "=", "self", ".", "sleep_time", ",", "raise_on_redirect", "=", "self", ".", "raise_on_redirect", ",", "raise_on_status", "=", "self", ".", "raise_on_status", ",", "respect_retry_after_header", "=", "self", ".", "respect_retry_after_header", ")", "self", ".", "session", ".", "mount", "(", "'http://'", ",", "requests", ".", "adapters", ".", "HTTPAdapter", "(", "max_retries", "=", "retries", ")", ")", "self", ".", "session", ".", "mount", "(", "'https://'", ",", "requests", ".", "adapters", ".", "HTTPAdapter", "(", "max_retries", "=", "retries", ")", ")" ]
55
0.004062
def json_2_routing_area(json_obj): """ transform JSON obj coming from Ariane to ariane_clip3 object :param json_obj: the JSON obj coming from Ariane :return: ariane_clip3 RoutingArea object """ LOGGER.debug("RoutingArea.json_2_routing_area") return RoutingArea(raid=json_obj['routingAreaID'], name=json_obj['routingAreaName'], description=json_obj['routingAreaDescription'], ra_type=json_obj['routingAreaType'], multicast=json_obj['routingAreaMulticast'], routing_area_loc_ids=json_obj['routingAreaLocationsID'], routing_area_subnet_ids=json_obj['routingAreaSubnetsID'])
[ "def", "json_2_routing_area", "(", "json_obj", ")", ":", "LOGGER", ".", "debug", "(", "\"RoutingArea.json_2_routing_area\"", ")", "return", "RoutingArea", "(", "raid", "=", "json_obj", "[", "'routingAreaID'", "]", ",", "name", "=", "json_obj", "[", "'routingAreaName'", "]", ",", "description", "=", "json_obj", "[", "'routingAreaDescription'", "]", ",", "ra_type", "=", "json_obj", "[", "'routingAreaType'", "]", ",", "multicast", "=", "json_obj", "[", "'routingAreaMulticast'", "]", ",", "routing_area_loc_ids", "=", "json_obj", "[", "'routingAreaLocationsID'", "]", ",", "routing_area_subnet_ids", "=", "json_obj", "[", "'routingAreaSubnetsID'", "]", ")" ]
55.357143
0.005076
def create_file_service(self): ''' Creates a FileService object with the settings specified in the CloudStorageAccount. :return: A service object. :rtype: :class:`~azure.storage.file.fileservice.FileService` ''' try: from azure.storage.file.fileservice import FileService return FileService(self.account_name, self.account_key, sas_token=self.sas_token, endpoint_suffix=self.endpoint_suffix) except ImportError: raise Exception('The package azure-storage-file is required. ' + 'Please install it using "pip install azure-storage-file"')
[ "def", "create_file_service", "(", "self", ")", ":", "try", ":", "from", "azure", ".", "storage", ".", "file", ".", "fileservice", "import", "FileService", "return", "FileService", "(", "self", ".", "account_name", ",", "self", ".", "account_key", ",", "sas_token", "=", "self", ".", "sas_token", ",", "endpoint_suffix", "=", "self", ".", "endpoint_suffix", ")", "except", "ImportError", ":", "raise", "Exception", "(", "'The package azure-storage-file is required. '", "+", "'Please install it using \"pip install azure-storage-file\"'", ")" ]
44.5625
0.005495
def validate_request_timestamp(req_body, max_diff=150): """Ensure the request's timestamp doesn't fall outside of the app's specified tolerance. Returns True if this request is valid, False otherwise. :param req_body: JSON object parsed out of the raw POST data of a request. :param max_diff: Maximum allowable difference in seconds between request timestamp and system clock. Amazon requires <= 150 seconds for published skills. """ time_str = req_body.get('request', {}).get('timestamp') if not time_str: log.error('timestamp not present %s', req_body) return False req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ") diff = (datetime.utcnow() - req_ts).total_seconds() if abs(diff) > max_diff: log.error('timestamp difference too high: %d sec', diff) return False return True
[ "def", "validate_request_timestamp", "(", "req_body", ",", "max_diff", "=", "150", ")", ":", "time_str", "=", "req_body", ".", "get", "(", "'request'", ",", "{", "}", ")", ".", "get", "(", "'timestamp'", ")", "if", "not", "time_str", ":", "log", ".", "error", "(", "'timestamp not present %s'", ",", "req_body", ")", "return", "False", "req_ts", "=", "datetime", ".", "strptime", "(", "time_str", ",", "\"%Y-%m-%dT%H:%M:%SZ\"", ")", "diff", "=", "(", "datetime", ".", "utcnow", "(", ")", "-", "req_ts", ")", ".", "total_seconds", "(", ")", "if", "abs", "(", "diff", ")", ">", "max_diff", ":", "log", ".", "error", "(", "'timestamp difference too high: %d sec'", ",", "diff", ")", "return", "False", "return", "True" ]
33.192308
0.001126
def loadFromURL(self, url): """Return a WSDL instance loaded from the given url.""" document = DOM.loadFromURL(url) wsdl = WSDL() wsdl.location = url wsdl.load(document) return wsdl
[ "def", "loadFromURL", "(", "self", ",", "url", ")", ":", "document", "=", "DOM", ".", "loadFromURL", "(", "url", ")", "wsdl", "=", "WSDL", "(", ")", "wsdl", ".", "location", "=", "url", "wsdl", ".", "load", "(", "document", ")", "return", "wsdl" ]
31.857143
0.008734
def get_hex_chain(self, index): """Assemble and return the chain leading from a given node to the merkle root of this tree with hash values in hex form """ return [(codecs.encode(i[0], 'hex_codec'), i[1]) for i in self.get_chain(index)]
[ "def", "get_hex_chain", "(", "self", ",", "index", ")", ":", "return", "[", "(", "codecs", ".", "encode", "(", "i", "[", "0", "]", ",", "'hex_codec'", ")", ",", "i", "[", "1", "]", ")", "for", "i", "in", "self", ".", "get_chain", "(", "index", ")", "]" ]
52.8
0.014925
def pow(self, *args, **kwargs): """ pow(other, rho=0, inplace=True) Raises by the power of an *other* number instance. The correlation coefficient *rho* can be configured per uncertainty when passed as a dict. When *inplace* is *False*, a new instance is returned. """ return self._apply(operator.pow, *args, **kwargs)
[ "def", "pow", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_apply", "(", "operator", ".", "pow", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
51.428571
0.010929
def remote(href): """ Normalize remote href :param href: remote path :return: normalize href >>> remote("/test/hello.txt") u'/test/hello.txt' >>> remote("test/hello.txt") u'/test/hello.txt' >>> remote("test\hello.txt") u'/test/hello.txt' >>> remote(None) u'/' """ href = _(href) href = os.path.join(u("/"), href) if os.sep == "\\": href = href.replace("\\", "/") return href
[ "def", "remote", "(", "href", ")", ":", "href", "=", "_", "(", "href", ")", "href", "=", "os", ".", "path", ".", "join", "(", "u", "(", "\"/\"", ")", ",", "href", ")", "if", "os", ".", "sep", "==", "\"\\\\\"", ":", "href", "=", "href", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", "return", "href" ]
21.6
0.004435
def _largest_integer_by_dtype(dt): """Helper returning the largest integer exactly representable by dtype.""" if not _is_known_dtype(dt): raise TypeError("Unrecognized dtype: {}".format(dt.name)) if dt.is_floating: return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1)) if dt.is_integer: return np.iinfo(dt.as_numpy_dtype).max if dt.base_dtype == tf.bool: return int(1) # We actually can't land here but keep the case for completeness. raise TypeError("Unrecognized dtype: {}".format(dt.name))
[ "def", "_largest_integer_by_dtype", "(", "dt", ")", ":", "if", "not", "_is_known_dtype", "(", "dt", ")", ":", "raise", "TypeError", "(", "\"Unrecognized dtype: {}\"", ".", "format", "(", "dt", ".", "name", ")", ")", "if", "dt", ".", "is_floating", ":", "return", "int", "(", "2", "**", "(", "np", ".", "finfo", "(", "dt", ".", "as_numpy_dtype", ")", ".", "nmant", "+", "1", ")", ")", "if", "dt", ".", "is_integer", ":", "return", "np", ".", "iinfo", "(", "dt", ".", "as_numpy_dtype", ")", ".", "max", "if", "dt", ".", "base_dtype", "==", "tf", ".", "bool", ":", "return", "int", "(", "1", ")", "# We actually can't land here but keep the case for completeness.", "raise", "TypeError", "(", "\"Unrecognized dtype: {}\"", ".", "format", "(", "dt", ".", "name", ")", ")" ]
42.666667
0.015296
def _check_scalar_vertical_extents(self, ds, z_variable): ''' Check the scalar value of Z compared to the vertical extents which should also be equivalent :param netCDF4.Dataset ds: An open netCDF dataset :param str z_variable: Name of the variable representing the Z-Axis ''' vert_min = ds.geospatial_vertical_min vert_max = ds.geospatial_vertical_max msgs = [] total = 2 zvalue = ds.variables[z_variable][:].item() if not np.isclose(vert_min, vert_max): msgs.append("geospatial_vertical_min != geospatial_vertical_max for scalar depth values, %s != %s" % ( vert_min, vert_max )) if not np.isclose(vert_max, zvalue): msgs.append("geospatial_vertical_max != %s values, %s != %s" % ( z_variable, vert_max, zvalue )) return Result(BaseCheck.MEDIUM, (total - len(msgs), total), 'geospatial_vertical_extents_match', msgs)
[ "def", "_check_scalar_vertical_extents", "(", "self", ",", "ds", ",", "z_variable", ")", ":", "vert_min", "=", "ds", ".", "geospatial_vertical_min", "vert_max", "=", "ds", ".", "geospatial_vertical_max", "msgs", "=", "[", "]", "total", "=", "2", "zvalue", "=", "ds", ".", "variables", "[", "z_variable", "]", "[", ":", "]", ".", "item", "(", ")", "if", "not", "np", ".", "isclose", "(", "vert_min", ",", "vert_max", ")", ":", "msgs", ".", "append", "(", "\"geospatial_vertical_min != geospatial_vertical_max for scalar depth values, %s != %s\"", "%", "(", "vert_min", ",", "vert_max", ")", ")", "if", "not", "np", ".", "isclose", "(", "vert_max", ",", "zvalue", ")", ":", "msgs", ".", "append", "(", "\"geospatial_vertical_max != %s values, %s != %s\"", "%", "(", "z_variable", ",", "vert_max", ",", "zvalue", ")", ")", "return", "Result", "(", "BaseCheck", ".", "MEDIUM", ",", "(", "total", "-", "len", "(", "msgs", ")", ",", "total", ")", ",", "'geospatial_vertical_extents_match'", ",", "msgs", ")" ]
35.387097
0.002662
async def _send_rtcp_nack(self, media_ssrc, lost): """ Send an RTCP packet to report missing RTP packets. """ if self.__rtcp_ssrc is not None: packet = RtcpRtpfbPacket( fmt=RTCP_RTPFB_NACK, ssrc=self.__rtcp_ssrc, media_ssrc=media_ssrc) packet.lost = lost await self._send_rtcp(packet)
[ "async", "def", "_send_rtcp_nack", "(", "self", ",", "media_ssrc", ",", "lost", ")", ":", "if", "self", ".", "__rtcp_ssrc", "is", "not", "None", ":", "packet", "=", "RtcpRtpfbPacket", "(", "fmt", "=", "RTCP_RTPFB_NACK", ",", "ssrc", "=", "self", ".", "__rtcp_ssrc", ",", "media_ssrc", "=", "media_ssrc", ")", "packet", ".", "lost", "=", "lost", "await", "self", ".", "_send_rtcp", "(", "packet", ")" ]
40
0.008152
def get_statements_for_paper(ids, ev_limit=10, best_first=True, tries=2, max_stmts=None): """Get the set of raw Statements extracted from a paper given by the id. Parameters ---------- ids : list[(<id type>, <id value>)] A list of tuples with ids and their type. The type can be any one of 'pmid', 'pmcid', 'doi', 'pii', 'manuscript id', or 'trid', which is the primary key id of the text references in the database. ev_limit : int or None Limit the amount of evidence returned per Statement. Default is 10. best_first : bool If True, the preassembled statements will be sorted by the amount of evidence they have, and those with the most evidence will be prioritized. When using `max_stmts`, this means you will get the "best" statements. If False, statements will be queried in arbitrary order. tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 2. max_stmts : int or None Select a maximum number of statements to be returned. Default is None. Returns ------- stmts : list[:py:class:`indra.statements.Statement`] A list of INDRA Statement instances. """ id_l = [{'id': id_val, 'type': id_type} for id_type, id_val in ids] resp = submit_statement_request('post', 'from_papers', data={'ids': id_l}, ev_limit=ev_limit, best_first=best_first, tries=tries, max_stmts=max_stmts) stmts_json = resp.json()['statements'] return stmts_from_json(stmts_json.values())
[ "def", "get_statements_for_paper", "(", "ids", ",", "ev_limit", "=", "10", ",", "best_first", "=", "True", ",", "tries", "=", "2", ",", "max_stmts", "=", "None", ")", ":", "id_l", "=", "[", "{", "'id'", ":", "id_val", ",", "'type'", ":", "id_type", "}", "for", "id_type", ",", "id_val", "in", "ids", "]", "resp", "=", "submit_statement_request", "(", "'post'", ",", "'from_papers'", ",", "data", "=", "{", "'ids'", ":", "id_l", "}", ",", "ev_limit", "=", "ev_limit", ",", "best_first", "=", "best_first", ",", "tries", "=", "tries", ",", "max_stmts", "=", "max_stmts", ")", "stmts_json", "=", "resp", ".", "json", "(", ")", "[", "'statements'", "]", "return", "stmts_from_json", "(", "stmts_json", ".", "values", "(", ")", ")" ]
50.567568
0.001049
def get_first_child_of_type(self, klass): """! @brief Breadth-first search for a child of the given class. @param self @param klass The class type to search for. The first child at any depth that is an instance of this class or a subclass thereof will be returned. Matching children at more shallow nodes will take precedence over deeper nodes. @returns Either a node object or None. """ matches = self.find_children(lambda c: isinstance(c, klass)) if len(matches): return matches[0] else: return None
[ "def", "get_first_child_of_type", "(", "self", ",", "klass", ")", ":", "matches", "=", "self", ".", "find_children", "(", "lambda", "c", ":", "isinstance", "(", "c", ",", "klass", ")", ")", "if", "len", "(", "matches", ")", ":", "return", "matches", "[", "0", "]", "else", ":", "return", "None" ]
46.230769
0.006525
def idle_task(self): '''called rapidly by mavproxy''' now = time.time() if now-self.last_bored > self.boredom_interval: self.last_bored = now message = self.boredom_message() self.say("%s: %s" % (self.name,message)) # See if whatever we're connected to would like to play: self.master.mav.statustext_send(mavutil.mavlink.MAV_SEVERITY_NOTICE, message)
[ "def", "idle_task", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "if", "now", "-", "self", ".", "last_bored", ">", "self", ".", "boredom_interval", ":", "self", ".", "last_bored", "=", "now", "message", "=", "self", ".", "boredom_message", "(", ")", "self", ".", "say", "(", "\"%s: %s\"", "%", "(", "self", ".", "name", ",", "message", ")", ")", "# See if whatever we're connected to would like to play:", "self", ".", "master", ".", "mav", ".", "statustext_send", "(", "mavutil", ".", "mavlink", ".", "MAV_SEVERITY_NOTICE", ",", "message", ")" ]
46.9
0.008368
def _process_request(self, request): """Adds data necessary for Horizon to function to the request.""" request.horizon = {'dashboard': None, 'panel': None, 'async_messages': []} if not hasattr(request, "user") or not request.user.is_authenticated: # proceed no further if the current request is already known # not to be authenticated # it is CRITICAL to perform this check as early as possible # to avoid creating too many sessions return None # Since we know the user is present and authenticated, lets refresh the # session expiry if configured to do so. if getattr(settings, "SESSION_REFRESH", True): timeout = getattr(settings, "SESSION_TIMEOUT", 3600) token_life = request.user.token.expires - datetime.datetime.now( pytz.utc) session_time = min(timeout, int(token_life.total_seconds())) request.session.set_expiry(session_time) if request.is_ajax(): # if the request is Ajax we do not want to proceed, as clients can # 1) create pages with constant polling, which can create race # conditions when a page navigation occurs # 2) might leave a user seemingly left logged in forever # 3) thrashes db backed session engines with tons of changes return None # If we use cookie-based sessions, check that the cookie size does not # reach the max size accepted by common web browsers. if ( settings.SESSION_ENGINE == 'django.contrib.sessions.backends.signed_cookies' ): max_cookie_size = getattr( settings, 'SESSION_COOKIE_MAX_SIZE', None) session_cookie_name = getattr( settings, 'SESSION_COOKIE_NAME', None) session_key = request.COOKIES.get(session_cookie_name) if max_cookie_size is not None and session_key is not None: cookie_size = sum(( len(key) + len(value) for key, value in request.COOKIES.items() )) if cookie_size >= max_cookie_size: LOG.error( 'Total Cookie size for user_id: %(user_id)s is ' '%(cookie_size)sB >= %(max_cookie_size)sB. ' 'You need to configure file-based or database-backed ' 'sessions instead of cookie-based sessions: ' 'https://docs.openstack.org/horizon/latest/' 'admin/sessions.html', { 'user_id': request.session.get( 'user_id', 'Unknown'), 'cookie_size': cookie_size, 'max_cookie_size': max_cookie_size, } ) tz = utils.get_timezone(request) if tz: timezone.activate(tz)
[ "def", "_process_request", "(", "self", ",", "request", ")", ":", "request", ".", "horizon", "=", "{", "'dashboard'", ":", "None", ",", "'panel'", ":", "None", ",", "'async_messages'", ":", "[", "]", "}", "if", "not", "hasattr", "(", "request", ",", "\"user\"", ")", "or", "not", "request", ".", "user", ".", "is_authenticated", ":", "# proceed no further if the current request is already known", "# not to be authenticated", "# it is CRITICAL to perform this check as early as possible", "# to avoid creating too many sessions", "return", "None", "# Since we know the user is present and authenticated, lets refresh the", "# session expiry if configured to do so.", "if", "getattr", "(", "settings", ",", "\"SESSION_REFRESH\"", ",", "True", ")", ":", "timeout", "=", "getattr", "(", "settings", ",", "\"SESSION_TIMEOUT\"", ",", "3600", ")", "token_life", "=", "request", ".", "user", ".", "token", ".", "expires", "-", "datetime", ".", "datetime", ".", "now", "(", "pytz", ".", "utc", ")", "session_time", "=", "min", "(", "timeout", ",", "int", "(", "token_life", ".", "total_seconds", "(", ")", ")", ")", "request", ".", "session", ".", "set_expiry", "(", "session_time", ")", "if", "request", ".", "is_ajax", "(", ")", ":", "# if the request is Ajax we do not want to proceed, as clients can", "# 1) create pages with constant polling, which can create race", "# conditions when a page navigation occurs", "# 2) might leave a user seemingly left logged in forever", "# 3) thrashes db backed session engines with tons of changes", "return", "None", "# If we use cookie-based sessions, check that the cookie size does not", "# reach the max size accepted by common web browsers.", "if", "(", "settings", ".", "SESSION_ENGINE", "==", "'django.contrib.sessions.backends.signed_cookies'", ")", ":", "max_cookie_size", "=", "getattr", "(", "settings", ",", "'SESSION_COOKIE_MAX_SIZE'", ",", "None", ")", "session_cookie_name", "=", "getattr", "(", "settings", ",", "'SESSION_COOKIE_NAME'", ",", "None", ")", "session_key", "=", "request", ".", "COOKIES", ".", "get", "(", "session_cookie_name", ")", "if", "max_cookie_size", "is", "not", "None", "and", "session_key", "is", "not", "None", ":", "cookie_size", "=", "sum", "(", "(", "len", "(", "key", ")", "+", "len", "(", "value", ")", "for", "key", ",", "value", "in", "request", ".", "COOKIES", ".", "items", "(", ")", ")", ")", "if", "cookie_size", ">=", "max_cookie_size", ":", "LOG", ".", "error", "(", "'Total Cookie size for user_id: %(user_id)s is '", "'%(cookie_size)sB >= %(max_cookie_size)sB. '", "'You need to configure file-based or database-backed '", "'sessions instead of cookie-based sessions: '", "'https://docs.openstack.org/horizon/latest/'", "'admin/sessions.html'", ",", "{", "'user_id'", ":", "request", ".", "session", ".", "get", "(", "'user_id'", ",", "'Unknown'", ")", ",", "'cookie_size'", ":", "cookie_size", ",", "'max_cookie_size'", ":", "max_cookie_size", ",", "}", ")", "tz", "=", "utils", ".", "get_timezone", "(", "request", ")", "if", "tz", ":", "timezone", ".", "activate", "(", "tz", ")" ]
47.828125
0.00064
def select(self, table, items=None, limit=None, offset=None, remote_filter=None, func_filters=None): '''This method simulate a select on a table >>> yql.select('geo.countries', limit=5) >>> yql.select('social.profile', ['guid', 'givenName', 'gender']) ''' self._table = table if remote_filter: if not isinstance(remote_filter, tuple): raise TypeError("{0} must be of type <type tuple>".format(remote_filter)) table = "%s(%s)" %(table, ','.join(map(str, remote_filter))) if not items: items = ['*'] self._query = "SELECT {1} FROM {0} ".format(table, ','.join(items)) if func_filters: self._func = self._func_filters(func_filters) self._limit = limit self._offset = offset return self
[ "def", "select", "(", "self", ",", "table", ",", "items", "=", "None", ",", "limit", "=", "None", ",", "offset", "=", "None", ",", "remote_filter", "=", "None", ",", "func_filters", "=", "None", ")", ":", "self", ".", "_table", "=", "table", "if", "remote_filter", ":", "if", "not", "isinstance", "(", "remote_filter", ",", "tuple", ")", ":", "raise", "TypeError", "(", "\"{0} must be of type <type tuple>\"", ".", "format", "(", "remote_filter", ")", ")", "table", "=", "\"%s(%s)\"", "%", "(", "table", ",", "','", ".", "join", "(", "map", "(", "str", ",", "remote_filter", ")", ")", ")", "if", "not", "items", ":", "items", "=", "[", "'*'", "]", "self", ".", "_query", "=", "\"SELECT {1} FROM {0} \"", ".", "format", "(", "table", ",", "','", ".", "join", "(", "items", ")", ")", "if", "func_filters", ":", "self", ".", "_func", "=", "self", ".", "_func_filters", "(", "func_filters", ")", "self", ".", "_limit", "=", "limit", "self", ".", "_offset", "=", "offset", "return", "self" ]
34.958333
0.009281
def inv(z: int) -> int: """$= z^{-1} mod q$, for z != 0""" # Adapted from curve25519_athlon.c in djb's Curve25519. z2 = z * z % q # 2 z9 = pow2(z2, 2) * z % q # 9 z11 = z9 * z2 % q # 11 z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0 z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0 z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ... z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0 return pow2(z2_250_0, 5) * z11 % q
[ "def", "inv", "(", "z", ":", "int", ")", "->", "int", ":", "# Adapted from curve25519_athlon.c in djb's Curve25519.", "z2", "=", "z", "*", "z", "%", "q", "# 2", "z9", "=", "pow2", "(", "z2", ",", "2", ")", "*", "z", "%", "q", "# 9", "z11", "=", "z9", "*", "z2", "%", "q", "# 11", "z2_5_0", "=", "(", "z11", "*", "z11", ")", "%", "q", "*", "z9", "%", "q", "# 31 == 2^5 - 2^0", "z2_10_0", "=", "pow2", "(", "z2_5_0", ",", "5", ")", "*", "z2_5_0", "%", "q", "# 2^10 - 2^0", "z2_20_0", "=", "pow2", "(", "z2_10_0", ",", "10", ")", "*", "z2_10_0", "%", "q", "# ...", "z2_40_0", "=", "pow2", "(", "z2_20_0", ",", "20", ")", "*", "z2_20_0", "%", "q", "z2_50_0", "=", "pow2", "(", "z2_40_0", ",", "10", ")", "*", "z2_10_0", "%", "q", "z2_100_0", "=", "pow2", "(", "z2_50_0", ",", "50", ")", "*", "z2_50_0", "%", "q", "z2_200_0", "=", "pow2", "(", "z2_100_0", ",", "100", ")", "*", "z2_100_0", "%", "q", "z2_250_0", "=", "pow2", "(", "z2_200_0", ",", "50", ")", "*", "z2_50_0", "%", "q", "# 2^250 - 2^0", "return", "pow2", "(", "z2_250_0", ",", "5", ")", "*", "z11", "%", "q" ]
43.466667
0.001502
def parse_nhx(NHX_string): """ NHX format: [&&NHX:prop1=value1:prop2=value2] MB format: ((a[&Z=1,Y=2], b[&Z=1,Y=2]):1.0[&L=1,W=0], ... """ # store features ndict = {} # parse NHX or MB features if "[&&NHX:" in NHX_string: NHX_string = NHX_string.replace("[&&NHX:", "") NHX_string = NHX_string.replace("]", "") for field in NHX_string.split(":"): try: pname, pvalue = field.split("=") ndict[pname] = pvalue except ValueError as e: raise NewickError('Invalid NHX format %s' % field) return ndict
[ "def", "parse_nhx", "(", "NHX_string", ")", ":", "# store features", "ndict", "=", "{", "}", "# parse NHX or MB features", "if", "\"[&&NHX:\"", "in", "NHX_string", ":", "NHX_string", "=", "NHX_string", ".", "replace", "(", "\"[&&NHX:\"", ",", "\"\"", ")", "NHX_string", "=", "NHX_string", ".", "replace", "(", "\"]\"", ",", "\"\"", ")", "for", "field", "in", "NHX_string", ".", "split", "(", "\":\"", ")", ":", "try", ":", "pname", ",", "pvalue", "=", "field", ".", "split", "(", "\"=\"", ")", "ndict", "[", "pname", "]", "=", "pvalue", "except", "ValueError", "as", "e", ":", "raise", "NewickError", "(", "'Invalid NHX format %s'", "%", "field", ")", "return", "ndict" ]
30.95
0.00627
def stack_keys(ddict, keys, extra=None): """ Combine elements of ddict into an array of shape (len(ddict[key]), len(keys)). Useful for preparing data for sklearn. Parameters ---------- ddict : dict A dict containing arrays or lists to be stacked. Must be of equal length. keys : list or str The keys of dict to stack. Must be present in ddict. extra : list (optional) A list of additional arrays to stack. Elements of extra must be the same length as arrays in ddict. Extras are inserted as the first columns of output. """ if isinstance(keys, str): d = [ddict[keys]] else: d = [ddict[k] for k in keys] if extra is not None: d = extra + d return np.vstack(d).T
[ "def", "stack_keys", "(", "ddict", ",", "keys", ",", "extra", "=", "None", ")", ":", "if", "isinstance", "(", "keys", ",", "str", ")", ":", "d", "=", "[", "ddict", "[", "keys", "]", "]", "else", ":", "d", "=", "[", "ddict", "[", "k", "]", "for", "k", "in", "keys", "]", "if", "extra", "is", "not", "None", ":", "d", "=", "extra", "+", "d", "return", "np", ".", "vstack", "(", "d", ")", ".", "T" ]
30.48
0.002545
def monitors(self): # type: () -> Monitors """ Get positions of monitors (see parent class). """ if not self._monitors: int_ = int core = self.core # All monitors # We need to update the value with every single monitor found # using CGRectUnion. Else we will end with infinite values. all_monitors = CGRect() self._monitors.append({}) # Each monitors display_count = ctypes.c_uint32(0) active_displays = (ctypes.c_uint32 * self.max_displays)() core.CGGetActiveDisplayList( self.max_displays, active_displays, ctypes.byref(display_count) ) rotations = {0.0: "normal", 90.0: "right", -90.0: "left"} for idx in range(display_count.value): display = active_displays[idx] rect = core.CGDisplayBounds(display) rect = core.CGRectStandardize(rect) width, height = rect.size.width, rect.size.height rot = core.CGDisplayRotation(display) if rotations[rot] in ["left", "right"]: width, height = height, width self._monitors.append( { "left": int_(rect.origin.x), "top": int_(rect.origin.y), "width": int_(width), "height": int_(height), } ) # Update AiO monitor's values all_monitors = core.CGRectUnion(all_monitors, rect) # Set the AiO monitor's values self._monitors[0] = { "left": int_(all_monitors.origin.x), "top": int_(all_monitors.origin.y), "width": int_(all_monitors.size.width), "height": int_(all_monitors.size.height), } return self._monitors
[ "def", "monitors", "(", "self", ")", ":", "# type: () -> Monitors", "if", "not", "self", ".", "_monitors", ":", "int_", "=", "int", "core", "=", "self", ".", "core", "# All monitors", "# We need to update the value with every single monitor found", "# using CGRectUnion. Else we will end with infinite values.", "all_monitors", "=", "CGRect", "(", ")", "self", ".", "_monitors", ".", "append", "(", "{", "}", ")", "# Each monitors", "display_count", "=", "ctypes", ".", "c_uint32", "(", "0", ")", "active_displays", "=", "(", "ctypes", ".", "c_uint32", "*", "self", ".", "max_displays", ")", "(", ")", "core", ".", "CGGetActiveDisplayList", "(", "self", ".", "max_displays", ",", "active_displays", ",", "ctypes", ".", "byref", "(", "display_count", ")", ")", "rotations", "=", "{", "0.0", ":", "\"normal\"", ",", "90.0", ":", "\"right\"", ",", "-", "90.0", ":", "\"left\"", "}", "for", "idx", "in", "range", "(", "display_count", ".", "value", ")", ":", "display", "=", "active_displays", "[", "idx", "]", "rect", "=", "core", ".", "CGDisplayBounds", "(", "display", ")", "rect", "=", "core", ".", "CGRectStandardize", "(", "rect", ")", "width", ",", "height", "=", "rect", ".", "size", ".", "width", ",", "rect", ".", "size", ".", "height", "rot", "=", "core", ".", "CGDisplayRotation", "(", "display", ")", "if", "rotations", "[", "rot", "]", "in", "[", "\"left\"", ",", "\"right\"", "]", ":", "width", ",", "height", "=", "height", ",", "width", "self", ".", "_monitors", ".", "append", "(", "{", "\"left\"", ":", "int_", "(", "rect", ".", "origin", ".", "x", ")", ",", "\"top\"", ":", "int_", "(", "rect", ".", "origin", ".", "y", ")", ",", "\"width\"", ":", "int_", "(", "width", ")", ",", "\"height\"", ":", "int_", "(", "height", ")", ",", "}", ")", "# Update AiO monitor's values", "all_monitors", "=", "core", ".", "CGRectUnion", "(", "all_monitors", ",", "rect", ")", "# Set the AiO monitor's values", "self", ".", "_monitors", "[", "0", "]", "=", "{", "\"left\"", ":", "int_", "(", "all_monitors", ".", "origin", ".", "x", ")", ",", "\"top\"", ":", "int_", "(", "all_monitors", ".", "origin", ".", "y", ")", ",", "\"width\"", ":", "int_", "(", "all_monitors", ".", "size", ".", "width", ")", ",", "\"height\"", ":", "int_", "(", "all_monitors", ".", "size", ".", "height", ")", ",", "}", "return", "self", ".", "_monitors" ]
38.66
0.001514
def CreateStorageWriterForFile(cls, session, path): """Creates a storage writer based on the file. Args: session (Session): session the storage changes are part of. path (str): path to the storage file. Returns: StorageWriter: a storage writer or None if the storage file cannot be opened or the storage format is not supported. """ if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(path): return sqlite_writer.SQLiteStorageFileWriter(session, path) return None
[ "def", "CreateStorageWriterForFile", "(", "cls", ",", "session", ",", "path", ")", ":", "if", "sqlite_file", ".", "SQLiteStorageFile", ".", "CheckSupportedFormat", "(", "path", ")", ":", "return", "sqlite_writer", ".", "SQLiteStorageFileWriter", "(", "session", ",", "path", ")", "return", "None" ]
34.133333
0.003802
def _resolve_shape(self, name, layers): '''Given a list of layers, find the layer output with the given name. Parameters ---------- name : str Name of a layer to resolve. layers : list of :class:`theanets.layers.base.Layer` A list of layers to search in. Raises ------ util.ConfigurationError : If there is no such layer, or if there are more than one. Returns ------- name : str The fully-scoped name of the desired output. shape : tuple of None and/or int The shape of the named output. ''' matches = [l for l in layers if name.split(':')[0] == l.name] if len(matches) != 1: raise util.ConfigurationError( 'layer "{}" cannot resolve "{}" using {}' .format(self.name, name, [l.name for l in layers])) name = name if ':' in name else matches[0].output_name return name, matches[0]._output_shapes[name.split(':')[1]]
[ "def", "_resolve_shape", "(", "self", ",", "name", ",", "layers", ")", ":", "matches", "=", "[", "l", "for", "l", "in", "layers", "if", "name", ".", "split", "(", "':'", ")", "[", "0", "]", "==", "l", ".", "name", "]", "if", "len", "(", "matches", ")", "!=", "1", ":", "raise", "util", ".", "ConfigurationError", "(", "'layer \"{}\" cannot resolve \"{}\" using {}'", ".", "format", "(", "self", ".", "name", ",", "name", ",", "[", "l", ".", "name", "for", "l", "in", "layers", "]", ")", ")", "name", "=", "name", "if", "':'", "in", "name", "else", "matches", "[", "0", "]", ".", "output_name", "return", "name", ",", "matches", "[", "0", "]", ".", "_output_shapes", "[", "name", ".", "split", "(", "':'", ")", "[", "1", "]", "]" ]
35.517241
0.003781
def register_middleware(self, middleware, attach_to="request"): """ Register an application level middleware that will be attached to all the API URLs registered under this application. This method is internally invoked by the :func:`middleware` decorator provided at the app level. :param middleware: Callback method to be attached to the middleware :param attach_to: The state at which the middleware needs to be invoked in the lifecycle of an *HTTP Request*. **request** - Invoke before the request is processed **response** - Invoke before the response is returned back :return: decorated method """ if attach_to == "request": if middleware not in self.request_middleware: self.request_middleware.append(middleware) if attach_to == "response": if middleware not in self.response_middleware: self.response_middleware.appendleft(middleware) return middleware
[ "def", "register_middleware", "(", "self", ",", "middleware", ",", "attach_to", "=", "\"request\"", ")", ":", "if", "attach_to", "==", "\"request\"", ":", "if", "middleware", "not", "in", "self", ".", "request_middleware", ":", "self", ".", "request_middleware", ".", "append", "(", "middleware", ")", "if", "attach_to", "==", "\"response\"", ":", "if", "middleware", "not", "in", "self", ".", "response_middleware", ":", "self", ".", "response_middleware", ".", "appendleft", "(", "middleware", ")", "return", "middleware" ]
45.217391
0.001883
def _auth_stage1(self): """Do the first stage (<iq type='get'/>) of legacy ("plain" or "digest") authentication. [client only]""" iq=Iq(stanza_type="get") q=iq.new_query("jabber:iq:auth") q.newTextChild(None,"username",to_utf8(self.my_jid.node)) q.newTextChild(None,"resource",to_utf8(self.my_jid.resource)) self.send(iq) self.set_response_handlers(iq,self.auth_stage2,self.auth_error, self.auth_timeout,timeout=60) iq.free()
[ "def", "_auth_stage1", "(", "self", ")", ":", "iq", "=", "Iq", "(", "stanza_type", "=", "\"get\"", ")", "q", "=", "iq", ".", "new_query", "(", "\"jabber:iq:auth\"", ")", "q", ".", "newTextChild", "(", "None", ",", "\"username\"", ",", "to_utf8", "(", "self", ".", "my_jid", ".", "node", ")", ")", "q", ".", "newTextChild", "(", "None", ",", "\"resource\"", ",", "to_utf8", "(", "self", ".", "my_jid", ".", "resource", ")", ")", "self", ".", "send", "(", "iq", ")", "self", ".", "set_response_handlers", "(", "iq", ",", "self", ".", "auth_stage2", ",", "self", ".", "auth_error", ",", "self", ".", "auth_timeout", ",", "timeout", "=", "60", ")", "iq", ".", "free", "(", ")" ]
40.153846
0.022472
def CacheStorage_requestCachedResponse(self, cacheId, requestURL): """ Function path: CacheStorage.requestCachedResponse Domain: CacheStorage Method name: requestCachedResponse Parameters: Required arguments: 'cacheId' (type: CacheId) -> Id of cache that contains the enty. 'requestURL' (type: string) -> URL spec of the request. Returns: 'response' (type: CachedResponse) -> Response read from the cache. Description: Fetches cache entry. """ assert isinstance(requestURL, (str,) ), "Argument 'requestURL' must be of type '['str']'. Received type: '%s'" % type( requestURL) subdom_funcs = self.synchronous_command('CacheStorage.requestCachedResponse', cacheId=cacheId, requestURL=requestURL) return subdom_funcs
[ "def", "CacheStorage_requestCachedResponse", "(", "self", ",", "cacheId", ",", "requestURL", ")", ":", "assert", "isinstance", "(", "requestURL", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'requestURL' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "requestURL", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'CacheStorage.requestCachedResponse'", ",", "cacheId", "=", "cacheId", ",", "requestURL", "=", "requestURL", ")", "return", "subdom_funcs" ]
36.238095
0.038412
def get_environ_proxies(): """Return a dict of environment proxies. From requests_.""" proxy_keys = [ 'all', 'http', 'https', 'ftp', 'socks', 'ws', 'wss', 'no' ] def get_proxy(k): return os.environ.get(k) or os.environ.get(k.upper()) proxies = [(key, get_proxy(key + '_proxy')) for key in proxy_keys] return dict([(key, val) for (key, val) in proxies if val])
[ "def", "get_environ_proxies", "(", ")", ":", "proxy_keys", "=", "[", "'all'", ",", "'http'", ",", "'https'", ",", "'ftp'", ",", "'socks'", ",", "'ws'", ",", "'wss'", ",", "'no'", "]", "def", "get_proxy", "(", "k", ")", ":", "return", "os", ".", "environ", ".", "get", "(", "k", ")", "or", "os", ".", "environ", ".", "get", "(", "k", ".", "upper", "(", ")", ")", "proxies", "=", "[", "(", "key", ",", "get_proxy", "(", "key", "+", "'_proxy'", ")", ")", "for", "key", "in", "proxy_keys", "]", "return", "dict", "(", "[", "(", "key", ",", "val", ")", "for", "(", "key", ",", "val", ")", "in", "proxies", "if", "val", "]", ")" ]
23.157895
0.002183
def createPushParser(SAX, chunk, size, URI): """Create a progressive XML parser context to build either an event flow if the SAX object is not None, or a DOM tree otherwise. """ ret = libxml2mod.xmlCreatePushParser(SAX, chunk, size, URI) if ret is None:raise parserError('xmlCreatePushParser() failed') return parserCtxt(_obj=ret)
[ "def", "createPushParser", "(", "SAX", ",", "chunk", ",", "size", ",", "URI", ")", ":", "ret", "=", "libxml2mod", ".", "xmlCreatePushParser", "(", "SAX", ",", "chunk", ",", "size", ",", "URI", ")", "if", "ret", "is", "None", ":", "raise", "parserError", "(", "'xmlCreatePushParser() failed'", ")", "return", "parserCtxt", "(", "_obj", "=", "ret", ")" ]
50.428571
0.008357
def _find_players(self, boxscore): """ Find all players for each team. Iterate through every player for both teams as found in the boxscore tables and create a list of instances of the BoxscorePlayer class for each player. Return lists of player instances comprising the away and home team players, respectively. Parameters ---------- boxscore : PyQuery object A PyQuery object containing all of the HTML data from the boxscore. Returns ------- tuple Returns a ``tuple`` in the format (away_players, home_players) where each element is a list of player instances for the away and home teams, respectively. """ player_dict = {} table_count = 0 tables = self._find_boxscore_tables(boxscore) for table in tables: home_or_away = HOME # There are two tables per team with the odd tables belonging to # the away team. if table_count % 2 == 1: home_or_away = AWAY player_dict = self._extract_player_stats(table, player_dict, home_or_away) table_count += 1 away_players, home_players = self._instantiate_players(player_dict) return away_players, home_players
[ "def", "_find_players", "(", "self", ",", "boxscore", ")", ":", "player_dict", "=", "{", "}", "table_count", "=", "0", "tables", "=", "self", ".", "_find_boxscore_tables", "(", "boxscore", ")", "for", "table", "in", "tables", ":", "home_or_away", "=", "HOME", "# There are two tables per team with the odd tables belonging to", "# the away team.", "if", "table_count", "%", "2", "==", "1", ":", "home_or_away", "=", "AWAY", "player_dict", "=", "self", ".", "_extract_player_stats", "(", "table", ",", "player_dict", ",", "home_or_away", ")", "table_count", "+=", "1", "away_players", ",", "home_players", "=", "self", ".", "_instantiate_players", "(", "player_dict", ")", "return", "away_players", ",", "home_players" ]
38.162162
0.001381
def files(self): """List of files (only supported file formats)""" if self._files is None: fifo = SeriesFolder._search_files(self.path) self._files = [ff[0] for ff in fifo] self._formats = [ff[1] for ff in fifo] return self._files
[ "def", "files", "(", "self", ")", ":", "if", "self", ".", "_files", "is", "None", ":", "fifo", "=", "SeriesFolder", ".", "_search_files", "(", "self", ".", "path", ")", "self", ".", "_files", "=", "[", "ff", "[", "0", "]", "for", "ff", "in", "fifo", "]", "self", ".", "_formats", "=", "[", "ff", "[", "1", "]", "for", "ff", "in", "fifo", "]", "return", "self", ".", "_files" ]
40.571429
0.006897
def fs_obj_remove(self, path): """Removes a file system object (file, symlink, etc) in the guest. Will not work on directories, use :py:func:`IGuestSession.directory_remove` to remove directories. This method will remove symbolic links in the final path component, not follow them. in path of type str Path to the file system object to remove. Guest style path. raises :class:`OleErrorNotimpl` The method has not been implemented yet. raises :class:`VBoxErrorObjectNotFound` The file system object was not found. raises :class:`VBoxErrorIprtError` For most other errors. We know this is unhelpful, will fix shortly... """ if not isinstance(path, basestring): raise TypeError("path can only be an instance of type basestring") self._call("fsObjRemove", in_p=[path])
[ "def", "fs_obj_remove", "(", "self", ",", "path", ")", ":", "if", "not", "isinstance", "(", "path", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"path can only be an instance of type basestring\"", ")", "self", ".", "_call", "(", "\"fsObjRemove\"", ",", "in_p", "=", "[", "path", "]", ")" ]
38.32
0.010183
def log(package): """ List all of the changes to a package on the server. """ team, owner, pkg = parse_package(package) session = _get_session(team) response = session.get( "{url}/api/log/{owner}/{pkg}/".format( url=get_registry_url(team), owner=owner, pkg=pkg ) ) table = [("Hash", "Pushed", "Author", "Tags", "Versions")] for entry in reversed(response.json()['logs']): ugly = datetime.fromtimestamp(entry['created']) nice = ugly.strftime("%Y-%m-%d %H:%M:%S") table.append((entry['hash'], nice, entry['author'], str(entry.get('tags', [])), str(entry.get('versions', [])))) _print_table(table)
[ "def", "log", "(", "package", ")", ":", "team", ",", "owner", ",", "pkg", "=", "parse_package", "(", "package", ")", "session", "=", "_get_session", "(", "team", ")", "response", "=", "session", ".", "get", "(", "\"{url}/api/log/{owner}/{pkg}/\"", ".", "format", "(", "url", "=", "get_registry_url", "(", "team", ")", ",", "owner", "=", "owner", ",", "pkg", "=", "pkg", ")", ")", "table", "=", "[", "(", "\"Hash\"", ",", "\"Pushed\"", ",", "\"Author\"", ",", "\"Tags\"", ",", "\"Versions\"", ")", "]", "for", "entry", "in", "reversed", "(", "response", ".", "json", "(", ")", "[", "'logs'", "]", ")", ":", "ugly", "=", "datetime", ".", "fromtimestamp", "(", "entry", "[", "'created'", "]", ")", "nice", "=", "ugly", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "table", ".", "append", "(", "(", "entry", "[", "'hash'", "]", ",", "nice", ",", "entry", "[", "'author'", "]", ",", "str", "(", "entry", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", ",", "str", "(", "entry", ".", "get", "(", "'versions'", ",", "[", "]", ")", ")", ")", ")", "_print_table", "(", "table", ")" ]
31.954545
0.002762
def vars_class(cls): """Return a dict of vars for the given class, including all ancestors. This differs from the usual behaviour of `vars` which returns attributes belonging to the given class and not its ancestors. """ return dict(chain.from_iterable( vars(cls).items() for cls in reversed(cls.__mro__)))
[ "def", "vars_class", "(", "cls", ")", ":", "return", "dict", "(", "chain", ".", "from_iterable", "(", "vars", "(", "cls", ")", ".", "items", "(", ")", "for", "cls", "in", "reversed", "(", "cls", ".", "__mro__", ")", ")", ")" ]
41
0.002985
def c_if(self, classical, val): """Add classical control register to all instructions.""" for gate in self.instructions: gate.c_if(classical, val) return self
[ "def", "c_if", "(", "self", ",", "classical", ",", "val", ")", ":", "for", "gate", "in", "self", ".", "instructions", ":", "gate", ".", "c_if", "(", "classical", ",", "val", ")", "return", "self" ]
38
0.010309
def psd(data, dt, ndivide=1, window=hanning, overlap_half=False): """Calculate power spectrum density of data. Args: data (np.ndarray): Input data. dt (float): Time between each data. ndivide (int): Do averaging (split data into ndivide, get psd of each, and average them). ax (matplotlib.axes): Axis you want to plot on. doplot (bool): Plot how averaging works. overlap_half (bool): Split data to half-overlapped regions. Returns: vk (np.ndarray): Frequency. psd (np.ndarray): PSD """ logger = getLogger('decode.utils.ndarray.psd') if overlap_half: step = int(len(data) / (ndivide + 1)) size = step * 2 else: step = int(len(data) / ndivide) size = step if bin(len(data)).count('1') != 1: logger.warning('warning: length of data is not power of 2: {}'.format(len(data))) size = int(len(data) / ndivide) if bin(size).count('1') != 1.: if overlap_half: logger.warning('warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'.format(size)) else: logger.warning('warning: (length of data) / ndivide is not power of 2: {}'.format(size)) psd = np.zeros(size) T = (size - 1) * dt vs = 1 / dt vk_ = fftfreq(size, dt) vk = vk_[np.where(vk_ >= 0)] for i in range(ndivide): d = data[i * step:i * step + size] if window is None: w = np.ones(size) corr = 1.0 else: w = window(size) corr = np.mean(w**2) psd = psd + 2 * (np.abs(fft(d * w)))**2 / size * dt / corr return vk, psd[:len(vk)] / ndivide
[ "def", "psd", "(", "data", ",", "dt", ",", "ndivide", "=", "1", ",", "window", "=", "hanning", ",", "overlap_half", "=", "False", ")", ":", "logger", "=", "getLogger", "(", "'decode.utils.ndarray.psd'", ")", "if", "overlap_half", ":", "step", "=", "int", "(", "len", "(", "data", ")", "/", "(", "ndivide", "+", "1", ")", ")", "size", "=", "step", "*", "2", "else", ":", "step", "=", "int", "(", "len", "(", "data", ")", "/", "ndivide", ")", "size", "=", "step", "if", "bin", "(", "len", "(", "data", ")", ")", ".", "count", "(", "'1'", ")", "!=", "1", ":", "logger", ".", "warning", "(", "'warning: length of data is not power of 2: {}'", ".", "format", "(", "len", "(", "data", ")", ")", ")", "size", "=", "int", "(", "len", "(", "data", ")", "/", "ndivide", ")", "if", "bin", "(", "size", ")", ".", "count", "(", "'1'", ")", "!=", "1.", ":", "if", "overlap_half", ":", "logger", ".", "warning", "(", "'warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'", ".", "format", "(", "size", ")", ")", "else", ":", "logger", ".", "warning", "(", "'warning: (length of data) / ndivide is not power of 2: {}'", ".", "format", "(", "size", ")", ")", "psd", "=", "np", ".", "zeros", "(", "size", ")", "T", "=", "(", "size", "-", "1", ")", "*", "dt", "vs", "=", "1", "/", "dt", "vk_", "=", "fftfreq", "(", "size", ",", "dt", ")", "vk", "=", "vk_", "[", "np", ".", "where", "(", "vk_", ">=", "0", ")", "]", "for", "i", "in", "range", "(", "ndivide", ")", ":", "d", "=", "data", "[", "i", "*", "step", ":", "i", "*", "step", "+", "size", "]", "if", "window", "is", "None", ":", "w", "=", "np", ".", "ones", "(", "size", ")", "corr", "=", "1.0", "else", ":", "w", "=", "window", "(", "size", ")", "corr", "=", "np", ".", "mean", "(", "w", "**", "2", ")", "psd", "=", "psd", "+", "2", "*", "(", "np", ".", "abs", "(", "fft", "(", "d", "*", "w", ")", ")", ")", "**", "2", "/", "size", "*", "dt", "/", "corr", "return", "vk", ",", "psd", "[", ":", "len", "(", "vk", ")", "]", "/", "ndivide" ]
33.795918
0.005869
def len(iterable): """Redefining len here so it will be able to work with non-ASCII characters """ if not isinstance(iterable, str): return iterable.__len__() try: return len(unicode(iterable, 'utf')) except: return iterable.__len__()
[ "def", "len", "(", "iterable", ")", ":", "if", "not", "isinstance", "(", "iterable", ",", "str", ")", ":", "return", "iterable", ".", "__len__", "(", ")", "try", ":", "return", "len", "(", "unicode", "(", "iterable", ",", "'utf'", ")", ")", "except", ":", "return", "iterable", ".", "__len__", "(", ")" ]
27
0.007168
def _get_names(self, path: str) -> Iterator[str]: """Load required packages from path to requirements file """ for i in RequirementsFinder._get_names_cached(path): yield i
[ "def", "_get_names", "(", "self", ",", "path", ":", "str", ")", "->", "Iterator", "[", "str", "]", ":", "for", "i", "in", "RequirementsFinder", ".", "_get_names_cached", "(", "path", ")", ":", "yield", "i" ]
40.6
0.009662