Columns:
  text          string    (lengths 78 to 104k)
  code_tokens   sequence
  avg_line_len  float64   (7.91 to 980)
  score         float64   (0 to 487)
def sparse(numRows, numCols, colPtrs, rowIndices, values):
    """
    Create a SparseMatrix
    """
    return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values)
[ "def", "sparse", "(", "numRows", ",", "numCols", ",", "colPtrs", ",", "rowIndices", ",", "values", ")", ":", "return", "SparseMatrix", "(", "numRows", ",", "numCols", ",", "colPtrs", ",", "rowIndices", ",", "values", ")" ]
avg_line_len: 36.6
score: 12.6
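The colPtrs / rowIndices / values triplet above is the standard compressed sparse column (CSC) layout. As an illustration of that layout (using scipy.sparse, which is an assumption here; the snippet's own SparseMatrix class is not shown in this record):

import numpy as np
from scipy.sparse import csc_matrix

# CSC triplet for the matrix [[1, 0], [0, 2]]
values = np.array([1.0, 2.0])    # non-zero entries, stored column by column
row_indices = np.array([0, 1])   # row index of each stored entry
col_ptrs = np.array([0, 1, 2])   # column k spans values[col_ptrs[k]:col_ptrs[k+1]]

m = csc_matrix((values, row_indices, col_ptrs), shape=(2, 2))
print(m.toarray())  # [[1. 0.], [0. 2.]]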
def _handle_message(self, data):
    """
    Handle messages.
    """
    if data.type == MSG_SERVER_SETTINGS:
        _LOGGER.info(data.payload)
    elif data.type == MSG_SAMPLE_FORMAT:
        _LOGGER.info(data.payload)
        self._connected = True
    elif data.type == MSG_TIME:
        if not self._buffered:
            _LOGGER.info('Buffering')
    elif data.type == MSG_HEADER:
        # Push to app source and start playing.
        _LOGGER.info(data.payload.codec.decode('ascii'))
        self._source.push(data.payload.header)
        self._source.play()
    elif data.type == MSG_WIRE_CHUNK:
        # Add chunks to play queue.
        self._buffer.put(data.payload.chunk)
        if self._buffer.qsize() > BUFFER_SIZE:
            self._buffered = True
        if self._buffer.empty():
            self._buffered = False
[ "def", "_handle_message", "(", "self", ",", "data", ")", ":", "if", "data", ".", "type", "==", "MSG_SERVER_SETTINGS", ":", "_LOGGER", ".", "info", "(", "data", ".", "payload", ")", "elif", "data", ".", "type", "==", "MSG_SAMPLE_FORMAT", ":", "_LOGGER", ".", "info", "(", "data", ".", "payload", ")", "self", ".", "_connected", "=", "True", "elif", "data", ".", "type", "==", "MSG_TIME", ":", "if", "not", "self", ".", "_buffered", ":", "_LOGGER", ".", "info", "(", "'Buffering'", ")", "elif", "data", ".", "type", "==", "MSG_HEADER", ":", "# Push to app source and start playing.", "_LOGGER", ".", "info", "(", "data", ".", "payload", ".", "codec", ".", "decode", "(", "'ascii'", ")", ")", "self", ".", "_source", ".", "push", "(", "data", ".", "payload", ".", "header", ")", "self", ".", "_source", ".", "play", "(", ")", "elif", "data", ".", "type", "==", "MSG_WIRE_CHUNK", ":", "# Add chunks to play queue.", "self", ".", "_buffer", ".", "put", "(", "data", ".", "payload", ".", "chunk", ")", "if", "self", ".", "_buffer", ".", "qsize", "(", ")", ">", "BUFFER_SIZE", ":", "self", ".", "_buffered", "=", "True", "if", "self", ".", "_buffer", ".", "empty", "(", ")", ":", "self", ".", "_buffered", "=", "False" ]
avg_line_len: 40.454545
score: 5.454545
def delete_relay(self, relayid, data):
    """Delete relay settings"""
    return self.api_call(
        ENDPOINTS['relays']['delete'],
        dict(relayid=relayid),
        body=data)
[ "def", "delete_relay", "(", "self", ",", "relayid", ",", "data", ")", ":", "return", "self", ".", "api_call", "(", "ENDPOINTS", "[", "'relays'", "]", "[", "'delete'", "]", ",", "dict", "(", "relayid", "=", "relayid", ")", ",", "body", "=", "data", ")" ]
avg_line_len: 33.333333
score: 6.5
def open(self, session, resource_name,
         access_mode=constants.AccessModes.no_lock,
         open_timeout=constants.VI_TMO_IMMEDIATE):
    """Opens a session to the specified resource.

    Corresponds to viOpen function of the VISA library.

    :param session: Resource Manager session (should always be a session
                    returned from open_default_resource_manager()).
    :param resource_name: Unique symbolic name of a resource.
    :param access_mode: Specifies the mode by which the resource is to be accessed.
    :type access_mode: :class:`pyvisa.constants.AccessModes`
    :param open_timeout: Specifies the maximum time period (in milliseconds) that
                         this operation waits before returning an error.
    :return: Unique logical identifier reference to a session, return value of the
             library call.
    :rtype: session, :class:`pyvisa.constants.StatusCode`
    """
    raise NotImplementedError
[ "def", "open", "(", "self", ",", "session", ",", "resource_name", ",", "access_mode", "=", "constants", ".", "AccessModes", ".", "no_lock", ",", "open_timeout", "=", "constants", ".", "VI_TMO_IMMEDIATE", ")", ":", "raise", "NotImplementedError" ]
avg_line_len: 59.5625
score: 31.6875
def set_prompt(self, prompt=None):
    """
    Defines a pattern that is waited for when calling the expect_prompt()
    method.
    If the set_prompt() method is not called, or if it is called with the
    prompt argument set to None, a default prompt is used that should
    work with many devices running Unix, IOS, IOS-XR, or Junos and others.

    :type  prompt: RegEx
    :param prompt: The pattern that matches the prompt of the remote host.
    """
    if prompt is None:
        self.manual_prompt_re = prompt
    else:
        self.manual_prompt_re = to_regexs(prompt)
[ "def", "set_prompt", "(", "self", ",", "prompt", "=", "None", ")", ":", "if", "prompt", "is", "None", ":", "self", ".", "manual_prompt_re", "=", "prompt", "else", ":", "self", ".", "manual_prompt_re", "=", "to_regexs", "(", "prompt", ")" ]
avg_line_len: 41.066667
score: 21.466667
def aggregate(self, q=None, scroll_size=SEARCH_LIMIT, reset_query=True, **kwargs):
    """Perform an advanced query, and return *all* matching results.
    Will automatically perform multiple queries in order to retrieve all results.

    Note:
        All ``aggregate`` queries run in advanced mode, and ``info`` is not
        available.

    Arguments:
        q (str): The query to execute. **Default:** The current helper-formed
                query, if any. There must be some query to execute.
        scroll_size (int): Maximum number of records returned per query.
                Must be between one and the ``SEARCH_LIMIT`` (inclusive).
                **Default:** ``SEARCH_LIMIT``.
        reset_query (bool): If ``True``, will destroy the current query after
                execution and start a fresh one.
                If ``False``, will keep the current query set.
                **Default:** ``True``.

    Keyword Arguments:
        scroll_field (str): The field on which to scroll. This should be a
                field that counts/indexes the entries. This should be set in
                ``self.scroll_field``, but if your application requires
                separate scroll fields for a single client, it can be set in
                this way as well.
                **Default**: ``self.scroll_field``.

    Returns:
        list of dict: All matching records.
    """
    scroll_field = kwargs.get("scroll_field", self.scroll_field)
    # If q not specified, use internal, helper-built query
    if q is None:
        res = self._aggregate(scroll_field=scroll_field, scroll_size=scroll_size)
        if reset_query:
            self.reset_query()
        return res
    # Otherwise, run an independent query as SearchHelper.search() does.
    else:
        return self.__class__(index=self.index, q=q, advanced=True,
                              search_client=self._SearchHelper__search_client
                              ).aggregate(scroll_size=scroll_size,
                                          reset_query=reset_query)
[ "def", "aggregate", "(", "self", ",", "q", "=", "None", ",", "scroll_size", "=", "SEARCH_LIMIT", ",", "reset_query", "=", "True", ",", "*", "*", "kwargs", ")", ":", "scroll_field", "=", "kwargs", ".", "get", "(", "\"scroll_field\"", ",", "self", ".", "scroll_field", ")", "# If q not specified, use internal, helper-built query", "if", "q", "is", "None", ":", "res", "=", "self", ".", "_aggregate", "(", "scroll_field", "=", "scroll_field", ",", "scroll_size", "=", "scroll_size", ")", "if", "reset_query", ":", "self", ".", "reset_query", "(", ")", "return", "res", "# Otherwise, run an independent query as SearchHelper.search() does.", "else", ":", "return", "self", ".", "__class__", "(", "index", "=", "self", ".", "index", ",", "q", "=", "q", ",", "advanced", "=", "True", ",", "search_client", "=", "self", ".", "_SearchHelper__search_client", ")", ".", "aggregate", "(", "scroll_size", "=", "scroll_size", ",", "reset_query", "=", "reset_query", ")" ]
avg_line_len: 50.380952
score: 27.928571
def parseRest(self, response):
    """
    Parse a REST response. If the response contains
    an error field, we will raise it as an exception.
    """
    body = json.loads(response)
    try:
        error = body['error']['description']
        code = body['error']['code']
    except Exception:
        return body['data']
    else:
        raise ClickatellError(error, code)
[ "def", "parseRest", "(", "self", ",", "response", ")", ":", "body", "=", "json", ".", "loads", "(", "response", ")", "try", ":", "error", "=", "body", "[", "'error'", "]", "[", "'description'", "]", "code", "=", "body", "[", "'error'", "]", "[", "'code'", "]", "except", "Exception", ":", "return", "body", "[", "'data'", "]", "else", ":", "raise", "ClickatellError", "(", "error", ",", "code", ")" ]
avg_line_len: 29.642857
score: 13.928571
def _header_resized(self, row, old_height, new_height):
    """Resize the corresponding row of the header section selected."""
    self.table_header.setRowHeight(row, new_height)
    self._update_layout()
[ "def", "_header_resized", "(", "self", ",", "row", ",", "old_height", ",", "new_height", ")", ":", "self", ".", "table_header", ".", "setRowHeight", "(", "row", ",", "new_height", ")", "self", ".", "_update_layout", "(", ")" ]
avg_line_len: 54
score: 10.75
def flushInput(self):
    '''flush any pending input'''
    self.buf = ''
    saved_timeout = self.timeout
    self.timeout = 0.5
    self._recv()
    self.timeout = saved_timeout
    self.buf = ''
    self.debug("flushInput")
[ "def", "flushInput", "(", "self", ")", ":", "self", ".", "buf", "=", "''", "saved_timeout", "=", "self", ".", "timeout", "self", ".", "timeout", "=", "0.5", "self", ".", "_recv", "(", ")", "self", ".", "timeout", "=", "saved_timeout", "self", ".", "buf", "=", "''", "self", ".", "debug", "(", "\"flushInput\"", ")" ]
avg_line_len: 34.888889
score: 8
def poll_integration_information_for_waiting_integration_alerts():
    """poll_integration_information_for_waiting_integration_alerts."""
    if not polling_integration_alerts:
        return

    logger.debug("Polling information for waiting integration alerts")

    for integration_alert in polling_integration_alerts:
        configured_integration = integration_alert.configured_integration
        integration = configured_integration.integration
        polling_duration = integration.polling_duration

        if get_current_datetime_utc() - integration_alert.send_time > polling_duration:
            logger.debug("Polling duration expired for integration alert %s", integration_alert)
            integration_alert.status = IntegrationAlertStatuses.ERROR_POLLING.name
        else:
            integration_alert.status = IntegrationAlertStatuses.IN_POLLING.name
            poll_integration_alert_data(integration_alert)
[ "def", "poll_integration_information_for_waiting_integration_alerts", "(", ")", ":", "if", "not", "polling_integration_alerts", ":", "return", "logger", ".", "debug", "(", "\"Polling information for waiting integration alerts\"", ")", "for", "integration_alert", "in", "polling_integration_alerts", ":", "configured_integration", "=", "integration_alert", ".", "configured_integration", "integration", "=", "configured_integration", ".", "integration", "polling_duration", "=", "integration", ".", "polling_duration", "if", "get_current_datetime_utc", "(", ")", "-", "integration_alert", ".", "send_time", ">", "polling_duration", ":", "logger", ".", "debug", "(", "\"Polling duration expired for integration alert %s\"", ",", "integration_alert", ")", "integration_alert", ".", "status", "=", "IntegrationAlertStatuses", ".", "ERROR_POLLING", ".", "name", "else", ":", "integration_alert", ".", "status", "=", "IntegrationAlertStatuses", ".", "IN_POLLING", ".", "name", "poll_integration_alert_data", "(", "integration_alert", ")" ]
avg_line_len: 48.052632
score: 29.105263
def serialize_link(ctx, document, elem, root):
    """Serialize link element.

    This works only for external links at the moment.
    """
    _a = etree.SubElement(root, 'a')

    for el in elem.elements:
        _ser = ctx.get_serializer(el)

        if _ser:
            _td = _ser(ctx, document, el, _a)
        else:
            if isinstance(el, doc.Text):
                children = list(_a)

                if len(children) == 0:
                    _text = _a.text or u''
                    _a.text = u'{}{}'.format(_text, el.value())
                else:
                    _text = children[-1].tail or u''
                    children[-1].tail = u'{}{}'.format(_text, el.value())

    if elem.rid in document.relationships[ctx.options['relationship']]:
        _a.set('href', document.relationships[ctx.options['relationship']][elem.rid].get('target', ''))

    fire_hooks(ctx, document, elem, _a, ctx.get_hook('a'))

    return root
[ "def", "serialize_link", "(", "ctx", ",", "document", ",", "elem", ",", "root", ")", ":", "_a", "=", "etree", ".", "SubElement", "(", "root", ",", "'a'", ")", "for", "el", "in", "elem", ".", "elements", ":", "_ser", "=", "ctx", ".", "get_serializer", "(", "el", ")", "if", "_ser", ":", "_td", "=", "_ser", "(", "ctx", ",", "document", ",", "el", ",", "_a", ")", "else", ":", "if", "isinstance", "(", "el", ",", "doc", ".", "Text", ")", ":", "children", "=", "list", "(", "_a", ")", "if", "len", "(", "children", ")", "==", "0", ":", "_text", "=", "_a", ".", "text", "or", "u''", "_a", ".", "text", "=", "u'{}{}'", ".", "format", "(", "_text", ",", "el", ".", "value", "(", ")", ")", "else", ":", "_text", "=", "children", "[", "-", "1", "]", ".", "tail", "or", "u''", "children", "[", "-", "1", "]", ".", "tail", "=", "u'{}{}'", ".", "format", "(", "_text", ",", "el", ".", "value", "(", ")", ")", "if", "elem", ".", "rid", "in", "document", ".", "relationships", "[", "ctx", ".", "options", "[", "'relationship'", "]", "]", ":", "_a", ".", "set", "(", "'href'", ",", "document", ".", "relationships", "[", "ctx", ".", "options", "[", "'relationship'", "]", "]", "[", "elem", ".", "rid", "]", ".", "get", "(", "'target'", ",", "''", ")", ")", "fire_hooks", "(", "ctx", ",", "document", ",", "elem", ",", "_a", ",", "ctx", ".", "get_hook", "(", "'a'", ")", ")", "return", "root" ]
avg_line_len: 28.875
score: 22.625
def combine_initial_dims(tensor: torch.Tensor) -> torch.Tensor:
    """
    Given a (possibly higher order) tensor of ids with shape
    (d1, ..., dn, sequence_length)
    Return a view that's (d1 * ... * dn, sequence_length).
    If original tensor is 1-d or 2-d, return it as is.
    """
    if tensor.dim() <= 2:
        return tensor
    else:
        return tensor.view(-1, tensor.size(-1))
[ "def", "combine_initial_dims", "(", "tensor", ":", "torch", ".", "Tensor", ")", "->", "torch", ".", "Tensor", ":", "if", "tensor", ".", "dim", "(", ")", "<=", "2", ":", "return", "tensor", "else", ":", "return", "tensor", ".", "view", "(", "-", "1", ",", "tensor", ".", "size", "(", "-", "1", ")", ")" ]
avg_line_len: 35
score: 13.909091
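A quick usage sketch for combine_initial_dims above (assuming PyTorch is installed): a (2, 3, 5) batch of id sequences collapses to a (6, 5) view, while 1-d and 2-d inputs pass through unchanged.

import torch

ids = torch.randint(0, 100, (2, 3, 5))
print(combine_initial_dims(ids).shape)                # torch.Size([6, 5])
print(combine_initial_dims(torch.zeros(4, 7)).shape)  # torch.Size([4, 7]), returned as-is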
def error_state(self):
    """Set the error condition"""
    self.buildstate.state.lasttime = time()
    self.buildstate.commit()
    return self.buildstate.state.error
[ "def", "error_state", "(", "self", ")", ":", "self", ".", "buildstate", ".", "state", ".", "lasttime", "=", "time", "(", ")", "self", ".", "buildstate", ".", "commit", "(", ")", "return", "self", ".", "buildstate", ".", "state", ".", "error" ]
avg_line_len: 36
score: 7
def connect(self, source, target, witnesses):
    """
    :type source: integer
    :type target: integer
    """
    # print("Adding Edge: "+source+":"+target)
    if self.graph.has_edge(source, target):
        self.graph[source][target]["label"] += ", " + str(witnesses)
    else:
        self.graph.add_edge(source, target, label=witnesses)
[ "def", "connect", "(", "self", ",", "source", ",", "target", ",", "witnesses", ")", ":", "# print(\"Adding Edge: \"+source+\":\"+target)", "if", "self", ".", "graph", ".", "has_edge", "(", "source", ",", "target", ")", ":", "self", ".", "graph", "[", "source", "]", "[", "target", "]", "[", "\"label\"", "]", "+=", "\", \"", "+", "str", "(", "witnesses", ")", "else", ":", "self", ".", "graph", ".", "add_edge", "(", "source", ",", "target", ",", "label", "=", "witnesses", ")" ]
avg_line_len: 37.1
score: 12.7
def index(self):
    """ Index all files/directories below the current BIDSNode. """
    config_list = self.config
    layout = self.layout

    for (dirpath, dirnames, filenames) in os.walk(self.path):

        # If layout configuration file exists, delete it
        layout_file = self.layout.config_filename
        if layout_file in filenames:
            filenames.remove(layout_file)

        for f in filenames:
            abs_fn = os.path.join(self.path, f)

            # Skip files that fail validation, unless forcibly indexing
            if not self.force_index and not layout._validate_file(abs_fn):
                continue

            bf = BIDSFile(abs_fn, self)

            # Extract entity values
            match_vals = {}
            for e in self.available_entities.values():
                m = e.match_file(bf)
                if m is None and e.mandatory:
                    break
                if m is not None:
                    match_vals[e.name] = (e, m)

            # Create Entity <=> BIDSFile mappings
            if match_vals:
                for name, (ent, val) in match_vals.items():
                    bf.entities[name] = val
                    ent.add_file(bf.path, val)

            self.files.append(bf)
            # Also add to the Layout's master list
            self.layout.files[bf.path] = bf

        root_node = self if self.root is None else self.root

        for d in dirnames:
            d = os.path.join(dirpath, d)

            # Derivative directories must always be added separately and
            # passed as their own root, so terminate if passed.
            if d.startswith(os.path.join(self.layout.root, 'derivatives')):
                continue

            # Skip directories that fail validation, unless force_index
            # is defined, in which case we have to keep scanning, in the
            # event that a file somewhere below the current level matches.
            # Unfortunately we probably can't do much better than this
            # without a lot of additional work, because the elements of
            # .force_index can be SRE_Patterns that match files below in
            # unpredictable ways.
            if check_path_matches_patterns(d, self.layout.force_index):
                self.force_index = True
            else:
                valid_dir = layout._validate_dir(d)
                # Note the difference between self.force_index and
                # self.layout.force_index.
                if not valid_dir and not self.layout.force_index:
                    continue

            child_class = self._get_child_class(d)
            # TODO: filter the config files based on include/exclude rules
            child = child_class(d, config_list, root_node, self,
                                force_index=self.force_index)

            if self.force_index or valid_dir:
                self.children.append(child)

        # prevent subdirectory traversal
        break
[ "def", "index", "(", "self", ")", ":", "config_list", "=", "self", ".", "config", "layout", "=", "self", ".", "layout", "for", "(", "dirpath", ",", "dirnames", ",", "filenames", ")", "in", "os", ".", "walk", "(", "self", ".", "path", ")", ":", "# If layout configuration file exists, delete it", "layout_file", "=", "self", ".", "layout", ".", "config_filename", "if", "layout_file", "in", "filenames", ":", "filenames", ".", "remove", "(", "layout_file", ")", "for", "f", "in", "filenames", ":", "abs_fn", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "f", ")", "# Skip files that fail validation, unless forcibly indexing", "if", "not", "self", ".", "force_index", "and", "not", "layout", ".", "_validate_file", "(", "abs_fn", ")", ":", "continue", "bf", "=", "BIDSFile", "(", "abs_fn", ",", "self", ")", "# Extract entity values", "match_vals", "=", "{", "}", "for", "e", "in", "self", ".", "available_entities", ".", "values", "(", ")", ":", "m", "=", "e", ".", "match_file", "(", "bf", ")", "if", "m", "is", "None", "and", "e", ".", "mandatory", ":", "break", "if", "m", "is", "not", "None", ":", "match_vals", "[", "e", ".", "name", "]", "=", "(", "e", ",", "m", ")", "# Create Entity <=> BIDSFile mappings", "if", "match_vals", ":", "for", "name", ",", "(", "ent", ",", "val", ")", "in", "match_vals", ".", "items", "(", ")", ":", "bf", ".", "entities", "[", "name", "]", "=", "val", "ent", ".", "add_file", "(", "bf", ".", "path", ",", "val", ")", "self", ".", "files", ".", "append", "(", "bf", ")", "# Also add to the Layout's master list", "self", ".", "layout", ".", "files", "[", "bf", ".", "path", "]", "=", "bf", "root_node", "=", "self", "if", "self", ".", "root", "is", "None", "else", "self", ".", "root", "for", "d", "in", "dirnames", ":", "d", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "d", ")", "# Derivative directories must always be added separately and", "# passed as their own root, so terminate if passed.", "if", "d", ".", "startswith", "(", "os", ".", "path", ".", "join", "(", "self", ".", "layout", ".", "root", ",", "'derivatives'", ")", ")", ":", "continue", "# Skip directories that fail validation, unless force_index", "# is defined, in which case we have to keep scanning, in the", "# event that a file somewhere below the current level matches.", "# Unfortunately we probably can't do much better than this", "# without a lot of additional work, because the elements of", "# .force_index can be SRE_Patterns that match files below in", "# unpredictable ways.", "if", "check_path_matches_patterns", "(", "d", ",", "self", ".", "layout", ".", "force_index", ")", ":", "self", ".", "force_index", "=", "True", "else", ":", "valid_dir", "=", "layout", ".", "_validate_dir", "(", "d", ")", "# Note the difference between self.force_index and", "# self.layout.force_index.", "if", "not", "valid_dir", "and", "not", "self", ".", "layout", ".", "force_index", ":", "continue", "child_class", "=", "self", ".", "_get_child_class", "(", "d", ")", "# TODO: filter the config files based on include/exclude rules", "child", "=", "child_class", "(", "d", ",", "config_list", ",", "root_node", ",", "self", ",", "force_index", "=", "self", ".", "force_index", ")", "if", "self", ".", "force_index", "or", "valid_dir", ":", "self", ".", "children", ".", "append", "(", "child", ")", "# prevent subdirectory traversal", "break" ]
avg_line_len: 39.797468
score: 22.088608
def dump_private_keys_or_addrs_chooser(wallet_obj):
    '''
    Offline-enabled mechanism to dump everything
    '''
    if wallet_obj.private_key:
        puts('Which private keys and addresses do you want?')
    else:
        puts('Which addresses do you want?')
    with indent(2):
        puts(colored.cyan('1: Active - have funds to spend'))
        puts(colored.cyan('2: Spent - no funds to spend (because they have been spent)'))
        puts(colored.cyan('3: Unused - no funds to spend (because the address has never been used)'))
        puts(colored.cyan('0: All (works offline) - regardless of whether they have funds to spend (super advanced users only)'))
        puts(colored.cyan('\nb: Go Back\n'))

    choice = choice_prompt(
        user_prompt=DEFAULT_PROMPT,
        acceptable_responses=[0, 1, 2, 3],
        default_input='1',
        show_default=True,
        quit_ok=True,
    )
    if choice is False:
        return

    if choice == '1':
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=False, used=True)
    elif choice == '2':
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=True, used=True)
    elif choice == '3':
        return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=None, used=False)
    elif choice == '0':
        return dump_all_keys_or_addrs(wallet_obj=wallet_obj)
[ "def", "dump_private_keys_or_addrs_chooser", "(", "wallet_obj", ")", ":", "if", "wallet_obj", ".", "private_key", ":", "puts", "(", "'Which private keys and addresses do you want?'", ")", "else", ":", "puts", "(", "'Which addresses do you want?'", ")", "with", "indent", "(", "2", ")", ":", "puts", "(", "colored", ".", "cyan", "(", "'1: Active - have funds to spend'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'2: Spent - no funds to spend (because they have been spent)'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'3: Unused - no funds to spend (because the address has never been used)'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'0: All (works offline) - regardless of whether they have funds to spend (super advanced users only)'", ")", ")", "puts", "(", "colored", ".", "cyan", "(", "'\\nb: Go Back\\n'", ")", ")", "choice", "=", "choice_prompt", "(", "user_prompt", "=", "DEFAULT_PROMPT", ",", "acceptable_responses", "=", "[", "0", ",", "1", ",", "2", ",", "3", "]", ",", "default_input", "=", "'1'", ",", "show_default", "=", "True", ",", "quit_ok", "=", "True", ",", ")", "if", "choice", "is", "False", ":", "return", "if", "choice", "==", "'1'", ":", "return", "dump_selected_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ",", "zero_balance", "=", "False", ",", "used", "=", "True", ")", "elif", "choice", "==", "'2'", ":", "return", "dump_selected_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ",", "zero_balance", "=", "True", ",", "used", "=", "True", ")", "elif", "choice", "==", "'3'", ":", "return", "dump_selected_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ",", "zero_balance", "=", "None", ",", "used", "=", "False", ")", "elif", "choice", "==", "'0'", ":", "return", "dump_all_keys_or_addrs", "(", "wallet_obj", "=", "wallet_obj", ")" ]
avg_line_len: 40.705882
score: 26.411765
def _push(self):
    """Offer to push changes, if needed."""
    push_cmds = self.vcs.push_commands()
    if not push_cmds:
        return
    if utils.ask("OK to push commits to the server?"):
        for push_cmd in push_cmds:
            output = utils.system(push_cmd)
            logger.info(output)
[ "def", "_push", "(", "self", ")", ":", "push_cmds", "=", "self", ".", "vcs", ".", "push_commands", "(", ")", "if", "not", "push_cmds", ":", "return", "if", "utils", ".", "ask", "(", "\"OK to push commits to the server?\"", ")", ":", "for", "push_cmd", "in", "push_cmds", ":", "output", "=", "utils", ".", "system", "(", "push_cmd", ")", "logger", ".", "info", "(", "output", ")" ]
avg_line_len: 32.8
score: 13.7
def principal_direction_extent(points):
    '''Calculate the extent of a set of 3D points.

    The extent is defined as the maximum distance between
    the projections on the principal directions of the covariance matrix
    of the points.

    Parameter:
        points : a 2D numpy array of points

    Returns:
        extents : the extents for each of the eigenvectors of the cov matrix
        eigs : eigenvalues of the covariance matrix
        eigv : respective eigenvectors of the covariance matrix
    '''
    # center the points around 0.0
    points = np.copy(points)
    points -= np.mean(points, axis=0)

    # principal components
    _, eigv = pca(points)

    extent = np.zeros(3)

    for i in range(eigv.shape[1]):
        # orthogonal projection onto the direction of the v component
        scalar_projs = np.sort(np.array([np.dot(p, eigv[:, i]) for p in points]))
        extent[i] = scalar_projs[-1]
        if scalar_projs[0] < 0.:
            # extend only this component by the negative minimum projection
            extent[i] -= scalar_projs[0]

    return extent
[ "def", "principal_direction_extent", "(", "points", ")", ":", "# center the points around 0.0", "points", "=", "np", ".", "copy", "(", "points", ")", "points", "-=", "np", ".", "mean", "(", "points", ",", "axis", "=", "0", ")", "# principal components", "_", ",", "eigv", "=", "pca", "(", "points", ")", "extent", "=", "np", ".", "zeros", "(", "3", ")", "for", "i", "in", "range", "(", "eigv", ".", "shape", "[", "1", "]", ")", ":", "# orthogonal projection onto the direction of the v component", "scalar_projs", "=", "np", ".", "sort", "(", "np", ".", "array", "(", "[", "np", ".", "dot", "(", "p", ",", "eigv", "[", ":", ",", "i", "]", ")", "for", "p", "in", "points", "]", ")", ")", "extent", "[", "i", "]", "=", "scalar_projs", "[", "-", "1", "]", "if", "scalar_projs", "[", "0", "]", "<", "0.", ":", "extent", "-=", "scalar_projs", "[", "0", "]", "return", "extent" ]
avg_line_len: 29.484848
score: 22.393939
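The snippet above relies on a pca() helper that is not part of this record. A minimal sketch of running it, under the assumption that pca() returns the eigenvalues and eigenvectors of the points' covariance matrix:

import numpy as np

def pca(points):
    # assumed stand-in for the pca() helper used by the snippet:
    # eigen-decomposition of the 3x3 covariance matrix
    return np.linalg.eigh(np.cov(points.T))

rng = np.random.default_rng(0)
# points stretched along x, so the largest extent should align with that axis
pts = rng.normal(size=(100, 3)) * np.array([10.0, 1.0, 0.1])
print(principal_direction_extent(pts))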
def delete(self):
    """
    Deletes the records in the current QuerySet.
    """
    del_query = self._clone()

    # The delete is actually 2 queries - one to find related objects,
    # and one to delete. Make sure that the discovery of related
    # objects is performed on the same database as the deletion.
    del_query._clear_ordering()

    get_es_connection(self.es_url, self.es_kwargs).delete_by_query(self.index, self.type, self._build_query())

    # Clear the result cache, in case this QuerySet gets reused.
    self._result_cache = None
[ "def", "delete", "(", "self", ")", ":", "del_query", "=", "self", ".", "_clone", "(", ")", "# The delete is actually 2 queries - one to find related objects,", "# and one to delete. Make sure that the discovery of related", "# objects is performed on the same database as the deletion.", "del_query", ".", "_clear_ordering", "(", ")", "get_es_connection", "(", "self", ".", "es_url", ",", "self", ".", "es_kwargs", ")", ".", "delete_by_query", "(", "self", ".", "index", ",", "self", ".", "type", ",", "self", ".", "_build_query", "(", ")", ")", "# Clear the result cache, in case this QuerySet gets reused.", "self", ".", "_result_cache", "=", "None" ]
avg_line_len: 44.846154
score: 21.923077
def _repr(obj):
    """Show the received object as precisely as possible."""
    vals = ", ".join("{}={!r}".format(
        name, getattr(obj, name)) for name in obj._attribs)
    if vals:
        t = "{}(name={}, {})".format(obj.__class__.__name__, obj.name, vals)
    else:
        t = "{}(name={})".format(obj.__class__.__name__, obj.name)
    return t
[ "def", "_repr", "(", "obj", ")", ":", "vals", "=", "\", \"", ".", "join", "(", "\"{}={!r}\"", ".", "format", "(", "name", ",", "getattr", "(", "obj", ",", "name", ")", ")", "for", "name", "in", "obj", ".", "_attribs", ")", "if", "vals", ":", "t", "=", "\"{}(name={}, {})\"", ".", "format", "(", "obj", ".", "__class__", ".", "__name__", ",", "obj", ".", "name", ",", "vals", ")", "else", ":", "t", "=", "\"{}(name={})\"", ".", "format", "(", "obj", ".", "__class__", ".", "__name__", ",", "obj", ".", "name", ")", "return", "t" ]
avg_line_len: 38.333333
score: 21.666667
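For illustration, a toy class (hypothetical, not from the source) showing what _repr produces: attributes listed in _attribs are rendered with repr(), while name is rendered bare.

class Node:
    _attribs = ("color", "weight")  # attributes _repr should include

    def __init__(self, name, color, weight):
        self.name, self.color, self.weight = name, color, weight

print(_repr(Node("n1", "red", 3)))
# Node(name=n1, color='red', weight=3)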
def get_user_profiles(self, prefix):
    """Get the user profile from the cache for the given prefix."""
    filepath = "{}{}".format(self.base_path, prefix)
    return UserProfiles(filepath, prefix)
[ "def", "get_user_profiles", "(", "self", ",", "prefix", ")", ":", "filepath", "=", "\"{}{}\"", ".", "format", "(", "self", ".", "base_path", ",", "prefix", ")", "return", "UserProfiles", "(", "filepath", ",", "prefix", ")" ]
avg_line_len: 51.5
score: 6.25
def configure(self, component, all_dependencies):
    ''' Ensure all config-time files have been generated. Return a
        dictionary of generated items.
    '''
    r = {}

    builddir = self.buildroot

    # only dependencies which are actually valid can contribute to the
    # config data (which includes the versions of all dependencies in its
    # build info) if the dependencies aren't available we can't tell what
    # version they are. Anything missing here should always be a test
    # dependency that isn't going to be used, otherwise the yotta build
    # command will fail before we get here
    available_dependencies = OrderedDict((k, v) for k, v in all_dependencies.items() if v)

    self.set_toplevel_definitions = ''

    if self.build_info_include_file is None:
        self.build_info_include_file, build_info_definitions = self.getBuildInfo(component.path, builddir)
        self.set_toplevel_definitions += build_info_definitions

    if self.config_include_file is None:
        self.config_include_file, config_definitions, self.config_json_file = self._getConfigData(available_dependencies, component, builddir, self.build_info_include_file)
        self.set_toplevel_definitions += config_definitions

    self.configured = True
    return {
        'merged_config_include': self.config_include_file,
        'merged_config_json': self.config_json_file,
        'build_info_include': self.build_info_include_file
    }
[ "def", "configure", "(", "self", ",", "component", ",", "all_dependencies", ")", ":", "r", "=", "{", "}", "builddir", "=", "self", ".", "buildroot", "# only dependencies which are actually valid can contribute to the", "# config data (which includes the versions of all dependencies in its", "# build info) if the dependencies aren't available we can't tell what", "# version they are. Anything missing here should always be a test", "# dependency that isn't going to be used, otherwise the yotta build", "# command will fail before we get here", "available_dependencies", "=", "OrderedDict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "all_dependencies", ".", "items", "(", ")", "if", "v", ")", "self", ".", "set_toplevel_definitions", "=", "''", "if", "self", ".", "build_info_include_file", "is", "None", ":", "self", ".", "build_info_include_file", ",", "build_info_definitions", "=", "self", ".", "getBuildInfo", "(", "component", ".", "path", ",", "builddir", ")", "self", ".", "set_toplevel_definitions", "+=", "build_info_definitions", "if", "self", ".", "config_include_file", "is", "None", ":", "self", ".", "config_include_file", ",", "config_definitions", ",", "self", ".", "config_json_file", "=", "self", ".", "_getConfigData", "(", "available_dependencies", ",", "component", ",", "builddir", ",", "self", ".", "build_info_include_file", ")", "self", ".", "set_toplevel_definitions", "+=", "config_definitions", "self", ".", "configured", "=", "True", "return", "{", "'merged_config_include'", ":", "self", ".", "config_include_file", ",", "'merged_config_json'", ":", "self", ".", "config_json_file", ",", "'build_info_include'", ":", "self", ".", "build_info_include_file", "}" ]
avg_line_len: 49.225806
score: 30.322581
def rename(self, fnames=None):
    """Rename files"""
    if fnames is None:
        fnames = self.get_selected_filenames()
    if not isinstance(fnames, (tuple, list)):
        fnames = [fnames]
    for fname in fnames:
        self.rename_file(fname)
[ "def", "rename", "(", "self", ",", "fnames", "=", "None", ")", ":", "if", "fnames", "is", "None", ":", "fnames", "=", "self", ".", "get_selected_filenames", "(", ")", "if", "not", "isinstance", "(", "fnames", ",", "(", "tuple", ",", "list", ")", ")", ":", "fnames", "=", "[", "fnames", "]", "for", "fname", "in", "fnames", ":", "self", ".", "rename_file", "(", "fname", ")" ]
avg_line_len: 35
score: 8.625
def merge(self, other, merge_body=True):
    """
    Default merge method.

    Args:
        other: another MujocoXML instance
            raises XML error if @other is not a MujocoXML instance.
            merges <worldbody/>, <actuator/> and <asset/> of @other into @self
        merge_body: True if merging child bodies of @other. Defaults to True.
    """
    if not isinstance(other, MujocoXML):
        raise XMLError("{} is not a MujocoXML instance.".format(type(other)))
    if merge_body:
        for body in other.worldbody:
            self.worldbody.append(body)
    self.merge_asset(other)
    for one_actuator in other.actuator:
        self.actuator.append(one_actuator)
    for one_equality in other.equality:
        self.equality.append(one_equality)
    for one_contact in other.contact:
        self.contact.append(one_contact)
    for one_default in other.default:
        self.default.append(one_default)
[ "def", "merge", "(", "self", ",", "other", ",", "merge_body", "=", "True", ")", ":", "if", "not", "isinstance", "(", "other", ",", "MujocoXML", ")", ":", "raise", "XMLError", "(", "\"{} is not a MujocoXML instance.\"", ".", "format", "(", "type", "(", "other", ")", ")", ")", "if", "merge_body", ":", "for", "body", "in", "other", ".", "worldbody", ":", "self", ".", "worldbody", ".", "append", "(", "body", ")", "self", ".", "merge_asset", "(", "other", ")", "for", "one_actuator", "in", "other", ".", "actuator", ":", "self", ".", "actuator", ".", "append", "(", "one_actuator", ")", "for", "one_equality", "in", "other", ".", "equality", ":", "self", ".", "equality", ".", "append", "(", "one_equality", ")", "for", "one_contact", "in", "other", ".", "contact", ":", "self", ".", "contact", ".", "append", "(", "one_contact", ")", "for", "one_default", "in", "other", ".", "default", ":", "self", ".", "default", ".", "append", "(", "one_default", ")" ]
avg_line_len: 41.333333
score: 12.5
def _check_directory_win(name,
                         win_owner=None,
                         win_perms=None,
                         win_deny_perms=None,
                         win_inheritance=None,
                         win_perms_reset=None):
    '''
    Check what changes need to be made on a directory
    '''
    changes = {}

    if not os.path.isdir(name):
        changes = {name: {'directory': 'new'}}
    else:
        # Check owner by SID
        if win_owner is not None:
            current_owner = salt.utils.win_dacl.get_owner(name)
            current_owner_sid = salt.utils.win_functions.get_sid_from_name(current_owner)
            expected_owner_sid = salt.utils.win_functions.get_sid_from_name(win_owner)
            if not current_owner_sid == expected_owner_sid:
                changes['owner'] = win_owner

        # Check perms
        perms = salt.utils.win_dacl.get_permissions(name)

        # Verify Permissions
        if win_perms is not None:
            for user in win_perms:
                # Check that user exists:
                try:
                    salt.utils.win_dacl.get_name(user)
                except CommandExecutionError:
                    continue

                grant_perms = []
                # Check for permissions
                if isinstance(win_perms[user]['perms'], six.string_types):
                    if not salt.utils.win_dacl.has_permission(
                            name, user, win_perms[user]['perms']):
                        grant_perms = win_perms[user]['perms']
                else:
                    for perm in win_perms[user]['perms']:
                        if not salt.utils.win_dacl.has_permission(
                                name, user, perm, exact=False):
                            grant_perms.append(win_perms[user]['perms'])
                if grant_perms:
                    if 'grant_perms' not in changes:
                        changes['grant_perms'] = {}
                    if user not in changes['grant_perms']:
                        changes['grant_perms'][user] = {}
                    changes['grant_perms'][user]['perms'] = grant_perms

                # Check Applies to
                if 'applies_to' not in win_perms[user]:
                    applies_to = 'this_folder_subfolders_files'
                else:
                    applies_to = win_perms[user]['applies_to']

                if user in perms:
                    user = salt.utils.win_dacl.get_name(user)

                    # Get the proper applies_to text
                    at_flag = salt.utils.win_dacl.flags().ace_prop['file'][applies_to]
                    applies_to_text = salt.utils.win_dacl.flags().ace_prop['file'][at_flag]

                    if 'grant' in perms[user]:
                        if not perms[user]['grant']['applies to'] == applies_to_text:
                            if 'grant_perms' not in changes:
                                changes['grant_perms'] = {}
                            if user not in changes['grant_perms']:
                                changes['grant_perms'][user] = {}
                            changes['grant_perms'][user]['applies_to'] = applies_to

        # Verify Deny Permissions
        if win_deny_perms is not None:
            for user in win_deny_perms:
                # Check that user exists:
                try:
                    salt.utils.win_dacl.get_name(user)
                except CommandExecutionError:
                    continue

                deny_perms = []
                # Check for permissions
                if isinstance(win_deny_perms[user]['perms'], six.string_types):
                    if not salt.utils.win_dacl.has_permission(
                            name, user, win_deny_perms[user]['perms'], 'deny'):
                        deny_perms = win_deny_perms[user]['perms']
                else:
                    for perm in win_deny_perms[user]['perms']:
                        if not salt.utils.win_dacl.has_permission(
                                name, user, perm, 'deny', exact=False):
                            deny_perms.append(win_deny_perms[user]['perms'])
                if deny_perms:
                    if 'deny_perms' not in changes:
                        changes['deny_perms'] = {}
                    if user not in changes['deny_perms']:
                        changes['deny_perms'][user] = {}
                    changes['deny_perms'][user]['perms'] = deny_perms

                # Check Applies to
                if 'applies_to' not in win_deny_perms[user]:
                    applies_to = 'this_folder_subfolders_files'
                else:
                    applies_to = win_deny_perms[user]['applies_to']

                if user in perms:
                    user = salt.utils.win_dacl.get_name(user)

                    # Get the proper applies_to text
                    at_flag = salt.utils.win_dacl.flags().ace_prop['file'][applies_to]
                    applies_to_text = salt.utils.win_dacl.flags().ace_prop['file'][at_flag]

                    if 'deny' in perms[user]:
                        if not perms[user]['deny']['applies to'] == applies_to_text:
                            if 'deny_perms' not in changes:
                                changes['deny_perms'] = {}
                            if user not in changes['deny_perms']:
                                changes['deny_perms'][user] = {}
                            changes['deny_perms'][user]['applies_to'] = applies_to

        # Check inheritance
        if win_inheritance is not None:
            if not win_inheritance == salt.utils.win_dacl.get_inheritance(name):
                changes['inheritance'] = win_inheritance

        # Check reset
        if win_perms_reset:
            for user_name in perms:
                if user_name not in win_perms:
                    if 'grant' in perms[user_name] and not perms[user_name]['grant']['inherited']:
                        if 'remove_perms' not in changes:
                            changes['remove_perms'] = {}
                        changes['remove_perms'].update({user_name: perms[user_name]})
                if user_name not in win_deny_perms:
                    if 'deny' in perms[user_name] and not perms[user_name]['deny']['inherited']:
                        if 'remove_perms' not in changes:
                            changes['remove_perms'] = {}
                        changes['remove_perms'].update({user_name: perms[user_name]})

    if changes:
        return None, 'The directory "{0}" will be changed'.format(name), changes

    return True, 'The directory {0} is in the correct state'.format(name), changes
[ "def", "_check_directory_win", "(", "name", ",", "win_owner", "=", "None", ",", "win_perms", "=", "None", ",", "win_deny_perms", "=", "None", ",", "win_inheritance", "=", "None", ",", "win_perms_reset", "=", "None", ")", ":", "changes", "=", "{", "}", "if", "not", "os", ".", "path", ".", "isdir", "(", "name", ")", ":", "changes", "=", "{", "name", ":", "{", "'directory'", ":", "'new'", "}", "}", "else", ":", "# Check owner by SID", "if", "win_owner", "is", "not", "None", ":", "current_owner", "=", "salt", ".", "utils", ".", "win_dacl", ".", "get_owner", "(", "name", ")", "current_owner_sid", "=", "salt", ".", "utils", ".", "win_functions", ".", "get_sid_from_name", "(", "current_owner", ")", "expected_owner_sid", "=", "salt", ".", "utils", ".", "win_functions", ".", "get_sid_from_name", "(", "win_owner", ")", "if", "not", "current_owner_sid", "==", "expected_owner_sid", ":", "changes", "[", "'owner'", "]", "=", "win_owner", "# Check perms", "perms", "=", "salt", ".", "utils", ".", "win_dacl", ".", "get_permissions", "(", "name", ")", "# Verify Permissions", "if", "win_perms", "is", "not", "None", ":", "for", "user", "in", "win_perms", ":", "# Check that user exists:", "try", ":", "salt", ".", "utils", ".", "win_dacl", ".", "get_name", "(", "user", ")", "except", "CommandExecutionError", ":", "continue", "grant_perms", "=", "[", "]", "# Check for permissions", "if", "isinstance", "(", "win_perms", "[", "user", "]", "[", "'perms'", "]", ",", "six", ".", "string_types", ")", ":", "if", "not", "salt", ".", "utils", ".", "win_dacl", ".", "has_permission", "(", "name", ",", "user", ",", "win_perms", "[", "user", "]", "[", "'perms'", "]", ")", ":", "grant_perms", "=", "win_perms", "[", "user", "]", "[", "'perms'", "]", "else", ":", "for", "perm", "in", "win_perms", "[", "user", "]", "[", "'perms'", "]", ":", "if", "not", "salt", ".", "utils", ".", "win_dacl", ".", "has_permission", "(", "name", ",", "user", ",", "perm", ",", "exact", "=", "False", ")", ":", "grant_perms", ".", "append", "(", "win_perms", "[", "user", "]", "[", "'perms'", "]", ")", "if", "grant_perms", ":", "if", "'grant_perms'", "not", "in", "changes", ":", "changes", "[", "'grant_perms'", "]", "=", "{", "}", "if", "user", "not", "in", "changes", "[", "'grant_perms'", "]", ":", "changes", "[", "'grant_perms'", "]", "[", "user", "]", "=", "{", "}", "changes", "[", "'grant_perms'", "]", "[", "user", "]", "[", "'perms'", "]", "=", "grant_perms", "# Check Applies to", "if", "'applies_to'", "not", "in", "win_perms", "[", "user", "]", ":", "applies_to", "=", "'this_folder_subfolders_files'", "else", ":", "applies_to", "=", "win_perms", "[", "user", "]", "[", "'applies_to'", "]", "if", "user", "in", "perms", ":", "user", "=", "salt", ".", "utils", ".", "win_dacl", ".", "get_name", "(", "user", ")", "# Get the proper applies_to text", "at_flag", "=", "salt", ".", "utils", ".", "win_dacl", ".", "flags", "(", ")", ".", "ace_prop", "[", "'file'", "]", "[", "applies_to", "]", "applies_to_text", "=", "salt", ".", "utils", ".", "win_dacl", ".", "flags", "(", ")", ".", "ace_prop", "[", "'file'", "]", "[", "at_flag", "]", "if", "'grant'", "in", "perms", "[", "user", "]", ":", "if", "not", "perms", "[", "user", "]", "[", "'grant'", "]", "[", "'applies to'", "]", "==", "applies_to_text", ":", "if", "'grant_perms'", "not", "in", "changes", ":", "changes", "[", "'grant_perms'", "]", "=", "{", "}", "if", "user", "not", "in", "changes", "[", "'grant_perms'", "]", ":", "changes", "[", "'grant_perms'", "]", "[", "user", "]", "=", "{", "}", "changes", 
"[", "'grant_perms'", "]", "[", "user", "]", "[", "'applies_to'", "]", "=", "applies_to", "# Verify Deny Permissions", "if", "win_deny_perms", "is", "not", "None", ":", "for", "user", "in", "win_deny_perms", ":", "# Check that user exists:", "try", ":", "salt", ".", "utils", ".", "win_dacl", ".", "get_name", "(", "user", ")", "except", "CommandExecutionError", ":", "continue", "deny_perms", "=", "[", "]", "# Check for permissions", "if", "isinstance", "(", "win_deny_perms", "[", "user", "]", "[", "'perms'", "]", ",", "six", ".", "string_types", ")", ":", "if", "not", "salt", ".", "utils", ".", "win_dacl", ".", "has_permission", "(", "name", ",", "user", ",", "win_deny_perms", "[", "user", "]", "[", "'perms'", "]", ",", "'deny'", ")", ":", "deny_perms", "=", "win_deny_perms", "[", "user", "]", "[", "'perms'", "]", "else", ":", "for", "perm", "in", "win_deny_perms", "[", "user", "]", "[", "'perms'", "]", ":", "if", "not", "salt", ".", "utils", ".", "win_dacl", ".", "has_permission", "(", "name", ",", "user", ",", "perm", ",", "'deny'", ",", "exact", "=", "False", ")", ":", "deny_perms", ".", "append", "(", "win_deny_perms", "[", "user", "]", "[", "'perms'", "]", ")", "if", "deny_perms", ":", "if", "'deny_perms'", "not", "in", "changes", ":", "changes", "[", "'deny_perms'", "]", "=", "{", "}", "if", "user", "not", "in", "changes", "[", "'deny_perms'", "]", ":", "changes", "[", "'deny_perms'", "]", "[", "user", "]", "=", "{", "}", "changes", "[", "'deny_perms'", "]", "[", "user", "]", "[", "'perms'", "]", "=", "deny_perms", "# Check Applies to", "if", "'applies_to'", "not", "in", "win_deny_perms", "[", "user", "]", ":", "applies_to", "=", "'this_folder_subfolders_files'", "else", ":", "applies_to", "=", "win_deny_perms", "[", "user", "]", "[", "'applies_to'", "]", "if", "user", "in", "perms", ":", "user", "=", "salt", ".", "utils", ".", "win_dacl", ".", "get_name", "(", "user", ")", "# Get the proper applies_to text", "at_flag", "=", "salt", ".", "utils", ".", "win_dacl", ".", "flags", "(", ")", ".", "ace_prop", "[", "'file'", "]", "[", "applies_to", "]", "applies_to_text", "=", "salt", ".", "utils", ".", "win_dacl", ".", "flags", "(", ")", ".", "ace_prop", "[", "'file'", "]", "[", "at_flag", "]", "if", "'deny'", "in", "perms", "[", "user", "]", ":", "if", "not", "perms", "[", "user", "]", "[", "'deny'", "]", "[", "'applies to'", "]", "==", "applies_to_text", ":", "if", "'deny_perms'", "not", "in", "changes", ":", "changes", "[", "'deny_perms'", "]", "=", "{", "}", "if", "user", "not", "in", "changes", "[", "'deny_perms'", "]", ":", "changes", "[", "'deny_perms'", "]", "[", "user", "]", "=", "{", "}", "changes", "[", "'deny_perms'", "]", "[", "user", "]", "[", "'applies_to'", "]", "=", "applies_to", "# Check inheritance", "if", "win_inheritance", "is", "not", "None", ":", "if", "not", "win_inheritance", "==", "salt", ".", "utils", ".", "win_dacl", ".", "get_inheritance", "(", "name", ")", ":", "changes", "[", "'inheritance'", "]", "=", "win_inheritance", "# Check reset", "if", "win_perms_reset", ":", "for", "user_name", "in", "perms", ":", "if", "user_name", "not", "in", "win_perms", ":", "if", "'grant'", "in", "perms", "[", "user_name", "]", "and", "not", "perms", "[", "user_name", "]", "[", "'grant'", "]", "[", "'inherited'", "]", ":", "if", "'remove_perms'", "not", "in", "changes", ":", "changes", "[", "'remove_perms'", "]", "=", "{", "}", "changes", "[", "'remove_perms'", "]", ".", "update", "(", "{", "user_name", ":", "perms", "[", "user_name", "]", "}", ")", "if", "user_name", "not", 
"in", "win_deny_perms", ":", "if", "'deny'", "in", "perms", "[", "user_name", "]", "and", "not", "perms", "[", "user_name", "]", "[", "'deny'", "]", "[", "'inherited'", "]", ":", "if", "'remove_perms'", "not", "in", "changes", ":", "changes", "[", "'remove_perms'", "]", "=", "{", "}", "changes", "[", "'remove_perms'", "]", ".", "update", "(", "{", "user_name", ":", "perms", "[", "user_name", "]", "}", ")", "if", "changes", ":", "return", "None", ",", "'The directory \"{0}\" will be changed'", ".", "format", "(", "name", ")", ",", "changes", "return", "True", ",", "'The directory {0} is in the correct state'", ".", "format", "(", "name", ")", ",", "changes" ]
avg_line_len: 45.645833
score: 22.493056
def getopt(self, p, default=None):
    """Returns the first option value stored that matches p or default."""
    for k, v in self.pairs:
        if k == p:
            return v
    return default
[ "def", "getopt", "(", "self", ",", "p", ",", "default", "=", "None", ")", ":", "for", "k", ",", "v", "in", "self", ".", "pairs", ":", "if", "k", "==", "p", ":", "return", "v", "return", "default" ]
avg_line_len: 35.166667
score: 11.166667
def get(self, uri, logon_required=True):
    """
    Perform the HTTP GET method against the resource identified by a URI,
    on the faked HMC.

    Parameters:

      uri (:term:`string`):
        Relative URI path of the resource, e.g. "/api/session".
        This URI is relative to the base URL of the session (see
        the :attr:`~zhmcclient.Session.base_url` property).
        Must not be `None`.

      logon_required (bool):
        Boolean indicating whether the operation requires that the session
        is logged on to the HMC.
        Because this is a faked HMC, this does not perform a real logon,
        but it is still used to update the state in the faked HMC.

    Returns:

      :term:`json object` with the operation result.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError` (not implemented)
      :exc:`~zhmcclient.AuthError` (not implemented)
      :exc:`~zhmcclient.ConnectionError`
    """
    try:
        return self._urihandler.get(self._hmc, uri, logon_required)
    except HTTPError as exc:
        raise zhmcclient.HTTPError(exc.response())
    except ConnectionError as exc:
        raise zhmcclient.ConnectionError(exc.message, None)
[ "def", "get", "(", "self", ",", "uri", ",", "logon_required", "=", "True", ")", ":", "try", ":", "return", "self", ".", "_urihandler", ".", "get", "(", "self", ".", "_hmc", ",", "uri", ",", "logon_required", ")", "except", "HTTPError", "as", "exc", ":", "raise", "zhmcclient", ".", "HTTPError", "(", "exc", ".", "response", "(", ")", ")", "except", "ConnectionError", "as", "exc", ":", "raise", "zhmcclient", ".", "ConnectionError", "(", "exc", ".", "message", ",", "None", ")" ]
avg_line_len: 34.783784
score: 22.027027
def mimeData(self, items):
    """
    Returns the mime data for dragging for this instance.

    :param      items | [<QTableWidgetItem>, ..]
    """
    func = self.dataCollector()
    if func:
        return func(self, items)

    return super(XTableWidget, self).mimeData(items)
[ "def", "mimeData", "(", "self", ",", "items", ")", ":", "func", "=", "self", ".", "dataCollector", "(", ")", "if", "(", "func", ")", ":", "return", "func", "(", "self", ",", "items", ")", "return", "super", "(", "XTableWidget", ",", "self", ")", ".", "mimeData", "(", "items", ")" ]
avg_line_len: 30
score: 13.636364
def _sf2(args):
    """
    A shallow wrapper for sigma_filter.

    Parameters
    ----------
    args : list
        A list of arguments for sigma_filter

    Returns
    -------
    None
    """
    # an easier to debug traceback when multiprocessing
    # thanks to https://stackoverflow.com/a/16618842/1710603
    try:
        return sigma_filter(*args)
    except:
        import traceback
        raise Exception("".join(traceback.format_exception(*sys.exc_info())))
[ "def", "_sf2", "(", "args", ")", ":", "# an easier to debug traceback when multiprocessing", "# thanks to https://stackoverflow.com/a/16618842/1710603", "try", ":", "return", "sigma_filter", "(", "*", "args", ")", "except", ":", "import", "traceback", "raise", "Exception", "(", "\"\"", ".", "join", "(", "traceback", ".", "format_exception", "(", "*", "sys", ".", "exc_info", "(", ")", ")", ")", ")" ]
avg_line_len: 22.7
score: 21.6
def add_commit(self, commit):
    """Adds the commit to the commits array if it doesn't already exist,
    and returns the commit's index in the array.
    """
    sha1 = commit.hex
    if sha1 in self._commits:
        return self._commits[sha1]
    title, separator, body = commit.message.partition("\n")
    commit = {
        'explored': False,
        'sha1': sha1,
        'name': GitUtils.abbreviate_sha1(sha1),
        'describe': GitUtils.describe(sha1),
        'refs': GitUtils.refs_to(sha1, self.repo()),
        'author_name': commit.author.name,
        'author_mail': commit.author.email,
        'author_time': commit.author.time,
        'author_offset': commit.author.offset,
        'committer_name': commit.committer.name,
        'committer_mail': commit.committer.email,
        'committer_time': commit.committer.time,
        'committer_offset': commit.committer.offset,
        # 'message': commit.message,
        'title': title,
        'separator': separator,
        'body': body.lstrip("\n"),
    }
    self._json['commits'].append(commit)
    self._commits[sha1] = len(self._json['commits']) - 1
    return self._commits[sha1]
[ "def", "add_commit", "(", "self", ",", "commit", ")", ":", "sha1", "=", "commit", ".", "hex", "if", "sha1", "in", "self", ".", "_commits", ":", "return", "self", ".", "_commits", "[", "sha1", "]", "title", ",", "separator", ",", "body", "=", "commit", ".", "message", ".", "partition", "(", "\"\\n\"", ")", "commit", "=", "{", "'explored'", ":", "False", ",", "'sha1'", ":", "sha1", ",", "'name'", ":", "GitUtils", ".", "abbreviate_sha1", "(", "sha1", ")", ",", "'describe'", ":", "GitUtils", ".", "describe", "(", "sha1", ")", ",", "'refs'", ":", "GitUtils", ".", "refs_to", "(", "sha1", ",", "self", ".", "repo", "(", ")", ")", ",", "'author_name'", ":", "commit", ".", "author", ".", "name", ",", "'author_mail'", ":", "commit", ".", "author", ".", "email", ",", "'author_time'", ":", "commit", ".", "author", ".", "time", ",", "'author_offset'", ":", "commit", ".", "author", ".", "offset", ",", "'committer_name'", ":", "commit", ".", "committer", ".", "name", ",", "'committer_mail'", ":", "commit", ".", "committer", ".", "email", ",", "'committer_time'", ":", "commit", ".", "committer", ".", "time", ",", "'committer_offset'", ":", "commit", ".", "committer", ".", "offset", ",", "# 'message': commit.message,", "'title'", ":", "title", ",", "'separator'", ":", "separator", ",", "'body'", ":", "body", ".", "lstrip", "(", "\"\\n\"", ")", ",", "}", "self", ".", "_json", "[", "'commits'", "]", ".", "append", "(", "commit", ")", "self", ".", "_commits", "[", "sha1", "]", "=", "len", "(", "self", ".", "_json", "[", "'commits'", "]", ")", "-", "1", "return", "self", ".", "_commits", "[", "sha1", "]" ]
avg_line_len: 41.466667
score: 10.5
def get_wd_search_results(search_string='',
                          mediawiki_api_url='https://www.wikidata.org/w/api.php',
                          user_agent=config['USER_AGENT_DEFAULT'],
                          max_results=500,
                          language='en'):
    """
    Performs a search in WD for a certain WD search string

    :param search_string: a string which should be searched for in WD
    :type search_string: str
    :param mediawiki_api_url: Specify the mediawiki_api_url.
    :type mediawiki_api_url: str
    :param user_agent: The user agent string transmitted in the http header
    :type user_agent: str
    :param max_results: The maximum number of search results returned. Default 500
    :type max_results: int
    :param language: The language in which to perform the search. Default 'en'
    :type language: str
    :return: returns a list of QIDs found in the search and a list of labels complementary to the QIDs
    """
    params = {
        'action': 'wbsearchentities',
        'language': language,
        'search': search_string,
        'format': 'json',
        'limit': 50
    }

    headers = {
        'User-Agent': user_agent
    }

    cont_count = 1
    id_list = []
    id_labels = []
    while cont_count > 0:
        params.update({'continue': 0 if cont_count == 1 else cont_count})

        reply = requests.get(mediawiki_api_url, params=params, headers=headers)
        reply.raise_for_status()
        search_results = reply.json()

        if search_results['success'] != 1:
            raise WDSearchError('WD search failed')
        else:
            for i in search_results['search']:
                id_list.append(i['id'])
                id_labels.append(i['label'])

        if 'search-continue' not in search_results:
            cont_count = 0
        else:
            cont_count = search_results['search-continue']

        if cont_count > max_results:
            break

    return id_list
[ "def", "get_wd_search_results", "(", "search_string", "=", "''", ",", "mediawiki_api_url", "=", "'https://www.wikidata.org/w/api.php'", ",", "user_agent", "=", "config", "[", "'USER_AGENT_DEFAULT'", "]", ",", "max_results", "=", "500", ",", "language", "=", "'en'", ")", ":", "params", "=", "{", "'action'", ":", "'wbsearchentities'", ",", "'language'", ":", "language", ",", "'search'", ":", "search_string", ",", "'format'", ":", "'json'", ",", "'limit'", ":", "50", "}", "headers", "=", "{", "'User-Agent'", ":", "user_agent", "}", "cont_count", "=", "1", "id_list", "=", "[", "]", "id_labels", "=", "[", "]", "while", "cont_count", ">", "0", ":", "params", ".", "update", "(", "{", "'continue'", ":", "0", "if", "cont_count", "==", "1", "else", "cont_count", "}", ")", "reply", "=", "requests", ".", "get", "(", "mediawiki_api_url", ",", "params", "=", "params", ",", "headers", "=", "headers", ")", "reply", ".", "raise_for_status", "(", ")", "search_results", "=", "reply", ".", "json", "(", ")", "if", "search_results", "[", "'success'", "]", "!=", "1", ":", "raise", "WDSearchError", "(", "'WD search failed'", ")", "else", ":", "for", "i", "in", "search_results", "[", "'search'", "]", ":", "id_list", ".", "append", "(", "i", "[", "'id'", "]", ")", "id_labels", ".", "append", "(", "i", "[", "'label'", "]", ")", "if", "'search-continue'", "not", "in", "search_results", ":", "cont_count", "=", "0", "else", ":", "cont_count", "=", "search_results", "[", "'search-continue'", "]", "if", "cont_count", ">", "max_results", ":", "break", "return", "id_list" ]
avg_line_len: 36.571429
score: 21.821429
def _consume(self):
    """
    Main loop for each thread, handles picking a task off the
    queue, processing it and notifying the queue that it is done.
    """
    while True:
        try:
            task, args, kwargs = self.queue.get(True)
            task(*args, **kwargs)
        finally:
            self.queue.task_done()
[ "def", "_consume", "(", "self", ")", ":", "while", "True", ":", "try", ":", "task", ",", "args", ",", "kwargs", "=", "self", ".", "queue", ".", "get", "(", "True", ")", "task", "(", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "self", ".", "queue", ".", "task_done", "(", ")" ]
avg_line_len: 35.5
score: 13.7
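A minimal sketch of how a consumer loop like _consume is typically wired up with the standard library (the names below are illustrative, not from the source):

import queue
import threading

q = queue.Queue()

def consume():
    # same shape as _consume above: pull (task, args, kwargs), run it, mark it done
    while True:
        task, args, kwargs = q.get(True)
        try:
            task(*args, **kwargs)
        finally:
            q.task_done()

threading.Thread(target=consume, daemon=True).start()
q.put((print, ("hello",), {}))
q.join()  # blocks until task_done() has been called for every queued item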
def ip_interface(address):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.

    Returns:
        An IPv4Interface or IPv6Interface object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address.

    Notes:
        The IPv?Interface classes describe an Address on a particular
        Network, so they're basically a combination of both the Address
        and Network classes.
    """
    try:
        return IPv4Interface(address)
    except (AddressValueError, NetmaskValueError):
        pass

    try:
        return IPv6Interface(address)
    except (AddressValueError, NetmaskValueError):
        pass

    raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
                     address)
[ "def", "ip_interface", "(", "address", ")", ":", "try", ":", "return", "IPv4Interface", "(", "address", ")", "except", "(", "AddressValueError", ",", "NetmaskValueError", ")", ":", "pass", "try", ":", "return", "IPv6Interface", "(", "address", ")", "except", "(", "AddressValueError", ",", "NetmaskValueError", ")", ":", "pass", "raise", "ValueError", "(", "'%r does not appear to be an IPv4 or IPv6 interface'", "%", "address", ")" ]
avg_line_len: 28.787879
score: 24.212121
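This snippet mirrors the standard library's ipaddress.ip_interface factory, so its behavior can be checked directly against the stdlib:

import ipaddress

iface = ipaddress.ip_interface('192.0.2.5/24')
print(type(iface).__name__)  # IPv4Interface
print(iface.network)         # 192.0.2.0/24
print(type(ipaddress.ip_interface('2001:db8::1/64')).__name__)  # IPv6Interface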
def emg_process(emg, sampling_rate=1000, emg_names=None, envelope_freqs=[10, 400],
                envelope_lfreq=4, activation_treshold="default",
                activation_n_above=0.25, activation_n_below=1):
    """
    Automated processing of EMG signal.

    Parameters
    ----------
    emg : list, array or DataFrame
        EMG signal array. Can include multiple channels.
    sampling_rate : int
        Sampling rate (samples/second).
    emg_names : list
        List of EMG channel names.
    envelope_freqs : list [fc_h, fc_l], optional
        cutoff frequencies for the band-pass filter (in Hz).
    envelope_lfreq : number, optional
        cutoff frequency for the low-pass filter (in Hz).
    activation_treshold : float
        minimum amplitude of `x` to detect.
    activation_n_above : float
        minimum continuous time (in s) greater than or equal to `threshold`
        to detect (but see the parameter `n_below`).
    activation_n_below : float
        minimum time (in s) below `threshold` that will be ignored in the
        detection of `x` >= `threshold`.

    Returns
    ----------
    processed_emg : dict
        Dict containing processed EMG features.
        Contains the EMG raw signal, the filtered signal and pulse onsets.
        This function is mainly a wrapper for the biosppy.emg.emg() function.
        Credits go to its authors.

    Example
    ----------
    >>> import neurokit as nk
    >>>
    >>> processed_emg = nk.emg_process(emg_signal)

    Notes
    ----------
    *Authors*

    - Dominique Makowski (https://github.com/DominiqueMakowski)

    *Dependencies*

    - biosppy
    - numpy
    - pandas

    *See Also*

    - BioSPPy: https://github.com/PIA-Group/BioSPPy

    References
    -----------
    - None
    """
    if emg_names is None:
        if isinstance(emg, pd.DataFrame):
            emg_names = emg.columns.values

    emg = np.array(emg)
    if len(np.shape(emg)) == 1:
        emg = np.array(pd.DataFrame(emg))

    if emg_names is None:
        if np.shape(emg)[1] > 1:
            emg_names = []
            for index in range(np.shape(emg)[1]):
                emg_names.append("EMG_" + str(index))
        else:
            emg_names = ["EMG"]

    processed_emg = {"df": pd.DataFrame()}
    for index, emg_chan in enumerate(emg.T):
        # Store Raw signal
        processed_emg["df"][emg_names[index] + "_Raw"] = emg_chan

        # Compute several features using biosppy
        biosppy_emg = dict(biosppy.emg.emg(emg_chan, sampling_rate=sampling_rate, show=False))

        # Store EMG pulse onsets
        pulse_onsets = np.array([np.nan]*len(emg))
        if len(biosppy_emg['onsets']) > 0:
            pulse_onsets[biosppy_emg['onsets']] = 1
        processed_emg["df"][emg_names[index] + "_Pulse_Onsets"] = pulse_onsets

        processed_emg["df"][emg_names[index] + "_Filtered"] = biosppy_emg["filtered"]

        processed_emg[emg_names[index]] = {}
        processed_emg[emg_names[index]]["EMG_Pulse_Onsets"] = biosppy_emg['onsets']

        # Envelope
        envelope = emg_linear_envelope(biosppy_emg["filtered"], sampling_rate=sampling_rate,
                                       freqs=envelope_freqs, lfreq=envelope_lfreq)
        processed_emg["df"][emg_names[index] + "_Envelope"] = envelope

        # Activation
        if activation_treshold == "default":
            activation_treshold = 1*np.std(envelope)
        processed_emg["df"][emg_names[index] + "_Activation"] = emg_find_activation(
            envelope, sampling_rate=sampling_rate, threshold=1*np.std(envelope),
            n_above=activation_n_above, n_below=activation_n_below)

    return(processed_emg)
[ "def", "emg_process", "(", "emg", ",", "sampling_rate", "=", "1000", ",", "emg_names", "=", "None", ",", "envelope_freqs", "=", "[", "10", ",", "400", "]", ",", "envelope_lfreq", "=", "4", ",", "activation_treshold", "=", "\"default\"", ",", "activation_n_above", "=", "0.25", ",", "activation_n_below", "=", "1", ")", ":", "if", "emg_names", "is", "None", ":", "if", "isinstance", "(", "emg", ",", "pd", ".", "DataFrame", ")", ":", "emg_names", "=", "emg", ".", "columns", ".", "values", "emg", "=", "np", ".", "array", "(", "emg", ")", "if", "len", "(", "np", ".", "shape", "(", "emg", ")", ")", "==", "1", ":", "emg", "=", "np", ".", "array", "(", "pd", ".", "DataFrame", "(", "emg", ")", ")", "if", "emg_names", "is", "None", ":", "if", "np", ".", "shape", "(", "emg", ")", "[", "1", "]", ">", "1", ":", "emg_names", "=", "[", "]", "for", "index", "in", "range", "(", "np", ".", "shape", "(", "emg", ")", "[", "1", "]", ")", ":", "emg_names", ".", "append", "(", "\"EMG_\"", "+", "str", "(", "index", ")", ")", "else", ":", "emg_names", "=", "[", "\"EMG\"", "]", "processed_emg", "=", "{", "\"df\"", ":", "pd", ".", "DataFrame", "(", ")", "}", "for", "index", ",", "emg_chan", "in", "enumerate", "(", "emg", ".", "T", ")", ":", "# Store Raw signal", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Raw\"", "]", "=", "emg_chan", "# Compute several features using biosppy", "biosppy_emg", "=", "dict", "(", "biosppy", ".", "emg", ".", "emg", "(", "emg_chan", ",", "sampling_rate", "=", "sampling_rate", ",", "show", "=", "False", ")", ")", "# Store EMG pulse onsets", "pulse_onsets", "=", "np", ".", "array", "(", "[", "np", ".", "nan", "]", "*", "len", "(", "emg", ")", ")", "if", "len", "(", "biosppy_emg", "[", "'onsets'", "]", ")", ">", "0", ":", "pulse_onsets", "[", "biosppy_emg", "[", "'onsets'", "]", "]", "=", "1", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Pulse_Onsets\"", "]", "=", "pulse_onsets", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Filtered\"", "]", "=", "biosppy_emg", "[", "\"filtered\"", "]", "processed_emg", "[", "emg_names", "[", "index", "]", "]", "=", "{", "}", "processed_emg", "[", "emg_names", "[", "index", "]", "]", "[", "\"EMG_Pulse_Onsets\"", "]", "=", "biosppy_emg", "[", "'onsets'", "]", "# Envelope", "envelope", "=", "emg_linear_envelope", "(", "biosppy_emg", "[", "\"filtered\"", "]", ",", "sampling_rate", "=", "sampling_rate", ",", "freqs", "=", "envelope_freqs", ",", "lfreq", "=", "envelope_lfreq", ")", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Envelope\"", "]", "=", "envelope", "# Activation", "if", "activation_treshold", "==", "\"default\"", ":", "activation_treshold", "=", "1", "*", "np", ".", "std", "(", "envelope", ")", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Activation\"", "]", "=", "emg_find_activation", "(", "envelope", ",", "sampling_rate", "=", "sampling_rate", ",", "threshold", "=", "1", "*", "np", ".", "std", "(", "envelope", ")", ",", "n_above", "=", "activation_n_above", ",", "n_below", "=", "activation_n_below", ")", "return", "(", "processed_emg", ")" ]
32.698113
27.207547
def _adaptive(self, gamma=1.0, relative_tolerance=1.0e-8, maximum_iterations=1000, verbose=True, print_warning=True):
    """
    Determine dimensionless free energies by a combination of Newton-Raphson iteration and self-consistent iteration.
    Picks whichever method gives the lower gradient norm.
    It is slower than plain NR (whose Hessian is approximated, not exactly calculated) since it calculates the log norms twice each iteration.

    OPTIONAL ARGUMENTS
      gamma (float between 0 and 1) - scaling factor for NR steps.
      relative_tolerance (float between 0 and 1) - relative tolerance for convergence (default 1.0e-8)
      maximum_iterations (int) - maximum number of Newton-Raphson iterations (default 1000)
      verbose (boolean) - verbosity level for debug output
      print_warning (boolean) - whether to print a warning if the iteration limit is reached

    NOTES

    This method determines the dimensionless free energies by
    minimizing a convex function whose solution is the desired
    estimator. The original idea came from the construction of a
    likelihood function that independently reproduced the work of
    Geyer (see [1] and Section 6 of [2]).
    This can alternatively be formulated as a root-finding algorithm for the Z-estimator.
    More details of this procedure will follow in a subsequent paper.
    Only those states with nonzero counts are included in the estimation procedure.

    REFERENCES
    See Appendix C.2 of [1].
    """
    if verbose:
        print("Determining dimensionless free energies by Newton-Raphson iteration.")

    # keep track of Newton-Raphson and self-consistent iterations
    nr_iter = 0
    sci_iter = 0

    N_k = self.N_k[self.states_with_samples]
    K = len(N_k)

    f_k_sci = np.zeros([K], dtype=np.float64)
    f_k_new = np.zeros([K], dtype=np.float64)

    # Perform Newton-Raphson iterations (with sci computed on the way)
    for iteration in range(0, maximum_iterations):
        # Store for new estimate of dimensionless relative free energies.
        f_k = self.f_k[self.states_with_samples].copy()

        # compute weights for gradients: the denominators and free energies are from the previous
        # iteration in most cases.
        (W_nk, f_k_sci) = self._computeWeights(recalc_denom=(iteration == 0), return_f_k=True)

        # Compute gradient and Hessian of last (K-1) states.
        #
        # gradient (defined by Eq. C6 of [1])
        # g_i(theta) = N_i - \sum_n N_i W_ni
        #
        # Hessian (defined by Eq. C9 of [1])
        # H_ii(theta) = - \sum_n N_i W_ni (1 - N_i W_ni)
        # H_ij(theta) = \sum_n N_i W_ni N_j W_nj
        #

        """
        g = np.matrix(np.zeros([K-1,1], dtype=np.float64)) # gradient
        H = np.matrix(np.zeros([K-1,K-1], dtype=np.float64)) # Hessian
        for i in range(1,K):
            g[i-1] = N_k[i] - N_k[i] * W_nk[:,i].sum()
            H[i-1,i-1] = - (N_k[i] * W_nk[:,i] * (1.0 - N_k[i] * W_nk[:,i])).sum()
            for j in range(1,i):
                H[i-1,j-1] = (N_k[i] * W_nk[:,i] * N_k[j] * W_nk[:,j]).sum()
                H[j-1,i-1] = H[i-1,j-1]

        # Update the free energy estimate (Eq. C11 of [1]).
        Hinvg = linalg.lstsq(H,g)[0] #
        # Hinvg = linalg.solve(H,g) # This might be faster if we can guarantee full rank.
        for k in range(0,K-1):
            f_k_new[k+1] = f_k[k+1] - gamma*Hinvg[k]

        """
        g = N_k - N_k * W_nk.sum(axis=0)
        NW = N_k * W_nk
        H = np.dot(NW.T, NW)
        H += (g.T - N_k) * np.eye(K)

        # Update the free energy estimate (Eq. C11 of [1]).
        # will always have lower rank the way it is set up
        Hinvg = linalg.lstsq(H, g)[0]
        Hinvg -= Hinvg[0]
        f_k_new = f_k - gamma * Hinvg

        # self-consistent iteration gradient norm and saved log sums.
        g_sci = self._gradientF(f_k_sci)
        gnorm_sci = np.dot(g_sci, g_sci)
        # save this so we can switch it back in if g_sci is lower.
        log_weight_denom = self.log_weight_denom.copy()

        # newton raphson gradient norm and saved log sums.
        g_nr = self._gradientF(f_k_new)
        gnorm_nr = np.dot(g_nr, g_nr)

        # we could save the gradient, too, but it's not too expensive to
        # compute since we are doing the Hessian anyway.

        if verbose:
            print("self consistent iteration gradient norm is %10.5g, Newton-Raphson gradient norm is %10.5g" % (gnorm_sci, gnorm_nr))

        # decide which direction to go depending on size of gradient norm
        if (gnorm_sci < gnorm_nr or sci_iter < 2):
            sci_iter += 1
            self.log_weight_denom = log_weight_denom.copy()
            if verbose:
                if sci_iter < 2:
                    print("Choosing self-consistent iteration on iteration %d" % iteration)
                else:
                    print("Choosing self-consistent iteration for lower gradient on iteration %d" % iteration)
            f_k_new = f_k_sci.copy()
        else:
            nr_iter += 1
            if verbose:
                print("Newton-Raphson used on iteration %d" % iteration)

        # get rid of big matrices that are not used.
        del(log_weight_denom, NW, W_nk)

        # have to set the free energies back in self, since the gradient
        # routine changes them.
        self.f_k[self.states_with_samples] = f_k
        if (self._amIdoneIterating(f_k_new, relative_tolerance, iteration, maximum_iterations, print_warning, verbose)):
            if verbose:
                print('Of %d iterations, %d were Newton-Raphson iterations and %d were self-consistent iterations' % (iteration + 1, nr_iter, sci_iter))
            break

    return
[ "def", "_adaptive", "(", "self", ",", "gamma", "=", "1.0", ",", "relative_tolerance", "=", "1.0e-8", ",", "maximum_iterations", "=", "1000", ",", "verbose", "=", "True", ",", "print_warning", "=", "True", ")", ":", "if", "verbose", ":", "print", "(", "\"Determining dimensionless free energies by Newton-Raphson iteration.\"", ")", "# keep track of Newton-Raphson and self-consistent iterations", "nr_iter", "=", "0", "sci_iter", "=", "0", "N_k", "=", "self", ".", "N_k", "[", "self", ".", "states_with_samples", "]", "K", "=", "len", "(", "N_k", ")", "f_k_sci", "=", "np", ".", "zeros", "(", "[", "K", "]", ",", "dtype", "=", "np", ".", "float64", ")", "f_k_new", "=", "np", ".", "zeros", "(", "[", "K", "]", ",", "dtype", "=", "np", ".", "float64", ")", "# Perform Newton-Raphson iterations (with sci computed on the way)", "for", "iteration", "in", "range", "(", "0", ",", "maximum_iterations", ")", ":", "# Store for new estimate of dimensionless relative free energies.", "f_k", "=", "self", ".", "f_k", "[", "self", ".", "states_with_samples", "]", ".", "copy", "(", ")", "# compute weights for gradients: the denominators and free energies are from the previous", "# iteration in most cases.", "(", "W_nk", ",", "f_k_sci", ")", "=", "self", ".", "_computeWeights", "(", "recalc_denom", "=", "(", "iteration", "==", "0", ")", ",", "return_f_k", "=", "True", ")", "# Compute gradient and Hessian of last (K-1) states.", "#", "# gradient (defined by Eq. C6 of [1])", "# g_i(theta) = N_i - \\sum_n N_i W_ni", "#", "# Hessian (defined by Eq. C9 of [1])", "# H_ii(theta) = - \\sum_n N_i W_ni (1 - N_i W_ni)", "# H_ij(theta) = \\sum_n N_i W_ni N_j W_nj", "#", "\"\"\"\n g = np.matrix(np.zeros([K-1,1], dtype=np.float64)) # gradient\n H = np.matrix(np.zeros([K-1,K-1], dtype=np.float64)) # Hessian\n for i in range(1,K):\n g[i-1] = N_k[i] - N_k[i] * W_nk[:,i].sum()\n H[i-1,i-1] = - (N_k[i] * W_nk[:,i] * (1.0 - N_k[i] * W_nk[:,i])).sum() \n for j in range(1,i):\n H[i-1,j-1] = (N_k[i] * W_nk[:,i] * N_k[j] * W_nk[:,j]).sum()\n H[j-1,i-1] = H[i-1,j-1]\n\n # Update the free energy estimate (Eq. C11 of [1]).\n Hinvg = linalg.lstsq(H,g)[0] #\n # Hinvg = linalg.solve(H,g) # This might be faster if we can guarantee full rank.\n for k in range(0,K-1):\n f_k_new[k+1] = f_k[k+1] - gamma*Hinvg[k]\n\n \"\"\"", "g", "=", "N_k", "-", "N_k", "*", "W_nk", ".", "sum", "(", "axis", "=", "0", ")", "NW", "=", "N_k", "*", "W_nk", "H", "=", "np", ".", "dot", "(", "NW", ".", "T", ",", "NW", ")", "H", "+=", "(", "g", ".", "T", "-", "N_k", ")", "*", "np", ".", "eye", "(", "K", ")", "# Update the free energy estimate (Eq. 
C11 of [1]).", "# will always have lower rank the way it is set up", "Hinvg", "=", "linalg", ".", "lstsq", "(", "H", ",", "g", ")", "[", "0", "]", "Hinvg", "-=", "Hinvg", "[", "0", "]", "f_k_new", "=", "f_k", "-", "gamma", "*", "Hinvg", "# self-consistent iteration gradient norm and saved log sums.", "g_sci", "=", "self", ".", "_gradientF", "(", "f_k_sci", ")", "gnorm_sci", "=", "np", ".", "dot", "(", "g_sci", ",", "g_sci", ")", "# save this so we can switch it back in if g_sci is lower.", "log_weight_denom", "=", "self", ".", "log_weight_denom", ".", "copy", "(", ")", "# newton raphson gradient norm and saved log sums.", "g_nr", "=", "self", ".", "_gradientF", "(", "f_k_new", ")", "gnorm_nr", "=", "np", ".", "dot", "(", "g_nr", ",", "g_nr", ")", "# we could save the gradient, too, but it's not too expensive to", "# compute since we are doing the Hessian anyway.", "if", "verbose", ":", "print", "(", "\"self consistent iteration gradient norm is %10.5g, Newton-Raphson gradient norm is %10.5g\"", "%", "(", "gnorm_sci", ",", "gnorm_nr", ")", ")", "# decide which directon to go depending on size of gradient norm", "if", "(", "gnorm_sci", "<", "gnorm_nr", "or", "sci_iter", "<", "2", ")", ":", "sci_iter", "+=", "1", "self", ".", "log_weight_denom", "=", "log_weight_denom", ".", "copy", "(", ")", "if", "verbose", ":", "if", "sci_iter", "<", "2", ":", "print", "(", "\"Choosing self-consistent iteration on iteration %d\"", "%", "iteration", ")", "else", ":", "print", "(", "\"Choosing self-consistent iteration for lower gradient on iteration %d\"", "%", "iteration", ")", "f_k_new", "=", "f_k_sci", ".", "copy", "(", ")", "else", ":", "nr_iter", "+=", "1", "if", "verbose", ":", "print", "(", "\"Newton-Raphson used on iteration %d\"", "%", "iteration", ")", "# get rid of big matrices that are not used.", "del", "(", "log_weight_denom", ",", "NW", ",", "W_nk", ")", "# have to set the free energies back in self, since the gradient", "# routine changes them.", "self", ".", "f_k", "[", "self", ".", "states_with_samples", "]", "=", "f_k", "if", "(", "self", ".", "_amIdoneIterating", "(", "f_k_new", ",", "relative_tolerance", ",", "iteration", ",", "maximum_iterations", ",", "print_warning", ",", "verbose", ")", ")", ":", "if", "verbose", ":", "print", "(", "'Of %d iterations, %d were Newton-Raphson iterations and %d were self-consistent iterations'", "%", "(", "iteration", "+", "1", ",", "nr_iter", ",", "sci_iter", ")", ")", "break", "return" ]
45.069767
27.813953
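The step-selection idea above can be shown in miniature. The following sketch (all names hypothetical) compares the gradient norm after a Newton-Raphson candidate step with the norm after a fixed-point (self-consistent) candidate and keeps the better one, mirroring the gnorm_sci / gnorm_nr comparison:

import numpy as np

def adaptive_step(grad, hess, fixed_point, x):
    # Newton-Raphson candidate: x - H^{-1} g
    x_nr = x - np.linalg.solve(hess(x), grad(x))
    # self-consistent (fixed-point) candidate
    x_sci = fixed_point(x)
    # keep whichever update leaves the smaller squared gradient norm
    gn_nr = grad(x_nr) @ grad(x_nr)
    gn_sci = grad(x_sci) @ grad(x_sci)
    return (x_sci, 'sci') if gn_sci < gn_nr else (x_nr, 'nr')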
def _set_main_widget(self, widget, redraw):
    """
    Add the provided widget to the widget list and display it.

    :param widget: widget to set as the main body
    :param redraw: bool, refresh the screen after setting the widget
    :return: None
    """
    self.set_body(widget)
    self.reload_footer()
    if redraw:
        logger.debug("redraw main widget")
        self.refresh()
[ "def", "_set_main_widget", "(", "self", ",", "widget", ",", "redraw", ")", ":", "self", ".", "set_body", "(", "widget", ")", "self", ".", "reload_footer", "(", ")", "if", "redraw", ":", "logger", ".", "debug", "(", "\"redraw main widget\"", ")", "self", ".", "refresh", "(", ")" ]
25.583333
13.916667
def add_docker_file_to_tarfile(self, docker_file, tar): """Add a Dockerfile to a tarfile""" with hp.a_temp_file() as dockerfile: log.debug("Context: ./Dockerfile") dockerfile.write("\n".join(docker_file.docker_lines).encode('utf-8')) dockerfile.seek(0) tar.add(dockerfile.name, arcname="./Dockerfile")
[ "def", "add_docker_file_to_tarfile", "(", "self", ",", "docker_file", ",", "tar", ")", ":", "with", "hp", ".", "a_temp_file", "(", ")", "as", "dockerfile", ":", "log", ".", "debug", "(", "\"Context: ./Dockerfile\"", ")", "dockerfile", ".", "write", "(", "\"\\n\"", ".", "join", "(", "docker_file", ".", "docker_lines", ")", ".", "encode", "(", "'utf-8'", ")", ")", "dockerfile", ".", "seek", "(", "0", ")", "tar", ".", "add", "(", "dockerfile", ".", "name", ",", "arcname", "=", "\"./Dockerfile\"", ")" ]
51.285714
13.714286
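The same effect can be achieved without a temporary file by writing the payload straight into the archive. A minimal standard-library sketch (helper name hypothetical) using tarfile.TarInfo and an in-memory buffer:

import io
import tarfile

def add_bytes_to_tar(tar, name, data):
    # Describe the entry, set its size, and stream the bytes in.
    info = tarfile.TarInfo(name=name)
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))

with tarfile.open('context.tar', 'w') as tar:
    add_bytes_to_tar(tar, './Dockerfile', b'FROM busybox\n')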
def is_header(self, elem, font_size, node, style=None):
    """Check whether a specific element is a header.

    :Returns:
      True or False
    """
    # This logic has been disabled for now. Mark this as header if it has
    # been marked during the parsing or mark.
    # if hasattr(elem, 'possible_header'):
    #     if elem.possible_header:
    #         return True

    # if not style:
    #     return False

    if hasattr(style, 'style_id'):
        fnt_size = _get_font_size(self.doc, style)

        from .importer import calculate_weight
        weight = calculate_weight(self.doc, elem)

        if weight > 50:
            return False

        if fnt_size in self.doc.possible_headers_style:
            return True
        return font_size in self.doc.possible_headers
    else:
        # Weight each font size found in the element by the amount of text
        # it covers, then check the smallest size against the known headers.
        list_of_sizes = {}
        for el in elem.elements:
            try:
                fs = get_style_fontsize(el)
                weight = len(el.value()) if el.value() else 0
                list_of_sizes[fs] = list_of_sizes.setdefault(fs, 0) + weight
            except Exception:
                pass

        sorted_list_of_sizes = sorted(list_of_sizes)

        if len(sorted_list_of_sizes) > 0:
            if sorted_list_of_sizes[0] != font_size:
                return sorted_list_of_sizes[0] in self.doc.possible_headers

        return font_size in self.doc.possible_headers
[ "def", "is_header", "(", "self", ",", "elem", ",", "font_size", ",", "node", ",", "style", "=", "None", ")", ":", "# This logic has been disabled for now. Mark this as header if it has", "# been marked during the parsing or mark.", "# if hasattr(elem, 'possible_header'):", "# if elem.possible_header: ", "# return True", "# if not style:", "# return False", "if", "hasattr", "(", "style", ",", "'style_id'", ")", ":", "fnt_size", "=", "_get_font_size", "(", "self", ".", "doc", ",", "style", ")", "from", ".", "importer", "import", "calculate_weight", "weight", "=", "calculate_weight", "(", "self", ".", "doc", ",", "elem", ")", "if", "weight", ">", "50", ":", "return", "False", "if", "fnt_size", "in", "self", ".", "doc", ".", "possible_headers_style", ":", "return", "True", "return", "font_size", "in", "self", ".", "doc", ".", "possible_headers", "else", ":", "list_of_sizes", "=", "{", "}", "for", "el", "in", "elem", ".", "elements", ":", "try", ":", "fs", "=", "get_style_fontsize", "(", "el", ")", "weight", "=", "len", "(", "el", ".", "value", "(", ")", ")", "if", "el", ".", "value", "(", ")", "else", "0", "list_of_sizes", "[", "fs", "]", "=", "list_of_sizes", ".", "setdefault", "(", "fs", ",", "0", ")", "+", "weight", "except", ":", "pass", "sorted_list_of_sizes", "=", "list", "(", "collections", ".", "OrderedDict", "(", "sorted", "(", "six", ".", "iteritems", "(", "list_of_sizes", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", ")", ")", "font_size_to_check", "=", "font_size", "if", "len", "(", "sorted_list_of_sizes", ")", ">", "0", ":", "if", "sorted_list_of_sizes", "[", "0", "]", "!=", "font_size", ":", "return", "sorted_list_of_sizes", "[", "0", "]", "in", "self", ".", "doc", ".", "possible_headers", "return", "font_size", "in", "self", ".", "doc", ".", "possible_headers" ]
34.291667
22.479167
def Nu_horizontal_cylinder(Pr, Gr, Method=None, AvailableMethods=False):
    r'''This function handles choosing which horizontal cylinder free convection
    correlation is used. Generally this is used by a helper class, but can be
    used directly. Will automatically select the correlation to use if none is
    provided; returns None if insufficient information is provided.

    Preferred functions are 'Morgan' when discontinuous results are acceptable
    and 'Churchill-Chu' otherwise.

    Parameters
    ----------
    Pr : float
        Prandtl number [-]
    Gr : float
        Grashof number [-]

    Returns
    -------
    Nu : float
        Nusselt number, [-]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to calculate Nu with the given inputs

    Other Parameters
    ----------------
    Method : string, optional
        A string of the function name to use, as in the dictionary
        horizontal_cylinder_correlations
    AvailableMethods : bool, optional
        If True, the function returns the list of methods that can be used to
        calculate Nu with the given inputs

    Examples
    --------
    >>> Nu_horizontal_cylinder(0.72, 1E7)
    24.864192615468973
    '''
    def list_methods():
        methods = list(horizontal_cylinder_correlations)
        if 'Morgan' in methods:
            methods.remove('Morgan')
            methods.insert(0, 'Morgan')
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]

    if Method in horizontal_cylinder_correlations:
        return horizontal_cylinder_correlations[Method](Pr=Pr, Gr=Gr)
    else:
        raise Exception("Correlation name not recognized; see the "
                        "documentation for the available options.")
[ "def", "Nu_horizontal_cylinder", "(", "Pr", ",", "Gr", ",", "Method", "=", "None", ",", "AvailableMethods", "=", "False", ")", ":", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "]", "for", "key", ",", "values", "in", "horizontal_cylinder_correlations", ".", "items", "(", ")", ":", "methods", ".", "append", "(", "key", ")", "if", "'Morgan'", "in", "methods", ":", "methods", ".", "remove", "(", "'Morgan'", ")", "methods", ".", "insert", "(", "0", ",", "'Morgan'", ")", "return", "methods", "if", "AvailableMethods", ":", "return", "list_methods", "(", ")", "if", "not", "Method", ":", "Method", "=", "list_methods", "(", ")", "[", "0", "]", "if", "Method", "in", "horizontal_cylinder_correlations", ":", "return", "horizontal_cylinder_correlations", "[", "Method", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ")", "else", ":", "raise", "Exception", "(", "\"Correlation name not recognized; see the \"", "\"documentation for the available options.\"", ")" ]
33.196429
23.053571
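The dispatch pattern behind this selector is a plain dictionary of callables. A sketch with simplified placeholder formulas (not the library's actual correlations; the registry name and constants here are illustrative only):

# hypothetical registry mirroring horizontal_cylinder_correlations
correlations = {
    'Morgan': lambda Pr, Gr: 0.48 * (Gr * Pr) ** 0.25,           # simplified placeholder
    'Churchill-Chu': lambda Pr, Gr: (0.60 + 0.387 * (Gr * Pr) ** (1 / 6)) ** 2,  # Pr correction omitted
}

def Nu(Pr, Gr, method=None):
    method = method or 'Morgan'   # preferred default, as above
    try:
        return correlations[method](Pr=Pr, Gr=Gr)
    except KeyError:
        raise ValueError('Correlation name not recognized: %r' % method)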
def observableFractionCDF(self, mask, distance_modulus, mass_min=0.1): """ Compute observable fraction of stars with masses greater than mass_min in each pixel in the interior region of the mask. Incorporates simplistic photometric errors. ADW: Careful, this function is fragile! The selection here should be the same as mask.restrictCatalogToObservable space. However, for technical reasons it is faster to do the calculation with broadcasting here. ADW: This function is currently a rate-limiting step in the likelihood calculation. Could it be faster? """ method = 'step' mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_min=mass_min,full_data_range=False) mag_1 = mag_1+distance_modulus mag_2 = mag_2+distance_modulus mask_1,mask_2 = mask.mask_roi_unique.T mag_err_1 = mask.photo_err_1(mask_1[:,np.newaxis]-mag_1) mag_err_2 = mask.photo_err_2(mask_2[:,np.newaxis]-mag_2) # "upper" bound set by maglim delta_hi_1 = (mask_1[:,np.newaxis]-mag_1)/mag_err_1 delta_hi_2 = (mask_2[:,np.newaxis]-mag_2)/mag_err_2 # "lower" bound set by bins_mag (maglim shouldn't be 0) delta_lo_1 = (mask.roi.bins_mag[0]-mag_1)/mag_err_1 delta_lo_2 = (mask.roi.bins_mag[0]-mag_2)/mag_err_2 cdf_1 = norm_cdf(delta_hi_1) - norm_cdf(delta_lo_1) cdf_2 = norm_cdf(delta_hi_2) - norm_cdf(delta_lo_2) cdf = cdf_1*cdf_2 if method is None or method == 'none': comp_cdf = cdf elif self.band_1_detection == True: comp = mask.mask_1.completeness(mag_1, method=method) comp_cdf = comp*cdf elif self.band_1_detection == False: comp =mask.mask_2.completeness(mag_2, method=method) comp_cdf = comp*cdf else: comp_1 = mask.mask_1.completeness(mag_1, method=method) comp_2 = mask.mask_2.completeness(mag_2, method=method) comp_cdf = comp_1*comp_2*cdf observable_fraction = (mass_pdf[np.newaxis]*comp_cdf).sum(axis=-1) return observable_fraction[mask.mask_roi_digi[mask.roi.pixel_interior_cut]]
[ "def", "observableFractionCDF", "(", "self", ",", "mask", ",", "distance_modulus", ",", "mass_min", "=", "0.1", ")", ":", "method", "=", "'step'", "mass_init", ",", "mass_pdf", ",", "mass_act", ",", "mag_1", ",", "mag_2", "=", "self", ".", "sample", "(", "mass_min", "=", "mass_min", ",", "full_data_range", "=", "False", ")", "mag_1", "=", "mag_1", "+", "distance_modulus", "mag_2", "=", "mag_2", "+", "distance_modulus", "mask_1", ",", "mask_2", "=", "mask", ".", "mask_roi_unique", ".", "T", "mag_err_1", "=", "mask", ".", "photo_err_1", "(", "mask_1", "[", ":", ",", "np", ".", "newaxis", "]", "-", "mag_1", ")", "mag_err_2", "=", "mask", ".", "photo_err_2", "(", "mask_2", "[", ":", ",", "np", ".", "newaxis", "]", "-", "mag_2", ")", "# \"upper\" bound set by maglim", "delta_hi_1", "=", "(", "mask_1", "[", ":", ",", "np", ".", "newaxis", "]", "-", "mag_1", ")", "/", "mag_err_1", "delta_hi_2", "=", "(", "mask_2", "[", ":", ",", "np", ".", "newaxis", "]", "-", "mag_2", ")", "/", "mag_err_2", "# \"lower\" bound set by bins_mag (maglim shouldn't be 0)", "delta_lo_1", "=", "(", "mask", ".", "roi", ".", "bins_mag", "[", "0", "]", "-", "mag_1", ")", "/", "mag_err_1", "delta_lo_2", "=", "(", "mask", ".", "roi", ".", "bins_mag", "[", "0", "]", "-", "mag_2", ")", "/", "mag_err_2", "cdf_1", "=", "norm_cdf", "(", "delta_hi_1", ")", "-", "norm_cdf", "(", "delta_lo_1", ")", "cdf_2", "=", "norm_cdf", "(", "delta_hi_2", ")", "-", "norm_cdf", "(", "delta_lo_2", ")", "cdf", "=", "cdf_1", "*", "cdf_2", "if", "method", "is", "None", "or", "method", "==", "'none'", ":", "comp_cdf", "=", "cdf", "elif", "self", ".", "band_1_detection", "==", "True", ":", "comp", "=", "mask", ".", "mask_1", ".", "completeness", "(", "mag_1", ",", "method", "=", "method", ")", "comp_cdf", "=", "comp", "*", "cdf", "elif", "self", ".", "band_1_detection", "==", "False", ":", "comp", "=", "mask", ".", "mask_2", ".", "completeness", "(", "mag_2", ",", "method", "=", "method", ")", "comp_cdf", "=", "comp", "*", "cdf", "else", ":", "comp_1", "=", "mask", ".", "mask_1", ".", "completeness", "(", "mag_1", ",", "method", "=", "method", ")", "comp_2", "=", "mask", ".", "mask_2", ".", "completeness", "(", "mag_2", ",", "method", "=", "method", ")", "comp_cdf", "=", "comp_1", "*", "comp_2", "*", "cdf", "observable_fraction", "=", "(", "mass_pdf", "[", "np", ".", "newaxis", "]", "*", "comp_cdf", ")", ".", "sum", "(", "axis", "=", "-", "1", ")", "return", "observable_fraction", "[", "mask", ".", "mask_roi_digi", "[", "mask", ".", "roi", ".", "pixel_interior_cut", "]", "]" ]
43.788462
22.134615
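The core of the calculation above is a windowed Gaussian CDF: the probability that a star's observed magnitude scatters into the observable range. A toy version with an assumed (hypothetical) photometric error model:

import numpy as np
from scipy.stats import norm

mag = np.array([20.0, 22.0, 23.5])
mag_err = 0.01 * 10 ** (0.4 * (mag - 21.0))   # hypothetical error growth near the limit
maglim, mag_bright = 23.0, 16.0
# detection probability between the bright bound and the magnitude limit
cdf = norm.cdf((maglim - mag) / mag_err) - norm.cdf((mag_bright - mag) / mag_err)
print(cdf)   # ~1 for stars well above the limit, ~0 for stars fainter than it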
def random_string(length=8, charset=None):
    '''
    Generates a string with random characters. If no charset is specified,
    only letters and digits are used.

    Args:
        length (int): length of the returned string
        charset (str): characters to choose from

    Returns:
        (str) random characters from charset

    Raises:
        ValueError: if length is smaller than 1
    '''
    if length < 1:
        raise ValueError('Length must be > 0')

    if not charset:
        # string.ascii_letters / range are the Python 3 spellings of the
        # original string.letters / xrange.
        charset = string.ascii_letters + string.digits

    return ''.join(random.choice(charset) for unused in range(length))
[ "def", "random_string", "(", "length", "=", "8", ",", "charset", "=", "None", ")", ":", "if", "length", "<", "1", ":", "raise", "ValueError", "(", "'Length must be > 0'", ")", "if", "not", "charset", ":", "charset", "=", "string", ".", "letters", "+", "string", ".", "digits", "return", "''", ".", "join", "(", "random", ".", "choice", "(", "charset", ")", "for", "unused", "in", "xrange", "(", "length", ")", ")" ]
31.5
22.166667
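Note that the random module is not cryptographically secure. For tokens or passwords, a sketch of the same idea using the standard-library secrets module:

import secrets
import string

charset = string.ascii_letters + string.digits
token = ''.join(secrets.choice(charset) for _ in range(8))
print(token)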
def find(name, app=None, components=None, raw=False):
    """
    Discover any named attributes, modules, or packages and coalesce the
    results.

    Looks in any module or package declared in the 'COMPONENTS' key in the
    application config. The order of the results matches the order in which
    the components were declared.

    @param[in] components
        An array of components; overrides any setting in the application
        config.

    @param[in] raw
        If True then no processing is done on the found items.
    """
    if components is None:
        if app is None:
            from flask import current_app as app

        components = app.config.get('COMPONENTS', [])

    items = []
    for key in components:
        # Attempt to import the component and access the specified name
        # as an attribute.
        module = import_module(key)
        item = getattr(module, name, None)

        if item is None:
            # Attempt to import a module or package in the component
            # with the specified name.
            try:
                item = import_module('.'.join((key, name)))

            except ImportError:
                # Assume this component has nothing under the specified name.
                continue

        if not raw:
            if isinstance(item, types.ModuleType):
                all_ = getattr(item, '__all__', None)
                if all_:
                    item = {n: getattr(item, n) for n in all_}

                else:
                    item = vars(item)

        items.append(item)

    return items
[ "def", "find", "(", "name", ",", "app", "=", "None", ",", "components", "=", "None", ",", "raw", "=", "False", ")", ":", "if", "components", "is", "None", ":", "if", "app", "is", "None", ":", "from", "flask", "import", "current_app", "as", "app", "components", "=", "app", ".", "config", ".", "get", "(", "'COMPONENTS'", ",", "[", "]", ")", "items", "=", "[", "]", "for", "key", "in", "components", ":", "# Attempt to import the component and access the specified name", "# as an attribute.", "module", "=", "import_module", "(", "key", ")", "item", "=", "getattr", "(", "module", ",", "name", ",", "None", ")", "if", "item", "is", "None", ":", "# Attempt to import a module or package in the component", "# with the specified name.", "try", ":", "item", "=", "import_module", "(", "'.'", ".", "join", "(", "(", "key", ",", "name", ")", ")", ")", "except", "ImportError", ":", "# Assume this component has nothing under the specified name.", "continue", "if", "not", "raw", ":", "if", "isinstance", "(", "item", ",", "types", ".", "ModuleType", ")", ":", "all_", "=", "getattr", "(", "item", ",", "'__all__'", ",", "None", ")", "if", "all_", ":", "item", "=", "{", "n", ":", "getattr", "(", "item", ",", "n", ")", "for", "n", "in", "all_", "}", "else", ":", "item", "=", "vars", "(", "item", ")", "items", ".", "append", "(", "item", ")", "return", "items" ]
28.666667
22.62963
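The attribute-then-submodule fallback is easy to use standalone. A sketch with hypothetical package names, collecting an `urls` object from each component:

from importlib import import_module

components = ['myapp.auth', 'myapp.billing']   # assumed to be importable packages
found = []
for key in components:
    module = import_module(key)
    item = getattr(module, 'urls', None)       # first: an attribute on the package
    if item is None:
        try:
            item = import_module(key + '.urls')  # fallback: a submodule
        except ImportError:
            continue                             # component has nothing by that name
    found.append(item)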
def select_fds(read_fds, timeout, selector=AutoSelector): """ Wait for a list of file descriptors (`read_fds`) to become ready for reading. This chooses the most appropriate select-tool for use in prompt-toolkit. """ # Map to ensure that we return the objects that were passed in originally. # Whether they are a fd integer or an object that has a fileno(). # (The 'poll' implementation for instance, returns always integers.) fd_map = dict((fd_to_int(fd), fd) for fd in read_fds) # Wait, using selector. sel = selector() try: for fd in read_fds: sel.register(fd) result = sel.select(timeout) if result is not None: return [fd_map[fd_to_int(fd)] for fd in result] finally: sel.close()
[ "def", "select_fds", "(", "read_fds", ",", "timeout", ",", "selector", "=", "AutoSelector", ")", ":", "# Map to ensure that we return the objects that were passed in originally.", "# Whether they are a fd integer or an object that has a fileno().", "# (The 'poll' implementation for instance, returns always integers.)", "fd_map", "=", "dict", "(", "(", "fd_to_int", "(", "fd", ")", ",", "fd", ")", "for", "fd", "in", "read_fds", ")", "# Wait, using selector.", "sel", "=", "selector", "(", ")", "try", ":", "for", "fd", "in", "read_fds", ":", "sel", ".", "register", "(", "fd", ")", "result", "=", "sel", ".", "select", "(", "timeout", ")", "if", "result", "is", "not", "None", ":", "return", "[", "fd_map", "[", "fd_to_int", "(", "fd", ")", "]", "for", "fd", "in", "result", "]", "finally", ":", "sel", ".", "close", "(", ")" ]
33.608696
22.043478
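A minimal standard-library equivalent of this wait, using the selectors module directly (the socket pair here is just to make one fd readable for the demo):

import selectors
import socket

sel = selectors.DefaultSelector()
a, b = socket.socketpair()
sel.register(a, selectors.EVENT_READ)
b.send(b'ping')                                   # make `a` readable
ready = [key.fileobj for key, _ in sel.select(timeout=1.0)]
assert a in ready
sel.close()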
def display_reports(self, layout): """display results encapsulated in the layout tree""" self.section = 0 if hasattr(layout, "report_id"): layout.children[0].children[0].data += " (%s)" % layout.report_id self._display(layout)
[ "def", "display_reports", "(", "self", ",", "layout", ")", ":", "self", ".", "section", "=", "0", "if", "hasattr", "(", "layout", ",", "\"report_id\"", ")", ":", "layout", ".", "children", "[", "0", "]", ".", "children", "[", "0", "]", ".", "data", "+=", "\" (%s)\"", "%", "layout", ".", "report_id", "self", ".", "_display", "(", "layout", ")" ]
44.166667
11.666667
def write_xml(self, root): """Write this source to an XML node.""" if not self.extended: try: source_element = utils.create_xml_element(root, 'source', dict(name=self['Source_Name'], type='PointSource')) except TypeError as msg: print (self['Source_Name'], self) raise TypeError(msg) spat_el = ElementTree.SubElement(source_element, 'spatialModel') spat_el.set('type', 'SkyDirFunction') elif self['SpatialType'] == 'SpatialMap': source_element = utils.create_xml_element(root, 'source', dict(name=self['Source_Name'], type='DiffuseSource')) filename = utils.path_to_xmlpath(self['Spatial_Filename']) spat_el = utils.create_xml_element(source_element, 'spatialModel', dict(map_based_integral='True', type='SpatialMap', file=filename)) else: source_element = utils.create_xml_element(root, 'source', dict(name=self['Source_Name'], type='DiffuseSource')) spat_el = utils.create_xml_element(source_element, 'spatialModel', dict(type=self['SpatialType'])) for k, v in self.spatial_pars.items(): utils.create_xml_element(spat_el, 'parameter', v) el = ElementTree.SubElement(source_element, 'spectrum') stype = self['SpectrumType'].strip() el.set('type', stype) if self['Spectrum_Filename'] is not None: filename = utils.path_to_xmlpath(self['Spectrum_Filename']) el.set('file', filename) for k, v in self.spectral_pars.items(): utils.create_xml_element(el, 'parameter', v)
[ "def", "write_xml", "(", "self", ",", "root", ")", ":", "if", "not", "self", ".", "extended", ":", "try", ":", "source_element", "=", "utils", ".", "create_xml_element", "(", "root", ",", "'source'", ",", "dict", "(", "name", "=", "self", "[", "'Source_Name'", "]", ",", "type", "=", "'PointSource'", ")", ")", "except", "TypeError", "as", "msg", ":", "print", "(", "self", "[", "'Source_Name'", "]", ",", "self", ")", "raise", "TypeError", "(", "msg", ")", "spat_el", "=", "ElementTree", ".", "SubElement", "(", "source_element", ",", "'spatialModel'", ")", "spat_el", ".", "set", "(", "'type'", ",", "'SkyDirFunction'", ")", "elif", "self", "[", "'SpatialType'", "]", "==", "'SpatialMap'", ":", "source_element", "=", "utils", ".", "create_xml_element", "(", "root", ",", "'source'", ",", "dict", "(", "name", "=", "self", "[", "'Source_Name'", "]", ",", "type", "=", "'DiffuseSource'", ")", ")", "filename", "=", "utils", ".", "path_to_xmlpath", "(", "self", "[", "'Spatial_Filename'", "]", ")", "spat_el", "=", "utils", ".", "create_xml_element", "(", "source_element", ",", "'spatialModel'", ",", "dict", "(", "map_based_integral", "=", "'True'", ",", "type", "=", "'SpatialMap'", ",", "file", "=", "filename", ")", ")", "else", ":", "source_element", "=", "utils", ".", "create_xml_element", "(", "root", ",", "'source'", ",", "dict", "(", "name", "=", "self", "[", "'Source_Name'", "]", ",", "type", "=", "'DiffuseSource'", ")", ")", "spat_el", "=", "utils", ".", "create_xml_element", "(", "source_element", ",", "'spatialModel'", ",", "dict", "(", "type", "=", "self", "[", "'SpatialType'", "]", ")", ")", "for", "k", ",", "v", "in", "self", ".", "spatial_pars", ".", "items", "(", ")", ":", "utils", ".", "create_xml_element", "(", "spat_el", ",", "'parameter'", ",", "v", ")", "el", "=", "ElementTree", ".", "SubElement", "(", "source_element", ",", "'spectrum'", ")", "stype", "=", "self", "[", "'SpectrumType'", "]", ".", "strip", "(", ")", "el", ".", "set", "(", "'type'", ",", "stype", ")", "if", "self", "[", "'Spectrum_Filename'", "]", "is", "not", "None", ":", "filename", "=", "utils", ".", "path_to_xmlpath", "(", "self", "[", "'Spectrum_Filename'", "]", ")", "el", ".", "set", "(", "'file'", ",", "filename", ")", "for", "k", ",", "v", "in", "self", ".", "spectral_pars", ".", "items", "(", ")", ":", "utils", ".", "create_xml_element", "(", "el", ",", "'parameter'", ",", "v", ")" ]
46.934783
26.73913
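The ElementTree pattern used above (SubElement with an attribute dict, then serialize) in a self-contained sketch; the source name and parameter values are hypothetical:

import xml.etree.ElementTree as ET

root = ET.Element('source_library')
src = ET.SubElement(root, 'source', dict(name='ExampleSrc', type='PointSource'))
spat = ET.SubElement(src, 'spatialModel')
spat.set('type', 'SkyDirFunction')
spec = ET.SubElement(src, 'spectrum')
spec.set('type', 'PowerLaw')
ET.SubElement(spec, 'parameter', dict(name='Index', value='2.0'))
print(ET.tostring(root, encoding='unicode'))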
def adjust_response(self, response): 'Locate placeholder magic strings and replace with content' try: c1 = self._replace(response.content, self.header_placeholder, self.embedded_holder.css) response.content = self._replace(c1, self.footer_placeholder, "\n".join([self.embedded_holder.config, self.embedded_holder.scripts])) except AttributeError: # Catch the "FileResponse instance has no `content` attribute" error when serving media files in the Django development server. pass return response
[ "def", "adjust_response", "(", "self", ",", "response", ")", ":", "try", ":", "c1", "=", "self", ".", "_replace", "(", "response", ".", "content", ",", "self", ".", "header_placeholder", ",", "self", ".", "embedded_holder", ".", "css", ")", "response", ".", "content", "=", "self", ".", "_replace", "(", "c1", ",", "self", ".", "footer_placeholder", ",", "\"\\n\"", ".", "join", "(", "[", "self", ".", "embedded_holder", ".", "config", ",", "self", ".", "embedded_holder", ".", "scripts", "]", ")", ")", "except", "AttributeError", ":", "# Catch the \"FileResponse instance has no `content` attribute\" error when serving media files in the Django development server.", "pass", "return", "response" ]
45.294118
29.176471
def _surfdens(self,R,z,phi=0.,t=0.): """ NAME: _surfdens PURPOSE: evaluate the surface density INPUT: R - Cylindrical Galactocentric radius z - vertical height phi - azimuth t - time OUTPUT: Sigma (R,z) HISTORY: 2018-08-19 - Written - Bovy (UofT) """ return nu.exp(-self._alpha*R)
[ "def", "_surfdens", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "return", "nu", ".", "exp", "(", "-", "self", ".", "_alpha", "*", "R", ")" ]
24.470588
13.647059
def as_required_fields(self, fields=None):
    """Set `required` to True on the given fields."""
    # Use None instead of a mutable default list, which would be shared
    # between calls.
    fields = self.filter_fields(fields or [])
    for f in fields:
        f = self.fields[f.name]
        f.required = True
[ "def", "as_required_fields", "(", "self", ",", "fields", "=", "[", "]", ")", ":", "fields", "=", "self", ".", "filter_fields", "(", "fields", ")", "for", "f", "in", "fields", ":", "f", "=", "self", ".", "fields", "[", "f", ".", "name", "]", "f", ".", "required", "=", "True" ]
34.5
5.833333
def getPixels(self): """ Return a stream of pixels from current Canvas. """ array = self.toArray() (width, height, depth) = array.size for x in range(width): for y in range(height): yield Pixel(array, x, y)
[ "def", "getPixels", "(", "self", ")", ":", "array", "=", "self", ".", "toArray", "(", ")", "(", "width", ",", "height", ",", "depth", ")", "=", "array", ".", "size", "for", "x", "in", "range", "(", "width", ")", ":", "for", "y", "in", "range", "(", "height", ")", ":", "yield", "Pixel", "(", "array", ",", "x", ",", "y", ")" ]
30.444444
6.888889
def resolve_job_ref(jbor, job_outputs={}, should_resolve=True): ''' :param jbor: a dict that is a valid job-based object reference :type jbor: dict :param job_outputs: a dict of finished local jobs to their output hashes :type job_outputs: :class:`collections.OrderedDict` :returns: the referenced value if present :raises: :exc:`Exception` if the job-based object reference cannot be resolved TODO: Support metadata references ''' ref_job_id = get_job_from_jbor(jbor) ref_job_field = get_field_from_jbor(jbor) ref_job_index = get_index_from_jbor(jbor) def resolve_from_hash(output_hash): if ref_job_index is None: return output_hash[ref_job_field] else: return output_hash[ref_job_field][ref_job_index] if is_localjob_id(ref_job_id): if job_outputs.get(ref_job_id) is None: if should_resolve: raise Exception('Job ' + ref_job_id + ' not found in local finished jobs') else: return jbor if ref_job_field not in job_outputs[ref_job_id]: raise Exception('Cannot resolve a JBOR with job ID ' + ref_job_id + ' because field "' + ref_job_field + '" was not found in its output') return resolve_from_hash(job_outputs[ref_job_id]) else: dxjob = dxpy.DXJob(ref_job_id) try: dxjob.wait_on_done() except Exception as e: raise Exception('Could not wait for ' + ref_job_id + ' to finish: ' + str(e)) job_desc = dxjob.describe() if ref_job_field not in job_desc['output']: raise Exception('Cannot resolve a JBOR with job ID ' + ref_job_id + ' because field "' + ref_job_field + '" was not found in its output') return resolve_from_hash(job_desc['output'])
[ "def", "resolve_job_ref", "(", "jbor", ",", "job_outputs", "=", "{", "}", ",", "should_resolve", "=", "True", ")", ":", "ref_job_id", "=", "get_job_from_jbor", "(", "jbor", ")", "ref_job_field", "=", "get_field_from_jbor", "(", "jbor", ")", "ref_job_index", "=", "get_index_from_jbor", "(", "jbor", ")", "def", "resolve_from_hash", "(", "output_hash", ")", ":", "if", "ref_job_index", "is", "None", ":", "return", "output_hash", "[", "ref_job_field", "]", "else", ":", "return", "output_hash", "[", "ref_job_field", "]", "[", "ref_job_index", "]", "if", "is_localjob_id", "(", "ref_job_id", ")", ":", "if", "job_outputs", ".", "get", "(", "ref_job_id", ")", "is", "None", ":", "if", "should_resolve", ":", "raise", "Exception", "(", "'Job '", "+", "ref_job_id", "+", "' not found in local finished jobs'", ")", "else", ":", "return", "jbor", "if", "ref_job_field", "not", "in", "job_outputs", "[", "ref_job_id", "]", ":", "raise", "Exception", "(", "'Cannot resolve a JBOR with job ID '", "+", "ref_job_id", "+", "' because field \"'", "+", "ref_job_field", "+", "'\" was not found in its output'", ")", "return", "resolve_from_hash", "(", "job_outputs", "[", "ref_job_id", "]", ")", "else", ":", "dxjob", "=", "dxpy", ".", "DXJob", "(", "ref_job_id", ")", "try", ":", "dxjob", ".", "wait_on_done", "(", ")", "except", "Exception", "as", "e", ":", "raise", "Exception", "(", "'Could not wait for '", "+", "ref_job_id", "+", "' to finish: '", "+", "str", "(", "e", ")", ")", "job_desc", "=", "dxjob", ".", "describe", "(", ")", "if", "ref_job_field", "not", "in", "job_desc", "[", "'output'", "]", ":", "raise", "Exception", "(", "'Cannot resolve a JBOR with job ID '", "+", "ref_job_id", "+", "' because field \"'", "+", "ref_job_field", "+", "'\" was not found in its output'", ")", "return", "resolve_from_hash", "(", "job_desc", "[", "'output'", "]", ")" ]
46.894737
22.684211
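Stripped of the local/remote handling, JBOR resolution is a field lookup plus an optional array index. A toy sketch (the dict shapes are assumed for illustration; real job-based object references carry more structure):

job_outputs = {'localjob-1': {'reads': ['a.fq', 'b.fq']}}   # hypothetical finished job

def resolve(jbor, outputs):
    value = outputs[jbor['job']][jbor['field']]
    index = jbor.get('index')
    return value if index is None else value[index]

print(resolve({'job': 'localjob-1', 'field': 'reads', 'index': 1}, job_outputs))  # b.fq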
def reset_tasks(self, request, context): """Resets all captured tasks.""" _log_request(request, context) self.listener.memory.clear_tasks() return clearly_pb2.Empty()
[ "def", "reset_tasks", "(", "self", ",", "request", ",", "context", ")", ":", "_log_request", "(", "request", ",", "context", ")", "self", ".", "listener", ".", "memory", ".", "clear_tasks", "(", ")", "return", "clearly_pb2", ".", "Empty", "(", ")" ]
38.8
2
def flexifunction_directory_encode(self, target_system, target_component, directory_type, start_index, count, directory_data):
    '''
    Acknowledge success or failure of a flexifunction command

    target_system             : System ID (uint8_t)
    target_component          : Component ID (uint8_t)
    directory_type            : 0=inputs, 1=outputs (uint8_t)
    start_index               : index of first directory entry to write (uint8_t)
    count                     : count of directory entries to write (uint8_t)
    directory_data            : Settings data (int8_t)

    '''
    return MAVLink_flexifunction_directory_message(target_system, target_component, directory_type, start_index, count, directory_data)
[ "def", "flexifunction_directory_encode", "(", "self", ",", "target_system", ",", "target_component", ",", "directory_type", ",", "start_index", ",", "count", ",", "directory_data", ")", ":", "return", "MAVLink_flexifunction_directory_message", "(", "target_system", ",", "target_component", ",", "directory_type", ",", "start_index", ",", "count", ",", "directory_data", ")" ]
64
42.769231
def _get_index(self): """ Get the anchor's index. This must return an ``int``. Subclasses may override this method. """ glyph = self.glyph if glyph is None: return None return glyph.anchors.index(self)
[ "def", "_get_index", "(", "self", ")", ":", "glyph", "=", "self", ".", "glyph", "if", "glyph", "is", "None", ":", "return", "None", "return", "glyph", ".", "anchors", ".", "index", "(", "self", ")" ]
24.363636
11.090909
def download_metadata_tar(self,download_dir): """ Downloads the ${run_name}.metadata.tar file from the DNAnexus sequencing results project. Args: download_dir: `str` - The local directory path to download the QC report to. Returns: `str`: The filepath to the downloaded metadata tarball. """ if not os.path.isdir(download_dir): os.makedirs(download_dir) res = dxpy.find_one_data_object(project=self.dx_project_id,folder=self.DX_RAW_DATA_FOLDER,name="*metadata.tar",name_mode="glob") #res will be something like {u'project': u'project-BzqVkxj08kVZbPXk54X0P2JY', u'id': u'file-BzqVkg800Fb0z4437GXJfGY6'} #dxpy.find_one_data_object() raises a dxpy.exceptions.DXSearchError() if nothing is found. dx_file = dxpy.DXFile(dxid=res["id"],project=res["project"]) download_file_name = os.path.join(download_dir,dx_file.name) msg = "{filename} to {download_dir}.".format(filename=dx_file.name,download_dir=download_dir) debug_logger.debug("Downloading " + msg) dxpy.bindings.dxfile_functions.download_dxfile(dxid=dx_file,filename=download_file_name) success_logger.info("Downloaded " + msg) return download_file_name
[ "def", "download_metadata_tar", "(", "self", ",", "download_dir", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "download_dir", ")", ":", "os", ".", "makedirs", "(", "download_dir", ")", "res", "=", "dxpy", ".", "find_one_data_object", "(", "project", "=", "self", ".", "dx_project_id", ",", "folder", "=", "self", ".", "DX_RAW_DATA_FOLDER", ",", "name", "=", "\"*metadata.tar\"", ",", "name_mode", "=", "\"glob\"", ")", "#res will be something like {u'project': u'project-BzqVkxj08kVZbPXk54X0P2JY', u'id': u'file-BzqVkg800Fb0z4437GXJfGY6'}", "#dxpy.find_one_data_object() raises a dxpy.exceptions.DXSearchError() if nothing is found.", "dx_file", "=", "dxpy", ".", "DXFile", "(", "dxid", "=", "res", "[", "\"id\"", "]", ",", "project", "=", "res", "[", "\"project\"", "]", ")", "download_file_name", "=", "os", ".", "path", ".", "join", "(", "download_dir", ",", "dx_file", ".", "name", ")", "msg", "=", "\"{filename} to {download_dir}.\"", ".", "format", "(", "filename", "=", "dx_file", ".", "name", ",", "download_dir", "=", "download_dir", ")", "debug_logger", ".", "debug", "(", "\"Downloading \"", "+", "msg", ")", "dxpy", ".", "bindings", ".", "dxfile_functions", ".", "download_dxfile", "(", "dxid", "=", "dx_file", ",", "filename", "=", "download_file_name", ")", "success_logger", ".", "info", "(", "\"Downloaded \"", "+", "msg", ")", "return", "download_file_name" ]
59.952381
31.666667
def rebuild(self): """Rebuild RIFF tree and index from streams.""" movi = self.riff.find('LIST', 'movi') movi.chunks = self.combine_streams() self.rebuild_index()
[ "def", "rebuild", "(", "self", ")", ":", "movi", "=", "self", ".", "riff", ".", "find", "(", "'LIST'", ",", "'movi'", ")", "movi", ".", "chunks", "=", "self", ".", "combine_streams", "(", ")", "self", ".", "rebuild_index", "(", ")" ]
38
8.6
def check_partition_status(method, uri, partition, valid_statuses=None, invalid_statuses=None): """ Check that the partition is in one of the valid statuses (if specified) and not in one of the invalid statuses (if specified), as indicated by its 'status' property. If the Partition object does not have a 'status' property set, this function does nothing (in order to make the mock support easy to use). Raises: ConflictError with reason 1 (reason 6 is not used for partitions). """ status = partition.properties.get('status', None) if status is None: # Do nothing if no status is set on the faked partition return if valid_statuses and status not in valid_statuses or \ invalid_statuses and status in invalid_statuses: if uri.startswith(partition.uri): # The uri targets the partition (either is the partition uri or # some multiplicity under the partition uri) raise ConflictError(method, uri, reason=1, message="The operation cannot be performed " "because the targeted partition {} has a " "status that is not valid for the operation: " "{}". format(partition.name, status)) else: # The uri targets a resource hosted by the partition raise ConflictError(method, uri, reason=1, # Note: 6 not used for partitions message="The operation cannot be performed " "because partition {} hosting the targeted " "resource has a status that is not valid for " "the operation: {}". format(partition.name, status))
[ "def", "check_partition_status", "(", "method", ",", "uri", ",", "partition", ",", "valid_statuses", "=", "None", ",", "invalid_statuses", "=", "None", ")", ":", "status", "=", "partition", ".", "properties", ".", "get", "(", "'status'", ",", "None", ")", "if", "status", "is", "None", ":", "# Do nothing if no status is set on the faked partition", "return", "if", "valid_statuses", "and", "status", "not", "in", "valid_statuses", "or", "invalid_statuses", "and", "status", "in", "invalid_statuses", ":", "if", "uri", ".", "startswith", "(", "partition", ".", "uri", ")", ":", "# The uri targets the partition (either is the partition uri or", "# some multiplicity under the partition uri)", "raise", "ConflictError", "(", "method", ",", "uri", ",", "reason", "=", "1", ",", "message", "=", "\"The operation cannot be performed \"", "\"because the targeted partition {} has a \"", "\"status that is not valid for the operation: \"", "\"{}\"", ".", "format", "(", "partition", ".", "name", ",", "status", ")", ")", "else", ":", "# The uri targets a resource hosted by the partition", "raise", "ConflictError", "(", "method", ",", "uri", ",", "reason", "=", "1", ",", "# Note: 6 not used for partitions", "message", "=", "\"The operation cannot be performed \"", "\"because partition {} hosting the targeted \"", "\"resource has a status that is not valid for \"", "\"the operation: {}\"", ".", "format", "(", "partition", ".", "name", ",", "status", ")", ")" ]
51.486486
24.135135
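The status gate above reduces to one reusable check. A generic sketch (names hypothetical) with the same semantics, including the "no status set means pass" rule for mock resources:

def check_status(status, valid=None, invalid=None):
    if status is None:
        return   # resources without a status pass, as in the mock support above
    if (valid and status not in valid) or (invalid and status in invalid):
        raise RuntimeError('invalid status for this operation: %s' % status)

check_status('active', valid=('active', 'degraded'))          # ok
try:
    check_status('stopped', invalid=('stopped', 'paused'))
except RuntimeError as exc:
    print(exc)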
def on_train_begin(self, **kwargs): "Call watch method to log model topology, gradients & weights" # Set self.best, method inherited from "TrackerCallback" by "SaveModelCallback" super().on_train_begin() # Ensure we don't call "watch" multiple times if not WandbCallback.watch_called: WandbCallback.watch_called = True # Logs model topology and optionally gradients and weights wandb.watch(self.learn.model, log=self.log)
[ "def", "on_train_begin", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Set self.best, method inherited from \"TrackerCallback\" by \"SaveModelCallback\"", "super", "(", ")", ".", "on_train_begin", "(", ")", "# Ensure we don't call \"watch\" multiple times", "if", "not", "WandbCallback", ".", "watch_called", ":", "WandbCallback", ".", "watch_called", "=", "True", "# Logs model topology and optionally gradients and weights", "wandb", ".", "watch", "(", "self", ".", "learn", ".", "model", ",", "log", "=", "self", ".", "log", ")" ]
40.75
22.916667
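A standalone sketch of the one-shot guard around wandb.watch (assumes wandb.init() has already been called; the helper name and module-level flag are illustrative):

import wandb

_watch_called = False

def watch_once(model):
    # wandb.watch should only be registered once per model, as guarded above.
    global _watch_called
    if not _watch_called:
        _watch_called = True
        wandb.watch(model, log='gradients')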
def teardown_callsite(self, state, return_val=None, arg_types=None, force_callee_cleanup=False): """ This function performs the actions of the callee as it's getting ready to return. It returns the address to return to. :param state: The state to mutate :param return_val: The value to return :param arg_types: The fp-ness of each of the args. Used to calculate sizes to clean up :param force_callee_cleanup: If we should clean up the stack allocation for the arguments even if it's not the callee's job to do so TODO: support the stack_base parameter from setup_callsite...? Does that make sense in this context? Maybe it could make sense by saying that you pass it in as something like the "saved base pointer" value? """ if return_val is not None: self.set_return_val(state, return_val) ret_addr = self.return_addr.get_value(state) if state.arch.sp_offset is not None: if force_callee_cleanup or self.CALLEE_CLEANUP: if arg_types is not None: session = self.arg_session state.regs.sp += self.stack_space([session.next_arg(x) for x in arg_types]) elif self.args is not None: state.regs.sp += self.stack_space(self.args) else: l.warning("Can't perform callee cleanup when I have no idea how many arguments there are! Assuming 0") state.regs.sp += self.STACKARG_SP_DIFF else: state.regs.sp += self.STACKARG_SP_DIFF return ret_addr
[ "def", "teardown_callsite", "(", "self", ",", "state", ",", "return_val", "=", "None", ",", "arg_types", "=", "None", ",", "force_callee_cleanup", "=", "False", ")", ":", "if", "return_val", "is", "not", "None", ":", "self", ".", "set_return_val", "(", "state", ",", "return_val", ")", "ret_addr", "=", "self", ".", "return_addr", ".", "get_value", "(", "state", ")", "if", "state", ".", "arch", ".", "sp_offset", "is", "not", "None", ":", "if", "force_callee_cleanup", "or", "self", ".", "CALLEE_CLEANUP", ":", "if", "arg_types", "is", "not", "None", ":", "session", "=", "self", ".", "arg_session", "state", ".", "regs", ".", "sp", "+=", "self", ".", "stack_space", "(", "[", "session", ".", "next_arg", "(", "x", ")", "for", "x", "in", "arg_types", "]", ")", "elif", "self", ".", "args", "is", "not", "None", ":", "state", ".", "regs", ".", "sp", "+=", "self", ".", "stack_space", "(", "self", ".", "args", ")", "else", ":", "l", ".", "warning", "(", "\"Can't perform callee cleanup when I have no idea how many arguments there are! Assuming 0\"", ")", "state", ".", "regs", ".", "sp", "+=", "self", ".", "STACKARG_SP_DIFF", "else", ":", "state", ".", "regs", ".", "sp", "+=", "self", ".", "STACKARG_SP_DIFF", "return", "ret_addr" ]
51.606061
29.424242
def write_src(hdf5_out, gctoo_object, out_file_name):
    """
    Writes src as attribute of gctx out file.

    Input:
        - hdf5_out (h5py): hdf5 file to write to
        - gctoo_object (GCToo): GCToo instance to be written to .gctx
        - out_file_name (str): name of hdf5 out file.
    """
    if gctoo_object.src is None:
        hdf5_out.attrs[src_attr] = out_file_name
    else:
        hdf5_out.attrs[src_attr] = gctoo_object.src
[ "def", "write_src", "(", "hdf5_out", ",", "gctoo_object", ",", "out_file_name", ")", ":", "if", "gctoo_object", ".", "src", "==", "None", ":", "hdf5_out", ".", "attrs", "[", "src_attr", "]", "=", "out_file_name", "else", ":", "hdf5_out", ".", "attrs", "[", "src_attr", "]", "=", "gctoo_object", ".", "src" ]
31.384615
13.923077
def can_create_repository_with_record_types(self, repository_record_types): """Tests if this user can create a single ``Repository`` using the desired record types. While ``RepositoryManager.getRepositoryRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Repository``. Providing an empty array tests if a ``Repository`` can be created with no records. arg: repository_record_types (osid.type.Type[]): array of repository record types return: (boolean) - ``true`` if ``Repository`` creation using the specified ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``repository_record_types`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.can_create_bin_with_record_types # NOTE: It is expected that real authentication hints will be # handled in a service adapter above the pay grade of this impl. if self._catalog_session is not None: return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=repository_record_types) return True
[ "def", "can_create_repository_with_record_types", "(", "self", ",", "repository_record_types", ")", ":", "# Implemented from template for", "# osid.resource.BinAdminSession.can_create_bin_with_record_types", "# NOTE: It is expected that real authentication hints will be", "# handled in a service adapter above the pay grade of this impl.", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "can_create_catalog_with_record_types", "(", "catalog_record_types", "=", "repository_record_types", ")", "return", "True" ]
53.36
24.92
def compose_machine_filename(self, name, group, create_flags, base_folder): """Returns a recommended full path of the settings file name for a new virtual machine. This API serves two purposes: It gets called by :py:func:`create_machine` if @c null or empty string (which is recommended) is specified for the @a settingsFile argument there, which means that API should use a recommended default file name. It can be called manually by a client software before creating a machine, e.g. if that client wants to pre-create the machine directory to create virtual hard disks in that directory together with the new machine settings file. In that case, the file name should be stripped from the full settings file path returned by this function to obtain the machine directory. See :py:func:`IMachine.name` and :py:func:`create_machine` for more details about the machine name. @a groupName defines which additional subdirectory levels should be included. It must be either a valid group name or @c null or empty string which designates that the machine will not be related to a machine group. If @a baseFolder is a @c null or empty string (which is recommended), the default machine settings folder (see :py:func:`ISystemProperties.default_machine_folder` ) will be used as a base folder for the created machine, resulting in a file name like "/home/user/VirtualBox VMs/name/name.vbox". Otherwise the given base folder will be used. This method does not access the host disks. In particular, it does not check for whether a machine with this name already exists. in name of type str Suggested machine name. in group of type str Machine group name for the new machine or machine group. It is used to determine the right subdirectory. in create_flags of type str Machine creation flags, see :py:func:`create_machine` (optional). in base_folder of type str Base machine folder (optional). return file_p of type str Fully qualified path where the machine would be created. """ if not isinstance(name, basestring): raise TypeError("name can only be an instance of type basestring") if not isinstance(group, basestring): raise TypeError("group can only be an instance of type basestring") if not isinstance(create_flags, basestring): raise TypeError("create_flags can only be an instance of type basestring") if not isinstance(base_folder, basestring): raise TypeError("base_folder can only be an instance of type basestring") file_p = self._call("composeMachineFilename", in_p=[name, group, create_flags, base_folder]) return file_p
[ "def", "compose_machine_filename", "(", "self", ",", "name", ",", "group", ",", "create_flags", ",", "base_folder", ")", ":", "if", "not", "isinstance", "(", "name", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"name can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "group", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"group can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "create_flags", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"create_flags can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "base_folder", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"base_folder can only be an instance of type basestring\"", ")", "file_p", "=", "self", ".", "_call", "(", "\"composeMachineFilename\"", ",", "in_p", "=", "[", "name", ",", "group", ",", "create_flags", ",", "base_folder", "]", ")", "return", "file_p" ]
45.484848
25.681818
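A quick usage sketch for the function above, assuming the pyvbox `virtualbox` bindings and a local VirtualBox install; the VM name and group are made up for illustration:

import virtualbox  # assumption: pyvbox package is installed

vbox = virtualbox.VirtualBox()
# Ask where VirtualBox would put the settings file for a new VM named
# "test-vm" in group "/lab", with default flags and machine folder.
path = vbox.compose_machine_filename("test-vm", "/lab", "", "")
print(path)  # e.g. .../VirtualBox VMs/lab/test-vm/test-vm.vbox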
def handle_run_exception(self, pipeline_key, pipeline_func, e): """Handles an exception raised by a Pipeline's user code. Args: pipeline_key: The pipeline that raised the error. pipeline_func: The class path name of the Pipeline that was running. e: The exception that was raised. Returns: True if the exception should be re-raised up through the calling stack by the caller of this method. """ if isinstance(e, Retry): retry_message = str(e) logging.warning('User forced retry for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, retry_message) self.transition_retry(pipeline_key, retry_message) elif isinstance(e, Abort): abort_message = str(e) logging.warning('User forced abort for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, abort_message) pipeline_func.abort(abort_message) else: retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception('Generator %r#%s raised exception. %s', pipeline_func, pipeline_key.name(), retry_message) self.transition_retry(pipeline_key, retry_message) return pipeline_func.task_retry
[ "def", "handle_run_exception", "(", "self", ",", "pipeline_key", ",", "pipeline_func", ",", "e", ")", ":", "if", "isinstance", "(", "e", ",", "Retry", ")", ":", "retry_message", "=", "str", "(", "e", ")", "logging", ".", "warning", "(", "'User forced retry for pipeline ID \"%s\" of %r: %s'", ",", "pipeline_key", ".", "name", "(", ")", ",", "pipeline_func", ",", "retry_message", ")", "self", ".", "transition_retry", "(", "pipeline_key", ",", "retry_message", ")", "elif", "isinstance", "(", "e", ",", "Abort", ")", ":", "abort_message", "=", "str", "(", "e", ")", "logging", ".", "warning", "(", "'User forced abort for pipeline ID \"%s\" of %r: %s'", ",", "pipeline_key", ".", "name", "(", ")", ",", "pipeline_func", ",", "abort_message", ")", "pipeline_func", ".", "abort", "(", "abort_message", ")", "else", ":", "retry_message", "=", "'%s: %s'", "%", "(", "e", ".", "__class__", ".", "__name__", ",", "str", "(", "e", ")", ")", "logging", ".", "exception", "(", "'Generator %r#%s raised exception. %s'", ",", "pipeline_func", ",", "pipeline_key", ".", "name", "(", ")", ",", "retry_message", ")", "self", ".", "transition_retry", "(", "pipeline_key", ",", "retry_message", ")", "return", "pipeline_func", ".", "task_retry" ]
42.448276
21.275862
def rollforward(self, dt): """ Roll provided date forward to next offset only if not on offset. """ if not self.onOffset(dt): if self.n >= 0: return self._next_opening_time(dt) else: return self._prev_opening_time(dt) return dt
[ "def", "rollforward", "(", "self", ",", "dt", ")", ":", "if", "not", "self", ".", "onOffset", "(", "dt", ")", ":", "if", "self", ".", "n", ">=", "0", ":", "return", "self", ".", "_next_opening_time", "(", "dt", ")", "else", ":", "return", "self", ".", "_prev_opening_time", "(", "dt", ")", "return", "dt" ]
31.4
13.2
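This method comes from pandas' business-hour offsets; a small sketch of the behaviour it implements, assuming pandas is installed:

import pandas as pd
from pandas.tseries.offsets import BusinessHour

bh = BusinessHour()  # default 09:00-17:00, Monday-Friday
# A Saturday timestamp is not on offset, so it rolls to Monday's opening.
print(bh.rollforward(pd.Timestamp("2024-01-06 10:00")))  # 2024-01-08 09:00:00
# A timestamp already inside business hours comes back unchanged.
print(bh.rollforward(pd.Timestamp("2024-01-08 10:00")))  # 2024-01-08 10:00:00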
def _safe_read(self, amt): """Read the number of bytes requested, compensating for partial reads. Normally, we have a blocking socket, but a read() can be interrupted by a signal (resulting in a partial read). Note that we cannot distinguish between EOF and an interrupt when zero bytes have been read. IncompleteRead() will be raised in this situation. This function should be used when <amt> bytes "should" be present for reading. If the bytes are truly not available (due to EOF), then the IncompleteRead exception can be used to detect the problem. """ s = [] while amt > 0: chunk = self.fp.read(min(amt, MAXAMOUNT)) if not chunk: raise IncompleteRead(bytes(b'').join(s), amt) s.append(chunk) amt -= len(chunk) return bytes(b"").join(s)
[ "def", "_safe_read", "(", "self", ",", "amt", ")", ":", "s", "=", "[", "]", "while", "amt", ">", "0", ":", "chunk", "=", "self", ".", "fp", ".", "read", "(", "min", "(", "amt", ",", "MAXAMOUNT", ")", ")", "if", "not", "chunk", ":", "raise", "IncompleteRead", "(", "bytes", "(", "b''", ")", ".", "join", "(", "s", ")", ",", "amt", ")", "s", ".", "append", "(", "chunk", ")", "amt", "-=", "len", "(", "chunk", ")", "return", "bytes", "(", "b\"\"", ")", ".", "join", "(", "s", ")" ]
40.454545
22.409091
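The same read-exactly-N-bytes loop works standalone; a minimal sketch against an in-memory stream, with EOFError standing in for IncompleteRead so the example stays dependency-free and MAXAMOUNT picked arbitrarily:

import io

MAXAMOUNT = 1048576  # per-read cap; the real constant lives in the http module

def safe_read(fp, amt):
    # Accumulate chunks until exactly amt bytes are read or EOF hits early.
    parts = []
    while amt > 0:
        chunk = fp.read(min(amt, MAXAMOUNT))
        if not chunk:
            raise EOFError("stream ended with %d bytes still expected" % amt)
        parts.append(chunk)
        amt -= len(chunk)
    return b"".join(parts)

print(safe_read(io.BytesIO(b"abcdef"), 4))  # b'abcd'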
def decompile(ast, indentation=4, line_length=100, starting_indentation=0): """Decompiles an AST into Python code. Arguments: - ast: code to decompile, using AST objects as generated by the standard library ast module - indentation: indentation level of lines - line_length: if lines become longer than this length, ast_decompiler will try to break them up (but it will not necessarily succeed in all cases) - starting_indentation: indentation level at which to start producing code """ decompiler = Decompiler( indentation=indentation, line_length=line_length, starting_indentation=starting_indentation, ) return decompiler.run(ast)
[ "def", "decompile", "(", "ast", ",", "indentation", "=", "4", ",", "line_length", "=", "100", ",", "starting_indentation", "=", "0", ")", ":", "decompiler", "=", "Decompiler", "(", "indentation", "=", "indentation", ",", "line_length", "=", "line_length", ",", "starting_indentation", "=", "starting_indentation", ",", ")", "return", "decompiler", ".", "run", "(", "ast", ")" ]
40.529412
23.411765
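Round-tripping a parse tree back to source, assuming the ast_decompiler package this function ships in:

import ast
from ast_decompiler import decompile  # assumption: ast_decompiler is installed

tree = ast.parse("x = [i ** 2 for i in range(10)]\n")
# Regenerate source from the tree, asking for a narrower 60-column wrap.
print(decompile(tree, line_length=60))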
def compute(self, gsim, num_events, seed=None): """ :param gsim: a GSIM instance :param num_events: the number of seismic events :param seed: a random seed or None :returns: a 32 bit array of shape (num_imts, num_sites, num_events) and two arrays with shape (num_imts, num_events): sig for stddev_inter and eps for the random part """ try: # read the seed from self.rupture.serial seed = seed or self.rupture.serial except AttributeError: pass if seed is not None: numpy.random.seed(seed) result = numpy.zeros((len(self.imts), len(self.sids), num_events), F32) sig = numpy.zeros((len(self.imts), num_events), F32) eps = numpy.zeros((len(self.imts), num_events), F32) for imti, imt in enumerate(self.imts): if isinstance(gsim, MultiGMPE): gs = gsim[str(imt)] # MultiGMPE else: gs = gsim # regular GMPE try: result[imti], sig[imti], eps[imti] = self._compute( None, gs, num_events, imt) except Exception as exc: raise exc.__class__( '%s for %s, %s, srcidx=%s' % (exc, gs, imt, self.srcidx) ).with_traceback(exc.__traceback__) return result, sig, eps
[ "def", "compute", "(", "self", ",", "gsim", ",", "num_events", ",", "seed", "=", "None", ")", ":", "try", ":", "# read the seed from self.rupture.serial", "seed", "=", "seed", "or", "self", ".", "rupture", ".", "serial", "except", "AttributeError", ":", "pass", "if", "seed", "is", "not", "None", ":", "numpy", ".", "random", ".", "seed", "(", "seed", ")", "result", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "self", ".", "imts", ")", ",", "len", "(", "self", ".", "sids", ")", ",", "num_events", ")", ",", "F32", ")", "sig", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "self", ".", "imts", ")", ",", "num_events", ")", ",", "F32", ")", "eps", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "self", ".", "imts", ")", ",", "num_events", ")", ",", "F32", ")", "for", "imti", ",", "imt", "in", "enumerate", "(", "self", ".", "imts", ")", ":", "if", "isinstance", "(", "gsim", ",", "MultiGMPE", ")", ":", "gs", "=", "gsim", "[", "str", "(", "imt", ")", "]", "# MultiGMPE", "else", ":", "gs", "=", "gsim", "# regular GMPE", "try", ":", "result", "[", "imti", "]", ",", "sig", "[", "imti", "]", ",", "eps", "[", "imti", "]", "=", "self", ".", "_compute", "(", "None", ",", "gs", ",", "num_events", ",", "imt", ")", "except", "Exception", "as", "exc", ":", "raise", "exc", ".", "__class__", "(", "'%s for %s, %s, srcidx=%s'", "%", "(", "exc", ",", "gs", ",", "imt", ",", "self", ".", "srcidx", ")", ")", ".", "with_traceback", "(", "exc", ".", "__traceback__", ")", "return", "result", ",", "sig", ",", "eps" ]
42.84375
13.59375
def get_jids(): ''' Return a dict mapping all job ids to job information ''' ret = {} for jid, job, _, _ in _walk_through(_job_dir()): ret[jid] = salt.utils.jid.format_jid_instance(jid, job) if __opts__.get('job_cache_store_endtime'): endtime = get_endtime(jid) if endtime: ret[jid]['EndTime'] = endtime return ret
[ "def", "get_jids", "(", ")", ":", "ret", "=", "{", "}", "for", "jid", ",", "job", ",", "_", ",", "_", "in", "_walk_through", "(", "_job_dir", "(", ")", ")", ":", "ret", "[", "jid", "]", "=", "salt", ".", "utils", ".", "jid", ".", "format_jid_instance", "(", "jid", ",", "job", ")", "if", "__opts__", ".", "get", "(", "'job_cache_store_endtime'", ")", ":", "endtime", "=", "get_endtime", "(", "jid", ")", "if", "endtime", ":", "ret", "[", "jid", "]", "[", "'EndTime'", "]", "=", "endtime", "return", "ret" ]
27.357143
22.214286
def ncontains(self, column, value):
        """
        Set the main dataframe instance to rows that do not contain
        a string value in a column
        """
        df = self.df[self.df[column].str.contains(value) == False]
        if df is None:
            self.err("Can not select contained data")
            return
        self.df = df
[ "def", "ncontains", "(", "self", ",", "column", ",", "value", ")", ":", "df", "=", "self", ".", "df", "[", "self", ".", "df", "[", "column", "]", ".", "str", ".", "contains", "(", "value", ")", "==", "False", "]", "if", "df", "is", "None", ":", "self", ".", "err", "(", "\"Can not select contained data\"", ")", "return", "self", ".", "df", "=", "df" ]
33.8
12.6
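The wrapper above is built on a plain pandas idiom; the equivalent filter without the wrapper class, assuming pandas:

import pandas as pd

df = pd.DataFrame({"name": ["alpha", "beta", "gamma"]})
# Keep rows whose "name" does NOT contain "mm" (drops "gamma").
# `~` is the idiomatic negation; the method above compares `== False` instead.
filtered = df[~df["name"].str.contains("mm")]
print(filtered)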
def get_port_for_handle(handle, state):
    """Looks for and returns the PortView corresponding to the given handle in the provided state

    :param handle: Handle to look for port
    :param state: State containing handle and port
    :returns: PortView for the handle
    """
    from rafcon.gui.mygaphas.items.state import StateView
    if isinstance(state, StateView):
        if state.income.handle == handle:
            return state.income
        else:
            for outcome in state.outcomes:
                if outcome.handle == handle:
                    return outcome
            for input in state.inputs:
                if input.handle == handle:
                    return input
            for output in state.outputs:
                if output.handle == handle:
                    return output
            for scoped in state.scoped_variables:
                if scoped.handle == handle:
                    return scoped
[ "def", "get_port_for_handle", "(", "handle", ",", "state", ")", ":", "from", "rafcon", ".", "gui", ".", "mygaphas", ".", "items", ".", "state", "import", "StateView", "if", "isinstance", "(", "state", ",", "StateView", ")", ":", "if", "state", ".", "income", ".", "handle", "==", "handle", ":", "return", "state", ".", "income", "else", ":", "for", "outcome", "in", "state", ".", "outcomes", ":", "if", "outcome", ".", "handle", "==", "handle", ":", "return", "outcome", "for", "input", "in", "state", ".", "inputs", ":", "if", "input", ".", "handle", "==", "handle", ":", "return", "input", "for", "output", "in", "state", ".", "outputs", ":", "if", "output", ".", "handle", "==", "handle", ":", "return", "output", "for", "scoped", "in", "state", ".", "scoped_variables", ":", "if", "scoped", ".", "handle", "==", "handle", ":", "return", "scoped" ]
37.708333
7.125
def _create_osf_project(dlgr_id, description=None): """Create a project on the OSF.""" if not description: description = "Experiment {} registered by Dallinger.".format(dlgr_id) r = requests.post( "{}/nodes/".format(root), data={ "type": "nodes", "category": "project", "title": "Experiment dlgr-{}".format(dlgr_id[0:8]), "description": description, }, headers={"Authorization": "Bearer {}".format(config.get("osf_access_token"))}, ) r.raise_for_status() osf_id = r.json()["data"]["id"] logger.info("Project registered on OSF at http://osf.io/{}".format(osf_id)) return osf_id
[ "def", "_create_osf_project", "(", "dlgr_id", ",", "description", "=", "None", ")", ":", "if", "not", "description", ":", "description", "=", "\"Experiment {} registered by Dallinger.\"", ".", "format", "(", "dlgr_id", ")", "r", "=", "requests", ".", "post", "(", "\"{}/nodes/\"", ".", "format", "(", "root", ")", ",", "data", "=", "{", "\"type\"", ":", "\"nodes\"", ",", "\"category\"", ":", "\"project\"", ",", "\"title\"", ":", "\"Experiment dlgr-{}\"", ".", "format", "(", "dlgr_id", "[", "0", ":", "8", "]", ")", ",", "\"description\"", ":", "description", ",", "}", ",", "headers", "=", "{", "\"Authorization\"", ":", "\"Bearer {}\"", ".", "format", "(", "config", ".", "get", "(", "\"osf_access_token\"", ")", ")", "}", ",", ")", "r", ".", "raise_for_status", "(", ")", "osf_id", "=", "r", ".", "json", "(", ")", "[", "\"data\"", "]", "[", "\"id\"", "]", "logger", ".", "info", "(", "\"Project registered on OSF at http://osf.io/{}\"", ".", "format", "(", "osf_id", ")", ")", "return", "osf_id" ]
30.863636
23.318182
def get_server_info(self, anonymous = True):
		"""
		Performs bind on the server and grabs the DSA info object.
		If anonymous is set to true, then it will perform anonymous bind, not using user credentials.
		Otherwise it will use the credentials set in the object constructor.
		"""
		if anonymous == True:
			logger.debug('Getting server info via Anonymous BIND on server %s' % self.target_server.get_host())
			server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
			conn = Connection(server, auto_bind=True)
			logger.debug('Got server info')
		else:
			logger.debug('Getting server info via credentials supplied on server %s' % self.target_server.get_host())
			server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
			if self.use_sspi == True:
				conn = self.monkeypatch()
			else:
				conn = Connection(self._srv, user=self.login_credential.get_msuser(), password=self.login_credential.get_password(), authentication=self.login_credential.get_authmethod())
			logger.debug('Performing BIND to server %s' % self.target_server.get_host())
			if not self._con.bind():
				if 'description' in self._con.result:
					raise Exception('Failed to bind to server! Reason: %s' % conn.result['description'])
				raise Exception('Failed to bind to server! Reason: %s' % conn.result)
			logger.debug('Connected to server!')
		return server.info
[ "def", "get_server_info", "(", "self", ",", "anonymous", "=", "True", ")", ":", "if", "anonymous", "==", "True", ":", "logger", ".", "debug", "(", "'Getting server info via Anonymous BIND on server %s'", "%", "self", ".", "target_server", ".", "get_host", "(", ")", ")", "server", "=", "Server", "(", "self", ".", "target_server", ".", "get_host", "(", ")", ",", "use_ssl", "=", "self", ".", "target_server", ".", "is_ssl", "(", ")", ",", "get_info", "=", "ALL", ")", "conn", "=", "Connection", "(", "server", ",", "auto_bind", "=", "True", ")", "logger", ".", "debug", "(", "'Got server info'", ")", "else", ":", "logger", ".", "debug", "(", "'Getting server info via credentials supplied on server %s'", "%", "self", ".", "target_server", ".", "get_host", "(", ")", ")", "server", "=", "Server", "(", "self", ".", "target_server", ".", "get_host", "(", ")", ",", "use_ssl", "=", "self", ".", "target_server", ".", "is_ssl", "(", ")", ",", "get_info", "=", "ALL", ")", "if", "self", ".", "use_sspi", "==", "True", ":", "conn", "=", "self", ".", "monkeypatch", "(", ")", "else", ":", "conn", "=", "Connection", "(", "self", ".", "_srv", ",", "user", "=", "self", ".", "login_credential", ".", "get_msuser", "(", ")", ",", "password", "=", "self", ".", "login_credential", ".", "get_password", "(", ")", ",", "authentication", "=", "self", ".", "login_credential", ".", "get_authmethod", "(", ")", ")", "logger", ".", "debug", "(", "'Performing BIND to server %s'", "%", "self", ".", "target_server", ".", "get_host", "(", ")", ")", "if", "not", "self", ".", "_con", ".", "bind", "(", ")", ":", "if", "'description'", "in", "self", ".", "_con", ".", "result", ":", "raise", "Exception", "(", "'Failed to bind to server! Reason: %s'", "%", "conn", ".", "result", "[", "'description'", "]", ")", "raise", "Exception", "(", "'Failed to bind to server! Reason: %s'", "%", "conn", ".", "result", ")", "logger", ".", "debug", "(", "'Connected to server!'", ")", "return", "server", ".", "info" ]
56.16
30.56
def remove_value(self, name): """Remove a variable""" ns = self._get_reference_namespace(name) ns.pop(name)
[ "def", "remove_value", "(", "self", ",", "name", ")", ":", "ns", "=", "self", ".", "_get_reference_namespace", "(", "name", ")", "ns", ".", "pop", "(", "name", ")" ]
32
9.75
def get_default(self, *args, **kwargs): """Get the default parameters as defined in the Settings instance. This function proceeds to seamlessly retrieve the argument to pass through, depending on either it was overidden or not: If no argument was overridden in a function of the toolbox, the default argument will be set to ``None``, and this function will retrieve the default parameters as defined by the ``cdt.SETTINGS`` 's attributes. It has two modes of processing: 1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``. 2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once. """ def retrieve_param(i): try: return self.__getattribute__(i) except AttributeError: if i == "device": return self.default_device else: return self.__getattribute__(i.upper()) if len(args) == 0: if len(kwargs) == 1 and kwargs[list(kwargs.keys())[0]] is not None: return kwargs[list(kwargs.keys())[0]] elif len(kwargs) == 1: return retrieve_param(list(kwargs.keys())[0]) else: raise TypeError("As dict is unordered, it is impossible to give" "the parameters in the correct order.") else: out = [] for i in args: if i[1] is None: out.append(retrieve_param(i[0])) else: out.append(i[1]) return out
[ "def", "get_default", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "retrieve_param", "(", "i", ")", ":", "try", ":", "return", "self", ".", "__getattribute__", "(", "i", ")", "except", "AttributeError", ":", "if", "i", "==", "\"device\"", ":", "return", "self", ".", "default_device", "else", ":", "return", "self", ".", "__getattribute__", "(", "i", ".", "upper", "(", ")", ")", "if", "len", "(", "args", ")", "==", "0", ":", "if", "len", "(", "kwargs", ")", "==", "1", "and", "kwargs", "[", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "is", "not", "None", ":", "return", "kwargs", "[", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "elif", "len", "(", "kwargs", ")", "==", "1", ":", "return", "retrieve_param", "(", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "[", "0", "]", ")", "else", ":", "raise", "TypeError", "(", "\"As dict is unordered, it is impossible to give\"", "\"the parameters in the correct order.\"", ")", "else", ":", "out", "=", "[", "]", "for", "i", "in", "args", ":", "if", "i", "[", "1", "]", "is", "None", ":", "out", ".", "append", "(", "retrieve_param", "(", "i", "[", "0", "]", ")", ")", "else", ":", "out", ".", "append", "(", "i", "[", "1", "]", ")", "return", "out" ]
43.947368
21.763158
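Both calling modes described in the docstring, assuming the CausalDiscoveryToolbox (cdt) package whose SETTINGS object carries this method; the attribute names mirror the code's upper-case fallback:

from cdt import SETTINGS  # assumption: cdt is installed

# Mode 1: a single keyword -- the override wins, otherwise the default.
njobs = SETTINGS.get_default(njobs=None)   # falls back to SETTINGS.NJOBS
njobs = SETTINGS.get_default(njobs=8)      # returns the override, 8

# Mode 2: ordered tuples -- several parameters resolved in one call.
njobs, device = SETTINGS.get_default(("njobs", None), ("device", None))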
def add_clause(self, clause, soft=False):
        """
            The method for adding a new hard or soft clause to the problem
            formula. Although the input formula is to be specified as an
            argument of the constructor of :class:`LBX`, adding clauses may
            be helpful when *enumerating* MCSes of the formula. This way, the
            clauses are added incrementally, i.e. *on the fly*.

            The clause to add can be any iterable over integer literals. The
            additional Boolean parameter ``soft`` can be set to ``True``
            meaning that the clause being added is soft (note that parameter
            ``soft`` is set to ``False`` by default).

            :param clause: a clause to add
            :param soft: whether or not the clause is soft

            :type clause: iterable(int)
            :type soft: bool
        """
        # first, map external literals to internal literals
        # introduce new variables if necessary
        cl = list(map(lambda l: self._map_extlit(l), clause))

        if not soft:
            # the clause is hard, and so we simply add it to the SAT oracle
            self.oracle.add_clause(cl)
        else:
            self.soft.append(cl)

            # soft clauses should be augmented with a selector
            sel = cl[0]
            if len(cl) > 1 or cl[0] < 0:
                self.topv += 1
                sel = self.topv

                self.oracle.add_clause(cl + [-sel])

            self.sels.append(sel)
[ "def", "add_clause", "(", "self", ",", "clause", ",", "soft", "=", "False", ")", ":", "# first, map external literals to internal literals", "# introduce new variables if necessary", "cl", "=", "list", "(", "map", "(", "lambda", "l", ":", "self", ".", "_map_extlit", "(", "l", ")", ",", "clause", ")", ")", "if", "not", "soft", ":", "# the clause is hard, and so we simply add it to the SAT oracle", "self", ".", "oracle", ".", "add_clause", "(", "cl", ")", "else", ":", "self", ".", "soft", ".", "append", "(", "cl", ")", "# soft clauses should be augmented with a selector", "sel", "=", "cl", "[", "0", "]", "if", "len", "(", "cl", ")", ">", "1", "or", "cl", "[", "0", "]", "<", "0", ":", "self", ".", "topv", "+=", "1", "sel", "=", "self", ".", "topv", "self", ".", "oracle", ".", "add_clause", "(", "cl", "+", "[", "-", "sel", "]", ")", "self", ".", "sels", ".", "append", "(", "sel", ")" ]
37.974359
21.666667
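Typical incremental use, following the documented PySAT pattern (assumes the python-sat package, which ships LBX under pysat.examples):

from pysat.examples.lbx import LBX
from pysat.formula import WCNF

wcnf = WCNF()
wcnf.append([-1, -2])        # hard clause (no weight)
wcnf.append([1], weight=1)   # soft clause
wcnf.append([2], weight=1)   # soft clause

lbx = LBX(wcnf)
# A soft clause can also be added on the fly, as the docstring describes.
lbx.add_clause([3], soft=True)
for mcs in lbx.enumerate():
    lbx.block(mcs)           # block this MCS and move to the next one
    print(mcs)               # 1-based indices of soft clauses in the MCS
lbx.delete()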
def prepend_model(self, value, model):
        """
        Prepends model name if it is not already prepended.
        For example, if model is "Offer":
            key -> Offer.key
            -key -> -Offer.key
            Offer.key -> Offer.key
            -Offer.key -> -Offer.key
        """
        if '.' not in value:
            direction = ''
            if value.startswith('-'):
                value = value[1:]
                direction = '-'
            value = '%s%s.%s' % (direction, model, value)
        return value
[ "def", "prepend_model", "(", "self", ",", "value", ",", "model", ")", ":", "if", "'.'", "not", "in", "value", ":", "direction", "=", "''", "if", "value", ".", "startswith", "(", "'-'", ")", ":", "value", "=", "value", "[", "1", ":", "]", "direction", "=", "'-'", "value", "=", "'%s%s.%s'", "%", "(", "direction", ",", "model", ",", "value", ")", "return", "value" ]
30.352941
10.470588
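Because the method only inspects its arguments, it can be exercised standalone; the docstring's four cases as assertions over a copy of the logic:

def prepend_model(value, model):
    # Same logic as above, minus `self`.
    if '.' not in value:
        direction = ''
        if value.startswith('-'):
            value = value[1:]
            direction = '-'
        value = '%s%s.%s' % (direction, model, value)
    return value

assert prepend_model('key', 'Offer') == 'Offer.key'
assert prepend_model('-key', 'Offer') == '-Offer.key'
assert prepend_model('Offer.key', 'Offer') == 'Offer.key'
assert prepend_model('-Offer.key', 'Offer') == '-Offer.key'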
def rundata(self, strjson): """POST JSON data object to server""" d = json.loads(strjson) return self.api.data.post(d)
[ "def", "rundata", "(", "self", ",", "strjson", ")", ":", "d", "=", "json", ".", "loads", "(", "strjson", ")", "return", "self", ".", "api", ".", "data", ".", "post", "(", "d", ")" ]
27.8
13.2
def ParseOptions(cls, options, configuration_object): """Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: if the collection file does not exist. """ if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') filter_file = cls._ParseStringOption(options, 'file_filter') # Search the data location for the filter file. if filter_file and not os.path.isfile(filter_file): data_location = getattr(configuration_object, '_data_location', None) if data_location: filter_file_basename = os.path.basename(filter_file) filter_file_path = os.path.join(data_location, filter_file_basename) if os.path.isfile(filter_file_path): filter_file = filter_file_path if filter_file and not os.path.isfile(filter_file): raise errors.BadConfigOption( 'No such collection filter file: {0:s}.'.format(filter_file)) setattr(configuration_object, '_filter_file', filter_file)
[ "def", "ParseOptions", "(", "cls", ",", "options", ",", "configuration_object", ")", ":", "if", "not", "isinstance", "(", "configuration_object", ",", "tools", ".", "CLITool", ")", ":", "raise", "errors", ".", "BadConfigObject", "(", "'Configuration object is not an instance of CLITool'", ")", "filter_file", "=", "cls", ".", "_ParseStringOption", "(", "options", ",", "'file_filter'", ")", "# Search the data location for the filter file.", "if", "filter_file", "and", "not", "os", ".", "path", ".", "isfile", "(", "filter_file", ")", ":", "data_location", "=", "getattr", "(", "configuration_object", ",", "'_data_location'", ",", "None", ")", "if", "data_location", ":", "filter_file_basename", "=", "os", ".", "path", ".", "basename", "(", "filter_file", ")", "filter_file_path", "=", "os", ".", "path", ".", "join", "(", "data_location", ",", "filter_file_basename", ")", "if", "os", ".", "path", ".", "isfile", "(", "filter_file_path", ")", ":", "filter_file", "=", "filter_file_path", "if", "filter_file", "and", "not", "os", ".", "path", ".", "isfile", "(", "filter_file", ")", ":", "raise", "errors", ".", "BadConfigOption", "(", "'No such collection filter file: {0:s}.'", ".", "format", "(", "filter_file", ")", ")", "setattr", "(", "configuration_object", ",", "'_filter_file'", ",", "filter_file", ")" ]
39.5
22.53125
def gdcsreporter(self, analysistype='GDCS'):
        """
        Creates a report of the GDCS results
        :param analysistype: The variable to use when accessing attributes in the metadata object
        """
        logging.info('Creating {} report'.format(analysistype))
        # Initialise list to store all the GDCS genes, and genera in the analysis
        gdcs = list()
        genera = list()
        for sample in self.runmetadata.samples:
            if sample.general.bestassemblyfile != 'NA':
                if os.path.isdir(sample[analysistype].targetpath):
                    # Update the fai dict with all the genes in the analysis, rather than just those with baited hits
                    self.gdcs_fai(sample)
                    sample[analysistype].createreport = True
                    # Determine which genera are present in the analysis
                    if sample.general.closestrefseqgenus not in genera:
                        genera.append(sample.general.closestrefseqgenus)
                    try:
                        # Add all the GDCS genes to the list
                        for gene in sorted(sample[analysistype].faidict):
                            if gene not in gdcs:
                                gdcs.append(gene)
                    except AttributeError:
                        sample[analysistype].createreport = False
                else:
                    sample[analysistype].createreport = False
            else:
                sample[analysistype].createreport = False
                sample.general.incomplete = True
        header = 'Strain,Genus,Matches,MeanCoverage,Pass/Fail,{},\n'.format(','.join(gdcs))
        data = str()
        with open(os.path.join(self.reportpath, '{}.csv'.format(analysistype)), 'w') as report:
            # Sort the samples in the report based on the closest refseq genus e.g. all samples with the same genus
            # will be grouped together in the report
            for genus in genera:
                for sample in self.runmetadata.samples:
                    if sample.general.closestrefseqgenus == genus:
                        if sample[analysistype].createreport:
                            sample[analysistype].totaldepth = list()
                            # Add the sample to the report if it matches the current genus
                            # if genus == sample.general.closestrefseqgenus:
                            data += '{},{},'.format(sample.name, genus)
                            # Initialise a variable to store the number of GDCS genes were matched
                            count = 0
                            # As I want the count to be in the report before all the gene results, this string will
                            # store the specific sample information, and will be added to data once count is known
                            specific = str()
                            for gene in gdcs:
                                # As there are different genes present in the GDCS databases for each organism of
                                # interest, genes that did not match because they're absent in the specific database are
                                # indicated using an X
                                if gene not in [result for result in sample[analysistype].faidict]:
                                    specific += 'X,'
                                else:
                                    try:
                                        # Report the necessary information for each gene result
                                        identity = sample[analysistype].results[gene]
                                        specific += '{}% ({} +/- {}),'\
                                            .format(identity,
                                                    sample[analysistype].avgdepth[gene],
                                                    sample[analysistype].standarddev[gene])
                                        sample[analysistype].totaldepth.append(
                                            float(sample[analysistype].avgdepth[gene]))
                                        count += 1
                                    # If the gene was missing from the results attribute, add a - to the cell
                                    except (KeyError, AttributeError):
                                        sample.general.incomplete = True
                                        specific += '-,'
                            # Calculate the mean depth of the genes and the standard deviation
                            sample[analysistype].mean = numpy.mean(sample[analysistype].totaldepth)
                            sample[analysistype].stddev = numpy.std(sample[analysistype].totaldepth)
                            # Determine whether the sample pass the necessary quality criteria:
                            # Pass, all GDCS, mean coverage greater than 20X coverage;
                            # ?: Indeterminate value;
                            # -: Fail value
                            # Allow one missing GDCS to still be considered a pass
                            if count >= len(sample[analysistype].faidict) - 1:
                                if sample[analysistype].mean > 20:
                                    quality = '+'
                                else:
                                    quality = '?'
                                    sample.general.incomplete = True
                            else:
                                quality = '-'
                                sample.general.incomplete = True
                            # Add the count, mean depth with standard deviation, the pass/fail determination,
                            # and the total number of GDCS genes as well as the results
                            data += '{hits}/{total},{mean} +/- {std},{fail},{gdcs}\n'\
                                .format(hits=str(count),
                                        total=len(sample[analysistype].faidict),
                                        mean='{:.2f}'.format(sample[analysistype].mean),
                                        std='{:.2f}'.format(sample[analysistype].stddev),
                                        fail=quality,
                                        gdcs=specific)
                    # # Any samples with a best assembly of 'NA' are considered incomplete.
                    # else:
                    #     data += '{},{},,,-\n'.format(sample.name, sample.general.closestrefseqgenus)
                    #     sample.general.incomplete = True
                    elif sample.general.closestrefseqgenus == 'NA':
                        data += '{}\n'.format(sample.name)
                        sample.general.incomplete = True
            # Write the header and data to file
            report.write(header)
            report.write(data)
[ "def", "gdcsreporter", "(", "self", ",", "analysistype", "=", "'GDCS'", ")", ":", "logging", ".", "info", "(", "'Creating {} report'", ".", "format", "(", "analysistype", ")", ")", "# Initialise list to store all the GDCS genes, and genera in the analysis", "gdcs", "=", "list", "(", ")", "genera", "=", "list", "(", ")", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "if", "os", ".", "path", ".", "isdir", "(", "sample", "[", "analysistype", "]", ".", "targetpath", ")", ":", "# Update the fai dict with all the genes in the analysis, rather than just those with baited hits", "self", ".", "gdcs_fai", "(", "sample", ")", "sample", "[", "analysistype", "]", ".", "createreport", "=", "True", "# Determine which genera are present in the analysis", "if", "sample", ".", "general", ".", "closestrefseqgenus", "not", "in", "genera", ":", "genera", ".", "append", "(", "sample", ".", "general", ".", "closestrefseqgenus", ")", "try", ":", "# Add all the GDCS genes to the list", "for", "gene", "in", "sorted", "(", "sample", "[", "analysistype", "]", ".", "faidict", ")", ":", "if", "gene", "not", "in", "gdcs", ":", "gdcs", ".", "append", "(", "gene", ")", "except", "AttributeError", ":", "sample", "[", "analysistype", "]", ".", "createreport", "=", "False", "else", ":", "sample", "[", "analysistype", "]", ".", "createreport", "=", "False", "else", ":", "sample", "[", "analysistype", "]", ".", "createreport", "=", "False", "sample", ".", "general", ".", "incomplete", "=", "True", "header", "=", "'Strain,Genus,Matches,MeanCoverage,Pass/Fail,{},\\n'", ".", "format", "(", "','", ".", "join", "(", "gdcs", ")", ")", "data", "=", "str", "(", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reportpath", ",", "'{}.csv'", ".", "format", "(", "analysistype", ")", ")", ",", "'w'", ")", "as", "report", ":", "# Sort the samples in the report based on the closest refseq genus e.g. 
all samples with the same genus", "# will be grouped together in the report", "for", "genus", "in", "genera", ":", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "if", "sample", ".", "general", ".", "closestrefseqgenus", "==", "genus", ":", "if", "sample", "[", "analysistype", "]", ".", "createreport", ":", "sample", "[", "analysistype", "]", ".", "totaldepth", "=", "list", "(", ")", "# Add the sample to the report if it matches the current genus", "# if genus == sample.general.closestrefseqgenus:", "data", "+=", "'{},{},'", ".", "format", "(", "sample", ".", "name", ",", "genus", ")", "# Initialise a variable to store the number of GDCS genes were matched", "count", "=", "0", "# As I want the count to be in the report before all the gene results, this string will", "# store the specific sample information, and will be added to data once count is known", "specific", "=", "str", "(", ")", "for", "gene", "in", "gdcs", ":", "# As there are different genes present in the GDCS databases for each organism of", "# interest, genes that did not match because they're absent in the specific database are", "# indicated using an X", "if", "gene", "not", "in", "[", "result", "for", "result", "in", "sample", "[", "analysistype", "]", ".", "faidict", "]", ":", "specific", "+=", "'X,'", "else", ":", "try", ":", "# Report the necessary information for each gene result", "identity", "=", "sample", "[", "analysistype", "]", ".", "results", "[", "gene", "]", "specific", "+=", "'{}% ({} +/- {}),'", ".", "format", "(", "identity", ",", "sample", "[", "analysistype", "]", ".", "avgdepth", "[", "gene", "]", ",", "sample", "[", "analysistype", "]", ".", "standarddev", "[", "gene", "]", ")", "sample", "[", "analysistype", "]", ".", "totaldepth", ".", "append", "(", "float", "(", "sample", "[", "analysistype", "]", ".", "avgdepth", "[", "gene", "]", ")", ")", "count", "+=", "1", "# If the gene was missing from the results attribute, add a - to the cell", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "sample", ".", "general", ".", "incomplete", "=", "True", "specific", "+=", "'-,'", "# Calculate the mean depth of the genes and the standard deviation", "sample", "[", "analysistype", "]", ".", "mean", "=", "numpy", ".", "mean", "(", "sample", "[", "analysistype", "]", ".", "totaldepth", ")", "sample", "[", "analysistype", "]", ".", "stddev", "=", "numpy", ".", "std", "(", "sample", "[", "analysistype", "]", ".", "totaldepth", ")", "# Determine whether the sample pass the necessary quality criteria:", "# Pass, all GDCS, mean coverage greater than 20X coverage;", "# ?: Indeterminate value;", "# -: Fail value", "# Allow one missing GDCS to still be considered a pass", "if", "count", ">=", "len", "(", "sample", "[", "analysistype", "]", ".", "faidict", ")", "-", "1", ":", "if", "sample", "[", "analysistype", "]", ".", "mean", ">", "20", ":", "quality", "=", "'+'", "else", ":", "quality", "=", "'?'", "sample", ".", "general", ".", "incomplete", "=", "True", "else", ":", "quality", "=", "'-'", "sample", ".", "general", ".", "incomplete", "=", "True", "# Add the count, mean depth with standard deviation, the pass/fail determination,", "# and the total number of GDCS genes as well as the results", "data", "+=", "'{hits}/{total},{mean} +/- {std},{fail},{gdcs}\\n'", ".", "format", "(", "hits", "=", "str", "(", "count", ")", ",", "total", "=", "len", "(", "sample", "[", "analysistype", "]", ".", "faidict", ")", ",", "mean", "=", "'{:.2f}'", ".", "format", "(", "sample", "[", "analysistype", 
"]", ".", "mean", ")", ",", "std", "=", "'{:.2f}'", ".", "format", "(", "sample", "[", "analysistype", "]", ".", "stddev", ")", ",", "fail", "=", "quality", ",", "gdcs", "=", "specific", ")", "# # Any samples with a best assembly of 'NA' are considered incomplete.", "# else:", "# data += '{},{},,,-\\n'.format(sample.name, sample.general.closestrefseqgenus)", "# sample.general.incomplete = True", "elif", "sample", ".", "general", ".", "closestrefseqgenus", "==", "'NA'", ":", "data", "+=", "'{}\\n'", ".", "format", "(", "sample", ".", "name", ")", "sample", ".", "general", ".", "incomplete", "=", "True", "# Write the header and data to file", "report", ".", "write", "(", "header", ")", "report", ".", "write", "(", "data", ")" ]
65.5
29.230769
def list(self, count=None, **kwargs): """Retrieves a list of entities in this collection. The entire collection is loaded at once and is returned as a list. This function makes a single roundtrip to the server, plus at most two more if the ``autologin`` field of :func:`connect` is set to ``True``. There is no caching--every call makes at least one round trip. :param count: The maximum number of entities to return (optional). :type count: ``integer`` :param kwargs: Additional arguments (optional): - "offset" (``integer``): The offset of the first item to return. - "search" (``string``): The search query to filter responses. - "sort_dir" (``string``): The direction to sort returned items: "asc" or "desc". - "sort_key" (``string``): The field to use for sorting (optional). - "sort_mode" (``string``): The collating sequence for sorting returned items: "auto", "alpha", "alpha_case", or "num". :type kwargs: ``dict`` :return: A ``list`` of entities. """ # response = self.get(count=count, **kwargs) # return self._load_list(response) return list(self.iter(count=count, **kwargs))
[ "def", "list", "(", "self", ",", "count", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# response = self.get(count=count, **kwargs)", "# return self._load_list(response)", "return", "list", "(", "self", ".", "iter", "(", "count", "=", "count", ",", "*", "*", "kwargs", ")", ")" ]
42.166667
25.9
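A typical call through the Splunk SDK's collections, assuming splunk-sdk is installed; host and credentials are placeholders:

import splunklib.client as client  # assumption: splunk-sdk package

service = client.connect(host="localhost", port=8089,
                         username="admin", password="<password>")
# Collections such as service.apps expose this list() method.
for app in service.apps.list(count=5, sort_dir="asc"):
    print(app.name)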
def policy_assignments(self): """Instance depends on the API version: * 2015-10-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations>` * 2016-04-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_04_01.operations.PolicyAssignmentsOperations>` * 2016-12-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations>` * 2017-06-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicyAssignmentsOperations>` * 2018-03-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations>` * 2018-05-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicyAssignmentsOperations>` """ api_version = self._get_api_version('policy_assignments') if api_version == '2015-10-01-preview': from .v2015_10_01_preview.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2016-04-01': from .v2016_04_01.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2016-12-01': from .v2016_12_01.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2017-06-01-preview': from .v2017_06_01_preview.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2018-03-01': from .v2018_03_01.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2018-05-01': from .v2018_05_01.operations import PolicyAssignmentsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "policy_assignments", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'policy_assignments'", ")", "if", "api_version", "==", "'2015-10-01-preview'", ":", "from", ".", "v2015_10_01_preview", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2016-04-01'", ":", "from", ".", "v2016_04_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2016-12-01'", ":", "from", ".", "v2016_12_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2017-06-01-preview'", ":", "from", ".", "v2017_06_01_preview", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-03-01'", ":", "from", ".", "v2018_03_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-05-01'", ":", "from", ".", "v2018_05_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
81.076923
47.923077
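How the version-dispatching property is reached in practice, assuming the track-1 azure-mgmt-resource PolicyClient this property belongs to; credentials and subscription are placeholders:

from azure.mgmt.resource import PolicyClient  # assumption: azure-mgmt-resource

credentials = ...                  # e.g. a ServicePrincipalCredentials instance
subscription_id = "<subscription-id>"

# Pinning api_version routes the property to the matching operations class.
client = PolicyClient(credentials, subscription_id, api_version="2018-05-01")
for assignment in client.policy_assignments.list():
    print(assignment.name)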
def visit_complex_value(self, node): """Convert :class:`ComplexValue` to one of ExactMatch, PartialMatch and Regex Value nodes.""" if node.value.startswith(ComplexValue.EXACT_VALUE_TOKEN): value = node.value.strip(ComplexValue.EXACT_VALUE_TOKEN) return ExactMatchValue(value) elif node.value.startswith(ComplexValue.PARTIAL_VALUE_TOKEN): value = node.value.strip(ComplexValue.PARTIAL_VALUE_TOKEN) return PartialMatchValue(value, True if ast.GenericValue.WILDCARD_TOKEN in value else False) elif node.value.startswith(ComplexValue.REGEX_VALUE_TOKEN): return RegexValue(node.value.strip(ComplexValue.REGEX_VALUE_TOKEN)) else: # Covering the case where ComplexValue supports more than ExactMatch, PartialMatch and Regex values. msg = self.__class__.__name__ + ': Unrecognized complex value' try: msg += ' lookahead token: "' + node.value[0] + '"' except IndexError: msg += ': \"' + repr(node.value) + '"' msg += '.\nUsing simple value instead: "' + node.value + '".' logger.warn(msg) return ast.Value(node.value)
[ "def", "visit_complex_value", "(", "self", ",", "node", ")", ":", "if", "node", ".", "value", ".", "startswith", "(", "ComplexValue", ".", "EXACT_VALUE_TOKEN", ")", ":", "value", "=", "node", ".", "value", ".", "strip", "(", "ComplexValue", ".", "EXACT_VALUE_TOKEN", ")", "return", "ExactMatchValue", "(", "value", ")", "elif", "node", ".", "value", ".", "startswith", "(", "ComplexValue", ".", "PARTIAL_VALUE_TOKEN", ")", ":", "value", "=", "node", ".", "value", ".", "strip", "(", "ComplexValue", ".", "PARTIAL_VALUE_TOKEN", ")", "return", "PartialMatchValue", "(", "value", ",", "True", "if", "ast", ".", "GenericValue", ".", "WILDCARD_TOKEN", "in", "value", "else", "False", ")", "elif", "node", ".", "value", ".", "startswith", "(", "ComplexValue", ".", "REGEX_VALUE_TOKEN", ")", ":", "return", "RegexValue", "(", "node", ".", "value", ".", "strip", "(", "ComplexValue", ".", "REGEX_VALUE_TOKEN", ")", ")", "else", ":", "# Covering the case where ComplexValue supports more than ExactMatch, PartialMatch and Regex values.", "msg", "=", "self", ".", "__class__", ".", "__name__", "+", "': Unrecognized complex value'", "try", ":", "msg", "+=", "' lookahead token: \"'", "+", "node", ".", "value", "[", "0", "]", "+", "'\"'", "except", "IndexError", ":", "msg", "+=", "': \\\"'", "+", "repr", "(", "node", ".", "value", ")", "+", "'\"'", "msg", "+=", "'.\\nUsing simple value instead: \"'", "+", "node", ".", "value", "+", "'\".'", "logger", ".", "warn", "(", "msg", ")", "return", "ast", ".", "Value", "(", "node", ".", "value", ")" ]
54.818182
26.318182
def get_blacklist_entries(self): """Get a list of all blacklist entries. """ get_blacklist_entries_endpoint = Template("${rest_root}/blacklist/${public_key}/") url = get_blacklist_entries_endpoint.substitute(rest_root=self._rest_root, public_key=self._public_key) response = self.__get_request(url) return response["list"]["entry"]
[ "def", "get_blacklist_entries", "(", "self", ")", ":", "get_blacklist_entries_endpoint", "=", "Template", "(", "\"${rest_root}/blacklist/${public_key}/\"", ")", "url", "=", "get_blacklist_entries_endpoint", ".", "substitute", "(", "rest_root", "=", "self", ".", "_rest_root", ",", "public_key", "=", "self", ".", "_public_key", ")", "response", "=", "self", ".", "__get_request", "(", "url", ")", "return", "response", "[", "\"list\"", "]", "[", "\"entry\"", "]" ]
41.444444
23.444444
def dump(self, include_address=True, include_id=True) -> str:
        """Dump the keystore for later disk storage.

        The result inherits the entries `'crypto'` and `'version'` from `account.keystore`, and
        adds `'address'` and `'id'` in accordance with the parameters `'include_address'` and
        `'include_id'`.

        If address or id are not known, they are not added, even if requested.

        Args:
            include_address: flag denoting if the address should be included or not
            include_id: flag denoting if the id should be included or not
        """
        d = {
            'crypto': self.keystore['crypto'],
            'version': self.keystore['version'],
        }
        if include_address and self.address is not None:
            d['address'] = remove_0x_prefix(encode_hex(self.address))
        if include_id and self.uuid is not None:
            d['id'] = self.uuid
        return json.dumps(d)
[ "def", "dump", "(", "self", ",", "include_address", "=", "True", ",", "include_id", "=", "True", ")", "->", "str", ":", "d", "=", "{", "'crypto'", ":", "self", ".", "keystore", "[", "'crypto'", "]", ",", "'version'", ":", "self", ".", "keystore", "[", "'version'", "]", ",", "}", "if", "include_address", "and", "self", ".", "address", "is", "not", "None", ":", "d", "[", "'address'", "]", "=", "remove_0x_prefix", "(", "encode_hex", "(", "self", ".", "address", ")", ")", "if", "include_id", "and", "self", ".", "uuid", "is", "not", "None", ":", "d", "[", "'id'", "]", "=", "self", ".", "uuid", "return", "json", ".", "dumps", "(", "d", ")" ]
42.272727
25.136364
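A round-trip sketch of the contract; `acct` is an assumed instance of this keystore class, not constructed here:

import json

serialized = acct.dump(include_address=True, include_id=False)
data = json.loads(serialized)
# 'crypto' and 'version' are always present; 'address' only when known.
assert {"crypto", "version"} <= set(data)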
def connection_lost(self, exc: Optional[Exception]) -> None: """ 7.1.4. The WebSocket Connection is Closed. """ logger.debug("%s - event = connection_lost(%s)", self.side, exc) self.state = State.CLOSED logger.debug("%s - state = CLOSED", self.side) if not hasattr(self, "close_code"): self.close_code = 1006 if not hasattr(self, "close_reason"): self.close_reason = "" logger.debug( "%s x code = %d, reason = %s", self.side, self.close_code, self.close_reason or "[no reason]", ) self.abort_keepalive_pings() # If self.connection_lost_waiter isn't pending, that's a bug, because: # - it's set only here in connection_lost() which is called only once; # - it must never be canceled. self.connection_lost_waiter.set_result(None) super().connection_lost(exc)
[ "def", "connection_lost", "(", "self", ",", "exc", ":", "Optional", "[", "Exception", "]", ")", "->", "None", ":", "logger", ".", "debug", "(", "\"%s - event = connection_lost(%s)\"", ",", "self", ".", "side", ",", "exc", ")", "self", ".", "state", "=", "State", ".", "CLOSED", "logger", ".", "debug", "(", "\"%s - state = CLOSED\"", ",", "self", ".", "side", ")", "if", "not", "hasattr", "(", "self", ",", "\"close_code\"", ")", ":", "self", ".", "close_code", "=", "1006", "if", "not", "hasattr", "(", "self", ",", "\"close_reason\"", ")", ":", "self", ".", "close_reason", "=", "\"\"", "logger", ".", "debug", "(", "\"%s x code = %d, reason = %s\"", ",", "self", ".", "side", ",", "self", ".", "close_code", ",", "self", ".", "close_reason", "or", "\"[no reason]\"", ",", ")", "self", ".", "abort_keepalive_pings", "(", ")", "# If self.connection_lost_waiter isn't pending, that's a bug, because:", "# - it's set only here in connection_lost() which is called only once;", "# - it must never be canceled.", "self", ".", "connection_lost_waiter", ".", "set_result", "(", "None", ")", "super", "(", ")", ".", "connection_lost", "(", "exc", ")" ]
38.916667
13.75
def list_gen(self, keyword=None, arg=None): """Generator for LIST command. See list() for more information. Yields: An element in the list returned by list(). """ if keyword: keyword = keyword.upper() if keyword is None or keyword == "ACTIVE": return self.list_active_gen(arg) if keyword == "ACTIVE.TIMES": return self.list_active_times_gen() if keyword == "DISTRIB.PATS": return self.list_distrib_pats_gen() if keyword == "HEADERS": return self.list_headers_gen(arg) if keyword == "NEWSGROUPS": return self.list_newsgroups_gen(arg) if keyword == "OVERVIEW.FMT": return self.list_overview_fmt_gen() if keyword == "EXTENSIONS": return self.list_extensions_gen() raise NotImplementedError()
[ "def", "list_gen", "(", "self", ",", "keyword", "=", "None", ",", "arg", "=", "None", ")", ":", "if", "keyword", ":", "keyword", "=", "keyword", ".", "upper", "(", ")", "if", "keyword", "is", "None", "or", "keyword", "==", "\"ACTIVE\"", ":", "return", "self", ".", "list_active_gen", "(", "arg", ")", "if", "keyword", "==", "\"ACTIVE.TIMES\"", ":", "return", "self", ".", "list_active_times_gen", "(", ")", "if", "keyword", "==", "\"DISTRIB.PATS\"", ":", "return", "self", ".", "list_distrib_pats_gen", "(", ")", "if", "keyword", "==", "\"HEADERS\"", ":", "return", "self", ".", "list_headers_gen", "(", "arg", ")", "if", "keyword", "==", "\"NEWSGROUPS\"", ":", "return", "self", ".", "list_newsgroups_gen", "(", "arg", ")", "if", "keyword", "==", "\"OVERVIEW.FMT\"", ":", "return", "self", ".", "list_overview_fmt_gen", "(", ")", "if", "keyword", "==", "\"EXTENSIONS\"", ":", "return", "self", ".", "list_extensions_gen", "(", ")", "raise", "NotImplementedError", "(", ")" ]
32.518519
11.518519
def sub_base_uri(self): """ This will return the sub_base_uri parsed from the base_uri :return: str of the sub_base_uri """ return self._base_uri and \ self._base_uri.split('://')[-1].split('.')[0] \ or self._base_uri
[ "def", "sub_base_uri", "(", "self", ")", ":", "return", "self", ".", "_base_uri", "and", "self", ".", "_base_uri", ".", "split", "(", "'://'", ")", "[", "-", "1", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "or", "self", ".", "_base_uri" ]
39
7.428571
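The property boils down to one expression over the URI; a standalone copy with concrete cases:

def sub_base_uri(base_uri):
    # Same expression as the property above, minus `self`.
    return base_uri and base_uri.split('://')[-1].split('.')[0] or base_uri

assert sub_base_uri('https://acme.example.com') == 'acme'
assert sub_base_uri('') == ''  # a falsy base_uri is returned untouched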
def clean_sources(self):
        """Like clean, but also resets and clears out the dataset's sources and source tables. """

        for src in self.dataset.sources:
            src.st_id = None
            src.t_id = None

        self.dataset.sources[:] = []
        self.dataset.source_tables[:] = []

        self.dataset.st_sequence_id = 1
[ "def", "clean_sources", "(", "self", ")", ":", "for", "src", "in", "self", ".", "dataset", ".", "sources", ":", "src", ".", "st_id", "=", "None", "src", ".", "t_id", "=", "None", "self", ".", "dataset", ".", "sources", "[", ":", "]", "=", "[", "]", "self", ".", "dataset", ".", "source_tables", "[", ":", "]", "=", "[", "]", "self", ".", "dataset", ".", "st_sequence_id", "=", "1" ]
28.9
12.8
def disconnect(filename=None): """ Connect to the local cache, so no internet connection is required. :returns: void """ global _CONNECTED if filename is not None: try: with open(filename, 'r') as f: _load_from_string(f.read()) except FileNotFoundError: raise USGSException("""The cache file '{0}' was not found, and I cannot disconnect without one. If you have not been given a cache.json file, then you can create a new one: >>> from earthquakes import earthquakes >>> earthquakes.connect() >>> earthquakes._start_editing() ... >>> earthquakes.get_report() ... >>> earthquakes._save_cache('{0}')""".format(filename)) for key in _CACHE.keys(): _CACHE_COUNTER[key] = 0 _CONNECTED = False
[ "def", "disconnect", "(", "filename", "=", "None", ")", ":", "global", "_CONNECTED", "if", "filename", "is", "not", "None", ":", "try", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "_load_from_string", "(", "f", ".", "read", "(", ")", ")", "except", "FileNotFoundError", ":", "raise", "USGSException", "(", "\"\"\"The cache file '{0}' was not found, and I cannot disconnect without one. If you have not been given a cache.json file, then you can create a new one:\n >>> from earthquakes import earthquakes\n >>> earthquakes.connect()\n >>> earthquakes._start_editing()\n ...\n >>> earthquakes.get_report()\n ...\n >>> earthquakes._save_cache('{0}')\"\"\"", ".", "format", "(", "filename", ")", ")", "for", "key", "in", "_CACHE", ".", "keys", "(", ")", ":", "_CACHE_COUNTER", "[", "key", "]", "=", "0", "_CONNECTED", "=", "False" ]
37.181818
11.227273
def within_polygon(self, polygon, distance=None, **kwargs):
        '''
        Select earthquakes within polygon

        :param polygon:
            Polygon as instance of nhlib.geo.polygon.Polygon class

        :param float distance:
            Buffer distance (km) (can take negative values)

        :returns:
            Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
            containing only selected events
        '''
        if distance:
            # If a distance is specified then dilate the polygon by distance
            zone_polygon = polygon.dilate(distance)
        else:
            zone_polygon = polygon

        # Make valid all events inside depth range
        upper_depth, lower_depth = _check_depth_limits(kwargs)
        valid_depth = np.logical_and(
            self.catalogue.data['depth'] >= upper_depth,
            self.catalogue.data['depth'] < lower_depth)

        # Events outside polygon returned to invalid assignment
        catalogue_mesh = Mesh(self.catalogue.data['longitude'],
                              self.catalogue.data['latitude'],
                              self.catalogue.data['depth'])
        valid_id = np.logical_and(valid_depth,
                                  zone_polygon.intersects(catalogue_mesh))

        return self.select_catalogue(valid_id)
[ "def", "within_polygon", "(", "self", ",", "polygon", ",", "distance", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "distance", ":", "# If a distance is specified then dilate the polyon by distance", "zone_polygon", "=", "polygon", ".", "dilate", "(", "distance", ")", "else", ":", "zone_polygon", "=", "polygon", "# Make valid all events inside depth range", "upper_depth", ",", "lower_depth", "=", "_check_depth_limits", "(", "kwargs", ")", "valid_depth", "=", "np", ".", "logical_and", "(", "self", ".", "catalogue", ".", "data", "[", "'depth'", "]", ">=", "upper_depth", ",", "self", ".", "catalogue", ".", "data", "[", "'depth'", "]", "<", "lower_depth", ")", "# Events outside polygon returned to invalid assignment", "catalogue_mesh", "=", "Mesh", "(", "self", ".", "catalogue", ".", "data", "[", "'longitude'", "]", ",", "self", ".", "catalogue", ".", "data", "[", "'latitude'", "]", ",", "self", ".", "catalogue", ".", "data", "[", "'depth'", "]", ")", "valid_id", "=", "np", ".", "logical_and", "(", "valid_depth", ",", "zone_polygon", ".", "intersects", "(", "catalogue_mesh", ")", ")", "return", "self", ".", "select_catalogue", "(", "valid_id", ")" ]
37.4
22.771429
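Driving the selector, assuming the openquake.hmtk packages this method comes from; the catalogue object is a placeholder:

from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.polygon import Polygon
from openquake.hmtk.seismicity.selector import CatalogueSelector  # assumption

catalogue = ...  # a loaded openquake.hmtk Catalogue (placeholder)
poly = Polygon([Point(0.0, 0.0), Point(0.0, 1.0),
                Point(1.0, 1.0), Point(1.0, 0.0)])
selector = CatalogueSelector(catalogue)
# 50 km buffer around the polygon; keep events shallower than 30 km.
subset = selector.within_polygon(poly, distance=50.0,
                                 upper_depth=0.0, lower_depth=30.0)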
def _get_connection_state(self, conn_or_int_id):
        """Get a connection's state by either conn_id or internal_id

        This routine must only be called from the internal worker thread.

        Args:
            conn_or_int_id (int, string): The external integer connection id
                or an internal string connection id
        """

        key = conn_or_int_id
        if isinstance(key, str):
            table = self._int_connections
        elif isinstance(key, int):
            table = self._connections
        else:
            raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key)

        if key not in table:
            return self.Disconnected

        data = table[key]
        return data['state']
[ "def", "_get_connection_state", "(", "self", ",", "conn_or_int_id", ")", ":", "key", "=", "conn_or_int_id", "if", "isinstance", "(", "key", ",", "str", ")", ":", "table", "=", "self", ".", "_int_connections", "elif", "isinstance", "(", "key", ",", "int", ")", ":", "table", "=", "self", ".", "_connections", "else", ":", "raise", "ArgumentError", "(", "\"You must supply either an int connection id or a string internal id to _get_connection_state\"", ",", "id", "=", "key", ")", "if", "key", "not", "in", "table", ":", "return", "self", ".", "Disconnected", "data", "=", "table", "[", "key", "]", "return", "data", "[", "'state'", "]" ]
33.826087
22.217391
def get_branch(self, repository_id, name, project=None, base_version_descriptor=None): """GetBranch. Retrieve statistics about a single branch. :param str repository_id: The name or ID of the repository. :param str name: Name of the branch. :param str project: Project ID or project name :param :class:`<GitVersionDescriptor> <azure.devops.v5_0.git.models.GitVersionDescriptor>` base_version_descriptor: Identifies the commit or branch to use as the base. :rtype: :class:`<GitBranchStats> <azure.devops.v5_0.git.models.GitBranchStats>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if repository_id is not None: route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str') query_parameters = {} if name is not None: query_parameters['name'] = self._serialize.query('name', name, 'str') if base_version_descriptor is not None: if base_version_descriptor.version_type is not None: query_parameters['baseVersionDescriptor.versionType'] = base_version_descriptor.version_type if base_version_descriptor.version is not None: query_parameters['baseVersionDescriptor.version'] = base_version_descriptor.version if base_version_descriptor.version_options is not None: query_parameters['baseVersionDescriptor.versionOptions'] = base_version_descriptor.version_options response = self._send(http_method='GET', location_id='d5b216de-d8d5-4d32-ae76-51df755b16d3', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('GitBranchStats', response)
[ "def", "get_branch", "(", "self", ",", "repository_id", ",", "name", ",", "project", "=", "None", ",", "base_version_descriptor", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "repository_id", "is", "not", "None", ":", "route_values", "[", "'repositoryId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'repository_id'", ",", "repository_id", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "name", "is", "not", "None", ":", "query_parameters", "[", "'name'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'name'", ",", "name", ",", "'str'", ")", "if", "base_version_descriptor", "is", "not", "None", ":", "if", "base_version_descriptor", ".", "version_type", "is", "not", "None", ":", "query_parameters", "[", "'baseVersionDescriptor.versionType'", "]", "=", "base_version_descriptor", ".", "version_type", "if", "base_version_descriptor", ".", "version", "is", "not", "None", ":", "query_parameters", "[", "'baseVersionDescriptor.version'", "]", "=", "base_version_descriptor", ".", "version", "if", "base_version_descriptor", ".", "version_options", "is", "not", "None", ":", "query_parameters", "[", "'baseVersionDescriptor.versionOptions'", "]", "=", "base_version_descriptor", ".", "version_options", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'d5b216de-d8d5-4d32-ae76-51df755b16d3'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'GitBranchStats'", ",", "response", ")" ]
64.066667
29
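A hedged usage sketch for get_branch. The organization URL, token, project, repository, and branch names are placeholders, and the client wiring follows the usual azure-devops Connection pattern:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholder credentials and names -- substitute real values.
connection = Connection(base_url="https://dev.azure.com/my-org",
                        creds=BasicAuthentication("", "my-personal-access-token"))
git_client = connection.clients.get_git_client()

# Fetch ahead/behind statistics for one branch of one repository.
stats = git_client.get_branch(repository_id="my-repo",
                              name="feature/login",
                              project="my-project")
print(stats.name, stats.ahead_count, stats.behind_count)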
def __perform_rest_call(self, requestURL, params=None, headers=None, restType='GET', body=None):
    """Returns the JSON representation of the response if the response
    status was OK; returns ``None`` otherwise.
    """
    auth, headers = self.__prepare_gprest_call(requestURL, params=params, headers=headers, restType=restType, body=body)
    if restType == 'GET':
        r = requests.get(requestURL, auth=auth, headers=headers, params=params)
    elif restType == 'PUT':
        r = requests.put(requestURL, data=body, auth=auth, headers=headers, params=params)
    elif restType == 'POST':
        r = requests.post(requestURL, data=body, auth=auth, headers=headers, params=params)
    elif restType == 'DELETE':
        r = requests.delete(requestURL, auth=auth, headers=headers, params=params)
    else:
        raise ValueError('Unsupported restType: %s' % restType)
    resp = self.__process_gprest_response(r, restType=restType)
    return resp
[ "def", "__perform_rest_call", "(", "self", ",", "requestURL", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "restType", "=", "'GET'", ",", "body", "=", "None", ")", ":", "auth", ",", "headers", "=", "self", ".", "__prepare_gprest_call", "(", "requestURL", ",", "params", "=", "params", ",", "headers", "=", "headers", ",", "restType", "=", "restType", ",", "body", "=", "body", ")", "if", "restType", "==", "'GET'", ":", "r", "=", "requests", ".", "get", "(", "requestURL", ",", "auth", "=", "auth", ",", "headers", "=", "headers", ",", "params", "=", "params", ")", "elif", "restType", "==", "'PUT'", ":", "r", "=", "requests", ".", "put", "(", "requestURL", ",", "data", "=", "body", ",", "auth", "=", "auth", ",", "headers", "=", "headers", ",", "params", "=", "params", ")", "elif", "restType", "==", "'POST'", ":", "r", "=", "requests", ".", "post", "(", "requestURL", ",", "data", "=", "body", ",", "auth", "=", "auth", ",", "headers", "=", "headers", ",", "params", "=", "params", ")", "elif", "restType", "==", "'DELETE'", ":", "r", "=", "requests", ".", "delete", "(", "requestURL", ",", "auth", "=", "auth", ",", "headers", "=", "headers", ",", "params", "=", "params", ")", "resp", "=", "self", ".", "__process_gprest_response", "(", "r", ",", "restType", "=", "restType", ")", "return", "resp" ]
62.6
28.933333
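The same verb dispatch can be written as a lookup table. This standalone sketch drops the private auth and response helpers (which are not shown in this record) and keeps the JSON-or-None contract:

import requests

def perform_rest_call(request_url, params=None, headers=None, rest_type="GET", body=None):
    # Map each verb to the requests helper it should use; only PUT and POST carry a body.
    dispatch = {
        "GET": lambda: requests.get(request_url, headers=headers, params=params),
        "PUT": lambda: requests.put(request_url, data=body, headers=headers, params=params),
        "POST": lambda: requests.post(request_url, data=body, headers=headers, params=params),
        "DELETE": lambda: requests.delete(request_url, headers=headers, params=params),
    }
    try:
        response = dispatch[rest_type]()
    except KeyError:
        raise ValueError("Unsupported rest_type: %s" % rest_type)
    # Mirror the original contract: JSON on success, None otherwise.
    return response.json() if response.ok else None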
def pad(self, val):
    """
    Left-pad the big-endian byte encoding of ``val`` with zero bytes
    to the byte length of ``self._prime``.

    :param val: integer value to encode and pad
    :rtype: bytes
    """
    padding = len(int_to_bytes(self._prime))
    padded = int_to_bytes(val).rjust(padding, b'\x00')
    return padded
[ "def", "pad", "(", "self", ",", "val", ")", ":", "padding", "=", "len", "(", "int_to_bytes", "(", "self", ".", "_prime", ")", ")", "padded", "=", "int_to_bytes", "(", "val", ")", ".", "rjust", "(", "padding", ",", "b'\\x00'", ")", "return", "padded" ]
26
13.25
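A self-contained sketch of the same zero-padding, with a minimal int_to_bytes stand-in (the record's helper is defined elsewhere) and the prime passed explicitly instead of read from self._prime:

def int_to_bytes(n):
    # Minimal big-endian encoding; a stand-in for the module's helper.
    return n.to_bytes(max(1, (n.bit_length() + 7) // 8), "big")

def pad(val, prime):
    # Right-justify val's encoding to the prime's byte length with zero bytes.
    width = len(int_to_bytes(prime))
    return int_to_bytes(val).rjust(width, b"\x00")

# A toy 16-bit "prime" pads a small value to two bytes.
assert pad(0x5, 0xFFF1) == b"\x00\x05"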
def get_statistics_by_account(self, account_id, term_id):
    """
    Returns statistics for the given account_id and term_id.

    https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.department_statistics
    """
    url = ("/api/v1/accounts/sis_account_id:%s/analytics/"
           "terms/sis_term_id:%s/statistics.json") % (account_id, term_id)
    return self._get_resource(url)
[ "def", "get_statistics_by_account", "(", "self", ",", "account_id", ",", "term_id", ")", ":", "url", "=", "(", "\"/api/v1/accounts/sis_account_id:%s/analytics/\"", "\"terms/sis_term_id:%s/statistics.json\"", ")", "%", "(", "account_id", ",", "term_id", ")", "return", "self", ".", "_get_resource", "(", "url", ")" ]
47.222222
23
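The URL construction is the only moving part here. This sketch shows the path a call would request, with placeholder SIS ids; the surrounding client class and _get_resource are not shown in this record:

account_id = "uwcourse"      # placeholder SIS account id
term_id = "2013-spring"      # placeholder SIS term id

url = ("/api/v1/accounts/sis_account_id:%s/analytics/"
       "terms/sis_term_id:%s/statistics.json") % (account_id, term_id)

# -> /api/v1/accounts/sis_account_id:uwcourse/analytics/terms/sis_term_id:2013-spring/statistics.json
print(url)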
def MatrixSolve(a, rhs, adj):
    """
    Matrix solve op. Solves ``a x = rhs`` for ``x``, using the adjoint of
    ``a`` when ``adj`` is true. The trailing comma wraps the solution in a
    1-tuple.
    """
    return np.linalg.solve(a if not adj else _adjoint(a), rhs),
[ "def", "MatrixSolve", "(", "a", ",", "rhs", ",", "adj", ")", ":", "return", "np", ".", "linalg", ".", "solve", "(", "a", "if", "not", "adj", "else", "_adjoint", "(", "a", ")", ",", "rhs", ")", "," ]
25.2
10.8
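A quick check of the solve-with-adjoint behavior in plain numpy; _adjoint is assumed to be the conjugate transpose, so it is inlined here:

import numpy as np

a = np.array([[2.0, 0.0],
              [1.0, 3.0]])
rhs = np.array([[4.0],
                [7.0]])

# adj=False: solve a @ x = rhs directly.
x = np.linalg.solve(a, rhs)
assert np.allclose(a @ x, rhs)

# adj=True: solve against the (conjugate) transpose instead.
x_adj = np.linalg.solve(a.conj().T, rhs)
assert np.allclose(a.conj().T @ x_adj, rhs)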
def create(cls, jar):
    """Creates an actual M2Coordinate from the given M2Coordinate-like object (eg a JarDependency).

    :API: public
    :param JarDependency jar: the input coordinate.
    :return: A new M2Coordinate, unless the input is already an M2Coordinate in which case it
      just returns the input unchanged.
    :rtype: M2Coordinate
    """
    if isinstance(jar, cls):
        return jar
    return cls(org=jar.org, name=jar.name, rev=jar.rev, classifier=jar.classifier, ext=jar.ext)
[ "def", "create", "(", "cls", ",", "jar", ")", ":", "if", "isinstance", "(", "jar", ",", "cls", ")", ":", "return", "jar", "return", "cls", "(", "org", "=", "jar", ".", "org", ",", "name", "=", "jar", ".", "name", ",", "rev", "=", "jar", ".", "rev", ",", "classifier", "=", "jar", ".", "classifier", ",", "ext", "=", "jar", ".", "ext", ")" ]
37.615385
23.461538
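The coerce-or-construct idiom in create is easy to exercise in isolation. This sketch uses hypothetical minimal stand-ins for M2Coordinate and JarDependency rather than the real Pants classes:

from collections import namedtuple

# Hypothetical stand-in: a JarDependency-like object with the same attributes.
FakeJar = namedtuple("FakeJar", "org name rev classifier ext")

class M2Coordinate(object):
    def __init__(self, org, name, rev=None, classifier=None, ext=None):
        self.org, self.name, self.rev = org, name, rev
        self.classifier, self.ext = classifier, ext

    @classmethod
    def create(cls, jar):
        # Already the right type: return it unchanged rather than copying.
        if isinstance(jar, cls):
            return jar
        return cls(org=jar.org, name=jar.name, rev=jar.rev,
                   classifier=jar.classifier, ext=jar.ext)

coord = M2Coordinate.create(FakeJar("org.example", "lib", "1.0", None, "jar"))
assert M2Coordinate.create(coord) is coord  # idempotent on its own type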