Columns:
  text          - string (lengths 75 to 104k characters)
  code_tokens   - sequence of string tokens
  avg_line_len  - float64 (values range from 7.91 to 980)
  score         - float64 (values range from 0 to 0.18)
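Each row below pairs a raw text field (the full function source, flattened onto one line in this preview) with its code_tokens and the two per-row statistics above. As a rough illustration of what avg_line_len measures, the sketch below computes a mean line length from a text value; the helper name and the exact definition (mean characters per non-empty line) are assumptions for illustration, not the dataset's actual preprocessing.

```python
# Minimal sketch (assumed, not the dataset's tooling): derive a value like
# avg_line_len from a row's raw `text` field. The definition used here
# (mean characters per non-empty line) is an assumption.
def avg_line_len(text: str) -> float:
    lines = [line for line in text.splitlines() if line.strip()]
    if not lines:
        return 0.0
    return sum(len(line) for line in lines) / len(lines)


if __name__ == "__main__":
    sample = (
        "def valid_str(x: str) -> bool:\n"
        "    return isinstance(x, str) and bool(x.strip())\n"
    )
    print(avg_line_len(sample))  # average characters per non-empty line
```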
def if_begin_action(self, text, loc, arg): """Code executed after recognising an if statement (if keyword)""" exshared.setpos(loc, text) if DEBUG > 0: print("IF_BEGIN:",arg) if DEBUG == 2: self.symtab.display() if DEBUG > 2: return self.false_label_number += 1 self.label_number = self.false_label_number self.codegen.newline_label("if{0}".format(self.label_number), True, True)
[ "def", "if_begin_action", "(", "self", ",", "text", ",", "loc", ",", "arg", ")", ":", "exshared", ".", "setpos", "(", "loc", ",", "text", ")", "if", "DEBUG", ">", "0", ":", "print", "(", "\"IF_BEGIN:\"", ",", "arg", ")", "if", "DEBUG", "==", "2", ":", "self", ".", "symtab", ".", "display", "(", ")", "if", "DEBUG", ">", "2", ":", "return", "self", ".", "false_label_number", "+=", "1", "self", ".", "label_number", "=", "self", ".", "false_label_number", "self", ".", "codegen", ".", "newline_label", "(", "\"if{0}\"", ".", "format", "(", "self", ".", "label_number", ")", ",", "True", ",", "True", ")" ]
46.2
0.012739
def wait_for_operation_completion(self, operation, timeout): """Waits until the given operation is done with a given timeout in milliseconds; specify -1 for an indefinite wait. See :py:func:`wait_for_completion` for event queue considerations. in operation of type int Number of the operation to wait for. Must be less than :py:func:`operation_count` . in timeout of type int Maximum time in milliseconds to wait or -1 to wait indefinitely. raises :class:`VBoxErrorIprtError` Failed to wait for operation completion. """ if not isinstance(operation, baseinteger): raise TypeError("operation can only be an instance of type baseinteger") if not isinstance(timeout, baseinteger): raise TypeError("timeout can only be an instance of type baseinteger") self._call("waitForOperationCompletion", in_p=[operation, timeout])
[ "def", "wait_for_operation_completion", "(", "self", ",", "operation", ",", "timeout", ")", ":", "if", "not", "isinstance", "(", "operation", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"operation can only be an instance of type baseinteger\"", ")", "if", "not", "isinstance", "(", "timeout", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"timeout can only be an instance of type baseinteger\"", ")", "self", ".", "_call", "(", "\"waitForOperationCompletion\"", ",", "in_p", "=", "[", "operation", ",", "timeout", "]", ")" ]
42.956522
0.006931
def list_and_add(a, b): """ Concatenate anything into a list. Args: a: the first thing b: the second thing Returns: list. All the things in a list. """ if not isinstance(b, list): b = [b] if not isinstance(a, list): a = [a] return a + b
[ "def", "list_and_add", "(", "a", ",", "b", ")", ":", "if", "not", "isinstance", "(", "b", ",", "list", ")", ":", "b", "=", "[", "b", "]", "if", "not", "isinstance", "(", "a", ",", "list", ")", ":", "a", "=", "[", "a", "]", "return", "a", "+", "b" ]
18.4375
0.003226
def vesting_balance_withdraw(self, vesting_id, amount=None, account=None, **kwargs): """ Withdraw vesting balance :param str vesting_id: Id of the vesting object :param bitshares.amount.Amount Amount: to withdraw ("all" if not provided") :param str account: (optional) the account to allow access to (defaults to ``default_account``) """ if not account: if "default_account" in self.config: account = self.config["default_account"] if not account: raise ValueError("You need to provide an account") account = Account(account, blockchain_instance=self) if not amount: obj = Vesting(vesting_id, blockchain_instance=self) amount = obj.claimable op = operations.Vesting_balance_withdraw( **{ "fee": {"amount": 0, "asset_id": "1.3.0"}, "vesting_balance": vesting_id, "owner": account["id"], "amount": {"amount": int(amount), "asset_id": amount["asset"]["id"]}, "prefix": self.prefix, } ) return self.finalizeOp(op, account["name"], "active")
[ "def", "vesting_balance_withdraw", "(", "self", ",", "vesting_id", ",", "amount", "=", "None", ",", "account", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "account", ":", "if", "\"default_account\"", "in", "self", ".", "config", ":", "account", "=", "self", ".", "config", "[", "\"default_account\"", "]", "if", "not", "account", ":", "raise", "ValueError", "(", "\"You need to provide an account\"", ")", "account", "=", "Account", "(", "account", ",", "blockchain_instance", "=", "self", ")", "if", "not", "amount", ":", "obj", "=", "Vesting", "(", "vesting_id", ",", "blockchain_instance", "=", "self", ")", "amount", "=", "obj", ".", "claimable", "op", "=", "operations", ".", "Vesting_balance_withdraw", "(", "*", "*", "{", "\"fee\"", ":", "{", "\"amount\"", ":", "0", ",", "\"asset_id\"", ":", "\"1.3.0\"", "}", ",", "\"vesting_balance\"", ":", "vesting_id", ",", "\"owner\"", ":", "account", "[", "\"id\"", "]", ",", "\"amount\"", ":", "{", "\"amount\"", ":", "int", "(", "amount", ")", ",", "\"asset_id\"", ":", "amount", "[", "\"asset\"", "]", "[", "\"id\"", "]", "}", ",", "\"prefix\"", ":", "self", ".", "prefix", ",", "}", ")", "return", "self", ".", "finalizeOp", "(", "op", ",", "account", "[", "\"name\"", "]", ",", "\"active\"", ")" ]
39.290323
0.003205
def ws_disconnect(message): """ Channels connection close. Deregister the client """ language = message.channel_session['knocker'] gr = Group('knocker-{0}'.format(language)) gr.discard(message.reply_channel)
[ "def", "ws_disconnect", "(", "message", ")", ":", "language", "=", "message", ".", "channel_session", "[", "'knocker'", "]", "gr", "=", "Group", "(", "'knocker-{0}'", ".", "format", "(", "language", ")", ")", "gr", ".", "discard", "(", "message", ".", "reply_channel", ")" ]
28.5
0.004255
def add_nodes(self, node_name_list, dataframe=False): """ Add new nodes to the network :param node_name_list: list of node names, e.g. ['a', 'b', 'c'] :param dataframe: If True, return a pandas dataframe instead of a dict. :return: A dict mapping names to SUIDs for the newly-created nodes. """ res = self.session.post(self.__url + 'nodes', data=json.dumps(node_name_list), headers=HEADERS) check_response(res) nodes = res.json() if dataframe: return pd.DataFrame(nodes).set_index(['SUID']) else: return {node['name']: node['SUID'] for node in nodes}
[ "def", "add_nodes", "(", "self", ",", "node_name_list", ",", "dataframe", "=", "False", ")", ":", "res", "=", "self", ".", "session", ".", "post", "(", "self", ".", "__url", "+", "'nodes'", ",", "data", "=", "json", ".", "dumps", "(", "node_name_list", ")", ",", "headers", "=", "HEADERS", ")", "check_response", "(", "res", ")", "nodes", "=", "res", ".", "json", "(", ")", "if", "dataframe", ":", "return", "pd", ".", "DataFrame", "(", "nodes", ")", ".", "set_index", "(", "[", "'SUID'", "]", ")", "else", ":", "return", "{", "node", "[", "'name'", "]", ":", "node", "[", "'SUID'", "]", "for", "node", "in", "nodes", "}" ]
43.266667
0.004525
def _parse_proc_mount(self): """Parse /proc/mounts""" """ cgroup /cgroup/cpu cgroup rw,relatime,cpuacct,cpu,release_agent=/sbin/cgroup_clean 0 0 cgroup /cgroup/memory cgroup rw,relatime,memory 0 0 cgroup /cgroup/blkio cgroup rw,relatime,blkio 0 0 cgroup /cgroup/freezer cgroup rw,relatime,freezer 0 0 """ for line in fileops.readlines('/proc/mounts'): if 'cgroup' not in line: continue items = line.split(' ') path = items[1] opts = items[3].split(',') name = None for opt in opts: if opt in self: name = opt self.paths[name] = path if 'name=' in opt: # We treat name=XXX as its name name = opt self.paths[name] = path self[name] = {} self[name]['name'] = name self[name]['enabled'] = True self[name]['hierarchy'] = 0 self[name]['num_cgroups'] = 0 # release_agent= may appear before name= for opt in opts: if 'release_agent=' in opt: self[name]['release_agent'] = opt.replace('release_agent=', '')
[ "def", "_parse_proc_mount", "(", "self", ")", ":", "\"\"\"\n cgroup /cgroup/cpu cgroup rw,relatime,cpuacct,cpu,release_agent=/sbin/cgroup_clean 0 0\n cgroup /cgroup/memory cgroup rw,relatime,memory 0 0\n cgroup /cgroup/blkio cgroup rw,relatime,blkio 0 0\n cgroup /cgroup/freezer cgroup rw,relatime,freezer 0 0\n \"\"\"", "for", "line", "in", "fileops", ".", "readlines", "(", "'/proc/mounts'", ")", ":", "if", "'cgroup'", "not", "in", "line", ":", "continue", "items", "=", "line", ".", "split", "(", "' '", ")", "path", "=", "items", "[", "1", "]", "opts", "=", "items", "[", "3", "]", ".", "split", "(", "','", ")", "name", "=", "None", "for", "opt", "in", "opts", ":", "if", "opt", "in", "self", ":", "name", "=", "opt", "self", ".", "paths", "[", "name", "]", "=", "path", "if", "'name='", "in", "opt", ":", "# We treat name=XXX as its name", "name", "=", "opt", "self", ".", "paths", "[", "name", "]", "=", "path", "self", "[", "name", "]", "=", "{", "}", "self", "[", "name", "]", "[", "'name'", "]", "=", "name", "self", "[", "name", "]", "[", "'enabled'", "]", "=", "True", "self", "[", "name", "]", "[", "'hierarchy'", "]", "=", "0", "self", "[", "name", "]", "[", "'num_cgroups'", "]", "=", "0", "# release_agent= may appear before name=", "for", "opt", "in", "opts", ":", "if", "'release_agent='", "in", "opt", ":", "self", "[", "name", "]", "[", "'release_agent'", "]", "=", "opt", ".", "replace", "(", "'release_agent='", ",", "''", ")" ]
36.388889
0.002974
def _compile(self, source, filename): """Override jinja's compilation to stash the rendered source inside the python linecache for debugging. """ if filename == '<template>': # make a better filename filename = 'dbt-{}'.format( codecs.encode(os.urandom(12), 'hex').decode('ascii') ) # encode, though I don't think this matters filename = jinja2._compat.encode_filename(filename) # put ourselves in the cache linecache.cache[filename] = ( len(source), None, [line + '\n' for line in source.splitlines()], filename ) return super(MacroFuzzEnvironment, self)._compile(source, filename)
[ "def", "_compile", "(", "self", ",", "source", ",", "filename", ")", ":", "if", "filename", "==", "'<template>'", ":", "# make a better filename", "filename", "=", "'dbt-{}'", ".", "format", "(", "codecs", ".", "encode", "(", "os", ".", "urandom", "(", "12", ")", ",", "'hex'", ")", ".", "decode", "(", "'ascii'", ")", ")", "# encode, though I don't think this matters", "filename", "=", "jinja2", ".", "_compat", ".", "encode_filename", "(", "filename", ")", "# put ourselves in the cache", "linecache", ".", "cache", "[", "filename", "]", "=", "(", "len", "(", "source", ")", ",", "None", ",", "[", "line", "+", "'\\n'", "for", "line", "in", "source", ".", "splitlines", "(", ")", "]", ",", "filename", ")", "return", "super", "(", "MacroFuzzEnvironment", ",", "self", ")", ".", "_compile", "(", "source", ",", "filename", ")" ]
39
0.002503
def get(self, block=True, timeout=None): """ Removes and returns an item from the queue. """ value = self._queue.get(block, timeout) if self._queue.empty(): self.clear() return value
[ "def", "get", "(", "self", ",", "block", "=", "True", ",", "timeout", "=", "None", ")", ":", "value", "=", "self", ".", "_queue", ".", "get", "(", "block", ",", "timeout", ")", "if", "self", ".", "_queue", ".", "empty", "(", ")", ":", "self", ".", "clear", "(", ")", "return", "value" ]
36.833333
0.00885
def valid_str(x: str) -> bool: """ Return ``True`` if ``x`` is a non-blank string; otherwise return ``False``. """ if isinstance(x, str) and x.strip(): return True else: return False
[ "def", "valid_str", "(", "x", ":", "str", ")", "->", "bool", ":", "if", "isinstance", "(", "x", ",", "str", ")", "and", "x", ".", "strip", "(", ")", ":", "return", "True", "else", ":", "return", "False" ]
23.777778
0.004505
def poplistitem(self, last=True): """ Pop and return a key:valuelist item comprised of a key and that key's list of values. If <last> is False, a key:valuelist item comprised of keys()[0] and its list of values is popped and returned. If <last> is True, a key:valuelist item comprised of keys()[-1] and its list of values is popped and returned. Example: omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)]) omd.poplistitem(last=True) == (3,[3]) omd.poplistitem(last=False) == (1,[1,11,111]) Params: last: Boolean whether to pop the first or last key and its associated list of values. Raises: KeyError if the dictionary is empty. Returns: A two-tuple comprised of the first or last key and its associated list of values. """ if not self._items: s = 'poplistitem(): %s is empty' % self.__class__.__name__ raise KeyError(s) key = self.keys()[-1 if last else 0] return key, self.poplist(key)
[ "def", "poplistitem", "(", "self", ",", "last", "=", "True", ")", ":", "if", "not", "self", ".", "_items", ":", "s", "=", "'poplistitem(): %s is empty'", "%", "self", ".", "__class__", ".", "__name__", "raise", "KeyError", "(", "s", ")", "key", "=", "self", ".", "keys", "(", ")", "[", "-", "1", "if", "last", "else", "0", "]", "return", "key", ",", "self", ".", "poplist", "(", "key", ")" ]
40.961538
0.001835
def read_raw(self, length, *, error=None): """Read raw packet data.""" if length is None: length = len(self) raw = dict( packet=self._read_fileng(length), error=error or None, ) return raw
[ "def", "read_raw", "(", "self", ",", "length", ",", "*", ",", "error", "=", "None", ")", ":", "if", "length", "is", "None", ":", "length", "=", "len", "(", "self", ")", "raw", "=", "dict", "(", "packet", "=", "self", ".", "_read_fileng", "(", "length", ")", ",", "error", "=", "error", "or", "None", ",", ")", "return", "raw" ]
23.272727
0.007519
def list_operations(self, name, filter_, page_size=0, options=None): """ Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns ``UNIMPLEMENTED``. NOTE: the ``name`` binding below allows API services to override the binding to use different resource name schemes, such as ``users/*/operations``. Example: >>> from google.gapic.longrunning import operations_client >>> from google.gax import CallOptions, INITIAL_PAGE >>> api = operations_client.OperationsClient() >>> name = '' >>> filter_ = '' >>> >>> # Iterate over all results >>> for element in api.list_operations(name, filter_): >>> # process element >>> pass >>> >>> # Or iterate over results one page at a time >>> for page in api.list_operations(name, filter_, options=CallOptions(page_token=INITIAL_PAGE)): >>> for element in page: >>> # process element >>> pass Args: name (string): The name of the operation collection. filter_ (string): The standard list filter. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. options (:class:`google.gax.CallOptions`): Overrides the default settings for this call, e.g, timeout, retries etc. Returns: A :class:`google.gax.PageIterator` instance. By default, this is an iterable of :class:`google.longrunning.operations_pb2.Operation` instances. This object can also be configured to iterate over the pages of the response through the `CallOptions` parameter. Raises: :exc:`google.gax.errors.GaxError` if the RPC is aborted. :exc:`ValueError` if the parameters are invalid. """ # Create the request object. request = operations_pb2.ListOperationsRequest( name=name, filter=filter_, page_size=page_size) return self._list_operations(request, options)
[ "def", "list_operations", "(", "self", ",", "name", ",", "filter_", ",", "page_size", "=", "0", ",", "options", "=", "None", ")", ":", "# Create the request object.", "request", "=", "operations_pb2", ".", "ListOperationsRequest", "(", "name", "=", "name", ",", "filter", "=", "filter_", ",", "page_size", "=", "page_size", ")", "return", "self", ".", "_list_operations", "(", "request", ",", "options", ")" ]
47.06
0.002082
def send(self, *fields): """ Serialize and send the given fields using the IB socket protocol. """ if not self.isConnected(): raise ConnectionError('Not connected') msg = io.StringIO() for field in fields: typ = type(field) if field in (None, UNSET_INTEGER, UNSET_DOUBLE): s = '' elif typ in (str, int, float): s = str(field) elif typ is bool: s = '1' if field else '0' elif typ is list: # list of TagValue s = ''.join(f'{v.tag}={v.value};' for v in field) elif isinstance(field, Contract): c = field s = '\0'.join(str(f) for f in ( c.conId, c.symbol, c.secType, c.lastTradeDateOrContractMonth, c.strike, c.right, c.multiplier, c.exchange, c.primaryExchange, c.currency, c.localSymbol, c.tradingClass)) else: s = str(field) msg.write(s) msg.write('\0') self.sendMsg(msg.getvalue())
[ "def", "send", "(", "self", ",", "*", "fields", ")", ":", "if", "not", "self", ".", "isConnected", "(", ")", ":", "raise", "ConnectionError", "(", "'Not connected'", ")", "msg", "=", "io", ".", "StringIO", "(", ")", "for", "field", "in", "fields", ":", "typ", "=", "type", "(", "field", ")", "if", "field", "in", "(", "None", ",", "UNSET_INTEGER", ",", "UNSET_DOUBLE", ")", ":", "s", "=", "''", "elif", "typ", "in", "(", "str", ",", "int", ",", "float", ")", ":", "s", "=", "str", "(", "field", ")", "elif", "typ", "is", "bool", ":", "s", "=", "'1'", "if", "field", "else", "'0'", "elif", "typ", "is", "list", ":", "# list of TagValue", "s", "=", "''", ".", "join", "(", "f'{v.tag}={v.value};'", "for", "v", "in", "field", ")", "elif", "isinstance", "(", "field", ",", "Contract", ")", ":", "c", "=", "field", "s", "=", "'\\0'", ".", "join", "(", "str", "(", "f", ")", "for", "f", "in", "(", "c", ".", "conId", ",", "c", ".", "symbol", ",", "c", ".", "secType", ",", "c", ".", "lastTradeDateOrContractMonth", ",", "c", ".", "strike", ",", "c", ".", "right", ",", "c", ".", "multiplier", ",", "c", ".", "exchange", ",", "c", ".", "primaryExchange", ",", "c", ".", "currency", ",", "c", ".", "localSymbol", ",", "c", ".", "tradingClass", ")", ")", "else", ":", "s", "=", "str", "(", "field", ")", "msg", ".", "write", "(", "s", ")", "msg", ".", "write", "(", "'\\0'", ")", "self", ".", "sendMsg", "(", "msg", ".", "getvalue", "(", ")", ")" ]
36.09375
0.001686
def sparse_is_desireable(lhs, rhs): ''' Examines a pair of matrices and determines if the result of their multiplication should be sparse or not. ''' return False if len(lhs.shape) == 1: return False else: lhs_rows, lhs_cols = lhs.shape if len(rhs.shape) == 1: rhs_rows = 1 rhs_cols = rhs.size else: rhs_rows, rhs_cols = rhs.shape result_size = lhs_rows * rhs_cols if sp.issparse(lhs) and sp.issparse(rhs): return True elif sp.issparse(lhs): lhs_zero_rows = lhs_rows - np.unique(lhs.nonzero()[0]).size rhs_zero_cols = np.all(rhs==0, axis=0).sum() elif sp.issparse(rhs): lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = rhs_cols- np.unique(rhs.nonzero()[1]).size else: lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = np.all(rhs==0, axis=0).sum() num_zeros = lhs_zero_rows * rhs_cols + rhs_zero_cols * lhs_rows - lhs_zero_rows * rhs_zero_cols # A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst cases cutoff. return (float(num_zeros) / float(size)) >= 0.5
[ "def", "sparse_is_desireable", "(", "lhs", ",", "rhs", ")", ":", "return", "False", "if", "len", "(", "lhs", ".", "shape", ")", "==", "1", ":", "return", "False", "else", ":", "lhs_rows", ",", "lhs_cols", "=", "lhs", ".", "shape", "if", "len", "(", "rhs", ".", "shape", ")", "==", "1", ":", "rhs_rows", "=", "1", "rhs_cols", "=", "rhs", ".", "size", "else", ":", "rhs_rows", ",", "rhs_cols", "=", "rhs", ".", "shape", "result_size", "=", "lhs_rows", "*", "rhs_cols", "if", "sp", ".", "issparse", "(", "lhs", ")", "and", "sp", ".", "issparse", "(", "rhs", ")", ":", "return", "True", "elif", "sp", ".", "issparse", "(", "lhs", ")", ":", "lhs_zero_rows", "=", "lhs_rows", "-", "np", ".", "unique", "(", "lhs", ".", "nonzero", "(", ")", "[", "0", "]", ")", ".", "size", "rhs_zero_cols", "=", "np", ".", "all", "(", "rhs", "==", "0", ",", "axis", "=", "0", ")", ".", "sum", "(", ")", "elif", "sp", ".", "issparse", "(", "rhs", ")", ":", "lhs_zero_rows", "=", "np", ".", "all", "(", "lhs", "==", "0", ",", "axis", "=", "1", ")", ".", "sum", "(", ")", "rhs_zero_cols", "=", "rhs_cols", "-", "np", ".", "unique", "(", "rhs", ".", "nonzero", "(", ")", "[", "1", "]", ")", ".", "size", "else", ":", "lhs_zero_rows", "=", "np", ".", "all", "(", "lhs", "==", "0", ",", "axis", "=", "1", ")", ".", "sum", "(", ")", "rhs_zero_cols", "=", "np", ".", "all", "(", "rhs", "==", "0", ",", "axis", "=", "0", ")", ".", "sum", "(", ")", "num_zeros", "=", "lhs_zero_rows", "*", "rhs_cols", "+", "rhs_zero_cols", "*", "lhs_rows", "-", "lhs_zero_rows", "*", "rhs_zero_cols", "# A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst cases cutoff.", "return", "(", "float", "(", "num_zeros", ")", "/", "float", "(", "size", ")", ")", ">=", "0.5" ]
40.171429
0.006944
def prim(G, start, weight='weight'): """ Algorithm for finding a minimum spanning tree for a weighted undirected graph. """ if len(connected_components(G)) != 1: raise GraphInsertError("Prim algorithm work with connected graph only") if start not in G.vertices: raise GraphInsertError("Vertex %s doesn't exist." % (start,)) pred = {} key = {} pqueue = {} lowest = 0 for edge in G.edges: if G.edges[edge][weight] > lowest: lowest = G.edges[edge][weight] for vertex in G.vertices: pred[vertex] = None key[vertex] = 2 * lowest key[start] = 0 for vertex in G.vertices: pqueue[vertex] = key[vertex] while pqueue: current = popmin(pqueue, lowest) for neighbor in G.vertices[current]: if (neighbor in pqueue and G.edges[(current, neighbor)][weight] < key[neighbor]): pred[neighbor] = current key[neighbor] = G.edges[(current, neighbor)][weight] pqueue[neighbor] = G.edges[(current, neighbor)][weight] return pred
[ "def", "prim", "(", "G", ",", "start", ",", "weight", "=", "'weight'", ")", ":", "if", "len", "(", "connected_components", "(", "G", ")", ")", "!=", "1", ":", "raise", "GraphInsertError", "(", "\"Prim algorithm work with connected graph only\"", ")", "if", "start", "not", "in", "G", ".", "vertices", ":", "raise", "GraphInsertError", "(", "\"Vertex %s doesn't exist.\"", "%", "(", "start", ",", ")", ")", "pred", "=", "{", "}", "key", "=", "{", "}", "pqueue", "=", "{", "}", "lowest", "=", "0", "for", "edge", "in", "G", ".", "edges", ":", "if", "G", ".", "edges", "[", "edge", "]", "[", "weight", "]", ">", "lowest", ":", "lowest", "=", "G", ".", "edges", "[", "edge", "]", "[", "weight", "]", "for", "vertex", "in", "G", ".", "vertices", ":", "pred", "[", "vertex", "]", "=", "None", "key", "[", "vertex", "]", "=", "2", "*", "lowest", "key", "[", "start", "]", "=", "0", "for", "vertex", "in", "G", ".", "vertices", ":", "pqueue", "[", "vertex", "]", "=", "key", "[", "vertex", "]", "while", "pqueue", ":", "current", "=", "popmin", "(", "pqueue", ",", "lowest", ")", "for", "neighbor", "in", "G", ".", "vertices", "[", "current", "]", ":", "if", "(", "neighbor", "in", "pqueue", "and", "G", ".", "edges", "[", "(", "current", ",", "neighbor", ")", "]", "[", "weight", "]", "<", "key", "[", "neighbor", "]", ")", ":", "pred", "[", "neighbor", "]", "=", "current", "key", "[", "neighbor", "]", "=", "G", ".", "edges", "[", "(", "current", ",", "neighbor", ")", "]", "[", "weight", "]", "pqueue", "[", "neighbor", "]", "=", "G", ".", "edges", "[", "(", "current", ",", "neighbor", ")", "]", "[", "weight", "]", "return", "pred" ]
36.290323
0.000866
def display(self): """Display the recordings.""" if self.data is None: return if self.scene is not None: self.y_scrollbar_value = self.verticalScrollBar().value() self.scene.clear() self.create_chan_labels() self.create_time_labels() window_start = self.parent.value('window_start') window_length = self.parent.value('window_length') time_height = max([x.boundingRect().height() for x in self.idx_time]) label_width = window_length * self.parent.value('label_ratio') scene_height = (len(self.idx_label) * self.parent.value('y_distance') + time_height) self.scene = QGraphicsScene(window_start - label_width, 0, window_length + label_width, scene_height) self.setScene(self.scene) self.idx_markers = [] self.idx_annot = [] self.idx_annot_labels = [] self.add_chan_labels() self.add_time_labels() self.add_traces() self.display_grid() self.display_markers() self.display_annotations() self.resizeEvent(None) self.verticalScrollBar().setValue(self.y_scrollbar_value) self.parent.info.display_view() self.parent.overview.display_current()
[ "def", "display", "(", "self", ")", ":", "if", "self", ".", "data", "is", "None", ":", "return", "if", "self", ".", "scene", "is", "not", "None", ":", "self", ".", "y_scrollbar_value", "=", "self", ".", "verticalScrollBar", "(", ")", ".", "value", "(", ")", "self", ".", "scene", ".", "clear", "(", ")", "self", ".", "create_chan_labels", "(", ")", "self", ".", "create_time_labels", "(", ")", "window_start", "=", "self", ".", "parent", ".", "value", "(", "'window_start'", ")", "window_length", "=", "self", ".", "parent", ".", "value", "(", "'window_length'", ")", "time_height", "=", "max", "(", "[", "x", ".", "boundingRect", "(", ")", ".", "height", "(", ")", "for", "x", "in", "self", ".", "idx_time", "]", ")", "label_width", "=", "window_length", "*", "self", ".", "parent", ".", "value", "(", "'label_ratio'", ")", "scene_height", "=", "(", "len", "(", "self", ".", "idx_label", ")", "*", "self", ".", "parent", ".", "value", "(", "'y_distance'", ")", "+", "time_height", ")", "self", ".", "scene", "=", "QGraphicsScene", "(", "window_start", "-", "label_width", ",", "0", ",", "window_length", "+", "label_width", ",", "scene_height", ")", "self", ".", "setScene", "(", "self", ".", "scene", ")", "self", ".", "idx_markers", "=", "[", "]", "self", ".", "idx_annot", "=", "[", "]", "self", ".", "idx_annot_labels", "=", "[", "]", "self", ".", "add_chan_labels", "(", ")", "self", ".", "add_time_labels", "(", ")", "self", ".", "add_traces", "(", ")", "self", ".", "display_grid", "(", ")", "self", ".", "display_markers", "(", ")", "self", ".", "display_annotations", "(", ")", "self", ".", "resizeEvent", "(", "None", ")", "self", ".", "verticalScrollBar", "(", ")", ".", "setValue", "(", "self", ".", "y_scrollbar_value", ")", "self", ".", "parent", ".", "info", ".", "display_view", "(", ")", "self", ".", "parent", ".", "overview", ".", "display_current", "(", ")" ]
33.414634
0.001418
def get_subnets(context, limit=None, page_reverse=False, sorts=['id'], marker=None, filters=None, fields=None): """Retrieve a list of subnets. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a subnet as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictiontary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. : param fields: a list of strings that are valid keys in a subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_subnets for tenant %s with filters %s fields %s" % (context.tenant_id, filters, fields)) filters = filters or {} subnets = db_api.subnet_find(context, limit=limit, page_reverse=page_reverse, sorts=sorts, marker_obj=marker, join_dns=True, join_routes=True, join_pool=True, **filters) for subnet in subnets: cache = subnet.get("_allocation_pool_cache") if not cache: db_api.subnet_update_set_alloc_pool_cache( context, subnet, subnet.allocation_pools) return v._make_subnets_list(subnets, fields=fields)
[ "def", "get_subnets", "(", "context", ",", "limit", "=", "None", ",", "page_reverse", "=", "False", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "filters", "=", "None", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_subnets for tenant %s with filters %s fields %s\"", "%", "(", "context", ".", "tenant_id", ",", "filters", ",", "fields", ")", ")", "filters", "=", "filters", "or", "{", "}", "subnets", "=", "db_api", ".", "subnet_find", "(", "context", ",", "limit", "=", "limit", ",", "page_reverse", "=", "page_reverse", ",", "sorts", "=", "sorts", ",", "marker_obj", "=", "marker", ",", "join_dns", "=", "True", ",", "join_routes", "=", "True", ",", "join_pool", "=", "True", ",", "*", "*", "filters", ")", "for", "subnet", "in", "subnets", ":", "cache", "=", "subnet", ".", "get", "(", "\"_allocation_pool_cache\"", ")", "if", "not", "cache", ":", "db_api", ".", "subnet_update_set_alloc_pool_cache", "(", "context", ",", "subnet", ",", "subnet", ".", "allocation_pools", ")", "return", "v", ".", "_make_subnets_list", "(", "subnets", ",", "fields", "=", "fields", ")" ]
50.848485
0.000585
def to_array(self): """ Serializes this OrderInfo to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(OrderInfo, self).to_array() if self.name is not None: array['name'] = u(self.name) # py2: type unicode, py3: type str if self.phone_number is not None: array['phone_number'] = u(self.phone_number) # py2: type unicode, py3: type str if self.email is not None: array['email'] = u(self.email) # py2: type unicode, py3: type str if self.shipping_address is not None: array['shipping_address'] = self.shipping_address.to_array() # type ShippingAddress return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "OrderInfo", ",", "self", ")", ".", "to_array", "(", ")", "if", "self", ".", "name", "is", "not", "None", ":", "array", "[", "'name'", "]", "=", "u", "(", "self", ".", "name", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "phone_number", "is", "not", "None", ":", "array", "[", "'phone_number'", "]", "=", "u", "(", "self", ".", "phone_number", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "email", "is", "not", "None", ":", "array", "[", "'email'", "]", "=", "u", "(", "self", ".", "email", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "shipping_address", "is", "not", "None", ":", "array", "[", "'shipping_address'", "]", "=", "self", ".", "shipping_address", ".", "to_array", "(", ")", "# type ShippingAddress", "return", "array" ]
43.117647
0.00534
def only(iterable, default=None, too_long=None): """If *iterable* has only one item, return it. If it has zero items, return *default*. If it has more than one item, raise the exception given by *too_long*, which is ``ValueError`` by default. >>> only([], default='missing') 'missing' >>> only([1]) 1 >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: too many items in iterable (expected 1)' >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError Note that :func:`only` attempts to advance *iterable* twice to ensure there is only one item. See :func:`spy` or :func:`peekable` to check iterable contents less destructively. """ it = iter(iterable) value = next(it, default) try: next(it) except StopIteration: pass else: raise too_long or ValueError('too many items in iterable (expected 1)') return value
[ "def", "only", "(", "iterable", ",", "default", "=", "None", ",", "too_long", "=", "None", ")", ":", "it", "=", "iter", "(", "iterable", ")", "value", "=", "next", "(", "it", ",", "default", ")", "try", ":", "next", "(", "it", ")", "except", "StopIteration", ":", "pass", "else", ":", "raise", "too_long", "or", "ValueError", "(", "'too many items in iterable (expected 1)'", ")", "return", "value" ]
30.235294
0.000943
def fromfile(file_, threadpool_size=None, ignore_lock=False): """ Instantiate BlockStorageRAM device from a file saved in block storage format. The file_ argument can be a file object or a string that represents a filename. If called with a file object, it should be opened in binary mode, and the caller is responsible for closing the file. This method returns a BlockStorageRAM instance. """ close_file = False if not hasattr(file_, 'read'): file_ = open(file_, 'rb') close_file = True try: header_data = file_.read(BlockStorageRAM._index_offset) block_size, block_count, user_header_size, locked = \ struct.unpack( BlockStorageRAM._index_struct_string, header_data) if locked and (not ignore_lock): raise IOError( "Can not open block storage device because it is " "locked by another process. To ignore this check, " "call this method with the keyword 'ignore_lock' " "set to True.") header_offset = len(header_data) + \ user_header_size f = bytearray(header_offset + \ (block_size * block_count)) f[:header_offset] = header_data + file_.read(user_header_size) f[header_offset:] = file_.read(block_size * block_count) finally: if close_file: file_.close() return BlockStorageRAM(f, threadpool_size=threadpool_size, ignore_lock=ignore_lock)
[ "def", "fromfile", "(", "file_", ",", "threadpool_size", "=", "None", ",", "ignore_lock", "=", "False", ")", ":", "close_file", "=", "False", "if", "not", "hasattr", "(", "file_", ",", "'read'", ")", ":", "file_", "=", "open", "(", "file_", ",", "'rb'", ")", "close_file", "=", "True", "try", ":", "header_data", "=", "file_", ".", "read", "(", "BlockStorageRAM", ".", "_index_offset", ")", "block_size", ",", "block_count", ",", "user_header_size", ",", "locked", "=", "struct", ".", "unpack", "(", "BlockStorageRAM", ".", "_index_struct_string", ",", "header_data", ")", "if", "locked", "and", "(", "not", "ignore_lock", ")", ":", "raise", "IOError", "(", "\"Can not open block storage device because it is \"", "\"locked by another process. To ignore this check, \"", "\"call this method with the keyword 'ignore_lock' \"", "\"set to True.\"", ")", "header_offset", "=", "len", "(", "header_data", ")", "+", "user_header_size", "f", "=", "bytearray", "(", "header_offset", "+", "(", "block_size", "*", "block_count", ")", ")", "f", "[", ":", "header_offset", "]", "=", "header_data", "+", "file_", ".", "read", "(", "user_header_size", ")", "f", "[", "header_offset", ":", "]", "=", "file_", ".", "read", "(", "block_size", "*", "block_count", ")", "finally", ":", "if", "close_file", ":", "file_", ".", "close", "(", ")", "return", "BlockStorageRAM", "(", "f", ",", "threadpool_size", "=", "threadpool_size", ",", "ignore_lock", "=", "ignore_lock", ")" ]
42.585366
0.003359
def _dims2shape(*dims): """Convert input dimensions to a shape.""" if not dims: raise ValueError("expected at least one dimension spec") shape = list() for dim in dims: if isinstance(dim, int): dim = (0, dim) if isinstance(dim, tuple) and len(dim) == 2: if dim[0] < 0: raise ValueError("expected low dimension to be >= 0") if dim[1] < 0: raise ValueError("expected high dimension to be >= 0") if dim[0] > dim[1]: raise ValueError("expected low <= high dimensions") start, stop = dim else: raise TypeError("expected dimension to be int or (int, int)") shape.append((start, stop)) return tuple(shape)
[ "def", "_dims2shape", "(", "*", "dims", ")", ":", "if", "not", "dims", ":", "raise", "ValueError", "(", "\"expected at least one dimension spec\"", ")", "shape", "=", "list", "(", ")", "for", "dim", "in", "dims", ":", "if", "isinstance", "(", "dim", ",", "int", ")", ":", "dim", "=", "(", "0", ",", "dim", ")", "if", "isinstance", "(", "dim", ",", "tuple", ")", "and", "len", "(", "dim", ")", "==", "2", ":", "if", "dim", "[", "0", "]", "<", "0", ":", "raise", "ValueError", "(", "\"expected low dimension to be >= 0\"", ")", "if", "dim", "[", "1", "]", "<", "0", ":", "raise", "ValueError", "(", "\"expected high dimension to be >= 0\"", ")", "if", "dim", "[", "0", "]", ">", "dim", "[", "1", "]", ":", "raise", "ValueError", "(", "\"expected low <= high dimensions\"", ")", "start", ",", "stop", "=", "dim", "else", ":", "raise", "TypeError", "(", "\"expected dimension to be int or (int, int)\"", ")", "shape", ".", "append", "(", "(", "start", ",", "stop", ")", ")", "return", "tuple", "(", "shape", ")" ]
37.95
0.001285
def rerunTask(self, *args, **kwargs): """ Rerun a Resolved Task This method _reruns_ a previously resolved task, even if it was _completed_. This is useful if your task completes unsuccessfully, and you just want to run it from scratch again. This will also reset the number of `retries` allowed. This method is deprecated in favour of creating a new task with the same task definition (but with a new taskId). Remember that `retries` in the task status counts the number of runs that the queue have started because the worker stopped responding, for example because a spot node died. **Remark** this operation is idempotent, if you try to rerun a task that is not either `failed` or `completed`, this operation will just return the current task status. This method gives output: ``v1/task-status-response.json#`` This method is ``deprecated`` """ return self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs)
[ "def", "rerunTask", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"rerunTask\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
40.115385
0.005618
def _load_rule_file(self, filename): """Import the given rule file""" if not (os.path.exists(filename)): sys.stderr.write("rflint: %s: No such file or directory\n" % filename) return try: basename = os.path.basename(filename) (name, ext) = os.path.splitext(basename) imp.load_source(name, filename) except Exception as e: sys.stderr.write("rflint: %s: exception while loading: %s\n" % (filename, str(e)))
[ "def", "_load_rule_file", "(", "self", ",", "filename", ")", ":", "if", "not", "(", "os", ".", "path", ".", "exists", "(", "filename", ")", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"rflint: %s: No such file or directory\\n\"", "%", "filename", ")", "return", "try", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "(", "name", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "imp", ".", "load_source", "(", "name", ",", "filename", ")", "except", "Exception", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "\"rflint: %s: exception while loading: %s\\n\"", "%", "(", "filename", ",", "str", "(", "e", ")", ")", ")" ]
45.272727
0.007874
def as_square_array(arr): """Return arr massaged into a square array. Raises ValueError if arr cannot be so massaged. """ arr = np.atleast_2d(arr) if len(arr.shape) != 2 or arr.shape[0] != arr.shape[1]: raise ValueError("Expected square array") return arr
[ "def", "as_square_array", "(", "arr", ")", ":", "arr", "=", "np", ".", "atleast_2d", "(", "arr", ")", "if", "len", "(", "arr", ".", "shape", ")", "!=", "2", "or", "arr", ".", "shape", "[", "0", "]", "!=", "arr", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Expected square array\"", ")", "return", "arr" ]
31.111111
0.006944
def b_rgb(self, r, g, b, text=None, fore=None, style=None): """ A chained method that sets the back color to an RGB value. Arguments: r : Red value. g : Green value. b : Blue value. text : Text to style if not building up color codes. fore : Fore color for the text. style : Style for the text. """ return self.chained(text=text, fore=fore, back=(r, g, b), style=style)
[ "def", "b_rgb", "(", "self", ",", "r", ",", "g", ",", "b", ",", "text", "=", "None", ",", "fore", "=", "None", ",", "style", "=", "None", ")", ":", "return", "self", ".", "chained", "(", "text", "=", "text", ",", "fore", "=", "fore", ",", "back", "=", "(", "r", ",", "g", ",", "b", ")", ",", "style", "=", "style", ")" ]
45.909091
0.003883
def find_exe(name, multi=False, path=None): """ Locate a command. Search your local filesystem for an executable and return the first matching file with executable permission. Args: name (str): globstr of matching filename multi (bool): if True return all matches instead of just the first. Defaults to False. path (str or Iterable[PathLike]): overrides the system PATH variable. Returns: PathLike or List[PathLike] or None: returns matching executable(s). SeeAlso: shutil.which - which is available in Python 3.3+. Notes: This is essentially the `which` UNIX command References: https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028 https://docs.python.org/dev/library/shutil.html#shutil.which Example: >>> find_exe('ls') >>> find_exe('ping') >>> assert find_exe('which') == find_exe(find_exe('which')) >>> find_exe('which', multi=True) >>> find_exe('ping', multi=True) >>> find_exe('cmake', multi=True) >>> find_exe('nvcc', multi=True) >>> find_exe('noexist', multi=True) Example: >>> assert not find_exe('noexist', multi=False) >>> assert find_exe('ping', multi=False) >>> assert not find_exe('noexist', multi=True) >>> assert find_exe('ping', multi=True) Benchmark: >>> # xdoctest: +IGNORE_WANT >>> import ubelt as ub >>> import shutil >>> for timer in ub.Timerit(100, bestof=10, label='ub.find_exe'): >>> ub.find_exe('which') >>> for timer in ub.Timerit(100, bestof=10, label='shutil.which'): >>> shutil.which('which') Timed best=58.71 µs, mean=59.64 ± 0.96 µs for ub.find_exe Timed best=72.75 µs, mean=73.07 ± 0.22 µs for shutil.which """ candidates = find_path(name, path=path, exact=True) mode = os.X_OK | os.F_OK results = (fpath for fpath in candidates if os.access(fpath, mode) and not isdir(fpath)) if not multi: for fpath in results: return fpath else: return list(results)
[ "def", "find_exe", "(", "name", ",", "multi", "=", "False", ",", "path", "=", "None", ")", ":", "candidates", "=", "find_path", "(", "name", ",", "path", "=", "path", ",", "exact", "=", "True", ")", "mode", "=", "os", ".", "X_OK", "|", "os", ".", "F_OK", "results", "=", "(", "fpath", "for", "fpath", "in", "candidates", "if", "os", ".", "access", "(", "fpath", ",", "mode", ")", "and", "not", "isdir", "(", "fpath", ")", ")", "if", "not", "multi", ":", "for", "fpath", "in", "results", ":", "return", "fpath", "else", ":", "return", "list", "(", "results", ")" ]
33.4375
0.000454
def delete_object(self, cont, obj): ''' Delete a file from Swift ''' try: self.conn.delete_object(cont, obj) return True except Exception as exc: log.error('There was an error::') if hasattr(exc, 'code') and hasattr(exc, 'msg'): log.error(' Code: %s: %s', exc.code, exc.msg) log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))()) return False
[ "def", "delete_object", "(", "self", ",", "cont", ",", "obj", ")", ":", "try", ":", "self", ".", "conn", ".", "delete_object", "(", "cont", ",", "obj", ")", "return", "True", "except", "Exception", "as", "exc", ":", "log", ".", "error", "(", "'There was an error::'", ")", "if", "hasattr", "(", "exc", ",", "'code'", ")", "and", "hasattr", "(", "exc", ",", "'msg'", ")", ":", "log", ".", "error", "(", "' Code: %s: %s'", ",", "exc", ".", "code", ",", "exc", ".", "msg", ")", "log", ".", "error", "(", "' Content: \\n%s'", ",", "getattr", "(", "exc", ",", "'read'", ",", "lambda", ":", "six", ".", "text_type", "(", "exc", ")", ")", "(", ")", ")", "return", "False" ]
37.615385
0.005988
def rgChromaticity(img): ''' returns the normalized RGB space (RGB/intensity) see https://en.wikipedia.org/wiki/Rg_chromaticity ''' out = _calc(img) if img.dtype == np.uint8: out = (255 * out).astype(np.uint8) return out
[ "def", "rgChromaticity", "(", "img", ")", ":", "out", "=", "_calc", "(", "img", ")", "if", "img", ".", "dtype", "==", "np", ".", "uint8", ":", "out", "=", "(", "255", "*", "out", ")", ".", "astype", "(", "np", ".", "uint8", ")", "return", "out" ]
28.444444
0.003788
def run_to_states(self): """Property for the _run_to_states field """ self.execution_engine_lock.acquire() return_value = self._run_to_states self.execution_engine_lock.release() return return_value
[ "def", "run_to_states", "(", "self", ")", ":", "self", ".", "execution_engine_lock", ".", "acquire", "(", ")", "return_value", "=", "self", ".", "_run_to_states", "self", ".", "execution_engine_lock", ".", "release", "(", ")", "return", "return_value" ]
30
0.008097
def configure(cls, name, registry_host: str="0.0.0.0", registry_port: int=4500, pubsub_host: str="0.0.0.0", pubsub_port: int=6379): """ A convenience method for providing registry and pubsub(redis) endpoints :param name: Used for process name :param registry_host: IP Address for vyked-registry; default = 0.0.0.0 :param registry_port: Port for vyked-registry; default = 4500 :param pubsub_host: IP Address for pubsub component, usually redis; default = 0.0.0.0 :param pubsub_port: Port for pubsub component; default= 6379 :return: None """ Host.name = name Host.registry_host = registry_host Host.registry_port = registry_port Host.pubsub_host = pubsub_host Host.pubsub_port = pubsub_port
[ "def", "configure", "(", "cls", ",", "name", ",", "registry_host", ":", "str", "=", "\"0.0.0.0\"", ",", "registry_port", ":", "int", "=", "4500", ",", "pubsub_host", ":", "str", "=", "\"0.0.0.0\"", ",", "pubsub_port", ":", "int", "=", "6379", ")", ":", "Host", ".", "name", "=", "name", "Host", ".", "registry_host", "=", "registry_host", "Host", ".", "registry_port", "=", "registry_port", "Host", ".", "pubsub_host", "=", "pubsub_host", "Host", ".", "pubsub_port", "=", "pubsub_port" ]
49.8125
0.01601
def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if _is_unpacked_egg(path_item): yield Distribution.from_filename( path_item, metadata=PathMetadata( path_item, os.path.join(path_item, 'EGG-INFO') ) ) return entries = safe_listdir(path_item) # for performance, before sorting by version, # screen entries for only those that will yield # distributions filtered = ( entry for entry in entries if dist_factory(path_item, entry, only) ) # scan for .egg and .egg-info in directory path_item_entries = _by_version_descending(filtered) for entry in path_item_entries: fullpath = os.path.join(path_item, entry) factory = dist_factory(path_item, entry, only) for dist in factory(fullpath): yield dist
[ "def", "find_on_path", "(", "importer", ",", "path_item", ",", "only", "=", "False", ")", ":", "path_item", "=", "_normalize_cached", "(", "path_item", ")", "if", "_is_unpacked_egg", "(", "path_item", ")", ":", "yield", "Distribution", ".", "from_filename", "(", "path_item", ",", "metadata", "=", "PathMetadata", "(", "path_item", ",", "os", ".", "path", ".", "join", "(", "path_item", ",", "'EGG-INFO'", ")", ")", ")", "return", "entries", "=", "safe_listdir", "(", "path_item", ")", "# for performance, before sorting by version,", "# screen entries for only those that will yield", "# distributions", "filtered", "=", "(", "entry", "for", "entry", "in", "entries", "if", "dist_factory", "(", "path_item", ",", "entry", ",", "only", ")", ")", "# scan for .egg and .egg-info in directory", "path_item_entries", "=", "_by_version_descending", "(", "filtered", ")", "for", "entry", "in", "path_item_entries", ":", "fullpath", "=", "os", ".", "path", ".", "join", "(", "path_item", ",", "entry", ")", "factory", "=", "dist_factory", "(", "path_item", ",", "entry", ",", "only", ")", "for", "dist", "in", "factory", "(", "fullpath", ")", ":", "yield", "dist" ]
31.4
0.00103
def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. Will attempt best case reconstruction, which means it will return nan for handle_missing and handle_unknown settings that break the bijection. We issue warnings when some of those cases occur. Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ X = X_in.copy(deep=True) # first check the type X = util.convert_input(X) if self._dim is None: raise ValueError( 'Must train encoder before it can be used to inverse_transform data') # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "set as False when transform data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,)) if not self.cols: return X if self.return_df else X.values if self.handle_unknown == 'value': for col in self.cols: if any(X[col] == -1): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category -1 when encode %s" % (col,)) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[col].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) for switch in self.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.get_values()) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) return X if self.return_df else X.values
[ "def", "inverse_transform", "(", "self", ",", "X_in", ")", ":", "X", "=", "X_in", ".", "copy", "(", "deep", "=", "True", ")", "# first check the type", "X", "=", "util", ".", "convert_input", "(", "X", ")", "if", "self", ".", "_dim", "is", "None", ":", "raise", "ValueError", "(", "'Must train encoder before it can be used to inverse_transform data'", ")", "# then make sure that it is the right size", "if", "X", ".", "shape", "[", "1", "]", "!=", "self", ".", "_dim", ":", "if", "self", ".", "drop_invariant", ":", "raise", "ValueError", "(", "\"Unexpected input dimension %d, the attribute drop_invariant should \"", "\"set as False when transform data\"", "%", "(", "X", ".", "shape", "[", "1", "]", ",", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unexpected input dimension %d, expected %d'", "%", "(", "X", ".", "shape", "[", "1", "]", ",", "self", ".", "_dim", ",", ")", ")", "if", "not", "self", ".", "cols", ":", "return", "X", "if", "self", ".", "return_df", "else", "X", ".", "values", "if", "self", ".", "handle_unknown", "==", "'value'", ":", "for", "col", "in", "self", ".", "cols", ":", "if", "any", "(", "X", "[", "col", "]", "==", "-", "1", ")", ":", "warnings", ".", "warn", "(", "\"inverse_transform is not supported because transform impute \"", "\"the unknown category -1 when encode %s\"", "%", "(", "col", ",", ")", ")", "if", "self", ".", "handle_unknown", "==", "'return_nan'", "and", "self", ".", "handle_missing", "==", "'return_nan'", ":", "for", "col", "in", "self", ".", "cols", ":", "if", "X", "[", "col", "]", ".", "isnull", "(", ")", ".", "any", "(", ")", ":", "warnings", ".", "warn", "(", "\"inverse_transform is not supported because transform impute \"", "\"the unknown category nan when encode %s\"", "%", "(", "col", ",", ")", ")", "for", "switch", "in", "self", ".", "mapping", ":", "column_mapping", "=", "switch", ".", "get", "(", "'mapping'", ")", "inverse", "=", "pd", ".", "Series", "(", "data", "=", "column_mapping", ".", "index", ",", "index", "=", "column_mapping", ".", "get_values", "(", ")", ")", "X", "[", "switch", ".", "get", "(", "'col'", ")", "]", "=", "X", "[", "switch", ".", "get", "(", "'col'", ")", "]", ".", "map", "(", "inverse", ")", ".", "astype", "(", "switch", ".", "get", "(", "'data_type'", ")", ")", "return", "X", "if", "self", ".", "return_df", "else", "X", ".", "values" ]
41.283019
0.006696
def pb_for_delete(document_path, option): """Make a ``Write`` protobuf for ``delete()`` methods. Args: document_path (str): A fully-qualified document path. option (optional[~.firestore_v1beta1.client.WriteOption]): A write option to make assertions / preconditions on the server state of the document before applying changes. Returns: google.cloud.firestore_v1beta1.types.Write: A ``Write`` protobuf instance for the ``delete()``. """ write_pb = write_pb2.Write(delete=document_path) if option is not None: option.modify_write(write_pb) return write_pb
[ "def", "pb_for_delete", "(", "document_path", ",", "option", ")", ":", "write_pb", "=", "write_pb2", ".", "Write", "(", "delete", "=", "document_path", ")", "if", "option", "is", "not", "None", ":", "option", ".", "modify_write", "(", "write_pb", ")", "return", "write_pb" ]
34.944444
0.001548
def user_in_group(user, group): """Returns True if the given user is in given group""" if isinstance(group, Group): return user_is_superuser(user) or group in user.groups.all() elif isinstance(group, six.string_types): return user_is_superuser(user) or user.groups.filter(name=group).exists() raise TypeError("'group' argument must be a string or a Group instance")
[ "def", "user_in_group", "(", "user", ",", "group", ")", ":", "if", "isinstance", "(", "group", ",", "Group", ")", ":", "return", "user_is_superuser", "(", "user", ")", "or", "group", "in", "user", ".", "groups", ".", "all", "(", ")", "elif", "isinstance", "(", "group", ",", "six", ".", "string_types", ")", ":", "return", "user_is_superuser", "(", "user", ")", "or", "user", ".", "groups", ".", "filter", "(", "name", "=", "group", ")", ".", "exists", "(", ")", "raise", "TypeError", "(", "\"'group' argument must be a string or a Group instance\"", ")" ]
55.857143
0.005038
def to_point(self, timestamp): """Get a Point conversion of this aggregation. :type timestamp: :class: `datetime.datetime` :param timestamp: The time to report the point as having been recorded. :rtype: :class: `opencensus.metrics.export.point.Point` :return: a :class: `opencensus.metrics.export.value.ValueDouble`-valued Point with value equal to `sum_data`. """ return point.Point(value.ValueDouble(self.sum_data), timestamp)
[ "def", "to_point", "(", "self", ",", "timestamp", ")", ":", "return", "point", ".", "Point", "(", "value", ".", "ValueDouble", "(", "self", ".", "sum_data", ")", ",", "timestamp", ")" ]
44
0.004049
def agg(self, aggregations): """Multiple aggregations optimized. Parameters ---------- aggregations : list of str Which aggregations to perform. Returns ------- Series Series with resulting aggregations. """ check_type(aggregations, list) new_index = Index(np.array(aggregations, dtype=np.bytes_), np.dtype(np.bytes_)) return _series_agg(self, aggregations, new_index)
[ "def", "agg", "(", "self", ",", "aggregations", ")", ":", "check_type", "(", "aggregations", ",", "list", ")", "new_index", "=", "Index", "(", "np", ".", "array", "(", "aggregations", ",", "dtype", "=", "np", ".", "bytes_", ")", ",", "np", ".", "dtype", "(", "np", ".", "bytes_", ")", ")", "return", "_series_agg", "(", "self", ",", "aggregations", ",", "new_index", ")" ]
24.578947
0.006186
def match(self, name, chamber=None): """ If this matcher has uniquely seen a matching name, return its value. Otherwise, return None. If chamber is set then the search will be limited to legislators with matching chamber. If chamber is None then the search will be cross-chamber. """ try: return self._manual[chamber][name] except KeyError: pass if chamber == 'joint': chamber = None try: return self._codes[chamber][name] except KeyError: pass if chamber not in self._names: logger.warning("Chamber %s is invalid for a legislator." % ( chamber )) return None name = self._normalize(name) return self._names[chamber].get(name, None)
[ "def", "match", "(", "self", ",", "name", ",", "chamber", "=", "None", ")", ":", "try", ":", "return", "self", ".", "_manual", "[", "chamber", "]", "[", "name", "]", "except", "KeyError", ":", "pass", "if", "chamber", "==", "'joint'", ":", "chamber", "=", "None", "try", ":", "return", "self", ".", "_codes", "[", "chamber", "]", "[", "name", "]", "except", "KeyError", ":", "pass", "if", "chamber", "not", "in", "self", ".", "_names", ":", "logger", ".", "warning", "(", "\"Chamber %s is invalid for a legislator.\"", "%", "(", "chamber", ")", ")", "return", "None", "name", "=", "self", ".", "_normalize", "(", "name", ")", "return", "self", ".", "_names", "[", "chamber", "]", ".", "get", "(", "name", ",", "None", ")" ]
28
0.002301
def read_user_choice(var_name, options): """Prompt the user to choose from several options for the given variable. The first item will be returned if no input happens. :param str var_name: Variable as specified in the context :param list options: Sequence of options that are available to select from :return: Exactly one item of ``options`` that has been chosen by the user """ # Please see http://click.pocoo.org/4/api/#click.prompt if not isinstance(options, list): raise TypeError if not options: raise ValueError choice_map = OrderedDict( (u'{}'.format(i), value) for i, value in enumerate(options, 1) ) choices = choice_map.keys() default = u'1' choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()] prompt = u'\n'.join(( u'Select {}:'.format(var_name), u'\n'.join(choice_lines), u'Choose from {}'.format(u', '.join(choices)) )) user_choice = click.prompt( prompt, type=click.Choice(choices), default=default ) return choice_map[user_choice]
[ "def", "read_user_choice", "(", "var_name", ",", "options", ")", ":", "# Please see http://click.pocoo.org/4/api/#click.prompt", "if", "not", "isinstance", "(", "options", ",", "list", ")", ":", "raise", "TypeError", "if", "not", "options", ":", "raise", "ValueError", "choice_map", "=", "OrderedDict", "(", "(", "u'{}'", ".", "format", "(", "i", ")", ",", "value", ")", "for", "i", ",", "value", "in", "enumerate", "(", "options", ",", "1", ")", ")", "choices", "=", "choice_map", ".", "keys", "(", ")", "default", "=", "u'1'", "choice_lines", "=", "[", "u'{} - {}'", ".", "format", "(", "*", "c", ")", "for", "c", "in", "choice_map", ".", "items", "(", ")", "]", "prompt", "=", "u'\\n'", ".", "join", "(", "(", "u'Select {}:'", ".", "format", "(", "var_name", ")", ",", "u'\\n'", ".", "join", "(", "choice_lines", ")", ",", "u'Choose from {}'", ".", "format", "(", "u', '", ".", "join", "(", "choices", ")", ")", ")", ")", "user_choice", "=", "click", ".", "prompt", "(", "prompt", ",", "type", "=", "click", ".", "Choice", "(", "choices", ")", ",", "default", "=", "default", ")", "return", "choice_map", "[", "user_choice", "]" ]
32.30303
0.000911
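For the read_user_choice record above, a minimal usage sketch; the import path is an assumption (in cookiecutter this helper lives in the prompt module), and the option list is made up for illustration:

    from cookiecutter.prompt import read_user_choice  # assumed import path

    # Prompts on the terminal with a numbered menu and returns the chosen value.
    license_name = read_user_choice('license', ['MIT', 'BSD-3-Clause', 'GPL-3.0'])
    print('Selected license:', license_name)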
def options(argv=[]): """ A helper function that returns a dictionary of the default key-value pairs """ parser = HendrixOptionParser parsed_args = parser.parse_args(argv) return vars(parsed_args[0])
[ "def", "options", "(", "argv", "=", "[", "]", ")", ":", "parser", "=", "HendrixOptionParser", "parsed_args", "=", "parser", ".", "parse_args", "(", "argv", ")", "return", "vars", "(", "parsed_args", "[", "0", "]", ")" ]
31.142857
0.004464
def warning(self, *args) -> "Err": """ Creates a warning message """ error = self._create_err("warning", *args) print(self._errmsg(error)) return error
[ "def", "warning", "(", "self", ",", "*", "args", ")", "->", "\"Err\"", ":", "error", "=", "self", ".", "_create_err", "(", "\"warning\"", ",", "*", "args", ")", "print", "(", "self", ".", "_errmsg", "(", "error", ")", ")", "return", "error" ]
27.571429
0.01005
def easeInOutBack(n, s=1.70158): """A "back-in" tween function that overshoots both the start and destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). """ _checkRange(n) n = n * 2 if n < 1: s *= 1.525 return 0.5 * (n * n * ((s + 1) * n - s)) else: n -= 2 s *= 1.525 return 0.5 * (n * n * ((s + 1) * n + s) + 2)
[ "def", "easeInOutBack", "(", "n", ",", "s", "=", "1.70158", ")", ":", "_checkRange", "(", "n", ")", "n", "=", "n", "*", "2", "if", "n", "<", "1", ":", "s", "*=", "1.525", "return", "0.5", "*", "(", "n", "*", "n", "*", "(", "(", "s", "+", "1", ")", "*", "n", "-", "s", ")", ")", "else", ":", "n", "-=", "2", "s", "*=", "1.525", "return", "0.5", "*", "(", "n", "*", "n", "*", "(", "(", "s", "+", "1", ")", "*", "n", "+", "s", ")", "+", "2", ")" ]
29.055556
0.005556
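A short sketch of how the easing above behaves when sampled (this assumes the function is importable from pytweening, which matches the signature shown; adjust the import if it lives elsewhere):

    import pytweening

    # The "back" easing dips slightly below 0 near the start and above 1 near the end.
    for t in (0.0, 0.25, 0.5, 0.75, 1.0):
        print(t, round(pytweening.easeInOutBack(t), 4))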
def get_details_letssingit(song_name): ''' Gets the song details if song details not found through spotify ''' song_name = improvename.songname(song_name) url = "http://search.letssingit.com/cgi-exe/am.cgi?a=search&artist_id=&l=archive&s=" + \ quote(song_name.encode('utf-8')) html = urlopen(url).read() soup = BeautifulSoup(html, "html.parser") link = soup.find('a', {'class': 'high_profile'}) try: link = link.get('href') link = urlopen(link).read() soup = BeautifulSoup(link, "html.parser") album_div = soup.find('div', {'id': 'albums'}) title_div = soup.find('div', {'id': 'content_artist'}).find('h1') try: lyrics = soup.find('div', {'id': 'lyrics'}).text lyrics = lyrics[3:] except AttributeError: lyrics = "" log.log_error("* Couldn't find lyrics", indented=True) try: song_title = title_div.contents[0] song_title = song_title[1:-8] except AttributeError: log.log_error("* Couldn't reset song title", indented=True) song_title = song_name try: artist = title_div.contents[1].getText() except AttributeError: log.log_error("* Couldn't find artist name", indented=True) artist = "Unknown" try: album = album_div.find('a').contents[0] album = album[:-7] except AttributeError: log.log_error("* Couldn't find the album name", indented=True) album = artist except AttributeError: log.log_error("* Couldn't find song details", indented=True) album = song_name song_title = song_name artist = "Unknown" lyrics = "" match_bool, score = matching_details(song_name, song_title, artist) return artist, album, song_title, lyrics, match_bool, score
[ "def", "get_details_letssingit", "(", "song_name", ")", ":", "song_name", "=", "improvename", ".", "songname", "(", "song_name", ")", "url", "=", "\"http://search.letssingit.com/cgi-exe/am.cgi?a=search&artist_id=&l=archive&s=\"", "+", "quote", "(", "song_name", ".", "encode", "(", "'utf-8'", ")", ")", "html", "=", "urlopen", "(", "url", ")", ".", "read", "(", ")", "soup", "=", "BeautifulSoup", "(", "html", ",", "\"html.parser\"", ")", "link", "=", "soup", ".", "find", "(", "'a'", ",", "{", "'class'", ":", "'high_profile'", "}", ")", "try", ":", "link", "=", "link", ".", "get", "(", "'href'", ")", "link", "=", "urlopen", "(", "link", ")", ".", "read", "(", ")", "soup", "=", "BeautifulSoup", "(", "link", ",", "\"html.parser\"", ")", "album_div", "=", "soup", ".", "find", "(", "'div'", ",", "{", "'id'", ":", "'albums'", "}", ")", "title_div", "=", "soup", ".", "find", "(", "'div'", ",", "{", "'id'", ":", "'content_artist'", "}", ")", ".", "find", "(", "'h1'", ")", "try", ":", "lyrics", "=", "soup", ".", "find", "(", "'div'", ",", "{", "'id'", ":", "'lyrics'", "}", ")", ".", "text", "lyrics", "=", "lyrics", "[", "3", ":", "]", "except", "AttributeError", ":", "lyrics", "=", "\"\"", "log", ".", "log_error", "(", "\"* Couldn't find lyrics\"", ",", "indented", "=", "True", ")", "try", ":", "song_title", "=", "title_div", ".", "contents", "[", "0", "]", "song_title", "=", "song_title", "[", "1", ":", "-", "8", "]", "except", "AttributeError", ":", "log", ".", "log_error", "(", "\"* Couldn't reset song title\"", ",", "indented", "=", "True", ")", "song_title", "=", "song_name", "try", ":", "artist", "=", "title_div", ".", "contents", "[", "1", "]", ".", "getText", "(", ")", "except", "AttributeError", ":", "log", ".", "log_error", "(", "\"* Couldn't find artist name\"", ",", "indented", "=", "True", ")", "artist", "=", "\"Unknown\"", "try", ":", "album", "=", "album_div", ".", "find", "(", "'a'", ")", ".", "contents", "[", "0", "]", "album", "=", "album", "[", ":", "-", "7", "]", "except", "AttributeError", ":", "log", ".", "log_error", "(", "\"* Couldn't find the album name\"", ",", "indented", "=", "True", ")", "album", "=", "artist", "except", "AttributeError", ":", "log", ".", "log_error", "(", "\"* Couldn't find song details\"", ",", "indented", "=", "True", ")", "album", "=", "song_name", "song_title", "=", "song_name", "artist", "=", "\"Unknown\"", "lyrics", "=", "\"\"", "match_bool", ",", "score", "=", "matching_details", "(", "song_name", ",", "song_title", ",", "artist", ")", "return", "artist", ",", "album", ",", "song_title", ",", "lyrics", ",", "match_bool", ",", "score" ]
31.813559
0.001034
def new_result(self, loss, budget, parameters, update_model=True): """ Function to register finished runs. Every time a run has finished, this function should be called to register it with the loss. Parameters: ----------- loss: float the loss of the parameters budget: float the budget of the parameters parameters: dict the parameters of this trial update_model: bool whether use this parameter to update BP model Returns ------- None """ if loss is None: # One could skip crashed results, but we decided # assign a +inf loss and count them as bad configurations loss = np.inf if budget not in self.configs.keys(): self.configs[budget] = [] self.losses[budget] = [] # skip model building if we already have a bigger model if max(list(self.kde_models.keys()) + [-np.inf]) > budget: return # We want to get a numerical representation of the configuration in the original space conf = ConfigSpace.Configuration(self.configspace, parameters) self.configs[budget].append(conf.get_array()) self.losses[budget].append(loss) # skip model building: # a) if not enough points are available if len(self.configs[budget]) <= self.min_points_in_model - 1: logger.debug("Only %i run(s) for budget %f available, need more than %s \ -> can't build model!"%(len(self.configs[budget]), budget, self.min_points_in_model+1)) return # b) during warnm starting when we feed previous results in and only update once if not update_model: return train_configs = np.array(self.configs[budget]) train_losses = np.array(self.losses[budget]) n_good = max(self.min_points_in_model, (self.top_n_percent * train_configs.shape[0])//100) n_bad = max(self.min_points_in_model, ((100-self.top_n_percent)*train_configs.shape[0])//100) # Refit KDE for the current budget idx = np.argsort(train_losses) train_data_good = self.impute_conditional_data(train_configs[idx[:n_good]]) train_data_bad = self.impute_conditional_data(train_configs[idx[n_good:n_good+n_bad]]) if train_data_good.shape[0] <= train_data_good.shape[1]: return if train_data_bad.shape[0] <= train_data_bad.shape[1]: return #more expensive crossvalidation method #bw_estimation = 'cv_ls' # quick rule of thumb bw_estimation = 'normal_reference' bad_kde = sm.nonparametric.KDEMultivariate(data=train_data_bad, var_type=self.kde_vartypes, bw=bw_estimation) good_kde = sm.nonparametric.KDEMultivariate(data=train_data_good, var_type=self.kde_vartypes, bw=bw_estimation) bad_kde.bw = np.clip(bad_kde.bw, self.min_bandwidth, None) good_kde.bw = np.clip(good_kde.bw, self.min_bandwidth, None) self.kde_models[budget] = { 'good': good_kde, 'bad' : bad_kde } # update probs for the categorical parameters for later sampling logger.debug('done building a new model for budget %f based on %i/%i split\nBest loss for this budget:%f\n' %(budget, n_good, n_bad, np.min(train_losses)))
[ "def", "new_result", "(", "self", ",", "loss", ",", "budget", ",", "parameters", ",", "update_model", "=", "True", ")", ":", "if", "loss", "is", "None", ":", "# One could skip crashed results, but we decided", "# assign a +inf loss and count them as bad configurations", "loss", "=", "np", ".", "inf", "if", "budget", "not", "in", "self", ".", "configs", ".", "keys", "(", ")", ":", "self", ".", "configs", "[", "budget", "]", "=", "[", "]", "self", ".", "losses", "[", "budget", "]", "=", "[", "]", "# skip model building if we already have a bigger model", "if", "max", "(", "list", "(", "self", ".", "kde_models", ".", "keys", "(", ")", ")", "+", "[", "-", "np", ".", "inf", "]", ")", ">", "budget", ":", "return", "# We want to get a numerical representation of the configuration in the original space", "conf", "=", "ConfigSpace", ".", "Configuration", "(", "self", ".", "configspace", ",", "parameters", ")", "self", ".", "configs", "[", "budget", "]", ".", "append", "(", "conf", ".", "get_array", "(", ")", ")", "self", ".", "losses", "[", "budget", "]", ".", "append", "(", "loss", ")", "# skip model building:", "# a) if not enough points are available", "if", "len", "(", "self", ".", "configs", "[", "budget", "]", ")", "<=", "self", ".", "min_points_in_model", "-", "1", ":", "logger", ".", "debug", "(", "\"Only %i run(s) for budget %f available, need more than %s \\\n -> can't build model!\"", "%", "(", "len", "(", "self", ".", "configs", "[", "budget", "]", ")", ",", "budget", ",", "self", ".", "min_points_in_model", "+", "1", ")", ")", "return", "# b) during warnm starting when we feed previous results in and only update once", "if", "not", "update_model", ":", "return", "train_configs", "=", "np", ".", "array", "(", "self", ".", "configs", "[", "budget", "]", ")", "train_losses", "=", "np", ".", "array", "(", "self", ".", "losses", "[", "budget", "]", ")", "n_good", "=", "max", "(", "self", ".", "min_points_in_model", ",", "(", "self", ".", "top_n_percent", "*", "train_configs", ".", "shape", "[", "0", "]", ")", "//", "100", ")", "n_bad", "=", "max", "(", "self", ".", "min_points_in_model", ",", "(", "(", "100", "-", "self", ".", "top_n_percent", ")", "*", "train_configs", ".", "shape", "[", "0", "]", ")", "//", "100", ")", "# Refit KDE for the current budget", "idx", "=", "np", ".", "argsort", "(", "train_losses", ")", "train_data_good", "=", "self", ".", "impute_conditional_data", "(", "train_configs", "[", "idx", "[", ":", "n_good", "]", "]", ")", "train_data_bad", "=", "self", ".", "impute_conditional_data", "(", "train_configs", "[", "idx", "[", "n_good", ":", "n_good", "+", "n_bad", "]", "]", ")", "if", "train_data_good", ".", "shape", "[", "0", "]", "<=", "train_data_good", ".", "shape", "[", "1", "]", ":", "return", "if", "train_data_bad", ".", "shape", "[", "0", "]", "<=", "train_data_bad", ".", "shape", "[", "1", "]", ":", "return", "#more expensive crossvalidation method", "#bw_estimation = 'cv_ls'", "# quick rule of thumb", "bw_estimation", "=", "'normal_reference'", "bad_kde", "=", "sm", ".", "nonparametric", ".", "KDEMultivariate", "(", "data", "=", "train_data_bad", ",", "var_type", "=", "self", ".", "kde_vartypes", ",", "bw", "=", "bw_estimation", ")", "good_kde", "=", "sm", ".", "nonparametric", ".", "KDEMultivariate", "(", "data", "=", "train_data_good", ",", "var_type", "=", "self", ".", "kde_vartypes", ",", "bw", "=", "bw_estimation", ")", "bad_kde", ".", "bw", "=", "np", ".", "clip", "(", "bad_kde", ".", "bw", ",", "self", ".", "min_bandwidth", ",", "None", ")", 
"good_kde", ".", "bw", "=", "np", ".", "clip", "(", "good_kde", ".", "bw", ",", "self", ".", "min_bandwidth", ",", "None", ")", "self", ".", "kde_models", "[", "budget", "]", "=", "{", "'good'", ":", "good_kde", ",", "'bad'", ":", "bad_kde", "}", "# update probs for the categorical parameters for later sampling", "logger", ".", "debug", "(", "'done building a new model for budget %f based on %i/%i split\\nBest loss for this budget:%f\\n'", "%", "(", "budget", ",", "n_good", ",", "n_bad", ",", "np", ".", "min", "(", "train_losses", ")", ")", ")" ]
39.940476
0.005526
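The record above splits observed configurations into "good" and "bad" sets and fits one KDE to each; a self-contained sketch of that split with statsmodels follows (the synthetic data, the 15% split, and the 'cc' variable types are illustrative assumptions, not values taken from the record):

    import numpy as np
    import statsmodels.api as sm

    rng = np.random.RandomState(0)
    configs = rng.rand(40, 2)                    # 40 sampled configurations, 2 continuous hyperparameters
    losses = rng.rand(40)                        # pretend observed losses for those configurations

    idx = np.argsort(losses)
    n_good = max(5, (15 * len(configs)) // 100)  # top 15% (with a floor), mirroring top_n_percent
    good, bad = configs[idx[:n_good]], configs[idx[n_good:]]

    good_kde = sm.nonparametric.KDEMultivariate(data=good, var_type='cc', bw='normal_reference')
    bad_kde = sm.nonparametric.KDEMultivariate(data=bad, var_type='cc', bw='normal_reference')

    # New candidates would then be ranked by the ratio good_kde.pdf(x) / bad_kde.pdf(x).
    print(good_kde.bw, bad_kde.bw)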
def run_gevent(self): """Created the server that runs the application supplied a subclass""" from pywb.utils.geventserver import GeventServer, RequestURIWSGIHandler logging.info('Starting Gevent Server on ' + str(self.r.port)) ge = GeventServer(self.application, port=self.r.port, hostname=self.r.bind, handler_class=RequestURIWSGIHandler, direct=True)
[ "def", "run_gevent", "(", "self", ")", ":", "from", "pywb", ".", "utils", ".", "geventserver", "import", "GeventServer", ",", "RequestURIWSGIHandler", "logging", ".", "info", "(", "'Starting Gevent Server on '", "+", "str", "(", "self", ".", "r", ".", "port", ")", ")", "ge", "=", "GeventServer", "(", "self", ".", "application", ",", "port", "=", "self", ".", "r", ".", "port", ",", "hostname", "=", "self", ".", "r", ".", "bind", ",", "handler_class", "=", "RequestURIWSGIHandler", ",", "direct", "=", "True", ")" ]
53.333333
0.004098
def steemconnect(self, accesstoken=None): ''' Initializes the SteemConnect Client class ''' if self.sc is not None: return self.sc if accesstoken is not None: self.accesstoken = accesstoken if self.accesstoken is None: self.sc = Client(client_id=self.client_id, client_secret=self.client_secret) else: self.sc = Client(access_token=self.accesstoken, client_id=self.client_id, client_secret=self.client_secret) return self.sc
[ "def", "steemconnect", "(", "self", ",", "accesstoken", "=", "None", ")", ":", "if", "self", ".", "sc", "is", "not", "None", ":", "return", "self", ".", "sc", "if", "accesstoken", "is", "not", "None", ":", "self", ".", "accesstoken", "=", "accesstoken", "if", "self", ".", "accesstoken", "is", "None", ":", "self", ".", "sc", "=", "Client", "(", "client_id", "=", "self", ".", "client_id", ",", "client_secret", "=", "self", ".", "client_secret", ")", "else", ":", "self", ".", "sc", "=", "Client", "(", "access_token", "=", "self", ".", "accesstoken", ",", "client_id", "=", "self", ".", "client_id", ",", "client_secret", "=", "self", ".", "client_secret", ")", "return", "self", ".", "sc" ]
37.9375
0.011254
def serialize_model(self, model, field_dict=None): """ Takes a model and serializes the fields provided into a dictionary. :param Model model: The Sqlalchemy model instance to serialize :param dict field_dict: The dictionary of fields to return. :return: The serialized model. :rtype: dict """ response = self._serialize_model_helper(model, field_dict=field_dict) return make_json_safe(response)
[ "def", "serialize_model", "(", "self", ",", "model", ",", "field_dict", "=", "None", ")", ":", "response", "=", "self", ".", "_serialize_model_helper", "(", "model", ",", "field_dict", "=", "field_dict", ")", "return", "make_json_safe", "(", "response", ")" ]
38.75
0.004202
def finish_section(section, name): '''finish_section will add the header to a section to finish the recipe. Take a custom command or list and return a section. Parameters ========== section: the section content, without a header name: the name of the section for the header ''' if not isinstance(section, list): section = [section] header = ['%' + name ] return header + section
[ "def", "finish_section", "(", "section", ",", "name", ")", ":", "if", "not", "isinstance", "(", "section", ",", "list", ")", ":", "section", "=", "[", "section", "]", "header", "=", "[", "'%'", "+", "name", "]", "return", "header", "+", "section" ]
28.866667
0.006711
def _create_affine_features(output_shape, source_shape): """Generates n-dimensional homogenous coordinates for a given grid definition. `source_shape` and `output_shape` are used to define the size of the source and output signal domains, as opposed to the shape of the respective Tensors. For example, for an image of size `width=W` and `height=H`, `{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H` and `depth=D`, `{source,output}_shape=[H, W, D]`. Args: output_shape: Iterable of integers determining the shape of the grid to be warped. source_shape: Iterable of integers determining the domain of the signal to be resampled. Returns: List of flattened numpy arrays of coordinates in range `[-1, 1]^N`, for example: ``` [[x_0_0, .... , x_0_{n-1}], .... [x_{M-1}_0, .... , x_{M-1}_{n-1}], [x_{M}_0=0, .... , x_{M}_{n-1}=0], ... [x_{N-1}_0=0, .... , x_{N-1}_{n-1}=0], [1, ..., 1]] ``` where N is the dimensionality of the sampled space, M is the dimensionality of the output space, i.e. 2 for images and 3 for volumes, and n is the number of points in the output grid. When the dimensionality of `output_shape` is smaller that that of `source_shape` the last rows before [1, ..., 1] will be filled with 0. """ ranges = [np.linspace(-1, 1, x, dtype=np.float32) for x in reversed(output_shape)] psi = [x.reshape(-1) for x in np.meshgrid(*ranges, indexing='xy')] dim_gap = len(source_shape) - len(output_shape) for _ in xrange(dim_gap): psi.append(np.zeros_like(psi[0], dtype=np.float32)) psi.append(np.ones_like(psi[0], dtype=np.float32)) return psi
[ "def", "_create_affine_features", "(", "output_shape", ",", "source_shape", ")", ":", "ranges", "=", "[", "np", ".", "linspace", "(", "-", "1", ",", "1", ",", "x", ",", "dtype", "=", "np", ".", "float32", ")", "for", "x", "in", "reversed", "(", "output_shape", ")", "]", "psi", "=", "[", "x", ".", "reshape", "(", "-", "1", ")", "for", "x", "in", "np", ".", "meshgrid", "(", "*", "ranges", ",", "indexing", "=", "'xy'", ")", "]", "dim_gap", "=", "len", "(", "source_shape", ")", "-", "len", "(", "output_shape", ")", "for", "_", "in", "xrange", "(", "dim_gap", ")", ":", "psi", ".", "append", "(", "np", ".", "zeros_like", "(", "psi", "[", "0", "]", ",", "dtype", "=", "np", ".", "float32", ")", ")", "psi", ".", "append", "(", "np", ".", "ones_like", "(", "psi", "[", "0", "]", ",", "dtype", "=", "np", ".", "float32", ")", ")", "return", "psi" ]
41.390244
0.005757
def encrypt(key, message): '''encrypt leverages KMS encrypt and base64-encode encrypted blob More info on KMS encrypt API: https://docs.aws.amazon.com/kms/latest/APIReference/API_encrypt.html ''' try: ret = kms.encrypt(KeyId=key, Plaintext=message) encrypted_data = base64.encodestring(ret.get('CiphertextBlob')) except Exception as e: # returns http 500 back to user and log error details in Cloudwatch Logs raise Exception("Unable to encrypt data: ", e) return encrypted_data.decode()
[ "def", "encrypt", "(", "key", ",", "message", ")", ":", "try", ":", "ret", "=", "kms", ".", "encrypt", "(", "KeyId", "=", "key", ",", "Plaintext", "=", "message", ")", "encrypted_data", "=", "base64", ".", "encodestring", "(", "ret", ".", "get", "(", "'CiphertextBlob'", ")", ")", "except", "Exception", "as", "e", ":", "# returns http 500 back to user and log error details in Cloudwatch Logs", "raise", "Exception", "(", "\"Unable to encrypt data: \"", ",", "e", ")", "return", "encrypted_data", ".", "decode", "(", ")" ]
38.785714
0.003597
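A usage sketch of the underlying boto3 call used in the record above (this assumes AWS credentials are configured; the key alias is a placeholder, and base64.encodebytes is used here because encodestring is removed in newer Python versions):

    import base64
    import boto3

    kms = boto3.client('kms')
    ret = kms.encrypt(KeyId='alias/my-placeholder-key', Plaintext=b'secret message')
    print(base64.encodebytes(ret['CiphertextBlob']).decode())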
def potential_from_grid(self, grid): """ Calculate the potential at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ eta = (1.0 / self.scale_radius) * self.grid_to_grid_radii(grid) + 0j return np.real(2.0 * self.scale_radius * self.kappa_s * self.potential_func_sph(eta))
[ "def", "potential_from_grid", "(", "self", ",", "grid", ")", ":", "eta", "=", "(", "1.0", "/", "self", ".", "scale_radius", ")", "*", "self", ".", "grid_to_grid_radii", "(", "grid", ")", "+", "0j", "return", "np", ".", "real", "(", "2.0", "*", "self", ".", "scale_radius", "*", "self", ".", "kappa_s", "*", "self", ".", "potential_func_sph", "(", "eta", ")", ")" ]
42.454545
0.010482
def notequal(x, y): """ Return True if x != y and False otherwise. This function returns True whenever x and/or y is a NaN. """ x = BigFloat._implicit_convert(x) y = BigFloat._implicit_convert(y) return not mpfr.mpfr_equal_p(x, y)
[ "def", "notequal", "(", "x", ",", "y", ")", ":", "x", "=", "BigFloat", ".", "_implicit_convert", "(", "x", ")", "y", "=", "BigFloat", ".", "_implicit_convert", "(", "y", ")", "return", "not", "mpfr", ".", "mpfr_equal_p", "(", "x", ",", "y", ")" ]
25.1
0.003846
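A minimal sketch of the NaN behaviour described above (this assumes notequal is importable from the bigfloat package's top level):

    from bigfloat import notequal

    print(notequal(1.0, 2.0))           # True
    print(notequal(1.0, 1.0))           # False
    print(notequal(float('nan'), 1.0))  # True: any comparison involving a NaN reports "not equal"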
def add_user(username='',password='',group='', site_user=False): """ Adds the username """ if group: group = '-g %s'% group if not site_user: run('echo %s:%s > /tmp/users.txt'% (username,password)) if not site_user: sudo('useradd -m -s /bin/bash %s %s'% (group,username)) sudo('chpasswd < /tmp/users.txt') sudo('rm -rf /tmp/users.txt') else: sudo('useradd -M -d /var/www -s /bin/bash %s'% username) sudo('usermod -a -G www-data %s'% username)
[ "def", "add_user", "(", "username", "=", "''", ",", "password", "=", "''", ",", "group", "=", "''", ",", "site_user", "=", "False", ")", ":", "if", "group", ":", "group", "=", "'-g %s'", "%", "group", "if", "not", "site_user", ":", "run", "(", "'echo %s:%s > /tmp/users.txt'", "%", "(", "username", ",", "password", ")", ")", "if", "not", "site_user", ":", "sudo", "(", "'useradd -m -s /bin/bash %s %s'", "%", "(", "group", ",", "username", ")", ")", "sudo", "(", "'chpasswd < /tmp/users.txt'", ")", "sudo", "(", "'rm -rf /tmp/users.txt'", ")", "else", ":", "sudo", "(", "'useradd -M -d /var/www -s /bin/bash %s'", "%", "username", ")", "sudo", "(", "'usermod -a -G www-data %s'", "%", "username", ")" ]
36.071429
0.021236
def p_notificationTypeClause(self, p): """notificationTypeClause : fuzzy_lowercase_identifier NOTIFICATION_TYPE NotificationObjectsPart STATUS Status DESCRIPTION Text ReferPart COLON_COLON_EQUAL '{' NotificationName '}'""" # some MIBs have uppercase and/or lowercase id p[0] = ('notificationTypeClause', p[1], # id # p[2], # NOTIFICATION_TYPE p[3], # NotificationObjectsPart p[5], # status (p[6], p[7]), # description p[8], # Reference p[11])
[ "def", "p_notificationTypeClause", "(", "self", ",", "p", ")", ":", "# some MIBs have uppercase and/or lowercase id", "p", "[", "0", "]", "=", "(", "'notificationTypeClause'", ",", "p", "[", "1", "]", ",", "# id", "# p[2], # NOTIFICATION_TYPE", "p", "[", "3", "]", ",", "# NotificationObjectsPart", "p", "[", "5", "]", ",", "# status", "(", "p", "[", "6", "]", ",", "p", "[", "7", "]", ")", ",", "# description", "p", "[", "8", "]", ",", "# Reference", "p", "[", "11", "]", ")" ]
61.555556
0.005338
def get_immoralities(self): """ Finds all the immoralities in the model A v-structure X -> Z <- Y is an immorality if there is no direct edge between X and Y . Returns ------- set: A set of all the immoralities in the model Examples --------- >>> from pgmpy.base import DAG >>> student = DAG() >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'), ... ('intel', 'SAT'), ('grade', 'letter')]) >>> student.get_immoralities() {('diff','intel')} """ immoralities = set() for node in self.nodes(): for parents in itertools.combinations(self.predecessors(node), 2): if not self.has_edge(parents[0], parents[1]) and not self.has_edge(parents[1], parents[0]): immoralities.add(tuple(sorted(parents))) return immoralities
[ "def", "get_immoralities", "(", "self", ")", ":", "immoralities", "=", "set", "(", ")", "for", "node", "in", "self", ".", "nodes", "(", ")", ":", "for", "parents", "in", "itertools", ".", "combinations", "(", "self", ".", "predecessors", "(", "node", ")", ",", "2", ")", ":", "if", "not", "self", ".", "has_edge", "(", "parents", "[", "0", "]", ",", "parents", "[", "1", "]", ")", "and", "not", "self", ".", "has_edge", "(", "parents", "[", "1", "]", ",", "parents", "[", "0", "]", ")", ":", "immoralities", ".", "add", "(", "tuple", "(", "sorted", "(", "parents", ")", ")", ")", "return", "immoralities" ]
38.333333
0.004242
def map(self, func, iterable, chunksize=None): """A parallel equivalent of the map() builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.""" return self.map_async(func, iterable, chunksize).get()
[ "def", "map", "(", "self", ",", "func", ",", "iterable", ",", "chunksize", "=", "None", ")", ":", "return", "self", ".", "map_async", "(", "func", ",", "iterable", ",", "chunksize", ")", ".", "get", "(", ")" ]
50.666667
0.00431
def is_magic(s): """Check whether given string is a __magic__ Python identifier. :return: Whether ``s`` is a __magic__ Python identifier """ if not is_identifier(s): return False return len(s) > 4 and s.startswith('__') and s.endswith('__')
[ "def", "is_magic", "(", "s", ")", ":", "if", "not", "is_identifier", "(", "s", ")", ":", "return", "False", "return", "len", "(", "s", ")", ">", "4", "and", "s", ".", "startswith", "(", "'__'", ")", "and", "s", ".", "endswith", "(", "'__'", ")" ]
37.428571
0.003731
def guess_payload_class(self, payload): """ Handles NTPv4 extensions and MAC part (when authentication is used.) """ plen = len(payload) if plen > _NTP_AUTH_MD5_TAIL_SIZE: return NTPExtensions elif plen == _NTP_AUTH_MD5_TAIL_SIZE: return NTPAuthenticator return Packet.guess_payload_class(self, payload)
[ "def", "guess_payload_class", "(", "self", ",", "payload", ")", ":", "plen", "=", "len", "(", "payload", ")", "if", "plen", ">", "_NTP_AUTH_MD5_TAIL_SIZE", ":", "return", "NTPExtensions", "elif", "plen", "==", "_NTP_AUTH_MD5_TAIL_SIZE", ":", "return", "NTPAuthenticator", "return", "Packet", ".", "guess_payload_class", "(", "self", ",", "payload", ")" ]
31.166667
0.005195
def find(self, path, all=False): ''' Looks for files in the app directories. ''' found = os.path.join(settings.STATIC_ROOT, path) if all: return [found] else: return found
[ "def", "find", "(", "self", ",", "path", ",", "all", "=", "False", ")", ":", "found", "=", "os", ".", "path", ".", "join", "(", "settings", ".", "STATIC_ROOT", ",", "path", ")", "if", "all", ":", "return", "[", "found", "]", "else", ":", "return", "found" ]
26.111111
0.00823
def resolve_id(marked_id): """Given a marked ID, returns the original ID and its :tl:`Peer` type.""" if marked_id >= 0: return marked_id, types.PeerUser # There have been report of chat IDs being 10000xyz, which means their # marked version is -10000xyz, which in turn looks like a channel but # it becomes 00xyz (= xyz). Hence, we must assert that there are only # two zeroes. m = re.match(r'-100([^0]\d*)', str(marked_id)) if m: return int(m.group(1)), types.PeerChannel return -marked_id, types.PeerChat
[ "def", "resolve_id", "(", "marked_id", ")", ":", "if", "marked_id", ">=", "0", ":", "return", "marked_id", ",", "types", ".", "PeerUser", "# There have been report of chat IDs being 10000xyz, which means their", "# marked version is -10000xyz, which in turn looks like a channel but", "# it becomes 00xyz (= xyz). Hence, we must assert that there are only", "# two zeroes.", "m", "=", "re", ".", "match", "(", "r'-100([^0]\\d*)'", ",", "str", "(", "marked_id", ")", ")", "if", "m", ":", "return", "int", "(", "m", ".", "group", "(", "1", ")", ")", ",", "types", ".", "PeerChannel", "return", "-", "marked_id", ",", "types", ".", "PeerChat" ]
39.071429
0.001786
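A quick illustration of the marked-ID convention handled above (assuming the helper is available as telethon.utils.resolve_id, where this code appears to come from):

    from telethon import utils

    print(utils.resolve_id(123456))        # positive IDs resolve to PeerUser
    print(utils.resolve_id(-123456))       # plain negative IDs resolve to PeerChat (ID 123456)
    print(utils.resolve_id(-1001234567))   # the -100... prefix resolves to PeerChannel (ID 1234567)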
def bookmarks_index_changed(self): """Update the UI when the bookmarks combobox has changed.""" index = self.bookmarks_list.currentIndex() if index >= 0: self.tool.reset() rectangle = self.bookmarks_list.itemData(index) self.tool.set_rectangle(rectangle) self.canvas.setExtent(rectangle) self.ok_button.setEnabled(True) else: self.ok_button.setDisabled(True)
[ "def", "bookmarks_index_changed", "(", "self", ")", ":", "index", "=", "self", ".", "bookmarks_list", ".", "currentIndex", "(", ")", "if", "index", ">=", "0", ":", "self", ".", "tool", ".", "reset", "(", ")", "rectangle", "=", "self", ".", "bookmarks_list", ".", "itemData", "(", "index", ")", "self", ".", "tool", ".", "set_rectangle", "(", "rectangle", ")", "self", ".", "canvas", ".", "setExtent", "(", "rectangle", ")", "self", ".", "ok_button", ".", "setEnabled", "(", "True", ")", "else", ":", "self", ".", "ok_button", ".", "setDisabled", "(", "True", ")" ]
41.090909
0.004329
def _read_results(self, memory): """Read back the probed results. Returns ------- str A string of "0"s and "1"s, one for each millisecond of simulation. """ # Seek to the simulation data and read it all back memory.seek(8) bits = bitarray(endian="little") bits.frombytes(memory.read()) self.recorded_data = bits.to01()
[ "def", "_read_results", "(", "self", ",", "memory", ")", ":", "# Seek to the simulation data and read it all back", "memory", ".", "seek", "(", "8", ")", "bits", "=", "bitarray", "(", "endian", "=", "\"little\"", ")", "bits", ".", "frombytes", "(", "memory", ".", "read", "(", ")", ")", "self", ".", "recorded_data", "=", "bits", ".", "to01", "(", ")" ]
30.692308
0.004866
def custom_str(self, sc_expr_str_fn): """ Works like MenuNode.__str__(), but allows a custom format to be used for all symbol/choice references. See expr_str(). """ return self._menu_comment_node_str(sc_expr_str_fn) \ if self.item in _MENU_COMMENT else \ self._sym_choice_node_str(sc_expr_str_fn)
[ "def", "custom_str", "(", "self", ",", "sc_expr_str_fn", ")", ":", "return", "self", ".", "_menu_comment_node_str", "(", "sc_expr_str_fn", ")", "if", "self", ".", "item", "in", "_MENU_COMMENT", "else", "self", ".", "_sym_choice_node_str", "(", "sc_expr_str_fn", ")" ]
44.875
0.010929
def has_nans(obj): """Check if obj has any NaNs Compatible with different behavior of np.isnan, which sometimes applies over all axes (py35, py35) and sometimes does not (py34). """ nans = np.isnan(obj) while np.ndim(nans): nans = np.any(nans) return bool(nans)
[ "def", "has_nans", "(", "obj", ")", ":", "nans", "=", "np", ".", "isnan", "(", "obj", ")", "while", "np", ".", "ndim", "(", "nans", ")", ":", "nans", "=", "np", ".", "any", "(", "nans", ")", "return", "bool", "(", "nans", ")" ]
28.9
0.003356
def _sensoryComputeInferenceMode(self, anchorInput): """ Infer the location from sensory input. Activate any cells with enough active synapses to this sensory input. Deactivate all other cells. @param anchorInput (numpy array) A sensory input. This will often come from a feature-location pair layer. """ if len(anchorInput) == 0: return overlaps = self.connections.computeActivity(anchorInput, self.connectedPermanence) activeSegments = np.where(overlaps >= self.activationThreshold)[0] sensorySupportedCells = np.unique( self.connections.mapSegmentsToCells(activeSegments)) self.bumpPhases = self.cellPhases[:,sensorySupportedCells] self._computeActiveCells() self.activeSegments = activeSegments self.sensoryAssociatedCells = sensorySupportedCells
[ "def", "_sensoryComputeInferenceMode", "(", "self", ",", "anchorInput", ")", ":", "if", "len", "(", "anchorInput", ")", "==", "0", ":", "return", "overlaps", "=", "self", ".", "connections", ".", "computeActivity", "(", "anchorInput", ",", "self", ".", "connectedPermanence", ")", "activeSegments", "=", "np", ".", "where", "(", "overlaps", ">=", "self", ".", "activationThreshold", ")", "[", "0", "]", "sensorySupportedCells", "=", "np", ".", "unique", "(", "self", ".", "connections", ".", "mapSegmentsToCells", "(", "activeSegments", ")", ")", "self", ".", "bumpPhases", "=", "self", ".", "cellPhases", "[", ":", ",", "sensorySupportedCells", "]", "self", ".", "_computeActiveCells", "(", ")", "self", ".", "activeSegments", "=", "activeSegments", "self", ".", "sensoryAssociatedCells", "=", "sensorySupportedCells" ]
38.590909
0.004598
def get_tunnel_info_input_page_cursor(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_tunnel_info = ET.Element("get_tunnel_info") config = get_tunnel_info input = ET.SubElement(get_tunnel_info, "input") page_cursor = ET.SubElement(input, "page-cursor") page_cursor.text = kwargs.pop('page_cursor') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_tunnel_info_input_page_cursor", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_tunnel_info", "=", "ET", ".", "Element", "(", "\"get_tunnel_info\"", ")", "config", "=", "get_tunnel_info", "input", "=", "ET", ".", "SubElement", "(", "get_tunnel_info", ",", "\"input\"", ")", "page_cursor", "=", "ET", ".", "SubElement", "(", "input", ",", "\"page-cursor\"", ")", "page_cursor", ".", "text", "=", "kwargs", ".", "pop", "(", "'page_cursor'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
39.25
0.004149
def get_changed_files(self) -> List[str]: """Get the files changed on one git branch vs another. Returns: List[str]: File paths of changed files, relative to the git repo root. """ out = shell_tools.output_of( 'git', 'diff', '--name-only', self.compare_commit_id, self.actual_commit_id, '--', cwd=self.destination_directory) return [e for e in out.split('\n') if e.strip()]
[ "def", "get_changed_files", "(", "self", ")", "->", "List", "[", "str", "]", ":", "out", "=", "shell_tools", ".", "output_of", "(", "'git'", ",", "'diff'", ",", "'--name-only'", ",", "self", ".", "compare_commit_id", ",", "self", ".", "actual_commit_id", ",", "'--'", ",", "cwd", "=", "self", ".", "destination_directory", ")", "return", "[", "e", "for", "e", "in", "out", ".", "split", "(", "'\\n'", ")", "if", "e", ".", "strip", "(", ")", "]" ]
31.9375
0.003802
def load_baseline_from_dict(cls, data): """Initializes a SecretsCollection object from dictionary. :type data: dict :param data: properly formatted dictionary to load SecretsCollection from. :rtype: SecretsCollection :raises: IOError """ result = SecretsCollection() if not all(key in data for key in ( 'plugins_used', 'results', )): raise IOError # In v0.12.0 `exclude_regex` got replaced by `exclude` if not any(key in data for key in ( 'exclude', 'exclude_regex', )): raise IOError if 'exclude_regex' in data: result.exclude_files = data['exclude_regex'] else: result.exclude_files = data['exclude']['files'] result.exclude_lines = data['exclude']['lines'] plugins = [] for plugin in data['plugins_used']: plugin_classname = plugin.pop('name') plugins.append(initialize.from_plugin_classname( plugin_classname, exclude_lines_regex=result.exclude_lines, **plugin )) result.plugins = tuple(plugins) for filename in data['results']: result.data[filename] = {} for item in data['results'][filename]: secret = PotentialSecret( item['type'], filename, secret='will be replaced', lineno=item['line_number'], is_secret=item.get('is_secret'), ) secret.secret_hash = item['hashed_secret'] result.data[filename][secret] = secret result.version = ( data['version'] if 'version' in data else '0.0.0' ) return result
[ "def", "load_baseline_from_dict", "(", "cls", ",", "data", ")", ":", "result", "=", "SecretsCollection", "(", ")", "if", "not", "all", "(", "key", "in", "data", "for", "key", "in", "(", "'plugins_used'", ",", "'results'", ",", ")", ")", ":", "raise", "IOError", "# In v0.12.0 `exclude_regex` got replaced by `exclude`", "if", "not", "any", "(", "key", "in", "data", "for", "key", "in", "(", "'exclude'", ",", "'exclude_regex'", ",", ")", ")", ":", "raise", "IOError", "if", "'exclude_regex'", "in", "data", ":", "result", ".", "exclude_files", "=", "data", "[", "'exclude_regex'", "]", "else", ":", "result", ".", "exclude_files", "=", "data", "[", "'exclude'", "]", "[", "'files'", "]", "result", ".", "exclude_lines", "=", "data", "[", "'exclude'", "]", "[", "'lines'", "]", "plugins", "=", "[", "]", "for", "plugin", "in", "data", "[", "'plugins_used'", "]", ":", "plugin_classname", "=", "plugin", ".", "pop", "(", "'name'", ")", "plugins", ".", "append", "(", "initialize", ".", "from_plugin_classname", "(", "plugin_classname", ",", "exclude_lines_regex", "=", "result", ".", "exclude_lines", ",", "*", "*", "plugin", ")", ")", "result", ".", "plugins", "=", "tuple", "(", "plugins", ")", "for", "filename", "in", "data", "[", "'results'", "]", ":", "result", ".", "data", "[", "filename", "]", "=", "{", "}", "for", "item", "in", "data", "[", "'results'", "]", "[", "filename", "]", ":", "secret", "=", "PotentialSecret", "(", "item", "[", "'type'", "]", ",", "filename", ",", "secret", "=", "'will be replaced'", ",", "lineno", "=", "item", "[", "'line_number'", "]", ",", "is_secret", "=", "item", ".", "get", "(", "'is_secret'", ")", ",", ")", "secret", ".", "secret_hash", "=", "item", "[", "'hashed_secret'", "]", "result", ".", "data", "[", "filename", "]", "[", "secret", "]", "=", "secret", "result", ".", "version", "=", "(", "data", "[", "'version'", "]", "if", "'version'", "in", "data", "else", "'0.0.0'", ")", "return", "result" ]
30.147541
0.00158
def add_listener(self, listener, message_type, data=None, one_shot=False): """Add a listener that will receive incoming messages.""" lst = self._one_shots if one_shot else self._listeners if message_type not in lst: lst[message_type] = [] lst[message_type].append(Listener(listener, data))
[ "def", "add_listener", "(", "self", ",", "listener", ",", "message_type", ",", "data", "=", "None", ",", "one_shot", "=", "False", ")", ":", "lst", "=", "self", ".", "_one_shots", "if", "one_shot", "else", "self", ".", "_listeners", "if", "message_type", "not", "in", "lst", ":", "lst", "[", "message_type", "]", "=", "[", "]", "lst", "[", "message_type", "]", ".", "append", "(", "Listener", "(", "listener", ",", "data", ")", ")" ]
41
0.00597
def sample(self, n): """ Samples data into a Pandas DataFrame. Args: n: number of sampled counts. Returns: A dataframe containing sampled data. Raises: Exception if n is larger than number of rows. """ row_total_count = 0 row_counts = [] for file in self.files: with _util.open_local_or_gcs(file, 'r') as f: num_lines = sum(1 for line in f) row_total_count += num_lines row_counts.append(num_lines) names = None dtype = None if self._schema: _MAPPINGS = { 'FLOAT': np.float64, 'INTEGER': np.int64, 'TIMESTAMP': np.datetime64, 'BOOLEAN': np.bool, } names = [x['name'] for x in self._schema] dtype = {x['name']: _MAPPINGS.get(x['type'], object) for x in self._schema} skip_count = row_total_count - n # Get all skipped indexes. These will be distributed into each file. # Note that random.sample will raise Exception if skip_count is greater than rows count. skip_all = sorted(random.sample(range(0, row_total_count), skip_count)) dfs = [] for file, row_count in zip(self.files, row_counts): skip = [x for x in skip_all if x < row_count] skip_all = [x - row_count for x in skip_all if x >= row_count] with _util.open_local_or_gcs(file, 'r') as f: dfs.append(pd.read_csv(f, skiprows=skip, names=names, dtype=dtype, header=None)) return pd.concat(dfs, axis=0, ignore_index=True)
[ "def", "sample", "(", "self", ",", "n", ")", ":", "row_total_count", "=", "0", "row_counts", "=", "[", "]", "for", "file", "in", "self", ".", "files", ":", "with", "_util", ".", "open_local_or_gcs", "(", "file", ",", "'r'", ")", "as", "f", ":", "num_lines", "=", "sum", "(", "1", "for", "line", "in", "f", ")", "row_total_count", "+=", "num_lines", "row_counts", ".", "append", "(", "num_lines", ")", "names", "=", "None", "dtype", "=", "None", "if", "self", ".", "_schema", ":", "_MAPPINGS", "=", "{", "'FLOAT'", ":", "np", ".", "float64", ",", "'INTEGER'", ":", "np", ".", "int64", ",", "'TIMESTAMP'", ":", "np", ".", "datetime64", ",", "'BOOLEAN'", ":", "np", ".", "bool", ",", "}", "names", "=", "[", "x", "[", "'name'", "]", "for", "x", "in", "self", ".", "_schema", "]", "dtype", "=", "{", "x", "[", "'name'", "]", ":", "_MAPPINGS", ".", "get", "(", "x", "[", "'type'", "]", ",", "object", ")", "for", "x", "in", "self", ".", "_schema", "}", "skip_count", "=", "row_total_count", "-", "n", "# Get all skipped indexes. These will be distributed into each file.", "# Note that random.sample will raise Exception if skip_count is greater than rows count.", "skip_all", "=", "sorted", "(", "random", ".", "sample", "(", "range", "(", "0", ",", "row_total_count", ")", ",", "skip_count", ")", ")", "dfs", "=", "[", "]", "for", "file", ",", "row_count", "in", "zip", "(", "self", ".", "files", ",", "row_counts", ")", ":", "skip", "=", "[", "x", "for", "x", "in", "skip_all", "if", "x", "<", "row_count", "]", "skip_all", "=", "[", "x", "-", "row_count", "for", "x", "in", "skip_all", "if", "x", ">=", "row_count", "]", "with", "_util", ".", "open_local_or_gcs", "(", "file", ",", "'r'", ")", "as", "f", ":", "dfs", ".", "append", "(", "pd", ".", "read_csv", "(", "f", ",", "skiprows", "=", "skip", ",", "names", "=", "names", ",", "dtype", "=", "dtype", ",", "header", "=", "None", ")", ")", "return", "pd", ".", "concat", "(", "dfs", ",", "axis", "=", "0", ",", "ignore_index", "=", "True", ")" ]
35.097561
0.007437
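A self-contained sketch of the "sample by skipping rows" trick used in the record above (the in-memory CSV and the counts are made up for illustration):

    import io
    import random
    import pandas as pd

    csv_text = "\n".join(str(i) for i in range(100))   # 100 single-column rows
    n = 10
    skip = sorted(random.sample(range(100), 100 - n))  # indexes of the rows to drop
    df = pd.read_csv(io.StringIO(csv_text), skiprows=skip, header=None)
    print(len(df))  # 10 rows survive the skip list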
def write(self, location, text, encoding): """ Write file to disk. """ location = os.path.expanduser(location) with codecs.open(location, 'w', encoding) as f: f.write(text)
[ "def", "write", "(", "self", ",", "location", ",", "text", ",", "encoding", ")", ":", "location", "=", "os", ".", "path", ".", "expanduser", "(", "location", ")", "with", "codecs", ".", "open", "(", "location", ",", "'w'", ",", "encoding", ")", "as", "f", ":", "f", ".", "write", "(", "text", ")" ]
27.25
0.008889
def can_cast_to(v: Literal, dt: str) -> bool: """ 5.4.3 Datatype Constraints Determine whether "a value of the lexical form of n can be cast to the target type v per XPath Functions 3.1 section 19 Casting[xpath-functions]." """ # TODO: rdflib doesn't appear to pay any attention to lengths (e.g. 257 is a valid XSD.byte) return v.value is not None and Literal(str(v), datatype=dt).value is not None
[ "def", "can_cast_to", "(", "v", ":", "Literal", ",", "dt", ":", "str", ")", "->", "bool", ":", "# TODO: rdflib doesn't appear to pay any attention to lengths (e.g. 257 is a valid XSD.byte)", "return", "v", ".", "value", "is", "not", "None", "and", "Literal", "(", "str", "(", "v", ")", ",", "datatype", "=", "dt", ")", ".", "value", "is", "not", "None" ]
52
0.009456
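A small sketch of the rdflib behaviour the check above relies on (only rdflib is imported here; the helper itself is not):

    from rdflib import Literal
    from rdflib.namespace import XSD

    # A lexical form that parses for the target datatype yields a non-None .value ...
    print(Literal("42", datatype=XSD.integer).value)        # 42
    # ... while one that does not parse yields None, so can_cast_to() would return False for it.
    print(Literal("fortytwo", datatype=XSD.integer).value)  # None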
def delete(self, job: JobOrID) -> None: """Deletes a job. :param job: The job or job ID to delete. """ self._send_cmd(b'delete %d' % _to_id(job), b'DELETED')
[ "def", "delete", "(", "self", ",", "job", ":", "JobOrID", ")", "->", "None", ":", "self", ".", "_send_cmd", "(", "b'delete %d'", "%", "_to_id", "(", "job", ")", ",", "b'DELETED'", ")" ]
30.833333
0.010526
def show(name, root=None): ''' .. versionadded:: 2014.7.0 Show properties of one or more units/jobs or the manager root Enable/disable/mask unit files in the specified root directory CLI Example: salt '*' service.show <service name> ''' ret = {} out = __salt__['cmd.run'](_systemctl_cmd('show', name, root=root), python_shell=False) for line in salt.utils.itertools.split(out, '\n'): comps = line.split('=') name = comps[0] value = '='.join(comps[1:]) if value.startswith('{'): value = value.replace('{', '').replace('}', '') ret[name] = {} for item in value.split(' ; '): comps = item.split('=') ret[name][comps[0].strip()] = comps[1].strip() elif name in ('Before', 'After', 'Wants'): ret[name] = value.split() else: ret[name] = value return ret
[ "def", "show", "(", "name", ",", "root", "=", "None", ")", ":", "ret", "=", "{", "}", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "_systemctl_cmd", "(", "'show'", ",", "name", ",", "root", "=", "root", ")", ",", "python_shell", "=", "False", ")", "for", "line", "in", "salt", ".", "utils", ".", "itertools", ".", "split", "(", "out", ",", "'\\n'", ")", ":", "comps", "=", "line", ".", "split", "(", "'='", ")", "name", "=", "comps", "[", "0", "]", "value", "=", "'='", ".", "join", "(", "comps", "[", "1", ":", "]", ")", "if", "value", ".", "startswith", "(", "'{'", ")", ":", "value", "=", "value", ".", "replace", "(", "'{'", ",", "''", ")", ".", "replace", "(", "'}'", ",", "''", ")", "ret", "[", "name", "]", "=", "{", "}", "for", "item", "in", "value", ".", "split", "(", "' ; '", ")", ":", "comps", "=", "item", ".", "split", "(", "'='", ")", "ret", "[", "name", "]", "[", "comps", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "comps", "[", "1", "]", ".", "strip", "(", ")", "elif", "name", "in", "(", "'Before'", ",", "'After'", ",", "'Wants'", ")", ":", "ret", "[", "name", "]", "=", "value", ".", "split", "(", ")", "else", ":", "ret", "[", "name", "]", "=", "value", "return", "ret" ]
29.59375
0.001022
def get_data(model, instance_id, kind=''): """Get instance data by id. :param model: a string, model name in rio.models :param instance_id: an integer, instance id. :param kind: a string specifying which kind of dict transformer should be called. :return: data. """ instance = get_instance(model, instance_id) if not instance: return return ins2dict(instance, kind)
[ "def", "get_data", "(", "model", ",", "instance_id", ",", "kind", "=", "''", ")", ":", "instance", "=", "get_instance", "(", "model", ",", "instance_id", ")", "if", "not", "instance", ":", "return", "return", "ins2dict", "(", "instance", ",", "kind", ")" ]
27.714286
0.004988
def stageContent(self, configFiles, dateTimeFormat=None): """Parses a JSON configuration file to stage content. Args: configFiles (list): A list of JSON files on disk containing configuration data for staging content. dateTimeFormat (str): A valid date formatting directive, as understood by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e., ``'%Y-%m-%d %H:%M'``. """ results = None groups = None items = None group = None content = None contentInfo = None startTime = None orgTools = None if dateTimeFormat is None: dateTimeFormat = '%Y-%m-%d %H:%M' scriptStartTime = datetime.datetime.now() try: print ("********************Stage Content Started********************") print ("Script started at %s" % scriptStartTime.strftime(dateTimeFormat)) if self.securityhandler.valid == False: print ("Login required") else: orgTools = orgtools.orgtools(securityinfo=self) if orgTools is None: print ("Error creating org tools") else: for configFile in configFiles: config = common.init_config_json(config_file=configFile) if config is not None: if 'ContentItems' in config: startTime = datetime.datetime.now() print ("Processing config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat))) contentInfo = config['ContentItems'] for cont in contentInfo: content = cont['Content'] group = cont['ShareToGroup'] print ("Sharing content to: %s" % group) if os.path.isfile(content): with open(content, 'rb') as csvfile: items = [] groups = [] for row in csv.DictReader(csvfile,dialect='excel'): if cont['Type'] == "Group": groups.append(row['id']) elif cont['Type'] == "Items": items.append(row['id']) results = orgTools.shareItemsToGroup(shareToGroupName=group,items=items,groups=groups) print ("Config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime))) else: print ("Config file missing ContentItems section") else: print ("Config %s not found" % configFile) except(TypeError,ValueError,AttributeError) as e: print (e) except (common.ArcRestHelperError) as e: print ("error in function: %s" % e[0]['function']) print ("error on line: %s" % e[0]['line']) print ("error in file name: %s" % e[0]['filename']) print ("with error message: %s" % e[0]['synerror']) if 'arcpyError' in e[0]: print ("with arcpy message: %s" % e[0]['arcpyError']) except Exception as e: if (reportToolsInstalled): if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)): print ("error in function: %s" % e[0]['function']) print ("error on line: %s" % e[0]['line']) print ("error in file name: %s" % e[0]['filename']) print ("with error message: %s" % e[0]['synerror']) if 'arcpyError' in e[0]: print ("with arcpy message: %s" % e[0]['arcpyError']) else: line, filename, synerror = trace() print ("error on line: %s" % line) print ("error in file name: %s" % filename) print ("with error message: %s" % synerror) else: line, filename, synerror = trace() print ("error on line: %s" % line) print ("error in file name: %s" % filename) print ("with error message: %s" % synerror) finally: print ("Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime)) print ("###############Stage Content Completed#################") print ("") #if orgTools is not None: #orgTools.dispose() results = None groups = None items = None group = None content = None contentInfo = None startTime = None orgTools = None del results del groups del items del group del content del contentInfo del startTime del orgTools gc.collect()
[ "def", "stageContent", "(", "self", ",", "configFiles", ",", "dateTimeFormat", "=", "None", ")", ":", "results", "=", "None", "groups", "=", "None", "items", "=", "None", "group", "=", "None", "content", "=", "None", "contentInfo", "=", "None", "startTime", "=", "None", "orgTools", "=", "None", "if", "dateTimeFormat", "is", "None", ":", "dateTimeFormat", "=", "'%Y-%m-%d %H:%M'", "scriptStartTime", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "try", ":", "print", "(", "\"********************Stage Content Started********************\"", ")", "print", "(", "\"Script started at %s\"", "%", "scriptStartTime", ".", "strftime", "(", "dateTimeFormat", ")", ")", "if", "self", ".", "securityhandler", ".", "valid", "==", "False", ":", "print", "(", "\"Login required\"", ")", "else", ":", "orgTools", "=", "orgtools", ".", "orgtools", "(", "securityinfo", "=", "self", ")", "if", "orgTools", "is", "None", ":", "print", "(", "\"Error creating org tools\"", ")", "else", ":", "for", "configFile", "in", "configFiles", ":", "config", "=", "common", ".", "init_config_json", "(", "config_file", "=", "configFile", ")", "if", "config", "is", "not", "None", ":", "if", "'ContentItems'", "in", "config", ":", "startTime", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "print", "(", "\"Processing config %s, starting at: %s\"", "%", "(", "configFile", ",", "startTime", ".", "strftime", "(", "dateTimeFormat", ")", ")", ")", "contentInfo", "=", "config", "[", "'ContentItems'", "]", "for", "cont", "in", "contentInfo", ":", "content", "=", "cont", "[", "'Content'", "]", "group", "=", "cont", "[", "'ShareToGroup'", "]", "print", "(", "\"Sharing content to: %s\"", "%", "group", ")", "if", "os", ".", "path", ".", "isfile", "(", "content", ")", ":", "with", "open", "(", "content", ",", "'rb'", ")", "as", "csvfile", ":", "items", "=", "[", "]", "groups", "=", "[", "]", "for", "row", "in", "csv", ".", "DictReader", "(", "csvfile", ",", "dialect", "=", "'excel'", ")", ":", "if", "cont", "[", "'Type'", "]", "==", "\"Group\"", ":", "groups", ".", "append", "(", "row", "[", "'id'", "]", ")", "elif", "cont", "[", "'Type'", "]", "==", "\"Items\"", ":", "items", ".", "append", "(", "row", "[", "'id'", "]", ")", "results", "=", "orgTools", ".", "shareItemsToGroup", "(", "shareToGroupName", "=", "group", ",", "items", "=", "items", ",", "groups", "=", "groups", ")", "print", "(", "\"Config %s completed, time to complete: %s\"", "%", "(", "configFile", ",", "str", "(", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "startTime", ")", ")", ")", "else", ":", "print", "(", "\"Config file missing ContentItems section\"", ")", "else", ":", "print", "(", "\"Config %s not found\"", "%", "configFile", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", "as", "e", ":", "print", "(", "e", ")", "except", "(", "common", ".", "ArcRestHelperError", ")", "as", "e", ":", "print", "(", "\"error in function: %s\"", "%", "e", "[", "0", "]", "[", "'function'", "]", ")", "print", "(", "\"error on line: %s\"", "%", "e", "[", "0", "]", "[", "'line'", "]", ")", "print", "(", "\"error in file name: %s\"", "%", "e", "[", "0", "]", "[", "'filename'", "]", ")", "print", "(", "\"with error message: %s\"", "%", "e", "[", "0", "]", "[", "'synerror'", "]", ")", "if", "'arcpyError'", "in", "e", "[", "0", "]", ":", "print", "(", "\"with arcpy message: %s\"", "%", "e", "[", "0", "]", "[", "'arcpyError'", "]", ")", "except", "Exception", "as", "e", ":", "if", "(", "reportToolsInstalled", ")", ":", "if", 
"isinstance", "(", "e", ",", "(", "ReportTools", ".", "ReportToolsError", ",", "DataPrep", ".", "DataPrepError", ")", ")", ":", "print", "(", "\"error in function: %s\"", "%", "e", "[", "0", "]", "[", "'function'", "]", ")", "print", "(", "\"error on line: %s\"", "%", "e", "[", "0", "]", "[", "'line'", "]", ")", "print", "(", "\"error in file name: %s\"", "%", "e", "[", "0", "]", "[", "'filename'", "]", ")", "print", "(", "\"with error message: %s\"", "%", "e", "[", "0", "]", "[", "'synerror'", "]", ")", "if", "'arcpyError'", "in", "e", "[", "0", "]", ":", "print", "(", "\"with arcpy message: %s\"", "%", "e", "[", "0", "]", "[", "'arcpyError'", "]", ")", "else", ":", "line", ",", "filename", ",", "synerror", "=", "trace", "(", ")", "print", "(", "\"error on line: %s\"", "%", "line", ")", "print", "(", "\"error in file name: %s\"", "%", "filename", ")", "print", "(", "\"with error message: %s\"", "%", "synerror", ")", "else", ":", "line", ",", "filename", ",", "synerror", "=", "trace", "(", ")", "print", "(", "\"error on line: %s\"", "%", "line", ")", "print", "(", "\"error in file name: %s\"", "%", "filename", ")", "print", "(", "\"with error message: %s\"", "%", "synerror", ")", "finally", ":", "print", "(", "\"Script complete, time to complete: %s\"", "%", "str", "(", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "scriptStartTime", ")", ")", "print", "(", "\"###############Stage Content Completed#################\"", ")", "print", "(", "\"\"", ")", "#if orgTools is not None:", "#orgTools.dispose()", "results", "=", "None", "groups", "=", "None", "items", "=", "None", "group", "=", "None", "content", "=", "None", "contentInfo", "=", "None", "startTime", "=", "None", "orgTools", "=", "None", "del", "results", "del", "groups", "del", "items", "del", "group", "del", "content", "del", "contentInfo", "del", "startTime", "del", "orgTools", "gc", ".", "collect", "(", ")" ]
42.133858
0.01059
def fetch_service_config(service_name=None, service_version=None): """Fetches the service config from Google Service Management API. Args: service_name: the service name. When this argument is unspecified, this method uses the value of the "SERVICE_NAME" environment variable as the service name, and raises ValueError if the environment variable is unset. service_version: the service version. When this argument is unspecified, this method uses the value of the "SERVICE_VERSION" environment variable as the service version, and raises ValueError if the environment variable is unset. Returns: the fetched service config JSON object. Raises: ValueError: when the service name/version is neither provided as an argument or set as an environment variable; or when the fetched service config fails validation. Exception: when the Google Service Management API returns non-200 response. """ if not service_name: service_name = _get_env_var_or_raise(_SERVICE_NAME_ENV_KEY) if not service_version: service_version = _get_service_version(_SERVICE_VERSION_ENV_KEY, service_name) _logger.debug(u'Contacting Service Management API for service %s version %s', service_name, service_version) response = _make_service_config_request(service_name, service_version) _logger.debug(u'obtained service json from the management api:\n%s', response.data) service = encoding.JsonToMessage(messages.Service, response.data) _validate_service_config(service, service_name, service_version) return service
[ "def", "fetch_service_config", "(", "service_name", "=", "None", ",", "service_version", "=", "None", ")", ":", "if", "not", "service_name", ":", "service_name", "=", "_get_env_var_or_raise", "(", "_SERVICE_NAME_ENV_KEY", ")", "if", "not", "service_version", ":", "service_version", "=", "_get_service_version", "(", "_SERVICE_VERSION_ENV_KEY", ",", "service_name", ")", "_logger", ".", "debug", "(", "u'Contacting Service Management API for service %s version %s'", ",", "service_name", ",", "service_version", ")", "response", "=", "_make_service_config_request", "(", "service_name", ",", "service_version", ")", "_logger", ".", "debug", "(", "u'obtained service json from the management api:\\n%s'", ",", "response", ".", "data", ")", "service", "=", "encoding", ".", "JsonToMessage", "(", "messages", ".", "Service", ",", "response", ".", "data", ")", "_validate_service_config", "(", "service", ",", "service_name", ",", "service_version", ")", "return", "service" ]
50.515152
0.00412
def from_shape_pixel_scale_and_sub_grid_size(cls, shape, pixel_scale, sub_grid_size): """Setup a sub-grid from a 2D array shape and pixel scale. Here, the center of every pixel on the 2D \ array gives the grid's (y,x) arc-second coordinates, where each pixel has sub-pixels specified by the \ sub-grid size. This is equivalent to using a 2D mask consisting entirely of unmasked pixels. Parameters ----------- shape : (int, int) The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack. pixel_scale : float The size of each pixel in arc seconds. sub_grid_size : int The size (sub_grid_size x sub_grid_size) of each unmasked pixels sub-grid. """ mask = msk.Mask.unmasked_for_shape_and_pixel_scale(shape=shape, pixel_scale=pixel_scale) sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=mask.pixel_scales, sub_grid_size=sub_grid_size) return SubGrid(sub_grid, mask, sub_grid_size)
[ "def", "from_shape_pixel_scale_and_sub_grid_size", "(", "cls", ",", "shape", ",", "pixel_scale", ",", "sub_grid_size", ")", ":", "mask", "=", "msk", ".", "Mask", ".", "unmasked_for_shape_and_pixel_scale", "(", "shape", "=", "shape", ",", "pixel_scale", "=", "pixel_scale", ")", "sub_grid", "=", "grid_util", ".", "sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size", "(", "mask", "=", "mask", ",", "pixel_scales", "=", "mask", ".", "pixel_scales", ",", "sub_grid_size", "=", "sub_grid_size", ")", "return", "SubGrid", "(", "sub_grid", ",", "mask", ",", "sub_grid_size", ")" ]
60.333333
0.009324
def properties(self): ''' This is a lazily loaded dictionary containing the launchd runtime information of the job in question. Internally, this is retrieved using ServiceManagement.SMJobCopyDictionary(). Keep in mind that some dictionary keys are not always present (for example 'PID'). If the job specified by the label cannot be found in launchd, then this method raises a ValueError exception. ''' if hasattr(self, '_nsproperties'): self._properties = convert_NSDictionary_to_dict(self._nsproperties) del self._nsproperties #self._nsproperties = None if self._properties is None: self.refresh() return self._properties
[ "def", "properties", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_nsproperties'", ")", ":", "self", ".", "_properties", "=", "convert_NSDictionary_to_dict", "(", "self", ".", "_nsproperties", ")", "del", "self", ".", "_nsproperties", "#self._nsproperties = None", "if", "self", ".", "_properties", "is", "None", ":", "self", ".", "refresh", "(", ")", "return", "self", ".", "_properties" ]
46.4375
0.003958
def update(self, shuffled=True, cohesion=100, separation=10, alignment=5, goal=20, limit=30): """ Calculates the next motion frame for the flock. """ # Shuffling the list of boids ensures fluid movement. # If you need the boids to retain their position in the list # each update, set the shuffled parameter to False. from random import shuffle if shuffled: shuffle(self) m1 = 1.0 # cohesion m2 = 1.0 # separation m3 = 1.0 # alignment m4 = 1.0 # goal # The flock scatters randomly with a Boids.scatter chance. # This means their cohesion (m1) is reversed, # and their joint alignment (m3) is dimished, # causing boids to oscillate in confusion. # Setting Boids.scatter(chance=0) ensures they never scatter. if not self.scattered and _ctx.random() < self._scatter: self.scattered = True if self.scattered: m1 = -m1 m3 *= 0.25 self._scatter_i += 1 if self._scatter_i >= self._scatter_t: self.scattered = False self._scatter_i = 0 # A flock can have a goal defined with Boids.goal(x,y,z), # a place of interest to flock around. if not self.has_goal: m4 = 0 if self.flee: m4 = -m4 for b in self: # A boid that is perching will continue to do so # until Boid._perch_t reaches zero. if b.is_perching: if b._perch_t > 0: b._perch_t -= 1 continue else: b.is_perching = False vx1, vy1, vz1 = b.cohesion(cohesion) vx2, vy2, vz2 = b.separation(separation) vx3, vy3, vz3 = b.alignment(alignment) vx4, vy4, vz4 = b.goal(self._gx, self._gy, self._gz, goal) b.vx += m1*vx1 + m2*vx2 + m3*vx3 + m4*vx4 b.vy += m1*vy1 + m2*vy2 + m3*vy3 + m4*vy4 b.vz += m1*vz1 + m2*vz2 + m3*vz3 + m4*vz4 b.limit(limit) b.x += b.vx b.y += b.vy b.z += b.vz self.constrain()
[ "def", "update", "(", "self", ",", "shuffled", "=", "True", ",", "cohesion", "=", "100", ",", "separation", "=", "10", ",", "alignment", "=", "5", ",", "goal", "=", "20", ",", "limit", "=", "30", ")", ":", "# Shuffling the list of boids ensures fluid movement.", "# If you need the boids to retain their position in the list", "# each update, set the shuffled parameter to False.", "from", "random", "import", "shuffle", "if", "shuffled", ":", "shuffle", "(", "self", ")", "m1", "=", "1.0", "# cohesion", "m2", "=", "1.0", "# separation", "m3", "=", "1.0", "# alignment", "m4", "=", "1.0", "# goal", "# The flock scatters randomly with a Boids.scatter chance.", "# This means their cohesion (m1) is reversed,", "# and their joint alignment (m3) is dimished,", "# causing boids to oscillate in confusion.", "# Setting Boids.scatter(chance=0) ensures they never scatter.", "if", "not", "self", ".", "scattered", "and", "_ctx", ".", "random", "(", ")", "<", "self", ".", "_scatter", ":", "self", ".", "scattered", "=", "True", "if", "self", ".", "scattered", ":", "m1", "=", "-", "m1", "m3", "*=", "0.25", "self", ".", "_scatter_i", "+=", "1", "if", "self", ".", "_scatter_i", ">=", "self", ".", "_scatter_t", ":", "self", ".", "scattered", "=", "False", "self", ".", "_scatter_i", "=", "0", "# A flock can have a goal defined with Boids.goal(x,y,z),", "# a place of interest to flock around.", "if", "not", "self", ".", "has_goal", ":", "m4", "=", "0", "if", "self", ".", "flee", ":", "m4", "=", "-", "m4", "for", "b", "in", "self", ":", "# A boid that is perching will continue to do so", "# until Boid._perch_t reaches zero.", "if", "b", ".", "is_perching", ":", "if", "b", ".", "_perch_t", ">", "0", ":", "b", ".", "_perch_t", "-=", "1", "continue", "else", ":", "b", ".", "is_perching", "=", "False", "vx1", ",", "vy1", ",", "vz1", "=", "b", ".", "cohesion", "(", "cohesion", ")", "vx2", ",", "vy2", ",", "vz2", "=", "b", ".", "separation", "(", "separation", ")", "vx3", ",", "vy3", ",", "vz3", "=", "b", ".", "alignment", "(", "alignment", ")", "vx4", ",", "vy4", ",", "vz4", "=", "b", ".", "goal", "(", "self", ".", "_gx", ",", "self", ".", "_gy", ",", "self", ".", "_gz", ",", "goal", ")", "b", ".", "vx", "+=", "m1", "*", "vx1", "+", "m2", "*", "vx2", "+", "m3", "*", "vx3", "+", "m4", "*", "vx4", "b", ".", "vy", "+=", "m1", "*", "vy1", "+", "m2", "*", "vy2", "+", "m3", "*", "vy3", "+", "m4", "*", "vy4", "b", ".", "vz", "+=", "m1", "*", "vz1", "+", "m2", "*", "vz2", "+", "m3", "*", "vz3", "+", "m4", "*", "vz4", "b", ".", "limit", "(", "limit", ")", "b", ".", "x", "+=", "b", ".", "vx", "b", ".", "y", "+=", "b", ".", "vy", "b", ".", "z", "+=", "b", ".", "vz", "self", ".", "constrain", "(", ")" ]
32.84507
0.012073
def _parse_module_with_import(self, uri): """Look for functions and classes in an importable module. Parameters ---------- uri : str The name of the module to be parsed. This module needs to be importable. Returns ------- functions : list of str A list of (public) function names in the module. classes : list of str A list of (public) class names in the module. """ mod = __import__(uri, fromlist=[uri]) # find all public objects in the module. obj_strs = [obj for obj in dir(mod) if not obj.startswith('_')] functions = [] classes = [] for obj_str in obj_strs: # find the actual object from its string representation if obj_str not in mod.__dict__: continue obj = mod.__dict__[obj_str] # figure out if obj is a function or class if hasattr(obj, 'func_name') or \ isinstance(obj, BuiltinFunctionType): functions.append(obj_str) else: try: issubclass(obj, object) classes.append(obj_str) except TypeError: # not a function or class pass return functions, classes
[ "def", "_parse_module_with_import", "(", "self", ",", "uri", ")", ":", "mod", "=", "__import__", "(", "uri", ",", "fromlist", "=", "[", "uri", "]", ")", "# find all public objects in the module.", "obj_strs", "=", "[", "obj", "for", "obj", "in", "dir", "(", "mod", ")", "if", "not", "obj", ".", "startswith", "(", "'_'", ")", "]", "functions", "=", "[", "]", "classes", "=", "[", "]", "for", "obj_str", "in", "obj_strs", ":", "# find the actual object from its string representation", "if", "obj_str", "not", "in", "mod", ".", "__dict__", ":", "continue", "obj", "=", "mod", ".", "__dict__", "[", "obj_str", "]", "# figure out if obj is a function or class", "if", "hasattr", "(", "obj", ",", "'func_name'", ")", "or", "isinstance", "(", "obj", ",", "BuiltinFunctionType", ")", ":", "functions", ".", "append", "(", "obj_str", ")", "else", ":", "try", ":", "issubclass", "(", "obj", ",", "object", ")", "classes", ".", "append", "(", "obj_str", ")", "except", "TypeError", ":", "# not a function or class", "pass", "return", "functions", ",", "classes" ]
35.105263
0.001459
def next(self, skip=None): """ Remove the next datum from the buffer and return it. """ buffer = self._buffer popleft = buffer.popleft if skip is not None: while True: try: if not skip(buffer[0]): break popleft() except IndexError: self._buffer_fill() try: datum = popleft() except IndexError: self._buffer_fill() datum = popleft() return datum
[ "def", "next", "(", "self", ",", "skip", "=", "None", ")", ":", "buffer", "=", "self", ".", "_buffer", "popleft", "=", "buffer", ".", "popleft", "if", "skip", "is", "not", "None", ":", "while", "True", ":", "try", ":", "if", "not", "skip", "(", "buffer", "[", "0", "]", ")", ":", "break", "popleft", "(", ")", "except", "IndexError", ":", "self", ".", "_buffer_fill", "(", ")", "try", ":", "datum", "=", "popleft", "(", ")", "except", "IndexError", ":", "self", ".", "_buffer_fill", "(", ")", "datum", "=", "popleft", "(", ")", "return", "datum" ]
28.05
0.003448
def process_extensions(
    headers: Headers,
    available_extensions: Optional[Sequence[ClientExtensionFactory]],
) -> List[Extension]:
    """
    Handle the Sec-WebSocket-Extensions HTTP response header.

    Check that each extension is supported, as well as its parameters.

    Return the list of accepted extensions.

    Raise :exc:`~websockets.exceptions.InvalidHandshake` to abort the
    connection.

    :rfc:`6455` leaves the rules up to the specification of each
    extension.

    To provide this level of flexibility, for each extension accepted by
    the server, we check for a match with each extension available in the
    client configuration. If no match is found, an exception is raised.

    If several variants of the same extension are accepted by the server,
    it may be configured several times, which won't make sense in general.
    Extensions must implement their own requirements. For this purpose,
    the list of previously accepted extensions is provided.

    Other requirements, for example related to mandatory extensions or the
    order of extensions, may be implemented by overriding this method.

    """
    accepted_extensions: List[Extension] = []

    header_values = headers.get_all("Sec-WebSocket-Extensions")

    if header_values:

        if available_extensions is None:
            raise InvalidHandshake("No extensions supported")

        parsed_header_values: List[ExtensionHeader] = sum(
            [parse_extension(header_value) for header_value in header_values], []
        )

        for name, response_params in parsed_header_values:

            for extension_factory in available_extensions:

                # Skip non-matching extensions based on their name.
                if extension_factory.name != name:
                    continue

                # Skip non-matching extensions based on their params.
                try:
                    extension = extension_factory.process_response_params(
                        response_params, accepted_extensions
                    )
                except NegotiationError:
                    continue

                # Add matching extension to the final list.
                accepted_extensions.append(extension)

                # Break out of the loop once we have a match.
                break

            # If we didn't break from the loop, no extension in our list
            # matched what the server sent. Fail the connection.
            else:
                raise NegotiationError(
                    f"Unsupported extension: "
                    f"name = {name}, params = {response_params}"
                )

    return accepted_extensions
[ "def", "process_extensions", "(", "headers", ":", "Headers", ",", "available_extensions", ":", "Optional", "[", "Sequence", "[", "ClientExtensionFactory", "]", "]", ",", ")", "->", "List", "[", "Extension", "]", ":", "accepted_extensions", ":", "List", "[", "Extension", "]", "=", "[", "]", "header_values", "=", "headers", ".", "get_all", "(", "\"Sec-WebSocket-Extensions\"", ")", "if", "header_values", ":", "if", "available_extensions", "is", "None", ":", "raise", "InvalidHandshake", "(", "\"No extensions supported\"", ")", "parsed_header_values", ":", "List", "[", "ExtensionHeader", "]", "=", "sum", "(", "[", "parse_extension", "(", "header_value", ")", "for", "header_value", "in", "header_values", "]", ",", "[", "]", ")", "for", "name", ",", "response_params", "in", "parsed_header_values", ":", "for", "extension_factory", "in", "available_extensions", ":", "# Skip non-matching extensions based on their name.", "if", "extension_factory", ".", "name", "!=", "name", ":", "continue", "# Skip non-matching extensions based on their params.", "try", ":", "extension", "=", "extension_factory", ".", "process_response_params", "(", "response_params", ",", "accepted_extensions", ")", "except", "NegotiationError", ":", "continue", "# Add matching extension to the final list.", "accepted_extensions", ".", "append", "(", "extension", ")", "# Break out of the loop once we have a match.", "break", "# If we didn't break from the loop, no extension in our list", "# matched what the server sent. Fail the connection.", "else", ":", "raise", "NegotiationError", "(", "f\"Unsupported extension: \"", "f\"name = {name}, params = {response_params}\"", ")", "return", "accepted_extensions" ]
38.216216
0.001379
def user_sessions_delete(self, user_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/sessions#bulk-deleting-sessions" api_path = "/api/v2/users/{user_id}/sessions.json" api_path = api_path.format(user_id=user_id) return self.call(api_path, method="DELETE", **kwargs)
[ "def", "user_sessions_delete", "(", "self", ",", "user_id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/users/{user_id}/sessions.json\"", "api_path", "=", "api_path", ".", "format", "(", "user_id", "=", "user_id", ")", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"DELETE\"", ",", "*", "*", "kwargs", ")" ]
62
0.009554
def get_symbol_info(self, symbol): """Return information about a symbol :param symbol: required e.g BNBBTC :type symbol: str :returns: Dict if found, None if not .. code-block:: python { "symbol": "ETHBTC", "status": "TRADING", "baseAsset": "ETH", "baseAssetPrecision": 8, "quoteAsset": "BTC", "quotePrecision": 8, "orderTypes": ["LIMIT", "MARKET"], "icebergAllowed": false, "filters": [ { "filterType": "PRICE_FILTER", "minPrice": "0.00000100", "maxPrice": "100000.00000000", "tickSize": "0.00000100" }, { "filterType": "LOT_SIZE", "minQty": "0.00100000", "maxQty": "100000.00000000", "stepSize": "0.00100000" }, { "filterType": "MIN_NOTIONAL", "minNotional": "0.00100000" } ] } :raises: BinanceRequestException, BinanceAPIException """ res = self._get('exchangeInfo') for item in res['symbols']: if item['symbol'] == symbol.upper(): return item return None
[ "def", "get_symbol_info", "(", "self", ",", "symbol", ")", ":", "res", "=", "self", ".", "_get", "(", "'exchangeInfo'", ")", "for", "item", "in", "res", "[", "'symbols'", "]", ":", "if", "item", "[", "'symbol'", "]", "==", "symbol", ".", "upper", "(", ")", ":", "return", "item", "return", "None" ]
29.833333
0.001352
def delete_record(self, domain, record): """ Deletes an existing record for a domain. """ uri = "/domains/%s/records/%s" % (utils.get_id(domain), utils.get_id(record)) resp, resp_body = self._async_call(uri, method="DELETE", error_class=exc.DomainRecordDeletionFailed, has_response=False) return resp_body
[ "def", "delete_record", "(", "self", ",", "domain", ",", "record", ")", ":", "uri", "=", "\"/domains/%s/records/%s\"", "%", "(", "utils", ".", "get_id", "(", "domain", ")", ",", "utils", ".", "get_id", "(", "record", ")", ")", "resp", ",", "resp_body", "=", "self", ".", "_async_call", "(", "uri", ",", "method", "=", "\"DELETE\"", ",", "error_class", "=", "exc", ".", "DomainRecordDeletionFailed", ",", "has_response", "=", "False", ")", "return", "resp_body" ]
41.888889
0.01039
def filter_query(key, expression): """Filter documents with a key that satisfies an expression.""" if (isinstance(expression, dict) and len(expression) == 1 and list(expression.keys())[0].startswith('$')): compiled_expression = compile_query(expression) elif callable(expression): def _filter(index, expression=expression): result = [store_key for value, store_keys in index.get_index().items() if expression(value) for store_key in store_keys] return result compiled_expression = _filter else: compiled_expression = expression def _get(query_function, key=key, expression=compiled_expression): """Get document key and check against expression.""" return query_function(key, expression) return _get
[ "def", "filter_query", "(", "key", ",", "expression", ")", ":", "if", "(", "isinstance", "(", "expression", ",", "dict", ")", "and", "len", "(", "expression", ")", "==", "1", "and", "list", "(", "expression", ".", "keys", "(", ")", ")", "[", "0", "]", ".", "startswith", "(", "'$'", ")", ")", ":", "compiled_expression", "=", "compile_query", "(", "expression", ")", "elif", "callable", "(", "expression", ")", ":", "def", "_filter", "(", "index", ",", "expression", "=", "expression", ")", ":", "result", "=", "[", "store_key", "for", "value", ",", "store_keys", "in", "index", ".", "get_index", "(", ")", ".", "items", "(", ")", "if", "expression", "(", "value", ")", "for", "store_key", "in", "store_keys", "]", "return", "result", "compiled_expression", "=", "_filter", "else", ":", "compiled_expression", "=", "expression", "def", "_get", "(", "query_function", ",", "key", "=", "key", ",", "expression", "=", "compiled_expression", ")", ":", "\"\"\"Get document key and check against expression.\"\"\"", "return", "query_function", "(", "key", ",", "expression", ")", "return", "_get" ]
39.318182
0.001129
def FixmatStimuliFactory(fm, loader): """ Constructs an categories object for all image / category combinations in the fixmat. Parameters: fm: FixMat Used for extracting valid category/image combination. loader: loader Loader that accesses the stimuli for this fixmat Returns: Categories object """ # Find all feature names features = [] if loader.ftrpath: assert os.access(loader.ftrpath, os.R_OK) features = os.listdir(os.path.join(loader.ftrpath, str(fm.category[0]))) # Find all images in all categories img_per_cat = {} for cat in np.unique(fm.category): if not loader.test_for_category(cat): raise ValueError('Category %s is specified in fixmat but '%( str(cat) + 'can not be located by loader')) img_per_cat[cat] = [] for img in np.unique(fm[(fm.category == cat)].filenumber): if not loader.test_for_image(cat, img): raise ValueError('Image %s in category %s is '%(str(cat), str(img)) + 'specified in fixmat but can be located by loader') img_per_cat[cat].append(img) if loader.ftrpath: for feature in features: if not loader.test_for_feature(cat, img, feature): raise RuntimeError( 'Feature %s for image %s' %(str(feature),str(img)) + ' in category %s ' %str(cat) + 'can not be located by loader') return Categories(loader, img_per_cat = img_per_cat, features = features, fixations = fm)
[ "def", "FixmatStimuliFactory", "(", "fm", ",", "loader", ")", ":", "# Find all feature names", "features", "=", "[", "]", "if", "loader", ".", "ftrpath", ":", "assert", "os", ".", "access", "(", "loader", ".", "ftrpath", ",", "os", ".", "R_OK", ")", "features", "=", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "loader", ".", "ftrpath", ",", "str", "(", "fm", ".", "category", "[", "0", "]", ")", ")", ")", "# Find all images in all categories ", "img_per_cat", "=", "{", "}", "for", "cat", "in", "np", ".", "unique", "(", "fm", ".", "category", ")", ":", "if", "not", "loader", ".", "test_for_category", "(", "cat", ")", ":", "raise", "ValueError", "(", "'Category %s is specified in fixmat but '", "%", "(", "str", "(", "cat", ")", "+", "'can not be located by loader'", ")", ")", "img_per_cat", "[", "cat", "]", "=", "[", "]", "for", "img", "in", "np", ".", "unique", "(", "fm", "[", "(", "fm", ".", "category", "==", "cat", ")", "]", ".", "filenumber", ")", ":", "if", "not", "loader", ".", "test_for_image", "(", "cat", ",", "img", ")", ":", "raise", "ValueError", "(", "'Image %s in category %s is '", "%", "(", "str", "(", "cat", ")", ",", "str", "(", "img", ")", ")", "+", "'specified in fixmat but can be located by loader'", ")", "img_per_cat", "[", "cat", "]", ".", "append", "(", "img", ")", "if", "loader", ".", "ftrpath", ":", "for", "feature", "in", "features", ":", "if", "not", "loader", ".", "test_for_feature", "(", "cat", ",", "img", ",", "feature", ")", ":", "raise", "RuntimeError", "(", "'Feature %s for image %s'", "%", "(", "str", "(", "feature", ")", ",", "str", "(", "img", ")", ")", "+", "' in category %s '", "%", "str", "(", "cat", ")", "+", "'can not be located by loader'", ")", "return", "Categories", "(", "loader", ",", "img_per_cat", "=", "img_per_cat", ",", "features", "=", "features", ",", "fixations", "=", "fm", ")" ]
41.512195
0.014351
def make_value_setter(**model_kwargs): """Creates a value-setting interceptor. This function creates an interceptor that sets values of Edward2 random variable objects. This is useful for a range of tasks, including conditioning on observed data, sampling from posterior predictive distributions, and as a building block of inference primitives such as computing log joint probabilities (see examples below). Args: **model_kwargs: dict of str to Tensor. Keys are the names of random variables in the model to which this interceptor is being applied. Values are Tensors to set their value to. Variables not included in this dict will not be set and will maintain their existing value semantics (by default, a sample from the parent-conditional distribution). Returns: set_values: function that sets the value of intercepted ops. #### Examples Consider for illustration a model with latent `z` and observed `x`, and a corresponding trainable posterior model: ```python num_observations = 10 def model(): z = ed.Normal(loc=0, scale=1., name='z') # log rate x = ed.Poisson(rate=tf.exp(z) * tf.ones(num_observations), name='x') return x def variational_model(): return ed.Normal(loc=tf.Variable(0.), scale=tf.nn.softplus(tf.Variable(-4.)), name='z') # for simplicity, match name of the model RV. ``` We can use a value-setting interceptor to condition the model on observed data. This approach is slightly more cumbersome than that of partially evaluating the complete log-joint function, but has the potential advantage that it returns a new model callable, which may be used to sample downstream variables, passed into additional transformations, etc. ```python x_observed = np.array([6, 3, 1, 8, 7, 0, 6, 4, 7, 5]) def observed_model(): with ed.interception(make_value_setter(x=x_observed)): model() observed_log_joint_fn = ed.make_log_joint_fn(observed_model) # After fixing 'x', the observed log joint is now only a function of 'z'. # This enables us to define a variational lower bound, # `E_q[ log p(x, z) - log q(z)]`, simply by evaluating the observed and # variational log joints at variational samples. variational_log_joint_fn = ed.make_log_joint_fn(variational_model) with ed.tape() as variational_sample: # Sample trace from variational model. variational_model() elbo_loss = -(observed_log_joint_fn(**variational_sample) - variational_log_joint_fn(**variational_sample)) ``` After performing inference by minimizing the variational loss, a value-setting interceptor enables simulation from the posterior predictive distribution: ```python with ed.tape() as posterior_samples: # tape is a map {rv.name : rv} variational_model() with ed.interception(ed.make_value_setter(**posterior_samples)): x = model() # x is a sample from p(X | Z = z') where z' ~ q(z) (the variational model) ``` As another example, using a value setter inside of `ed.tape` enables computing the log joint probability, by setting all variables to posterior values and then accumulating the log probs of those values under the induced parent-conditional distributions. This is one way that we could have implemented `ed.make_log_joint_fn`: ```python def make_log_joint_fn_demo(model): def log_joint_fn(**model_kwargs): with ed.tape() as model_tape: with ed.make_value_setter(**model_kwargs): model() # accumulate sum_i log p(X_i = x_i | X_{:i-1} = x_{:i-1}) log_prob = 0. 
for rv in model_tape.values(): log_prob += tf.reduce_sum(rv.log_prob(rv.value)) return log_prob return log_joint_fn ``` """ def set_values(f, *args, **kwargs): """Sets random variable values to its aligned value.""" name = kwargs.get("name") if name in model_kwargs: kwargs["value"] = model_kwargs[name] return interceptable(f)(*args, **kwargs) return set_values
[ "def", "make_value_setter", "(", "*", "*", "model_kwargs", ")", ":", "def", "set_values", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Sets random variable values to its aligned value.\"\"\"", "name", "=", "kwargs", ".", "get", "(", "\"name\"", ")", "if", "name", "in", "model_kwargs", ":", "kwargs", "[", "\"value\"", "]", "=", "model_kwargs", "[", "name", "]", "return", "interceptable", "(", "f", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "set_values" ]
38.715686
0.001481
def _anova(self, dv=None, between=None, detailed=False, export_filename=None): """Return one-way and two-way ANOVA.""" aov = anova(data=self, dv=dv, between=between, detailed=detailed, export_filename=export_filename) return aov
[ "def", "_anova", "(", "self", ",", "dv", "=", "None", ",", "between", "=", "None", ",", "detailed", "=", "False", ",", "export_filename", "=", "None", ")", ":", "aov", "=", "anova", "(", "data", "=", "self", ",", "dv", "=", "dv", ",", "between", "=", "between", ",", "detailed", "=", "detailed", ",", "export_filename", "=", "export_filename", ")", "return", "aov" ]
50.4
0.003906
def query(self, startTime=None, endTime=None, sinceServerStart=False, level="WARNING", services="*", machines="*", server="*", codes=[], processIds=[], export=False, exportType="CSV", #CSV or TAB out_path=None ): """ The query operation on the logs resource provides a way to aggregate, filter, and page through logs across the entire site. Inputs: """ allowed_levels = ("SEVERE", "WARNING", "INFO", "FINE", "VERBOSE", "DEBUG") qFilter = { "services": "*", "machines": "*", "server" : "*" } if len(processIds) > 0: qFilter['processIds'] = processIds if len(codes) > 0: qFilter['codes'] = codes params = { "f" : "json", "sinceServerStart" : sinceServerStart, "pageSize" : 10000 } if startTime is not None and \ isinstance(startTime, datetime): params['startTime'] = startTime.strftime("%Y-%m-%dT%H:%M:%S") if endTime is not None and \ isinstance(endTime, datetime): params['endTime'] = endTime.strftime("%Y-%m-%dT%H:%M:%S") if level.upper() in allowed_levels: params['level'] = level if server != "*": qFilter['server'] = server.split(',') if services != "*": qFilter['services'] = services.split(',') if machines != "*": qFilter['machines'] = machines.split(",") params['filter'] = qFilter if export == True and \ out_path is not None: messages = self._post(self._url + "/query", params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) with open(name=out_path, mode='wb') as f: hasKeys = False if exportType == "TAB": csvwriter = csv.writer(f, delimiter='\t') else: csvwriter = csv.writer(f) for message in messages['logMessages']: if hasKeys == False: csvwriter.writerow(message.keys()) hasKeys = True csvwriter.writerow(message.values()) del message del messages return out_path else: return self._post(self._url + "/query", params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "query", "(", "self", ",", "startTime", "=", "None", ",", "endTime", "=", "None", ",", "sinceServerStart", "=", "False", ",", "level", "=", "\"WARNING\"", ",", "services", "=", "\"*\"", ",", "machines", "=", "\"*\"", ",", "server", "=", "\"*\"", ",", "codes", "=", "[", "]", ",", "processIds", "=", "[", "]", ",", "export", "=", "False", ",", "exportType", "=", "\"CSV\"", ",", "#CSV or TAB", "out_path", "=", "None", ")", ":", "allowed_levels", "=", "(", "\"SEVERE\"", ",", "\"WARNING\"", ",", "\"INFO\"", ",", "\"FINE\"", ",", "\"VERBOSE\"", ",", "\"DEBUG\"", ")", "qFilter", "=", "{", "\"services\"", ":", "\"*\"", ",", "\"machines\"", ":", "\"*\"", ",", "\"server\"", ":", "\"*\"", "}", "if", "len", "(", "processIds", ")", ">", "0", ":", "qFilter", "[", "'processIds'", "]", "=", "processIds", "if", "len", "(", "codes", ")", ">", "0", ":", "qFilter", "[", "'codes'", "]", "=", "codes", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"sinceServerStart\"", ":", "sinceServerStart", ",", "\"pageSize\"", ":", "10000", "}", "if", "startTime", "is", "not", "None", "and", "isinstance", "(", "startTime", ",", "datetime", ")", ":", "params", "[", "'startTime'", "]", "=", "startTime", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%S\"", ")", "if", "endTime", "is", "not", "None", "and", "isinstance", "(", "endTime", ",", "datetime", ")", ":", "params", "[", "'endTime'", "]", "=", "endTime", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%S\"", ")", "if", "level", ".", "upper", "(", ")", "in", "allowed_levels", ":", "params", "[", "'level'", "]", "=", "level", "if", "server", "!=", "\"*\"", ":", "qFilter", "[", "'server'", "]", "=", "server", ".", "split", "(", "','", ")", "if", "services", "!=", "\"*\"", ":", "qFilter", "[", "'services'", "]", "=", "services", ".", "split", "(", "','", ")", "if", "machines", "!=", "\"*\"", ":", "qFilter", "[", "'machines'", "]", "=", "machines", ".", "split", "(", "\",\"", ")", "params", "[", "'filter'", "]", "=", "qFilter", "if", "export", "==", "True", "and", "out_path", "is", "not", "None", ":", "messages", "=", "self", ".", "_post", "(", "self", ".", "_url", "+", "\"/query\"", ",", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "with", "open", "(", "name", "=", "out_path", ",", "mode", "=", "'wb'", ")", "as", "f", ":", "hasKeys", "=", "False", "if", "exportType", "==", "\"TAB\"", ":", "csvwriter", "=", "csv", ".", "writer", "(", "f", ",", "delimiter", "=", "'\\t'", ")", "else", ":", "csvwriter", "=", "csv", ".", "writer", "(", "f", ")", "for", "message", "in", "messages", "[", "'logMessages'", "]", ":", "if", "hasKeys", "==", "False", ":", "csvwriter", ".", "writerow", "(", "message", ".", "keys", "(", ")", ")", "hasKeys", "=", "True", "csvwriter", ".", "writerow", "(", "message", ".", "values", "(", ")", ")", "del", "message", "del", "messages", "return", "out_path", "else", ":", "return", "self", ".", "_post", "(", "self", ".", "_url", "+", "\"/query\"", ",", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
37.272727
0.009844
def words(min_size, max_size = None):
        """
        Generates a random text that consists of a random number of random words separated by spaces.

        :param min_size: (optional) a minimum number of words.

        :param max_size: a maximum number of words.

        :return: a random text.
        """
        max_size = max_size if max_size != None else min_size
        result = ""
        count = RandomInteger.next_integer(min_size, max_size)
        for i in range(count):
            result += random.choice(_all_words)
        return result
[ "def", "words", "(", "min_size", ",", "max_size", "=", "None", ")", ":", "max_size", "=", "max_size", "if", "max_size", "!=", "None", "else", "min_size", "result", "=", "\"\"", "count", "=", "RandomInteger", ".", "next_integer", "(", "min_size", ",", "max_size", ")", "for", "i", "in", "range", "(", "count", ")", ":", "result", "+=", "random", ".", "choice", "(", "_all_words", ")", "return", "result" ]
30.555556
0.012346
def qtrim_back(self, name, size=1):
        """
        Removes up to ``size`` elements from the tail of the queue ``name``.

        :param string name: the queue name
        :param int size: the max length of removed elements
        :return: the length of removed elements
        :rtype: int

        """
        size = get_positive_integer("size", size)
        return self.execute_command('qtrim_back', name, size)
[ "def", "qtrim_back", "(", "self", ",", "name", ",", "size", "=", "1", ")", ":", "size", "=", "get_positive_integer", "(", "\"size\"", ",", "size", ")", "return", "self", ".", "execute_command", "(", "'qtrim_back'", ",", "name", ",", "size", ")" ]
35.384615
0.010593
def put(self, *msgs): """Put one or more messages onto the queue. Example: >>> queue.put("my message") >>> queue.put("another message") To put messages onto the queue in bulk, which can be significantly faster if you have a large number of messages: >>> queue.put("my message", "another message", "third message") """ if self.serializer is not None: msgs = map(self.serializer.dumps, msgs) self.__redis.rpush(self.key, *msgs)
[ "def", "put", "(", "self", ",", "*", "msgs", ")", ":", "if", "self", ".", "serializer", "is", "not", "None", ":", "msgs", "=", "map", "(", "self", ".", "serializer", ".", "dumps", ",", "msgs", ")", "self", ".", "__redis", ".", "rpush", "(", "self", ".", "key", ",", "*", "msgs", ")" ]
37.357143
0.009328
def color_diff(rgb1, rgb2): """ Calculate distance between two RGB colors. See discussion: http://stackoverflow.com/questions/8863810/python-find-similar-colors-best-way - for basic / fast calculations, you can use dE76 but beware of its problems - for graphics arts use we recommend dE94 and perhaps dE-CMC 2:1 - for textiles use dE-CMC """ import numpy as np from skimage.color import rgb2lab, deltaE_cmc rgb1 = np.array(rgb1, dtype="float64").reshape(1, 1, 3) / 255. rgb2 = np.array(rgb2, dtype="float64").reshape(1, 1, 3) / 255. lab1 = rgb2lab(rgb1) lab2 = rgb2lab(rgb2) return deltaE_cmc(lab1, lab2, kL=2, kC=1)[0, 0]
[ "def", "color_diff", "(", "rgb1", ",", "rgb2", ")", ":", "import", "numpy", "as", "np", "from", "skimage", ".", "color", "import", "rgb2lab", ",", "deltaE_cmc", "rgb1", "=", "np", ".", "array", "(", "rgb1", ",", "dtype", "=", "\"float64\"", ")", ".", "reshape", "(", "1", ",", "1", ",", "3", ")", "/", "255.", "rgb2", "=", "np", ".", "array", "(", "rgb2", ",", "dtype", "=", "\"float64\"", ")", ".", "reshape", "(", "1", ",", "1", ",", "3", ")", "/", "255.", "lab1", "=", "rgb2lab", "(", "rgb1", ")", "lab2", "=", "rgb2lab", "(", "rgb2", ")", "return", "deltaE_cmc", "(", "lab1", ",", "lab2", ",", "kL", "=", "2", ",", "kC", "=", "1", ")", "[", "0", ",", "0", "]" ]
36.888889
0.002937
def _disc_kn(clearness_index, airmass, max_airmass=12): """ Calculate Kn for `disc` Args: clearness_index : numeric airmass : numeric max_airmass : float airmass > max_airmass is set to max_airmass before being used in calculating Kn. Returns: Kn : numeric am : numeric airmass used in the calculation of Kn. am <= max_airmass. """ # short names for equations kt = clearness_index am = airmass am = min(am, max_airmass) # GH 450 # powers of kt will be used repeatedly, so compute only once kt2 = kt * kt # about the same as kt ** 2 kt3 = kt2 * kt # 5-10x faster than kt ** 3 if kt <= 0.6: a = 0.512 - 1.56*kt + 2.286*kt2 - 2.222*kt3 b = 0.37 + 0.962*kt c = -0.28 + 0.932*kt - 2.048*kt2 else: a = -5.743 + 21.77*kt - 27.49*kt2 + 11.56*kt3 b = 41.4 - 118.5*kt + 66.05*kt2 + 31.9*kt3 c = -47.01 + 184.2*kt - 222.0*kt2 + 73.81*kt3 delta_kn = a + b * math.exp(c*am) Knc = 0.866 - 0.122*am + 0.0121*am**2 - 0.000653*am**3 + 1.4e-05*am**4 Kn = Knc - delta_kn return Kn, am
[ "def", "_disc_kn", "(", "clearness_index", ",", "airmass", ",", "max_airmass", "=", "12", ")", ":", "# short names for equations", "kt", "=", "clearness_index", "am", "=", "airmass", "am", "=", "min", "(", "am", ",", "max_airmass", ")", "# GH 450", "# powers of kt will be used repeatedly, so compute only once", "kt2", "=", "kt", "*", "kt", "# about the same as kt ** 2", "kt3", "=", "kt2", "*", "kt", "# 5-10x faster than kt ** 3", "if", "kt", "<=", "0.6", ":", "a", "=", "0.512", "-", "1.56", "*", "kt", "+", "2.286", "*", "kt2", "-", "2.222", "*", "kt3", "b", "=", "0.37", "+", "0.962", "*", "kt", "c", "=", "-", "0.28", "+", "0.932", "*", "kt", "-", "2.048", "*", "kt2", "else", ":", "a", "=", "-", "5.743", "+", "21.77", "*", "kt", "-", "27.49", "*", "kt2", "+", "11.56", "*", "kt3", "b", "=", "41.4", "-", "118.5", "*", "kt", "+", "66.05", "*", "kt2", "+", "31.9", "*", "kt3", "c", "=", "-", "47.01", "+", "184.2", "*", "kt", "-", "222.0", "*", "kt2", "+", "73.81", "*", "kt3", "delta_kn", "=", "a", "+", "b", "*", "math", ".", "exp", "(", "c", "*", "am", ")", "Knc", "=", "0.866", "-", "0.122", "*", "am", "+", "0.0121", "*", "am", "**", "2", "-", "0.000653", "*", "am", "**", "3", "+", "1.4e-05", "*", "am", "**", "4", "Kn", "=", "Knc", "-", "delta_kn", "return", "Kn", ",", "am" ]
28.3
0.000854
def _findSwiplDar(): """ This function uses several heuristics to guess where SWI-Prolog is installed in MacOS. :returns: A tuple of (path to the swipl so, path to the resource file) :returns type: ({str, None}, {str, None}) """ # If the exec is in path (path, swiHome) = _findSwiplFromExec() if path is not None: return (path, swiHome) # If it is not, use find_library path = _findSwiplPathFromFindLib() if path is not None: return (path, swiHome) # Last guess, searching for the file paths = ['.', './lib', '/usr/lib/', '/usr/local/lib', '/opt/local/lib'] names = ['libswipl.dylib', 'libpl.dylib'] for name in names: for path in paths: path = os.path.join(path, name) if os.path.exists(path): return (path, None) return (None, None)
[ "def", "_findSwiplDar", "(", ")", ":", "# If the exec is in path", "(", "path", ",", "swiHome", ")", "=", "_findSwiplFromExec", "(", ")", "if", "path", "is", "not", "None", ":", "return", "(", "path", ",", "swiHome", ")", "# If it is not, use find_library", "path", "=", "_findSwiplPathFromFindLib", "(", ")", "if", "path", "is", "not", "None", ":", "return", "(", "path", ",", "swiHome", ")", "# Last guess, searching for the file", "paths", "=", "[", "'.'", ",", "'./lib'", ",", "'/usr/lib/'", ",", "'/usr/local/lib'", ",", "'/opt/local/lib'", "]", "names", "=", "[", "'libswipl.dylib'", ",", "'libpl.dylib'", "]", "for", "name", "in", "names", ":", "for", "path", "in", "paths", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "(", "path", ",", "None", ")", "return", "(", "None", ",", "None", ")" ]
26
0.001124
def find_best_ensemble(results, options): """ Return the best performing ensemble. If the user hasn't specified a FPF, the default behavior sorts ensembles by largest enrichment factor at the smallest FPF (1 / n, where n is the total number of decoys). If the user supplied a FPF, ensembles are sorted by the largest enrichment factor at the input FPF. If the user supplied a FPF = 1, ensembles are sorted by AUC. Ties are broken by considering enrichment factors at the smallest FPF not already considered. :param results: {ensemble_storage_object1, ensemble_storage_object2, ..., ensemble_storage_objectn} :param options: options object, contains user-specified arguments as attributes. :return: ensemble_storage_object (classification.EnsembleStorage) that contains the best performing ensemble """ # We need the total number of decoys in the set to determine the number of decoys that correspond to the FPF values # at which enrichment factors were measured. The number of decoys are keys in the ef dictionary of each # ensemble object stored in the results dictionary. n = sorted(list(list(results.items())[0][1].ef.keys()), reverse=True)[0] # determine the number of decoys that correspond to the FPF used for training if not options.fpf: ndecoys = 1 else: ndecoys = int(round(n * options.fpf)) # sort the results according to the user-specified training method if ndecoys == n: # the user specified an fpf of 1, so wants the ensemble the maximizes the AUC, so sort on auc prop_key = 'auc' sorted_list = sorted(results.items(), key = lambda x: x[1].get_prop(prop_key), reverse=True) else: # the user is interested in an ensemble that maximizes an enrichment factor at some FPF prop_key = 'ef' sorted_list = sorted(results.items(), key = lambda x: x[1].get_prop(ndecoys, prop_key), reverse=True) # we only need to consider breaking a tie if there is more than one ensemble to consider if len(sorted_list) > 1: sorted_list = tie_break(sorted_list, results, prop_key, ndecoys) return sorted_list[0][0]
[ "def", "find_best_ensemble", "(", "results", ",", "options", ")", ":", "# We need the total number of decoys in the set to determine the number of decoys that correspond to the FPF values", "# at which enrichment factors were measured. The number of decoys are keys in the ef dictionary of each", "# ensemble object stored in the results dictionary.", "n", "=", "sorted", "(", "list", "(", "list", "(", "results", ".", "items", "(", ")", ")", "[", "0", "]", "[", "1", "]", ".", "ef", ".", "keys", "(", ")", ")", ",", "reverse", "=", "True", ")", "[", "0", "]", "# determine the number of decoys that correspond to the FPF used for training", "if", "not", "options", ".", "fpf", ":", "ndecoys", "=", "1", "else", ":", "ndecoys", "=", "int", "(", "round", "(", "n", "*", "options", ".", "fpf", ")", ")", "# sort the results according to the user-specified training method", "if", "ndecoys", "==", "n", ":", "# the user specified an fpf of 1, so wants the ensemble the maximizes the AUC, so sort on auc", "prop_key", "=", "'auc'", "sorted_list", "=", "sorted", "(", "results", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ".", "get_prop", "(", "prop_key", ")", ",", "reverse", "=", "True", ")", "else", ":", "# the user is interested in an ensemble that maximizes an enrichment factor at some FPF", "prop_key", "=", "'ef'", "sorted_list", "=", "sorted", "(", "results", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ".", "get_prop", "(", "ndecoys", ",", "prop_key", ")", ",", "reverse", "=", "True", ")", "# we only need to consider breaking a tie if there is more than one ensemble to consider", "if", "len", "(", "sorted_list", ")", ">", "1", ":", "sorted_list", "=", "tie_break", "(", "sorted_list", ",", "results", ",", "prop_key", ",", "ndecoys", ")", "return", "sorted_list", "[", "0", "]", "[", "0", "]" ]
56.210526
0.009204
def draw_pl_vote(m, gamma): """ Description: Generate a Plackett-Luce vote given the model parameters. Parameters: m: number of alternatives gamma: parameters of the Plackett-Luce model """ localgamma = np.copy(gamma) # work on a copy of gamma localalts = np.arange(m) # enumeration of the candidates vote = [] for j in range(m): # generate position in vote for every alternative # transform local gamma into intervals up to 1.0 localgammaintervals = np.copy(localgamma) prev = 0.0 for k in range(len(localgammaintervals)): localgammaintervals[k] += prev prev = localgammaintervals[k] selection = np.random.random() # pick random number # selection will fall into a gamma interval for l in range(len(localgammaintervals)): # determine position if selection <= localgammaintervals[l]: vote.append(localalts[l]) localgamma = np.delete(localgamma, l) # remove that gamma localalts = np.delete(localalts, l) # remove the alternative localgamma /= np.sum(localgamma) # renormalize break return vote
[ "def", "draw_pl_vote", "(", "m", ",", "gamma", ")", ":", "localgamma", "=", "np", ".", "copy", "(", "gamma", ")", "# work on a copy of gamma\r", "localalts", "=", "np", ".", "arange", "(", "m", ")", "# enumeration of the candidates\r", "vote", "=", "[", "]", "for", "j", "in", "range", "(", "m", ")", ":", "# generate position in vote for every alternative\r", "# transform local gamma into intervals up to 1.0\r", "localgammaintervals", "=", "np", ".", "copy", "(", "localgamma", ")", "prev", "=", "0.0", "for", "k", "in", "range", "(", "len", "(", "localgammaintervals", ")", ")", ":", "localgammaintervals", "[", "k", "]", "+=", "prev", "prev", "=", "localgammaintervals", "[", "k", "]", "selection", "=", "np", ".", "random", ".", "random", "(", ")", "# pick random number\r", "# selection will fall into a gamma interval\r", "for", "l", "in", "range", "(", "len", "(", "localgammaintervals", ")", ")", ":", "# determine position\r", "if", "selection", "<=", "localgammaintervals", "[", "l", "]", ":", "vote", ".", "append", "(", "localalts", "[", "l", "]", ")", "localgamma", "=", "np", ".", "delete", "(", "localgamma", ",", "l", ")", "# remove that gamma\r", "localalts", "=", "np", ".", "delete", "(", "localalts", ",", "l", ")", "# remove the alternative\r", "localgamma", "/=", "np", ".", "sum", "(", "localgamma", ")", "# renormalize\r", "break", "return", "vote" ]
38.53125
0.007911
def make_tables(job_dict): """Build and return an `astropy.table.Table' to store `JobDetails`""" col_dbkey = Column(name='dbkey', dtype=int) col_jobname = Column(name='jobname', dtype='S64') col_jobkey = Column(name='jobkey', dtype='S64') col_appname = Column(name='appname', dtype='S64') col_logfile = Column(name='logfile', dtype='S256') col_job_config = Column(name='job_config', dtype='S1024') col_timestamp = Column(name='timestamp', dtype=int) col_infile_refs = Column(name='infile_refs', dtype=int, shape=(2)) col_outfile_refs = Column(name='outfile_refs', dtype=int, shape=(2)) col_rmfile_refs = Column(name='rmfile_refs', dtype=int, shape=(2)) col_intfile_refs = Column(name='intfile_refs', dtype=int, shape=(2)) col_status = Column(name='status', dtype=int) columns = [col_dbkey, col_jobname, col_jobkey, col_appname, col_logfile, col_job_config, col_timestamp, col_infile_refs, col_outfile_refs, col_rmfile_refs, col_intfile_refs, col_status] table = Table(data=columns) col_file_ids = Column(name='file_id', dtype=int) table_ids = Table(data=[col_file_ids]) for val in job_dict.values(): val.append_to_tables(table, table_ids) return table, table_ids
[ "def", "make_tables", "(", "job_dict", ")", ":", "col_dbkey", "=", "Column", "(", "name", "=", "'dbkey'", ",", "dtype", "=", "int", ")", "col_jobname", "=", "Column", "(", "name", "=", "'jobname'", ",", "dtype", "=", "'S64'", ")", "col_jobkey", "=", "Column", "(", "name", "=", "'jobkey'", ",", "dtype", "=", "'S64'", ")", "col_appname", "=", "Column", "(", "name", "=", "'appname'", ",", "dtype", "=", "'S64'", ")", "col_logfile", "=", "Column", "(", "name", "=", "'logfile'", ",", "dtype", "=", "'S256'", ")", "col_job_config", "=", "Column", "(", "name", "=", "'job_config'", ",", "dtype", "=", "'S1024'", ")", "col_timestamp", "=", "Column", "(", "name", "=", "'timestamp'", ",", "dtype", "=", "int", ")", "col_infile_refs", "=", "Column", "(", "name", "=", "'infile_refs'", ",", "dtype", "=", "int", ",", "shape", "=", "(", "2", ")", ")", "col_outfile_refs", "=", "Column", "(", "name", "=", "'outfile_refs'", ",", "dtype", "=", "int", ",", "shape", "=", "(", "2", ")", ")", "col_rmfile_refs", "=", "Column", "(", "name", "=", "'rmfile_refs'", ",", "dtype", "=", "int", ",", "shape", "=", "(", "2", ")", ")", "col_intfile_refs", "=", "Column", "(", "name", "=", "'intfile_refs'", ",", "dtype", "=", "int", ",", "shape", "=", "(", "2", ")", ")", "col_status", "=", "Column", "(", "name", "=", "'status'", ",", "dtype", "=", "int", ")", "columns", "=", "[", "col_dbkey", ",", "col_jobname", ",", "col_jobkey", ",", "col_appname", ",", "col_logfile", ",", "col_job_config", ",", "col_timestamp", ",", "col_infile_refs", ",", "col_outfile_refs", ",", "col_rmfile_refs", ",", "col_intfile_refs", ",", "col_status", "]", "table", "=", "Table", "(", "data", "=", "columns", ")", "col_file_ids", "=", "Column", "(", "name", "=", "'file_id'", ",", "dtype", "=", "int", ")", "table_ids", "=", "Table", "(", "data", "=", "[", "col_file_ids", "]", ")", "for", "val", "in", "job_dict", ".", "values", "(", ")", ":", "val", ".", "append_to_tables", "(", "table", ",", "table_ids", ")", "return", "table", ",", "table_ids" ]
49.214286
0.001423