Dataset fields: repo (string, 7–55 chars), path (string, 4–127 chars), func_name (string, 1–88 chars), original_string (string, 75–19.8k chars), language (string, 1 class), code (string, 75–19.8k chars), code_tokens (sequence), docstring (string, 3–17.3k chars), docstring_tokens (sequence), sha (string, 40 chars), url (string, 87–242 chars), partition (string, 1 class).
sofiatolaosebikan/hopcroftkarp
hopcroftkarp/__init__.py
HopcroftKarp.__dfs
def __dfs(self, v, index, layers):
    """
    Recursively run DFS from each vertex in free_vertex.

    :param v: a vertex in free_vertex
    :return: True if P (the maximal set of vertex-disjoint alternating
             paths of length k) is not empty, False otherwise.
    """
    if index == 0:
        path = [v]
        while self._dfs_parent[v] != v:
            path.append(self._dfs_parent[v])
            v = self._dfs_parent[v]
        self._dfs_paths.append(path)
        return True
    for neighbour in self._graph[v]:  # check the neighbours of vertex
        if neighbour in layers[index - 1]:
            # if neighbour is in left, we are traversing unmatched edges..
            if neighbour in self._dfs_parent:
                continue
            if (neighbour in self._left and (v not in self._matching or neighbour != self._matching[v])) or \
                    (neighbour in self._right and (v in self._matching and neighbour == self._matching[v])):
                self._dfs_parent[neighbour] = v
                if self.__dfs(neighbour, index - 1, layers):
                    return True
    return False
python
5e6cf4f95702304847307a07d369f8041edff8c9
https://github.com/sofiatolaosebikan/hopcroftkarp/blob/5e6cf4f95702304847307a07d369f8041edff8c9/hopcroftkarp/__init__.py#L84-L109
train
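The __dfs helper above is private to the matching algorithm. A minimal usage sketch for the package as a whole, assuming the dict-of-sets graph layout documented in the project README:

from hopcroftkarp import HopcroftKarp

# Left vertices map to sets of right vertices; this layout is an
# assumption based on the package README, not on the code above.
graph = {'a': {1, 2}, 'b': {2}, 'c': {2, 3}}
matching = HopcroftKarp(graph).maximum_matching()
print(matching)  # maps each matched vertex to its partner, in both directions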
tehmaze/parser
parser/base.py
Parser.method
def method(self, symbol):
    '''
    Symbol decorator.
    '''
    assert issubclass(symbol, SymbolBase)
    def wrapped(fn):
        setattr(symbol, fn.__name__, fn)
    return wrapped
python
ccc69236304b2f00671f14c62433e8830b838101
https://github.com/tehmaze/parser/blob/ccc69236304b2f00671f14c62433e8830b838101/parser/base.py#L69-L76
train
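A self-contained re-creation of the registration mechanism, so its behaviour can be observed without the real Parser and SymbolBase classes (both stand-ins below are illustrative):

class SymbolBase:
    pass

class Name(SymbolBase):
    pass

def method(symbol):
    # same contract as Parser.method: attach the function to the symbol class
    assert issubclass(symbol, SymbolBase)
    def wrapped(fn):
        setattr(symbol, fn.__name__, fn)
    return wrapped

@method(Name)
def nud(self):
    return self

print(hasattr(Name, 'nud'))  # True: the function now lives on the symbol class
print(nud)                   # None: wrapped returns nothing, so the module-level name is consumed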
antoniobotelho/py-business-calendar
business_calendar/business_calendar.py
_simpleparsefun
def _simpleparsefun(date):
    """Simple date parsing function"""
    if hasattr(date, 'year'):
        return date
    try:
        date = datetime.datetime.strptime(date, '%Y-%m-%d')
    except ValueError:
        date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
    return date
python
92365fbddd043e41e33b01f1ddd9dd6a5094c031
https://github.com/antoniobotelho/py-business-calendar/blob/92365fbddd043e41e33b01f1ddd9dd6a5094c031/business_calendar/business_calendar.py#L63-L71
train
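The helper only needs the standard library, so it can be exercised standalone; reproduced here with its import so the snippet runs as-is:

import datetime

def _simpleparsefun(date):
    if hasattr(date, 'year'):
        return date
    try:
        date = datetime.datetime.strptime(date, '%Y-%m-%d')
    except ValueError:
        date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
    return date

print(_simpleparsefun('2024-01-31'))                # 2024-01-31 00:00:00
print(_simpleparsefun('2024-01-31 08:30:00'))       # 2024-01-31 08:30:00
print(_simpleparsefun(datetime.date(2024, 1, 31)))  # passed through unchanged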
nephila/django-knocker
knocker/mixins.py
KnockerModel._connect
def _connect(cls):
    """
    Connect signal to current model
    """
    post_save.connect(
        notify_items, sender=cls,
        dispatch_uid='knocker_{0}'.format(cls.__name__)
    )
python
d25380d43a1f91285f1581dcf9db8510fe87f354
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/mixins.py#L31-L38
train
nephila/django-knocker
knocker/mixins.py
KnockerModel._disconnect
def _disconnect(cls):
    """
    Disconnect signal from current model
    """
    post_save.disconnect(
        notify_items, sender=cls,
        dispatch_uid='knocker_{0}'.format(cls.__name__)
    )
python
d25380d43a1f91285f1581dcf9db8510fe87f354
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/mixins.py#L41-L48
train
nephila/django-knocker
knocker/mixins.py
KnockerModel.as_knock
def as_knock(self, created=False):
    """
    Returns a dictionary with the knock data built from _knocker_data
    """
    knock = {}
    if self.should_knock(created):
        for field, data in self._retrieve_data(None, self._knocker_data):
            knock[field] = data
    return knock
python
d25380d43a1f91285f1581dcf9db8510fe87f354
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/mixins.py#L97-L105
train
nephila/django-knocker
knocker/mixins.py
KnockerModel.send_knock
def send_knock(self, created=False):
    """
    Send the knock in the associated channels Group
    """
    knock = self.as_knock(created)
    if knock:
        gr = Group('knocker-{0}'.format(knock['language']))
        gr.send({'text': json.dumps(knock)})
python
d25380d43a1f91285f1581dcf9db8510fe87f354
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/mixins.py#L107-L114
train
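A hedged sketch of a model wired into this mixin, loosely following the django-knocker README; the Post model and its field are illustrative, not taken from the source above:

from django.db import models
from knocker.mixins import KnockerModel

class Post(KnockerModel, models.Model):
    # KnockerModel registers the post_save signal via _connect(), so saving
    # a Post triggers notify_items -> send_knock for this model.
    title = models.CharField(max_length=100)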
ryukinix/decorating
decorating/color.py
colorize
def colorize(printable, color, style='normal', autoreset=True):
    """Colorize a message with ANSI color codes

    :param printable: an object that has a __str__ or __repr__ method
    :param color: one of the colors defined in COLOR_MAP to colorize the text
    :param style: can be 'normal', 'bold' or 'underline'
    :returns: the 'printable' colorized with style
    """
    if not COLORED:  # disable color
        return printable
    if color not in COLOR_MAP:
        raise RuntimeError('invalid color set, no {}'.format(color))
    return '{color}{printable}{reset}'.format(
        printable=printable,
        color=COLOR_MAP[color].format(style=STYLE_MAP[style]),
        reset=COLOR_MAP['reset'] if autoreset else ''
    )
python
df78c3f87800205701704c0bc0fb9b6bb908ba7e
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/color.py#L45-L63
train
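A usage sketch, assuming 'red' and 'blue' are among the keys defined in the module-level COLOR_MAP (the key names are not verified here):

from decorating.color import colorize

print(colorize('something bad happened', 'red', style='bold'))
print(colorize('status message', 'blue'))  # 'normal' style, autoreset on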
FortyNorthSecurity/Hasher
hashes/common/helpers.py
color
def color(string, status=True, warning=False, bold=True):
    """
    Change text color for the Linux terminal, defaults to green.
    Set "warning=True" for red.
    """
    attr = []
    if status:
        # green
        attr.append('32')
    if warning:
        # red
        attr.append('31')
    if bold:
        attr.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
python
40173c56b36680ab1ddc57a9c13c36b3a1ec51c3
https://github.com/FortyNorthSecurity/Hasher/blob/40173c56b36680ab1ddc57a9c13c36b3a1ec51c3/hashes/common/helpers.py#L10-L24
train
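The helper is fully self-contained, so a runnable demonstration only needs the function itself:

def color(string, status=True, warning=False, bold=True):
    attr = []
    if status:
        attr.append('32')  # green
    if warning:
        attr.append('31')  # red
    if bold:
        attr.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)

print(color('[*] hashing complete'))                           # bold green
print(color('[!] hash mismatch', status=False, warning=True))  # bold red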
marrow/mongo
marrow/mongo/util/capped.py
_patch
def _patch():
    """Patch pymongo's Collection object to add a tail method.

    While not necessarily recommended, you can use this to inject `tail`
    as a method into Collection, making it generally accessible.
    """
    if not __debug__:  # pragma: no cover
        import warnings
        warnings.warn("A catgirl has died.", ImportWarning)

    from pymongo.collection import Collection
    Collection.tail = tail
python
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/util/capped.py#L49-L61
train
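A hedged sketch of what the patch enables; the tail() call signature is assumed from context rather than verified against the module:

from marrow.mongo.util.capped import _patch
from pymongo import MongoClient

_patch()  # injects `tail` onto pymongo's Collection

capped = MongoClient().test.events  # assumed to be a capped collection
for document in capped.tail():      # assumption: tail() takes no required args
    print(document)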
marrow/mongo
marrow/mongo/core/trait/queryable.py
Queryable._prepare_find
def _prepare_find(cls, *args, **kw):
    """Execute a find and return the resulting queryset using combined plain and parametric query generation.

    Additionally, performs argument case normalization; refer to the `_prepare_query` method's docstring.
    """
    cls, collection, query, options = cls._prepare_query(
        cls.FIND_MAPPING,
        cls.FIND_OPTIONS,
        *args,
        **kw
    )

    if 'await' in options:
        raise TypeError("Await is hard-deprecated as reserved keyword in Python 3.7, use wait instead.")

    if 'cursor_type' in options and {'tail', 'wait'} & set(options):
        raise TypeError("Can not combine cursor_type and tail/wait arguments.")

    elif options.pop('tail', False):
        options['cursor_type'] = CursorType.TAILABLE_AWAIT if options.pop('wait', True) else CursorType.TAILABLE

    elif 'wait' in options:
        raise TypeError("Wait option only applies to tailing cursors.")

    modifiers = options.get('modifiers', dict())

    if 'max_time_ms' in options:
        modifiers['$maxTimeMS'] = options.pop('max_time_ms')

    if modifiers:
        options['modifiers'] = modifiers

    return cls, collection, query, options
python
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/queryable.py#L113-L146
train
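A self-contained illustration of the tail/wait translation performed above, using pymongo's CursorType directly instead of the Queryable trait:

from pymongo import CursorType

def translate(options):
    # Mirrors the branch in _prepare_find that maps tail/wait onto cursor_type.
    if 'cursor_type' in options and {'tail', 'wait'} & set(options):
        raise TypeError("Can not combine cursor_type and tail/wait arguments.")
    elif options.pop('tail', False):
        options['cursor_type'] = CursorType.TAILABLE_AWAIT if options.pop('wait', True) else CursorType.TAILABLE
    elif 'wait' in options:
        raise TypeError("Wait option only applies to tailing cursors.")
    return options

print(translate({'tail': True}))                 # cursor_type becomes TAILABLE_AWAIT
print(translate({'tail': True, 'wait': False}))  # cursor_type becomes TAILABLE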
marrow/mongo
marrow/mongo/core/trait/queryable.py
Queryable.reload
def reload(self, *fields, **kw):
    """Reload the entire document from the database, or refresh specific named top-level fields."""
    Doc, collection, query, options = self._prepare_find(id=self.id, projection=fields, **kw)
    result = collection.find_one(query, **options)

    if fields:  # Refresh only the requested data.
        for k in result:  # TODO: Better merge algorithm.
            if k == ~Doc.id:
                continue
            self.__data__[k] = result[k]
    else:
        self.__data__ = result

    return self
python
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/queryable.py#L271-L284
train
andrasmaroy/pconf
pconf/pconf.py
Pconf.get
def get(cls):
    """Get values gathered from the previously set hierarchy.

    Respects the order in which sources are set: the first source set
    has the highest priority and overrides values with the same key
    that exist in sources with lower priority.

    Returns:
        dict: The dictionary containing values gathered from all set sources.
    """
    results = {}
    hierarchy = cls.__hierarchy
    hierarchy.reverse()
    for storeMethod in hierarchy:
        cls.merger.merge(results, storeMethod.get())
    return results
python
1f930bf4e88bf8b4732fcc95557c66f3608b8821
https://github.com/andrasmaroy/pconf/blob/1f930bf4e88bf8b4732fcc95557c66f3608b8821/pconf/pconf.py#L27-L45
train
andrasmaroy/pconf
pconf/pconf.py
Pconf.argv
def argv(cls, name, short_name=None, type=None, help=None):
    """Set command line arguments as a source.

    Parses the command line arguments described by the parameters.

    Args:
        name: the long name of the argument (foo)
        short_name: the optional short name of the argument (f)
        type: the optional type of the argument, defaults to bool
        help: the optional help text for the argument
    """
    cls.__hierarchy.append(argv.Argv(name, short_name, type, help))
python
1f930bf4e88bf8b4732fcc95557c66f3608b8821
https://github.com/andrasmaroy/pconf/blob/1f930bf4e88bf8b4732fcc95557c66f3608b8821/pconf/pconf.py#L72-L83
train
andrasmaroy/pconf
pconf/pconf.py
Pconf.env
def env(cls, separator=None, match=None, whitelist=None, parse_values=None,
        to_lower=None, convert_underscores=None):
    """Set environment variables as a source.

    By default all environment variables available to the process are used.
    This can be narrowed by the args.

    Args:
        separator: Keys are split along this character, the resulting
            splits are considered nested values.
        match: Regular expression for key matching. Keys matching the
            expression are considered whitelisted.
        whitelist: Only use environment variables that are listed in this list.
        parse_values: Try to parse all variables for well-known types.
        to_lower: Convert all variable names to lower case.
        convert_underscores: Convert all underscores in the name to dashes,
            this takes place after separation via the separator option.
    """
    cls.__hierarchy.append(env.Env(separator, match, whitelist, parse_values,
                                   to_lower, convert_underscores))
python
1f930bf4e88bf8b4732fcc95557c66f3608b8821
https://github.com/andrasmaroy/pconf/blob/1f930bf4e88bf8b4732fcc95557c66f3608b8821/pconf/pconf.py#L86-L104
train
andrasmaroy/pconf
pconf/pconf.py
Pconf.file
def file(cls, path, encoding=None, parser=None):
    """Set a file as a source.

    Files are parsed as literal python dicts by default, this behaviour
    can be configured.

    Args:
        path: The path to the file to be parsed
        encoding: The encoding of the file. Defaults to 'raw'. Available
            built-in values: 'ini', 'json', 'yaml'. Custom value can be
            used in conjunction with parser.
        parser: A parser function for a custom encoder. It is expected to
            return a dict containing the parsed values when called with
            the contents of the file as an argument.
    """
    cls.__hierarchy.append(file.File(path, encoding, parser))
python
1f930bf4e88bf8b4732fcc95557c66f3608b8821
https://github.com/andrasmaroy/pconf/blob/1f930bf4e88bf8b4732fcc95557c66f3608b8821/pconf/pconf.py#L107-L122
train
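A combined usage sketch for the source types above, following the pconf README (the import path and file path are assumptions): earlier sources win, so a key present in both the environment and the file resolves to the environment value.

from pconf import Pconf

Pconf.env()                                            # highest priority
Pconf.file('/etc/myapp/config.json', encoding='json')  # fallback values

config = Pconf.get()
print(config.get('log_level'))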
marrow/mongo
marrow/mongo/param/project.py
P
def P(Document, *fields, **kw):
    """Generate a MongoDB projection dictionary using the Django ORM style."""
    __always__ = kw.pop('__always__', set())
    projected = set()
    omitted = set()

    for field in fields:
        if field[0] in ('-', '!'):
            omitted.add(field[1:])
        elif field[0] == '+':
            projected.add(field[1:])
        else:
            projected.add(field)

    if not projected:  # We only have exclusions from the default projection.
        names = set(getattr(Document, '__projection__', Document.__fields__) or Document.__fields__)
        projected = {name for name in (names - omitted)}

    projected |= __always__

    if not projected:
        projected = {'_id'}

    return {unicode(traverse(Document, name, name)): True for name in projected}
python
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/param/project.py#L11-L35
train
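A hedged sketch of the projection helper; Account is an illustrative Document subclass and the marrow.mongo import paths are assumptions:

from marrow.mongo import Document
from marrow.mongo.field import String
from marrow.mongo.param import P

class Account(Document):
    name = String()
    passwd = String(project=False)  # excluded from the default projection

print(P(Account, 'name'))     # {'name': True}
print(P(Account, '-passwd'))  # exclusion only: default projection minus passwd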
marrow/mongo
web/session/mongo.py
MongoSession.is_valid
def is_valid(self, context, sid):
    """Identify if the given session ID is currently valid.

    Return True if valid, False if explicitly invalid, None if unknown.
    """
    record = self._Document.find_one(sid, project=('expires', ))

    if not record:
        return

    return not record._expired
python
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/web/session/mongo.py#L55-L66
train
marrow/mongo
web/session/mongo.py
MongoSession.invalidate
def invalidate(self, context, sid):
    """Immediately expire a session from the backing store."""
    result = self._Document.get_collection().delete_one({'_id': sid})
    return result.deleted_count == 1
python
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/web/session/mongo.py#L68-L73
train
marrow/mongo
web/session/mongo.py
MongoSession.persist
def persist(self, context):
    """Update or insert the session document into the configured collection"""
    D = self._Document
    document = context.session[self.name]
    D.get_collection().replace_one(D.id == document.id, document, True)
python
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/web/session/mongo.py#L92-L98
train
nephila/django-knocker
knocker/consumers.py
ws_connect
def ws_connect(message):
    """
    Channels connection setup.
    Register the current client on the related Group according to the language
    """
    prefix, language = message['path'].strip('/').split('/')
    gr = Group('knocker-{0}'.format(language))
    gr.add(message.reply_channel)
    message.channel_session['knocker'] = language
    message.reply_channel.send({"accept": True})
python
d25380d43a1f91285f1581dcf9db8510fe87f354
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/consumers.py#L9-L18
train
nephila/django-knocker
knocker/consumers.py
ws_disconnect
def ws_disconnect(message):
    """
    Channels connection close.
    Deregister the client
    """
    language = message.channel_session['knocker']
    gr = Group('knocker-{0}'.format(language))
    gr.discard(message.reply_channel)
python
d25380d43a1f91285f1581dcf9db8510fe87f354
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/consumers.py#L30-L37
train
ryukinix/decorating
decorating/animation.py
AnimatedDecorator.start
def start(self, autopush=True):
    """Start a new animation instance"""
    if self.enabled:
        if autopush:
            self.push_message(self.message)
        self.spinner.message = ' - '.join(self.animation.messages)
        if not self.spinner.running:
            self.animation.thread = threading.Thread(target=_spinner,
                                                     args=(self.spinner,))
            self.spinner.running = True
            self.animation.thread.start()
            sys.stdout = stream.Clean(sys.stdout, self.spinner.stream)
python
df78c3f87800205701704c0bc0fb9b6bb908ba7e
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L235-L246
train
ryukinix/decorating
decorating/animation.py
AnimatedDecorator.stop
def stop(cls):
    """Stop the thread animation gracefully and reset_message"""
    if AnimatedDecorator._enabled:
        if cls.spinner.running:
            cls.spinner.running = False
            cls.animation.thread.join()
        if any(cls.animation.messages):
            cls.pop_message()
        sys.stdout = sys.__stdout__
python
df78c3f87800205701704c0bc0fb9b6bb908ba7e
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L249-L259
train
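A usage sketch following the decorating README: the package-level `animated` object (built on AnimatedDecorator, whose start/stop are shown above) works both as a decorator and as a context manager.

import time
from decorating import animated

@animated('processing')
def process():
    time.sleep(2)

process()

with animated('loading'):
    time.sleep(2)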
ryukinix/decorating
decorating/animation.py
AnimatedDecorator.auto_message
def auto_message(self, args):
    """Try to guess the message from the args passed.

    args: the args passed to the wrapper __call__ in the definition above.

    If the object already has a message (defined in __init__), we don't
    change it. If the first arg is a function, the decorator was applied
    without arguments, so use the function name as the message. If there
    is still no self.message, use the default_message global; otherwise
    use the existing self.message.
    """
    if any(args) and callable(args[0]) and not self.message:
        return args[0].__name__
    elif not self.message:
        return self.default_message
    else:
        return self.message
python
df78c3f87800205701704c0bc0fb9b6bb908ba7e
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L296-L315
train
ryukinix/decorating
decorating/animation.py
WritingDecorator.start
def start(self):
    """Activate the TypingStream on stdout"""
    self.streams.append(sys.stdout)
    sys.stdout = self.stream
python
df78c3f87800205701704c0bc0fb9b6bb908ba7e
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L353-L356
train
ryukinix/decorating
decorating/animation.py
WritingDecorator.stop
def stop(cls):
    """Change back to the normal stdout at the end"""
    if any(cls.streams):
        sys.stdout = cls.streams.pop(-1)
    else:
        sys.stdout = sys.__stdout__
python
df78c3f87800205701704c0bc0fb9b6bb908ba7e
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/animation.py#L359-L364
train
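A companion sketch, assuming the package also exports `writing` (built on WritingDecorator) with a configurable per-character delay; both assumptions follow the README rather than the code above:

from decorating import writing

@writing(delay=0.05)  # assumed keyword; slows stdout to a typewriter pace
def hello():
    print('Hello, world!')

hello()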
marrow/mongo
marrow/mongo/core/trait/lockable.py
Lockable.prolong
def prolong(self):
    """Prolong the working duration of an already held lock.

    Attempting to prolong a lock not already owned will result in a Locked exception.
    """
    D = self.__class__
    collection = self.get_collection()
    identity = self.Lock()

    query = D.id == self
    query &= D.lock.instance == identity.instance
    query &= D.lock.time >= (identity.time - identity.__period__)

    previous = collection.find_one_and_update(query, {'$set': {~D.lock.time: identity.time}}, {~D.lock: True})

    if previous is None:
        lock = getattr(self.find_one(self, projection={~D.lock: True}), 'lock', None)
        if lock and lock.expires <= identity.time:
            lock.expired(self)
        raise self.Locked("Unable to prolong lock.", lock)

    identity.prolonged(self)

    return identity
python
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/lockable.py#L245-L271
train
marrow/mongo
marrow/mongo/core/trait/lockable.py
Lockable.release
def release(self, force=False):
    """Release an exclusive lock on this integration task.

    Unless forcing, if we are not the current owners of the lock a Locked exception will be raised.
    """
    D = self.__class__
    collection = self.get_collection()
    identity = self.Lock()

    query = D.id == self
    if not force:
        query &= D.lock.instance == identity.instance

    previous = collection.find_one_and_update(query, {'$unset': {~D.lock: True}}, {~D.lock: True})

    if previous is None:
        lock = getattr(self.find_one(self, projection={~D.lock: True}), 'lock', None)
        raise self.Locked("Unable to release lock.", lock)

    lock = self.Lock.from_mongo(previous[~D.lock])

    if lock and lock.expires <= identity.time:
        lock.expired(self)

    identity.released(self, force)
python
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/lockable.py#L273-L299
train
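A hedged lifecycle sketch for the Lockable trait; `acquire` is assumed to exist alongside the prolong/release methods shown above, and the worker helpers are hypothetical:

def run(task):
    # task: a Lockable document instance fetched elsewhere (hypothetical).
    task.acquire()              # assumption: takes the lock or raises task.Locked
    try:
        work_first_half(task)   # hypothetical long-running helpers
        task.prolong()          # extend the lease before it expires
        work_second_half(task)
    finally:
        task.release()          # raises task.Locked if ownership was lost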
ryukinix/decorating
decorating/stream.py
Animation.write
def write(self, message, autoerase=True):
    """Send something to stdout, erased after a delay"""
    super(Animation, self).write(message)
    self.last_message = message
    if autoerase:
        time.sleep(self.interval)
        self.erase(message)
python
df78c3f87800205701704c0bc0fb9b6bb908ba7e
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/stream.py#L82-L88
train
ryukinix/decorating
decorating/stream.py
Clean.write
def write(self, message, flush=False):
    """Write something on the default stream with a prefixed message"""
    # this needs to be threadsafe because the concurrent spinner runs
    # on stderr
    with self.lock:
        self.paralell_stream.erase()
        super(Clean, self).write(message, flush)
python
df78c3f87800205701704c0bc0fb9b6bb908ba7e
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/stream.py#L120-L126
train
ryukinix/decorating
decorating/stream.py
Writting.write
def write(self, message, flush=True):
    """A Writting-like write method, delayed at each char"""
    if isinstance(message, bytes):  # pragma: no cover
        message = message.decode('utf-8')
    for char in message:
        time.sleep(self.delay * (4 if char == '\n' else 1))
        super(Writting, self).write(char, flush)
python
df78c3f87800205701704c0bc0fb9b6bb908ba7e
https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/stream.py#L148-L155
train
marrow/mongo
marrow/mongo/core/trait/collection.py
Collection._get_default_projection
def _get_default_projection(cls):
        """Construct the default projection document."""

        projected = []  # The fields explicitly requested for inclusion.
        neutral = []  # Fields returning neutral (None) status.
        omitted = False  # Have any fields been explicitly omitted?

        for name, field in cls.__fields__.items():
            if field.project is None:
                neutral.append(name)
            elif field.project:
                projected.append(name)
            else:
                omitted = True

        if not projected and not omitted:
            # No preferences specified.
            return None

        elif not projected and omitted:
            # No positive inclusions given, but negative ones were.
            projected = neutral

        return {field: True for field in projected}
python
def _get_default_projection(cls):
        """Construct the default projection document."""

        projected = []  # The fields explicitly requested for inclusion.
        neutral = []  # Fields returning neutral (None) status.
        omitted = False  # Have any fields been explicitly omitted?

        for name, field in cls.__fields__.items():
            if field.project is None:
                neutral.append(name)
            elif field.project:
                projected.append(name)
            else:
                omitted = True

        if not projected and not omitted:
            # No preferences specified.
            return None

        elif not projected and omitted:
            # No positive inclusions given, but negative ones were.
            projected = neutral

        return {field: True for field in projected}
[ "def", "_get_default_projection", "(", "cls", ")", ":", "projected", "=", "[", "]", "# The fields explicitly requested for inclusion.", "neutral", "=", "[", "]", "# Fields returning neutral (None) status.", "omitted", "=", "False", "# Have any fields been explicitly omitted?", "for", "name", ",", "field", "in", "cls", ".", "__fields__", ".", "items", "(", ")", ":", "if", "field", ".", "project", "is", "None", ":", "neutral", ".", "append", "(", "name", ")", "elif", "field", ".", "project", ":", "projected", ".", "append", "(", "name", ")", "else", ":", "omitted", "=", "True", "if", "not", "projected", "and", "not", "omitted", ":", "# No preferences specified.", "return", "None", "elif", "not", "projected", "and", "omitted", ":", "# No positive inclusions given, but negative ones were.", "projected", "=", "neutral", "return", "{", "field", ":", "True", "for", "field", "in", "projected", "}" ]
Construct the default projection document.
[ "Construct", "the", "default", "projection", "document", "." ]
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/collection.py#L203-L226
train
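The three-way project flag logic above is easy to misread; a self-contained sketch of the same rule, using a plain mapping of field name to project flag (True/False/None) instead of real marrow.schema fields (the field names below are invented):

def default_projection(flags):
    projected = [name for name, flag in flags.items() if flag is True]
    neutral = [name for name, flag in flags.items() if flag is None]
    omitted = any(flag is False for flag in flags.values())
    if not projected and not omitted:
        return None  # No preferences specified at all.
    if not projected and omitted:
        projected = neutral  # Only exclusions given: fall back to neutral fields.
    return {name: True for name in projected}

print(default_projection({'id': None, 'name': None}))     # None
print(default_projection({'id': None, 'secret': False}))  # {'id': True}
print(default_projection({'id': True, 'secret': False}))  # {'id': True}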
marrow/mongo
marrow/mongo/util/__init__.py
adjust_attribute_sequence
def adjust_attribute_sequence(*fields):
    """Move marrow.schema fields around to control positional instantiation order."""

    amount = None

    if fields and isinstance(fields[0], int):
        amount, fields = fields[0], fields[1:]

    def adjust_inner(cls):
        for field in fields:
            if field not in cls.__dict__:
                # TODO: Copy the field definition.
                raise TypeError("Can only override sequence on non-inherited attributes.")

            # Adjust the sequence to re-order the field.
            if amount is None:
                cls.__dict__[field].__sequence__ = ElementMeta.sequence
            else:
                cls.__dict__[field].__sequence__ += amount  # Add the given amount.

        # Update the attribute collection.
        cls.__attributes__ = OrderedDict(
                (k, v) for k, v in \
                    sorted(cls.__attributes__.items(), key=lambda i: i[1].__sequence__)
            )

        return cls

    return adjust_inner
python
def adjust_attribute_sequence(*fields):
    """Move marrow.schema fields around to control positional instantiation order."""

    amount = None

    if fields and isinstance(fields[0], int):
        amount, fields = fields[0], fields[1:]

    def adjust_inner(cls):
        for field in fields:
            if field not in cls.__dict__:
                # TODO: Copy the field definition.
                raise TypeError("Can only override sequence on non-inherited attributes.")

            # Adjust the sequence to re-order the field.
            if amount is None:
                cls.__dict__[field].__sequence__ = ElementMeta.sequence
            else:
                cls.__dict__[field].__sequence__ += amount  # Add the given amount.

        # Update the attribute collection.
        cls.__attributes__ = OrderedDict(
                (k, v) for k, v in \
                    sorted(cls.__attributes__.items(), key=lambda i: i[1].__sequence__)
            )

        return cls

    return adjust_inner
[ "def", "adjust_attribute_sequence", "(", "*", "fields", ")", ":", "amount", "=", "None", "if", "fields", "and", "isinstance", "(", "fields", "[", "0", "]", ",", "int", ")", ":", "amount", ",", "fields", "=", "fields", "[", "0", "]", ",", "fields", "[", "1", ":", "]", "def", "adjust_inner", "(", "cls", ")", ":", "for", "field", "in", "fields", ":", "if", "field", "not", "in", "cls", ".", "__dict__", ":", "# TODO: Copy the field definition.", "raise", "TypeError", "(", "\"Can only override sequence on non-inherited attributes.\"", ")", "# Adjust the sequence to re-order the field.", "if", "amount", "is", "None", ":", "cls", ".", "__dict__", "[", "field", "]", ".", "__sequence__", "=", "ElementMeta", ".", "sequence", "else", ":", "cls", ".", "__dict__", "[", "field", "]", ".", "__sequence__", "+=", "amount", "# Add the given amount.", "# Update the attribute collection.", "cls", ".", "__attributes__", "=", "OrderedDict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "sorted", "(", "cls", ".", "__attributes__", ".", "items", "(", ")", ",", "key", "=", "lambda", "i", ":", "i", "[", "1", "]", ".", "__sequence__", ")", ")", "return", "cls", "return", "adjust_inner" ]
Move marrow.schema fields around to control positional instantiation order.
[ "Move", "marrow", ".", "schema", "fields", "around", "to", "control", "positional", "instantiation", "order", "." ]
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/util/__init__.py#L26-L55
train
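How the decorator is meant to be applied is only implied by the record; a hedged usage sketch, assuming marrow.mongo's public Document/String imports (the class and field names are examples; marrow.mongo.util matches the file path above, the field import path is an assumption):

from marrow.mongo import Document
from marrow.mongo.field import String  # import path assumed
from marrow.mongo.util import adjust_attribute_sequence

@adjust_attribute_sequence(1000, 'locale')  # push 'locale' 1000 sequence steps later
class Localized(Document):
    title = String()
    locale = String(default='en')

# Positional construction now fills 'title' first and 'locale' last:
doc = Localized('Hello', 'en')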
tonybaloney/retox
retox/__main__.py
get_hashes
def get_hashes(path, exclude=None):
    '''
    Get a dictionary of file paths and timestamps.

    Paths matching `exclude` regex will be excluded.
    '''
    out = {}
    for f in Path(path).rglob('*'):
        if f.is_dir():
            # We want to watch files, not directories.
            continue
        if exclude and re.match(exclude, f.as_posix()):
            retox_log.debug("excluding '{}'".format(f.as_posix()))
            continue
        pytime = f.stat().st_mtime
        out[f.as_posix()] = pytime
    return out
python
def get_hashes(path, exclude=None):
    '''
    Get a dictionary of file paths and timestamps.

    Paths matching `exclude` regex will be excluded.
    '''
    out = {}
    for f in Path(path).rglob('*'):
        if f.is_dir():
            # We want to watch files, not directories.
            continue
        if exclude and re.match(exclude, f.as_posix()):
            retox_log.debug("excluding '{}'".format(f.as_posix()))
            continue
        pytime = f.stat().st_mtime
        out[f.as_posix()] = pytime
    return out
[ "def", "get_hashes", "(", "path", ",", "exclude", "=", "None", ")", ":", "out", "=", "{", "}", "for", "f", "in", "Path", "(", "path", ")", ".", "rglob", "(", "'*'", ")", ":", "if", "f", ".", "is_dir", "(", ")", ":", "# We want to watch files, not directories.", "continue", "if", "exclude", "and", "re", ".", "match", "(", "exclude", ",", "f", ".", "as_posix", "(", ")", ")", ":", "retox_log", ".", "debug", "(", "\"excluding '{}'\"", ".", "format", "(", "f", ".", "as_posix", "(", ")", ")", ")", "continue", "pytime", "=", "f", ".", "stat", "(", ")", ".", "st_mtime", "out", "[", "f", ".", "as_posix", "(", ")", "]", "=", "pytime", "return", "out" ]
Get a dictionary of file paths and timestamps. Paths matching `exclude` regex will be excluded.
[ "Get", "a", "dictionary", "of", "file", "paths", "and", "timestamps", "." ]
4635e31001d2ac083423f46766249ac8daca7c9c
https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/__main__.py#L103-L119
train
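get_hashes is a snapshot primitive; the watch loop that presumably consumes it is not part of this record, so here is a self-contained snapshot-and-diff sketch built on the same idea:

import time
from pathlib import Path

def snapshot(path):
    return {f.as_posix(): f.stat().st_mtime
            for f in Path(path).rglob('*') if not f.is_dir()}

def changed(before, after):
    return [p for p, mtime in after.items() if before.get(p) != mtime]

before = snapshot('.')
time.sleep(1.0)  # ... files may be edited in the meantime ...
print(changed(before, snapshot('.')))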
mediawiki-utilities/python-mwapi
mwapi/session.py
Session.request
def request(self, method, params=None, query_continue=None, files=None,
                auth=None, continuation=False):
        """
        Sends an HTTP request to the API.

        :Parameters:
            method : `str`
                Which HTTP method to use for the request? (Usually "POST" or "GET")
            params : `dict`
                A set of parameters to send with the request.  These parameters will
                be included in the POST body for post requests or a query string
                otherwise.
            query_continue : `dict`
                A 'continue' field from a past request.  This field represents the
                point from which a query should be continued.
            files : `dict`
                A dictionary of (filename : `str`, data : `bytes`) pairs to send
                with the request.
            auth : mixed
                Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
            continuation : `bool`
                If true, a continuation will be attempted and a generator of JSON
                response documents will be returned.

        :Returns:
            A response JSON document (or a generator of documents if
            `continuation == True`)
        """
        normal_params = _normalize_params(params, query_continue)
        if continuation:
            return self._continuation(method, params=normal_params, auth=auth,
                                      files=files)
        else:
            return self._request(method, params=normal_params, auth=auth,
                                 files=files)
python
def request(self, method, params=None, query_continue=None, files=None,
                auth=None, continuation=False):
        """
        Sends an HTTP request to the API.

        :Parameters:
            method : `str`
                Which HTTP method to use for the request? (Usually "POST" or "GET")
            params : `dict`
                A set of parameters to send with the request.  These parameters will
                be included in the POST body for post requests or a query string
                otherwise.
            query_continue : `dict`
                A 'continue' field from a past request.  This field represents the
                point from which a query should be continued.
            files : `dict`
                A dictionary of (filename : `str`, data : `bytes`) pairs to send
                with the request.
            auth : mixed
                Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
            continuation : `bool`
                If true, a continuation will be attempted and a generator of JSON
                response documents will be returned.

        :Returns:
            A response JSON document (or a generator of documents if
            `continuation == True`)
        """
        normal_params = _normalize_params(params, query_continue)
        if continuation:
            return self._continuation(method, params=normal_params, auth=auth,
                                      files=files)
        else:
            return self._request(method, params=normal_params, auth=auth,
                                 files=files)
[ "def", "request", "(", "self", ",", "method", ",", "params", "=", "None", ",", "query_continue", "=", "None", ",", "files", "=", "None", ",", "auth", "=", "None", ",", "continuation", "=", "False", ")", ":", "normal_params", "=", "_normalize_params", "(", "params", ",", "query_continue", ")", "if", "continuation", ":", "return", "self", ".", "_continuation", "(", "method", ",", "params", "=", "normal_params", ",", "auth", "=", "auth", ",", "files", "=", "files", ")", "else", ":", "return", "self", ".", "_request", "(", "method", ",", "params", "=", "normal_params", ",", "auth", "=", "auth", ",", "files", "=", "files", ")" ]
Sends an HTTP request to the API.

:Parameters:
    method : `str`
        Which HTTP method to use for the request? (Usually "POST" or "GET")
    params : `dict`
        A set of parameters to send with the request.  These parameters will be included in the
        POST body for post requests or a query string otherwise.
    query_continue : `dict`
        A 'continue' field from a past request.  This field represents the point from which a
        query should be continued.
    files : `dict`
        A dictionary of (filename : `str`, data : `bytes`) pairs to send with the request.
    auth : mixed
        Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
    continuation : `bool`
        If true, a continuation will be attempted and a generator of JSON response documents will
        be returned.

:Returns:
    A response JSON document (or a generator of documents if `continuation == True`)
[ "Sends", "an", "HTTP", "request", "to", "the", "API", "." ]
7a653c29207ecd318ae4b369d398aed13f26951d
https://github.com/mediawiki-utilities/python-mwapi/blob/7a653c29207ecd318ae4b369d398aed13f26951d/mwapi/session.py#L136-L171
train
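A hedged usage sketch for the Session API above, assuming the public mwapi package layout (mwapi.Session); the endpoint, user agent, and query parameters are examples:

import mwapi

session = mwapi.Session('https://en.wikipedia.org',
                        user_agent='demo script (person@example.org)')

# One-shot GET (request() via the get() convenience wrapper):
doc = session.get(action='query', meta='siteinfo')
print(doc['query']['general']['sitename'])

# continuation=True returns a generator of JSON documents, one per batch:
for batch in session.get(action='query', list='allpages', aplimit=5,
                         continuation=True):
    for page in batch['query']['allpages']:
        print(page['title'])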
mediawiki-utilities/python-mwapi
mwapi/session.py
Session.login
def login(self, username, password, login_token=None):
        """
        Authenticate with the given credentials.  If authentication is
        successful, all further requests sent will be signed as the
        authenticated user.

        Note that passwords are sent as plaintext. This is a limitation of
        the Mediawiki API.  Use an https host if you want your password to
        be secure.

        :Parameters:
            username : str
                The username of the user to be authenticated
            password : str
                The password of the user to be authenticated

        :Raises:
            :class:`mwapi.errors.LoginError` : if authentication fails
            :class:`mwapi.errors.ClientInteractionRequest` : if authentication
                requires a continue_login() call
            :class:`mwapi.errors.APIError` : if the API responds with an error
        """
        if login_token is None:
            token_doc = self.post(action='query', meta='tokens', type='login')
            login_token = token_doc['query']['tokens']['logintoken']

        login_doc = self.post(
            action="clientlogin", username=username, password=password,
            logintoken=login_token, loginreturnurl="http://example.org/")

        if login_doc['clientlogin']['status'] == "UI":
            raise ClientInteractionRequest.from_doc(
                login_token, login_doc['clientlogin'])
        elif login_doc['clientlogin']['status'] != 'PASS':
            raise LoginError.from_doc(login_doc['clientlogin'])

        return login_doc['clientlogin']
python
def login(self, username, password, login_token=None):
        """
        Authenticate with the given credentials.  If authentication is
        successful, all further requests sent will be signed as the
        authenticated user.

        Note that passwords are sent as plaintext. This is a limitation of
        the Mediawiki API.  Use an https host if you want your password to
        be secure.

        :Parameters:
            username : str
                The username of the user to be authenticated
            password : str
                The password of the user to be authenticated

        :Raises:
            :class:`mwapi.errors.LoginError` : if authentication fails
            :class:`mwapi.errors.ClientInteractionRequest` : if authentication
                requires a continue_login() call
            :class:`mwapi.errors.APIError` : if the API responds with an error
        """
        if login_token is None:
            token_doc = self.post(action='query', meta='tokens', type='login')
            login_token = token_doc['query']['tokens']['logintoken']

        login_doc = self.post(
            action="clientlogin", username=username, password=password,
            logintoken=login_token, loginreturnurl="http://example.org/")

        if login_doc['clientlogin']['status'] == "UI":
            raise ClientInteractionRequest.from_doc(
                login_token, login_doc['clientlogin'])
        elif login_doc['clientlogin']['status'] != 'PASS':
            raise LoginError.from_doc(login_doc['clientlogin'])

        return login_doc['clientlogin']
[ "def", "login", "(", "self", ",", "username", ",", "password", ",", "login_token", "=", "None", ")", ":", "if", "login_token", "is", "None", ":", "token_doc", "=", "self", ".", "post", "(", "action", "=", "'query'", ",", "meta", "=", "'tokens'", ",", "type", "=", "'login'", ")", "login_token", "=", "token_doc", "[", "'query'", "]", "[", "'tokens'", "]", "[", "'logintoken'", "]", "login_doc", "=", "self", ".", "post", "(", "action", "=", "\"clientlogin\"", ",", "username", "=", "username", ",", "password", "=", "password", ",", "logintoken", "=", "login_token", ",", "loginreturnurl", "=", "\"http://example.org/\"", ")", "if", "login_doc", "[", "'clientlogin'", "]", "[", "'status'", "]", "==", "\"UI\"", ":", "raise", "ClientInteractionRequest", ".", "from_doc", "(", "login_token", ",", "login_doc", "[", "'clientlogin'", "]", ")", "elif", "login_doc", "[", "'clientlogin'", "]", "[", "'status'", "]", "!=", "'PASS'", ":", "raise", "LoginError", ".", "from_doc", "(", "login_doc", "[", "'clientlogin'", "]", ")", "return", "login_doc", "[", "'clientlogin'", "]" ]
Authenticate with the given credentials.  If authentication is successful, all further requests
sent will be signed as the authenticated user.

Note that passwords are sent as plaintext. This is a limitation of the Mediawiki API.  Use an
https host if you want your password to be secure.

:Parameters:
    username : str
        The username of the user to be authenticated
    password : str
        The password of the user to be authenticated

:Raises:
    :class:`mwapi.errors.LoginError` : if authentication fails
    :class:`mwapi.errors.ClientInteractionRequest` : if authentication requires a
        continue_login() call
    :class:`mwapi.errors.APIError` : if the API responds with an error
[ "Authenticate", "with", "the", "given", "credentials", ".", "If", "authentication", "is", "successful", "all", "further", "requests", "sent", "will", "be", "signed", "the", "authenticated", "user", "." ]
7a653c29207ecd318ae4b369d398aed13f26951d
https://github.com/mediawiki-utilities/python-mwapi/blob/7a653c29207ecd318ae4b369d398aed13f26951d/mwapi/session.py#L213-L246
train
mediawiki-utilities/python-mwapi
mwapi/session.py
Session.continue_login
def continue_login(self, login_token, **params):
        """
        Continues a login that requires an additional step.  This is common
        for when login requires completing a captcha or supplying a two-factor
        authentication token.

        :Parameters:
            login_token : `str`
                A login token generated by the MediaWiki API (and used in a
                previous call to login())
            params : `mixed`
                A set of parameters to include with the request.  This depends
                on what "requests" for additional information were made by the
                MediaWiki API.
        """
        login_params = {
            'action': "clientlogin",
            'logintoken': login_token,
            'logincontinue': 1
        }
        login_params.update(params)
        login_doc = self.post(**login_params)

        if login_doc['clientlogin']['status'] != 'PASS':
            raise LoginError.from_doc(login_doc['clientlogin'])

        return login_doc['clientlogin']
python
def continue_login(self, login_token, **params):
        """
        Continues a login that requires an additional step.  This is common
        for when login requires completing a captcha or supplying a two-factor
        authentication token.

        :Parameters:
            login_token : `str`
                A login token generated by the MediaWiki API (and used in a
                previous call to login())
            params : `mixed`
                A set of parameters to include with the request.  This depends
                on what "requests" for additional information were made by the
                MediaWiki API.
        """
        login_params = {
            'action': "clientlogin",
            'logintoken': login_token,
            'logincontinue': 1
        }
        login_params.update(params)
        login_doc = self.post(**login_params)

        if login_doc['clientlogin']['status'] != 'PASS':
            raise LoginError.from_doc(login_doc['clientlogin'])

        return login_doc['clientlogin']
[ "def", "continue_login", "(", "self", ",", "login_token", ",", "*", "*", "params", ")", ":", "login_params", "=", "{", "'action'", ":", "\"clientlogin\"", ",", "'logintoken'", ":", "login_token", ",", "'logincontinue'", ":", "1", "}", "login_params", ".", "update", "(", "params", ")", "login_doc", "=", "self", ".", "post", "(", "*", "*", "login_params", ")", "if", "login_doc", "[", "'clientlogin'", "]", "[", "'status'", "]", "!=", "'PASS'", ":", "raise", "LoginError", ".", "from_doc", "(", "login_doc", "[", "'clientlogin'", "]", ")", "return", "login_doc", "[", "'clientlogin'", "]" ]
Continues a login that requires an additional step.  This is common for when login requires
completing a captcha or supplying a two-factor authentication token.

:Parameters:
    login_token : `str`
        A login token generated by the MediaWiki API (and used in a previous call to login())
    params : `mixed`
        A set of parameters to include with the request.  This depends on what "requests" for
        additional information were made by the MediaWiki API.
[ "Continues", "a", "login", "that", "requires", "an", "additional", "step", ".", "This", "is", "common", "for", "when", "login", "requires", "completing", "a", "captcha", "or", "supplying", "a", "two", "-", "factor", "authentication", "token", "." ]
7a653c29207ecd318ae4b369d398aed13f26951d
https://github.com/mediawiki-utilities/python-mwapi/blob/7a653c29207ecd318ae4b369d398aed13f26951d/mwapi/session.py#L248-L273
train
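login() and continue_login() are designed to be used together; a hedged sketch of the two-step flow, assuming ClientInteractionRequest exposes the login_token it was constructed with, and using OATHToken only as an example of a field MediaWiki might request:

import mwapi
from mwapi.errors import ClientInteractionRequest, LoginError

session = mwapi.Session('https://en.wikipedia.org',
                        user_agent='demo script (person@example.org)')

try:
    session.login('my_bot_username', 'my_bot_password')
except ClientInteractionRequest as cir:
    # The wiki asked for another step (captcha, two-factor code, ...):
    code = input('Two-factor code: ')
    session.continue_login(cir.login_token, OATHToken=code)
except LoginError as err:
    print('Login failed:', err)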
mediawiki-utilities/python-mwapi
mwapi/session.py
Session.get
def get(self, query_continue=None, auth=None, continuation=False, **params):
        """Makes an API request with the GET method

        :Parameters:
            query_continue : `dict`
                Optionally, the value of a query continuation 'continue' field.
            auth : mixed
                Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
            continuation : `bool`
                If true, a continuation will be attempted and a generator of JSON
                response documents will be returned.
            params :
                Keyword parameters to be sent in the query string.

        :Returns:
            A response JSON document (or a generator of documents if
            `continuation == True`)

        :Raises:
            :class:`mwapi.errors.APIError` : if the API responds with an error
        """
        return self.request('GET', params=params, auth=auth,
                            query_continue=query_continue,
                            continuation=continuation)
python
def get(self, query_continue=None, auth=None, continuation=False, **params):
        """Makes an API request with the GET method

        :Parameters:
            query_continue : `dict`
                Optionally, the value of a query continuation 'continue' field.
            auth : mixed
                Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
            continuation : `bool`
                If true, a continuation will be attempted and a generator of JSON
                response documents will be returned.
            params :
                Keyword parameters to be sent in the query string.

        :Returns:
            A response JSON document (or a generator of documents if
            `continuation == True`)

        :Raises:
            :class:`mwapi.errors.APIError` : if the API responds with an error
        """
        return self.request('GET', params=params, auth=auth,
                            query_continue=query_continue,
                            continuation=continuation)
[ "def", "get", "(", "self", ",", "query_continue", "=", "None", ",", "auth", "=", "None", ",", "continuation", "=", "False", ",", "*", "*", "params", ")", ":", "return", "self", ".", "request", "(", "'GET'", ",", "params", "=", "params", ",", "auth", "=", "auth", ",", "query_continue", "=", "query_continue", ",", "continuation", "=", "continuation", ")" ]
Makes an API request with the GET method

:Parameters:
    query_continue : `dict`
        Optionally, the value of a query continuation 'continue' field.
    auth : mixed
        Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
    continuation : `bool`
        If true, a continuation will be attempted and a generator of JSON response documents
        will be returned.
    params :
        Keyword parameters to be sent in the query string.

:Returns:
    A response JSON document (or a generator of documents if `continuation == True`)

:Raises:
    :class:`mwapi.errors.APIError` : if the API responds with an error
[ "Makes", "an", "API", "request", "with", "the", "GET", "method" ]
7a653c29207ecd318ae4b369d398aed13f26951d
https://github.com/mediawiki-utilities/python-mwapi/blob/7a653c29207ecd318ae4b369d398aed13f26951d/mwapi/session.py#L284-L309
train
mediawiki-utilities/python-mwapi
mwapi/session.py
Session.post
def post(self, query_continue=None, upload_file=None, auth=None,
             continuation=False, **params):
        """Makes an API request with the POST method

        :Parameters:
            query_continue : `dict`
                Optionally, the value of a query continuation 'continue' field.
            upload_file : `bytes`
                The bytes of a file to upload.
            auth : mixed
                Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
            continuation : `bool`
                If true, a continuation will be attempted and a generator of JSON
                response documents will be returned.
            params :
                Keyword parameters to be sent in the POST message body.

        :Returns:
            A response JSON document (or a generator of documents if
            `continuation == True`)

        :Raises:
            :class:`mwapi.errors.APIError` : if the API responds with an error
        """
        if upload_file is not None:
            files = {'file': upload_file}
        else:
            files = None

        return self.request('POST', params=params, auth=auth,
                            query_continue=query_continue, files=files,
                            continuation=continuation)
python
def post(self, query_continue=None, upload_file=None, auth=None,
             continuation=False, **params):
        """Makes an API request with the POST method

        :Parameters:
            query_continue : `dict`
                Optionally, the value of a query continuation 'continue' field.
            upload_file : `bytes`
                The bytes of a file to upload.
            auth : mixed
                Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
            continuation : `bool`
                If true, a continuation will be attempted and a generator of JSON
                response documents will be returned.
            params :
                Keyword parameters to be sent in the POST message body.

        :Returns:
            A response JSON document (or a generator of documents if
            `continuation == True`)

        :Raises:
            :class:`mwapi.errors.APIError` : if the API responds with an error
        """
        if upload_file is not None:
            files = {'file': upload_file}
        else:
            files = None

        return self.request('POST', params=params, auth=auth,
                            query_continue=query_continue, files=files,
                            continuation=continuation)
[ "def", "post", "(", "self", ",", "query_continue", "=", "None", ",", "upload_file", "=", "None", ",", "auth", "=", "None", ",", "continuation", "=", "False", ",", "*", "*", "params", ")", ":", "if", "upload_file", "is", "not", "None", ":", "files", "=", "{", "'file'", ":", "upload_file", "}", "else", ":", "files", "=", "None", "return", "self", ".", "request", "(", "'POST'", ",", "params", "=", "params", ",", "auth", "=", "auth", ",", "query_continue", "=", "query_continue", ",", "files", "=", "files", ",", "continuation", "=", "continuation", ")" ]
Makes an API request with the POST method

:Parameters:
    query_continue : `dict`
        Optionally, the value of a query continuation 'continue' field.
    upload_file : `bytes`
        The bytes of a file to upload.
    auth : mixed
        Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
    continuation : `bool`
        If true, a continuation will be attempted and a generator of JSON response documents
        will be returned.
    params :
        Keyword parameters to be sent in the POST message body.

:Returns:
    A response JSON document (or a generator of documents if `continuation == True`)

:Raises:
    :class:`mwapi.errors.APIError` : if the API responds with an error
[ "Makes", "an", "API", "request", "with", "the", "POST", "method" ]
7a653c29207ecd318ae4b369d398aed13f26951d
https://github.com/mediawiki-utilities/python-mwapi/blob/7a653c29207ecd318ae4b369d398aed13f26951d/mwapi/session.py#L311-L342
train
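A hedged sketch of a file upload through post() above; the action=upload parameter set follows the MediaWiki API, but the token handling is trimmed and the file name is an example:

import mwapi

session = mwapi.Session('https://commons.wikimedia.org',
                        user_agent='demo script (person@example.org)')
csrf = session.get(action='query', meta='tokens')['query']['tokens']['csrftoken']

with open('diagram.png', 'rb') as f:
    result = session.post(action='upload', filename='Diagram.png', token=csrf,
                          ignorewarnings=1, upload_file=f.read())
print(result['upload']['result'])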
marrow/mongo
marrow/mongo/core/trait/derived.py
Derived.promote
def promote(self, cls, update=False, preserve=True):
        """Transform this record into an instance of a more specialized subclass."""

        if not issubclass(cls, self.__class__):
            raise TypeError("Must promote to a subclass of " + self.__class__.__name__)

        return self._as(cls, update, preserve)
python
def promote(self, cls, update=False, preserve=True):
        """Transform this record into an instance of a more specialized subclass."""

        if not issubclass(cls, self.__class__):
            raise TypeError("Must promote to a subclass of " + self.__class__.__name__)

        return self._as(cls, update, preserve)
[ "def", "promote", "(", "self", ",", "cls", ",", "update", "=", "False", ",", "preserve", "=", "True", ")", ":", "if", "not", "issubclass", "(", "cls", ",", "self", ".", "__class__", ")", ":", "raise", "TypeError", "(", "\"Must promote to a subclass of \"", "+", "self", ".", "__class__", ".", "__name__", ")", "return", "self", ".", "_as", "(", "cls", ",", "update", ",", "preserve", ")" ]
Transform this record into an instance of a more specialized subclass.
[ "Transform", "this", "record", "into", "an", "instance", "of", "a", "more", "specialized", "subclass", "." ]
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/derived.py#L36-L42
train
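A hypothetical illustration of promote(); Page and BlogPost stand in for real Derived-based documents, and the marrow.mongo.trait import path is inferred from the record's file layout rather than confirmed:

from marrow.mongo import Document
from marrow.mongo.trait import Derived  # import path assumed

class Page(Derived, Document):
    pass

class BlogPost(Page):
    pass

record = Page()
post = record.promote(BlogPost)  # fine: BlogPost subclasses Page
# record.promote(Document)       # would raise TypeError: not a subclass of Page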
jrief/djangocms-bootstrap
cms_bootstrap/templatetags/bootstrap_tags.py
cut_levels
def cut_levels(nodes, start_level):
    """
    cutting nodes away from menus
    """
    final = []
    removed = []
    for node in nodes:
        if not hasattr(node, 'level'):
            # remove and ignore nodes that don't have level information
            remove(node, removed)
            continue
        if node.attr.get('soft_root', False):
            # remove and ignore nodes that are behind a node marked as 'soft_root'
            remove(node, removed)
            continue
        if node.level == start_level:
            # turn nodes that are on from_level into root nodes
            final.append(node)
            node.parent = None
            if not node.visible and not node.children:
                remove(node, removed)
        elif node.level == start_level + 1:
            # remove nodes that are deeper than one level
            node.children = []
        else:
            remove(node, removed)
        if not node.visible:
            keep_node = False
            for child in node.children:
                keep_node = keep_node or child.visible
            if not keep_node:
                remove(node, removed)
    for node in removed:
        if node in final:
            final.remove(node)
    return final
python
def cut_levels(nodes, start_level):
    """
    cutting nodes away from menus
    """
    final = []
    removed = []
    for node in nodes:
        if not hasattr(node, 'level'):
            # remove and ignore nodes that don't have level information
            remove(node, removed)
            continue
        if node.attr.get('soft_root', False):
            # remove and ignore nodes that are behind a node marked as 'soft_root'
            remove(node, removed)
            continue
        if node.level == start_level:
            # turn nodes that are on from_level into root nodes
            final.append(node)
            node.parent = None
            if not node.visible and not node.children:
                remove(node, removed)
        elif node.level == start_level + 1:
            # remove nodes that are deeper than one level
            node.children = []
        else:
            remove(node, removed)
        if not node.visible:
            keep_node = False
            for child in node.children:
                keep_node = keep_node or child.visible
            if not keep_node:
                remove(node, removed)
    for node in removed:
        if node in final:
            final.remove(node)
    return final
[ "def", "cut_levels", "(", "nodes", ",", "start_level", ")", ":", "final", "=", "[", "]", "removed", "=", "[", "]", "for", "node", "in", "nodes", ":", "if", "not", "hasattr", "(", "node", ",", "'level'", ")", ":", "# remove and ignore nodes that don't have level information", "remove", "(", "node", ",", "removed", ")", "continue", "if", "node", ".", "attr", ".", "get", "(", "'soft_root'", ",", "False", ")", ":", "# remove and ignore nodes that are behind a node marked as 'soft_root'", "remove", "(", "node", ",", "removed", ")", "continue", "if", "node", ".", "level", "==", "start_level", ":", "# turn nodes that are on from_level into root nodes", "final", ".", "append", "(", "node", ")", "node", ".", "parent", "=", "None", "if", "not", "node", ".", "visible", "and", "not", "node", ".", "children", ":", "remove", "(", "node", ",", "removed", ")", "elif", "node", ".", "level", "==", "start_level", "+", "1", ":", "# remove nodes that are deeper than one level", "node", ".", "children", "=", "[", "]", "else", ":", "remove", "(", "node", ",", "removed", ")", "if", "not", "node", ".", "visible", ":", "keep_node", "=", "False", "for", "child", "in", "node", ".", "children", ":", "keep_node", "=", "keep_node", "or", "child", ".", "visible", "if", "not", "keep_node", ":", "remove", "(", "node", ",", "removed", ")", "for", "node", "in", "removed", ":", "if", "node", "in", "final", ":", "final", ".", "remove", "(", "node", ")", "return", "final" ]
cutting nodes away from menus
[ "cutting", "nodes", "away", "from", "menus" ]
293a7050602d6e9a728acea2fb13893e5ec7992e
https://github.com/jrief/djangocms-bootstrap/blob/293a7050602d6e9a728acea2fb13893e5ec7992e/cms_bootstrap/templatetags/bootstrap_tags.py#L18-L53
train
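A tiny demo of the pruning rules, meant to be pasted alongside cut_levels above; MenuNode and remove() are simplified stand-ins for the menus package's NavigationNode and the module's own remove() helper, which is not shown in this record:

class MenuNode:
    def __init__(self, level, visible=True, soft_root=False, children=()):
        self.level = level
        self.visible = visible
        self.attr = {'soft_root': soft_root}
        self.children = list(children)
        self.parent = None

def remove(node, removed):
    if node not in removed:
        removed.append(node)

root = MenuNode(level=0)
hidden = MenuNode(level=0, visible=False)  # invisible leaf: pruned
deep = MenuNode(level=2)                   # too deep: pruned
print(len(cut_levels([root, hidden, deep], start_level=0)))  # -> 1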
marrow/mongo
marrow/mongo/core/trait/expires.py
Expires.from_mongo
def from_mongo(cls, data, expired=False, **kw):
        """In the event a value that has technically already expired is loaded, swap it for None."""

        value = super(Expires, cls).from_mongo(data, **kw)

        if not expired and value.is_expired:
            return None

        return value
python
def from_mongo(cls, data, expired=False, **kw):
        """In the event a value that has technically already expired is loaded, swap it for None."""

        value = super(Expires, cls).from_mongo(data, **kw)

        if not expired and value.is_expired:
            return None

        return value
[ "def", "from_mongo", "(", "cls", ",", "data", ",", "expired", "=", "False", ",", "*", "*", "kw", ")", ":", "value", "=", "super", "(", "Expires", ",", "cls", ")", ".", "from_mongo", "(", "data", ",", "*", "*", "kw", ")", "if", "not", "expired", "and", "value", ".", "is_expired", ":", "return", "None", "return", "value" ]
In the event a value that has technically already expired is loaded, swap it for None.
[ "In", "the", "event", "a", "value", "that", "has", "technically", "already", "expired", "is", "loaded", "swap", "it", "for", "None", "." ]
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/expires.py#L36-L44
train
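A heavily hedged sketch of the trait in use: it assumes Expires contributes an 'expires' datetime field and an is_expired property, which this record only implies, and the CacheEntry class name is invented:

from datetime import datetime, timedelta
from marrow.mongo import Document
from marrow.mongo.trait import Expires  # import path assumed

class CacheEntry(Expires, Document):
    pass

stale = {'expires': datetime.utcnow() - timedelta(hours=1)}
print(CacheEntry.from_mongo(stale))                # None: already expired
print(CacheEntry.from_mongo(stale, expired=True))  # the record, expiry ignored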
marrow/mongo
marrow/mongo/param/sort.py
S
def S(Document, *fields):
    """Generate a MongoDB sort order list using the Django ORM style."""

    result = []

    for field in fields:
        if isinstance(field, tuple):  # Unpack existing tuple.
            field, direction = field
            result.append((field, direction))
            continue

        direction = ASCENDING

        if not field.startswith('__'):
            field = field.replace('__', '.')

        if field[0] == '-':
            direction = DESCENDING

        if field[0] in ('+', '-'):
            field = field[1:]

        _field = traverse(Document, field, default=None)

        result.append(((~_field) if _field else field, direction))

    return result
python
def S(Document, *fields):
    """Generate a MongoDB sort order list using the Django ORM style."""

    result = []

    for field in fields:
        if isinstance(field, tuple):  # Unpack existing tuple.
            field, direction = field
            result.append((field, direction))
            continue

        direction = ASCENDING

        if not field.startswith('__'):
            field = field.replace('__', '.')

        if field[0] == '-':
            direction = DESCENDING

        if field[0] in ('+', '-'):
            field = field[1:]

        _field = traverse(Document, field, default=None)

        result.append(((~_field) if _field else field, direction))

    return result
[ "def", "S", "(", "Document", ",", "*", "fields", ")", ":", "result", "=", "[", "]", "for", "field", "in", "fields", ":", "if", "isinstance", "(", "field", ",", "tuple", ")", ":", "# Unpack existing tuple.", "field", ",", "direction", "=", "field", "result", ".", "append", "(", "(", "field", ",", "direction", ")", ")", "continue", "direction", "=", "ASCENDING", "if", "not", "field", ".", "startswith", "(", "'__'", ")", ":", "field", "=", "field", ".", "replace", "(", "'__'", ",", "'.'", ")", "if", "field", "[", "0", "]", "==", "'-'", ":", "direction", "=", "DESCENDING", "if", "field", "[", "0", "]", "in", "(", "'+'", ",", "'-'", ")", ":", "field", "=", "field", "[", "1", ":", "]", "_field", "=", "traverse", "(", "Document", ",", "field", ",", "default", "=", "None", ")", "result", ".", "append", "(", "(", "(", "~", "_field", ")", "if", "_field", "else", "field", ",", "direction", ")", ")", "return", "result" ]
Generate a MongoDB sort order list using the Django ORM style.
[ "Generate", "a", "MongoDB", "sort", "order", "list", "using", "the", "Django", "ORM", "style", "." ]
2066dc73e281b8a46cb5fc965267d6b8e1b18467
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/param/sort.py#L12-L38
train
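A behavioural sketch of S() with a made-up document class; the field import path is assumed, and ASCENDING/DESCENDING are pymongo's 1/-1 constants:

from marrow.mongo import Document
from marrow.mongo.field import String  # import path assumed
from marrow.mongo.param import S

class Person(Document):
    name = String()

print(S(Person, 'name'))       # [('name', 1)]  -> ASCENDING
print(S(Person, '-name'))      # [('name', -1)] -> DESCENDING
print(S(Person, ('age', -1)))  # pre-built tuples pass straight through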
vkruoso/receita-tools
receita/tools/get.py
Get.run
def run(self):
        """Reads data from CNPJ list and writes results to output directory."""
        self._assure_output_dir(self.output)
        companies = self.read()
        print '%s CNPJs found' % len(companies)

        pbar = ProgressBar(
            widgets=[Counter(), ' ', Percentage(), ' ', Bar(), ' ', Timer()],
            maxval=len(companies)).start()

        resolved = 0
        runner = Runner(companies, self.days, self.token)
        try:
            for data in runner:
                self.write(data)
                resolved = resolved + 1
                pbar.update(resolved)
        except KeyboardInterrupt:
            print '\naborted: waiting current requests to finish.'
            runner.stop()
            return

        pbar.finish()
python
def run(self):
        """Reads data from CNPJ list and writes results to output directory."""
        self._assure_output_dir(self.output)
        companies = self.read()
        print '%s CNPJs found' % len(companies)

        pbar = ProgressBar(
            widgets=[Counter(), ' ', Percentage(), ' ', Bar(), ' ', Timer()],
            maxval=len(companies)).start()

        resolved = 0
        runner = Runner(companies, self.days, self.token)
        try:
            for data in runner:
                self.write(data)
                resolved = resolved + 1
                pbar.update(resolved)
        except KeyboardInterrupt:
            print '\naborted: waiting current requests to finish.'
            runner.stop()
            return

        pbar.finish()
[ "def", "run", "(", "self", ")", ":", "self", ".", "_assure_output_dir", "(", "self", ".", "output", ")", "companies", "=", "self", ".", "read", "(", ")", "print", "'%s CNPJs found'", "%", "len", "(", "companies", ")", "pbar", "=", "ProgressBar", "(", "widgets", "=", "[", "Counter", "(", ")", ",", "' '", ",", "Percentage", "(", ")", ",", "' '", ",", "Bar", "(", ")", ",", "' '", ",", "Timer", "(", ")", "]", ",", "maxval", "=", "len", "(", "companies", ")", ")", ".", "start", "(", ")", "resolved", "=", "0", "runner", "=", "Runner", "(", "companies", ",", "self", ".", "days", ",", "self", ".", "token", ")", "try", ":", "for", "data", "in", "runner", ":", "self", ".", "write", "(", "data", ")", "resolved", "=", "resolved", "+", "1", "pbar", ".", "update", "(", "resolved", ")", "except", "KeyboardInterrupt", ":", "print", "'\\naborted: waiting current requests to finish.'", "runner", ".", "stop", "(", ")", "return", "pbar", ".", "finish", "(", ")" ]
Reads data from CNPJ list and writes results to output directory.
[ "Reads", "data", "from", "CNPJ", "list", "and", "write", "results", "to", "output", "directory", "." ]
fd62a252c76541c9feac6470b9048b31348ffe86
https://github.com/vkruoso/receita-tools/blob/fd62a252c76541c9feac6470b9048b31348ffe86/receita/tools/get.py#L25-L48
train
vkruoso/receita-tools
receita/tools/get.py
Get.read
def read(self):
        """Reads data from the CSV file."""
        companies = []
        with open(self.file) as f:
            reader = unicodecsv.reader(f)
            for line in reader:
                if len(line) >= 1:
                    cnpj = self.format(line[0])
                    if self.valid(cnpj):
                        companies.append(cnpj)
        return companies
python
def read(self):
        """Reads data from the CSV file."""
        companies = []
        with open(self.file) as f:
            reader = unicodecsv.reader(f)
            for line in reader:
                if len(line) >= 1:
                    cnpj = self.format(line[0])
                    if self.valid(cnpj):
                        companies.append(cnpj)
        return companies
[ "def", "read", "(", "self", ")", ":", "companies", "=", "[", "]", "with", "open", "(", "self", ".", "file", ")", "as", "f", ":", "reader", "=", "unicodecsv", ".", "reader", "(", "f", ")", "for", "line", "in", "reader", ":", "if", "len", "(", "line", ")", ">=", "1", ":", "cnpj", "=", "self", ".", "format", "(", "line", "[", "0", "]", ")", "if", "self", ".", "valid", "(", "cnpj", ")", ":", "companies", ".", "append", "(", "cnpj", ")", "return", "companies" ]
Reads data from the CSV file.
[ "Reads", "data", "from", "the", "CSV", "file", "." ]
fd62a252c76541c9feac6470b9048b31348ffe86
https://github.com/vkruoso/receita-tools/blob/fd62a252c76541c9feac6470b9048b31348ffe86/receita/tools/get.py#L50-L60
train
vkruoso/receita-tools
receita/tools/get.py
Get.write
def write(self, data):
        """Writes json data to the output directory."""
        cnpj, data = data
        path = os.path.join(self.output, '%s.json' % cnpj)
        with open(path, 'w') as f:
            json.dump(data, f, encoding='utf-8')
python
def write(self, data):
        """Writes json data to the output directory."""
        cnpj, data = data
        path = os.path.join(self.output, '%s.json' % cnpj)
        with open(path, 'w') as f:
            json.dump(data, f, encoding='utf-8')
[ "def", "write", "(", "self", ",", "data", ")", ":", "cnpj", ",", "data", "=", "data", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "output", ",", "'%s.json'", "%", "cnpj", ")", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "data", ",", "f", ",", "encoding", "=", "'utf-8'", ")" ]
Writes json data to the output directory.
[ "Writes", "json", "data", "to", "the", "output", "directory", "." ]
fd62a252c76541c9feac6470b9048b31348ffe86
https://github.com/vkruoso/receita-tools/blob/fd62a252c76541c9feac6470b9048b31348ffe86/receita/tools/get.py#L62-L68
train
vkruoso/receita-tools
receita/tools/get.py
Get.valid
def valid(self, cnpj):
        """Check if a CNPJ is valid.

        We should avoid sending invalid CNPJ to the web service as we
        know it is going to be a waste of bandwidth. Assumes CNPJ is
        a string.
        """
        if len(cnpj) != 14:
            return False

        tam = 12
        nums = cnpj[:tam]
        digs = cnpj[tam:]
        tot = 0
        pos = tam-7

        for i in range(tam, 0, -1):
            tot = tot + int(nums[tam-i])*pos
            pos = pos - 1
            if pos < 2:
                pos = 9

        res = 0 if tot % 11 < 2 else 11 - (tot % 11)
        if res != int(digs[0]):
            return False

        tam = tam + 1
        nums = cnpj[:tam]
        tot = 0
        pos = tam-7

        for i in range(tam, 0, -1):
            tot = tot + int(nums[tam-i])*pos
            pos = pos - 1
            if pos < 2:
                pos = 9

        res = 0 if tot % 11 < 2 else 11 - (tot % 11)
        if res != int(digs[1]):
            return False

        return True
python
def valid(self, cnpj):
        """Check if a CNPJ is valid.

        We should avoid sending invalid CNPJ to the web service as we
        know it is going to be a waste of bandwidth. Assumes CNPJ is
        a string.
        """
        if len(cnpj) != 14:
            return False

        tam = 12
        nums = cnpj[:tam]
        digs = cnpj[tam:]
        tot = 0
        pos = tam-7

        for i in range(tam, 0, -1):
            tot = tot + int(nums[tam-i])*pos
            pos = pos - 1
            if pos < 2:
                pos = 9

        res = 0 if tot % 11 < 2 else 11 - (tot % 11)
        if res != int(digs[0]):
            return False

        tam = tam + 1
        nums = cnpj[:tam]
        tot = 0
        pos = tam-7

        for i in range(tam, 0, -1):
            tot = tot + int(nums[tam-i])*pos
            pos = pos - 1
            if pos < 2:
                pos = 9

        res = 0 if tot % 11 < 2 else 11 - (tot % 11)
        if res != int(digs[1]):
            return False

        return True
[ "def", "valid", "(", "self", ",", "cnpj", ")", ":", "if", "len", "(", "cnpj", ")", "!=", "14", ":", "return", "False", "tam", "=", "12", "nums", "=", "cnpj", "[", ":", "tam", "]", "digs", "=", "cnpj", "[", "tam", ":", "]", "tot", "=", "0", "pos", "=", "tam", "-", "7", "for", "i", "in", "range", "(", "tam", ",", "0", ",", "-", "1", ")", ":", "tot", "=", "tot", "+", "int", "(", "nums", "[", "tam", "-", "i", "]", ")", "*", "pos", "pos", "=", "pos", "-", "1", "if", "pos", "<", "2", ":", "pos", "=", "9", "res", "=", "0", "if", "tot", "%", "11", "<", "2", "else", "11", "-", "(", "tot", "%", "11", ")", "if", "res", "!=", "int", "(", "digs", "[", "0", "]", ")", ":", "return", "False", "tam", "=", "tam", "+", "1", "nums", "=", "cnpj", "[", ":", "tam", "]", "tot", "=", "0", "pos", "=", "tam", "-", "7", "for", "i", "in", "range", "(", "tam", ",", "0", ",", "-", "1", ")", ":", "tot", "=", "tot", "+", "int", "(", "nums", "[", "tam", "-", "i", "]", ")", "*", "pos", "pos", "=", "pos", "-", "1", "if", "pos", "<", "2", ":", "pos", "=", "9", "res", "=", "0", "if", "tot", "%", "11", "<", "2", "else", "11", "-", "(", "tot", "%", "11", ")", "if", "res", "!=", "int", "(", "digs", "[", "1", "]", ")", ":", "return", "False", "return", "True" ]
Check if a CNPJ is valid. We should avoid sending invalid CNPJ to the web service as we know it is going to be a waste of bandwidth. Assumes CNPJ is a string.
[ "Check", "if", "a", "CNPJ", "is", "valid", "." ]
fd62a252c76541c9feac6470b9048b31348ffe86
https://github.com/vkruoso/receita-tools/blob/fd62a252c76541c9feac6470b9048b31348ffe86/receita/tools/get.py#L88-L125
train
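The two loops above implement the standard mod-11 check-digit rule; the same rule as a standalone function, using the textbook valid example CNPJ 11444777000161:

def cnpj_check_digit(digits):
    # Weights run (n-7)..2 then 9..2, matching the pos/tam dance above.
    weights = list(range(len(digits) - 7, 1, -1)) + list(range(9, 1, -1))
    total = sum(int(d) * w for d, w in zip(digits, weights))
    return 0 if total % 11 < 2 else 11 - (total % 11)

def cnpj_valid(cnpj):
    return (len(cnpj) == 14 and cnpj.isdigit()
            and cnpj_check_digit(cnpj[:12]) == int(cnpj[12])
            and cnpj_check_digit(cnpj[:13]) == int(cnpj[13]))

print(cnpj_valid('11444777000161'))  # True
print(cnpj_valid('11444777000162'))  # False (last digit corrupted)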
OpenTreeOfLife/peyotl
peyotl/utility/get_config.py
get_default_config_filename
def get_default_config_filename():
    """Returns the configuration filepath.

    If PEYOTL_CONFIG_FILE is in the env that is the preferred choice; otherwise ~/.peyotl/config
    is preferred. If the preferred file does not exist, then the packaged peyotl/default.conf
    from the installation of peyotl is used.

    A RuntimeError is raised if that fails.
    """
    global _CONFIG_FN
    if _CONFIG_FN is not None:
        return _CONFIG_FN
    with _CONFIG_FN_LOCK:
        if _CONFIG_FN is not None:
            return _CONFIG_FN
        if 'PEYOTL_CONFIG_FILE' in os.environ:
            cfn = os.path.abspath(os.environ['PEYOTL_CONFIG_FILE'])
        else:
            cfn = os.path.expanduser("~/.peyotl/config")
        if not os.path.isfile(cfn):
            # noinspection PyProtectedMember
            if 'PEYOTL_CONFIG_FILE' in os.environ:
                from peyotl.utility.get_logger import warn_from_util_logger
                msg = 'Filepath "{}" specified via PEYOTL_CONFIG_FILE={} was not found'.format(
                    cfn, os.environ['PEYOTL_CONFIG_FILE'])
                warn_from_util_logger(msg)
            from pkg_resources import Requirement, resource_filename
            pr = Requirement.parse('peyotl')
            cfn = resource_filename(pr, 'peyotl/default.conf')
        if not os.path.isfile(cfn):
            raise RuntimeError('The peyotl configuration file cascade failed looking for "{}"'.format(cfn))
        _CONFIG_FN = os.path.abspath(cfn)
    return _CONFIG_FN
python
def get_default_config_filename():
    """Returns the configuration filepath.

    If PEYOTL_CONFIG_FILE is in the env that is the preferred choice; otherwise ~/.peyotl/config
    is preferred. If the preferred file does not exist, then the packaged peyotl/default.conf
    from the installation of peyotl is used.

    A RuntimeError is raised if that fails.
    """
    global _CONFIG_FN
    if _CONFIG_FN is not None:
        return _CONFIG_FN
    with _CONFIG_FN_LOCK:
        if _CONFIG_FN is not None:
            return _CONFIG_FN
        if 'PEYOTL_CONFIG_FILE' in os.environ:
            cfn = os.path.abspath(os.environ['PEYOTL_CONFIG_FILE'])
        else:
            cfn = os.path.expanduser("~/.peyotl/config")
        if not os.path.isfile(cfn):
            # noinspection PyProtectedMember
            if 'PEYOTL_CONFIG_FILE' in os.environ:
                from peyotl.utility.get_logger import warn_from_util_logger
                msg = 'Filepath "{}" specified via PEYOTL_CONFIG_FILE={} was not found'.format(
                    cfn, os.environ['PEYOTL_CONFIG_FILE'])
                warn_from_util_logger(msg)
            from pkg_resources import Requirement, resource_filename
            pr = Requirement.parse('peyotl')
            cfn = resource_filename(pr, 'peyotl/default.conf')
        if not os.path.isfile(cfn):
            raise RuntimeError('The peyotl configuration file cascade failed looking for "{}"'.format(cfn))
        _CONFIG_FN = os.path.abspath(cfn)
    return _CONFIG_FN
[ "def", "get_default_config_filename", "(", ")", ":", "global", "_CONFIG_FN", "if", "_CONFIG_FN", "is", "not", "None", ":", "return", "_CONFIG_FN", "with", "_CONFIG_FN_LOCK", ":", "if", "_CONFIG_FN", "is", "not", "None", ":", "return", "_CONFIG_FN", "if", "'PEYOTL_CONFIG_FILE'", "in", "os", ".", "environ", ":", "cfn", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "environ", "[", "'PEYOTL_CONFIG_FILE'", "]", ")", "else", ":", "cfn", "=", "os", ".", "path", ".", "expanduser", "(", "\"~/.peyotl/config\"", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "cfn", ")", ":", "# noinspection PyProtectedMember", "if", "'PEYOTL_CONFIG_FILE'", "in", "os", ".", "environ", ":", "from", "peyotl", ".", "utility", ".", "get_logger", "import", "warn_from_util_logger", "msg", "=", "'Filepath \"{}\" specified via PEYOTL_CONFIG_FILE={} was not found'", ".", "format", "(", "cfn", ",", "os", ".", "environ", "[", "'PEYOTL_CONFIG_FILE'", "]", ")", "warn_from_util_logger", "(", "msg", ")", "from", "pkg_resources", "import", "Requirement", ",", "resource_filename", "pr", "=", "Requirement", ".", "parse", "(", "'peyotl'", ")", "cfn", "=", "resource_filename", "(", "pr", ",", "'peyotl/default.conf'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "cfn", ")", ":", "raise", "RuntimeError", "(", "'The peyotl configuration file cascade failed looking for \"{}\"'", ".", "format", "(", "cfn", ")", ")", "_CONFIG_FN", "=", "os", ".", "path", ".", "abspath", "(", "cfn", ")", "return", "_CONFIG_FN" ]
Returns the configuration filepath. If PEYOTL_CONFIG_FILE is in the env that is the preferred choice; otherwise ~/.peyotl/config is preferred. If the preferred file does not exist, then the packaged peyotl/default.conf from the installation of peyotl is used. A RuntimeError is raised if that fails.
[ "Returns", "the", "configuration", "filepath", "." ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/get_config.py#L73-L104
train
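How the cascade is typically exercised from the outside; the path below is an example, and if it does not exist the function falls back to ~/.peyotl/config and then the packaged default.conf:

import os
os.environ['PEYOTL_CONFIG_FILE'] = '/tmp/my_peyotl.conf'  # set before the first lookup

from peyotl.utility.get_config import get_default_config_filename
print(get_default_config_filename())  # /tmp/my_peyotl.conf, if that file exists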
OpenTreeOfLife/peyotl
peyotl/utility/get_config.py
get_raw_default_config_and_read_file_list
def get_raw_default_config_and_read_file_list():
    """Returns a ConfigParser object and a list of filenames that were parsed to initialize it"""
    global _CONFIG, _READ_DEFAULT_FILES
    if _CONFIG is not None:
        return _CONFIG, _READ_DEFAULT_FILES
    with _CONFIG_LOCK:
        if _CONFIG is not None:
            return _CONFIG, _READ_DEFAULT_FILES
        try:
            # noinspection PyCompatibility
            from ConfigParser import SafeConfigParser
        except ImportError:
            # noinspection PyCompatibility,PyUnresolvedReferences
            from configparser import ConfigParser as SafeConfigParser  # pylint: disable=F0401
        cfg = SafeConfigParser()
        read_files = cfg.read(get_default_config_filename())
        _CONFIG, _READ_DEFAULT_FILES = cfg, read_files
    return _CONFIG, _READ_DEFAULT_FILES
python
def get_raw_default_config_and_read_file_list():
    """Returns a ConfigParser object and a list of filenames that were parsed to initialize it"""
    global _CONFIG, _READ_DEFAULT_FILES
    if _CONFIG is not None:
        return _CONFIG, _READ_DEFAULT_FILES
    with _CONFIG_LOCK:
        if _CONFIG is not None:
            return _CONFIG, _READ_DEFAULT_FILES
        try:
            # noinspection PyCompatibility
            from ConfigParser import SafeConfigParser
        except ImportError:
            # noinspection PyCompatibility,PyUnresolvedReferences
            from configparser import ConfigParser as SafeConfigParser  # pylint: disable=F0401
        cfg = SafeConfigParser()
        read_files = cfg.read(get_default_config_filename())
        _CONFIG, _READ_DEFAULT_FILES = cfg, read_files
    return _CONFIG, _READ_DEFAULT_FILES
[ "def", "get_raw_default_config_and_read_file_list", "(", ")", ":", "global", "_CONFIG", ",", "_READ_DEFAULT_FILES", "if", "_CONFIG", "is", "not", "None", ":", "return", "_CONFIG", ",", "_READ_DEFAULT_FILES", "with", "_CONFIG_LOCK", ":", "if", "_CONFIG", "is", "not", "None", ":", "return", "_CONFIG", ",", "_READ_DEFAULT_FILES", "try", ":", "# noinspection PyCompatibility", "from", "ConfigParser", "import", "SafeConfigParser", "except", "ImportError", ":", "# noinspection PyCompatibility,PyUnresolvedReferences", "from", "configparser", "import", "ConfigParser", "as", "SafeConfigParser", "# pylint: disable=F0401", "cfg", "=", "SafeConfigParser", "(", ")", "read_files", "=", "cfg", ".", "read", "(", "get_default_config_filename", "(", ")", ")", "_CONFIG", ",", "_READ_DEFAULT_FILES", "=", "cfg", ",", "read_files", "return", "_CONFIG", ",", "_READ_DEFAULT_FILES" ]
Returns a ConfigParser object and a list of filenames that were parsed to initialize it
[ "Returns", "a", "ConfigParser", "object", "and", "a", "list", "of", "filenames", "that", "were", "parsed", "to", "initialize", "it" ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/get_config.py#L107-L124
train
OpenTreeOfLife/peyotl
peyotl/utility/get_config.py
get_config_object
def get_config_object():
    """Thread-safe accessor for the immutable default ConfigWrapper object"""
    global _DEFAULT_CONFIG_WRAPPER
    if _DEFAULT_CONFIG_WRAPPER is not None:
        return _DEFAULT_CONFIG_WRAPPER
    with _DEFAULT_CONFIG_WRAPPER_LOCK:
        if _DEFAULT_CONFIG_WRAPPER is not None:
            return _DEFAULT_CONFIG_WRAPPER
        _DEFAULT_CONFIG_WRAPPER = ConfigWrapper()
    return _DEFAULT_CONFIG_WRAPPER
python
def get_config_object():
    """Thread-safe accessor for the immutable default ConfigWrapper object"""
    global _DEFAULT_CONFIG_WRAPPER
    if _DEFAULT_CONFIG_WRAPPER is not None:
        return _DEFAULT_CONFIG_WRAPPER
    with _DEFAULT_CONFIG_WRAPPER_LOCK:
        if _DEFAULT_CONFIG_WRAPPER is not None:
            return _DEFAULT_CONFIG_WRAPPER
        _DEFAULT_CONFIG_WRAPPER = ConfigWrapper()
    return _DEFAULT_CONFIG_WRAPPER
[ "def", "get_config_object", "(", ")", ":", "global", "_DEFAULT_CONFIG_WRAPPER", "if", "_DEFAULT_CONFIG_WRAPPER", "is", "not", "None", ":", "return", "_DEFAULT_CONFIG_WRAPPER", "with", "_DEFAULT_CONFIG_WRAPPER_LOCK", ":", "if", "_DEFAULT_CONFIG_WRAPPER", "is", "not", "None", ":", "return", "_DEFAULT_CONFIG_WRAPPER", "_DEFAULT_CONFIG_WRAPPER", "=", "ConfigWrapper", "(", ")", "return", "_DEFAULT_CONFIG_WRAPPER" ]
Thread-safe accessor for the immutable default ConfigWrapper object
[ "Thread", "-", "safe", "accessor", "for", "the", "immutable", "default", "ConfigWrapper", "object" ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/get_config.py#L315-L324
train
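All three peyotl accessors in this file use the same double-checked locking idiom; a generic, self-contained version of the pattern for reference:

import threading

_instance = None
_instance_lock = threading.Lock()

def get_singleton(factory=dict):
    global _instance
    if _instance is not None:       # fast path: no lock once initialized
        return _instance
    with _instance_lock:
        if _instance is None:       # re-check: another thread may have won the race
            _instance = factory()
    return _instance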
OpenTreeOfLife/peyotl
peyotl/utility/get_config.py
ConfigWrapper.get_from_config_setting_cascade
def get_from_config_setting_cascade(self, sec_param_list, default=None, warn_on_none_level=logging.WARN):
        """return the first non-None setting from a series where each element in `sec_param_list` is a section,
        param pair suitable for a get_config_setting call.

        Note that non-None values for overrides for this ConfigWrapper instance will cause this call to
        only evaluate the first element in the cascade.
        """
        for section, param in sec_param_list:
            r = self.get_config_setting(section, param, default=None, warn_on_none_level=None)
            if r is not None:
                return r
        section, param = sec_param_list[-1]
        if default is None:
            _warn_missing_setting(section, param, self._config_filename, warn_on_none_level)
        return default
python
def get_from_config_setting_cascade(self, sec_param_list, default=None, warn_on_none_level=logging.WARN):
        """return the first non-None setting from a series where each element in `sec_param_list` is a section,
        param pair suitable for a get_config_setting call.

        Note that non-None values for overrides for this ConfigWrapper instance will cause this call to
        only evaluate the first element in the cascade.
        """
        for section, param in sec_param_list:
            r = self.get_config_setting(section, param, default=None, warn_on_none_level=None)
            if r is not None:
                return r
        section, param = sec_param_list[-1]
        if default is None:
            _warn_missing_setting(section, param, self._config_filename, warn_on_none_level)
        return default
[ "def", "get_from_config_setting_cascade", "(", "self", ",", "sec_param_list", ",", "default", "=", "None", ",", "warn_on_none_level", "=", "logging", ".", "WARN", ")", ":", "for", "section", ",", "param", "in", "sec_param_list", ":", "r", "=", "self", ".", "get_config_setting", "(", "section", ",", "param", ",", "default", "=", "None", ",", "warn_on_none_level", "=", "None", ")", "if", "r", "is", "not", "None", ":", "return", "r", "section", ",", "param", "=", "sec_param_list", "[", "-", "1", "]", "if", "default", "is", "None", ":", "_warn_missing_setting", "(", "section", ",", "param", ",", "self", ".", "_config_filename", ",", "warn_on_none_level", ")", "return", "default" ]
return the first non-None setting from a series where each element in `sec_param_list` is a section, param pair suitable for a get_config_setting call. Note that non-None values for overrides for this ConfigWrapper instance will cause this call to only evaluate the first element in the cascade.
[ "return", "the", "first", "non", "-", "None", "setting", "from", "a", "series", "where", "each", "element", "in", "sec_param_list", "is", "a", "section", "param", "pair", "suitable", "for", "a", "get_config_setting", "call", "." ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/get_config.py#L213-L228
train
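A hedged usage sketch of the cascade; the section and option names are invented, and get_config_object() comes from the record above:

from peyotl.utility.get_config import get_config_object

cfg = get_config_object()
# Try [phylesystem] parent first, then fall back to [apis] phylesystem_api:
url = cfg.get_from_config_setting_cascade(
    [('phylesystem', 'parent'), ('apis', 'phylesystem_api')],
    default='https://api.opentreeoflife.org')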
hsolbrig/pyjsg
pyjsg/parser_impl/generate_python.py
parse
def parse(input_: Union[str, FileStream], source: str) -> Optional[str]:
    """Parse the text in infile and save the results in outfile

    :param input_: string or stream to parse
    :param source: source name for python file header
    :return: python text if successful
    """
    # Step 1: Tokenize the input stream
    error_listener = ParseErrorListener()
    if not isinstance(input_, FileStream):
        input_ = InputStream(input_)
    lexer = jsgLexer(input_)
    lexer.addErrorListener(error_listener)
    tokens = CommonTokenStream(lexer)
    tokens.fill()
    if error_listener.n_errors:
        return None

    # Step 2: Generate the parse tree
    parser = jsgParser(tokens)
    parser.addErrorListener(error_listener)
    parse_tree = parser.doc()
    if error_listener.n_errors:
        return None

    # Step 3: Transform the results
    parser = JSGDocParser()
    parser.visit(parse_tree)
    if parser.undefined_tokens():
        for tkn in parser.undefined_tokens():
            print("Undefined token: " + tkn)
        return None
    return parser.as_python(source)
python
def parse(input_: Union[str, FileStream], source: str) -> Optional[str]:
    """Parse the text in infile and save the results in outfile

    :param input_: string or stream to parse
    :param source: source name for python file header
    :return: python text if successful
    """
    # Step 1: Tokenize the input stream
    error_listener = ParseErrorListener()
    if not isinstance(input_, FileStream):
        input_ = InputStream(input_)
    lexer = jsgLexer(input_)
    lexer.addErrorListener(error_listener)
    tokens = CommonTokenStream(lexer)
    tokens.fill()
    if error_listener.n_errors:
        return None

    # Step 2: Generate the parse tree
    parser = jsgParser(tokens)
    parser.addErrorListener(error_listener)
    parse_tree = parser.doc()
    if error_listener.n_errors:
        return None

    # Step 3: Transform the results
    parser = JSGDocParser()
    parser.visit(parse_tree)
    if parser.undefined_tokens():
        for tkn in parser.undefined_tokens():
            print("Undefined token: " + tkn)
        return None
    return parser.as_python(source)
[ "def", "parse", "(", "input_", ":", "Union", "[", "str", ",", "FileStream", "]", ",", "source", ":", "str", ")", "->", "Optional", "[", "str", "]", ":", "# Step 1: Tokenize the input stream", "error_listener", "=", "ParseErrorListener", "(", ")", "if", "not", "isinstance", "(", "input_", ",", "FileStream", ")", ":", "input_", "=", "InputStream", "(", "input_", ")", "lexer", "=", "jsgLexer", "(", "input_", ")", "lexer", ".", "addErrorListener", "(", "error_listener", ")", "tokens", "=", "CommonTokenStream", "(", "lexer", ")", "tokens", ".", "fill", "(", ")", "if", "error_listener", ".", "n_errors", ":", "return", "None", "# Step 2: Generate the parse tree", "parser", "=", "jsgParser", "(", "tokens", ")", "parser", ".", "addErrorListener", "(", "error_listener", ")", "parse_tree", "=", "parser", ".", "doc", "(", ")", "if", "error_listener", ".", "n_errors", ":", "return", "None", "# Step 3: Transform the results the results", "parser", "=", "JSGDocParser", "(", ")", "parser", ".", "visit", "(", "parse_tree", ")", "if", "parser", ".", "undefined_tokens", "(", ")", ":", "for", "tkn", "in", "parser", ".", "undefined_tokens", "(", ")", ":", "print", "(", "\"Undefined token: \"", "+", "tkn", ")", "return", "None", "return", "parser", ".", "as_python", "(", "source", ")" ]
Parse the text in `input_` and return the generated Python text :param input_: string or stream to parse :param source: source name for python file header :return: python text if successful
[ "Parse", "the", "text", "in", "infile", "and", "save", "the", "results", "in", "outfile" ]
9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/generate_python.py#L56-L91
train
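A quick sketch of driving the generator above; the one-line JSG grammar text is an assumption about JSG syntax, not taken from the pyjsg docs::

    from pyjsg.parser_impl.generate_python import parse

    jsg_text = 'doc {status:@string}'      # assumed minimal JSG grammar
    python_src = parse(jsg_text, "example.jsg")
    if python_src is not None:
        print(python_src)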
globocom/tornado-alf
tornadoalf/client.py
Client.fetch
def fetch(self, request, callback=None, raise_error=True, **kwargs): """Executes a request by AsyncHTTPClient, asynchronously returning a `tornado.HTTPResponse`. The ``raise_error=False`` argument currently suppresses *all* errors, encapsulating them in `HTTPResponse` objects following the tornado http-client standard """ # accepts request as string then convert it to HTTPRequest if isinstance(request, str): request = HTTPRequest(request, **kwargs) try: # The first request calls tornado-client ignoring the # possible exception, in case of 401 response, # renews the access token and replay it response = yield self._authorized_fetch(request, callback, raise_error=False, **kwargs) if response.code == BAD_TOKEN: yield self._token_manager.reset_token() elif response.error and raise_error: raise response.error else: raise gen.Return(response) # The request with renewed token response = yield self._authorized_fetch(request, callback, raise_error=raise_error, **kwargs) raise gen.Return(response) except TokenError as err: yield self._token_manager.reset_token() raise err
python
def fetch(self, request, callback=None, raise_error=True, **kwargs): """Executes a request by AsyncHTTPClient, asynchronously returning a `tornado.HTTPResponse`. The ``raise_error=False`` argument currently suppresses *all* errors, encapsulating them in `HTTPResponse` objects following the tornado http-client standard """ # accepts request as string then convert it to HTTPRequest if isinstance(request, str): request = HTTPRequest(request, **kwargs) try: # The first request calls tornado-client ignoring the # possible exception, in case of 401 response, # renews the access token and replay it response = yield self._authorized_fetch(request, callback, raise_error=False, **kwargs) if response.code == BAD_TOKEN: yield self._token_manager.reset_token() elif response.error and raise_error: raise response.error else: raise gen.Return(response) # The request with renewed token response = yield self._authorized_fetch(request, callback, raise_error=raise_error, **kwargs) raise gen.Return(response) except TokenError as err: yield self._token_manager.reset_token() raise err
[ "def", "fetch", "(", "self", ",", "request", ",", "callback", "=", "None", ",", "raise_error", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# accepts request as string then convert it to HTTPRequest", "if", "isinstance", "(", "request", ",", "str", ")", ":", "request", "=", "HTTPRequest", "(", "request", ",", "*", "*", "kwargs", ")", "try", ":", "# The first request calls tornado-client ignoring the", "# possible exception, in case of 401 response,", "# renews the access token and replay it", "response", "=", "yield", "self", ".", "_authorized_fetch", "(", "request", ",", "callback", ",", "raise_error", "=", "False", ",", "*", "*", "kwargs", ")", "if", "response", ".", "code", "==", "BAD_TOKEN", ":", "yield", "self", ".", "_token_manager", ".", "reset_token", "(", ")", "elif", "response", ".", "error", "and", "raise_error", ":", "raise", "response", ".", "error", "else", ":", "raise", "gen", ".", "Return", "(", "response", ")", "# The request with renewed token", "response", "=", "yield", "self", ".", "_authorized_fetch", "(", "request", ",", "callback", ",", "raise_error", "=", "raise_error", ",", "*", "*", "kwargs", ")", "raise", "gen", ".", "Return", "(", "response", ")", "except", "TokenError", "as", "err", ":", "yield", "self", ".", "_token_manager", ".", "reset_token", "(", ")", "raise", "err" ]
Executes a request by AsyncHTTPClient, asynchronously returning a `tornado.HTTPResponse`. The ``raise_error=False`` argument currently suppresses *all* errors, encapsulating them in `HTTPResponse` objects following the tornado http-client standard
[ "Executes", "a", "request", "by", "AsyncHTTPClient", "asynchronously", "returning", "an", "tornado", ".", "HTTPResponse", "." ]
3c3ec58c33f2d4ddfbed4ac18ca89d6beedf9c87
https://github.com/globocom/tornado-alf/blob/3c3ec58c33f2d4ddfbed4ac18ca89d6beedf9c87/tornadoalf/client.py#L29-L66
train
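A hedged usage sketch for the OAuth-aware fetch above; the Client constructor keyword arguments are assumptions about the token setup and do not appear in this excerpt::

    from tornado import gen, ioloop
    from tornadoalf.client import Client

    @gen.coroutine
    def main():
        # constructor kwargs below are illustrative assumptions
        client = Client(token_endpoint='https://example.com/token',
                        client_id='client-id',
                        client_secret='client-secret')
        response = yield client.fetch('https://example.com/resource')
        print(response.code)

    ioloop.IOLoop.current().run_sync(main)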
PSPC-SPAC-buyandsell/von_agent
von_agent/validate_config.py
validate_config
def validate_config(key: str, config: dict) -> None: """ Call jsonschema validation to raise JSONValidation on non-compliance or silently pass. :param key: validation schema key of interest :param config: configuration dict to validate """ try: jsonschema.validate(config, CONFIG_JSON_SCHEMA[key]) except jsonschema.ValidationError as x_validation: raise JSONValidation('JSON validation error on {} configuration: {}'.format(key, x_validation.message)) except jsonschema.SchemaError as x_schema: raise JSONValidation('JSON schema error on {} specification: {}'.format(key, x_schema.message))
python
def validate_config(key: str, config: dict) -> None: """ Call jsonschema validation to raise JSONValidation on non-compliance or silently pass. :param key: validation schema key of interest :param config: configuration dict to validate """ try: jsonschema.validate(config, CONFIG_JSON_SCHEMA[key]) except jsonschema.ValidationError as x_validation: raise JSONValidation('JSON validation error on {} configuration: {}'.format(key, x_validation.message)) except jsonschema.SchemaError as x_schema: raise JSONValidation('JSON schema error on {} specification: {}'.format(key, x_schema.message))
[ "def", "validate_config", "(", "key", ":", "str", ",", "config", ":", "dict", ")", "->", "None", ":", "try", ":", "jsonschema", ".", "validate", "(", "config", ",", "CONFIG_JSON_SCHEMA", "[", "key", "]", ")", "except", "jsonschema", ".", "ValidationError", "as", "x_validation", ":", "raise", "JSONValidation", "(", "'JSON validation error on {} configuration: {}'", ".", "format", "(", "key", ",", "x_validation", ".", "message", ")", ")", "except", "jsonschema", ".", "SchemaError", "as", "x_schema", ":", "raise", "JSONValidation", "(", "'JSON schema error on {} specification: {}'", ".", "format", "(", "key", ",", "x_schema", ".", "message", ")", ")" ]
Call jsonschema validation to raise JSONValidation on non-compliance or silently pass. :param key: validation schema key of interest :param config: configuration dict to validate
[ "Call", "jsonschema", "validation", "to", "raise", "JSONValidation", "on", "non", "-", "compliance", "or", "silently", "pass", "." ]
0b1c17cca3bd178b6e6974af84dbac1dfce5cf45
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/validate_config.py#L86-L99
train
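A minimal sketch of the validation call above; the 'pool' schema key and the error-module import path are assumptions, not confirmed by this excerpt::

    from von_agent.validate_config import validate_config
    from von_agent.error import JSONValidation  # assumed module path

    try:
        validate_config('pool', {'auto-remove': True})  # 'pool' key assumed
    except JSONValidation as x_json:
        print('Configuration rejected: {}'.format(x_json))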
CybOXProject/mixbox
mixbox/signals.py
__make_id
def __make_id(receiver): """Generate an identifier for a callable signal receiver. This is used when disconnecting receivers, where we need to correctly establish equivalence between the input receiver and the receivers assigned to a signal. Args: receiver: A callable object. Returns: An identifier for the receiver. """ if __is_bound_method(receiver): return (id(receiver.__func__), id(receiver.__self__)) return id(receiver)
python
def __make_id(receiver): """Generate an identifier for a callable signal receiver. This is used when disconnecting receivers, where we need to correctly establish equivalence between the input receiver and the receivers assigned to a signal. Args: receiver: A callable object. Returns: An identifier for the receiver. """ if __is_bound_method(receiver): return (id(receiver.__func__), id(receiver.__self__)) return id(receiver)
[ "def", "__make_id", "(", "receiver", ")", ":", "if", "__is_bound_method", "(", "receiver", ")", ":", "return", "(", "id", "(", "receiver", ".", "__func__", ")", ",", "id", "(", "receiver", ".", "__self__", ")", ")", "return", "id", "(", "receiver", ")" ]
Generate an identifier for a callable signal receiver. This is used when disconnecting receivers, where we need to correctly establish equivalence between the input receiver and the receivers assigned to a signal. Args: receiver: A callable object. Returns: An identifier for the receiver.
[ "Generate", "an", "identifier", "for", "a", "callable", "signal", "receiver", "." ]
9097dae7a433f5b98c18171c4a5598f69a7d30af
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/signals.py#L41-L56
train
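A standalone sketch of why bound methods need the two-part key used above: each attribute access creates a fresh bound-method object, so `id()` of the method itself is unstable, while the `(id(__func__), id(__self__))` pair is stable::

    class Greeter(object):
        def hello(self):
            print('hello')

    g = Greeter()
    m1, m2 = g.hello, g.hello
    print(m1 is m2)   # False: two distinct bound-method wrappers
    key1 = (id(m1.__func__), id(m1.__self__))
    key2 = (id(m2.__func__), id(m2.__self__))
    print(key1 == key2)   # True: same underlying function and instance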
CybOXProject/mixbox
mixbox/signals.py
__purge
def __purge(): """Remove all dead signal receivers from the global receivers collection. Note: It is assumed that the caller holds the __lock. """ global __receivers newreceivers = collections.defaultdict(list) for signal, receivers in six.iteritems(__receivers): alive = [x for x in receivers if not __is_dead(x)] newreceivers[signal] = alive __receivers = newreceivers
python
def __purge(): """Remove all dead signal receivers from the global receivers collection. Note: It is assumed that the caller holds the __lock. """ global __receivers newreceivers = collections.defaultdict(list) for signal, receivers in six.iteritems(__receivers): alive = [x for x in receivers if not __is_dead(x)] newreceivers[signal] = alive __receivers = newreceivers
[ "def", "__purge", "(", ")", ":", "global", "__receivers", "newreceivers", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "signal", ",", "receivers", "in", "six", ".", "iteritems", "(", "__receivers", ")", ":", "alive", "=", "[", "x", "for", "x", "in", "receivers", "if", "not", "__is_dead", "(", "x", ")", "]", "newreceivers", "[", "signal", "]", "=", "alive", "__receivers", "=", "newreceivers" ]
Remove all dead signal receivers from the global receivers collection. Note: It is assumed that the caller holds the __lock.
[ "Remove", "all", "dead", "signal", "receivers", "from", "the", "global", "receivers", "collection", "." ]
9097dae7a433f5b98c18171c4a5598f69a7d30af
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/signals.py#L59-L72
train
CybOXProject/mixbox
mixbox/signals.py
__live_receivers
def __live_receivers(signal): """Return all signal handlers that are currently still alive for the input `signal`. Args: signal: A signal name. Returns: A list of callable receivers for the input signal. """ with __lock: __purge() receivers = [funcref() for funcref in __receivers[signal]] return receivers
python
def __live_receivers(signal): """Return all signal handlers that are currently still alive for the input `signal`. Args: signal: A signal name. Returns: A list of callable receivers for the input signal. """ with __lock: __purge() receivers = [funcref() for funcref in __receivers[signal]] return receivers
[ "def", "__live_receivers", "(", "signal", ")", ":", "with", "__lock", ":", "__purge", "(", ")", "receivers", "=", "[", "funcref", "(", ")", "for", "funcref", "in", "__receivers", "[", "signal", "]", "]", "return", "receivers" ]
Return all signal handlers that are currently still alive for the input `signal`. Args: signal: A signal name. Returns: A list of callable receivers for the input signal.
[ "Return", "all", "signal", "handlers", "that", "are", "currently", "still", "alive", "for", "the", "input", "signal", "." ]
9097dae7a433f5b98c18171c4a5598f69a7d30af
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/signals.py#L75-L89
train
CybOXProject/mixbox
mixbox/signals.py
__is_bound_method
def __is_bound_method(method): """Return ``True`` if the `method` is a bound method (attached to a class instance). Args: method: A method or function type object. """ if not(hasattr(method, "__func__") and hasattr(method, "__self__")): return False # Bound methods have a __self__ attribute pointing to the owner instance return six.get_method_self(method) is not None
python
def __is_bound_method(method): """Return ``True`` if the `method` is a bound method (attached to a class instance). Args: method: A method or function type object. """ if not(hasattr(method, "__func__") and hasattr(method, "__self__")): return False # Bound methods have a __self__ attribute pointing to the owner instance return six.get_method_self(method) is not None
[ "def", "__is_bound_method", "(", "method", ")", ":", "if", "not", "(", "hasattr", "(", "method", ",", "\"__func__\"", ")", "and", "hasattr", "(", "method", ",", "\"__self__\"", ")", ")", ":", "return", "False", "# Bound methods have a __self__ attribute pointing to the owner instance", "return", "six", ".", "get_method_self", "(", "method", ")", "is", "not", "None" ]
Return ``True`` if the `method` is a bound method (attached to a class instance). Args: method: A method or function type object.
[ "Return", "True", "if", "the", "method", "is", "a", "bound", "method", "(", "attached", "to", "an", "class", "instance", "." ]
9097dae7a433f5b98c18171c4a5598f69a7d30af
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/signals.py#L92-L103
train
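A small demonstration of the distinction the helper above draws, using the same `six` accessor::

    import six

    def plain():
        pass

    class Widget(object):
        def method(self):
            pass

    w = Widget()
    print(hasattr(plain, '__self__'))                   # False: plain function
    print(six.get_method_self(w.method) is not None)    # True: bound to w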
CybOXProject/mixbox
mixbox/signals.py
disconnect
def disconnect(signal, receiver): """Disconnect the `receiver` from the signal identified by `signal`. Args: signal: The signal identifier. receiver: The callable receiver to disconnect. Returns: True if the receiver was successfully disconnected. False otherwise. """ inputkey = __make_id(receiver) with __lock: __purge() receivers = __receivers.get(signal, []) for idx in six.moves.range(len(receivers)): connected = receivers[idx]() if inputkey != __make_id(connected): continue del receivers[idx] return True # receiver successfully disconnected! return False
python
def disconnect(signal, receiver): """Disconnect the `receiver` from the signal identified by `signal`. Args: signal: The signal identifier. receiver: The callable receiver to disconnect. Returns: True if the receiver was successfully disconnected. False otherwise. """ inputkey = __make_id(receiver) with __lock: __purge() receivers = __receivers.get(signal, []) for idx in six.moves.range(len(receivers)): connected = receivers[idx]() if inputkey != __make_id(connected): continue del receivers[idx] return True # receiver successfully disconnected! return False
[ "def", "disconnect", "(", "signal", ",", "receiver", ")", ":", "inputkey", "=", "__make_id", "(", "receiver", ")", "with", "__lock", ":", "__purge", "(", ")", "receivers", "=", "__receivers", ".", "get", "(", "signal", ")", "for", "idx", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "receivers", ")", ")", ":", "connected", "=", "receivers", "[", "idx", "]", "(", ")", "if", "inputkey", "!=", "__make_id", "(", "connected", ")", ":", "continue", "del", "receivers", "[", "idx", "]", "return", "True", "# receiver successfully disconnected!", "return", "False" ]
Disconnect the `receiver` from the signal identified by `signal`. Args: signal: The signal identifier. receiver: The callable receiver to disconnect. Returns: True if the receiver was successfully disconnected. False otherwise.
[ "Disconnect", "the", "receiver", "func", "from", "the", "signal", "identified", "by", "signal_id", "." ]
9097dae7a433f5b98c18171c4a5598f69a7d30af
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/signals.py#L149-L175
train
CybOXProject/mixbox
mixbox/signals.py
emit
def emit(signal, *args, **kwargs): """Emit a signal by serially calling each registered signal receiver for the `signal`. Note: The receiver must accept the *args and/or **kwargs that have been passed to it. The expected parameters are not dictated by mixbox. Args: signal: A signal identifier or name. *args: A variable-length argument list to pass to the receiver. **kwargs: Keyword-arguments to pass to the receiver. """ if signal not in __receivers: return receivers = __live_receivers(signal) for func in receivers: func(*args, **kwargs)
python
def emit(signal, *args, **kwargs): """Emit a signal by serially calling each registered signal receiver for the `signal`. Note: The receiver must accept the *args and/or **kwargs that have been passed to it. The expected parameters are not dictated by mixbox. Args: signal: A signal identifier or name. *args: A variable-length argument list to pass to the receiver. **kwargs: Keyword-arguments to pass to the receiver. """ if signal not in __receivers: return receivers = __live_receivers(signal) for func in receivers: func(*args, **kwargs)
[ "def", "emit", "(", "signal", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "signal", "not", "in", "__receivers", ":", "return", "receivers", "=", "__live_receivers", "(", "signal", ")", "for", "func", "in", "receivers", ":", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Emit a signal by serially calling each registered signal receiver for the `signal`. Note: The receiver must accept the *args and/or **kwargs that have been passed to it. The expected parameters are not dictated by mixbox. Args: signal: A signal identifier or name. *args: A variable-length argument list to pass to the receiver. **kwargs: Keyword-arguments to pass to the receiver.
[ "Emit", "a", "signal", "by", "serially", "calling", "each", "registered", "signal", "receiver", "for", "the", "signal", "." ]
9097dae7a433f5b98c18171c4a5598f69a7d30af
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/signals.py#L194-L214
train
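A usage sketch tying emit and disconnect together; it assumes the module also exposes a `connect(signal, receiver)` registration hook, which does not appear in this excerpt::

    from mixbox import signals

    def on_saved(path):
        print('saved to {}'.format(path))

    signals.connect('document.saved', on_saved)     # assumed registration API
    signals.emit('document.saved', '/tmp/out.xml')  # calls on_saved('/tmp/out.xml')
    signals.disconnect('document.saved', on_saved)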
yamins81/tabular
tabular/fast.py
arrayuniqify
def arrayuniqify(X, retainorder=False): """ Very fast uniqify routine for numpy arrays. **Parameters** **X** : numpy array Determine the unique elements of this numpy array. **retainorder** : Boolean, optional Whether or not to return indices corresponding to unique values of `X` that also sort the values. Default value is `False`, in which case `[D,s]` is returned. This can be used to produce a uniqified version of `X` by simply taking:: X[s][D] or:: X[s[D.nonzero()[0]]] **Returns** **D** : numpy array List of "first differences" in the sorted version of `X`. Returned when `retainorder` is `False` (default). **s** : numpy array Permutation that will sort `X`. Returned when `retainorder` is `False` (default). **ind** : numpy array List of indices that correspond to unique values of `X`, without sorting those values. Returned when `retainorder` is `True`. **See Also:** :func:`tabular.fast.recarrayuniqify` """ s = X.argsort() X = X[s] D = np.append([True],X[1:] != X[:-1]) if retainorder: DD = np.append(D.nonzero()[0],len(X)) ind = [min(s[x:DD[i+1]]) for (i,x) in enumerate(DD[:-1])] ind.sort() return ind else: return [D,s]
python
def arrayuniqify(X, retainorder=False): """ Very fast uniqify routine for numpy arrays. **Parameters** **X** : numpy array Determine the unique elements of this numpy array. **retainorder** : Boolean, optional Whether or not to return indices corresponding to unique values of `X` that also sort the values. Default value is `False`, in which case `[D,s]` is returned. This can be used to produce a uniqified version of `X` by simply taking:: X[s][D] or:: X[s[D.nonzero()[0]]] **Returns** **D** : numpy array List of "first differences" in the sorted version of `X`. Returned when `retainorder` is `False` (default). **s** : numpy array Permutation that will sort `X`. Returned when `retainorder` is `False` (default). **ind** : numpy array List of indices that correspond to unique values of `X`, without sorting those values. Returned when `retainorder` is `True`. **See Also:** :func:`tabular.fast.recarrayuniqify` """ s = X.argsort() X = X[s] D = np.append([True],X[1:] != X[:-1]) if retainorder: DD = np.append(D.nonzero()[0],len(X)) ind = [min(s[x:DD[i+1]]) for (i,x) in enumerate(DD[:-1])] ind.sort() return ind else: return [D,s]
[ "def", "arrayuniqify", "(", "X", ",", "retainorder", "=", "False", ")", ":", "s", "=", "X", ".", "argsort", "(", ")", "X", "=", "X", "[", "s", "]", "D", "=", "np", ".", "append", "(", "[", "True", "]", ",", "X", "[", "1", ":", "]", "!=", "X", "[", ":", "-", "1", "]", ")", "if", "retainorder", ":", "DD", "=", "np", ".", "append", "(", "D", ".", "nonzero", "(", ")", "[", "0", "]", ",", "len", "(", "X", ")", ")", "ind", "=", "[", "min", "(", "s", "[", "x", ":", "DD", "[", "i", "+", "1", "]", "]", ")", "for", "(", "i", ",", "x", ")", "in", "enumerate", "(", "DD", "[", ":", "-", "1", "]", ")", "]", "ind", ".", "sort", "(", ")", "return", "ind", "else", ":", "return", "[", "D", ",", "s", "]" ]
Very fast uniqify routine for numpy arrays. **Parameters** **X** : numpy array Determine the unique elements of this numpy array. **retainorder** : Boolean, optional Whether or not to return indices corresponding to unique values of `X` that also sort the values. Default value is `False`, in which case `[D,s]` is returned. This can be used to produce a uniqified version of `X` by simply taking:: X[s][D] or:: X[s[D.nonzero()[0]]] **Returns** **D** : numpy array List of "first differences" in the sorted version of `X`. Returned when `retainorder` is `False` (default). **s** : numpy array Permutation that will sort `X`. Returned when `retainorder` is `False` (default). **ind** : numpy array List of indices that correspond to unique values of `X`, without sorting those values. Returned when `retainorder` is `True`. **See Also:** :func:`tabular.fast.recarrayuniqify`
[ "Very", "fast", "uniqify", "routine", "for", "numpy", "arrays", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/fast.py#L19-L75
train
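A worked example of both return modes, following the `X[s][D]` recipe from the docstring above::

    import numpy as np
    from tabular.fast import arrayuniqify

    X = np.array([3, 1, 3, 2, 1])
    [D, s] = arrayuniqify(X)
    print(X[s][D])                          # [1 2 3] -- sorted unique values
    ind = arrayuniqify(X, retainorder=True)
    print(X[ind])                           # [3 1 2] -- unique, original order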
yamins81/tabular
tabular/fast.py
equalspairs
def equalspairs(X, Y): """ Indices of elements in a sorted numpy array equal to those in another. Given numpy array `X` and sorted numpy array `Y`, determine the indices of elements in `Y` equal to elements in `X`. Returns `[A,B]` where `A` and `B` are numpy arrays of indices into `Y` such that:: Y[A[i]:B[i]] = Y[Y == X[i]] `A[i] = B[i] = 0` if `X[i]` is not in `Y`. **Parameters** **X** : numpy array Numpy array to compare to the sorted numpy array `Y`. **Y** : numpy array Sorted numpy array. Determine the indices of elements of `Y` equal to those in numpy array `X`. **Returns** **A** : numpy array List of indices in `Y`, `len(A) = len(X)`. **B** : numpy array List of indices in `Y`, `len(B) = len(X)`. **See Also:** :func:`tabular.fast.recarrayequalspairs` """ T = Y.copy() R = (T[1:] != T[:-1]).nonzero()[0] R = np.append(R,np.array([len(T)-1])) M = R[R.searchsorted(range(len(T)))] D = T.searchsorted(X) T = np.append(T,np.array([0])) M = np.append(M,np.array([0])) A = (T[D] == X) * D B = (T[D] == X) * (M[D] + 1) return [A,B]
python
def equalspairs(X, Y): """ Indices of elements in a sorted numpy array equal to those in another. Given numpy array `X` and sorted numpy array `Y`, determine the indices of elements in `Y` equal to elements in `X`. Returns `[A,B]` where `A` and `B` are numpy arrays of indices into `Y` such that:: Y[A[i]:B[i]] = Y[Y == X[i]] `A[i] = B[i] = 0` if `X[i]` is not in `Y`. **Parameters** **X** : numpy array Numpy array to compare to the sorted numpy array `Y`. **Y** : numpy array Sorted numpy array. Determine the indices of elements of `Y` equal to those in numpy array `X`. **Returns** **A** : numpy array List of indices in `Y`, `len(A) = len(X)`. **B** : numpy array List of indices in `Y`, `len(B) = len(X)`. **See Also:** :func:`tabular.fast.recarrayequalspairs` """ T = Y.copy() R = (T[1:] != T[:-1]).nonzero()[0] R = np.append(R,np.array([len(T)-1])) M = R[R.searchsorted(range(len(T)))] D = T.searchsorted(X) T = np.append(T,np.array([0])) M = np.append(M,np.array([0])) A = (T[D] == X) * D B = (T[D] == X) * (M[D] + 1) return [A,B]
[ "def", "equalspairs", "(", "X", ",", "Y", ")", ":", "T", "=", "Y", ".", "copy", "(", ")", "R", "=", "(", "T", "[", "1", ":", "]", "!=", "T", "[", ":", "-", "1", "]", ")", ".", "nonzero", "(", ")", "[", "0", "]", "R", "=", "np", ".", "append", "(", "R", ",", "np", ".", "array", "(", "[", "len", "(", "T", ")", "-", "1", "]", ")", ")", "M", "=", "R", "[", "R", ".", "searchsorted", "(", "range", "(", "len", "(", "T", ")", ")", ")", "]", "D", "=", "T", ".", "searchsorted", "(", "X", ")", "T", "=", "np", ".", "append", "(", "T", ",", "np", ".", "array", "(", "[", "0", "]", ")", ")", "M", "=", "np", ".", "append", "(", "M", ",", "np", ".", "array", "(", "[", "0", "]", ")", ")", "A", "=", "(", "T", "[", "D", "]", "==", "X", ")", "*", "D", "B", "=", "(", "T", "[", "D", "]", "==", "X", ")", "*", "(", "M", "[", "D", "]", "+", "1", ")", "return", "[", "A", ",", "B", "]" ]
Indices of elements in a sorted numpy array equal to those in another. Given numpy array `X` and sorted numpy array `Y`, determine the indices of elements in `Y` equal to elements in `X`. Returns `[A,B]` where `A` and `B` are numpy arrays of indices into `Y` such that:: Y[A[i]:B[i]] = Y[Y == X[i]] `A[i] = B[i] = 0` if `X[i]` is not in `Y`. **Parameters** **X** : numpy array Numpy array to compare to the sorted numpy array `Y`. **Y** : numpy array Sorted numpy array. Determine the indices of elements of `Y` equal to those in numpy array `X`. **Returns** **A** : numpy array List of indices in `Y`, `len(A) = len(X)`. **B** : numpy array List of indices in `Y`, `len(B) = len(X)`. **See Also:** :func:`tabular.fast.recarrayequalspairs`
[ "Indices", "of", "elements", "in", "a", "sorted", "numpy", "array", "equal", "to", "those", "in", "another", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/fast.py#L142-L191
train
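A worked example; note that `Y` must already be sorted for the searchsorted logic above to hold::

    import numpy as np
    from tabular.fast import equalspairs

    X = np.array([1, 3, 5])
    Y = np.array([1, 1, 2, 3, 3, 3])
    [A, B] = equalspairs(X, Y)
    print(A)                 # [0 3 0]
    print(B)                 # [2 6 0]
    print(Y[A[1]:B[1]])      # [3 3 3] -- every entry of Y equal to X[1]
    # X[2] = 5 is absent from Y, so A[2] = B[2] = 0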
yamins81/tabular
tabular/fast.py
isin
def isin(X,Y): """ Indices of elements in a numpy array that appear in another. Fast routine for determining indices of elements in numpy array `X` that appear in numpy array `Y`, returning a boolean array `Z` such that:: Z[i] = X[i] in Y **Parameters** **X** : numpy array Numpy array to compare to numpy array `Y`. For each element of `X`, ask if it is in `Y`. **Y** : numpy array Numpy array to which numpy array `X` is compared. For each element of `X`, ask if it is in `Y`. **Returns** **b** : numpy array (bool) Boolean numpy array, `len(b) = len(X)`. **See Also:** :func:`tabular.fast.recarrayisin`, :func:`tabular.fast.arraydifference` """ if len(Y) > 0: T = Y.copy() T.sort() D = T.searchsorted(X) T = np.append(T,np.array([0])) W = (T[D] == X) if isinstance(W,bool): return np.zeros((len(X),),bool) else: return (T[D] == X) else: return np.zeros((len(X),),bool)
python
def isin(X,Y): """ Indices of elements in a numpy array that appear in another. Fast routine for determining indices of elements in numpy array `X` that appear in numpy array `Y`, returning a boolean array `Z` such that:: Z[i] = X[i] in Y **Parameters** **X** : numpy array Numpy array to compare to numpy array `Y`. For each element of `X`, ask if it is in `Y`. **Y** : numpy array Numpy array to which numpy array `X` is compared. For each element of `X`, ask if it is in `Y`. **Returns** **b** : numpy array (bool) Boolean numpy array, `len(b) = len(X)`. **See Also:** :func:`tabular.fast.recarrayisin`, :func:`tabular.fast.arraydifference` """ if len(Y) > 0: T = Y.copy() T.sort() D = T.searchsorted(X) T = np.append(T,np.array([0])) W = (T[D] == X) if isinstance(W,bool): return np.zeros((len(X),),bool) else: return (T[D] == X) else: return np.zeros((len(X),),bool)
[ "def", "isin", "(", "X", ",", "Y", ")", ":", "if", "len", "(", "Y", ")", ">", "0", ":", "T", "=", "Y", ".", "copy", "(", ")", "T", ".", "sort", "(", ")", "D", "=", "T", ".", "searchsorted", "(", "X", ")", "T", "=", "np", ".", "append", "(", "T", ",", "np", ".", "array", "(", "[", "0", "]", ")", ")", "W", "=", "(", "T", "[", "D", "]", "==", "X", ")", "if", "isinstance", "(", "W", ",", "bool", ")", ":", "return", "np", ".", "zeros", "(", "(", "len", "(", "X", ")", ",", ")", ",", "bool", ")", "else", ":", "return", "(", "T", "[", "D", "]", "==", "X", ")", "else", ":", "return", "np", ".", "zeros", "(", "(", "len", "(", "X", ")", ",", ")", ",", "bool", ")" ]
Indices of elements in a numpy array that appear in another. Fast routine for determining indices of elements in numpy array `X` that appear in numpy array `Y`, returning a boolean array `Z` such that:: Z[i] = X[i] in Y **Parameters** **X** : numpy array Numpy array to compare to numpy array `Y`. For each element of `X`, ask if it is in `Y`. **Y** : numpy array Numpy array to which numpy array `X` is compared. For each element of `X`, ask if it is in `Y`. **Returns** **b** : numpy array (bool) Boolean numpy array, `len(b) = len(X)`. **See Also:** :func:`tabular.fast.recarrayisin`, :func:`tabular.fast.arraydifference`
[ "Indices", "of", "elements", "in", "a", "numpy", "array", "that", "appear", "in", "another", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/fast.py#L260-L304
train
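A worked example of the boolean membership mask described above::

    import numpy as np
    from tabular.fast import isin

    X = np.array([1, 2, 3, 4])
    Y = np.array([2, 4, 9])
    print(isin(X, Y))        # [False  True False  True]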
yamins81/tabular
tabular/fast.py
arraydifference
def arraydifference(X,Y): """ Elements of a numpy array that do not appear in another. Fast routine for determining which elements in numpy array `X` do not appear in numpy array `Y`. **Parameters** **X** : numpy array Numpy array to compare to numpy array `Y`. Return subset of `X` corresponding to elements not in `Y`. **Y** : numpy array Numpy array to which numpy array `X` is compared. Return subset of `X` corresponding to elements not in `Y`. **Returns** **Z** : numpy array Subset of `X` corresponding to elements not in `Y`. **See Also:** :func:`tabular.fast.recarraydifference`, :func:`tabular.fast.isin` """ if len(Y) > 0: Z = isin(X,Y) return X[np.invert(Z)] else: return X
python
def arraydifference(X,Y): """ Elements of a numpy array that do not appear in another. Fast routine for determining which elements in numpy array `X` do not appear in numpy array `Y`. **Parameters** **X** : numpy array Numpy array to compare to numpy array `Y`. Return subset of `X` corresponding to elements not in `Y`. **Y** : numpy array Numpy array to which numpy array `X` is compared. Return subset of `X` corresponding to elements not in `Y`. **Returns** **Z** : numpy array Subset of `X` corresponding to elements not in `Y`. **See Also:** :func:`tabular.fast.recarraydifference`, :func:`tabular.fast.isin` """ if len(Y) > 0: Z = isin(X,Y) return X[np.invert(Z)] else: return X
[ "def", "arraydifference", "(", "X", ",", "Y", ")", ":", "if", "len", "(", "Y", ")", ">", "0", ":", "Z", "=", "isin", "(", "X", ",", "Y", ")", "return", "X", "[", "np", ".", "invert", "(", "Z", ")", "]", "else", ":", "return", "X" ]
Elements of a numpy array that do not appear in another. Fast routine for determining which elements in numpy array `X` do not appear in numpy array `Y`. **Parameters** **X** : numpy array Numpy array to compare to numpy array `Y`. Return subset of `X` corresponding to elements not in `Y`. **Y** : numpy array Numpy array to which numpy array `X` is compared. Return subset of `X` corresponding to elements not in `Y`. **Returns** **Z** : numpy array Subset of `X` corresponding to elements not in `Y`. **See Also:** :func:`tabular.fast.recarraydifference`, :func:`tabular.fast.isin`
[ "Elements", "of", "a", "numpy", "array", "that", "do", "not", "appear", "in", "another", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/fast.py#L357-L391
train
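A worked example of the set-difference filter above::

    import numpy as np
    from tabular.fast import arraydifference

    X = np.array([1, 2, 3, 4])
    Y = np.array([2, 4])
    print(arraydifference(X, Y))   # [1 3]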
yamins81/tabular
tabular/fast.py
arraymax
def arraymax(X,Y): """ Fast "vectorized" max function for element-wise comparison of two numpy arrays. For two numpy arrays `X` and `Y` of equal length, return numpy array `Z` such that:: Z[i] = max(X[i],Y[i]) **Parameters** **X** : numpy array Numpy array; `len(X) = len(Y)`. **Y** : numpy array Numpy array; `len(Y) = len(X)`. **Returns** **Z** : numpy array Numpy array such that `Z[i] = max(X[i],Y[i])`. **See Also** :func:`tabular.fast.arraymin` """ Z = np.zeros((len(X),), int) A = X <= Y B = Y < X Z[A] = Y[A] Z[B] = X[B] return Z
python
def arraymax(X,Y): """ Fast "vectorized" max function for element-wise comparison of two numpy arrays. For two numpy arrays `X` and `Y` of equal length, return numpy array `Z` such that:: Z[i] = max(X[i],Y[i]) **Parameters** **X** : numpy array Numpy array; `len(X) = len(Y)`. **Y** : numpy array Numpy array; `len(Y) = len(X)`. **Returns** **Z** : numpy array Numpy array such that `Z[i] = max(X[i],Y[i])`. **See Also** :func:`tabular.fast.arraymin` """ Z = np.zeros((len(X),), int) A = X <= Y B = Y < X Z[A] = Y[A] Z[B] = X[B] return Z
[ "def", "arraymax", "(", "X", ",", "Y", ")", ":", "Z", "=", "np", ".", "zeros", "(", "(", "len", "(", "X", ")", ",", ")", ",", "int", ")", "A", "=", "X", "<=", "Y", "B", "=", "Y", "<", "X", "Z", "[", "A", "]", "=", "Y", "[", "A", "]", "Z", "[", "B", "]", "=", "X", "[", "B", "]", "return", "Z" ]
Fast "vectorized" max function for element-wise comparison of two numpy arrays. For two numpy arrays `X` and `Y` of equal length, return numpy array `Z` such that:: Z[i] = max(X[i],Y[i]) **Parameters** **X** : numpy array Numpy array; `len(X) = len(Y)`. **Y** : numpy array Numpy array; `len(Y) = len(X)`. **Returns** **Z** : numpy array Numpy array such that `Z[i] = max(X[i],Y[i])`. **See Also** :func:`tabular.fast.arraymin`
[ "Fast", "vectorized", "max", "function", "for", "element", "-", "wise", "comparison", "of", "two", "numpy", "arrays", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/fast.py#L434-L469
train
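A worked example of the element-wise maximum above::

    import numpy as np
    from tabular.fast import arraymax

    X = np.array([1, 5, 3])
    Y = np.array([4, 2, 3])
    print(arraymax(X, Y))    # [4 5 3]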
PSPC-SPAC-buyandsell/von_agent
von_agent/wallet.py
Wallet._seed2did
async def _seed2did(self) -> str: """ Derive DID, as per indy-sdk, from seed. :return: DID """ rv = None dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle)) # list if dids_with_meta: for did_with_meta in dids_with_meta: # dict if 'metadata' in did_with_meta: try: meta = json.loads(did_with_meta['metadata']) if isinstance(meta, dict) and meta.get('seed', None) == self._seed: rv = did_with_meta.get('did') except json.decoder.JSONDecodeError: continue # it's not one of ours, carry on if not rv: # seed not in metadata, generate did again on temp wallet temp_wallet = await Wallet( self._seed, '{}.seed2did'.format(self.name), None, {'auto-remove': True}).create() rv = temp_wallet.did await temp_wallet.remove() return rv
python
async def _seed2did(self) -> str: """ Derive DID, as per indy-sdk, from seed. :return: DID """ rv = None dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle)) # list if dids_with_meta: for did_with_meta in dids_with_meta: # dict if 'metadata' in did_with_meta: try: meta = json.loads(did_with_meta['metadata']) if isinstance(meta, dict) and meta.get('seed', None) == self._seed: rv = did_with_meta.get('did') except json.decoder.JSONDecodeError: continue # it's not one of ours, carry on if not rv: # seed not in metadata, generate did again on temp wallet temp_wallet = await Wallet( self._seed, '{}.seed2did'.format(self.name), None, {'auto-remove': True}).create() rv = temp_wallet.did await temp_wallet.remove() return rv
[ "async", "def", "_seed2did", "(", "self", ")", "->", "str", ":", "rv", "=", "None", "dids_with_meta", "=", "json", ".", "loads", "(", "await", "did", ".", "list_my_dids_with_meta", "(", "self", ".", "handle", ")", ")", "# list", "if", "dids_with_meta", ":", "for", "did_with_meta", "in", "dids_with_meta", ":", "# dict", "if", "'metadata'", "in", "did_with_meta", ":", "try", ":", "meta", "=", "json", ".", "loads", "(", "did_with_meta", "[", "'metadata'", "]", ")", "if", "isinstance", "(", "meta", ",", "dict", ")", "and", "meta", ".", "get", "(", "'seed'", ",", "None", ")", "==", "self", ".", "_seed", ":", "rv", "=", "did_with_meta", ".", "get", "(", "'did'", ")", "except", "json", ".", "decoder", ".", "JSONDecodeError", ":", "continue", "# it's not one of ours, carry on", "if", "not", "rv", ":", "# seed not in metadata, generate did again on temp wallet", "temp_wallet", "=", "await", "Wallet", "(", "self", ".", "_seed", ",", "'{}.seed2did'", ".", "format", "(", "self", ".", "name", ")", ",", "None", ",", "{", "'auto-remove'", ":", "True", "}", ")", ".", "create", "(", ")", "rv", "=", "temp_wallet", ".", "did", "await", "temp_wallet", ".", "remove", "(", ")", "return", "rv" ]
Derive DID, as per indy-sdk, from seed. :return: DID
[ "Derive", "DID", "as", "per", "indy", "-", "sdk", "from", "seed", "." ]
0b1c17cca3bd178b6e6974af84dbac1dfce5cf45
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/wallet.py#L181-L211
train
PSPC-SPAC-buyandsell/von_agent
von_agent/wallet.py
Wallet.remove
async def remove(self) -> None: """ Remove serialized wallet if it exists. """ LOGGER.debug('Wallet.remove >>>') try: LOGGER.info('Removing wallet: %s', self.name) await wallet.delete_wallet(json.dumps(self.cfg), json.dumps(self.access_creds)) except IndyError as x_indy: LOGGER.info('Abstaining from wallet removal; indy-sdk error code %s', x_indy.error_code) LOGGER.debug('Wallet.remove <<<')
python
async def remove(self) -> None: """ Remove serialized wallet if it exists. """ LOGGER.debug('Wallet.remove >>>') try: LOGGER.info('Removing wallet: %s', self.name) await wallet.delete_wallet(json.dumps(self.cfg), json.dumps(self.access_creds)) except IndyError as x_indy: LOGGER.info('Abstaining from wallet removal; indy-sdk error code %s', x_indy.error_code) LOGGER.debug('Wallet.remove <<<')
[ "async", "def", "remove", "(", "self", ")", "->", "None", ":", "LOGGER", ".", "debug", "(", "'Wallet.remove >>>'", ")", "try", ":", "LOGGER", ".", "info", "(", "'Removing wallet: %s'", ",", "self", ".", "name", ")", "await", "wallet", ".", "delete_wallet", "(", "json", ".", "dumps", "(", "self", ".", "cfg", ")", ",", "json", ".", "dumps", "(", "self", ".", "access_creds", ")", ")", "except", "IndyError", "as", "x_indy", ":", "LOGGER", ".", "info", "(", "'Abstaining from wallet removal; indy-sdk error code %s'", ",", "x_indy", ".", "error_code", ")", "LOGGER", ".", "debug", "(", "'Wallet.remove <<<'", ")" ]
Remove serialized wallet if it exists.
[ "Remove", "serialized", "wallet", "if", "it", "exists", "." ]
0b1c17cca3bd178b6e6974af84dbac1dfce5cf45
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/wallet.py#L353-L366
train
yamins81/tabular
tabular/io.py
loadSV
def loadSV(fname, shape=None, titles=None, aligned=False, byteorder=None, renamer=None, **kwargs): """ Load a delimited text file to a numpy record array. Basically, this function calls loadSVcols and combines columns returned by that function into a numpy ndarray with structured dtype. Also uses and returns metadata including column names, formats, coloring, &c. if these items are determined during the loading process. **Parameters** **fname** : string or file object Path (or file object) corresponding to a separated variable (CSV) text file. **names** : list of strings Sets the names of the columns of the resulting tabarray. If not specified, `names` value is determined first by looking for metadata in the header of the file, and if that is not found, names are assigned by NumPy's `f0, f1, ... fn` convention. See **namesinheader** parameter below. **formats** : string or list of strings Sets the datatypes of the columns. The value of `formats` can be a list or comma-delimited string of values describing values for each column (e.g. "str,str,int,float" or ["str", "str", "int", "float"]), a single value to apply to all columns, or anything that can be used in numpy.rec.array constructor. If the **formats** (or **dtype**) parameter are not specified, typing is done by inference. See **typer** parameter below. **dtype** : numpy dtype object Sets the numpy dtype of the resulting tabarray, combining column format and column name information. If dtype is set, any **names** and **formats** specifications will be overridden. If the **dtype** (or **formats**) parameter are not specified, typing is done by inference. See **typer** parameter below. The **names**, **formats** and **dtype** parameters duplicate parameters of the NumPy record array creation interface. Additional parameters of the NumPy interface that are passed through are **shape**, **titles**, **byteorder** and **aligned** (see NumPy documentation for more information.) **kwargs**: keyword argument dictionary of variable length Contains various parameters to be passed down to loadSVcols. These may include **skiprows**, **comments**, **delimiter**, **lineterminator**, **uselines**, **usecols**, **excludecols**, **metametadata**, **namesinheader**, **headerlines**, **valuefixer**, **linefixer**, **colfixer**, **delimiter_regex**, **inflines**, **typer**, **missingvalues**, **fillingvalues**, **verbosity**, and various CSV module parameters like **escapechar**, **quoting**, **quotechar**, **doublequote**, **skipinitialspace**. **Returns** **R** : numpy record array Record array constructed from data in the SV file **metadata** : dictionary Metadata read and constructed during process of reading file. **See Also:** :func:`tabular.io.loadSVcols`, :func:`tabular.io.saveSV`, :func:`tabular.io.DEFAULT_TYPEINFERER` """ [columns, metadata] = loadSVcols(fname, **kwargs) if 'names' in metadata.keys(): names = metadata['names'] else: names = None if 'formats' in metadata.keys(): formats = metadata['formats'] else: formats = None if 'dtype' in metadata.keys(): dtype = metadata['dtype'] else: dtype = None if renamer is not None: print 'Trying user-given renamer ...' renamed = renamer(names) if len(renamed) == len(uniqify(renamed)): names = renamed print '''... using renamed names (original names will be in return metadata)''' else: print '... renamer failed to produce unique names, not using.' if names and len(names) != len(uniqify(names)): print 'Names are not unique, reverting to default naming scheme.' names = None return [utils.fromarrays(columns, type=np.ndarray, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder), metadata]
python
def loadSV(fname, shape=None, titles=None, aligned=False, byteorder=None, renamer=None, **kwargs): """ Load a delimited text file to a numpy record array. Basically, this function calls loadSVcols and combines columns returned by that function into a numpy ndarray with structured dtype. Also uses and returns metadata including column names, formats, coloring, &c. if these items are determined during the loading process. **Parameters** **fname** : string or file object Path (or file object) corresponding to a separated variable (CSV) text file. **names** : list of strings Sets the names of the columns of the resulting tabarray. If not specified, `names` value is determined first by looking for metadata in the header of the file, and if that is not found, names are assigned by NumPy's `f0, f1, ... fn` convention. See **namesinheader** parameter below. **formats** : string or list of strings Sets the datatypes of the columns. The value of `formats` can be a list or comma-delimited string of values describing values for each column (e.g. "str,str,int,float" or ["str", "str", "int", "float"]), a single value to apply to all columns, or anything that can be used in numpy.rec.array constructor. If the **formats** (or **dtype**) parameter are not specified, typing is done by inference. See **typer** parameter below. **dtype** : numpy dtype object Sets the numpy dtype of the resulting tabarray, combining column format and column name information. If dtype is set, any **names** and **formats** specifications will be overridden. If the **dtype** (or **formats**) parameter are not specified, typing is done by inference. See **typer** parameter below. The **names**, **formats** and **dtype** parameters duplicate parameters of the NumPy record array creation interface. Additional parameters of the NumPy interface that are passed through are **shape**, **titles**, **byteorder** and **aligned** (see NumPy documentation for more information.) **kwargs**: keyword argument dictionary of variable length Contains various parameters to be passed down to loadSVcols. These may include **skiprows**, **comments**, **delimiter**, **lineterminator**, **uselines**, **usecols**, **excludecols**, **metametadata**, **namesinheader**, **headerlines**, **valuefixer**, **linefixer**, **colfixer**, **delimiter_regex**, **inflines**, **typer**, **missingvalues**, **fillingvalues**, **verbosity**, and various CSV module parameters like **escapechar**, **quoting**, **quotechar**, **doublequote**, **skipinitialspace**. **Returns** **R** : numpy record array Record array constructed from data in the SV file **metadata** : dictionary Metadata read and constructed during process of reading file. **See Also:** :func:`tabular.io.loadSVcols`, :func:`tabular.io.saveSV`, :func:`tabular.io.DEFAULT_TYPEINFERER` """ [columns, metadata] = loadSVcols(fname, **kwargs) if 'names' in metadata.keys(): names = metadata['names'] else: names = None if 'formats' in metadata.keys(): formats = metadata['formats'] else: formats = None if 'dtype' in metadata.keys(): dtype = metadata['dtype'] else: dtype = None if renamer is not None: print 'Trying user-given renamer ...' renamed = renamer(names) if len(renamed) == len(uniqify(renamed)): names = renamed print '''... using renamed names (original names will be in return metadata)''' else: print '... renamer failed to produce unique names, not using.' if names and len(names) != len(uniqify(names)): print 'Names are not unique, reverting to default naming scheme.' names = None return [utils.fromarrays(columns, type=np.ndarray, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder), metadata]
[ "def", "loadSV", "(", "fname", ",", "shape", "=", "None", ",", "titles", "=", "None", ",", "aligned", "=", "False", ",", "byteorder", "=", "None", ",", "renamer", "=", "None", ",", "*", "*", "kwargs", ")", ":", "[", "columns", ",", "metadata", "]", "=", "loadSVcols", "(", "fname", ",", "*", "*", "kwargs", ")", "if", "'names'", "in", "metadata", ".", "keys", "(", ")", ":", "names", "=", "metadata", "[", "'names'", "]", "else", ":", "names", "=", "None", "if", "'formats'", "in", "metadata", ".", "keys", "(", ")", ":", "formats", "=", "metadata", "[", "'formats'", "]", "else", ":", "formats", "=", "None", "if", "'dtype'", "in", "metadata", ".", "keys", "(", ")", ":", "dtype", "=", "metadata", "[", "'dtype'", "]", "else", ":", "dtype", "=", "None", "if", "renamer", "is", "not", "None", ":", "print", "'Trying user-given renamer ...'", "renamed", "=", "renamer", "(", "names", ")", "if", "len", "(", "renamed", ")", "==", "len", "(", "uniqify", "(", "renamed", ")", ")", ":", "names", "=", "renamed", "print", "'''... using renamed names (original names will be in return \n metadata)'''", "else", ":", "print", "'... renamer failed to produce unique names, not using.'", "if", "names", "and", "len", "(", "names", ")", "!=", "len", "(", "uniqify", "(", "names", ")", ")", ":", "print", "'Names are not unique, reverting to default naming scheme.'", "names", "=", "None", "return", "[", "utils", ".", "fromarrays", "(", "columns", ",", "type", "=", "np", ".", "ndarray", ",", "dtype", "=", "dtype", ",", "shape", "=", "shape", ",", "formats", "=", "formats", ",", "names", "=", "names", ",", "titles", "=", "titles", ",", "aligned", "=", "aligned", ",", "byteorder", "=", "byteorder", ")", ",", "metadata", "]" ]
Load a delimited text file to a numpy record array. Basically, this function calls loadSVcols and combines columns returned by that function into a numpy ndarray with structured dtype. Also uses and returns metadata including column names, formats, coloring, &c. if these items are determined during the loading process. **Parameters** **fname** : string or file object Path (or file object) corresponding to a separated variable (CSV) text file. **names** : list of strings Sets the names of the columns of the resulting tabarray. If not specified, `names` value is determined first by looking for metadata in the header of the file, and if that is not found, names are assigned by NumPy's `f0, f1, ... fn` convention. See **namesinheader** parameter below. **formats** : string or list of strings Sets the datatypes of the columns. The value of `formats` can be a list or comma-delimited string of values describing values for each column (e.g. "str,str,int,float" or ["str", "str", "int", "float"]), a single value to apply to all columns, or anything that can be used in numpy.rec.array constructor. If the **formats** (or **dtype**) parameter are not specified, typing is done by inference. See **typer** parameter below. **dtype** : numpy dtype object Sets the numpy dtype of the resulting tabarray, combining column format and column name information. If dtype is set, any **names** and **formats** specifications will be overridden. If the **dtype** (or **formats**) parameter are not specified, typing is done by inference. See **typer** parameter below. The **names**, **formats** and **dtype** parameters duplicate parameters of the NumPy record array creation interface. Additional parameters of the NumPy interface that are passed through are **shape**, **titles**, **byteorder** and **aligned** (see NumPy documentation for more information.) **kwargs**: keyword argument dictionary of variable length Contains various parameters to be passed down to loadSVcols. These may include **skiprows**, **comments**, **delimiter**, **lineterminator**, **uselines**, **usecols**, **excludecols**, **metametadata**, **namesinheader**, **headerlines**, **valuefixer**, **linefixer**, **colfixer**, **delimiter_regex**, **inflines**, **typer**, **missingvalues**, **fillingvalues**, **verbosity**, and various CSV module parameters like **escapechar**, **quoting**, **quotechar**, **doublequote**, **skipinitialspace**. **Returns** **R** : numpy record array Record array constructed from data in the SV file **metadata** : dictionary Metadata read and constructed during process of reading file. **See Also:** :func:`tabular.io.loadSVcols`, :func:`tabular.io.saveSV`, :func:`tabular.io.DEFAULT_TYPEINFERER`
[ "Load", "a", "delimited", "text", "file", "to", "a", "numpy", "record", "array", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/io.py#L39-L152
train
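A hedged usage sketch for the loader above; 'data.csv' is an assumed comma-delimited file whose first line holds column names (tabular itself targets Python 2, but the calls below are version-neutral)::

    from tabular.io import loadSV

    [R, metadata] = loadSV('data.csv', delimiter=',')   # 'data.csv' is assumed
    print(R.dtype.names)       # column names taken from the header
    print(metadata['names'])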
yamins81/tabular
tabular/io.py
loadSVrecs
def loadSVrecs(fname, uselines=None, skiprows=0, linefixer=None, delimiter_regex=None, verbosity=DEFAULT_VERBOSITY, **metadata): """ Load a separated value text file to a list of lists of strings of records. Takes a tabular text file with a specified delimiter and end-of-line character, and returns data as a list of lists of strings corresponding to records (rows). Also uses and returns metadata (including column names, formats, coloring, &c.) if these items are determined during the loading process. **Parameters** **fname** : string or file object Path (or file object) corresponding to a separated variable (CSV) text file. **delimiter** : single-character string When reading text file, character to use as delimiter to split fields. If not specified, the delimiter is determined first by looking for special-format metadata specifying the delimiter, and then if no specification is found, attempts are made to infer delimiter from file contents. (See **inflines** parameter below.) **delimiter_regex** : regular expression (compiled or in string format) Regular expression to use to recognize delimiters, in place of a single character. (For instance, to have whitespace delimiting, use delimiter_regex = '[\s*]+') **lineterminator** : single-character string Line terminator to use when reading in using SVfile. **skipinitialspace** : boolean If true, strips whitespace following the delimiter from field. The **delimiter**, **lineterminator** and **skipinitialspace** parameters are passed on as parameters to the python CSV module, which is used for reading in delimited text files. Additional parameters from that interface that are replicated in this constructor include **quotechar**, **escapechar**, **quoting**, **doublequote** and **dialect** (see CSV module documentation for more information). **skiprows** : non-negative integer, optional When reading from a text file, the first `skiprows` lines are ignored. Default is 0, i.e. no rows are skipped. **uselines** : pair of non-negative integers, optional When reading from a text file, range of lines of data to load. (In contrast to **skiprows**, which specifies file rows to ignore before looking for header information, **uselines** specifies which data (non-header) lines to use, after header has been stripped and processed.) See **headerlines** below. **comments** : single-character string, optional When reading from a text file, character used to distinguish header lines. If specified, any lines beginning with this character at the top of the file are assumed to contain header information and not row data. **headerlines** : integer, optional When reading from a text file, the number of lines at the top of the file (after the first `skiprows` lines) corresponding to the header of the file, where metadata can be found. Lines after headerlines are assumed to contain row contents. If not specified, value is determined first by looking for special metametadata in first line of file (see Tabular reference documentation for more information about this), and if no such metadata is found, is inferred by looking at file contents. **namesinheader** : Boolean, optional When reading from a text file, if `namesinheader == True`, then assume the column names are in the last header line (unless overridden by existing metadata or metametadata directive). Default is True. **linefixer** : callable, optional This callable is applied to every line in the file. If specified, the callable is applied directly to the strings in the file, after they're split in lines but before they're split into fields. The purpose is to make lines with errors or mistakes amenable to delimiter inference and field-splitting. **inflines** : integer, optional Number of lines of file to use as sample data when inferring delimiter and header. **metametadata** : dictionary of integers or pairs of integers Specifies supplementary metametadata information for use with SVfile loading. See Tabular reference documentation for more information. **Returns** **records** : list of lists of strings List of lists corresponding to records (rows) of data. **metadata** : dictionary Metadata read and constructed during process of reading file. **See Also:** :func:`tabular.io.loadSV`, :func:`tabular.io.saveSV`, :func:`tabular.io.DEFAULT_TYPEINFERER` """ if delimiter_regex and isinstance(delimiter_regex, types.StringType): import re delimiter_regex = re.compile(delimiter_regex) [metadata, inferedlines, WHOLETHING] = getmetadata(fname, skiprows=skiprows, linefixer=linefixer, delimiter_regex=delimiter_regex, verbosity=verbosity, **metadata) if uselines is None: uselines = (0,False) if is_string_like(fname): fh = file(fname, 'rU') elif hasattr(fname, 'readline'): fh = fname else: raise ValueError('fname must be a string or file handle') for _ind in range(skiprows+uselines[0] + metadata['headerlines']): fh.readline() if linefixer or delimiter_regex: fh2 = tempfile.TemporaryFile('w+b') F = fh.read().strip('\n').split('\n') if linefixer: F = map(linefixer,F) if delimiter_regex: F = map(lambda line: delimiter_regex.sub(metadata['dialect'].delimiter, line), F) fh2.write('\n'.join(F)) fh2.seek(0) fh = fh2 reader = csv.reader(fh, dialect=metadata['dialect']) if uselines[1]: linelist = [] for ln in reader: if reader.line_num <= uselines[1] - uselines[0]: linelist.append(ln) else: break else: linelist = list(reader) fh.close() if linelist[-1] == []: linelist.pop(-1) return [linelist,metadata]
python
def loadSVrecs(fname, uselines=None, skiprows=0, linefixer=None, delimiter_regex=None, verbosity=DEFAULT_VERBOSITY, **metadata): """ Load a separated value text file to a list of lists of strings of records. Takes a tabular text file with a specified delimiter and end-of-line character, and returns data as a list of lists of strings corresponding to records (rows). Also uses and returns metadata (including column names, formats, coloring, &c.) if these items are determined during the loading process. **Parameters** **fname** : string or file object Path (or file object) corresponding to a separated variable (CSV) text file. **delimiter** : single-character string When reading text file, character to use as delimiter to split fields. If not specified, the delimiter is determined first by looking for special-format metadata specifying the delimiter, and then if no specification is found, attempts are made to infer delimiter from file contents. (See **inflines** parameter below.) **delimiter_regex** : regular expression (compiled or in string format) Regular expression to use to recognize delimiters, in place of a single character. (For instance, to have whitespace delimiting, use delimiter_regex = '[\s*]+') **lineterminator** : single-character string Line terminator to use when reading in using SVfile. **skipinitialspace** : boolean If true, strips whitespace following the delimiter from field. The **delimiter**, **lineterminator** and **skipinitialspace** parameters are passed on as parameters to the python CSV module, which is used for reading in delimited text files. Additional parameters from that interface that are replicated in this constructor include **quotechar**, **escapechar**, **quoting**, **doublequote** and **dialect** (see CSV module documentation for more information). **skiprows** : non-negative integer, optional When reading from a text file, the first `skiprows` lines are ignored. Default is 0, i.e., no rows are skipped. **uselines** : pair of non-negative integers, optional When reading from a text file, range of lines of data to load. (In contrast to **skiprows**, which specifies file rows to ignore before looking for header information, **uselines** specifies which data (non-header) lines to use, after the header has been stripped and processed.) See **headerlines** below. **comments** : single-character string, optional When reading from a text file, character used to distinguish header lines. If specified, any lines beginning with this character at the top of the file are assumed to contain header information and not row data. **headerlines** : integer, optional When reading from a text file, the number of lines at the top of the file (after the first `skiprows` lines) corresponding to the header of the file, where metadata can be found. Lines after headerlines are assumed to contain row contents. If not specified, value is determined first by looking for special metametadata in first line of file (see Tabular reference documentation for more information about this), and if no such metadata is found, is inferred by looking at file contents. **namesinheader** : Boolean, optional When reading from a text file, if `namesinheader == True`, then assume the column names are in the last header line (unless overridden by existing metadata or metametadata directive). Default is True. **linefixer** : callable, optional This callable is applied to every line in the file.
If specified, the callable is applied directly to the strings in the file, after they're split into lines but before they're split into fields. The purpose is to make lines with errors or mistakes amenable to delimiter inference and field-splitting. **inflines** : integer, optional Number of lines of file to use as sample data when inferring delimiter and header. **metametadata** : dictionary of integers or pairs of integers Specifies supplementary metametadata information for use with SVfile loading. See Tabular reference documentation for more information **Returns** **records** : list of lists of strings List of lists corresponding to records (rows) of data. **metadata** : dictionary Metadata read and constructed during process of reading file. **See Also:** :func:`tabular.io.loadSV`, :func:`tabular.io.saveSV`, :func:`tabular.io.DEFAULT_TYPEINFERER` """ if delimiter_regex and isinstance(delimiter_regex, types.StringType): import re delimiter_regex = re.compile(delimiter_regex) [metadata, inferedlines, WHOLETHING] = getmetadata(fname, skiprows=skiprows, linefixer=linefixer, delimiter_regex=delimiter_regex, verbosity=verbosity, **metadata) if uselines is None: uselines = (0,False) if is_string_like(fname): fh = file(fname, 'rU') elif hasattr(fname, 'readline'): fh = fname else: raise ValueError('fname must be a string or file handle') for _ind in range(skiprows+uselines[0] + metadata['headerlines']): fh.readline() if linefixer or delimiter_regex: fh2 = tempfile.TemporaryFile('w+b') F = fh.read().strip('\n').split('\n') if linefixer: F = map(linefixer,F) if delimiter_regex: F = map(lambda line: delimiter_regex.sub(metadata['dialect'].delimiter, line), F) fh2.write('\n'.join(F)) fh2.seek(0) fh = fh2 reader = csv.reader(fh, dialect=metadata['dialect']) if uselines[1]: linelist = [] for ln in reader: if reader.line_num <= uselines[1] - uselines[0]: linelist.append(ln) else: break else: linelist = list(reader) fh.close() if linelist[-1] == []: linelist.pop(-1) return [linelist,metadata]
[ "def", "loadSVrecs", "(", "fname", ",", "uselines", "=", "None", ",", "skiprows", "=", "0", ",", "linefixer", "=", "None", ",", "delimiter_regex", "=", "None", ",", "verbosity", "=", "DEFAULT_VERBOSITY", ",", "*", "*", "metadata", ")", ":", "if", "delimiter_regex", "and", "isinstance", "(", "delimiter_regex", ",", "types", ".", "StringType", ")", ":", "import", "re", "delimiter_regex", "=", "re", ".", "compile", "(", "delimiter_regex", ")", "[", "metadata", ",", "inferedlines", ",", "WHOLETHING", "]", "=", "getmetadata", "(", "fname", ",", "skiprows", "=", "skiprows", ",", "linefixer", "=", "linefixer", ",", "delimiter_regex", "=", "delimiter_regex", ",", "verbosity", "=", "verbosity", ",", "*", "*", "metadata", ")", "if", "uselines", "is", "None", ":", "uselines", "=", "(", "0", ",", "False", ")", "if", "is_string_like", "(", "fname", ")", ":", "fh", "=", "file", "(", "fname", ",", "'rU'", ")", "elif", "hasattr", "(", "fname", ",", "'readline'", ")", ":", "fh", "=", "fname", "else", ":", "raise", "ValueError", "(", "'fname must be a string or file handle'", ")", "for", "_ind", "in", "range", "(", "skiprows", "+", "uselines", "[", "0", "]", "+", "metadata", "[", "'headerlines'", "]", ")", ":", "fh", ".", "readline", "(", ")", "if", "linefixer", "or", "delimiter_regex", ":", "fh2", "=", "tempfile", ".", "TemporaryFile", "(", "'w+b'", ")", "F", "=", "fh", ".", "read", "(", ")", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", "'\\n'", ")", "if", "linefixer", ":", "F", "=", "map", "(", "linefixer", ",", "F", ")", "if", "delimiter_regex", ":", "F", "=", "map", "(", "lambda", "line", ":", "delimiter_regex", ".", "sub", "(", "metadata", "[", "'dialect'", "]", ".", "delimiter", ",", "line", ")", ",", "F", ")", "fh2", ".", "write", "(", "'\\n'", ".", "join", "(", "F", ")", ")", "fh2", ".", "seek", "(", "0", ")", "fh", "=", "fh2", "reader", "=", "csv", ".", "reader", "(", "fh", ",", "dialect", "=", "metadata", "[", "'dialect'", "]", ")", "if", "uselines", "[", "1", "]", ":", "linelist", "=", "[", "]", "for", "ln", "in", "reader", ":", "if", "reader", ".", "line_num", "<=", "uselines", "[", "1", "]", "-", "uselines", "[", "0", "]", ":", "linelist", ".", "append", "(", "ln", ")", "else", ":", "break", "else", ":", "linelist", "=", "list", "(", "reader", ")", "fh", ".", "close", "(", ")", "if", "linelist", "[", "-", "1", "]", "==", "[", "]", ":", "linelist", ".", "pop", "(", "-", "1", ")", "return", "[", "linelist", ",", "metadata", "]" ]
Load a separated value text file to a list of lists of strings of records. Takes a tabular text file with a specified delimiter and end-of-line character, and returns data as a list of lists of strings corresponding to records (rows). Also uses and returns metadata (including column names, formats, coloring, &c.) if these items are determined during the loading process. **Parameters** **fname** : string or file object Path (or file object) corresponding to a separated variable (CSV) text file. **delimiter** : single-character string When reading text file, character to use as delimiter to split fields. If not specified, the delimiter is determined first by looking for special-format metadata specifying the delimiter, and then if no specification is found, attempts are made to infer delimiter from file contents. (See **inflines** parameter below.) **delimiter_regex** : regular expression (compiled or in string format) Regular expression to use to recognize delimiters, in place of a single character. (For instance, to have whitespace delimiting, use delimiter_regex = '[\s*]+') **lineterminator** : single-character string Line terminator to use when reading in using SVfile. **skipinitialspace** : boolean If true, strips whitespace following the delimiter from field. The **delimiter**, **lineterminator** and **skipinitialspace** parameters are passed on as parameters to the python CSV module, which is used for reading in delimited text files. Additional parameters from that interface that are replicated in this constructor include **quotechar**, **escapechar**, **quoting**, **doublequote** and **dialect** (see CSV module documentation for more information). **skiprows** : non-negative integer, optional When reading from a text file, the first `skiprows` lines are ignored. Default is 0, i.e., no rows are skipped. **uselines** : pair of non-negative integers, optional When reading from a text file, range of lines of data to load. (In contrast to **skiprows**, which specifies file rows to ignore before looking for header information, **uselines** specifies which data (non-header) lines to use, after the header has been stripped and processed.) See **headerlines** below. **comments** : single-character string, optional When reading from a text file, character used to distinguish header lines. If specified, any lines beginning with this character at the top of the file are assumed to contain header information and not row data. **headerlines** : integer, optional When reading from a text file, the number of lines at the top of the file (after the first `skiprows` lines) corresponding to the header of the file, where metadata can be found. Lines after headerlines are assumed to contain row contents. If not specified, value is determined first by looking for special metametadata in first line of file (see Tabular reference documentation for more information about this), and if no such metadata is found, is inferred by looking at file contents. **namesinheader** : Boolean, optional When reading from a text file, if `namesinheader == True`, then assume the column names are in the last header line (unless overridden by existing metadata or metametadata directive). Default is True. **linefixer** : callable, optional This callable is applied to every line in the file. If specified, the callable is applied directly to the strings in the file, after they're split into lines but before they're split into fields. The purpose is to make lines with errors or mistakes amenable to delimiter inference and field-splitting.
**inflines** : integer, optional Number of lines of file to use as sample data when inferring delimiter and header. **metametadata** : dictionary of integers or pairs of integers Specifies supplementary metametadata information for use with SVfile loading. See Tabular reference documentation for more information **Returns** **records** : list of lists of strings List of lists corresponding to records (rows) of data. **metadata** : dictionary Metadata read and constructed during process of reading file. **See Also:** :func:`tabular.io.loadSV`, :func:`tabular.io.saveSV`, :func:`tabular.io.DEFAULT_TYPEINFERER`
[ "Load", "a", "separated", "value", "text", "file", "to", "a", "list", "of", "lists", "of", "strings", "of", "records", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/io.py#L401-L572
train
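A minimal usage sketch for loadSVrecs above (hedged: 'data.csv' is a hypothetical comma-separated file; the sketch assumes the tabular package is importable, and since the function uses file() and types.StringType it runs under Python 2):

    from tabular.io import loadSVrecs

    # loadSVrecs returns [records, metadata]; records is a list of lists of field strings
    recs, metadata = loadSVrecs('data.csv')
    print(metadata['dialect'].delimiter)  # the delimiter that was given or inferred
    print(recs[0])                        # first data row, as a list of strings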
yamins81/tabular
tabular/io.py
parsetypes
def parsetypes(dtype): """ Parse the types from a structured numpy dtype object. Return list of string representations of types from a structured numpy dtype object, e.g. ['int', 'float', 'str']. Used by :func:`tabular.io.saveSV` to write out type information in the header. **Parameters** **dtype** : numpy dtype object Structured numpy dtype object to parse. **Returns** **out** : list of strings List of strings corresponding to numpy types:: [dtype[i].name.strip('1234567890').rstrip('ing') \ for i in range(len(dtype))] """ return [dtype[i].name.strip('1234567890').rstrip('ing') for i in range(len(dtype))]
python
def parsetypes(dtype): """ Parse the types from a structured numpy dtype object. Return list of string representations of types from a structured numpy dtype object, e.g. ['int', 'float', 'str']. Used by :func:`tabular.io.saveSV` to write out type information in the header. **Parameters** **dtype** : numpy dtype object Structured numpy dtype object to parse. **Returns** **out** : list of strings List of strings corresponding to numpy types:: [dtype[i].name.strip('1234567890').rstrip('ing') \ for i in range(len(dtype))] """ return [dtype[i].name.strip('1234567890').rstrip('ing') for i in range(len(dtype))]
[ "def", "parsetypes", "(", "dtype", ")", ":", "return", "[", "dtype", "[", "i", "]", ".", "name", ".", "strip", "(", "'1234567890'", ")", ".", "rstrip", "(", "'ing'", ")", "for", "i", "in", "range", "(", "len", "(", "dtype", ")", ")", "]" ]
Parse the types from a structured numpy dtype object. Return list of string representations of types from a structured numpy dtype object, e.g. ['int', 'float', 'str']. Used by :func:`tabular.io.saveSV` to write out type information in the header. **Parameters** **dtype** : numpy dtype object Structured numpy dtype object to parse. **Returns** **out** : list of strings List of strings corresponding to numpy types:: [dtype[i].name.strip('1234567890').rstrip('ing') \ for i in range(len(dtype))]
[ "Parse", "the", "types", "from", "a", "structured", "numpy", "dtype", "object", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/io.py#L1808-L1835
train
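A hedged example of parsetypes (assumes numpy is installed; the exact dtype .name strings vary by numpy version, so the string-column result shown reflects the Python 2-era numpy this code targets, where dtype('|S5').name is 'string40'):

    import numpy as np
    from tabular.io import parsetypes

    dt = np.dtype([('height', 'int64'), ('weight', 'float64'), ('label', '|S5')])
    # 'int64' -> strip digits -> 'int'; 'float64' -> 'float'; 'string40' -> 'string' -> rstrip('ing') -> 'str'
    print(parsetypes(dt))  # expected: ['int', 'float', 'str']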
yamins81/tabular
tabular/io.py
thresholdcoloring
def thresholdcoloring(coloring, names): """ Threshold a coloring dictionary for a given list of column names. Threshold `coloring` based on `names`, a list of strings in:: coloring.values() **Parameters** **coloring** : dictionary Hierarchical structure on the columns given in the header of the file; an attribute of tabarrays. See :func:`tabular.tab.tabarray.__new__` for more information about coloring. **names** : list of strings List of strings giving column names. **Returns** **newcoloring** : dictionary The thresholded coloring dictionary. """ for key in coloring.keys(): if len([k for k in coloring[key] if k in names]) == 0: coloring.pop(key) else: coloring[key] = utils.uniqify([k for k in coloring[key] if k in names]) return coloring
python
def thresholdcoloring(coloring, names): """ Threshold a coloring dictionary for a given list of column names. Threshold `coloring` based on `names`, a list of strings in:: coloring.values() **Parameters** **coloring** : dictionary Hierarchical structure on the columns given in the header of the file; an attribute of tabarrays. See :func:`tabular.tab.tabarray.__new__` for more information about coloring. **names** : list of strings List of strings giving column names. **Returns** **newcoloring** : dictionary The thresholded coloring dictionary. """ for key in coloring.keys(): if len([k for k in coloring[key] if k in names]) == 0: coloring.pop(key) else: coloring[key] = utils.uniqify([k for k in coloring[key] if k in names]) return coloring
[ "def", "thresholdcoloring", "(", "coloring", ",", "names", ")", ":", "for", "key", "in", "coloring", ".", "keys", "(", ")", ":", "if", "len", "(", "[", "k", "for", "k", "in", "coloring", "[", "key", "]", "if", "k", "in", "names", "]", ")", "==", "0", ":", "coloring", ".", "pop", "(", "key", ")", "else", ":", "coloring", "[", "key", "]", "=", "utils", ".", "uniqify", "(", "[", "k", "for", "k", "in", "coloring", "[", "key", "]", "if", "k", "in", "names", "]", ")", "return", "coloring" ]
Threshold a coloring dictionary for a given list of column names. Threshold `coloring` based on `names`, a list of strings in:: coloring.values() **Parameters** **coloring** : dictionary Hierarchical structure on the columns given in the header of the file; an attribute of tabarrays. See :func:`tabular.tab.tabarray.__new__` for more information about coloring. **names** : list of strings List of strings giving column names. **Returns** **newcoloring** : dictionary The thresholded coloring dictionary.
[ "Threshold", "a", "coloring", "dictionary", "for", "a", "given", "list", "of", "column", "names", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/io.py#L1838-L1873
train
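A small illustration of thresholdcoloring (the coloring dictionary and column names below are made up; note the function mutates and returns the same dictionary):

    from tabular.io import thresholdcoloring

    coloring = {'physical': ['height', 'weight'], 'meta': ['id']}
    # 'meta' is dropped since none of its columns appear in names; 'physical' keeps both columns
    print(thresholdcoloring(coloring, ['height', 'weight', 'label']))
    # expected: {'physical': ['height', 'weight']}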
yamins81/tabular
tabular/io.py
makedir
def makedir(dir_name): """ "Strong" directory maker. "Strong" version of `os.mkdir`. If `dir_name` already exists, this deletes it first. **Parameters** **dir_name** : string Path to a file directory that may or may not already exist. **See Also:** :func:`tabular.io.delete`, `os <http://docs.python.org/library/os.html>`_ """ if os.path.exists(dir_name): delete(dir_name) os.mkdir(dir_name)
python
def makedir(dir_name): """ "Strong" directory maker. "Strong" version of `os.mkdir`. If `dir_name` already exists, this deletes it first. **Parameters** **dir_name** : string Path to a file directory that may or may not already exist. **See Also:** :func:`tabular.io.delete`, `os <http://docs.python.org/library/os.html>`_ """ if os.path.exists(dir_name): delete(dir_name) os.mkdir(dir_name)
[ "def", "makedir", "(", "dir_name", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "dir_name", ")", ":", "delete", "(", "dir_name", ")", "os", ".", "mkdir", "(", "dir_name", ")" ]
"Strong" directory maker. "Strong" version of `os.mkdir`. If `dir_name` already exists, this deletes it first. **Parameters** **dir_name** : string Path to a file directory that may or may not already exist. **See Also:** :func:`tabular.io.delete`, `os <http://docs.python.org/library/os.html>`_
[ "Strong", "directory", "maker", "." ]
1caf091c8c395960a9ad7078f95158b533cc52dd
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/io.py#L1924-L1945
train
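Usage is a single call, but note the destructive semantics (the path here is hypothetical):

    from tabular.io import makedir

    # if '/tmp/scratch' already exists it is deleted first (via tabular.io.delete), then recreated
    makedir('/tmp/scratch')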
inveniosoftware/invenio-communities
invenio_communities/views/ui.py
pass_community
def pass_community(f): """Decorator to pass community.""" @wraps(f) def inner(community_id, *args, **kwargs): c = Community.get(community_id) if c is None: abort(404) return f(c, *args, **kwargs) return inner
python
def pass_community(f): """Decorator to pass community.""" @wraps(f) def inner(community_id, *args, **kwargs): c = Community.get(community_id) if c is None: abort(404) return f(c, *args, **kwargs) return inner
[ "def", "pass_community", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "inner", "(", "community_id", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "c", "=", "Community", ".", "get", "(", "community_id", ")", "if", "c", "is", "None", ":", "abort", "(", "404", ")", "return", "f", "(", "c", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "inner" ]
Decorator to pass community.
[ "Decorator", "to", "pass", "community", "." ]
5c4de6783724d276ae1b6dd13a399a9e22fadc7a
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/ui.py#L56-L64
train
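A hedged sketch of how pass_community wraps a Flask view (the blueprint and route are hypothetical; the decorator swaps the community_id URL argument for a loaded Community, aborting with 404 when the id is unknown):

    from flask import Blueprint
    from invenio_communities.views.ui import pass_community

    blueprint = Blueprint('communities', __name__)

    @blueprint.route('/communities/<string:community_id>/')
    @pass_community
    def detail(community):
        # `community` is the resolved Community object, not the raw id
        return community.id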
inveniosoftware/invenio-communities
invenio_communities/views/ui.py
permission_required
def permission_required(action): """Decorator to require permission.""" def decorator(f): @wraps(f) def inner(community, *args, **kwargs): permission = current_permission_factory(community, action=action) if not permission.can(): abort(403) return f(community, *args, **kwargs) return inner return decorator
python
def permission_required(action): """Decorator to require permission.""" def decorator(f): @wraps(f) def inner(community, *args, **kwargs): permission = current_permission_factory(community, action=action) if not permission.can(): abort(403) return f(community, *args, **kwargs) return inner return decorator
[ "def", "permission_required", "(", "action", ")", ":", "def", "decorator", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "inner", "(", "community", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "permission", "=", "current_permission_factory", "(", "community", ",", "action", "=", "action", ")", "if", "not", "permission", ".", "can", "(", ")", ":", "abort", "(", "403", ")", "return", "f", "(", "community", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "inner", "return", "decorator" ]
Decorator to require permission.
[ "Decorator", "to", "require", "permission", "." ]
5c4de6783724d276ae1b6dd13a399a9e22fadc7a
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/ui.py#L67-L77
train
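Because permission_required expects the already-resolved community as its first argument, it stacks under pass_community; a hedged sketch continuing the hypothetical blueprint above (the route and the 'community-curate' action name are also hypothetical):

    from invenio_communities.views.ui import pass_community, permission_required

    @blueprint.route('/communities/<string:community_id>/curate/')
    @pass_community                            # resolves community_id -> Community (or 404)
    @permission_required('community-curate')   # hypothetical action name; aborts with 403 if denied
    def curate(community):
        return community.id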
inveniosoftware/invenio-communities
invenio_communities/views/ui.py
format_item
def format_item(item, template, name='item'): """Render a template to a string with the provided item in context.""" ctx = {name: item} return render_template_to_string(template, **ctx)
python
def format_item(item, template, name='item'): """Render a template to a string with the provided item in context.""" ctx = {name: item} return render_template_to_string(template, **ctx)
[ "def", "format_item", "(", "item", ",", "template", ",", "name", "=", "'item'", ")", ":", "ctx", "=", "{", "name", ":", "item", "}", "return", "render_template_to_string", "(", "template", ",", "*", "*", "ctx", ")" ]
Render a template to a string with the provided item in context.
[ "Render", "a", "template", "to", "a", "string", "with", "the", "provided", "item", "in", "context", "." ]
5c4de6783724d276ae1b6dd13a399a9e22fadc7a
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/ui.py#L81-L84
train
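A hedged sketch of format_item (the template path is hypothetical; the name argument controls the context variable the template sees, defaulting to 'item'):

    from invenio_communities.views.ui import format_item

    # renders the template with {'item': {'title': 'My record'}} in its context
    html = format_item({'title': 'My record'}, 'communities/macros/item.html')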
inveniosoftware/invenio-communities
invenio_communities/views/ui.py
new
def new(): """Create a new community.""" form = CommunityForm(formdata=request.values) ctx = mycommunities_ctx() ctx.update({ 'form': form, 'is_new': True, 'community': None, }) if form.validate_on_submit(): data = copy.deepcopy(form.data) community_id = data.pop('identifier') del data['logo'] community = Community.create( community_id, current_user.get_id(), **data) file = request.files.get('logo', None) if file: if not community.save_logo(file.stream, file.filename): form.logo.errors.append(_( 'Cannot add this file as a logo. Supported formats: ' 'PNG, JPG and SVG. Max file size: 1.5 MB.')) db.session.rollback() community = None if community: db.session.commit() flash("Community was successfully created.", category='success') return redirect(url_for('.edit', community_id=community.id)) return render_template( current_app.config['COMMUNITIES_NEW_TEMPLATE'], community_form=form, **ctx )
python
def new(): """Create a new community.""" form = CommunityForm(formdata=request.values) ctx = mycommunities_ctx() ctx.update({ 'form': form, 'is_new': True, 'community': None, }) if form.validate_on_submit(): data = copy.deepcopy(form.data) community_id = data.pop('identifier') del data['logo'] community = Community.create( community_id, current_user.get_id(), **data) file = request.files.get('logo', None) if file: if not community.save_logo(file.stream, file.filename): form.logo.errors.append(_( 'Cannot add this file as a logo. Supported formats: ' 'PNG, JPG and SVG. Max file size: 1.5 MB.')) db.session.rollback() community = None if community: db.session.commit() flash("Community was successfully created.", category='success') return redirect(url_for('.edit', community_id=community.id)) return render_template( current_app.config['COMMUNITIES_NEW_TEMPLATE'], community_form=form, **ctx )
[ "def", "new", "(", ")", ":", "form", "=", "CommunityForm", "(", "formdata", "=", "request", ".", "values", ")", "ctx", "=", "mycommunities_ctx", "(", ")", "ctx", ".", "update", "(", "{", "'form'", ":", "form", ",", "'is_new'", ":", "True", ",", "'community'", ":", "None", ",", "}", ")", "if", "form", ".", "validate_on_submit", "(", ")", ":", "data", "=", "copy", ".", "deepcopy", "(", "form", ".", "data", ")", "community_id", "=", "data", ".", "pop", "(", "'identifier'", ")", "del", "data", "[", "'logo'", "]", "community", "=", "Community", ".", "create", "(", "community_id", ",", "current_user", ".", "get_id", "(", ")", ",", "*", "*", "data", ")", "file", "=", "request", ".", "files", ".", "get", "(", "'logo'", ",", "None", ")", "if", "file", ":", "if", "not", "community", ".", "save_logo", "(", "file", ".", "stream", ",", "file", ".", "filename", ")", ":", "form", ".", "logo", ".", "errors", ".", "append", "(", "_", "(", "'Cannot add this file as a logo. Supported formats: '", "'PNG, JPG and SVG. Max file size: 1.5 MB.'", ")", ")", "db", ".", "session", ".", "rollback", "(", ")", "community", "=", "None", "if", "community", ":", "db", ".", "session", ".", "commit", "(", ")", "flash", "(", "\"Community was successfully created.\"", ",", "category", "=", "'success'", ")", "return", "redirect", "(", "url_for", "(", "'.edit'", ",", "community_id", "=", "community", ".", "id", ")", ")", "return", "render_template", "(", "current_app", ".", "config", "[", "'COMMUNITIES_NEW_TEMPLATE'", "]", ",", "community_form", "=", "form", ",", "*", "*", "ctx", ")" ]
Create a new community.
[ "Create", "a", "new", "community", "." ]
5c4de6783724d276ae1b6dd13a399a9e22fadc7a
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/ui.py#L171-L209
train
inveniosoftware/invenio-communities
invenio_communities/views/ui.py
edit
def edit(community): """Create or edit a community.""" form = EditCommunityForm(formdata=request.values, obj=community) deleteform = DeleteCommunityForm() ctx = mycommunities_ctx() ctx.update({ 'form': form, 'is_new': False, 'community': community, 'deleteform': deleteform, }) if form.validate_on_submit(): for field, val in form.data.items(): setattr(community, field, val) file = request.files.get('logo', None) if file: if not community.save_logo(file.stream, file.filename): form.logo.errors.append(_( 'Cannot add this file as a logo. Supported formats: ' 'PNG, JPG and SVG. Max file size: 1.5 MB.')) if not form.logo.errors: db.session.commit() flash("Community successfully edited.", category='success') return redirect(url_for('.edit', community_id=community.id)) return render_template( current_app.config['COMMUNITIES_EDIT_TEMPLATE'], **ctx )
python
def edit(community): """Create or edit a community.""" form = EditCommunityForm(formdata=request.values, obj=community) deleteform = DeleteCommunityForm() ctx = mycommunities_ctx() ctx.update({ 'form': form, 'is_new': False, 'community': community, 'deleteform': deleteform, }) if form.validate_on_submit(): for field, val in form.data.items(): setattr(community, field, val) file = request.files.get('logo', None) if file: if not community.save_logo(file.stream, file.filename): form.logo.errors.append(_( 'Cannot add this file as a logo. Supported formats: ' 'PNG, JPG and SVG. Max file size: 1.5 MB.')) if not form.logo.errors: db.session.commit() flash("Community successfully edited.", category='success') return redirect(url_for('.edit', community_id=community.id)) return render_template( current_app.config['COMMUNITIES_EDIT_TEMPLATE'], **ctx )
[ "def", "edit", "(", "community", ")", ":", "form", "=", "EditCommunityForm", "(", "formdata", "=", "request", ".", "values", ",", "obj", "=", "community", ")", "deleteform", "=", "DeleteCommunityForm", "(", ")", "ctx", "=", "mycommunities_ctx", "(", ")", "ctx", ".", "update", "(", "{", "'form'", ":", "form", ",", "'is_new'", ":", "False", ",", "'community'", ":", "community", ",", "'deleteform'", ":", "deleteform", ",", "}", ")", "if", "form", ".", "validate_on_submit", "(", ")", ":", "for", "field", ",", "val", "in", "form", ".", "data", ".", "items", "(", ")", ":", "setattr", "(", "community", ",", "field", ",", "val", ")", "file", "=", "request", ".", "files", ".", "get", "(", "'logo'", ",", "None", ")", "if", "file", ":", "if", "not", "community", ".", "save_logo", "(", "file", ".", "stream", ",", "file", ".", "filename", ")", ":", "form", ".", "logo", ".", "errors", ".", "append", "(", "_", "(", "'Cannot add this file as a logo. Supported formats: '", "'PNG, JPG and SVG. Max file size: 1.5 MB.'", ")", ")", "if", "not", "form", ".", "logo", ".", "errors", ":", "db", ".", "session", ".", "commit", "(", ")", "flash", "(", "\"Community successfully edited.\"", ",", "category", "=", "'success'", ")", "return", "redirect", "(", "url_for", "(", "'.edit'", ",", "community_id", "=", "community", ".", "id", ")", ")", "return", "render_template", "(", "current_app", ".", "config", "[", "'COMMUNITIES_EDIT_TEMPLATE'", "]", ",", "*", "*", "ctx", ")" ]
Create or edit a community.
[ "Create", "or", "edit", "a", "community", "." ]
5c4de6783724d276ae1b6dd13a399a9e22fadc7a
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/ui.py#L216-L247
train
inveniosoftware/invenio-communities
invenio_communities/views/ui.py
delete
def delete(community): """Delete a community.""" deleteform = DeleteCommunityForm(formdata=request.values) ctx = mycommunities_ctx() ctx.update({ 'deleteform': deleteform, 'is_new': False, 'community': community, }) if deleteform.validate_on_submit(): community.delete() db.session.commit() flash("Community was deleted.", category='success') return redirect(url_for('.index')) else: flash("Community could not be deleted.", category='warning') return redirect(url_for('.edit', community_id=community.id))
python
def delete(community): """Delete a community.""" deleteform = DeleteCommunityForm(formdata=request.values) ctx = mycommunities_ctx() ctx.update({ 'deleteform': deleteform, 'is_new': False, 'community': community, }) if deleteform.validate_on_submit(): community.delete() db.session.commit() flash("Community was deleted.", category='success') return redirect(url_for('.index')) else: flash("Community could not be deleted.", category='warning') return redirect(url_for('.edit', community_id=community.id))
[ "def", "delete", "(", "community", ")", ":", "deleteform", "=", "DeleteCommunityForm", "(", "formdata", "=", "request", ".", "values", ")", "ctx", "=", "mycommunities_ctx", "(", ")", "ctx", ".", "update", "(", "{", "'deleteform'", ":", "deleteform", ",", "'is_new'", ":", "False", ",", "'community'", ":", "community", ",", "}", ")", "if", "deleteform", ".", "validate_on_submit", "(", ")", ":", "community", ".", "delete", "(", ")", "db", ".", "session", ".", "commit", "(", ")", "flash", "(", "\"Community was deleted.\"", ",", "category", "=", "'success'", ")", "return", "redirect", "(", "url_for", "(", "'.index'", ")", ")", "else", ":", "flash", "(", "\"Community could not be deleted.\"", ",", "category", "=", "'warning'", ")", "return", "redirect", "(", "url_for", "(", "'.edit'", ",", "community_id", "=", "community", ".", "id", ")", ")" ]
Delete a community.
[ "Delete", "a", "community", "." ]
5c4de6783724d276ae1b6dd13a399a9e22fadc7a
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/ui.py#L254-L271
train
OpenTreeOfLife/peyotl
tutorials/ot-oti-find-tree.py
ot_find_tree
def ot_find_tree(arg_dict, exact=True, verbose=False, oti_wrapper=None): """Uses a peyotl wrapper around an Open Tree web service to get a list of trees including values `value` for a given property to be searched on, `property`. The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used). All other arguments correspond to the arguments of the web-service call. """ if oti_wrapper is None: from peyotl.sugar import oti oti_wrapper = oti return oti_wrapper.find_trees(arg_dict, exact=exact, verbose=verbose, wrap_response=True)
python
def ot_find_tree(arg_dict, exact=True, verbose=False, oti_wrapper=None): """Uses a peyotl wrapper around an Open Tree web service to get a list of trees including values `value` for a given property to be searched on, `property`. The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used). All other arguments correspond to the arguments of the web-service call. """ if oti_wrapper is None: from peyotl.sugar import oti oti_wrapper = oti return oti_wrapper.find_trees(arg_dict, exact=exact, verbose=verbose, wrap_response=True)
[ "def", "ot_find_tree", "(", "arg_dict", ",", "exact", "=", "True", ",", "verbose", "=", "False", ",", "oti_wrapper", "=", "None", ")", ":", "if", "oti_wrapper", "is", "None", ":", "from", "peyotl", ".", "sugar", "import", "oti", "oti_wrapper", "=", "oti", "return", "oti_wrapper", ".", "find_trees", "(", "arg_dict", ",", "exact", "=", "exact", ",", "verbose", "=", "verbose", ",", "wrap_response", "=", "True", ")" ]
Uses a peyotl wrapper around an Open Tree web service to get a list of trees including values `value` for a given property to be searched on, `property`. The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used). All other arguments correspond to the arguments of the web-service call.
[ "Uses", "a", "peyotl", "wrapper", "around", "an", "Open", "Tree", "web", "service", "to", "get", "a", "list", "of", "trees", "including", "values", "value", "for", "a", "given", "property", "to", "be", "searched", "on", "property", "." ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-oti-find-tree.py#L12-L24
train
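A hedged invocation sketch (the property/value pair in arg_dict is hypothetical and must follow whatever the oti find_trees web service accepts; with oti_wrapper=None the function imports the default wrapper from peyotl.sugar itself):

    # find trees matching a hypothetical property/value query
    found = ot_find_tree({'ot:ottTaxonName': 'Aves'}, exact=True, verbose=False)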
hsolbrig/pyjsg
pyjsg/jsglib/typing_patch_36.py
is_iterable
def is_iterable(etype) -> bool: """ Determine whether etype is a List or other iterable """ return type(etype) is GenericMeta and issubclass(etype.__extra__, Iterable)
python
def is_iterable(etype) -> bool: """ Determine whether etype is a List or other iterable """ return type(etype) is GenericMeta and issubclass(etype.__extra__, Iterable)
[ "def", "is_iterable", "(", "etype", ")", "->", "bool", ":", "return", "type", "(", "etype", ")", "is", "GenericMeta", "and", "issubclass", "(", "etype", ".", "__extra__", ",", "Iterable", ")" ]
Determine whether etype is a List or other iterable
[ "Determine", "whether", "etype", "is", "a", "List", "or", "other", "iterable" ]
9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/jsglib/typing_patch_36.py#L29-L31
train
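A hedged check of is_iterable (this relies on GenericMeta and __extra__, which exist in Python 3.6's typing module but were removed in 3.7, so the sketch assumes 3.6):

    from typing import List, Optional

    print(is_iterable(List[int]))      # True: List's runtime class (list) is Iterable
    print(is_iterable(Optional[int]))  # False: a Union is not a GenericMeta instance
    print(is_iterable(int))            # False: plain classes are not GenericMeta instances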
OpenTreeOfLife/peyotl
tutorials/ot-tree-of-life-mrca.py
main
def main(argv): """This function sets up a command-line option parser and then calls fetch_and_write_mrca to do all of the real work. """ import argparse description = 'Uses Open Tree of Life web services to get the MRCA for a set of OTT IDs.' parser = argparse.ArgumentParser(prog='ot-tree-of-life-mrca', description=description) parser.add_argument('ottid', nargs='*', type=int, help='OTT IDs') parser.add_argument('--subtree', action='store_true', default=False, required=False, help='write a newick representation of the subtree rooted at this mrca') parser.add_argument('--induced-subtree', action='store_true', default=False, required=False, help='write a newick representation of the topology of the requested taxa in the synthetic tree (the subtree pruned to just the queried taxa)') parser.add_argument('--details', action='store_true', default=False, required=False, help='report more details about the mrca node') args = parser.parse_args(argv) id_list = args.ottid if not id_list: sys.stderr.write('No OTT IDs provided. Running a dummy query with 770302 770315\n') id_list = [770302, 770315] fetch_and_write_mrca(id_list, args.details, args.subtree, args.induced_subtree, sys.stdout, sys.stderr)
python
def main(argv): """This function sets up a command-line option parser and then calls fetch_and_write_mrca to do all of the real work. """ import argparse description = 'Uses Open Tree of Life web services to get the MRCA for a set of OTT IDs.' parser = argparse.ArgumentParser(prog='ot-tree-of-life-mrca', description=description) parser.add_argument('ottid', nargs='*', type=int, help='OTT IDs') parser.add_argument('--subtree', action='store_true', default=False, required=False, help='write a newick representation of the subtree rooted at this mrca') parser.add_argument('--induced-subtree', action='store_true', default=False, required=False, help='write a newick representation of the topology of the requested taxa in the synthetic tree (the subtree pruned to just the queried taxa)') parser.add_argument('--details', action='store_true', default=False, required=False, help='report more details about the mrca node') args = parser.parse_args(argv) id_list = args.ottid if not id_list: sys.stderr.write('No OTT IDs provided. Running a dummy query with 770302 770315\n') id_list = [770302, 770315] fetch_and_write_mrca(id_list, args.details, args.subtree, args.induced_subtree, sys.stdout, sys.stderr)
[ "def", "main", "(", "argv", ")", ":", "import", "argparse", "description", "=", "'Uses Open Tree of Life web services to get the MRCA for a set of OTT IDs.'", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'ot-tree-of-life-mrca'", ",", "description", "=", "description", ")", "parser", ".", "add_argument", "(", "'ottid'", ",", "nargs", "=", "'*'", ",", "type", "=", "int", ",", "help", "=", "'OTT IDs'", ")", "parser", ".", "add_argument", "(", "'--subtree'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "required", "=", "False", ",", "help", "=", "'write a newick representation of the subtree rooted at this mrca'", ")", "parser", ".", "add_argument", "(", "'--induced-subtree'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "required", "=", "False", ",", "help", "=", "'write a newick representation of the topology of the requested taxa in the synthetic tree (the subtree pruned to just the queried taxa)'", ")", "parser", ".", "add_argument", "(", "'--details'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "required", "=", "False", ",", "help", "=", "'report more details about the mrca node'", ")", "args", "=", "parser", ".", "parse_args", "(", "argv", ")", "id_list", "=", "args", ".", "ottid", "if", "not", "id_list", ":", "sys", ".", "stderr", ".", "write", "(", "'No OTT IDs provided. Running a dummy query with 770302 770315\\n'", ")", "id_list", "=", "[", "770302", ",", "770315", "]", "fetch_and_write_mrca", "(", "id_list", ",", "args", ".", "details", ",", "args", ".", "subtree", ",", "args", ".", "induced_subtree", ",", "sys", ".", "stdout", ",", "sys", ".", "stderr", ")" ]
This function sets up a command-line option parser and then calls fetch_and_write_mrca to do all of the real work.
[ "This", "function", "sets", "up", "a", "command", "-", "line", "option", "parser", "and", "then", "calls", "fetch_and_write_mrca", "to", "do", "all", "of", "the", "real", "work", "." ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-tree-of-life-mrca.py#L63-L82
train
PSPC-SPAC-buyandsell/von_agent
von_agent/agent/origin.py
Origin.send_schema
async def send_schema(self, schema_data_json: str) -> str: """ Send schema to ledger, then retrieve it as written to the ledger and return it. If schema already exists on ledger, log error and return schema. :param schema_data_json: schema data json with name, version, attribute names; e.g., :: { 'name': 'my-schema', 'version': '1.234', 'attr_names': ['favourite_drink', 'height', 'last_visit_date'] } :return: schema json as written to ledger (or existed a priori) """ LOGGER.debug('Origin.send_schema >>> schema_data_json: %s', schema_data_json) schema_data = json.loads(schema_data_json) s_key = schema_key(schema_id(self.did, schema_data['name'], schema_data['version'])) with SCHEMA_CACHE.lock: try: rv_json = await self.get_schema(s_key) LOGGER.error( 'Schema %s version %s already exists on ledger for origin-did %s: not sending', schema_data['name'], schema_data['version'], self.did) except AbsentSchema: # OK - about to create and send it (_, schema_json) = await anoncreds.issuer_create_schema( self.did, schema_data['name'], schema_data['version'], json.dumps(schema_data['attr_names'])) req_json = await ledger.build_schema_request(self.did, schema_json) resp_json = await self._sign_submit(req_json) resp = json.loads(resp_json) resp_result_txn = resp['result']['txn'] rv_json = await self.get_schema(schema_key(schema_id( resp_result_txn['metadata']['from'], resp_result_txn['data']['data']['name'], resp_result_txn['data']['data']['version']))) # add to cache en passant LOGGER.debug('Origin.send_schema <<< %s', rv_json) return rv_json
python
async def send_schema(self, schema_data_json: str) -> str: """ Send schema to ledger, then retrieve it as written to the ledger and return it. If schema already exists on ledger, log error and return schema. :param schema_data_json: schema data json with name, version, attribute names; e.g., :: { 'name': 'my-schema', 'version': '1.234', 'attr_names': ['favourite_drink', 'height', 'last_visit_date'] } :return: schema json as written to ledger (or existed a priori) """ LOGGER.debug('Origin.send_schema >>> schema_data_json: %s', schema_data_json) schema_data = json.loads(schema_data_json) s_key = schema_key(schema_id(self.did, schema_data['name'], schema_data['version'])) with SCHEMA_CACHE.lock: try: rv_json = await self.get_schema(s_key) LOGGER.error( 'Schema %s version %s already exists on ledger for origin-did %s: not sending', schema_data['name'], schema_data['version'], self.did) except AbsentSchema: # OK - about to create and send it (_, schema_json) = await anoncreds.issuer_create_schema( self.did, schema_data['name'], schema_data['version'], json.dumps(schema_data['attr_names'])) req_json = await ledger.build_schema_request(self.did, schema_json) resp_json = await self._sign_submit(req_json) resp = json.loads(resp_json) resp_result_txn = resp['result']['txn'] rv_json = await self.get_schema(schema_key(schema_id( resp_result_txn['metadata']['from'], resp_result_txn['data']['data']['name'], resp_result_txn['data']['data']['version']))) # add to cache en passant LOGGER.debug('Origin.send_schema <<< %s', rv_json) return rv_json
[ "async", "def", "send_schema", "(", "self", ",", "schema_data_json", ":", "str", ")", "->", "str", ":", "LOGGER", ".", "debug", "(", "'Origin.send_schema >>> schema_data_json: %s'", ",", "schema_data_json", ")", "schema_data", "=", "json", ".", "loads", "(", "schema_data_json", ")", "s_key", "=", "schema_key", "(", "schema_id", "(", "self", ".", "did", ",", "schema_data", "[", "'name'", "]", ",", "schema_data", "[", "'version'", "]", ")", ")", "with", "SCHEMA_CACHE", ".", "lock", ":", "try", ":", "rv_json", "=", "await", "self", ".", "get_schema", "(", "s_key", ")", "LOGGER", ".", "error", "(", "'Schema %s version %s already exists on ledger for origin-did %s: not sending'", ",", "schema_data", "[", "'name'", "]", ",", "schema_data", "[", "'version'", "]", ",", "self", ".", "did", ")", "except", "AbsentSchema", ":", "# OK - about to create and send it", "(", "_", ",", "schema_json", ")", "=", "await", "anoncreds", ".", "issuer_create_schema", "(", "self", ".", "did", ",", "schema_data", "[", "'name'", "]", ",", "schema_data", "[", "'version'", "]", ",", "json", ".", "dumps", "(", "schema_data", "[", "'attr_names'", "]", ")", ")", "req_json", "=", "await", "ledger", ".", "build_schema_request", "(", "self", ".", "did", ",", "schema_json", ")", "resp_json", "=", "await", "self", ".", "_sign_submit", "(", "req_json", ")", "resp", "=", "json", ".", "loads", "(", "resp_json", ")", "resp_result_txn", "=", "resp", "[", "'result'", "]", "[", "'txn'", "]", "rv_json", "=", "await", "self", ".", "get_schema", "(", "schema_key", "(", "schema_id", "(", "resp_result_txn", "[", "'metadata'", "]", "[", "'from'", "]", ",", "resp_result_txn", "[", "'data'", "]", "[", "'data'", "]", "[", "'name'", "]", ",", "resp_result_txn", "[", "'data'", "]", "[", "'data'", "]", "[", "'version'", "]", ")", ")", ")", "# add to cache en passant", "LOGGER", ".", "debug", "(", "'Origin.send_schema <<< %s'", ",", "rv_json", ")", "return", "rv_json" ]
Send schema to ledger, then retrieve it as written to the ledger and return it. If schema already exists on ledger, log error and return schema. :param schema_data_json: schema data json with name, version, attribute names; e.g., :: { 'name': 'my-schema', 'version': '1.234', 'attr_names': ['favourite_drink', 'height', 'last_visit_date'] } :return: schema json as written to ledger (or existed a priori)
[ "Send", "schema", "to", "ledger", "then", "retrieve", "it", "as", "written", "to", "the", "ledger", "and", "return", "it", ".", "If", "schema", "already", "exists", "on", "ledger", "log", "error", "and", "return", "schema", "." ]
0b1c17cca3bd178b6e6974af84dbac1dfce5cf45
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/origin.py#L36-L82
train
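A hedged async sketch of send_schema (origin stands in for a hypothetical, already-opened Origin agent backed by an indy-sdk wallet and ledger pool; the schema data mirrors the docstring's own example):

    import asyncio
    import json

    async def publish(origin):
        # returns the schema json as written to (or already present on) the ledger
        return await origin.send_schema(json.dumps({
            'name': 'my-schema',
            'version': '1.234',
            'attr_names': ['favourite_drink', 'height', 'last_visit_date']}))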
OpenTreeOfLife/peyotl
peyotl/git_storage/type_aware_doc_store.py
TypeAwareDocStore._locked_refresh_doc_ids
def _locked_refresh_doc_ids(self): """Assumes that the caller has the _index_lock ! """ d = {} for s in self._shards: for k in s.doc_index.keys(): if k in d: raise KeyError('doc "{i}" found in multiple repos'.format(i=k)) d[k] = s self._doc2shard_map = d
python
def _locked_refresh_doc_ids(self): """Assumes that the caller has the _index_lock ! """ d = {} for s in self._shards: for k in s.doc_index.keys(): if k in d: raise KeyError('doc "{i}" found in multiple repos'.format(i=k)) d[k] = s self._doc2shard_map = d
[ "def", "_locked_refresh_doc_ids", "(", "self", ")", ":", "d", "=", "{", "}", "for", "s", "in", "self", ".", "_shards", ":", "for", "k", "in", "s", ".", "doc_index", ".", "keys", "(", ")", ":", "if", "k", "in", "d", ":", "raise", "KeyError", "(", "'doc \"{i}\" found in multiple repos'", ".", "format", "(", "i", "=", "k", ")", ")", "d", "[", "k", "]", "=", "s", "self", ".", "_doc2shard_map", "=", "d" ]
Assumes that the caller has the _index_lock !
[ "Assumes", "that", "the", "caller", "has", "the", "_index_lock", "!" ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/type_aware_doc_store.py#L157-L166
train
OpenTreeOfLife/peyotl
peyotl/git_storage/type_aware_doc_store.py
TypeAwareDocStore.push_doc_to_remote
def push_doc_to_remote(self, remote_name, doc_id=None): """This will push the master branch to the remote named `remote_name` using the mirroring strategy to cut down on locking of the working repo. `doc_id` is used to determine which shard should be pushed. If `doc_id` is None, all shards are pushed. """ if doc_id is None: ret = True # @TODO should spawn a thread for each shard... for shard in self._shards: if not shard.push_to_remote(remote_name): ret = False return ret shard = self.get_shard(doc_id) return shard.push_to_remote(remote_name)
python
def push_doc_to_remote(self, remote_name, doc_id=None): """This will push the master branch to the remote named `remote_name` using the mirroring strategy to cut down on locking of the working repo. `doc_id` is used to determine which shard should be pushed. If `doc_id` is None, all shards are pushed. """ if doc_id is None: ret = True # @TODO should spawn a thread for each shard... for shard in self._shards: if not shard.push_to_remote(remote_name): ret = False return ret shard = self.get_shard(doc_id) return shard.push_to_remote(remote_name)
[ "def", "push_doc_to_remote", "(", "self", ",", "remote_name", ",", "doc_id", "=", "None", ")", ":", "if", "doc_id", "is", "None", ":", "ret", "=", "True", "# @TODO should spawn a thread for each shard...", "for", "shard", "in", "self", ".", "_shards", ":", "if", "not", "shard", ".", "push_to_remote", "(", "remote_name", ")", ":", "ret", "=", "False", "return", "ret", "shard", "=", "self", ".", "get_shard", "(", "doc_id", ")", "return", "shard", ".", "push_to_remote", "(", "remote_name", ")" ]
This will push the master branch to the remote named `remote_name` using the mirroring strategy to cut down on locking of the working repo. `doc_id` is used to determine which shard should be pushed. If `doc_id` is None, all shards are pushed.
[ "This", "will", "push", "the", "master", "branch", "to", "the", "remote", "named", "remote_name", "using", "the", "mirroring", "strategy", "to", "cut", "down", "on", "locking", "of", "the", "working", "repo", "." ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/type_aware_doc_store.py#L214-L229
train
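A hedged sketch (store is a hypothetical TypeAwareDocStore and 'origin' a configured git remote; the doc id is made up):

    # push every shard's master branch; returns False if any shard's push fails
    all_ok = store.push_doc_to_remote('origin')

    # push only the shard holding one document
    one_ok = store.push_doc_to_remote('origin', doc_id='xy_123')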
OpenTreeOfLife/peyotl
peyotl/git_storage/type_aware_doc_store.py
TypeAwareDocStore.iter_doc_filepaths
def iter_doc_filepaths(self, **kwargs): """Generator that iterates over all detected documents and returns the filesystem path to each doc. Order is by shard, but arbitrary within shards. @TEMP not locked to prevent doc creation/deletion """ for shard in self._shards: for doc_id, blob in shard.iter_doc_filepaths(**kwargs): yield doc_id, blob
python
def iter_doc_filepaths(self, **kwargs): """Generator that iterates over all detected documents and returns the filesystem path to each doc. Order is by shard, but arbitrary within shards. @TEMP not locked to prevent doc creation/deletion """ for shard in self._shards: for doc_id, blob in shard.iter_doc_filepaths(**kwargs): yield doc_id, blob
[ "def", "iter_doc_filepaths", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "shard", "in", "self", ".", "_shards", ":", "for", "doc_id", ",", "blob", "in", "shard", ".", "iter_doc_filepaths", "(", "*", "*", "kwargs", ")", ":", "yield", "doc_id", ",", "blob" ]
Generator that iterates over all detected documents and returns the filesystem path to each doc. Order is by shard, but arbitrary within shards. @TEMP not locked to prevent doc creation/deletion
[ "Generator", "that", "iterates", "over", "all", "detected", "documents", "and", "returns", "the", "filesystem", "path", "to", "each", "doc", ".", "Order", "is", "by", "shard", "but", "arbitrary", "within", "shards", "." ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/type_aware_doc_store.py#L323-L331
train
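A hedged loop over iter_doc_filepaths (same hypothetical store as above; per the docstring, iteration is not locked against concurrent doc creation or deletion):

    for doc_id, filepath in store.iter_doc_filepaths():
        print(doc_id, filepath)  # each doc id with the path to its file inside its shard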
inveniosoftware/invenio-communities
invenio_communities/forms.py
CommunityForm.data
def data(self): """Form data.""" d = super(CommunityForm, self).data d.pop('csrf_token', None) return d
python
def data(self): """Form data.""" d = super(CommunityForm, self).data d.pop('csrf_token', None) return d
[ "def", "data", "(", "self", ")", ":", "d", "=", "super", "(", "CommunityForm", ",", "self", ")", ".", "data", "d", ".", "pop", "(", "'csrf_token'", ",", "None", ")", "return", "d" ]
Form data.
[ "Form", "data", "." ]
5c4de6783724d276ae1b6dd13a399a9e22fadc7a
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/forms.py#L54-L58
train
inveniosoftware/invenio-communities
invenio_communities/forms.py
CommunityForm.validate_identifier
def validate_identifier(self, field): """Validate field identifier.""" if field.data: field.data = field.data.lower() if Community.get(field.data, with_deleted=True): raise validators.ValidationError( _('The identifier already exists. ' 'Please choose a different one.'))
python
def validate_identifier(self, field): """Validate field identifier.""" if field.data: field.data = field.data.lower() if Community.get(field.data, with_deleted=True): raise validators.ValidationError( _('The identifier already exists. ' 'Please choose a different one.'))
[ "def", "validate_identifier", "(", "self", ",", "field", ")", ":", "if", "field", ".", "data", ":", "field", ".", "data", "=", "field", ".", "data", ".", "lower", "(", ")", "if", "Community", ".", "get", "(", "field", ".", "data", ",", "with_deleted", "=", "True", ")", ":", "raise", "validators", ".", "ValidationError", "(", "_", "(", "'The identifier already exists. '", "'Please choose a different one.'", ")", ")" ]
Validate field identifier.
[ "Validate", "field", "identifier", "." ]
5c4de6783724d276ae1b6dd13a399a9e22fadc7a
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/forms.py#L154-L161
train
OpenTreeOfLife/peyotl
peyotl/utility/input_output.py
read_filepath
def read_filepath(filepath, encoding='utf-8'): """Returns the text content of `filepath`""" with codecs.open(filepath, 'r', encoding=encoding) as fo: return fo.read()
python
def read_filepath(filepath, encoding='utf-8'): """Returns the text content of `filepath`""" with codecs.open(filepath, 'r', encoding=encoding) as fo: return fo.read()
[ "def", "read_filepath", "(", "filepath", ",", "encoding", "=", "'utf-8'", ")", ":", "with", "codecs", ".", "open", "(", "filepath", ",", "'r'", ",", "encoding", "=", "encoding", ")", "as", "fo", ":", "return", "fo", ".", "read", "(", ")" ]
Returns the text content of `filepath`
[ "Returns", "the", "text", "content", "of", "filepath" ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/input_output.py#L25-L28
train
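Usage is a one-liner (the paths are hypothetical; pass encoding= to override the UTF-8 default):

    from peyotl.utility.input_output import read_filepath

    text = read_filepath('/tmp/notes.txt')                        # decoded as UTF-8
    legacy = read_filepath('/tmp/legacy.txt', encoding='latin-1')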
OpenTreeOfLife/peyotl
peyotl/utility/input_output.py
download
def download(url, encoding='utf-8'): """Returns the text fetched via http GET from URL, read as `encoding`""" import requests response = requests.get(url) response.encoding = encoding return response.text
python
def download(url, encoding='utf-8'): """Returns the text fetched via http GET from URL, read as `encoding`""" import requests response = requests.get(url) response.encoding = encoding return response.text
[ "def", "download", "(", "url", ",", "encoding", "=", "'utf-8'", ")", ":", "import", "requests", "response", "=", "requests", ".", "get", "(", "url", ")", "response", ".", "encoding", "=", "encoding", "return", "response", ".", "text" ]
Returns the text fetched via http GET from URL, read as `encoding`
[ "Returns", "the", "text", "fetched", "via", "http", "GET", "from", "URL", "read", "as", "encoding" ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/input_output.py#L53-L58
train
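A hedged sketch of download (assumes the requests package is installed, which the function imports lazily; the URL is hypothetical):

    from peyotl.utility.input_output import download

    text = download('https://example.org/table.csv')  # response body decoded as UTF-8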
OpenTreeOfLife/peyotl
peyotl/utility/input_output.py
pretty_dict_str
def pretty_dict_str(d, indent=2): """shows JSON indented representation of d""" b = StringIO() write_pretty_dict_str(b, d, indent=indent) return b.getvalue()
python
def pretty_dict_str(d, indent=2): """shows JSON indented representation of d""" b = StringIO() write_pretty_dict_str(b, d, indent=indent) return b.getvalue()
[ "def", "pretty_dict_str", "(", "d", ",", "indent", "=", "2", ")", ":", "b", "=", "StringIO", "(", ")", "write_pretty_dict_str", "(", "b", ",", "d", ",", "indent", "=", "indent", ")", "return", "b", ".", "getvalue", "(", ")" ]
shows JSON indented representation of d
[ "shows", "JSON", "indented", "representation", "of", "d" ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/input_output.py#L80-L84
train
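A quick illustration of pretty_dict_str (note its helper write_pretty_dict_str passes encoding="utf-8" to json.dump, a Python 2 signature, so as written this runs under Python 2):

    from peyotl.utility.input_output import pretty_dict_str

    d = {'b': [1, 2], 'a': {'nested': True}}
    print(pretty_dict_str(d))
    # keys come out sorted with two-space indentation:
    # {
    #   "a": {
    #     "nested": true
    #   },
    #   "b": [
    #     1,
    #     2
    #   ]
    # }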
OpenTreeOfLife/peyotl
peyotl/utility/input_output.py
write_pretty_dict_str
def write_pretty_dict_str(out, obj, indent=2): """writes JSON indented representation of `obj` to `out`""" json.dump(obj, out, indent=indent, sort_keys=True, separators=(',', ': '), ensure_ascii=False, encoding="utf-8")
python
def write_pretty_dict_str(out, obj, indent=2): """writes JSON indented representation of `obj` to `out`""" json.dump(obj, out, indent=indent, sort_keys=True, separators=(',', ': '), ensure_ascii=False, encoding="utf-8")
[ "def", "write_pretty_dict_str", "(", "out", ",", "obj", ",", "indent", "=", "2", ")", ":", "json", ".", "dump", "(", "obj", ",", "out", ",", "indent", "=", "indent", ",", "sort_keys", "=", "True", ",", "separators", "=", "(", "','", ",", "': '", ")", ",", "ensure_ascii", "=", "False", ",", "encoding", "=", "\"utf-8\"", ")" ]
writes JSON indented representation of `obj` to `out`
[ "writes", "JSON", "indented", "representation", "of", "obj", "to", "out" ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/input_output.py#L87-L95
train
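A usage sketch for write_pretty_dict_str. Note that json.dump's `encoding` keyword exists only under Python 2; under Python 3 it raises TypeError, so the code as written assumes a Python 2 runtime.

# Hedged usage sketch, assuming Python 2 (json.dump's `encoding` kwarg
# was removed in Python 3).
import sys
from peyotl.utility.input_output import write_pretty_dict_str

write_pretty_dict_str(sys.stdout, {'b': 1, 'a': 2})
# prints a sorted, two-space-indented JSON object to stdout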
inveniosoftware/invenio-communities
invenio_communities/serializers/response.py
community_responsify
def community_responsify(schema_class, mimetype):
    """Create a community response serializer.

    :param serializer: Serializer instance.
    :param mimetype: MIME type of response.
    """
    def view(data, code=200, headers=None, links_item_factory=None,
             page=None, urlkwargs=None, links_pagination_factory=None):
        """Generate the response object."""
        if isinstance(data, Community):
            last_modified = data.updated
            response_data = schema_class(
                context=dict(item_links_factory=links_item_factory)
            ).dump(data).data
        else:
            last_modified = None
            response_data = schema_class(
                context=dict(
                    total=data.query.count(),
                    item_links_factory=links_item_factory,
                    page=page,
                    urlkwargs=urlkwargs,
                    pagination_links_factory=links_pagination_factory)
            ).dump(data.items, many=True).data

        response = current_app.response_class(
            json.dumps(response_data, **_format_args()),
            mimetype=mimetype)
        response.status_code = code
        if last_modified:
            response.last_modified = last_modified
        if headers is not None:
            response.headers.extend(headers)
        return response
    return view
python
def community_responsify(schema_class, mimetype):
    """Create a community response serializer.

    :param serializer: Serializer instance.
    :param mimetype: MIME type of response.
    """
    def view(data, code=200, headers=None, links_item_factory=None,
             page=None, urlkwargs=None, links_pagination_factory=None):
        """Generate the response object."""
        if isinstance(data, Community):
            last_modified = data.updated
            response_data = schema_class(
                context=dict(item_links_factory=links_item_factory)
            ).dump(data).data
        else:
            last_modified = None
            response_data = schema_class(
                context=dict(
                    total=data.query.count(),
                    item_links_factory=links_item_factory,
                    page=page,
                    urlkwargs=urlkwargs,
                    pagination_links_factory=links_pagination_factory)
            ).dump(data.items, many=True).data

        response = current_app.response_class(
            json.dumps(response_data, **_format_args()),
            mimetype=mimetype)
        response.status_code = code
        if last_modified:
            response.last_modified = last_modified
        if headers is not None:
            response.headers.extend(headers)
        return response
    return view
[ "def", "community_responsify", "(", "schema_class", ",", "mimetype", ")", ":", "def", "view", "(", "data", ",", "code", "=", "200", ",", "headers", "=", "None", ",", "links_item_factory", "=", "None", ",", "page", "=", "None", ",", "urlkwargs", "=", "None", ",", "links_pagination_factory", "=", "None", ")", ":", "\"\"\"Generate the response object.\"\"\"", "if", "isinstance", "(", "data", ",", "Community", ")", ":", "last_modified", "=", "data", ".", "updated", "response_data", "=", "schema_class", "(", "context", "=", "dict", "(", "item_links_factory", "=", "links_item_factory", ")", ")", ".", "dump", "(", "data", ")", ".", "data", "else", ":", "last_modified", "=", "None", "response_data", "=", "schema_class", "(", "context", "=", "dict", "(", "total", "=", "data", ".", "query", ".", "count", "(", ")", ",", "item_links_factory", "=", "links_item_factory", ",", "page", "=", "page", ",", "urlkwargs", "=", "urlkwargs", ",", "pagination_links_factory", "=", "links_pagination_factory", ")", ")", ".", "dump", "(", "data", ".", "items", ",", "many", "=", "True", ")", ".", "data", "response", "=", "current_app", ".", "response_class", "(", "json", ".", "dumps", "(", "response_data", ",", "*", "*", "_format_args", "(", ")", ")", ",", "mimetype", "=", "mimetype", ")", "response", ".", "status_code", "=", "code", "if", "last_modified", ":", "response", ".", "last_modified", "=", "last_modified", "if", "headers", "is", "not", "None", ":", "response", ".", "headers", ".", "extend", "(", "headers", ")", "return", "response", "return", "view" ]
Create a community response serializer.

:param serializer: Serializer instance.
:param mimetype: MIME type of response.
[ "Create", "a", "community", "response", "serializer", "." ]
5c4de6783724d276ae1b6dd13a399a9e22fadc7a
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/serializers/response.py#L58-L94
train
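A wiring sketch for community_responsify. The schema class and link factory named below are placeholders, not names confirmed by this entry; the factory is typically called once at import time and the returned `view` is invoked from REST endpoints.

# Hedged wiring sketch; CommunitySchemaV1 and my_item_links are hypothetical.
community_response = community_responsify(CommunitySchemaV1, 'application/json')

# inside a view function, serializing a single Community record:
#     return community_response(community, code=200,
#                               links_item_factory=my_item_links)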
palantir/typedjsonrpc
typedjsonrpc/errors.py
InternalError.from_error
def from_error(exc_info, json_encoder, debug_url=None):
    """Wraps another Exception in an InternalError.

    :param exc_info: The exception info for the wrapped exception
    :type exc_info: (type, object, traceback)
    :type json_encoder: json.JSONEncoder
    :type debug_url: str | None
    :rtype: InternalError

    .. versionadded:: 0.1.0

    .. versionchanged:: 0.2.0
        Stringifies non-JSON-serializable objects
    """
    exc = exc_info[1]
    data = exc.__dict__.copy()
    for key, value in data.items():
        try:
            json_encoder.encode(value)
        except TypeError:
            data[key] = repr(value)
    data["traceback"] = "".join(traceback.format_exception(*exc_info))
    if debug_url is not None:
        data["debug_url"] = debug_url
    return InternalError(data)
python
def from_error(exc_info, json_encoder, debug_url=None):
    """Wraps another Exception in an InternalError.

    :param exc_info: The exception info for the wrapped exception
    :type exc_info: (type, object, traceback)
    :type json_encoder: json.JSONEncoder
    :type debug_url: str | None
    :rtype: InternalError

    .. versionadded:: 0.1.0

    .. versionchanged:: 0.2.0
        Stringifies non-JSON-serializable objects
    """
    exc = exc_info[1]
    data = exc.__dict__.copy()
    for key, value in data.items():
        try:
            json_encoder.encode(value)
        except TypeError:
            data[key] = repr(value)
    data["traceback"] = "".join(traceback.format_exception(*exc_info))
    if debug_url is not None:
        data["debug_url"] = debug_url
    return InternalError(data)
[ "def", "from_error", "(", "exc_info", ",", "json_encoder", ",", "debug_url", "=", "None", ")", ":", "exc", "=", "exc_info", "[", "1", "]", "data", "=", "exc", ".", "__dict__", ".", "copy", "(", ")", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "try", ":", "json_encoder", ".", "encode", "(", "value", ")", "except", "TypeError", ":", "data", "[", "key", "]", "=", "repr", "(", "value", ")", "data", "[", "\"traceback\"", "]", "=", "\"\"", ".", "join", "(", "traceback", ".", "format_exception", "(", "*", "exc_info", ")", ")", "if", "debug_url", "is", "not", "None", ":", "data", "[", "\"debug_url\"", "]", "=", "debug_url", "return", "InternalError", "(", "data", ")" ]
Wraps another Exception in an InternalError.

:param exc_info: The exception info for the wrapped exception
:type exc_info: (type, object, traceback)
:type json_encoder: json.JSONEncoder
:type debug_url: str | None
:rtype: InternalError

.. versionadded:: 0.1.0

.. versionchanged:: 0.2.0
    Stringifies non-JSON-serializable objects
[ "Wraps", "another", "Exception", "in", "an", "InternalError", "." ]
274218fcd236ff9643506caa629029c9ba25a0fb
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/errors.py#L99-L122
train
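A usage sketch for InternalError.from_error: capture the active exception's info triple and wrap it; the debug URL is illustrative.

# Hedged usage sketch; the debug URL is hypothetical.
import json
import sys
from typedjsonrpc.errors import InternalError

try:
    {}['missing']
except KeyError:
    err = InternalError.from_error(sys.exc_info(), json.JSONEncoder(),
                                   debug_url='/debug/1234')
# The wrapped error's data carries the exception's attributes (repr-ed when
# not JSON-serializable), the formatted traceback, and the debug_url.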
PSPC-SPAC-buyandsell/von_agent
von_agent/cache.py
SchemaCache.contains
def contains(self, index: Union[SchemaKey, int, str]) -> bool:
    """
    Return whether the cache contains a schema for the input key, sequence number,
    or schema identifier.

    :param index: schema key, sequence number, or sequence identifier
    :return: whether the cache contains a schema for the input index
    """

    LOGGER.debug('SchemaCache.contains >>> index: %s', index)

    rv = None
    if isinstance(index, SchemaKey):
        rv = (index in self._schema_key2schema)
    elif isinstance(index, int) or (isinstance(index, str) and ':2:' not in index):
        rv = (int(index) in self._seq_no2schema_key)
    elif isinstance(index, str):
        rv = (schema_key(index) in self._schema_key2schema)
    else:
        rv = False

    LOGGER.debug('SchemaCache.contains <<< %s', rv)
    return rv
python
def contains(self, index: Union[SchemaKey, int, str]) -> bool:
    """
    Return whether the cache contains a schema for the input key, sequence number,
    or schema identifier.

    :param index: schema key, sequence number, or sequence identifier
    :return: whether the cache contains a schema for the input index
    """

    LOGGER.debug('SchemaCache.contains >>> index: %s', index)

    rv = None
    if isinstance(index, SchemaKey):
        rv = (index in self._schema_key2schema)
    elif isinstance(index, int) or (isinstance(index, str) and ':2:' not in index):
        rv = (int(index) in self._seq_no2schema_key)
    elif isinstance(index, str):
        rv = (schema_key(index) in self._schema_key2schema)
    else:
        rv = False

    LOGGER.debug('SchemaCache.contains <<< %s', rv)
    return rv
[ "def", "contains", "(", "self", ",", "index", ":", "Union", "[", "SchemaKey", ",", "int", ",", "str", "]", ")", "->", "bool", ":", "LOGGER", ".", "debug", "(", "'SchemaCache.contains >>> index: %s'", ",", "index", ")", "rv", "=", "None", "if", "isinstance", "(", "index", ",", "SchemaKey", ")", ":", "rv", "=", "(", "index", "in", "self", ".", "_schema_key2schema", ")", "elif", "isinstance", "(", "index", ",", "int", ")", "or", "(", "isinstance", "(", "index", ",", "str", ")", "and", "':2:'", "not", "in", "index", ")", ":", "rv", "=", "(", "int", "(", "index", ")", "in", "self", ".", "_seq_no2schema_key", ")", "elif", "isinstance", "(", "index", ",", "str", ")", ":", "rv", "=", "(", "schema_key", "(", "index", ")", "in", "self", ".", "_schema_key2schema", ")", "else", ":", "rv", "=", "False", "LOGGER", ".", "debug", "(", "'SchemaCache.contains <<< %s'", ",", "rv", ")", "return", "rv" ]
Return whether the cache contains a schema for the input key, sequence number,
or schema identifier.

:param index: schema key, sequence number, or sequence identifier
:return: whether the cache contains a schema for the input index
[ "Return", "whether", "the", "cache", "contains", "a", "schema", "for", "the", "input", "key", "sequence", "number", "or", "schema", "identifier", "." ]
0b1c17cca3bd178b6e6974af84dbac1dfce5cf45
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/cache.py#L115-L136
train
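A sketch of the three index forms SchemaCache.contains dispatches on; the DID and sequence number are illustrative, and SchemaKey's field names are an assumption based on typical von_agent usage.

# Hedged sketch; identifiers and SchemaKey field names are assumptions.
s_key = SchemaKey(origin_did='Vx4E82R17q...', name='ident', version='1.0')
SCHEMA_CACHE.contains(s_key)                          # schema key lookup
SCHEMA_CACHE.contains(99)                             # ledger sequence number
SCHEMA_CACHE.contains('99')                           # numeric string without ':2:' -> sequence number
SCHEMA_CACHE.contains('Vx4E82R17q...:2:ident:1.0')    # schema identifier (contains ':2:')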
PSPC-SPAC-buyandsell/von_agent
von_agent/cache.py
RevoCacheEntry.cull
def cull(self, delta: bool) -> None:
    """
    Cull cache entry frame list to size, favouring most recent query time.

    :param delta: True to operate on rev reg deltas, False for rev reg states
    """

    LOGGER.debug('RevoCacheEntry.cull >>> delta: %s', delta)

    rr_frames = self.rr_delta_frames if delta else self.rr_state_frames
    mark = 4096**0.5  # max rev reg size = 4096; heuristic: hover max around sqrt(4096) = 64
    if len(rr_frames) > int(mark * 1.25):
        rr_frames.sort(key=lambda x: -x.qtime)  # order by descending query time
        del rr_frames[int(mark * 0.75):]  # retain most recent, grow again from here
        LOGGER.info(
            'Pruned revocation cache entry %s to %s %s frames',
            self.rev_reg_def['id'],
            len(rr_frames),
            'delta' if delta else 'state')

    LOGGER.debug('RevoCacheEntry.cull <<<')
python
def cull(self, delta: bool) -> None:
    """
    Cull cache entry frame list to size, favouring most recent query time.

    :param delta: True to operate on rev reg deltas, False for rev reg states
    """

    LOGGER.debug('RevoCacheEntry.cull >>> delta: %s', delta)

    rr_frames = self.rr_delta_frames if delta else self.rr_state_frames
    mark = 4096**0.5  # max rev reg size = 4096; heuristic: hover max around sqrt(4096) = 64
    if len(rr_frames) > int(mark * 1.25):
        rr_frames.sort(key=lambda x: -x.qtime)  # order by descending query time
        del rr_frames[int(mark * 0.75):]  # retain most recent, grow again from here
        LOGGER.info(
            'Pruned revocation cache entry %s to %s %s frames',
            self.rev_reg_def['id'],
            len(rr_frames),
            'delta' if delta else 'state')

    LOGGER.debug('RevoCacheEntry.cull <<<')
[ "def", "cull", "(", "self", ",", "delta", ":", "bool", ")", "->", "None", ":", "LOGGER", ".", "debug", "(", "'RevoCacheEntry.cull >>> delta: %s'", ",", "delta", ")", "rr_frames", "=", "self", ".", "rr_delta_frames", "if", "delta", "else", "self", ".", "rr_state_frames", "mark", "=", "4096", "**", "0.5", "# max rev reg size = 4096; heuristic: hover max around sqrt(4096) = 64", "if", "len", "(", "rr_frames", ")", ">", "int", "(", "mark", "*", "1.25", ")", ":", "rr_frames", ".", "sort", "(", "key", "=", "lambda", "x", ":", "-", "x", ".", "qtime", ")", "# order by descending query time", "del", "rr_frames", "[", "int", "(", "mark", "*", "0.75", ")", ":", "]", "# retain most recent, grow again from here", "LOGGER", ".", "info", "(", "'Pruned revocation cache entry %s to %s %s frames'", ",", "self", ".", "rev_reg_def", "[", "'id'", "]", ",", "len", "(", "rr_frames", ")", ",", "'delta'", "if", "delta", "else", "'state'", ")", "LOGGER", ".", "debug", "(", "'RevoCacheEntry.cull <<<'", ")" ]
Cull cache entry frame list to size, favouring most recent query time.

:param delta: True to operate on rev reg deltas, False for rev reg states
[ "Cull", "cache", "entry", "frame", "list", "to", "size", "favouring", "most", "recent", "query", "time", "." ]
0b1c17cca3bd178b6e6974af84dbac1dfce5cf45
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/cache.py#L413-L433
train
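The heuristic in concrete numbers: mark = 4096 ** 0.5 = 64.0, so the cull fires once a frame list exceeds int(64.0 * 1.25) = 80 entries, after which only the int(64.0 * 0.75) = 48 most recently queried frames survive, leaving headroom to grow before the next cull.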
PSPC-SPAC-buyandsell/von_agent
von_agent/cache.py
RevocationCache.dflt_interval
def dflt_interval(self, cd_id: str) -> (int, int):
    """
    Return default non-revocation interval from latest 'to' times on delta frames
    of revocation cache entries on indices stemming from input cred def id.

    Compute the 'from'/'to' values as the earliest/latest 'to' values of all cached
    delta frames on all rev reg ids stemming from the input cred def id.

    E.g., on frames for
        rev-reg-0: -[xx]---[xxxx]-[x]---[xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]--> time
        rev-reg-1: ----------------------[xxxx]----[xxx]---[xxxxxxxxxxxxxxxxxxxx]---------> time
        rev-reg-2: -------------------------------------------[xx]-----[xxxx]-----[xxxxx]-> time
        rev-reg-3: -----------------------------------------------------------[xxxxxxxx]--> time

    return the most recent interval covering all matching revocation registries in the cache; i.e.,:
        interval:  -------------------------------------------------------------[*******]-> time

    Raise CacheIndex if there are no matching entries.

    :param cd_id: cred def identifier to match
    :return: default non-revocation interval as 2-tuple (fro, to)
    """

    LOGGER.debug('RevocationCache.dflt_interval >>>')

    fro = None
    to = None

    for rr_id in self:
        if cd_id != rev_reg_id2cred_def_id(rr_id):
            continue
        entry = self[rr_id]
        if entry.rr_delta_frames:
            to = max(entry.rr_delta_frames, key=lambda f: f.to).to
            fro = min(fro or to, to)

    if not (fro and to):
        LOGGER.debug(
            'RevocationCache.dflt_interval <!< No data for default non-revoc interval on cred def id %s',
            cd_id)
        raise CacheIndex('No data for default non-revoc interval on cred def id {}'.format(cd_id))

    rv = (fro, to)
    LOGGER.debug('RevocationCache.dflt_interval <<< %s', rv)
    return rv
python
def dflt_interval(self, cd_id: str) -> (int, int):
    """
    Return default non-revocation interval from latest 'to' times on delta frames
    of revocation cache entries on indices stemming from input cred def id.

    Compute the 'from'/'to' values as the earliest/latest 'to' values of all cached
    delta frames on all rev reg ids stemming from the input cred def id.

    E.g., on frames for
        rev-reg-0: -[xx]---[xxxx]-[x]---[xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]--> time
        rev-reg-1: ----------------------[xxxx]----[xxx]---[xxxxxxxxxxxxxxxxxxxx]---------> time
        rev-reg-2: -------------------------------------------[xx]-----[xxxx]-----[xxxxx]-> time
        rev-reg-3: -----------------------------------------------------------[xxxxxxxx]--> time

    return the most recent interval covering all matching revocation registries in the cache; i.e.,:
        interval:  -------------------------------------------------------------[*******]-> time

    Raise CacheIndex if there are no matching entries.

    :param cd_id: cred def identifier to match
    :return: default non-revocation interval as 2-tuple (fro, to)
    """

    LOGGER.debug('RevocationCache.dflt_interval >>>')

    fro = None
    to = None

    for rr_id in self:
        if cd_id != rev_reg_id2cred_def_id(rr_id):
            continue
        entry = self[rr_id]
        if entry.rr_delta_frames:
            to = max(entry.rr_delta_frames, key=lambda f: f.to).to
            fro = min(fro or to, to)

    if not (fro and to):
        LOGGER.debug(
            'RevocationCache.dflt_interval <!< No data for default non-revoc interval on cred def id %s',
            cd_id)
        raise CacheIndex('No data for default non-revoc interval on cred def id {}'.format(cd_id))

    rv = (fro, to)
    LOGGER.debug('RevocationCache.dflt_interval <<< %s', rv)
    return rv
[ "def", "dflt_interval", "(", "self", ",", "cd_id", ":", "str", ")", "->", "(", "int", ",", "int", ")", ":", "LOGGER", ".", "debug", "(", "'RevocationCache.dflt_interval >>>'", ")", "fro", "=", "None", "to", "=", "None", "for", "rr_id", "in", "self", ":", "if", "cd_id", "!=", "rev_reg_id2cred_def_id", "(", "rr_id", ")", ":", "continue", "entry", "=", "self", "[", "rr_id", "]", "if", "entry", ".", "rr_delta_frames", ":", "to", "=", "max", "(", "entry", ".", "rr_delta_frames", ",", "key", "=", "lambda", "f", ":", "f", ".", "to", ")", ".", "to", "fro", "=", "min", "(", "fro", "or", "to", ",", "to", ")", "if", "not", "(", "fro", "and", "to", ")", ":", "LOGGER", ".", "debug", "(", "'RevocationCache.dflt_interval <!< No data for default non-revoc interval on cred def id %s'", ",", "cd_id", ")", "raise", "CacheIndex", "(", "'No data for default non-revoc interval on cred def id {}'", ".", "format", "(", "cd_id", ")", ")", "rv", "=", "(", "fro", ",", "to", ")", "LOGGER", ".", "debug", "(", "'RevocationCache.dflt_interval <<< %s'", ",", "rv", ")", "return", "rv" ]
Return default non-revocation interval from latest 'to' times on delta frames
of revocation cache entries on indices stemming from input cred def id.

Compute the 'from'/'to' values as the earliest/latest 'to' values of all cached
delta frames on all rev reg ids stemming from the input cred def id.

E.g., on frames for
    rev-reg-0: -[xx]---[xxxx]-[x]---[xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]--> time
    rev-reg-1: ----------------------[xxxx]----[xxx]---[xxxxxxxxxxxxxxxxxxxx]---------> time
    rev-reg-2: -------------------------------------------[xx]-----[xxxx]-----[xxxxx]-> time
    rev-reg-3: -----------------------------------------------------------[xxxxxxxx]--> time

return the most recent interval covering all matching revocation registries in the cache; i.e.,:
    interval:  -------------------------------------------------------------[*******]-> time

Raise CacheIndex if there are no matching entries.

:param cd_id: cred def identifier to match
:return: default non-revocation interval as 2-tuple (fro, to)
[ "Return", "default", "non", "-", "revocation", "interval", "from", "latest", "to", "times", "on", "delta", "frames", "of", "revocation", "cache", "entries", "on", "indices", "stemming", "from", "input", "cred", "def", "id", "." ]
0b1c17cca3bd178b6e6974af84dbac1dfce5cf45
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/cache.py#L662-L706
train
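A worked sketch of the fro computation (times illustrative): if the per-registry latest delta-frame 'to' values under a cred def are 1000, 1500, and 1200, then fro = min(1000, 1500, 1200) = 1000, the start of the most recent span that every matching registry's cached deltas cover; CacheIndex is raised only when no registry under the cred def contributes any delta frame.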
PSPC-SPAC-buyandsell/von_agent
von_agent/cache.py
Caches.parse
def parse(base_dir: str, timestamp: int = None) -> int:
    """
    Parse and update from archived cache files. Only accept new content;
    do not overwrite any existing cache content.

    :param base_dir: archive base directory
    :param timestamp: epoch time of cache serving as subdirectory, default most recent
    :return: epoch time of cache serving as subdirectory, None if there is no such archive.
    """

    LOGGER.debug('parse >>> base_dir: %s, timestamp: %s', base_dir, timestamp)

    if not isdir(base_dir):
        LOGGER.info('No cache archives available: not feeding cache')
        LOGGER.debug('parse <<< None')
        return None

    if not timestamp:
        timestamps = [int(t) for t in listdir(base_dir) if t.isdigit()]
        if timestamps:
            timestamp = max(timestamps)
        else:
            LOGGER.info('No cache archives available: not feeding cache')
            LOGGER.debug('parse <<< None')
            return None

    timestamp_dir = join(base_dir, str(timestamp))
    if not isdir(timestamp_dir):
        LOGGER.error('No such archived cache directory: %s', timestamp_dir)
        LOGGER.debug('parse <<< None')
        return None

    with SCHEMA_CACHE.lock:
        with open(join(timestamp_dir, 'schema'), 'r') as archive:
            schemata = json.loads(archive.read())
        SCHEMA_CACHE.feed(schemata)

    with CRED_DEF_CACHE.lock:
        with open(join(timestamp_dir, 'cred_def'), 'r') as archive:
            cred_defs = json.loads(archive.read())
        for cd_id in cred_defs:
            if cd_id in CRED_DEF_CACHE:
                LOGGER.warning('Cred def cache already has cred def on %s: skipping', cd_id)
            else:
                CRED_DEF_CACHE[cd_id] = cred_defs[cd_id]
                LOGGER.info('Cred def cache imported cred def for cred def id %s', cd_id)

    with REVO_CACHE.lock:
        with open(join(timestamp_dir, 'revocation'), 'r') as archive:
            rr_cache_entries = json.loads(archive.read())
        for (rr_id, entry) in rr_cache_entries.items():
            if rr_id in REVO_CACHE:
                LOGGER.warning('Revocation cache already has entry on %s: skipping', rr_id)
            else:
                rr_cache_entry = RevoCacheEntry(entry['rev_reg_def'])
                rr_cache_entry.rr_delta_frames = [
                    RevRegUpdateFrame(f['_to'], f['_timestamp'], f['_rr_update'])
                    for f in entry['rr_delta_frames']
                ]
                rr_cache_entry.cull(True)
                rr_cache_entry.rr_state_frames = [
                    RevRegUpdateFrame(f['_to'], f['_timestamp'], f['_rr_update'])
                    for f in entry['rr_state_frames']
                ]
                rr_cache_entry.cull(False)
                REVO_CACHE[rr_id] = rr_cache_entry
                LOGGER.info('Revocation cache imported entry for rev reg id %s', rr_id)

    LOGGER.debug('parse <<< %s', timestamp)
    return timestamp
python
def parse(base_dir: str, timestamp: int = None) -> int:
    """
    Parse and update from archived cache files. Only accept new content;
    do not overwrite any existing cache content.

    :param base_dir: archive base directory
    :param timestamp: epoch time of cache serving as subdirectory, default most recent
    :return: epoch time of cache serving as subdirectory, None if there is no such archive.
    """

    LOGGER.debug('parse >>> base_dir: %s, timestamp: %s', base_dir, timestamp)

    if not isdir(base_dir):
        LOGGER.info('No cache archives available: not feeding cache')
        LOGGER.debug('parse <<< None')
        return None

    if not timestamp:
        timestamps = [int(t) for t in listdir(base_dir) if t.isdigit()]
        if timestamps:
            timestamp = max(timestamps)
        else:
            LOGGER.info('No cache archives available: not feeding cache')
            LOGGER.debug('parse <<< None')
            return None

    timestamp_dir = join(base_dir, str(timestamp))
    if not isdir(timestamp_dir):
        LOGGER.error('No such archived cache directory: %s', timestamp_dir)
        LOGGER.debug('parse <<< None')
        return None

    with SCHEMA_CACHE.lock:
        with open(join(timestamp_dir, 'schema'), 'r') as archive:
            schemata = json.loads(archive.read())
        SCHEMA_CACHE.feed(schemata)

    with CRED_DEF_CACHE.lock:
        with open(join(timestamp_dir, 'cred_def'), 'r') as archive:
            cred_defs = json.loads(archive.read())
        for cd_id in cred_defs:
            if cd_id in CRED_DEF_CACHE:
                LOGGER.warning('Cred def cache already has cred def on %s: skipping', cd_id)
            else:
                CRED_DEF_CACHE[cd_id] = cred_defs[cd_id]
                LOGGER.info('Cred def cache imported cred def for cred def id %s', cd_id)

    with REVO_CACHE.lock:
        with open(join(timestamp_dir, 'revocation'), 'r') as archive:
            rr_cache_entries = json.loads(archive.read())
        for (rr_id, entry) in rr_cache_entries.items():
            if rr_id in REVO_CACHE:
                LOGGER.warning('Revocation cache already has entry on %s: skipping', rr_id)
            else:
                rr_cache_entry = RevoCacheEntry(entry['rev_reg_def'])
                rr_cache_entry.rr_delta_frames = [
                    RevRegUpdateFrame(f['_to'], f['_timestamp'], f['_rr_update'])
                    for f in entry['rr_delta_frames']
                ]
                rr_cache_entry.cull(True)
                rr_cache_entry.rr_state_frames = [
                    RevRegUpdateFrame(f['_to'], f['_timestamp'], f['_rr_update'])
                    for f in entry['rr_state_frames']
                ]
                rr_cache_entry.cull(False)
                REVO_CACHE[rr_id] = rr_cache_entry
                LOGGER.info('Revocation cache imported entry for rev reg id %s', rr_id)

    LOGGER.debug('parse <<< %s', timestamp)
    return timestamp
[ "def", "parse", "(", "base_dir", ":", "str", ",", "timestamp", ":", "int", "=", "None", ")", "->", "int", ":", "LOGGER", ".", "debug", "(", "'parse >>> base_dir: %s, timestamp: %s'", ",", "base_dir", ",", "timestamp", ")", "if", "not", "isdir", "(", "base_dir", ")", ":", "LOGGER", ".", "info", "(", "'No cache archives available: not feeding cache'", ")", "LOGGER", ".", "debug", "(", "'parse <<< None'", ")", "return", "None", "if", "not", "timestamp", ":", "timestamps", "=", "[", "int", "(", "t", ")", "for", "t", "in", "listdir", "(", "base_dir", ")", "if", "t", ".", "isdigit", "(", ")", "]", "if", "timestamps", ":", "timestamp", "=", "max", "(", "timestamps", ")", "else", ":", "LOGGER", ".", "info", "(", "'No cache archives available: not feeding cache'", ")", "LOGGER", ".", "debug", "(", "'parse <<< None'", ")", "return", "None", "timestamp_dir", "=", "join", "(", "base_dir", ",", "str", "(", "timestamp", ")", ")", "if", "not", "isdir", "(", "timestamp_dir", ")", ":", "LOGGER", ".", "error", "(", "'No such archived cache directory: %s'", ",", "timestamp_dir", ")", "LOGGER", ".", "debug", "(", "'parse <<< None'", ")", "return", "None", "with", "SCHEMA_CACHE", ".", "lock", ":", "with", "open", "(", "join", "(", "timestamp_dir", ",", "'schema'", ")", ",", "'r'", ")", "as", "archive", ":", "schemata", "=", "json", ".", "loads", "(", "archive", ".", "read", "(", ")", ")", "SCHEMA_CACHE", ".", "feed", "(", "schemata", ")", "with", "CRED_DEF_CACHE", ".", "lock", ":", "with", "open", "(", "join", "(", "timestamp_dir", ",", "'cred_def'", ")", ",", "'r'", ")", "as", "archive", ":", "cred_defs", "=", "json", ".", "loads", "(", "archive", ".", "read", "(", ")", ")", "for", "cd_id", "in", "cred_defs", ":", "if", "cd_id", "in", "CRED_DEF_CACHE", ":", "LOGGER", ".", "warning", "(", "'Cred def cache already has cred def on %s: skipping'", ",", "cd_id", ")", "else", ":", "CRED_DEF_CACHE", "[", "cd_id", "]", "=", "cred_defs", "[", "cd_id", "]", "LOGGER", ".", "info", "(", "'Cred def cache imported cred def for cred def id %s'", ",", "cd_id", ")", "with", "REVO_CACHE", ".", "lock", ":", "with", "open", "(", "join", "(", "timestamp_dir", ",", "'revocation'", ")", ",", "'r'", ")", "as", "archive", ":", "rr_cache_entries", "=", "json", ".", "loads", "(", "archive", ".", "read", "(", ")", ")", "for", "(", "rr_id", ",", "entry", ")", "in", "rr_cache_entries", ".", "items", "(", ")", ":", "if", "rr_id", "in", "REVO_CACHE", ":", "LOGGER", ".", "warning", "(", "'Revocation cache already has entry on %s: skipping'", ",", "rr_id", ")", "else", ":", "rr_cache_entry", "=", "RevoCacheEntry", "(", "entry", "[", "'rev_reg_def'", "]", ")", "rr_cache_entry", ".", "rr_delta_frames", "=", "[", "RevRegUpdateFrame", "(", "f", "[", "'_to'", "]", ",", "f", "[", "'_timestamp'", "]", ",", "f", "[", "'_rr_update'", "]", ")", "for", "f", "in", "entry", "[", "'rr_delta_frames'", "]", "]", "rr_cache_entry", ".", "cull", "(", "True", ")", "rr_cache_entry", ".", "rr_state_frames", "=", "[", "RevRegUpdateFrame", "(", "f", "[", "'_to'", "]", ",", "f", "[", "'_timestamp'", "]", ",", "f", "[", "'_rr_update'", "]", ")", "for", "f", "in", "entry", "[", "'rr_state_frames'", "]", "]", "rr_cache_entry", ".", "cull", "(", "False", ")", "REVO_CACHE", "[", "rr_id", "]", "=", "rr_cache_entry", "LOGGER", ".", "info", "(", "'Revocation cache imported entry for rev reg id %s'", ",", "rr_id", ")", "LOGGER", ".", "debug", "(", "'parse <<< %s'", ",", "timestamp", ")", "return", "timestamp" ]
Parse and update from archived cache files. Only accept new content;
do not overwrite any existing cache content.

:param base_dir: archive base directory
:param timestamp: epoch time of cache serving as subdirectory, default most recent
:return: epoch time of cache serving as subdirectory, None if there is no such archive.
[ "Parse", "and", "update", "from", "archived", "cache", "files", ".", "Only", "accept", "new", "content", ";", "do", "not", "overwrite", "any", "existing", "cache", "content", "." ]
0b1c17cca3bd178b6e6974af84dbac1dfce5cf45
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/cache.py#L774-L850
train
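A sketch of the archive layout Caches.parse expects; the base directory is illustrative, and each epoch-named subdirectory must hold the three JSON files the function opens.

# Hedged sketch; the base directory is hypothetical.
#   <base_dir>/<epoch>/schema        fed to SCHEMA_CACHE
#   <base_dir>/<epoch>/cred_def      merged into CRED_DEF_CACHE
#   <base_dir>/<epoch>/revocation    rebuilt into REVO_CACHE entries
loaded = parse('/var/cache/von')     # no timestamp given: newest epoch subdirectory wins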
OpenTreeOfLife/peyotl
peyotl/nexson_syntax/helper.py
detect_nexson_version
def detect_nexson_version(blob):
    """Returns the nexml2json attribute or the default code for badgerfish"""
    n = get_nexml_el(blob)
    assert isinstance(n, dict)
    return n.get('@nexml2json', BADGER_FISH_NEXSON_VERSION)
python
def detect_nexson_version(blob):
    """Returns the nexml2json attribute or the default code for badgerfish"""
    n = get_nexml_el(blob)
    assert isinstance(n, dict)
    return n.get('@nexml2json', BADGER_FISH_NEXSON_VERSION)
[ "def", "detect_nexson_version", "(", "blob", ")", ":", "n", "=", "get_nexml_el", "(", "blob", ")", "assert", "isinstance", "(", "n", ",", "dict", ")", "return", "n", ".", "get", "(", "'@nexml2json'", ",", "BADGER_FISH_NEXSON_VERSION", ")" ]
Returns the nexml2json attribute or the default code for badgerfish
[ "Returns", "the", "nexml2json", "attribute", "or", "the", "default", "code", "for", "badgerfish" ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/helper.py#L40-L44
train
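A sketch for detect_nexson_version, assuming get_nexml_el resolves the blob's top-level nexml element:

# Hedged sketch; assumes get_nexml_el returns the top-level nexml dict.
blob = {'nexml': {'@nexml2json': '1.2.1'}}
detect_nexson_version(blob)            # -> '1.2.1'
detect_nexson_version({'nexml': {}})   # -> BADGER_FISH_NEXSON_VERSION (the badgerfish default)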
OpenTreeOfLife/peyotl
peyotl/nexson_syntax/helper.py
_add_value_to_dict_bf
def _add_value_to_dict_bf(d, k, v):
    """Adds the `k`->`v` mapping to `d`, but if a previous element exists it changes
    the value of for the key to list.

    This is used in the BadgerFish mapping convention.

    This is a simple multi-dict that is only suitable when you know that
    you'll never store a list or `None` as a value in the dict.
    """
    prev = d.get(k)
    if prev is None:
        d[k] = v
    elif isinstance(prev, list):
        if isinstance(v, list):
            prev.extend(v)
        else:
            prev.append(v)
    else:
        if isinstance(v, list):
            x = [prev]
            x.extend(v)
            d[k] = x
        else:
            d[k] = [prev, v]
python
def _add_value_to_dict_bf(d, k, v):
    """Adds the `k`->`v` mapping to `d`, but if a previous element exists it changes
    the value of for the key to list.

    This is used in the BadgerFish mapping convention.

    This is a simple multi-dict that is only suitable when you know that
    you'll never store a list or `None` as a value in the dict.
    """
    prev = d.get(k)
    if prev is None:
        d[k] = v
    elif isinstance(prev, list):
        if isinstance(v, list):
            prev.extend(v)
        else:
            prev.append(v)
    else:
        if isinstance(v, list):
            x = [prev]
            x.extend(v)
            d[k] = x
        else:
            d[k] = [prev, v]
[ "def", "_add_value_to_dict_bf", "(", "d", ",", "k", ",", "v", ")", ":", "prev", "=", "d", ".", "get", "(", "k", ")", "if", "prev", "is", "None", ":", "d", "[", "k", "]", "=", "v", "elif", "isinstance", "(", "prev", ",", "list", ")", ":", "if", "isinstance", "(", "v", ",", "list", ")", ":", "prev", ".", "extend", "(", "v", ")", "else", ":", "prev", ".", "append", "(", "v", ")", "else", ":", "if", "isinstance", "(", "v", ",", "list", ")", ":", "x", "=", "[", "prev", "]", "x", ".", "extend", "(", "v", ")", "d", "[", "k", "]", "=", "x", "else", ":", "d", "[", "k", "]", "=", "[", "prev", ",", "v", "]" ]
Adds the `k`->`v` mapping to `d`, but if a previous element exists it changes
the value of for the key to list.

This is used in the BadgerFish mapping convention.

This is a simple multi-dict that is only suitable when you know that
you'll never store a list or `None` as a value in the dict.
[ "Adds", "the", "k", "-", ">", "v", "mapping", "to", "d", "but", "if", "a", "previous", "element", "exists", "it", "changes", "the", "value", "of", "for", "the", "key", "to", "list", "." ]
5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/helper.py#L110-L133
train
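A usage sketch showing _add_value_to_dict_bf's promotion rules, traced against the branches above:

d = {}
_add_value_to_dict_bf(d, 'k', 1)        # first value stored bare: {'k': 1}
_add_value_to_dict_bf(d, 'k', 2)        # second promotes to a list: {'k': [1, 2]}
_add_value_to_dict_bf(d, 'k', [3, 4])   # lists are extended in: {'k': [1, 2, 3, 4]}
# Storing a list (or None) as a key's first value would be ambiguous later,
# hence the caveat in the docstring.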