Dataset columns (name, type, length statistics):

    repository_name              stringlengths     7 .. 55
    func_path_in_repository      stringlengths     4 .. 223
    func_name                    stringlengths     1 .. 134
    whole_func_string            stringlengths     75 .. 104k
    language                     stringclasses     1 value
    func_code_string             stringlengths     75 .. 104k
    func_code_tokens             sequencelengths   19 .. 28.4k
    func_documentation_string    stringlengths     1 .. 46.9k
    func_documentation_tokens    sequencelengths   1 .. 1.97k
    split_name                   stringclasses     1 value
    func_code_url                stringlengths     87 .. 315
davenquinn/Attitude
docs/scripts/generate-json.py
serialize
def serialize(pca, **kwargs):
    """
    Serialize an orientation object to a dict suitable for JSON
    """
    strike, dip, rake = pca.strike_dip_rake()
    hyp_axes = sampling_axes(pca)
    return dict(
        **kwargs,
        principal_axes = pca.axes.tolist(),
        hyperbolic_axes = hyp_axes.tolist(),
        n_samples = pca.n,
        strike=strike, dip=dip, rake=rake,
        angular_errors=[2*N.degrees(i)
                        for i in angular_errors(hyp_axes)])
python
train
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/docs/scripts/generate-json.py#L11-L26
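A minimal usage sketch for serialize, assuming the Attitude package's PCA-fit API; the import path, the coordinate array, and the uid kwarg are illustrative, not taken from the source (the function itself relies on the module's sampling_axes, angular_errors, and numpy-as-N imports):

import json
import numpy as N                     # serialize() expects numpy bound to N

from attitude import Orientation     # hypothetical import path

coords = N.random.randn(50, 3)       # illustrative (n, 3) point cloud
pca = Orientation(coords)            # fit a plane via PCA
record = serialize(pca, uid="plane-01")   # extra kwargs land in the dict
print(json.dumps(record, indent=2))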
RudolfCardinal/pythonlib
cardinal_pythonlib/getch.py
_kbhit_unix
def _kbhit_unix() -> bool:
    """
    Under UNIX: is a keystroke available?
    """
    dr, dw, de = select.select([sys.stdin], [], [], 0)
    return dr != []
python
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/getch.py#L82-L87
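A sketch of a polling loop built on _kbhit_unix. One caveat worth noting: with the terminal in its default (canonical) mode, select only reports stdin readable after Enter is pressed, so real per-keystroke polling usually also puts the tty into cbreak mode first.

import select
import sys
import time

while not _kbhit_unix():   # non-blocking: the select timeout is 0
    time.sleep(0.05)       # idle work could happen here
ch = sys.stdin.read(1)
print("got:", ch)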
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/insert_on_duplicate.py
insert_on_duplicate
def insert_on_duplicate(tablename: str,
                        values: Any = None,
                        inline: bool = False,
                        **kwargs):
    """
    Command to produce an :class:`InsertOnDuplicate` object.

    Args:
        tablename: name of the table
        values: values to ``INSERT``
        inline: as per
            http://docs.sqlalchemy.org/en/latest/core/dml.html#sqlalchemy.sql.expression.insert
        kwargs: additional parameters

    Returns:
        an :class:`InsertOnDuplicate` object
    """  # noqa
    return InsertOnDuplicate(tablename, values, inline=inline, **kwargs)
python
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/insert_on_duplicate.py#L70-L88
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/insert_on_duplicate.py
compile_insert_on_duplicate_key_update
def compile_insert_on_duplicate_key_update(insert: Insert,
                                           compiler: SQLCompiler,
                                           **kw) -> str:
    """
    Hooks into the use of the :class:`InsertOnDuplicate` class
    for the MySQL dialect. Compiles the relevant SQL for an
    ``INSERT... ON DUPLICATE KEY UPDATE`` statement.

    Notes:

    - We can't get the fieldnames directly from ``insert`` or ``compiler``.
    - We could rewrite the innards of the visit_insert statement
      (https://github.com/bedwards/sqlalchemy_mysql_ext/blob/master/duplicate.py)...
      but, like that, it will get outdated.
    - We could use a hack-in-by-hand method
      (http://stackoverflow.com/questions/6611563/sqlalchemy-on-duplicate-key-update)
      ... but a little automation would be nice.
    - So, regex to the rescue.
    - NOTE THAT COLUMNS ARE ALREADY QUOTED by this stage; no need to repeat.
    """  # noqa
    # log.critical(compiler.__dict__)
    # log.critical(compiler.dialect.__dict__)
    # log.critical(insert.__dict__)
    s = compiler.visit_insert(insert, **kw)
    # log.critical(s)
    m = RE_INSERT_FIELDNAMES.match(s)
    if m is None:
        raise ValueError("compile_insert_on_duplicate_key_update: no match")
    columns = [c.strip() for c in m.group('columns').split(",")]
    # log.critical(columns)
    updates = ", ".join(
        ["{c} = VALUES({c})".format(c=c) for c in columns])
    s += ' ON DUPLICATE KEY UPDATE {}'.format(updates)
    # log.critical(s)
    return s
python
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/insert_on_duplicate.py#L123-L157
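The two functions above work as a pair: InsertOnDuplicate is a custom Insert subclass, and the compile hook is registered for the MySQL dialect only. A sketch of the registration pattern (the module itself presumably applies this decorator; the table and values below are illustrative):

from sqlalchemy.ext.compiler import compiles

# Register the hook so InsertOnDuplicate compiles specially on MySQL only:
compiles(InsertOnDuplicate, 'mysql')(compile_insert_on_duplicate_key_update)

stmt = insert_on_duplicate('person', {'id': 1, 'name': 'Alice'})
# engine.execute(stmt)   # on MySQL this emits
#                        # INSERT ... ON DUPLICATE KEY UPDATE id = VALUES(id), ...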
davenquinn/Attitude
attitude/orientation/grouped.py
create_groups
def create_groups(orientations, *groups, **kwargs):
    """
    Create groups of an orientation measurement dataset
    """
    grouped = []
    # Copy all datasets to be safe (this could be bad for
    # memory usage, so can be disabled).
    if kwargs.pop('copy', True):
        orientations = [copy(o) for o in orientations]

    for o in orientations:
        # Get rid of and recreate group membership
        o.member_of = None
        try:
            grouped += o.members
            for a in o.members:
                a.member_of = o
        except AttributeError:
            pass

    def find(uid):
        try:
            val = next(x for x in orientations if x.hash == uid)
            if val in grouped:
                raise GroupedPlaneError("{} is already in a group."
                                        .format(val.hash))
            return val
        except StopIteration:
            raise KeyError("No measurement of with hash {} found"
                           .format(uid))

    for uid_list in groups:
        vals = [find(uid) for uid in uid_list]
        o = GroupedOrientation(*vals, **kwargs)
        orientations.append(o)
    return orientations
python
train
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/grouped.py#L35-L71
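An illustrative call, assuming a list of measurement objects that each expose the .hash attribute the function matches on (the hash ids here are made up):

# Combine two measurements into a single GroupedOrientation; with copy=True
# (the default) the inputs are copied first, so the caller's list is untouched.
result = create_groups(orientations, ('a1b2', 'c3d4'))
group = result[-1]   # the newly appended GroupedOrientation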
RudolfCardinal/pythonlib
cardinal_pythonlib/tools/merge_csv.py
merge_csv
def merge_csv(filenames: List[str],
              outfile: TextIO = sys.stdout,
              input_dialect: str = 'excel',
              output_dialect: str = 'excel',
              debug: bool = False,
              headers: bool = True) -> None:
    """
    Amalgamate multiple CSV/TSV/similar files into one.

    Args:
        filenames: list of filenames to process
        outfile: file-like object to write output to
        input_dialect: dialect of input files, as passed to ``csv.reader``
        output_dialect: dialect to write, as passed to ``csv.writer``
        debug: be verbose?
        headers: do the files have header lines?
    """
    writer = csv.writer(outfile, dialect=output_dialect)
    written_header = False
    header_items = []  # type: List[str]
    for filename in filenames:
        log.info("Processing file " + repr(filename))
        with open(filename, 'r') as f:
            reader = csv.reader(f, dialect=input_dialect)
            if headers:
                if not written_header:
                    header_items = next(reader)
                    if debug:
                        log.debug("Header row: {!r}", header_items)
                    writer.writerow(header_items)
                    written_header = True
                else:
                    new_headers = next(reader)
                    if new_headers != header_items:
                        raise ValueError(
                            "Header line in file {filename} doesn't match - "
                            "it was {new} but previous was {old}".format(
                                filename=repr(filename),
                                new=repr(new_headers),
                                old=repr(header_items),
                            ))
                    if debug:
                        log.debug("Header row matches previous")
            else:
                if debug:
                    log.debug("No headers in use")
            for row in reader:
                if debug:
                    log.debug("Data row: {!r}", row)
                writer.writerow(row)
python
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/merge_csv.py#L45-L94
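A self-contained sketch of merge_csv with an in-memory output buffer. The input filenames are illustrative; with headers=True, every file after the first must carry an identical header row or ValueError is raised.

import io

buf = io.StringIO()
merge_csv(["jan.csv", "feb.csv"], outfile=buf, headers=True)
print(buf.getvalue())   # one header line, then the data rows of both files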
RudolfCardinal/pythonlib
cardinal_pythonlib/tools/merge_csv.py
main
def main():
    """
    Command-line processor. See ``--help`` for details.
    """
    main_only_quicksetup_rootlogger()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "filenames", nargs="+",
        help="Names of CSV/TSV files to merge"
    )
    parser.add_argument(
        "--outfile", default="-",
        help="Specify an output filename. If omitted or '-', stdout is used.",
    )
    parser.add_argument(
        "--inputdialect", default="excel",
        help="The input files' CSV/TSV dialect. Default: %(default)s.",
        choices=csv.list_dialects(),
    )
    parser.add_argument(
        "--outputdialect", default="excel",
        help="The output file's CSV/TSV dialect. Default: %(default)s.",
        choices=csv.list_dialects(),
    )
    parser.add_argument(
        "--noheaders", action="store_true",
        help="By default, files are assumed to have column headers. "
             "Specify this option to assume no headers.",
    )
    parser.add_argument(
        "--debug", action="store_true",
        help="Verbose debugging output.",
    )
    progargs = parser.parse_args()

    kwargs = {
        "filenames": progargs.filenames,
        "input_dialect": progargs.inputdialect,
        "output_dialect": progargs.outputdialect,
        "debug": progargs.debug,
        "headers": not progargs.noheaders,
    }
    if progargs.outfile == '-':
        log.info("Writing to stdout")
        merge_csv(outfile=sys.stdout, **kwargs)
    else:
        log.info("Writing to " + repr(progargs.outfile))
        with open(progargs.outfile, 'w') as outfile:
            # noinspection PyTypeChecker
            merge_csv(outfile=outfile, **kwargs)
python
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/merge_csv.py#L97-L153
RudolfCardinal/pythonlib
cardinal_pythonlib/maths_numpy.py
softmax
def softmax(x: np.ndarray,
            b: float = 1.0) -> np.ndarray:
    r"""
    Standard softmax function:

    .. math::

        P_i = \frac {e ^ {\beta \cdot x_i}} { \sum_{i}{\beta \cdot x_i} }

    Args:
        x: vector (``numpy.array``) of values
        b: exploration parameter :math:`\beta`, or inverse temperature
            [Daw2009], or :math:`1/t`; see below

    Returns:
        vector of probabilities corresponding to the input values

    where:

    - :math:`t` is temperature (towards infinity: all actions equally
      likely; towards zero: probability of action with highest value
      tends to 1)
    - Temperature is not used directly as optimizers may take it to zero,
      giving an infinity; use inverse temperature instead.
    - [Daw2009] Daw ND, "Trial-by-trial data analysis using computational
      methods", 2009/2011; in "Decision Making, Affect, and Learning:
      Attention and Performance XXIII"; Delgado MR, Phelps EA,
      Robbins TW (eds), Oxford University Press.
    """
    constant = np.mean(x)
    products = x * b - constant
    # ... softmax is invariant to addition of a constant: Daw article and
    # http://www.faqs.org/faqs/ai-faq/neural-nets/part2/section-12.html#b
    # noinspection PyUnresolvedReferences
    if products.max() > sys.float_info.max_exp:
        # ... max_exp for base e; max_10_exp for base 10
        log.warning("OVERFLOW in softmax(): x = {}, b = {}, constant = {}, "
                    "x*b - constant = {}".format(x, b, constant, products))
        # map the maximum to 1, other things to zero
        n = len(x)
        index_of_max = np.argmax(products)
        answer = np.zeros(n)
        answer[index_of_max] = 1.0
    else:
        # noinspection PyUnresolvedReferences
        exponented = np.exp(products)
        answer = exponented / np.sum(exponented)
    return answer
python
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/maths_numpy.py#L48-L95
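A quick check of the behaviour the docstring describes: higher b (inverse temperature) pushes the distribution towards one-hot on the maximum, and the output always sums to 1.

import numpy as np

x = np.array([1.0, 2.0, 3.0])
print(softmax(x, b=1.0))    # ~[0.09, 0.24, 0.67]: graded preference
print(softmax(x, b=10.0))   # nearly all mass on the largest value
print(softmax(x).sum())     # 1.0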
RudolfCardinal/pythonlib
cardinal_pythonlib/maths_numpy.py
logistic
def logistic(x: Union[float, np.ndarray],
             k: float,
             theta: float) -> Optional[float]:
    r"""
    Standard logistic function.

    .. math::

        y = \frac {1} {1 + e^{-k (x - \theta)}}

    Args:
        x: :math:`x`
        k: :math:`k`
        theta: :math:`\theta`

    Returns:
        :math:`y`
    """
    # https://www.sharelatex.com/learn/List_of_Greek_letters_and_math_symbols
    if x is None or k is None or theta is None:
        return None
    # noinspection PyUnresolvedReferences
    return 1 / (1 + np.exp(-k * (x - theta)))
python
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/maths_numpy.py#L102-L125
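Behaviour sketch: at x = theta the function returns 0.5, and it accepts scalars or numpy arrays (despite the scalar-looking return annotation).

import numpy as np

print(logistic(0.5, k=2.0, theta=0.5))                 # 0.5
print(logistic(np.array([-1.0, 0.5, 2.0]), 2.0, 0.5))  # rises towards 1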
RudolfCardinal/pythonlib
cardinal_pythonlib/maths_numpy.py
inv_logistic
def inv_logistic(y: Union[float, np.ndarray],
                 k: float,
                 theta: float) -> Optional[float]:
    r"""
    Inverse standard logistic function:

    .. math::

        x = ( log( \frac {1} {y} - 1) / -k ) + \theta

    Args:
        y: :math:`y`
        k: :math:`k`
        theta: :math:`\theta`

    Returns:
        :math:`x`
    """
    if y is None or k is None or theta is None:
        return None
    # noinspection PyUnresolvedReferences
    return (np.log((1 / y) - 1) / -k) + theta
python
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/maths_numpy.py#L128-L150
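Round-trip check: inv_logistic undoes logistic for any y strictly between 0 and 1.

k, theta = 2.0, 0.5
y = logistic(0.8, k, theta)
print(inv_logistic(y, k, theta))   # 0.8 (up to floating-point error)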
meyersj/geotweet
geotweet/mapreduce/utils/lookup.py
SpatialLookup.get_object
def get_object(self, point, buffer_size=0, multiple=False):
    """ lookup object based on point as [longitude, latitude] """
    # first search bounding boxes
    # idx.intersection method modifies input if it is a list
    try:
        tmp = tuple(point)
    except TypeError:
        return None
    # point must be in the form (minx, miny, maxx, maxy) or (x, y)
    if len(tmp) not in [2, 4]:
        return None
    # buffer point if size is specified
    geom = tmp = Point(tmp)
    if buffer_size:
        geom = tmp.buffer(buffer_size)
    if multiple:
        return self._get_all_near(geom)
    return self._get_nearest(tmp, geom)
python
train
https://github.com/meyersj/geotweet/blob/1a6b55f98adf34d1b91f172d9187d599616412d9/geotweet/mapreduce/utils/lookup.py#L83-L101
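A hypothetical query against a built index; the constructor and data file are assumptions, and the point must already be in the index's projected coordinate system, ordered (longitude, latitude) per the docstring.

lookup = SpatialLookup('counties.geojson')    # assumed constructor
pt = (-2224899.0, 253186.0)                   # illustrative projected coords
nearest = lookup.get_object(pt)               # single nearest feature, or None
nearby = lookup.get_object(pt, buffer_size=500.0, multiple=True)
print(lookup.get_object(42))                  # None: not a coordinate sequence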
meyersj/geotweet
geotweet/mapreduce/utils/lookup.py
SpatialLookup._build_from_geojson
def _build_from_geojson(self, src):
    """ Build a RTree index to disk using bounding box of each feature """
    geojson = json.loads(self.read(src))
    idx = index.Index()
    data_store = {}
    for i, feature in enumerate(geojson['features']):
        feature = self._build_obj(feature)
        idx.insert(i, feature['geometry'].bounds)
        data_store[i] = feature
    return data_store, idx
python
train
https://github.com/meyersj/geotweet/blob/1a6b55f98adf34d1b91f172d9187d599616412d9/geotweet/mapreduce/utils/lookup.py#L107-L116
meyersj/geotweet
geotweet/mapreduce/utils/lookup.py
CachedLookup.get
def get(self, point, buffer_size=0, multiple=False):
    """ lookup state and county based on geohash of coordinates from tweet """
    lon, lat = point
    geohash = Geohash.encode(lat, lon, precision=self.precision)
    key = (geohash, buffer_size, multiple)
    if key in self.geohash_cache:
        # cache hit on geohash
        self.hit += 1
        #print self.hit, self.miss
        return self.geohash_cache[key]
    self.miss += 1
    # cache miss on geohash
    # project point to ESRI:102005
    lat, lon = Geohash.decode(geohash)
    proj_point = project([float(lon), float(lat)])
    args = dict(buffer_size=buffer_size, multiple=multiple)
    payload = self.get_object(proj_point, **args)
    self.geohash_cache[key] = payload
    return payload
python
train
https://github.com/meyersj/geotweet/blob/1a6b55f98adf34d1b91f172d9187d599616412d9/geotweet/mapreduce/utils/lookup.py#L139-L157
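The cache in miniature: coordinates that fall in the same geohash cell at the configured precision share a key, so only the first lookup pays for the decode, projection, and R-tree query. The constructor signature here is an assumption.

cached = CachedLookup(precision=5)       # assumed constructor
a = cached.get((-122.6760, 45.5230))     # miss: decode, project, spatial query
b = cached.get((-122.6761, 45.5231))     # hit, assuming the same 5-char cell
print(cached.hit, cached.miss)           # 1 1 in that case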
KarrLab/nose2unitth
nose2unitth/core.py
Converter.run
def run(in_file_nose, out_dir_unitth):
    """ Convert nose-style test reports to UnitTH-style test reports by
    splitting modules into separate XML files

    Args:
        in_file_nose (:obj:`str`): path to nose-style test report
        out_file_unitth (:obj:`str`): path to save UnitTH-style test reports
    """
    suites = Converter.read_nose(in_file_nose)
    Converter.write_unitth(suites, out_dir_unitth)
python
train
https://github.com/KarrLab/nose2unitth/blob/c37f10a8b74b291b3a12669113f4404b01b97586/nose2unitth/core.py#L18-L26
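Usage is a single call per report (paths illustrative):

# Split one nose-style report into per-class UnitTH-style XML files:
Converter.run('nosetests.xml', 'unitth-reports')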
KarrLab/nose2unitth
nose2unitth/core.py
Converter.read_nose
def read_nose(in_file):
    """ Parse nose-style test reports into a `dict`

    Args:
        in_file (:obj:`str`): path to nose-style test report

    Returns:
        :obj:`dict`: dictionary of test suites
    """
    suites = {}
    doc_xml = minidom.parse(in_file)
    suite_xml = doc_xml.getElementsByTagName("testsuite")[0]
    for case_xml in suite_xml.getElementsByTagName('testcase'):
        classname = case_xml.getAttribute('classname')
        if classname not in suites:
            suites[classname] = []
        case = {
            'name': case_xml.getAttribute('name'),
            'time': float(case_xml.getAttribute('time')),
        }

        skipped_xml = case_xml.getElementsByTagName('skipped')
        if skipped_xml:
            if skipped_xml[0].hasAttribute('type'):
                type = skipped_xml[0].getAttribute('type')
            else:
                type = ''
            case['skipped'] = {
                'type': type,
                'message': skipped_xml[0].getAttribute('message'),
                'text': "".join([child.nodeValue
                                 for child in skipped_xml[0].childNodes]),
            }

        failure_xml = case_xml.getElementsByTagName('failure')
        if failure_xml:
            if failure_xml[0].hasAttribute('type'):
                type = failure_xml[0].getAttribute('type')
            else:
                type = ''
            case['failure'] = {
                'type': type,
                'message': failure_xml[0].getAttribute('message'),
                'text': "".join([child.nodeValue
                                 for child in failure_xml[0].childNodes]),
            }

        error_xml = case_xml.getElementsByTagName('error')
        if error_xml:
            if error_xml[0].hasAttribute('type'):
                type = error_xml[0].getAttribute('type')
            else:
                type = ''
            case['error'] = {
                'type': type,
                'message': error_xml[0].getAttribute('message'),
                'text': "".join([child.nodeValue
                                 for child in error_xml[0].childNodes]),
            }

        suites[classname].append(case)
    return suites
python
train
https://github.com/KarrLab/nose2unitth/blob/c37f10a8b74b291b3a12669113f4404b01b97586/nose2unitth/core.py#L29-L88
KarrLab/nose2unitth
nose2unitth/core.py
Converter.write_unitth
def write_unitth(suites, out_dir):
    """ Write UnitTH-style test reports

    Args:
        suites (:obj:`dict`): dictionary of test suites
        out_dir (:obj:`str`): path to save UnitTH-style test reports
    """
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    for classname, cases in suites.items():
        doc_xml = minidom.Document()

        suite_xml = doc_xml.createElement('testsuite')
        suite_xml.setAttribute('name', classname)
        suite_xml.setAttribute('tests', str(len(cases)))
        suite_xml.setAttribute('errors', str(sum('error' in case for case in cases)))
        suite_xml.setAttribute('failures', str(sum('failure' in case for case in cases)))
        suite_xml.setAttribute('skipped', str(sum('skipped' in case for case in cases)))
        suite_xml.setAttribute('time', '{:.3f}'.format(sum(case['time'] for case in cases)))
        doc_xml.appendChild(suite_xml)

        for case in cases:
            case_xml = doc_xml.createElement('testcase')
            case_xml.setAttribute('classname', classname)
            case_xml.setAttribute('name', case['name'])
            case_xml.setAttribute('time', '{:.3f}'.format(case['time']))
            suite_xml.appendChild(case_xml)

            if 'skipped' in case:
                skipped_xml = doc_xml.createElement('skipped')
                skipped_xml.setAttribute('type', case['skipped']['type'])
                skipped_xml.setAttribute('message', case['skipped']['message'])
                case_xml.appendChild(skipped_xml)
                skipped_text_xml = doc_xml.createCDATASection(case['skipped']['text'])
                skipped_xml.appendChild(skipped_text_xml)

            if 'failure' in case:
                failure_xml = doc_xml.createElement('failure')
                failure_xml.setAttribute('type', case['failure']['type'])
                failure_xml.setAttribute('message', case['failure']['message'])
                case_xml.appendChild(failure_xml)
                failure_text_xml = doc_xml.createCDATASection(case['failure']['text'])
                failure_xml.appendChild(failure_text_xml)

            if 'error' in case:
                error_xml = doc_xml.createElement('error')
                error_xml.setAttribute('type', case['error']['type'])
                error_xml.setAttribute('message', case['error']['message'])
                case_xml.appendChild(error_xml)
                error_text_xml = doc_xml.createCDATASection(case['error']['text'])
                error_xml.appendChild(error_text_xml)

        with open(os.path.join(out_dir, '{}.xml'.format(classname)), 'w') as output:
            doc_xml.writexml(output, encoding='utf-8', addindent='', newl="")

        doc_xml.unlink()
python
train
https://github.com/KarrLab/nose2unitth/blob/c37f10a8b74b291b3a12669113f4404b01b97586/nose2unitth/core.py#L91-L149
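read_nose and write_unitth are the two halves of the run method shown earlier; calling them separately lets the parsed suites be inspected or filtered first (paths illustrative):

suites = Converter.read_nose('nosetests.xml')    # {classname: [case dicts]}
failing = {cls: cases for cls, cases in suites.items()
           if any('failure' in c or 'error' in c for c in cases)}
Converter.write_unitth(failing, 'unitth-failures')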
davenquinn/Attitude
attitude/display/plot/__init__.py
error_asymptotes
def error_asymptotes(pca,**kwargs):
    """
    Plots asymptotic error bounds for
    hyperbola on a stereonet.
    """
    ax = kwargs.pop("ax",current_axes())

    lon,lat = pca.plane_errors('upper', n=1000)
    ax.plot(lon,lat,'-')

    lon,lat = pca.plane_errors('lower', n=1000)
    ax.plot(lon,lat,'-')

    ax.plane(*pca.strike_dip())
python
train
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/display/plot/__init__.py#L150-L163
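The ax.plane(...) call above implies a stereonet-aware axes object; the following is a hypothetical driver assuming the mplstereonet projection (an assumption, since the record only shows that current_axes() supplies a default), given a fitted orientation object pca from Attitude's PCA routines:

import matplotlib.pyplot as plt
import mplstereonet  # assumed backend; importing registers the 'stereonet' projection

fig, ax = plt.subplots(subplot_kw=dict(projection='stereonet'))
error_asymptotes(pca, ax=ax)  # pca: a fitted Attitude orientation object, not built here
plt.show()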
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/core_query.py
get_rows_fieldnames_from_raw_sql
def get_rows_fieldnames_from_raw_sql( session: Union[Session, Engine, Connection], sql: str) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]: """ Returns results and column names from a query. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object sql: raw SQL to execute Returns: ``(rows, fieldnames)`` where ``rows`` is the usual set of results and ``fieldnames`` are the names of the result columns/fields. """ result = session.execute(sql) # type: ResultProxy fieldnames = result.keys() rows = result.fetchall() return rows, fieldnames
python
def get_rows_fieldnames_from_raw_sql( session: Union[Session, Engine, Connection], sql: str) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]: """ Returns results and column names from a query. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object sql: raw SQL to execute Returns: ``(rows, fieldnames)`` where ``rows`` is the usual set of results and ``fieldnames`` are the names of the result columns/fields. """ result = session.execute(sql) # type: ResultProxy fieldnames = result.keys() rows = result.fetchall() return rows, fieldnames
[ "def", "get_rows_fieldnames_from_raw_sql", "(", "session", ":", "Union", "[", "Session", ",", "Engine", ",", "Connection", "]", ",", "sql", ":", "str", ")", "->", "Tuple", "[", "Sequence", "[", "Sequence", "[", "Any", "]", "]", ",", "Sequence", "[", "str", "]", "]", ":", "result", "=", "session", ".", "execute", "(", "sql", ")", "# type: ResultProxy", "fieldnames", "=", "result", ".", "keys", "(", ")", "rows", "=", "result", ".", "fetchall", "(", ")", "return", "rows", ",", "fieldnames" ]
Returns results and column names from a query. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object sql: raw SQL to execute Returns: ``(rows, fieldnames)`` where ``rows`` is the usual set of results and ``fieldnames`` are the names of the result columns/fields.
[ "Returns", "results", "and", "column", "names", "from", "a", "query", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/core_query.py#L51-L70
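A quick sketch of calling this helper against an in-memory SQLite engine, assuming SQLAlchemy 1.x semantics (where engines accept raw SQL strings directly, matching the session.execute(sql) call in the record; table and values are illustrative):

from sqlalchemy import create_engine

engine = create_engine('sqlite://')
engine.execute('CREATE TABLE person (id INTEGER, name TEXT)')
engine.execute("INSERT INTO person VALUES (1, 'Alice'), (2, 'Bob')")

rows, fieldnames = get_rows_fieldnames_from_raw_sql(
    engine, 'SELECT id, name FROM person')
# fieldnames == ['id', 'name']; rows == [(1, 'Alice'), (2, 'Bob')]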
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/core_query.py
count_star
def count_star(session: Union[Session, Engine, Connection], tablename: str, *criteria: Any) -> int: """ Returns the result of ``COUNT(*)`` from the specified table (with additional ``WHERE`` criteria if desired). Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object tablename: name of the table criteria: optional SQLAlchemy "where" criteria Returns: a scalar """ # works if you pass a connection or a session or an engine; all have # the execute() method query = select([func.count()]).select_from(table(tablename)) for criterion in criteria: query = query.where(criterion) return session.execute(query).scalar()
python
def count_star(session: Union[Session, Engine, Connection], tablename: str, *criteria: Any) -> int: """ Returns the result of ``COUNT(*)`` from the specified table (with additional ``WHERE`` criteria if desired). Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object tablename: name of the table criteria: optional SQLAlchemy "where" criteria Returns: a scalar """ # works if you pass a connection or a session or an engine; all have # the execute() method query = select([func.count()]).select_from(table(tablename)) for criterion in criteria: query = query.where(criterion) return session.execute(query).scalar()
[ "def", "count_star", "(", "session", ":", "Union", "[", "Session", ",", "Engine", ",", "Connection", "]", ",", "tablename", ":", "str", ",", "*", "criteria", ":", "Any", ")", "->", "int", ":", "# works if you pass a connection or a session or an engine; all have", "# the execute() method", "query", "=", "select", "(", "[", "func", ".", "count", "(", ")", "]", ")", ".", "select_from", "(", "table", "(", "tablename", ")", ")", "for", "criterion", "in", "criteria", ":", "query", "=", "query", ".", "where", "(", "criterion", ")", "return", "session", ".", "execute", "(", "query", ")", ".", "scalar", "(", ")" ]
Returns the result of ``COUNT(*)`` from the specified table (with additional ``WHERE`` criteria if desired). Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object tablename: name of the table criteria: optional SQLAlchemy "where" criteria Returns: a scalar
[ "Returns", "the", "result", "of", "COUNT", "(", "*", ")", "from", "the", "specified", "table", "(", "with", "additional", "WHERE", "criteria", "if", "desired", ")", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/core_query.py#L78-L99
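The *criteria parameter takes SQLAlchemy column expressions, so a filtered count looks like the sketch below (table and column names are illustrative; session is any Session/Engine/Connection as above):

from sqlalchemy import column

total = count_star(session, 'person')                               # SELECT count(*) FROM person
alices = count_star(session, 'person', column('name') == 'Alice')   # ... WHERE name = 'Alice'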
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/core_query.py
exists_in_table
def exists_in_table(session: Session, table_: Table, *criteria: Any) -> bool: """ Implements an efficient way of detecting if a record or records exist; should be faster than ``COUNT(*)`` in some circumstances. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object table_: SQLAlchemy :class:`Table` object criteria: optional SQLAlchemy "where" criteria Returns: a boolean Prototypical use: .. code-block:: python return exists_in_table(session, table, column(fieldname1) == value1, column(fieldname2) == value2) """ exists_clause = exists().select_from(table_) # ... EXISTS (SELECT * FROM tablename) for criterion in criteria: exists_clause = exists_clause.where(criterion) # ... EXISTS (SELECT * FROM tablename WHERE ...) if session.get_bind().dialect.name == SqlaDialectName.MSSQL: query = select([literal(True)]).where(exists_clause) # ... SELECT 1 WHERE EXISTS (SELECT * FROM tablename WHERE ...) else: query = select([exists_clause]) # ... SELECT EXISTS (SELECT * FROM tablename WHERE ...) result = session.execute(query).scalar() return bool(result)
python
def exists_in_table(session: Session, table_: Table, *criteria: Any) -> bool: """ Implements an efficient way of detecting if a record or records exist; should be faster than ``COUNT(*)`` in some circumstances. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object table_: SQLAlchemy :class:`Table` object criteria: optional SQLAlchemy "where" criteria Returns: a boolean Prototypical use: .. code-block:: python return exists_in_table(session, table, column(fieldname1) == value1, column(fieldname2) == value2) """ exists_clause = exists().select_from(table_) # ... EXISTS (SELECT * FROM tablename) for criterion in criteria: exists_clause = exists_clause.where(criterion) # ... EXISTS (SELECT * FROM tablename WHERE ...) if session.get_bind().dialect.name == SqlaDialectName.MSSQL: query = select([literal(True)]).where(exists_clause) # ... SELECT 1 WHERE EXISTS (SELECT * FROM tablename WHERE ...) else: query = select([exists_clause]) # ... SELECT EXISTS (SELECT * FROM tablename WHERE ...) result = session.execute(query).scalar() return bool(result)
[ "def", "exists_in_table", "(", "session", ":", "Session", ",", "table_", ":", "Table", ",", "*", "criteria", ":", "Any", ")", "->", "bool", ":", "exists_clause", "=", "exists", "(", ")", ".", "select_from", "(", "table_", ")", "# ... EXISTS (SELECT * FROM tablename)", "for", "criterion", "in", "criteria", ":", "exists_clause", "=", "exists_clause", ".", "where", "(", "criterion", ")", "# ... EXISTS (SELECT * FROM tablename WHERE ...)", "if", "session", ".", "get_bind", "(", ")", ".", "dialect", ".", "name", "==", "SqlaDialectName", ".", "MSSQL", ":", "query", "=", "select", "(", "[", "literal", "(", "True", ")", "]", ")", ".", "where", "(", "exists_clause", ")", "# ... SELECT 1 WHERE EXISTS (SELECT * FROM tablename WHERE ...)", "else", ":", "query", "=", "select", "(", "[", "exists_clause", "]", ")", "# ... SELECT EXISTS (SELECT * FROM tablename WHERE ...)", "result", "=", "session", ".", "execute", "(", "query", ")", ".", "scalar", "(", ")", "return", "bool", "(", "result", ")" ]
Implements an efficient way of detecting if a record or records exist; should be faster than ``COUNT(*)`` in some circumstances. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object table_: SQLAlchemy :class:`Table` object criteria: optional SQLAlchemy "where" criteria Returns: a boolean Prototypical use: .. code-block:: python return exists_in_table(session, table, column(fieldname1) == value1, column(fieldname2) == value2)
[ "Implements", "an", "efficient", "way", "of", "detecting", "if", "a", "record", "or", "records", "exist", ";", "should", "be", "faster", "than", "COUNT", "(", "*", ")", "in", "some", "circumstances", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/core_query.py#L139-L176
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/core_query.py
exists_plain
def exists_plain(session: Session, tablename: str, *criteria: Any) -> bool: """ Implements an efficient way of detecting if a record or records exist; should be faster than COUNT(*) in some circumstances. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object tablename: name of the table criteria: optional SQLAlchemy "where" criteria Returns: a boolean Prototypical use: .. code-block:: python return exists_plain(config.destdb.session, dest_table_name, column(fieldname1) == value1, column(fieldname2) == value2) """ return exists_in_table(session, table(tablename), *criteria)
python
def exists_plain(session: Session, tablename: str, *criteria: Any) -> bool: """ Implements an efficient way of detecting if a record or records exist; should be faster than COUNT(*) in some circumstances. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object tablename: name of the table criteria: optional SQLAlchemy "where" criteria Returns: a boolean Prototypical use: .. code-block:: python return exists_plain(config.destdb.session, dest_table_name, column(fieldname1) == value1, column(fieldname2) == value2) """ return exists_in_table(session, table(tablename), *criteria)
[ "def", "exists_plain", "(", "session", ":", "Session", ",", "tablename", ":", "str", ",", "*", "criteria", ":", "Any", ")", "->", "bool", ":", "return", "exists_in_table", "(", "session", ",", "table", "(", "tablename", ")", ",", "*", "criteria", ")" ]
Implements an efficient way of detecting if a record or records exist; should be faster than COUNT(*) in some circumstances. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object tablename: name of the table criteria: optional SQLAlchemy "where" criteria Returns: a boolean Prototypical use: .. code-block:: python return exists_plain(config.destdb.session, dest_table_name, column(fieldname1) == value1, column(fieldname2) == value2)
[ "Implements", "an", "efficient", "way", "of", "detecting", "if", "a", "record", "or", "records", "exist", ";", "should", "be", "faster", "than", "COUNT", "(", "*", ")", "in", "some", "circumstances", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/core_query.py#L179-L202
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/core_query.py
fetch_all_first_values
def fetch_all_first_values(session: Session, select_statement: Select) -> List[Any]: """ Returns a list of the first values in each row returned by a ``SELECT`` query. A Core version of this sort of thing: http://xion.io/post/code/sqlalchemy-query-values.html Args: session: SQLAlchemy :class:`Session` object select_statement: SQLAlchemy :class:`Select` object Returns: a list of the first value of each result row """ rows = session.execute(select_statement) # type: ResultProxy try: return [row[0] for row in rows] except ValueError as e: raise MultipleResultsFound(str(e))
python
def fetch_all_first_values(session: Session, select_statement: Select) -> List[Any]: """ Returns a list of the first values in each row returned by a ``SELECT`` query. A Core version of this sort of thing: http://xion.io/post/code/sqlalchemy-query-values.html Args: session: SQLAlchemy :class:`Session` object select_statement: SQLAlchemy :class:`Select` object Returns: a list of the first value of each result row """ rows = session.execute(select_statement) # type: ResultProxy try: return [row[0] for row in rows] except ValueError as e: raise MultipleResultsFound(str(e))
[ "def", "fetch_all_first_values", "(", "session", ":", "Session", ",", "select_statement", ":", "Select", ")", "->", "List", "[", "Any", "]", ":", "rows", "=", "session", ".", "execute", "(", "select_statement", ")", "# type: ResultProxy", "try", ":", "return", "[", "row", "[", "0", "]", "for", "row", "in", "rows", "]", "except", "ValueError", "as", "e", ":", "raise", "MultipleResultsFound", "(", "str", "(", "e", ")", ")" ]
Returns a list of the first values in each row returned by a ``SELECT`` query. A Core version of this sort of thing: http://xion.io/post/code/sqlalchemy-query-values.html Args: session: SQLAlchemy :class:`Session` object select_statement: SQLAlchemy :class:`Select` object Returns: a list of the first value of each result row
[ "Returns", "a", "list", "of", "the", "first", "values", "in", "each", "row", "returned", "by", "a", "SELECT", "query", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/core_query.py#L209-L230
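A short sketch of the Core pattern the docstring links to, reusing the 1.x-style select/table/column constructs the surrounding records already employ (names are illustrative):

from sqlalchemy import column, select, table

stmt = select([column('name')]).select_from(table('person'))
names = fetch_all_first_values(session, stmt)  # e.g. ['Alice', 'Bob']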
davenquinn/Attitude
attitude/display/parametric.py
hyperbola
def hyperbola(axes, **kwargs): """ Plots a hyperbola that opens along y axis """ opens_up = kwargs.pop('opens_up', True) center = kwargs.pop('center', defaults['center']) th = N.linspace(0,2*N.pi,kwargs.pop('n', 500)) vals = [N.tan(th),1/N.cos(th)] if not opens_up: vals = vals[::-1] x = axes[0]*vals[0]+center[0] y = axes[1]*vals[1]+center[1] extrema = [N.argmin(x),N.argmax(x)] def remove_asymptotes(arr): arr[extrema] = N.nan return arr xy = tuple(remove_asymptotes(i) for i in (x,y)) return xy
python
def hyperbola(axes, **kwargs): """ Plots a hyperbola that opens along y axis """ opens_up = kwargs.pop('opens_up', True) center = kwargs.pop('center', defaults['center']) th = N.linspace(0,2*N.pi,kwargs.pop('n', 500)) vals = [N.tan(th),1/N.cos(th)] if not opens_up: vals = vals[::-1] x = axes[0]*vals[0]+center[0] y = axes[1]*vals[1]+center[1] extrema = [N.argmin(x),N.argmax(x)] def remove_asymptotes(arr): arr[extrema] = N.nan return arr xy = tuple(remove_asymptotes(i) for i in (x,y)) return xy
[ "def", "hyperbola", "(", "axes", ",", "*", "*", "kwargs", ")", ":", "opens_up", "=", "kwargs", ".", "pop", "(", "'opens_up'", ",", "True", ")", "center", "=", "kwargs", ".", "pop", "(", "'center'", ",", "defaults", "[", "'center'", "]", ")", "th", "=", "N", ".", "linspace", "(", "0", ",", "2", "*", "N", ".", "pi", ",", "kwargs", ".", "pop", "(", "'n'", ",", "500", ")", ")", "vals", "=", "[", "N", ".", "tan", "(", "th", ")", ",", "1", "/", "N", ".", "cos", "(", "th", ")", "]", "if", "not", "opens_up", ":", "vals", "=", "vals", "[", ":", ":", "-", "1", "]", "x", "=", "axes", "[", "0", "]", "*", "vals", "[", "0", "]", "+", "center", "[", "0", "]", "y", "=", "axes", "[", "1", "]", "*", "vals", "[", "1", "]", "+", "center", "[", "1", "]", "extrema", "=", "[", "N", ".", "argmin", "(", "x", ")", ",", "N", ".", "argmax", "(", "x", ")", "]", "def", "remove_asymptotes", "(", "arr", ")", ":", "arr", "[", "extrema", "]", "=", "N", ".", "nan", "return", "arr", "xy", "=", "tuple", "(", "remove_asymptotes", "(", "i", ")", "for", "i", "in", "(", "x", ",", "y", ")", ")", "return", "xy" ]
Plots a hyperbola that opens along y axis
[ "Plots", "a", "hyperbola", "that", "opens", "along", "y", "axis" ]
train
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/display/parametric.py#L18-L41
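Despite the docstring's wording, hyperbola returns coordinate arrays rather than drawing anything; plotting is the caller's job. A sketch (center is passed explicitly because its default comes from a module-level defaults dict not shown in this record):

import matplotlib.pyplot as plt

x, y = hyperbola((1.0, 2.0), center=(0.0, 0.0), n=500)
plt.plot(x, y)  # the NaNs inserted at the x-extrema break the curve near its asymptotes
plt.show()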
davenquinn/Attitude
attitude/display/parametric.py
__reverse_ellipse
def __reverse_ellipse(axes, scalar=1): """ This method doesn't work as well """ ax1 = axes.copy()[::-1]*scalar center = ax1[1]*N.sqrt(2)*scalar return ax1, center
python
def __reverse_ellipse(axes, scalar=1): """ This method doesn't work as well """ ax1 = axes.copy()[::-1]*scalar center = ax1[1]*N.sqrt(2)*scalar return ax1, center
[ "def", "__reverse_ellipse", "(", "axes", ",", "scalar", "=", "1", ")", ":", "ax1", "=", "axes", ".", "copy", "(", ")", "[", ":", ":", "-", "1", "]", "*", "scalar", "center", "=", "ax1", "[", "1", "]", "*", "N", ".", "sqrt", "(", "2", ")", "*", "scalar", "return", "ax1", ",", "center" ]
This method doesn't work as well
[ "This", "method", "doesn", "t", "work", "as", "well" ]
train
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/display/parametric.py#L63-L69
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/sqlserver.py
if_sqlserver_disable_constraints
def if_sqlserver_disable_constraints(session: SqlASession, tablename: str) -> None: """ If we're running under SQL Server, disable constraint checking for the specified table while the resource is held. Args: session: SQLAlchemy :class:`Session` tablename: table name See https://stackoverflow.com/questions/123558/sql-server-2005-t-sql-to-temporarily-disable-a-trigger """ # noqa engine = get_engine_from_session(session) if is_sqlserver(engine): quoted_tablename = quote_identifier(tablename, engine) session.execute( "ALTER TABLE {} NOCHECK CONSTRAINT all".format( quoted_tablename)) yield session.execute( "ALTER TABLE {} WITH CHECK CHECK CONSTRAINT all".format( quoted_tablename)) else: yield
python
def if_sqlserver_disable_constraints(session: SqlASession, tablename: str) -> None: """ If we're running under SQL Server, disable constraint checking for the specified table while the resource is held. Args: session: SQLAlchemy :class:`Session` tablename: table name See https://stackoverflow.com/questions/123558/sql-server-2005-t-sql-to-temporarily-disable-a-trigger """ # noqa engine = get_engine_from_session(session) if is_sqlserver(engine): quoted_tablename = quote_identifier(tablename, engine) session.execute( "ALTER TABLE {} NOCHECK CONSTRAINT all".format( quoted_tablename)) yield session.execute( "ALTER TABLE {} WITH CHECK CHECK CONSTRAINT all".format( quoted_tablename)) else: yield
[ "def", "if_sqlserver_disable_constraints", "(", "session", ":", "SqlASession", ",", "tablename", ":", "str", ")", "->", "None", ":", "# noqa", "engine", "=", "get_engine_from_session", "(", "session", ")", "if", "is_sqlserver", "(", "engine", ")", ":", "quoted_tablename", "=", "quote_identifier", "(", "tablename", ",", "engine", ")", "session", ".", "execute", "(", "\"ALTER TABLE {} NOCHECK CONSTRAINT all\"", ".", "format", "(", "quoted_tablename", ")", ")", "yield", "session", ".", "execute", "(", "\"ALTER TABLE {} WITH CHECK CHECK CONSTRAINT all\"", ".", "format", "(", "quoted_tablename", ")", ")", "else", ":", "yield" ]
If we're running under SQL Server, disable constraint checking for the specified table while the resource is held. Args: session: SQLAlchemy :class:`Session` tablename: table name See https://stackoverflow.com/questions/123558/sql-server-2005-t-sql-to-temporarily-disable-a-trigger
[ "If", "we", "re", "running", "under", "SQL", "Server", "disable", "constraint", "checking", "for", "the", "specified", "table", "while", "the", "resource", "is", "held", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/sqlserver.py#L43-L67
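The bare yield implies this function is meant to run as a context manager (presumably decorated with @contextmanager, which the record does not show); on that assumption, usage would look like the sketch below. The combined triggers-and-constraints variant in the next record is used the same way.

with if_sqlserver_disable_constraints(session, 'patient'):  # table name illustrative
    bulk_load(session)  # hypothetical loader; constraints are re-enabled on exit
    # on non-SQL-Server backends the block runs with no ALTER TABLE at all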
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/sqlserver.py
if_sqlserver_disable_constraints_triggers
def if_sqlserver_disable_constraints_triggers(session: SqlASession, tablename: str) -> None: """ If we're running under SQL Server, disable triggers AND constraints for the specified table while the resource is held. Args: session: SQLAlchemy :class:`Session` tablename: table name """ with if_sqlserver_disable_constraints(session, tablename): with if_sqlserver_disable_triggers(session, tablename): yield
python
def if_sqlserver_disable_constraints_triggers(session: SqlASession, tablename: str) -> None: """ If we're running under SQL Server, disable triggers AND constraints for the specified table while the resource is held. Args: session: SQLAlchemy :class:`Session` tablename: table name """ with if_sqlserver_disable_constraints(session, tablename): with if_sqlserver_disable_triggers(session, tablename): yield
[ "def", "if_sqlserver_disable_constraints_triggers", "(", "session", ":", "SqlASession", ",", "tablename", ":", "str", ")", "->", "None", ":", "with", "if_sqlserver_disable_constraints", "(", "session", ",", "tablename", ")", ":", "with", "if_sqlserver_disable_triggers", "(", "session", ",", "tablename", ")", ":", "yield" ]
If we're running under SQL Server, disable triggers AND constraints for the specified table while the resource is held. Args: session: SQLAlchemy :class:`Session` tablename: table name
[ "If", "we", "re", "running", "under", "SQL", "Server", "disable", "triggers", "AND", "constraints", "for", "the", "specified", "table", "while", "the", "resource", "is", "held", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/sqlserver.py#L97-L109
avihad/twistes
twistes/bulk_utils.py
ActionParser.expand_action
def expand_action(data): """ From one document or action definition passed in by the user extract the action/data lines needed for elasticsearch's :meth:`~elasticsearch.Elasticsearch.bulk` api. :return es format to bulk doc """ # when given a string, assume user wants to index raw json if isinstance(data, string_types): return '{"index": {}}', data # make sure we don't alter the action data = data.copy() op_type = data.pop(EsBulk.OP_TYPE, EsBulk.INDEX) action = ActionParser._get_relevant_action_params(data, op_type) # no data payload for delete if op_type == EsBulk.DELETE: return action, None return action, data.get(EsDocProperties.SOURCE, data)
python
def expand_action(data): """ From one document or action definition passed in by the user extract the action/data lines needed for elasticsearch's :meth:`~elasticsearch.Elasticsearch.bulk` api. :return es format to bulk doc """ # when given a string, assume user wants to index raw json if isinstance(data, string_types): return '{"index": {}}', data # make sure we don't alter the action data = data.copy() op_type = data.pop(EsBulk.OP_TYPE, EsBulk.INDEX) action = ActionParser._get_relevant_action_params(data, op_type) # no data payload for delete if op_type == EsBulk.DELETE: return action, None return action, data.get(EsDocProperties.SOURCE, data)
[ "def", "expand_action", "(", "data", ")", ":", "# when given a string, assume user wants to index raw json", "if", "isinstance", "(", "data", ",", "string_types", ")", ":", "return", "'{\"index\": {}}'", ",", "data", "# make sure we don't alter the action", "data", "=", "data", ".", "copy", "(", ")", "op_type", "=", "data", ".", "pop", "(", "EsBulk", ".", "OP_TYPE", ",", "EsBulk", ".", "INDEX", ")", "action", "=", "ActionParser", ".", "_get_relevant_action_params", "(", "data", ",", "op_type", ")", "# no data payload for delete", "if", "op_type", "==", "EsBulk", ".", "DELETE", ":", "return", "action", ",", "None", "return", "action", ",", "data", ".", "get", "(", "EsDocProperties", ".", "SOURCE", ",", "data", ")" ]
From one document or action definition passed in by the user extract the action/data lines needed for elasticsearch's :meth:`~elasticsearch.Elasticsearch.bulk` api. :return es format to bulk doc
[ "From", "one", "document", "or", "action", "definition", "passed", "in", "by", "the", "user", "extract", "the", "action", "/", "data", "lines", "needed", "for", "elasticsearch", "s", ":", "meth", ":", "~elasticsearch", ".", "Elasticsearch", ".", "bulk", "api", ".", ":", "return", "es", "format", "to", "bulk", "doc" ]
train
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/bulk_utils.py#L17-L38
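The string branch of expand_action is fully determined by the record above, so this example sticks to it (the dict branch depends on EsBulk constants not shown here):

action, data = ActionParser.expand_action('{"name": "Alice"}')
# action == '{"index": {}}'
# data   == '{"name": "Alice"}'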
avihad/twistes
twistes/bulk_utils.py
BulkUtility.bulk
def bulk(self, actions, stats_only=False, verbose=False, **kwargs): """ Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides a more human friendly interface - it consumes an iterator of actions and sends them to elasticsearch in chunks. It returns a tuple with summary information - number of successfully executed actions and either list of errors or number of errors if `stats_only` is set to `True`. See :func:`~elasticsearch.helpers.streaming_bulk` for more accepted parameters :arg actions: iterator containing the actions :arg stats_only: if `True` only report number of successful/failed operations instead of just number of successful and a list of error responses :arg verbose: return verbose data: (inserted, errors) Any additional keyword arguments will be passed to :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute the operation. """ inserted = [] errors = [] all = [] for deferred_bulk in self.streaming_bulk(actions, **kwargs): bulk_results = yield deferred_bulk for ok, item in bulk_results: # go through request-response pairs and detect failures all.append((ok, item)) l = inserted if ok else errors l.append(item) if verbose: returnValue(all) if stats_only: returnValue((len(inserted), len(errors))) # here for backwards compatibility returnValue((len(inserted), errors))
python
def bulk(self, actions, stats_only=False, verbose=False, **kwargs): """ Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides a more human friendly interface - it consumes an iterator of actions and sends them to elasticsearch in chunks. It returns a tuple with summary information - number of successfully executed actions and either list of errors or number of errors if `stats_only` is set to `True`. See :func:`~elasticsearch.helpers.streaming_bulk` for more accepted parameters :arg actions: iterator containing the actions :arg stats_only: if `True` only report number of successful/failed operations instead of just number of successful and a list of error responses :arg verbose: return verbose data: (inserted, errors) Any additional keyword arguments will be passed to :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute the operation. """ inserted = [] errors = [] all = [] for deferred_bulk in self.streaming_bulk(actions, **kwargs): bulk_results = yield deferred_bulk for ok, item in bulk_results: # go through request-response pairs and detect failures all.append((ok, item)) l = inserted if ok else errors l.append(item) if verbose: returnValue(all) if stats_only: returnValue((len(inserted), len(errors))) # here for backwards compatibility returnValue((len(inserted), errors))
[ "def", "bulk", "(", "self", ",", "actions", ",", "stats_only", "=", "False", ",", "verbose", "=", "False", ",", "*", "*", "kwargs", ")", ":", "inserted", "=", "[", "]", "errors", "=", "[", "]", "all", "=", "[", "]", "for", "deferred_bulk", "in", "self", ".", "streaming_bulk", "(", "actions", ",", "*", "*", "kwargs", ")", ":", "bulk_results", "=", "yield", "deferred_bulk", "for", "ok", ",", "item", "in", "bulk_results", ":", "# go through request-response pairs and detect failures", "all", ".", "append", "(", "(", "ok", ",", "item", ")", ")", "l", "=", "inserted", "if", "ok", "else", "errors", "l", ".", "append", "(", "item", ")", "if", "verbose", ":", "returnValue", "(", "all", ")", "if", "stats_only", ":", "returnValue", "(", "(", "len", "(", "inserted", ")", ",", "len", "(", "errors", ")", ")", ")", "# here for backwards compatibility", "returnValue", "(", "(", "len", "(", "inserted", ")", ",", "errors", ")", ")" ]
Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides a more human friendly interface - it consumes an iterator of actions and sends them to elasticsearch in chunks. It returns a tuple with summary information - number of successfully executed actions and either list of errors or number of errors if `stats_only` is set to `True`. See :func:`~elasticsearch.helpers.streaming_bulk` for more accepted parameters :arg actions: iterator containing the actions :arg stats_only: if `True` only report number of successful/failed operations instead of just number of successful and a list of error responses :arg verbose: return verbose data: (inserted, errors) Any additional keyword arguments will be passed to :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute the operation.
[ "Helper", "for", "the", ":", "meth", ":", "~elasticsearch", ".", "Elasticsearch", ".", "bulk", "api", "that", "provides", "a", "more", "human", "friendly", "interface", "-", "it", "consumes", "an", "iterator", "of", "actions", "and", "sends", "them", "to", "elasticsearch", "in", "chunks", ".", "It", "returns", "a", "tuple", "with", "summary", "information", "-", "number", "of", "successfully", "executed", "actions", "and", "either", "list", "of", "errors", "or", "number", "of", "errors", "if", "stats_only", "is", "set", "to", "True", ".", "See", ":", "func", ":", "~elasticsearch", ".", "helpers", ".", "streaming_bulk", "for", "more", "accepted", "parameters", ":", "arg", "actions", ":", "iterator", "containing", "the", "actions", ":", "arg", "stats_only", ":", "if", "True", "only", "report", "number", "of", "successful", "/", "failed", "operations", "instead", "of", "just", "number", "of", "successful", "and", "a", "list", "of", "error", "responses", "Any", "additional", "keyword", "arguments", "will", "be", "passed", "to", ":", "arg", "verbose", ":", "return", "verbose", "data", ":", "(", "inserted", "errors", ")", ":", "func", ":", "~elasticsearch", ".", "helpers", ".", "streaming_bulk", "which", "is", "used", "to", "execute", "the", "operation", "." ]
train
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/bulk_utils.py#L56-L92
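The yield/returnValue pairing suggests bulk runs under Twisted's @inlineCallbacks; below is a hedged caller sketch on that assumption (docs is a list of actions, and bulk_utility is an instance of the class these methods belong to):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def index_all(bulk_utility, docs):
    inserted, errors = yield bulk_utility.bulk(docs)             # default: (count, error list)
    n_ok, n_bad = yield bulk_utility.bulk(docs, stats_only=True) # (n_ok, n_failed)
    returnValue((inserted, errors, n_ok, n_bad))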
avihad/twistes
twistes/bulk_utils.py
BulkUtility.streaming_bulk
def streaming_bulk(self, actions, chunk_size=500, max_chunk_bytes=100 * 1024 * 1024, raise_on_error=True, expand_action_callback=ActionParser.expand_action, raise_on_exception=True, **kwargs): """ Streaming bulk consumes actions from the iterable passed in and returns the results of all bulk data; :func:`~elasticsearch.helpers.bulk` is a wrapper around streaming bulk that returns summary information about the bulk operation once the entire input is consumed and sent. :arg actions: iterable containing the actions to be executed :arg chunk_size: number of docs in one chunk sent to es (default: 500) :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB) :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`) from the execution of the last chunk when some occur. By default we raise. :arg raise_on_exception: if ``False`` then don't propagate exceptions from call to ``bulk`` and just report the items that failed as failed. :arg expand_action_callback: callback executed on each action passed in, should return a tuple containing the action line and the data line (`None` if data line should be omitted). """ actions = list(map(expand_action_callback, actions)) for bulk_actions in self._chunk_actions(actions, chunk_size, max_chunk_bytes): yield self._process_bulk_chunk(bulk_actions, raise_on_exception, raise_on_error, **kwargs)
python
def streaming_bulk(self, actions, chunk_size=500, max_chunk_bytes=100 * 1024 * 1024, raise_on_error=True, expand_action_callback=ActionParser.expand_action, raise_on_exception=True, **kwargs): """ Streaming bulk consumes actions from the iterable passed in and returns the results of all bulk data; :func:`~elasticsearch.helpers.bulk` is a wrapper around streaming bulk that returns summary information about the bulk operation once the entire input is consumed and sent. :arg actions: iterable containing the actions to be executed :arg chunk_size: number of docs in one chunk sent to es (default: 500) :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB) :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`) from the execution of the last chunk when some occur. By default we raise. :arg raise_on_exception: if ``False`` then don't propagate exceptions from call to ``bulk`` and just report the items that failed as failed. :arg expand_action_callback: callback executed on each action passed in, should return a tuple containing the action line and the data line (`None` if data line should be omitted). """ actions = list(map(expand_action_callback, actions)) for bulk_actions in self._chunk_actions(actions, chunk_size, max_chunk_bytes): yield self._process_bulk_chunk(bulk_actions, raise_on_exception, raise_on_error, **kwargs)
[ "def", "streaming_bulk", "(", "self", ",", "actions", ",", "chunk_size", "=", "500", ",", "max_chunk_bytes", "=", "100", "*", "1024", "*", "1024", ",", "raise_on_error", "=", "True", ",", "expand_action_callback", "=", "ActionParser", ".", "expand_action", ",", "raise_on_exception", "=", "True", ",", "*", "*", "kwargs", ")", ":", "actions", "=", "list", "(", "map", "(", "expand_action_callback", ",", "actions", ")", ")", "for", "bulk_actions", "in", "self", ".", "_chunk_actions", "(", "actions", ",", "chunk_size", ",", "max_chunk_bytes", ")", ":", "yield", "self", ".", "_process_bulk_chunk", "(", "bulk_actions", ",", "raise_on_exception", ",", "raise_on_error", ",", "*", "*", "kwargs", ")" ]
Streaming bulk consumes actions from the iterable passed in and returns the results of all bulk data; :func:`~elasticsearch.helpers.bulk` is a wrapper around streaming bulk that returns summary information about the bulk operation once the entire input is consumed and sent. :arg actions: iterable containing the actions to be executed :arg chunk_size: number of docs in one chunk sent to es (default: 500) :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB) :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`) from the execution of the last chunk when some occur. By default we raise. :arg raise_on_exception: if ``False`` then don't propagate exceptions from call to ``bulk`` and just report the items that failed as failed. :arg expand_action_callback: callback executed on each action passed in, should return a tuple containing the action line and the data line (`None` if data line should be omitted).
[ "Streaming", "bulk", "consumes", "actions", "from", "the", "iterable", "passed", "in", "and", "return", "the", "results", "of", "all", "bulk", "data", ":", "func", ":", "~elasticsearch", ".", "helpers", ".", "bulk", "which", "is", "a", "wrapper", "around", "streaming", "bulk", "that", "returns", "summary", "information", "about", "the", "bulk", "operation", "once", "the", "entire", "input", "is", "consumed", "and", "sent", ".", ":", "arg", "actions", ":", "iterable", "containing", "the", "actions", "to", "be", "executed", ":", "arg", "chunk_size", ":", "number", "of", "docs", "in", "one", "chunk", "sent", "to", "es", "(", "default", ":", "500", ")", ":", "arg", "max_chunk_bytes", ":", "the", "maximum", "size", "of", "the", "request", "in", "bytes", "(", "default", ":", "100MB", ")", ":", "arg", "raise_on_error", ":", "raise", "BulkIndexError", "containing", "errors", "(", "as", ".", "errors", ")", "from", "the", "execution", "of", "the", "last", "chunk", "when", "some", "occur", ".", "By", "default", "we", "raise", ".", ":", "arg", "raise_on_exception", ":", "if", "False", "then", "don", "t", "propagate", "exceptions", "from", "call", "to", "bulk", "and", "just", "report", "the", "items", "that", "failed", "as", "failed", ".", ":", "arg", "expand_action_callback", ":", "callback", "executed", "on", "each", "action", "passed", "in", "should", "return", "a", "tuple", "containing", "the", "action", "line", "and", "the", "data", "line", "(", "None", "if", "data", "line", "should", "be", "omitted", ")", "." ]
train
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/bulk_utils.py#L94-L116
avihad/twistes
twistes/bulk_utils.py
BulkUtility._process_bulk_chunk
def _process_bulk_chunk(self, bulk_actions, raise_on_exception=True, raise_on_error=True, **kwargs): """ Send a bulk request to elasticsearch and process the output. """ # if raise on error is set, we need to collect errors per chunk before # raising them resp = None try: # send the actual request actions = "{}\n".format('\n'.join(bulk_actions)) resp = yield self.client.bulk(actions, **kwargs) except ConnectionTimeout as e: # default behavior - just propagate exception if raise_on_exception: raise self._handle_transport_error(bulk_actions, e, raise_on_error) returnValue([]) # go through request-response pairs and detect failures errors = [] results = [] for op_type, item in map(methodcaller('popitem'), resp['items']): ok = 200 <= item.get('status', 500) < 300 if not ok and raise_on_error: errors.append({op_type: item}) if ok or not errors: # if we are not just recording all errors to be able to raise # them all at once, yield items individually results.append((ok, {op_type: item})) if errors: msg_fmt = '{num} document(s) failed to index.' raise BulkIndexError(msg_fmt.format(num=len(errors)), errors) else: returnValue(results)
python
def _process_bulk_chunk(self, bulk_actions, raise_on_exception=True, raise_on_error=True, **kwargs): """ Send a bulk request to elasticsearch and process the output. """ # if raise on error is set, we need to collect errors per chunk before # raising them resp = None try: # send the actual request actions = "{}\n".format('\n'.join(bulk_actions)) resp = yield self.client.bulk(actions, **kwargs) except ConnectionTimeout as e: # default behavior - just propagate exception if raise_on_exception: raise self._handle_transport_error(bulk_actions, e, raise_on_error) returnValue([]) # go through request-response pairs and detect failures errors = [] results = [] for op_type, item in map(methodcaller('popitem'), resp['items']): ok = 200 <= item.get('status', 500) < 300 if not ok and raise_on_error: errors.append({op_type: item}) if ok or not errors: # if we are not just recording all errors to be able to raise # them all at once, yield items individually results.append((ok, {op_type: item})) if errors: msg_fmt = '{num} document(s) failed to index.' raise BulkIndexError(msg_fmt.format(num=len(errors)), errors) else: returnValue(results)
[ "def", "_process_bulk_chunk", "(", "self", ",", "bulk_actions", ",", "raise_on_exception", "=", "True", ",", "raise_on_error", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# if raise on error is set, we need to collect errors per chunk before", "# raising them", "resp", "=", "None", "try", ":", "# send the actual request", "actions", "=", "\"{}\\n\"", ".", "format", "(", "'\\n'", ".", "join", "(", "bulk_actions", ")", ")", "resp", "=", "yield", "self", ".", "client", ".", "bulk", "(", "actions", ",", "*", "*", "kwargs", ")", "except", "ConnectionTimeout", "as", "e", ":", "# default behavior - just propagate exception", "if", "raise_on_exception", ":", "raise", "self", ".", "_handle_transport_error", "(", "bulk_actions", ",", "e", ",", "raise_on_error", ")", "returnValue", "(", "[", "]", ")", "# go through request-response pairs and detect failures", "errors", "=", "[", "]", "results", "=", "[", "]", "for", "op_type", ",", "item", "in", "map", "(", "methodcaller", "(", "'popitem'", ")", ",", "resp", "[", "'items'", "]", ")", ":", "ok", "=", "200", "<=", "item", ".", "get", "(", "'status'", ",", "500", ")", "<", "300", "if", "not", "ok", "and", "raise_on_error", ":", "errors", ".", "append", "(", "{", "op_type", ":", "item", "}", ")", "if", "ok", "or", "not", "errors", ":", "# if we are not just recording all errors to be able to raise", "# them all at once, yield items individually", "results", ".", "append", "(", "(", "ok", ",", "{", "op_type", ":", "item", "}", ")", ")", "if", "errors", ":", "msg_fmt", "=", "'{num} document(s) failed to index.'", "raise", "BulkIndexError", "(", "msg_fmt", ".", "format", "(", "num", "=", "len", "(", "errors", ")", ")", ",", "errors", ")", "else", ":", "returnValue", "(", "results", ")" ]
Send a bulk request to elasticsearch and process the output.
[ "Send", "a", "bulk", "request", "to", "elasticsearch", "and", "process", "the", "output", "." ]
train
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/bulk_utils.py#L151-L188
oursky/norecaptcha
norecaptcha/captcha.py
displayhtml
def displayhtml(site_key, language='', theme='light', fallback=False, d_type='image', size='normal'): """ Gets the HTML to display for reCAPTCHA site_key -- The site key language -- The language code for the widget. theme -- The color theme of the widget. `light` or `dark` fallback -- Old version recaptcha. d_type -- The type of CAPTCHA to serve. `image` or `audio` size -- The size of the displayed CAPTCHA, 'normal' or 'compact' For more detail, refer to: - https://developers.google.com/recaptcha/docs/display """ return """ <script src="https://www.google.com/recaptcha/api.js?hl=%(LanguageCode)s&fallback=%(Fallback)s&" async="async" defer="defer"></script> <div class="g-recaptcha" data-sitekey="%(SiteKey)s" data-theme="%(Theme)s" data-type="%(Type)s" data-size="%(Size)s"> </div> <noscript> <div style="width: 302px; height: 480px;"> <div style="width: 302px; height: 422px; position: relative;"> <div style="width: 302px; height: 422px; position: relative;"> <iframe src="https://www.google.com/recaptcha/api/fallback?k=%(SiteKey)s&hl=%(LanguageCode)s" frameborder="0" scrolling="no" style="width: 302px; height:422px; border-style: none;"> </iframe> </div> <div style="border-style: none; bottom: 12px; left: 25px; margin: 0px; padding: 0px; right: 25px; background: #f9f9f9; border: 1px solid #c1c1c1; border-radius: 3px; height: 60px; width: 300px;"> <textarea id="g-recaptcha-response" name="g-recaptcha-response" class="g-recaptcha-response" style="width: 250px; height: 40px; border: 1px solid #c1c1c1; margin: 10px 25px; padding: 0px; resize: none;" value=""></textarea> </div> </div> </div> </noscript> """ % { 'LanguageCode': language, 'SiteKey': site_key, 'Theme': theme, 'Type': d_type, 'Size': size, 'Fallback': fallback, }
python
def displayhtml(site_key, language='', theme='light', fallback=False, d_type='image', size='normal'): """ Gets the HTML to display for reCAPTCHA site_key -- The site key language -- The language code for the widget. theme -- The color theme of the widget. `light` or `dark` fallback -- Old version recaptcha. d_type -- The type of CAPTCHA to serve. `image` or `audio` size -- The size of the displayed CAPTCHA, 'normal' or 'compact' For more detail, refer to: - https://developers.google.com/recaptcha/docs/display """ return """ <script src="https://www.google.com/recaptcha/api.js?hl=%(LanguageCode)s&fallback=%(Fallback)s&" async="async" defer="defer"></script> <div class="g-recaptcha" data-sitekey="%(SiteKey)s" data-theme="%(Theme)s" data-type="%(Type)s" data-size="%(Size)s"> </div> <noscript> <div style="width: 302px; height: 480px;"> <div style="width: 302px; height: 422px; position: relative;"> <div style="width: 302px; height: 422px; position: relative;"> <iframe src="https://www.google.com/recaptcha/api/fallback?k=%(SiteKey)s&hl=%(LanguageCode)s" frameborder="0" scrolling="no" style="width: 302px; height:422px; border-style: none;"> </iframe> </div> <div style="border-style: none; bottom: 12px; left: 25px; margin: 0px; padding: 0px; right: 25px; background: #f9f9f9; border: 1px solid #c1c1c1; border-radius: 3px; height: 60px; width: 300px;"> <textarea id="g-recaptcha-response" name="g-recaptcha-response" class="g-recaptcha-response" style="width: 250px; height: 40px; border: 1px solid #c1c1c1; margin: 10px 25px; padding: 0px; resize: none;" value=""></textarea> </div> </div> </div> </noscript> """ % { 'LanguageCode': language, 'SiteKey': site_key, 'Theme': theme, 'Type': d_type, 'Size': size, 'Fallback': fallback, }
[ "def", "displayhtml", "(", "site_key", ",", "language", "=", "''", ",", "theme", "=", "'light'", ",", "fallback", "=", "False", ",", "d_type", "=", "'image'", ",", "size", "=", "'normal'", ")", ":", "return", "\"\"\"\n<script\n src=\"https://www.google.com/recaptcha/api.js?hl=%(LanguageCode)s&fallback=%(Fallback)s&\"\n async=\"async\" defer=\"defer\"></script>\n<div class=\"g-recaptcha\"\n data-sitekey=\"%(SiteKey)s\"\n data-theme=\"%(Theme)s\"\n data-type=\"%(Type)s\"\n data-size=\"%(Size)s\">\n</div>\n<noscript>\n <div style=\"width: 302px; height: 480px;\">\n <div style=\"width: 302px; height: 422px; position: relative;\">\n <div style=\"width: 302px; height: 422px; position: relative;\">\n <iframe\n src=\"https://www.google.com/recaptcha/api/fallback?k=%(SiteKey)s&hl=%(LanguageCode)s\"\n frameborder=\"0\" scrolling=\"no\"\n style=\"width: 302px; height:422px; border-style: none;\">\n </iframe>\n </div>\n <div\n style=\"border-style: none; bottom: 12px; left: 25px;\n margin: 0px; padding: 0px; right: 25px;\n background: #f9f9f9; border: 1px solid #c1c1c1;\n border-radius: 3px; height: 60px; width: 300px;\">\n <textarea\n id=\"g-recaptcha-response\" name=\"g-recaptcha-response\"\n class=\"g-recaptcha-response\"\n style=\"width: 250px; height: 40px; border: 1px solid #c1c1c1;\n margin: 10px 25px; padding: 0px; resize: none;\"\n value=\"\"></textarea>\n </div>\n </div>\n </div>\n</noscript>\n\"\"\"", "%", "{", "'LanguageCode'", ":", "language", ",", "'SiteKey'", ":", "site_key", ",", "'Theme'", ":", "theme", ",", "'Type'", ":", "d_type", ",", "'Size'", ":", "size", ",", "'Fallback'", ":", "fallback", ",", "}" ]
Gets the HTML to display for reCAPTCHA site_key -- The site key language -- The language code for the widget. theme -- The color theme of the widget. `light` or `dark` fallback -- Old version recaptcha. d_type -- The type of CAPTCHA to serve. `image` or `audio` size -- The size of the displayed CAPTCHA, 'normal' or 'compact' For more detail, refer to: - https://developers.google.com/recaptcha/docs/display
[ "Gets", "the", "HTML", "to", "display", "for", "reCAPTCHA" ]
train
https://github.com/oursky/norecaptcha/blob/6323054bf42c1bf35c5d7a7def4729cb32518860/norecaptcha/captcha.py#L33-L95
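Rendering the widget is plain string interpolation, so a single call shows all the knobs; the site key below is a placeholder:

html = displayhtml(
    'YOUR_SITE_KEY',   # placeholder, not a real key
    language='en',
    theme='dark',
    d_type='image',
    size='compact',
)
# embed `html` in the form whose POST you later verify with submit()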
oursky/norecaptcha
norecaptcha/captcha.py
submit
def submit(recaptcha_response_field, secret_key, remoteip, verify_server=VERIFY_SERVER): """ Submits a reCAPTCHA request for verification. Returns RecaptchaResponse for the request recaptcha_response_field -- The value from the form secret_key -- your reCAPTCHA secret key remoteip -- the user's ip address """ if not (recaptcha_response_field and len(recaptcha_response_field)): return RecaptchaResponse( is_valid=False, error_code='incorrect-captcha-sol' ) def encode_if_necessary(s): if isinstance(s, unicode): return s.encode('utf-8') return s params = urllib.urlencode({ 'secret': encode_if_necessary(secret_key), 'remoteip': encode_if_necessary(remoteip), 'response': encode_if_necessary(recaptcha_response_field), }) request = Request( url="https://%s/recaptcha/api/siteverify" % verify_server, data=params, headers={ "Content-type": "application/x-www-form-urlencoded", "User-agent": "noReCAPTCHA Python" } ) httpresp = urlopen(request) return_values = json.loads(httpresp.read()) httpresp.close() return_code = return_values['success'] error_codes = return_values.get('error-codes', []) if return_code: return RecaptchaResponse(is_valid=True) else: return RecaptchaResponse( is_valid=False, error_code=error_codes )
python
def submit(recaptcha_response_field, secret_key, remoteip, verify_server=VERIFY_SERVER): """ Submits a reCAPTCHA request for verification. Returns RecaptchaResponse for the request recaptcha_response_field -- The value from the form secret_key -- your reCAPTCHA secret key remoteip -- the user's ip address """ if not (recaptcha_response_field and len(recaptcha_response_field)): return RecaptchaResponse( is_valid=False, error_code='incorrect-captcha-sol' ) def encode_if_necessary(s): if isinstance(s, unicode): return s.encode('utf-8') return s params = urllib.urlencode({ 'secret': encode_if_necessary(secret_key), 'remoteip': encode_if_necessary(remoteip), 'response': encode_if_necessary(recaptcha_response_field), }) request = Request( url="https://%s/recaptcha/api/siteverify" % verify_server, data=params, headers={ "Content-type": "application/x-www-form-urlencoded", "User-agent": "noReCAPTCHA Python" } ) httpresp = urlopen(request) return_values = json.loads(httpresp.read()) httpresp.close() return_code = return_values['success'] error_codes = return_values.get('error-codes', []) if return_code: return RecaptchaResponse(is_valid=True) else: return RecaptchaResponse( is_valid=False, error_code=error_codes )
[ "def", "submit", "(", "recaptcha_response_field", ",", "secret_key", ",", "remoteip", ",", "verify_server", "=", "VERIFY_SERVER", ")", ":", "if", "not", "(", "recaptcha_response_field", "and", "len", "(", "recaptcha_response_field", ")", ")", ":", "return", "RecaptchaResponse", "(", "is_valid", "=", "False", ",", "error_code", "=", "'incorrect-captcha-sol'", ")", "def", "encode_if_necessary", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "unicode", ")", ":", "return", "s", ".", "encode", "(", "'utf-8'", ")", "return", "s", "params", "=", "urllib", ".", "urlencode", "(", "{", "'secret'", ":", "encode_if_necessary", "(", "secret_key", ")", ",", "'remoteip'", ":", "encode_if_necessary", "(", "remoteip", ")", ",", "'response'", ":", "encode_if_necessary", "(", "recaptcha_response_field", ")", ",", "}", ")", "request", "=", "Request", "(", "url", "=", "\"https://%s/recaptcha/api/siteverify\"", "%", "verify_server", ",", "data", "=", "params", ",", "headers", "=", "{", "\"Content-type\"", ":", "\"application/x-www-form-urlencoded\"", ",", "\"User-agent\"", ":", "\"noReCAPTCHA Python\"", "}", ")", "httpresp", "=", "urlopen", "(", "request", ")", "return_values", "=", "json", ".", "loads", "(", "httpresp", ".", "read", "(", ")", ")", "httpresp", ".", "close", "(", ")", "return_code", "=", "return_values", "[", "'success'", "]", "error_codes", "=", "return_values", ".", "get", "(", "'error-codes'", ",", "[", "]", ")", "if", "return_code", ":", "return", "RecaptchaResponse", "(", "is_valid", "=", "True", ")", "else", ":", "return", "RecaptchaResponse", "(", "is_valid", "=", "False", ",", "error_code", "=", "error_codes", ")" ]
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse for the request recaptcha_response_field -- The value from the form secret_key -- your reCAPTCHA secret key remoteip -- the user's ip address
[ "Submits", "a", "reCAPTCHA", "request", "for", "verification", ".", "Returns", "RecaptchaResponse", "for", "the", "request" ]
train
https://github.com/oursky/norecaptcha/blob/6323054bf42c1bf35c5d7a7def4729cb32518860/norecaptcha/captcha.py#L98-L151
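A hedged server-side sketch around submit (the g-recaptcha-response field name matches the widget markup above; the secret key is a placeholder and the handlers are hypothetical):

result = submit(
    form.get('g-recaptcha-response'),  # value posted by the widget
    'YOUR_SECRET_KEY',                 # placeholder
    remote_ip,
)
if result.is_valid:
    accept_submission()                # hypothetical success path
else:
    log_failure(result.error_code)     # error codes returned by the API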
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/alembic_func.py
get_head_revision_from_alembic
def get_head_revision_from_alembic( alembic_config_filename: str, alembic_base_dir: str = None, version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> str: """ Ask Alembic what its head revision is (i.e. where the Python code would like the database to be at). Arguments: alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work. version_table: table name for Alembic versions """ if alembic_base_dir is None: alembic_base_dir = os.path.dirname(alembic_config_filename) os.chdir(alembic_base_dir) # so the directory in the config file works config = Config(alembic_config_filename) script = ScriptDirectory.from_config(config) with EnvironmentContext(config, script, version_table=version_table): return script.get_current_head()
python
def get_head_revision_from_alembic( alembic_config_filename: str, alembic_base_dir: str = None, version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> str: """ Ask Alembic what its head revision is (i.e. where the Python code would like the database to be at). Arguments: alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work. version_table: table name for Alembic versions """ if alembic_base_dir is None: alembic_base_dir = os.path.dirname(alembic_config_filename) os.chdir(alembic_base_dir) # so the directory in the config file works config = Config(alembic_config_filename) script = ScriptDirectory.from_config(config) with EnvironmentContext(config, script, version_table=version_table): return script.get_current_head()
[ "def", "get_head_revision_from_alembic", "(", "alembic_config_filename", ":", "str", ",", "alembic_base_dir", ":", "str", "=", "None", ",", "version_table", ":", "str", "=", "DEFAULT_ALEMBIC_VERSION_TABLE", ")", "->", "str", ":", "if", "alembic_base_dir", "is", "None", ":", "alembic_base_dir", "=", "os", ".", "path", ".", "dirname", "(", "alembic_config_filename", ")", "os", ".", "chdir", "(", "alembic_base_dir", ")", "# so the directory in the config file works", "config", "=", "Config", "(", "alembic_config_filename", ")", "script", "=", "ScriptDirectory", ".", "from_config", "(", "config", ")", "with", "EnvironmentContext", "(", "config", ",", "script", ",", "version_table", "=", "version_table", ")", ":", "return", "script", ".", "get_current_head", "(", ")" ]
Ask Alembic what its head revision is (i.e. where the Python code would like the database to be at). Arguments: alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work. version_table: table name for Alembic versions
[ "Ask", "Alembic", "what", "its", "head", "revision", "is", "(", "i", ".", "e", ".", "where", "the", "Python", "code", "would", "like", "the", "database", "to", "be", "at", ")", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/alembic_func.py#L71-L93
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/alembic_func.py
get_current_revision
def get_current_revision( database_url: str, version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> str: """ Ask the database what its current revision is. Arguments: database_url: SQLAlchemy URL for the database version_table: table name for Alembic versions """ engine = create_engine(database_url) conn = engine.connect() opts = {'version_table': version_table} mig_context = MigrationContext.configure(conn, opts=opts) return mig_context.get_current_revision()
python
def get_current_revision( database_url: str, version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> str: """ Ask the database what its current revision is. Arguments: database_url: SQLAlchemy URL for the database version_table: table name for Alembic versions """ engine = create_engine(database_url) conn = engine.connect() opts = {'version_table': version_table} mig_context = MigrationContext.configure(conn, opts=opts) return mig_context.get_current_revision()
[ "def", "get_current_revision", "(", "database_url", ":", "str", ",", "version_table", ":", "str", "=", "DEFAULT_ALEMBIC_VERSION_TABLE", ")", "->", "str", ":", "engine", "=", "create_engine", "(", "database_url", ")", "conn", "=", "engine", ".", "connect", "(", ")", "opts", "=", "{", "'version_table'", ":", "version_table", "}", "mig_context", "=", "MigrationContext", ".", "configure", "(", "conn", ",", "opts", "=", "opts", ")", "return", "mig_context", ".", "get_current_revision", "(", ")" ]
Ask the database what its current revision is. Arguments: database_url: SQLAlchemy URL for the database version_table: table name for Alembic versions
[ "Ask", "the", "database", "what", "its", "current", "revision", "is", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/alembic_func.py#L96-L110
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/alembic_func.py
get_current_and_head_revision
def get_current_and_head_revision( database_url: str, alembic_config_filename: str, alembic_base_dir: str = None, version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> Tuple[str, str]: """ Returns a tuple of ``(current_revision, head_revision)``; see :func:`get_current_revision` and :func:`get_head_revision_from_alembic`. Arguments: database_url: SQLAlchemy URL for the database alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work. version_table: table name for Alembic versions """ # Where we want to be head_revision = get_head_revision_from_alembic( alembic_config_filename=alembic_config_filename, alembic_base_dir=alembic_base_dir, version_table=version_table ) log.info("Intended database version: {}", head_revision) # Where we are current_revision = get_current_revision( database_url=database_url, version_table=version_table ) log.info("Current database version: {}", current_revision) # Are we where we want to be? return current_revision, head_revision
python
def get_current_and_head_revision( database_url: str, alembic_config_filename: str, alembic_base_dir: str = None, version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> Tuple[str, str]: """ Returns a tuple of ``(current_revision, head_revision)``; see :func:`get_current_revision` and :func:`get_head_revision_from_alembic`. Arguments: database_url: SQLAlchemy URL for the database alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work. version_table: table name for Alembic versions """ # Where we are head_revision = get_head_revision_from_alembic( alembic_config_filename=alembic_config_filename, alembic_base_dir=alembic_base_dir, version_table=version_table ) log.info("Intended database version: {}", head_revision) # Where we want to be current_revision = get_current_revision( database_url=database_url, version_table=version_table ) log.info("Current database version: {}", current_revision) # Are we where we want to be? return current_revision, head_revision
[ "def", "get_current_and_head_revision", "(", "database_url", ":", "str", ",", "alembic_config_filename", ":", "str", ",", "alembic_base_dir", ":", "str", "=", "None", ",", "version_table", ":", "str", "=", "DEFAULT_ALEMBIC_VERSION_TABLE", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "# Where we are", "head_revision", "=", "get_head_revision_from_alembic", "(", "alembic_config_filename", "=", "alembic_config_filename", ",", "alembic_base_dir", "=", "alembic_base_dir", ",", "version_table", "=", "version_table", ")", "log", ".", "info", "(", "\"Intended database version: {}\"", ",", "head_revision", ")", "# Where we want to be", "current_revision", "=", "get_current_revision", "(", "database_url", "=", "database_url", ",", "version_table", "=", "version_table", ")", "log", ".", "info", "(", "\"Current database version: {}\"", ",", "current_revision", ")", "# Are we where we want to be?", "return", "current_revision", ",", "head_revision" ]
Returns a tuple of ``(current_revision, head_revision)``; see
:func:`get_current_revision` and :func:`get_head_revision_from_alembic`.

Arguments:
    database_url: SQLAlchemy URL for the database
    alembic_config_filename: config filename
    alembic_base_dir: directory to start in, so relative paths in the
        config file work.
    version_table: table name for Alembic versions
[ "Returns", "a", "tuple", "of", "(", "current_revision", "head_revision", ")", ";", "see", ":", "func", ":", "get_current_revision", "and", ":", "func", ":", "get_head_revision_from_alembic", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/alembic_func.py#L113-L145
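For orientation, a minimal usage sketch of the function above, combined with `upgrade_database` from the same module (documented below): it checks whether the database is behind the code and migrates if so. The config path and database URL here are hypothetical.

```python
from cardinal_pythonlib.sqlalchemy.alembic_func import (
    get_current_and_head_revision,
    upgrade_database,
)

ALEMBIC_INI = "/path/to/alembic.ini"   # hypothetical path
DB_URL = "sqlite:////tmp/demo.sqlite"  # hypothetical URL

current, head = get_current_and_head_revision(
    database_url=DB_URL,
    alembic_config_filename=ALEMBIC_INI,
)
if current != head:
    # Database is behind the code; bring it up to date.
    upgrade_database(alembic_config_filename=ALEMBIC_INI)
```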
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/alembic_func.py
upgrade_database
def upgrade_database(
        alembic_config_filename: str,
        alembic_base_dir: str = None,
        starting_revision: str = None,
        destination_revision: str = "head",
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE,
        as_sql: bool = False) -> None:
    """
    Use Alembic to upgrade our database.

    See http://alembic.readthedocs.org/en/latest/api/runtime.html
    but also, in particular, ``site-packages/alembic/command.py``

    Arguments:
        alembic_config_filename: config filename
        alembic_base_dir: directory to start in, so relative paths in the
            config file work
        starting_revision: revision to start at (typically ``None`` to ask
            the database)
        destination_revision: revision to aim for (typically ``"head"`` to
            migrate to the latest structure)
        version_table: table name for Alembic versions
        as_sql: run in "offline" mode: print the migration SQL, rather than
            modifying the database. See
            http://alembic.zzzcomputing.com/en/latest/offline.html
    """
    if alembic_base_dir is None:
        alembic_base_dir = os.path.dirname(alembic_config_filename)
    os.chdir(alembic_base_dir)  # so the directory in the config file works
    config = Config(alembic_config_filename)
    script = ScriptDirectory.from_config(config)

    # noinspection PyUnusedLocal,PyProtectedMember
    def upgrade(rev, context):
        return script._upgrade_revs(destination_revision, rev)

    log.info("Upgrading database to revision {!r} using Alembic",
             destination_revision)

    with EnvironmentContext(config,
                            script,
                            fn=upgrade,
                            as_sql=as_sql,
                            starting_rev=starting_revision,
                            destination_rev=destination_revision,
                            tag=None,
                            version_table=version_table):
        script.run_env()

    log.info("Database upgrade completed")
python
def upgrade_database(
        alembic_config_filename: str,
        alembic_base_dir: str = None,
        starting_revision: str = None,
        destination_revision: str = "head",
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE,
        as_sql: bool = False) -> None:
    """
    Use Alembic to upgrade our database.

    See http://alembic.readthedocs.org/en/latest/api/runtime.html
    but also, in particular, ``site-packages/alembic/command.py``

    Arguments:
        alembic_config_filename: config filename
        alembic_base_dir: directory to start in, so relative paths in the
            config file work
        starting_revision: revision to start at (typically ``None`` to ask
            the database)
        destination_revision: revision to aim for (typically ``"head"`` to
            migrate to the latest structure)
        version_table: table name for Alembic versions
        as_sql: run in "offline" mode: print the migration SQL, rather than
            modifying the database. See
            http://alembic.zzzcomputing.com/en/latest/offline.html
    """
    if alembic_base_dir is None:
        alembic_base_dir = os.path.dirname(alembic_config_filename)
    os.chdir(alembic_base_dir)  # so the directory in the config file works
    config = Config(alembic_config_filename)
    script = ScriptDirectory.from_config(config)

    # noinspection PyUnusedLocal,PyProtectedMember
    def upgrade(rev, context):
        return script._upgrade_revs(destination_revision, rev)

    log.info("Upgrading database to revision {!r} using Alembic",
             destination_revision)

    with EnvironmentContext(config,
                            script,
                            fn=upgrade,
                            as_sql=as_sql,
                            starting_rev=starting_revision,
                            destination_rev=destination_revision,
                            tag=None,
                            version_table=version_table):
        script.run_env()

    log.info("Database upgrade completed")
[ "def", "upgrade_database", "(", "alembic_config_filename", ":", "str", ",", "alembic_base_dir", ":", "str", "=", "None", ",", "starting_revision", ":", "str", "=", "None", ",", "destination_revision", ":", "str", "=", "\"head\"", ",", "version_table", ":", "str", "=", "DEFAULT_ALEMBIC_VERSION_TABLE", ",", "as_sql", ":", "bool", "=", "False", ")", "->", "None", ":", "if", "alembic_base_dir", "is", "None", ":", "alembic_base_dir", "=", "os", ".", "path", ".", "dirname", "(", "alembic_config_filename", ")", "os", ".", "chdir", "(", "alembic_base_dir", ")", "# so the directory in the config file works", "config", "=", "Config", "(", "alembic_config_filename", ")", "script", "=", "ScriptDirectory", ".", "from_config", "(", "config", ")", "# noinspection PyUnusedLocal,PyProtectedMember", "def", "upgrade", "(", "rev", ",", "context", ")", ":", "return", "script", ".", "_upgrade_revs", "(", "destination_revision", ",", "rev", ")", "log", ".", "info", "(", "\"Upgrading database to revision {!r} using Alembic\"", ",", "destination_revision", ")", "with", "EnvironmentContext", "(", "config", ",", "script", ",", "fn", "=", "upgrade", ",", "as_sql", "=", "as_sql", ",", "starting_rev", "=", "starting_revision", ",", "destination_rev", "=", "destination_revision", ",", "tag", "=", "None", ",", "version_table", "=", "version_table", ")", ":", "script", ".", "run_env", "(", ")", "log", ".", "info", "(", "\"Database upgrade completed\"", ")" ]
Use Alembic to upgrade our database.

See http://alembic.readthedocs.org/en/latest/api/runtime.html
but also, in particular, ``site-packages/alembic/command.py``

Arguments:
    alembic_config_filename: config filename
    alembic_base_dir: directory to start in, so relative paths in the
        config file work
    starting_revision: revision to start at (typically ``None`` to ask
        the database)
    destination_revision: revision to aim for (typically ``"head"`` to
        migrate to the latest structure)
    version_table: table name for Alembic versions
    as_sql: run in "offline" mode: print the migration SQL, rather than
        modifying the database. See
        http://alembic.zzzcomputing.com/en/latest/offline.html
[ "Use", "Alembic", "to", "upgrade", "our", "database", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/alembic_func.py#L149-L208
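A hedged sketch of the "offline" mode the docstring mentions: with `as_sql=True`, the migration SQL is emitted rather than executed against the database. The config path is hypothetical.

```python
from cardinal_pythonlib.sqlalchemy.alembic_func import upgrade_database

# Offline mode: print the SQL that would bring the schema to "head",
# without modifying the database.
upgrade_database(
    alembic_config_filename="/path/to/alembic.ini",  # hypothetical path
    destination_revision="head",
    as_sql=True,
)
```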
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/alembic_func.py
create_database_migration_numbered_style
def create_database_migration_numbered_style(
        alembic_ini_file: str,
        alembic_versions_dir: str,
        message: str,
        n_sequence_chars: int = 4) -> None:
    """
    Create a new Alembic migration script.

    Alembic compares the **state of the database** to the **state of the
    metadata**, and generates a migration that brings the former up to the
    latter. (It does **not** compare the most recent revision to the current
    metadata, so make sure your database is up to date with the most recent
    revision before running this!) You **must check** that the autogenerated
    revisions are sensible.

    How does it know where to look for the database?

    1. This function changes into the directory of the Alembic ``.ini`` file
       and calls the external program

       .. code-block:: bash

           alembic -c ALEMBIC_INI_FILE revision --autogenerate -m MESSAGE --rev-id REVISION_ID

    2. The Alembic ``.ini`` file points (via the ``script_location``
       variable) to a directory containing your ``env.py``. Alembic loads
       this script.

    3. That script typically works out the database URL and calls further
       into the Alembic code.

    See http://alembic.zzzcomputing.com/en/latest/autogenerate.html.

    Regarding filenames: the default ``n_sequence_chars`` of 4 is like Django
    and gives files with names like

    .. code-block:: none

        0001_x.py, 0002_y.py, ...

    NOTE THAT TO USE A NON-STANDARD ALEMBIC VERSION TABLE, YOU MUST SPECIFY
    THAT IN YOUR ``env.py`` (see e.g. CamCOPS).

    Args:
        alembic_ini_file: filename of Alembic ``alembic.ini`` file
        alembic_versions_dir: directory in which you keep your Python
            scripts, one per Alembic revision
        message: message to be associated with this revision
        n_sequence_chars: number of numerical sequence characters to use in
            the filename/revision (see above).
    """  # noqa
    _, _, existing_version_filenames = next(os.walk(alembic_versions_dir),
                                            (None, None, []))
    existing_version_filenames = [
        x for x in existing_version_filenames if x != "__init__.py"]
    log.debug("Existing Alembic version script filenames: {!r}",
              existing_version_filenames)
    current_seq_strs = [x[:n_sequence_chars]
                        for x in existing_version_filenames]
    current_seq_strs.sort()
    if not current_seq_strs:
        current_seq_str = None
        new_seq_no = 1
    else:
        current_seq_str = current_seq_strs[-1]
        new_seq_no = max(int(x) for x in current_seq_strs) + 1
    new_seq_str = str(new_seq_no).zfill(n_sequence_chars)

    log.info(
        """
Generating new revision with Alembic...
    Last revision was: {}
    New revision will be: {}
    [If it fails with "Can't locate revision identified by...", you might need
    to DROP the Alembic version table (by default named 'alembic_version', but
    you may have elected to change that in your env.py.]
    """,
        current_seq_str,
        new_seq_str
    )

    alembic_ini_dir = os.path.dirname(alembic_ini_file)
    os.chdir(alembic_ini_dir)
    cmdargs = ['alembic', '-c', alembic_ini_file, 'revision',
               '--autogenerate', '-m', message, '--rev-id', new_seq_str]
    log.info("From directory {!r}, calling: {!r}", alembic_ini_dir, cmdargs)
    subprocess.call(cmdargs)
python
def create_database_migration_numbered_style(
        alembic_ini_file: str,
        alembic_versions_dir: str,
        message: str,
        n_sequence_chars: int = 4) -> None:
    """
    Create a new Alembic migration script.

    Alembic compares the **state of the database** to the **state of the
    metadata**, and generates a migration that brings the former up to the
    latter. (It does **not** compare the most recent revision to the current
    metadata, so make sure your database is up to date with the most recent
    revision before running this!) You **must check** that the autogenerated
    revisions are sensible.

    How does it know where to look for the database?

    1. This function changes into the directory of the Alembic ``.ini`` file
       and calls the external program

       .. code-block:: bash

           alembic -c ALEMBIC_INI_FILE revision --autogenerate -m MESSAGE --rev-id REVISION_ID

    2. The Alembic ``.ini`` file points (via the ``script_location``
       variable) to a directory containing your ``env.py``. Alembic loads
       this script.

    3. That script typically works out the database URL and calls further
       into the Alembic code.

    See http://alembic.zzzcomputing.com/en/latest/autogenerate.html.

    Regarding filenames: the default ``n_sequence_chars`` of 4 is like Django
    and gives files with names like

    .. code-block:: none

        0001_x.py, 0002_y.py, ...

    NOTE THAT TO USE A NON-STANDARD ALEMBIC VERSION TABLE, YOU MUST SPECIFY
    THAT IN YOUR ``env.py`` (see e.g. CamCOPS).

    Args:
        alembic_ini_file: filename of Alembic ``alembic.ini`` file
        alembic_versions_dir: directory in which you keep your Python
            scripts, one per Alembic revision
        message: message to be associated with this revision
        n_sequence_chars: number of numerical sequence characters to use in
            the filename/revision (see above).
    """  # noqa
    _, _, existing_version_filenames = next(os.walk(alembic_versions_dir),
                                            (None, None, []))
    existing_version_filenames = [
        x for x in existing_version_filenames if x != "__init__.py"]
    log.debug("Existing Alembic version script filenames: {!r}",
              existing_version_filenames)
    current_seq_strs = [x[:n_sequence_chars]
                        for x in existing_version_filenames]
    current_seq_strs.sort()
    if not current_seq_strs:
        current_seq_str = None
        new_seq_no = 1
    else:
        current_seq_str = current_seq_strs[-1]
        new_seq_no = max(int(x) for x in current_seq_strs) + 1
    new_seq_str = str(new_seq_no).zfill(n_sequence_chars)

    log.info(
        """
Generating new revision with Alembic...
    Last revision was: {}
    New revision will be: {}
    [If it fails with "Can't locate revision identified by...", you might need
    to DROP the Alembic version table (by default named 'alembic_version', but
    you may have elected to change that in your env.py.]
    """,
        current_seq_str,
        new_seq_str
    )

    alembic_ini_dir = os.path.dirname(alembic_ini_file)
    os.chdir(alembic_ini_dir)
    cmdargs = ['alembic', '-c', alembic_ini_file, 'revision',
               '--autogenerate', '-m', message, '--rev-id', new_seq_str]
    log.info("From directory {!r}, calling: {!r}", alembic_ini_dir, cmdargs)
    subprocess.call(cmdargs)
[ "def", "create_database_migration_numbered_style", "(", "alembic_ini_file", ":", "str", ",", "alembic_versions_dir", ":", "str", ",", "message", ":", "str", ",", "n_sequence_chars", ":", "int", "=", "4", ")", "->", "None", ":", "# noqa", "_", ",", "_", ",", "existing_version_filenames", "=", "next", "(", "os", ".", "walk", "(", "alembic_versions_dir", ")", ",", "(", "None", ",", "None", ",", "[", "]", ")", ")", "existing_version_filenames", "=", "[", "x", "for", "x", "in", "existing_version_filenames", "if", "x", "!=", "\"__init__.py\"", "]", "log", ".", "debug", "(", "\"Existing Alembic version script filenames: {!r}\"", ",", "existing_version_filenames", ")", "current_seq_strs", "=", "[", "x", "[", ":", "n_sequence_chars", "]", "for", "x", "in", "existing_version_filenames", "]", "current_seq_strs", ".", "sort", "(", ")", "if", "not", "current_seq_strs", ":", "current_seq_str", "=", "None", "new_seq_no", "=", "1", "else", ":", "current_seq_str", "=", "current_seq_strs", "[", "-", "1", "]", "new_seq_no", "=", "max", "(", "int", "(", "x", ")", "for", "x", "in", "current_seq_strs", ")", "+", "1", "new_seq_str", "=", "str", "(", "new_seq_no", ")", ".", "zfill", "(", "n_sequence_chars", ")", "log", ".", "info", "(", "\"\"\"\nGenerating new revision with Alembic...\n Last revision was: {}\n New revision will be: {}\n [If it fails with \"Can't locate revision identified by...\", you might need\n to DROP the Alembic version table (by default named 'alembic_version', but\n you may have elected to change that in your env.py.]\n \"\"\"", ",", "current_seq_str", ",", "new_seq_str", ")", "alembic_ini_dir", "=", "os", ".", "path", ".", "dirname", "(", "alembic_ini_file", ")", "os", ".", "chdir", "(", "alembic_ini_dir", ")", "cmdargs", "=", "[", "'alembic'", ",", "'-c'", ",", "alembic_ini_file", ",", "'revision'", ",", "'--autogenerate'", ",", "'-m'", ",", "message", ",", "'--rev-id'", ",", "new_seq_str", "]", "log", ".", "info", "(", "\"From directory {!r}, calling: {!r}\"", ",", "alembic_ini_dir", ",", "cmdargs", ")", "subprocess", ".", "call", "(", "cmdargs", ")" ]
Create a new Alembic migration script.

Alembic compares the **state of the database** to the **state of the
metadata**, and generates a migration that brings the former up to the
latter. (It does **not** compare the most recent revision to the current
metadata, so make sure your database is up to date with the most recent
revision before running this!) You **must check** that the autogenerated
revisions are sensible.

How does it know where to look for the database?

1. This function changes into the directory of the Alembic ``.ini`` file
   and calls the external program

   .. code-block:: bash

       alembic -c ALEMBIC_INI_FILE revision --autogenerate -m MESSAGE --rev-id REVISION_ID

2. The Alembic ``.ini`` file points (via the ``script_location`` variable)
   to a directory containing your ``env.py``. Alembic loads this script.

3. That script typically works out the database URL and calls further
   into the Alembic code.

See http://alembic.zzzcomputing.com/en/latest/autogenerate.html.

Regarding filenames: the default ``n_sequence_chars`` of 4 is like Django
and gives files with names like

.. code-block:: none

    0001_x.py, 0002_y.py, ...

NOTE THAT TO USE A NON-STANDARD ALEMBIC VERSION TABLE, YOU MUST SPECIFY
THAT IN YOUR ``env.py`` (see e.g. CamCOPS).

Args:
    alembic_ini_file: filename of Alembic ``alembic.ini`` file
    alembic_versions_dir: directory in which you keep your Python scripts,
        one per Alembic revision
    message: message to be associated with this revision
    n_sequence_chars: number of numerical sequence characters to use in
        the filename/revision (see above).
[ "Create", "a", "new", "Alembic", "migration", "script", ".", "Alembic", "compares", "the", "**", "state", "of", "the", "database", "**", "to", "the", "**", "state", "of", "the", "metadata", "**", "and", "generates", "a", "migration", "that", "brings", "the", "former", "up", "to", "the", "latter", ".", "(", "It", "does", "**", "not", "**", "compare", "the", "most", "recent", "revision", "to", "the", "current", "metadata", "so", "make", "sure", "your", "database", "is", "up", "to", "date", "with", "the", "most", "recent", "revision", "before", "running", "this!", ")", "You", "**", "must", "check", "**", "that", "the", "autogenerated", "revisions", "are", "sensible", ".", "How", "does", "it", "know", "where", "to", "look", "for", "the", "database?", "1", ".", "This", "function", "changes", "into", "the", "directory", "of", "the", "Alembic", ".", "ini", "file", "and", "calls", "the", "external", "program", "..", "code", "-", "block", "::", "bash", "alembic", "-", "c", "ALEMBIC_INI_FILE", "revision", "--", "autogenerate", "-", "m", "MESSAGE", "--", "rev", "-", "id", "REVISION_ID", "2", ".", "The", "Alembic", ".", "ini", "file", "points", "(", "via", "the", "script_location", "variable", ")", "to", "a", "directory", "containing", "your", "env", ".", "py", ".", "Alembic", "loads", "this", "script", ".", "3", ".", "That", "script", "typically", "works", "out", "the", "database", "URL", "and", "calls", "further", "into", "the", "Alembic", "code", ".", "See", "http", ":", "//", "alembic", ".", "zzzcomputing", ".", "com", "/", "en", "/", "latest", "/", "autogenerate", ".", "html", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/alembic_func.py#L275-L366
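A quick pure-Python illustration of the numbered-filename scheme the function derives (no Alembic required; filenames are made up for the demo):

```python
existing = ["0001_initial.py", "0002_add_users.py", "__init__.py"]

# Mirror the function's logic: strip __init__.py, take the first 4 chars,
# and pick one more than the current maximum, zero-padded.
seqs = [f[:4] for f in existing if f != "__init__.py"]
new_seq = str(max(int(s) for s in seqs) + 1).zfill(4)
print(new_seq)  # -> '0003'; the new script would be 0003_<message>.py
```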
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/alembic_func.py
stamp_allowing_unusual_version_table
def stamp_allowing_unusual_version_table(
        config: Config,
        revision: str,
        sql: bool = False,
        tag: str = None,
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> None:
    """
    Stamps the Alembic version table with the given revision; don't run any
    migrations.

    This function is a clone of ``alembic.command.stamp()``, but allowing
    ``version_table`` to change.

    See
    http://alembic.zzzcomputing.com/en/latest/api/commands.html#alembic.command.stamp
    """  # noqa
    script = ScriptDirectory.from_config(config)
    starting_rev = None
    if ":" in revision:
        if not sql:
            raise CommandError("Range revision not allowed")
        starting_rev, revision = revision.split(':', 2)

    # noinspection PyUnusedLocal
    def do_stamp(rev: str, context):
        # noinspection PyProtectedMember
        return script._stamp_revs(revision, rev)

    with EnvironmentContext(config,
                            script,
                            fn=do_stamp,
                            as_sql=sql,
                            destination_rev=revision,
                            starting_rev=starting_rev,
                            tag=tag,
                            version_table=version_table):
        script.run_env()
python
def stamp_allowing_unusual_version_table(
        config: Config,
        revision: str,
        sql: bool = False,
        tag: str = None,
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> None:
    """
    Stamps the Alembic version table with the given revision; don't run any
    migrations.

    This function is a clone of ``alembic.command.stamp()``, but allowing
    ``version_table`` to change.

    See
    http://alembic.zzzcomputing.com/en/latest/api/commands.html#alembic.command.stamp
    """  # noqa
    script = ScriptDirectory.from_config(config)
    starting_rev = None
    if ":" in revision:
        if not sql:
            raise CommandError("Range revision not allowed")
        starting_rev, revision = revision.split(':', 2)

    # noinspection PyUnusedLocal
    def do_stamp(rev: str, context):
        # noinspection PyProtectedMember
        return script._stamp_revs(revision, rev)

    with EnvironmentContext(config,
                            script,
                            fn=do_stamp,
                            as_sql=sql,
                            destination_rev=revision,
                            starting_rev=starting_rev,
                            tag=tag,
                            version_table=version_table):
        script.run_env()
[ "def", "stamp_allowing_unusual_version_table", "(", "config", ":", "Config", ",", "revision", ":", "str", ",", "sql", ":", "bool", "=", "False", ",", "tag", ":", "str", "=", "None", ",", "version_table", ":", "str", "=", "DEFAULT_ALEMBIC_VERSION_TABLE", ")", "->", "None", ":", "# noqa", "script", "=", "ScriptDirectory", ".", "from_config", "(", "config", ")", "starting_rev", "=", "None", "if", "\":\"", "in", "revision", ":", "if", "not", "sql", ":", "raise", "CommandError", "(", "\"Range revision not allowed\"", ")", "starting_rev", ",", "revision", "=", "revision", ".", "split", "(", "':'", ",", "2", ")", "# noinspection PyUnusedLocal", "def", "do_stamp", "(", "rev", ":", "str", ",", "context", ")", ":", "# noinspection PyProtectedMember", "return", "script", ".", "_stamp_revs", "(", "revision", ",", "rev", ")", "with", "EnvironmentContext", "(", "config", ",", "script", ",", "fn", "=", "do_stamp", ",", "as_sql", "=", "sql", ",", "destination_rev", "=", "revision", ",", "starting_rev", "=", "starting_rev", ",", "tag", "=", "tag", ",", "version_table", "=", "version_table", ")", ":", "script", ".", "run_env", "(", ")" ]
Stamps the Alembic version table with the given revision; don't run any
migrations.

This function is a clone of ``alembic.command.stamp()``, but allowing
``version_table`` to change.

See
http://alembic.zzzcomputing.com/en/latest/api/commands.html#alembic.command.stamp
[ "Stamps", "the", "Alembic", "version", "table", "with", "the", "given", "revision", ";", "don", "t", "run", "any", "migrations", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/alembic_func.py#L369-L405
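A hedged usage sketch for the function above: mark the schema as already at "head" in a custom version table, running no migrations. The config path and table name are hypothetical.

```python
from alembic.config import Config
from cardinal_pythonlib.sqlalchemy.alembic_func import (
    stamp_allowing_unusual_version_table,
)

config = Config("/path/to/alembic.ini")  # hypothetical path
# Record "head" in a non-default version table, without migrating:
stamp_allowing_unusual_version_table(config, "head",
                                     version_table="my_alembic_version")
```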
ivanprjcts/sdklib
sdklib/behave/requests.py
set_url_path_with_params
def set_url_path_with_params(context, url_path_str_format):
    """
    Parameters:

    +------+--------+
    | key  | value  |
    +======+========+
    | key1 | value1 |
    +------+--------+
    | key2 | value2 |
    +------+--------+
    """
    safe_add_http_request_context_to_behave_context(context)
    table_as_json = dict(context.table)
    url_path = url_path_str_format % table_as_json
    context.http_request_context.url_path = url_path
python
def set_url_path_with_params(context, url_path_str_format):
    """
    Parameters:

    +------+--------+
    | key  | value  |
    +======+========+
    | key1 | value1 |
    +------+--------+
    | key2 | value2 |
    +------+--------+
    """
    safe_add_http_request_context_to_behave_context(context)
    table_as_json = dict(context.table)
    url_path = url_path_str_format % table_as_json
    context.http_request_context.url_path = url_path
[ "def", "set_url_path_with_params", "(", "context", ",", "url_path_str_format", ")", ":", "safe_add_http_request_context_to_behave_context", "(", "context", ")", "table_as_json", "=", "dict", "(", "context", ".", "table", ")", "url_path", "=", "url_path_str_format", "%", "table_as_json", "context", ".", "http_request_context", ".", "url_path", "=", "url_path" ]
Parameters:

+------+--------+
| key  | value  |
+======+========+
| key1 | value1 |
+------+--------+
| key2 | value2 |
+------+--------+
[ "Parameters", ":" ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/behave/requests.py#L46-L61
ivanprjcts/sdklib
sdklib/behave/requests.py
set_headers
def set_headers(context):
    """
    Parameters:

    +--------------+---------------+
    | header_name  | header_value  |
    +==============+===============+
    | header1      | value1        |
    +--------------+---------------+
    | header2      | value2        |
    +--------------+---------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    headers = dict()
    for row in context.table:
        headers[row["header_name"]] = row["header_value"]
    context.http_request_context.headers = headers
python
def set_headers(context):
    """
    Parameters:

    +--------------+---------------+
    | header_name  | header_value  |
    +==============+===============+
    | header1      | value1        |
    +--------------+---------------+
    | header2      | value2        |
    +--------------+---------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    headers = dict()
    for row in context.table:
        headers[row["header_name"]] = row["header_value"]
    context.http_request_context.headers = headers
[ "def", "set_headers", "(", "context", ")", ":", "safe_add_http_request_context_to_behave_context", "(", "context", ")", "headers", "=", "dict", "(", ")", "for", "row", "in", "context", ".", "table", ":", "headers", "[", "row", "[", "\"header_name\"", "]", "]", "=", "row", "[", "\"header_value\"", "]", "context", ".", "http_request_context", ".", "headers", "=", "headers" ]
Parameters:

+--------------+---------------+
| header_name  | header_value  |
+==============+===============+
| header1      | value1        |
+--------------+---------------+
| header2      | value2        |
+--------------+---------------+
[ "Parameters", ":" ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/behave/requests.py#L77-L93
ivanprjcts/sdklib
sdklib/behave/requests.py
set_form_parameters
def set_form_parameters(context):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | param_value  |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    context.http_request_context.body_params = get_parameters(context)
    context.http_request_context.renderer = FormRenderer()
python
def set_form_parameters(context):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | param_value  |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    context.http_request_context.body_params = get_parameters(context)
    context.http_request_context.renderer = FormRenderer()
[ "def", "set_form_parameters", "(", "context", ")", ":", "safe_add_http_request_context_to_behave_context", "(", "context", ")", "context", ".", "http_request_context", ".", "body_params", "=", "get_parameters", "(", "context", ")", "context", ".", "http_request_context", ".", "renderer", "=", "FormRenderer", "(", ")" ]
Parameters:

+-------------+--------------+
| param_name  | param_value  |
+=============+==============+
| param1      | value1       |
+-------------+--------------+
| param2      | value2       |
+-------------+--------------+
[ "Parameters", ":" ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/behave/requests.py#L131-L145
ivanprjcts/sdklib
sdklib/behave/requests.py
set_body_files
def set_body_files(context):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | path_to_file |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    files = dict()
    for row in context.table:
        files[row["param_name"]] = row["path_to_file"]
    context.http_request_context.files = files
python
def set_body_files(context):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | path_to_file |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    files = dict()
    for row in context.table:
        files[row["param_name"]] = row["path_to_file"]
    context.http_request_context.files = files
[ "def", "set_body_files", "(", "context", ")", ":", "safe_add_http_request_context_to_behave_context", "(", "context", ")", "files", "=", "dict", "(", ")", "for", "row", "in", "context", ".", "table", ":", "files", "[", "row", "[", "\"param_name\"", "]", "]", "=", "row", "[", "\"path_to_file\"", "]", "context", ".", "http_request_context", ".", "files", "=", "files" ]
Parameters:

+-------------+--------------+
| param_name  | path_to_file |
+=============+==============+
| param1      | value1       |
+-------------+--------------+
| param2      | value2       |
+-------------+--------------+
[ "Parameters", ":" ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/behave/requests.py#L159-L175
ivanprjcts/sdklib
sdklib/behave/requests.py
send_http_request_with_query_parameters
def send_http_request_with_query_parameters(context, method):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | param_value  |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    set_query_parameters(context)
    send_http_request(context, method)
python
def send_http_request_with_query_parameters(context, method):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | param_value  |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    set_query_parameters(context)
    send_http_request(context, method)
[ "def", "send_http_request_with_query_parameters", "(", "context", ",", "method", ")", ":", "safe_add_http_request_context_to_behave_context", "(", "context", ")", "set_query_parameters", "(", "context", ")", "send_http_request", "(", "context", ",", "method", ")" ]
Parameters:

+-------------+--------------+
| param_name  | param_value  |
+=============+==============+
| param1      | value1       |
+-------------+--------------+
| param2      | value2       |
+-------------+--------------+
[ "Parameters", ":" ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/behave/requests.py#L197-L211
ivanprjcts/sdklib
sdklib/behave/requests.py
send_http_request_with_body_parameters
def send_http_request_with_body_parameters(context, method):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | param_value  |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    set_body_parameters(context)
    send_http_request(context, method)
python
def send_http_request_with_body_parameters(context, method):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | param_value  |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    set_body_parameters(context)
    send_http_request(context, method)
[ "def", "send_http_request_with_body_parameters", "(", "context", ",", "method", ")", ":", "safe_add_http_request_context_to_behave_context", "(", "context", ")", "set_body_parameters", "(", "context", ")", "send_http_request", "(", "context", ",", "method", ")" ]
Parameters:

+-------------+--------------+
| param_name  | param_value  |
+=============+==============+
| param1      | value1       |
+-------------+--------------+
| param2      | value2       |
+-------------+--------------+
[ "Parameters", ":" ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/behave/requests.py#L215-L229
ivanprjcts/sdklib
sdklib/behave/requests.py
send_http_request_with_form_parameters
def send_http_request_with_form_parameters(context, method):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | param_value  |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    set_form_parameters(context)
    send_http_request(context, method)
python
def send_http_request_with_form_parameters(context, method):
    """
    Parameters:

    +-------------+--------------+
    | param_name  | param_value  |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    safe_add_http_request_context_to_behave_context(context)
    set_form_parameters(context)
    send_http_request(context, method)
[ "def", "send_http_request_with_form_parameters", "(", "context", ",", "method", ")", ":", "safe_add_http_request_context_to_behave_context", "(", "context", ")", "set_form_parameters", "(", "context", ")", "send_http_request", "(", "context", ",", "method", ")" ]
Parameters:

+-------------+--------------+
| param_name  | param_value  |
+=============+==============+
| param1      | value1       |
+-------------+--------------+
| param2      | value2       |
+-------------+--------------+
[ "Parameters", ":" ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/behave/requests.py#L233-L247
ivanprjcts/sdklib
sdklib/behave/requests.py
send_http_request_with_json
def send_http_request_with_json(context, method):
    """
    Parameters:

    .. code-block:: json

        {
            "param1": "value1",
            "param2": "value2",
            "param3": {
                "param31": "value31"
            }
        }
    """
    safe_add_http_request_context_to_behave_context(context)
    context.http_request_context.body_params = json.loads(context.text)
    context.http_request_context.renderer = JSONRenderer()
    send_http_request(context, method)
python
def send_http_request_with_json(context, method):
    """
    Parameters:

    .. code-block:: json

        {
            "param1": "value1",
            "param2": "value2",
            "param3": {
                "param31": "value31"
            }
        }
    """
    safe_add_http_request_context_to_behave_context(context)
    context.http_request_context.body_params = json.loads(context.text)
    context.http_request_context.renderer = JSONRenderer()
    send_http_request(context, method)
[ "def", "send_http_request_with_json", "(", "context", ",", "method", ")", ":", "safe_add_http_request_context_to_behave_context", "(", "context", ")", "context", ".", "http_request_context", ".", "body_params", "=", "json", ".", "loads", "(", "context", ".", "text", ")", "context", ".", "http_request_context", ".", "renderer", "=", "JSONRenderer", "(", ")", "send_http_request", "(", "context", ",", "method", ")" ]
Parameters:

.. code-block:: json

    {
        "param1": "value1",
        "param2": "value2",
        "param3": {
            "param31": "value31"
        }
    }
[ "Parameters", ":" ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/behave/requests.py#L261-L278
RudolfCardinal/pythonlib
cardinal_pythonlib/process.py
get_external_command_output
def get_external_command_output(command: str) -> bytes:
    """
    Takes a command-line command, executes it, and returns its ``stdout``
    output.

    Args:
        command: command string

    Returns:
        output from the command as ``bytes``
    """
    args = shlex.split(command)
    ret = subprocess.check_output(args)  # this needs Python 2.7 or higher
    return ret
python
def get_external_command_output(command: str) -> bytes:
    """
    Takes a command-line command, executes it, and returns its ``stdout``
    output.

    Args:
        command: command string

    Returns:
        output from the command as ``bytes``
    """
    args = shlex.split(command)
    ret = subprocess.check_output(args)  # this needs Python 2.7 or higher
    return ret
[ "def", "get_external_command_output", "(", "command", ":", "str", ")", "->", "bytes", ":", "args", "=", "shlex", ".", "split", "(", "command", ")", "ret", "=", "subprocess", ".", "check_output", "(", "args", ")", "# this needs Python 2.7 or higher", "return", "ret" ]
Takes a command-line command, executes it, and returns its ``stdout``
output.

Args:
    command: command string

Returns:
    output from the command as ``bytes``
[ "Takes", "a", "command", "-", "line", "command", "executes", "it", "and", "returns", "its", "stdout", "output", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/process.py#L46-L60
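A minimal usage sketch (assuming a POSIX `echo` binary is available); note the return type is `bytes`, so decode if you want text:

```python
from cardinal_pythonlib.process import get_external_command_output

out = get_external_command_output("echo hello")
print(out)                            # b'hello\n' -- raw bytes
print(out.decode("utf-8").strip())    # 'hello'
```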
RudolfCardinal/pythonlib
cardinal_pythonlib/process.py
get_pipe_series_output
def get_pipe_series_output(commands: Sequence[str],
                           stdinput: BinaryIO = None) -> bytes:
    """
    Get the output from a piped series of commands.

    Args:
        commands: sequence of command strings
        stdinput: optional ``stdin`` data to feed into the start of the pipe

    Returns:
        ``stdout`` from the end of the pipe
    """
    # Python arrays indexes are zero-based, i.e. an array is indexed from
    # 0 to len(array)-1.
    # The range/xrange commands, by default, start at 0 and go to one less
    # than the maximum specified.

    # print commands
    processes = []  # type: List[subprocess.Popen]
    for i in range(len(commands)):
        if i == 0:  # first processes
            processes.append(
                subprocess.Popen(
                    shlex.split(commands[i]),
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE
                )
            )
        else:  # subsequent ones
            processes.append(
                subprocess.Popen(
                    shlex.split(commands[i]),
                    stdin=processes[i - 1].stdout,
                    stdout=subprocess.PIPE
                )
            )
    return processes[len(processes) - 1].communicate(stdinput)[0]
python
def get_pipe_series_output(commands: Sequence[str],
                           stdinput: BinaryIO = None) -> bytes:
    """
    Get the output from a piped series of commands.

    Args:
        commands: sequence of command strings
        stdinput: optional ``stdin`` data to feed into the start of the pipe

    Returns:
        ``stdout`` from the end of the pipe
    """
    # Python arrays indexes are zero-based, i.e. an array is indexed from
    # 0 to len(array)-1.
    # The range/xrange commands, by default, start at 0 and go to one less
    # than the maximum specified.

    # print commands
    processes = []  # type: List[subprocess.Popen]
    for i in range(len(commands)):
        if i == 0:  # first processes
            processes.append(
                subprocess.Popen(
                    shlex.split(commands[i]),
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE
                )
            )
        else:  # subsequent ones
            processes.append(
                subprocess.Popen(
                    shlex.split(commands[i]),
                    stdin=processes[i - 1].stdout,
                    stdout=subprocess.PIPE
                )
            )
    return processes[len(processes) - 1].communicate(stdinput)[0]
[ "def", "get_pipe_series_output", "(", "commands", ":", "Sequence", "[", "str", "]", ",", "stdinput", ":", "BinaryIO", "=", "None", ")", "->", "bytes", ":", "# Python arrays indexes are zero-based, i.e. an array is indexed from", "# 0 to len(array)-1.", "# The range/xrange commands, by default, start at 0 and go to one less", "# than the maximum specified.", "# print commands", "processes", "=", "[", "]", "# type: List[subprocess.Popen]", "for", "i", "in", "range", "(", "len", "(", "commands", ")", ")", ":", "if", "i", "==", "0", ":", "# first processes", "processes", ".", "append", "(", "subprocess", ".", "Popen", "(", "shlex", ".", "split", "(", "commands", "[", "i", "]", ")", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ")", "else", ":", "# subsequent ones", "processes", ".", "append", "(", "subprocess", ".", "Popen", "(", "shlex", ".", "split", "(", "commands", "[", "i", "]", ")", ",", "stdin", "=", "processes", "[", "i", "-", "1", "]", ".", "stdout", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ")", "return", "processes", "[", "len", "(", "processes", ")", "-", "1", "]", ".", "communicate", "(", "stdinput", ")", "[", "0", "]" ]
Get the output from a piped series of commands.

Args:
    commands: sequence of command strings
    stdinput: optional ``stdin`` data to feed into the start of the pipe

Returns:
    ``stdout`` from the end of the pipe
[ "Get", "the", "output", "from", "a", "piped", "series", "of", "commands", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/process.py#L63-L100
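A hedged usage sketch of the pipeline helper (assuming the `ls` and `sort` binaries exist on the system):

```python
from cardinal_pythonlib.process import get_pipe_series_output

# Equivalent of the shell pipeline: ls -1 | sort -r
out = get_pipe_series_output(["ls -1", "sort -r"])
print(out.decode("utf-8"))
```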
RudolfCardinal/pythonlib
cardinal_pythonlib/process.py
launch_external_file
def launch_external_file(filename: str, raise_if_fails: bool = False) -> None:
    """
    Launches a file using the operating system's standard launcher.

    Args:
        filename: file to launch
        raise_if_fails: raise any exceptions from
            ``subprocess.call(["xdg-open", filename])`` (Linux)
            or ``os.startfile(filename)`` (otherwise)? If not, exceptions
            are suppressed.
    """
    log.info("Launching external file: {!r}", filename)
    try:
        if sys.platform.startswith('linux'):
            cmdargs = ["xdg-open", filename]
            # log.debug("... command: {!r}", cmdargs)
            subprocess.call(cmdargs)
        else:
            # log.debug("... with os.startfile()")
            # noinspection PyUnresolvedReferences
            os.startfile(filename)
    except Exception as e:
        log.critical("Error launching {!r}: error was {}.\n\n{}",
                     filename, str(e), traceback.format_exc())
        if raise_if_fails:
            raise
python
def launch_external_file(filename: str, raise_if_fails: bool = False) -> None:
    """
    Launches a file using the operating system's standard launcher.

    Args:
        filename: file to launch
        raise_if_fails: raise any exceptions from
            ``subprocess.call(["xdg-open", filename])`` (Linux)
            or ``os.startfile(filename)`` (otherwise)? If not, exceptions
            are suppressed.
    """
    log.info("Launching external file: {!r}", filename)
    try:
        if sys.platform.startswith('linux'):
            cmdargs = ["xdg-open", filename]
            # log.debug("... command: {!r}", cmdargs)
            subprocess.call(cmdargs)
        else:
            # log.debug("... with os.startfile()")
            # noinspection PyUnresolvedReferences
            os.startfile(filename)
    except Exception as e:
        log.critical("Error launching {!r}: error was {}.\n\n{}",
                     filename, str(e), traceback.format_exc())
        if raise_if_fails:
            raise
[ "def", "launch_external_file", "(", "filename", ":", "str", ",", "raise_if_fails", ":", "bool", "=", "False", ")", "->", "None", ":", "log", ".", "info", "(", "\"Launching external file: {!r}\"", ",", "filename", ")", "try", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "'linux'", ")", ":", "cmdargs", "=", "[", "\"xdg-open\"", ",", "filename", "]", "# log.debug(\"... command: {!r}\", cmdargs)", "subprocess", ".", "call", "(", "cmdargs", ")", "else", ":", "# log.debug(\"... with os.startfile()\")", "# noinspection PyUnresolvedReferences", "os", ".", "startfile", "(", "filename", ")", "except", "Exception", "as", "e", ":", "log", ".", "critical", "(", "\"Error launching {!r}: error was {}.\\n\\n{}\"", ",", "filename", ",", "str", "(", "e", ")", ",", "traceback", ".", "format_exc", "(", ")", ")", "if", "raise_if_fails", ":", "raise" ]
Launches a file using the operating system's standard launcher.

Args:
    filename: file to launch
    raise_if_fails: raise any exceptions from
        ``subprocess.call(["xdg-open", filename])`` (Linux)
        or ``os.startfile(filename)`` (otherwise)? If not, exceptions
        are suppressed.
[ "Launches", "a", "file", "using", "the", "operating", "system", "s", "standard", "launcher", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/process.py#L110-L136
RudolfCardinal/pythonlib
cardinal_pythonlib/process.py
kill_proc_tree
def kill_proc_tree(pid: int,
                   including_parent: bool = True,
                   timeout_s: float = 5) \
        -> Tuple[Set[psutil.Process], Set[psutil.Process]]:
    """
    Kills a tree of processes, starting with the parent. Slightly modified
    from
    https://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows.

    Args:
        pid: process ID of the parent
        including_parent: kill the parent too?
        timeout_s: timeout to wait for processes to close

    Returns:
        tuple: ``(gone, still_alive)``, where both are sets of
        :class:`psutil.Process` objects
    """  # noqa
    parent = psutil.Process(pid)
    to_kill = parent.children(recursive=True)  # type: List[psutil.Process]
    if including_parent:
        to_kill.append(parent)
    for proc in to_kill:
        proc.kill()  # SIGKILL
    gone, still_alive = psutil.wait_procs(to_kill, timeout=timeout_s)
    return gone, still_alive
python
def kill_proc_tree(pid: int,
                   including_parent: bool = True,
                   timeout_s: float = 5) \
        -> Tuple[Set[psutil.Process], Set[psutil.Process]]:
    """
    Kills a tree of processes, starting with the parent. Slightly modified
    from
    https://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows.

    Args:
        pid: process ID of the parent
        including_parent: kill the parent too?
        timeout_s: timeout to wait for processes to close

    Returns:
        tuple: ``(gone, still_alive)``, where both are sets of
        :class:`psutil.Process` objects
    """  # noqa
    parent = psutil.Process(pid)
    to_kill = parent.children(recursive=True)  # type: List[psutil.Process]
    if including_parent:
        to_kill.append(parent)
    for proc in to_kill:
        proc.kill()  # SIGKILL
    gone, still_alive = psutil.wait_procs(to_kill, timeout=timeout_s)
    return gone, still_alive
[ "def", "kill_proc_tree", "(", "pid", ":", "int", ",", "including_parent", ":", "bool", "=", "True", ",", "timeout_s", ":", "float", "=", "5", ")", "->", "Tuple", "[", "Set", "[", "psutil", ".", "Process", "]", ",", "Set", "[", "psutil", ".", "Process", "]", "]", ":", "# noqa", "parent", "=", "psutil", ".", "Process", "(", "pid", ")", "to_kill", "=", "parent", ".", "children", "(", "recursive", "=", "True", ")", "# type: List[psutil.Process]", "if", "including_parent", ":", "to_kill", ".", "append", "(", "parent", ")", "for", "proc", "in", "to_kill", ":", "proc", ".", "kill", "(", ")", "# SIGKILL", "gone", ",", "still_alive", "=", "psutil", ".", "wait_procs", "(", "to_kill", ",", "timeout", "=", "timeout_s", ")", "return", "gone", ",", "still_alive" ]
Kills a tree of processes, starting with the parent. Slightly modified
from
https://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows.

Args:
    pid: process ID of the parent
    including_parent: kill the parent too?
    timeout_s: timeout to wait for processes to close

Returns:
    tuple: ``(gone, still_alive)``, where both are sets of
    :class:`psutil.Process` objects
[ "Kills", "a", "tree", "of", "processes", "starting", "with", "the", "parent", ".", "Slightly", "modified", "from", "https", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "1230669", "/", "subprocess", "-", "deleting", "-", "child", "-", "processes", "-", "in", "-", "windows", ".", "Args", ":", "pid", ":", "process", "ID", "of", "the", "parent", "including_parent", ":", "kill", "the", "parent", "too?", "timeout_s", ":", "timeout", "to", "wait", "for", "processes", "to", "close" ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/process.py#L144-L169
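A hedged usage sketch (requires `psutil`; the spawned command is just a placeholder long-running process):

```python
import subprocess

from cardinal_pythonlib.process import kill_proc_tree

# Start a shell running a long-lived command, then kill its whole tree:
p = subprocess.Popen(["sh", "-c", "sleep 600"])
gone, still_alive = kill_proc_tree(p.pid, including_parent=True)
print(len(gone), "processes killed;", len(still_alive), "survived")
```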
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/session.py
make_mysql_url
def make_mysql_url(username: str, password: str, dbname: str,
                   driver: str = "mysqldb", host: str = "localhost",
                   port: int = 3306, charset: str = "utf8") -> str:
    """
    Makes an SQLAlchemy URL for a MySQL database.
    """
    return "mysql+{driver}://{u}:{p}@{host}:{port}/{db}?charset={cs}".format(
        driver=driver,
        host=host,
        port=port,
        db=dbname,
        u=username,
        p=password,
        cs=charset,
    )
python
def make_mysql_url(username: str, password: str, dbname: str,
                   driver: str = "mysqldb", host: str = "localhost",
                   port: int = 3306, charset: str = "utf8") -> str:
    """
    Makes an SQLAlchemy URL for a MySQL database.
    """
    return "mysql+{driver}://{u}:{p}@{host}:{port}/{db}?charset={cs}".format(
        driver=driver,
        host=host,
        port=port,
        db=dbname,
        u=username,
        p=password,
        cs=charset,
    )
[ "def", "make_mysql_url", "(", "username", ":", "str", ",", "password", ":", "str", ",", "dbname", ":", "str", ",", "driver", ":", "str", "=", "\"mysqldb\"", ",", "host", ":", "str", "=", "\"localhost\"", ",", "port", ":", "int", "=", "3306", ",", "charset", ":", "str", "=", "\"utf8\"", ")", "->", "str", ":", "return", "\"mysql+{driver}://{u}:{p}@{host}:{port}/{db}?charset={cs}\"", ".", "format", "(", "driver", "=", "driver", ",", "host", "=", "host", ",", "port", "=", "port", ",", "db", "=", "dbname", ",", "u", "=", "username", ",", "p", "=", "password", ",", "cs", "=", "charset", ",", ")" ]
Makes an SQLAlchemy URL for a MySQL database.
[ "Makes", "an", "SQLAlchemy", "URL", "for", "a", "MySQL", "database", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/session.py#L52-L66
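With the defaults, the function produces a standard `mysql+mysqldb` URL; a quick demo (credentials invented):

```python
from cardinal_pythonlib.sqlalchemy.session import make_mysql_url

url = make_mysql_url("scott", "tiger", "testdb")
print(url)
# -> mysql+mysqldb://scott:tiger@localhost:3306/testdb?charset=utf8
```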
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/session.py
make_sqlite_url
def make_sqlite_url(filename: str) -> str:
    """
    Makes an SQLAlchemy URL for a SQLite database.
    """
    absfile = os.path.abspath(filename)
    return "sqlite://{host}/{path}".format(host="", path=absfile)
python
def make_sqlite_url(filename: str) -> str:
    """
    Makes an SQLAlchemy URL for a SQLite database.
    """
    absfile = os.path.abspath(filename)
    return "sqlite://{host}/{path}".format(host="", path=absfile)
[ "def", "make_sqlite_url", "(", "filename", ":", "str", ")", "->", "str", ":", "absfile", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "return", "\"sqlite://{host}/{path}\"", ".", "format", "(", "host", "=", "\"\"", ",", "path", "=", "absfile", ")" ]
Makes an SQLAlchemy URL for a SQLite database.
[ "Makes", "an", "SQLAlchemy", "URL", "for", "a", "SQLite", "database", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/session.py#L69-L74
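Note the design: the empty host plus the leading slash of the absolute POSIX path yields SQLAlchemy's four-slash form for absolute SQLite paths. A quick demo (working directory hypothetical):

```python
from cardinal_pythonlib.sqlalchemy.session import make_sqlite_url

url = make_sqlite_url("demo.sqlite")  # relative name is made absolute
print(url)
# e.g. -> sqlite:////home/user/demo.sqlite  (empty host + absolute path)
```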
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/session.py
get_engine_from_session
def get_engine_from_session(dbsession: Session) -> Engine:
    """
    Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`.
    """
    engine = dbsession.bind
    assert isinstance(engine, Engine)
    return engine
python
def get_engine_from_session(dbsession: Session) -> Engine:
    """
    Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`.
    """
    engine = dbsession.bind
    assert isinstance(engine, Engine)
    return engine
[ "def", "get_engine_from_session", "(", "dbsession", ":", "Session", ")", "->", "Engine", ":", "engine", "=", "dbsession", ".", "bind", "assert", "isinstance", "(", "engine", ",", "Engine", ")", "return", "engine" ]
Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`.
[ "Gets", "the", "SQLAlchemy", ":", "class", ":", "Engine", "from", "a", "SQLAlchemy", ":", "class", ":", "Session", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/session.py#L84-L90
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/session.py
get_safe_url_from_engine
def get_safe_url_from_engine(engine: Engine) -> str:
    """
    Gets a URL from an :class:`Engine`, obscuring the password.
    """
    raw_url = engine.url  # type: str
    url_obj = make_url(raw_url)  # type: URL
    return repr(url_obj)
python
def get_safe_url_from_engine(engine: Engine) -> str:
    """
    Gets a URL from an :class:`Engine`, obscuring the password.
    """
    raw_url = engine.url  # type: str
    url_obj = make_url(raw_url)  # type: URL
    return repr(url_obj)
[ "def", "get_safe_url_from_engine", "(", "engine", ":", "Engine", ")", "->", "str", ":", "raw_url", "=", "engine", ".", "url", "# type: str", "url_obj", "=", "make_url", "(", "raw_url", ")", "# type: URL", "return", "repr", "(", "url_obj", ")" ]
Gets a URL from an :class:`Engine`, obscuring the password.
[ "Gets", "a", "URL", "from", "an", ":", "class", ":", "Engine", "obscuring", "the", "password", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/session.py#L93-L99
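A hedged usage sketch: `repr()` of a SQLAlchemy `URL` masks the password (behaviour may vary by SQLAlchemy version). The connection details are invented, and `create_engine` with this URL needs the MySQLdb/mysqlclient driver importable:

```python
from sqlalchemy import create_engine
from cardinal_pythonlib.sqlalchemy.session import get_safe_url_from_engine

engine = create_engine("mysql+mysqldb://scott:tiger@localhost:3306/testdb")
print(get_safe_url_from_engine(engine))
# Password obscured, e.g.:
# mysql+mysqldb://scott:***@localhost:3306/testdb
```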
RudolfCardinal/pythonlib
cardinal_pythonlib/sort.py
atoi
def atoi(text: str) -> Union[int, str]:
    """
    Converts strings to integers if they're composed of digits; otherwise
    returns the strings unchanged. One way of sorting strings with numbers;
    it will mean that ``"11"`` is more than ``"2"``.
    """
    return int(text) if text.isdigit() else text
python
def atoi(text: str) -> Union[int, str]:
    """
    Converts strings to integers if they're composed of digits; otherwise
    returns the strings unchanged. One way of sorting strings with numbers;
    it will mean that ``"11"`` is more than ``"2"``.
    """
    return int(text) if text.isdigit() else text
[ "def", "atoi", "(", "text", ":", "str", ")", "->", "Union", "[", "int", ",", "str", "]", ":", "return", "int", "(", "text", ")", "if", "text", ".", "isdigit", "(", ")", "else", "text" ]
Converts strings to integers if they're composed of digits; otherwise returns the strings unchanged. One way of sorting strings with numbers; it will mean that ``"11"`` is more than ``"2"``.
[ "Converts", "strings", "to", "integers", "if", "they", "re", "composed", "of", "digits", ";", "otherwise", "returns", "the", "strings", "unchanged", ".", "One", "way", "of", "sorting", "strings", "with", "numbers", ";", "it", "will", "mean", "that", "11", "is", "more", "than", "2", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sort.py#L39-L45
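A tiny demo of the behaviour described in the docstring, contrasting plain lexicographic sorting with `atoi` as a sort key (outputs shown in comments):

```python
from cardinal_pythonlib.sort import atoi

print(sorted(["11", "2"]))            # ['11', '2'] -- plain string sort
print(sorted(["11", "2"], key=atoi))  # ['2', '11'] -- numeric when digits
print(atoi("abc"))                    # 'abc' -- non-digit strings pass through
```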
RudolfCardinal/pythonlib
cardinal_pythonlib/sort.py
natural_keys
def natural_keys(text: str) -> List[Union[int, str]]:
    """
    Sort key function. Returns text split into string/number parts, for
    natural sorting; as per
    http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside

    Example (as per the source above):

    .. code-block:: python

        >>> from cardinal_pythonlib.sort import natural_keys
        >>> alist=[
        ...     "something1",
        ...     "something12",
        ...     "something17",
        ...     "something2",
        ...     "something25",
        ...     "something29"
        ... ]
        >>> alist.sort(key=natural_keys)
        >>> alist
        ['something1', 'something2', 'something12', 'something17', 'something25', 'something29']
    """  # noqa
    return [atoi(c) for c in re.split(r'(\d+)', text)]
python
def natural_keys(text: str) -> List[Union[int, str]]:
    """
    Sort key function. Returns text split into string/number parts, for
    natural sorting; as per
    http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside

    Example (as per the source above):

    .. code-block:: python

        >>> from cardinal_pythonlib.sort import natural_keys
        >>> alist=[
        ...     "something1",
        ...     "something12",
        ...     "something17",
        ...     "something2",
        ...     "something25",
        ...     "something29"
        ... ]
        >>> alist.sort(key=natural_keys)
        >>> alist
        ['something1', 'something2', 'something12', 'something17', 'something25', 'something29']
    """  # noqa
    return [atoi(c) for c in re.split(r'(\d+)', text)]
[ "def", "natural_keys", "(", "text", ":", "str", ")", "->", "List", "[", "Union", "[", "int", ",", "str", "]", "]", ":", "# noqa", "return", "[", "atoi", "(", "c", ")", "for", "c", "in", "re", ".", "split", "(", "r'(\\d+)'", ",", "text", ")", "]" ]
Sort key function. Returns text split into string/number parts, for
natural sorting; as per
http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside

Example (as per the source above):

.. code-block:: python

    >>> from cardinal_pythonlib.sort import natural_keys
    >>> alist=[
    ...     "something1",
    ...     "something12",
    ...     "something17",
    ...     "something2",
    ...     "something25",
    ...     "something29"
    ... ]
    >>> alist.sort(key=natural_keys)
    >>> alist
    ['something1', 'something2', 'something12', 'something17', 'something25', 'something29']
[ "Sort", "key", "function", ".", "Returns", "text", "split", "into", "string", "/", "number", "parts", "for", "natural", "sorting", ";", "as", "per", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "5967500", "/", "how", "-", "to", "-", "correctly", "-", "sort", "-", "a", "-", "string", "-", "with", "-", "a", "-", "number", "-", "inside", "Example", "(", "as", "per", "the", "source", "above", ")", ":", "..", "code", "-", "block", "::", "python", ">>>", "from", "cardinal_pythonlib", ".", "sort", "import", "natural_keys", ">>>", "alist", "=", "[", "...", "something1", "...", "something12", "...", "something17", "...", "something2", "...", "something25", "...", "something29", "...", "]", ">>>", "alist", ".", "sort", "(", "key", "=", "natural_keys", ")", ">>>", "alist", "[", "something1", "something2", "something12", "something17", "something25", "something29", "]" ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sort.py#L48-L72
ivanprjcts/sdklib
sdklib/util/logger.py
_get_pretty_body
def _get_pretty_body(headers, body): """ Return a pretty printed body using the Content-Type header information. :param headers: Headers for the request/response (dict) :param body: Body to pretty print (string) :return: Body pretty printed (string) """ try: if CONTENT_TYPE_HEADER_NAME in headers: if XMLRenderer.DEFAULT_CONTENT_TYPE == headers[CONTENT_TYPE_HEADER_NAME]: xml_parsed = parseString(body) pretty_xml_as_string = xml_parsed.toprettyxml() return pretty_xml_as_string elif JSONRenderer.DEFAULT_CONTENT_TYPE == headers[CONTENT_TYPE_HEADER_NAME]: decoded_body = body.decode('utf-8') parsed = json.loads(decoded_body) return json.dumps(parsed, sort_keys=True, indent=4) except: pass return body
python
def _get_pretty_body(headers, body): """ Return a pretty printed body using the Content-Type header information. :param headers: Headers for the request/response (dict) :param body: Body to pretty print (string) :return: Body pretty printed (string) """ try: if CONTENT_TYPE_HEADER_NAME in headers: if XMLRenderer.DEFAULT_CONTENT_TYPE == headers[CONTENT_TYPE_HEADER_NAME]: xml_parsed = parseString(body) pretty_xml_as_string = xml_parsed.toprettyxml() return pretty_xml_as_string elif JSONRenderer.DEFAULT_CONTENT_TYPE == headers[CONTENT_TYPE_HEADER_NAME]: decoded_body = body.decode('utf-8') parsed = json.loads(decoded_body) return json.dumps(parsed, sort_keys=True, indent=4) except: pass return body
[ "def", "_get_pretty_body", "(", "headers", ",", "body", ")", ":", "try", ":", "if", "CONTENT_TYPE_HEADER_NAME", "in", "headers", ":", "if", "XMLRenderer", ".", "DEFAULT_CONTENT_TYPE", "==", "headers", "[", "CONTENT_TYPE_HEADER_NAME", "]", ":", "xml_parsed", "=", "parseString", "(", "body", ")", "pretty_xml_as_string", "=", "xml_parsed", ".", "toprettyxml", "(", ")", "return", "pretty_xml_as_string", "elif", "JSONRenderer", ".", "DEFAULT_CONTENT_TYPE", "==", "headers", "[", "CONTENT_TYPE_HEADER_NAME", "]", ":", "decoded_body", "=", "body", ".", "decode", "(", "'utf-8'", ")", "parsed", "=", "json", ".", "loads", "(", "decoded_body", ")", "return", "json", ".", "dumps", "(", "parsed", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")", "except", ":", "pass", "finally", ":", "return", "body" ]
Return a pretty printed body using the Content-Type header information. :param headers: Headers for the request/response (dict) :param body: Body to pretty print (string) :return: Body pretty printed (string)
[ "Return", "a", "pretty", "printed", "body", "using", "the", "Content", "-", "Type", "header", "information", "." ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/util/logger.py#L14-L35
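A short usage sketch of ``_get_pretty_body`` (module-private, imported here only for illustration); it assumes ``sdklib`` is installed and that ``CONTENT_TYPE_HEADER_NAME`` resolves to ``'Content-Type'`` and ``JSONRenderer.DEFAULT_CONTENT_TYPE`` to ``'application/json'``, as the constant names suggest:

.. code-block:: python

    from sdklib.util.logger import _get_pretty_body

    headers = {"Content-Type": "application/json"}
    body = b'{"b": 2, "a": 1}'
    # Prints the body re-serialized with sorted keys and 4-space indentation:
    print(_get_pretty_body(headers, body))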
ivanprjcts/sdklib
sdklib/util/logger.py
log_print_request
def log_print_request(method, url, query_params=None, headers=None, body=None): """ Log HTTP request data in a user-friendly representation. :param method: HTTP method :param url: URL :param query_params: Query parameters in the URL :param headers: Headers (dict) :param body: Body (raw body, string) :return: None """ log_msg = '\n>>>>>>>>>>>>>>>>>>>>> Request >>>>>>>>>>>>>>>>>>> \n' log_msg += '\t> Method: %s\n' % method log_msg += '\t> Url: %s\n' % url if query_params is not None: log_msg += '\t> Query params: {}\n'.format(str(query_params)) if headers is not None: log_msg += '\t> Headers:\n{}\n'.format(json.dumps(dict(headers), sort_keys=True, indent=4)) if body is not None: try: log_msg += '\t> Payload sent:\n{}\n'.format(_get_pretty_body(headers, body)) except: log_msg += "\t> Payload couldn't be formatted" logger.debug(log_msg)
python
def log_print_request(method, url, query_params=None, headers=None, body=None): """ Log HTTP request data in a user-friendly representation. :param method: HTTP method :param url: URL :param query_params: Query parameters in the URL :param headers: Headers (dict) :param body: Body (raw body, string) :return: None """ log_msg = '\n>>>>>>>>>>>>>>>>>>>>> Request >>>>>>>>>>>>>>>>>>> \n' log_msg += '\t> Method: %s\n' % method log_msg += '\t> Url: %s\n' % url if query_params is not None: log_msg += '\t> Query params: {}\n'.format(str(query_params)) if headers is not None: log_msg += '\t> Headers:\n{}\n'.format(json.dumps(dict(headers), sort_keys=True, indent=4)) if body is not None: try: log_msg += '\t> Payload sent:\n{}\n'.format(_get_pretty_body(headers, body)) except: log_msg += "\t> Payload couldn't be formatted" logger.debug(log_msg)
[ "def", "log_print_request", "(", "method", ",", "url", ",", "query_params", "=", "None", ",", "headers", "=", "None", ",", "body", "=", "None", ")", ":", "log_msg", "=", "'\\n>>>>>>>>>>>>>>>>>>>>> Request >>>>>>>>>>>>>>>>>>> \\n'", "log_msg", "+=", "'\\t> Method: %s\\n'", "%", "method", "log_msg", "+=", "'\\t> Url: %s\\n'", "%", "url", "if", "query_params", "is", "not", "None", ":", "log_msg", "+=", "'\\t> Query params: {}\\n'", ".", "format", "(", "str", "(", "query_params", ")", ")", "if", "headers", "is", "not", "None", ":", "log_msg", "+=", "'\\t> Headers:\\n{}\\n'", ".", "format", "(", "json", ".", "dumps", "(", "dict", "(", "headers", ")", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")", ")", "if", "body", "is", "not", "None", ":", "try", ":", "log_msg", "+=", "'\\t> Payload sent:\\n{}\\n'", ".", "format", "(", "_get_pretty_body", "(", "headers", ",", "body", ")", ")", "except", ":", "log_msg", "+=", "\"\\t> Payload could't be formatted\"", "logger", ".", "debug", "(", "log_msg", ")" ]
Log HTTP request data in a user-friendly representation. :param method: HTTP method :param url: URL :param query_params: Query parameters in the URL :param headers: Headers (dict) :param body: Body (raw body, string) :return: None
[ "Log", "an", "HTTP", "request", "data", "in", "a", "user", "-", "friendly", "representation", "." ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/util/logger.py#L38-L63
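A hypothetical call (all values are illustrative), assuming the module logger has a handler at DEBUG level; ``logging.basicConfig`` is the simplest way to get one:

.. code-block:: python

    import logging

    from sdklib.util.logger import log_print_request

    logging.basicConfig(level=logging.DEBUG)  # make logger.debug() visible
    log_print_request(
        method="POST",
        url="https://example.com/api/items",
        query_params={"page": 1},
        headers={"Content-Type": "application/json"},
        body=b'{"name": "widget"}',
    )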
ivanprjcts/sdklib
sdklib/util/logger.py
log_print_response
def log_print_response(status_code, response, headers=None): """ Log HTTP response data in a user-friendly representation. :param status_code: HTTP Status Code :param response: Raw response content (string) :param headers: Headers in the response (dict) :return: None """ log_msg = '\n<<<<<<<<<<<<<<<<<<<<<< Response <<<<<<<<<<<<<<<<<<\n' log_msg += '\t< Response code: {}\n'.format(str(status_code)) if headers is not None: log_msg += '\t< Headers:\n{}\n'.format(json.dumps(dict(headers), sort_keys=True, indent=4)) try: log_msg += '\t< Payload received:\n{}'.format(_get_pretty_body(headers, response)) except: log_msg += '\t< Payload received:\n{}'.format(response) logger.debug(log_msg)
python
def log_print_response(status_code, response, headers=None): """ Log HTTP response data in a user-friendly representation. :param status_code: HTTP Status Code :param response: Raw response content (string) :param headers: Headers in the response (dict) :return: None """ log_msg = '\n<<<<<<<<<<<<<<<<<<<<<< Response <<<<<<<<<<<<<<<<<<\n' log_msg += '\t< Response code: {}\n'.format(str(status_code)) if headers is not None: log_msg += '\t< Headers:\n{}\n'.format(json.dumps(dict(headers), sort_keys=True, indent=4)) try: log_msg += '\t< Payload received:\n{}'.format(_get_pretty_body(headers, response)) except: log_msg += '\t< Payload received:\n{}'.format(response) logger.debug(log_msg)
[ "def", "log_print_response", "(", "status_code", ",", "response", ",", "headers", "=", "None", ")", ":", "log_msg", "=", "'\\n<<<<<<<<<<<<<<<<<<<<<< Response <<<<<<<<<<<<<<<<<<\\n'", "log_msg", "+=", "'\\t< Response code: {}\\n'", ".", "format", "(", "str", "(", "status_code", ")", ")", "if", "headers", "is", "not", "None", ":", "log_msg", "+=", "'\\t< Headers:\\n{}\\n'", ".", "format", "(", "json", ".", "dumps", "(", "dict", "(", "headers", ")", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")", ")", "try", ":", "log_msg", "+=", "'\\t< Payload received:\\n{}'", ".", "format", "(", "_get_pretty_body", "(", "headers", ",", "response", ")", ")", "except", ":", "log_msg", "+=", "'\\t< Payload received:\\n{}'", ".", "format", "(", "response", ")", "logger", ".", "debug", "(", "log_msg", ")" ]
Log HTTP response data in a user-friendly representation. :param status_code: HTTP Status Code :param response: Raw response content (string) :param headers: Headers in the response (dict) :return: None
[ "Log", "an", "HTTP", "response", "data", "in", "a", "user", "-", "friendly", "representation", "." ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/util/logger.py#L66-L84
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
args_kwargs_to_initdict
def args_kwargs_to_initdict(args: ArgsList, kwargs: KwargsDict) -> InitDict: """ Converts a set of ``args`` and ``kwargs`` to an ``InitDict``. """ return {ARGS_LABEL: args, KWARGS_LABEL: kwargs}
python
def args_kwargs_to_initdict(args: ArgsList, kwargs: KwargsDict) -> InitDict: """ Converts a set of ``args`` and ``kwargs`` to an ``InitDict``. """ return {ARGS_LABEL: args, KWARGS_LABEL: kwargs}
[ "def", "args_kwargs_to_initdict", "(", "args", ":", "ArgsList", ",", "kwargs", ":", "KwargsDict", ")", "->", "InitDict", ":", "return", "{", "ARGS_LABEL", ":", "args", ",", "KWARGS_LABEL", ":", "kwargs", "}" ]
Converts a set of ``args`` and ``kwargs`` to an ``InitDict``.
[ "Converts", "a", "set", "of", "args", "and", "kwargs", "to", "an", "InitDict", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L122-L127
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
strip_leading_underscores_from_keys
def strip_leading_underscores_from_keys(d: Dict) -> Dict: """ Clones a dictionary, removing leading underscores from key names. Raises ``ValueError`` if this causes an attribute conflict. """ newdict = {} for k, v in d.items(): if k.startswith('_'): k = k[1:] if k in newdict: raise ValueError("Attribute conflict: _{k}, {k}".format(k=k)) newdict[k] = v return newdict
python
def strip_leading_underscores_from_keys(d: Dict) -> Dict: """ Clones a dictionary, removing leading underscores from key names. Raises ``ValueError`` if this causes an attribute conflict. """ newdict = {} for k, v in d.items(): if k.startswith('_'): k = k[1:] if k in newdict: raise ValueError("Attribute conflict: _{k}, {k}".format(k=k)) newdict[k] = v return newdict
[ "def", "strip_leading_underscores_from_keys", "(", "d", ":", "Dict", ")", "->", "Dict", ":", "newdict", "=", "{", "}", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "k", ".", "startswith", "(", "'_'", ")", ":", "k", "=", "k", "[", "1", ":", "]", "if", "k", "in", "newdict", ":", "raise", "ValueError", "(", "\"Attribute conflict: _{k}, {k}\"", ".", "format", "(", "k", "=", "k", ")", ")", "newdict", "[", "k", "]", "=", "v", "return", "newdict" ]
Clones a dictionary, removing leading underscores from key names. Raises ``ValueError`` if this causes an attribute conflict.
[ "Clones", "a", "dictionary", "removing", "leading", "underscores", "from", "key", "names", ".", "Raises", "ValueError", "if", "this", "causes", "an", "attribute", "conflict", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L149-L161
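A sketch of the success and conflict cases, assuming ``cardinal_pythonlib`` is installed:

.. code-block:: python

    from cardinal_pythonlib.json.serialize import strip_leading_underscores_from_keys

    assert strip_leading_underscores_from_keys({"_a": 1, "b": 2}) == {"a": 1, "b": 2}
    try:
        strip_leading_underscores_from_keys({"_a": 1, "a": 2})
    except ValueError as e:
        print(e)  # Attribute conflict: _a, a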
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
verify_initdict
def verify_initdict(initdict: InitDict) -> None: """ Ensures that its parameter is a proper ``InitDict``, or raises ``ValueError``. """ if (not isinstance(initdict, dict) or ARGS_LABEL not in initdict or KWARGS_LABEL not in initdict): raise ValueError("Not an InitDict dictionary")
python
def verify_initdict(initdict: InitDict) -> None: """ Ensures that its parameter is a proper ``InitDict``, or raises ``ValueError``. """ if (not isinstance(initdict, dict) or ARGS_LABEL not in initdict or KWARGS_LABEL not in initdict): raise ValueError("Not an InitDict dictionary")
[ "def", "verify_initdict", "(", "initdict", ":", "InitDict", ")", "->", "None", ":", "if", "(", "not", "isinstance", "(", "initdict", ",", "dict", ")", "or", "ARGS_LABEL", "not", "in", "initdict", "or", "KWARGS_LABEL", "not", "in", "initdict", ")", ":", "raise", "ValueError", "(", "\"Not an InitDict dictionary\"", ")" ]
Ensures that its parameter is a proper ``InitDict``, or raises ``ValueError``.
[ "Ensures", "that", "its", "parameter", "is", "a", "proper", "InitDict", "or", "raises", "ValueError", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L164-L172
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
initdict_to_instance
def initdict_to_instance(d: InitDict, cls: ClassType) -> Any: """ Converse of simple_to_dict(). Given that JSON dictionary, we will end up re-instantiating the class with .. code-block:: python d = {'a': 1, 'b': 2, 'c': 3} new_x = SimpleClass(**d) We'll also support arbitrary creation, by using both ``*args`` and ``**kwargs``. """ args = d.get(ARGS_LABEL, []) kwargs = d.get(KWARGS_LABEL, {}) # noinspection PyArgumentList return cls(*args, **kwargs)
python
def initdict_to_instance(d: InitDict, cls: ClassType) -> Any: """ Converse of simple_to_dict(). Given that JSON dictionary, we will end up re-instantiating the class with .. code-block:: python d = {'a': 1, 'b': 2, 'c': 3} new_x = SimpleClass(**d) We'll also support arbitrary creation, by using both ``*args`` and ``**kwargs``. """ args = d.get(ARGS_LABEL, []) kwargs = d.get(KWARGS_LABEL, {}) # noinspection PyArgumentList return cls(*args, **kwargs)
[ "def", "initdict_to_instance", "(", "d", ":", "InitDict", ",", "cls", ":", "ClassType", ")", "->", "Any", ":", "args", "=", "d", ".", "get", "(", "ARGS_LABEL", ",", "[", "]", ")", "kwargs", "=", "d", ".", "get", "(", "KWARGS_LABEL", ",", "{", "}", ")", "# noinspection PyArgumentList", "return", "cls", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Converse of simple_to_dict(). Given that JSON dictionary, we will end up re-instantiating the class with .. code-block:: python d = {'a': 1, 'b': 2, 'c': 3} new_x = SimpleClass(**d) We'll also support arbitrary creation, by using both ``*args`` and ``**kwargs``.
[ "Converse", "of", "simple_to_dict", "()", ".", "Given", "that", "JSON", "dictionary", "we", "will", "end", "up", "re", "-", "instantiating", "the", "class", "with" ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L179-L195
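A round-trip sketch tying the three ``InitDict`` helpers together (``Point`` is a made-up class):

.. code-block:: python

    from cardinal_pythonlib.json.serialize import (
        args_kwargs_to_initdict,
        initdict_to_instance,
        verify_initdict,
    )

    class Point:
        def __init__(self, x, y=0):
            self.x = x
            self.y = y

    d = args_kwargs_to_initdict([3], {"y": 4})  # keyed by ARGS_LABEL/KWARGS_LABEL
    verify_initdict(d)                          # no exception: well-formed InitDict
    p = initdict_to_instance(d, Point)
    assert (p.x, p.y) == (3, 4)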
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
wrap_kwargs_to_initdict
def wrap_kwargs_to_initdict(init_kwargs_fn: InitKwargsFnType, typename: str, check_result: bool = True) \ -> InstanceToInitDictFnType: """ Wraps a function producing a ``KwargsDict``, making it into a function producing an ``InitDict``. """ def wrapper(obj: Instance) -> InitDict: result = init_kwargs_fn(obj) if check_result: if not isinstance(result, dict): raise ValueError( "Class {} failed to provide a kwargs dict and " "provided instead: {}".format(typename, repr(result))) return kwargs_to_initdict(init_kwargs_fn(obj)) return wrapper
python
def wrap_kwargs_to_initdict(init_kwargs_fn: InitKwargsFnType, typename: str, check_result: bool = True) \ -> InstanceToInitDictFnType: """ Wraps a function producing a ``KwargsDict``, making it into a function producing an ``InitDict``. """ def wrapper(obj: Instance) -> InitDict: result = init_kwargs_fn(obj) if check_result: if not isinstance(result, dict): raise ValueError( "Class {} failed to provide a kwargs dict and " "provided instead: {}".format(typename, repr(result))) return kwargs_to_initdict(init_kwargs_fn(obj)) return wrapper
[ "def", "wrap_kwargs_to_initdict", "(", "init_kwargs_fn", ":", "InitKwargsFnType", ",", "typename", ":", "str", ",", "check_result", ":", "bool", "=", "True", ")", "->", "InstanceToInitDictFnType", ":", "def", "wrapper", "(", "obj", ":", "Instance", ")", "->", "InitDict", ":", "result", "=", "init_kwargs_fn", "(", "obj", ")", "if", "check_result", ":", "if", "not", "isinstance", "(", "result", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"Class {} failed to provide a kwargs dict and \"", "\"provided instead: {}\"", ".", "format", "(", "typename", ",", "repr", "(", "result", ")", ")", ")", "return", "kwargs_to_initdict", "(", "init_kwargs_fn", "(", "obj", ")", ")", "return", "wrapper" ]
Wraps a function producing a ``KwargsDict``, making it into a function producing an ``InitDict``.
[ "Wraps", "a", "function", "producing", "a", "KwargsDict", "making", "it", "into", "a", "function", "producing", "an", "InitDict", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L270-L287
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
wrap_args_kwargs_to_initdict
def wrap_args_kwargs_to_initdict(init_args_kwargs_fn: InitArgsKwargsFnType, typename: str, check_result: bool = True) \ -> InstanceToInitDictFnType: """ Wraps a function producing an ``(args, kwargs)`` tuple, making it into a function producing an ``InitDict``. """ def wrapper(obj: Instance) -> InitDict: result = init_args_kwargs_fn(obj) if check_result: if (not isinstance(result, tuple) or not len(result) == 2 or not isinstance(result[0], list) or not isinstance(result[1], dict)): raise ValueError( "Class {} failed to provide an (args, kwargs) tuple and " "provided instead: {}".format(typename, repr(result))) return args_kwargs_to_initdict(*result) return wrapper
python
def wrap_args_kwargs_to_initdict(init_args_kwargs_fn: InitArgsKwargsFnType, typename: str, check_result: bool = True) \ -> InstanceToInitDictFnType: """ Wraps a function producing an ``(args, kwargs)`` tuple, making it into a function producing an ``InitDict``. """ def wrapper(obj: Instance) -> InitDict: result = init_args_kwargs_fn(obj) if check_result: if (not isinstance(result, tuple) or not len(result) == 2 or not isinstance(result[0], list) or not isinstance(result[1], dict)): raise ValueError( "Class {} failed to provide an (args, kwargs) tuple and " "provided instead: {}".format(typename, repr(result))) return args_kwargs_to_initdict(*result) return wrapper
[ "def", "wrap_args_kwargs_to_initdict", "(", "init_args_kwargs_fn", ":", "InitArgsKwargsFnType", ",", "typename", ":", "str", ",", "check_result", ":", "bool", "=", "True", ")", "->", "InstanceToInitDictFnType", ":", "def", "wrapper", "(", "obj", ":", "Instance", ")", "->", "InitDict", ":", "result", "=", "init_args_kwargs_fn", "(", "obj", ")", "if", "check_result", ":", "if", "(", "not", "isinstance", "(", "result", ",", "tuple", ")", "or", "not", "len", "(", "result", ")", "==", "2", "or", "not", "isinstance", "(", "result", "[", "0", "]", ",", "list", ")", "or", "not", "isinstance", "(", "result", "[", "1", "]", ",", "dict", ")", ")", ":", "raise", "ValueError", "(", "\"Class {} failed to provide an (args, kwargs) tuple and \"", "\"provided instead: {}\"", ".", "format", "(", "typename", ",", "repr", "(", "result", ")", ")", ")", "return", "args_kwargs_to_initdict", "(", "*", "result", ")", "return", "wrapper" ]
Wraps a function producing an ``(args, kwargs)`` tuple, making it into a function producing an ``InitDict``.
[ "Wraps", "a", "function", "producing", "a", "KwargsDict", "making", "it", "into", "a", "function", "producing", "an", "InitDict", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L290-L310
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
make_instance_to_initdict
def make_instance_to_initdict(attributes: List[str]) -> InstanceToDictFnType: """ Returns a function that takes an object (instance) and produces an ``InitDict`` enabling its re-creation. """ def custom_instance_to_initdict(x: Instance) -> InitDict: kwargs = {} for a in attributes: kwargs[a] = getattr(x, a) return kwargs_to_initdict(kwargs) return custom_instance_to_initdict
python
def make_instance_to_initdict(attributes: List[str]) -> InstanceToDictFnType: """ Returns a function that takes an object (instance) and produces an ``InitDict`` enabling its re-creation. """ def custom_instance_to_initdict(x: Instance) -> InitDict: kwargs = {} for a in attributes: kwargs[a] = getattr(x, a) return kwargs_to_initdict(kwargs) return custom_instance_to_initdict
[ "def", "make_instance_to_initdict", "(", "attributes", ":", "List", "[", "str", "]", ")", "->", "InstanceToDictFnType", ":", "def", "custom_instance_to_initdict", "(", "x", ":", "Instance", ")", "->", "InitDict", ":", "kwargs", "=", "{", "}", "for", "a", "in", "attributes", ":", "kwargs", "[", "a", "]", "=", "getattr", "(", "x", ",", "a", ")", "return", "kwargs_to_initdict", "(", "kwargs", ")", "return", "custom_instance_to_initdict" ]
Returns a function that takes an object (instance) and produces an ``InitDict`` enabling its re-creation.
[ "Returns", "a", "function", "that", "takes", "an", "object", "(", "instance", ")", "and", "produces", "an", "InitDict", "enabling", "its", "re", "-", "creation", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L317-L328
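A sketch of building a serializer from an attribute list (``Point`` is again illustrative):

.. code-block:: python

    from cardinal_pythonlib.json.serialize import make_instance_to_initdict

    class Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    to_initdict = make_instance_to_initdict(["x", "y"])
    initdict = to_initdict(Point(1, 2))
    # initdict is an InitDict whose kwargs part is {'x': 1, 'y': 2}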
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
register_class_for_json
def register_class_for_json( cls: ClassType, method: str = METHOD_SIMPLE, obj_to_dict_fn: InstanceToDictFnType = None, dict_to_obj_fn: DictToInstanceFnType = initdict_to_instance, default_factory: DefaultFactoryFnType = None) -> None: """ Registers the class cls for JSON serialization. - If both ``obj_to_dict_fn`` and dict_to_obj_fn are registered, the framework uses these to convert instances of the class to/from Python dictionaries, which are in turn serialized to JSON. - Otherwise: .. code-block:: python if method == 'simple': # ... uses simple_to_dict and simple_from_dict (q.v.) if method == 'strip_underscore': # ... uses strip_underscore_to_dict and simple_from_dict (q.v.) """ typename = cls.__qualname__ # preferable to __name__ # ... __name__ looks like "Thing" and is ambiguous # ... __qualname__ looks like "my.module.Thing" and is not if obj_to_dict_fn and dict_to_obj_fn: descriptor = JsonDescriptor( typename=typename, obj_to_dict_fn=obj_to_dict_fn, dict_to_obj_fn=dict_to_obj_fn, cls=cls, default_factory=default_factory) elif method == METHOD_SIMPLE: descriptor = JsonDescriptor( typename=typename, obj_to_dict_fn=instance_to_initdict_simple, dict_to_obj_fn=initdict_to_instance, cls=cls, default_factory=default_factory) elif method == METHOD_STRIP_UNDERSCORE: descriptor = JsonDescriptor( typename=typename, obj_to_dict_fn=instance_to_initdict_stripping_underscores, dict_to_obj_fn=initdict_to_instance, cls=cls, default_factory=default_factory) else: raise ValueError("Unknown method, and functions not fully specified") global TYPE_MAP TYPE_MAP[typename] = descriptor
python
def register_class_for_json( cls: ClassType, method: str = METHOD_SIMPLE, obj_to_dict_fn: InstanceToDictFnType = None, dict_to_obj_fn: DictToInstanceFnType = initdict_to_instance, default_factory: DefaultFactoryFnType = None) -> None: """ Registers the class cls for JSON serialization. - If both ``obj_to_dict_fn`` and dict_to_obj_fn are registered, the framework uses these to convert instances of the class to/from Python dictionaries, which are in turn serialized to JSON. - Otherwise: .. code-block:: python if method == 'simple': # ... uses simple_to_dict and simple_from_dict (q.v.) if method == 'strip_underscore': # ... uses strip_underscore_to_dict and simple_from_dict (q.v.) """ typename = cls.__qualname__ # preferable to __name__ # ... __name__ looks like "Thing" and is ambiguous # ... __qualname__ looks like "my.module.Thing" and is not if obj_to_dict_fn and dict_to_obj_fn: descriptor = JsonDescriptor( typename=typename, obj_to_dict_fn=obj_to_dict_fn, dict_to_obj_fn=dict_to_obj_fn, cls=cls, default_factory=default_factory) elif method == METHOD_SIMPLE: descriptor = JsonDescriptor( typename=typename, obj_to_dict_fn=instance_to_initdict_simple, dict_to_obj_fn=initdict_to_instance, cls=cls, default_factory=default_factory) elif method == METHOD_STRIP_UNDERSCORE: descriptor = JsonDescriptor( typename=typename, obj_to_dict_fn=instance_to_initdict_stripping_underscores, dict_to_obj_fn=initdict_to_instance, cls=cls, default_factory=default_factory) else: raise ValueError("Unknown method, and functions not fully specified") global TYPE_MAP TYPE_MAP[typename] = descriptor
[ "def", "register_class_for_json", "(", "cls", ":", "ClassType", ",", "method", ":", "str", "=", "METHOD_SIMPLE", ",", "obj_to_dict_fn", ":", "InstanceToDictFnType", "=", "None", ",", "dict_to_obj_fn", ":", "DictToInstanceFnType", "=", "initdict_to_instance", ",", "default_factory", ":", "DefaultFactoryFnType", "=", "None", ")", "->", "None", ":", "typename", "=", "cls", ".", "__qualname__", "# preferable to __name__", "# ... __name__ looks like \"Thing\" and is ambiguous", "# ... __qualname__ looks like \"my.module.Thing\" and is not", "if", "obj_to_dict_fn", "and", "dict_to_obj_fn", ":", "descriptor", "=", "JsonDescriptor", "(", "typename", "=", "typename", ",", "obj_to_dict_fn", "=", "obj_to_dict_fn", ",", "dict_to_obj_fn", "=", "dict_to_obj_fn", ",", "cls", "=", "cls", ",", "default_factory", "=", "default_factory", ")", "elif", "method", "==", "METHOD_SIMPLE", ":", "descriptor", "=", "JsonDescriptor", "(", "typename", "=", "typename", ",", "obj_to_dict_fn", "=", "instance_to_initdict_simple", ",", "dict_to_obj_fn", "=", "initdict_to_instance", ",", "cls", "=", "cls", ",", "default_factory", "=", "default_factory", ")", "elif", "method", "==", "METHOD_STRIP_UNDERSCORE", ":", "descriptor", "=", "JsonDescriptor", "(", "typename", "=", "typename", ",", "obj_to_dict_fn", "=", "instance_to_initdict_stripping_underscores", ",", "dict_to_obj_fn", "=", "initdict_to_instance", ",", "cls", "=", "cls", ",", "default_factory", "=", "default_factory", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown method, and functions not fully specified\"", ")", "global", "TYPE_MAP", "TYPE_MAP", "[", "typename", "]", "=", "descriptor" ]
Registers the class cls for JSON serialization. - If both ``obj_to_dict_fn`` and dict_to_obj_fn are registered, the framework uses these to convert instances of the class to/from Python dictionaries, which are in turn serialized to JSON. - Otherwise: .. code-block:: python if method == 'simple': # ... uses simple_to_dict and simple_from_dict (q.v.) if method == 'strip_underscore': # ... uses strip_underscore_to_dict and simple_from_dict (q.v.)
[ "Registers", "the", "class", "cls", "for", "JSON", "serialization", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L395-L445
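A sketch of manual registration with custom converter functions (the class and function names are made up; note that a ``dict_to_obj_fn`` takes the dictionary and the class, matching the converters later in this module):

.. code-block:: python

    from cardinal_pythonlib.json.serialize import register_class_for_json

    class Colour:
        def __init__(self, name):
            self.name = name

    def colour_to_dict(obj):
        return {"name": obj.name}

    def dict_to_colour(d, cls):
        return cls(d["name"])

    register_class_for_json(
        Colour,
        obj_to_dict_fn=colour_to_dict,
        dict_to_obj_fn=dict_to_colour,
    )
    # Colour instances can now round-trip through json_encode()/json_decode().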
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
register_for_json
def register_for_json(*args, **kwargs) -> Any: """ Class decorator to register classes with our JSON system. - If method is ``'provides_init_args_kwargs'``, the class provides a function .. code-block:: python def init_args_kwargs(self) -> Tuple[List[Any], Dict[str, Any]] that returns an ``(args, kwargs)`` tuple, suitable for passing to its ``__init__()`` function as ``__init__(*args, **kwargs)``. - If method is ``'provides_init_kwargs'``, the class provides a function .. code-block:: python def init_kwargs(self) -> Dict that returns a dictionary ``kwargs`` suitable for passing to its ``__init__()`` function as ``__init__(**kwargs)``. - Otherwise, the method argument is as for ``register_class_for_json()``. Usage looks like: .. code-block:: python @register_for_json(method=METHOD_STRIP_UNDERSCORE) class TableId(object): def __init__(self, db: str = '', schema: str = '', table: str = '') -> None: self._db = db self._schema = schema self._table = table """ if DEBUG: print("register_for_json: args = {}".format(repr(args))) print("register_for_json: kwargs = {}".format(repr(kwargs))) # http://stackoverflow.com/questions/653368/how-to-create-a-python-decorator-that-can-be-used-either-with-or-without-paramet # noqa # In brief, # @decorator # x # # means # x = decorator(x) # # so # @decorator(args) # x # # means # x = decorator(args)(x) if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): if DEBUG: print("... called as @register_for_json") # called as @decorator # ... the single argument is the class itself, e.g. Thing in: # @decorator # class Thing(object): # # ... # ... e.g.: # args = (<class '__main__.unit_tests.<locals>.SimpleThing'>,) # kwargs = {} cls = args[0] # type: ClassType register_class_for_json(cls, method=METHOD_SIMPLE) return cls # Otherwise: if DEBUG: print("... called as @register_for_json(*args, **kwargs)") # called as @decorator(*args, **kwargs) # ... e.g.: # args = () # kwargs = {'method': 'provides_to_init_args_kwargs_dict'} method = kwargs.pop('method', METHOD_SIMPLE) # type: str obj_to_dict_fn = kwargs.pop('obj_to_dict_fn', None) # type: InstanceToDictFnType # noqa dict_to_obj_fn = kwargs.pop('dict_to_obj_fn', initdict_to_instance) # type: DictToInstanceFnType # noqa default_factory = kwargs.pop('default_factory', None) # type: DefaultFactoryFnType # noqa check_result = kwargs.pop('check_results', True) # type: bool def register_json_class(cls_: ClassType) -> ClassType: odf = obj_to_dict_fn dof = dict_to_obj_fn if method == METHOD_PROVIDES_INIT_ARGS_KWARGS: if hasattr(cls_, INIT_ARGS_KWARGS_FN_NAME): odf = wrap_args_kwargs_to_initdict( getattr(cls_, INIT_ARGS_KWARGS_FN_NAME), typename=cls_.__qualname__, check_result=check_result ) else: raise ValueError( "Class type {} does not provide function {}".format( cls_, INIT_ARGS_KWARGS_FN_NAME)) elif method == METHOD_PROVIDES_INIT_KWARGS: if hasattr(cls_, INIT_KWARGS_FN_NAME): odf = wrap_kwargs_to_initdict( getattr(cls_, INIT_KWARGS_FN_NAME), typename=cls_.__qualname__, check_result=check_result ) else: raise ValueError( "Class type {} does not provide function {}".format( cls_, INIT_KWARGS_FN_NAME)) elif method == METHOD_NO_ARGS: odf = obj_with_no_args_to_init_dict register_class_for_json(cls_, method=method, obj_to_dict_fn=odf, dict_to_obj_fn=dof, default_factory=default_factory) return cls_ return register_json_class
python
def register_for_json(*args, **kwargs) -> Any: """ Class decorator to register classes with our JSON system. - If method is ``'provides_init_args_kwargs'``, the class provides a function .. code-block:: python def init_args_kwargs(self) -> Tuple[List[Any], Dict[str, Any]] that returns an ``(args, kwargs)`` tuple, suitable for passing to its ``__init__()`` function as ``__init__(*args, **kwargs)``. - If method is ``'provides_init_kwargs'``, the class provides a function .. code-block:: python def init_kwargs(self) -> Dict that returns a dictionary ``kwargs`` suitable for passing to its ``__init__()`` function as ``__init__(**kwargs)``. - Otherwise, the method argument is as for ``register_class_for_json()``. Usage looks like: .. code-block:: python @register_for_json(method=METHOD_STRIP_UNDERSCORE) class TableId(object): def __init__(self, db: str = '', schema: str = '', table: str = '') -> None: self._db = db self._schema = schema self._table = table """ if DEBUG: print("register_for_json: args = {}".format(repr(args))) print("register_for_json: kwargs = {}".format(repr(kwargs))) # http://stackoverflow.com/questions/653368/how-to-create-a-python-decorator-that-can-be-used-either-with-or-without-paramet # noqa # In brief, # @decorator # x # # means # x = decorator(x) # # so # @decorator(args) # x # # means # x = decorator(args)(x) if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): if DEBUG: print("... called as @register_for_json") # called as @decorator # ... the single argument is the class itself, e.g. Thing in: # @decorator # class Thing(object): # # ... # ... e.g.: # args = (<class '__main__.unit_tests.<locals>.SimpleThing'>,) # kwargs = {} cls = args[0] # type: ClassType register_class_for_json(cls, method=METHOD_SIMPLE) return cls # Otherwise: if DEBUG: print("... called as @register_for_json(*args, **kwargs)") # called as @decorator(*args, **kwargs) # ... e.g.: # args = () # kwargs = {'method': 'provides_to_init_args_kwargs_dict'} method = kwargs.pop('method', METHOD_SIMPLE) # type: str obj_to_dict_fn = kwargs.pop('obj_to_dict_fn', None) # type: InstanceToDictFnType # noqa dict_to_obj_fn = kwargs.pop('dict_to_obj_fn', initdict_to_instance) # type: DictToInstanceFnType # noqa default_factory = kwargs.pop('default_factory', None) # type: DefaultFactoryFnType # noqa check_result = kwargs.pop('check_results', True) # type: bool def register_json_class(cls_: ClassType) -> ClassType: odf = obj_to_dict_fn dof = dict_to_obj_fn if method == METHOD_PROVIDES_INIT_ARGS_KWARGS: if hasattr(cls_, INIT_ARGS_KWARGS_FN_NAME): odf = wrap_args_kwargs_to_initdict( getattr(cls_, INIT_ARGS_KWARGS_FN_NAME), typename=cls_.__qualname__, check_result=check_result ) else: raise ValueError( "Class type {} does not provide function {}".format( cls_, INIT_ARGS_KWARGS_FN_NAME)) elif method == METHOD_PROVIDES_INIT_KWARGS: if hasattr(cls_, INIT_KWARGS_FN_NAME): odf = wrap_kwargs_to_initdict( getattr(cls_, INIT_KWARGS_FN_NAME), typename=cls_.__qualname__, check_result=check_result ) else: raise ValueError( "Class type {} does not provide function {}".format( cls_, INIT_KWARGS_FN_NAME)) elif method == METHOD_NO_ARGS: odf = obj_with_no_args_to_init_dict register_class_for_json(cls_, method=method, obj_to_dict_fn=odf, dict_to_obj_fn=dof, default_factory=default_factory) return cls_ return register_json_class
[ "def", "register_for_json", "(", "*", "args", ",", "*", "*", "kwargs", ")", "->", "Any", ":", "if", "DEBUG", ":", "print", "(", "\"register_for_json: args = {}\"", ".", "format", "(", "repr", "(", "args", ")", ")", ")", "print", "(", "\"register_for_json: kwargs = {}\"", ".", "format", "(", "repr", "(", "kwargs", ")", ")", ")", "# http://stackoverflow.com/questions/653368/how-to-create-a-python-decorator-that-can-be-used-either-with-or-without-paramet # noqa", "# In brief,", "# @decorator", "# x", "#", "# means", "# x = decorator(x)", "#", "# so", "# @decorator(args)", "# x", "#", "# means", "# x = decorator(args)(x)", "if", "len", "(", "args", ")", "==", "1", "and", "len", "(", "kwargs", ")", "==", "0", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "if", "DEBUG", ":", "print", "(", "\"... called as @register_for_json\"", ")", "# called as @decorator", "# ... the single argument is the class itself, e.g. Thing in:", "# @decorator", "# class Thing(object):", "# # ...", "# ... e.g.:", "# args = (<class '__main__.unit_tests.<locals>.SimpleThing'>,)", "# kwargs = {}", "cls", "=", "args", "[", "0", "]", "# type: ClassType", "register_class_for_json", "(", "cls", ",", "method", "=", "METHOD_SIMPLE", ")", "return", "cls", "# Otherwise:", "if", "DEBUG", ":", "print", "(", "\"... called as @register_for_json(*args, **kwargs)\"", ")", "# called as @decorator(*args, **kwargs)", "# ... e.g.:", "# args = ()", "# kwargs = {'method': 'provides_to_init_args_kwargs_dict'}", "method", "=", "kwargs", ".", "pop", "(", "'method'", ",", "METHOD_SIMPLE", ")", "# type: str", "obj_to_dict_fn", "=", "kwargs", ".", "pop", "(", "'obj_to_dict_fn'", ",", "None", ")", "# type: InstanceToDictFnType # noqa", "dict_to_obj_fn", "=", "kwargs", ".", "pop", "(", "'dict_to_obj_fn'", ",", "initdict_to_instance", ")", "# type: DictToInstanceFnType # noqa", "default_factory", "=", "kwargs", ".", "pop", "(", "'default_factory'", ",", "None", ")", "# type: DefaultFactoryFnType # noqa", "check_result", "=", "kwargs", ".", "pop", "(", "'check_results'", ",", "True", ")", "# type: bool", "def", "register_json_class", "(", "cls_", ":", "ClassType", ")", "->", "ClassType", ":", "odf", "=", "obj_to_dict_fn", "dof", "=", "dict_to_obj_fn", "if", "method", "==", "METHOD_PROVIDES_INIT_ARGS_KWARGS", ":", "if", "hasattr", "(", "cls_", ",", "INIT_ARGS_KWARGS_FN_NAME", ")", ":", "odf", "=", "wrap_args_kwargs_to_initdict", "(", "getattr", "(", "cls_", ",", "INIT_ARGS_KWARGS_FN_NAME", ")", ",", "typename", "=", "cls_", ".", "__qualname__", ",", "check_result", "=", "check_result", ")", "else", ":", "raise", "ValueError", "(", "\"Class type {} does not provide function {}\"", ".", "format", "(", "cls_", ",", "INIT_ARGS_KWARGS_FN_NAME", ")", ")", "elif", "method", "==", "METHOD_PROVIDES_INIT_KWARGS", ":", "if", "hasattr", "(", "cls_", ",", "INIT_KWARGS_FN_NAME", ")", ":", "odf", "=", "wrap_kwargs_to_initdict", "(", "getattr", "(", "cls_", ",", "INIT_KWARGS_FN_NAME", ")", ",", "typename", "=", "cls_", ".", "__qualname__", ",", "check_result", "=", "check_result", ")", "else", ":", "raise", "ValueError", "(", "\"Class type {} does not provide function {}\"", ".", "format", "(", "cls_", ",", "INIT_KWARGS_FN_NAME", ")", ")", "elif", "method", "==", "METHOD_NO_ARGS", ":", "odf", "=", "obj_with_no_args_to_init_dict", "register_class_for_json", "(", "cls_", ",", "method", "=", "method", ",", "obj_to_dict_fn", "=", "odf", ",", "dict_to_obj_fn", "=", "dof", ",", "default_factory", "=", "default_factory", ")", "return", "cls_", "return", 
"register_json_class" ]
Class decorator to register classes with our JSON system. - If method is ``'provides_init_args_kwargs'``, the class provides a function .. code-block:: python def init_args_kwargs(self) -> Tuple[List[Any], Dict[str, Any]] that returns an ``(args, kwargs)`` tuple, suitable for passing to its ``__init__()`` function as ``__init__(*args, **kwargs)``. - If method is ``'provides_init_kwargs'``, the class provides a function .. code-block:: python def init_kwargs(self) -> Dict that returns a dictionary ``kwargs`` suitable for passing to its ``__init__()`` function as ``__init__(**kwargs)``. - Otherwise, the method argument is as for ``register_class_for_json()``. Usage looks like: .. code-block:: python @register_for_json(method=METHOD_STRIP_UNDERSCORE) class TableId(object): def __init__(self, db: str = '', schema: str = '', table: str = '') -> None: self._db = db self._schema = schema self._table = table
[ "Class", "decorator", "to", "register", "classes", "with", "our", "JSON", "system", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L448-L567
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
dump_map
def dump_map(file: TextIO = sys.stdout) -> None: """ Prints the JSON "registered types" map to the specified file. """ pp = pprint.PrettyPrinter(indent=4, stream=file) print("Type map: ", file=file) pp.pprint(TYPE_MAP)
python
def dump_map(file: TextIO = sys.stdout) -> None: """ Prints the JSON "registered types" map to the specified file. """ pp = pprint.PrettyPrinter(indent=4, stream=file) print("Type map: ", file=file) pp.pprint(TYPE_MAP)
[ "def", "dump_map", "(", "file", ":", "TextIO", "=", "sys", ".", "stdout", ")", "->", "None", ":", "pp", "=", "pprint", ".", "PrettyPrinter", "(", "indent", "=", "4", ",", "stream", "=", "file", ")", "print", "(", "\"Type map: \"", ",", "file", "=", "file", ")", "pp", ".", "pprint", "(", "TYPE_MAP", ")" ]
Prints the JSON "registered types" map to the specified file.
[ "Prints", "the", "JSON", "registered", "types", "map", "to", "the", "specified", "file", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L570-L576
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
json_class_decoder_hook
def json_class_decoder_hook(d: Dict) -> Any: """ Provides a JSON decoder that converts dictionaries to Python objects if suitable methods are found in our ``TYPE_MAP``. """ if TYPE_LABEL in d: typename = d.get(TYPE_LABEL) if typename in TYPE_MAP: if DEBUG: log.debug("Deserializing: {!r}", d) d.pop(TYPE_LABEL) descriptor = TYPE_MAP[typename] obj = descriptor.to_obj(d) if DEBUG: log.debug("... to: {!r}", obj) return obj return d
python
def json_class_decoder_hook(d: Dict) -> Any: """ Provides a JSON decoder that converts dictionaries to Python objects if suitable methods are found in our ``TYPE_MAP``. """ if TYPE_LABEL in d: typename = d.get(TYPE_LABEL) if typename in TYPE_MAP: if DEBUG: log.debug("Deserializing: {!r}", d) d.pop(TYPE_LABEL) descriptor = TYPE_MAP[typename] obj = descriptor.to_obj(d) if DEBUG: log.debug("... to: {!r}", obj) return obj return d
[ "def", "json_class_decoder_hook", "(", "d", ":", "Dict", ")", "->", "Any", ":", "if", "TYPE_LABEL", "in", "d", ":", "typename", "=", "d", ".", "get", "(", "TYPE_LABEL", ")", "if", "typename", "in", "TYPE_MAP", ":", "if", "DEBUG", ":", "log", ".", "debug", "(", "\"Deserializing: {!r}\"", ",", "d", ")", "d", ".", "pop", "(", "TYPE_LABEL", ")", "descriptor", "=", "TYPE_MAP", "[", "typename", "]", "obj", "=", "descriptor", ".", "to_obj", "(", "d", ")", "if", "DEBUG", ":", "log", ".", "debug", "(", "\"... to: {!r}\"", ",", "obj", ")", "return", "obj", "return", "d" ]
Provides a JSON decoder that converts dictionaries to Python objects if suitable methods are found in our ``TYPE_MAP``.
[ "Provides", "a", "JSON", "decoder", "that", "converts", "dictionaries", "to", "Python", "objects", "if", "suitable", "methods", "are", "found", "in", "our", "TYPE_MAP", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L603-L619
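The hook is designed to be passed as ``object_hook`` to the standard ``json`` decoder; a sketch (``json_decode``, below, wires this up for you):

.. code-block:: python

    import json

    from cardinal_pythonlib.json.serialize import json_class_decoder_hook

    # Dictionaries without the special type label pass through unchanged:
    decoded = json.loads('{"plain": true}', object_hook=json_class_decoder_hook)
    assert decoded == {"plain": True}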
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
json_encode
def json_encode(obj: Instance, **kwargs) -> str: """ Encodes an object to JSON using our custom encoder. The ``**kwargs`` can be used to pass things like ``'indent'``, for formatting. """ return json.dumps(obj, cls=JsonClassEncoder, **kwargs)
python
def json_encode(obj: Instance, **kwargs) -> str: """ Encodes an object to JSON using our custom encoder. The ``**kwargs`` can be used to pass things like ``'indent'``, for formatting. """ return json.dumps(obj, cls=JsonClassEncoder, **kwargs)
[ "def", "json_encode", "(", "obj", ":", "Instance", ",", "*", "*", "kwargs", ")", "->", "str", ":", "return", "json", ".", "dumps", "(", "obj", ",", "cls", "=", "JsonClassEncoder", ",", "*", "*", "kwargs", ")" ]
Encodes an object to JSON using our custom encoder. The ``**kwargs`` can be used to pass things like ``'indent'``, for formatting.
[ "Encodes", "an", "object", "to", "JSON", "using", "our", "custom", "encoder", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L626-L633
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
json_decode
def json_decode(s: str) -> Any: """ Decodes an object from JSON using our custom decoder. """ try: return json.JSONDecoder(object_hook=json_class_decoder_hook).decode(s) except json.JSONDecodeError: log.warning("Failed to decode JSON (returning None): {!r}", s) return None
python
def json_decode(s: str) -> Any: """ Decodes an object from JSON using our custom decoder. """ try: return json.JSONDecoder(object_hook=json_class_decoder_hook).decode(s) except json.JSONDecodeError: log.warning("Failed to decode JSON (returning None): {!r}", s) return None
[ "def", "json_decode", "(", "s", ":", "str", ")", "->", "Any", ":", "try", ":", "return", "json", ".", "JSONDecoder", "(", "object_hook", "=", "json_class_decoder_hook", ")", ".", "decode", "(", "s", ")", "except", "json", ".", "JSONDecodeError", ":", "log", ".", "warning", "(", "\"Failed to decode JSON (returning None): {!r}\"", ",", "s", ")", "return", "None" ]
Decodes an object from JSON using our custom decoder.
[ "Decodes", "an", "object", "from", "JSON", "using", "our", "custom", "decoder", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L636-L644
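A round-trip sketch combining the encoder and decoder with ``@register_for_json`` (``SimpleThing`` is illustrative; with the default simple method, the instance's attributes are fed back to ``__init__`` as keyword arguments):

.. code-block:: python

    from cardinal_pythonlib.json.serialize import (
        json_decode,
        json_encode,
        register_for_json,
    )

    @register_for_json
    class SimpleThing:
        def __init__(self, a, b):
            self.a = a
            self.b = b

    s = json_encode(SimpleThing(1, 2), indent=4)
    restored = json_decode(s)
    assert isinstance(restored, SimpleThing)
    assert (restored.a, restored.b) == (1, 2)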
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
dict_to_enum_fn
def dict_to_enum_fn(d: Dict[str, Any], enum_class: Type[Enum]) -> Enum: """ Converts a ``dict`` to an ``Enum``. """ return enum_class[d['name']]
python
def dict_to_enum_fn(d: Dict[str, Any], enum_class: Type[Enum]) -> Enum: """ Converts a ``dict`` to an ``Enum``. """ return enum_class[d['name']]
[ "def", "dict_to_enum_fn", "(", "d", ":", "Dict", "[", "str", ",", "Any", "]", ",", "enum_class", ":", "Type", "[", "Enum", "]", ")", "->", "Enum", ":", "return", "enum_class", "[", "d", "[", "'name'", "]", "]" ]
Converts a ``dict`` to an ``Enum``.
[ "Converts", "an", "dict", "to", "a", "Enum", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L702-L706
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
register_enum_for_json
def register_enum_for_json(*args, **kwargs) -> Any: """ Class decorator to register ``Enum``-derived classes with our JSON system. See comments/help for ``@register_for_json``, above. """ if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): # called as @register_enum_for_json cls = args[0] # type: ClassType register_class_for_json( cls, obj_to_dict_fn=enum_to_dict_fn, dict_to_obj_fn=dict_to_enum_fn ) return cls else: # called as @register_enum_for_json(*args, **kwargs) raise AssertionError("Use as plain @register_enum_for_json, " "without arguments")
python
def register_enum_for_json(*args, **kwargs) -> Any: """ Class decorator to register ``Enum``-derived classes with our JSON system. See comments/help for ``@register_for_json``, above. """ if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): # called as @register_enum_for_json cls = args[0] # type: ClassType register_class_for_json( cls, obj_to_dict_fn=enum_to_dict_fn, dict_to_obj_fn=dict_to_enum_fn ) return cls else: # called as @register_enum_for_json(*args, **kwargs) raise AssertionError("Use as plain @register_enum_for_json, " "without arguments")
[ "def", "register_enum_for_json", "(", "*", "args", ",", "*", "*", "kwargs", ")", "->", "Any", ":", "if", "len", "(", "args", ")", "==", "1", "and", "len", "(", "kwargs", ")", "==", "0", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "# called as @register_enum_for_json", "cls", "=", "args", "[", "0", "]", "# type: ClassType", "register_class_for_json", "(", "cls", ",", "obj_to_dict_fn", "=", "enum_to_dict_fn", ",", "dict_to_obj_fn", "=", "dict_to_enum_fn", ")", "return", "cls", "else", ":", "# called as @register_enum_for_json(*args, **kwargs)", "raise", "AssertionError", "(", "\"Use as plain @register_enum_for_json, \"", "\"without arguments\"", ")" ]
Class decorator to register ``Enum``-derived classes with our JSON system. See comments/help for ``@register_for_json``, above.
[ "Class", "decorator", "to", "register", "Enum", "-", "derived", "classes", "with", "our", "JSON", "system", ".", "See", "comments", "/", "help", "for" ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L709-L726
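A sketch of enum registration and round-tripping (``Direction`` is made up):

.. code-block:: python

    from enum import Enum

    from cardinal_pythonlib.json.serialize import (
        json_decode,
        json_encode,
        register_enum_for_json,
    )

    @register_enum_for_json
    class Direction(Enum):
        NORTH = 1
        SOUTH = 2

    # Enum members are singletons, so identity survives the round trip:
    assert json_decode(json_encode(Direction.NORTH)) is Direction.NORTH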
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
dict_to_pendulum
def dict_to_pendulum(d: Dict[str, Any], pendulum_class: ClassType) -> DateTime: """ Converts a ``dict`` object back to a ``Pendulum``. """ return pendulum.parse(d['iso'])
python
def dict_to_pendulum(d: Dict[str, Any], pendulum_class: ClassType) -> DateTime: """ Converts a ``dict`` object back to a ``Pendulum``. """ return pendulum.parse(d['iso'])
[ "def", "dict_to_pendulum", "(", "d", ":", "Dict", "[", "str", ",", "Any", "]", ",", "pendulum_class", ":", "ClassType", ")", "->", "DateTime", ":", "return", "pendulum", ".", "parse", "(", "d", "[", "'iso'", "]", ")" ]
Converts a ``dict`` object back to a ``Pendulum``.
[ "Converts", "a", "dict", "object", "back", "to", "a", "Pendulum", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L743-L748
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
dict_to_pendulumdate
def dict_to_pendulumdate(d: Dict[str, Any], pendulumdate_class: ClassType) -> Date: """ Converts a ``dict`` object back to a ``pendulum.Date``. """ # noinspection PyTypeChecker return pendulum.parse(d['iso']).date()
python
def dict_to_pendulumdate(d: Dict[str, Any], pendulumdate_class: ClassType) -> Date: """ Converts a ``dict`` object back to a ``pendulum.Date``. """ # noinspection PyTypeChecker return pendulum.parse(d['iso']).date()
[ "def", "dict_to_pendulumdate", "(", "d", ":", "Dict", "[", "str", ",", "Any", "]", ",", "pendulumdate_class", ":", "ClassType", ")", "->", "Date", ":", "# noinspection PyTypeChecker", "return", "pendulum", ".", "parse", "(", "d", "[", "'iso'", "]", ")", ".", "date", "(", ")" ]
Converts a ``dict`` object back to a ``pendulum.Date``.
[ "Converts", "a", "dict", "object", "back", "to", "a", "pendulum", ".", "Date", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L772-L778
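A sketch of the two Pendulum converters (assumes the ``pendulum`` package is installed; the ``'iso'`` key is what both functions read, and the class argument is not used by either):

.. code-block:: python

    import pendulum

    from cardinal_pythonlib.json.serialize import (
        dict_to_pendulum,
        dict_to_pendulumdate,
    )

    dt = dict_to_pendulum({"iso": "2020-01-02T03:04:05+00:00"}, pendulum.DateTime)
    d = dict_to_pendulumdate({"iso": "2020-01-02"}, pendulum.Date)
    print(dt)  # 2020-01-02T03:04:05+00:00
    print(d)   # 2020-01-02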
RudolfCardinal/pythonlib
cardinal_pythonlib/json/serialize.py
simple_eq
def simple_eq(one: Instance, two: Instance, attrs: List[str]) -> bool: """ Test if two objects are equal, based on a comparison of the specified attributes ``attrs``. """ return all(getattr(one, a) == getattr(two, a) for a in attrs)
python
def simple_eq(one: Instance, two: Instance, attrs: List[str]) -> bool: """ Test if two objects are equal, based on a comparison of the specified attributes ``attrs``. """ return all(getattr(one, a) == getattr(two, a) for a in attrs)
[ "def", "simple_eq", "(", "one", ":", "Instance", ",", "two", ":", "Instance", ",", "attrs", ":", "List", "[", "str", "]", ")", "->", "bool", ":", "return", "all", "(", "getattr", "(", "one", ",", "a", ")", "==", "getattr", "(", "two", ",", "a", ")", "for", "a", "in", "attrs", ")" ]
Test if two objects are equal, based on a comparison of the specified attributes ``attrs``.
[ "Test", "if", "two", "objects", "are", "equal", "based", "on", "a", "comparison", "of", "the", "specified", "attributes", "attrs", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/json/serialize.py#L832-L837
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
smart_open
def smart_open(filename: str, mode: str = 'Ur', buffering: int = -1, encoding: str = None, errors: str = None, newline: str = None, closefd: bool = True) -> IO: """ Context manager (for use with ``with``) that opens a filename and provides a :class:`IO` object. If the filename is ``'-'``, however, then ``sys.stdin`` is used for reading and ``sys.stdout`` is used for writing. """ # https://stackoverflow.com/questions/17602878/how-to-handle-both-with-open-and-sys-stdout-nicely # noqa # https://stackoverflow.com/questions/1744989/read-from-file-or-stdin/29824059#29824059 # noqa if filename == '-': if mode is None or mode == '' or 'r' in mode: fh = sys.stdin else: fh = sys.stdout else: fh = open(filename, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, closefd=closefd) try: yield fh finally: if filename != '-': fh.close()
python
def smart_open(filename: str, mode: str = 'Ur', buffering: int = -1, encoding: str = None, errors: str = None, newline: str = None, closefd: bool = True) -> IO: """ Context manager (for use with ``with``) that opens a filename and provides a :class:`IO` object. If the filename is ``'-'``, however, then ``sys.stdin`` is used for reading and ``sys.stdout`` is used for writing. """ # https://stackoverflow.com/questions/17602878/how-to-handle-both-with-open-and-sys-stdout-nicely # noqa # https://stackoverflow.com/questions/1744989/read-from-file-or-stdin/29824059#29824059 # noqa if filename == '-': if mode is None or mode == '' or 'r' in mode: fh = sys.stdin else: fh = sys.stdout else: fh = open(filename, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, closefd=closefd) try: yield fh finally: if filename != '-': fh.close()
[ "def", "smart_open", "(", "filename", ":", "str", ",", "mode", ":", "str", "=", "'Ur'", ",", "buffering", ":", "int", "=", "-", "1", ",", "encoding", ":", "str", "=", "None", ",", "errors", ":", "str", "=", "None", ",", "newline", ":", "str", "=", "None", ",", "closefd", ":", "bool", "=", "True", ")", "->", "IO", ":", "# https://stackoverflow.com/questions/17602878/how-to-handle-both-with-open-and-sys-stdout-nicely # noqa", "# https://stackoverflow.com/questions/1744989/read-from-file-or-stdin/29824059#29824059 # noqa", "if", "filename", "==", "'-'", ":", "if", "mode", "is", "None", "or", "mode", "==", "''", "or", "'r'", "in", "mode", ":", "fh", "=", "sys", ".", "stdin", "else", ":", "fh", "=", "sys", ".", "stdout", "else", ":", "fh", "=", "open", "(", "filename", ",", "mode", "=", "mode", ",", "buffering", "=", "buffering", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "newline", "=", "newline", ",", "closefd", "=", "closefd", ")", "try", ":", "yield", "fh", "finally", ":", "if", "filename", "is", "not", "'-'", ":", "fh", ".", "close", "(", ")" ]
Context manager (for use with ``with``) that opens a filename and provides a :class:`IO` object. If the filename is ``'-'``, however, then ``sys.stdin`` is used for reading and ``sys.stdout`` is used for writing.
[ "Context", "manager", "(", "for", "use", "with", "with", ")", "that", "opens", "a", "filename", "and", "provides", "a", ":", "class", ":", "IO", "object", ".", "If", "the", "filename", "is", "-", "however", "then", "sys", ".", "stdin", "is", "used", "for", "reading", "and", "sys", ".", "stdout", "is", "used", "for", "writing", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L57-L80
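A usage sketch; in the library this generator is presumably wrapped with ``contextlib.contextmanager`` (it yields, rather than returning, the handle), so it is used in a ``with`` block, and ``'-'`` selects stdin/stdout, which are not closed:

.. code-block:: python

    from cardinal_pythonlib.file_io import smart_open

    # Write to an ordinary file (illustrative path):
    with smart_open("/tmp/example.txt", "w") as f:
        f.write("hello\n")

    # Write to stdout instead, by passing '-':
    with smart_open("-", "w") as f:
        f.write("hello to stdout\n")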
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
writelines_nl
def writelines_nl(fileobj: TextIO, lines: Iterable[str]) -> None: """ Writes lines, plus terminating newline characters, to the file. (Since :func:`fileobj.writelines` doesn't add newlines... http://stackoverflow.com/questions/13730107/writelines-writes-lines-without-newline-just-fills-the-file) """ # noqa fileobj.write('\n'.join(lines) + '\n')
python
def writelines_nl(fileobj: TextIO, lines: Iterable[str]) -> None: """ Writes lines, plus terminating newline characters, to the file. (Since :func:`fileobj.writelines` doesn't add newlines... http://stackoverflow.com/questions/13730107/writelines-writes-lines-without-newline-just-fills-the-file) """ # noqa fileobj.write('\n'.join(lines) + '\n')
[ "def", "writelines_nl", "(", "fileobj", ":", "TextIO", ",", "lines", ":", "Iterable", "[", "str", "]", ")", "->", "None", ":", "# noqa", "fileobj", ".", "write", "(", "'\\n'", ".", "join", "(", "lines", ")", "+", "'\\n'", ")" ]
Writes lines, plus terminating newline characters, to the file. (Since :func:`fileobj.writelines` doesn't add newlines... http://stackoverflow.com/questions/13730107/writelines-writes-lines-without-newline-just-fills-the-file)
[ "Writes", "lines", "plus", "terminating", "newline", "characters", "to", "the", "file", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L94-L101
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
write_text
def write_text(filename: str, text: str) -> None: """ Writes text to a file. """ with open(filename, 'w') as f: # type: TextIO print(text, file=f)
python
def write_text(filename: str, text: str) -> None: """ Writes text to a file. """ with open(filename, 'w') as f: # type: TextIO print(text, file=f)
[ "def", "write_text", "(", "filename", ":", "str", ",", "text", ":", "str", ")", "->", "None", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "# type: TextIO", "print", "(", "text", ",", "file", "=", "f", ")" ]
Writes text to a file.
[ "Writes", "text", "to", "a", "file", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L104-L109
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
write_gzipped_text
def write_gzipped_text(basefilename: str, text: str) -> None: """ Writes text to a file compressed with ``gzip`` (a ``.gz`` file). The filename is used directly for the "inner" file and the extension ``.gz`` is appended to the "outer" (zipped) file's name. This function exists primarily because Lintian wants non-timestamped gzip files, or it complains: - https://lintian.debian.org/tags/package-contains-timestamped-gzip.html - See http://stackoverflow.com/questions/25728472/python-gzip-omit-the-original-filename-and-timestamp """ # noqa zipfilename = basefilename + '.gz' compresslevel = 9 mtime = 0 with open(zipfilename, 'wb') as f: with gzip.GzipFile(basefilename, 'wb', compresslevel, f, mtime) as gz: with io.TextIOWrapper(gz) as tw: tw.write(text)
python
def write_gzipped_text(basefilename: str, text: str) -> None: """ Writes text to a file compressed with ``gzip`` (a ``.gz`` file). The filename is used directly for the "inner" file and the extension ``.gz`` is appended to the "outer" (zipped) file's name. This function exists primarily because Lintian wants non-timestamped gzip files, or it complains: - https://lintian.debian.org/tags/package-contains-timestamped-gzip.html - See http://stackoverflow.com/questions/25728472/python-gzip-omit-the-original-filename-and-timestamp """ # noqa zipfilename = basefilename + '.gz' compresslevel = 9 mtime = 0 with open(zipfilename, 'wb') as f: with gzip.GzipFile(basefilename, 'wb', compresslevel, f, mtime) as gz: with io.TextIOWrapper(gz) as tw: tw.write(text)
[ "def", "write_gzipped_text", "(", "basefilename", ":", "str", ",", "text", ":", "str", ")", "->", "None", ":", "# noqa", "zipfilename", "=", "basefilename", "+", "'.gz'", "compresslevel", "=", "9", "mtime", "=", "0", "with", "open", "(", "zipfilename", ",", "'wb'", ")", "as", "f", ":", "with", "gzip", ".", "GzipFile", "(", "basefilename", ",", "'wb'", ",", "compresslevel", ",", "f", ",", "mtime", ")", "as", "gz", ":", "with", "io", ".", "TextIOWrapper", "(", "gz", ")", "as", "tw", ":", "tw", ".", "write", "(", "text", ")" ]
Writes text to a file compressed with ``gzip`` (a ``.gz`` file). The filename is used directly for the "inner" file and the extension ``.gz`` is appended to the "outer" (zipped) file's name. This function exists primarily because Lintian wants non-timestamped gzip files, or it complains: - https://lintian.debian.org/tags/package-contains-timestamped-gzip.html - See http://stackoverflow.com/questions/25728472/python-gzip-omit-the-original-filename-and-timestamp
[ "Writes", "text", "to", "a", "file", "compressed", "with", "gzip", "(", "a", ".", "gz", "file", ")", ".", "The", "filename", "is", "used", "directly", "for", "the", "inner", "file", "and", "the", "extension", ".", "gz", "is", "appended", "to", "the", "outer", "(", "zipped", ")", "file", "s", "name", ".", "This", "function", "exists", "primarily", "because", "Lintian", "wants", "non", "-", "timestamped", "gzip", "files", "or", "it", "complains", ":", "-", "https", ":", "//", "lintian", ".", "debian", ".", "org", "/", "tags", "/", "package", "-", "contains", "-", "timestamped", "-", "gzip", ".", "html", "-", "See", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "25728472", "/", "python", "-", "gzip", "-", "omit", "-", "the", "-", "original", "-", "filename", "-", "and", "-", "timestamp" ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L112-L129
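Usage sketch for write_gzipped_text (editorial addition; "report.txt" is a hypothetical base filename):

import gzip
from cardinal_pythonlib.file_io import write_gzipped_text

write_gzipped_text("report.txt", "hello")  # creates report.txt.gz with mtime=0 in the header
with gzip.open("report.txt.gz", "rt") as f:
    assert f.read() == "hello"  # round-trip check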
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
get_lines_without_comments
def get_lines_without_comments(filename: str) -> List[str]: """ Reads a file, and returns all lines as a list, left- and right-stripping the lines and removing everything on a line after the first ``#``. NOTE: does not cope well with quoted ``#`` symbols! """ lines = [] with open(filename) as f: for line in f: line = line.partition('#')[0] # the part before the first # line = line.rstrip() line = line.lstrip() if line: lines.append(line) return lines
python
def get_lines_without_comments(filename: str) -> List[str]: """ Reads a file, and returns all lines as a list, left- and right-stripping the lines and removing everything on a line after the first ``#``. NOTE: does not cope well with quoted ``#`` symbols! """ lines = [] with open(filename) as f: for line in f: line = line.partition('#')[0] # the part before the first # line = line.rstrip() line = line.lstrip() if line: lines.append(line) return lines
[ "def", "get_lines_without_comments", "(", "filename", ":", "str", ")", "->", "List", "[", "str", "]", ":", "lines", "=", "[", "]", "with", "open", "(", "filename", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "partition", "(", "'#'", ")", "[", "0", "]", "# the part before the first #", "line", "=", "line", ".", "rstrip", "(", ")", "line", "=", "line", ".", "lstrip", "(", ")", "if", "line", ":", "lines", ".", "append", "(", "line", ")", "return", "lines" ]
Reads a file, and returns all lines as a list, left- and right-stripping the lines and removing everything on a line after the first ``#``. NOTE: does not cope well with quoted ``#`` symbols!
[ "Reads", "a", "file", "and", "returns", "all", "lines", "as", "a", "list", "left", "-", "and", "right", "-", "stripping", "the", "lines", "and", "removing", "everything", "on", "a", "line", "after", "the", "first", "#", ".", "NOTE", ":", "does", "not", "cope", "well", "with", "quoted", "#", "symbols!" ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L136-L150
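Usage sketch for get_lines_without_comments (editorial addition; filename and contents are hypothetical):

from cardinal_pythonlib.file_io import get_lines_without_comments

# For a file containing the lines "alpha  # trailing comment",
# "# full-line comment" and "beta", the call returns ['alpha', 'beta'].
lines = get_lines_without_comments("settings.conf")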
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
gen_textfiles_from_filenames
def gen_textfiles_from_filenames( filenames: Iterable[str]) -> Generator[TextIO, None, None]: """ Generates file-like objects from a list of filenames. Args: filenames: iterable of filenames Yields: each file as a :class:`TextIO` object """ for filename in filenames: with open(filename) as f: yield f
python
def gen_textfiles_from_filenames( filenames: Iterable[str]) -> Generator[TextIO, None, None]: """ Generates file-like objects from a list of filenames. Args: filenames: iterable of filenames Yields: each file as a :class:`TextIO` object """ for filename in filenames: with open(filename) as f: yield f
[ "def", "gen_textfiles_from_filenames", "(", "filenames", ":", "Iterable", "[", "str", "]", ")", "->", "Generator", "[", "TextIO", ",", "None", ",", "None", "]", ":", "for", "filename", "in", "filenames", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "yield", "f" ]
Generates file-like objects from a list of filenames. Args: filenames: iterable of filenames Yields: each file as a :class:`TextIO` object
[ "Generates", "file", "-", "like", "objects", "from", "a", "list", "of", "filenames", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L157-L171
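Usage sketch for gen_textfiles_from_filenames (editorial addition; filenames are hypothetical). Each file is opened lazily and closed once the generator advances past it:

from cardinal_pythonlib.file_io import gen_textfiles_from_filenames

for f in gen_textfiles_from_filenames(["a.txt", "b.txt"]):
    print(f.readline(), end="")  # use the handle before advancing the loop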
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
gen_lines_from_textfiles
def gen_lines_from_textfiles( files: Iterable[TextIO]) -> Generator[str, None, None]: """ Generates lines from file-like objects. Args: files: iterable of :class:`TextIO` objects Yields: each line of all the files """ for file in files: for line in file: yield line
python
def gen_lines_from_textfiles( files: Iterable[TextIO]) -> Generator[str, None, None]: """ Generates lines from file-like objects. Args: files: iterable of :class:`TextIO` objects Yields: each line of all the files """ for file in files: for line in file: yield line
[ "def", "gen_lines_from_textfiles", "(", "files", ":", "Iterable", "[", "TextIO", "]", ")", "->", "Generator", "[", "str", ",", "None", ",", "None", "]", ":", "for", "file", "in", "files", ":", "for", "line", "in", "file", ":", "yield", "line" ]
Generates lines from file-like objects. Args: files: iterable of :class:`TextIO` objects Yields: each line of all the files
[ "Generates", "lines", "from", "file", "-", "like", "objects", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L174-L188
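Usage sketch chaining the two generators above (editorial addition; filenames are hypothetical):

from cardinal_pythonlib.file_io import (
    gen_lines_from_textfiles,
    gen_textfiles_from_filenames,
)

files = gen_textfiles_from_filenames(["a.txt", "b.txt"])
for line in gen_lines_from_textfiles(files):
    print(line, end="")  # lines keep their trailing newlines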
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
gen_lower
def gen_lower(x: Iterable[str]) -> Generator[str, None, None]: """ Args: x: iterable of strings Yields: each string in lower case """ for string in x: yield string.lower()
python
def gen_lower(x: Iterable[str]) -> Generator[str, None, None]: """ Args: x: iterable of strings Yields: each string in lower case """ for string in x: yield string.lower()
[ "def", "gen_lower", "(", "x", ":", "Iterable", "[", "str", "]", ")", "->", "Generator", "[", "str", ",", "None", ",", "None", "]", ":", "for", "string", "in", "x", ":", "yield", "string", ".", "lower", "(", ")" ]
Args: x: iterable of strings Yields: each string in lower case
[ "Args", ":", "x", ":", "iterable", "of", "strings" ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L191-L200
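Usage sketch for gen_lower (editorial addition):

from cardinal_pythonlib.file_io import gen_lower

assert list(gen_lower(["Alpha", "BETA"])) == ["alpha", "beta"]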
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
gen_lines_from_binary_files
def gen_lines_from_binary_files( files: Iterable[BinaryIO], encoding: str = UTF8) -> Generator[str, None, None]: """ Generates lines from binary files. Strips out newlines. Args: files: iterable of :class:`BinaryIO` file-like objects encoding: encoding to use Yields: each line of all the files """ for file in files: for byteline in file: line = byteline.decode(encoding).strip() yield line
python
def gen_lines_from_binary_files( files: Iterable[BinaryIO], encoding: str = UTF8) -> Generator[str, None, None]: """ Generates lines from binary files. Strips out newlines. Args: files: iterable of :class:`BinaryIO` file-like objects encoding: encoding to use Yields: each line of all the files """ for file in files: for byteline in file: line = byteline.decode(encoding).strip() yield line
[ "def", "gen_lines_from_binary_files", "(", "files", ":", "Iterable", "[", "BinaryIO", "]", ",", "encoding", ":", "str", "=", "UTF8", ")", "->", "Generator", "[", "str", ",", "None", ",", "None", "]", ":", "for", "file", "in", "files", ":", "for", "byteline", "in", "file", ":", "line", "=", "byteline", ".", "decode", "(", "encoding", ")", ".", "strip", "(", ")", "yield", "line" ]
Generates lines from binary files. Strips out newlines. Args: files: iterable of :class:`BinaryIO` file-like objects encoding: encoding to use Yields: each line of all the files
[ "Generates", "lines", "from", "binary", "files", ".", "Strips", "out", "newlines", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L203-L221
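Usage sketch for gen_lines_from_binary_files (editorial addition). An in-memory BytesIO stands in for a real binary file:

import io
from cardinal_pythonlib.file_io import gen_lines_from_binary_files

f = io.BytesIO("café\nbar\n".encode("utf-8"))
assert list(gen_lines_from_binary_files([f])) == ["café", "bar"]  # decoded, newline stripped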
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
gen_part_from_line
def gen_part_from_line(lines: Iterable[str], part_index: int, splitter: str = None) -> Generator[str, None, None]: """ Splits lines with ``splitter`` and yields a specified part by index. Args: lines: iterable of strings part_index: index of part to yield splitter: string to split the lines on Yields: the specified part for each line """ for line in lines: parts = line.split(splitter) yield parts[part_index]
python
def gen_part_from_line(lines: Iterable[str], part_index: int, splitter: str = None) -> Generator[str, None, None]: """ Splits lines with ``splitter`` and yields a specified part by index. Args: lines: iterable of strings part_index: index of part to yield splitter: string to split the lines on Yields: the specified part for each line """ for line in lines: parts = line.split(splitter) yield parts[part_index]
[ "def", "gen_part_from_line", "(", "lines", ":", "Iterable", "[", "str", "]", ",", "part_index", ":", "int", ",", "splitter", ":", "str", "=", "None", ")", "->", "Generator", "[", "str", ",", "None", ",", "None", "]", ":", "for", "line", "in", "lines", ":", "parts", "=", "line", ".", "split", "(", "splitter", ")", "yield", "parts", "[", "part_index", "]" ]
Splits lines with ``splitter`` and yields a specified part by index. Args: lines: iterable of strings part_index: index of part to yield splitter: string to split the lines on Yields: the specified part for each line
[ "Splits", "lines", "with", "splitter", "and", "yields", "a", "specified", "part", "by", "index", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L266-L283
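Usage sketch for gen_part_from_line (editorial addition):

from cardinal_pythonlib.file_io import gen_part_from_line

rows = ["alice,30", "bob,25"]
assert list(gen_part_from_line(rows, part_index=1, splitter=",")) == ["30", "25"]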
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
gen_part_from_iterables
def gen_part_from_iterables(iterables: Iterable[Any], part_index: int) -> Generator[Any, None, None]: r""" Yields the *n*\ th part of each thing in ``iterables``. Args: iterables: iterable of anything part_index: part index Yields: ``item[part_index] for item in iterable`` """ # RST: make part of word bold/italic: # https://stackoverflow.com/questions/12771480/part-of-a-word-bold-in-restructuredtext # noqa for iterable in iterables: yield iterable[part_index]
python
def gen_part_from_iterables(iterables: Iterable[Any], part_index: int) -> Generator[Any, None, None]: r""" Yields the *n*\ th part of each thing in ``iterables``. Args: iterables: iterable of anything part_index: part index Yields: ``item[part_index] for item in iterable`` """ # RST: make part of word bold/italic: # https://stackoverflow.com/questions/12771480/part-of-a-word-bold-in-restructuredtext # noqa for iterable in iterables: yield iterable[part_index]
[ "def", "gen_part_from_iterables", "(", "iterables", ":", "Iterable", "[", "Any", "]", ",", "part_index", ":", "int", ")", "->", "Generator", "[", "Any", ",", "None", ",", "None", "]", ":", "# RST: make part of word bold/italic:", "# https://stackoverflow.com/questions/12771480/part-of-a-word-bold-in-restructuredtext # noqa", "for", "iterable", "in", "iterables", ":", "yield", "iterable", "[", "part_index", "]" ]
r""" Yields the *n*\ th part of each thing in ``iterables``. Args: iterables: iterable of anything part_index: part index Yields: ``item[part_index] for item in iterable``
[ "r", "Yields", "the", "*", "n", "*", "\\", "th", "part", "of", "each", "thing", "in", "iterables", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L286-L302
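Usage sketch for gen_part_from_iterables (editorial addition):

from cardinal_pythonlib.file_io import gen_part_from_iterables

pairs = [("a", 1), ("b", 2)]
assert list(gen_part_from_iterables(pairs, part_index=1)) == [1, 2]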
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
gen_rows_from_csv_binfiles
def gen_rows_from_csv_binfiles( csv_files: Iterable[BinaryIO], encoding: str = UTF8, skip_header: bool = False, **csv_reader_kwargs) -> Generator[Iterable[str], None, None]: """ Iterate through binary file-like objects that are CSV files in a specified encoding. Yield each row. Args: csv_files: iterable of :class:`BinaryIO` objects encoding: encoding to use skip_header: skip the header (first) row of each file? csv_reader_kwargs: arguments to pass to :func:`csv.reader` Yields: rows from the files """ dialect = csv_reader_kwargs.pop('dialect', None) for csv_file_bin in csv_files: # noinspection PyTypeChecker csv_file = io.TextIOWrapper(csv_file_bin, encoding=encoding) thisfile_dialect = dialect if thisfile_dialect is None: thisfile_dialect = csv.Sniffer().sniff(csv_file.read(1024)) csv_file.seek(0) reader = csv.reader(csv_file, dialect=thisfile_dialect, **csv_reader_kwargs) first = True for row in reader: if first: first = False if skip_header: continue yield row
python
def gen_rows_from_csv_binfiles( csv_files: Iterable[BinaryIO], encoding: str = UTF8, skip_header: bool = False, **csv_reader_kwargs) -> Generator[Iterable[str], None, None]: """ Iterate through binary file-like objects that are CSV files in a specified encoding. Yield each row. Args: csv_files: iterable of :class:`BinaryIO` objects encoding: encoding to use skip_header: skip the header (first) row of each file? csv_reader_kwargs: arguments to pass to :func:`csv.reader` Yields: rows from the files """ dialect = csv_reader_kwargs.pop('dialect', None) for csv_file_bin in csv_files: # noinspection PyTypeChecker csv_file = io.TextIOWrapper(csv_file_bin, encoding=encoding) thisfile_dialect = dialect if thisfile_dialect is None: thisfile_dialect = csv.Sniffer().sniff(csv_file.read(1024)) csv_file.seek(0) reader = csv.reader(csv_file, dialect=thisfile_dialect, **csv_reader_kwargs) first = True for row in reader: if first: first = False if skip_header: continue yield row
[ "def", "gen_rows_from_csv_binfiles", "(", "csv_files", ":", "Iterable", "[", "BinaryIO", "]", ",", "encoding", ":", "str", "=", "UTF8", ",", "skip_header", ":", "bool", "=", "False", ",", "*", "*", "csv_reader_kwargs", ")", "->", "Generator", "[", "Iterable", "[", "str", "]", ",", "None", ",", "None", "]", ":", "dialect", "=", "csv_reader_kwargs", ".", "pop", "(", "'dialect'", ",", "None", ")", "for", "csv_file_bin", "in", "csv_files", ":", "# noinspection PyTypeChecker", "csv_file", "=", "io", ".", "TextIOWrapper", "(", "csv_file_bin", ",", "encoding", "=", "encoding", ")", "thisfile_dialect", "=", "dialect", "if", "thisfile_dialect", "is", "None", ":", "thisfile_dialect", "=", "csv", ".", "Sniffer", "(", ")", ".", "sniff", "(", "csv_file", ".", "read", "(", "1024", ")", ")", "csv_file", ".", "seek", "(", "0", ")", "reader", "=", "csv", ".", "reader", "(", "csv_file", ",", "dialect", "=", "thisfile_dialect", ",", "*", "*", "csv_reader_kwargs", ")", "first", "=", "True", "for", "row", "in", "reader", ":", "if", "first", ":", "first", "=", "False", "if", "skip_header", ":", "continue", "yield", "row" ]
Iterate through binary file-like objects that are CSV files in a specified encoding. Yield each row. Args: csv_files: iterable of :class:`BinaryIO` objects encoding: encoding to use skip_header: skip the header (first) row of each file? csv_reader_kwargs: arguments to pass to :func:`csv.reader` Yields: rows from the files
[ "Iterate", "through", "binary", "file", "-", "like", "objects", "that", "are", "CSV", "files", "in", "a", "specified", "encoding", ".", "Yield", "each", "row", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L305-L340
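Usage sketch for gen_rows_from_csv_binfiles (editorial addition). Passing dialect="excel" bypasses the csv.Sniffer path; an in-memory BytesIO stands in for a real file:

import io
from cardinal_pythonlib.file_io import gen_rows_from_csv_binfiles

csv_bytes = io.BytesIO(b"name,age\r\nalice,30\r\n")
rows = list(gen_rows_from_csv_binfiles([csv_bytes], skip_header=True, dialect="excel"))
assert rows == [["alice", "30"]]  # header row skipped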
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
webify_file
def webify_file(srcfilename: str, destfilename: str) -> None: """ Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it in the process. """ with open(srcfilename) as infile, open(destfilename, 'w') as ofile: for line_ in infile: ofile.write(escape(line_))
python
def webify_file(srcfilename: str, destfilename: str) -> None: """ Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it in the process. """ with open(srcfilename) as infile, open(destfilename, 'w') as ofile: for line_ in infile: ofile.write(escape(line_))
[ "def", "webify_file", "(", "srcfilename", ":", "str", ",", "destfilename", ":", "str", ")", "->", "None", ":", "with", "open", "(", "srcfilename", ")", "as", "infile", ",", "open", "(", "destfilename", ",", "'w'", ")", "as", "ofile", ":", "for", "line_", "in", "infile", ":", "ofile", ".", "write", "(", "escape", "(", "line_", ")", ")" ]
Rewrites a file from ``srcfilename`` to ``destfilename``, HTML-escaping it in the process.
[ "Rewrites", "a", "file", "from", "srcfilename", "to", "destfilename", "HTML", "-", "escaping", "it", "in", "the", "process", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L347-L354
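Usage sketch for webify_file (editorial addition; filenames are hypothetical):

from cardinal_pythonlib.file_io import webify_file

# Copies page.txt to page.html, HTML-escaping each line via the module's escape().
webify_file("page.txt", "page.html")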
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
remove_gzip_timestamp
def remove_gzip_timestamp(filename: str, gunzip_executable: str = "gunzip", gzip_executable: str = "gzip", gzip_args: List[str] = None) -> None: """ Uses external ``gunzip``/``gzip`` tools to remove a ``gzip`` timestamp. Necessary for Lintian. """ gzip_args = gzip_args or [ "-9", # maximum compression (or Lintian moans) "-n", ] # gzip/gunzip operate on SINGLE files with tempfile.TemporaryDirectory() as dir_: basezipfilename = os.path.basename(filename) newzip = os.path.join(dir_, basezipfilename) with open(newzip, 'wb') as z: log.info( "Removing gzip timestamp: " "{} -> gunzip -c -> gzip -n -> {}", basezipfilename, newzip) p1 = subprocess.Popen([gunzip_executable, "-c", filename], stdout=subprocess.PIPE) p2 = subprocess.Popen([gzip_executable] + gzip_args, stdin=p1.stdout, stdout=z) p2.communicate() shutil.copyfile(newzip, filename)
python
def remove_gzip_timestamp(filename: str, gunzip_executable: str = "gunzip", gzip_executable: str = "gzip", gzip_args: List[str] = None) -> None: """ Uses external ``gunzip``/``gzip`` tools to remove a ``gzip`` timestamp. Necessary for Lintian. """ gzip_args = gzip_args or [ "-9", # maximum compression (or Lintian moans) "-n", ] # gzip/gunzip operate on SINGLE files with tempfile.TemporaryDirectory() as dir_: basezipfilename = os.path.basename(filename) newzip = os.path.join(dir_, basezipfilename) with open(newzip, 'wb') as z: log.info( "Removing gzip timestamp: " "{} -> gunzip -c -> gzip -n -> {}", basezipfilename, newzip) p1 = subprocess.Popen([gunzip_executable, "-c", filename], stdout=subprocess.PIPE) p2 = subprocess.Popen([gzip_executable] + gzip_args, stdin=p1.stdout, stdout=z) p2.communicate() shutil.copyfile(newzip, filename)
[ "def", "remove_gzip_timestamp", "(", "filename", ":", "str", ",", "gunzip_executable", ":", "str", "=", "\"gunzip\"", ",", "gzip_executable", ":", "str", "=", "\"gzip\"", ",", "gzip_args", ":", "List", "[", "str", "]", "=", "None", ")", "->", "None", ":", "gzip_args", "=", "gzip_args", "or", "[", "\"-9\"", ",", "# maximum compression (or Lintian moans)", "\"-n\"", ",", "]", "# gzip/gunzip operate on SINGLE files", "with", "tempfile", ".", "TemporaryDirectory", "(", ")", "as", "dir_", ":", "basezipfilename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "newzip", "=", "os", ".", "path", ".", "join", "(", "dir_", ",", "basezipfilename", ")", "with", "open", "(", "newzip", ",", "'wb'", ")", "as", "z", ":", "log", ".", "info", "(", "\"Removing gzip timestamp: \"", "\"{} -> gunzip -c -> gzip -n -> {}\"", ",", "basezipfilename", ",", "newzip", ")", "p1", "=", "subprocess", ".", "Popen", "(", "[", "gunzip_executable", ",", "\"-c\"", ",", "filename", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "p2", "=", "subprocess", ".", "Popen", "(", "[", "gzip_executable", "]", "+", "gzip_args", ",", "stdin", "=", "p1", ".", "stdout", ",", "stdout", "=", "z", ")", "p2", ".", "communicate", "(", ")", "shutil", ".", "copyfile", "(", "newzip", ",", "filename", ")" ]
Uses external ``gunzip``/``gzip`` tools to remove a ``gzip`` timestamp. Necessary for Lintian.
[ "Uses", "external", "gunzip", "/", "gzip", "tools", "to", "remove", "a", "gzip", "timestamp", ".", "Necessary", "for", "Lintian", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L357-L383
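Usage sketch for remove_gzip_timestamp (editorial addition; the path is hypothetical). Both external tools must be on the PATH:

from cardinal_pythonlib.file_io import remove_gzip_timestamp

# Re-compresses in place via "gunzip -c | gzip -9 -n", stripping the timestamp.
remove_gzip_timestamp("docs/manual.txt.gz")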
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
replace_in_file
def replace_in_file(filename: str, text_from: str, text_to: str) -> None: """ Replaces text in a file. Args: filename: filename to process (modifying it in place) text_from: original text to replace text_to: replacement text """ log.info("Amending {}: {} -> {}", filename, repr(text_from), repr(text_to)) with open(filename) as infile: contents = infile.read() contents = contents.replace(text_from, text_to) with open(filename, 'w') as outfile: outfile.write(contents)
python
def replace_in_file(filename: str, text_from: str, text_to: str) -> None: """ Replaces text in a file. Args: filename: filename to process (modifying it in place) text_from: original text to replace text_to: replacement text """ log.info("Amending {}: {} -> {}", filename, repr(text_from), repr(text_to)) with open(filename) as infile: contents = infile.read() contents = contents.replace(text_from, text_to) with open(filename, 'w') as outfile: outfile.write(contents)
[ "def", "replace_in_file", "(", "filename", ":", "str", ",", "text_from", ":", "str", ",", "text_to", ":", "str", ")", "->", "None", ":", "log", ".", "info", "(", "\"Amending {}: {} -> {}\"", ",", "filename", ",", "repr", "(", "text_from", ")", ",", "repr", "(", "text_to", ")", ")", "with", "open", "(", "filename", ")", "as", "infile", ":", "contents", "=", "infile", ".", "read", "(", ")", "contents", "=", "contents", ".", "replace", "(", "text_from", ",", "text_to", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "contents", ")" ]
Replaces text in a file. Args: filename: filename to process (modifying it in place) text_from: original text to replace text_to: replacement text
[ "Replaces", "text", "in", "a", "file", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L390-L405
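Usage sketch for replace_in_file (editorial addition; filename and strings are hypothetical):

from cardinal_pythonlib.file_io import replace_in_file

replace_in_file("config.ini", "DEBUG = True", "DEBUG = False")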
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
replace_multiple_in_file
def replace_multiple_in_file(filename: str, replacements: List[Tuple[str, str]]) -> None: """ Replaces multiple from/to string pairs within a single file. Args: filename: filename to process (modifying it in place) replacements: list of ``(from_text, to_text)`` tuples """ with open(filename) as infile: contents = infile.read() for text_from, text_to in replacements: log.info("Amending {}: {} -> {}", filename, repr(text_from), repr(text_to)) contents = contents.replace(text_from, text_to) with open(filename, 'w') as outfile: outfile.write(contents)
python
def replace_multiple_in_file(filename: str, replacements: List[Tuple[str, str]]) -> None: """ Replaces multiple from/to string pairs within a single file. Args: filename: filename to process (modifying it in place) replacements: list of ``(from_text, to_text)`` tuples """ with open(filename) as infile: contents = infile.read() for text_from, text_to in replacements: log.info("Amending {}: {} -> {}", filename, repr(text_from), repr(text_to)) contents = contents.replace(text_from, text_to) with open(filename, 'w') as outfile: outfile.write(contents)
[ "def", "replace_multiple_in_file", "(", "filename", ":", "str", ",", "replacements", ":", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", ")", "->", "None", ":", "with", "open", "(", "filename", ")", "as", "infile", ":", "contents", "=", "infile", ".", "read", "(", ")", "for", "text_from", ",", "text_to", "in", "replacements", ":", "log", ".", "info", "(", "\"Amending {}: {} -> {}\"", ",", "filename", ",", "repr", "(", "text_from", ")", ",", "repr", "(", "text_to", ")", ")", "contents", "=", "contents", ".", "replace", "(", "text_from", ",", "text_to", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "contents", ")" ]
Replaces multiple from/to string pairs within a single file. Args: filename: filename to process (modifying it in place) replacements: list of ``(from_text, to_text)`` tuples
[ "Replaces", "multiple", "from", "/", "to", "string", "pairs", "within", "a", "single", "file", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L408-L424
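Usage sketch for replace_multiple_in_file (editorial addition; filename and strings are hypothetical):

from cardinal_pythonlib.file_io import replace_multiple_in_file

replace_multiple_in_file("config.ini", [
    ("DEBUG = True", "DEBUG = False"),
    ("localhost", "db.example.com"),
])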
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
convert_line_endings
def convert_line_endings(filename: str, to_unix: bool = False, to_windows: bool = False) -> None: """ Converts a file (in place) from UNIX to Windows line endings, or the reverse. Args: filename: filename to modify (in place) to_unix: convert Windows (CR LF) to UNIX (LF) to_windows: convert UNIX (LF) to Windows (CR LF) """ assert to_unix != to_windows with open(filename, "rb") as f: contents = f.read() windows_eol = b"\r\n" # CR LF unix_eol = b"\n" # LF if to_unix: log.info("Converting from Windows to UNIX line endings: {!r}", filename) src = windows_eol dst = unix_eol else: # to_windows log.info("Converting from UNIX to Windows line endings: {!r}", filename) src = unix_eol dst = windows_eol if to_windows and windows_eol in contents: log.info("... already contains at least one Windows line ending; " "probably converted before; skipping") return contents = contents.replace(src, dst) with open(filename, "wb") as f: f.write(contents)
python
def convert_line_endings(filename: str, to_unix: bool = False, to_windows: bool = False) -> None: """ Converts a file (in place) from UNIX to Windows line endings, or the reverse. Args: filename: filename to modify (in place) to_unix: convert Windows (CR LF) to UNIX (LF) to_windows: convert UNIX (LF) to Windows (CR LF) """ assert to_unix != to_windows with open(filename, "rb") as f: contents = f.read() windows_eol = b"\r\n" # CR LF unix_eol = b"\n" # LF if to_unix: log.info("Converting from Windows to UNIX line endings: {!r}", filename) src = windows_eol dst = unix_eol else: # to_windows log.info("Converting from UNIX to Windows line endings: {!r}", filename) src = unix_eol dst = windows_eol if to_windows and windows_eol in contents: log.info("... already contains at least one Windows line ending; " "probably converted before; skipping") return contents = contents.replace(src, dst) with open(filename, "wb") as f: f.write(contents)
[ "def", "convert_line_endings", "(", "filename", ":", "str", ",", "to_unix", ":", "bool", "=", "False", ",", "to_windows", ":", "bool", "=", "False", ")", "->", "None", ":", "assert", "to_unix", "!=", "to_windows", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "contents", "=", "f", ".", "read", "(", ")", "windows_eol", "=", "b\"\\r\\n\"", "# CR LF", "unix_eol", "=", "b\"\\n\"", "# LF", "if", "to_unix", ":", "log", ".", "info", "(", "\"Converting from Windows to UNIX line endings: {!r}\"", ",", "filename", ")", "src", "=", "windows_eol", "dst", "=", "unix_eol", "else", ":", "# to_windows", "log", ".", "info", "(", "\"Converting from UNIX to Windows line endings: {!r}\"", ",", "filename", ")", "src", "=", "unix_eol", "dst", "=", "windows_eol", "if", "windows_eol", "in", "contents", ":", "log", ".", "info", "(", "\"... already contains at least one Windows line ending; \"", "\"probably converted before; skipping\"", ")", "return", "contents", "=", "contents", ".", "replace", "(", "src", ",", "dst", ")", "with", "open", "(", "filename", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "contents", ")" ]
Converts a file (in place) from UNIX to Windows line endings, or the reverse. Args: filename: filename to modify (in place) to_unix: convert Windows (CR LF) to UNIX (LF) to_windows: convert UNIX (LF) to Windows (CR LF)
[ "Converts", "a", "file", "(", "in", "place", ")", "from", "UNIX", "to", "Windows", "line", "endings", "or", "the", "reverse", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L427-L459
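Usage sketch for convert_line_endings (editorial addition; filenames are hypothetical). As amended above, the "already converted" skip guard applies only in the to_windows direction, so converting to UNIX endings proceeds even when CR LF pairs are present:

from cardinal_pythonlib.file_io import convert_line_endings

convert_line_endings("script.sh", to_unix=True)     # CR LF -> LF
convert_line_endings("notes.txt", to_windows=True)  # LF -> CR LF; skipped if CR LF already present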
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
is_line_in_file
def is_line_in_file(filename: str, line: str) -> bool: """ Detects whether a line is present within a file. Args: filename: file to check line: line to search for (as an exact match) """ assert "\n" not in line with open(filename, "r") as file: for fileline in file: if fileline == line: return True return False
python
def is_line_in_file(filename: str, line: str) -> bool: """ Detects whether a line is present within a file. Args: filename: file to check line: line to search for (as an exact match) """ assert "\n" not in line with open(filename, "r") as file: for fileline in file: if fileline == line: return True return False
[ "def", "is_line_in_file", "(", "filename", ":", "str", ",", "line", ":", "str", ")", "->", "bool", ":", "assert", "\"\\n\"", "not", "in", "line", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "file", ":", "for", "fileline", "in", "file", ":", "if", "fileline", "==", "line", ":", "return", "True", "return", "False" ]
Detects whether a line is present within a file. Args: filename: file to check line: line to search for (as an exact match)
[ "Detects", "whether", "a", "line", "is", "present", "within", "a", "file", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L462-L475
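Usage sketch for is_line_in_file (editorial addition; filename and line are hypothetical). Note that iterating a file yields lines with their trailing newline, so this exact-match test can only succeed on a final line that lacks one:

from cardinal_pythonlib.file_io import is_line_in_file

found = is_line_in_file(".gitignore", "*.pyc")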
RudolfCardinal/pythonlib
cardinal_pythonlib/file_io.py
add_line_if_absent
def add_line_if_absent(filename: str, line: str) -> None: """ Adds a line (at the end) if it's not already in the file somewhere. Args: filename: filename to modify (in place) line: line to append (which must not have a newline in) """ assert "\n" not in line if not is_line_in_file(filename, line): log.info("Appending line {!r} to file {!r}", line, filename) with open(filename, "a") as file: file.writelines([line])
python
def add_line_if_absent(filename: str, line: str) -> None: """ Adds a line (at the end) if it's not already in the file somewhere. Args: filename: filename to modify (in place) line: line to append (which must not have a newline in) """ assert "\n" not in line if not is_line_in_file(filename, line): log.info("Appending line {!r} to file {!r}", line, filename) with open(filename, "a") as file: file.writelines([line])
[ "def", "add_line_if_absent", "(", "filename", ":", "str", ",", "line", ":", "str", ")", "->", "None", ":", "assert", "\"\\n\"", "not", "in", "line", "if", "not", "is_line_in_file", "(", "filename", ",", "line", ")", ":", "log", ".", "info", "(", "\"Appending line {!r} to file {!r}\"", ",", "line", ",", "filename", ")", "with", "open", "(", "filename", ",", "\"a\"", ")", "as", "file", ":", "file", ".", "writelines", "(", "[", "line", "]", ")" ]
Adds a line (at the end) if it's not already in the file somewhere. Args: filename: filename to modify (in place) line: line to append (which must not have a newline in)
[ "Adds", "a", "line", "(", "at", "the", "end", ")", "if", "it", "s", "not", "already", "in", "the", "file", "somewhere", "." ]
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/file_io.py#L478-L490
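Usage sketch for add_line_if_absent (editorial addition; filename and line are hypothetical). The line is appended without a trailing newline, which matches the exact-match check in is_line_in_file above:

from cardinal_pythonlib.file_io import add_line_if_absent

add_line_if_absent(".gitignore", "*.pyc")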
avihad/twistes
twistes/utilities.py
EsUtils.is_get_query_with_results
def is_get_query_with_results(results): """ :param results: the response from Elasticsearch :return: true if the get query returned a result, false otherwise """ return results and EsConst.FOUND in results and results[EsConst.FOUND] and EsConst.FIELDS in results
python
def is_get_query_with_results(results): """ :param results: the response from Elasticsearch :return: true if the get query returned a result, false otherwise """ return results and EsConst.FOUND in results and results[EsConst.FOUND] and EsConst.FIELDS in results
[ "def", "is_get_query_with_results", "(", "results", ")", ":", "return", "results", "and", "EsConst", ".", "FOUND", "in", "results", "and", "results", "[", "EsConst", ".", "FOUND", "]", "and", "EsConst", ".", "FIELDS", "in", "results" ]
:param results: the response from Elasticsearch :return: true if the get query returned a result, false otherwise
[ ":", "param", "results", ":", "the", "response", "from", "Elasticsearch", ":", "return", ":", "true", "if", "the", "get", "query", "returned", "a", "result", "false", "otherwise" ]
train
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/utilities.py#L39-L44
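Usage sketch for EsUtils.is_get_query_with_results (editorial addition). The literal keys assume EsConst.FOUND == "found" and EsConst.FIELDS == "fields", matching a standard Elasticsearch get response, and that the method is exposed at class level; both are assumptions:

from twistes.utilities import EsUtils

response = {"found": True, "fields": {"title": ["Demo"]}}  # illustrative response
if EsUtils.is_get_query_with_results(response):
    print(response["fields"])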
avihad/twistes
twistes/utilities.py
EsUtils.validate_scan_result
def validate_scan_result(results): """ Check if there's a failed shard in the scan query""" if results[EsConst.SHARDS][EsConst.FAILED] and results[EsConst.SHARDS][EsConst.FAILED] > 0: raise ScanError( 'Scroll request has failed on %d shards out of %d.' % (results[EsConst.SHARDS][EsConst.FAILED], results[EsConst.SHARDS][EsConst.TOTAL]) )
python
def validate_scan_result(results): """ Check if there's a failed shard in the scan query""" if results[EsConst.SHARDS][EsConst.FAILED] and results[EsConst.SHARDS][EsConst.FAILED] > 0: raise ScanError( 'Scroll request has failed on %d shards out of %d.' % (results[EsConst.SHARDS][EsConst.FAILED], results[EsConst.SHARDS][EsConst.TOTAL]) )
[ "def", "validate_scan_result", "(", "results", ")", ":", "if", "results", "[", "EsConst", ".", "SHARDS", "]", "[", "EsConst", ".", "FAILED", "]", "and", "results", "[", "EsConst", ".", "SHARDS", "]", "[", "EsConst", ".", "FAILED", "]", ">", "0", ":", "raise", "ScanError", "(", "'Scroll request has failed on %d shards out of %d.'", "%", "(", "results", "[", "EsConst", ".", "SHARDS", "]", "[", "EsConst", ".", "FAILED", "]", ",", "results", "[", "EsConst", ".", "SHARDS", "]", "[", "EsConst", ".", "TOTAL", "]", ")", ")" ]
Check if there's a failed shard in the scan query
[ "Check", "if", "there", "s", "a", "failed", "shard", "in", "the", "scan", "query" ]
train
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/utilities.py#L47-L53
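Usage sketch for EsUtils.validate_scan_result (editorial addition). The "_shards", "failed" and "total" keys are assumed values for the EsConst constants, following the standard Elasticsearch scroll response:

from twistes.utilities import EsUtils

scan_response = {"_shards": {"failed": 0, "total": 5}}  # illustrative response
EsUtils.validate_scan_result(scan_response)  # raises ScanError if any shard failed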
ivanprjcts/sdklib
sdklib/util/structures.py
to_key_val_list
def to_key_val_list(value, sort=False, insensitive=False): """ Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list({'key': 'val'}, sort=True) [('key', 'val')] >>> to_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples. """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(value, collections.Mapping): value = value.items() if sort and not insensitive: values = sorted(value) elif sort: values = sorted(value, key=lambda t: t[0].lower()) else: values = value return list(values)
python
def to_key_val_list(value, sort=False, insensitive=False): """ Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list({'key': 'val'}, sort=True) [('key', 'val')] >>> to_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples. """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(value, collections.Mapping): value = value.items() if sort and not insensitive: values = sorted(value) elif sort: values = sorted(value, key=lambda t: t[0].lower()) else: values = value return list(values)
[ "def", "to_key_val_list", "(", "value", ",", "sort", "=", "False", ",", "insensitive", "=", "False", ")", ":", "if", "value", "is", "None", ":", "return", "None", "if", "isinstance", "(", "value", ",", "(", "str", ",", "bytes", ",", "bool", ",", "int", ")", ")", ":", "raise", "ValueError", "(", "'cannot encode objects that are not 2-tuples'", ")", "if", "isinstance", "(", "value", ",", "collections", ".", "Mapping", ")", ":", "value", "=", "value", ".", "items", "(", ")", "if", "sort", "and", "not", "insensitive", ":", "values", "=", "sorted", "(", "value", ")", "elif", "sort", ":", "values", "=", "sorted", "(", "value", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ".", "lower", "(", ")", ")", "else", ":", "values", "=", "value", "return", "list", "(", "values", ")" ]
Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list({'key': 'val'}, sort=True) [('key', 'val')] >>> to_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples.
[ "Take", "an", "object", "and", "test", "to", "see", "if", "it", "can", "be", "represented", "as", "a", "dictionary", ".", "If", "it", "can", "be", "return", "a", "list", "of", "tuples", "e", ".", "g", ".", "::", ">>>", "to_key_val_list", "(", "[", "(", "key", "val", ")", "]", ")", "[", "(", "key", "val", ")", "]", ">>>", "to_key_val_list", "(", "{", "key", ":", "val", "}", ")", "[", "(", "key", "val", ")", "]", ">>>", "to_key_val_list", "(", "{", "key", ":", "val", "}", "sort", "=", "True", ")", "[", "(", "key", "val", ")", "]", ">>>", "to_key_val_list", "(", "string", ")", "ValueError", ":", "cannot", "encode", "objects", "that", "are", "not", "2", "-", "tuples", "." ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/util/structures.py#L19-L48
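Usage sketch for to_key_val_list (editorial addition). Note the function tests against collections.Mapping, which was removed in Python 3.10; on current interpreters the equivalent check is collections.abc.Mapping:

from sdklib.util.structures import to_key_val_list

assert to_key_val_list({"b": 2, "a": 1}, sort=True) == [("a", 1), ("b", 2)]
# insensitive=True sorts on the lower-cased key but keeps the original casing:
assert to_key_val_list([("B", 1), ("a", 2)], sort=True, insensitive=True) == [("a", 2), ("B", 1)]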
ivanprjcts/sdklib
sdklib/util/structures.py
to_key_val_dict
def to_key_val_dict(values): """ Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_dict([('key', 'val')]) {'key': 'val'} >>> to_key_val_dict({'key': 'val'}) {'key': 'val'} >>> to_key_val_dict('string') ValueError: dictionary update sequence element. """ if values is None: return {} if isinstance(values, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(values, collections.Mapping): values = values.items() dict_to_return = dict() for k, v in values: if k in dict_to_return and isinstance(dict_to_return[k], list) and isinstance(v, list): dict_to_return[k].extend(v) elif k in dict_to_return and isinstance(dict_to_return[k], list): dict_to_return[k].append(v) elif k in dict_to_return: dict_to_return[k] = [dict_to_return[k], v] else: dict_to_return[k] = v return dict_to_return
python
def to_key_val_dict(values): """ Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_dict([('key', 'val')]) {'key': 'val'} >>> to_key_val_dict({'key': 'val'}) {'key': 'val'} >>> to_key_val_dict('string') ValueError: dictionary update sequence element. """ if values is None: return {} if isinstance(values, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(values, collections.Mapping): values = values.items() dict_to_return = dict() for k, v in values: if k in dict_to_return and isinstance(dict_to_return[k], list) and isinstance(v, list): dict_to_return[k].extend(v) elif k in dict_to_return and isinstance(dict_to_return[k], list): dict_to_return[k].append(v) elif k in dict_to_return: dict_to_return[k] = [dict_to_return[k], v] else: dict_to_return[k] = v return dict_to_return
[ "def", "to_key_val_dict", "(", "values", ")", ":", "if", "values", "is", "None", ":", "return", "{", "}", "if", "isinstance", "(", "values", ",", "(", "str", ",", "bytes", ",", "bool", ",", "int", ")", ")", ":", "raise", "ValueError", "(", "'cannot encode objects that are not 2-tuples'", ")", "if", "isinstance", "(", "values", ",", "collections", ".", "Mapping", ")", ":", "values", "=", "values", ".", "items", "(", ")", "dict_to_return", "=", "dict", "(", ")", "for", "k", ",", "v", "in", "values", ":", "if", "k", "in", "dict_to_return", "and", "isinstance", "(", "dict_to_return", "[", "k", "]", ",", "list", ")", "and", "isinstance", "(", "v", ",", "list", ")", ":", "dict_to_return", "[", "k", "]", ".", "extend", "(", "v", ")", "elif", "k", "in", "dict_to_return", "and", "isinstance", "(", "dict_to_return", "[", "k", "]", ",", "list", ")", ":", "dict_to_return", "[", "k", "]", ".", "append", "(", "v", ")", "elif", "k", "in", "dict_to_return", ":", "dict_to_return", "[", "k", "]", "=", "[", "dict_to_return", "[", "k", "]", ",", "v", "]", "else", ":", "dict_to_return", "[", "k", "]", "=", "v", "return", "dict_to_return" ]
Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_dict([('key', 'val')]) {'key': 'val'} >>> to_key_val_dict({'key': 'val'}) {'key': 'val'} >>> to_key_val_dict('string') ValueError: dictionary update sequence element.
[ "Take", "an", "object", "and", "test", "to", "see", "if", "it", "can", "be", "represented", "as", "a", "dictionary", ".", "If", "it", "can", "be", "return", "a", "list", "of", "tuples", "e", ".", "g", ".", "::", ">>>", "to_key_val_dict", "(", "[", "(", "key", "val", ")", "]", ")", "{", "key", ":", "val", "}", ">>>", "to_key_val_dict", "(", "{", "key", ":", "val", "}", ")", "{", "key", ":", "val", "}", ">>>", "to_key_val_dict", "(", "string", ")", "ValueError", ":", "dictionary", "update", "sequence", "element", "." ]
train
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/util/structures.py#L51-L83
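Usage sketch for to_key_val_dict (editorial addition; the same collections.Mapping caveat as above applies):

from sdklib.util.structures import to_key_val_dict

# Duplicate keys are accumulated into lists.
assert to_key_val_dict([("k", 1), ("k", 2), ("x", 9)]) == {"k": [1, 2], "x": 9}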