Dataset columns:

| column | type / stats |
|---|---|
| repository_name | string (length 7–55) |
| func_path_in_repository | string (length 4–223) |
| func_name | string (length 1–134) |
| whole_func_string | string (length 75–104k) |
| language | string (1 class) |
| func_code_string | string (length 75–104k) |
| func_code_tokens | sequence (length 19–28.4k) |
| func_documentation_string | string (length 1–46.9k) |
| func_documentation_tokens | sequence (length 1–1.97k) |
| split_name | string (1 class) |
| func_code_url | string (length 87–315) |
RudolfCardinal/pythonlib | cardinal_pythonlib/modules.py | import_submodules | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/modules.py#L48-L74

```python
def import_submodules(package: Union[str, ModuleType],
                      base_package_for_relative_import: str = None,
                      recursive: bool = True) -> Dict[str, ModuleType]:
    """
    Import all submodules of a module, recursively, including subpackages.

    Args:
        package: package (name or actual module)
        base_package_for_relative_import: path to prepend?
        recursive: import submodules too?

    Returns:
        dict: mapping from full module name to module
    """
    # http://stackoverflow.com/questions/3365740/how-to-import-all-submodules
    if isinstance(package, str):
        package = importlib.import_module(package,
                                          base_package_for_relative_import)
    results = {}
    for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
        full_name = package.__name__ + '.' + name
        log.debug("importing: {}", full_name)
        results[full_name] = importlib.import_module(full_name)
        if recursive and is_pkg:
            results.update(import_submodules(full_name))
    return results
```
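A usage sketch (assuming ``cardinal_pythonlib`` itself is installed; any importable package name works):

```python
from cardinal_pythonlib.modules import import_submodules

# Import each immediate submodule of a package, without recursing:
modules = import_submodules("cardinal_pythonlib", recursive=False)
for full_name in sorted(modules):
    print(full_name)  # e.g. "cardinal_pythonlib.fileops"
```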
RudolfCardinal/pythonlib | cardinal_pythonlib/modules.py | is_c_extension | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/modules.py#L102-L154

```python
def is_c_extension(module: ModuleType) -> bool:
    """
    Modified from
    https://stackoverflow.com/questions/20339053/in-python-how-can-one-tell-if-a-module-comes-from-a-c-extension.

    ``True`` only if the passed module is a C extension implemented as a
    dynamically linked shared library specific to the current platform.

    Args:
        module: Previously imported module object to be tested.

    Returns:
        bool: ``True`` only if this module is a C extension.

    Examples:

    .. code-block:: python

        from cardinal_pythonlib.modules import is_c_extension

        import os
        import _elementtree as et
        import numpy
        import numpy.core.multiarray as numpy_multiarray

        is_c_extension(os)  # False
        is_c_extension(numpy)  # False
        is_c_extension(et)  # False on my system (Python 3.5.6). True in the original example.
        is_c_extension(numpy_multiarray)  # True
    """  # noqa
    assert inspect.ismodule(module), '"{}" not a module.'.format(module)

    # If this module was loaded by a PEP 302-compliant CPython-specific loader
    # loading only C extensions, this module is a C extension.
    if isinstance(getattr(module, '__loader__', None), ExtensionFileLoader):
        return True

    # If it's built-in, it's not a C extension.
    if is_builtin_module(module):
        return False

    # Else, fall back to filetype-matching heuristics.
    #
    # Absolute path of the file defining this module.
    module_filename = inspect.getfile(module)
    # "."-prefixed filetype of this path if any, or the empty string otherwise.
    module_filetype = os.path.splitext(module_filename)[1]
    # This module is only a C extension if this path's filetype is that of a
    # C extension specific to the current platform.
    return module_filetype in EXTENSION_SUFFIXES
```
"""
Modified from
https://stackoverflow.com/questions/20339053/in-python-how-can-one-tell-if-a-module-comes-from-a-c-extension.
``True`` only if the passed module is a C extension implemented as a
dynamically linked shared library specific to the current platform.
Args:
module: Previously imported module object to be tested.
Returns:
bool: ``True`` only if this module is a C extension.
Examples:
.. code-block:: python
from cardinal_pythonlib.modules import is_c_extension
import os
import _elementtree as et
import numpy
import numpy.core.multiarray as numpy_multiarray
is_c_extension(os) # False
is_c_extension(numpy) # False
is_c_extension(et) # False on my system (Python 3.5.6). True in the original example.
is_c_extension(numpy_multiarray) # True
""" # noqa
assert inspect.ismodule(module), '"{}" not a module.'.format(module)
# If this module was loaded by a PEP 302-compliant CPython-specific loader
# loading only C extensions, this module is a C extension.
if isinstance(getattr(module, '__loader__', None), ExtensionFileLoader):
return True
# If it's built-in, it's not a C extension.
if is_builtin_module(module):
return False
# Else, fallback to filetype matching heuristics.
#
# Absolute path of the file defining this module.
module_filename = inspect.getfile(module)
# "."-prefixed filetype of this path if any or the empty string otherwise.
module_filetype = os.path.splitext(module_filename)[1]
# This module is only a C extension if this path's filetype is that of a
# C extension specific to the current platform.
return module_filetype in EXTENSION_SUFFIXES | [
"def",
"is_c_extension",
"(",
"module",
":",
"ModuleType",
")",
"->",
"bool",
":",
"# noqa",
"assert",
"inspect",
".",
"ismodule",
"(",
"module",
")",
",",
"'\"{}\" not a module.'",
".",
"format",
"(",
"module",
")",
"# If this module was loaded by a PEP 302-compliant CPython-specific loader",
"# loading only C extensions, this module is a C extension.",
"if",
"isinstance",
"(",
"getattr",
"(",
"module",
",",
"'__loader__'",
",",
"None",
")",
",",
"ExtensionFileLoader",
")",
":",
"return",
"True",
"# If it's built-in, it's not a C extension.",
"if",
"is_builtin_module",
"(",
"module",
")",
":",
"return",
"False",
"# Else, fallback to filetype matching heuristics.",
"#",
"# Absolute path of the file defining this module.",
"module_filename",
"=",
"inspect",
".",
"getfile",
"(",
"module",
")",
"# \".\"-prefixed filetype of this path if any or the empty string otherwise.",
"module_filetype",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"module_filename",
")",
"[",
"1",
"]",
"# This module is only a C extension if this path's filetype is that of a",
"# C extension specific to the current platform.",
"return",
"module_filetype",
"in",
"EXTENSION_SUFFIXES"
] | Modified from
https://stackoverflow.com/questions/20339053/in-python-how-can-one-tell-if-a-module-comes-from-a-c-extension.
``True`` only if the passed module is a C extension implemented as a
dynamically linked shared library specific to the current platform.
Args:
module: Previously imported module object to be tested.
Returns:
bool: ``True`` only if this module is a C extension.
Examples:
.. code-block:: python
from cardinal_pythonlib.modules import is_c_extension
import os
import _elementtree as et
import numpy
import numpy.core.multiarray as numpy_multiarray
is_c_extension(os) # False
is_c_extension(numpy) # False
is_c_extension(et) # False on my system (Python 3.5.6). True in the original example.
is_c_extension(numpy_multiarray) # True | [
"Modified",
"from",
"https",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"questions",
"/",
"20339053",
"/",
"in",
"-",
"python",
"-",
"how",
"-",
"can",
"-",
"one",
"-",
"tell",
"-",
"if",
"-",
"a",
"-",
"module",
"-",
"comes",
"-",
"from",
"-",
"a",
"-",
"c",
"-",
"extension",
".",
"True",
"only",
"if",
"the",
"passed",
"module",
"is",
"a",
"C",
"extension",
"implemented",
"as",
"a",
"dynamically",
"linked",
"shared",
"library",
"specific",
"to",
"the",
"current",
"platform",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/modules.py#L102-L154 |
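This function references two names not shown in the excerpt. ``ExtensionFileLoader`` and ``EXTENSION_SUFFIXES`` do come from the standard library's ``importlib.machinery``; ``is_builtin_module`` is a helper defined elsewhere in this module. A plausible minimal version of the latter (an assumption, not the library's exact code):

```python
import sys
from types import ModuleType
from importlib.machinery import ExtensionFileLoader, EXTENSION_SUFFIXES  # noqa: F401


def is_builtin_module(module: ModuleType) -> bool:
    # Built-in modules are compiled into the interpreter itself, are listed
    # in sys.builtin_module_names, and have no backing file on disk.
    return module.__name__ in sys.builtin_module_names
```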
RudolfCardinal/pythonlib | cardinal_pythonlib/modules.py | contains_c_extension | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/modules.py#L157-L308

```python
def contains_c_extension(module: ModuleType,
                         import_all_submodules: bool = True,
                         include_external_imports: bool = False,
                         seen: List[ModuleType] = None) -> bool:
    """
    Extends :func:`is_c_extension` by asking: is this module, or any of its
    submodules, a C extension?

    Args:
        module: Previously imported module object to be tested.
        import_all_submodules: explicitly import all submodules of this
            module?
        include_external_imports: check modules in other packages that this
            module imports?
        seen: used internally for recursion (to deal with recursive modules);
            should be ``None`` when called by users

    Returns:
        bool: ``True`` only if this module or one of its submodules is a C
        extension.

    Examples:

    .. code-block:: python

        import logging

        from cardinal_pythonlib.modules import contains_c_extension
        from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger

        import _elementtree as et
        import os

        import arrow
        import alembic
        import django
        import numpy
        import numpy.core.multiarray as numpy_multiarray

        log = logging.getLogger(__name__)
        # logging.basicConfig(level=logging.DEBUG)  # be verbose
        main_only_quicksetup_rootlogger(level=logging.DEBUG)

        contains_c_extension(os)  # False
        contains_c_extension(et)  # False

        contains_c_extension(numpy)  # True -- different from is_c_extension()
        contains_c_extension(numpy_multiarray)  # True

        contains_c_extension(arrow)  # False

        contains_c_extension(alembic)  # False
        contains_c_extension(alembic, include_external_imports=True)  # True
        # ... this example shows that Alembic imports hashlib, which can import
        # _hashlib, which is a C extension; however, that doesn't stop us (for
        # example) installing Alembic on a machine with no C compiler

        contains_c_extension(django)
    """  # noqa
    assert inspect.ismodule(module), '"{}" not a module.'.format(module)

    if seen is None:  # only true for the top-level call
        seen = []  # type: List[ModuleType]
    if module in seen:  # modules can "contain" themselves
        # already inspected; avoid infinite loops
        return False
    seen.append(module)

    # Check the thing we were asked about
    is_c_ext = is_c_extension(module)
    log.info("Is module {!r} a C extension? {}", module, is_c_ext)
    if is_c_ext:
        return True
    if is_builtin_module(module):
        # built-in, therefore we stop searching it
        return False

    # Now check any children, in a couple of ways

    top_level_module = seen[0]
    top_path = os.path.dirname(top_level_module.__file__)

    # Recurse using dir(). This picks up modules that are automatically
    # imported by our top-level module. But it won't pick up all submodules;
    # try e.g. for django.
    for candidate_name in dir(module):
        candidate = getattr(module, candidate_name)
        # noinspection PyBroadException
        try:
            if not inspect.ismodule(candidate):
                # not a module
                continue
        except Exception:
            # e.g. a Django module that won't import until we configure its
            # settings
            log.error("Failed to test ismodule() status of {!r}", candidate)
            continue
        if is_builtin_module(candidate):
            # built-in, therefore we stop searching it
            continue

        candidate_fname = getattr(candidate, "__file__")
        if not include_external_imports:
            if os.path.commonpath([top_path, candidate_fname]) != top_path:
                log.debug("Skipping, not within the top-level module's "
                          "directory: {!r}", candidate)
                continue
        # Recurse:
        if contains_c_extension(
                module=candidate,
                import_all_submodules=False,  # only done at the top level, below  # noqa
                include_external_imports=include_external_imports,
                seen=seen):
            return True

    if import_all_submodules:
        if not is_module_a_package(module):
            log.debug("Top-level module is not a package: {!r}", module)
            return False

        # Otherwise, for things like Django, we need to recurse in a different
        # way to scan everything.
        # See https://stackoverflow.com/questions/3365740/how-to-import-all-submodules.  # noqa
        log.debug("Walking path: {!r}", top_path)
        # noinspection PyBroadException
        try:
            for loader, module_name, is_pkg in pkgutil.walk_packages([top_path]):  # noqa
                if not is_pkg:
                    log.debug("Skipping, not a package: {!r}", module_name)
                    continue
                log.debug("Manually importing: {!r}", module_name)
                # noinspection PyBroadException
                try:
                    candidate = loader.find_module(module_name)\
                        .load_module(module_name)  # noqa
                except Exception:
                    # e.g. Alembic "autogenerate" gives: "ValueError: attempted
                    # relative import beyond top-level package"; or Django
                    # "django.core.exceptions.ImproperlyConfigured"
                    log.error("Package failed to import: {!r}", module_name)
                    continue
                if contains_c_extension(
                        module=candidate,
                        import_all_submodules=False,  # only done at the top level  # noqa
                        include_external_imports=include_external_imports,
                        seen=seen):
                    return True
        except Exception:
            log.error("Unable to walk packages further; no C extensions "
                      "detected so far!")
            raise

    return False
```
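A portability note: ``loader.find_module(...).load_module(...)`` was long deprecated and is removed in Python 3.12. A spec-based drop-in replacement for those two lines inside the loop (a sketch, not tested against this library), where ``loader`` is the finder yielded by ``pkgutil.walk_packages()``:

```python
import importlib.util

spec = loader.find_spec(module_name)               # replaces find_module()
candidate = importlib.util.module_from_spec(spec)  # create the module object
spec.loader.exec_module(candidate)                 # replaces load_module()
```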
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | which_with_envpath | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L47-L65

```python
def which_with_envpath(executable: str, env: Dict[str, str]) -> str:
    """
    Performs a :func:`shutil.which` command using the PATH from the specified
    environment.

    Reason: when you use ``run([executable, ...], env)`` and therefore
    ``subprocess.run([executable, ...], env=env)``, the PATH that's searched
    for ``executable`` is the parent's, not the new child's -- so you have to
    find the executable manually.

    Args:
        executable: executable to find
        env: environment to fetch the PATH variable from
    """
    oldpath = os.environ.get("PATH", "")
    os.environ["PATH"] = env.get("PATH")
    which = shutil.which(executable)
    os.environ["PATH"] = oldpath
    return which
```
"""
Performs a :func:`shutil.which` command using the PATH from the specified
environment.
Reason: when you use ``run([executable, ...], env)`` and therefore
``subprocess.run([executable, ...], env=env)``, the PATH that's searched
for ``executable`` is the parent's, not the new child's -- so you have to
find the executable manually.
Args:
executable: executable to find
env: environment to fetch the PATH variable from
"""
oldpath = os.environ.get("PATH", "")
os.environ["PATH"] = env.get("PATH")
which = shutil.which(executable)
os.environ["PATH"] = oldpath
return which | [
"def",
"which_with_envpath",
"(",
"executable",
":",
"str",
",",
"env",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
")",
"->",
"str",
":",
"oldpath",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PATH\"",
",",
"\"\"",
")",
"os",
".",
"environ",
"[",
"\"PATH\"",
"]",
"=",
"env",
".",
"get",
"(",
"\"PATH\"",
")",
"which",
"=",
"shutil",
".",
"which",
"(",
"executable",
")",
"os",
".",
"environ",
"[",
"\"PATH\"",
"]",
"=",
"oldpath",
"return",
"which"
] | Performs a :func:`shutil.which` command using the PATH from the specified
environment.
Reason: when you use ``run([executable, ...], env)`` and therefore
``subprocess.run([executable, ...], env=env)``, the PATH that's searched
for ``executable`` is the parent's, not the new child's -- so you have to
find the executable manually.
Args:
executable: executable to find
env: environment to fetch the PATH variable from | [
"Performs",
"a",
":",
"func",
":",
"shutil",
".",
"which",
"command",
"using",
"the",
"PATH",
"from",
"the",
"specified",
"environment",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L47-L65 |
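A usage sketch (the tool name and directory are hypothetical). Note from the code above that the function works by temporarily mutating the process-wide ``os.environ``, so it is not safe to call concurrently from multiple threads; note also that if ``env`` has no ``PATH`` key, ``env.get("PATH")`` returns ``None`` and the assignment to ``os.environ`` would raise ``TypeError``.

```python
import os

env = os.environ.copy()
# Prepend a hypothetical tool directory to the child environment's PATH:
env["PATH"] = "/opt/mytool/bin" + os.pathsep + env.get("PATH", "")

mytool = which_with_envpath("mytool", env)  # hypothetical executable name
if mytool:
    print("Child process would run:", mytool)
```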
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | require_executable | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L68-L77

```python
def require_executable(executable: str) -> None:
    """
    If ``executable`` is not found by :func:`shutil.which`, raise
    :exc:`FileNotFoundError`.
    """
    if shutil.which(executable):
        return
    errmsg = "Missing command (must be on the PATH): " + executable
    log.critical(errmsg)
    raise FileNotFoundError(errmsg)
```
"""
If ``executable`` is not found by :func:`shutil.which`, raise
:exc:`FileNotFoundError`.
"""
if shutil.which(executable):
return
errmsg = "Missing command (must be on the PATH): " + executable
log.critical(errmsg)
raise FileNotFoundError(errmsg) | [
"def",
"require_executable",
"(",
"executable",
":",
"str",
")",
"->",
"None",
":",
"if",
"shutil",
".",
"which",
"(",
"executable",
")",
":",
"return",
"errmsg",
"=",
"\"Missing command (must be on the PATH): \"",
"+",
"executable",
"log",
".",
"critical",
"(",
"errmsg",
")",
"raise",
"FileNotFoundError",
"(",
"errmsg",
")"
] | If ``executable`` is not found by :func:`shutil.which`, raise
:exc:`FileNotFoundError`. | [
"If",
"executable",
"is",
"not",
"found",
"by",
":",
"func",
":",
"shutil",
".",
"which",
"raise",
":",
"exc",
":",
"FileNotFoundError",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L68-L77 |
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | mkdir_p | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L84-L92

```python
def mkdir_p(path: str) -> None:
    """
    Makes a directory, and any intermediate (parent) directories if required.

    This is the UNIX ``mkdir -p DIRECTORY`` command; of course, we use
    :func:`os.makedirs` instead, for portability.
    """
    log.debug("mkdir -p " + path)
    os.makedirs(path, exist_ok=True)
```
"""
Makes a directory, and any intermediate (parent) directories if required.
This is the UNIX ``mkdir -p DIRECTORY`` command; of course, we use
:func:`os.makedirs` instead, for portability.
"""
log.debug("mkdir -p " + path)
os.makedirs(path, exist_ok=True) | [
"def",
"mkdir_p",
"(",
"path",
":",
"str",
")",
"->",
"None",
":",
"log",
".",
"debug",
"(",
"\"mkdir -p \"",
"+",
"path",
")",
"os",
".",
"makedirs",
"(",
"path",
",",
"exist_ok",
"=",
"True",
")"
] | Makes a directory, and any intermediate (parent) directories if required.
This is the UNIX ``mkdir -p DIRECTORY`` command; of course, we use
:func:`os.makedirs` instead, for portability. | [
"Makes",
"a",
"directory",
"and",
"any",
"intermediate",
"(",
"parent",
")",
"directories",
"if",
"required",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L84-L92 |
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | pushd | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L100-L114

```python
def pushd(directory: str) -> None:
    """
    Context manager: changes directory and preserves the original on exit.

    Example:

    .. code-block:: python

        with pushd(new_directory):
            # do things
    """
    previous_dir = os.getcwd()
    os.chdir(directory)
    yield
    os.chdir(previous_dir)
```
"""
Context manager: changes directory and preserves the original on exit.
Example:
.. code-block:: python
with pushd(new_directory):
# do things
"""
previous_dir = os.getcwd()
os.chdir(directory)
yield
os.chdir(previous_dir) | [
"def",
"pushd",
"(",
"directory",
":",
"str",
")",
"->",
"None",
":",
"previous_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"directory",
")",
"yield",
"os",
".",
"chdir",
"(",
"previous_dir",
")"
] | Context manager: changes directory and preserves the original on exit.
Example:
.. code-block:: python
with pushd(new_directory):
# do things | [
"Context",
"manager",
":",
"changes",
"directory",
"and",
"preserves",
"the",
"original",
"on",
"exit",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L100-L114 |
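As extracted, ``pushd`` contains a bare ``yield``, so in the source it is presumably decorated with ``contextlib.contextmanager`` (decorators fall outside the extracted ``def``). A self-contained sketch of the same idea; the ``try``/``finally`` is our addition, not in the original, so the directory is restored even if the body raises:

```python
import contextlib
import os


@contextlib.contextmanager
def pushd(directory: str):
    previous_dir = os.getcwd()
    os.chdir(directory)
    try:
        yield
    finally:
        os.chdir(previous_dir)  # restored even on exception (our addition)
```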
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | preserve_cwd | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L117-L140

```python
def preserve_cwd(func: Callable) -> Callable:
    """
    Decorator to preserve the current working directory in calls to the
    decorated function.

    Example:

    .. code-block:: python

        @preserve_cwd
        def myfunc():
            os.chdir("/faraway")

        os.chdir("/home")
        myfunc()
        assert os.getcwd() == "/home"
    """
    # http://stackoverflow.com/questions/169070/python-how-do-i-write-a-decorator-that-restores-the-cwd  # noqa
    def decorator(*args_, **kwargs) -> Any:
        cwd = os.getcwd()
        result = func(*args_, **kwargs)
        os.chdir(cwd)
        return result
    return decorator
```
"""
Decorator to preserve the current working directory in calls to the
decorated function.
Example:
.. code-block:: python
@preserve_cwd
def myfunc():
os.chdir("/faraway")
os.chdir("/home")
myfunc()
assert os.getcwd() == "/home"
"""
# http://stackoverflow.com/questions/169070/python-how-do-i-write-a-decorator-that-restores-the-cwd # noqa
def decorator(*args_, **kwargs) -> Any:
cwd = os.getcwd()
result = func(*args_, **kwargs)
os.chdir(cwd)
return result
return decorator | [
"def",
"preserve_cwd",
"(",
"func",
":",
"Callable",
")",
"->",
"Callable",
":",
"# http://stackoverflow.com/questions/169070/python-how-do-i-write-a-decorator-that-restores-the-cwd # noqa",
"def",
"decorator",
"(",
"*",
"args_",
",",
"*",
"*",
"kwargs",
")",
"->",
"Any",
":",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"result",
"=",
"func",
"(",
"*",
"args_",
",",
"*",
"*",
"kwargs",
")",
"os",
".",
"chdir",
"(",
"cwd",
")",
"return",
"result",
"return",
"decorator"
] | Decorator to preserve the current working directory in calls to the
decorated function.
Example:
.. code-block:: python
@preserve_cwd
def myfunc():
os.chdir("/faraway")
os.chdir("/home")
myfunc()
assert os.getcwd() == "/home" | [
"Decorator",
"to",
"preserve",
"the",
"current",
"working",
"directory",
"in",
"calls",
"to",
"the",
"decorated",
"function",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L117-L140 |
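Two small caveats: the inner ``decorator`` does not use ``functools.wraps``, so the wrapped function loses its ``__name__`` and docstring, and the working directory is restored only on normal return. A variant addressing both (an assumption about intent, not the library's code):

```python
import functools
import os
from typing import Any, Callable


def preserve_cwd_robust(func: Callable) -> Callable:
    @functools.wraps(func)  # keep func's name/docstring on the wrapper
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        cwd = os.getcwd()
        try:
            return func(*args, **kwargs)
        finally:
            os.chdir(cwd)  # restore even if func raises
    return wrapper
```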
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | copyglob | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L155-L179

```python
def copyglob(src: str, dest: str, allow_nothing: bool = False,
             allow_nonfiles: bool = False) -> None:
    """
    Copies files whose filenames match the glob ``src`` into the directory
    ``dest``. Raises an error if no files are copied, unless ``allow_nothing``
    is True.

    Args:
        src: source glob (e.g. ``/somewhere/*.txt``)
        dest: destination directory
        allow_nothing: don't raise an exception if no files are found
        allow_nonfiles: copy things that are not files too (as judged by
            :func:`os.path.isfile`).

    Raises:
        ValueError: if no files are found and ``allow_nothing`` is not set
    """
    something = False
    for filename in glob.glob(src):
        if allow_nonfiles or os.path.isfile(filename):
            shutil.copy(filename, dest)
            something = True
    if something or allow_nothing:
        return
    raise ValueError("No files found matching: {}".format(src))
```
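For example (hypothetical paths):

```python
# Copy all CSV reports into an archive directory; don't fail if none exist.
copyglob("/tmp/reports/*.csv", "/tmp/archive", allow_nothing=True)
```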
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | copy_tree_root | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L197-L224

```python
def copy_tree_root(src_dir: str, dest_parent: str) -> None:
    """
    Copies a directory ``src_dir`` into the directory ``dest_parent``.
    That is, with a file structure like:

    .. code-block:: none

        /source/thing/a.txt
        /source/thing/b.txt
        /source/thing/somedir/c.txt

    the command

    .. code-block:: python

        copy_tree_root("/source/thing", "/dest")

    ends up creating

    .. code-block:: none

        /dest/thing/a.txt
        /dest/thing/b.txt
        /dest/thing/somedir/c.txt
    """
    dirname = os.path.basename(os.path.normpath(src_dir))
    dest_dir = os.path.join(dest_parent, dirname)
    shutil.copytree(src_dir, dest_dir)
```
"""
Copies a directory ``src_dir`` into the directory ``dest_parent``.
That is, with a file structure like:
.. code-block:: none
/source/thing/a.txt
/source/thing/b.txt
/source/thing/somedir/c.txt
the command
.. code-block:: python
copy_tree_root("/source/thing", "/dest")
ends up creating
.. code-block:: none
/dest/thing/a.txt
/dest/thing/b.txt
/dest/thing/somedir/c.txt
"""
dirname = os.path.basename(os.path.normpath(src_dir))
dest_dir = os.path.join(dest_parent, dirname)
shutil.copytree(src_dir, dest_dir) | [
"def",
"copy_tree_root",
"(",
"src_dir",
":",
"str",
",",
"dest_parent",
":",
"str",
")",
"->",
"None",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"src_dir",
")",
")",
"dest_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dest_parent",
",",
"dirname",
")",
"shutil",
".",
"copytree",
"(",
"src_dir",
",",
"dest_dir",
")"
] | Copies a directory ``src_dir`` into the directory ``dest_parent``.
That is, with a file structure like:
.. code-block:: none
/source/thing/a.txt
/source/thing/b.txt
/source/thing/somedir/c.txt
the command
.. code-block:: python
copy_tree_root("/source/thing", "/dest")
ends up creating
.. code-block:: none
/dest/thing/a.txt
/dest/thing/b.txt
/dest/thing/somedir/c.txt | [
"Copies",
"a",
"directory",
"src_dir",
"into",
"the",
"directory",
"dest_parent",
".",
"That",
"is",
"with",
"a",
"file",
"structure",
"like",
":"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L197-L224 |
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | copy_tree_contents | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L227-L263

```python
def copy_tree_contents(srcdir: str, destdir: str,
                       destroy: bool = False) -> None:
    """
    Recursive copy. Unlike :func:`copy_tree_root`, :func:`copy_tree_contents`
    works as follows. With the file structure:

    .. code-block:: none

        /source/thing/a.txt
        /source/thing/b.txt
        /source/thing/somedir/c.txt

    the command

    .. code-block:: python

        copy_tree_contents("/source/thing", "/dest")

    ends up creating:

    .. code-block:: none

        /dest/a.txt
        /dest/b.txt
        /dest/somedir/c.txt
    """
    log.info("Copying directory {} -> {}", srcdir, destdir)
    if os.path.exists(destdir):
        if not destroy:
            raise ValueError("Destination exists!")
        if not os.path.isdir(destdir):
            raise ValueError("Destination exists but isn't a directory!")
        log.debug("... removing old contents")
        rmtree(destdir)
        log.debug("... now copying")
    shutil.copytree(srcdir, destdir)
```
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | rmglob | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L270-L276

```python
def rmglob(pattern: str) -> None:
    """
    Deletes all files whose filename matches the glob ``pattern`` (via
    :func:`glob.glob`).
    """
    for f in glob.glob(pattern):
        os.remove(f)
```
"""
Deletes all files whose filename matches the glob ``pattern`` (via
:func:`glob.glob`).
"""
for f in glob.glob(pattern):
os.remove(f) | [
"def",
"rmglob",
"(",
"pattern",
":",
"str",
")",
"->",
"None",
":",
"for",
"f",
"in",
"glob",
".",
"glob",
"(",
"pattern",
")",
":",
"os",
".",
"remove",
"(",
"f",
")"
] | Deletes all files whose filename matches the glob ``pattern`` (via
:func:`glob.glob`). | [
"Deletes",
"all",
"files",
"whose",
"filename",
"matches",
"the",
"glob",
"pattern",
"(",
"via",
":",
"func",
":",
"glob",
".",
"glob",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L270-L276 |
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | purge | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L279-L286

```python
def purge(path: str, pattern: str) -> None:
    """
    Deletes all files in ``path`` matching ``pattern`` (via
    :func:`fnmatch.fnmatch`).
    """
    for f in find(pattern, path):
        log.info("Deleting {}", f)
        os.remove(f)
```
"""
Deletes all files in ``path`` matching ``pattern`` (via
:func:`fnmatch.fnmatch`).
"""
for f in find(pattern, path):
log.info("Deleting {}", f)
os.remove(f) | [
"def",
"purge",
"(",
"path",
":",
"str",
",",
"pattern",
":",
"str",
")",
"->",
"None",
":",
"for",
"f",
"in",
"find",
"(",
"pattern",
",",
"path",
")",
":",
"log",
".",
"info",
"(",
"\"Deleting {}\"",
",",
"f",
")",
"os",
".",
"remove",
"(",
"f",
")"
] | Deletes all files in ``path`` matching ``pattern`` (via
:func:`fnmatch.fnmatch`). | [
"Deletes",
"all",
"files",
"in",
"path",
"matching",
"pattern",
"(",
"via",
":",
"func",
":",
"fnmatch",
".",
"fnmatch",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L279-L286 |
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | delete_files_within_dir | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L289-L299

```python
def delete_files_within_dir(directory: str, filenames: List[str]) -> None:
    """
    Delete files within ``directory`` whose filename *exactly* matches one of
    ``filenames``.
    """
    for dirpath, dirnames, fnames in os.walk(directory):
        for f in fnames:
            if f in filenames:
                fullpath = os.path.join(dirpath, f)
                log.debug("Deleting {!r}", fullpath)
                os.remove(fullpath)
```
"""
Delete files within ``directory`` whose filename *exactly* matches one of
``filenames``.
"""
for dirpath, dirnames, fnames in os.walk(directory):
for f in fnames:
if f in filenames:
fullpath = os.path.join(dirpath, f)
log.debug("Deleting {!r}", fullpath)
os.remove(fullpath) | [
"def",
"delete_files_within_dir",
"(",
"directory",
":",
"str",
",",
"filenames",
":",
"List",
"[",
"str",
"]",
")",
"->",
"None",
":",
"for",
"dirpath",
",",
"dirnames",
",",
"fnames",
"in",
"os",
".",
"walk",
"(",
"directory",
")",
":",
"for",
"f",
"in",
"fnames",
":",
"if",
"f",
"in",
"filenames",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"f",
")",
"log",
".",
"debug",
"(",
"\"Deleting {!r}\"",
",",
"fullpath",
")",
"os",
".",
"remove",
"(",
"fullpath",
")"
] | Delete files within ``directory`` whose filename *exactly* matches one of
``filenames``. | [
"Delete",
"files",
"within",
"directory",
"whose",
"filename",
"*",
"exactly",
"*",
"matches",
"one",
"of",
"filenames",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L289-L299 |
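For example (hypothetical directory; the filenames are typical desktop-metadata junk):

```python
delete_files_within_dir("/tmp/export", ["Thumbs.db", ".DS_Store"])
```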
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | shutil_rmtree_onerror | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L310-L332

```python
def shutil_rmtree_onerror(func: Callable[[str], None],
                          path: str,
                          exc_info: EXC_INFO_TYPE) -> None:
    """
    Error handler for ``shutil.rmtree``.

    If the error is due to an access error (read-only file), it attempts to
    add write permission and then retries. If the error is for another reason,
    it re-raises the error.

    Usage: ``shutil.rmtree(path, onerror=shutil_rmtree_onerror)``

    See
    https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
    """  # noqa
    if not os.access(path, os.W_OK):
        # Is the error an access error?
        os.chmod(path, stat.S_IWUSR)
        func(path)
    else:
        exc = exc_info[1]
        raise exc
```
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | rmtree | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L335-L340

```python
def rmtree(directory: str) -> None:
    """
    Deletes a directory tree.
    """
    log.debug("Deleting directory {!r}", directory)
    shutil.rmtree(directory, onerror=shutil_rmtree_onerror)
```
"""
Deletes a directory tree.
"""
log.debug("Deleting directory {!r}", directory)
shutil.rmtree(directory, onerror=shutil_rmtree_onerror) | [
"def",
"rmtree",
"(",
"directory",
":",
"str",
")",
"->",
"None",
":",
"log",
".",
"debug",
"(",
"\"Deleting directory {!r}\"",
",",
"directory",
")",
"shutil",
".",
"rmtree",
"(",
"directory",
",",
"onerror",
"=",
"shutil_rmtree_onerror",
")"
] | Deletes a directory tree. | [
"Deletes",
"a",
"directory",
"tree",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L335-L340 |
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | chown_r | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L347-L362

```python
def chown_r(path: str, user: str, group: str) -> None:
    """
    Performs a recursive ``chown``.

    Args:
        path: path to walk down
        user: user name or ID
        group: group name or ID

    As per http://stackoverflow.com/questions/2853723
    """
    for root, dirs, files in os.walk(path):
        for x in dirs:
            shutil.chown(os.path.join(root, x), user, group)
        for x in files:
            shutil.chown(os.path.join(root, x), user, group)
```
"""
Performs a recursive ``chown``.
Args:
path: path to walk down
user: user name or ID
group: group name or ID
As per http://stackoverflow.com/questions/2853723
"""
for root, dirs, files in os.walk(path):
for x in dirs:
shutil.chown(os.path.join(root, x), user, group)
for x in files:
shutil.chown(os.path.join(root, x), user, group) | [
"def",
"chown_r",
"(",
"path",
":",
"str",
",",
"user",
":",
"str",
",",
"group",
":",
"str",
")",
"->",
"None",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"x",
"in",
"dirs",
":",
"shutil",
".",
"chown",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"x",
")",
",",
"user",
",",
"group",
")",
"for",
"x",
"in",
"files",
":",
"shutil",
".",
"chown",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"x",
")",
",",
"user",
",",
"group",
")"
] | Performs a recursive ``chown``.
Args:
path: path to walk down
user: user name or ID
group: group name or ID
As per http://stackoverflow.com/questions/2853723 | [
"Performs",
"a",
"recursive",
"chown",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L347-L362 |
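Note that, as written, this changes ownership of everything beneath ``path`` but not of ``path`` itself (contrast ``chmod_r`` below, which starts with the root). A usage sketch with hypothetical path and account names (Unix only, since ``shutil.chown`` is POSIX-specific):

```python
chown_r("/srv/app/data", "www-data", "www-data")
```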
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | chmod_r | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L365-L378

```python
def chmod_r(root: str, permission: int) -> None:
    """
    Recursive ``chmod``.

    Args:
        root: directory to walk down
        permission: e.g. ``stat.S_IWUSR``
    """
    os.chmod(root, permission)
    for dirpath, dirnames, filenames in os.walk(root):
        for d in dirnames:
            os.chmod(os.path.join(dirpath, d), permission)
        for f in filenames:
            os.chmod(os.path.join(dirpath, f), permission)
```
"""
Recursive ``chmod``.
Args:
root: directory to walk down
permission: e.g. ``e.g. stat.S_IWUSR``
"""
os.chmod(root, permission)
for dirpath, dirnames, filenames in os.walk(root):
for d in dirnames:
os.chmod(os.path.join(dirpath, d), permission)
for f in filenames:
os.chmod(os.path.join(dirpath, f), permission) | [
"def",
"chmod_r",
"(",
"root",
":",
"str",
",",
"permission",
":",
"int",
")",
"->",
"None",
":",
"os",
".",
"chmod",
"(",
"root",
",",
"permission",
")",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"root",
")",
":",
"for",
"d",
"in",
"dirnames",
":",
"os",
".",
"chmod",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"d",
")",
",",
"permission",
")",
"for",
"f",
"in",
"filenames",
":",
"os",
".",
"chmod",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"f",
")",
",",
"permission",
")"
] | Recursive ``chmod``.
Args:
root: directory to walk down
permission: e.g. ``e.g. stat.S_IWUSR`` | [
"Recursive",
"chmod",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L365-L378 |
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | find | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L385-L395

```python
def find(pattern: str, path: str) -> List[str]:
    """
    Finds files in ``path`` whose filenames match ``pattern`` (via
    :func:`fnmatch.fnmatch`).
    """
    result = []
    for root, dirs, files in os.walk(path):
        for name in files:
            if fnmatch.fnmatch(name, pattern):
                result.append(os.path.join(root, name))
    return result
```
"""
Finds files in ``path`` whose filenames match ``pattern`` (via
:func:`fnmatch.fnmatch`).
"""
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result | [
"def",
"find",
"(",
"pattern",
":",
"str",
",",
"path",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"result",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"name",
"in",
"files",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"name",
",",
"pattern",
")",
":",
"result",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"name",
")",
")",
"return",
"result"
] | Finds files in ``path`` whose filenames match ``pattern`` (via
:func:`fnmatch.fnmatch`). | [
"Finds",
"files",
"in",
"path",
"whose",
"filenames",
"match",
"pattern",
"(",
"via",
":",
"func",
":",
"fnmatch",
".",
"fnmatch",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L385-L395 |
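For example (hypothetical project directory):

```python
python_files = find("*.py", "/home/user/project")
print(len(python_files), "Python files found")
```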
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | find_first | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L398-L407

```python
def find_first(pattern: str, path: str) -> str:
    """
    Finds first file in ``path`` whose filename matches ``pattern`` (via
    :func:`fnmatch.fnmatch`), or raises :exc:`IndexError`.
    """
    try:
        return find(pattern, path)[0]
    except IndexError:
        log.critical('''Couldn't find "{}" in "{}"''', pattern, path)
        raise
```
"""
Finds first file in ``path`` whose filename matches ``pattern`` (via
:func:`fnmatch.fnmatch`), or raises :exc:`IndexError`.
"""
try:
return find(pattern, path)[0]
except IndexError:
log.critical('''Couldn't find "{}" in "{}"''', pattern, path)
raise | [
"def",
"find_first",
"(",
"pattern",
":",
"str",
",",
"path",
":",
"str",
")",
"->",
"str",
":",
"try",
":",
"return",
"find",
"(",
"pattern",
",",
"path",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"log",
".",
"critical",
"(",
"'''Couldn't find \"{}\" in \"{}\"'''",
",",
"pattern",
",",
"path",
")",
"raise"
] | Finds first file in ``path`` whose filename matches ``pattern`` (via
:func:`fnmatch.fnmatch`), or raises :exc:`IndexError`. | [
"Finds",
"first",
"file",
"in",
"path",
"whose",
"filename",
"matches",
"pattern",
"(",
"via",
":",
"func",
":",
"fnmatch",
".",
"fnmatch",
")",
"or",
"raises",
":",
"exc",
":",
"IndexError",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L398-L407 |
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | gen_filenames | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L410-L431

```python
def gen_filenames(starting_filenames: List[str],
                  recursive: bool) -> Generator[str, None, None]:
    """
    From a starting list of files and/or directories, generates filenames of
    all files in the list, and (if ``recursive`` is set) all files within
    directories in the list.

    Args:
        starting_filenames: files and/or directories
        recursive: walk down any directories in the starting list,
            recursively?

    Yields:
        each filename
    """
    for base_filename in starting_filenames:
        if os.path.isfile(base_filename):
            yield os.path.abspath(base_filename)
        elif os.path.isdir(base_filename) and recursive:
            for dirpath, dirnames, filenames in os.walk(base_filename):
                for fname in filenames:
                    yield os.path.abspath(os.path.join(dirpath, fname))
```
recursive: bool) -> Generator[str, None, None]:
"""
From a starting list of files and/or directories, generates filenames of
all files in the list, and (if ``recursive`` is set) all files within
directories in the list.
Args:
starting_filenames: files and/or directories
recursive: walk down any directories in the starting list, recursively?
Yields:
each filename
"""
for base_filename in starting_filenames:
if os.path.isfile(base_filename):
yield os.path.abspath(base_filename)
elif os.path.isdir(base_filename) and recursive:
for dirpath, dirnames, filenames in os.walk(base_filename):
for fname in filenames:
yield os.path.abspath(os.path.join(dirpath, fname)) | [
"def",
"gen_filenames",
"(",
"starting_filenames",
":",
"List",
"[",
"str",
"]",
",",
"recursive",
":",
"bool",
")",
"->",
"Generator",
"[",
"str",
",",
"None",
",",
"None",
"]",
":",
"for",
"base_filename",
"in",
"starting_filenames",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"base_filename",
")",
":",
"yield",
"os",
".",
"path",
".",
"abspath",
"(",
"base_filename",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"base_filename",
")",
"and",
"recursive",
":",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"base_filename",
")",
":",
"for",
"fname",
"in",
"filenames",
":",
"yield",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"fname",
")",
")"
] | From a starting list of files and/or directories, generates filenames of
all files in the list, and (if ``recursive`` is set) all files within
directories in the list.
Args:
starting_filenames: files and/or directories
recursive: walk down any directories in the starting list, recursively?
Yields:
each filename | [
"From",
"a",
"starting",
"list",
"of",
"files",
"and",
"/",
"or",
"directories",
"generates",
"filenames",
"of",
"all",
"files",
"in",
"the",
"list",
"and",
"(",
"if",
"recursive",
"is",
"set",
")",
"all",
"files",
"within",
"directories",
"in",
"the",
"list",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L410-L431 |
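Note that a directory in the starting list is silently skipped unless ``recursive`` is true, because only the ``isdir ... and recursive`` branch descends into it. A short sketch (paths illustrative):

for fname in gen_filenames(["notes.txt", "/data/incoming"], recursive=True):
    print(fname)  # absolute paths: the plain file first, then everything under the directory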
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | exists_locked | def exists_locked(filepath: str) -> Tuple[bool, bool]:
"""
Checks if a file is locked by opening it in append mode.
(If no exception is thrown in that situation, then the file is not locked.)
Args:
filepath: file to check
Returns:
tuple: ``(exists, locked)``
See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/.
"""
exists = False
locked = None
file_object = None
if os.path.exists(filepath):
exists = True
locked = True
try:
buffer_size = 8
# Open the file in append mode with a small (8-byte) buffer; no read occurs.
file_object = open(filepath, 'a', buffer_size)
if file_object:
locked = False # exists and not locked
except IOError:
pass
finally:
if file_object:
file_object.close()
return exists, locked | python | def exists_locked(filepath: str) -> Tuple[bool, bool]:
"""
Checks if a file is locked by opening it in append mode.
(If no exception is thrown in that situation, then the file is not locked.)
Args:
filepath: file to check
Returns:
tuple: ``(exists, locked)``
See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/.
"""
exists = False
locked = None
file_object = None
if os.path.exists(filepath):
exists = True
locked = True
try:
buffer_size = 8
# Open the file in append mode with a small (8-byte) buffer; no read occurs.
file_object = open(filepath, 'a', buffer_size)
if file_object:
locked = False # exists and not locked
except IOError:
pass
finally:
if file_object:
file_object.close()
return exists, locked | [
"def",
"exists_locked",
"(",
"filepath",
":",
"str",
")",
"->",
"Tuple",
"[",
"bool",
",",
"bool",
"]",
":",
"exists",
"=",
"False",
"locked",
"=",
"None",
"file_object",
"=",
"None",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filepath",
")",
":",
"exists",
"=",
"True",
"locked",
"=",
"True",
"try",
":",
"buffer_size",
"=",
"8",
"# Opening file in append mode and read the first 8 characters.",
"file_object",
"=",
"open",
"(",
"filepath",
",",
"'a'",
",",
"buffer_size",
")",
"if",
"file_object",
":",
"locked",
"=",
"False",
"# exists and not locked",
"except",
"IOError",
":",
"pass",
"finally",
":",
"if",
"file_object",
":",
"file_object",
".",
"close",
"(",
")",
"return",
"exists",
",",
"locked"
] | Checks if a file is locked by opening it in append mode.
(If no exception is thrown in that situation, then the file is not locked.)
Args:
filepath: file to check
Returns:
tuple: ``(exists, locked)``
See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/. | [
"Checks",
"if",
"a",
"file",
"is",
"locked",
"by",
"opening",
"it",
"in",
"append",
"mode",
".",
"(",
"If",
"no",
"exception",
"is",
"thrown",
"in",
"that",
"situation",
"then",
"the",
"file",
"is",
"not",
"locked",
".",
")"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L438-L468 |
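One quirk worth flagging: when the file does not exist, ``locked`` is returned as ``None`` rather than ``False``, despite the ``Tuple[bool, bool]`` annotation, so check ``exists`` first. A sketch (filename illustrative):

exists, locked = exists_locked("output.pdf")
if not exists:
    pass  # locked is None here, not False
elif locked:
    print("file present but locked")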
RudolfCardinal/pythonlib | cardinal_pythonlib/fileops.py | relative_filename_within_dir | def relative_filename_within_dir(filename: str, directory: str) -> str:
"""
Starting with a (typically absolute) ``filename``, returns the part of the
filename that is relative to the directory ``directory``.
If the file is *not* within the directory, returns an empty string.
"""
filename = os.path.abspath(filename)
directory = os.path.abspath(directory)
if os.path.commonpath([directory, filename]) != directory:
# Filename is not within directory
return ""
return os.path.relpath(filename, start=directory) | python | def relative_filename_within_dir(filename: str, directory: str) -> str:
"""
Starting with a (typically absolute) ``filename``, returns the part of the
filename that is relative to the directory ``directory``.
If the file is *not* within the directory, returns an empty string.
"""
filename = os.path.abspath(filename)
directory = os.path.abspath(directory)
if os.path.commonpath([directory, filename]) != directory:
# Filename is not within directory
return ""
return os.path.relpath(filename, start=directory) | [
"def",
"relative_filename_within_dir",
"(",
"filename",
":",
"str",
",",
"directory",
":",
"str",
")",
"->",
"str",
":",
"filename",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"filename",
")",
"directory",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"directory",
")",
"if",
"os",
".",
"path",
".",
"commonpath",
"(",
"[",
"directory",
",",
"filename",
"]",
")",
"!=",
"directory",
":",
"# Filename is not within directory",
"return",
"\"\"",
"return",
"os",
".",
"path",
".",
"relpath",
"(",
"filename",
",",
"start",
"=",
"directory",
")"
] | Starting with a (typically absolute) ``filename``, returns the part of the
filename that is relative to the directory ``directory``.
If the file is *not* within the directory, returns an empty string. | [
"Starting",
"with",
"a",
"(",
"typically",
"absolute",
")",
"filename",
"returns",
"the",
"part",
"of",
"the",
"filename",
"that",
"is",
"relative",
"to",
"the",
"directory",
"directory",
".",
"If",
"the",
"file",
"is",
"*",
"not",
"*",
"within",
"the",
"directory",
"returns",
"an",
"empty",
"string",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L475-L486 |
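A worked example on a POSIX system (paths illustrative):

assert relative_filename_within_dir("/home/u/docs/a.txt", "/home/u") == "docs/a.txt"
assert relative_filename_within_dir("/etc/passwd", "/home/u") == ""  # outside => empty string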
smtp2go-oss/smtp2go-python | smtp2go/core.py | Smtp2goResponse._get_errors | def _get_errors(self):
"""
Gets errors from HTTP response
"""
errors = self.json.get('data').get('failures')
if errors:
logger.error(errors)
return errors | python | def _get_errors(self):
"""
Gets errors from HTTP response
"""
errors = self.json.get('data').get('failures')
if errors:
logger.error(errors)
return errors | [
"def",
"_get_errors",
"(",
"self",
")",
":",
"errors",
"=",
"self",
".",
"json",
".",
"get",
"(",
"'data'",
")",
".",
"get",
"(",
"'failures'",
")",
"if",
"errors",
":",
"logger",
".",
"error",
"(",
"errors",
")",
"return",
"errors"
] | Gets errors from HTTP response | [
"Gets",
"errors",
"from",
"HTTP",
"response"
] | train | https://github.com/smtp2go-oss/smtp2go-python/blob/581cc33b1c6f4ca2882535a51a787c33e5cfcce7/smtp2go/core.py#L120-L127 |
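The lookup assumes the smtp2go API nests failures under ``data``; a sketch of the payload shape this implies (field names beyond ``data``/``failures`` are assumptions, not confirmed by the source):

payload = {"data": {"failures": ["bad@address.invalid"], "succeeded": 1}}  # illustrative
failures = payload.get("data").get("failures")  # mirrors the lookup in _get_errors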
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | page_sequence | def page_sequence(n_sheets: int, one_based: bool = True) -> List[int]:
"""
Generates the final page sequence from the starting number of sheets.
"""
n_pages = calc_n_virtual_pages(n_sheets)
assert n_pages % 4 == 0
half_n_pages = n_pages // 2
firsthalf = list(range(half_n_pages))
secondhalf = list(reversed(range(half_n_pages, n_pages)))
# Seen from the top of an UNFOLDED booklet (e.g. a stack of paper that's
# come out of your printer), "firsthalf" are on the right (from top to
# bottom: recto facing up, then verso facing down, then recto, then verso)
# and "secondhalf" are on the left (from top to bottom: verso facing up,
# then recto facing down, etc.).
sequence = [] # type: List[int]
top = True
for left, right in zip(secondhalf, firsthalf):
if not top:
left, right = right, left
sequence += [left, right]
top = not top
if one_based:
sequence = [x + 1 for x in sequence]
log.debug("{} sheets => page sequence {!r}", n_sheets, sequence)
return sequence | python | def page_sequence(n_sheets: int, one_based: bool = True) -> List[int]:
"""
Generates the final page sequence from the starting number of sheets.
"""
n_pages = calc_n_virtual_pages(n_sheets)
assert n_pages % 4 == 0
half_n_pages = n_pages // 2
firsthalf = list(range(half_n_pages))
secondhalf = list(reversed(range(half_n_pages, n_pages)))
# Seen from the top of an UNFOLDED booklet (e.g. a stack of paper that's
# come out of your printer), "firsthalf" are on the right (from top to
# bottom: recto facing up, then verso facing down, then recto, then verso)
# and "secondhalf" are on the left (from top to bottom: verso facing up,
# then recto facing down, etc.).
sequence = [] # type: List[int]
top = True
for left, right in zip(secondhalf, firsthalf):
if not top:
left, right = right, left
sequence += [left, right]
top = not top
if one_based:
sequence = [x + 1 for x in sequence]
log.debug("{} sheets => page sequence {!r}", n_sheets, sequence)
return sequence | [
"def",
"page_sequence",
"(",
"n_sheets",
":",
"int",
",",
"one_based",
":",
"bool",
"=",
"True",
")",
"->",
"List",
"[",
"int",
"]",
":",
"n_pages",
"=",
"calc_n_virtual_pages",
"(",
"n_sheets",
")",
"assert",
"n_pages",
"%",
"4",
"==",
"0",
"half_n_pages",
"=",
"n_pages",
"//",
"2",
"firsthalf",
"=",
"list",
"(",
"range",
"(",
"half_n_pages",
")",
")",
"secondhalf",
"=",
"list",
"(",
"reversed",
"(",
"range",
"(",
"half_n_pages",
",",
"n_pages",
")",
")",
")",
"# Seen from the top of an UNFOLDED booklet (e.g. a stack of paper that's",
"# come out of your printer), \"firsthalf\" are on the right (from top to",
"# bottom: recto facing up, then verso facing down, then recto, then verso)",
"# and \"secondhalf\" are on the left (from top to bottom: verso facing up,",
"# then recto facing down, etc.).",
"sequence",
"=",
"[",
"]",
"# type: List[int]",
"top",
"=",
"True",
"for",
"left",
",",
"right",
"in",
"zip",
"(",
"secondhalf",
",",
"firsthalf",
")",
":",
"if",
"not",
"top",
":",
"left",
",",
"right",
"=",
"right",
",",
"left",
"sequence",
"+=",
"[",
"left",
",",
"right",
"]",
"top",
"=",
"not",
"top",
"if",
"one_based",
":",
"sequence",
"=",
"[",
"x",
"+",
"1",
"for",
"x",
"in",
"sequence",
"]",
"log",
".",
"debug",
"(",
"\"{} sheets => page sequence {!r}\"",
",",
"n_sheets",
",",
"sequence",
")",
"return",
"sequence"
] | Generates the final page sequence from the starting number of sheets. | [
"Generates",
"the",
"final",
"page",
"sequence",
"from",
"the",
"starting",
"number",
"of",
"sheets",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L161-L185 |
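Assuming ``calc_n_virtual_pages`` yields four virtual pages per sheet (consistent with the ``n_pages % 4 == 0`` assertion, but not shown here), two sheets produce the classic imposition order:

# page_sequence(2) == [8, 1, 2, 7, 6, 3, 4, 5]   (one-based)
# i.e. zero-based [7, 0, 1, 6, 5, 2, 3, 4]: outer/inner page pairs, alternating sides.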
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | require | def require(executable: str, explanation: str = "") -> None:
"""
Ensures that the external tool is available.
Asserts upon failure.
"""
assert shutil.which(executable), "Need {!r} on the PATH.{}".format(
executable, "\n" + explanation if explanation else "") | python | def require(executable: str, explanation: str = "") -> None:
"""
Ensures that the external tool is available.
Asserts upon failure.
"""
assert shutil.which(executable), "Need {!r} on the PATH.{}".format(
executable, "\n" + explanation if explanation else "") | [
"def",
"require",
"(",
"executable",
":",
"str",
",",
"explanation",
":",
"str",
"=",
"\"\"",
")",
"->",
"None",
":",
"assert",
"shutil",
".",
"which",
"(",
"executable",
")",
",",
"\"Need {!r} on the PATH.{}\"",
".",
"format",
"(",
"executable",
",",
"\"\\n\"",
"+",
"explanation",
"if",
"explanation",
"else",
"\"\"",
")"
] | Ensures that the external tool is available.
Asserts upon failure. | [
"Ensures",
"that",
"the",
"external",
"tool",
"is",
"available",
".",
"Asserts",
"upon",
"failure",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L192-L198 |
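Typical use is as a guard before shelling out, exactly as the module does elsewhere:

require(PDFTK, HELP_MISSING_PDFTK)  # AssertionError with the help text if pdftk is absent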
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | run | def run(args: List[str],
get_output: bool = False,
encoding: str = sys.getdefaultencoding()) -> Tuple[str, str]:
"""
Run an external command +/- return the results.
Returns a ``(stdout, stderr)`` tuple (both are blank strings if the output
wasn't wanted).
"""
printable = " ".join(shlex.quote(x) for x in args).replace("\n", r"\n")
log.debug("Running external command: {}", printable)
if get_output:
p = subprocess.run(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, check=True)
stdout, stderr = p.stdout.decode(encoding), p.stderr.decode(encoding)
else:
subprocess.check_call(args)
stdout, stderr = "", ""
return stdout, stderr | python | def run(args: List[str],
get_output: bool = False,
encoding: str = sys.getdefaultencoding()) -> Tuple[str, str]:
"""
Run an external command +/- return the results.
Returns a ``(stdout, stderr)`` tuple (both are blank strings if the output
wasn't wanted).
"""
printable = " ".join(shlex.quote(x) for x in args).replace("\n", r"\n")
log.debug("Running external command: {}", printable)
if get_output:
p = subprocess.run(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, check=True)
stdout, stderr = p.stdout.decode(encoding), p.stderr.decode(encoding)
else:
subprocess.check_call(args)
stdout, stderr = "", ""
return stdout, stderr | [
"def",
"run",
"(",
"args",
":",
"List",
"[",
"str",
"]",
",",
"get_output",
":",
"bool",
"=",
"False",
",",
"encoding",
":",
"str",
"=",
"sys",
".",
"getdefaultencoding",
"(",
")",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
"]",
":",
"printable",
"=",
"\" \"",
".",
"join",
"(",
"shlex",
".",
"quote",
"(",
"x",
")",
"for",
"x",
"in",
"args",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"r\"\\n\"",
")",
"log",
".",
"debug",
"(",
"\"Running external command: {}\"",
",",
"printable",
")",
"if",
"get_output",
":",
"p",
"=",
"subprocess",
".",
"run",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"check",
"=",
"True",
")",
"stdout",
",",
"stderr",
"=",
"p",
".",
"stdout",
".",
"decode",
"(",
"encoding",
")",
",",
"p",
".",
"stderr",
".",
"decode",
"(",
"encoding",
")",
"else",
":",
"subprocess",
".",
"check_call",
"(",
"args",
")",
"stdout",
",",
"stderr",
"=",
"\"\"",
",",
"\"\"",
"return",
"stdout",
",",
"stderr"
] | Run an external command +/- return the results.
Returns a ``(stdout, stderr)`` tuple (both are blank strings if the output
wasn't wanted). | [
"Run",
"an",
"external",
"command",
"+",
"/",
"-",
"return",
"the",
"results",
".",
"Returns",
"a",
"(",
"stdout",
"stderr",
")",
"tuple",
"(",
"both",
"are",
"blank",
"strings",
"if",
"the",
"output",
"wasn",
"t",
"wanted",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L201-L218 |
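A usage sketch (commands illustrative): with ``get_output`` set, both streams are captured and decoded; otherwise only the exit status is checked and two empty strings come back.

stdout, stderr = run(["pdftk", "--version"], get_output=True)
run(["touch", "/tmp/marker"])  # returns ("", ""); raises CalledProcessError on a non-zero exit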
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | get_page_count | def get_page_count(filename: str) -> int:
"""
How many pages are in a PDF?
"""
log.debug("Getting page count for {!r}", filename)
require(PDFTK, HELP_MISSING_PDFTK)
stdout, _ = run([PDFTK, filename, "dump_data"], get_output=True)
regex = re.compile(r"^NumberOfPages: (\d+)$", re.MULTILINE)
m = regex.search(stdout)
if m:
return int(m.group(1))
raise ValueError("Can't get PDF page count for: {!r}".format(filename)) | python | def get_page_count(filename: str) -> int:
"""
How many pages are in a PDF?
"""
log.debug("Getting page count for {!r}", filename)
require(PDFTK, HELP_MISSING_PDFTK)
stdout, _ = run([PDFTK, filename, "dump_data"], get_output=True)
regex = re.compile(r"^NumberOfPages: (\d+)$", re.MULTILINE)
m = regex.search(stdout)
if m:
return int(m.group(1))
raise ValueError("Can't get PDF page count for: {!r}".format(filename)) | [
"def",
"get_page_count",
"(",
"filename",
":",
"str",
")",
"->",
"int",
":",
"log",
".",
"debug",
"(",
"\"Getting page count for {!r}\"",
",",
"filename",
")",
"require",
"(",
"PDFTK",
",",
"HELP_MISSING_PDFTK",
")",
"stdout",
",",
"_",
"=",
"run",
"(",
"[",
"PDFTK",
",",
"filename",
",",
"\"dump_data\"",
"]",
",",
"get_output",
"=",
"True",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"r\"^NumberOfPages: (\\d+)$\"",
",",
"re",
".",
"MULTILINE",
")",
"m",
"=",
"regex",
".",
"search",
"(",
"stdout",
")",
"if",
"m",
":",
"return",
"int",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
"raise",
"ValueError",
"(",
"\"Can't get PDF page count for: {!r}\"",
".",
"format",
"(",
"filename",
")",
")"
] | How many pages are in a PDF? | [
"How",
"many",
"pages",
"are",
"in",
"a",
"PDF?"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L221-L232 |
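The regex keys off pdftk's ``dump_data`` report. A self-contained check against an illustrative fragment of that output, using the module's own ``re`` import:

sample = "InfoBegin\nInfoKey: Title\nNumberOfPages: 12\n"  # illustrative dump_data text
m = re.search(r"^NumberOfPages: (\d+)$", sample, re.MULTILINE)
assert m and int(m.group(1)) == 12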
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | make_blank_pdf | def make_blank_pdf(filename: str, paper: str = "A4") -> None:
"""
NOT USED.
Makes a blank single-page PDF, using ImageMagick's ``convert``.
"""
# https://unix.stackexchange.com/questions/277892/how-do-i-create-a-blank-pdf-from-the-command-line # noqa
require(CONVERT, HELP_MISSING_IMAGEMAGICK)
run([CONVERT, "xc:none", "-page", paper, filename]) | python | def make_blank_pdf(filename: str, paper: str = "A4") -> None:
"""
NOT USED.
Makes a blank single-page PDF, using ImageMagick's ``convert``.
"""
# https://unix.stackexchange.com/questions/277892/how-do-i-create-a-blank-pdf-from-the-command-line # noqa
require(CONVERT, HELP_MISSING_IMAGEMAGICK)
run([CONVERT, "xc:none", "-page", paper, filename]) | [
"def",
"make_blank_pdf",
"(",
"filename",
":",
"str",
",",
"paper",
":",
"str",
"=",
"\"A4\"",
")",
"->",
"None",
":",
"# https://unix.stackexchange.com/questions/277892/how-do-i-create-a-blank-pdf-from-the-command-line # noqa",
"require",
"(",
"CONVERT",
",",
"HELP_MISSING_IMAGEMAGICK",
")",
"run",
"(",
"[",
"CONVERT",
",",
"\"xc:none\"",
",",
"\"-page\"",
",",
"paper",
",",
"filename",
"]",
")"
] | NOT USED.
Makes a blank single-page PDF, using ImageMagick's ``convert``. | [
"NOT",
"USED",
".",
"Makes",
"a",
"blank",
"single",
"-",
"page",
"PDF",
"using",
"ImageMagick",
"s",
"convert",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L235-L242 |
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | slice_pdf | def slice_pdf(input_filename: str, output_filename: str,
slice_horiz: int, slice_vert: int) -> str:
"""
Slice each page of the original, to convert to "one real page per PDF
page". Return the output filename.
"""
if slice_horiz == 1 and slice_vert == 1:
log.debug("No slicing required")
return input_filename # nothing to do
log.info("Slicing each source page mv into {} horizontally x {} vertically",
slice_horiz, slice_vert)
log.debug("... from {!r} to {!r}", input_filename, output_filename)
require(MUTOOL, HELP_MISSING_MUTOOL)
run([
MUTOOL,
"poster",
"-x", str(slice_horiz),
"-y", str(slice_vert),
input_filename,
output_filename
])
return output_filename | python | def slice_pdf(input_filename: str, output_filename: str,
slice_horiz: int, slice_vert: int) -> str:
"""
Slice each page of the original, to convert to "one real page per PDF
page". Return the output filename.
"""
if slice_horiz == 1 and slice_vert == 1:
log.debug("No slicing required")
return input_filename # nothing to do
log.info("Slicing each source page mv into {} horizontally x {} vertically",
slice_horiz, slice_vert)
log.debug("... from {!r} to {!r}", input_filename, output_filename)
require(MUTOOL, HELP_MISSING_MUTOOL)
run([
MUTOOL,
"poster",
"-x", str(slice_horiz),
"-y", str(slice_vert),
input_filename,
output_filename
])
return output_filename | [
"def",
"slice_pdf",
"(",
"input_filename",
":",
"str",
",",
"output_filename",
":",
"str",
",",
"slice_horiz",
":",
"int",
",",
"slice_vert",
":",
"int",
")",
"->",
"str",
":",
"if",
"slice_horiz",
"==",
"1",
"and",
"slice_vert",
"==",
"1",
":",
"log",
".",
"debug",
"(",
"\"No slicing required\"",
")",
"return",
"input_filename",
"# nothing to do",
"log",
".",
"info",
"(",
"\"Slicing each source page mv into {} horizontally x {} vertically\"",
",",
"slice_horiz",
",",
"slice_vert",
")",
"log",
".",
"debug",
"(",
"\"... from {!r} to {!r}\"",
",",
"input_filename",
",",
"output_filename",
")",
"require",
"(",
"MUTOOL",
",",
"HELP_MISSING_MUTOOL",
")",
"run",
"(",
"[",
"MUTOOL",
",",
"\"poster\"",
",",
"\"-x\"",
",",
"str",
"(",
"slice_horiz",
")",
",",
"\"-y\"",
",",
"str",
"(",
"slice_vert",
")",
",",
"input_filename",
",",
"output_filename",
"]",
")",
"return",
"output_filename"
] | Slice each page of the original, to convert to "one real page per PDF
page". Return the output filename. | [
"Slice",
"each",
"page",
"of",
"the",
"original",
"to",
"convert",
"to",
"one",
"real",
"page",
"per",
"PDF",
"page",
".",
"Return",
"the",
"output",
"filename",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L245-L266 |
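For ``slice_horiz=2, slice_vert=1`` the call is equivalent to ``mutool poster -x 2 -y 1 input.pdf output.pdf``. A sketch (filenames illustrative):

result = slice_pdf("scanned_2up.pdf", "single_pages.pdf", slice_horiz=2, slice_vert=1)
# result == "single_pages.pdf"; with a 1x1 slice the input filename is returned unchanged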
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | booklet_nup_pdf | def booklet_nup_pdf(input_filename: str, output_filename: str,
latex_paper_size: str = LATEX_PAPER_SIZE_A4) -> str:
"""
Takes a PDF (e.g. A4) and makes a 2x1 booklet (e.g. 2xA5 per A4).
The booklet can be folded like a book and the final pages will be in order.
Returns the output filename.
"""
log.info("Creating booklet")
log.debug("... {!r} -> {!r}", input_filename, output_filename)
require(PDFJAM, HELP_MISSING_PDFJAM)
n_pages = get_page_count(input_filename)
n_sheets = calc_n_sheets(n_pages)
log.debug("{} pages => {} sheets", n_pages, n_sheets)
pagenums = page_sequence(n_sheets, one_based=True)
pagespeclist = [str(p) if p <= n_pages else "{}"
for p in pagenums]
# ... switches empty pages to "{}", which is pdfjam notation for
# an empty page.
pagespec = ",".join(pagespeclist)
pdfjam_tidy = True # clean up after yourself?
args = [
PDFJAM,
"--paper", latex_paper_size,
"--landscape",
"--nup", "2x1",
"--keepinfo", # e.g. author information
"--outfile", output_filename,
"--tidy" if pdfjam_tidy else "--no-tidy",
"--", # "no more options"
input_filename, pagespec
]
run(args)
return output_filename | python | def booklet_nup_pdf(input_filename: str, output_filename: str,
latex_paper_size: str = LATEX_PAPER_SIZE_A4) -> str:
"""
Takes a PDF (e.g. A4) and makes a 2x1 booklet (e.g. 2xA5 per A4).
The booklet can be folded like a book and the final pages will be in order.
Returns the output filename.
"""
log.info("Creating booklet")
log.debug("... {!r} -> {!r}", input_filename, output_filename)
require(PDFJAM, HELP_MISSING_PDFJAM)
n_pages = get_page_count(input_filename)
n_sheets = calc_n_sheets(n_pages)
log.debug("{} pages => {} sheets", n_pages, n_sheets)
pagenums = page_sequence(n_sheets, one_based=True)
pagespeclist = [str(p) if p <= n_pages else "{}"
for p in pagenums]
# ... switches empty pages to "{}", which is pdfjam notation for
# an empty page.
pagespec = ",".join(pagespeclist)
pdfjam_tidy = True # clean up after yourself?
args = [
PDFJAM,
"--paper", latex_paper_size,
"--landscape",
"--nup", "2x1",
"--keepinfo", # e.g. author information
"--outfile", output_filename,
"--tidy" if pdfjam_tidy else "--no-tidy",
"--", # "no more options"
input_filename, pagespec
]
run(args)
return output_filename | [
"def",
"booklet_nup_pdf",
"(",
"input_filename",
":",
"str",
",",
"output_filename",
":",
"str",
",",
"latex_paper_size",
":",
"str",
"=",
"LATEX_PAPER_SIZE_A4",
")",
"->",
"str",
":",
"log",
".",
"info",
"(",
"\"Creating booklet\"",
")",
"log",
".",
"debug",
"(",
"\"... {!r} -> {!r}\"",
",",
"input_filename",
",",
"output_filename",
")",
"require",
"(",
"PDFJAM",
",",
"HELP_MISSING_PDFJAM",
")",
"n_pages",
"=",
"get_page_count",
"(",
"input_filename",
")",
"n_sheets",
"=",
"calc_n_sheets",
"(",
"n_pages",
")",
"log",
".",
"debug",
"(",
"\"{} pages => {} sheets\"",
",",
"n_pages",
",",
"n_sheets",
")",
"pagenums",
"=",
"page_sequence",
"(",
"n_sheets",
",",
"one_based",
"=",
"True",
")",
"pagespeclist",
"=",
"[",
"str",
"(",
"p",
")",
"if",
"p",
"<=",
"n_pages",
"else",
"\"{}\"",
"for",
"p",
"in",
"pagenums",
"]",
"# ... switches empty pages to \"{}\", which is pdfjam notation for",
"# an empty page.",
"pagespec",
"=",
"\",\"",
".",
"join",
"(",
"pagespeclist",
")",
"pdfjam_tidy",
"=",
"True",
"# clean up after yourself?",
"args",
"=",
"[",
"PDFJAM",
",",
"\"--paper\"",
",",
"latex_paper_size",
",",
"\"--landscape\"",
",",
"\"--nup\"",
",",
"\"2x1\"",
",",
"\"--keepinfo\"",
",",
"# e.g. author information",
"\"--outfile\"",
",",
"output_filename",
",",
"\"--tidy\"",
"if",
"pdfjam_tidy",
"else",
"\"--no-tidy\"",
",",
"\"--\"",
",",
"# \"no more options\"",
"input_filename",
",",
"pagespec",
"]",
"run",
"(",
"args",
")",
"return",
"output_filename"
] | Takes a PDF (e.g. A4) and makes a 2x1 booklet (e.g. 2xA5 per A4).
The booklet can be folded like a book and the final pages will be in order.
Returns the output filename. | [
"Takes",
"a",
"PDF",
"(",
"e",
".",
"g",
".",
"A4",
")",
"and",
"makes",
"a",
"2x1",
"booklet",
"(",
"e",
".",
"g",
".",
"2xA5",
"per",
"A4",
")",
".",
"The",
"booklet",
"can",
"be",
"folded",
"like",
"a",
"book",
"and",
"the",
"final",
"pages",
"will",
"be",
"in",
"order",
".",
"Returns",
"the",
"output",
"filename",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L269-L301 |
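A worked pagespec, again assuming four virtual pages per sheet: a 6-page input needs two sheets (eight virtual pages), ``page_sequence`` gives ``[8, 1, 2, 7, 6, 3, 4, 5]``, and pages 7-8 do not exist, so:

# pagespec == "{},1,2,{},6,3,4,5"   ("{}" being pdfjam's blank page)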
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | rotate_even_pages_180 | def rotate_even_pages_180(input_filename: str, output_filename: str) -> str:
"""
Rotates even-numbered pages 180 degrees.
Returns the output filename.
"""
log.info("Rotating even-numbered pages 180 degrees for long-edge "
"duplex printing")
log.debug("... {!r} -> {!r}", input_filename, output_filename)
require(PDFTK, HELP_MISSING_PDFTK)
args = [
PDFTK,
"A=" + input_filename, # give it handle 'A'
# handles are one or more UPPER CASE letters
"shuffle",
"Aoddnorth", # for 'A', keep odd pages as they are
"Aevensouth", # for 'A', rotate even pages 180 degrees
"output", output_filename,
]
run(args)
return output_filename | python | def rotate_even_pages_180(input_filename: str, output_filename: str) -> str:
"""
Rotates even-numbered pages 180 degrees.
Returns the output filename.
"""
log.info("Rotating even-numbered pages 180 degrees for long-edge "
"duplex printing")
log.debug("... {!r} -> {!r}", input_filename, output_filename)
require(PDFTK, HELP_MISSING_PDFTK)
args = [
PDFTK,
"A=" + input_filename, # give it handle 'A'
# handles are one or more UPPER CASE letters
"shuffle",
"Aoddnorth", # for 'A', keep odd pages as they are
"Aevensouth", # for 'A', rotate even pages 180 degrees
"output", output_filename,
]
run(args)
return output_filename | [
"def",
"rotate_even_pages_180",
"(",
"input_filename",
":",
"str",
",",
"output_filename",
":",
"str",
")",
"->",
"str",
":",
"log",
".",
"info",
"(",
"\"Rotating even-numbered pages 180 degrees for long-edge \"",
"\"duplex printing\"",
")",
"log",
".",
"debug",
"(",
"\"... {!r} -> {!r}\"",
",",
"input_filename",
",",
"output_filename",
")",
"require",
"(",
"PDFTK",
",",
"HELP_MISSING_PDFTK",
")",
"args",
"=",
"[",
"PDFTK",
",",
"\"A=\"",
"+",
"input_filename",
",",
"# give it handle 'A'",
"# handles are one or more UPPER CASE letters",
"\"shuffle\"",
",",
"\"Aoddnorth\"",
",",
"# for 'A', keep odd pages as they are",
"\"Aevensouth\"",
",",
"# for 'A', rotate even pages 180 degrees",
"\"output\"",
",",
"output_filename",
",",
"]",
"run",
"(",
"args",
")",
"return",
"output_filename"
] | Rotates even-numbered pages 180 degrees.
Returns the output filename. | [
"Rotates",
"even",
"-",
"numbered",
"pages",
"180",
"degrees",
".",
"Returns",
"the",
"output",
"filename",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L304-L323 |
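The call reduces to a single pdftk shuffle; equivalently (filenames illustrative):

#   pdftk A=booklet.pdf shuffle Aoddnorth Aevensouth output booklet_longedge.pdf
rotate_even_pages_180("booklet.pdf", "booklet_longedge.pdf")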
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | convert_to_foldable | def convert_to_foldable(input_filename: str,
output_filename: str,
slice_horiz: int,
slice_vert: int,
overwrite: bool = False,
longedge: bool = False,
latex_paper_size: str = LATEX_PAPER_SIZE_A4) -> bool:
"""
Runs a chain of tasks to convert a PDF to a useful booklet PDF.
"""
if not os.path.isfile(input_filename):
log.warning("Input file does not exist or is not a file")
return False
if not overwrite and os.path.isfile(output_filename):
log.error("Output file exists; not authorized to overwrite (use "
"--overwrite if you are sure)")
return False
log.info("Processing {!r}", input_filename)
with tempfile.TemporaryDirectory() as tmpdir:
log.debug("Using temporary directory {!r}", tmpdir)
intermediate_num = 0
def make_intermediate() -> str:
nonlocal intermediate_num
intermediate_num += 1
return os.path.join(tmpdir,
"intermediate_{}.pdf".format(intermediate_num))
# Run this as a chain, rewriting input_filename at each step:
# Slice, if necessary.
input_filename = slice_pdf(
input_filename=input_filename,
output_filename=make_intermediate(),
slice_horiz=slice_horiz,
slice_vert=slice_vert
)
# Make the final n-up
input_filename = booklet_nup_pdf(
input_filename=input_filename,
output_filename=make_intermediate(),
latex_paper_size=latex_paper_size
)
# Rotate?
if longedge:
input_filename = rotate_even_pages_180(
input_filename=input_filename,
output_filename=make_intermediate(),
)
# Done.
log.info("Writing to {!r}", output_filename)
shutil.move(input_filename, output_filename)
return True | python | def convert_to_foldable(input_filename: str,
output_filename: str,
slice_horiz: int,
slice_vert: int,
overwrite: bool = False,
longedge: bool = False,
latex_paper_size: str = LATEX_PAPER_SIZE_A4) -> bool:
"""
Runs a chain of tasks to convert a PDF to a useful booklet PDF.
"""
if not os.path.isfile(input_filename):
log.warning("Input file does not exist or is not a file")
return False
if not overwrite and os.path.isfile(output_filename):
log.error("Output file exists; not authorized to overwrite (use "
"--overwrite if you are sure)")
return False
log.info("Processing {!r}", input_filename)
with tempfile.TemporaryDirectory() as tmpdir:
log.debug("Using temporary directory {!r}", tmpdir)
intermediate_num = 0
def make_intermediate() -> str:
nonlocal intermediate_num
intermediate_num += 1
return os.path.join(tmpdir,
"intermediate_{}.pdf".format(intermediate_num))
# Run this as a chain, rewriting input_filename at each step:
# Slice, if necessary.
input_filename = slice_pdf(
input_filename=input_filename,
output_filename=make_intermediate(),
slice_horiz=slice_horiz,
slice_vert=slice_vert
)
# Make the final n-up
input_filename = booklet_nup_pdf(
input_filename=input_filename,
output_filename=make_intermediate(),
latex_paper_size=latex_paper_size
)
# Rotate?
if longedge:
input_filename = rotate_even_pages_180(
input_filename=input_filename,
output_filename=make_intermediate(),
)
# Done.
log.info("Writing to {!r}", output_filename)
shutil.move(input_filename, output_filename)
return True | [
"def",
"convert_to_foldable",
"(",
"input_filename",
":",
"str",
",",
"output_filename",
":",
"str",
",",
"slice_horiz",
":",
"int",
",",
"slice_vert",
":",
"int",
",",
"overwrite",
":",
"bool",
"=",
"False",
",",
"longedge",
":",
"bool",
"=",
"False",
",",
"latex_paper_size",
":",
"str",
"=",
"LATEX_PAPER_SIZE_A4",
")",
"->",
"bool",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"input_filename",
")",
":",
"log",
".",
"warning",
"(",
"\"Input file does not exist or is not a file\"",
")",
"return",
"False",
"if",
"not",
"overwrite",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"output_filename",
")",
":",
"log",
".",
"error",
"(",
"\"Output file exists; not authorized to overwrite (use \"",
"\"--overwrite if you are sure)\"",
")",
"return",
"False",
"log",
".",
"info",
"(",
"\"Processing {!r}\"",
",",
"input_filename",
")",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"tmpdir",
":",
"log",
".",
"debug",
"(",
"\"Using temporary directory {!r}\"",
",",
"tmpdir",
")",
"intermediate_num",
"=",
"0",
"def",
"make_intermediate",
"(",
")",
"->",
"str",
":",
"nonlocal",
"intermediate_num",
"intermediate_num",
"+=",
"1",
"return",
"os",
".",
"path",
".",
"join",
"(",
"tmpdir",
",",
"\"intermediate_{}.pdf\"",
".",
"format",
"(",
"intermediate_num",
")",
")",
"# Run this as a chain, rewriting input_filename at each step:",
"# Slice, if necessary.",
"input_filename",
"=",
"slice_pdf",
"(",
"input_filename",
"=",
"input_filename",
",",
"output_filename",
"=",
"make_intermediate",
"(",
")",
",",
"slice_horiz",
"=",
"slice_horiz",
",",
"slice_vert",
"=",
"slice_vert",
")",
"# Make the final n-up",
"input_filename",
"=",
"booklet_nup_pdf",
"(",
"input_filename",
"=",
"input_filename",
",",
"output_filename",
"=",
"make_intermediate",
"(",
")",
",",
"latex_paper_size",
"=",
"latex_paper_size",
")",
"# Rotate?",
"if",
"longedge",
":",
"input_filename",
"=",
"rotate_even_pages_180",
"(",
"input_filename",
"=",
"input_filename",
",",
"output_filename",
"=",
"make_intermediate",
"(",
")",
",",
")",
"# Done.",
"log",
".",
"info",
"(",
"\"Writing to {!r}\"",
",",
"output_filename",
")",
"shutil",
".",
"move",
"(",
"input_filename",
",",
"output_filename",
")",
"return",
"True"
] | Runs a chain of tasks to convert a PDF to a useful booklet PDF. | [
"Runs",
"a",
"chain",
"of",
"tasks",
"to",
"convert",
"a",
"PDF",
"to",
"a",
"useful",
"booklet",
"PDF",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L326-L377 |
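A sketch of driving the whole pipeline from Python (paths and options illustrative):

ok = convert_to_foldable(
    input_filename="/home/user/report.pdf",
    output_filename="/home/user/report_booklet.pdf",
    slice_horiz=1, slice_vert=1,   # no pre-slicing
    longedge=True,                 # rotate versos for long-edge duplex printers
)
if not ok:
    raise SystemExit("conversion refused or failed; see the log")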
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/pdf_to_booklet.py | main | def main() -> None:
"""
Command-line processor. See ``--help`` for details.
"""
main_only_quicksetup_rootlogger(level=logging.DEBUG)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"input_file",
help="Input PDF (which is not modified by this program)")
parser.add_argument(
"output_file",
help="Output PDF")
parser.add_argument(
"--slice_horiz", type=int, default=1,
help="Slice the input PDF first into this many parts horizontally")
parser.add_argument(
"--slice_vert", type=int, default=1,
help="Slice the input PDF first into this many parts vertically")
parser.add_argument(
"--longedge", action="store_true",
help="Create PDF for long-edge duplex printing, not short edge")
parser.add_argument(
"--overwrite", action="store_true",
help="Allow overwriting of an existing output file")
parser.add_argument(
"--unittest", action="store_true",
help="Run unit tests and exit (you must pass dummy values for "
"input/output files to use these tests)")
# ... because requiring dummy input/output filenames for unit testing
# is less confusing for the majority of users than showing syntax in
# which they are optional!
args = parser.parse_args()
if args.unittest:
log.warning("Performing unit tests")
# unittest.main() doesn't play nicely with argparse; they both
# use sys.argv by default (and we end up with what looks like garbage
# from the argparse help facility); but this works:
unittest.main(argv=[sys.argv[0]])
sys.exit(EXIT_SUCCESS)
success = convert_to_foldable(
input_filename=os.path.abspath(args.input_file),
output_filename=os.path.abspath(args.output_file),
slice_horiz=args.slice_horiz,
slice_vert=args.slice_vert,
overwrite=args.overwrite,
longedge=args.longedge
)
sys.exit(EXIT_SUCCESS if success else EXIT_FAILURE) | python | def main() -> None:
"""
Command-line processor. See ``--help`` for details.
"""
main_only_quicksetup_rootlogger(level=logging.DEBUG)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"input_file",
help="Input PDF (which is not modified by this program)")
parser.add_argument(
"output_file",
help="Output PDF")
parser.add_argument(
"--slice_horiz", type=int, default=1,
help="Slice the input PDF first into this many parts horizontally")
parser.add_argument(
"--slice_vert", type=int, default=1,
help="Slice the input PDF first into this many parts vertically")
parser.add_argument(
"--longedge", action="store_true",
help="Create PDF for long-edge duplex printing, not short edge")
parser.add_argument(
"--overwrite", action="store_true",
help="Allow overwriting of an existing output file")
parser.add_argument(
"--unittest", action="store_true",
help="Run unit tests and exit (you must pass dummy values for "
"input/output files to use these tests)")
# ... because requiring dummy input/output filenames for unit testing
# is less confusing for the majority of users than showing syntax in
# which they are optional!
args = parser.parse_args()
if args.unittest:
log.warning("Performing unit tests")
# unittest.main() doesn't play nicely with argparse; they both
# use sys.argv by default (and we end up with what looks like garbage
# from the argparse help facility); but this works:
unittest.main(argv=[sys.argv[0]])
sys.exit(EXIT_SUCCESS)
success = convert_to_foldable(
input_filename=os.path.abspath(args.input_file),
output_filename=os.path.abspath(args.output_file),
slice_horiz=args.slice_horiz,
slice_vert=args.slice_vert,
overwrite=args.overwrite,
longedge=args.longedge
)
sys.exit(EXIT_SUCCESS if success else EXIT_FAILURE) | [
"def",
"main",
"(",
")",
"->",
"None",
":",
"main_only_quicksetup_rootlogger",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"\"input_file\"",
",",
"help",
"=",
"\"Input PDF (which is not modified by this program)\"",
")",
"parser",
".",
"add_argument",
"(",
"\"output_file\"",
",",
"help",
"=",
"\"Output PDF\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--slice_horiz\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"Slice the input PDF first into this many parts horizontally\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--slice_vert\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"Slice the input PDF first into this many parts vertically\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--longedge\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Create PDF for long-edge duplex printing, not short edge\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--overwrite\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Allow overwriting of an existing output file\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--unittest\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Run unit tests and exit (you must pass dummy values for \"",
"\"input/output files to use these tests)\"",
")",
"# ... because requiring dummy input/output filenames for unit testing",
"# is less confusing for the majority of users than showing syntax in",
"# which they are optional!",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"unittest",
":",
"log",
".",
"warning",
"(",
"\"Performing unit tests\"",
")",
"# unittest.main() doesn't play nicely with argparse; they both",
"# use sys.argv by default (and we end up with what looks like garbage",
"# from the argparse help facility); but this works:",
"unittest",
".",
"main",
"(",
"argv",
"=",
"[",
"sys",
".",
"argv",
"[",
"0",
"]",
"]",
")",
"sys",
".",
"exit",
"(",
"EXIT_SUCCESS",
")",
"success",
"=",
"convert_to_foldable",
"(",
"input_filename",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"input_file",
")",
",",
"output_filename",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"output_file",
")",
",",
"slice_horiz",
"=",
"args",
".",
"slice_horiz",
",",
"slice_vert",
"=",
"args",
".",
"slice_vert",
",",
"overwrite",
"=",
"args",
".",
"overwrite",
",",
"longedge",
"=",
"args",
".",
"longedge",
")",
"sys",
".",
"exit",
"(",
"EXIT_SUCCESS",
"if",
"success",
"else",
"EXIT_FAILURE",
")"
] | Command-line processor. See ``--help`` for details. | [
"Command",
"-",
"line",
"processor",
".",
"See",
"--",
"help",
"for",
"details",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/pdf_to_booklet.py#L397-L448 |
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/estimate_mysql_memory_usage.py | get_mysql_vars | def get_mysql_vars(mysql: str,
host: str,
port: int,
user: str) -> Dict[str, str]:
"""
Asks MySQL for its variables and status.
Args:
mysql: ``mysql`` executable filename
host: host name
port: TCP/IP port number
user: username
Returns:
dictionary of MySQL variables/values
"""
cmdargs = [
mysql,
"-h", host,
"-P", str(port),
"-e", "SHOW VARIABLES; SHOW STATUS",
"-u", user,
"-p" # prompt for password
]
log.info("Connecting to MySQL with user: {}", user)
log.debug(cmdargs)
process = subprocess.Popen(cmdargs, stdout=subprocess.PIPE)
out, err = process.communicate()
lines = out.decode("utf8").splitlines()
mysqlvars = {}
for line in lines:
var, val = line.split("\t")
mysqlvars[var] = val
return mysqlvars | python | def get_mysql_vars(mysql: str,
host: str,
port: int,
user: str) -> Dict[str, str]:
"""
Asks MySQL for its variables and status.
Args:
mysql: ``mysql`` executable filename
host: host name
port: TCP/IP port number
user: username
Returns:
dictionary of MySQL variables/values
"""
cmdargs = [
mysql,
"-h", host,
"-P", str(port),
"-e", "SHOW VARIABLES; SHOW STATUS",
"-u", user,
"-p" # prompt for password
]
log.info("Connecting to MySQL with user: {}", user)
log.debug(cmdargs)
process = subprocess.Popen(cmdargs, stdout=subprocess.PIPE)
out, err = process.communicate()
lines = out.decode("utf8").splitlines()
mysqlvars = {}
for line in lines:
var, val = line.split("\t")
mysqlvars[var] = val
return mysqlvars | [
"def",
"get_mysql_vars",
"(",
"mysql",
":",
"str",
",",
"host",
":",
"str",
",",
"port",
":",
"int",
",",
"user",
":",
"str",
")",
"->",
"Dict",
"[",
"str",
",",
"str",
"]",
":",
"cmdargs",
"=",
"[",
"mysql",
",",
"\"-h\"",
",",
"host",
",",
"\"-P\"",
",",
"str",
"(",
"port",
")",
",",
"\"-e\"",
",",
"\"SHOW VARIABLES; SHOW STATUS\"",
",",
"\"-u\"",
",",
"user",
",",
"\"-p\"",
"# prompt for password",
"]",
"log",
".",
"info",
"(",
"\"Connecting to MySQL with user: {}\"",
",",
"user",
")",
"log",
".",
"debug",
"(",
"cmdargs",
")",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"cmdargs",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"out",
",",
"err",
"=",
"process",
".",
"communicate",
"(",
")",
"lines",
"=",
"out",
".",
"decode",
"(",
"\"utf8\"",
")",
".",
"splitlines",
"(",
")",
"mysqlvars",
"=",
"{",
"}",
"for",
"line",
"in",
"lines",
":",
"var",
",",
"val",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"mysqlvars",
"[",
"var",
"]",
"=",
"val",
"return",
"mysqlvars"
] | Asks MySQL for its variables and status.
Args:
mysql: ``mysql`` executable filename
host: host name
port: TCP/IP port number
user: username
Returns:
dictionary of MySQL variables/values | [
"Asks",
"MySQL",
"for",
"its",
"variables",
"and",
"status",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/estimate_mysql_memory_usage.py#L54-L88 |
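A usage sketch; note that all values arrive as strings, and that mysql's batch-mode output may include a ``Variable_name``/``Value`` header row, which the naive tab-split would store like any other pair (an assumption about mysql's output format, not confirmed by the source):

variables = get_mysql_vars("mysql", "127.0.0.1", 3306, "root")  # prompts for the password
pool_bytes = int(variables["innodb_buffer_pool_size"])  # stored as a string of bytes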
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/estimate_mysql_memory_usage.py | val_mb | def val_mb(valstr: Union[int, str]) -> str:
"""
Converts a value in bytes (in string format) to megabytes.
"""
try:
return "{:.3f}".format(int(valstr) / (1024 * 1024))
except (TypeError, ValueError):
return '?' | python | def val_mb(valstr: Union[int, str]) -> str:
"""
Converts a value in bytes (in string format) to megabytes.
"""
try:
return "{:.3f}".format(int(valstr) / (1024 * 1024))
except (TypeError, ValueError):
return '?' | [
"def",
"val_mb",
"(",
"valstr",
":",
"Union",
"[",
"int",
",",
"str",
"]",
")",
"->",
"str",
":",
"try",
":",
"return",
"\"{:.3f}\"",
".",
"format",
"(",
"int",
"(",
"valstr",
")",
"/",
"(",
"1024",
"*",
"1024",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"'?'"
] | Converts a value in bytes (in string format) to megabytes. | [
"Converts",
"a",
"value",
"in",
"bytes",
"(",
"in",
"string",
"format",
")",
"to",
"megabytes",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/estimate_mysql_memory_usage.py#L91-L98 |
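Two self-contained checks of the conversion and its fallback (note the division by 1024*1024 means binary megabytes):

assert val_mb(1048576) == "1.000"      # 1 MiB
assert val_mb("not a number") == "?"   # unparseable input degrades to '?'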
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/estimate_mysql_memory_usage.py | add_var_mb | def add_var_mb(table: PrettyTable,
vardict: Dict[str, str],
varname: str) -> None:
"""
Adds a row to ``table`` for ``varname``, in megabytes.
"""
valstr = vardict.get(varname, None)
table.add_row([varname, val_mb(valstr), UNITS_MB]) | python | def add_var_mb(table: PrettyTable,
vardict: Dict[str, str],
varname: str) -> None:
"""
Adds a row to ``table`` for ``varname``, in megabytes.
"""
valstr = vardict.get(varname, None)
table.add_row([varname, val_mb(valstr), UNITS_MB]) | [
"def",
"add_var_mb",
"(",
"table",
":",
"PrettyTable",
",",
"vardict",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
",",
"varname",
":",
"str",
")",
"->",
"None",
":",
"valstr",
"=",
"vardict",
".",
"get",
"(",
"varname",
",",
"None",
")",
"table",
".",
"add_row",
"(",
"[",
"varname",
",",
"val_mb",
"(",
"valstr",
")",
",",
"UNITS_MB",
"]",
")"
] | Adds a row to ``table`` for ``varname``, in megabytes. | [
"Adds",
"a",
"row",
"to",
"table",
"for",
"varname",
"in",
"megabytes",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/estimate_mysql_memory_usage.py#L108-L115 |
RudolfCardinal/pythonlib | cardinal_pythonlib/tools/estimate_mysql_memory_usage.py | main | def main():
"""
Command-line processor. See ``--help`` for details.
"""
main_only_quicksetup_rootlogger(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument(
"--mysql", default="mysql",
help="MySQL program (default=mysql)")
parser.add_argument(
"--host", default="127.0.0.1",
help="MySQL server/host (prefer '127.0.0.1' to 'localhost')")
parser.add_argument(
"--port", type=int, default=MYSQL_DEFAULT_PORT,
help="MySQL port (default={})".format(MYSQL_DEFAULT_PORT))
parser.add_argument(
"--user", default=MYSQL_DEFAULT_USER,
help="MySQL user (default={})".format(MYSQL_DEFAULT_USER))
args = parser.parse_args()
vardict = get_mysql_vars(
mysql=args.mysql,
host=args.host,
port=args.port,
user=args.user,
)
max_conn = int(vardict["max_connections"])
max_used_conn = int(vardict["Max_used_connections"])
base_mem = (
int(vardict["key_buffer_size"]) +
int(vardict["query_cache_size"]) +
int(vardict["innodb_buffer_pool_size"]) +
# int(vardict["innodb_additional_mem_pool_size"]) +
int(vardict["innodb_log_buffer_size"])
)
mem_per_conn = (
int(vardict["read_buffer_size"]) +
int(vardict["read_rnd_buffer_size"]) +
int(vardict["sort_buffer_size"]) +
int(vardict["join_buffer_size"]) +
int(vardict["binlog_cache_size"]) +
int(vardict["thread_stack"]) +
int(vardict["tmp_table_size"])
)
mem_total_min = base_mem + mem_per_conn * max_used_conn
mem_total_max = base_mem + mem_per_conn * max_conn
table = PrettyTable(["Variable", "Value", "Units"])
table.align["Variable"] = "l"
table.align["Value"] = "r"
table.align["Units"] = "l"
add_var_mb(table, vardict, "key_buffer_size")
add_var_mb(table, vardict, "query_cache_size")
add_var_mb(table, vardict, "innodb_buffer_pool_size")
# print_var_mb(table, vardict, "innodb_additional_mem_pool_size")
add_var_mb(table, vardict, "innodb_log_buffer_size")
add_blank_row(table)
table.add_row(["BASE MEMORY", val_mb(base_mem), UNITS_MB])
add_blank_row(table)
add_var_mb(table, vardict, "sort_buffer_size")
add_var_mb(table, vardict, "read_buffer_size")
add_var_mb(table, vardict, "read_rnd_buffer_size")
add_var_mb(table, vardict, "join_buffer_size")
add_var_mb(table, vardict, "thread_stack")
add_var_mb(table, vardict, "binlog_cache_size")
add_var_mb(table, vardict, "tmp_table_size")
add_blank_row(table)
table.add_row(["MEMORY PER CONNECTION", val_mb(mem_per_conn), UNITS_MB])
add_blank_row(table)
table.add_row(["Max_used_connections", val_int(max_used_conn), ''])
table.add_row(["max_connections", val_int(max_conn), ''])
add_blank_row(table)
table.add_row(["TOTAL (MIN)", val_mb(mem_total_min), UNITS_MB])
table.add_row(["TOTAL (MAX)", val_mb(mem_total_max), UNITS_MB])
print(table.get_string()) | python | def main():
"""
Command-line processor. See ``--help`` for details.
"""
main_only_quicksetup_rootlogger(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument(
"--mysql", default="mysql",
help="MySQL program (default=mysql)")
parser.add_argument(
"--host", default="127.0.0.1",
help="MySQL server/host (prefer '127.0.0.1' to 'localhost')")
parser.add_argument(
"--port", type=int, default=MYSQL_DEFAULT_PORT,
help="MySQL port (default={})".format(MYSQL_DEFAULT_PORT))
parser.add_argument(
"--user", default=MYSQL_DEFAULT_USER,
help="MySQL user (default={})".format(MYSQL_DEFAULT_USER))
args = parser.parse_args()
vardict = get_mysql_vars(
mysql=args.mysql,
host=args.host,
port=args.port,
user=args.user,
)
max_conn = int(vardict["max_connections"])
max_used_conn = int(vardict["Max_used_connections"])
base_mem = (
int(vardict["key_buffer_size"]) +
int(vardict["query_cache_size"]) +
int(vardict["innodb_buffer_pool_size"]) +
# int(vardict["innodb_additional_mem_pool_size"]) +
int(vardict["innodb_log_buffer_size"])
)
mem_per_conn = (
int(vardict["read_buffer_size"]) +
int(vardict["read_rnd_buffer_size"]) +
int(vardict["sort_buffer_size"]) +
int(vardict["join_buffer_size"]) +
int(vardict["binlog_cache_size"]) +
int(vardict["thread_stack"]) +
int(vardict["tmp_table_size"])
)
mem_total_min = base_mem + mem_per_conn * max_used_conn
mem_total_max = base_mem + mem_per_conn * max_conn
table = PrettyTable(["Variable", "Value", "Units"])
table.align["Variable"] = "l"
table.align["Value"] = "r"
table.align["Units"] = "l"
add_var_mb(table, vardict, "key_buffer_size")
add_var_mb(table, vardict, "query_cache_size")
add_var_mb(table, vardict, "innodb_buffer_pool_size")
# print_var_mb(table, vardict, "innodb_additional_mem_pool_size")
add_var_mb(table, vardict, "innodb_log_buffer_size")
add_blank_row(table)
table.add_row(["BASE MEMORY", val_mb(base_mem), UNITS_MB])
add_blank_row(table)
add_var_mb(table, vardict, "sort_buffer_size")
add_var_mb(table, vardict, "read_buffer_size")
add_var_mb(table, vardict, "read_rnd_buffer_size")
add_var_mb(table, vardict, "join_buffer_size")
add_var_mb(table, vardict, "thread_stack")
add_var_mb(table, vardict, "binlog_cache_size")
add_var_mb(table, vardict, "tmp_table_size")
add_blank_row(table)
table.add_row(["MEMORY PER CONNECTION", val_mb(mem_per_conn), UNITS_MB])
add_blank_row(table)
table.add_row(["Max_used_connections", val_int(max_used_conn), ''])
table.add_row(["max_connections", val_int(max_conn), ''])
add_blank_row(table)
table.add_row(["TOTAL (MIN)", val_mb(mem_total_min), UNITS_MB])
table.add_row(["TOTAL (MAX)", val_mb(mem_total_max), UNITS_MB])
print(table.get_string()) | [
"def",
"main",
"(",
")",
":",
"main_only_quicksetup_rootlogger",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"--mysql\"",
",",
"default",
"=",
"\"mysql\"",
",",
"help",
"=",
"\"MySQL program (default=mysql)\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--host\"",
",",
"default",
"=",
"\"127.0.0.1\"",
",",
"help",
"=",
"\"MySQL server/host (prefer '127.0.0.1' to 'localhost')\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--port\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"MYSQL_DEFAULT_PORT",
",",
"help",
"=",
"\"MySQL port (default={})\"",
".",
"format",
"(",
"MYSQL_DEFAULT_PORT",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"--user\"",
",",
"default",
"=",
"MYSQL_DEFAULT_USER",
",",
"help",
"=",
"\"MySQL user (default={})\"",
".",
"format",
"(",
"MYSQL_DEFAULT_USER",
")",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"vardict",
"=",
"get_mysql_vars",
"(",
"mysql",
"=",
"args",
".",
"mysql",
",",
"host",
"=",
"args",
".",
"host",
",",
"port",
"=",
"args",
".",
"port",
",",
"user",
"=",
"args",
".",
"user",
",",
")",
"max_conn",
"=",
"int",
"(",
"vardict",
"[",
"\"max_connections\"",
"]",
")",
"max_used_conn",
"=",
"int",
"(",
"vardict",
"[",
"\"Max_used_connections\"",
"]",
")",
"base_mem",
"=",
"(",
"int",
"(",
"vardict",
"[",
"\"key_buffer_size\"",
"]",
")",
"+",
"int",
"(",
"vardict",
"[",
"\"query_cache_size\"",
"]",
")",
"+",
"int",
"(",
"vardict",
"[",
"\"innodb_buffer_pool_size\"",
"]",
")",
"+",
"# int(vardict[\"innodb_additional_mem_pool_size\"]) +",
"int",
"(",
"vardict",
"[",
"\"innodb_log_buffer_size\"",
"]",
")",
")",
"mem_per_conn",
"=",
"(",
"int",
"(",
"vardict",
"[",
"\"read_buffer_size\"",
"]",
")",
"+",
"int",
"(",
"vardict",
"[",
"\"read_rnd_buffer_size\"",
"]",
")",
"+",
"int",
"(",
"vardict",
"[",
"\"sort_buffer_size\"",
"]",
")",
"+",
"int",
"(",
"vardict",
"[",
"\"join_buffer_size\"",
"]",
")",
"+",
"int",
"(",
"vardict",
"[",
"\"binlog_cache_size\"",
"]",
")",
"+",
"int",
"(",
"vardict",
"[",
"\"thread_stack\"",
"]",
")",
"+",
"int",
"(",
"vardict",
"[",
"\"tmp_table_size\"",
"]",
")",
")",
"mem_total_min",
"=",
"base_mem",
"+",
"mem_per_conn",
"*",
"max_used_conn",
"mem_total_max",
"=",
"base_mem",
"+",
"mem_per_conn",
"*",
"max_conn",
"table",
"=",
"PrettyTable",
"(",
"[",
"\"Variable\"",
",",
"\"Value\"",
",",
"\"Units\"",
"]",
")",
"table",
".",
"align",
"[",
"\"Variable\"",
"]",
"=",
"\"l\"",
"table",
".",
"align",
"[",
"\"Value\"",
"]",
"=",
"\"r\"",
"table",
".",
"align",
"[",
"\"Units\"",
"]",
"=",
"\"l\"",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"key_buffer_size\"",
")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"query_cache_size\"",
")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"innodb_buffer_pool_size\"",
")",
"# print_var_mb(table, vardict, \"innodb_additional_mem_pool_size\")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"innodb_log_buffer_size\"",
")",
"add_blank_row",
"(",
"table",
")",
"table",
".",
"add_row",
"(",
"[",
"\"BASE MEMORY\"",
",",
"val_mb",
"(",
"base_mem",
")",
",",
"UNITS_MB",
"]",
")",
"add_blank_row",
"(",
"table",
")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"sort_buffer_size\"",
")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"read_buffer_size\"",
")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"read_rnd_buffer_size\"",
")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"join_buffer_size\"",
")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"thread_stack\"",
")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"binlog_cache_size\"",
")",
"add_var_mb",
"(",
"table",
",",
"vardict",
",",
"\"tmp_table_size\"",
")",
"add_blank_row",
"(",
"table",
")",
"table",
".",
"add_row",
"(",
"[",
"\"MEMORY PER CONNECTION\"",
",",
"val_mb",
"(",
"mem_per_conn",
")",
",",
"UNITS_MB",
"]",
")",
"add_blank_row",
"(",
"table",
")",
"table",
".",
"add_row",
"(",
"[",
"\"Max_used_connections\"",
",",
"val_int",
"(",
"max_used_conn",
")",
",",
"''",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"\"max_connections\"",
",",
"val_int",
"(",
"max_conn",
")",
",",
"''",
"]",
")",
"add_blank_row",
"(",
"table",
")",
"table",
".",
"add_row",
"(",
"[",
"\"TOTAL (MIN)\"",
",",
"val_mb",
"(",
"mem_total_min",
")",
",",
"UNITS_MB",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"\"TOTAL (MAX)\"",
",",
"val_mb",
"(",
"mem_total_max",
")",
",",
"UNITS_MB",
"]",
")",
"print",
"(",
"table",
".",
"get_string",
"(",
")",
")"
] | Command-line processor. See ``--help`` for details. | [
"Command",
"-",
"line",
"processor",
".",
"See",
"--",
"help",
"for",
"details",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/estimate_mysql_memory_usage.py#L125-L201 |
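A minimal sketch of the arithmetic this command-line processor performs, using hypothetical server-variable values (bytes) in place of the output of get_mysql_vars():

vardict = {
    "key_buffer_size": 16 * 1024 ** 2,
    "query_cache_size": 0,
    "innodb_buffer_pool_size": 128 * 1024 ** 2,
    "innodb_log_buffer_size": 16 * 1024 ** 2,
    "read_buffer_size": 128 * 1024,
    "read_rnd_buffer_size": 256 * 1024,
    "sort_buffer_size": 256 * 1024,
    "join_buffer_size": 256 * 1024,
    "binlog_cache_size": 32 * 1024,
    "thread_stack": 256 * 1024,
    "tmp_table_size": 16 * 1024 ** 2,
    "max_connections": 151,
    "Max_used_connections": 12,
}
# Fixed overhead shared by all connections:
base_mem = sum(int(vardict[k]) for k in (
    "key_buffer_size", "query_cache_size",
    "innodb_buffer_pool_size", "innodb_log_buffer_size"))
# Per-connection buffers:
mem_per_conn = sum(int(vardict[k]) for k in (
    "read_buffer_size", "read_rnd_buffer_size", "sort_buffer_size",
    "join_buffer_size", "binlog_cache_size", "thread_stack",
    "tmp_table_size"))
print(base_mem + mem_per_conn * int(vardict["Max_used_connections"]))  # TOTAL (MIN)
print(base_mem + mem_per_conn * int(vardict["max_connections"]))       # TOTAL (MAX)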
RudolfCardinal/pythonlib | cardinal_pythonlib/openxml/grep_in_openxml.py | report_hit_filename | def report_hit_filename(zipfilename: str, contentsfilename: str,
show_inner_file: bool) -> None:
"""
For "hits": prints either the ``.zip`` filename, or the ``.zip`` filename
and the inner filename.
Args:
zipfilename: filename of the ``.zip`` file
contentsfilename: filename of the inner file
show_inner_file: if ``True``, show both; if ``False``, show just the
``.zip`` filename
Returns:
"""
if show_inner_file:
print("{} [{}]".format(zipfilename, contentsfilename))
else:
print(zipfilename) | python | def report_hit_filename(zipfilename: str, contentsfilename: str,
show_inner_file: bool) -> None:
"""
For "hits": prints either the ``.zip`` filename, or the ``.zip`` filename
and the inner filename.
Args:
zipfilename: filename of the ``.zip`` file
contentsfilename: filename of the inner file
show_inner_file: if ``True``, show both; if ``False``, show just the
``.zip`` filename
Returns:
"""
if show_inner_file:
print("{} [{}]".format(zipfilename, contentsfilename))
else:
print(zipfilename) | [
"def",
"report_hit_filename",
"(",
"zipfilename",
":",
"str",
",",
"contentsfilename",
":",
"str",
",",
"show_inner_file",
":",
"bool",
")",
"->",
"None",
":",
"if",
"show_inner_file",
":",
"print",
"(",
"\"{} [{}]\"",
".",
"format",
"(",
"zipfilename",
",",
"contentsfilename",
")",
")",
"else",
":",
"print",
"(",
"zipfilename",
")"
] | For "hits": prints either the ``.zip`` filename, or the ``.zip`` filename
and the inner filename.
Args:
zipfilename: filename of the ``.zip`` file
contentsfilename: filename of the inner file
show_inner_file: if ``True``, show both; if ``False``, show just the
``.zip`` filename
Returns: | [
"For",
"hits",
":",
"prints",
"either",
"the",
".",
"zip",
"filename",
"or",
"the",
".",
"zip",
"filename",
"and",
"the",
"inner",
"filename",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/openxml/grep_in_openxml.py#L57-L75 |
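A two-line usage sketch for report_hit_filename (assumes the function above is in scope; the filenames are hypothetical):

report_hit_filename("report.docx", "word/document.xml", show_inner_file=True)   # -> report.docx [word/document.xml]
report_hit_filename("report.docx", "word/document.xml", show_inner_file=False)  # -> report.docx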
RudolfCardinal/pythonlib | cardinal_pythonlib/openxml/grep_in_openxml.py | report_line | def report_line(zipfilename: str, contentsfilename: str, line: str,
show_inner_file: bool) -> None:
"""
Prints a line from a file, with the ``.zip`` filename and optionally also
the inner filename.
Args:
zipfilename: filename of the ``.zip`` file
contentsfilename: filename of the inner file
line: the line from the inner file
show_inner_file: if ``True``, show both filenames; if ``False``, show
just the ``.zip`` filename
"""
if show_inner_file:
print("{} [{}]: {}".format(zipfilename, contentsfilename, line))
else:
print("{}: {}".format(zipfilename, line)) | python | def report_line(zipfilename: str, contentsfilename: str, line: str,
show_inner_file: bool) -> None:
"""
Prints a line from a file, with the ``.zip`` filename and optionally also
the inner filename.
Args:
zipfilename: filename of the ``.zip`` file
contentsfilename: filename of the inner file
line: the line from the inner file
show_inner_file: if ``True``, show both filenames; if ``False``, show
just the ``.zip`` filename
"""
if show_inner_file:
print("{} [{}]: {}".format(zipfilename, contentsfilename, line))
else:
print("{}: {}".format(zipfilename, line)) | [
"def",
"report_line",
"(",
"zipfilename",
":",
"str",
",",
"contentsfilename",
":",
"str",
",",
"line",
":",
"str",
",",
"show_inner_file",
":",
"bool",
")",
"->",
"None",
":",
"if",
"show_inner_file",
":",
"print",
"(",
"\"{} [{}]: {}\"",
".",
"format",
"(",
"zipfilename",
",",
"contentsfilename",
",",
"line",
")",
")",
"else",
":",
"print",
"(",
"\"{}: {}\"",
".",
"format",
"(",
"zipfilename",
",",
"line",
")",
")"
] | Prints a line from a file, with the ``.zip`` filename and optionally also
the inner filename.
Args:
zipfilename: filename of the ``.zip`` file
contentsfilename: filename of the inner file
line: the line from the inner file
show_inner_file: if ``True``, show both filenames; if ``False``, show
just the ``.zip`` filename | [
"Prints",
"a",
"line",
"from",
"a",
"file",
"with",
"the",
".",
"zip",
"filename",
"and",
"optionally",
"also",
"the",
"inner",
"filename",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/openxml/grep_in_openxml.py#L85-L101 |
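The line-reporting counterpart, sketched the same way (hypothetical inputs; assumes report_line from above is in scope):

report_line("notes.pptx", "ppt/slides/slide1.xml", "<a:t>armadillo</a:t>",
            show_inner_file=True)
# -> notes.pptx [ppt/slides/slide1.xml]: <a:t>armadillo</a:t>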
RudolfCardinal/pythonlib | cardinal_pythonlib/openxml/grep_in_openxml.py | parse_zip | def parse_zip(zipfilename: str,
regex: Pattern,
invert_match: bool,
files_with_matches: bool,
files_without_match: bool,
grep_inner_file_name: bool,
show_inner_file: bool) -> None:
"""
Implement a "grep within an OpenXML file" for a single OpenXML file, which
is by definition a ``.zip`` file.
Args:
zipfilename: name of the OpenXML (zip) file
regex: regular expression to match
invert_match: find files that do NOT match, instead of ones that do?
files_with_matches: show filenames of files with a match?
files_without_match: show filenames of files with no match?
grep_inner_file_name: search the names of "inner" files, rather than
their contents?
show_inner_file: show the names of the "inner" files, not just the
"outer" (OpenXML) file?
"""
assert not (files_without_match and files_with_matches)
report_lines = (not files_without_match) and (not files_with_matches)
report_hit_lines = report_lines and not invert_match
report_miss_lines = report_lines and invert_match
log.debug("Checking ZIP: " + zipfilename)
found_in_zip = False
try:
with ZipFile(zipfilename, 'r') as zf:
for contentsfilename in zf.namelist():
log.debug("... checking file: " + contentsfilename)
if grep_inner_file_name:
found_in_filename = bool(regex.search(contentsfilename))
found_in_zip = found_in_zip or found_in_filename
if files_with_matches and found_in_zip:
report_hit_filename(zipfilename, contentsfilename,
show_inner_file)
return
if ((report_hit_lines and found_in_filename) or
(report_miss_lines and not found_in_filename)):
report_line(zipfilename, contentsfilename,
contentsfilename, show_inner_file)
else:
try:
with zf.open(contentsfilename, 'r') as file:
try:
for line in file.readlines():
# log.debug("line: {!r}", line)
found_in_line = bool(regex.search(line))
found_in_zip = found_in_zip or found_in_line
if files_with_matches and found_in_zip:
report_hit_filename(zipfilename,
contentsfilename,
show_inner_file)
return
if ((report_hit_lines and found_in_line) or
(report_miss_lines and
not found_in_line)):
report_line(zipfilename,
contentsfilename,
line, show_inner_file)
except EOFError:
pass
except RuntimeError as e:
log.warning(
"RuntimeError whilst processing {} [{}]: probably "
"encrypted contents; error was {!r}",
zipfilename, contentsfilename, e)
except (zlib.error, BadZipFile) as e:
log.debug("Invalid zip: {}; error was {!r}", zipfilename, e)
if files_without_match and not found_in_zip:
report_miss_filename(zipfilename) | python | def parse_zip(zipfilename: str,
regex: Pattern,
invert_match: bool,
files_with_matches: bool,
files_without_match: bool,
grep_inner_file_name: bool,
show_inner_file: bool) -> None:
"""
Implement a "grep within an OpenXML file" for a single OpenXML file, which
is by definition a ``.zip`` file.
Args:
zipfilename: name of the OpenXML (zip) file
regex: regular expression to match
invert_match: find files that do NOT match, instead of ones that do?
files_with_matches: show filenames of files with a match?
files_without_match: show filenames of files with no match?
grep_inner_file_name: search the names of "inner" files, rather than
their contents?
show_inner_file: show the names of the "inner" files, not just the
"outer" (OpenXML) file?
"""
assert not (files_without_match and files_with_matches)
report_lines = (not files_without_match) and (not files_with_matches)
report_hit_lines = report_lines and not invert_match
report_miss_lines = report_lines and invert_match
log.debug("Checking ZIP: " + zipfilename)
found_in_zip = False
try:
with ZipFile(zipfilename, 'r') as zf:
for contentsfilename in zf.namelist():
log.debug("... checking file: " + contentsfilename)
if grep_inner_file_name:
found_in_filename = bool(regex.search(contentsfilename))
found_in_zip = found_in_zip or found_in_filename
if files_with_matches and found_in_zip:
report_hit_filename(zipfilename, contentsfilename,
show_inner_file)
return
if ((report_hit_lines and found_in_filename) or
(report_miss_lines and not found_in_filename)):
report_line(zipfilename, contentsfilename,
contentsfilename, show_inner_file)
else:
try:
with zf.open(contentsfilename, 'r') as file:
try:
for line in file.readlines():
# log.debug("line: {!r}", line)
found_in_line = bool(regex.search(line))
found_in_zip = found_in_zip or found_in_line
if files_with_matches and found_in_zip:
report_hit_filename(zipfilename,
contentsfilename,
show_inner_file)
return
if ((report_hit_lines and found_in_line) or
(report_miss_lines and
not found_in_line)):
report_line(zipfilename,
contentsfilename,
line, show_inner_file)
except EOFError:
pass
except RuntimeError as e:
log.warning(
"RuntimeError whilst processing {} [{}]: probably "
"encrypted contents; error was {!r}",
zipfilename, contentsfilename, e)
except (zlib.error, BadZipFile) as e:
log.debug("Invalid zip: {}; error was {!r}", zipfilename, e)
if files_without_match and not found_in_zip:
report_miss_filename(zipfilename) | [
"def",
"parse_zip",
"(",
"zipfilename",
":",
"str",
",",
"regex",
":",
"Pattern",
",",
"invert_match",
":",
"bool",
",",
"files_with_matches",
":",
"bool",
",",
"files_without_match",
":",
"bool",
",",
"grep_inner_file_name",
":",
"bool",
",",
"show_inner_file",
":",
"bool",
")",
"->",
"None",
":",
"assert",
"not",
"(",
"files_without_match",
"and",
"files_with_matches",
")",
"report_lines",
"=",
"(",
"not",
"files_without_match",
")",
"and",
"(",
"not",
"files_with_matches",
")",
"report_hit_lines",
"=",
"report_lines",
"and",
"not",
"invert_match",
"report_miss_lines",
"=",
"report_lines",
"and",
"invert_match",
"log",
".",
"debug",
"(",
"\"Checking ZIP: \"",
"+",
"zipfilename",
")",
"found_in_zip",
"=",
"False",
"try",
":",
"with",
"ZipFile",
"(",
"zipfilename",
",",
"'r'",
")",
"as",
"zf",
":",
"for",
"contentsfilename",
"in",
"zf",
".",
"namelist",
"(",
")",
":",
"log",
".",
"debug",
"(",
"\"... checking file: \"",
"+",
"contentsfilename",
")",
"if",
"grep_inner_file_name",
":",
"found_in_filename",
"=",
"bool",
"(",
"regex",
".",
"search",
"(",
"contentsfilename",
")",
")",
"found_in_zip",
"=",
"found_in_zip",
"or",
"found_in_filename",
"if",
"files_with_matches",
"and",
"found_in_zip",
":",
"report_hit_filename",
"(",
"zipfilename",
",",
"contentsfilename",
",",
"show_inner_file",
")",
"return",
"if",
"(",
"(",
"report_hit_lines",
"and",
"found_in_filename",
")",
"or",
"(",
"report_miss_lines",
"and",
"not",
"found_in_filename",
")",
")",
":",
"report_line",
"(",
"zipfilename",
",",
"contentsfilename",
",",
"contentsfilename",
",",
"show_inner_file",
")",
"else",
":",
"try",
":",
"with",
"zf",
".",
"open",
"(",
"contentsfilename",
",",
"'r'",
")",
"as",
"file",
":",
"try",
":",
"for",
"line",
"in",
"file",
".",
"readlines",
"(",
")",
":",
"# log.debug(\"line: {!r}\", line)",
"found_in_line",
"=",
"bool",
"(",
"regex",
".",
"search",
"(",
"line",
")",
")",
"found_in_zip",
"=",
"found_in_zip",
"or",
"found_in_line",
"if",
"files_with_matches",
"and",
"found_in_zip",
":",
"report_hit_filename",
"(",
"zipfilename",
",",
"contentsfilename",
",",
"show_inner_file",
")",
"return",
"if",
"(",
"(",
"report_hit_lines",
"and",
"found_in_line",
")",
"or",
"(",
"report_miss_lines",
"and",
"not",
"found_in_line",
")",
")",
":",
"report_line",
"(",
"zipfilename",
",",
"contentsfilename",
",",
"line",
",",
"show_inner_file",
")",
"except",
"EOFError",
":",
"pass",
"except",
"RuntimeError",
"as",
"e",
":",
"log",
".",
"warning",
"(",
"\"RuntimeError whilst processing {} [{}]: probably \"",
"\"encrypted contents; error was {!r}\"",
",",
"zipfilename",
",",
"contentsfilename",
",",
"e",
")",
"except",
"(",
"zlib",
".",
"error",
",",
"BadZipFile",
")",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"\"Invalid zip: {}; error was {!r}\"",
",",
"zipfilename",
",",
"e",
")",
"if",
"files_without_match",
"and",
"not",
"found_in_zip",
":",
"report_miss_filename",
"(",
"zipfilename",
")"
] | Implement a "grep within an OpenXML file" for a single OpenXML file, which
is by definition a ``.zip`` file.
Args:
zipfilename: name of the OpenXML (zip) file
regex: regular expression to match
invert_match: find files that do NOT match, instead of ones that do?
files_with_matches: show filenames of files with a match?
files_without_match: show filenames of files with no match?
grep_inner_file_name: search the names of "inner" files, rather than
their contents?
show_inner_file: show the names of the "inner" files, not just the
"outer" (OpenXML) file? | [
"Implement",
"a",
"grep",
"within",
"an",
"OpenXML",
"file",
"for",
"a",
"single",
"OpenXML",
"file",
"which",
"is",
"by",
"definition",
"a",
".",
"zip",
"file",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/openxml/grep_in_openxml.py#L104-L176 |
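A minimal driver sketch for parse_zip, assuming the module's imports and helpers are in scope. test.docx is a hypothetical OpenXML file; the pattern is compiled from bytes because the inner files are read in binary mode (cf. args.pattern.encode(...) in main() below):

import re

regex = re.compile(rb"armadillo", re.IGNORECASE)  # bytes pattern for binary-mode inner files
parse_zip(
    zipfilename="test.docx",
    regex=regex,
    invert_match=False,
    files_with_matches=True,     # print just the .docx name on the first hit
    files_without_match=False,
    grep_inner_file_name=False,  # search contents, not inner filenames
    show_inner_file=False,
)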
RudolfCardinal/pythonlib | cardinal_pythonlib/openxml/grep_in_openxml.py | main | def main() -> None:
"""
Command-line handler for the ``grep_in_openxml`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
Performs a grep (global-regular-expression-print) search of files in OpenXML
format, which is to say inside ZIP files.
Note that you can chain; for example, to search for OpenXML files containing
both "armadillo" and "bonobo", you can do:
grep_in_openxml -l armadillo *.pptx | grep_in_openxml -x -l bonobo
^^ ^^
print filenames read filenames from stdin
"""
)
parser.add_argument(
"pattern",
help="Regular expression pattern to apply."
)
parser.add_argument(
"filename", nargs="*",
help="File(s) to check. You can also specify directores if you use "
"--recursive"
)
parser.add_argument(
"--filenames_from_stdin", "-x", action="store_true",
help="Take filenames from stdin instead, one line per filename "
"(useful for chained grep)."
)
parser.add_argument(
"--recursive", action="store_true",
help="Allow search to descend recursively into any directories "
"encountered."
)
# Flag abbreviations to match grep:
parser.add_argument(
"--ignore_case", "-i", action="store_true",
help="Ignore case"
)
parser.add_argument(
"--invert_match", "-v", action="store_true",
help="Invert match"
)
parser.add_argument(
"--files_with_matches", "-l", action="store_true",
help="Show filenames of files with matches"
)
parser.add_argument(
"--files_without_match", "-L", action="store_true",
help="Show filenames of files with no match"
)
parser.add_argument(
"--grep_inner_file_name", action="store_true",
help="Search the NAMES of the inner files, not their contents."
)
parser.add_argument(
"--show_inner_file", action="store_true",
help="For hits, show the filenames of inner files, within each ZIP."
)
parser.add_argument(
"--nprocesses", type=int, default=multiprocessing.cpu_count(),
help="Specify the number of processes to run in parallel."
)
parser.add_argument(
"--verbose", action="store_true",
help="Verbose output"
)
args = parser.parse_args()
main_only_quicksetup_rootlogger(
level=logging.DEBUG if args.verbose else logging.INFO)
if args.files_with_matches and args.files_without_match:
raise ValueError("Can't specify both --files_with_matches (-l) and "
"--files_without_match (-L)!")
if bool(args.filenames_from_stdin) == bool(args.filename):
raise ValueError("Specify --filenames_from_stdin or filenames on the "
"command line, but not both")
# Compile regular expression
if args.grep_inner_file_name:
final_pattern = args.pattern
else:
encoding = getdefaultencoding()
final_pattern = args.pattern.encode(encoding)
flags = re.IGNORECASE if args.ignore_case else 0
log.debug("Using regular expression {!r} with flags {!r}",
final_pattern, flags)
regex = re.compile(final_pattern, flags)
# Set up pool for parallel processing
pool = multiprocessing.Pool(processes=args.nprocesses)
# Iterate through files
parse_kwargs = dict(
regex=regex,
invert_match=args.invert_match,
files_with_matches=args.files_with_matches,
files_without_match=args.files_without_match,
grep_inner_file_name=args.grep_inner_file_name,
show_inner_file=args.show_inner_file
)
if args.filenames_from_stdin:
for line in stdin.readlines():
zipfilename = line.strip()
parallel_kwargs = {'zipfilename': zipfilename}
parallel_kwargs.update(**parse_kwargs)
pool.apply_async(parse_zip, [], parallel_kwargs)
else:
for zipfilename in gen_filenames(starting_filenames=args.filename,
recursive=args.recursive):
parallel_kwargs = {'zipfilename': zipfilename}
parallel_kwargs.update(**parse_kwargs)
pool.apply_async(parse_zip, [], parallel_kwargs)
pool.close()
pool.join() | python | def main() -> None:
"""
Command-line handler for the ``grep_in_openxml`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
Performs a grep (global-regular-expression-print) search of files in OpenXML
format, which is to say inside ZIP files.
Note that you can chain; for example, to search for OpenXML files containing
both "armadillo" and "bonobo", you can do:
grep_in_openxml -l armadillo *.pptx | grep_in_openxml -x -l bonobo
^^ ^^
print filenames read filenames from stdin
"""
)
parser.add_argument(
"pattern",
help="Regular expression pattern to apply."
)
parser.add_argument(
"filename", nargs="*",
help="File(s) to check. You can also specify directores if you use "
"--recursive"
)
parser.add_argument(
"--filenames_from_stdin", "-x", action="store_true",
help="Take filenames from stdin instead, one line per filename "
"(useful for chained grep)."
)
parser.add_argument(
"--recursive", action="store_true",
help="Allow search to descend recursively into any directories "
"encountered."
)
# Flag abbreviations to match grep:
parser.add_argument(
"--ignore_case", "-i", action="store_true",
help="Ignore case"
)
parser.add_argument(
"--invert_match", "-v", action="store_true",
help="Invert match"
)
parser.add_argument(
"--files_with_matches", "-l", action="store_true",
help="Show filenames of files with matches"
)
parser.add_argument(
"--files_without_match", "-L", action="store_true",
help="Show filenames of files with no match"
)
parser.add_argument(
"--grep_inner_file_name", action="store_true",
help="Search the NAMES of the inner files, not their contents."
)
parser.add_argument(
"--show_inner_file", action="store_true",
help="For hits, show the filenames of inner files, within each ZIP."
)
parser.add_argument(
"--nprocesses", type=int, default=multiprocessing.cpu_count(),
help="Specify the number of processes to run in parallel."
)
parser.add_argument(
"--verbose", action="store_true",
help="Verbose output"
)
args = parser.parse_args()
main_only_quicksetup_rootlogger(
level=logging.DEBUG if args.verbose else logging.INFO)
if args.files_with_matches and args.files_without_match:
raise ValueError("Can't specify both --files_with_matches (-l) and "
"--files_without_match (-L)!")
if bool(args.filenames_from_stdin) == bool(args.filename):
raise ValueError("Specify --filenames_from_stdin or filenames on the "
"command line, but not both")
# Compile regular expression
if args.grep_inner_file_name:
final_pattern = args.pattern
else:
encoding = getdefaultencoding()
final_pattern = args.pattern.encode(encoding)
flags = re.IGNORECASE if args.ignore_case else 0
log.debug("Using regular expression {!r} with flags {!r}",
final_pattern, flags)
regex = re.compile(final_pattern, flags)
# Set up pool for parallel processing
pool = multiprocessing.Pool(processes=args.nprocesses)
# Iterate through files
parse_kwargs = dict(
regex=regex,
invert_match=args.invert_match,
files_with_matches=args.files_with_matches,
files_without_match=args.files_without_match,
grep_inner_file_name=args.grep_inner_file_name,
show_inner_file=args.show_inner_file
)
if args.filenames_from_stdin:
for line in stdin.readlines():
zipfilename = line.strip()
parallel_kwargs = {'zipfilename': zipfilename}
parallel_kwargs.update(**parse_kwargs)
pool.apply_async(parse_zip, [], parallel_kwargs)
else:
for zipfilename in gen_filenames(starting_filenames=args.filename,
recursive=args.recursive):
parallel_kwargs = {'zipfilename': zipfilename}
parallel_kwargs.update(**parse_kwargs)
pool.apply_async(parse_zip, [], parallel_kwargs)
pool.close()
pool.join() | [
"def",
"main",
"(",
")",
"->",
"None",
":",
"parser",
"=",
"ArgumentParser",
"(",
"formatter_class",
"=",
"RawDescriptionHelpFormatter",
",",
"description",
"=",
"\"\"\"\nPerforms a grep (global-regular-expression-print) search of files in OpenXML\nformat, which is to say inside ZIP files.\n\nNote that you can chain; for example, to search for OpenXML files containing\nboth \"armadillo\" and \"bonobo\", you can do:\n\n grep_in_openxml -l armadillo *.pptx | grep_in_openxml -x -l bonobo\n ^^ ^^\n print filenames read filenames from stdin\n\n\"\"\"",
")",
"parser",
".",
"add_argument",
"(",
"\"pattern\"",
",",
"help",
"=",
"\"Regular expression pattern to apply.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"filename\"",
",",
"nargs",
"=",
"\"*\"",
",",
"help",
"=",
"\"File(s) to check. You can also specify directores if you use \"",
"\"--recursive\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--filenames_from_stdin\"",
",",
"\"-x\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Take filenames from stdin instead, one line per filename \"",
"\"(useful for chained grep).\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--recursive\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Allow search to descend recursively into any directories \"",
"\"encountered.\"",
")",
"# Flag abbreviations to match grep:",
"parser",
".",
"add_argument",
"(",
"\"--ignore_case\"",
",",
"\"-i\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Ignore case\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--invert_match\"",
",",
"\"-v\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Invert match\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--files_with_matches\"",
",",
"\"-l\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Show filenames of files with matches\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--files_without_match\"",
",",
"\"-L\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Show filenames of files with no match\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--grep_inner_file_name\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Search the NAMES of the inner files, not their contents.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--show_inner_file\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"For hits, show the filenames of inner files, within each ZIP.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--nprocesses\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
",",
"help",
"=",
"\"Specify the number of processes to run in parallel.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--verbose\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Verbose output\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"main_only_quicksetup_rootlogger",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
"if",
"args",
".",
"verbose",
"else",
"logging",
".",
"INFO",
")",
"if",
"args",
".",
"files_with_matches",
"and",
"args",
".",
"files_without_match",
":",
"raise",
"ValueError",
"(",
"\"Can't specify both --files_with_matches (-l) and \"",
"\"--files_without_match (-L)!\"",
")",
"if",
"bool",
"(",
"args",
".",
"filenames_from_stdin",
")",
"==",
"bool",
"(",
"args",
".",
"filename",
")",
":",
"raise",
"ValueError",
"(",
"\"Specify --filenames_from_stdin or filenames on the \"",
"\"command line, but not both\"",
")",
"# Compile regular expression",
"if",
"args",
".",
"grep_inner_file_name",
":",
"final_pattern",
"=",
"args",
".",
"pattern",
"else",
":",
"encoding",
"=",
"getdefaultencoding",
"(",
")",
"final_pattern",
"=",
"args",
".",
"pattern",
".",
"encode",
"(",
"encoding",
")",
"flags",
"=",
"re",
".",
"IGNORECASE",
"if",
"args",
".",
"ignore_case",
"else",
"0",
"log",
".",
"debug",
"(",
"\"Using regular expression {!r} with flags {!r}\"",
",",
"final_pattern",
",",
"flags",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"final_pattern",
",",
"flags",
")",
"# Set up pool for parallel processing",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"processes",
"=",
"args",
".",
"nprocesses",
")",
"# Iterate through files",
"parse_kwargs",
"=",
"dict",
"(",
"regex",
"=",
"regex",
",",
"invert_match",
"=",
"args",
".",
"invert_match",
",",
"files_with_matches",
"=",
"args",
".",
"files_with_matches",
",",
"files_without_match",
"=",
"args",
".",
"files_without_match",
",",
"grep_inner_file_name",
"=",
"args",
".",
"grep_inner_file_name",
",",
"show_inner_file",
"=",
"args",
".",
"show_inner_file",
")",
"if",
"args",
".",
"filenames_from_stdin",
":",
"for",
"line",
"in",
"stdin",
".",
"readlines",
"(",
")",
":",
"zipfilename",
"=",
"line",
".",
"strip",
"(",
")",
"parallel_kwargs",
"=",
"{",
"'zipfilename'",
":",
"zipfilename",
"}",
"parallel_kwargs",
".",
"update",
"(",
"*",
"*",
"parse_kwargs",
")",
"pool",
".",
"apply_async",
"(",
"parse_zip",
",",
"[",
"]",
",",
"parallel_kwargs",
")",
"else",
":",
"for",
"zipfilename",
"in",
"gen_filenames",
"(",
"starting_filenames",
"=",
"args",
".",
"filename",
",",
"recursive",
"=",
"args",
".",
"recursive",
")",
":",
"parallel_kwargs",
"=",
"{",
"'zipfilename'",
":",
"zipfilename",
"}",
"parallel_kwargs",
".",
"update",
"(",
"*",
"*",
"parse_kwargs",
")",
"pool",
".",
"apply_async",
"(",
"parse_zip",
",",
"[",
"]",
",",
"parallel_kwargs",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")"
] | Command-line handler for the ``grep_in_openxml`` tool.
Use the ``--help`` option for help. | [
"Command",
"-",
"line",
"handler",
"for",
"the",
"grep_in_openxml",
"tool",
".",
"Use",
"the",
"--",
"help",
"option",
"for",
"help",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/openxml/grep_in_openxml.py#L179-L297 |
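A sketch of driving main() programmatically; since main() reads sys.argv via argparse, the argument vector is patched first. The filename is hypothetical:

import sys

sys.argv = ["grep_in_openxml", "-i", "--files_with_matches",
            "armadillo", "report.docx"]
main()  # prints report.docx if any inner file contains "armadillo"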
RudolfCardinal/pythonlib | cardinal_pythonlib/psychiatry/timeline.py | drug_timelines | def drug_timelines(
drug_events_df: DataFrame,
event_lasts_for: datetime.timedelta,
patient_colname: str = DEFAULT_PATIENT_COLNAME,
event_datetime_colname: str = DEFAULT_DRUG_EVENT_DATETIME_COLNAME) \
-> Dict[Any, IntervalList]:
"""
Takes a set of drug event start times (one or more per patient), plus a
fixed time that each event is presumed to last for, and returns an
:class:`IntervalList` for each patient representing the set of events
(which may overlap, in which case they will be amalgamated).
Args:
drug_events_df:
pandas :class:`DataFrame` containing the event data
event_lasts_for:
when an event occurs, how long is it assumed to last for? For
example, if a prescription of lithium occurs on 2001-01-01, how
long is the patient presumed to be taking lithium as a consequence
(e.g. 1 day? 28 days? 6 months?)
patient_colname:
name of the column in ``drug_events_df`` containing the patient ID
event_datetime_colname:
name of the column in ``drug_events_df`` containing the date/time
of each event
Returns:
        dict: mapping patient ID to an :class:`IntervalList` object indicating
        dict: mapping patient ID to an :class:`IntervalList` object indicating
the amalgamated intervals from the events
"""
sourcecolnum_pt = drug_events_df.columns.get_loc(patient_colname)
sourcecolnum_when = drug_events_df.columns.get_loc(event_datetime_colname)
timelines = defaultdict(IntervalList)
nrows = len(drug_events_df)
for rowidx in range(nrows):
patient_id = drug_events_df.iat[rowidx, sourcecolnum_pt]
event_when = drug_events_df.iat[rowidx, sourcecolnum_when]
interval = Interval(event_when, event_when + event_lasts_for)
ivlist = timelines[patient_id] # will create if unknown
ivlist.add(interval)
return timelines | python | def drug_timelines(
drug_events_df: DataFrame,
event_lasts_for: datetime.timedelta,
patient_colname: str = DEFAULT_PATIENT_COLNAME,
event_datetime_colname: str = DEFAULT_DRUG_EVENT_DATETIME_COLNAME) \
-> Dict[Any, IntervalList]:
"""
Takes a set of drug event start times (one or more per patient), plus a
fixed time that each event is presumed to last for, and returns an
:class:`IntervalList` for each patient representing the set of events
(which may overlap, in which case they will be amalgamated).
Args:
drug_events_df:
pandas :class:`DataFrame` containing the event data
event_lasts_for:
when an event occurs, how long is it assumed to last for? For
example, if a prescription of lithium occurs on 2001-01-01, how
long is the patient presumed to be taking lithium as a consequence
(e.g. 1 day? 28 days? 6 months?)
patient_colname:
name of the column in ``drug_events_df`` containing the patient ID
event_datetime_colname:
name of the column in ``drug_events_df`` containing the date/time
of each event
Returns:
        dict: mapping patient ID to an :class:`IntervalList` object indicating
the amalgamated intervals from the events
"""
sourcecolnum_pt = drug_events_df.columns.get_loc(patient_colname)
sourcecolnum_when = drug_events_df.columns.get_loc(event_datetime_colname)
timelines = defaultdict(IntervalList)
nrows = len(drug_events_df)
for rowidx in range(nrows):
patient_id = drug_events_df.iat[rowidx, sourcecolnum_pt]
event_when = drug_events_df.iat[rowidx, sourcecolnum_when]
interval = Interval(event_when, event_when + event_lasts_for)
ivlist = timelines[patient_id] # will create if unknown
ivlist.add(interval)
return timelines | [
"def",
"drug_timelines",
"(",
"drug_events_df",
":",
"DataFrame",
",",
"event_lasts_for",
":",
"datetime",
".",
"timedelta",
",",
"patient_colname",
":",
"str",
"=",
"DEFAULT_PATIENT_COLNAME",
",",
"event_datetime_colname",
":",
"str",
"=",
"DEFAULT_DRUG_EVENT_DATETIME_COLNAME",
")",
"->",
"Dict",
"[",
"Any",
",",
"IntervalList",
"]",
":",
"sourcecolnum_pt",
"=",
"drug_events_df",
".",
"columns",
".",
"get_loc",
"(",
"patient_colname",
")",
"sourcecolnum_when",
"=",
"drug_events_df",
".",
"columns",
".",
"get_loc",
"(",
"event_datetime_colname",
")",
"timelines",
"=",
"defaultdict",
"(",
"IntervalList",
")",
"nrows",
"=",
"len",
"(",
"drug_events_df",
")",
"for",
"rowidx",
"in",
"range",
"(",
"nrows",
")",
":",
"patient_id",
"=",
"drug_events_df",
".",
"iat",
"[",
"rowidx",
",",
"sourcecolnum_pt",
"]",
"event_when",
"=",
"drug_events_df",
".",
"iat",
"[",
"rowidx",
",",
"sourcecolnum_when",
"]",
"interval",
"=",
"Interval",
"(",
"event_when",
",",
"event_when",
"+",
"event_lasts_for",
")",
"ivlist",
"=",
"timelines",
"[",
"patient_id",
"]",
"# will create if unknown\r",
"ivlist",
".",
"add",
"(",
"interval",
")",
"return",
"timelines"
] | Takes a set of drug event start times (one or more per patient), plus a
fixed time that each event is presumed to last for, and returns an
:class:`IntervalList` for each patient representing the set of events
(which may overlap, in which case they will be amalgamated).
Args:
drug_events_df:
pandas :class:`DataFrame` containing the event data
event_lasts_for:
when an event occurs, how long is it assumed to last for? For
example, if a prescription of lithium occurs on 2001-01-01, how
long is the patient presumed to be taking lithium as a consequence
(e.g. 1 day? 28 days? 6 months?)
patient_colname:
name of the column in ``drug_events_df`` containing the patient ID
event_datetime_colname:
name of the column in ``drug_events_df`` containing the date/time
of each event
Returns:
        dict: mapping patient ID to an :class:`IntervalList` object indicating
the amalgamated intervals from the events | [
"Takes",
"a",
"set",
"of",
"drug",
"event",
"start",
"times",
"(",
"one",
"or",
"more",
"per",
"patient",
")",
"plus",
"a",
"fixed",
"time",
"that",
"each",
"event",
"is",
"presumed",
"to",
"last",
"for",
"and",
"returns",
"an",
":",
"class",
":",
"IntervalList",
"for",
"each",
"patient",
"representing",
"the",
"set",
"of",
"events",
"(",
"which",
"may",
"overlap",
"in",
"which",
"case",
"they",
"will",
"be",
"amalgamated",
")",
".",
"Args",
":",
"drug_events_df",
":",
"pandas",
":",
"class",
":",
"DataFrame",
"containing",
"the",
"event",
"data",
"event_lasts_for",
":",
"when",
"an",
"event",
"occurs",
"how",
"long",
"is",
"it",
"assumed",
"to",
"last",
"for?",
"For",
"example",
"if",
"a",
"prescription",
"of",
"lithium",
"occurs",
"on",
"2001",
"-",
"01",
"-",
"01",
"how",
"long",
"is",
"the",
"patient",
"presumed",
"to",
"be",
"taking",
"lithium",
"as",
"a",
"consequence",
"(",
"e",
".",
"g",
".",
"1",
"day?",
"28",
"days?",
"6",
"months?",
")",
"patient_colname",
":",
"name",
"of",
"the",
"column",
"in",
"drug_events_df",
"containing",
"the",
"patient",
"ID",
"event_datetime_colname",
":",
"name",
"of",
"the",
"column",
"in",
"drug_events_df",
"containing",
"the",
"date",
"/",
"time",
"of",
"each",
"event",
"Returns",
":",
"dict",
":",
"mapping",
"patient",
"ID",
"to",
"a",
":",
"class",
":",
"IntervalList",
"object",
"indicating",
"the",
"amalgamated",
"intervals",
"from",
"the",
"events"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/psychiatry/timeline.py#L252-L293 |
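A self-contained sketch of drug_timelines; the column names are passed explicitly so nothing depends on the (unshown) default constants:

import datetime
from pandas import DataFrame

events = DataFrame({
    "patient_id": ["p1", "p1", "p2"],
    "when": [
        datetime.datetime(2001, 1, 1),
        datetime.datetime(2001, 1, 10),  # overlaps the first 28-day window
        datetime.datetime(2001, 3, 1),
    ],
})
timelines = drug_timelines(
    events,
    event_lasts_for=datetime.timedelta(days=28),
    patient_colname="patient_id",
    event_datetime_colname="when",
)
# timelines["p1"] is an IntervalList in which the two overlapping 28-day
# intervals have been amalgamated into one continuous period.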
RudolfCardinal/pythonlib | cardinal_pythonlib/psychiatry/timeline.py | cumulative_time_on_drug | def cumulative_time_on_drug(
drug_events_df: DataFrame,
query_times_df: DataFrame,
event_lasts_for_timedelta: datetime.timedelta = None,
event_lasts_for_quantity: float = None,
event_lasts_for_units: str = None,
patient_colname: str = DEFAULT_PATIENT_COLNAME,
event_datetime_colname: str = DEFAULT_DRUG_EVENT_DATETIME_COLNAME,
start_colname: str = DEFAULT_START_DATETIME_COLNAME,
when_colname: str = DEFAULT_QUERY_DATETIME_COLNAME,
include_timedelta_in_output: bool = False,
debug: bool = False) \
-> DataFrame:
"""
Args:
drug_events_df:
pandas :class:`DataFrame` containing the event data, with columns
named according to ``patient_colname``, ``event_datetime_colname``
event_lasts_for_timedelta:
when an event occurs, how long is it assumed to last for? For
example, if a prescription of lithium occurs on 2001-01-01, how
long is the patient presumed to be taking lithium as a consequence
(e.g. 1 day? 28 days? 6 months?)
event_lasts_for_quantity:
as an alternative to ``event_lasts_for_timedelta``, particularly if
you are calling from R to Python via ``reticulate`` (which doesn't
convert R ``as.difftime()`` to Python ``datetime.timedelta``), you
can specify ``event_lasts_for_quantity``, a number and
``event_lasts_for_units`` (q.v.).
event_lasts_for_units:
specify the units for ``event_lasts_for_quantity`` (q.v.), if used;
e.g. ``"days"``. The string value must be the name of an argument
to the Python ``datetime.timedelta`` constructor.
query_times_df:
times to query for, with columns named according to
``patient_colname``, ``start_colname``, and ``when_colname``
patient_colname:
            name of the column in ``drug_events_df`` and ``query_times_df``
containing the patient ID
event_datetime_colname:
name of the column in ``drug_events_df`` containing the date/time
of each event
start_colname:
            name of the column in ``query_times_df`` containing the date/time
representing the overall start time for the relevant patient (from
which cumulative times are calculated)
when_colname:
            name of the column in ``query_times_df`` containing date/time
values at which to query
include_timedelta_in_output:
include ``datetime.timedelta`` values in the output? The default is
``False`` as this isn't supported by R/``reticulate``.
debug:
print debugging information to the log?
Returns:
:class:`DataFrame` with the requested data
"""
if event_lasts_for_timedelta is None:
assert event_lasts_for_quantity and event_lasts_for_units
timedelta_dict = {event_lasts_for_units: event_lasts_for_quantity}
event_lasts_for_timedelta = datetime.timedelta(**timedelta_dict)
if debug:
log.critical("drug_events_df:\n{!r}", drug_events_df)
log.critical("event_lasts_for:\n{!r}", event_lasts_for_timedelta)
log.critical("query_times_df:\n{!r}", query_times_df)
timelines = drug_timelines(
drug_events_df=drug_events_df,
event_lasts_for=event_lasts_for_timedelta,
patient_colname=patient_colname,
event_datetime_colname=event_datetime_colname,
)
query_nrow = len(query_times_df)
ct_coldefs = [ # column definitions:
(RCN_PATIENT_ID, DTYPE_STRING),
(RCN_START, DTYPE_DATETIME),
(RCN_TIME, DTYPE_DATETIME),
(RCN_BEFORE_DAYS, DTYPE_FLOAT),
(RCN_DURING_DAYS, DTYPE_FLOAT),
(RCN_AFTER_DAYS, DTYPE_FLOAT),
]
if include_timedelta_in_output:
ct_coldefs.extend([
(RCN_BEFORE_TIMEDELTA, DTYPE_TIMEDELTA),
(RCN_DURING_TIMEDELTA, DTYPE_TIMEDELTA),
(RCN_AFTER_TIMEDELTA, DTYPE_TIMEDELTA),
])
ct_arr = array([None] * query_nrow, dtype=ct_coldefs)
# log.debug("ct_arr:\n{!r}", ct_arr)
cumulative_times = DataFrame(ct_arr, index=list(range(query_nrow)))
# log.debug("cumulative_times:\n{!r}", cumulative_times)
# So we can use the fast "iat" function.
sourcecolnum_pt = query_times_df.columns.get_loc(patient_colname)
sourcecolnum_start = query_times_df.columns.get_loc(start_colname)
sourcecolnum_when = query_times_df.columns.get_loc(when_colname)
dest_colnum_pt = cumulative_times.columns.get_loc(RCN_PATIENT_ID)
dest_colnum_start = cumulative_times.columns.get_loc(RCN_START)
dest_colnum_t = cumulative_times.columns.get_loc(RCN_TIME)
dest_colnum_before_days = cumulative_times.columns.get_loc(RCN_BEFORE_DAYS)
dest_colnum_during_days = cumulative_times.columns.get_loc(RCN_DURING_DAYS)
dest_colnum_after_days = cumulative_times.columns.get_loc(RCN_AFTER_DAYS)
if include_timedelta_in_output:
dest_colnum_before_dt = cumulative_times.columns.get_loc(RCN_BEFORE_TIMEDELTA) # noqa
dest_colnum_during_dt = cumulative_times.columns.get_loc(RCN_DURING_TIMEDELTA) # noqa
dest_colnum_after_dt = cumulative_times.columns.get_loc(RCN_AFTER_TIMEDELTA) # noqa
else:
# for type checker
dest_colnum_before_dt = 0
dest_colnum_during_dt = 0
dest_colnum_after_dt = 0
for rowidx in range(query_nrow):
patient_id = query_times_df.iat[rowidx, sourcecolnum_pt]
start = query_times_df.iat[rowidx, sourcecolnum_start]
when = query_times_df.iat[rowidx, sourcecolnum_when]
ivlist = timelines[patient_id]
# log.critical("ivlist: {!r}", ivlist)
before, during, after = ivlist.cumulative_before_during_after(start,
when)
# log.critical(
# "{!r}.cumulative_before_during_after(start={!r}, when={!r}) "
# "-> {!r}, {!r}, {!r}",
# ivlist, start, when,
# before, during, after
# )
cumulative_times.iat[rowidx, dest_colnum_pt] = patient_id
cumulative_times.iat[rowidx, dest_colnum_start] = start
cumulative_times.iat[rowidx, dest_colnum_t] = when
cumulative_times.iat[rowidx, dest_colnum_before_days] = before.days
cumulative_times.iat[rowidx, dest_colnum_during_days] = during.days
cumulative_times.iat[rowidx, dest_colnum_after_days] = after.days
if include_timedelta_in_output:
cumulative_times.iat[rowidx, dest_colnum_before_dt] = before
cumulative_times.iat[rowidx, dest_colnum_during_dt] = during
cumulative_times.iat[rowidx, dest_colnum_after_dt] = after
return cumulative_times | python | def cumulative_time_on_drug(
drug_events_df: DataFrame,
query_times_df: DataFrame,
event_lasts_for_timedelta: datetime.timedelta = None,
event_lasts_for_quantity: float = None,
event_lasts_for_units: str = None,
patient_colname: str = DEFAULT_PATIENT_COLNAME,
event_datetime_colname: str = DEFAULT_DRUG_EVENT_DATETIME_COLNAME,
start_colname: str = DEFAULT_START_DATETIME_COLNAME,
when_colname: str = DEFAULT_QUERY_DATETIME_COLNAME,
include_timedelta_in_output: bool = False,
debug: bool = False) \
-> DataFrame:
"""
Args:
drug_events_df:
pandas :class:`DataFrame` containing the event data, with columns
named according to ``patient_colname``, ``event_datetime_colname``
event_lasts_for_timedelta:
when an event occurs, how long is it assumed to last for? For
example, if a prescription of lithium occurs on 2001-01-01, how
long is the patient presumed to be taking lithium as a consequence
(e.g. 1 day? 28 days? 6 months?)
event_lasts_for_quantity:
as an alternative to ``event_lasts_for_timedelta``, particularly if
you are calling from R to Python via ``reticulate`` (which doesn't
convert R ``as.difftime()`` to Python ``datetime.timedelta``), you
can specify ``event_lasts_for_quantity``, a number and
``event_lasts_for_units`` (q.v.).
event_lasts_for_units:
specify the units for ``event_lasts_for_quantity`` (q.v.), if used;
e.g. ``"days"``. The string value must be the name of an argument
to the Python ``datetime.timedelta`` constructor.
query_times_df:
times to query for, with columns named according to
``patient_colname``, ``start_colname``, and ``when_colname``
patient_colname:
            name of the column in ``drug_events_df`` and ``query_times_df``
containing the patient ID
event_datetime_colname:
name of the column in ``drug_events_df`` containing the date/time
of each event
start_colname:
            name of the column in ``query_times_df`` containing the date/time
representing the overall start time for the relevant patient (from
which cumulative times are calculated)
when_colname:
            name of the column in ``query_times_df`` containing date/time
values at which to query
include_timedelta_in_output:
include ``datetime.timedelta`` values in the output? The default is
``False`` as this isn't supported by R/``reticulate``.
debug:
print debugging information to the log?
Returns:
:class:`DataFrame` with the requested data
"""
if event_lasts_for_timedelta is None:
assert event_lasts_for_quantity and event_lasts_for_units
timedelta_dict = {event_lasts_for_units: event_lasts_for_quantity}
event_lasts_for_timedelta = datetime.timedelta(**timedelta_dict)
if debug:
log.critical("drug_events_df:\n{!r}", drug_events_df)
log.critical("event_lasts_for:\n{!r}", event_lasts_for_timedelta)
log.critical("query_times_df:\n{!r}", query_times_df)
timelines = drug_timelines(
drug_events_df=drug_events_df,
event_lasts_for=event_lasts_for_timedelta,
patient_colname=patient_colname,
event_datetime_colname=event_datetime_colname,
)
query_nrow = len(query_times_df)
ct_coldefs = [ # column definitions:
(RCN_PATIENT_ID, DTYPE_STRING),
(RCN_START, DTYPE_DATETIME),
(RCN_TIME, DTYPE_DATETIME),
(RCN_BEFORE_DAYS, DTYPE_FLOAT),
(RCN_DURING_DAYS, DTYPE_FLOAT),
(RCN_AFTER_DAYS, DTYPE_FLOAT),
]
if include_timedelta_in_output:
ct_coldefs.extend([
(RCN_BEFORE_TIMEDELTA, DTYPE_TIMEDELTA),
(RCN_DURING_TIMEDELTA, DTYPE_TIMEDELTA),
(RCN_AFTER_TIMEDELTA, DTYPE_TIMEDELTA),
])
ct_arr = array([None] * query_nrow, dtype=ct_coldefs)
# log.debug("ct_arr:\n{!r}", ct_arr)
cumulative_times = DataFrame(ct_arr, index=list(range(query_nrow)))
# log.debug("cumulative_times:\n{!r}", cumulative_times)
# So we can use the fast "iat" function.
sourcecolnum_pt = query_times_df.columns.get_loc(patient_colname)
sourcecolnum_start = query_times_df.columns.get_loc(start_colname)
sourcecolnum_when = query_times_df.columns.get_loc(when_colname)
dest_colnum_pt = cumulative_times.columns.get_loc(RCN_PATIENT_ID)
dest_colnum_start = cumulative_times.columns.get_loc(RCN_START)
dest_colnum_t = cumulative_times.columns.get_loc(RCN_TIME)
dest_colnum_before_days = cumulative_times.columns.get_loc(RCN_BEFORE_DAYS)
dest_colnum_during_days = cumulative_times.columns.get_loc(RCN_DURING_DAYS)
dest_colnum_after_days = cumulative_times.columns.get_loc(RCN_AFTER_DAYS)
if include_timedelta_in_output:
dest_colnum_before_dt = cumulative_times.columns.get_loc(RCN_BEFORE_TIMEDELTA) # noqa
dest_colnum_during_dt = cumulative_times.columns.get_loc(RCN_DURING_TIMEDELTA) # noqa
dest_colnum_after_dt = cumulative_times.columns.get_loc(RCN_AFTER_TIMEDELTA) # noqa
else:
# for type checker
dest_colnum_before_dt = 0
dest_colnum_during_dt = 0
dest_colnum_after_dt = 0
for rowidx in range(query_nrow):
patient_id = query_times_df.iat[rowidx, sourcecolnum_pt]
start = query_times_df.iat[rowidx, sourcecolnum_start]
when = query_times_df.iat[rowidx, sourcecolnum_when]
ivlist = timelines[patient_id]
# log.critical("ivlist: {!r}", ivlist)
before, during, after = ivlist.cumulative_before_during_after(start,
when)
# log.critical(
# "{!r}.cumulative_before_during_after(start={!r}, when={!r}) "
# "-> {!r}, {!r}, {!r}",
# ivlist, start, when,
# before, during, after
# )
cumulative_times.iat[rowidx, dest_colnum_pt] = patient_id
cumulative_times.iat[rowidx, dest_colnum_start] = start
cumulative_times.iat[rowidx, dest_colnum_t] = when
cumulative_times.iat[rowidx, dest_colnum_before_days] = before.days
cumulative_times.iat[rowidx, dest_colnum_during_days] = during.days
cumulative_times.iat[rowidx, dest_colnum_after_days] = after.days
if include_timedelta_in_output:
cumulative_times.iat[rowidx, dest_colnum_before_dt] = before
cumulative_times.iat[rowidx, dest_colnum_during_dt] = during
cumulative_times.iat[rowidx, dest_colnum_after_dt] = after
return cumulative_times | [
"def",
"cumulative_time_on_drug",
"(",
"drug_events_df",
":",
"DataFrame",
",",
"query_times_df",
":",
"DataFrame",
",",
"event_lasts_for_timedelta",
":",
"datetime",
".",
"timedelta",
"=",
"None",
",",
"event_lasts_for_quantity",
":",
"float",
"=",
"None",
",",
"event_lasts_for_units",
":",
"str",
"=",
"None",
",",
"patient_colname",
":",
"str",
"=",
"DEFAULT_PATIENT_COLNAME",
",",
"event_datetime_colname",
":",
"str",
"=",
"DEFAULT_DRUG_EVENT_DATETIME_COLNAME",
",",
"start_colname",
":",
"str",
"=",
"DEFAULT_START_DATETIME_COLNAME",
",",
"when_colname",
":",
"str",
"=",
"DEFAULT_QUERY_DATETIME_COLNAME",
",",
"include_timedelta_in_output",
":",
"bool",
"=",
"False",
",",
"debug",
":",
"bool",
"=",
"False",
")",
"->",
"DataFrame",
":",
"if",
"event_lasts_for_timedelta",
"is",
"None",
":",
"assert",
"event_lasts_for_quantity",
"and",
"event_lasts_for_units",
"timedelta_dict",
"=",
"{",
"event_lasts_for_units",
":",
"event_lasts_for_quantity",
"}",
"event_lasts_for_timedelta",
"=",
"datetime",
".",
"timedelta",
"(",
"*",
"*",
"timedelta_dict",
")",
"if",
"debug",
":",
"log",
".",
"critical",
"(",
"\"drug_events_df:\\n{!r}\"",
",",
"drug_events_df",
")",
"log",
".",
"critical",
"(",
"\"event_lasts_for:\\n{!r}\"",
",",
"event_lasts_for_timedelta",
")",
"log",
".",
"critical",
"(",
"\"query_times_df:\\n{!r}\"",
",",
"query_times_df",
")",
"timelines",
"=",
"drug_timelines",
"(",
"drug_events_df",
"=",
"drug_events_df",
",",
"event_lasts_for",
"=",
"event_lasts_for_timedelta",
",",
"patient_colname",
"=",
"patient_colname",
",",
"event_datetime_colname",
"=",
"event_datetime_colname",
",",
")",
"query_nrow",
"=",
"len",
"(",
"query_times_df",
")",
"ct_coldefs",
"=",
"[",
"# column definitions:\r",
"(",
"RCN_PATIENT_ID",
",",
"DTYPE_STRING",
")",
",",
"(",
"RCN_START",
",",
"DTYPE_DATETIME",
")",
",",
"(",
"RCN_TIME",
",",
"DTYPE_DATETIME",
")",
",",
"(",
"RCN_BEFORE_DAYS",
",",
"DTYPE_FLOAT",
")",
",",
"(",
"RCN_DURING_DAYS",
",",
"DTYPE_FLOAT",
")",
",",
"(",
"RCN_AFTER_DAYS",
",",
"DTYPE_FLOAT",
")",
",",
"]",
"if",
"include_timedelta_in_output",
":",
"ct_coldefs",
".",
"extend",
"(",
"[",
"(",
"RCN_BEFORE_TIMEDELTA",
",",
"DTYPE_TIMEDELTA",
")",
",",
"(",
"RCN_DURING_TIMEDELTA",
",",
"DTYPE_TIMEDELTA",
")",
",",
"(",
"RCN_AFTER_TIMEDELTA",
",",
"DTYPE_TIMEDELTA",
")",
",",
"]",
")",
"ct_arr",
"=",
"array",
"(",
"[",
"None",
"]",
"*",
"query_nrow",
",",
"dtype",
"=",
"ct_coldefs",
")",
"# log.debug(\"ct_arr:\\n{!r}\", ct_arr)\r",
"cumulative_times",
"=",
"DataFrame",
"(",
"ct_arr",
",",
"index",
"=",
"list",
"(",
"range",
"(",
"query_nrow",
")",
")",
")",
"# log.debug(\"cumulative_times:\\n{!r}\", cumulative_times)\r",
"# So we can use the fast \"iat\" function.\r",
"sourcecolnum_pt",
"=",
"query_times_df",
".",
"columns",
".",
"get_loc",
"(",
"patient_colname",
")",
"sourcecolnum_start",
"=",
"query_times_df",
".",
"columns",
".",
"get_loc",
"(",
"start_colname",
")",
"sourcecolnum_when",
"=",
"query_times_df",
".",
"columns",
".",
"get_loc",
"(",
"when_colname",
")",
"dest_colnum_pt",
"=",
"cumulative_times",
".",
"columns",
".",
"get_loc",
"(",
"RCN_PATIENT_ID",
")",
"dest_colnum_start",
"=",
"cumulative_times",
".",
"columns",
".",
"get_loc",
"(",
"RCN_START",
")",
"dest_colnum_t",
"=",
"cumulative_times",
".",
"columns",
".",
"get_loc",
"(",
"RCN_TIME",
")",
"dest_colnum_before_days",
"=",
"cumulative_times",
".",
"columns",
".",
"get_loc",
"(",
"RCN_BEFORE_DAYS",
")",
"dest_colnum_during_days",
"=",
"cumulative_times",
".",
"columns",
".",
"get_loc",
"(",
"RCN_DURING_DAYS",
")",
"dest_colnum_after_days",
"=",
"cumulative_times",
".",
"columns",
".",
"get_loc",
"(",
"RCN_AFTER_DAYS",
")",
"if",
"include_timedelta_in_output",
":",
"dest_colnum_before_dt",
"=",
"cumulative_times",
".",
"columns",
".",
"get_loc",
"(",
"RCN_BEFORE_TIMEDELTA",
")",
"# noqa\r",
"dest_colnum_during_dt",
"=",
"cumulative_times",
".",
"columns",
".",
"get_loc",
"(",
"RCN_DURING_TIMEDELTA",
")",
"# noqa\r",
"dest_colnum_after_dt",
"=",
"cumulative_times",
".",
"columns",
".",
"get_loc",
"(",
"RCN_AFTER_TIMEDELTA",
")",
"# noqa\r",
"else",
":",
"# for type checker\r",
"dest_colnum_before_dt",
"=",
"0",
"dest_colnum_during_dt",
"=",
"0",
"dest_colnum_after_dt",
"=",
"0",
"for",
"rowidx",
"in",
"range",
"(",
"query_nrow",
")",
":",
"patient_id",
"=",
"query_times_df",
".",
"iat",
"[",
"rowidx",
",",
"sourcecolnum_pt",
"]",
"start",
"=",
"query_times_df",
".",
"iat",
"[",
"rowidx",
",",
"sourcecolnum_start",
"]",
"when",
"=",
"query_times_df",
".",
"iat",
"[",
"rowidx",
",",
"sourcecolnum_when",
"]",
"ivlist",
"=",
"timelines",
"[",
"patient_id",
"]",
"# log.critical(\"ivlist: {!r}\", ivlist)\r",
"before",
",",
"during",
",",
"after",
"=",
"ivlist",
".",
"cumulative_before_during_after",
"(",
"start",
",",
"when",
")",
"# log.critical(\r",
"# \"{!r}.cumulative_before_during_after(start={!r}, when={!r}) \"\r",
"# \"-> {!r}, {!r}, {!r}\",\r",
"# ivlist, start, when,\r",
"# before, during, after\r",
"# )\r",
"cumulative_times",
".",
"iat",
"[",
"rowidx",
",",
"dest_colnum_pt",
"]",
"=",
"patient_id",
"cumulative_times",
".",
"iat",
"[",
"rowidx",
",",
"dest_colnum_start",
"]",
"=",
"start",
"cumulative_times",
".",
"iat",
"[",
"rowidx",
",",
"dest_colnum_t",
"]",
"=",
"when",
"cumulative_times",
".",
"iat",
"[",
"rowidx",
",",
"dest_colnum_before_days",
"]",
"=",
"before",
".",
"days",
"cumulative_times",
".",
"iat",
"[",
"rowidx",
",",
"dest_colnum_during_days",
"]",
"=",
"during",
".",
"days",
"cumulative_times",
".",
"iat",
"[",
"rowidx",
",",
"dest_colnum_after_days",
"]",
"=",
"after",
".",
"days",
"if",
"include_timedelta_in_output",
":",
"cumulative_times",
".",
"iat",
"[",
"rowidx",
",",
"dest_colnum_before_dt",
"]",
"=",
"before",
"cumulative_times",
".",
"iat",
"[",
"rowidx",
",",
"dest_colnum_during_dt",
"]",
"=",
"during",
"cumulative_times",
".",
"iat",
"[",
"rowidx",
",",
"dest_colnum_after_dt",
"]",
"=",
"after",
"return",
"cumulative_times"
] | Args:
drug_events_df:
pandas :class:`DataFrame` containing the event data, with columns
named according to ``patient_colname``, ``event_datetime_colname``
event_lasts_for_timedelta:
when an event occurs, how long is it assumed to last for? For
example, if a prescription of lithium occurs on 2001-01-01, how
long is the patient presumed to be taking lithium as a consequence
(e.g. 1 day? 28 days? 6 months?)
event_lasts_for_quantity:
as an alternative to ``event_lasts_for_timedelta``, particularly if
you are calling from R to Python via ``reticulate`` (which doesn't
convert R ``as.difftime()`` to Python ``datetime.timedelta``), you
can specify ``event_lasts_for_quantity``, a number and
``event_lasts_for_units`` (q.v.).
event_lasts_for_units:
specify the units for ``event_lasts_for_quantity`` (q.v.), if used;
e.g. ``"days"``. The string value must be the name of an argument
to the Python ``datetime.timedelta`` constructor.
query_times_df:
times to query for, with columns named according to
``patient_colname``, ``start_colname``, and ``when_colname``
patient_colname:
            name of the column in ``drug_events_df`` and ``query_times_df``
containing the patient ID
event_datetime_colname:
name of the column in ``drug_events_df`` containing the date/time
of each event
start_colname:
            name of the column in ``query_times_df`` containing the date/time
representing the overall start time for the relevant patient (from
which cumulative times are calculated)
when_colname:
            name of the column in ``query_times_df`` containing date/time
values at which to query
include_timedelta_in_output:
include ``datetime.timedelta`` values in the output? The default is
``False`` as this isn't supported by R/``reticulate``.
debug:
print debugging information to the log?
Returns:
:class:`DataFrame` with the requested data | [
"Args",
":",
"drug_events_df",
":",
"pandas",
":",
"class",
":",
"DataFrame",
"containing",
"the",
"event",
"data",
"with",
"columns",
"named",
"according",
"to",
"patient_colname",
"event_datetime_colname",
"event_lasts_for_timedelta",
":",
"when",
"an",
"event",
"occurs",
"how",
"long",
"is",
"it",
"assumed",
"to",
"last",
"for?",
"For",
"example",
"if",
"a",
"prescription",
"of",
"lithium",
"occurs",
"on",
"2001",
"-",
"01",
"-",
"01",
"how",
"long",
"is",
"the",
"patient",
"presumed",
"to",
"be",
"taking",
"lithium",
"as",
"a",
"consequence",
"(",
"e",
".",
"g",
".",
"1",
"day?",
"28",
"days?",
"6",
"months?",
")",
"event_lasts_for_quantity",
":",
"as",
"an",
"alternative",
"to",
"event_lasts_for_timedelta",
"particularly",
"if",
"you",
"are",
"calling",
"from",
"R",
"to",
"Python",
"via",
"reticulate",
"(",
"which",
"doesn",
"t",
"convert",
"R",
"as",
".",
"difftime",
"()",
"to",
"Python",
"datetime",
".",
"timedelta",
")",
"you",
"can",
"specify",
"event_lasts_for_quantity",
"a",
"number",
"and",
"event_lasts_for_units",
"(",
"q",
".",
"v",
".",
")",
".",
"event_lasts_for_units",
":",
"specify",
"the",
"units",
"for",
"event_lasts_for_quantity",
"(",
"q",
".",
"v",
".",
")",
"if",
"used",
";",
"e",
".",
"g",
".",
"days",
".",
"The",
"string",
"value",
"must",
"be",
"the",
"name",
"of",
"an",
"argument",
"to",
"the",
"Python",
"datetime",
".",
"timedelta",
"constructor",
".",
"query_times_df",
":",
"times",
"to",
"query",
"for",
"with",
"columns",
"named",
"according",
"to",
"patient_colname",
"start_colname",
"and",
"when_colname",
"patient_colname",
":",
"name",
"of",
"the",
"column",
"in",
"drug_events_df",
"and",
"query_time_df",
"containing",
"the",
"patient",
"ID",
"event_datetime_colname",
":",
"name",
"of",
"the",
"column",
"in",
"drug_events_df",
"containing",
"the",
"date",
"/",
"time",
"of",
"each",
"event",
"start_colname",
":",
"name",
"of",
"the",
"column",
"in",
"query_time_df",
"containing",
"the",
"date",
"/",
"time",
"representing",
"the",
"overall",
"start",
"time",
"for",
"the",
"relevant",
"patient",
"(",
"from",
"which",
"cumulative",
"times",
"are",
"calculated",
")",
"when_colname",
":",
"name",
"of",
"the",
"column",
"in",
"query_time_df",
"containing",
"date",
"/",
"time",
"values",
"at",
"which",
"to",
"query",
"include_timedelta_in_output",
":",
"include",
"datetime",
".",
"timedelta",
"values",
"in",
"the",
"output?",
"The",
"default",
"is",
"False",
"as",
"this",
"isn",
"t",
"supported",
"by",
"R",
"/",
"reticulate",
".",
"debug",
":",
"print",
"debugging",
"information",
"to",
"the",
"log?",
"Returns",
":",
":",
"class",
":",
"DataFrame",
"with",
"the",
"requested",
"data"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/psychiatry/timeline.py#L315-L451 |
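A matching sketch for cumulative_time_on_drug, using the quantity/units form of the event duration (the R-friendly alternative the docstring describes). All column names are explicit, hypothetical choices:

import datetime
from pandas import DataFrame

events = DataFrame({
    "patient_id": ["p1"],
    "when": [datetime.datetime(2001, 1, 1)],
})
queries = DataFrame({
    "patient_id": ["p1"],
    "start": [datetime.datetime(2000, 12, 1)],
    "t": [datetime.datetime(2001, 6, 1)],
})
result = cumulative_time_on_drug(
    drug_events_df=events,
    query_times_df=queries,
    event_lasts_for_quantity=28,
    event_lasts_for_units="days",  # must name a datetime.timedelta argument
    patient_colname="patient_id",
    event_datetime_colname="when",
    start_colname="start",
    when_colname="t",
)
# One row per query row: days before/during/after drug exposure between
# "start" and "t".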
ivanprjcts/sdklib | sdklib/html/base.py | HTMLLxmlMixin.find_element_by_xpath | def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return: ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_
"""
elems = self.find_elements_by_xpath(xpath)
if isinstance(elems, list) and len(elems) > 0:
return elems[0] | python | def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return: ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_
"""
elems = self.find_elements_by_xpath(xpath)
if isinstance(elems, list) and len(elems) > 0:
return elems[0] | [
"def",
"find_element_by_xpath",
"(",
"self",
",",
"xpath",
")",
":",
"elems",
"=",
"self",
".",
"find_elements_by_xpath",
"(",
"xpath",
")",
"if",
"isinstance",
"(",
"elems",
",",
"list",
")",
"and",
"len",
"(",
"elems",
")",
">",
"0",
":",
"return",
"elems",
"[",
"0",
"]"
] | Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return: ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_ | [
"Finds",
"an",
"element",
"by",
"xpath",
"."
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/html/base.py#L88-L99 |
ivanprjcts/sdklib | sdklib/html/base.py | HTMLLxmlMixin.find_elements_by_xpath | def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:param xpath: The xpath locator of the elements to be found.
:return: list of ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_
"""
from sdklib.html.elem import ElemLxml
elements = self.html_obj.xpath(xpath)
return [ElemLxml(e) for e in elements] | python | def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:param xpath: The xpath locator of the elements to be found.
:return: list of ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_
"""
from sdklib.html.elem import ElemLxml
elements = self.html_obj.xpath(xpath)
return [ElemLxml(e) for e in elements] | [
"def",
"find_elements_by_xpath",
"(",
"self",
",",
"xpath",
")",
":",
"from",
"sdklib",
".",
"html",
".",
"elem",
"import",
"ElemLxml",
"elements",
"=",
"self",
".",
"html_obj",
".",
"xpath",
"(",
"xpath",
")",
"return",
"[",
"ElemLxml",
"(",
"e",
")",
"for",
"e",
"in",
"elements",
"]"
] | Finds multiple elements by xpath.
:param xpath: The xpath locator of the elements to be found.
:return: list of ElemLxml
See lxml xpath expressions `here <http://lxml.de/xpathxslt.html#xpath>`_ | [
"Finds",
"multiple",
"elements",
"by",
"xpath",
"."
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/html/base.py#L101-L113 |
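A brief usage sketch covering this method and the single-element wrapper in the preceding record; the HTML entry point is assumed, since these records only show the mixin. Note that find_element_by_xpath returns None implicitly when nothing matches, because it only returns elems[0] for a non-empty list.

# Sketch; sdklib.html.HTML is assumed to mix in HTMLLxmlMixin and expose
# the parsed document as html_obj.
from sdklib.html import HTML

doc = HTML("<html><body><p class='x'>one</p><p class='x'>two</p></body></html>")
first = doc.find_element_by_xpath("//p[@class='x']")    # ElemLxml or None
all_ps = doc.find_elements_by_xpath("//p[@class='x']")  # list of ElemLxml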
ivanprjcts/sdklib | sdklib/html/base.py | HTML5libMixin.find_element_by_xpath | def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return:
See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_
"""
from sdklib.html.elem import Elem5lib
return Elem5lib(self.html_obj.find(self._convert_xpath(xpath))) | python | def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return:
See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_
"""
from sdklib.html.elem import Elem5lib
return Elem5lib(self.html_obj.find(self._convert_xpath(xpath))) | [
"def",
"find_element_by_xpath",
"(",
"self",
",",
"xpath",
")",
":",
"from",
"sdklib",
".",
"html",
".",
"elem",
"import",
"Elem5lib",
"return",
"Elem5lib",
"(",
"self",
".",
"html_obj",
".",
"find",
"(",
"self",
".",
"_convert_xpath",
"(",
"xpath",
")",
")",
")"
] | Finds an element by xpath.
:param xpath: The xpath locator of the element to find.
:return:
See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_ | [
"Finds",
"an",
"element",
"by",
"xpath",
"."
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/html/base.py#L121-L132 |
ivanprjcts/sdklib | sdklib/html/base.py | HTML5libMixin.find_elements_by_xpath | def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:param xpath: The xpath locator of the elements to be found.
:return:
See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_
"""
from sdklib.html.elem import Elem5lib
return [Elem5lib(e) for e in self.html_obj.findall(self._convert_xpath(xpath))] | python | def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:param xpath: The xpath locator of the elements to be found.
:return:
See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_
"""
from sdklib.html.elem import Elem5lib
return [Elem5lib(e) for e in self.html_obj.findall(self._convert_xpath(xpath))] | [
"def",
"find_elements_by_xpath",
"(",
"self",
",",
"xpath",
")",
":",
"from",
"sdklib",
".",
"html",
".",
"elem",
"import",
"Elem5lib",
"return",
"[",
"Elem5lib",
"(",
"e",
")",
"for",
"e",
"in",
"self",
".",
"html_obj",
".",
"findall",
"(",
"self",
".",
"_convert_xpath",
"(",
"xpath",
")",
")",
"]"
] | Finds multiple elements by xpath.
:param xpath: The xpath locator of the elements to be found.
:return:
See html5lib xpath expressions `here <https://docs.python.org/2/library/xml.etree.elementtree.html#supported-xpath-syntax>`_ | [
"Finds",
"multiple",
"elements",
"by",
"xpath",
"."
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/html/base.py#L134-L145 |
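A standalone sketch of what the two html5lib-backed methods above do under the hood; it uses html5lib and ElementTree directly rather than sdklib, and the _convert_xpath step is approximated by writing ElementTree-compatible paths by hand (only ElementTree's limited XPath subset is supported, per the docstrings).

import html5lib  # parses into xml.etree.ElementTree elements by default

root = html5lib.parse("<p>a</p><p>b</p>", namespaceHTMLElements=False)
first = root.find(".//p")      # what find_element_by_xpath wraps in Elem5lib
all_ps = root.findall(".//p")  # what find_elements_by_xpath wraps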
ivanprjcts/sdklib | sdklib/behave/response.py | http_response_body_should_be_this_json | def http_response_body_should_be_this_json(context):
"""
Parameters:
.. code-block:: json
{
"param1": "value1",
"param2": "value2",
"param3": {
"param31": "value31"
}
}
"""
body_params = json.loads(context.text)
assert body_params == context.api_response.data, \
"Expected: {}; Message: {}".format(body_params, context.api_response.data) | python | def http_response_body_should_be_this_json(context):
"""
Parameters:
.. code-block:: json
{
"param1": "value1",
"param2": "value2",
"param3": {
"param31": "value31"
}
}
"""
body_params = json.loads(context.text)
assert body_params == context.api_response.data, \
"Expected: {}; Message: {}".format(body_params, context.api_response.data) | [
"def",
"http_response_body_should_be_this_json",
"(",
"context",
")",
":",
"body_params",
"=",
"json",
".",
"loads",
"(",
"context",
".",
"text",
")",
"assert",
"body_params",
"==",
"context",
".",
"api_response",
".",
"data",
",",
"\"Expected: {}; Message: {}\"",
".",
"format",
"(",
"body_params",
",",
"context",
".",
"api_response",
".",
"data",
")"
] | Parameters:
.. code-block:: json
{
"param1": "value1",
"param2": "value2",
"param3": {
"param31": "value31"
}
} | [
"Parameters",
":"
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/behave/response.py#L63-L79 |
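A sketch of how this behave step might be driven from a feature file; the exact step phrase depends on the decorator that registers the function, which this record does not show, so the wording below is assumed.

# Gherkin sketch (step text assumed, JSON compared against context.api_response.data):
Then the http response body should be this json
  """
  {
    "param1": "value1"
  }
  """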
calston/rhumba | rhumba/backends/zk.py | Backend.queue | def queue(self, queue, message, params={}, uids=[]):
"""
Queue a job in Rhumba
"""
d = {
'id': uuid.uuid1().get_hex(),
'version': 1,
'message': message,
'params': params
}
ser = json.dumps(d)
if uids:
for uid in uids:
path = '/dq/%s/%s' % (uid, queue)
yield self._put_queue(path, ser)
else:
path = '/q/%s' % queue
yield self._put_queue(path, ser)
defer.returnValue(d['id']) | python | def queue(self, queue, message, params={}, uids=[]):
"""
Queue a job in Rhumba
"""
d = {
'id': uuid.uuid1().get_hex(),
'version': 1,
'message': message,
'params': params
}
ser = json.dumps(d)
if uids:
for uid in uids:
path = '/dq/%s/%s' % (uid, queue)
yield self._put_queue(path, ser)
else:
path = '/q/%s' % queue
yield self._put_queue(path, ser)
defer.returnValue(d['id']) | [
"def",
"queue",
"(",
"self",
",",
"queue",
",",
"message",
",",
"params",
"=",
"{",
"}",
",",
"uids",
"=",
"[",
"]",
")",
":",
"d",
"=",
"{",
"'id'",
":",
"uuid",
".",
"uuid1",
"(",
")",
".",
"get_hex",
"(",
")",
",",
"'version'",
":",
"1",
",",
"'message'",
":",
"message",
",",
"'params'",
":",
"params",
"}",
"ser",
"=",
"json",
".",
"dumps",
"(",
"d",
")",
"if",
"uids",
":",
"for",
"uid",
"in",
"uids",
":",
"path",
"=",
"'/dq/%s/%s'",
"%",
"(",
"uid",
",",
"queue",
")",
"yield",
"self",
".",
"_put_queue",
"(",
"path",
",",
"ser",
")",
"else",
":",
"path",
"=",
"'/q/%s'",
"%",
"queue",
"yield",
"self",
".",
"_put_queue",
"(",
"path",
",",
"ser",
")",
"defer",
".",
"returnValue",
"(",
"d",
"[",
"'id'",
"]",
")"
] | Queue a job in Rhumba | [
"Queue",
"a",
"job",
"in",
"Rhumba"
] | train | https://github.com/calston/rhumba/blob/05e3cbf4e531cc51b4777912eb98a4f006893f5e/rhumba/backends/zk.py#L168-L191 |
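A sketch of queuing a job from Twisted code; it assumes the method carries a defer.inlineCallbacks decorator (decorators are stripped from these records) and that a connected Backend instance is at hand.

from twisted.internet import defer

@defer.inlineCallbacks
def enqueue(backend):
    # queue() resolves to the hex uuid assigned to the queued job
    job_id = yield backend.queue('email', 'send_welcome', params={'user': 42})
    defer.returnValue(job_id)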
calston/rhumba | rhumba/backends/zk.py | Backend.clusterStatus | def clusterStatus(self):
"""
Returns a dict of cluster nodes and their status information
"""
servers = yield self.getClusterServers()
d = {
'workers': {},
'crons': {},
'queues': {}
}
now = time.time()
reverse_map = {}
for sname in servers:
last = yield self._get_key('/server/%s/heartbeat' % sname)
status = yield self._get_key('/server/%s/status' % sname)
uuid = yield self._get_key('/server/%s/uuid' % sname)
reverse_map[uuid] = sname
if not last:
last = 0
last = float(last)
if (status == 'ready') and (now - last > 5):
status = 'offline'
if not sname in d['workers']:
d['workers'][sname] = []
d['workers'][sname].append({
'lastseen': last,
'status': status,
'id': uuid
})
# Crons
crons = yield self.keys('/crons')
for queue in crons:
if queue not in d['crons']:
d['crons'][queue] = {'methods': {}}
methods = yield self.keys('/crons/%s' % queue)
for method in methods:
last = yield self._get_key('/crons/%s/%s' % (queue, method))
if last:
d['crons'][queue]['methods'][method] = float(last)
uid = yield self._get_key('/croner/%s' % queue)
if uid:
d['crons'][queue]['master'] = '%s:%s' % (uid, reverse_map[uid])
# Queues
queue_keys = yield self.keys('/qstats')
for qname in queue_keys:
if qname not in d['queues']:
qlen = yield self.queueSize(qname)
stats = yield self.getQueueMessageStats(qname)
d['queues'][qname] = {
'waiting': qlen,
'messages': stats
}
defer.returnValue(d) | python | def clusterStatus(self):
"""
Returns a dict of cluster nodes and their status information
"""
servers = yield self.getClusterServers()
d = {
'workers': {},
'crons': {},
'queues': {}
}
now = time.time()
reverse_map = {}
for sname in servers:
last = yield self._get_key('/server/%s/heartbeat' % sname)
status = yield self._get_key('/server/%s/status' % sname)
uuid = yield self._get_key('/server/%s/uuid' % sname)
reverse_map[uuid] = sname
if not last:
last = 0
last = float(last)
if (status == 'ready') and (now - last > 5):
status = 'offline'
if not sname in d['workers']:
d['workers'][sname] = []
d['workers'][sname].append({
'lastseen': last,
'status': status,
'id': uuid
})
# Crons
crons = yield self.keys('/crons')
for queue in crons:
if queue not in d['crons']:
d['crons'][queue] = {'methods': {}}
methods = yield self.keys('/crons/%s' % queue)
for method in methods:
last = yield self._get_key('/crons/%s/%s' % (queue, method))
if last:
d['crons'][queue]['methods'][method] = float(last)
uid = yield self._get_key('/croner/%s' % queue)
if uid:
d['crons'][queue]['master'] = '%s:%s' % (uid, reverse_map[uid])
# Queues
queue_keys = yield self.keys('/qstats')
for qname in queue_keys:
if qname not in d['queues']:
qlen = yield self.queueSize(qname)
stats = yield self.getQueueMessageStats(qname)
d['queues'][qname] = {
'waiting': qlen,
'messages': stats
}
defer.returnValue(d) | [
"def",
"clusterStatus",
"(",
"self",
")",
":",
"servers",
"=",
"yield",
"self",
".",
"getClusterServers",
"(",
")",
"d",
"=",
"{",
"'workers'",
":",
"{",
"}",
",",
"'crons'",
":",
"{",
"}",
",",
"'queues'",
":",
"{",
"}",
"}",
"now",
"=",
"time",
".",
"time",
"(",
")",
"reverse_map",
"=",
"{",
"}",
"for",
"sname",
"in",
"servers",
":",
"last",
"=",
"yield",
"self",
".",
"_get_key",
"(",
"'/server/%s/heartbeat'",
"%",
"sname",
")",
"status",
"=",
"yield",
"self",
".",
"_get_key",
"(",
"'/server/%s/status'",
"%",
"sname",
")",
"uuid",
"=",
"yield",
"self",
".",
"_get_key",
"(",
"'/server/%s/uuid'",
"%",
"sname",
")",
"reverse_map",
"[",
"uuid",
"]",
"=",
"sname",
"if",
"not",
"last",
":",
"last",
"=",
"0",
"last",
"=",
"float",
"(",
"last",
")",
"if",
"(",
"status",
"==",
"'ready'",
")",
"and",
"(",
"now",
"-",
"last",
">",
"5",
")",
":",
"status",
"=",
"'offline'",
"if",
"not",
"sname",
"in",
"d",
"[",
"'workers'",
"]",
":",
"d",
"[",
"'workers'",
"]",
"[",
"sname",
"]",
"=",
"[",
"]",
"d",
"[",
"'workers'",
"]",
"[",
"sname",
"]",
".",
"append",
"(",
"{",
"'lastseen'",
":",
"last",
",",
"'status'",
":",
"status",
",",
"'id'",
":",
"uuid",
"}",
")",
"# Crons",
"crons",
"=",
"yield",
"self",
".",
"keys",
"(",
"'/crons'",
")",
"for",
"queue",
"in",
"crons",
":",
"if",
"queue",
"not",
"in",
"d",
"[",
"'crons'",
"]",
":",
"d",
"[",
"'crons'",
"]",
"[",
"queue",
"]",
"=",
"{",
"'methods'",
":",
"{",
"}",
"}",
"methods",
"=",
"yield",
"self",
".",
"keys",
"(",
"'/crons/%s'",
"%",
"queue",
")",
"for",
"method",
"in",
"methods",
":",
"last",
"=",
"yield",
"self",
".",
"_get_key",
"(",
"'/crons/%s/%s'",
"%",
"(",
"queue",
",",
"method",
")",
")",
"if",
"last",
":",
"d",
"[",
"'crons'",
"]",
"[",
"queue",
"]",
"[",
"'methods'",
"]",
"[",
"method",
"]",
"=",
"float",
"(",
"last",
")",
"uid",
"=",
"yield",
"self",
".",
"_get_key",
"(",
"'/croner/%s'",
"%",
"queue",
")",
"if",
"uid",
":",
"d",
"[",
"'crons'",
"]",
"[",
"queue",
"]",
"[",
"'master'",
"]",
"=",
"'%s:%s'",
"%",
"(",
"uid",
",",
"reverse_map",
"[",
"uid",
"]",
")",
"# Queues",
"queue_keys",
"=",
"yield",
"self",
".",
"keys",
"(",
"'/qstats'",
")",
"for",
"qname",
"in",
"queue_keys",
":",
"if",
"qname",
"not",
"in",
"d",
"[",
"'queues'",
"]",
":",
"qlen",
"=",
"yield",
"self",
".",
"queueSize",
"(",
"qname",
")",
"stats",
"=",
"yield",
"self",
".",
"getQueueMessageStats",
"(",
"qname",
")",
"d",
"[",
"'queues'",
"]",
"[",
"qname",
"]",
"=",
"{",
"'waiting'",
":",
"qlen",
",",
"'messages'",
":",
"stats",
"}",
"defer",
".",
"returnValue",
"(",
"d",
")"
] | Returns a dict of cluster nodes and their status information | [
"Returns",
"a",
"dict",
"of",
"cluster",
"nodes",
"and",
"their",
"status",
"information"
] | train | https://github.com/calston/rhumba/blob/05e3cbf4e531cc51b4777912eb98a4f006893f5e/rhumba/backends/zk.py#L382-L454 |
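A sketch of consuming the returned structure; the key names follow the code above ('workers', 'crons', 'queues'), and an inlineCallbacks decorator on clusterStatus is again assumed.

from twisted.internet import defer

@defer.inlineCallbacks
def report(backend):
    status = yield backend.clusterStatus()
    for server, workers in status['workers'].items():
        for w in workers:
            print(server, w['id'], w['status'], w['lastseen'])
    for qname, q in status['queues'].items():
        print(qname, 'waiting:', q['waiting'])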
RudolfCardinal/pythonlib | cardinal_pythonlib/deform_utils.py | get_head_form_html | def get_head_form_html(req: "Request", forms: List[Form]) -> str:
"""
Returns the extra HTML that needs to be injected into the ``<head>``
section for a Deform form to work properly.
"""
# https://docs.pylonsproject.org/projects/deform/en/latest/widget.html#widget-requirements
js_resources = [] # type: List[str]
css_resources = [] # type: List[str]
for form in forms:
resources = form.get_widget_resources() # type: Dict[str, List[str]]
# Add, ignoring duplicates:
js_resources.extend(x for x in resources['js']
if x not in js_resources)
css_resources.extend(x for x in resources['css']
if x not in css_resources)
js_links = [req.static_url(r) for r in js_resources]
css_links = [req.static_url(r) for r in css_resources]
js_tags = ['<script type="text/javascript" src="%s"></script>' % link
for link in js_links]
css_tags = ['<link rel="stylesheet" href="%s"/>' % link
for link in css_links]
tags = js_tags + css_tags
head_html = "\n".join(tags)
return head_html | python | def get_head_form_html(req: "Request", forms: List[Form]) -> str:
"""
Returns the extra HTML that needs to be injected into the ``<head>``
section for a Deform form to work properly.
"""
# https://docs.pylonsproject.org/projects/deform/en/latest/widget.html#widget-requirements
js_resources = [] # type: List[str]
css_resources = [] # type: List[str]
for form in forms:
resources = form.get_widget_resources() # type: Dict[str, List[str]]
# Add, ignoring duplicates:
js_resources.extend(x for x in resources['js']
if x not in js_resources)
css_resources.extend(x for x in resources['css']
if x not in css_resources)
js_links = [req.static_url(r) for r in js_resources]
css_links = [req.static_url(r) for r in css_resources]
js_tags = ['<script type="text/javascript" src="%s"></script>' % link
for link in js_links]
css_tags = ['<link rel="stylesheet" href="%s"/>' % link
for link in css_links]
tags = js_tags + css_tags
head_html = "\n".join(tags)
return head_html | [
"def",
"get_head_form_html",
"(",
"req",
":",
"\"Request\"",
",",
"forms",
":",
"List",
"[",
"Form",
"]",
")",
"->",
"str",
":",
"# https://docs.pylonsproject.org/projects/deform/en/latest/widget.html#widget-requirements",
"js_resources",
"=",
"[",
"]",
"# type: List[str]",
"css_resources",
"=",
"[",
"]",
"# type: List[str]",
"for",
"form",
"in",
"forms",
":",
"resources",
"=",
"form",
".",
"get_widget_resources",
"(",
")",
"# type: Dict[str, List[str]]",
"# Add, ignoring duplicates:",
"js_resources",
".",
"extend",
"(",
"x",
"for",
"x",
"in",
"resources",
"[",
"'js'",
"]",
"if",
"x",
"not",
"in",
"js_resources",
")",
"css_resources",
".",
"extend",
"(",
"x",
"for",
"x",
"in",
"resources",
"[",
"'css'",
"]",
"if",
"x",
"not",
"in",
"css_resources",
")",
"js_links",
"=",
"[",
"req",
".",
"static_url",
"(",
"r",
")",
"for",
"r",
"in",
"js_resources",
"]",
"css_links",
"=",
"[",
"req",
".",
"static_url",
"(",
"r",
")",
"for",
"r",
"in",
"css_resources",
"]",
"js_tags",
"=",
"[",
"'<script type=\"text/javascript\" src=\"%s\"></script>'",
"%",
"link",
"for",
"link",
"in",
"js_links",
"]",
"css_tags",
"=",
"[",
"'<link rel=\"stylesheet\" href=\"%s\"/>'",
"%",
"link",
"for",
"link",
"in",
"css_links",
"]",
"tags",
"=",
"js_tags",
"+",
"css_tags",
"head_html",
"=",
"\"\\n\"",
".",
"join",
"(",
"tags",
")",
"return",
"head_html"
] | Returns the extra HTML that needs to be injected into the ``<head>``
section for a Deform form to work properly. | [
"Returns",
"the",
"extra",
"HTML",
"that",
"needs",
"to",
"be",
"injected",
"into",
"the",
"<head",
">",
"section",
"for",
"a",
"Deform",
"form",
"to",
"work",
"properly",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/deform_utils.py#L58-L81 |
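A sketch of typical use from a Pyramid view; my_schema and the template wiring are assumptions, not part of this record.

from deform import Form

def edit_view(request):
    form = Form(my_schema, buttons=('submit',))  # my_schema defined elsewhere
    return {
        'form': form.render(),
        'head_form_html': get_head_form_html(request, [form]),
        # the template injects head_form_html verbatim inside <head>
    }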
RudolfCardinal/pythonlib | cardinal_pythonlib/deform_utils.py | debug_validator | def debug_validator(validator: ValidatorType) -> ValidatorType:
"""
Use as a wrapper around a validator, e.g.
.. code-block:: python
self.validator = debug_validator(OneOf(["some", "values"]))
If you do this, the log will show the thinking of the validator (what it's
trying to validate, and whether it accepted or rejected the value).
"""
def _validate(node: SchemaNode, value: Any) -> None:
log.debug("Validating: {!r}", value)
try:
validator(node, value)
log.debug("... accepted")
except Invalid:
log.debug("... rejected")
raise
return _validate | python | def debug_validator(validator: ValidatorType) -> ValidatorType:
"""
Use as a wrapper around a validator, e.g.
.. code-block:: python
self.validator = debug_validator(OneOf(["some", "values"]))
If you do this, the log will show the thinking of the validator (what it's
trying to validate, and whether it accepted or rejected the value).
"""
def _validate(node: SchemaNode, value: Any) -> None:
log.debug("Validating: {!r}", value)
try:
validator(node, value)
log.debug("... accepted")
except Invalid:
log.debug("... rejected")
raise
return _validate | [
"def",
"debug_validator",
"(",
"validator",
":",
"ValidatorType",
")",
"->",
"ValidatorType",
":",
"def",
"_validate",
"(",
"node",
":",
"SchemaNode",
",",
"value",
":",
"Any",
")",
"->",
"None",
":",
"log",
".",
"debug",
"(",
"\"Validating: {!r}\"",
",",
"value",
")",
"try",
":",
"validator",
"(",
"node",
",",
"value",
")",
"log",
".",
"debug",
"(",
"\"... accepted\"",
")",
"except",
"Invalid",
":",
"log",
".",
"debug",
"(",
"\"... rejected\"",
")",
"raise",
"return",
"_validate"
] | Use as a wrapper around a validator, e.g.
.. code-block:: python
self.validator = debug_validator(OneOf(["some", "values"]))
If you do this, the log will show the thinking of the validator (what it's
trying to validate, and whether it accepted or rejected the value). | [
"Use",
"as",
"a",
"wrapper",
"around",
"a",
"validator",
"e",
".",
"g",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/deform_utils.py#L178-L198 |
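A runnable sketch expanding the docstring's own example with colander:

import colander

schema = colander.SchemaNode(
    colander.String(),
    name='kind',
    validator=debug_validator(colander.OneOf(['cat', 'dog'])),
)
schema.deserialize('cat')   # log shows "Validating: 'cat'" then "... accepted"
# schema.deserialize('fox') would log "... rejected" and raise colander.Invalid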
RudolfCardinal/pythonlib | cardinal_pythonlib/deform_utils.py | gen_fields | def gen_fields(field: Field) -> Generator[Field, None, None]:
"""
Starting with a Deform :class:`Field`, yield the field itself and any
children.
"""
yield field
for c in field.children:
for f in gen_fields(c):
yield f | python | def gen_fields(field: Field) -> Generator[Field, None, None]:
"""
Starting with a Deform :class:`Field`, yield the field itself and any
children.
"""
yield field
for c in field.children:
for f in gen_fields(c):
yield f | [
"def",
"gen_fields",
"(",
"field",
":",
"Field",
")",
"->",
"Generator",
"[",
"Field",
",",
"None",
",",
"None",
"]",
":",
"yield",
"field",
"for",
"c",
"in",
"field",
".",
"children",
":",
"for",
"f",
"in",
"gen_fields",
"(",
"c",
")",
":",
"yield",
"f"
] | Starting with a Deform :class:`Field`, yield the field itself and any
children. | [
"Starting",
"with",
"a",
"Deform",
":",
"class",
":",
"Field",
"yield",
"the",
"field",
"itself",
"and",
"any",
"children",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/deform_utils.py#L205-L213 |
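A short sketch: because deform.Form subclasses Field, the generator flattens an entire form tree, which is handy for inspecting names or errors.

for fld in gen_fields(form):   # 'form' is any deform.Field / deform.Form instance
    print(fld.name, fld.error)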
RudolfCardinal/pythonlib | cardinal_pythonlib/deform_utils.py | InformativeForm.validate | def validate(self,
controls: Iterable[Tuple[str, str]],
subcontrol: str = None) -> Any:
"""
Validates the form.
Args:
controls: an iterable of ``(key, value)`` tuples
subcontrol:
Returns:
a Colander ``appstruct``
Raises:
ValidationFailure: on failure
"""
try:
return super().validate(controls, subcontrol)
except ValidationFailure as e:
if DEBUG_FORM_VALIDATION:
log.warning("Validation failure: {!r}; {}",
e, self._get_form_errors())
self._show_hidden_widgets_for_fields_with_errors(self)
raise | python | def validate(self,
controls: Iterable[Tuple[str, str]],
subcontrol: str = None) -> Any:
"""
Validates the form.
Args:
controls: an iterable of ``(key, value)`` tuples
subcontrol:
Returns:
a Colander ``appstruct``
Raises:
ValidationFailure: on failure
"""
try:
return super().validate(controls, subcontrol)
except ValidationFailure as e:
if DEBUG_FORM_VALIDATION:
log.warning("Validation failure: {!r}; {}",
e, self._get_form_errors())
self._show_hidden_widgets_for_fields_with_errors(self)
raise | [
"def",
"validate",
"(",
"self",
",",
"controls",
":",
"Iterable",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
",",
"subcontrol",
":",
"str",
"=",
"None",
")",
"->",
"Any",
":",
"try",
":",
"return",
"super",
"(",
")",
".",
"validate",
"(",
"controls",
",",
"subcontrol",
")",
"except",
"ValidationFailure",
"as",
"e",
":",
"if",
"DEBUG_FORM_VALIDATION",
":",
"log",
".",
"warning",
"(",
"\"Validation failure: {!r}; {}\"",
",",
"e",
",",
"self",
".",
"_get_form_errors",
"(",
")",
")",
"self",
".",
"_show_hidden_widgets_for_fields_with_errors",
"(",
"self",
")",
"raise"
] | Validates the form.
Args:
controls: an iterable of ``(key, value)`` tuples
subcontrol:
Returns:
a Colander ``appstruct``
Raises:
ValidationFailure: on failure | [
"Validates",
"the",
"form",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/deform_utils.py#L112-L135 |
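A sketch of the usual request-handling pattern around validate(); request and form come from the surrounding Pyramid/Deform application and are assumed here.

from deform import ValidationFailure

controls = list(request.POST.items())    # (key, value) tuples from the submission
try:
    appstruct = form.validate(controls)  # Colander appstruct on success
except ValidationFailure as exc:
    html = exc.render()                  # form re-rendered with error markup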
The-Politico/politico-civic-geography | geography/models/division_level.py | DivisionLevel.save | def save(self, *args, **kwargs):
"""
**uid**: :code:`{levelcode}`
"""
self.slug = slugify(self.name)
self.uid = self.slug
super(DivisionLevel, self).save(*args, **kwargs) | python | def save(self, *args, **kwargs):
"""
**uid**: :code:`{levelcode}`
"""
self.slug = slugify(self.name)
self.uid = self.slug
super(DivisionLevel, self).save(*args, **kwargs) | [
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"slug",
"=",
"slugify",
"(",
"self",
".",
"name",
")",
"self",
".",
"uid",
"=",
"self",
".",
"slug",
"super",
"(",
"DivisionLevel",
",",
"self",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | **uid**: :code:`{levelcode}` | [
"**",
"uid",
"**",
":",
":",
"code",
":",
"{",
"levelcode",
"}"
] | train | https://github.com/The-Politico/politico-civic-geography/blob/032b3ee773b50b65cfe672f230dda772df0f89e0/geography/models/division_level.py#L47-L53 |
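A sketch of the derived fields in action; slug and uid need not be supplied, since save() derives both from name:

level = DivisionLevel(name='County')
level.save()
assert level.slug == 'county'
assert level.uid == 'county'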
Vauxoo/pstats-print2list | pstats_print2list/pstats_print2list.py | get_pstats_print2list | def get_pstats_print2list(fnames, filter_fnames=None, exclude_fnames=None,
sort=None, sort_reverse=None, limit=None):
"""Print stats with a filter or exclude filenames, sort index and limit.
:param list fnames: cProfile standard files to process.
:param list filter_fnames: Relative paths to filter and show them.
:param list exclude_fnames: Relative paths to avoid show them.
:param str sort: Standard `pstats` key of value to sort the result.
\n\t\t\t'calls' (call count)
\n\t\t\t'cumulative' (cumulative time)
\n\t\t\t'cumtime' (cumulative time)
\n\t\t\t'file' (file name)
\n\t\t\t'filename' (file name)
\n\t\t\t'module' (file name)
\n\t\t\t'ncalls' (call count)
\n\t\t\t'pcalls' (primitive call count)
\n\t\t\t'line' (line number)
\n\t\t\t'name' (function name)
\n\t\t\t'nfl' (name/file/line)
\n\t\t\t'stdname' (standard name)
\n\t\t\t'time' (internal time)
\n\t\t\t'tottime' (internal time)
:param bool sort_reverse: Reverse sort order.
:param int limit: Limit max result.
:returns: List of dicts with `pstats` print result after filters, sorted
and limited.
"""
if isinstance(fnames, basestring):
fnames = [fnames]
fnames_expanded = [
os.path.expandvars(os.path.expanduser(fname)) for fname in fnames]
stream = StringIO()
try:
stats = pstats.Stats(fnames_expanded[0], stream=stream)
for fname in fnames_expanded[1:]:
stats.add(fname)
except TypeError:
print("No cProfile stats valid.")
return False
except EOFError:
print("Empty file cProfile stats valid.")
return False
except IOError:
print("Error to open file.")
return False
stats.print_stats()
stream.seek(0)
field_list = get_field_list()
line_stats_re = re.compile(
r'(?P<%s>\d+/?\d+|\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+'
r'(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>.*):(?P<%s>\d+)'
r'\((?P<%s>.*)\)' % tuple(field_list))
stats_list = []
count = 0
for line in stream:
line = line.strip('\r\n ')
line_stats_match = line_stats_re.match(line) if line else None
fname = line_stats_match.group('file') if line_stats_match else None
if fname and is_fname_match(fname, filter_fnames) and \
not is_exclude(fname, exclude_fnames):
data = dict([(field, line_stats_match.group(field))
for field in field_list])
data['rcalls'], data['calls'] = (
data.get('ncalls', '') + '/' + data.get('ncalls', '')
).split('/')[:2]
data['factor'] = "%.2f" % (
(float(data['rcalls']) - float(data['calls']) + 1) *
float(data['cumtime']))
data['cumulative'] = data['cumtime']
stats_list.append(data)
count += 1
return sorted(stats_list, key=lambda key: float(key[sort or 'factor']),
reverse=not sort_reverse)[:limit] | python | def get_pstats_print2list(fnames, filter_fnames=None, exclude_fnames=None,
sort=None, sort_reverse=None, limit=None):
"""Print stats with a filter or exclude filenames, sort index and limit.
:param list fnames: cProfile standard files to process.
:param list filter_fnames: Relative paths to filter and show them.
:param list exclude_fnames: Relative paths to avoid show them.
:param str sort: Standard `pstats` key of value to sort the result.
\n\t\t\t'calls' (call count)
\n\t\t\t'cumulative' (cumulative time)
\n\t\t\t'cumtime' (cumulative time)
\n\t\t\t'file' (file name)
\n\t\t\t'filename' (file name)
\n\t\t\t'module' (file name)
\n\t\t\t'ncalls' (call count)
\n\t\t\t'pcalls' (primitive call count)
\n\t\t\t'line' (line number)
\n\t\t\t'name' (function name)
\n\t\t\t'nfl' (name/file/line)
\n\t\t\t'stdname' (standard name)
\n\t\t\t'time' (internal time)
\n\t\t\t'tottime' (internal time)
:param bool sort_reverse: Reverse sort order.
:param int limit: Limit max result.
:returns: List of dicts with `pstats` print result after filters, sorted
and limited.
"""
if isinstance(fnames, basestring):
fnames = [fnames]
fnames_expanded = [
os.path.expandvars(os.path.expanduser(fname)) for fname in fnames]
stream = StringIO()
try:
stats = pstats.Stats(fnames_expanded[0], stream=stream)
for fname in fnames_expanded[1:]:
stats.add(fname)
except TypeError:
print("No cProfile stats valid.")
return False
except EOFError:
print("Empty file cProfile stats valid.")
return False
except IOError:
print("Error to open file.")
return False
stats.print_stats()
stream.seek(0)
field_list = get_field_list()
line_stats_re = re.compile(
r'(?P<%s>\d+/?\d+|\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+'
r'(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>.*):(?P<%s>\d+)'
r'\((?P<%s>.*)\)' % tuple(field_list))
stats_list = []
count = 0
for line in stream:
line = line.strip('\r\n ')
line_stats_match = line_stats_re.match(line) if line else None
fname = line_stats_match.group('file') if line_stats_match else None
if fname and is_fname_match(fname, filter_fnames) and \
not is_exclude(fname, exclude_fnames):
data = dict([(field, line_stats_match.group(field))
for field in field_list])
data['rcalls'], data['calls'] = (
data.get('ncalls', '') + '/' + data.get('ncalls', '')
).split('/')[:2]
data['factor'] = "%.2f" % (
(float(data['rcalls']) - float(data['calls']) + 1) *
float(data['cumtime']))
data['cumulative'] = data['cumtime']
stats_list.append(data)
count += 1
return sorted(stats_list, key=lambda key: float(key[sort or 'factor']),
reverse=not sort_reverse)[:limit] | [
"def",
"get_pstats_print2list",
"(",
"fnames",
",",
"filter_fnames",
"=",
"None",
",",
"exclude_fnames",
"=",
"None",
",",
"sort",
"=",
"None",
",",
"sort_reverse",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"fnames",
",",
"basestring",
")",
":",
"fnames",
"=",
"[",
"fnames",
"]",
"fnames_expanded",
"=",
"[",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"fname",
")",
")",
"for",
"fname",
"in",
"fnames",
"]",
"stream",
"=",
"StringIO",
"(",
")",
"try",
":",
"stats",
"=",
"pstats",
".",
"Stats",
"(",
"fnames",
"[",
"0",
"]",
",",
"stream",
"=",
"stream",
")",
"for",
"fname",
"in",
"fnames_expanded",
"[",
"1",
":",
"]",
":",
"stats",
".",
"add",
"(",
"fname",
")",
"except",
"TypeError",
":",
"print",
"(",
"\"No cProfile stats valid.\"",
")",
"return",
"False",
"except",
"EOFError",
":",
"print",
"(",
"\"Empty file cProfile stats valid.\"",
")",
"return",
"False",
"except",
"IOError",
":",
"print",
"(",
"\"Error to open file.\"",
")",
"return",
"False",
"stats",
".",
"print_stats",
"(",
")",
"stream",
".",
"seek",
"(",
"0",
")",
"field_list",
"=",
"get_field_list",
"(",
")",
"line_stats_re",
"=",
"re",
".",
"compile",
"(",
"r'(?P<%s>\\d+/?\\d+|\\d+)\\s+(?P<%s>\\d+\\.?\\d+)\\s+(?P<%s>\\d+\\.?\\d+)\\s+'",
"r'(?P<%s>\\d+\\.?\\d+)\\s+(?P<%s>\\d+\\.?\\d+)\\s+(?P<%s>.*):(?P<%s>\\d+)'",
"r'\\((?P<%s>.*)\\)'",
"%",
"tuple",
"(",
"field_list",
")",
")",
"stats_list",
"=",
"[",
"]",
"count",
"=",
"0",
"for",
"line",
"in",
"stream",
":",
"line",
"=",
"line",
".",
"strip",
"(",
"'\\r\\n '",
")",
"line_stats_match",
"=",
"line_stats_re",
".",
"match",
"(",
"line",
")",
"if",
"line",
"else",
"None",
"fname",
"=",
"line_stats_match",
".",
"group",
"(",
"'file'",
")",
"if",
"line_stats_match",
"else",
"None",
"if",
"fname",
"and",
"is_fname_match",
"(",
"fname",
",",
"filter_fnames",
")",
"and",
"not",
"is_exclude",
"(",
"fname",
",",
"exclude_fnames",
")",
":",
"data",
"=",
"dict",
"(",
"[",
"(",
"field",
",",
"line_stats_match",
".",
"group",
"(",
"field",
")",
")",
"for",
"field",
"in",
"field_list",
"]",
")",
"data",
"[",
"'rcalls'",
"]",
",",
"data",
"[",
"'calls'",
"]",
"=",
"(",
"data",
".",
"get",
"(",
"'ncalls'",
",",
"''",
")",
"+",
"'/'",
"+",
"data",
".",
"get",
"(",
"'ncalls'",
",",
"''",
")",
")",
".",
"split",
"(",
"'/'",
")",
"[",
":",
"2",
"]",
"data",
"[",
"'factor'",
"]",
"=",
"\"%.2f\"",
"%",
"(",
"(",
"float",
"(",
"data",
"[",
"'rcalls'",
"]",
")",
"-",
"float",
"(",
"data",
"[",
"'calls'",
"]",
")",
"+",
"1",
")",
"*",
"float",
"(",
"data",
"[",
"'cumtime'",
"]",
")",
")",
"data",
"[",
"'cumulative'",
"]",
"=",
"data",
"[",
"'cumtime'",
"]",
"stats_list",
".",
"append",
"(",
"data",
")",
"count",
"+=",
"1",
"return",
"sorted",
"(",
"stats_list",
",",
"key",
"=",
"lambda",
"key",
":",
"float",
"(",
"key",
"[",
"sort",
"or",
"'factor'",
"]",
")",
",",
"reverse",
"=",
"not",
"sort_reverse",
")",
"[",
":",
"limit",
"]"
] | Print stats with a filter or exclude filenames, sort index and limit.
:param list fnames: cProfile standard files to process.
:param list filter_fnames: Relative paths to filter and show them.
:param list exclude_fnames: Relative paths to avoid show them.
:param str sort: Standard `pstats` key of value to sort the result.
\n\t\t\t'calls' (call count)
\n\t\t\t'cumulative' (cumulative time)
\n\t\t\t'cumtime' (cumulative time)
\n\t\t\t'file' (file name)
\n\t\t\t'filename' (file name)
\n\t\t\t'module' (file name)
\n\t\t\t'ncalls' (call count)
\n\t\t\t'pcalls' (primitive call count)
\n\t\t\t'line' (line number)
\n\t\t\t'name' (function name)
\n\t\t\t'nfl' (name/file/line)
\n\t\t\t'stdname' (standard name)
\n\t\t\t'time' (internal time)
\n\t\t\t'tottime' (internal time)
:param bool sort_reverse: Reverse sort order.
:param int limit: Limit max result.
:returns: List of dicts with `pstats` print result after filters, sorted
and limited. | [
"Print",
"stats",
"with",
"a",
"filter",
"or",
"exclude",
"filenames",
"sort",
"index",
"and",
"limit",
".",
":",
"param",
"list",
"fnames",
":",
"cProfile",
"standard",
"files",
"to",
"process",
".",
":",
"param",
"list",
"filter_fnames",
":",
"Relative",
"paths",
"to",
"filter",
"and",
"show",
"them",
".",
":",
"param",
"list",
"exclude_fnames",
":",
"Relative",
"paths",
"to",
"avoid",
"show",
"them",
".",
":",
"param",
"str",
"sort",
":",
"Standard",
"pstats",
"key",
"of",
"value",
"to",
"sort",
"the",
"result",
".",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"calls",
"(",
"call",
"count",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"cumulative",
"(",
"cumulative",
"time",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"cumtime",
"(",
"cumulative",
"time",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"file",
"(",
"file",
"name",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"filename",
"(",
"file",
"name",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"module",
"(",
"file",
"name",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"ncalls",
"(",
"call",
"count",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"pcalls",
"(",
"primitive",
"call",
"count",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"line",
"(",
"line",
"number",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"name",
"(",
"function",
"name",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"nfl",
"(",
"name",
"/",
"file",
"/",
"line",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"stdname",
"(",
"standard",
"name",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"time",
"(",
"internal",
"time",
")",
"\\",
"n",
"\\",
"t",
"\\",
"t",
"\\",
"t",
"tottime",
"(",
"internal",
"time",
")",
":",
"param",
"bool",
"sort_reverse",
":",
"Reverse",
"sort",
"order",
".",
":",
"param",
"int",
"limit",
":",
"Limit",
"max",
"result",
".",
":",
"returns",
":",
"List",
"of",
"dicts",
"with",
"pstats",
"print",
"result",
"after",
"filters",
"sorted",
"and",
"limited",
"."
] | train | https://github.com/Vauxoo/pstats-print2list/blob/a5ebf1790ed450c12103a665b7f49eb1982a8428/pstats_print2list/pstats_print2list.py#L51-L124 |
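A sketch of profiling a snippet and loading the result; note that with the default sort_reverse=None the list comes back in descending order, since reverse=not None evaluates to True.

import cProfile

cProfile.run('sum(range(10**6))', 'out.pstats')
rows = get_pstats_print2list('out.pstats', sort='cumtime', limit=10)
for row in rows:
    print(row['method'], row['cumtime'])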
Vauxoo/pstats-print2list | pstats_print2list/pstats_print2list.py | print_pstats_list | def print_pstats_list(pstats, pformat=None):
"""Print list of pstats dict formatted
:param list pstats: pstats dicts to print
:param str pformat: String.format style to show fields with keys:
ncalls, tottime, tt_percall, cumtime, ct_percall, file, lineno, method
rcalls, calls
:return: Prints the formatted result directly and returns True"""
if not pstats:
return False
if pformat is None:
pformat = ("{method:<40s} {factor:>16s} {cumtime:>10s} "
"{calls:>10s} {rcalls:>10s} {tottime:>10s} "
"{tt_percall:>10s} {ct_percall:>10s} "
"<{file}:{lineno}")
for pstat_line in [dict(zip(pstats[0].keys(), pstats[0].keys()))] + pstats:
print(pformat.format(**pstat_line))
return True | python | def print_pstats_list(pstats, pformat=None):
"""Print list of pstats dict formatted
:param list pstats: pstats dicts to print
:param str pformat: String.format style to show fields with keys:
ncalls, tottime, tt_percall, cumtime, ct_percall, file, lineno, method
rcalls, calls
:return: Prints the formatted result directly and returns True"""
if not pstats:
return False
if pformat is None:
pformat = ("{method:<40s} {factor:>16s} {cumtime:>10s} "
"{calls:>10s} {rcalls:>10s} {tottime:>10s} "
"{tt_percall:>10s} {ct_percall:>10s} "
"<{file}:{lineno}")
for pstat_line in [dict(zip(pstats[0].keys(), pstats[0].keys()))] + pstats:
print(pformat.format(**pstat_line))
return True | [
"def",
"print_pstats_list",
"(",
"pstats",
",",
"pformat",
"=",
"None",
")",
":",
"if",
"not",
"pstats",
":",
"return",
"False",
"if",
"pformat",
"is",
"None",
":",
"pformat",
"=",
"(",
"\"{method:<40s} {factor:>16s} {cumtime:>10s} \"",
"\"{calls:>10s} {rcalls:>10s} {tottime:>10s} \"",
"\"{tt_percall:>10s} {ct_percall:>10s} \"",
"\"<{file}:{lineno}\"",
")",
"for",
"pstat_line",
"in",
"[",
"dict",
"(",
"zip",
"(",
"pstats",
"[",
"0",
"]",
".",
"keys",
"(",
")",
",",
"pstats",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
")",
"]",
"+",
"pstats",
":",
"print",
"(",
"pformat",
".",
"format",
"(",
"*",
"*",
"pstat_line",
")",
")",
"return",
"True"
] | Print list of pstats dict formatted
:param list pstats: pstats dicts to print
:param str pformat: String.format style to show fields with keys:
ncalls, tottime, tt_percall, cumtime, ct_percall, file, lineno, method
rcalls, calls
:return: Prints the formatted result directly and returns True | [
"Print",
"list",
"of",
"pstats",
"dict",
"formatted",
":",
"param",
"list",
"pstats",
":",
"pstats",
"dicts",
"to",
"print",
":",
"param",
"str",
"format",
":",
"String",
".",
"format",
"style",
"to",
"show",
"fields",
"with",
"keys",
":",
"ncalls",
"tottime",
"tt_percall",
"cumtime",
"ct_percall",
"file",
"lineno",
"method",
"rcalls",
"calls",
":",
"return",
":",
"Directly",
"print",
"of",
"result",
"formatted",
"and",
"return",
"True"
] | train | https://github.com/Vauxoo/pstats-print2list/blob/a5ebf1790ed450c12103a665b7f49eb1982a8428/pstats_print2list/pstats_print2list.py#L127-L143 |
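Continuing the sketch from the previous record:

rows = get_pstats_print2list('out.pstats', sort='cumtime', limit=10)
print_pstats_list(rows)   # prints a header row, then one formatted line per entry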
Netuitive/netuitive-client-python | netuitive/element.py | Element.merge_metrics | def merge_metrics(self):
"""
Merge metrics in the internal _metrics dict to metrics list
and delete the internal _metrics
"""
self.metrics.extend(self._metrics.values())
del self._metrics | python | def merge_metrics(self):
"""
Merge metrics in the internal _metrics dict to metrics list
and delete the internal _metrics
"""
self.metrics.extend(self._metrics.values())
del self._metrics | [
"def",
"merge_metrics",
"(",
"self",
")",
":",
"self",
".",
"metrics",
".",
"extend",
"(",
"self",
".",
"_metrics",
".",
"values",
"(",
")",
")",
"del",
"self",
".",
"_metrics"
] | Merge metrics in the internal _metrics dict to metrics list
and delete the internal _metrics | [
"Merge",
"metrics",
"in",
"the",
"internal",
"_metrics",
"dict",
"to",
"metrics",
"list",
"and",
"delete",
"the",
"internal",
"_metrics"
] | train | https://github.com/Netuitive/netuitive-client-python/blob/16426ade6a5dc0888ce978c97b02663a9713fc16/netuitive/element.py#L43-L50 |
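A two-line sketch; note the internal _metrics dict is only created by add_sample (shown below), so merge_metrics presumes at least one sample has been added.

element.merge_metrics()   # folds accumulated Metric objects into element.metrics
# element._metrics is deleted afterwards, so call this once, at send time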
Netuitive/netuitive-client-python | netuitive/element.py | Element.add_attribute | def add_attribute(self, name, value):
"""
:param name: Name of the attribute
:type name: string
:param value: Value of the attribute
:type value: string
"""
self.attributes.append(Attribute(name, value)) | python | def add_attribute(self, name, value):
"""
:param name: Name of the attribute
:type name: string
:param value: Value of the attribute
:type value: string
"""
self.attributes.append(Attribute(name, value)) | [
"def",
"add_attribute",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"self",
".",
"attributes",
".",
"append",
"(",
"Attribute",
"(",
"name",
",",
"value",
")",
")"
] | :param name: Name of the attribute
:type name: string
:param value: Value of the attribute
:type value: string | [
":",
"param",
"name",
":",
"Name",
"of",
"the",
"attribute",
":",
"type",
"name",
":",
"string",
":",
"param",
"value",
":",
"Value",
"of",
"the",
"attribute",
":",
"type",
"value",
":",
"string"
] | train | https://github.com/Netuitive/netuitive-client-python/blob/16426ade6a5dc0888ce978c97b02663a9713fc16/netuitive/element.py#L52-L60 |
Netuitive/netuitive-client-python | netuitive/element.py | Element.add_tag | def add_tag(self, name, value):
"""
:param name: Name of the tag
:type name: string
:param value: Value of the tag
:type value: string
"""
self.tags.append(Tag(name, value)) | python | def add_tag(self, name, value):
"""
:param name: Name of the tag
:type name: string
:param value: Value of the tag
:type value: string
"""
self.tags.append(Tag(name, value)) | [
"def",
"add_tag",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"self",
".",
"tags",
".",
"append",
"(",
"Tag",
"(",
"name",
",",
"value",
")",
")"
] | :param name: Name of the tag
:type name: string
:param value: Value of the tag
:type value: string | [
":",
"param",
"name",
":",
"Name",
"of",
"the",
"tag",
":",
"type",
"name",
":",
"string",
":",
"param",
"value",
":",
"Value",
"of",
"the",
"tag",
":",
"type",
"value",
":",
"string"
] | train | https://github.com/Netuitive/netuitive-client-python/blob/16426ade6a5dc0888ce978c97b02663a9713fc16/netuitive/element.py#L70-L78 |
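A sketch covering both this method and add_attribute in the previous record, which are symmetrical:

element.add_attribute('platform', 'python')
element.add_tag('environment', 'production')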
Netuitive/netuitive-client-python | netuitive/element.py | Element.add_sample | def add_sample(self,
metricId,
timestamp,
value,
metricType=None,
host=None,
sparseDataStrategy='None',
unit='',
tags=None,
min=None,
max=None,
avg=None,
sum=None,
cnt=None,
ts_is_ms=False):
"""
:param metricId: Metric FQN
:type metricId: string
:param timestamp: Timestamp for the sample
:type timestamp: int
:param value: Value of the sample
:type value: float
:param metricType: Metric Type
:type metricType: string
:param host: Element FQN
:type host: string
:param sparseDataStrategy: Sparse data strategy
:type sparseDataStrategy: string
:param unit: Metric Unit type
:type unit: string
:param tags: List of dicts
:type tags: list
:param min: Minimum of the sample
:type min: float
:param max: Maximum of the sample
:type max: float
:param avg: Average of the sample
:type avg: float
:param sum: Sum of the sample
:type sum: float
:param cnt: Count of the sample
:type cnt: float
:param ts_is_ms: Is the timestamp in milliseconds
:type ts_is_ms: bool
"""
if self.id is None and host is not None:
self.id = host
if self.name is None and host is not None:
self.name = host
if tags is not None:
Tags = []
for i in tags:
for k in i:
Tags.append(Tag(k, i[k]))
else:
Tags = None
metricIdSan = self._sanitize(metricId)
if not hasattr(self, "_metrics"):
setattr(self, "_metrics", {})
if self._metrics.get(metricIdSan) is None:
self._metrics[metricIdSan] = Metric(metricIdSan,
metricType,
sparseDataStrategy,
unit,
Tags)
if timestamp is None:
ts = to_ms_timestamp_int(datetime.datetime.utcnow())
else:
if ts_is_ms:
ts = int(timestamp)
else:
ts = int(timestamp * 1000)
self.samples.append(Sample(metricIdSan,
ts,
value,
min,
max,
avg,
sum,
cnt)) | python | def add_sample(self,
metricId,
timestamp,
value,
metricType=None,
host=None,
sparseDataStrategy='None',
unit='',
tags=None,
min=None,
max=None,
avg=None,
sum=None,
cnt=None,
ts_is_ms=False):
"""
:param metricId: Metric FQN
:type metricId: string
:param timestamp: Timestamp for the sample
:type timestamp: int
:param value: Value of the sample
:type value: float
:param metricType: Metric Type
:type metricType: string
:param host: Element FQN
:type host: string
:param sparseDataStrategy: Sparse data strategy
:type sparseDataStrategy: string
:param unit: Metric Unit type
:type unit: string
:param tags: List of dicts
:type tags: list
:param min: Minimum of the sample
:type min: float
:param max: Maximum of the sample
:type max: float
:param avg: Average of the sample
:type avg: float
:param sum: Sum of the sample
:type sum: float
:param cnt: Count of the sample
:type cnt: float
:param ts_is_ms: Is the timestamp in milliseconds
:type ts_is_ms: bool
"""
if self.id is None and host is not None:
self.id = host
if self.name is None and host is not None:
self.name = host
if tags is not None:
Tags = []
for i in tags:
for k in i:
Tags.append(Tag(k, i[k]))
else:
Tags = None
metricIdSan = self._sanitize(metricId)
if not hasattr(self, "_metrics"):
setattr(self, "_metrics", {})
if self._metrics.get(metricIdSan) is None:
self._metrics[metricIdSan] = Metric(metricIdSan,
metricType,
sparseDataStrategy,
unit,
Tags)
if timestamp is None:
ts = to_ms_timestamp_int(datetime.datetime.utcnow())
else:
if ts_is_ms:
ts = int(timestamp)
else:
ts = int(timestamp * 1000)
self.samples.append(Sample(metricIdSan,
ts,
value,
min,
max,
avg,
sum,
cnt)) | [
"def",
"add_sample",
"(",
"self",
",",
"metricId",
",",
"timestamp",
",",
"value",
",",
"metricType",
"=",
"None",
",",
"host",
"=",
"None",
",",
"sparseDataStrategy",
"=",
"'None'",
",",
"unit",
"=",
"''",
",",
"tags",
"=",
"None",
",",
"min",
"=",
"None",
",",
"max",
"=",
"None",
",",
"avg",
"=",
"None",
",",
"sum",
"=",
"None",
",",
"cnt",
"=",
"None",
",",
"ts_is_ms",
"=",
"False",
")",
":",
"if",
"self",
".",
"id",
"is",
"None",
"and",
"host",
"is",
"not",
"None",
":",
"self",
".",
"id",
"=",
"host",
"if",
"self",
".",
"name",
"is",
"None",
"and",
"host",
"is",
"not",
"None",
":",
"self",
".",
"name",
"=",
"host",
"if",
"tags",
"is",
"not",
"None",
":",
"Tags",
"=",
"[",
"]",
"for",
"i",
"in",
"tags",
":",
"for",
"k",
"in",
"i",
":",
"Tags",
".",
"append",
"(",
"Tag",
"(",
"k",
",",
"i",
"[",
"k",
"]",
")",
")",
"else",
":",
"Tags",
"=",
"None",
"metricIdSan",
"=",
"self",
".",
"_sanitize",
"(",
"metricId",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_metrics\"",
")",
":",
"setattr",
"(",
"self",
",",
"\"_metrics\"",
",",
"{",
"}",
")",
"if",
"self",
".",
"_metrics",
".",
"get",
"(",
"metricIdSan",
")",
"is",
"None",
":",
"self",
".",
"_metrics",
"[",
"metricIdSan",
"]",
"=",
"Metric",
"(",
"metricIdSan",
",",
"metricType",
",",
"sparseDataStrategy",
",",
"unit",
",",
"Tags",
")",
"if",
"timestamp",
"is",
"None",
":",
"ts",
"=",
"to_ms_timestamp_int",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
")",
"else",
":",
"if",
"ts_is_ms",
":",
"ts",
"=",
"int",
"(",
"timestamp",
")",
"else",
":",
"ts",
"=",
"int",
"(",
"timestamp",
"*",
"1000",
")",
"self",
".",
"samples",
".",
"append",
"(",
"Sample",
"(",
"metricIdSan",
",",
"ts",
",",
"value",
",",
"min",
",",
"max",
",",
"avg",
",",
"sum",
",",
"cnt",
")",
")"
] | :param metricId: Metric FQN
:type metricId: string
:param timestamp: Timestamp for the sample
:type timestamp: int
:param value: Value of the sample
:type value: float
:param metricType: Metric Type
:type metricType: string
:param host: Element FQN
:type host: string
:param sparseDataStrategy: Sparse data strategy
:type sparseDataStrategy: string
:param unit: Metric Unit type
:type unit: string
:param tags: List of dicts
:type tags: list
:param min: Minimum of the sample
:type min: float
:param max: Maximum of the sample
:type max: float
:param avg: Average of the sample
:type avg: float
:param sum: Sum of the sample
:type sum: float
:param cnt: Count of the sample
:type cnt: float
:param ts_is_ms: Is the timestamp in milliseconds
:type ts_is_ms: bool | [
":",
"param",
"metricId",
":",
"Metric",
"FQN",
":",
"type",
"metricId",
":",
"string",
":",
"param",
"timestamp",
":",
"Timestamp",
"for",
"the",
"sample",
":",
"type",
"timestamp",
":",
"int",
":",
"param",
"value",
":",
"Value",
"of",
"the",
"sample",
":",
"type",
"value",
":",
"float",
":",
"param",
"metricType",
":",
"Metric",
"Type",
":",
"type",
"metricType",
":",
"string",
":",
"param",
"host",
":",
"Element",
"FQN",
":",
"type",
"host",
":",
"string",
":",
"param",
"sparseDataStrategy",
":",
"Sparse",
"data",
"strategy",
":",
"type",
"sparseDataStrategy",
":",
"string",
":",
"param",
"unit",
":",
"Metric",
"Unit",
"type",
":",
"type",
"unit",
":",
"string",
":",
"param",
"tags",
":",
"List",
"of",
"dicts",
":",
"type",
"tags",
":",
"list",
":",
"param",
"min",
":",
"Minimum",
"of",
"the",
"sample",
":",
"type",
"min",
":",
"float",
":",
"param",
"max",
":",
"Maximum",
"of",
"the",
"sample",
":",
"type",
"max",
":",
"float",
":",
"param",
"avg",
":",
"Average",
"of",
"the",
"sample",
":",
"type",
"avg",
":",
"float",
":",
"param",
"sum",
":",
"Sum",
"of",
"the",
"sample",
":",
"type",
"sum",
":",
"float",
":",
"param",
"cnt",
":",
"Count",
"of",
"the",
"sample",
":",
"type",
"cnt",
":",
"float",
":",
"param",
"ts_is_ms",
":",
"Is",
"the",
"timestamp",
"in",
"milliseconds",
":",
"type",
"ts_is_ms",
":",
"bool"
] | train | https://github.com/Netuitive/netuitive-client-python/blob/16426ade6a5dc0888ce978c97b02663a9713fc16/netuitive/element.py#L80-L172 |
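An end-to-end sketch of building an Element (the constructor argument is assumed); timestamps are passed in seconds and scaled to milliseconds internally when ts_is_ms is False.

import time
import netuitive

element = netuitive.Element('SERVER')          # element type string assumed
element.add_attribute('platform', 'python')
element.add_tag('environment', 'production')
element.add_sample('cpu.idle', int(time.time()), 97.2,
                   host='web01', unit='percent')
element.merge_metrics()                        # see merge_metrics above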
nxdevel/nx_itertools | nx_itertools/extra.py | pairwise | def pairwise(iterable):
"""Pair each element with its neighbors.
Arguments
---------
iterable : iterable
Returns
-------
The generator produces a tuple containing a pairing of each element with
its neighbor.
"""
iterable = iter(iterable)
left = next(iterable)
for right in iterable:
yield left, right
left = right | python | def pairwise(iterable):
"""Pair each element with its neighbors.
Arguments
---------
iterable : iterable
Returns
-------
The generator produces a tuple containing a pairing of each element with
its neighbor.
"""
iterable = iter(iterable)
left = next(iterable)
for right in iterable:
yield left, right
left = right | [
"def",
"pairwise",
"(",
"iterable",
")",
":",
"iterable",
"=",
"iter",
"(",
"iterable",
")",
"left",
"=",
"next",
"(",
"iterable",
")",
"for",
"right",
"in",
"iterable",
":",
"yield",
"left",
",",
"right",
"left",
"=",
"right"
] | Pair each element with its neighbors.
Arguments
---------
iterable : iterable
Returns
-------
The generator produces a tuple containing a pairing of each element with
its neighbor. | [
"Pair",
"each",
"element",
"with",
"its",
"neighbors",
"."
] | train | https://github.com/nxdevel/nx_itertools/blob/744da75c616a8a7991b963a549152fe9c434abd9/nx_itertools/extra.py#L21-L37 |
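A minimal check of the behaviour; note that an empty iterable makes the bare next() raise, which PEP 479 (Python 3.7+) converts into RuntimeError.

from nx_itertools.extra import pairwise   # import path per this repo's layout

assert list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
assert list(pairwise('ab')) == [('a', 'b')]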
nxdevel/nx_itertools | nx_itertools/extra.py | partition | def partition(pred, iterable):
"""Partition an iterable.
Arguments
---------
pred : function
A function that takes an element of the iterable and returns
a boolean indicating to which partition it belongs
iterable : iterable
Returns
-------
A two-tuple of lists with the first list containing the elements on which
the predicate indicated False and the second list containing the elements
on which the predicate indicated True.
Note that, unlike the recipe which returns generators, this version
returns lists.
"""
pos, neg = [], []
pos_append, neg_append = pos.append, neg.append
for elem in iterable:
if pred(elem):
pos_append(elem)
else:
neg_append(elem)
return neg, pos | python | def partition(pred, iterable):
"""Partition an iterable.
Arguments
---------
pred : function
A function that takes an element of the iterable and returns
a boolean indicating to which partition it belongs
iterable : iterable
Returns
-------
A two-tuple of lists with the first list containing the elements on which
the predicate indicated False and the second list containing the elements
on which the predicate indicated True.
Note that, unlike the recipe which returns generators, this version
returns lists.
"""
pos, neg = [], []
pos_append, neg_append = pos.append, neg.append
for elem in iterable:
if pred(elem):
pos_append(elem)
else:
neg_append(elem)
return neg, pos | [
"def",
"partition",
"(",
"pred",
",",
"iterable",
")",
":",
"pos",
",",
"neg",
"=",
"[",
"]",
",",
"[",
"]",
"pos_append",
",",
"neg_append",
"=",
"pos",
".",
"append",
",",
"neg",
".",
"append",
"for",
"elem",
"in",
"iterable",
":",
"if",
"pred",
"(",
"elem",
")",
":",
"pos_append",
"(",
"elem",
")",
"else",
":",
"neg_append",
"(",
"elem",
")",
"return",
"neg",
",",
"pos"
] | Partition an iterable.
Arguments
---------
pred : function
A function that takes an element of the iterable and returns
a boolean indicating to which partition it belongs
iterable : iterable
Returns
-------
A two-tuple of lists with the first list containing the elements on which
the predicate indicated False and the second list containing the elements
on which the predicate indicated True.
Note that, unlike the recipe which returns generators, this version
returns lists. | [
"Partition",
"an",
"iterable",
"."
] | train | https://github.com/nxdevel/nx_itertools/blob/744da75c616a8a7991b963a549152fe9c434abd9/nx_itertools/extra.py#L40-L66 |
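A minimal check of the (neg, pos) return order described above:

from nx_itertools.extra import partition

neg, pos = partition(lambda x: x % 2, range(10))
assert neg == [0, 2, 4, 6, 8]    # predicate falsey
assert pos == [1, 3, 5, 7, 9]    # predicate truthy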
nxdevel/nx_itertools | nx_itertools/extra.py | powerset | def powerset(iterable, *, reverse=False):
"""Return the powerset.
Arguments
---------
iterable : iterable
reverse : boolean
Indicates whether the powerset should be returned descending by
size
Returns
-------
A generator producing each element of the powerset.
"""
lst = list(iterable)
if reverse:
rng = range(len(lst), -1, -1)
else:
rng = range(len(lst) + 1)
return chain.from_iterable(combinations(lst, r) for r in rng) | python | def powerset(iterable, *, reverse=False):
"""Return the powerset.
Arguments
---------
iterable : iterable
reverse : boolean
Indicates whether the powerset should be returned descending by
size
Returns
-------
A generator producing each element of the powerset.
"""
lst = list(iterable)
if reverse:
rng = range(len(lst), -1, -1)
else:
rng = range(len(lst) + 1)
return chain.from_iterable(combinations(lst, r) for r in rng) | [
"def",
"powerset",
"(",
"iterable",
",",
"*",
",",
"reverse",
"=",
"False",
")",
":",
"lst",
"=",
"list",
"(",
"iterable",
")",
"if",
"reverse",
":",
"rng",
"=",
"range",
"(",
"len",
"(",
"lst",
")",
",",
"-",
"1",
",",
"-",
"1",
")",
"else",
":",
"rng",
"=",
"range",
"(",
"len",
"(",
"lst",
")",
"+",
"1",
")",
"return",
"chain",
".",
"from_iterable",
"(",
"combinations",
"(",
"lst",
",",
"r",
")",
"for",
"r",
"in",
"rng",
")"
] | Return the powerset.
Arguments
---------
iterable : iterable
reverse : boolean
Indicates whether the powerset should be returned descending by
size
Returns
-------
A generator producing each element of the powerset. | [
"Return",
"the",
"powerset",
"."
] | train | https://github.com/nxdevel/nx_itertools/blob/744da75c616a8a7991b963a549152fe9c434abd9/nx_itertools/extra.py#L69-L88 |
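A minimal check, including the reverse flag that orders subsets descending by size:

from nx_itertools.extra import powerset

assert list(powerset([1, 2])) == [(), (1,), (2,), (1, 2)]
assert list(powerset([1, 2], reverse=True)) == [(1, 2), (1,), (2,), ()]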
nxdevel/nx_itertools | nx_itertools/extra.py | multi_map | def multi_map(key, iterable, *, default_dict=False):
"""Collect data into a multi-map.
Arguments
----------
key : function
A function that accepts an element retrieved from the
iterable and returns the key to be used in the multi-map
iterable : iterable
default_dict : boolean
Indicates whether or not the returned multi-map is an
instance of defaultdict(list)
Returns
-------
A dictionary of lists where the dictionary is either an instance of dict()
or defaultdict(list) based on the *default_dict* boolean and each list
contains the elements that are associated with the key in the order in
which they occur in the iterable.
"""
result = collections.defaultdict(list)
for rec in iterable:
result[key(rec)].append(rec)
return result if default_dict else dict(result) | python | def multi_map(key, iterable, *, default_dict=False):
"""Collect data into a multi-map.
Arguments
----------
key : function
A function that accepts an element retrieved from the
iterable and returns the key to be used in the multi-map
iterable : iterable
default_dict : boolean
Indicates whether or not the returned multi-map is an
instance of defaultdict(list)
Returns
-------
A dictionary of lists where the dictionary is either an instance of dict()
or defaultdict(list) based on the *default_dict* boolean and each list
contains the elements that are associated with the key in the order in
which they occur in the iterable.
"""
result = collections.defaultdict(list)
for rec in iterable:
result[key(rec)].append(rec)
return result if default_dict else dict(result) | [
"def",
"multi_map",
"(",
"key",
",",
"iterable",
",",
"*",
",",
"default_dict",
"=",
"False",
")",
":",
"result",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"rec",
"in",
"iterable",
":",
"result",
"[",
"key",
"(",
"rec",
")",
"]",
".",
"append",
"(",
"rec",
")",
"return",
"result",
"if",
"default_dict",
"else",
"dict",
"(",
"result",
")"
] | Collect data into a multi-map.
Arguments
----------
key : function
A function that accepts an element retrieved from the
iterable and returns the key to be used in the multi-map
iterable : iterable
default_dict : boolean
Indicates whether or not the returned multi-map is an
instance of defaultdict(list)
Returns
-------
A dictionary of lists where the dictionary is either an instance of dict()
or defaultdict(list) based on the *default_dict* boolean and each list
contains the elements that are associated with the key in the order in
which they occur in the iterable. | [
"Collect",
"data",
"into",
"a",
"multi",
"-",
"map",
"."
] | train | https://github.com/nxdevel/nx_itertools/blob/744da75c616a8a7991b963a549152fe9c434abd9/nx_itertools/extra.py#L91-L114 |
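A minimal check of the multi-map grouping, preserving input order within each key:

from nx_itertools.extra import multi_map

grouped = multi_map(lambda w: w[0], ['apple', 'avocado', 'banana'])
assert grouped == {'a': ['apple', 'avocado'], 'b': ['banana']}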
nxdevel/nx_itertools | nx_itertools/extra.py | split | def split(pred, iterable, *, trailing=True):
"""Split the iterable.
Arguments
----------
pred : function
A function that accepts an element retrieved from the iterable
and returns a boolean indicating if it is the element on which
to split
iterable : iterable
trailing : boolean
Indicates whether the split should occur on the leading edge
of the match on the split or on the trailing edge of the match
on the split
Returns
-------
The generator produces a list for each split.
If *trailing* is True then the element that was identified by the predicate
will be returned at the end of each split.
If *trailing* is False then the element that was identified by the
predicate will be returned at the beginning of the following split.
No guarantee is made regarding the state of the iterable during operation.
"""
result = []
result_append = result.append
if trailing:
for elem in iterable:
result_append(elem)
if pred(elem):
yield result
result = []
result_append = result.append
else:
for elem in iterable:
if pred(elem):
if result:
yield result
result = []
result_append = result.append
result_append(elem)
if result:
yield result | python | def split(pred, iterable, *, trailing=True):
"""Split the iterable.
Arguments
----------
pred : function
A function that accepts an element retrieved from the iterable
and returns a boolean indicating if it is the element on which
to split
iterable : iterable
trailing : boolean
Indicates whether the split should occur on the leading edge
of the match on the split or on the trailing edge of the match
on the split
Returns
-------
The generator produces a list for each split.
If *trailing* is True then the element that was identified by the predicate
will be returned at the end of each split.
If *trailing* is False then the element that was identified by the
predicate will be returned at the beginning of the following split.
No guarantee is made regarding the state of the iterable during operation.
"""
result = []
result_append = result.append
if trailing:
for elem in iterable:
result_append(elem)
if pred(elem):
yield result
result = []
result_append = result.append
else:
for elem in iterable:
if pred(elem):
if result:
yield result
result = []
result_append = result.append
result_append(elem)
if result:
yield result | [
"def",
"split",
"(",
"pred",
",",
"iterable",
",",
"*",
",",
"trailing",
"=",
"True",
")",
":",
"result",
"=",
"[",
"]",
"result_append",
"=",
"result",
".",
"append",
"if",
"trailing",
":",
"for",
"elem",
"in",
"iterable",
":",
"result_append",
"(",
"elem",
")",
"if",
"pred",
"(",
"elem",
")",
":",
"yield",
"result",
"result",
"=",
"[",
"]",
"result_append",
"=",
"result",
".",
"append",
"else",
":",
"for",
"elem",
"in",
"iterable",
":",
"if",
"pred",
"(",
"elem",
")",
":",
"if",
"result",
":",
"yield",
"result",
"result",
"=",
"[",
"]",
"result_append",
"=",
"result",
".",
"append",
"result_append",
"(",
"elem",
")",
"if",
"result",
":",
"yield",
"result"
] | Split the iterable.
Arguments
----------
pred : function
A function that accepts an element retrieved from the iterable
and returns a boolean indicating if it is the element on which
to split
iterable : iterable
trailing : boolean
Indicates whether the split should occur on the leading edge
of the match on the split or on the trailing edge of the match
on the split
Returns
-------
The generator produces a list for each split.
If *trailing* is True then the element that was identified by the predicate
will be returned at the end of each split.
If *trailing* is False then the element that was identified by the
predicate will be returned at the beginning of the following split.
No guarantee is made regarding the state of the iterable during operation. | [
"Split",
"the",
"iterable",
"."
] | train | https://github.com/nxdevel/nx_itertools/blob/744da75c616a8a7991b963a549152fe9c434abd9/nx_itertools/extra.py#L117-L161 |
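A usage sketch for split() showing both trailing modes (editorial addition; assumes nx_itertools is installed):

from nx_itertools.extra import split

lines = ["a", "b", "", "c", ""]
blank = lambda x: x == ""
# trailing=True: the matched element closes each chunk.
print(list(split(blank, lines)))                  # [['a', 'b', ''], ['c', '']]
# trailing=False: the matched element opens the next chunk.
print(list(split(blank, lines, trailing=False)))  # [['a', 'b'], ['', 'c'], ['']]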
nxdevel/nx_itertools | nx_itertools/extra.py | chunk | def chunk(iterable, length):
"""Collect data into chunks.
Arguments
---------
iterable : iterable
length : integer
Maximum size of each chunk to return
Returns
-------
The generator produces a tuple of elements whose size is at least one but
no more than *length*.
If the number of elements in the iterable is not a multiple of *length*
then the final tuple will be less than *length*.
This function is meant to be a variant of the recipe's function grouper()
that does not pad the last tuple with a fill-value if the number of elements
in the iterable is not a multiple of the specified length.
"""
if length < 0:
return ()
iterable = iter(iterable)
result = tuple(islice(iterable, length))
while result:
yield result
result = tuple(islice(iterable, length)) | python | def chunk(iterable, length):
"""Collect data into chunks.
Arguments
---------
iterable : iterable
length : integer
Maximum size of each chunk to return
Returns
-------
The generator produces a tuple of elements whose size is at least one but
no more than *length*.
If the number of elements in the iterable is not a multiple of *length*
then the final tuple will be less than *length*.
This function is meant to be a variant of the recipe's function grouper()
that does not pad the last tuple with a fill-value if the number of elements
in the iterable is not a multiple of the specified length.
"""
if length < 0:
return ()
iterable = iter(iterable)
result = tuple(islice(iterable, length))
while result:
yield result
result = tuple(islice(iterable, length)) | [
"def",
"chunk",
"(",
"iterable",
",",
"length",
")",
":",
"if",
"length",
"<",
"0",
":",
"return",
"(",
")",
"iterable",
"=",
"iter",
"(",
"iterable",
")",
"result",
"=",
"tuple",
"(",
"islice",
"(",
"iterable",
",",
"length",
")",
")",
"while",
"result",
":",
"yield",
"result",
"result",
"=",
"tuple",
"(",
"islice",
"(",
"iterable",
",",
"length",
")",
")"
] | Collect data into chunks.
Arguments
---------
iterable : iterable
length : integer
Maximum size of each chunk to return
Returns
-------
The generator produces a tuple of elements whose size is at least one but
no more than *length*.
If the number of elements in the iterable is not a multiple of *length*
then the final tuple will be less than *length*.
This function is meant to be a variant of the recipe's function grouper()
that does not pad the last tuple with a fill-value if the number elements
in the iterable is not a multiple of the specified length. | [
"Collect",
"data",
"into",
"chunks",
"."
] | train | https://github.com/nxdevel/nx_itertools/blob/744da75c616a8a7991b963a549152fe9c434abd9/nx_itertools/extra.py#L164-L191 |
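A usage sketch for chunk(); unlike the grouper() recipe, the final tuple comes back short rather than padded (editorial addition):

from nx_itertools.extra import chunk

print(list(chunk(range(7), 3)))  # [(0, 1, 2), (3, 4, 5), (6,)]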
nxdevel/nx_itertools | nx_itertools/extra.py | divide | def divide(iterable, n): # pylint: disable=invalid-name
"""Evenly divide elements.
Arguments
---------
iterable : iterable
n : integer
The number of buckets in which to divide the elements
Returns
-------
The generator produces *n* tuples, each containing a number of elements
where the number is calculated to be evenly distributed across all of the
returned tuples.
The number of tuples returned is always *n*; the generator may therefore yield
empty tuples if there is not enough data to distribute.
In order to determine the number of elements to put in each tuple, the
iterable is converted into a list. Consider using divide_sizes() and
manually slicing the iterator if this is not desirable.
"""
if n <= 0:
return []
data = list(iterable)
base, rem = divmod(len(data), n)
iterable = iter(data)
for i in range(n):
yield tuple(islice(iterable, base + 1 if i < rem else base)) | python | def divide(iterable, n): # pylint: disable=invalid-name
"""Evenly divide elements.
Arguments
---------
iterable : iterable
n : integer
The number of buckets in which to divide the elements
Returns
-------
The generator produces *n* tuples, each containing a number of elements
where the number is calculated to be evenly distributed across all of the
returned tuples.
The number of tuples returned is always *n*; the generator may therefore yield
empty tuples if there is not enough data to distribute.
In order to determine the number of elements to put in each tuple, the
iterable is converted into a list. Consider using divide_sizes() and
manually slicing the iterator if this is not desirable.
"""
if n <= 0:
return []
data = list(iterable)
base, rem = divmod(len(data), n)
iterable = iter(data)
for i in range(n):
yield tuple(islice(iterable, base + 1 if i < rem else base)) | [
"def",
"divide",
"(",
"iterable",
",",
"n",
")",
":",
"# pylint: disable=invalid-name",
"if",
"n",
"<=",
"0",
":",
"return",
"[",
"]",
"data",
"=",
"list",
"(",
"iterable",
")",
"base",
",",
"rem",
"=",
"divmod",
"(",
"len",
"(",
"data",
")",
",",
"n",
")",
"iterable",
"=",
"iter",
"(",
"data",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"yield",
"tuple",
"(",
"islice",
"(",
"iterable",
",",
"base",
"+",
"1",
"if",
"i",
"<",
"rem",
"else",
"base",
")",
")"
] | Evenly divide elements.
Arguments
---------
iterable : iterable
n : integer
The number of buckets in which to divide the elements
Returns
-------
The generator produces *n* tuples, each containing a number of elements
where the number is calculated to be evenly distributed across all of the
returned tuples.
The number of tuples returned is always *n*; the generator may therefore yield
empty tuples if there is not enough data to distribute.
In order to determine the number of elements to put in each tuple, the
iterable is converted into a list. Consider using divide_sizes() and
manually slicing the iterator if this is not desirable. | [
"Evenly",
"divide",
"elements",
"."
] | train | https://github.com/nxdevel/nx_itertools/blob/744da75c616a8a7991b963a549152fe9c434abd9/nx_itertools/extra.py#L194-L222 |
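A usage sketch for divide(); ten elements over three buckets gives sizes 4/3/3 (editorial addition):

from nx_itertools.extra import divide

print(list(divide(range(10), 3)))  # [(0, 1, 2, 3), (4, 5, 6), (7, 8, 9)]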
nxdevel/nx_itertools | nx_itertools/extra.py | divide_sizes | def divide_sizes(count, n): # pylint: disable=invalid-name
"""Evenly divide a count.
Arguments
---------
count : integer
The number to be evenly divided
n : integer
The number of buckets in which to divide the number
Returns
-------
A list of integers indicating what size each bucket should be for an even
distribution of *count*.
The number of integers returned is always *n*; individual sizes may therefore be 0.
Useful for calculating slices for generators that might be too large to
convert into a list as happens in divide().
"""
if n <= 0:
return []
if count < 0:
return [0] * n
base, rem = divmod(count, n)
return [base + 1 if i < rem else base for i in range(n)] | python | def divide_sizes(count, n): # pylint: disable=invalid-name
"""Evenly divide a count.
Arguments
---------
count : integer
The number to be evenly divided
n : integer
The number of buckets in which to divide the number
Returns
-------
A list of integers indicating what size each bucket should be for an even
distribution of *count*.
The number of integers returned is always *n* and, thus, may be 0.
Useful for calculating slices for generators that might be too large to
convert into a list as happens in divide().
"""
if n <= 0:
return []
if count < 0:
return [0] * n
base, rem = divmod(count, n)
return [base + 1 if i < rem else base for i in range(n)] | [
"def",
"divide_sizes",
"(",
"count",
",",
"n",
")",
":",
"# pylint: disable=invalid-name",
"if",
"n",
"<=",
"0",
":",
"return",
"[",
"]",
"if",
"count",
"<",
"0",
":",
"return",
"[",
"0",
"]",
"*",
"n",
"base",
",",
"rem",
"=",
"divmod",
"(",
"count",
",",
"n",
")",
"return",
"[",
"base",
"+",
"1",
"if",
"i",
"<",
"rem",
"else",
"base",
"for",
"i",
"in",
"range",
"(",
"n",
")",
"]"
] | Evenly divide a count.
Arguments
---------
count : integer
The number to be evenly divided
n : integer
The number of buckets in which to divide the number
Returns
-------
A list of integers indicating what size each bucket should be for an even
distribution of *count*.
The number of integers returned is always *n*; individual sizes may therefore be 0.
Useful for calculating slices for generators that might be too large to
convert into a list as happens in divide(). | [
"Evenly",
"divide",
"a",
"count",
"."
] | train | https://github.com/nxdevel/nx_itertools/blob/744da75c616a8a7991b963a549152fe9c434abd9/nx_itertools/extra.py#L225-L250 |
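A usage sketch for divide_sizes(), useful for slicing a generator without materialising it as divide() does (editorial addition):

from nx_itertools.extra import divide_sizes

print(divide_sizes(10, 3))  # [4, 3, 3]
print(divide_sizes(2, 4))   # [1, 1, 0, 0]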
AndrewWalker/glud | glud/display.py | dump | def dump(cursor):
""" Display the AST represented by the cursor
"""
def node_children(node):
return list(node.get_children())
def print_node(node):
text = node.spelling or node.displayname
kind = str(node.kind).split('.')[1]
return '{} {}'.format(kind, text)
return draw_tree(cursor, node_children, print_node) | python | def dump(cursor):
""" Display the AST represented by the cursor
"""
def node_children(node):
return list(node.get_children())
def print_node(node):
text = node.spelling or node.displayname
kind = str(node.kind).split('.')[1]
return '{} {}'.format(kind, text)
return draw_tree(cursor, node_children, print_node) | [
"def",
"dump",
"(",
"cursor",
")",
":",
"def",
"node_children",
"(",
"node",
")",
":",
"return",
"list",
"(",
"node",
".",
"get_children",
"(",
")",
")",
"def",
"print_node",
"(",
"node",
")",
":",
"text",
"=",
"node",
".",
"spelling",
"or",
"node",
".",
"displayname",
"kind",
"=",
"str",
"(",
"node",
".",
"kind",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"return",
"'{} {}'",
".",
"format",
"(",
"kind",
",",
"text",
")",
"return",
"draw_tree",
"(",
"cursor",
",",
"node_children",
",",
"print_node",
")"
] | Display the AST represented by the cursor | [
"Display",
"the",
"AST",
"represented",
"by",
"the",
"cursor"
] | train | https://github.com/AndrewWalker/glud/blob/57de000627fed13d0c383f131163795b09549257/glud/display.py#L4-L16 |
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | coerce_to_pendulum | def coerce_to_pendulum(x: PotentialDatetimeType,
assume_local: bool = False) -> Optional[DateTime]:
"""
Converts something to a :class:`pendulum.DateTime`.
Args:
x: something that may be coercible to a datetime
assume_local: if ``True``, assume local timezone; if ``False``, assume
UTC
Returns:
a :class:`pendulum.DateTime`, or ``None``.
Raises:
pendulum.parsing.exceptions.ParserError: if a string fails to parse
ValueError: if no conversion possible
"""
if not x: # None and blank string
return None
if isinstance(x, DateTime):
return x
tz = get_tz_local() if assume_local else get_tz_utc()
if isinstance(x, datetime.datetime):
return pendulum.instance(x, tz=tz) # (*)
elif isinstance(x, datetime.date):
# BEWARE: datetime subclasses date. The order is crucial here.
# Can also use: type(x) is datetime.date
# noinspection PyUnresolvedReferences
midnight = DateTime.min.time()
dt = DateTime.combine(x, midnight)
return pendulum.instance(dt, tz=tz) # (*)
elif isinstance(x, str):
return pendulum.parse(x, tz=tz) # (*) # may raise
else:
raise ValueError("Don't know how to convert to DateTime: "
"{!r}".format(x)) | python | def coerce_to_pendulum(x: PotentialDatetimeType,
assume_local: bool = False) -> Optional[DateTime]:
"""
Converts something to a :class:`pendulum.DateTime`.
Args:
x: something that may be coercible to a datetime
assume_local: if ``True``, assume local timezone; if ``False``, assume
UTC
Returns:
a :class:`pendulum.DateTime`, or ``None``.
Raises:
pendulum.parsing.exceptions.ParserError: if a string fails to parse
ValueError: if no conversion possible
"""
if not x: # None and blank string
return None
if isinstance(x, DateTime):
return x
tz = get_tz_local() if assume_local else get_tz_utc()
if isinstance(x, datetime.datetime):
return pendulum.instance(x, tz=tz) # (*)
elif isinstance(x, datetime.date):
# BEWARE: datetime subclasses date. The order is crucial here.
# Can also use: type(x) is datetime.date
# noinspection PyUnresolvedReferences
midnight = DateTime.min.time()
dt = DateTime.combine(x, midnight)
return pendulum.instance(dt, tz=tz) # (*)
elif isinstance(x, str):
return pendulum.parse(x, tz=tz) # (*) # may raise
else:
raise ValueError("Don't know how to convert to DateTime: "
"{!r}".format(x)) | [
"def",
"coerce_to_pendulum",
"(",
"x",
":",
"PotentialDatetimeType",
",",
"assume_local",
":",
"bool",
"=",
"False",
")",
"->",
"Optional",
"[",
"DateTime",
"]",
":",
"if",
"not",
"x",
":",
"# None and blank string",
"return",
"None",
"if",
"isinstance",
"(",
"x",
",",
"DateTime",
")",
":",
"return",
"x",
"tz",
"=",
"get_tz_local",
"(",
")",
"if",
"assume_local",
"else",
"get_tz_utc",
"(",
")",
"if",
"isinstance",
"(",
"x",
",",
"datetime",
".",
"datetime",
")",
":",
"return",
"pendulum",
".",
"instance",
"(",
"x",
",",
"tz",
"=",
"tz",
")",
"# (*)",
"elif",
"isinstance",
"(",
"x",
",",
"datetime",
".",
"date",
")",
":",
"# BEWARE: datetime subclasses date. The order is crucial here.",
"# Can also use: type(x) is datetime.date",
"# noinspection PyUnresolvedReferences",
"midnight",
"=",
"DateTime",
".",
"min",
".",
"time",
"(",
")",
"dt",
"=",
"DateTime",
".",
"combine",
"(",
"x",
",",
"midnight",
")",
"return",
"pendulum",
".",
"instance",
"(",
"dt",
",",
"tz",
"=",
"tz",
")",
"# (*)",
"elif",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"return",
"pendulum",
".",
"parse",
"(",
"x",
",",
"tz",
"=",
"tz",
")",
"# (*) # may raise",
"else",
":",
"raise",
"ValueError",
"(",
"\"Don't know how to convert to DateTime: \"",
"\"{!r}\"",
".",
"format",
"(",
"x",
")",
")"
] | Converts something to a :class:`pendulum.DateTime`.
Args:
x: something that may be coercible to a datetime
assume_local: if ``True``, assume local timezone; if ``False``, assume
UTC
Returns:
a :class:`pendulum.DateTime`, or ``None``.
Raises:
pendulum.parsing.exceptions.ParserError: if a string fails to parse
ValueError: if no conversion possible | [
"Converts",
"something",
"to",
"a",
":",
"class",
":",
"pendulum",
".",
"DateTime",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L57-L92 |
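A usage sketch for coerce_to_pendulum() covering its main branches (editorial addition; assumes cardinal_pythonlib is installed):

import datetime
from cardinal_pythonlib.datetimefunc import coerce_to_pendulum

# Strings without timezone information are read as UTC by default...
dt_utc = coerce_to_pendulum("2020-01-01T12:00:00")
# ...or as local time with assume_local=True; plain dates become midnight.
dt_local = coerce_to_pendulum(datetime.date(2020, 1, 1), assume_local=True)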
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | coerce_to_pendulum_date | def coerce_to_pendulum_date(x: PotentialDatetimeType,
assume_local: bool = False) -> Optional[Date]:
"""
Converts something to a :class:`pendulum.Date`.
Args:
x: something that may be coercible to a date
assume_local: if ``True``, assume local timezone; if ``False``, assume
UTC
Returns:
a :class:`pendulum.Date`, or ``None``.
Raises:
pendulum.parsing.exceptions.ParserError: if a string fails to parse
ValueError: if no conversion possible
"""
p = coerce_to_pendulum(x, assume_local=assume_local)
return None if p is None else p.date() | python | def coerce_to_pendulum_date(x: PotentialDatetimeType,
assume_local: bool = False) -> Optional[Date]:
"""
Converts something to a :class:`pendulum.Date`.
Args:
x: something that may be coercible to a date
assume_local: if ``True``, assume local timezone; if ``False``, assume
UTC
Returns:
a :class:`pendulum.Date`, or ``None``.
Raises:
pendulum.parsing.exceptions.ParserError: if a string fails to parse
ValueError: if no conversion possible
"""
p = coerce_to_pendulum(x, assume_local=assume_local)
return None if p is None else p.date() | [
"def",
"coerce_to_pendulum_date",
"(",
"x",
":",
"PotentialDatetimeType",
",",
"assume_local",
":",
"bool",
"=",
"False",
")",
"->",
"Optional",
"[",
"Date",
"]",
":",
"p",
"=",
"coerce_to_pendulum",
"(",
"x",
",",
"assume_local",
"=",
"assume_local",
")",
"return",
"None",
"if",
"p",
"is",
"None",
"else",
"p",
".",
"date",
"(",
")"
] | Converts something to a :class:`pendulum.Date`.
Args:
x: something that may be coercible to a date
assume_local: if ``True``, assume local timezone; if ``False``, assume
UTC
Returns:
a :class:`pendulum.Date`, or ``None``.
Raises:
pendulum.parsing.exceptions.ParserError: if a string fails to parse
ValueError: if no conversion possible | [
"Converts",
"something",
"to",
"a",
":",
"class",
":",
"pendulum",
".",
"Date",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L97-L115 |
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | pendulum_to_datetime | def pendulum_to_datetime(x: DateTime) -> datetime.datetime:
"""
Used, for example, where a database backend insists on datetime.datetime.
Compare code in :meth:`pendulum.datetime.DateTime.int_timestamp`.
"""
return datetime.datetime(
x.year, x.month, x.day,
x.hour, x.minute, x.second, x.microsecond,
tzinfo=x.tzinfo
) | python | def pendulum_to_datetime(x: DateTime) -> datetime.datetime:
"""
Used, for example, where a database backend insists on datetime.datetime.
Compare code in :meth:`pendulum.datetime.DateTime.int_timestamp`.
"""
return datetime.datetime(
x.year, x.month, x.day,
x.hour, x.minute, x.second, x.microsecond,
tzinfo=x.tzinfo
) | [
"def",
"pendulum_to_datetime",
"(",
"x",
":",
"DateTime",
")",
"->",
"datetime",
".",
"datetime",
":",
"return",
"datetime",
".",
"datetime",
"(",
"x",
".",
"year",
",",
"x",
".",
"month",
",",
"x",
".",
"day",
",",
"x",
".",
"hour",
",",
"x",
".",
"minute",
",",
"x",
".",
"second",
",",
"x",
".",
"microsecond",
",",
"tzinfo",
"=",
"x",
".",
"tzinfo",
")"
] | Used, for example, where a database backend insists on datetime.datetime.
Compare code in :meth:`pendulum.datetime.DateTime.int_timestamp`. | [
"Used",
"for",
"example",
"where",
"a",
"database",
"backend",
"insists",
"on",
"datetime",
".",
"datetime",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L118-L128 |
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | pendulum_to_utc_datetime_without_tz | def pendulum_to_utc_datetime_without_tz(x: DateTime) -> datetime.datetime:
"""
Converts a Pendulum ``DateTime`` (which will have timezone information) to
a ``datetime.datetime`` that (a) has no timezone information, and (b) is
in UTC.
Example:
.. code-block:: python
import pendulum
from cardinal_pythonlib.datetimefunc import *
in_moscow = pendulum.parse("2018-01-01T09:00+0300") # 9am in Moscow
in_london = pendulum.UTC.convert(in_moscow) # 6am in UTC
dt_utc_from_moscow = pendulum_to_utc_datetime_without_tz(in_moscow) # 6am, no timezone info
dt_utc_from_london = pendulum_to_utc_datetime_without_tz(in_london) # 6am, no timezone info
""" # noqa
pendulum_in_utc = pendulum.UTC.convert(x)
return pendulum_to_datetime_stripping_tz(pendulum_in_utc) | python | def pendulum_to_utc_datetime_without_tz(x: DateTime) -> datetime.datetime:
"""
Converts a Pendulum ``DateTime`` (which will have timezone information) to
a ``datetime.datetime`` that (a) has no timezone information, and (b) is
in UTC.
Example:
.. code-block:: python
import pendulum
from cardinal_pythonlib.datetimefunc import *
in_moscow = pendulum.parse("2018-01-01T09:00+0300") # 9am in Moscow
in_london = pendulum.UTC.convert(in_moscow) # 6am in UTC
dt_utc_from_moscow = pendulum_to_utc_datetime_without_tz(in_moscow) # 6am, no timezone info
dt_utc_from_london = pendulum_to_utc_datetime_without_tz(in_london) # 6am, no timezone info
""" # noqa
pendulum_in_utc = pendulum.UTC.convert(x)
return pendulum_to_datetime_stripping_tz(pendulum_in_utc) | [
"def",
"pendulum_to_utc_datetime_without_tz",
"(",
"x",
":",
"DateTime",
")",
"->",
"datetime",
".",
"datetime",
":",
"# noqa",
"pendulum_in_utc",
"=",
"pendulum",
".",
"UTC",
".",
"convert",
"(",
"x",
")",
"return",
"pendulum_to_datetime_stripping_tz",
"(",
"pendulum_in_utc",
")"
] | Converts a Pendulum ``DateTime`` (which will have timezone information) to
a ``datetime.datetime`` that (a) has no timezone information, and (b) is
in UTC.
Example:
.. code-block:: python
import pendulum
from cardinal_pythonlib.datetimefunc import *
in_moscow = pendulum.parse("2018-01-01T09:00+0300") # 9am in Moscow
in_london = pendulum.UTC.convert(in_moscow) # 6am in UTC
dt_utc_from_moscow = pendulum_to_utc_datetime_without_tz(in_moscow) # 6am, no timezone info
dt_utc_from_london = pendulum_to_utc_datetime_without_tz(in_london) # 6am, no timezone info | [
"Converts",
"a",
"Pendulum",
"DateTime",
"(",
"which",
"will",
"have",
"timezone",
"information",
")",
"to",
"a",
"datetime",
".",
"datetime",
"that",
"(",
"a",
")",
"has",
"no",
"timezone",
"information",
"and",
"(",
"b",
")",
"is",
"in",
"UTC",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L143-L162 |
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | pendulum_date_to_datetime_date | def pendulum_date_to_datetime_date(x: Date) -> datetime.date:
"""
Takes a :class:`pendulum.Date` and returns a :class:`datetime.date`.
Used, for example, where a database backend insists on
:class:`datetime.date`.
"""
return datetime.date(year=x.year, month=x.month, day=x.day) | python | def pendulum_date_to_datetime_date(x: Date) -> datetime.date:
"""
Takes a :class:`pendulum.Date` and returns a :class:`datetime.date`.
Used, for example, where a database backend insists on
:class:`datetime.date`.
"""
return datetime.date(year=x.year, month=x.month, day=x.day) | [
"def",
"pendulum_date_to_datetime_date",
"(",
"x",
":",
"Date",
")",
"->",
"datetime",
".",
"date",
":",
"return",
"datetime",
".",
"date",
"(",
"year",
"=",
"x",
".",
"year",
",",
"month",
"=",
"x",
".",
"month",
",",
"day",
"=",
"x",
".",
"day",
")"
] | Takes a :class:`pendulum.Date` and returns a :class:`datetime.date`.
Used, for example, where a database backend insists on
:class:`datetime.date`. | [
"Takes",
"a",
":",
"class",
":",
"pendulum",
".",
"Date",
"and",
"returns",
"a",
":",
"class",
":",
"datetime",
".",
"date",
".",
"Used",
"for",
"example",
"where",
"a",
"database",
"backend",
"insists",
"on",
":",
"class",
":",
"datetime",
".",
"date",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L165-L171 |
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | pendulum_time_to_datetime_time | def pendulum_time_to_datetime_time(x: Time) -> datetime.time:
"""
Takes a :class:`pendulum.Time` and returns a :class:`datetime.time`.
Used, for example, where a database backend insists on
:class:`datetime.time`.
"""
return datetime.time(
hour=x.hour, minute=x.minute, second=x.second,
microsecond=x.microsecond,
tzinfo=x.tzinfo
) | python | def pendulum_time_to_datetime_time(x: Time) -> datetime.time:
"""
Takes a :class:`pendulum.Time` and returns a :class:`datetime.time`.
Used, for example, where a database backend insists on
:class:`datetime.time`.
"""
return datetime.time(
hour=x.hour, minute=x.minute, second=x.second,
microsecond=x.microsecond,
tzinfo=x.tzinfo
) | [
"def",
"pendulum_time_to_datetime_time",
"(",
"x",
":",
"Time",
")",
"->",
"datetime",
".",
"time",
":",
"return",
"datetime",
".",
"time",
"(",
"hour",
"=",
"x",
".",
"hour",
",",
"minute",
"=",
"x",
".",
"minute",
",",
"second",
"=",
"x",
".",
"second",
",",
"microsecond",
"=",
"x",
".",
"microsecond",
",",
"tzinfo",
"=",
"x",
".",
"tzinfo",
")"
] | Takes a :class:`pendulum.Time` and returns a :class:`datetime.time`.
Used, for example, where a database backend insists on
:class:`datetime.time`. | [
"Takes",
"a",
":",
"class",
":",
"pendulum",
".",
"Time",
"and",
"returns",
"a",
":",
"class",
":",
"datetime",
".",
"time",
".",
"Used",
"for",
"example",
"where",
"a",
"database",
"backend",
"insists",
"on",
":",
"class",
":",
"datetime",
".",
"time",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L174-L184 |
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | format_datetime | def format_datetime(d: PotentialDatetimeType,
fmt: str,
default: str = None) -> Optional[str]:
"""
Format a datetime with a ``strftime`` format specification string, or
return ``default`` if the input is ``None``.
"""
d = coerce_to_pendulum(d)
if d is None:
return default
return d.strftime(fmt) | python | def format_datetime(d: PotentialDatetimeType,
fmt: str,
default: str = None) -> Optional[str]:
"""
Format a datetime with a ``strftime`` format specification string, or
return ``default`` if the input is ``None``.
"""
d = coerce_to_pendulum(d)
if d is None:
return default
return d.strftime(fmt) | [
"def",
"format_datetime",
"(",
"d",
":",
"PotentialDatetimeType",
",",
"fmt",
":",
"str",
",",
"default",
":",
"str",
"=",
"None",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"d",
"=",
"coerce_to_pendulum",
"(",
"d",
")",
"if",
"d",
"is",
"None",
":",
"return",
"default",
"return",
"d",
".",
"strftime",
"(",
"fmt",
")"
] | Format a datetime with a ``strftime`` format specification string, or
return ``default`` if the input is ``None``. | [
"Format",
"a",
"datetime",
"with",
"a",
"strftime",
"format",
"specification",
"string",
"or",
"return",
"default",
"if",
"the",
"input",
"is",
"None",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L191-L201 |
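A usage sketch for format_datetime() (editorial addition):

from cardinal_pythonlib.datetimefunc import format_datetime

print(format_datetime("2020-01-01T12:34:56", "%Y-%m-%d %H:%M"))  # 2020-01-01 12:34
print(format_datetime(None, "%Y", default="N/A"))                # N/A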
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | strfdelta | def strfdelta(tdelta: Union[datetime.timedelta, int, float, str],
fmt='{D:02}d {H:02}h {M:02}m {S:02}s',
inputtype='timedelta'):
"""
Convert a ``datetime.timedelta`` object or a regular number to a custom-
formatted string, just like the ``strftime()`` method does for
``datetime.datetime`` objects.
The ``fmt`` argument allows custom formatting to be specified. Fields can
include ``seconds``, ``minutes``, ``hours``, ``days``, and ``weeks``. Each
field is optional.
Some examples:
.. code-block:: none
'{D:02}d {H:02}h {M:02}m {S:02}s' --> '05d 08h 04m 02s' (default)
'{W}w {D}d {H}:{M:02}:{S:02}' --> '4w 5d 8:04:02'
'{D:2}d {H:2}:{M:02}:{S:02}' --> ' 5d 8:04:02'
'{H}h {S}s' --> '72h 800s'
The ``inputtype`` argument allows ``tdelta`` to be a regular number,
instead of the default behaviour of treating it as a ``datetime.timedelta``
object. Valid ``inputtype`` strings:
.. code-block:: none
'timedelta', # treats input as a datetime.timedelta
's', 'seconds',
'm', 'minutes',
'h', 'hours',
'd', 'days',
'w', 'weeks'
Modified from
https://stackoverflow.com/questions/538666/python-format-timedelta-to-string
""" # noqa
# Convert tdelta to integer seconds.
if inputtype == 'timedelta':
remainder = int(tdelta.total_seconds())
elif inputtype in ['s', 'seconds']:
remainder = int(tdelta)
elif inputtype in ['m', 'minutes']:
remainder = int(tdelta) * 60
elif inputtype in ['h', 'hours']:
remainder = int(tdelta) * 3600
elif inputtype in ['d', 'days']:
remainder = int(tdelta) * 86400
elif inputtype in ['w', 'weeks']:
remainder = int(tdelta) * 604800
else:
raise ValueError("Bad inputtype: {}".format(inputtype))
f = Formatter()
desired_fields = [field_tuple[1] for field_tuple in f.parse(fmt)]
possible_fields = ('W', 'D', 'H', 'M', 'S')
constants = {'W': 604800, 'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
values = {}
for field in possible_fields:
if field in desired_fields and field in constants:
values[field], remainder = divmod(remainder, constants[field])
return f.format(fmt, **values) | python | def strfdelta(tdelta: Union[datetime.timedelta, int, float, str],
fmt='{D:02}d {H:02}h {M:02}m {S:02}s',
inputtype='timedelta'):
"""
Convert a ``datetime.timedelta`` object or a regular number to a custom-
formatted string, just like the ``strftime()`` method does for
``datetime.datetime`` objects.
The ``fmt`` argument allows custom formatting to be specified. Fields can
include ``seconds``, ``minutes``, ``hours``, ``days``, and ``weeks``. Each
field is optional.
Some examples:
.. code-block:: none
'{D:02}d {H:02}h {M:02}m {S:02}s' --> '05d 08h 04m 02s' (default)
'{W}w {D}d {H}:{M:02}:{S:02}' --> '4w 5d 8:04:02'
'{D:2}d {H:2}:{M:02}:{S:02}' --> ' 5d 8:04:02'
'{H}h {S}s' --> '72h 800s'
The ``inputtype`` argument allows ``tdelta`` to be a regular number,
instead of the default behaviour of treating it as a ``datetime.timedelta``
object. Valid ``inputtype`` strings:
.. code-block:: none
'timedelta', # treats input as a datetime.timedelta
's', 'seconds',
'm', 'minutes',
'h', 'hours',
'd', 'days',
'w', 'weeks'
Modified from
https://stackoverflow.com/questions/538666/python-format-timedelta-to-string
""" # noqa
# Convert tdelta to integer seconds.
if inputtype == 'timedelta':
remainder = int(tdelta.total_seconds())
elif inputtype in ['s', 'seconds']:
remainder = int(tdelta)
elif inputtype in ['m', 'minutes']:
remainder = int(tdelta) * 60
elif inputtype in ['h', 'hours']:
remainder = int(tdelta) * 3600
elif inputtype in ['d', 'days']:
remainder = int(tdelta) * 86400
elif inputtype in ['w', 'weeks']:
remainder = int(tdelta) * 604800
else:
raise ValueError("Bad inputtype: {}".format(inputtype))
f = Formatter()
desired_fields = [field_tuple[1] for field_tuple in f.parse(fmt)]
possible_fields = ('W', 'D', 'H', 'M', 'S')
constants = {'W': 604800, 'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
values = {}
for field in possible_fields:
if field in desired_fields and field in constants:
values[field], remainder = divmod(remainder, constants[field])
return f.format(fmt, **values) | [
"def",
"strfdelta",
"(",
"tdelta",
":",
"Union",
"[",
"datetime",
".",
"timedelta",
",",
"int",
",",
"float",
",",
"str",
"]",
",",
"fmt",
"=",
"'{D:02}d {H:02}h {M:02}m {S:02}s'",
",",
"inputtype",
"=",
"'timedelta'",
")",
":",
"# noqa",
"# Convert tdelta to integer seconds.",
"if",
"inputtype",
"==",
"'timedelta'",
":",
"remainder",
"=",
"int",
"(",
"tdelta",
".",
"total_seconds",
"(",
")",
")",
"elif",
"inputtype",
"in",
"[",
"'s'",
",",
"'seconds'",
"]",
":",
"remainder",
"=",
"int",
"(",
"tdelta",
")",
"elif",
"inputtype",
"in",
"[",
"'m'",
",",
"'minutes'",
"]",
":",
"remainder",
"=",
"int",
"(",
"tdelta",
")",
"*",
"60",
"elif",
"inputtype",
"in",
"[",
"'h'",
",",
"'hours'",
"]",
":",
"remainder",
"=",
"int",
"(",
"tdelta",
")",
"*",
"3600",
"elif",
"inputtype",
"in",
"[",
"'d'",
",",
"'days'",
"]",
":",
"remainder",
"=",
"int",
"(",
"tdelta",
")",
"*",
"86400",
"elif",
"inputtype",
"in",
"[",
"'w'",
",",
"'weeks'",
"]",
":",
"remainder",
"=",
"int",
"(",
"tdelta",
")",
"*",
"604800",
"else",
":",
"raise",
"ValueError",
"(",
"\"Bad inputtype: {}\"",
".",
"format",
"(",
"inputtype",
")",
")",
"f",
"=",
"Formatter",
"(",
")",
"desired_fields",
"=",
"[",
"field_tuple",
"[",
"1",
"]",
"for",
"field_tuple",
"in",
"f",
".",
"parse",
"(",
"fmt",
")",
"]",
"possible_fields",
"=",
"(",
"'W'",
",",
"'D'",
",",
"'H'",
",",
"'M'",
",",
"'S'",
")",
"constants",
"=",
"{",
"'W'",
":",
"604800",
",",
"'D'",
":",
"86400",
",",
"'H'",
":",
"3600",
",",
"'M'",
":",
"60",
",",
"'S'",
":",
"1",
"}",
"values",
"=",
"{",
"}",
"for",
"field",
"in",
"possible_fields",
":",
"if",
"field",
"in",
"desired_fields",
"and",
"field",
"in",
"constants",
":",
"values",
"[",
"field",
"]",
",",
"remainder",
"=",
"divmod",
"(",
"remainder",
",",
"constants",
"[",
"field",
"]",
")",
"return",
"f",
".",
"format",
"(",
"fmt",
",",
"*",
"*",
"values",
")"
] | Convert a ``datetime.timedelta`` object or a regular number to a custom-
formatted string, just like the ``strftime()`` method does for
``datetime.datetime`` objects.
The ``fmt`` argument allows custom formatting to be specified. Fields can
include ``seconds``, ``minutes``, ``hours``, ``days``, and ``weeks``. Each
field is optional.
Some examples:
.. code-block:: none
'{D:02}d {H:02}h {M:02}m {S:02}s' --> '05d 08h 04m 02s' (default)
'{W}w {D}d {H}:{M:02}:{S:02}' --> '4w 5d 8:04:02'
'{D:2}d {H:2}:{M:02}:{S:02}' --> ' 5d 8:04:02'
'{H}h {S}s' --> '72h 800s'
The ``inputtype`` argument allows ``tdelta`` to be a regular number,
instead of the default behaviour of treating it as a ``datetime.timedelta``
object. Valid ``inputtype`` strings:
.. code-block:: none
'timedelta', # treats input as a datetime.timedelta
's', 'seconds',
'm', 'minutes',
'h', 'hours',
'd', 'days',
'w', 'weeks'
Modified from
https://stackoverflow.com/questions/538666/python-format-timedelta-to-string | [
"Convert",
"a",
"datetime",
".",
"timedelta",
"object",
"or",
"a",
"regular",
"number",
"to",
"a",
"custom",
"-",
"formatted",
"string",
"just",
"like",
"the",
"strftime",
"()",
"method",
"does",
"for",
"datetime",
".",
"datetime",
"objects",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L204-L266 |
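A usage sketch for strfdelta() exercising both a timedelta input and a plain-number input (editorial addition):

import datetime
from cardinal_pythonlib.datetimefunc import strfdelta

td = datetime.timedelta(days=5, hours=8, minutes=4, seconds=2)
print(strfdelta(td))                                     # 05d 08h 04m 02s
print(strfdelta(90, fmt='{H}h {M:02}m', inputtype='m'))  # 1h 30m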
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | convert_datetime_to_utc | def convert_datetime_to_utc(dt: PotentialDatetimeType) -> DateTime:
"""
Convert date/time with timezone to UTC (with UTC timezone).
"""
dt = coerce_to_pendulum(dt)
tz = get_tz_utc()
return dt.in_tz(tz) | python | def convert_datetime_to_utc(dt: PotentialDatetimeType) -> DateTime:
"""
Convert date/time with timezone to UTC (with UTC timezone).
"""
dt = coerce_to_pendulum(dt)
tz = get_tz_utc()
return dt.in_tz(tz) | [
"def",
"convert_datetime_to_utc",
"(",
"dt",
":",
"PotentialDatetimeType",
")",
"->",
"DateTime",
":",
"dt",
"=",
"coerce_to_pendulum",
"(",
"dt",
")",
"tz",
"=",
"get_tz_utc",
"(",
")",
"return",
"dt",
".",
"in_tz",
"(",
"tz",
")"
] | Convert date/time with timezone to UTC (with UTC timezone). | [
"Convert",
"date",
"/",
"time",
"with",
"timezone",
"to",
"UTC",
"(",
"with",
"UTC",
"timezone",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L320-L326 |
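A usage sketch for convert_datetime_to_utc(); an explicit offset in the input string is honoured during parsing, then converted (editorial addition; parsing behaviour assumed from pendulum):

from cardinal_pythonlib.datetimefunc import convert_datetime_to_utc

print(convert_datetime_to_utc("2018-01-01T09:00:00+03:00"))  # 2018-01-01T06:00:00+00:00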
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | convert_datetime_to_local | def convert_datetime_to_local(dt: PotentialDatetimeType) -> DateTime:
"""
Convert date/time with timezone to local timezone.
"""
dt = coerce_to_pendulum(dt)
tz = get_tz_local()
return dt.in_tz(tz) | python | def convert_datetime_to_local(dt: PotentialDatetimeType) -> DateTime:
"""
Convert date/time with timezone to local timezone.
"""
dt = coerce_to_pendulum(dt)
tz = get_tz_local()
return dt.in_tz(tz) | [
"def",
"convert_datetime_to_local",
"(",
"dt",
":",
"PotentialDatetimeType",
")",
"->",
"DateTime",
":",
"dt",
"=",
"coerce_to_pendulum",
"(",
"dt",
")",
"tz",
"=",
"get_tz_local",
"(",
")",
"return",
"dt",
".",
"in_tz",
"(",
"tz",
")"
] | Convert date/time with timezone to local timezone. | [
"Convert",
"date",
"/",
"time",
"with",
"timezone",
"to",
"local",
"timezone",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L329-L335 |
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | get_duration_h_m | def get_duration_h_m(start: Union[str, DateTime],
end: Union[str, DateTime],
default: str = "N/A") -> str:
"""
Calculate the time between two dates/times expressed as strings.
Args:
start: start date/time
end: end date/time
default: string value to return in case either of the inputs is
``None``
Returns:
a string that is one of
.. code-block:: none
'hh:mm'
'-hh:mm'
default
"""
start = coerce_to_pendulum(start)
end = coerce_to_pendulum(end)
if start is None or end is None:
return default
duration = end - start
minutes = duration.in_minutes()
(hours, minutes) = divmod(minutes, 60)
if hours < 0:
# negative... trickier
# Python's divmod does interesting things with negative numbers:
# Hours will be negative, and minutes always positive
hours += 1
minutes = 60 - minutes
return "-{}:{}".format(hours, "00" if minutes == 0 else minutes)
else:
return "{}:{}".format(hours, "00" if minutes == 0 else minutes) | python | def get_duration_h_m(start: Union[str, DateTime],
end: Union[str, DateTime],
default: str = "N/A") -> str:
"""
Calculate the time between two dates/times expressed as strings.
Args:
start: start date/time
end: end date/time
default: string value to return in case either of the inputs is
``None``
Returns:
a string that is one of
.. code-block:: none
'hh:mm'
'-hh:mm'
default
"""
start = coerce_to_pendulum(start)
end = coerce_to_pendulum(end)
if start is None or end is None:
return default
duration = end - start
minutes = duration.in_minutes()
(hours, minutes) = divmod(minutes, 60)
if hours < 0:
# negative... trickier
# Python's divmod does interesting things with negative numbers:
# Hours will be negative, and minutes always positive
hours += 1
minutes = 60 - minutes
return "-{}:{}".format(hours, "00" if minutes == 0 else minutes)
else:
return "{}:{}".format(hours, "00" if minutes == 0 else minutes) | [
"def",
"get_duration_h_m",
"(",
"start",
":",
"Union",
"[",
"str",
",",
"DateTime",
"]",
",",
"end",
":",
"Union",
"[",
"str",
",",
"DateTime",
"]",
",",
"default",
":",
"str",
"=",
"\"N/A\"",
")",
"->",
"str",
":",
"start",
"=",
"coerce_to_pendulum",
"(",
"start",
")",
"end",
"=",
"coerce_to_pendulum",
"(",
"end",
")",
"if",
"start",
"is",
"None",
"or",
"end",
"is",
"None",
":",
"return",
"default",
"duration",
"=",
"end",
"-",
"start",
"minutes",
"=",
"duration",
".",
"in_minutes",
"(",
")",
"(",
"hours",
",",
"minutes",
")",
"=",
"divmod",
"(",
"minutes",
",",
"60",
")",
"if",
"hours",
"<",
"0",
":",
"# negative... trickier",
"# Python's divmod does interesting things with negative numbers:",
"# Hours will be negative, and minutes always positive",
"hours",
"+=",
"1",
"minutes",
"=",
"60",
"-",
"minutes",
"return",
"\"-{}:{}\"",
".",
"format",
"(",
"hours",
",",
"\"00\"",
"if",
"minutes",
"==",
"0",
"else",
"minutes",
")",
"else",
":",
"return",
"\"{}:{}\"",
".",
"format",
"(",
"hours",
",",
"\"00\"",
"if",
"minutes",
"==",
"0",
"else",
"minutes",
")"
] | Calculate the time between two dates/times expressed as strings.
Args:
start: start date/time
end: end date/time
default: string value to return in case either of the inputs is
``None``
Returns:
a string that is one of
.. code-block:: none
'hh:mm'
'-hh:mm'
default | [
"Calculate",
"the",
"time",
"between",
"two",
"dates",
"/",
"times",
"expressed",
"as",
"strings",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L342-L379 |
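A usage sketch for get_duration_h_m() (editorial addition):

from cardinal_pythonlib.datetimefunc import get_duration_h_m

print(get_duration_h_m("2020-01-01T09:00", "2020-01-01T17:30"))  # 8:30
print(get_duration_h_m(None, "2020-01-01T17:30"))                # N/A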
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | get_age | def get_age(dob: PotentialDatetimeType,
when: PotentialDatetimeType,
default: str = "") -> Union[int, str]:
"""
Age (in whole years) at a particular date, or ``default``.
Args:
dob: date of birth
when: date/time at which to calculate age
default: value to return if either input is ``None``
Returns:
age in whole years (rounded down), or ``default``
"""
dob = coerce_to_pendulum_date(dob)
when = coerce_to_pendulum_date(when)
if dob is None or when is None:
return default
return (when - dob).years | python | def get_age(dob: PotentialDatetimeType,
when: PotentialDatetimeType,
default: str = "") -> Union[int, str]:
"""
Age (in whole years) at a particular date, or ``default``.
Args:
dob: date of birth
when: date/time at which to calculate age
default: value to return if either input is ``None``
Returns:
age in whole years (rounded down), or ``default``
"""
dob = coerce_to_pendulum_date(dob)
when = coerce_to_pendulum_date(when)
if dob is None or when is None:
return default
return (when - dob).years | [
"def",
"get_age",
"(",
"dob",
":",
"PotentialDatetimeType",
",",
"when",
":",
"PotentialDatetimeType",
",",
"default",
":",
"str",
"=",
"\"\"",
")",
"->",
"Union",
"[",
"int",
",",
"str",
"]",
":",
"dob",
"=",
"coerce_to_pendulum_date",
"(",
"dob",
")",
"when",
"=",
"coerce_to_pendulum_date",
"(",
"when",
")",
"if",
"dob",
"is",
"None",
"or",
"when",
"is",
"None",
":",
"return",
"default",
"return",
"(",
"when",
"-",
"dob",
")",
".",
"years"
] | Age (in whole years) at a particular date, or ``default``.
Args:
dob: date of birth
when: date/time at which to calculate age
default: value to return if either input is ``None``
Returns:
age in whole years (rounded down), or ``default`` | [
"Age",
"(",
"in",
"whole",
"years",
")",
"at",
"a",
"particular",
"date",
"or",
"default",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L382-L401 |
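A usage sketch for get_age(); the age only increments on the birthday itself (editorial addition):

from cardinal_pythonlib.datetimefunc import get_age

print(get_age("2000-06-15", "2020-06-14"))  # 19
print(get_age("2000-06-15", "2020-06-15"))  # 20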
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | truncate_date_to_first_of_month | def truncate_date_to_first_of_month(
dt: Optional[DateLikeType]) -> Optional[DateLikeType]:
"""
Change the day to the first of the month.
"""
if dt is None:
return None
return dt.replace(day=1) | python | def truncate_date_to_first_of_month(
dt: Optional[DateLikeType]) -> Optional[DateLikeType]:
"""
Change the day to the first of the month.
"""
if dt is None:
return None
return dt.replace(day=1) | [
"def",
"truncate_date_to_first_of_month",
"(",
"dt",
":",
"Optional",
"[",
"DateLikeType",
"]",
")",
"->",
"Optional",
"[",
"DateLikeType",
"]",
":",
"if",
"dt",
"is",
"None",
":",
"return",
"None",
"return",
"dt",
".",
"replace",
"(",
"day",
"=",
"1",
")"
] | Change the day to the first of the month. | [
"Change",
"the",
"day",
"to",
"the",
"first",
"of",
"the",
"month",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L408-L415 |
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | get_now_utc_notz_datetime | def get_now_utc_notz_datetime() -> datetime.datetime:
"""
Get the UTC time now, but with no timezone information,
in :class:`datetime.datetime` format.
"""
now = datetime.datetime.utcnow()
return now.replace(tzinfo=None) | python | def get_now_utc_notz_datetime() -> datetime.datetime:
"""
Get the UTC time now, but with no timezone information,
in :class:`datetime.datetime` format.
"""
now = datetime.datetime.utcnow()
return now.replace(tzinfo=None) | [
"def",
"get_now_utc_notz_datetime",
"(",
")",
"->",
"datetime",
".",
"datetime",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"return",
"now",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")"
] | Get the UTC time now, but with no timezone information,
in :class:`datetime.datetime` format. | [
"Get",
"the",
"UTC",
"time",
"now",
"but",
"with",
"no",
"timezone",
"information",
"in",
":",
"class",
":",
"datetime",
".",
"datetime",
"format",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L422-L428 |
RudolfCardinal/pythonlib | cardinal_pythonlib/datetimefunc.py | coerce_to_datetime | def coerce_to_datetime(x: Any) -> Optional[datetime.datetime]:
"""
Ensure an object is a :class:`datetime.datetime`, or coerce to one, or
raise :exc:`ValueError` or :exc:`OverflowError` (as per
http://dateutil.readthedocs.org/en/latest/parser.html).
"""
if x is None:
return None
elif isinstance(x, DateTime):
return pendulum_to_datetime(x)
elif isinstance(x, datetime.datetime):
return x
elif isinstance(x, datetime.date):
return datetime.datetime(x.year, x.month, x.day)
else:
return dateutil.parser.parse(x) | python | def coerce_to_datetime(x: Any) -> Optional[datetime.datetime]:
"""
Ensure an object is a :class:`datetime.datetime`, or coerce to one, or
raise :exc:`ValueError` or :exc:`OverflowError` (as per
http://dateutil.readthedocs.org/en/latest/parser.html).
"""
if x is None:
return None
elif isinstance(x, DateTime):
return pendulum_to_datetime(x)
elif isinstance(x, datetime.datetime):
return x
elif isinstance(x, datetime.date):
return datetime.datetime(x.year, x.month, x.day)
else:
return dateutil.parser.parse(x) | [
"def",
"coerce_to_datetime",
"(",
"x",
":",
"Any",
")",
"->",
"Optional",
"[",
"datetime",
".",
"datetime",
"]",
":",
"if",
"x",
"is",
"None",
":",
"return",
"None",
"elif",
"isinstance",
"(",
"x",
",",
"DateTime",
")",
":",
"return",
"pendulum_to_datetime",
"(",
"x",
")",
"elif",
"isinstance",
"(",
"x",
",",
"datetime",
".",
"datetime",
")",
":",
"return",
"x",
"elif",
"isinstance",
"(",
"x",
",",
"datetime",
".",
"date",
")",
":",
"return",
"datetime",
".",
"datetime",
"(",
"x",
".",
"year",
",",
"x",
".",
"month",
",",
"x",
".",
"day",
")",
"else",
":",
"return",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"x",
")"
] | Ensure an object is a :class:`datetime.datetime`, or coerce to one, or
raise :exc:`ValueError` or :exc:`OverflowError` (as per
http://dateutil.readthedocs.org/en/latest/parser.html). | [
"Ensure",
"an",
"object",
"is",
"a",
":",
"class",
":",
"datetime",
".",
"datetime",
"or",
"coerce",
"to",
"one",
"or",
"raise",
":",
"exc",
":",
"ValueError",
"or",
":",
"exc",
":",
"OverflowError",
"(",
"as",
"per",
"http",
":",
"//",
"dateutil",
".",
"readthedocs",
".",
"org",
"/",
"en",
"/",
"latest",
"/",
"parser",
".",
"html",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/datetimefunc.py#L431-L446 |
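A usage sketch for coerce_to_datetime() covering its main branches (editorial addition):

import datetime
from cardinal_pythonlib.datetimefunc import coerce_to_datetime

print(coerce_to_datetime("2020-03-01 10:00"))         # 2020-03-01 10:00:00
print(coerce_to_datetime(datetime.date(2020, 3, 1)))  # 2020-03-01 00:00:00
print(coerce_to_datetime(None))                       # None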
ivanprjcts/sdklib | sdklib/http/base.py | request_from_context | def request_from_context(context):
"""
Do http requests from context.
:param context: request context.
"""
new_context = copy.deepcopy(context)
assert new_context.method in ALLOWED_METHODS
new_context.url_path = generate_url_path(
new_context.url_path,
prefix=new_context.prefix_url_path,
format_suffix=new_context.url_path_format,
**new_context.url_path_params
)
if new_context.body_params or new_context.files:
body, content_type = new_context.renderer.encode_params(new_context.body_params, files=new_context.files)
if new_context.update_content_type and HttpSdk.CONTENT_TYPE_HEADER_NAME not in new_context.headers:
new_context.headers[HttpSdk.CONTENT_TYPE_HEADER_NAME] = content_type
else:
body = None
authentication_instances = new_context.authentication_instances
for auth_obj in authentication_instances:
new_context = auth_obj.apply_authentication(new_context)
if HttpSdk.COOKIE_HEADER_NAME not in new_context.headers and not new_context.cookie.is_empty():
new_context.headers[HttpSdk.COOKIE_HEADER_NAME] = new_context.cookie.as_cookie_header_value()
url = "%s%s" % (new_context.host, new_context.url_path)
if new_context.query_params:
url += "?%s" % (urlencode(new_context.query_params))
log_print_request(new_context.method, url, new_context.query_params, new_context.headers, body)
# ensure method and url are native str
r = HttpSdk.get_pool_manager(new_context.proxy).request(
convert_unicode_to_native_str(new_context.method),
convert_unicode_to_native_str(url),
body=body,
headers=HttpSdk.convert_headers_to_native_str(new_context.headers),
redirect=new_context.redirect,
timeout=new_context.timeout
)
log_print_response(r.status, r.data, r.headers)
r = new_context.response_class(r)
return r | python | def request_from_context(context):
"""
Do http requests from context.
:param context: request context.
"""
new_context = copy.deepcopy(context)
assert new_context.method in ALLOWED_METHODS
new_context.url_path = generate_url_path(
new_context.url_path,
prefix=new_context.prefix_url_path,
format_suffix=new_context.url_path_format,
**new_context.url_path_params
)
if new_context.body_params or new_context.files:
body, content_type = new_context.renderer.encode_params(new_context.body_params, files=new_context.files)
if new_context.update_content_type and HttpSdk.CONTENT_TYPE_HEADER_NAME not in new_context.headers:
new_context.headers[HttpSdk.CONTENT_TYPE_HEADER_NAME] = content_type
else:
body = None
authentication_instances = new_context.authentication_instances
for auth_obj in authentication_instances:
new_context = auth_obj.apply_authentication(new_context)
if HttpSdk.COOKIE_HEADER_NAME not in new_context.headers and not new_context.cookie.is_empty():
new_context.headers[HttpSdk.COOKIE_HEADER_NAME] = new_context.cookie.as_cookie_header_value()
url = "%s%s" % (new_context.host, new_context.url_path)
if new_context.query_params:
url += "?%s" % (urlencode(new_context.query_params))
log_print_request(new_context.method, url, new_context.query_params, new_context.headers, body)
# ensure method and url are native str
r = HttpSdk.get_pool_manager(new_context.proxy).request(
convert_unicode_to_native_str(new_context.method),
convert_unicode_to_native_str(url),
body=body,
headers=HttpSdk.convert_headers_to_native_str(new_context.headers),
redirect=new_context.redirect,
timeout=new_context.timeout
)
log_print_response(r.status, r.data, r.headers)
r = new_context.response_class(r)
return r | [
"def",
"request_from_context",
"(",
"context",
")",
":",
"new_context",
"=",
"copy",
".",
"deepcopy",
"(",
"context",
")",
"assert",
"new_context",
".",
"method",
"in",
"ALLOWED_METHODS",
"new_context",
".",
"url_path",
"=",
"generate_url_path",
"(",
"new_context",
".",
"url_path",
",",
"prefix",
"=",
"new_context",
".",
"prefix_url_path",
",",
"format_suffix",
"=",
"new_context",
".",
"url_path_format",
",",
"*",
"*",
"new_context",
".",
"url_path_params",
")",
"if",
"new_context",
".",
"body_params",
"or",
"new_context",
".",
"files",
":",
"body",
",",
"content_type",
"=",
"new_context",
".",
"renderer",
".",
"encode_params",
"(",
"new_context",
".",
"body_params",
",",
"files",
"=",
"new_context",
".",
"files",
")",
"if",
"new_context",
".",
"update_content_type",
"and",
"HttpSdk",
".",
"CONTENT_TYPE_HEADER_NAME",
"not",
"in",
"new_context",
".",
"headers",
":",
"new_context",
".",
"headers",
"[",
"HttpSdk",
".",
"CONTENT_TYPE_HEADER_NAME",
"]",
"=",
"content_type",
"else",
":",
"body",
"=",
"None",
"authentication_instances",
"=",
"new_context",
".",
"authentication_instances",
"for",
"auth_obj",
"in",
"authentication_instances",
":",
"new_context",
"=",
"auth_obj",
".",
"apply_authentication",
"(",
"new_context",
")",
"if",
"HttpSdk",
".",
"COOKIE_HEADER_NAME",
"not",
"in",
"new_context",
".",
"headers",
"and",
"not",
"new_context",
".",
"cookie",
".",
"is_empty",
"(",
")",
":",
"new_context",
".",
"headers",
"[",
"HttpSdk",
".",
"COOKIE_HEADER_NAME",
"]",
"=",
"new_context",
".",
"cookie",
".",
"as_cookie_header_value",
"(",
")",
"url",
"=",
"\"%s%s\"",
"%",
"(",
"new_context",
".",
"host",
",",
"new_context",
".",
"url_path",
")",
"if",
"new_context",
".",
"query_params",
":",
"url",
"+=",
"\"?%s\"",
"%",
"(",
"urlencode",
"(",
"new_context",
".",
"query_params",
")",
")",
"log_print_request",
"(",
"new_context",
".",
"method",
",",
"url",
",",
"new_context",
".",
"query_params",
",",
"new_context",
".",
"headers",
",",
"body",
")",
"# ensure method and url are native str",
"r",
"=",
"HttpSdk",
".",
"get_pool_manager",
"(",
"new_context",
".",
"proxy",
")",
".",
"request",
"(",
"convert_unicode_to_native_str",
"(",
"new_context",
".",
"method",
")",
",",
"convert_unicode_to_native_str",
"(",
"url",
")",
",",
"body",
"=",
"body",
",",
"headers",
"=",
"HttpSdk",
".",
"convert_headers_to_native_str",
"(",
"new_context",
".",
"headers",
")",
",",
"redirect",
"=",
"new_context",
".",
"redirect",
",",
"timeout",
"=",
"new_context",
".",
"timeout",
")",
"log_print_response",
"(",
"r",
".",
"status",
",",
"r",
".",
"data",
",",
"r",
".",
"headers",
")",
"r",
"=",
"new_context",
".",
"response_class",
"(",
"r",
")",
"return",
"r"
] | Do http requests from context.
:param context: request context. | [
"Do",
"http",
"requests",
"from",
"context",
"."
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/base.py#L33-L79 |
ivanprjcts/sdklib | sdklib/http/base.py | HttpRequestContext.clear | def clear(self, *args):
"""
Set default values to **self.fields_to_clear**. In addition, it is possible to pass extra fields to clear.
:param args: extra fields to clear.
"""
for field in self.fields_to_clear + list(args):
setattr(self, field, None) | python | def clear(self, *args):
"""
Set default values to **self.fields_to_clear**. In addition, it is possible to pass extra fields to clear.
:param args: extra fields to clear.
"""
for field in self.fields_to_clear + list(args):
setattr(self, field, None) | [
"def",
"clear",
"(",
"self",
",",
"*",
"args",
")",
":",
"for",
"field",
"in",
"self",
".",
"fields_to_clear",
"+",
"list",
"(",
"args",
")",
":",
"setattr",
"(",
"self",
",",
"field",
",",
"None",
")"
] | Set default values to **self.fields_to_clear**. In addition, it is possible to pass extra fields to clear.
:param args: extra fields to clear. | [
"Set",
"default",
"values",
"to",
"**",
"self",
".",
"fields_to_clear",
"**",
".",
"In",
"addition",
"it",
"is",
"possible",
"to",
"pass",
"extra",
"fields",
"to",
"clear",
"."
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/base.py#L232-L239 |
ivanprjcts/sdklib | sdklib/http/base.py | HttpSdk.host | def host(self, value):
"""
A string that will be automatically included at the beginning of the url generated for doing each http request.
:param value: The host to be connected with, e.g. (http://hostname) or (https://X.X.X.X:port)
"""
scheme, host, port = get_hostname_parameters_from_url(value)
self._host = "%s://%s:%s" % (scheme, host, port) | python | def host(self, value):
"""
A string that will be automatically included at the beginning of the url generated for doing each http request.
:param value: The host to be connected with, e.g. (http://hostname) or (https://X.X.X.X:port)
"""
scheme, host, port = get_hostname_parameters_from_url(value)
self._host = "%s://%s:%s" % (scheme, host, port) | [
"def",
"host",
"(",
"self",
",",
"value",
")",
":",
"scheme",
",",
"host",
",",
"port",
"=",
"get_hostname_parameters_from_url",
"(",
"value",
")",
"self",
".",
"_host",
"=",
"\"%s://%s:%s\"",
"%",
"(",
"scheme",
",",
"host",
",",
"port",
")"
] | A string that will be automatically included at the beginning of the url generated for doing each http request.
:param value: The host to be connected with, e.g. (http://hostname) or (https://X.X.X.X:port) | [
"A",
"string",
"that",
"will",
"be",
"automatically",
"included",
"at",
"the",
"beginning",
"of",
"the",
"url",
"generated",
"for",
"doing",
"each",
"http",
"request",
"."
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/base.py#L281-L288 |
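An illustrative re-implementation of the normalization the setter above performs; the real parsing lives in sdklib's `get_hostname_parameters_from_url`, so the default scheme and ports assumed here are inferred from the surrounding code, not the library's documented contract.

```python
from urllib.parse import urlsplit


def normalize_host(value):
    # Assumed behaviour: fill in a default scheme and port, then rebuild
    # the host string as scheme://host:port, as the setter above does.
    parts = urlsplit(value if "//" in value else "//" + value)
    scheme = parts.scheme or "http"
    port = parts.port or (443 if scheme == "https" else 80)
    return "%s://%s:%s" % (scheme, parts.hostname, port)


print(normalize_host("http://hostname"))        # http://hostname:80
print(normalize_host("https://10.0.0.1:8443"))  # https://10.0.0.1:8443
```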
ivanprjcts/sdklib | sdklib/http/base.py | HttpSdk.cookie | def cookie(self, value):
"""
Set cookie.
:param value:
"""
if value and not value.is_empty():
self._cookie = value
else:
self._cookie = Cookie() | python | def cookie(self, value):
"""
Set cookie.
:param value:
"""
if value and not value.is_empty():
self._cookie = value
else:
self._cookie = Cookie() | [
"def",
"cookie",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"and",
"not",
"value",
".",
"is_empty",
"(",
")",
":",
"self",
".",
"_cookie",
"=",
"value",
"else",
":",
"self",
".",
"_cookie",
"=",
"Cookie",
"(",
")"
] | Set cookie.
:param value: | [
"Set",
"cookie",
".",
":",
"param",
"value",
":"
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/base.py#L316-L324 |
ivanprjcts/sdklib | sdklib/http/base.py | HttpSdk.set_default_host | def set_default_host(cls, value):
"""
Default: "http://127.0.0.1:80"
A string that will be automatically included at the beginning of the url generated for doing each http request.
"""
if value is None:
cls.DEFAULT_HOST = "http://127.0.0.1:80"
else:
scheme, host, port = get_hostname_parameters_from_url(value)
cls.DEFAULT_HOST = "%s://%s:%s" % (scheme, host, port) | python | def set_default_host(cls, value):
"""
Default: "http://127.0.0.1:80"
A string that will be automatically included at the beginning of the url generated for doing each http request.
"""
if value is None:
cls.DEFAULT_HOST = "http://127.0.0.1:80"
else:
scheme, host, port = get_hostname_parameters_from_url(value)
cls.DEFAULT_HOST = "%s://%s:%s" % (scheme, host, port) | [
"def",
"set_default_host",
"(",
"cls",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"cls",
".",
"DEFAULT_HOST",
"=",
"\"http://127.0.0.1:80\"",
"else",
":",
"scheme",
",",
"host",
",",
"port",
"=",
"get_hostname_parameters_from_url",
"(",
"value",
")",
"cls",
".",
"DEFAULT_HOST",
"=",
"\"%s://%s:%s\"",
"%",
"(",
"scheme",
",",
"host",
",",
"port",
")"
] | Default: "http://127.0.0.1:80"
A string that will be automatically included at the beginning of the url generated for doing each http request. | [
"Default",
":",
"http",
":",
"//",
"127",
".",
"0",
".",
"0",
".",
"1",
":",
"80"
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/base.py#L353-L363 |
ivanprjcts/sdklib | sdklib/http/base.py | HttpSdk.set_default_proxy | def set_default_proxy(cls, value):
"""
Default: None (no proxy)
A string used to indicate that each request must be sent through this proxy server.
Use the scheme://hostname:port form.
If you need to use a proxy, you can configure individual requests with the proxies argument to any request
method.
"""
if value is None:
cls.DEFAULT_PROXY = None
else:
scheme, host, port = get_hostname_parameters_from_url(value)
cls.DEFAULT_PROXY = "%s://%s:%s" % (scheme, host, port) | python | def set_default_proxy(cls, value):
"""
Default: None (no proxy)
A string used to indicate that each request must be sent through this proxy server.
Use the scheme://hostname:port form.
If you need to use a proxy, you can configure individual requests with the proxies argument to any request
method.
"""
if value is None:
cls.DEFAULT_PROXY = None
else:
scheme, host, port = get_hostname_parameters_from_url(value)
cls.DEFAULT_PROXY = "%s://%s:%s" % (scheme, host, port) | [
"def",
"set_default_proxy",
"(",
"cls",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"cls",
".",
"DEFAULT_PROXY",
"=",
"None",
"else",
":",
"scheme",
",",
"host",
",",
"port",
"=",
"get_hostname_parameters_from_url",
"(",
"value",
")",
"cls",
".",
"DEFAULT_PROXY",
"=",
"\"%s://%s:%s\"",
"%",
"(",
"scheme",
",",
"host",
",",
"port",
")"
] | Default: None (no proxy)
A string used to indicate that each request must be sent through this proxy server.
Use the scheme://hostname:port form.
If you need to use a proxy, you can configure individual requests with the proxies argument to any request
method. | [
"Default",
":",
"None",
"(",
"no",
"proxy",
")"
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/base.py#L366-L379 |
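A hedged usage sketch covering both class methods above (`set_default_host` and `set_default_proxy`): the values are normalized and then apply to every subsequently created instance. The import path follows the repository layout (`sdklib/http/base.py`) but is an assumption.

```python
from sdklib.http import HttpSdk  # assumed import path

HttpSdk.set_default_host("https://api.example.com")  # stored as "https://api.example.com:443"
HttpSdk.set_default_proxy("proxy.local:3128")        # normalized to scheme://hostname:port
HttpSdk.set_default_host(None)                       # back to "http://127.0.0.1:80"
HttpSdk.set_default_proxy(None)                      # back to direct connections
```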
ivanprjcts/sdklib | sdklib/http/base.py | HttpSdk._http_request | def _http_request(self, method, url_path, headers=None, query_params=None, body_params=None, files=None, **kwargs):
"""
Method to do http requests.
:param method:
:param url_path:
:param headers:
:param body_params:
:param query_params:
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart
encoding upload.
``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')``
or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file.
:param update_content_type: (bool) Update headers before performing the request, adding the Content-Type value
according to the rendered body. By default: True.
:return:
"""
host = kwargs.get('host', self.host)
proxy = kwargs.get('proxy', self.proxy)
renderer = kwargs.get('renderer', MultiPartRenderer() if files else self.default_renderer)
prefix_url_path = kwargs.get('prefix_url_path', self.prefix_url_path)
authentication_instances = kwargs.get('authentication_instances', self.authentication_instances)
url_path_format = kwargs.get('url_path_format', self.url_path_format)
update_content_type = kwargs.get('update_content_type', True)
redirect = kwargs.get('redirect', False)
if headers is None:
headers = self.default_headers()
context = HttpRequestContext(
host=host, proxy=proxy, method=method,
prefix_url_path=prefix_url_path,
url_path=url_path,
url_path_params=self.url_path_params,
url_path_format=url_path_format,
headers=headers,
query_params=query_params,
body_params=body_params,
files=files,
renderer=renderer,
response_class=self.response_class,
authentication_instances=authentication_instances,
update_content_type=update_content_type,
redirect=redirect
)
res = self.http_request_from_context(context)
self.cookie.update(res.cookie)
return res | python | def _http_request(self, method, url_path, headers=None, query_params=None, body_params=None, files=None, **kwargs):
"""
Method to do http requests.
:param method:
:param url_path:
:param headers:
:param body_params:
:param query_params:
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart
encoding upload.
``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')``
or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file.
:param update_content_type: (bool) Update headers before performing the request, adding the Content-Type value
according to the rendered body. By default: True.
:return:
"""
host = kwargs.get('host', self.host)
proxy = kwargs.get('proxy', self.proxy)
renderer = kwargs.get('renderer', MultiPartRenderer() if files else self.default_renderer)
prefix_url_path = kwargs.get('prefix_url_path', self.prefix_url_path)
authentication_instances = kwargs.get('authentication_instances', self.authentication_instances)
url_path_format = kwargs.get('url_path_format', self.url_path_format)
update_content_type = kwargs.get('update_content_type', True)
redirect = kwargs.get('redirect', False)
if headers is None:
headers = self.default_headers()
context = HttpRequestContext(
host=host, proxy=proxy, method=method,
prefix_url_path=prefix_url_path,
url_path=url_path,
url_path_params=self.url_path_params,
url_path_format=url_path_format,
headers=headers,
query_params=query_params,
body_params=body_params,
files=files,
renderer=renderer,
response_class=self.response_class,
authentication_instances=authentication_instances,
update_content_type=update_content_type,
redirect=redirect
)
res = self.http_request_from_context(context)
self.cookie.update(res.cookie)
return res | [
"def",
"_http_request",
"(",
"self",
",",
"method",
",",
"url_path",
",",
"headers",
"=",
"None",
",",
"query_params",
"=",
"None",
",",
"body_params",
"=",
"None",
",",
"files",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"host",
"=",
"kwargs",
".",
"get",
"(",
"'host'",
",",
"self",
".",
"host",
")",
"proxy",
"=",
"kwargs",
".",
"get",
"(",
"'proxy'",
",",
"self",
".",
"proxy",
")",
"renderer",
"=",
"kwargs",
".",
"get",
"(",
"'renderer'",
",",
"MultiPartRenderer",
"(",
")",
"if",
"files",
"else",
"self",
".",
"default_renderer",
")",
"prefix_url_path",
"=",
"kwargs",
".",
"get",
"(",
"'prefix_url_path'",
",",
"self",
".",
"prefix_url_path",
")",
"authentication_instances",
"=",
"kwargs",
".",
"get",
"(",
"'authentication_instances'",
",",
"self",
".",
"authentication_instances",
")",
"url_path_format",
"=",
"kwargs",
".",
"get",
"(",
"'url_path_format'",
",",
"self",
".",
"url_path_format",
")",
"update_content_type",
"=",
"kwargs",
".",
"get",
"(",
"'update_content_type'",
",",
"True",
")",
"redirect",
"=",
"kwargs",
".",
"get",
"(",
"'redirect'",
",",
"False",
")",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"self",
".",
"default_headers",
"(",
")",
"context",
"=",
"HttpRequestContext",
"(",
"host",
"=",
"host",
",",
"proxy",
"=",
"proxy",
",",
"method",
"=",
"method",
",",
"prefix_url_path",
"=",
"prefix_url_path",
",",
"url_path",
"=",
"url_path",
",",
"url_path_params",
"=",
"self",
".",
"url_path_params",
",",
"url_path_format",
"=",
"url_path_format",
",",
"headers",
"=",
"headers",
",",
"query_params",
"=",
"query_params",
",",
"body_params",
"=",
"body_params",
",",
"files",
"=",
"files",
",",
"renderer",
"=",
"renderer",
",",
"response_class",
"=",
"self",
".",
"response_class",
",",
"authentication_instances",
"=",
"authentication_instances",
",",
"update_content_type",
"=",
"update_content_type",
",",
"redirect",
"=",
"redirect",
")",
"res",
"=",
"self",
".",
"http_request_from_context",
"(",
"context",
")",
"self",
".",
"cookie",
".",
"update",
"(",
"res",
".",
"cookie",
")",
"return",
"res"
] | Method to do http requests.
:param method:
:param url_path:
:param headers:
:param body_params:
:param query_params:
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart
encoding upload.
``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')``
or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file.
:param update_content_type: (bool) Update headers before performing the request, adding the Content-Type value
according to the rendered body. By default: True.
:return: | [
"Method",
"to",
"do",
"http",
"requests",
"."
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/base.py#L394-L443 |
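A hedged sketch of calling the workhorse method above from a subclass; `SampleApi`, the `/v1/items` path, and the payloads are invented, and per-call keyword arguments (`host`, `proxy`, `renderer`, `redirect`, ...) override the instance-level defaults.

```python
from sdklib.http import HttpSdk  # assumed import path


class SampleApi(HttpSdk):
    DEFAULT_HOST = "https://api.example.com"


api = SampleApi()
res = api._http_request(
    "POST", "/v1/items",
    body_params={"name": "widget"},
    files={"manifest": ("manifest.txt",)},  # 1-tuple form: filepath only
    update_content_type=True,               # recompute Content-Type from the rendered body
    redirect=False,
)
print(res.status)  # assuming response_class exposes the status code
```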
ivanprjcts/sdklib | sdklib/http/base.py | HttpSdk.login | def login(self, **kwargs):
"""
Login abstract method with default implementation.
:param kwargs: parameters
:return: SdkResponse
"""
assert self.LOGIN_URL_PATH is not None
render_name = kwargs.pop("render", "json")
render = get_renderer(render_name)
params = parse_args(**kwargs)
return self.post(self.LOGIN_URL_PATH, body_params=params, render=render) | python | def login(self, **kwargs):
"""
Login abstract method with default implementation.
:param kwargs: parameters
:return: SdkResponse
"""
assert self.LOGIN_URL_PATH is not None
render_name = kwargs.pop("render", "json")
render = get_renderer(render_name)
params = parse_args(**kwargs)
return self.post(self.LOGIN_URL_PATH, body_params=params, render=render) | [
"def",
"login",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"self",
".",
"LOGIN_URL_PATH",
"is",
"not",
"None",
"render_name",
"=",
"kwargs",
".",
"pop",
"(",
"\"render\"",
",",
"\"json\"",
")",
"render",
"=",
"get_renderer",
"(",
"render_name",
")",
"params",
"=",
"parse_args",
"(",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"post",
"(",
"self",
".",
"LOGIN_URL_PATH",
",",
"body_params",
"=",
"params",
",",
"render",
"=",
"render",
")"
] | Login abstract method with default implementation.
:param kwargs: parameters
:return: SdkResponse | [
"Login",
"abstract",
"method",
"with",
"default",
"implementation",
"."
] | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/base.py#L463-L475 |
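A hedged sketch of the login pattern implied above: a subclass defines `LOGIN_URL_PATH`, and keyword arguments become the body (JSON-rendered unless `render` says otherwise). The host, path, and credentials are invented.

```python
from sdklib.http import HttpSdk  # assumed import path


class MyApi(HttpSdk):
    DEFAULT_HOST = "https://api.example.com"
    LOGIN_URL_PATH = "/api/v1/login/"


api = MyApi()
res = api.login(username="alice", password="s3cret")  # POSTs a JSON body to LOGIN_URL_PATH
```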
RudolfCardinal/pythonlib | cardinal_pythonlib/nhs.py | nhs_check_digit | def nhs_check_digit(ninedigits: Union[str, List[Union[str, int]]]) -> int:
"""
Calculates an NHS number check digit.
Args:
ninedigits: string or list
Returns:
check digit
Method:
1. Multiply each of the first nine digits by the corresponding
digit weighting (see :const:`NHS_DIGIT_WEIGHTINGS`).
2. Sum the results.
3. Take remainder after division by 11.
4. Subtract the remainder from 11
5. If this is 11, use 0 instead
If it's 10, the number is invalid
If it doesn't match the actual check digit, the number is invalid
"""
if len(ninedigits) != 9 or not all(str(x).isdigit() for x in ninedigits):
raise ValueError("bad string to nhs_check_digit")
check_digit = 11 - (sum([
int(d) * f
for (d, f) in zip(ninedigits, NHS_DIGIT_WEIGHTINGS)
]) % 11)
# ... % 11 yields something in the range 0-10
# ... 11 - that yields something in the range 1-11
if check_digit == 11:
check_digit = 0
return check_digit | python | def nhs_check_digit(ninedigits: Union[str, List[Union[str, int]]]) -> int:
"""
Calculates an NHS number check digit.
Args:
ninedigits: string or list
Returns:
check digit
Method:
1. Multiply each of the first nine digits by the corresponding
digit weighting (see :const:`NHS_DIGIT_WEIGHTINGS`).
2. Sum the results.
3. Take remainder after division by 11.
4. Subtract the remainder from 11
5. If this is 11, use 0 instead
If it's 10, the number is invalid
If it doesn't match the actual check digit, the number is invalid
"""
if len(ninedigits) != 9 or not all(str(x).isdigit() for x in ninedigits):
raise ValueError("bad string to nhs_check_digit")
check_digit = 11 - (sum([
int(d) * f
for (d, f) in zip(ninedigits, NHS_DIGIT_WEIGHTINGS)
]) % 11)
# ... % 11 yields something in the range 0-10
# ... 11 - that yields something in the range 1-11
if check_digit == 11:
check_digit = 0
return check_digit | [
"def",
"nhs_check_digit",
"(",
"ninedigits",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"Union",
"[",
"str",
",",
"int",
"]",
"]",
"]",
")",
"->",
"int",
":",
"if",
"len",
"(",
"ninedigits",
")",
"!=",
"9",
"or",
"not",
"all",
"(",
"str",
"(",
"x",
")",
".",
"isdigit",
"(",
")",
"for",
"x",
"in",
"ninedigits",
")",
":",
"raise",
"ValueError",
"(",
"\"bad string to nhs_check_digit\"",
")",
"check_digit",
"=",
"11",
"-",
"(",
"sum",
"(",
"[",
"int",
"(",
"d",
")",
"*",
"f",
"for",
"(",
"d",
",",
"f",
")",
"in",
"zip",
"(",
"ninedigits",
",",
"NHS_DIGIT_WEIGHTINGS",
")",
"]",
")",
"%",
"11",
")",
"# ... % 11 yields something in the range 0-10",
"# ... 11 - that yields something in the range 1-11",
"if",
"check_digit",
"==",
"11",
":",
"check_digit",
"=",
"0",
"return",
"check_digit"
] | Calculates an NHS number check digit.
Args:
ninedigits: string or list
Returns:
check digit
Method:
1. Multiply each of the first nine digits by the corresponding
digit weighting (see :const:`NHS_DIGIT_WEIGHTINGS`).
2. Sum the results.
3. Take remainder after division by 11.
4. Subtract the remainder from 11
5. If this is 11, use 0 instead
If it's 10, the number is invalid
If it doesn't match the actual check digit, the number is invalid | [
"Calculates",
"an",
"NHS",
"number",
"check",
"digit",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/nhs.py#L45-L77 |
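A worked example of the algorithm above, assuming `NHS_DIGIT_WEIGHTINGS` holds the standard NHS Modulus 11 weights (10 down to 2); the arithmetic below is checked by hand.

```python
NHS_DIGIT_WEIGHTINGS = [10, 9, 8, 7, 6, 5, 4, 3, 2]  # standard Modulus 11 weights (assumed)

digits = "987654321"
weighted_sum = sum(int(d) * w for d, w in zip(digits, NHS_DIGIT_WEIGHTINGS))
print(weighted_sum)            # 330
remainder = weighted_sum % 11  # 0
check = 11 - remainder         # 11, which the rule maps to 0
check = 0 if check == 11 else check
print(check)                   # 0 -> the full valid number is 9876543210
```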
RudolfCardinal/pythonlib | cardinal_pythonlib/nhs.py | is_valid_nhs_number | def is_valid_nhs_number(n: int) -> bool:
"""
Validates an integer as an NHS number.
Args:
n: NHS number
Returns:
valid?
Checksum details are at
http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp
""" # noqa
if not isinstance(n, int):
log.debug("is_valid_nhs_number: parameter was not of integer type")
return False
s = str(n)
# Not 10 digits long?
if len(s) != 10:
log.debug("is_valid_nhs_number: not 10 digits")
return False
main_digits = [int(s[i]) for i in range(9)]
actual_check_digit = int(s[9]) # tenth digit
expected_check_digit = nhs_check_digit(main_digits)
if expected_check_digit == 10:
log.debug("is_valid_nhs_number: calculated check digit invalid")
return False
if expected_check_digit != actual_check_digit:
log.debug("is_valid_nhs_number: check digit mismatch")
return False
# Hooray!
return True | python | def is_valid_nhs_number(n: int) -> bool:
"""
Validates an integer as an NHS number.
Args:
n: NHS number
Returns:
valid?
Checksum details are at
http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp
""" # noqa
if not isinstance(n, int):
log.debug("is_valid_nhs_number: parameter was not of integer type")
return False
s = str(n)
# Not 10 digits long?
if len(s) != 10:
log.debug("is_valid_nhs_number: not 10 digits")
return False
main_digits = [int(s[i]) for i in range(9)]
actual_check_digit = int(s[9]) # tenth digit
expected_check_digit = nhs_check_digit(main_digits)
if expected_check_digit == 10:
log.debug("is_valid_nhs_number: calculated check digit invalid")
return False
if expected_check_digit != actual_check_digit:
log.debug("is_valid_nhs_number: check digit mismatch")
return False
# Hooray!
return True | [
"def",
"is_valid_nhs_number",
"(",
"n",
":",
"int",
")",
"->",
"bool",
":",
"# noqa",
"if",
"not",
"isinstance",
"(",
"n",
",",
"int",
")",
":",
"log",
".",
"debug",
"(",
"\"is_valid_nhs_number: parameter was not of integer type\"",
")",
"return",
"False",
"s",
"=",
"str",
"(",
"n",
")",
"# Not 10 digits long?",
"if",
"len",
"(",
"s",
")",
"!=",
"10",
":",
"log",
".",
"debug",
"(",
"\"is_valid_nhs_number: not 10 digits\"",
")",
"return",
"False",
"main_digits",
"=",
"[",
"int",
"(",
"s",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"9",
")",
"]",
"actual_check_digit",
"=",
"int",
"(",
"s",
"[",
"9",
"]",
")",
"# tenth digit",
"expected_check_digit",
"=",
"nhs_check_digit",
"(",
"main_digits",
")",
"if",
"expected_check_digit",
"==",
"10",
":",
"log",
".",
"debug",
"(",
"\"is_valid_nhs_number: calculated check digit invalid\"",
")",
"return",
"False",
"if",
"expected_check_digit",
"!=",
"actual_check_digit",
":",
"log",
".",
"debug",
"(",
"\"is_valid_nhs_number: check digit mismatch\"",
")",
"return",
"False",
"# Hooray!",
"return",
"True"
] | Validates an integer as an NHS number.
Args:
n: NHS number
Returns:
valid?
Checksum details are at
http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp | [
"Validates",
"an",
"integer",
"as",
"an",
"NHS",
"number",
".",
"Args",
":",
"n",
":",
"NHS",
"number"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/nhs.py#L80-L113 |
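Usage sketch for the validator above; the expected results follow directly from the Modulus 11 rule.

```python
print(is_valid_nhs_number(9876543210))    # True
print(is_valid_nhs_number(9876543211))    # False: check digit mismatch
print(is_valid_nhs_number(987654321))     # False: only 9 digits
print(is_valid_nhs_number("9876543210"))  # False: not an int (strings are rejected)
```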
RudolfCardinal/pythonlib | cardinal_pythonlib/nhs.py | generate_random_nhs_number | def generate_random_nhs_number() -> int:
"""
Returns a random valid NHS number, as an ``int``.
"""
check_digit = 10 # NHS numbers with this check digit are all invalid
while check_digit == 10:
digits = [random.randint(1, 9)] # don't start with a zero
digits.extend([random.randint(0, 9) for _ in range(8)])
# ... length now 9
check_digit = nhs_check_digit(digits)
# noinspection PyUnboundLocalVariable
digits.append(check_digit)
return int("".join([str(d) for d in digits])) | python | def generate_random_nhs_number() -> int:
"""
Returns a random valid NHS number, as an ``int``.
"""
check_digit = 10 # NHS numbers with this check digit are all invalid
while check_digit == 10:
digits = [random.randint(1, 9)] # don't start with a zero
digits.extend([random.randint(0, 9) for _ in range(8)])
# ... length now 9
check_digit = nhs_check_digit(digits)
# noinspection PyUnboundLocalVariable
digits.append(check_digit)
return int("".join([str(d) for d in digits])) | [
"def",
"generate_random_nhs_number",
"(",
")",
"->",
"int",
":",
"check_digit",
"=",
"10",
"# NHS numbers with this check digit are all invalid",
"while",
"check_digit",
"==",
"10",
":",
"digits",
"=",
"[",
"random",
".",
"randint",
"(",
"1",
",",
"9",
")",
"]",
"# don't start with a zero",
"digits",
".",
"extend",
"(",
"[",
"random",
".",
"randint",
"(",
"0",
",",
"9",
")",
"for",
"_",
"in",
"range",
"(",
"8",
")",
"]",
")",
"# ... length now 9",
"check_digit",
"=",
"nhs_check_digit",
"(",
"digits",
")",
"# noinspection PyUnboundLocalVariable",
"digits",
".",
"append",
"(",
"check_digit",
")",
"return",
"int",
"(",
"\"\"",
".",
"join",
"(",
"[",
"str",
"(",
"d",
")",
"for",
"d",
"in",
"digits",
"]",
")",
")"
] | Returns a random valid NHS number, as an ``int``. | [
"Returns",
"a",
"random",
"valid",
"NHS",
"number",
"as",
"an",
"int",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/nhs.py#L116-L128 |
RudolfCardinal/pythonlib | cardinal_pythonlib/nhs.py | generate_nhs_number_from_first_9_digits | def generate_nhs_number_from_first_9_digits(first9digits: str) -> Optional[int]:
"""
Returns a valid NHS number, as an ``int``, given the first 9 digits.
The particular purpose is to make NHS numbers that *look* fake (rather
than truly random NHS numbers which might accidentally be real).
For example:
.. code-block:: none
123456789_ : no; checksum 10
987654321_ : yes, valid if completed to 9876543210
999999999_ : yes, valid if completed to 9999999999
"""
if len(first9digits) != 9:
log.warning("Not 9 digits")
return None
try:
first9int = int(first9digits)
except (TypeError, ValueError):
log.warning("Not an integer")
return None # not an int
if len(str(first9int)) != len(first9digits):
# e.g. leading zeros, or some such
log.warning("Leading zeros?")
return None
check_digit = nhs_check_digit(first9digits)
if check_digit == 10: # NHS numbers with this check digit are all invalid
log.warning("Can't have check digit of 10")
return None
return int(first9digits + str(check_digit)) | python | def generate_nhs_number_from_first_9_digits(first9digits: str) -> Optional[int]:
"""
Returns a valid NHS number, as an ``int``, given the first 9 digits.
The particular purpose is to make NHS numbers that *look* fake (rather
than truly random NHS numbers which might accidentally be real).
For example:
.. code-block:: none
123456789_ : no; checksum 10
987654321_ : yes, valid if completed to 9876543210
999999999_ : yes, valid if completed to 9999999999
"""
if len(first9digits) != 9:
log.warning("Not 9 digits")
return None
try:
first9int = int(first9digits)
except (TypeError, ValueError):
log.warning("Not an integer")
return None # not an int
if len(str(first9int)) != len(first9digits):
# e.g. leading zeros, or some such
log.warning("Leading zeros?")
return None
check_digit = nhs_check_digit(first9digits)
if check_digit == 10: # NHS numbers with this check digit are all invalid
log.warning("Can't have check digit of 10")
return None
return int(first9digits + str(check_digit)) | [
"def",
"generate_nhs_number_from_first_9_digits",
"(",
"first9digits",
":",
"str",
")",
"->",
"Optional",
"[",
"int",
"]",
":",
"if",
"len",
"(",
"first9digits",
")",
"!=",
"9",
":",
"log",
".",
"warning",
"(",
"\"Not 9 digits\"",
")",
"return",
"None",
"try",
":",
"first9int",
"=",
"int",
"(",
"first9digits",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"log",
".",
"warning",
"(",
"\"Not an integer\"",
")",
"return",
"None",
"# not an int",
"if",
"len",
"(",
"str",
"(",
"first9int",
")",
")",
"!=",
"len",
"(",
"first9digits",
")",
":",
"# e.g. leading zeros, or some such",
"log",
".",
"warning",
"(",
"\"Leading zeros?\"",
")",
"return",
"None",
"check_digit",
"=",
"nhs_check_digit",
"(",
"first9digits",
")",
"if",
"check_digit",
"==",
"10",
":",
"# NHS numbers with this check digit are all invalid",
"log",
".",
"warning",
"(",
"\"Can't have check digit of 10\"",
")",
"return",
"None",
"return",
"int",
"(",
"first9digits",
"+",
"str",
"(",
"check_digit",
")",
")"
] | Returns a valid NHS number, as an ``int``, given the first 9 digits.
The particular purpose is to make NHS numbers that *look* fake (rather
than truly random NHS numbers which might accidentally be real).
For example:
.. code-block:: none
123456789_ : no; checksum 10
987654321_ : yes, valid if completed to 9876543210
999999999_ : yes, valid if completed to 9999999999 | [
"Returns",
"a",
"valid",
"NHS",
"number",
"as",
"an",
"int",
"given",
"the",
"first",
"9",
"digits",
".",
"The",
"particular",
"purpose",
"is",
"to",
"make",
"NHS",
"numbers",
"that",
"*",
"look",
"*",
"fake",
"(",
"rather",
"than",
"truly",
"random",
"NHS",
"numbers",
"which",
"might",
"accidentally",
"be",
"real",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/nhs.py#L138-L168 |
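Exercising the docstring's own examples, plus the leading-zero guard:

```python
print(generate_nhs_number_from_first_9_digits("987654321"))  # 9876543210
print(generate_nhs_number_from_first_9_digits("999999999"))  # 9999999999
print(generate_nhs_number_from_first_9_digits("123456789"))  # None: check digit would be 10
print(generate_nhs_number_from_first_9_digits("087654321"))  # None: leading zero trips the guard
```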
RudolfCardinal/pythonlib | cardinal_pythonlib/nhs.py | nhs_number_from_text_or_none | def nhs_number_from_text_or_none(s: str) -> Optional[int]:
"""
Returns a validated NHS number (as an integer) from a string, or ``None``
if it is not valid.
It's a 10-digit number, so note that database 32-bit INT values are
insufficient; use BIGINT. Python will handle large integers happily.
NHS number rules:
http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp?shownav=0
""" # noqa
# None in, None out.
funcname = "nhs_number_from_text_or_none: "
if not s:
log.debug(funcname + "incoming parameter was None")
return None
# (a) If it's not a 10-digit number, bye-bye.
# Remove whitespace
s = WHITESPACE_REGEX.sub("", s) # replaces all instances
# Contains non-numeric characters?
if NON_NUMERIC_REGEX.search(s):
log.debug(funcname + "contains non-numeric characters")
return None
# Not 10 digits long?
if len(s) != 10:
log.debug(funcname + "not 10 digits long")
return None
# (b) Validation
n = int(s)
if not is_valid_nhs_number(n):
log.debug(funcname + "failed validation")
return None
# Happy!
return n | python | def nhs_number_from_text_or_none(s: str) -> Optional[int]:
"""
Returns a validated NHS number (as an integer) from a string, or ``None``
if it is not valid.
It's a 10-digit number, so note that database 32-bit INT values are
insufficient; use BIGINT. Python will handle large integers happily.
NHS number rules:
http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp?shownav=0
""" # noqa
# None in, None out.
funcname = "nhs_number_from_text_or_none: "
if not s:
log.debug(funcname + "incoming parameter was None")
return None
# (a) If it's not a 10-digit number, bye-bye.
# Remove whitespace
s = WHITESPACE_REGEX.sub("", s) # replaces all instances
# Contains non-numeric characters?
if NON_NUMERIC_REGEX.search(s):
log.debug(funcname + "contains non-numeric characters")
return None
# Not 10 digits long?
if len(s) != 10:
log.debug(funcname + "not 10 digits long")
return None
# (b) Validation
n = int(s)
if not is_valid_nhs_number(n):
log.debug(funcname + "failed validation")
return None
# Happy!
return n | [
"def",
"nhs_number_from_text_or_none",
"(",
"s",
":",
"str",
")",
"->",
"Optional",
"[",
"int",
"]",
":",
"# noqa",
"# None in, None out.",
"funcname",
"=",
"\"nhs_number_from_text_or_none: \"",
"if",
"not",
"s",
":",
"log",
".",
"debug",
"(",
"funcname",
"+",
"\"incoming parameter was None\"",
")",
"return",
"None",
"# (a) If it's not a 10-digit number, bye-bye.",
"# Remove whitespace",
"s",
"=",
"WHITESPACE_REGEX",
".",
"sub",
"(",
"\"\"",
",",
"s",
")",
"# replaces all instances",
"# Contains non-numeric characters?",
"if",
"NON_NUMERIC_REGEX",
".",
"search",
"(",
"s",
")",
":",
"log",
".",
"debug",
"(",
"funcname",
"+",
"\"contains non-numeric characters\"",
")",
"return",
"None",
"# Not 10 digits long?",
"if",
"len",
"(",
"s",
")",
"!=",
"10",
":",
"log",
".",
"debug",
"(",
"funcname",
"+",
"\"not 10 digits long\"",
")",
"return",
"None",
"# (b) Validation",
"n",
"=",
"int",
"(",
"s",
")",
"if",
"not",
"is_valid_nhs_number",
"(",
"n",
")",
":",
"log",
".",
"debug",
"(",
"funcname",
"+",
"\"failed validation\"",
")",
"return",
"None",
"# Happy!",
"return",
"n"
] | Returns a validated NHS number (as an integer) from a string, or ``None``
if it is not valid.
It's a 10-digit number, so note that database 32-bit INT values are
insufficient; use BIGINT. Python will handle large integers happily.
NHS number rules:
http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp?shownav=0 | [
"Returns",
"a",
"validated",
"NHS",
"number",
"(",
"as",
"an",
"integer",
")",
"from",
"a",
"string",
"or",
"None",
"if",
"it",
"is",
"not",
"valid",
".",
"It",
"s",
"a",
"10",
"-",
"digit",
"number",
"so",
"note",
"that",
"database",
"32",
"-",
"bit",
"INT",
"values",
"are",
"insufficient",
";",
"use",
"BIGINT",
".",
"Python",
"will",
"handle",
"large",
"integers",
"happily",
".",
"NHS",
"number",
"rules",
":",
"http",
":",
"//",
"www",
".",
"datadictionary",
".",
"nhs",
".",
"uk",
"/",
"version2",
"/",
"data_dictionary",
"/",
"data_field_notes",
"/",
"n",
"/",
"nhs_number_de",
".",
"asp?shownav",
"=",
"0"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/nhs.py#L179-L216 |
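Usage sketch: whitespace is stripped before the length and checksum checks, so formatted numbers still parse.

```python
print(nhs_number_from_text_or_none("987 654 3210"))  # 9876543210
print(nhs_number_from_text_or_none("9876543211"))    # None: fails the checksum
print(nhs_number_from_text_or_none("98765A3210"))    # None: non-numeric character
print(nhs_number_from_text_or_none(""))              # None: empty input
```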
RudolfCardinal/pythonlib | cardinal_pythonlib/email/sendmail.py | make_email | def make_email(from_addr: str,
date: str = None,
sender: str = "",
reply_to: Union[str, List[str]] = "",
to: Union[str, List[str]] = "",
cc: Union[str, List[str]] = "",
bcc: Union[str, List[str]] = "",
subject: str = "",
body: str = "",
content_type: str = CONTENT_TYPE_TEXT,
charset: str = "utf8",
attachment_filenames: Sequence[str] = None,
attachment_binaries: Sequence[bytes] = None,
attachment_binary_filenames: Sequence[str] = None,
verbose: bool = False) -> email.mime.multipart.MIMEMultipart:
"""
Makes an e-mail message.
Arguments that can be multiple e-mail addresses are (a) a single e-mail
address as a string, or (b) a list of strings (each a single e-mail
address), or (c) a comma-separated list of multiple e-mail addresses.
Args:
from_addr: name of the sender for the "From:" field
date: e-mail date in RFC 2822 format, or ``None`` for "now"
sender: name of the sender for the "Sender:" field
reply_to: name of the sender for the "Reply-To:" field
to: e-mail address(es) of the recipients for "To:" field
cc: e-mail address(es) of the recipients for "Cc:" field
bcc: e-mail address(es) of the recipients for "Bcc:" field
subject: e-mail subject
body: e-mail body
content_type: MIME type for body content, default ``text/plain``
charset: character set for body; default ``utf8``
attachment_filenames: filenames of attachments to add
attachment_binaries: binary objects to add as attachments
attachment_binary_filenames: filenames corresponding to
``attachment_binaries``
verbose: be verbose?
Returns:
a :class:`email.mime.multipart.MIMEMultipart`
Raises:
:exc:`AssertionError`, :exc:`ValueError`
"""
def _csv_list_to_list(x: str) -> List[str]:
stripped = [item.strip() for item in x.split(COMMA)]
return [item for item in stripped if item]
def _assert_nocomma(x: Union[str, List[str]]) -> None:
if isinstance(x, str):
x = [x]
for _addr in x:
assert COMMA not in _addr, (
"Commas not allowed in e-mail addresses: {!r}".format(_addr)
)
# -------------------------------------------------------------------------
# Arguments
# -------------------------------------------------------------------------
if not date:
date = email.utils.formatdate(localtime=True)
assert isinstance(from_addr, str), (
"'From:' can only be a single address "
"(for Python sendmail, not RFC 2822); was {!r}".format(from_addr)
)
_assert_nocomma(from_addr)
assert isinstance(sender, str), (
"'Sender:' can only be a single address; was {!r}".format(sender)
)
_assert_nocomma(sender)
if isinstance(reply_to, str):
reply_to = [reply_to] if reply_to else [] # type: List[str]
_assert_nocomma(reply_to)
if isinstance(to, str):
to = _csv_list_to_list(to)
if isinstance(cc, str):
cc = _csv_list_to_list(cc)
if isinstance(bcc, str):
bcc = _csv_list_to_list(bcc)
assert to or cc or bcc, "No recipients (must have some of: To, Cc, Bcc)"
_assert_nocomma(to)
_assert_nocomma(cc)
_assert_nocomma(bcc)
attachment_filenames = attachment_filenames or [] # type: List[str]
assert all(attachment_filenames), (
"Missing attachment filenames: {!r}".format(attachment_filenames)
)
attachment_binaries = attachment_binaries or [] # type: List[bytes]
attachment_binary_filenames = attachment_binary_filenames or [] # type: List[str] # noqa
assert len(attachment_binaries) == len(attachment_binary_filenames), (
"If you specify attachment_binaries or attachment_binary_filenames, "
"they must be iterables of the same length."
)
assert all(attachment_binary_filenames), (
"Missing filenames for attached binaries: {!r}".format(
attachment_binary_filenames)
)
# -------------------------------------------------------------------------
# Make message
# -------------------------------------------------------------------------
msg = email.mime.multipart.MIMEMultipart()
# Headers: mandatory
msg["From"] = from_addr
msg["Date"] = date
msg["Subject"] = subject
# Headers: optional
if sender:
msg["Sender"] = sender # Single only, not a list
if reply_to:
msg["Reply-To"] = COMMASPACE.join(reply_to)
if to:
msg["To"] = COMMASPACE.join(to)
if cc:
msg["Cc"] = COMMASPACE.join(cc)
if bcc:
msg["Bcc"] = COMMASPACE.join(bcc)
# Body
if content_type == CONTENT_TYPE_TEXT:
msgbody = email.mime.text.MIMEText(body, "plain", charset)
elif content_type == CONTENT_TYPE_HTML:
msgbody = email.mime.text.MIMEText(body, "html", charset)
else:
raise ValueError("unknown content_type")
msg.attach(msgbody)
# Attachments
# noinspection PyPep8,PyBroadException
try:
if attachment_filenames:
# -----------------------------------------------------------------
# Attach things by filename
# -----------------------------------------------------------------
if verbose:
log.debug("attachment_filenames: {}", attachment_filenames)
# noinspection PyTypeChecker
for f in attachment_filenames:
part = email.mime.base.MIMEBase("application", "octet-stream")
part.set_payload(open(f, "rb").read())
email.encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="%s"' % os.path.basename(f)
)
msg.attach(part)
if attachment_binaries:
# -----------------------------------------------------------------
# Binary attachments, which have a notional filename
# -----------------------------------------------------------------
if verbose:
log.debug("attachment_binary_filenames: {}",
attachment_binary_filenames)
for i in range(len(attachment_binaries)):
blob = attachment_binaries[i]
filename = attachment_binary_filenames[i]
part = email.mime.base.MIMEBase("application", "octet-stream")
part.set_payload(blob)
email.encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="%s"' % filename)
msg.attach(part)
except Exception as e:
raise ValueError("send_email: Failed to attach files: {}".format(e))
return msg | python | def make_email(from_addr: str,
date: str = None,
sender: str = "",
reply_to: Union[str, List[str]] = "",
to: Union[str, List[str]] = "",
cc: Union[str, List[str]] = "",
bcc: Union[str, List[str]] = "",
subject: str = "",
body: str = "",
content_type: str = CONTENT_TYPE_TEXT,
charset: str = "utf8",
attachment_filenames: Sequence[str] = None,
attachment_binaries: Sequence[bytes] = None,
attachment_binary_filenames: Sequence[str] = None,
verbose: bool = False) -> email.mime.multipart.MIMEMultipart:
"""
Makes an e-mail message.
Arguments that can be multiple e-mail addresses are (a) a single e-mail
address as a string, or (b) a list of strings (each a single e-mail
address), or (c) a comma-separated list of multiple e-mail addresses.
Args:
from_addr: name of the sender for the "From:" field
date: e-mail date in RFC 2822 format, or ``None`` for "now"
sender: name of the sender for the "Sender:" field
reply_to: name of the sender for the "Reply-To:" field
to: e-mail address(es) of the recipients for "To:" field
cc: e-mail address(es) of the recipients for "Cc:" field
bcc: e-mail address(es) of the recipients for "Bcc:" field
subject: e-mail subject
body: e-mail body
content_type: MIME type for body content, default ``text/plain``
charset: character set for body; default ``utf8``
attachment_filenames: filenames of attachments to add
attachment_binaries: binary objects to add as attachments
attachment_binary_filenames: filenames corresponding to
``attachment_binaries``
verbose: be verbose?
Returns:
a :class:`email.mime.multipart.MIMEMultipart`
Raises:
:exc:`AssertionError`, :exc:`ValueError`
"""
def _csv_list_to_list(x: str) -> List[str]:
stripped = [item.strip() for item in x.split(COMMA)]
return [item for item in stripped if item]
def _assert_nocomma(x: Union[str, List[str]]) -> None:
if isinstance(x, str):
x = [x]
for _addr in x:
assert COMMA not in _addr, (
"Commas not allowed in e-mail addresses: {!r}".format(_addr)
)
# -------------------------------------------------------------------------
# Arguments
# -------------------------------------------------------------------------
if not date:
date = email.utils.formatdate(localtime=True)
assert isinstance(from_addr, str), (
"'From:' can only be a single address "
"(for Python sendmail, not RFC 2822); was {!r}".format(from_addr)
)
_assert_nocomma(from_addr)
assert isinstance(sender, str), (
"'Sender:' can only be a single address; was {!r}".format(sender)
)
_assert_nocomma(sender)
if isinstance(reply_to, str):
reply_to = [reply_to] if reply_to else [] # type: List[str]
_assert_nocomma(reply_to)
if isinstance(to, str):
to = _csv_list_to_list(to)
if isinstance(cc, str):
cc = _csv_list_to_list(cc)
if isinstance(bcc, str):
bcc = _csv_list_to_list(bcc)
assert to or cc or bcc, "No recipients (must have some of: To, Cc, Bcc)"
_assert_nocomma(to)
_assert_nocomma(cc)
_assert_nocomma(bcc)
attachment_filenames = attachment_filenames or [] # type: List[str]
assert all(attachment_filenames), (
"Missing attachment filenames: {!r}".format(attachment_filenames)
)
attachment_binaries = attachment_binaries or [] # type: List[bytes]
attachment_binary_filenames = attachment_binary_filenames or [] # type: List[str] # noqa
assert len(attachment_binaries) == len(attachment_binary_filenames), (
"If you specify attachment_binaries or attachment_binary_filenames, "
"they must be iterables of the same length."
)
assert all(attachment_binary_filenames), (
"Missing filenames for attached binaries: {!r}".format(
attachment_binary_filenames)
)
# -------------------------------------------------------------------------
# Make message
# -------------------------------------------------------------------------
msg = email.mime.multipart.MIMEMultipart()
# Headers: mandatory
msg["From"] = from_addr
msg["Date"] = date
msg["Subject"] = subject
# Headers: optional
if sender:
msg["Sender"] = sender # Single only, not a list
if reply_to:
msg["Reply-To"] = COMMASPACE.join(reply_to)
if to:
msg["To"] = COMMASPACE.join(to)
if cc:
msg["Cc"] = COMMASPACE.join(cc)
if bcc:
msg["Bcc"] = COMMASPACE.join(bcc)
# Body
if content_type == CONTENT_TYPE_TEXT:
msgbody = email.mime.text.MIMEText(body, "plain", charset)
elif content_type == CONTENT_TYPE_HTML:
msgbody = email.mime.text.MIMEText(body, "html", charset)
else:
raise ValueError("unknown content_type")
msg.attach(msgbody)
# Attachments
# noinspection PyPep8,PyBroadException
try:
if attachment_filenames:
# -----------------------------------------------------------------
# Attach things by filename
# -----------------------------------------------------------------
if verbose:
log.debug("attachment_filenames: {}", attachment_filenames)
# noinspection PyTypeChecker
for f in attachment_filenames:
part = email.mime.base.MIMEBase("application", "octet-stream")
part.set_payload(open(f, "rb").read())
email.encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="%s"' % os.path.basename(f)
)
msg.attach(part)
if attachment_binaries:
# -----------------------------------------------------------------
# Binary attachments, which have a notional filename
# -----------------------------------------------------------------
if verbose:
log.debug("attachment_binary_filenames: {}",
attachment_binary_filenames)
for i in range(len(attachment_binaries)):
blob = attachment_binaries[i]
filename = attachment_binary_filenames[i]
part = email.mime.base.MIMEBase("application", "octet-stream")
part.set_payload(blob)
email.encoders.encode_base64(part)
part.add_header(
'Content-Disposition',
'attachment; filename="%s"' % filename)
msg.attach(part)
except Exception as e:
raise ValueError("send_email: Failed to attach files: {}".format(e))
return msg | [
"def",
"make_email",
"(",
"from_addr",
":",
"str",
",",
"date",
":",
"str",
"=",
"None",
",",
"sender",
":",
"str",
"=",
"\"\"",
",",
"reply_to",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"\"\"",
",",
"to",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"\"\"",
",",
"cc",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"\"\"",
",",
"bcc",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"\"\"",
",",
"subject",
":",
"str",
"=",
"\"\"",
",",
"body",
":",
"str",
"=",
"\"\"",
",",
"content_type",
":",
"str",
"=",
"CONTENT_TYPE_TEXT",
",",
"charset",
":",
"str",
"=",
"\"utf8\"",
",",
"attachment_filenames",
":",
"Sequence",
"[",
"str",
"]",
"=",
"None",
",",
"attachment_binaries",
":",
"Sequence",
"[",
"bytes",
"]",
"=",
"None",
",",
"attachment_binary_filenames",
":",
"Sequence",
"[",
"str",
"]",
"=",
"None",
",",
"verbose",
":",
"bool",
"=",
"False",
")",
"->",
"email",
".",
"mime",
".",
"multipart",
".",
"MIMEMultipart",
":",
"def",
"_csv_list_to_list",
"(",
"x",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"stripped",
"=",
"[",
"item",
".",
"strip",
"(",
")",
"for",
"item",
"in",
"x",
".",
"split",
"(",
"COMMA",
")",
"]",
"return",
"[",
"item",
"for",
"item",
"in",
"stripped",
"if",
"item",
"]",
"def",
"_assert_nocomma",
"(",
"x",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
")",
"->",
"None",
":",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"x",
"=",
"[",
"x",
"]",
"for",
"_addr",
"in",
"x",
":",
"assert",
"COMMA",
"not",
"in",
"_addr",
",",
"(",
"\"Commas not allowed in e-mail addresses: {!r}\"",
".",
"format",
"(",
"_addr",
")",
")",
"# -------------------------------------------------------------------------",
"# Arguments",
"# -------------------------------------------------------------------------",
"if",
"not",
"date",
":",
"date",
"=",
"email",
".",
"utils",
".",
"formatdate",
"(",
"localtime",
"=",
"True",
")",
"assert",
"isinstance",
"(",
"from_addr",
",",
"str",
")",
",",
"(",
"\"'From:' can only be a single address \"",
"\"(for Python sendmail, not RFC 2822); was {!r}\"",
".",
"format",
"(",
"from_addr",
")",
")",
"_assert_nocomma",
"(",
"from_addr",
")",
"assert",
"isinstance",
"(",
"sender",
",",
"str",
")",
",",
"(",
"\"'Sender:' can only be a single address; was {!r}\"",
".",
"format",
"(",
"sender",
")",
")",
"_assert_nocomma",
"(",
"sender",
")",
"if",
"isinstance",
"(",
"reply_to",
",",
"str",
")",
":",
"reply_to",
"=",
"[",
"reply_to",
"]",
"if",
"reply_to",
"else",
"[",
"]",
"# type: List[str]",
"_assert_nocomma",
"(",
"reply_to",
")",
"if",
"isinstance",
"(",
"to",
",",
"str",
")",
":",
"to",
"=",
"_csv_list_to_list",
"(",
"to",
")",
"if",
"isinstance",
"(",
"cc",
",",
"str",
")",
":",
"cc",
"=",
"_csv_list_to_list",
"(",
"cc",
")",
"if",
"isinstance",
"(",
"bcc",
",",
"str",
")",
":",
"bcc",
"=",
"_csv_list_to_list",
"(",
"bcc",
")",
"assert",
"to",
"or",
"cc",
"or",
"bcc",
",",
"\"No recipients (must have some of: To, Cc, Bcc)\"",
"_assert_nocomma",
"(",
"to",
")",
"_assert_nocomma",
"(",
"cc",
")",
"_assert_nocomma",
"(",
"bcc",
")",
"attachment_filenames",
"=",
"attachment_filenames",
"or",
"[",
"]",
"# type: List[str]",
"assert",
"all",
"(",
"attachment_filenames",
")",
",",
"(",
"\"Missing attachment filenames: {!r}\"",
".",
"format",
"(",
"attachment_filenames",
")",
")",
"attachment_binaries",
"=",
"attachment_binaries",
"or",
"[",
"]",
"# type: List[bytes]",
"attachment_binary_filenames",
"=",
"attachment_binary_filenames",
"or",
"[",
"]",
"# type: List[str] # noqa",
"assert",
"len",
"(",
"attachment_binaries",
")",
"==",
"len",
"(",
"attachment_binary_filenames",
")",
",",
"(",
"\"If you specify attachment_binaries or attachment_binary_filenames, \"",
"\"they must be iterables of the same length.\"",
")",
"assert",
"all",
"(",
"attachment_binary_filenames",
")",
",",
"(",
"\"Missing filenames for attached binaries: {!r}\"",
".",
"format",
"(",
"attachment_binary_filenames",
")",
")",
"# -------------------------------------------------------------------------",
"# Make message",
"# -------------------------------------------------------------------------",
"msg",
"=",
"email",
".",
"mime",
".",
"multipart",
".",
"MIMEMultipart",
"(",
")",
"# Headers: mandatory",
"msg",
"[",
"\"From\"",
"]",
"=",
"from_addr",
"msg",
"[",
"\"Date\"",
"]",
"=",
"date",
"msg",
"[",
"\"Subject\"",
"]",
"=",
"subject",
"# Headers: optional",
"if",
"sender",
":",
"msg",
"[",
"\"Sender\"",
"]",
"=",
"sender",
"# Single only, not a list",
"if",
"reply_to",
":",
"msg",
"[",
"\"Reply-To\"",
"]",
"=",
"COMMASPACE",
".",
"join",
"(",
"reply_to",
")",
"if",
"to",
":",
"msg",
"[",
"\"To\"",
"]",
"=",
"COMMASPACE",
".",
"join",
"(",
"to",
")",
"if",
"cc",
":",
"msg",
"[",
"\"Cc\"",
"]",
"=",
"COMMASPACE",
".",
"join",
"(",
"cc",
")",
"if",
"bcc",
":",
"msg",
"[",
"\"Bcc\"",
"]",
"=",
"COMMASPACE",
".",
"join",
"(",
"bcc",
")",
"# Body",
"if",
"content_type",
"==",
"CONTENT_TYPE_TEXT",
":",
"msgbody",
"=",
"email",
".",
"mime",
".",
"text",
".",
"MIMEText",
"(",
"body",
",",
"\"plain\"",
",",
"charset",
")",
"elif",
"content_type",
"==",
"CONTENT_TYPE_HTML",
":",
"msgbody",
"=",
"email",
".",
"mime",
".",
"text",
".",
"MIMEText",
"(",
"body",
",",
"\"html\"",
",",
"charset",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown content_type\"",
")",
"msg",
".",
"attach",
"(",
"msgbody",
")",
"# Attachments",
"# noinspection PyPep8,PyBroadException",
"try",
":",
"if",
"attachment_filenames",
":",
"# -----------------------------------------------------------------",
"# Attach things by filename",
"# -----------------------------------------------------------------",
"if",
"verbose",
":",
"log",
".",
"debug",
"(",
"\"attachment_filenames: {}\"",
",",
"attachment_filenames",
")",
"# noinspection PyTypeChecker",
"for",
"f",
"in",
"attachment_filenames",
":",
"part",
"=",
"email",
".",
"mime",
".",
"base",
".",
"MIMEBase",
"(",
"\"application\"",
",",
"\"octet-stream\"",
")",
"part",
".",
"set_payload",
"(",
"open",
"(",
"f",
",",
"\"rb\"",
")",
".",
"read",
"(",
")",
")",
"email",
".",
"encoders",
".",
"encode_base64",
"(",
"part",
")",
"part",
".",
"add_header",
"(",
"'Content-Disposition'",
",",
"'attachment; filename=\"%s\"'",
"%",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
")",
"msg",
".",
"attach",
"(",
"part",
")",
"if",
"attachment_binaries",
":",
"# -----------------------------------------------------------------",
"# Binary attachments, which have a notional filename",
"# -----------------------------------------------------------------",
"if",
"verbose",
":",
"log",
".",
"debug",
"(",
"\"attachment_binary_filenames: {}\"",
",",
"attachment_binary_filenames",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"attachment_binaries",
")",
")",
":",
"blob",
"=",
"attachment_binaries",
"[",
"i",
"]",
"filename",
"=",
"attachment_binary_filenames",
"[",
"i",
"]",
"part",
"=",
"email",
".",
"mime",
".",
"base",
".",
"MIMEBase",
"(",
"\"application\"",
",",
"\"octet-stream\"",
")",
"part",
".",
"set_payload",
"(",
"blob",
")",
"email",
".",
"encoders",
".",
"encode_base64",
"(",
"part",
")",
"part",
".",
"add_header",
"(",
"'Content-Disposition'",
",",
"'attachment; filename=\"%s\"'",
"%",
"filename",
")",
"msg",
".",
"attach",
"(",
"part",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"send_email: Failed to attach files: {}\"",
".",
"format",
"(",
"e",
")",
")",
"return",
"msg"
] | Makes an e-mail message.
Arguments that can be multiple e-mail addresses are (a) a single e-mail
address as a string, or (b) a list of strings (each a single e-mail
address), or (c) a comma-separated list of multiple e-mail addresses.
Args:
from_addr: name of the sender for the "From:" field
date: e-mail date in RFC 2822 format, or ``None`` for "now"
sender: name of the sender for the "Sender:" field
reply_to: name of the sender for the "Reply-To:" field
to: e-mail address(es) of the recipients for "To:" field
cc: e-mail address(es) of the recipients for "Cc:" field
bcc: e-mail address(es) of the recipients for "Bcc:" field
subject: e-mail subject
body: e-mail body
content_type: MIME type for body content, default ``text/plain``
charset: character set for body; default ``utf8``
attachment_filenames: filenames of attachments to add
attachment_binaries: binary objects to add as attachments
attachment_binary_filenames: filenames corresponding to
``attachment_binaries``
verbose: be verbose?
Returns:
a :class:`email.mime.multipart.MIMEMultipart`
Raises:
:exc:`AssertionError`, :exc:`ValueError` | [
"Makes",
"an",
"e",
"-",
"mail",
"message",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/email/sendmail.py#L66-L240 |
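A hedged usage sketch for `make_email` above; `CONTENT_TYPE_HTML` is the constant referenced in the signature (assumed importable from the same module), and the addresses and attachment bytes are invented.

```python
from cardinal_pythonlib.email.sendmail import (  # assumed import path
    CONTENT_TYPE_HTML,
    make_email,
)

msg = make_email(
    from_addr="sender@example.com",
    to="alice@example.com, bob@example.com",  # comma-separated string is split
    cc=["carol@example.com"],                 # or pass an explicit list
    subject="Report",
    body="<p>See attached.</p>",
    content_type=CONTENT_TYPE_HTML,
    attachment_binaries=[b"col1,col2\n1,2\n"],
    attachment_binary_filenames=["report.csv"],
)
print(msg["To"])  # alice@example.com, bob@example.com
```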