| repository_name (string, 7-55 chars) | func_path_in_repository (string, 4-223 chars) | func_name (string, 1-134 chars) | whole_func_string (string, 75-104k chars) | language (string, 1 class: "python") | func_code_string (string, 75-104k chars) | func_code_tokens (sequence, 19-28.4k tokens) | func_documentation_string (string, 1-46.9k chars) | func_documentation_tokens (sequence, 1-1.97k tokens) | split_name (string, 1 class: "train") | func_code_url (string, 87-315 chars) |
|---|---|---|---|---|---|---|---|---|---|---|
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | FileToAutodocument.write_rst | def write_rst(self,
prefix: str = "",
suffix: str = "",
heading_underline_char: str = "=",
method: AutodocMethod = None,
overwrite: bool = False,
mock: bool = False) -> None:
"""
Writes the RST file to our destination RST filename, making any
necessary directories.
Args:
prefix: as for :func:`rst_content`
suffix: as for :func:`rst_content`
heading_underline_char: as for :func:`rst_content`
method: as for :func:`rst_content`
overwrite: overwrite the file if it exists already?
mock: pretend to write, but don't
"""
content = self.rst_content(
prefix=prefix,
suffix=suffix,
heading_underline_char=heading_underline_char,
method=method
)
write_if_allowed(self.target_rst_filename, content,
overwrite=overwrite, mock=mock) | python | def write_rst(self,
prefix: str = "",
suffix: str = "",
heading_underline_char: str = "=",
method: AutodocMethod = None,
overwrite: bool = False,
mock: bool = False) -> None:
"""
Writes the RST file to our destination RST filename, making any
necessary directories.
Args:
prefix: as for :func:`rst_content`
suffix: as for :func:`rst_content`
heading_underline_char: as for :func:`rst_content`
method: as for :func:`rst_content`
overwrite: overwrite the file if it exists already?
mock: pretend to write, but don't
"""
content = self.rst_content(
prefix=prefix,
suffix=suffix,
heading_underline_char=heading_underline_char,
method=method
)
write_if_allowed(self.target_rst_filename, content,
overwrite=overwrite, mock=mock) | [
"def",
"write_rst",
"(",
"self",
",",
"prefix",
":",
"str",
"=",
"\"\"",
",",
"suffix",
":",
"str",
"=",
"\"\"",
",",
"heading_underline_char",
":",
"str",
"=",
"\"=\"",
",",
"method",
":",
"AutodocMethod",
"=",
"None",
",",
"overwrite",
":",
"bool",
"=",
"False",
",",
"mock",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"content",
"=",
"self",
".",
"rst_content",
"(",
"prefix",
"=",
"prefix",
",",
"suffix",
"=",
"suffix",
",",
"heading_underline_char",
"=",
"heading_underline_char",
",",
"method",
"=",
"method",
")",
"write_if_allowed",
"(",
"self",
".",
"target_rst_filename",
",",
"content",
",",
"overwrite",
"=",
"overwrite",
",",
"mock",
"=",
"mock",
")"
] | Writes the RST file to our destination RST filename, making any
necessary directories.
Args:
prefix: as for :func:`rst_content`
suffix: as for :func:`rst_content`
heading_underline_char: as for :func:`rst_content`
method: as for :func:`rst_content`
overwrite: overwrite the file if it exists already?
mock: pretend to write, but don't | [
"Writes",
"the",
"RST",
"file",
"to",
"our",
"destination",
"RST",
"filename",
"making",
"any",
"necessary",
"directories",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L414-L440 |
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | AutodocIndex.add_source_files | def add_source_files(
self,
source_filenames_or_globs: Union[str, List[str]],
method: AutodocMethod = None,
recursive: bool = None,
source_rst_title_style_python: bool = None,
pygments_language_override: Dict[str, str] = None) -> None:
"""
Adds source files to the index.
Args:
source_filenames_or_globs: string containing a filename or a
glob, describing the file(s) to be added, or a list of such
strings
method: optional method to override ``self.method``
recursive: use :func:`glob.glob` in recursive mode? (If ``None``,
the default, uses the version from the constructor.)
source_rst_title_style_python: optional to override
``self.source_rst_title_style_python``
pygments_language_override: optional to override
``self.pygments_language_override``
"""
if not source_filenames_or_globs:
return
if method is None:
# Use the default
method = self.method
if recursive is None:
recursive = self.recursive
if source_rst_title_style_python is None:
source_rst_title_style_python = self.source_rst_title_style_python
if pygments_language_override is None:
pygments_language_override = self.pygments_language_override
# Get a sorted list of filenames
final_filenames = self.get_sorted_source_files(
source_filenames_or_globs,
recursive=recursive
)
# Process that sorted list
for source_filename in final_filenames:
self.files_to_index.append(FileToAutodocument(
source_filename=source_filename,
project_root_dir=self.project_root_dir,
python_package_root_dir=self.python_package_root_dir,
target_rst_filename=self.specific_file_rst_filename(
source_filename
),
method=method,
source_rst_title_style_python=source_rst_title_style_python,
pygments_language_override=pygments_language_override,
)) | python | def add_source_files(
self,
source_filenames_or_globs: Union[str, List[str]],
method: AutodocMethod = None,
recursive: bool = None,
source_rst_title_style_python: bool = None,
pygments_language_override: Dict[str, str] = None) -> None:
"""
Adds source files to the index.
Args:
source_filenames_or_globs: string containing a filename or a
glob, describing the file(s) to be added, or a list of such
strings
method: optional method to override ``self.method``
recursive: use :func:`glob.glob` in recursive mode? (If ``None``,
the default, uses the version from the constructor.)
source_rst_title_style_python: optional to override
``self.source_rst_title_style_python``
pygments_language_override: optional to override
``self.pygments_language_override``
"""
if not source_filenames_or_globs:
return
if method is None:
# Use the default
method = self.method
if recursive is None:
recursive = self.recursive
if source_rst_title_style_python is None:
source_rst_title_style_python = self.source_rst_title_style_python
if pygments_language_override is None:
pygments_language_override = self.pygments_language_override
# Get a sorted list of filenames
final_filenames = self.get_sorted_source_files(
source_filenames_or_globs,
recursive=recursive
)
# Process that sorted list
for source_filename in final_filenames:
self.files_to_index.append(FileToAutodocument(
source_filename=source_filename,
project_root_dir=self.project_root_dir,
python_package_root_dir=self.python_package_root_dir,
target_rst_filename=self.specific_file_rst_filename(
source_filename
),
method=method,
source_rst_title_style_python=source_rst_title_style_python,
pygments_language_override=pygments_language_override,
)) | [
"def",
"add_source_files",
"(",
"self",
",",
"source_filenames_or_globs",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
",",
"method",
":",
"AutodocMethod",
"=",
"None",
",",
"recursive",
":",
"bool",
"=",
"None",
",",
"source_rst_title_style_python",
":",
"bool",
"=",
"None",
",",
"pygments_language_override",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"None",
")",
"->",
"None",
":",
"if",
"not",
"source_filenames_or_globs",
":",
"return",
"if",
"method",
"is",
"None",
":",
"# Use the default",
"method",
"=",
"self",
".",
"method",
"if",
"recursive",
"is",
"None",
":",
"recursive",
"=",
"self",
".",
"recursive",
"if",
"source_rst_title_style_python",
"is",
"None",
":",
"source_rst_title_style_python",
"=",
"self",
".",
"source_rst_title_style_python",
"if",
"pygments_language_override",
"is",
"None",
":",
"pygments_language_override",
"=",
"self",
".",
"pygments_language_override",
"# Get a sorted list of filenames",
"final_filenames",
"=",
"self",
".",
"get_sorted_source_files",
"(",
"source_filenames_or_globs",
",",
"recursive",
"=",
"recursive",
")",
"# Process that sorted list",
"for",
"source_filename",
"in",
"final_filenames",
":",
"self",
".",
"files_to_index",
".",
"append",
"(",
"FileToAutodocument",
"(",
"source_filename",
"=",
"source_filename",
",",
"project_root_dir",
"=",
"self",
".",
"project_root_dir",
",",
"python_package_root_dir",
"=",
"self",
".",
"python_package_root_dir",
",",
"target_rst_filename",
"=",
"self",
".",
"specific_file_rst_filename",
"(",
"source_filename",
")",
",",
"method",
"=",
"method",
",",
"source_rst_title_style_python",
"=",
"source_rst_title_style_python",
",",
"pygments_language_override",
"=",
"pygments_language_override",
",",
")",
")"
] | Adds source files to the index.
Args:
source_filenames_or_globs: string containing a filename or a
glob, describing the file(s) to be added, or a list of such
strings
method: optional method to override ``self.method``
recursive: use :func:`glob.glob` in recursive mode? (If ``None``,
the default, uses the version from the constructor.)
source_rst_title_style_python: optional to override
``self.source_rst_title_style_python``
pygments_language_override: optional to override
``self.pygments_language_override`` | [
"Adds",
"source",
"files",
"to",
"the",
"index",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L654-L707 |
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | AutodocIndex.get_sorted_source_files | def get_sorted_source_files(
self,
source_filenames_or_globs: Union[str, List[str]],
recursive: bool = True) -> List[str]:
"""
Returns a sorted list of filenames to process, from a filename,
a glob string, or a list of filenames/globs.
Args:
source_filenames_or_globs: filename/glob, or list of them
recursive: use :func:`glob.glob` in recursive mode?
Returns:
sorted list of files to process
"""
if isinstance(source_filenames_or_globs, str):
source_filenames_or_globs = [source_filenames_or_globs]
final_filenames = [] # type: List[str]
for sfg in source_filenames_or_globs:
sfg_expanded = expanduser(sfg)
log.debug("Looking for: {!r}", sfg_expanded)
for filename in glob.glob(sfg_expanded, recursive=recursive):
log.debug("Trying: {!r}", filename)
if self.should_exclude(filename):
log.info("Skipping file {!r}", filename)
continue
final_filenames.append(filename)
final_filenames.sort()
return final_filenames | python | def get_sorted_source_files(
self,
source_filenames_or_globs: Union[str, List[str]],
recursive: bool = True) -> List[str]:
"""
Returns a sorted list of filenames to process, from a filename,
a glob string, or a list of filenames/globs.
Args:
source_filenames_or_globs: filename/glob, or list of them
recursive: use :func:`glob.glob` in recursive mode?
Returns:
sorted list of files to process
"""
if isinstance(source_filenames_or_globs, str):
source_filenames_or_globs = [source_filenames_or_globs]
final_filenames = [] # type: List[str]
for sfg in source_filenames_or_globs:
sfg_expanded = expanduser(sfg)
log.debug("Looking for: {!r}", sfg_expanded)
for filename in glob.glob(sfg_expanded, recursive=recursive):
log.debug("Trying: {!r}", filename)
if self.should_exclude(filename):
log.info("Skipping file {!r}", filename)
continue
final_filenames.append(filename)
final_filenames.sort()
return final_filenames | [
"def",
"get_sorted_source_files",
"(",
"self",
",",
"source_filenames_or_globs",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
",",
"recursive",
":",
"bool",
"=",
"True",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"isinstance",
"(",
"source_filenames_or_globs",
",",
"str",
")",
":",
"source_filenames_or_globs",
"=",
"[",
"source_filenames_or_globs",
"]",
"final_filenames",
"=",
"[",
"]",
"# type: List[str]",
"for",
"sfg",
"in",
"source_filenames_or_globs",
":",
"sfg_expanded",
"=",
"expanduser",
"(",
"sfg",
")",
"log",
".",
"debug",
"(",
"\"Looking for: {!r}\"",
",",
"sfg_expanded",
")",
"for",
"filename",
"in",
"glob",
".",
"glob",
"(",
"sfg_expanded",
",",
"recursive",
"=",
"recursive",
")",
":",
"log",
".",
"debug",
"(",
"\"Trying: {!r}\"",
",",
"filename",
")",
"if",
"self",
".",
"should_exclude",
"(",
"filename",
")",
":",
"log",
".",
"info",
"(",
"\"Skipping file {!r}\"",
",",
"filename",
")",
"continue",
"final_filenames",
".",
"append",
"(",
"filename",
")",
"final_filenames",
".",
"sort",
"(",
")",
"return",
"final_filenames"
] | Returns a sorted list of filenames to process, from a filename,
a glob string, or a list of filenames/globs.
Args:
source_filenames_or_globs: filename/glob, or list of them
recursive: use :func:`glob.glob` in recursive mode?
Returns:
sorted list of files to process | [
"Returns",
"a",
"sorted",
"list",
"of",
"filenames",
"to",
"process",
"from",
"a",
"filename",
"a",
"glob",
"string",
"or",
"a",
"list",
"of",
"filenames",
"/",
"globs",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L709-L737 |
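For illustration, a minimal standalone sketch of the recursive globbing that `get_sorted_source_files` delegates to `glob.glob`; the project path is a made-up placeholder, not from the dataset:

```python
import glob
from os.path import expanduser

# "**" matches any number of nested directories when recursive=True,
# which is the mode the method above passes through to glob.glob.
pattern = expanduser("~/project/src/**/*.py")  # hypothetical location
for filename in sorted(glob.glob(pattern, recursive=True)):
    print(filename)
```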
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | AutodocIndex.filename_matches_glob | def filename_matches_glob(filename: str, globtext: str) -> bool:
"""
The ``glob.glob`` function doesn't do exclusion very well. We don't
want to have to specify root directories for exclusion patterns. We
don't want to have to trawl a massive set of files to find exclusion
files. So let's implement a glob match.
Args:
filename: filename
globtext: glob
Returns:
does the filename match the glob?
See also:
- https://stackoverflow.com/questions/20638040/glob-exclude-pattern
"""
# Quick check on basename-only matching
if fnmatch(filename, globtext):
log.debug("{!r} matches {!r}", filename, globtext)
return True
bname = basename(filename)
if fnmatch(bname, globtext):
log.debug("{!r} matches {!r}", bname, globtext)
return True
# Directory matching: is actually accomplished by the code above!
# Otherwise:
return False | python | def filename_matches_glob(filename: str, globtext: str) -> bool:
"""
The ``glob.glob`` function doesn't do exclusion very well. We don't
want to have to specify root directories for exclusion patterns. We
don't want to have to trawl a massive set of files to find exclusion
files. So let's implement a glob match.
Args:
filename: filename
globtext: glob
Returns:
does the filename match the glob?
See also:
- https://stackoverflow.com/questions/20638040/glob-exclude-pattern
"""
# Quick check on basename-only matching
if fnmatch(filename, globtext):
log.debug("{!r} matches {!r}", filename, globtext)
return True
bname = basename(filename)
if fnmatch(bname, globtext):
log.debug("{!r} matches {!r}", bname, globtext)
return True
# Directory matching: is actually accomplished by the code above!
# Otherwise:
return False | [
"def",
"filename_matches_glob",
"(",
"filename",
":",
"str",
",",
"globtext",
":",
"str",
")",
"->",
"bool",
":",
"# Quick check on basename-only matching",
"if",
"fnmatch",
"(",
"filename",
",",
"globtext",
")",
":",
"log",
".",
"debug",
"(",
"\"{!r} matches {!r}\"",
",",
"filename",
",",
"globtext",
")",
"return",
"True",
"bname",
"=",
"basename",
"(",
"filename",
")",
"if",
"fnmatch",
"(",
"bname",
",",
"globtext",
")",
":",
"log",
".",
"debug",
"(",
"\"{!r} matches {!r}\"",
",",
"bname",
",",
"globtext",
")",
"return",
"True",
"# Directory matching: is actually accomplished by the code above!",
"# Otherwise:",
"return",
"False"
] | The ``glob.glob`` function doesn't do exclusion very well. We don't
want to have to specify root directories for exclusion patterns. We
don't want to have to trawl a massive set of files to find exclusion
files. So let's implement a glob match.
Args:
filename: filename
globtext: glob
Returns:
does the filename match the glob?
See also:
- https://stackoverflow.com/questions/20638040/glob-exclude-pattern | [
"The",
"glob",
".",
"glob",
"function",
"doesn",
"t",
"do",
"exclusion",
"very",
"well",
".",
"We",
"don",
"t",
"want",
"to",
"have",
"to",
"specify",
"root",
"directories",
"for",
"exclusion",
"patterns",
".",
"We",
"don",
"t",
"want",
"to",
"have",
"to",
"trawl",
"a",
"massive",
"set",
"of",
"files",
"to",
"find",
"exclusion",
"files",
".",
"So",
"let",
"s",
"implement",
"a",
"glob",
"match",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L740-L769 |
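A small sketch of the two-stage match performed above, with an illustrative (invented) filename:

```python
from fnmatch import fnmatch
from os.path import basename

# The method above tries the glob against the full path first, then against
# the basename alone, so "test_*" can exclude files anywhere in the tree.
filename = "/home/user/project/tests/test_parser.py"  # illustrative path
print(fnmatch(filename, "*.pyc"))             # False: full-path check fails
print(fnmatch(basename(filename), "test_*"))  # True: basename check matches
```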
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | AutodocIndex.should_exclude | def should_exclude(self, filename) -> bool:
"""
Should we exclude this file from consideration?
"""
for skip_glob in self.skip_globs:
if self.filename_matches_glob(filename, skip_glob):
return True
return False | python | def should_exclude(self, filename) -> bool:
"""
Should we exclude this file from consideration?
"""
for skip_glob in self.skip_globs:
if self.filename_matches_glob(filename, skip_glob):
return True
return False | [
"def",
"should_exclude",
"(",
"self",
",",
"filename",
")",
"->",
"bool",
":",
"for",
"skip_glob",
"in",
"self",
".",
"skip_globs",
":",
"if",
"self",
".",
"filename_matches_glob",
"(",
"filename",
",",
"skip_glob",
")",
":",
"return",
"True",
"return",
"False"
] | Should we exclude this file from consideration? | [
"Should",
"we",
"exclude",
"this",
"file",
"from",
"consideration?"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L771-L778 |
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | AutodocIndex.specific_file_rst_filename | def specific_file_rst_filename(self, source_filename: str) -> str:
"""
Gets the RST filename corresponding to a source filename.
See the help for the constructor for more details.
Args:
source_filename: source filename within current project
Returns:
RST filename
Note in particular: the way we structure the directories means that we
won't get clashes between files with idential names in two different
directories. However, we must also incorporate the original source
filename, in particular for C++ where ``thing.h`` and ``thing.cpp``
must not generate the same RST filename. So we just add ``.rst``.
"""
highest_code_to_target = relative_filename_within_dir(
source_filename, self.highest_code_dir)
bname = basename(source_filename)
result = join(self.autodoc_rst_root_dir,
dirname(highest_code_to_target),
bname + EXT_RST)
log.debug("Source {!r} -> RST {!r}", source_filename, result)
return result | python | def specific_file_rst_filename(self, source_filename: str) -> str:
"""
Gets the RST filename corresponding to a source filename.
See the help for the constructor for more details.
Args:
source_filename: source filename within current project
Returns:
RST filename
Note in particular: the way we structure the directories means that we
won't get clashes between files with identical names in two different
directories. However, we must also incorporate the original source
filename, in particular for C++ where ``thing.h`` and ``thing.cpp``
must not generate the same RST filename. So we just add ``.rst``.
"""
highest_code_to_target = relative_filename_within_dir(
source_filename, self.highest_code_dir)
bname = basename(source_filename)
result = join(self.autodoc_rst_root_dir,
dirname(highest_code_to_target),
bname + EXT_RST)
log.debug("Source {!r} -> RST {!r}", source_filename, result)
return result | [
"def",
"specific_file_rst_filename",
"(",
"self",
",",
"source_filename",
":",
"str",
")",
"->",
"str",
":",
"highest_code_to_target",
"=",
"relative_filename_within_dir",
"(",
"source_filename",
",",
"self",
".",
"highest_code_dir",
")",
"bname",
"=",
"basename",
"(",
"source_filename",
")",
"result",
"=",
"join",
"(",
"self",
".",
"autodoc_rst_root_dir",
",",
"dirname",
"(",
"highest_code_to_target",
")",
",",
"bname",
"+",
"EXT_RST",
")",
"log",
".",
"debug",
"(",
"\"Source {!r} -> RST {!r}\"",
",",
"source_filename",
",",
"result",
")",
"return",
"result"
] | Gets the RST filename corresponding to a source filename.
See the help for the constructor for more details.
Args:
source_filename: source filename within current project
Returns:
RST filename
Note in particular: the way we structure the directories means that we
won't get clashes between files with identical names in two different
directories. However, we must also incorporate the original source
filename, in particular for C++ where ``thing.h`` and ``thing.cpp``
must not generate the same RST filename. So we just add ``.rst``. | [
"Gets",
"the",
"RST",
"filename",
"corresponding",
"to",
"a",
"source",
"filename",
".",
"See",
"the",
"help",
"for",
"the",
"constructor",
"for",
"more",
"details",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L799-L823 |
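A standalone sketch of the filename mapping described in the docstring above; `EXT_RST` is assumed to be `".rst"`, and the paths are invented:

```python
from os.path import basename, dirname, join

EXT_RST = ".rst"  # assumed value of the module constant used above

# Appending ".rst" (instead of replacing the extension) keeps C++ header
# and implementation files distinct, as the docstring explains.
for src in ("src/thing.h", "src/thing.cpp"):
    print(src, "->", join("autodoc", dirname(src), basename(src) + EXT_RST))
# src/thing.h -> autodoc/src/thing.h.rst
# src/thing.cpp -> autodoc/src/thing.cpp.rst
```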
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | AutodocIndex.write_index_and_rst_files | def write_index_and_rst_files(self, overwrite: bool = False,
mock: bool = False) -> None:
"""
Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
"""
for f in self.files_to_index:
if isinstance(f, FileToAutodocument):
f.write_rst(
prefix=self.rst_prefix,
suffix=self.rst_suffix,
heading_underline_char=self.source_rst_heading_underline_char, # noqa
overwrite=overwrite,
mock=mock,
)
elif isinstance(f, AutodocIndex):
f.write_index_and_rst_files(overwrite=overwrite, mock=mock)
else:
fail("Unknown thing in files_to_index: {!r}".format(f))
self.write_index(overwrite=overwrite, mock=mock) | python | def write_index_and_rst_files(self, overwrite: bool = False,
mock: bool = False) -> None:
"""
Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
"""
for f in self.files_to_index:
if isinstance(f, FileToAutodocument):
f.write_rst(
prefix=self.rst_prefix,
suffix=self.rst_suffix,
heading_underline_char=self.source_rst_heading_underline_char, # noqa
overwrite=overwrite,
mock=mock,
)
elif isinstance(f, AutodocIndex):
f.write_index_and_rst_files(overwrite=overwrite, mock=mock)
else:
fail("Unknown thing in files_to_index: {!r}".format(f))
self.write_index(overwrite=overwrite, mock=mock) | [
"def",
"write_index_and_rst_files",
"(",
"self",
",",
"overwrite",
":",
"bool",
"=",
"False",
",",
"mock",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"for",
"f",
"in",
"self",
".",
"files_to_index",
":",
"if",
"isinstance",
"(",
"f",
",",
"FileToAutodocument",
")",
":",
"f",
".",
"write_rst",
"(",
"prefix",
"=",
"self",
".",
"rst_prefix",
",",
"suffix",
"=",
"self",
".",
"rst_suffix",
",",
"heading_underline_char",
"=",
"self",
".",
"source_rst_heading_underline_char",
",",
"# noqa",
"overwrite",
"=",
"overwrite",
",",
"mock",
"=",
"mock",
",",
")",
"elif",
"isinstance",
"(",
"f",
",",
"AutodocIndex",
")",
":",
"f",
".",
"write_index_and_rst_files",
"(",
"overwrite",
"=",
"overwrite",
",",
"mock",
"=",
"mock",
")",
"else",
":",
"fail",
"(",
"\"Unknown thing in files_to_index: {!r}\"",
".",
"format",
"(",
"f",
")",
")",
"self",
".",
"write_index",
"(",
"overwrite",
"=",
"overwrite",
",",
"mock",
"=",
"mock",
")"
] | Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't | [
"Writes",
"both",
"the",
"individual",
"RST",
"files",
"and",
"the",
"index",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L825-L847 |
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | AutodocIndex.index_filename_rel_other_index | def index_filename_rel_other_index(self, other: str) -> str:
"""
Returns the filename of this index, relative to the directory of another
index. (For inserting a reference to this index into ``other``.)
Args:
other: the other index
Returns:
relative filename of our index
"""
return relpath(self.index_filename, start=dirname(other)) | python | def index_filename_rel_other_index(self, other: str) -> str:
"""
Returns the filename of this index, relative to the directory of another
index. (For inserting a reference to this index into ``other``.)
Args:
other: the other index
Returns:
relative filename of our index
"""
return relpath(self.index_filename, start=dirname(other)) | [
"def",
"index_filename_rel_other_index",
"(",
"self",
",",
"other",
":",
"str",
")",
"->",
"str",
":",
"return",
"relpath",
"(",
"self",
".",
"index_filename",
",",
"start",
"=",
"dirname",
"(",
"other",
")",
")"
] | Returns the filename of this index, relative to the directory of another
index. (For inserting a reference to this index into ``other``.)
Args:
other: the other index
Returns:
relative filename of our index | [
"Returns",
"the",
"filename",
"of",
"this",
"index",
"relative",
"to",
"the",
"director",
"of",
"another",
"index",
".",
"(",
"For",
"inserting",
"a",
"reference",
"to",
"this",
"index",
"into",
"other",
".",
")"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L857-L868 |
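A self-contained example of the `relpath` computation above, using illustrative paths for a child index nested under a parent index:

```python
from os.path import dirname, relpath

# The parent's toctree entry for the child index is computed like this:
child_index = "docs/autodoc/gui/_index.rst"    # hypothetical
parent_index = "docs/autodoc/_index.rst"       # hypothetical
print(relpath(child_index, start=dirname(parent_index)))  # gui/_index.rst
```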
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | AutodocIndex.index_content | def index_content(self) -> str:
"""
Returns the contents of the index RST file.
"""
# Build the toctree command
index_filename = self.index_filename
spacer = " "
toctree_lines = [
".. toctree::",
spacer + ":maxdepth: {}".format(self.toctree_maxdepth),
""
]
for f in self.files_to_index:
if isinstance(f, FileToAutodocument):
rst_filename = spacer + f.rst_filename_rel_autodoc_index(
index_filename)
elif isinstance(f, AutodocIndex):
rst_filename = (
spacer + f.index_filename_rel_other_index(index_filename)
)
else:
fail("Unknown thing in files_to_index: {!r}".format(f))
rst_filename = "" # won't get here; for the type checker
toctree_lines.append(rst_filename)
toctree = "\n".join(toctree_lines)
# Create the whole file
content = """
.. {filename}
{AUTOGENERATED_COMMENT}
{prefix}
{underlined_title}
{introductory_rst}
{toctree}
{suffix}
""".format(
filename=self.index_filename_rel_project_root,
AUTOGENERATED_COMMENT=AUTOGENERATED_COMMENT,
prefix=self.rst_prefix,
underlined_title=rst_underline(
self.title, underline_char=self.index_heading_underline_char),
introductory_rst=self.introductory_rst,
toctree=toctree,
suffix=self.rst_suffix,
).strip() + "\n"
return content | python | def index_content(self) -> str:
"""
Returns the contents of the index RST file.
"""
# Build the toctree command
index_filename = self.index_filename
spacer = " "
toctree_lines = [
".. toctree::",
spacer + ":maxdepth: {}".format(self.toctree_maxdepth),
""
]
for f in self.files_to_index:
if isinstance(f, FileToAutodocument):
rst_filename = spacer + f.rst_filename_rel_autodoc_index(
index_filename)
elif isinstance(f, AutodocIndex):
rst_filename = (
spacer + f.index_filename_rel_other_index(index_filename)
)
else:
fail("Unknown thing in files_to_index: {!r}".format(f))
rst_filename = "" # won't get here; for the type checker
toctree_lines.append(rst_filename)
toctree = "\n".join(toctree_lines)
# Create the whole file
content = """
.. {filename}
{AUTOGENERATED_COMMENT}
{prefix}
{underlined_title}
{introductory_rst}
{toctree}
{suffix}
""".format(
filename=self.index_filename_rel_project_root,
AUTOGENERATED_COMMENT=AUTOGENERATED_COMMENT,
prefix=self.rst_prefix,
underlined_title=rst_underline(
self.title, underline_char=self.index_heading_underline_char),
introductory_rst=self.introductory_rst,
toctree=toctree,
suffix=self.rst_suffix,
).strip() + "\n"
return content | [
"def",
"index_content",
"(",
"self",
")",
"->",
"str",
":",
"# Build the toctree command",
"index_filename",
"=",
"self",
".",
"index_filename",
"spacer",
"=",
"\" \"",
"toctree_lines",
"=",
"[",
"\".. toctree::\"",
",",
"spacer",
"+",
"\":maxdepth: {}\"",
".",
"format",
"(",
"self",
".",
"toctree_maxdepth",
")",
",",
"\"\"",
"]",
"for",
"f",
"in",
"self",
".",
"files_to_index",
":",
"if",
"isinstance",
"(",
"f",
",",
"FileToAutodocument",
")",
":",
"rst_filename",
"=",
"spacer",
"+",
"f",
".",
"rst_filename_rel_autodoc_index",
"(",
"index_filename",
")",
"elif",
"isinstance",
"(",
"f",
",",
"AutodocIndex",
")",
":",
"rst_filename",
"=",
"(",
"spacer",
"+",
"f",
".",
"index_filename_rel_other_index",
"(",
"index_filename",
")",
")",
"else",
":",
"fail",
"(",
"\"Unknown thing in files_to_index: {!r}\"",
".",
"format",
"(",
"f",
")",
")",
"rst_filename",
"=",
"\"\"",
"# won't get here; for the type checker",
"toctree_lines",
".",
"append",
"(",
"rst_filename",
")",
"toctree",
"=",
"\"\\n\"",
".",
"join",
"(",
"toctree_lines",
")",
"# Create the whole file",
"content",
"=",
"\"\"\"\n.. {filename}\n\n{AUTOGENERATED_COMMENT}\n\n{prefix}\n\n{underlined_title}\n\n{introductory_rst}\n\n{toctree}\n\n{suffix}\n \"\"\"",
".",
"format",
"(",
"filename",
"=",
"self",
".",
"index_filename_rel_project_root",
",",
"AUTOGENERATED_COMMENT",
"=",
"AUTOGENERATED_COMMENT",
",",
"prefix",
"=",
"self",
".",
"rst_prefix",
",",
"underlined_title",
"=",
"rst_underline",
"(",
"self",
".",
"title",
",",
"underline_char",
"=",
"self",
".",
"index_heading_underline_char",
")",
",",
"introductory_rst",
"=",
"self",
".",
"introductory_rst",
",",
"toctree",
"=",
"toctree",
",",
"suffix",
"=",
"self",
".",
"rst_suffix",
",",
")",
".",
"strip",
"(",
")",
"+",
"\"\\n\"",
"return",
"content"
] | Returns the contents of the index RST file. | [
"Returns",
"the",
"contents",
"of",
"the",
"index",
"RST",
"file",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L870-L921 |
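A reduced sketch of the toctree block that `index_content()` assembles; the entries are invented, and the four-space spacer is an assumption about whitespace collapsed in the listing above:

```python
spacer = "    "  # assumed indentation for toctree entries
entries = ["module_a.py.rst", "subpackage/_index.rst"]  # hypothetical
toctree_lines = [".. toctree::", spacer + ":maxdepth: {}".format(1), ""]
toctree_lines.extend(spacer + e for e in entries)
print("\n".join(toctree_lines))
# .. toctree::
#     :maxdepth: 1
#
#     module_a.py.rst
#     subpackage/_index.rst
```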
RudolfCardinal/pythonlib | cardinal_pythonlib/sphinxtools.py | AutodocIndex.write_index | def write_index(self, overwrite: bool = False, mock: bool = False) -> None:
"""
Writes the index file, if permitted.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
"""
write_if_allowed(self.index_filename, self.index_content(),
overwrite=overwrite, mock=mock) | python | def write_index(self, overwrite: bool = False, mock: bool = False) -> None:
"""
Writes the index file, if permitted.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
"""
write_if_allowed(self.index_filename, self.index_content(),
overwrite=overwrite, mock=mock) | [
"def",
"write_index",
"(",
"self",
",",
"overwrite",
":",
"bool",
"=",
"False",
",",
"mock",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"write_if_allowed",
"(",
"self",
".",
"index_filename",
",",
"self",
".",
"index_content",
"(",
")",
",",
"overwrite",
"=",
"overwrite",
",",
"mock",
"=",
"mock",
")"
] | Writes the index file, if permitted.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't | [
"Writes",
"the",
"index",
"file",
"if",
"permitted",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L923-L932 |
avihad/twistes | twistes/parser.py | EsParser.parse_host | def parse_host(hosts):
"""
Parsing the hosts parameter,
* currently supports only one host
:param hosts: the hosts json to parse
:return: the full host and the authentication if exists
"""
hosts = EsParser._normalize_hosts(hosts)
host = hosts[0]
host_name = host[HostParsing.HOST]
host_port = host[HostParsing.PORT]
auth = None
if HostParsing.HTTP_AUTH in host:
http_auth = host[HostParsing.HTTP_AUTH]
user_pass = http_auth.split(':')
auth = (user_pass[0], user_pass[1])
full_host = "{host}:{port}".format(host=host_name, port=host_port)
if not host_name.startswith((HostParsing.HTTP + ':', HostParsing.HTTPS + ':')):
scheme = HostParsing.HTTPS if host.get(HostParsing.USE_SSL) else HostParsing.HTTP
full_host = "{scheme}://{full_host}".format(full_host=full_host, scheme=scheme)
return full_host, auth | python | def parse_host(hosts):
"""
Parsing the hosts parameter,
* currently supports only one host
:param hosts: the hosts json to parse
:return: the full host and the authentication if exists
"""
hosts = EsParser._normalize_hosts(hosts)
host = hosts[0]
host_name = host[HostParsing.HOST]
host_port = host[HostParsing.PORT]
auth = None
if HostParsing.HTTP_AUTH in host:
http_auth = host[HostParsing.HTTP_AUTH]
user_pass = http_auth.split(':')
auth = (user_pass[0], user_pass[1])
full_host = "{host}:{port}".format(host=host_name, port=host_port)
if not host_name.startswith((HostParsing.HTTP + ':', HostParsing.HTTPS + ':')):
scheme = HostParsing.HTTPS if host.get(HostParsing.USE_SSL) else HostParsing.HTTP
full_host = "{scheme}://{full_host}".format(full_host=full_host, scheme=scheme)
return full_host, auth | [
"def",
"parse_host",
"(",
"hosts",
")",
":",
"hosts",
"=",
"EsParser",
".",
"_normalize_hosts",
"(",
"hosts",
")",
"host",
"=",
"hosts",
"[",
"0",
"]",
"host_name",
"=",
"host",
"[",
"HostParsing",
".",
"HOST",
"]",
"host_port",
"=",
"host",
"[",
"HostParsing",
".",
"PORT",
"]",
"auth",
"=",
"None",
"if",
"HostParsing",
".",
"HTTP_AUTH",
"in",
"host",
":",
"http_auth",
"=",
"host",
"[",
"HostParsing",
".",
"HTTP_AUTH",
"]",
"user_pass",
"=",
"http_auth",
".",
"split",
"(",
"':'",
")",
"auth",
"=",
"(",
"user_pass",
"[",
"0",
"]",
",",
"user_pass",
"[",
"1",
"]",
")",
"full_host",
"=",
"\"{host}:{port}\"",
".",
"format",
"(",
"host",
"=",
"host_name",
",",
"port",
"=",
"host_port",
")",
"if",
"not",
"host_name",
".",
"startswith",
"(",
"(",
"HostParsing",
".",
"HTTP",
"+",
"':'",
",",
"HostParsing",
".",
"HTTPS",
"+",
"':'",
")",
")",
":",
"scheme",
"=",
"HostParsing",
".",
"HTTPS",
"if",
"host",
".",
"get",
"(",
"HostParsing",
".",
"USE_SSL",
")",
"else",
"HostParsing",
".",
"HTTP",
"full_host",
"=",
"\"{scheme}://{full_host}\"",
".",
"format",
"(",
"full_host",
"=",
"full_host",
",",
"scheme",
"=",
"scheme",
")",
"return",
"full_host",
",",
"auth"
] | Parsing the hosts parameter,
* currently supports only one host
:param hosts: the hosts json to parse
:return: the full host and the authentication if exists | [
"Parsing",
"the",
"hosts",
"parameter",
"*",
"currently",
"support",
"only",
"one",
"host",
":",
"param",
"hosts",
":",
"the",
"hosts",
"json",
"to",
"parse",
":",
"return",
":",
"the",
"full",
"host",
"and",
"the",
"authentication",
"if",
"exists"
] | train | https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/parser.py#L10-L34 |
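A standalone approximation of what `parse_host` returns for a single pre-normalized host dict; the host values are invented, and `EsParser`/`HostParsing` come from twistes and are not redefined here:

```python
# Hypothetical normalized host, as _normalize_hosts would produce it.
host = {"host": "es.example.com", "port": 9200, "http_auth": "user:secret"}

full_host = "{0}:{1}".format(host["host"], host["port"])
if not host["host"].startswith(("http:", "https:")):
    scheme = "https" if host.get("use_ssl") else "http"
    full_host = "{0}://{1}".format(scheme, full_host)
auth = tuple(host["http_auth"].split(":"))
print(full_host, auth)  # http://es.example.com:9200 ('user', 'secret')
```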
avihad/twistes | twistes/parser.py | EsParser._update_ssl_params | def _update_ssl_params(host):
"""
Update the host ssl params (port or scheme) if needed.
:param host:
:return:
"""
if host[HostParsing.HOST] \
and EsParser._is_secure_connection_type(host):
host[HostParsing.PORT] = EsParser.SSL_DEFAULT_PORT
host[HostParsing.USE_SSL] = True
parsed_url = urlparse(EsParser._fix_host_prefix(host[HostParsing.HOST]))
host[HostParsing.HOST] = parsed_url.hostname
host[HostParsing.SCHEME] = HostParsing.HTTPS
return host | python | def _update_ssl_params(host):
"""
Update the host ssl params (port or scheme) if needed.
:param host:
:return:
"""
if host[HostParsing.HOST] \
and EsParser._is_secure_connection_type(host):
host[HostParsing.PORT] = EsParser.SSL_DEFAULT_PORT
host[HostParsing.USE_SSL] = True
parsed_url = urlparse(EsParser._fix_host_prefix(host[HostParsing.HOST]))
host[HostParsing.HOST] = parsed_url.hostname
host[HostParsing.SCHEME] = HostParsing.HTTPS
return host | [
"def",
"_update_ssl_params",
"(",
"host",
")",
":",
"if",
"host",
"[",
"HostParsing",
".",
"HOST",
"]",
"and",
"EsParser",
".",
"_is_secure_connection_type",
"(",
"host",
")",
":",
"host",
"[",
"HostParsing",
".",
"PORT",
"]",
"=",
"EsParser",
".",
"SSL_DEFAULT_PORT",
"host",
"[",
"HostParsing",
".",
"USE_SSL",
"]",
"=",
"True",
"parsed_url",
"=",
"urlparse",
"(",
"EsParser",
".",
"_fix_host_prefix",
"(",
"host",
"[",
"HostParsing",
".",
"HOST",
"]",
")",
")",
"host",
"[",
"HostParsing",
".",
"HOST",
"]",
"=",
"parsed_url",
".",
"hostname",
"host",
"[",
"HostParsing",
".",
"SCHEME",
"]",
"=",
"HostParsing",
".",
"HTTPS",
"return",
"host"
] | Update the host ssl params (port or scheme) if needed.
:param host:
:return: | [
"Update",
"the",
"host",
"ssl",
"params",
"(",
"port",
"or",
"scheme",
")",
"if",
"needed",
".",
":",
"param",
"host",
":",
":",
"return",
":"
] | train | https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/parser.py#L54-L67 |
avihad/twistes | twistes/parser.py | EsParser._parse_string_host | def _parse_string_host(host_str):
"""
Parse host string into a dictionary host
:param host_str:
:return:
"""
host_str = EsParser._fix_host_prefix(host_str)
parsed_url = urlparse(host_str)
host = {HostParsing.HOST: parsed_url.hostname}
if parsed_url.port:
host[HostParsing.PORT] = parsed_url.port
if parsed_url.scheme == HostParsing.HTTPS:
host[HostParsing.PORT] = parsed_url.port or EsParser.SSL_DEFAULT_PORT
host[HostParsing.USE_SSL] = True
host[HostParsing.SCHEME] = HostParsing.HTTPS
elif parsed_url.scheme:
host[HostParsing.SCHEME] = parsed_url.scheme
if parsed_url.username or parsed_url.password:
host[HostParsing.HTTP_AUTH] = '%s:%s' % (parsed_url.username, parsed_url.password)
if parsed_url.path and parsed_url.path != '/':
host[HostParsing.URL_PREFIX] = parsed_url.path
return host | python | def _parse_string_host(host_str):
"""
Parse host string into a dictionary host
:param host_str:
:return:
"""
host_str = EsParser._fix_host_prefix(host_str)
parsed_url = urlparse(host_str)
host = {HostParsing.HOST: parsed_url.hostname}
if parsed_url.port:
host[HostParsing.PORT] = parsed_url.port
if parsed_url.scheme == HostParsing.HTTPS:
host[HostParsing.PORT] = parsed_url.port or EsParser.SSL_DEFAULT_PORT
host[HostParsing.USE_SSL] = True
host[HostParsing.SCHEME] = HostParsing.HTTPS
elif parsed_url.scheme:
host[HostParsing.SCHEME] = parsed_url.scheme
if parsed_url.username or parsed_url.password:
host[HostParsing.HTTP_AUTH] = '%s:%s' % (parsed_url.username, parsed_url.password)
if parsed_url.path and parsed_url.path != '/':
host[HostParsing.URL_PREFIX] = parsed_url.path
return host | [
"def",
"_parse_string_host",
"(",
"host_str",
")",
":",
"host_str",
"=",
"EsParser",
".",
"_fix_host_prefix",
"(",
"host_str",
")",
"parsed_url",
"=",
"urlparse",
"(",
"host_str",
")",
"host",
"=",
"{",
"HostParsing",
".",
"HOST",
":",
"parsed_url",
".",
"hostname",
"}",
"if",
"parsed_url",
".",
"port",
":",
"host",
"[",
"HostParsing",
".",
"PORT",
"]",
"=",
"parsed_url",
".",
"port",
"if",
"parsed_url",
".",
"scheme",
"==",
"HostParsing",
".",
"HTTPS",
":",
"host",
"[",
"HostParsing",
".",
"PORT",
"]",
"=",
"parsed_url",
".",
"port",
"or",
"EsParser",
".",
"SSL_DEFAULT_PORT",
"host",
"[",
"HostParsing",
".",
"USE_SSL",
"]",
"=",
"True",
"host",
"[",
"HostParsing",
".",
"SCHEME",
"]",
"=",
"HostParsing",
".",
"HTTPS",
"elif",
"parsed_url",
".",
"scheme",
":",
"host",
"[",
"HostParsing",
".",
"SCHEME",
"]",
"=",
"parsed_url",
".",
"scheme",
"if",
"parsed_url",
".",
"username",
"or",
"parsed_url",
".",
"password",
":",
"host",
"[",
"HostParsing",
".",
"HTTP_AUTH",
"]",
"=",
"'%s:%s'",
"%",
"(",
"parsed_url",
".",
"username",
",",
"parsed_url",
".",
"password",
")",
"if",
"parsed_url",
".",
"path",
"and",
"parsed_url",
".",
"path",
"!=",
"'/'",
":",
"host",
"[",
"HostParsing",
".",
"URL_PREFIX",
"]",
"=",
"parsed_url",
".",
"path",
"return",
"host"
] | Parse host string into a dictionary host
:param host_str:
:return: | [
"Parse",
"host",
"string",
"into",
"a",
"dictionary",
"host",
":",
"param",
"host_str",
":",
":",
"return",
":"
] | train | https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/parser.py#L70-L91 |
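For reference, the fields that `_parse_string_host` reads off a typical Elasticsearch URL; the URL itself is made up:

```python
from urllib.parse import urlparse  # Python 3 spelling of the import above

parsed = urlparse("https://user:secret@es.example.com:9200/prefix")
print(parsed.scheme, parsed.hostname, parsed.port)  # https es.example.com 9200
print(parsed.username, parsed.password)             # user secret
print(parsed.path)                                  # /prefix
```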
avihad/twistes | twistes/parser.py | EsParser.make_path | def make_path(*sub_paths):
"""
Create a path from a list of sub paths.
:param sub_paths: a list of sub paths
:return:
"""
queued_params = [quote(c.encode('utf-8'), '') for c in sub_paths if c not in NULL_VALUES]
queued_params.insert(0, '')
return '/'.join(queued_params) | python | def make_path(*sub_paths):
"""
Create a path from a list of sub paths.
:param sub_paths: a list of sub paths
:return:
"""
queued_params = [quote(c.encode('utf-8'), '') for c in sub_paths if c not in NULL_VALUES]
queued_params.insert(0, '')
return '/'.join(queued_params) | [
"def",
"make_path",
"(",
"*",
"sub_paths",
")",
":",
"queued_params",
"=",
"[",
"quote",
"(",
"c",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"''",
")",
"for",
"c",
"in",
"sub_paths",
"if",
"c",
"not",
"in",
"NULL_VALUES",
"]",
"queued_params",
".",
"insert",
"(",
"0",
",",
"''",
")",
"return",
"'/'",
".",
"join",
"(",
"queued_params",
")"
] | Create a path from a list of sub paths.
:param sub_paths: a list of sub paths
:return: | [
"Create",
"a",
"path",
"from",
"a",
"list",
"of",
"sub",
"paths",
".",
":",
"param",
"sub_paths",
":",
"a",
"list",
"of",
"sub",
"paths",
":",
"return",
":"
] | train | https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/parser.py#L105-L113 |
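The same algorithm as `make_path`, inlined with illustrative components; note that `quote(..., '')` also escapes `/` inside each component, so a component cannot inject extra path segments:

```python
from urllib.parse import quote

sub_paths = ("my-index", "doc type")  # illustrative components
parts = [quote(c.encode("utf-8"), "") for c in sub_paths]
parts.insert(0, "")  # leading "" yields the leading slash after join
print("/".join(parts))  # /my-index/doc%20type
```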
avihad/twistes | twistes/parser.py | EsParser.prepare_url | def prepare_url(hostname, path, params=None):
"""
Prepare Elasticsearch request url.
:param hostname: host name
:param path: request path
:param params: optional url params
:return:
"""
url = hostname + path
if params:
url = url + '?' + urlencode(params)
if not url.startswith(('http:', 'https:')):
url = "http://" + url
return url.encode('utf-8') | python | def prepare_url(hostname, path, params=None):
"""
Prepare Elasticsearch request url.
:param hostname: host name
:param path: request path
:param params: optional url params
:return:
"""
url = hostname + path
if params:
url = url + '?' + urlencode(params)
if not url.startswith(('http:', 'https:')):
url = "http://" + url
return url.encode('utf-8') | [
"def",
"prepare_url",
"(",
"hostname",
",",
"path",
",",
"params",
"=",
"None",
")",
":",
"url",
"=",
"hostname",
"+",
"path",
"if",
"params",
":",
"url",
"=",
"url",
"+",
"'?'",
"+",
"urlencode",
"(",
"params",
")",
"if",
"not",
"url",
".",
"startswith",
"(",
"(",
"'http:'",
",",
"'https:'",
")",
")",
":",
"url",
"=",
"\"http://\"",
"+",
"url",
"return",
"url",
".",
"encode",
"(",
"'utf-8'",
")"
] | Prepare Elasticsearch request url.
:param hostname: host name
:param path: request path
:param params: optional url params
:return: | [
"Prepare",
"Elasticsearch",
"request",
"url",
".",
":",
"param",
"hostname",
":",
"host",
"name",
":",
"param",
"path",
":",
"request",
"path",
":",
"param",
"params",
":",
"optional",
"url",
"params",
":",
"return",
":"
] | train | https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/parser.py#L116-L132 |
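A sketch of the URL assembly above with hypothetical inputs; the source returns UTF-8 bytes, hence the final `.encode('utf-8')`:

```python
from urllib.parse import urlencode

hostname, path = "es.example.com:9200", "/my-index/_search"  # made up
params = {"size": 10}
url = hostname + path + "?" + urlencode(params)
if not url.startswith(("http:", "https:")):
    url = "http://" + url
print(url.encode("utf-8"))
# b'http://es.example.com:9200/my-index/_search?size=10'
```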
carpyncho/feets | doc/source/JSAnimation/examples.py | basic_animation | def basic_animation(frames=100, interval=30):
"""Plot a basic sine wave with oscillating amplitude"""
fig = plt.figure()
ax = plt.axes(xlim=(0, 10), ylim=(-2, 2))
line, = ax.plot([], [], lw=2)
x = np.linspace(0, 10, 1000)
def init():
line.set_data([], [])
return line,
def animate(i):
y = np.cos(i * 0.02 * np.pi) * np.sin(x - i * 0.02 * np.pi)
line.set_data(x, y)
return line,
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval) | python | def basic_animation(frames=100, interval=30):
"""Plot a basic sine wave with oscillating amplitude"""
fig = plt.figure()
ax = plt.axes(xlim=(0, 10), ylim=(-2, 2))
line, = ax.plot([], [], lw=2)
x = np.linspace(0, 10, 1000)
def init():
line.set_data([], [])
return line,
def animate(i):
y = np.cos(i * 0.02 * np.pi) * np.sin(x - i * 0.02 * np.pi)
line.set_data(x, y)
return line,
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval) | [
"def",
"basic_animation",
"(",
"frames",
"=",
"100",
",",
"interval",
"=",
"30",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"plt",
".",
"axes",
"(",
"xlim",
"=",
"(",
"0",
",",
"10",
")",
",",
"ylim",
"=",
"(",
"-",
"2",
",",
"2",
")",
")",
"line",
",",
"=",
"ax",
".",
"plot",
"(",
"[",
"]",
",",
"[",
"]",
",",
"lw",
"=",
"2",
")",
"x",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"10",
",",
"1000",
")",
"def",
"init",
"(",
")",
":",
"line",
".",
"set_data",
"(",
"[",
"]",
",",
"[",
"]",
")",
"return",
"line",
",",
"def",
"animate",
"(",
"i",
")",
":",
"y",
"=",
"np",
".",
"cos",
"(",
"i",
"*",
"0.02",
"*",
"np",
".",
"pi",
")",
"*",
"np",
".",
"sin",
"(",
"x",
"-",
"i",
"*",
"0.02",
"*",
"np",
".",
"pi",
")",
"line",
".",
"set_data",
"(",
"x",
",",
"y",
")",
"return",
"line",
",",
"return",
"animation",
".",
"FuncAnimation",
"(",
"fig",
",",
"animate",
",",
"init_func",
"=",
"init",
",",
"frames",
"=",
"frames",
",",
"interval",
"=",
"interval",
")"
] | Plot a basic sine wave with oscillating amplitude | [
"Plot",
"a",
"basic",
"sine",
"wave",
"with",
"oscillating",
"amplitude"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/doc/source/JSAnimation/examples.py#L6-L24 |
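Typical notebook usage, assuming the JSAnimation package vendored in this repository is importable alongside `basic_animation`:

```python
# display_animation renders the animation with the JS-based player.
from JSAnimation.IPython_display import display_animation

anim = basic_animation(frames=50, interval=30)
display_animation(anim, default_mode="loop")
```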
carpyncho/feets | doc/source/JSAnimation/examples.py | lorenz_animation | def lorenz_animation(N_trajectories=20, rseed=1, frames=200, interval=30):
"""Plot a 3D visualization of the dynamics of the Lorenz system"""
from scipy import integrate
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
def lorentz_deriv(coords, t0, sigma=10., beta=8./3, rho=28.0):
"""Compute the time-derivative of a Lorentz system."""
x, y, z = coords
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
# Choose random starting points, uniformly distributed from -15 to 15
np.random.seed(rseed)
x0 = -15 + 30 * np.random.random((N_trajectories, 3))
# Solve for the trajectories
t = np.linspace(0, 2, 500)
x_t = np.asarray([integrate.odeint(lorentz_deriv, x0i, t)
for x0i in x0])
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c, ms=4)
for c in colors], [])
# prepare the axes limits
ax.set_xlim((-25, 25))
ax.set_ylim((-35, 35))
ax.set_zlim((5, 55))
# set point-of-view: specified by (altitude degrees, azimuth degrees)
ax.view_init(30, 0)
# initialization function: plot the background of each frame
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return lines + pts
# animation function: called sequentially
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
i = (2 * i) % x_t.shape[1]
for line, pt, xi in zip(lines, pts, x_t):
x, y, z = xi[:i + 1].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return lines + pts
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval) | python | def lorenz_animation(N_trajectories=20, rseed=1, frames=200, interval=30):
"""Plot a 3D visualization of the dynamics of the Lorenz system"""
from scipy import integrate
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
def lorentz_deriv(coords, t0, sigma=10., beta=8./3, rho=28.0):
"""Compute the time-derivative of a Lorentz system."""
x, y, z = coords
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
# Choose random starting points, uniformly distributed from -15 to 15
np.random.seed(rseed)
x0 = -15 + 30 * np.random.random((N_trajectories, 3))
# Solve for the trajectories
t = np.linspace(0, 2, 500)
x_t = np.asarray([integrate.odeint(lorentz_deriv, x0i, t)
for x0i in x0])
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c, ms=4)
for c in colors], [])
# prepare the axes limits
ax.set_xlim((-25, 25))
ax.set_ylim((-35, 35))
ax.set_zlim((5, 55))
# set point-of-view: specified by (altitude degrees, azimuth degrees)
ax.view_init(30, 0)
# initialization function: plot the background of each frame
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return lines + pts
# animation function: called sequentially
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
i = (2 * i) % x_t.shape[1]
for line, pt, xi in zip(lines, pts, x_t):
x, y, z = xi[:i + 1].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return lines + pts
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval) | [
"def",
"lorenz_animation",
"(",
"N_trajectories",
"=",
"20",
",",
"rseed",
"=",
"1",
",",
"frames",
"=",
"200",
",",
"interval",
"=",
"30",
")",
":",
"from",
"scipy",
"import",
"integrate",
"from",
"mpl_toolkits",
".",
"mplot3d",
"import",
"Axes3D",
"from",
"matplotlib",
".",
"colors",
"import",
"cnames",
"def",
"lorentz_deriv",
"(",
"coords",
",",
"t0",
",",
"sigma",
"=",
"10.",
",",
"beta",
"=",
"8.",
"/",
"3",
",",
"rho",
"=",
"28.0",
")",
":",
"\"\"\"Compute the time-derivative of a Lorentz system.\"\"\"",
"x",
",",
"y",
",",
"z",
"=",
"coords",
"return",
"[",
"sigma",
"*",
"(",
"y",
"-",
"x",
")",
",",
"x",
"*",
"(",
"rho",
"-",
"z",
")",
"-",
"y",
",",
"x",
"*",
"y",
"-",
"beta",
"*",
"z",
"]",
"# Choose random starting points, uniformly distributed from -15 to 15",
"np",
".",
"random",
".",
"seed",
"(",
"rseed",
")",
"x0",
"=",
"-",
"15",
"+",
"30",
"*",
"np",
".",
"random",
".",
"random",
"(",
"(",
"N_trajectories",
",",
"3",
")",
")",
"# Solve for the trajectories",
"t",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"2",
",",
"500",
")",
"x_t",
"=",
"np",
".",
"asarray",
"(",
"[",
"integrate",
".",
"odeint",
"(",
"lorentz_deriv",
",",
"x0i",
",",
"t",
")",
"for",
"x0i",
"in",
"x0",
"]",
")",
"# Set up figure & 3D axis for animation",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_axes",
"(",
"[",
"0",
",",
"0",
",",
"1",
",",
"1",
"]",
",",
"projection",
"=",
"'3d'",
")",
"ax",
".",
"axis",
"(",
"'off'",
")",
"# choose a different color for each trajectory",
"colors",
"=",
"plt",
".",
"cm",
".",
"jet",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"N_trajectories",
")",
")",
"# set up lines and points",
"lines",
"=",
"sum",
"(",
"[",
"ax",
".",
"plot",
"(",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"'-'",
",",
"c",
"=",
"c",
")",
"for",
"c",
"in",
"colors",
"]",
",",
"[",
"]",
")",
"pts",
"=",
"sum",
"(",
"[",
"ax",
".",
"plot",
"(",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"'o'",
",",
"c",
"=",
"c",
",",
"ms",
"=",
"4",
")",
"for",
"c",
"in",
"colors",
"]",
",",
"[",
"]",
")",
"# prepare the axes limits",
"ax",
".",
"set_xlim",
"(",
"(",
"-",
"25",
",",
"25",
")",
")",
"ax",
".",
"set_ylim",
"(",
"(",
"-",
"35",
",",
"35",
")",
")",
"ax",
".",
"set_zlim",
"(",
"(",
"5",
",",
"55",
")",
")",
"# set point-of-view: specified by (altitude degrees, azimuth degrees)",
"ax",
".",
"view_init",
"(",
"30",
",",
"0",
")",
"# initialization function: plot the background of each frame",
"def",
"init",
"(",
")",
":",
"for",
"line",
",",
"pt",
"in",
"zip",
"(",
"lines",
",",
"pts",
")",
":",
"line",
".",
"set_data",
"(",
"[",
"]",
",",
"[",
"]",
")",
"line",
".",
"set_3d_properties",
"(",
"[",
"]",
")",
"pt",
".",
"set_data",
"(",
"[",
"]",
",",
"[",
"]",
")",
"pt",
".",
"set_3d_properties",
"(",
"[",
"]",
")",
"return",
"lines",
"+",
"pts",
"# animation function: called sequentially",
"def",
"animate",
"(",
"i",
")",
":",
"# we'll step two time-steps per frame. This leads to nice results.",
"i",
"=",
"(",
"2",
"*",
"i",
")",
"%",
"x_t",
".",
"shape",
"[",
"1",
"]",
"for",
"line",
",",
"pt",
",",
"xi",
"in",
"zip",
"(",
"lines",
",",
"pts",
",",
"x_t",
")",
":",
"x",
",",
"y",
",",
"z",
"=",
"xi",
"[",
":",
"i",
"+",
"1",
"]",
".",
"T",
"line",
".",
"set_data",
"(",
"x",
",",
"y",
")",
"line",
".",
"set_3d_properties",
"(",
"z",
")",
"pt",
".",
"set_data",
"(",
"x",
"[",
"-",
"1",
":",
"]",
",",
"y",
"[",
"-",
"1",
":",
"]",
")",
"pt",
".",
"set_3d_properties",
"(",
"z",
"[",
"-",
"1",
":",
"]",
")",
"ax",
".",
"view_init",
"(",
"30",
",",
"0.3",
"*",
"i",
")",
"fig",
".",
"canvas",
".",
"draw",
"(",
")",
"return",
"lines",
"+",
"pts",
"return",
"animation",
".",
"FuncAnimation",
"(",
"fig",
",",
"animate",
",",
"init_func",
"=",
"init",
",",
"frames",
"=",
"frames",
",",
"interval",
"=",
"interval",
")"
] | Plot a 3D visualization of the dynamics of the Lorenz system | [
"Plot",
"a",
"3D",
"visualization",
"of",
"the",
"dynamics",
"of",
"the",
"Lorenz",
"system"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/doc/source/JSAnimation/examples.py#L27-L97 |
carpyncho/feets | doc/source/JSAnimation/html_writer.py | _included_frames | def _included_frames(frame_list, frame_format):
"""frame_list should be a list of filenames"""
return INCLUDED_FRAMES.format(Nframes=len(frame_list),
frame_dir=os.path.dirname(frame_list[0]),
frame_format=frame_format) | python | def _included_frames(frame_list, frame_format):
"""frame_list should be a list of filenames"""
return INCLUDED_FRAMES.format(Nframes=len(frame_list),
frame_dir=os.path.dirname(frame_list[0]),
frame_format=frame_format) | [
"def",
"_included_frames",
"(",
"frame_list",
",",
"frame_format",
")",
":",
"return",
"INCLUDED_FRAMES",
".",
"format",
"(",
"Nframes",
"=",
"len",
"(",
"frame_list",
")",
",",
"frame_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"frame_list",
"[",
"0",
"]",
")",
",",
"frame_format",
"=",
"frame_format",
")"
] | frame_list should be a list of filenames | [
"frame_list",
"should",
"be",
"a",
"list",
"of",
"filenames"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/doc/source/JSAnimation/html_writer.py#L222-L226 |
carpyncho/feets | doc/source/JSAnimation/html_writer.py | _embedded_frames | def _embedded_frames(frame_list, frame_format):
"""frame_list should be a list of base64-encoded png files"""
template = ' frames[{0}] = "data:image/{1};base64,{2}"\n'
embedded = "\n"
for i, frame_data in enumerate(frame_list):
embedded += template.format(i, frame_format,
frame_data.replace('\n', '\\\n'))
return embedded | python | def _embedded_frames(frame_list, frame_format):
"""frame_list should be a list of base64-encoded png files"""
template = ' frames[{0}] = "data:image/{1};base64,{2}"\n'
embedded = "\n"
for i, frame_data in enumerate(frame_list):
embedded += template.format(i, frame_format,
frame_data.replace('\n', '\\\n'))
return embedded | [
"def",
"_embedded_frames",
"(",
"frame_list",
",",
"frame_format",
")",
":",
"template",
"=",
"' frames[{0}] = \"data:image/{1};base64,{2}\"\\n'",
"embedded",
"=",
"\"\\n\"",
"for",
"i",
",",
"frame_data",
"in",
"enumerate",
"(",
"frame_list",
")",
":",
"embedded",
"+=",
"template",
".",
"format",
"(",
"i",
",",
"frame_format",
",",
"frame_data",
".",
"replace",
"(",
"'\\n'",
",",
"'\\\\\\n'",
")",
")",
"return",
"embedded"
] | frame_list should be a list of base64-encoded png files | [
"frame_list",
"should",
"be",
"a",
"list",
"of",
"base64",
"-",
"encoded",
"png",
"files"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/doc/source/JSAnimation/html_writer.py#L229-L236 |
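How one embedded frame entry is built by the template above; the PNG bytes here are a stand-in for a real rendered frame:

```python
import base64

frame_data = base64.b64encode(b"\x89PNG\r\n...fake...").decode("ascii")
template = '  frames[{0}] = "data:image/{1};base64,{2}"\n'
# Escape literal newlines so the data URI survives inside JS source.
print(template.format(0, "png", frame_data.replace("\n", "\\\n")))
```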
carpyncho/feets | feets/preprocess.py | remove_noise | def remove_noise(time, magnitude, error, error_limit=3, std_limit=5):
"""Points within 'std_limit' standard deviations from the mean and with
errors greater than 'error_limit' times the error mean are
considered as noise and thus are eliminated.
"""
data, mjd = magnitude, time
data_len = len(mjd)
error_mean = np.mean(error)
error_tolerance = error_limit * (error_mean or 1)
data_mean = np.mean(data)
data_std = np.std(data)
mjd_out, data_out, error_out = [], [], []
for i in range(data_len):
is_not_noise = (
error[i] < error_tolerance and
(np.absolute(data[i] - data_mean) / data_std) < std_limit)
if is_not_noise:
mjd_out.append(mjd[i])
data_out.append(data[i])
error_out.append(error[i])
data_out = np.asarray(data_out)
mjd_out = np.asarray(mjd_out)
error_out = np.asarray(error_out)
return mjd_out, data_out, error_out | python | def remove_noise(time, magnitude, error, error_limit=3, std_limit=5):
"""Points within 'std_limit' standard deviations from the mean and with
errors greater than 'error_limit' times the error mean are
considered as noise and thus are eliminated.
"""
data, mjd = magnitude, time
data_len = len(mjd)
error_mean = np.mean(error)
error_tolerance = error_limit * (error_mean or 1)
data_mean = np.mean(data)
data_std = np.std(data)
mjd_out, data_out, error_out = [], [], []
for i in range(data_len):
is_not_noise = (
error[i] < error_tolerance and
(np.absolute(data[i] - data_mean) / data_std) < std_limit)
if is_not_noise:
mjd_out.append(mjd[i])
data_out.append(data[i])
error_out.append(error[i])
data_out = np.asarray(data_out)
mjd_out = np.asarray(mjd_out)
error_out = np.asarray(error_out)
return mjd_out, data_out, error_out | [
"def",
"remove_noise",
"(",
"time",
",",
"magnitude",
",",
"error",
",",
"error_limit",
"=",
"3",
",",
"std_limit",
"=",
"5",
")",
":",
"data",
",",
"mjd",
"=",
"magnitude",
",",
"time",
"data_len",
"=",
"len",
"(",
"mjd",
")",
"error_mean",
"=",
"np",
".",
"mean",
"(",
"error",
")",
"error_tolerance",
"=",
"error_limit",
"*",
"(",
"error_mean",
"or",
"1",
")",
"data_mean",
"=",
"np",
".",
"mean",
"(",
"data",
")",
"data_std",
"=",
"np",
".",
"std",
"(",
"data",
")",
"mjd_out",
",",
"data_out",
",",
"error_out",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"data_len",
")",
":",
"is_not_noise",
"=",
"(",
"error",
"[",
"i",
"]",
"<",
"error_tolerance",
"and",
"(",
"np",
".",
"absolute",
"(",
"data",
"[",
"i",
"]",
"-",
"data_mean",
")",
"/",
"data_std",
")",
"<",
"std_limit",
")",
"if",
"is_not_noise",
":",
"mjd_out",
".",
"append",
"(",
"mjd",
"[",
"i",
"]",
")",
"data_out",
".",
"append",
"(",
"data",
"[",
"i",
"]",
")",
"error_out",
".",
"append",
"(",
"error",
"[",
"i",
"]",
")",
"data_out",
"=",
"np",
".",
"asarray",
"(",
"data_out",
")",
"mjd_out",
"=",
"np",
".",
"asarray",
"(",
"mjd_out",
")",
"error_out",
"=",
"np",
".",
"asarray",
"(",
"error_out",
")",
"return",
"mjd_out",
",",
"data_out",
",",
"error_out"
] | Points more than 'std_limit' standard deviations from the mean, or with
errors greater than 'error_limit' times the mean error, are
considered as noise and thus are eliminated. | [
"Points",
"more",
"than",
"std_limit",
"standard",
"deviations",
"from",
"the",
"mean",
",",
"or",
"with",
"errors",
"greater",
"than",
"error_limit",
"times",
"the",
"mean",
"error",
",",
"are",
"considered",
"as",
"noise",
"and",
"thus",
"are",
"eliminated",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/preprocess.py#L44-L73 |
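A usage sketch for remove_noise on synthetic data; the array sizes, the injected outliers, and the import path (inferred from the feets/preprocess.py path above) are illustrative assumptions:

import numpy as np
from feets.preprocess import remove_noise

rng = np.random.RandomState(0)
time = np.linspace(0., 1., 200)
mag = rng.normal(15., 0.05, 200)
err = np.abs(rng.normal(0.01, 0.002, 200))
mag[42] = 25.0   # far beyond 5 standard deviations from the mean
err[7] = 1.0     # far above 3x the mean error

t_clean, m_clean, e_clean = remove_noise(time, mag, err)
print(len(time), "->", len(t_clean))  # both flagged points are dropped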
carpyncho/feets | feets/preprocess.py | align | def align(time, time2, magnitude, magnitude2, error, error2):
"""Synchronizes the light-curves in the two different bands.
Returns
-------
aligned_time
aligned_magnitude
aligned_magnitude2
aligned_error
aligned_error2
"""
error = np.zeros(time.shape) if error is None else error
error2 = np.zeros(time2.shape) if error2 is None else error2
# this assumes that the first series is the short one
sserie = pd.DataFrame({"mag": magnitude, "error": error}, index=time)
lserie = pd.DataFrame({"mag": magnitude2, "error": error2}, index=time2)
# if the first series is the longer one, swap them
if len(time) > len(time2):
sserie, lserie = lserie, sserie
# make the merge
merged = sserie.join(lserie, how="inner", rsuffix='2')
# recreate columns
new_time = merged.index.values
new_mag, new_mag2 = merged.mag.values, merged.mag2.values
new_error, new_error2 = merged.error.values, merged.error2.values
if len(time) > len(time2):
new_mag, new_mag2 = new_mag2, new_mag
new_error, new_error2 = new_error2, new_error
return new_time, new_mag, new_mag2, new_error, new_error2 | python | def align(time, time2, magnitude, magnitude2, error, error2):
"""Synchronizes the light-curves in the two different bands.
Returns
-------
aligned_time
aligned_magnitude
aligned_magnitude2
aligned_error
aligned_error2
"""
error = np.zeros(time.shape) if error is None else error
error2 = np.zeros(time2.shape) if error2 is None else error2
# this assumes that the first series is the short one
sserie = pd.DataFrame({"mag": magnitude, "error": error}, index=time)
lserie = pd.DataFrame({"mag": magnitude2, "error": error2}, index=time2)
# if the first series is the longer one, swap them
if len(time) > len(time2):
sserie, lserie = lserie, sserie
# make the merge
merged = sserie.join(lserie, how="inner", rsuffix='2')
# recreate columns
new_time = merged.index.values
new_mag, new_mag2 = merged.mag.values, merged.mag2.values
new_error, new_error2 = merged.error.values, merged.error2.values
if len(time) > len(time2):
new_mag, new_mag2 = new_mag2, new_mag
new_error, new_error2 = new_error2, new_error
return new_time, new_mag, new_mag2, new_error, new_error2 | [
"def",
"align",
"(",
"time",
",",
"time2",
",",
"magnitude",
",",
"magnitude2",
",",
"error",
",",
"error2",
")",
":",
"error",
"=",
"np",
".",
"zeros",
"(",
"time",
".",
"shape",
")",
"if",
"error",
"is",
"None",
"else",
"error",
"error2",
"=",
"np",
".",
"zeros",
"(",
"time2",
".",
"shape",
")",
"if",
"error2",
"is",
"None",
"else",
"error2",
"# this asume that the first series is the short one",
"sserie",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"\"mag\"",
":",
"magnitude",
",",
"\"error\"",
":",
"error",
"}",
",",
"index",
"=",
"time",
")",
"lserie",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"\"mag\"",
":",
"magnitude2",
",",
"\"error\"",
":",
"error2",
"}",
",",
"index",
"=",
"time2",
")",
"# if the second serie is logest then revert",
"if",
"len",
"(",
"time",
")",
">",
"len",
"(",
"time2",
")",
":",
"sserie",
",",
"lserie",
"=",
"lserie",
",",
"sserie",
"# make the merge",
"merged",
"=",
"sserie",
".",
"join",
"(",
"lserie",
",",
"how",
"=",
"\"inner\"",
",",
"rsuffix",
"=",
"'2'",
")",
"# recreate columns",
"new_time",
"=",
"merged",
".",
"index",
".",
"values",
"new_mag",
",",
"new_mag2",
"=",
"merged",
".",
"mag",
".",
"values",
",",
"merged",
".",
"mag2",
".",
"values",
"new_error",
",",
"new_error2",
"=",
"merged",
".",
"error",
".",
"values",
",",
"merged",
".",
"error2",
".",
"values",
"if",
"len",
"(",
"time",
")",
">",
"len",
"(",
"time2",
")",
":",
"new_mag",
",",
"new_mag2",
"=",
"new_mag2",
",",
"new_mag",
"new_error",
",",
"new_error2",
"=",
"new_error2",
",",
"new_error",
"return",
"new_time",
",",
"new_mag",
",",
"new_mag2",
",",
"new_error",
",",
"new_error2"
] | Synchronizes the light-curves in the two different bands.
Returns
-------
aligned_time
aligned_magnitude
aligned_magnitude2
aligned_error
aligned_error2 | [
"Synchronizes",
"the",
"light",
"-",
"curves",
"in",
"the",
"two",
"different",
"bands",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/preprocess.py#L76-L113 |
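A sketch of align on two bands with partially overlapping time grids; passing None for the errors exercises the zero-filling branch, and only shared timestamps survive the inner join:

import numpy as np
from feets.preprocess import align  # import path inferred from the record above

time_b = np.array([1., 2., 3., 4.])
time_v = np.array([2., 3., 4., 5., 6.])
mag_b = np.array([15.0, 15.1, 15.2, 15.3])
mag_v = np.array([14.0, 14.1, 14.2, 14.3, 14.4])

atime, amag_b, amag_v, aerr_b, aerr_v = align(
    time_b, time_v, mag_b, mag_v, None, None)
print(atime)   # [2. 3. 4.] -- the intersection of both time grids
print(aerr_b)  # zeros, because no errors were supplied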
carpyncho/feets | feets/datasets/ogle3.py | load_OGLE3_catalog | def load_OGLE3_catalog():
"""Return the full list of variable stars of OGLE-3 as a DataFrame
"""
with bz2.BZ2File(CATALOG_PATH) as bz2fp, warnings.catch_warnings():
warnings.simplefilter("ignore")
df = pd.read_table(bz2fp, skiprows=6)
df.rename(columns={"# ID": "ID"}, inplace=True)
return df | python | def load_OGLE3_catalog():
"""Return the full list of variable stars of OGLE-3 as a DataFrame
"""
with bz2.BZ2File(CATALOG_PATH) as bz2fp, warnings.catch_warnings():
warnings.simplefilter("ignore")
df = pd.read_table(bz2fp, skiprows=6)
df.rename(columns={"# ID": "ID"}, inplace=True)
return df | [
"def",
"load_OGLE3_catalog",
"(",
")",
":",
"with",
"bz2",
".",
"BZ2File",
"(",
"CATALOG_PATH",
")",
"as",
"bz2fp",
",",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
")",
"df",
"=",
"pd",
".",
"read_table",
"(",
"bz2fp",
",",
"skiprows",
"=",
"6",
")",
"df",
".",
"rename",
"(",
"columns",
"=",
"{",
"\"# ID\"",
":",
"\"ID\"",
"}",
",",
"inplace",
"=",
"True",
")",
"return",
"df"
] | Return the full list of variable stars of OGLE-3 as a DataFrame | [
"Return",
"the",
"full",
"list",
"of",
"variable",
"stars",
"of",
"OGLE",
"-",
"3",
"as",
"a",
"DataFrame"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/ogle3.py#L144-L152 |
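The function is just a bz2-compressed, tab-separated read with six header lines skipped; a self-contained sketch of that step with an in-memory stand-in for the real CATALOG_PATH:

import bz2
import io
import pandas as pd

# Stand-in catalog: six comment lines, then a tab-separated header and one row.
raw = "\n".join(["# header"] * 6 + ["# ID\tField", "OGLE-BLG-LPV-232377\tBLG"])
payload = bz2.compress(raw.encode())

with bz2.BZ2File(io.BytesIO(payload)) as fp:
    df = pd.read_table(fp, skiprows=6)
df.rename(columns={"# ID": "ID"}, inplace=True)
print(df.ID.tolist())  # ['OGLE-BLG-LPV-232377']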
carpyncho/feets | feets/datasets/ogle3.py | fetch_OGLE3 | def fetch_OGLE3(ogle3_id, data_home=None,
metadata=None, download_if_missing=True):
"""Retrieve a light curve from OGLE-3 database
Parameters
----------
ogle3_id : str
The id of the source (see: ``load_OGLE3_catalog()`` for
available sources).
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all feets data is stored in '~/feets' subfolders.
metadata : bool | None
If it's True, the row of the dataframe from ``load_OGLE3_catalog()``
with the metadata of the source are added to the result.
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
A Data object.
Examples
--------
.. code-block:: pycon
>>> ds = fetch_OGLE3("OGLE-BLG-LPV-232377")
>>> ds
Data(id='OGLE-BLG-LPV-232377', ds_name='OGLE-III', bands=('I', 'V'))
>>> ds.bands
('I', 'V')
>>> ds.data.I
LightCurve(time[100], magnitude[100], error[100])
>>> ds.data.I.magnitude
array([ 13.816, 13.826, 13.818, 13.812, 13.8 , 13.827, 13.797,
13.82 , 13.804, 13.783, 13.823, 13.8 , 13.84 , 13.817,
13.802, 13.824, 13.822, 13.81 , 13.844, 13.848, 13.813,
13.836, 13.83 , 13.83 , 13.837, 13.811, 13.814, 13.82 ,
13.826, 13.822, 13.821, 13.817, 13.813, 13.809, 13.817,
13.836, 13.804, 13.801, 13.813, 13.823, 13.818, 13.831,
13.833, 13.814, 13.814, 13.812, 13.822, 13.814, 13.818,
13.817, 13.8 , 13.804, 13.799, 13.809, 13.815, 13.846,
13.796, 13.791, 13.804, 13.853, 13.839, 13.816, 13.825,
13.81 , 13.8 , 13.807, 13.819, 13.829, 13.844, 13.84 ,
13.842, 13.818, 13.801, 13.804, 13.814, 13.821, 13.821,
13.822, 13.82 , 13.803, 13.813, 13.826, 13.855, 13.865,
13.854, 13.828, 13.809, 13.828, 13.833, 13.829, 13.816,
13.82 , 13.827, 13.834, 13.811, 13.817, 13.808, 13.834,
13.814, 13.829])
"""
# retrieve the data dir for ogle
store_path = _get_OGLE3_data_home(data_home)
# the data dir for this lightcurve
file_path = os.path.join(store_path, "{}.tar".format(ogle3_id))
# members of the two bands of ogle3
members = {"I": "./{}.I.dat".format(ogle3_id),
"V": "./{}.V.dat".format(ogle3_id)}
# the url of the lightcurve
if download_if_missing:
url = URL.format(ogle3_id)
base.fetch(url, file_path)
bands = []
data = {}
with tarfile.TarFile(file_path) as tfp:
members_names = tfp.getnames()
for band_name, member_name in members.items():
if member_name in members_names:
member = tfp.getmember(member_name)
src = tfp.extractfile(member)
lc = _check_dim(np.loadtxt(src))
data[band_name] = {"time": lc[:, 0],
"magnitude": lc[:, 1],
"error": lc[:, 2]}
bands.append(band_name)
if metadata:
cat = load_OGLE3_catalog()
metadata = cat[cat.ID == ogle3_id].iloc[0].to_dict()
del cat
return Data(
id=ogle3_id, metadata=metadata, ds_name="OGLE-III",
description=DESCR, bands=bands, data=data) | python | def fetch_OGLE3(ogle3_id, data_home=None,
metadata=None, download_if_missing=True):
"""Retrieve a light curve from OGLE-3 database
Parameters
----------
ogle3_id : str
The id of the source (see: ``load_OGLE3_catalog()`` for
available sources).
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all feets data is stored in '~/feets' subfolders.
metadata : bool | None
If it's True, the row of the dataframe from ``load_OGLE3_catalog()``
with the metadata of the source are added to the result.
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
A Data object.
Examples
--------
.. code-block:: pycon
>>> ds = fetch_OGLE3("OGLE-BLG-LPV-232377")
>>> ds
Data(id='OGLE-BLG-LPV-232377', ds_name='OGLE-III', bands=('I', 'V'))
>>> ds.bands
('I', 'V')
>>> ds.data.I
LightCurve(time[100], magnitude[100], error[100])
>>> ds.data.I.magnitude
array([ 13.816, 13.826, 13.818, 13.812, 13.8 , 13.827, 13.797,
13.82 , 13.804, 13.783, 13.823, 13.8 , 13.84 , 13.817,
13.802, 13.824, 13.822, 13.81 , 13.844, 13.848, 13.813,
13.836, 13.83 , 13.83 , 13.837, 13.811, 13.814, 13.82 ,
13.826, 13.822, 13.821, 13.817, 13.813, 13.809, 13.817,
13.836, 13.804, 13.801, 13.813, 13.823, 13.818, 13.831,
13.833, 13.814, 13.814, 13.812, 13.822, 13.814, 13.818,
13.817, 13.8 , 13.804, 13.799, 13.809, 13.815, 13.846,
13.796, 13.791, 13.804, 13.853, 13.839, 13.816, 13.825,
13.81 , 13.8 , 13.807, 13.819, 13.829, 13.844, 13.84 ,
13.842, 13.818, 13.801, 13.804, 13.814, 13.821, 13.821,
13.822, 13.82 , 13.803, 13.813, 13.826, 13.855, 13.865,
13.854, 13.828, 13.809, 13.828, 13.833, 13.829, 13.816,
13.82 , 13.827, 13.834, 13.811, 13.817, 13.808, 13.834,
13.814, 13.829])
"""
# retrieve the data dir for ogle
store_path = _get_OGLE3_data_home(data_home)
# the data dir for this lightcurve
file_path = os.path.join(store_path, "{}.tar".format(ogle3_id))
# members of the two bands of ogle3
members = {"I": "./{}.I.dat".format(ogle3_id),
"V": "./{}.V.dat".format(ogle3_id)}
# the url of the lightcurve
if download_if_missing:
url = URL.format(ogle3_id)
base.fetch(url, file_path)
bands = []
data = {}
with tarfile.TarFile(file_path) as tfp:
members_names = tfp.getnames()
for band_name, member_name in members.items():
if member_name in members_names:
member = tfp.getmember(member_name)
src = tfp.extractfile(member)
lc = _check_dim(np.loadtxt(src))
data[band_name] = {"time": lc[:, 0],
"magnitude": lc[:, 1],
"error": lc[:, 2]}
bands.append(band_name)
if metadata:
cat = load_OGLE3_catalog()
metadata = cat[cat.ID == ogle3_id].iloc[0].to_dict()
del cat
return Data(
id=ogle3_id, metadata=metadata, ds_name="OGLE-III",
description=DESCR, bands=bands, data=data) | [
"def",
"fetch_OGLE3",
"(",
"ogle3_id",
",",
"data_home",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
"download_if_missing",
"=",
"True",
")",
":",
"# retrieve the data dir for ogle",
"store_path",
"=",
"_get_OGLE3_data_home",
"(",
"data_home",
")",
"# the data dir for this lightcurve",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"store_path",
",",
"\"{}.tar\"",
".",
"format",
"(",
"ogle3_id",
")",
")",
"# members of the two bands of ogle3",
"members",
"=",
"{",
"\"I\"",
":",
"\"./{}.I.dat\"",
".",
"format",
"(",
"ogle3_id",
")",
",",
"\"V\"",
":",
"\"./{}.V.dat\"",
".",
"format",
"(",
"ogle3_id",
")",
"}",
"# the url of the lightcurve",
"if",
"download_if_missing",
":",
"url",
"=",
"URL",
".",
"format",
"(",
"ogle3_id",
")",
"base",
".",
"fetch",
"(",
"url",
",",
"file_path",
")",
"bands",
"=",
"[",
"]",
"data",
"=",
"{",
"}",
"with",
"tarfile",
".",
"TarFile",
"(",
"file_path",
")",
"as",
"tfp",
":",
"members_names",
"=",
"tfp",
".",
"getnames",
"(",
")",
"for",
"band_name",
",",
"member_name",
"in",
"members",
".",
"items",
"(",
")",
":",
"if",
"member_name",
"in",
"members_names",
":",
"member",
"=",
"tfp",
".",
"getmember",
"(",
"member_name",
")",
"src",
"=",
"tfp",
".",
"extractfile",
"(",
"member",
")",
"lc",
"=",
"_check_dim",
"(",
"np",
".",
"loadtxt",
"(",
"src",
")",
")",
"data",
"[",
"band_name",
"]",
"=",
"{",
"\"time\"",
":",
"lc",
"[",
":",
",",
"0",
"]",
",",
"\"magnitude\"",
":",
"lc",
"[",
":",
",",
"1",
"]",
",",
"\"error\"",
":",
"lc",
"[",
":",
",",
"2",
"]",
"}",
"bands",
".",
"append",
"(",
"band_name",
")",
"if",
"metadata",
":",
"cat",
"=",
"load_OGLE3_catalog",
"(",
")",
"metadata",
"=",
"cat",
"[",
"cat",
".",
"ID",
"==",
"ogle3_id",
"]",
".",
"iloc",
"[",
"0",
"]",
".",
"to_dict",
"(",
")",
"del",
"cat",
"return",
"Data",
"(",
"id",
"=",
"ogle3_id",
",",
"metadata",
"=",
"metadata",
",",
"ds_name",
"=",
"\"OGLE-III\"",
",",
"description",
"=",
"DESCR",
",",
"bands",
"=",
"bands",
",",
"data",
"=",
"data",
")"
] | Retrieve a light curve from OGLE-3 database
Parameters
----------
ogle3_id : str
The id of the source (see: ``load_OGLE3_catalog()`` for
available sources).
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all feets data is stored in '~/feets' subfolders.
metadata : bool | None
If it's True, the row of the dataframe from ``load_OGLE3_catalog()``
with the metadata of the source is added to the result.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
A Data object.
Examples
--------
.. code-block:: pycon
>>> ds = fetch_OGLE3("OGLE-BLG-LPV-232377")
>>> ds
Data(id='OGLE-BLG-LPV-232377', ds_name='OGLE-III', bands=('I', 'V'))
>>> ds.bands
('I', 'V')
>>> ds.data.I
LightCurve(time[100], magnitude[100], error[100])
>>> ds.data.I.magnitude
array([ 13.816, 13.826, 13.818, 13.812, 13.8 , 13.827, 13.797,
13.82 , 13.804, 13.783, 13.823, 13.8 , 13.84 , 13.817,
13.802, 13.824, 13.822, 13.81 , 13.844, 13.848, 13.813,
13.836, 13.83 , 13.83 , 13.837, 13.811, 13.814, 13.82 ,
13.826, 13.822, 13.821, 13.817, 13.813, 13.809, 13.817,
13.836, 13.804, 13.801, 13.813, 13.823, 13.818, 13.831,
13.833, 13.814, 13.814, 13.812, 13.822, 13.814, 13.818,
13.817, 13.8 , 13.804, 13.799, 13.809, 13.815, 13.846,
13.796, 13.791, 13.804, 13.853, 13.839, 13.816, 13.825,
13.81 , 13.8 , 13.807, 13.819, 13.829, 13.844, 13.84 ,
13.842, 13.818, 13.801, 13.804, 13.814, 13.821, 13.821,
13.822, 13.82 , 13.803, 13.813, 13.826, 13.855, 13.865,
13.854, 13.828, 13.809, 13.828, 13.833, 13.829, 13.816,
13.82 , 13.827, 13.834, 13.811, 13.817, 13.808, 13.834,
13.814, 13.829]) | [
"Retrieve",
"a",
"light",
"curve",
"from",
"OGLE",
"-",
"3",
"database"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/ogle3.py#L155-L246 |
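Beyond the pycon example in the docstring, a short sketch iterating whatever bands came back; it assumes network access, that the star is still served by OGLE, and that ds.data exposes one attribute per band as the docstring's example suggests:

from feets.datasets.ogle3 import fetch_OGLE3  # module path inferred from the record

ds = fetch_OGLE3("OGLE-BLG-LPV-232377")
for band in ds.bands:
    lc = getattr(ds.data, band)
    print(band, lc.time.size, round(float(lc.magnitude.mean()), 3))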
carpyncho/feets | feets/extractors/__init__.py | sort_by_dependencies | def sort_by_dependencies(exts, retry=None):
"""Calculate the Feature Extractor Resolution Order.
"""
sorted_ext, features_from_sorted = [], set()
pending = [(e, 0) for e in exts]
retry = len(exts) * 100 if retry is None else retry
while pending:
ext, cnt = pending.pop(0)
if not isinstance(ext, Extractor) and not issubclass(ext, Extractor):
msg = "Only Extractor instances are allowed. Found {}."
raise TypeError(msg.format(type(ext)))
deps = ext.get_dependencies()
if deps.difference(features_from_sorted):
if cnt + 1 > retry:
msg = "Maximum retries ({}) exceeded while sorting extractor {}."
raise RuntimeError(msg.format(retry, type(ext)))
pending.append((ext, cnt + 1))
else:
sorted_ext.append(ext)
features_from_sorted.update(ext.get_features())
return tuple(sorted_ext) | python | def sort_by_dependencies(exts, retry=None):
"""Calculate the Feature Extractor Resolution Order.
"""
sorted_ext, features_from_sorted = [], set()
pending = [(e, 0) for e in exts]
retry = len(exts) * 100 if retry is None else retry
while pending:
ext, cnt = pending.pop(0)
if not isinstance(ext, Extractor) and not issubclass(ext, Extractor):
msg = "Only Extractor instances are allowed. Found {}."
raise TypeError(msg.format(type(ext)))
deps = ext.get_dependencies()
if deps.difference(features_from_sorted):
if cnt + 1 > retry:
msg = "Maximum retries ({}) exceeded while sorting extractor {}."
raise RuntimeError(msg.format(retry, type(ext)))
pending.append((ext, cnt + 1))
else:
sorted_ext.append(ext)
features_from_sorted.update(ext.get_features())
return tuple(sorted_ext) | [
"def",
"sort_by_dependencies",
"(",
"exts",
",",
"retry",
"=",
"None",
")",
":",
"sorted_ext",
",",
"features_from_sorted",
"=",
"[",
"]",
",",
"set",
"(",
")",
"pending",
"=",
"[",
"(",
"e",
",",
"0",
")",
"for",
"e",
"in",
"exts",
"]",
"retry",
"=",
"len",
"(",
"exts",
")",
"*",
"100",
"if",
"retry",
"is",
"None",
"else",
"retry",
"while",
"pending",
":",
"ext",
",",
"cnt",
"=",
"pending",
".",
"pop",
"(",
"0",
")",
"if",
"not",
"isinstance",
"(",
"ext",
",",
"Extractor",
")",
"and",
"not",
"issubclass",
"(",
"ext",
",",
"Extractor",
")",
":",
"msg",
"=",
"\"Only Extractor instances are allowed. Found {}.\"",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"type",
"(",
"ext",
")",
")",
")",
"deps",
"=",
"ext",
".",
"get_dependencies",
"(",
")",
"if",
"deps",
".",
"difference",
"(",
"features_from_sorted",
")",
":",
"if",
"cnt",
"+",
"1",
">",
"retry",
":",
"msg",
"=",
"\"Maximun retry ({}) to sort achieved from extractor {}.\"",
"raise",
"RuntimeError",
"(",
"msg",
".",
"format",
"(",
"retry",
",",
"type",
"(",
"ext",
")",
")",
")",
"pending",
".",
"append",
"(",
"(",
"ext",
",",
"cnt",
"+",
"1",
")",
")",
"else",
":",
"sorted_ext",
".",
"append",
"(",
"ext",
")",
"features_from_sorted",
".",
"update",
"(",
"ext",
".",
"get_features",
"(",
")",
")",
"return",
"tuple",
"(",
"sorted_ext",
")"
] | Calculate the Feature Extractor Resolution Order. | [
"Calculate",
"the",
"Feature",
"Extractor",
"Resolution",
"Order",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/extractors/__init__.py#L98-L121 |
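To show the retry loop at work without the real Extractor base class (whose isinstance check the function enforces), here is a simplified standalone version of the same resolution idea over plain dicts; it is an illustration, not the library API:

def resolve(exts, retry=None):
    # Same strategy as sort_by_dependencies: requeue an extractor until
    # every feature it depends on has been provided by an earlier one.
    ordered, provided = [], set()
    pending = [(e, 0) for e in exts]
    retry = len(exts) * 100 if retry is None else retry
    while pending:
        ext, cnt = pending.pop(0)
        if ext["needs"] - provided:
            if cnt + 1 > retry:
                raise RuntimeError("unsatisfiable dependency for " + ext["name"])
            pending.append((ext, cnt + 1))
        else:
            ordered.append(ext["name"])
            provided |= ext["gives"]
    return ordered

amplitude = {"name": "Amplitude", "needs": set(), "gives": {"Amplitude"}}
ratio = {"name": "Ratio", "needs": {"Amplitude"}, "gives": {"Ratio"}}
print(resolve([ratio, amplitude]))  # ['Amplitude', 'Ratio']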
carpyncho/feets | paper/reports/fats_vs_feets/lomb.py | getSignificance | def getSignificance(wk1, wk2, nout, ofac):
""" Returns the peak false alarm probabilities.
The lower the probability, the more significant the peak.
"""
expy = exp(-wk2)
effm = 2.0*(nout)/ofac
sig = effm*expy
ind = (sig > 0.01).nonzero()
sig[ind] = 1.0-(1.0-expy[ind])**effm
return sig | python | def getSignificance(wk1, wk2, nout, ofac):
""" Returns the peak false alarm probabilities.
The lower the probability, the more significant the peak.
"""
expy = exp(-wk2)
effm = 2.0*(nout)/ofac
sig = effm*expy
ind = (sig > 0.01).nonzero()
sig[ind] = 1.0-(1.0-expy[ind])**effm
return sig | [
"def",
"getSignificance",
"(",
"wk1",
",",
"wk2",
",",
"nout",
",",
"ofac",
")",
":",
"expy",
"=",
"exp",
"(",
"-",
"wk2",
")",
"effm",
"=",
"2.0",
"*",
"(",
"nout",
")",
"/",
"ofac",
"sig",
"=",
"effm",
"*",
"expy",
"ind",
"=",
"(",
"sig",
">",
"0.01",
")",
".",
"nonzero",
"(",
")",
"sig",
"[",
"ind",
"]",
"=",
"1.0",
"-",
"(",
"1.0",
"-",
"expy",
"[",
"ind",
"]",
")",
"**",
"effm",
"return",
"sig"
] | Returns the peak false alarm probabilities.
The lower the probability, the more significant the peak. | [
"Returns",
"the",
"peak",
"false",
"alarm",
"probabilities",
".",
"The",
"lower",
"the",
"probability",
",",
"the",
"more",
"significant",
"the",
"peak",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/paper/reports/fats_vs_feets/lomb.py#L200-L209 |
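A worked numeric sketch: for nout output frequencies and oversampling factor ofac, higher periodogram powers in wk2 map to smaller false alarm probabilities (the power values below are made up):

import numpy as np
from numpy import exp

def getSignificance(wk1, wk2, nout, ofac):
    expy = exp(-wk2)
    effm = 2.0 * nout / ofac
    sig = effm * expy
    ind = (sig > 0.01).nonzero()
    sig[ind] = 1.0 - (1.0 - expy[ind]) ** effm
    return sig

wk1 = np.linspace(0.01, 1.0, 5)                # frequency grid (unused here)
wk2 = np.array([1.0, 5.0, 10.0, 15.0, 20.0])   # periodogram powers
print(getSignificance(wk1, wk2, nout=5, ofac=4.0))
# Monotonically decreasing: the strongest peaks are the most significant.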
carpyncho/feets | feets/datasets/base.py | fetch | def fetch(url, dest, force=False):
"""Retrieve data from a url and store it into dest.
Parameters
----------
url: str
Link to the remote data
dest: str
Path where the file must be stored
force: bool (default=False)
Overwrite if the file exists
Returns
-------
cached: bool
True if the file already exists
dest: str
The same string of the parameter
"""
cached = True
if force or not os.path.exists(dest):
cached = False
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(dest, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
return cached, dest | python | def fetch(url, dest, force=False):
"""Retrieve data from a url and store it into dest.
Parameters
----------
url: str
Link to the remote data
dest: str
Path where the file must be stored
force: bool (default=False)
Overwrite if the file exists
Returns
-------
cached: bool
True if the file already exists
dest: str
The same string of the parameter
"""
cached = True
if force or not os.path.exists(dest):
cached = False
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(dest, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
return cached, dest | [
"def",
"fetch",
"(",
"url",
",",
"dest",
",",
"force",
"=",
"False",
")",
":",
"cached",
"=",
"True",
"if",
"force",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
":",
"cached",
"=",
"False",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"with",
"open",
"(",
"dest",
",",
"'wb'",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"r",
".",
"iter_content",
"(",
"1024",
")",
":",
"f",
".",
"write",
"(",
"chunk",
")",
"return",
"cached",
",",
"dest"
] | Retrieve data from a url and store it into dest.
Parameters
----------
url: str
Link to the remote data
dest: str
Path where the file must be stored
force: bool (default=False)
Overwrite if the file exists
Returns
-------
cached: bool
True if the file already exists
dest: str
The same string of the parameter | [
"Retrieve",
"data",
"from",
"a",
"url",
"and",
"store",
"it",
"into",
"dest",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/base.py#L99-L129 |
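A usage sketch for the caching download helper; the URL and destination are placeholders. A design quirk worth knowing: a non-200 response still returns cached=False with no file written, so callers may want to check os.path.exists(dest) afterwards:

import os
from feets.datasets.base import fetch  # module path inferred from the record

url = "https://example.com/lightcurve.tar"   # placeholder URL
dest = "/tmp/lightcurve.tar"

cached, path = fetch(url, dest)
print("cache hit" if cached else "downloaded", path, os.path.exists(path))
cached, _ = fetch(url, dest)  # a second call is served from disk if the file exists
print(cached)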
carpyncho/feets | feets/datasets/synthetic.py | create_random | def create_random(magf, magf_params, errf, errf_params,
timef=np.linspace, timef_params=None, size=DEFAULT_SIZE,
id=None, ds_name=DS_NAME, description=DESCRIPTION,
bands=BANDS, metadata=METADATA):
"""Generate a data with any given random function.
Parameters
----------
magf : callable
Function to generate the magnitudes.
magf_params : dict-like
Parameters to feed the `magf` function.
errf : callable
Function to generate the magnitudes.
errf_params : dict-like
Parameters to feed the `errf` function.
timef : callable, (default=numpy.linspace)
Function to generate the times.
timef_params : dict-like or None, (default={"start": 0., "stop": 1.})
Parameters to feed the `timef` callable.
size : int (default=10000)
Number of observations of the light curves
id : object (default=None)
Id of the created data.
ds_name : str (default="feets-synthetic")
Name of the dataset
description : str (default="Lightcurve created with random numbers")
Description of the data
bands : tuple of strings (default=("B", "V"))
The bands to be created
metadata : dict-like or None (default=None)
The metadata of the created data
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> from numpy import random
>>> create_random(
... magf=random.normal, magf_params={"loc": 0, "scale": 1},
... errf=random.normal, errf_params={"loc": 0, "scale": 0.008})
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
"""
timef_params = (
{"start": 0., "stop": 1.}
if timef_params is None else
timef_params.copy())
timef_params.update(num=size)
magf_params = magf_params.copy()
magf_params.update(size=size)
errf_params = errf_params.copy()
errf_params.update(size=size)
data = {}
for band in bands:
data[band] = {
"time": timef(**timef_params),
"magnitude": magf(**magf_params),
"error": errf(**errf_params)}
return Data(
id=id, ds_name=ds_name, description=description,
bands=bands, metadata=metadata, data=data) | python | def create_random(magf, magf_params, errf, errf_params,
timef=np.linspace, timef_params=None, size=DEFAULT_SIZE,
id=None, ds_name=DS_NAME, description=DESCRIPTION,
bands=BANDS, metadata=METADATA):
"""Generate a data with any given random function.
Parameters
----------
magf : callable
Function to generate the magnitudes.
magf_params : dict-like
Parameters to feed the `magf` function.
errf : callable
Function to generate the errors.
errf_params : dict-like
Parameters to feed the `errf` function.
timef : callable, (default=numpy.linspace)
Function to generate the times.
timef_params : dict-like or None, (default={"start": 0., "stop": 1.})
Parameters to feed the `timef` callable.
size : int (default=10000)
Number of observations of the light curves
id : object (default=None)
Id of the created data.
ds_name : str (default="feets-synthetic")
Name of the dataset
description : str (default="Lightcurve created with random numbers")
Description of the data
bands : tuple of strings (default=("B", "V"))
The bands to be created
metadata : dict-like or None (default=None)
The metadata of the created data
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> from numpy import random
>>> create_random(
... magf=random.normal, magf_params={"loc": 0, "scale": 1},
... errf=random.normal, errf_params={"loc": 0, "scale": 0.008})
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
"""
timef_params = (
{"start": 0., "stop": 1.}
if timef_params is None else
timef_params.copy())
timef_params.update(num=size)
magf_params = magf_params.copy()
magf_params.update(size=size)
errf_params = errf_params.copy()
errf_params.update(size=size)
data = {}
for band in bands:
data[band] = {
"time": timef(**timef_params),
"magnitude": magf(**magf_params),
"error": errf(**errf_params)}
return Data(
id=id, ds_name=ds_name, description=description,
bands=bands, metadata=metadata, data=data) | [
"def",
"create_random",
"(",
"magf",
",",
"magf_params",
",",
"errf",
",",
"errf_params",
",",
"timef",
"=",
"np",
".",
"linspace",
",",
"timef_params",
"=",
"None",
",",
"size",
"=",
"DEFAULT_SIZE",
",",
"id",
"=",
"None",
",",
"ds_name",
"=",
"DS_NAME",
",",
"description",
"=",
"DESCRIPTION",
",",
"bands",
"=",
"BANDS",
",",
"metadata",
"=",
"METADATA",
")",
":",
"timef_params",
"=",
"(",
"{",
"\"start\"",
":",
"0.",
",",
"\"stop\"",
":",
"1.",
"}",
"if",
"timef_params",
"is",
"None",
"else",
"timef_params",
".",
"copy",
"(",
")",
")",
"timef_params",
".",
"update",
"(",
"num",
"=",
"size",
")",
"magf_params",
"=",
"magf_params",
".",
"copy",
"(",
")",
"magf_params",
".",
"update",
"(",
"size",
"=",
"size",
")",
"errf_params",
"=",
"errf_params",
".",
"copy",
"(",
")",
"errf_params",
".",
"update",
"(",
"size",
"=",
"size",
")",
"data",
"=",
"{",
"}",
"for",
"band",
"in",
"bands",
":",
"data",
"[",
"band",
"]",
"=",
"{",
"\"time\"",
":",
"timef",
"(",
"*",
"*",
"timef_params",
")",
",",
"\"magnitude\"",
":",
"magf",
"(",
"*",
"*",
"magf_params",
")",
",",
"\"error\"",
":",
"errf",
"(",
"*",
"*",
"errf_params",
")",
"}",
"return",
"Data",
"(",
"id",
"=",
"id",
",",
"ds_name",
"=",
"ds_name",
",",
"description",
"=",
"description",
",",
"bands",
"=",
"bands",
",",
"metadata",
"=",
"metadata",
",",
"data",
"=",
"data",
")"
] | Generate a data with any given random function.
Parameters
----------
magf : callable
Function to generate the magnitudes.
magf_params : dict-like
Parameters to feed the `magf` function.
errf : callable
Function to generate the errors.
errf_params : dict-like
Parameters to feed the `errf` function.
timef : callable, (default=numpy.linspace)
Function to generate the times.
timef_params : dict-like or None, (default={"start": 0., "stop": 1.})
Parameters to feed the `timef` callable.
size : int (default=10000)
Number of observations of the light curves
id : object (default=None)
Id of the created data.
ds_name : str (default="feets-synthetic")
Name of the dataset
description : str (default="Lightcurve created with random numbers")
Description of the data
bands : tuple of strings (default=("B", "V"))
The bands to be created
metadata : dict-like or None (default=None)
The metadata of the created data
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> from numpy import random
>>> create_random(
... magf=random.normal, magf_params={"loc": 0, "scale": 1},
... errf=random.normal, errf_params={"loc": 0, "scale": 0.008})
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V')) | [
"Generate",
"a",
"data",
"with",
"any",
"given",
"random",
"function",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/synthetic.py#L63-L135 |
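The docstring example uses normal/normal generators; a sketch plugging in a different magnitude distribution instead, assuming (as the signature implies) that any callable honoring a size keyword works, and that ds.data exposes band attributes as in the earlier records' examples:

import numpy as np
from feets.datasets.synthetic import create_random  # path inferred from the record

ds = create_random(
    magf=np.random.exponential, magf_params={"scale": 2.0},
    errf=np.random.normal, errf_params={"loc": 0, "scale": 0.01},
    size=500, bands=("I",))
print(ds.data.I.magnitude.shape)  # (500,)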
carpyncho/feets | feets/datasets/synthetic.py | create_normal | def create_normal(mu=0., sigma=1., mu_err=0.,
sigma_err=1., seed=None, **kwargs):
"""Generate a data with magnitudes that follow a Gaussian
distribution. Also their errors are gaussian.
Parameters
----------
mu : float (default=0)
Mean of the gaussian distribution of magnitudes
sigma : float (default=1)
Standard deviation of the gaussian distribution of magnitudes
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = create_normal(0, 1, 0, .0008, seed=42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B
LightCurve(time[10000], magnitude[10000], error[10000])
>>> ds.data.B.time
array([ 0.00000000e+00, 1.00010001e-04, 2.00020002e-04, ...,
9.99799980e-01, 9.99899990e-01, 1.00000000e+00])
"""
random = np.random.RandomState(seed)
return create_random(
magf=random.normal, magf_params={"loc": mu, "scale": sigma},
errf=random.normal, errf_params={"loc": mu_err, "scale": sigma_err},
**kwargs) | python | def create_normal(mu=0., sigma=1., mu_err=0.,
sigma_err=1., seed=None, **kwargs):
"""Generate a data with magnitudes that follow a Gaussian
distribution. Also their errors are gaussian.
Parameters
----------
mu : float (default=0)
Mean of the gaussian distribution of magnitudes
sigma : float (default=1)
Standard deviation of the gaussian distribution of magnitudes
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = create_normal(0, 1, 0, .0008, seed=42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B
LightCurve(time[10000], magnitude[10000], error[10000])
>>> ds.data.B.time
array([ 0.00000000e+00, 1.00010001e-04, 2.00020002e-04, ...,
9.99799980e-01, 9.99899990e-01, 1.00000000e+00])
"""
random = np.random.RandomState(seed)
return create_random(
magf=random.normal, magf_params={"loc": mu, "scale": sigma},
errf=random.normal, errf_params={"loc": mu_err, "scale": sigma_err},
**kwargs) | [
"def",
"create_normal",
"(",
"mu",
"=",
"0.",
",",
"sigma",
"=",
"1.",
",",
"mu_err",
"=",
"0.",
",",
"sigma_err",
"=",
"1.",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"random",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")",
"return",
"create_random",
"(",
"magf",
"=",
"random",
".",
"normal",
",",
"magf_params",
"=",
"{",
"\"loc\"",
":",
"mu",
",",
"\"scale\"",
":",
"sigma",
"}",
",",
"errf",
"=",
"random",
".",
"normal",
",",
"errf_params",
"=",
"{",
"\"loc\"",
":",
"mu_err",
",",
"\"scale\"",
":",
"sigma_err",
"}",
",",
"*",
"*",
"kwargs",
")"
] | Generate a data with magnitudes that follow a Gaussian
distribution. Also their errors are gaussian.
Parameters
----------
mu : float (default=0)
Mean of the gaussian distribution of magnitudes
sigma : float (default=1)
Standard deviation of the gaussian distribution of magnitudes
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = create_normal(0, 1, 0, .0008, seed=42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B
LightCurve(time[10000], magnitude[10000], error[10000])
>>> ds.data.B.time
array([ 0.00000000e+00, 1.00010001e-04, 2.00020002e-04, ...,
9.99799980e-01, 9.99899990e-01, 1.00000000e+00]) | [
"Generate",
"a",
"data",
"with",
"magnitudes",
"that",
"follow",
"a",
"Gaussian",
"distribution",
".",
"Also",
"their",
"errors",
"are",
"gaussian",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/synthetic.py#L138-L190 |
carpyncho/feets | feets/datasets/synthetic.py | create_uniform | def create_uniform(low=0., high=1., mu_err=0., sigma_err=1.,
seed=None, **kwargs):
"""Generate a data with magnitudes that follow a uniform
distribution; the errors instead are gaussian.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_uniform(1, 2, 0, .0008, 42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B.magnitude
array([ 1.37454012, 1.95071431, 1.73199394, ..., 1.94670792,
1.39748799, 1.2171404 ])
"""
random = np.random.RandomState(seed)
return create_random(
magf=random.uniform, magf_params={"low": low, "high": high},
errf=random.normal, errf_params={"loc": mu_err, "scale": sigma_err},
**kwargs) | python | def create_uniform(low=0., high=1., mu_err=0., sigma_err=1.,
seed=None, **kwargs):
"""Generate a data with magnitudes that follow a uniform
distribution; the errors instead are gaussian.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_uniform(1, 2, 0, .0008, 42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B.magnitude
array([ 1.37454012, 1.95071431, 1.73199394, ..., 1.94670792,
1.39748799, 1.2171404 ])
"""
random = np.random.RandomState(seed)
return create_random(
magf=random.uniform, magf_params={"low": low, "high": high},
errf=random.normal, errf_params={"loc": mu_err, "scale": sigma_err},
**kwargs) | [
"def",
"create_uniform",
"(",
"low",
"=",
"0.",
",",
"high",
"=",
"1.",
",",
"mu_err",
"=",
"0.",
",",
"sigma_err",
"=",
"1.",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"random",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")",
"return",
"create_random",
"(",
"magf",
"=",
"random",
".",
"uniform",
",",
"magf_params",
"=",
"{",
"\"low\"",
":",
"low",
",",
"\"high\"",
":",
"high",
"}",
",",
"errf",
"=",
"random",
".",
"normal",
",",
"errf_params",
"=",
"{",
"\"loc\"",
":",
"mu_err",
",",
"\"scale\"",
":",
"sigma_err",
"}",
",",
"*",
"*",
"kwargs",
")"
] | Generate a data with magnitudes that follow a uniform
distribution; the errors instead are gaussian.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_uniform(1, 2, 0, .0008, 42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B.magnitude
array([ 1.37454012, 1.95071431, 1.73199394, ..., 1.94670792,
1.39748799, 1.2171404 ]) | [
"Generate",
"a",
"data",
"with",
"magnitudes",
"that",
"follow",
"a",
"uniform",
"distribution",
";",
"the",
"errors",
"instead",
"are",
"gaussian",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/synthetic.py#L193-L244 |
carpyncho/feets | feets/datasets/synthetic.py | create_periodic | def create_periodic(mu_err=0., sigma_err=1., seed=None, **kwargs):
"""Generate a data with magnitudes with periodic variability
distribution; the errors instead are gaussian.
Parameters
----------
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_periodic(bands=["Ks"])
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('Ks',))
>>> ds.data.Ks.magnitude
array([ 0.95428053, 0.73022685, 0.03005121, ..., -0.26305297,
2.57880082, 1.03376863])
"""
random = np.random.RandomState(seed)
size = kwargs.get("size", DEFAULT_SIZE)
times, mags, errors = [], [], []
for b in kwargs.get("bands", BANDS):
time = 100 * random.rand(size)
error = random.normal(size=size, loc=mu_err, scale=sigma_err)
mag = np.sin(2 * np.pi * time) + error * random.randn(size)
times.append(time)
errors.append(error)
mags.append(mag)
times, mags, errors = iter(times), iter(mags), iter(errors)
return create_random(
magf=lambda **k: next(mags), magf_params={},
errf=lambda **k: next(errors), errf_params={},
timef=lambda **k: next(times), timef_params={}, **kwargs) | python | def create_periodic(mu_err=0., sigma_err=1., seed=None, **kwargs):
"""Generate a data with magnitudes with periodic variability
distribution; the errors instead are gaussian.
Parameters
----------
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_periodic(bands=["Ks"])
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('Ks',))
>>> ds.data.Ks.magnitude
array([ 0.95428053, 0.73022685, 0.03005121, ..., -0.26305297,
2.57880082, 1.03376863])
"""
random = np.random.RandomState(seed)
size = kwargs.get("size", DEFAULT_SIZE)
times, mags, errors = [], [], []
for b in kwargs.get("bands", BANDS):
time = 100 * random.rand(size)
error = random.normal(size=size, loc=mu_err, scale=sigma_err)
mag = np.sin(2 * np.pi * time) + error * random.randn(size)
times.append(time)
errors.append(error)
mags.append(mag)
times, mags, errors = iter(times), iter(mags), iter(errors)
return create_random(
magf=lambda **k: next(mags), magf_params={},
errf=lambda **k: next(errors), errf_params={},
timef=lambda **k: next(times), timef_params={}, **kwargs) | [
"def",
"create_periodic",
"(",
"mu_err",
"=",
"0.",
",",
"sigma_err",
"=",
"1.",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"random",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")",
"size",
"=",
"kwargs",
".",
"get",
"(",
"\"size\"",
",",
"DEFAULT_SIZE",
")",
"times",
",",
"mags",
",",
"errors",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"b",
"in",
"kwargs",
".",
"get",
"(",
"\"bands\"",
",",
"BANDS",
")",
":",
"time",
"=",
"100",
"*",
"random",
".",
"rand",
"(",
"size",
")",
"error",
"=",
"random",
".",
"normal",
"(",
"size",
"=",
"size",
",",
"loc",
"=",
"mu_err",
",",
"scale",
"=",
"sigma_err",
")",
"mag",
"=",
"np",
".",
"sin",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"time",
")",
"+",
"error",
"*",
"random",
".",
"randn",
"(",
"size",
")",
"times",
".",
"append",
"(",
"time",
")",
"errors",
".",
"append",
"(",
"error",
")",
"mags",
".",
"append",
"(",
"mag",
")",
"times",
",",
"mags",
",",
"errors",
"=",
"iter",
"(",
"times",
")",
",",
"iter",
"(",
"mags",
")",
",",
"iter",
"(",
"errors",
")",
"return",
"create_random",
"(",
"magf",
"=",
"lambda",
"*",
"*",
"k",
":",
"next",
"(",
"mags",
")",
",",
"magf_params",
"=",
"{",
"}",
",",
"errf",
"=",
"lambda",
"*",
"*",
"k",
":",
"next",
"(",
"errors",
")",
",",
"errf_params",
"=",
"{",
"}",
",",
"timef",
"=",
"lambda",
"*",
"*",
"k",
":",
"next",
"(",
"times",
")",
",",
"timef_params",
"=",
"{",
"}",
",",
"*",
"*",
"kwargs",
")"
] | Generate a data with magnitudes with periodic variability
distribution; the errors instead are gaussian.
Parameters
----------
mu_err : float (default=0)
Mean of the gaussian distribution of magnitude errors
sigma_err : float (default=1)
Standard deviation of the gaussian distribution of magnitude errors
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_periodic(bands=["Ks"])
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('Ks',))
>>> ds.data.Ks.magnitude
array([ 0.95428053, 0.73022685, 0.03005121, ..., -0.26305297,
2.57880082, 1.03376863]) | [
"Generate",
"a",
"data",
"with",
"magnitudes",
"with",
"periodic",
"variability",
"distribution",
";",
"the",
"errors",
"instead",
"are",
"gaussian",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/synthetic.py#L247-L305 |
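The heart of create_periodic is a noisy sine over uniformly random epochs; that generation step in isolation, with made-up noise parameters:

import numpy as np

random = np.random.RandomState(42)
size, mu_err, sigma_err = 1000, 0.0, 0.005
time = 100 * random.rand(size)
error = random.normal(size=size, loc=mu_err, scale=sigma_err)
mag = np.sin(2 * np.pi * time) + error * random.randn(size)
print(round(mag.min(), 2), round(mag.max(), 2))  # roughly -1..1 plus small noise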
carpyncho/feets | feets/libs/ls_fap.py | pdf_single | def pdf_single(z, N, normalization, dH=1, dK=3):
"""Probability density function for Lomb-Scargle periodogram
Compute the expected probability density function of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
pdf : np.ndarray
The expected probability density function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == 'psd':
return np.exp(-z)
elif normalization == 'standard':
return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1)
elif normalization == 'model':
return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1)
elif normalization == 'log':
return 0.5 * Nk * np.exp(-0.5 * Nk * z)
else:
raise ValueError("normalization='{0}' is not recognized"
"".format(normalization)) | python | def pdf_single(z, N, normalization, dH=1, dK=3):
"""Probability density function for Lomb-Scargle periodogram
Compute the expected probability density function of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
pdf : np.ndarray
The expected probability density function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == 'psd':
return np.exp(-z)
elif normalization == 'standard':
return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1)
elif normalization == 'model':
return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1)
elif normalization == 'log':
return 0.5 * Nk * np.exp(-0.5 * Nk * z)
else:
raise ValueError("normalization='{0}' is not recognized"
"".format(normalization)) | [
"def",
"pdf_single",
"(",
"z",
",",
"N",
",",
"normalization",
",",
"dH",
"=",
"1",
",",
"dK",
"=",
"3",
")",
":",
"if",
"dK",
"-",
"dH",
"!=",
"2",
":",
"raise",
"NotImplementedError",
"(",
"\"Degrees of freedom != 2\"",
")",
"Nk",
"=",
"N",
"-",
"dK",
"if",
"normalization",
"==",
"'psd'",
":",
"return",
"np",
".",
"exp",
"(",
"-",
"z",
")",
"elif",
"normalization",
"==",
"'standard'",
":",
"return",
"0.5",
"*",
"Nk",
"*",
"(",
"1",
"-",
"z",
")",
"**",
"(",
"0.5",
"*",
"Nk",
"-",
"1",
")",
"elif",
"normalization",
"==",
"'model'",
":",
"return",
"0.5",
"*",
"Nk",
"*",
"(",
"1",
"+",
"z",
")",
"**",
"(",
"-",
"0.5",
"*",
"Nk",
"-",
"1",
")",
"elif",
"normalization",
"==",
"'log'",
":",
"return",
"0.5",
"*",
"Nk",
"*",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"Nk",
"*",
"z",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"normalization='{0}' is not recognized\"",
"\"\"",
".",
"format",
"(",
"normalization",
")",
")"
] | Probability density function for Lomb-Scargle periodogram
Compute the expected probability density function of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
pdf : np.ndarray
The expected probability density function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008) | [
"Probability",
"density",
"function",
"for",
"Lomb",
"-",
"Scargle",
"periodogram"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/libs/ls_fap.py#L31-L78 |
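A quick sanity check of the 'standard' branch: with N data points the density is 0.5*(N-3)*(1-z)**(0.5*(N-3)-1), which should integrate to one over z in [0, 1]. A sketch, with the import path inferred from the feets/libs/ls_fap.py path shown:

import numpy as np
from feets.libs.ls_fap import pdf_single

z = np.linspace(0.0, 1.0, 200001)
pdf = pdf_single(z, N=50, normalization='standard')
print(pdf.sum() * (z[1] - z[0]))  # ~1.0: a proper density on [0, 1]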
carpyncho/feets | feets/libs/ls_fap.py | cdf_single | def cdf_single(z, N, normalization, dH=1, dK=3):
"""Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK) | python | def cdf_single(z, N, normalization, dH=1, dK=3):
"""Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK) | [
"def",
"cdf_single",
"(",
"z",
",",
"N",
",",
"normalization",
",",
"dH",
"=",
"1",
",",
"dK",
"=",
"3",
")",
":",
"return",
"1",
"-",
"fap_single",
"(",
"z",
",",
"N",
",",
"normalization",
"=",
"normalization",
",",
"dH",
"=",
"dH",
",",
"dK",
"=",
"dK",
")"
] | Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
the periodogram value
N : int
the number of data points from which the periodogram was computed
normalization : string
The periodogram normalization. Must be one of
['standard', 'model', 'log', 'psd']
dH, dK : integers (optional)
The number of parameters in the null hypothesis and the model
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008) | [
"Cumulative",
"distribution",
"for",
"the",
"Lomb",
"-",
"Scargle",
"periodogram"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/libs/ls_fap.py#L132-L166 |
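Since cdf_single is defined as 1 - fap_single, the cumulative distribution and the single-frequency false alarm probability must sum to one at any z; a sketch (fap_single lives in the same module):

from feets.libs.ls_fap import cdf_single, fap_single

z, N = 0.3, 50
total = cdf_single(z, N, 'standard') + fap_single(z, N, normalization='standard')
print(total)  # 1.0 by construction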
carpyncho/feets | feets/libs/ls_fap.py | tau_davies | def tau_davies(Z, fmax, t, y, dy, normalization='standard', dH=1, dK=3):
"""tau factor for estimating Davies bound (Baluev 2008, Table 1)"""
N = len(t)
NH = N - dH # DOF for null hypothesis
NK = N - dK # DOF for periodic hypothesis
Dt = _weighted_var(t, dy)
Teff = np.sqrt(4 * np.pi * Dt)
W = fmax * Teff
if normalization == 'psd':
# 'psd' normalization is same as Baluev's z
return W * np.exp(-Z) * np.sqrt(Z)
elif normalization == 'standard':
# 'standard' normalization is Z = 2/NH * z_1
return (_gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1))
* np.sqrt(0.5 * NH * Z))
elif normalization == 'model':
# 'model' normalization is Z = 2/NK * z_2
return (_gamma(NK) * W * (1 + Z) ** (-0.5 * NK)
* np.sqrt(0.5 * NK * Z))
elif normalization == 'log':
# 'log' normalization is Z = 2/NK * z_3
return (_gamma(NK) * W * np.exp(-0.5 * Z * (NK - 0.5))
* np.sqrt(NK * np.sinh(0.5 * Z)))
else:
raise NotImplementedError("normalization={0}".format(normalization)) | python | def tau_davies(Z, fmax, t, y, dy, normalization='standard', dH=1, dK=3):
"""tau factor for estimating Davies bound (Baluev 2008, Table 1)"""
N = len(t)
NH = N - dH # DOF for null hypothesis
NK = N - dK # DOF for periodic hypothesis
Dt = _weighted_var(t, dy)
Teff = np.sqrt(4 * np.pi * Dt)
W = fmax * Teff
if normalization == 'psd':
# 'psd' normalization is same as Baluev's z
return W * np.exp(-Z) * np.sqrt(Z)
elif normalization == 'standard':
# 'standard' normalization is Z = 2/NH * z_1
return (_gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1))
* np.sqrt(0.5 * NH * Z))
elif normalization == 'model':
# 'model' normalization is Z = 2/NK * z_2
return (_gamma(NK) * W * (1 + Z) ** (-0.5 * NK)
* np.sqrt(0.5 * NK * Z))
elif normalization == 'log':
# 'log' normalization is Z = 2/NK * z_3
return (_gamma(NK) * W * np.exp(-0.5 * Z * (NK - 0.5))
* np.sqrt(NK * np.sinh(0.5 * Z)))
else:
raise NotImplementedError("normalization={0}".format(normalization)) | [
"def",
"tau_davies",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
"=",
"'standard'",
",",
"dH",
"=",
"1",
",",
"dK",
"=",
"3",
")",
":",
"N",
"=",
"len",
"(",
"t",
")",
"NH",
"=",
"N",
"-",
"dH",
"# DOF for null hypothesis",
"NK",
"=",
"N",
"-",
"dK",
"# DOF for periodic hypothesis",
"Dt",
"=",
"_weighted_var",
"(",
"t",
",",
"dy",
")",
"Teff",
"=",
"np",
".",
"sqrt",
"(",
"4",
"*",
"np",
".",
"pi",
"*",
"Dt",
")",
"W",
"=",
"fmax",
"*",
"Teff",
"if",
"normalization",
"==",
"'psd'",
":",
"# 'psd' normalization is same as Baluev's z",
"return",
"W",
"*",
"np",
".",
"exp",
"(",
"-",
"Z",
")",
"*",
"np",
".",
"sqrt",
"(",
"Z",
")",
"elif",
"normalization",
"==",
"'standard'",
":",
"# 'standard' normalization is Z = 2/NH * z_1",
"return",
"(",
"_gamma",
"(",
"NH",
")",
"*",
"W",
"*",
"(",
"1",
"-",
"Z",
")",
"**",
"(",
"0.5",
"*",
"(",
"NK",
"-",
"1",
")",
")",
"*",
"np",
".",
"sqrt",
"(",
"0.5",
"*",
"NH",
"*",
"Z",
")",
")",
"elif",
"normalization",
"==",
"'model'",
":",
"# 'model' normalization is Z = 2/NK * z_2",
"return",
"(",
"_gamma",
"(",
"NK",
")",
"*",
"W",
"*",
"(",
"1",
"+",
"Z",
")",
"**",
"(",
"-",
"0.5",
"*",
"NK",
")",
"*",
"np",
".",
"sqrt",
"(",
"0.5",
"*",
"NK",
"*",
"Z",
")",
")",
"elif",
"normalization",
"==",
"'log'",
":",
"# 'log' normalization is Z = 2/NK * z_3",
"return",
"(",
"_gamma",
"(",
"NK",
")",
"*",
"W",
"*",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"Z",
"*",
"(",
"NK",
"-",
"0.5",
")",
")",
"*",
"np",
".",
"sqrt",
"(",
"NK",
"*",
"np",
".",
"sinh",
"(",
"0.5",
"*",
"Z",
")",
")",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"normalization={0}\"",
".",
"format",
"(",
"normalization",
")",
")"
] | tau factor for estimating Davies bound (Baluev 2008, Table 1) | [
"tau",
"factor",
"for",
"estimating",
"Davies",
"bound",
"(",
"Baluev",
"2008",
"Table",
"1",
")"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/libs/ls_fap.py#L169-L193 |
carpyncho/feets | feets/libs/ls_fap.py | fap_simple | def fap_simple(Z, fmax, t, y, dy, normalization='standard'):
"""False Alarm Probability based on estimated number of indep frequencies
"""
N = len(t)
T = max(t) - min(t)
N_eff = fmax * T
p_s = cdf_single(Z, N, normalization=normalization)
return 1 - p_s ** N_eff | python | def fap_simple(Z, fmax, t, y, dy, normalization='standard'):
"""False Alarm Probability based on estimated number of indep frequencies
"""
N = len(t)
T = max(t) - min(t)
N_eff = fmax * T
p_s = cdf_single(Z, N, normalization=normalization)
return 1 - p_s ** N_eff | [
"def",
"fap_simple",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
"=",
"'standard'",
")",
":",
"N",
"=",
"len",
"(",
"t",
")",
"T",
"=",
"max",
"(",
"t",
")",
"-",
"min",
"(",
"t",
")",
"N_eff",
"=",
"fmax",
"*",
"T",
"p_s",
"=",
"cdf_single",
"(",
"Z",
",",
"N",
",",
"normalization",
"=",
"normalization",
")",
"return",
"1",
"-",
"p_s",
"**",
"N_eff"
] | False Alarm Probability based on estimated number of indep frequencies | [
"False",
"Alarm",
"Probability",
"based",
"on",
"estimated",
"number",
"of",
"indep",
"frequencies"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/libs/ls_fap.py#L196-L204 |
carpyncho/feets | feets/libs/ls_fap.py | fap_davies | def fap_davies(Z, fmax, t, y, dy, normalization='standard'):
"""Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
"""
N = len(t)
fap_s = fap_single(Z, N, normalization=normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return fap_s + tau | python | def fap_davies(Z, fmax, t, y, dy, normalization='standard'):
"""Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
"""
N = len(t)
fap_s = fap_single(Z, N, normalization=normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return fap_s + tau | [
"def",
"fap_davies",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
"=",
"'standard'",
")",
":",
"N",
"=",
"len",
"(",
"t",
")",
"fap_s",
"=",
"fap_single",
"(",
"Z",
",",
"N",
",",
"normalization",
"=",
"normalization",
")",
"tau",
"=",
"tau_davies",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
"=",
"normalization",
")",
"return",
"fap_s",
"+",
"tau"
] | Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008) | [
"Davies",
"upper",
"-",
"bound",
"to",
"the",
"false",
"alarm",
"probability"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/libs/ls_fap.py#L207-L215 |
carpyncho/feets | feets/libs/ls_fap.py | fap_baluev | def fap_baluev(Z, fmax, t, y, dy, normalization='standard'):
"""Alias-free approximation to false alarm probability
(Eqn 6 of Baluev 2008)
"""
cdf = cdf_single(Z, len(t), normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return 1 - cdf * np.exp(-tau) | python | def fap_baluev(Z, fmax, t, y, dy, normalization='standard'):
"""Alias-free approximation to false alarm probability
(Eqn 6 of Baluev 2008)
"""
cdf = cdf_single(Z, len(t), normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return 1 - cdf * np.exp(-tau) | [
"def",
"fap_baluev",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
"=",
"'standard'",
")",
":",
"cdf",
"=",
"cdf_single",
"(",
"Z",
",",
"len",
"(",
"t",
")",
",",
"normalization",
")",
"tau",
"=",
"tau_davies",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
"=",
"normalization",
")",
"return",
"1",
"-",
"cdf",
"*",
"np",
".",
"exp",
"(",
"-",
"tau",
")"
] | Alias-free approximation to false alarm probability
(Eqn 6 of Baluev 2008) | [
"Alias",
"-",
"free",
"approximation",
"to",
"false",
"alarm",
"probability"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/libs/ls_fap.py#L218-L225 |
carpyncho/feets | feets/libs/ls_fap.py | false_alarm_probability | def false_alarm_probability(Z, fmax, t, y, dy, normalization,
method='baluev', method_kwds=None):
"""Approximate the False Alarm Probability
Parameters
----------
TODO
Returns
-------
TODO
"""
if method not in METHODS:
raise ValueError("Unrecognized method: {0}".format(method))
method = METHODS[method]
method_kwds = method_kwds or {}
return method(Z, fmax, t, y, dy, normalization, **method_kwds) | python | def false_alarm_probability(Z, fmax, t, y, dy, normalization,
method='baluev', method_kwds=None):
"""Approximate the False Alarm Probability
Parameters
----------
TODO
Returns
-------
TODO
"""
if method not in METHODS:
raise ValueError("Unrecognized method: {0}".format(method))
method = METHODS[method]
method_kwds = method_kwds or {}
return method(Z, fmax, t, y, dy, normalization, **method_kwds) | [
"def",
"false_alarm_probability",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
",",
"method",
"=",
"'baluev'",
",",
"method_kwds",
"=",
"None",
")",
":",
"if",
"method",
"not",
"in",
"METHODS",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized method: {0}\"",
".",
"format",
"(",
"method",
")",
")",
"method",
"=",
"METHODS",
"[",
"method",
"]",
"method_kwds",
"=",
"method_kwds",
"or",
"{",
"}",
"return",
"method",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
",",
"*",
"*",
"method_kwds",
")"
] | Approximate the False Alarm Probability
Parameters
----------
TODO
Returns
-------
TODO | [
"Approximate",
"the",
"False",
"Alarm",
"Probability"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/libs/ls_fap.py#L250-L267 |
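A hedged dispatch sketch for false_alarm_probability; the method name 'baluev' is assumed to be a key of the METHODS table referenced above:

import numpy as np
from feets.libs.ls_fap import false_alarm_probability  # assumed import path

rng = np.random.RandomState(42)
t = np.sort(100 * rng.rand(60))                       # irregular observation times
y = np.sin(2 * np.pi * t / 7.0) + 0.1 * rng.randn(60)
dy = np.full_like(y, 0.1)                             # per-point uncertainties

# Z: a peak height from a Lomb-Scargle periodogram of (t, y, dy);
# fmax: the highest frequency searched when that peak was found.
fap = false_alarm_probability(Z=0.6, fmax=2.0, t=t, y=y, dy=dy,
                              normalization='standard', method='baluev')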
carpyncho/feets | feets/datasets/macho.py | load_MACHO | def load_MACHO(macho_id):
"""lightcurve of 2 bands (R, B) from the MACHO survey.
Notes
-----
The files are gathered from the original FATS project tutorial:
https://github.com/isadoranun/tsfeat
"""
tarfname = "{}.tar.bz2".format(macho_id)
tarpath = os.path.join(DATA_PATH, tarfname)
rpath = "{}.R.mjd".format(macho_id)
bpath = "{}.B.mjd".format(macho_id)
with tarfile.open(tarpath, mode="r:bz2") as tf:
rlc = np.loadtxt(tf.extractfile(rpath))
blc = np.loadtxt(tf.extractfile(bpath))
bands = ("R", "B")
data = {
"R": {
"time": rlc[:, 0],
"magnitude": rlc[:, 1],
"error": rlc[:, 2]},
"B": {
"time": blc[:, 0],
"magnitude": blc[:, 1],
"error": blc[:, 2]}
}
descr = ("The files are gathered from the original FATS project "
"tutorial: https://github.com/isadoranun/tsfeat")
return Data(
id=macho_id, metadata=None, ds_name="MACHO",
description=descr, bands=bands, data=data) | python | def load_MACHO(macho_id):
"""lightcurve of 2 bands (R, B) from the MACHO survey.
Notes
-----
The files are gathered from the original FATS project tutorial:
https://github.com/isadoranun/tsfeat
"""
tarfname = "{}.tar.bz2".format(macho_id)
tarpath = os.path.join(DATA_PATH, tarfname)
rpath = "{}.R.mjd".format(macho_id)
bpath = "{}.B.mjd".format(macho_id)
with tarfile.open(tarpath, mode="r:bz2") as tf:
rlc = np.loadtxt(tf.extractfile(rpath))
blc = np.loadtxt(tf.extractfile(bpath))
bands = ("R", "B")
data = {
"R": {
"time": rlc[:, 0],
"magnitude": rlc[:, 1],
"error": rlc[:, 2]},
"B": {
"time": blc[:, 0],
"magnitude": blc[:, 1],
"error": blc[:, 2]}
}
descr = ("The files are gathered from the original FATS project "
"tutorial: https://github.com/isadoranun/tsfeat")
return Data(
id=macho_id, metadata=None, ds_name="MACHO",
description=descr, bands=bands, data=data) | [
"def",
"load_MACHO",
"(",
"macho_id",
")",
":",
"tarfname",
"=",
"\"{}.tar.bz2\"",
".",
"format",
"(",
"macho_id",
")",
"tarpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DATA_PATH",
",",
"tarfname",
")",
"rpath",
"=",
"\"{}.R.mjd\"",
".",
"format",
"(",
"macho_id",
")",
"bpath",
"=",
"\"{}.B.mjd\"",
".",
"format",
"(",
"macho_id",
")",
"with",
"tarfile",
".",
"open",
"(",
"tarpath",
",",
"mode",
"=",
"\"r:bz2\"",
")",
"as",
"tf",
":",
"rlc",
"=",
"np",
".",
"loadtxt",
"(",
"tf",
".",
"extractfile",
"(",
"rpath",
")",
")",
"blc",
"=",
"np",
".",
"loadtxt",
"(",
"tf",
".",
"extractfile",
"(",
"bpath",
")",
")",
"bands",
"=",
"(",
"\"R\"",
",",
"\"B\"",
")",
"data",
"=",
"{",
"\"R\"",
":",
"{",
"\"time\"",
":",
"rlc",
"[",
":",
",",
"0",
"]",
",",
"\"magnitude\"",
":",
"rlc",
"[",
":",
",",
"1",
"]",
",",
"\"error\"",
":",
"rlc",
"[",
":",
",",
"2",
"]",
"}",
",",
"\"B\"",
":",
"{",
"\"time\"",
":",
"blc",
"[",
":",
",",
"0",
"]",
",",
"\"magnitude\"",
":",
"blc",
"[",
":",
",",
"1",
"]",
",",
"\"error\"",
":",
"blc",
"[",
":",
",",
"2",
"]",
"}",
"}",
"descr",
"=",
"(",
"\"The files are gathered from the original FATS project \"",
"\"tutorial: https://github.com/isadoranun/tsfeat\"",
")",
"return",
"Data",
"(",
"id",
"=",
"macho_id",
",",
"metadata",
"=",
"None",
",",
"ds_name",
"=",
"\"MACHO\"",
",",
"description",
"=",
"descr",
",",
"bands",
"=",
"bands",
",",
"data",
"=",
"data",
")"
] | Lightcurve of 2 bands (R, B) from the MACHO survey.
Notes
-----
The files are gathered from the original FATS project tutorial:
https://github.com/isadoranun/tsfeat | [
"lightcurve",
"of",
"2",
"bands",
"(",
"R",
"B",
")",
"from",
"the",
"MACHO",
"survey",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/datasets/macho.py#L84-L119 |
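A usage sketch for load_MACHO; the id is hypothetical (any id with a matching <id>.tar.bz2 under DATA_PATH would do), and it is assumed that the returned Data container exposes its fields as attributes:

from feets.datasets.macho import load_MACHO

lc = load_MACHO("lc_1.3444.614")   # hypothetical MACHO id
r = lc.data["R"]                   # per-band dict: time / magnitude / error
print(len(r["time"]), r["magnitude"].mean())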
carpyncho/feets | doc/source/JSAnimation/IPython_display.py | anim_to_html | def anim_to_html(anim, fps=None, embed_frames=True, default_mode='loop'):
"""Generate HTML representation of the animation"""
if fps is None and hasattr(anim, '_interval'):
# Convert interval in ms to frames per second
fps = 1000. / anim._interval
plt.close(anim._fig)
if hasattr(anim, "_html_representation"):
return anim._html_representation
else:
# tempfile can't be used here: we need a filename, and this
# fails on windows. Instead, we use a custom filename generator
#with tempfile.NamedTemporaryFile(suffix='.html') as f:
with _NameOnlyTemporaryFile(suffix='.html') as f:
anim.save(f.name, writer=HTMLWriter(fps=fps,
embed_frames=embed_frames,
default_mode=default_mode))
html = open(f.name).read()
anim._html_representation = html
return html | python | def anim_to_html(anim, fps=None, embed_frames=True, default_mode='loop'):
"""Generate HTML representation of the animation"""
if fps is None and hasattr(anim, '_interval'):
# Convert interval in ms to frames per second
fps = 1000. / anim._interval
plt.close(anim._fig)
if hasattr(anim, "_html_representation"):
return anim._html_representation
else:
# tempfile can't be used here: we need a filename, and this
# fails on windows. Instead, we use a custom filename generator
#with tempfile.NamedTemporaryFile(suffix='.html') as f:
with _NameOnlyTemporaryFile(suffix='.html') as f:
anim.save(f.name, writer=HTMLWriter(fps=fps,
embed_frames=embed_frames,
default_mode=default_mode))
html = open(f.name).read()
anim._html_representation = html
return html | [
"def",
"anim_to_html",
"(",
"anim",
",",
"fps",
"=",
"None",
",",
"embed_frames",
"=",
"True",
",",
"default_mode",
"=",
"'loop'",
")",
":",
"if",
"fps",
"is",
"None",
"and",
"hasattr",
"(",
"anim",
",",
"'_interval'",
")",
":",
"# Convert interval in ms to frames per second",
"fps",
"=",
"1000.",
"/",
"anim",
".",
"_interval",
"plt",
".",
"close",
"(",
"anim",
".",
"_fig",
")",
"if",
"hasattr",
"(",
"anim",
",",
"\"_html_representation\"",
")",
":",
"return",
"anim",
".",
"_html_representation",
"else",
":",
"# tempfile can't be used here: we need a filename, and this",
"# fails on windows. Instead, we use a custom filename generator",
"#with tempfile.NamedTemporaryFile(suffix='.html') as f:",
"with",
"_NameOnlyTemporaryFile",
"(",
"suffix",
"=",
"'.html'",
")",
"as",
"f",
":",
"anim",
".",
"save",
"(",
"f",
".",
"name",
",",
"writer",
"=",
"HTMLWriter",
"(",
"fps",
"=",
"fps",
",",
"embed_frames",
"=",
"embed_frames",
",",
"default_mode",
"=",
"default_mode",
")",
")",
"html",
"=",
"open",
"(",
"f",
".",
"name",
")",
".",
"read",
"(",
")",
"anim",
".",
"_html_representation",
"=",
"html",
"return",
"html"
] | Generate HTML representation of the animation | [
"Generate",
"HTML",
"representation",
"of",
"the",
"animation"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/doc/source/JSAnimation/IPython_display.py#L60-L80 |
carpyncho/feets | doc/source/JSAnimation/IPython_display.py | display_animation | def display_animation(anim, **kwargs):
"""Display the animation with an IPython HTML object"""
from IPython.display import HTML
return HTML(anim_to_html(anim, **kwargs)) | python | def display_animation(anim, **kwargs):
"""Display the animation with an IPython HTML object"""
from IPython.display import HTML
return HTML(anim_to_html(anim, **kwargs)) | [
"def",
"display_animation",
"(",
"anim",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"IPython",
".",
"display",
"import",
"HTML",
"return",
"HTML",
"(",
"anim_to_html",
"(",
"anim",
",",
"*",
"*",
"kwargs",
")",
")"
] | Display the animation with an IPython HTML object | [
"Display",
"the",
"animation",
"with",
"an",
"IPython",
"HTML",
"object"
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/doc/source/JSAnimation/IPython_display.py#L83-L86 |
carpyncho/feets | feets/utils.py | indent | def indent(s, c=" ", n=4):
"""Indent the string 's' with the character 'c', 'n' times.
Parameters
----------
s : str
String to indent
c : str, default space
String to use as indentation
n : int, default 4
Number of chars to indent
"""
indentation = c * n
return "\n".join([indentation + l for l in s.splitlines()]) | python | def indent(s, c=" ", n=4):
"""Indent the string 's' with the character 'c', 'n' times.
Parameters
----------
s : str
String to indent
c : str, default space
String to use as indentation
n : int, default 4
Number of chars to indent
"""
indentation = c * n
return "\n".join([indentation + l for l in s.splitlines()]) | [
"def",
"indent",
"(",
"s",
",",
"c",
"=",
"\" \"",
",",
"n",
"=",
"4",
")",
":",
"indentation",
"=",
"c",
"*",
"n",
"return",
"\"\\n\"",
".",
"join",
"(",
"[",
"indentation",
"+",
"l",
"for",
"l",
"in",
"s",
".",
"splitlines",
"(",
")",
"]",
")"
] | Indent the string 's' with the character 'c', 'n' times.
Parameters
----------
s : str
String to indent
c : str, default space
String to use as indentation
n : int, default 4
Number of chars to indent | [
"Indent",
"the",
"string",
"s",
"with",
"the",
"character",
"c",
"n",
"times",
"."
] | train | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/utils.py#L37-L52 |
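Two quick checks of indent(), matching its documented defaults:

assert indent("a\nb") == "    a\n    b"  # default: four spaces per line
assert indent("x", c="-", n=2) == "--x"  # custom indent char and count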
andersinno/hayes | hayes/ext/date_tail.py | generate_date_tail_boost_queries | def generate_date_tail_boost_queries(
field, timedeltas_and_boosts, relative_to=None):
"""
Generate a list of RangeQueries usable to boost the scores of more
recent documents.
Example:
```
queries = generate_date_tail_boost_queries("publish_date", {
timedelta(days=90): 1,
timedelta(days=30): 2,
timedelta(days=10): 4,
})
s = Search(BoolQuery(must=..., should=queries))
# ...
```
Refs:
http://elasticsearch-users.115913.n3.nabble.com/Boost-recent-documents-td2126107.html#a2126317
:param field: field name to generate the queries against
:param timedeltas_and_boosts:
dictionary of timedelta instances and their boosts. Negative or
zero boost values will not generate rangequeries.
:type timedeltas_and_boosts: dict[timedelta, float]
:param relative_to: Relative to this datetime (may be None for "now")
:return: List of RangeQueries
"""
relative_to = relative_to or datetime.datetime.now()
times = {}
for timedelta, boost in timedeltas_and_boosts.items():
date = (relative_to - timedelta).date()
times[date] = boost
times = sorted(times.items(), key=lambda i: i[0])
queries = []
for (x, time) in enumerate(times):
kwargs = {"field": field, "boost": time[1]}
if x == 0:
kwargs["lte"] = time[0]
else:
kwargs["gt"] = time[0]
if x < len(times) - 1:
kwargs["lte"] = times[x + 1][0]
if kwargs["boost"] > 0:
q = RangeQuery()
q.add_range(**kwargs)
queries.append(q)
return queries | python | def generate_date_tail_boost_queries(
field, timedeltas_and_boosts, relative_to=None):
"""
Generate a list of RangeQueries usable to boost the scores of more
recent documents.
Example:
```
queries = generate_date_tail_boost_queries("publish_date", {
timedelta(days=90): 1,
timedelta(days=30): 2,
timedelta(days=10): 4,
})
s = Search(BoolQuery(must=..., should=queries))
# ...
```
Refs:
http://elasticsearch-users.115913.n3.nabble.com/Boost-recent-documents-td2126107.html#a2126317
:param field: field name to generate the queries against
:param timedeltas_and_boosts:
dictionary of timedelta instances and their boosts. Negative or
zero boost values will not generate rangequeries.
:type timedeltas_and_boosts: dict[timedelta, float]
:param relative_to: Relative to this datetime (may be None for "now")
:return: List of RangeQueries
"""
relative_to = relative_to or datetime.datetime.now()
times = {}
for timedelta, boost in timedeltas_and_boosts.items():
date = (relative_to - timedelta).date()
times[date] = boost
times = sorted(times.items(), key=lambda i: i[0])
queries = []
for (x, time) in enumerate(times):
kwargs = {"field": field, "boost": time[1]}
if x == 0:
kwargs["lte"] = time[0]
else:
kwargs["gt"] = time[0]
if x < len(times) - 1:
kwargs["lte"] = times[x + 1][0]
if kwargs["boost"] > 0:
q = RangeQuery()
q.add_range(**kwargs)
queries.append(q)
return queries | [
"def",
"generate_date_tail_boost_queries",
"(",
"field",
",",
"timedeltas_and_boosts",
",",
"relative_to",
"=",
"None",
")",
":",
"relative_to",
"=",
"relative_to",
"or",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"times",
"=",
"{",
"}",
"for",
"timedelta",
",",
"boost",
"in",
"timedeltas_and_boosts",
".",
"items",
"(",
")",
":",
"date",
"=",
"(",
"relative_to",
"-",
"timedelta",
")",
".",
"date",
"(",
")",
"times",
"[",
"date",
"]",
"=",
"boost",
"times",
"=",
"sorted",
"(",
"times",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"i",
":",
"i",
"[",
"0",
"]",
")",
"queries",
"=",
"[",
"]",
"for",
"(",
"x",
",",
"time",
")",
"in",
"enumerate",
"(",
"times",
")",
":",
"kwargs",
"=",
"{",
"\"field\"",
":",
"field",
",",
"\"boost\"",
":",
"time",
"[",
"1",
"]",
"}",
"if",
"x",
"==",
"0",
":",
"kwargs",
"[",
"\"lte\"",
"]",
"=",
"time",
"[",
"0",
"]",
"else",
":",
"kwargs",
"[",
"\"gt\"",
"]",
"=",
"time",
"[",
"0",
"]",
"if",
"x",
"<",
"len",
"(",
"times",
")",
"-",
"1",
":",
"kwargs",
"[",
"\"lte\"",
"]",
"=",
"times",
"[",
"x",
"+",
"1",
"]",
"[",
"0",
"]",
"if",
"kwargs",
"[",
"\"boost\"",
"]",
">",
"0",
":",
"q",
"=",
"RangeQuery",
"(",
")",
"q",
".",
"add_range",
"(",
"*",
"*",
"kwargs",
")",
"queries",
".",
"append",
"(",
"q",
")",
"return",
"queries"
] | Generate a list of RangeQueries usable to boost the scores of more
recent documents.
Example:
```
queries = generate_date_tail_boost_queries("publish_date", {
timedelta(days=90): 1,
timedelta(days=30): 2,
timedelta(days=10): 4,
})
s = Search(BoolQuery(must=..., should=queries))
# ...
```
Refs:
http://elasticsearch-users.115913.n3.nabble.com/Boost-recent-documents-td2126107.html#a2126317
:param field: field name to generate the queries against
:param timedeltas_and_boosts:
dictionary of timedelta instances and their boosts. Negative or
zero boost values will not generate rangequeries.
:type timedeltas_and_boosts: dict[timedelta, float]
:param relative_to: Relative to this datetime (may be None for "now")
:return: List of RangeQueries | [
"Generate",
"a",
"list",
"of",
"RangeQueries",
"usable",
"to",
"boost",
"the",
"scores",
"of",
"more",
"recent",
"documents",
"."
] | train | https://github.com/andersinno/hayes/blob/88d1f6b3e0cd993d9d9fc136506bd01165fea64b/hayes/ext/date_tail.py#L7-L58 |
andersinno/hayes | hayes/search/queries.py | _clean_dict | def _clean_dict(in_dict):
"""
Recursively remove None-valued items from dict.
:param in_dict:
:return:
"""
out = {}
for key, value in iteritems(in_dict):
if isinstance(value, dict):
value = _clean_dict(value)
if value is None:
continue
out[key] = value
return out | python | def _clean_dict(in_dict):
"""
Recursively remove None-valued items from dict.
:param in_dict:
:return:
"""
out = {}
for key, value in iteritems(in_dict):
if isinstance(value, dict):
value = _clean_dict(value)
if value is None:
continue
out[key] = value
return out | [
"def",
"_clean_dict",
"(",
"in_dict",
")",
":",
"out",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"in_dict",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"value",
"=",
"_clean_dict",
"(",
"value",
")",
"if",
"value",
"is",
"None",
":",
"continue",
"out",
"[",
"key",
"]",
"=",
"value",
"return",
"out"
] | Recursively remove None-valued items from dict.
:param in_dict:
:return: | [
"Recursively",
"remove",
"None",
"-",
"valued",
"items",
"from",
"dict",
".",
":",
"param",
"in_dict",
":",
":",
"return",
":"
] | train | https://github.com/andersinno/hayes/blob/88d1f6b3e0cd993d9d9fc136506bd01165fea64b/hayes/search/queries.py#L9-L22 |
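A small check of the recursion in _clean_dict; note that falsy non-None values such as 0 survive:

d = {"a": 1, "b": None, "c": {"d": None, "e": 0}}
assert _clean_dict(d) == {"a": 1, "c": {"e": 0}}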
andersinno/hayes | hayes/utils.py | batch_iterable | def batch_iterable(iterable, count):
"""
Yield batches of `count` items from the given iterable.
>>> for x in batch([1, 2, 3, 4, 5, 6, 7], 3):
>>> print(x)
[1, 2, 3]
[4, 5, 6]
[7]
:param iterable: An iterable
:type iterable: Iterable
:param count: Number of items per batch. If <= 0, nothing is yielded.
:type count: int
:return: Iterable of lists of items
:rtype: Iterable[list[object]]
"""
if count <= 0:
return
current_batch = []
for item in iterable:
if len(current_batch) == count:
yield current_batch
current_batch = []
current_batch.append(item)
if current_batch:
yield current_batch | python | def batch_iterable(iterable, count):
"""
Yield batches of `count` items from the given iterable.
>>> for x in batch([1, 2, 3, 4, 5, 6, 7], 3):
>>> print(x)
[1, 2, 3]
[4, 5, 6]
[7]
:param iterable: An iterable
:type iterable: Iterable
:param count: Number of items per batch. If <= 0, nothing is yielded.
:type count: int
:return: Iterable of lists of items
:rtype: Iterable[list[object]]
"""
if count <= 0:
return
current_batch = []
for item in iterable:
if len(current_batch) == count:
yield current_batch
current_batch = []
current_batch.append(item)
if current_batch:
yield current_batch | [
"def",
"batch_iterable",
"(",
"iterable",
",",
"count",
")",
":",
"if",
"count",
"<=",
"0",
":",
"return",
"current_batch",
"=",
"[",
"]",
"for",
"item",
"in",
"iterable",
":",
"if",
"len",
"(",
"current_batch",
")",
"==",
"count",
":",
"yield",
"current_batch",
"current_batch",
"=",
"[",
"]",
"current_batch",
".",
"append",
"(",
"item",
")",
"if",
"current_batch",
":",
"yield",
"current_batch"
] | Yield batches of `count` items from the given iterable.
>>> for x in batch([1, 2, 3, 4, 5, 6, 7], 3):
>>> print(x)
[1, 2, 3]
[4, 5, 6]
[7]
:param iterable: An iterable
:type iterable: Iterable
:param count: Number of items per batch. If <= 0, nothing is yielded.
:type count: int
:return: Iterable of lists of items
:rtype: Iterable[list[object]] | [
"Yield",
"batches",
"of",
"count",
"items",
"from",
"the",
"given",
"iterable",
"."
] | train | https://github.com/andersinno/hayes/blob/88d1f6b3e0cd993d9d9fc136506bd01165fea64b/hayes/utils.py#L31-L57 |
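The docstring example, plus the non-positive-count edge case:

assert list(batch_iterable(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]
assert list(batch_iterable(range(7), 0)) == []  # count <= 0 yields nothing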
andersinno/hayes | hayes/models.py | DjangoResultSet.get_objects | def get_objects(self, queryset=None):
"""
Return an iterator of Django model objects in Elasticsearch order,
optionally using the given Django queryset. If no queryset is
given, a default queryset (Model.objects.all) is used.
:param queryset: Optional queryset to filter in.
:return:
"""
if not self:
return
if not queryset:
queryset = self[0].django_model.objects.all()
pks = [res.pk for res in self if res.django_model == queryset.model]
object_map = dict((text_type(obj.pk), obj)
for obj in queryset.filter(pk__in=pks))
result_map = dict((res.pk, res)
for res in self if res.pk in object_map)
for pk in pks:
obj = object_map.get(pk)
if obj:
obj._es = result_map.get(pk)
try:
obj._score = obj._es._meta.score
except AttributeError:
obj._score = None
yield obj | python | def get_objects(self, queryset=None):
"""
Return an iterator of Django model objects in Elasticsearch order,
optionally using the given Django queryset. If no queryset is
given, a default queryset (Model.objects.all) is used.
:param queryset: Optional queryset to filter in.
:return:
"""
if not self:
return
if not queryset:
queryset = self[0].django_model.objects.all()
pks = [res.pk for res in self if res.django_model == queryset.model]
object_map = dict((text_type(obj.pk), obj)
for obj in queryset.filter(pk__in=pks))
result_map = dict((res.pk, res)
for res in self if res.pk in object_map)
for pk in pks:
obj = object_map.get(pk)
if obj:
obj._es = result_map.get(pk)
try:
obj._score = obj._es._meta.score
except AttributeError:
obj._score = None
yield obj | [
"def",
"get_objects",
"(",
"self",
",",
"queryset",
"=",
"None",
")",
":",
"if",
"not",
"self",
":",
"return",
"if",
"not",
"queryset",
":",
"queryset",
"=",
"self",
"[",
"0",
"]",
".",
"django_model",
".",
"objects",
".",
"all",
"(",
")",
"pks",
"=",
"[",
"res",
".",
"pk",
"for",
"res",
"in",
"self",
"if",
"res",
".",
"django_model",
"==",
"queryset",
".",
"model",
"]",
"object_map",
"=",
"dict",
"(",
"(",
"text_type",
"(",
"obj",
".",
"pk",
")",
",",
"obj",
")",
"for",
"obj",
"in",
"queryset",
".",
"filter",
"(",
"pk__in",
"=",
"pks",
")",
")",
"result_map",
"=",
"dict",
"(",
"(",
"res",
".",
"pk",
",",
"res",
")",
"for",
"res",
"in",
"self",
"if",
"res",
".",
"pk",
"in",
"object_map",
")",
"for",
"pk",
"in",
"pks",
":",
"obj",
"=",
"object_map",
".",
"get",
"(",
"pk",
")",
"if",
"obj",
":",
"obj",
".",
"_es",
"=",
"result_map",
".",
"get",
"(",
"pk",
")",
"try",
":",
"obj",
".",
"_score",
"=",
"obj",
".",
"_es",
".",
"_meta",
".",
"score",
"except",
"AttributeError",
":",
"obj",
".",
"_score",
"=",
"None",
"yield",
"obj"
] | Return an iterator of Django model objects in Elasticsearch order,
optionally using the given Django queryset. If no queryset is
given, a default queryset (Model.objects.all) is used.
:param queryset: Optional queryset to filter in.
:return: | [
"Return",
"an",
"iterator",
"of",
"Django",
"model",
"objects",
"in",
"Elasticsearch",
"order",
"optionally",
"using",
"the",
"given",
"Django",
"queryset",
".",
"If",
"no",
"queryset",
"is",
"given",
"a",
"default",
"queryset",
"(",
"Model",
".",
"objects",
".",
"all",
")",
"is",
"used",
"."
] | train | https://github.com/andersinno/hayes/blob/88d1f6b3e0cd993d9d9fc136506bd01165fea64b/hayes/models.py#L23-L51 |
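A hedged Django sketch: `results` stands for a DjangoResultSet returned by a Hayes search, and the queryset is any ordinary Django queryset over the indexed model:

def print_hits(results, queryset=None):
    # objects come back in Elasticsearch order, with the hit attached
    for obj in results.get_objects(queryset):
        print(obj.pk, obj._score)  # _score copied from the ES hit metadata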
andersinno/hayes | hayes/ext/word_gatherer.py | WordGatherer.reset | def reset(self):
""" Reset target collection (rebuild index).
"""
self.connection.rebuild_index(
self.index, coll_name=self.target_coll_name) | python | def reset(self):
""" Reset target collection (rebuild index).
"""
self.connection.rebuild_index(
self.index, coll_name=self.target_coll_name) | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"connection",
".",
"rebuild_index",
"(",
"self",
".",
"index",
",",
"coll_name",
"=",
"self",
".",
"target_coll_name",
")"
] | Reset target collection (rebuild index). | [
"Reset",
"target",
"collection",
"(",
"rebuild",
"index",
")",
"."
] | train | https://github.com/andersinno/hayes/blob/88d1f6b3e0cd993d9d9fc136506bd01165fea64b/hayes/ext/word_gatherer.py#L63-L67 |
andersinno/hayes | hayes/ext/word_gatherer.py | WordGatherer.update | def update(self, index, fields, tokenizer=default_tokenizer, cutoff=1):
"""
Update (upsert) the wordgatherer collection.
:param index: Source index.
:param fields: Fields to read.
:param tokenizer: Tokenizer callable. Should split unicode to words
    :param cutoff: Ignore words with this many or fewer occurrences.
"""
counts_by_uid = defaultdict(Counter)
for word, count in self._gather_words(
index, fields, tokenizer=tokenizer).items():
uid = hashlib.sha1(unicodedata.normalize(
"NFKD", word.lower()).encode("UTF-8")).hexdigest()
counts_by_uid[uid][word] += count
for uid, word_to_count in counts_by_uid.items():
word = word_to_count.most_common(1)[0][0]
count = sum(word_to_count.values())
if count <= cutoff:
continue
self.connection.session.post(
"/%s/%s/%s/_update" % (self.target_coll_name,
self.target_type, uid),
data={
"script": "ctx._source.count += count",
"params": {"count": count},
"upsert": {"word": word, "count": count}
}) | python | def update(self, index, fields, tokenizer=default_tokenizer, cutoff=1):
"""
Update (upsert) the wordgatherer collection.
:param index: Source index.
:param fields: Fields to read.
:param tokenizer: Tokenizer callable. Should split unicode to words
    :param cutoff: Ignore words with this many or fewer occurrences.
"""
counts_by_uid = defaultdict(Counter)
for word, count in self._gather_words(
index, fields, tokenizer=tokenizer).items():
uid = hashlib.sha1(unicodedata.normalize(
"NFKD", word.lower()).encode("UTF-8")).hexdigest()
counts_by_uid[uid][word] += count
for uid, word_to_count in counts_by_uid.items():
word = word_to_count.most_common(1)[0][0]
count = sum(word_to_count.values())
if count <= cutoff:
continue
self.connection.session.post(
"/%s/%s/%s/_update" % (self.target_coll_name,
self.target_type, uid),
data={
"script": "ctx._source.count += count",
"params": {"count": count},
"upsert": {"word": word, "count": count}
}) | [
"def",
"update",
"(",
"self",
",",
"index",
",",
"fields",
",",
"tokenizer",
"=",
"default_tokenizer",
",",
"cutoff",
"=",
"1",
")",
":",
"counts_by_uid",
"=",
"defaultdict",
"(",
"Counter",
")",
"for",
"word",
",",
"count",
"in",
"self",
".",
"_gather_words",
"(",
"index",
",",
"fields",
",",
"tokenizer",
"=",
"tokenizer",
")",
".",
"items",
"(",
")",
":",
"uid",
"=",
"hashlib",
".",
"sha1",
"(",
"unicodedata",
".",
"normalize",
"(",
"\"NFKD\"",
",",
"word",
".",
"lower",
"(",
")",
")",
".",
"encode",
"(",
"\"UTF-8\"",
")",
")",
".",
"hexdigest",
"(",
")",
"counts_by_uid",
"[",
"uid",
"]",
"[",
"word",
"]",
"+=",
"count",
"for",
"uid",
",",
"word_to_count",
"in",
"counts_by_uid",
".",
"items",
"(",
")",
":",
"word",
"=",
"word_to_count",
".",
"most_common",
"(",
"1",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"count",
"=",
"sum",
"(",
"word_to_count",
".",
"values",
"(",
")",
")",
"if",
"count",
"<=",
"cutoff",
":",
"continue",
"self",
".",
"connection",
".",
"session",
".",
"post",
"(",
"\"/%s/%s/%s/_update\"",
"%",
"(",
"self",
".",
"target_coll_name",
",",
"self",
".",
"target_type",
",",
"uid",
")",
",",
"data",
"=",
"{",
"\"script\"",
":",
"\"ctx._source.count += count\"",
",",
"\"params\"",
":",
"{",
"\"count\"",
":",
"count",
"}",
",",
"\"upsert\"",
":",
"{",
"\"word\"",
":",
"word",
",",
"\"count\"",
":",
"count",
"}",
"}",
")"
] | Update (upsert) the wordgatherer collection.
:param index: Source index.
:param fields: Fields to read.
:param tokenizer: Tokenizer callable. Should split unicode to words
:param cutoff: Ignore words with this many or fewer occurrences. | [
"Update",
"(",
"upsert",
")",
"the",
"wordgatherer",
"collection",
".",
":",
"param",
"index",
":",
"Source",
"index",
".",
":",
"param",
"fields",
":",
"Fields",
"to",
"read",
".",
":",
"param",
"tokenizer",
":",
"Tokenizer",
"callable",
".",
"Should",
"split",
"unicode",
"to",
"words",
":",
"param",
"cutoff",
":",
"Ignore",
"words",
"with",
"less",
"than",
"this",
"many",
"occurrences",
"."
] | train | https://github.com/andersinno/hayes/blob/88d1f6b3e0cd993d9d9fc136506bd01165fea64b/hayes/ext/word_gatherer.py#L89-L116 |
andersinno/hayes | hayes/ext/word_gatherer.py | WordGatherer.search | def search(self, word, limit=30):
"""
Search for a word within the wordgatherer collection.
:param word: Word to search for.
:param limit: Maximum number of results to return.
"""
search = Search(PrefixQuery("word", word), sort={"count": "desc"})
for doc in self.connection.search(
search, indexes=[self.index], count=limit):
yield (doc["word"], doc["count"]) | python | def search(self, word, limit=30):
"""
Search for a word within the wordgatherer collection.
:param word: Word to search for.
:param limit: Maximum number of results to return.
"""
search = Search(PrefixQuery("word", word), sort={"count": "desc"})
for doc in self.connection.search(
search, indexes=[self.index], count=limit):
yield (doc["word"], doc["count"]) | [
"def",
"search",
"(",
"self",
",",
"word",
",",
"limit",
"=",
"30",
")",
":",
"search",
"=",
"Search",
"(",
"PrefixQuery",
"(",
"\"word\"",
",",
"word",
")",
",",
"sort",
"=",
"{",
"\"count\"",
":",
"\"desc\"",
"}",
")",
"for",
"doc",
"in",
"self",
".",
"connection",
".",
"search",
"(",
"search",
",",
"indexes",
"=",
"[",
"self",
".",
"index",
"]",
",",
"count",
"=",
"limit",
")",
":",
"yield",
"(",
"doc",
"[",
"\"word\"",
"]",
",",
"doc",
"[",
"\"count\"",
"]",
")"
] | Search for a word within the wordgatherer collection.
:param word: Word to search for.
:param limit: Maximum number of results to return. | [
"Search",
"for",
"a",
"word",
"within",
"the",
"wordgatherer",
"collection",
".",
":",
"param",
"word",
":",
"Word",
"to",
"search",
"for",
".",
":",
"param",
"limit",
":",
"Maximum",
"number",
"of",
"results",
"to",
"return",
"."
] | train | https://github.com/andersinno/hayes/blob/88d1f6b3e0cd993d9d9fc136506bd01165fea64b/hayes/ext/word_gatherer.py#L118-L127 |
ipython/ipynb | ipynb/utils.py | validate_nb | def validate_nb(nb):
"""
Validate that given notebook JSON is importable
- Check for nbformat == 4
- Check that language is python
Do not re-implement nbformat here :D
"""
if nb['nbformat'] != 4:
return False
language_name = (nb.get('metadata', {})
.get('kernelspec', {})
.get('language', '').lower())
return language_name == 'python' | python | def validate_nb(nb):
"""
Validate that given notebook JSON is importable
- Check for nbformat == 4
- Check that language is python
Do not re-implement nbformat here :D
"""
if nb['nbformat'] != 4:
return False
language_name = (nb.get('metadata', {})
.get('kernelspec', {})
.get('language', '').lower())
return language_name == 'python' | [
"def",
"validate_nb",
"(",
"nb",
")",
":",
"if",
"nb",
"[",
"'nbformat'",
"]",
"!=",
"4",
":",
"return",
"False",
"language_name",
"=",
"(",
"nb",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'kernelspec'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'language'",
",",
"''",
")",
".",
"lower",
"(",
")",
")",
"return",
"language_name",
"==",
"'python'"
] | Validate that given notebook JSON is importable
- Check for nbformat == 4
- Check that language is python
Do not re-implement nbformat here :D | [
"Validate",
"that",
"given",
"notebook",
"JSON",
"is",
"importable"
] | train | https://github.com/ipython/ipynb/blob/2f1526a447104d7d7b97e2a8ab66bee8d2da90ad/ipynb/utils.py#L25-L40 |
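Two quick checks of validate_nb:

nb = {"nbformat": 4,
      "metadata": {"kernelspec": {"language": "Python"}}}
assert validate_nb(nb)                                   # case-insensitive language match
assert not validate_nb({"nbformat": 3, "metadata": {}})  # only nbformat 4 passes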
ipython/ipynb | ipynb/utils.py | filter_ast | def filter_ast(module_ast):
"""
Filters a given module ast, removing non-whitelisted nodes
It allows only the following top level items:
- imports
- function definitions
- class definitions
- top level assignments where all the targets on the LHS are all caps
"""
def node_predicate(node):
"""
Return true if given node is whitelisted
"""
for an in ALLOWED_NODES:
if isinstance(node, an):
return True
# Recurse through Assign node LHS targets when an id is not specified,
# otherwise check that the id is uppercase
if isinstance(node, ast.Assign):
return all([node_predicate(t) for t in node.targets if not hasattr(t, 'id')]) \
and all([t.id.isupper() for t in node.targets if hasattr(t, 'id')])
return False
module_ast.body = [n for n in module_ast.body if node_predicate(n)]
return module_ast | python | def filter_ast(module_ast):
"""
Filters a given module ast, removing non-whitelisted nodes
It allows only the following top level items:
- imports
- function definitions
- class definitions
- top level assignments where all the targets on the LHS are all caps
"""
def node_predicate(node):
"""
Return true if given node is whitelisted
"""
for an in ALLOWED_NODES:
if isinstance(node, an):
return True
# Recurse through Assign node LHS targets when an id is not specified,
# otherwise check that the id is uppercase
if isinstance(node, ast.Assign):
return all([node_predicate(t) for t in node.targets if not hasattr(t, 'id')]) \
and all([t.id.isupper() for t in node.targets if hasattr(t, 'id')])
return False
module_ast.body = [n for n in module_ast.body if node_predicate(n)]
return module_ast | [
"def",
"filter_ast",
"(",
"module_ast",
")",
":",
"def",
"node_predicate",
"(",
"node",
")",
":",
"\"\"\"\n Return true if given node is whitelisted\n \"\"\"",
"for",
"an",
"in",
"ALLOWED_NODES",
":",
"if",
"isinstance",
"(",
"node",
",",
"an",
")",
":",
"return",
"True",
"# Recurse through Assign node LHS targets when an id is not specified,",
"# otherwise check that the id is uppercase",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Assign",
")",
":",
"return",
"all",
"(",
"[",
"node_predicate",
"(",
"t",
")",
"for",
"t",
"in",
"node",
".",
"targets",
"if",
"not",
"hasattr",
"(",
"t",
",",
"'id'",
")",
"]",
")",
"and",
"all",
"(",
"[",
"t",
".",
"id",
".",
"isupper",
"(",
")",
"for",
"t",
"in",
"node",
".",
"targets",
"if",
"hasattr",
"(",
"t",
",",
"'id'",
")",
"]",
")",
"return",
"False",
"module_ast",
".",
"body",
"=",
"[",
"n",
"for",
"n",
"in",
"module_ast",
".",
"body",
"if",
"node_predicate",
"(",
"n",
")",
"]",
"return",
"module_ast"
] | Filters a given module ast, removing non-whitelisted nodes
It allows only the following top level items:
- imports
- function definitions
- class definitions
- top level assignments where all the targets on the LHS are all caps | [
"Filters",
"a",
"given",
"module",
"ast",
"removing",
"non",
"-",
"whitelisted",
"nodes"
] | train | https://github.com/ipython/ipynb/blob/2f1526a447104d7d7b97e2a8ab66bee8d2da90ad/ipynb/utils.py#L43-L70 |
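A sketch of filter_ast, assuming ALLOWED_NODES covers imports and definitions as in the ipynb package:

import ast

tree = filter_ast(ast.parse("import os\nX = 1\ny = 2\ndef f(): pass\n"))
assert [type(n).__name__ for n in tree.body] == ["Import", "Assign", "FunctionDef"]
# the lowercase assignment 'y = 2' was filtered out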
ipython/ipynb | ipynb/utils.py | code_from_ipynb | def code_from_ipynb(nb, markdown=False):
"""
Get the code for a given notebook
nb is passed in as a dictionary that's a parsed ipynb file
"""
code = PREAMBLE
for cell in nb['cells']:
if cell['cell_type'] == 'code':
# transform the input to executable Python
code += ''.join(cell['source'])
if cell['cell_type'] == 'markdown':
code += '\n# ' + '# '.join(cell['source'])
# We want a blank newline after each cell's output.
# And the last line of source doesn't have a newline usually.
code += '\n\n'
return code | python | def code_from_ipynb(nb, markdown=False):
"""
Get the code for a given notebook
nb is passed in as a dictionary that's a parsed ipynb file
"""
code = PREAMBLE
for cell in nb['cells']:
if cell['cell_type'] == 'code':
# transform the input to executable Python
code += ''.join(cell['source'])
if cell['cell_type'] == 'markdown':
code += '\n# ' + '# '.join(cell['source'])
# We want a blank newline after each cell's output.
# And the last line of source doesn't have a newline usually.
code += '\n\n'
return code | [
"def",
"code_from_ipynb",
"(",
"nb",
",",
"markdown",
"=",
"False",
")",
":",
"code",
"=",
"PREAMBLE",
"for",
"cell",
"in",
"nb",
"[",
"'cells'",
"]",
":",
"if",
"cell",
"[",
"'cell_type'",
"]",
"==",
"'code'",
":",
"# transform the input to executable Python",
"code",
"+=",
"''",
".",
"join",
"(",
"cell",
"[",
"'source'",
"]",
")",
"if",
"cell",
"[",
"'cell_type'",
"]",
"==",
"'markdown'",
":",
"code",
"+=",
"'\\n# '",
"+",
"'# '",
".",
"join",
"(",
"cell",
"[",
"'source'",
"]",
")",
"# We want a blank newline after each cell's output.",
"# And the last line of source doesn't have a newline usually.",
"code",
"+=",
"'\\n\\n'",
"return",
"code"
] | Get the code for a given notebook
nb is passed in as a dictionary that's a parsed ipynb file | [
"Get",
"the",
"code",
"for",
"a",
"given",
"notebook"
] | train | https://github.com/ipython/ipynb/blob/2f1526a447104d7d7b97e2a8ab66bee8d2da90ad/ipynb/utils.py#L72-L88 |
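A sketch of code_from_ipynb on a hand-built notebook dict (PREAMBLE is defined elsewhere in ipynb.utils):

nb = {"cells": [
    {"cell_type": "markdown", "source": ["Setup\n"]},
    {"cell_type": "code", "source": ["x = 1\n", "print(x)"]},
]}
print(code_from_ipynb(nb))  # PREAMBLE, then '# Setup', then the code cell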
ipython/ipynb | ipynb/fs/finder.py | FSFinder._get_paths | def _get_paths(self, fullname):
"""
Generate ordered list of paths we should look for fullname module in
"""
real_path = os.path.join(*fullname[len(self.package_prefix):].split('.'))
for base_path in sys.path:
if base_path == '':
# Empty string means process's cwd
base_path = os.getcwd()
path = os.path.join(base_path, real_path)
yield path + '.ipynb'
yield path + '.py'
yield os.path.join(path, '__init__.ipynb')
yield os.path.join(path, '__init__.py') | python | def _get_paths(self, fullname):
"""
Generate ordered list of paths we should look for fullname module in
"""
real_path = os.path.join(*fullname[len(self.package_prefix):].split('.'))
for base_path in sys.path:
if base_path == '':
# Empty string means process's cwd
base_path = os.getcwd()
path = os.path.join(base_path, real_path)
yield path + '.ipynb'
yield path + '.py'
yield os.path.join(path, '__init__.ipynb')
yield os.path.join(path, '__init__.py') | [
"def",
"_get_paths",
"(",
"self",
",",
"fullname",
")",
":",
"real_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"fullname",
"[",
"len",
"(",
"self",
".",
"package_prefix",
")",
":",
"]",
".",
"split",
"(",
"'.'",
")",
")",
"for",
"base_path",
"in",
"sys",
".",
"path",
":",
"if",
"base_path",
"==",
"''",
":",
"# Empty string means process's cwd",
"base_path",
"=",
"os",
".",
"getcwd",
"(",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"real_path",
")",
"yield",
"path",
"+",
"'.ipynb'",
"yield",
"path",
"+",
"'.py'",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'__init__.ipynb'",
")",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'__init__.py'",
")"
] | Generate ordered list of paths we should look for fullname module in | [
"Generate",
"ordered",
"list",
"of",
"paths",
"we",
"should",
"look",
"for",
"fullname",
"module",
"in"
] | train | https://github.com/ipython/ipynb/blob/2f1526a447104d7d7b97e2a8ab66bee8d2da90ad/ipynb/fs/finder.py#L24-L37 |
ipython/ipynb | ipynb/fs/finder.py | FSFinder.find_spec | def find_spec(self, fullname, path, target=None):
"""
Claims modules that are under ipynb.fs
"""
if fullname.startswith(self.package_prefix):
for path in self._get_paths(fullname):
if os.path.exists(path):
return ModuleSpec(
name=fullname,
loader=self.loader_class(fullname, path),
origin=path,
is_package=(path.endswith('__init__.ipynb') or path.endswith('__init__.py')),
) | python | def find_spec(self, fullname, path, target=None):
"""
Claims modules that are under ipynb.fs
"""
if fullname.startswith(self.package_prefix):
for path in self._get_paths(fullname):
if os.path.exists(path):
return ModuleSpec(
name=fullname,
loader=self.loader_class(fullname, path),
origin=path,
is_package=(path.endswith('__init__.ipynb') or path.endswith('__init__.py')),
) | [
"def",
"find_spec",
"(",
"self",
",",
"fullname",
",",
"path",
",",
"target",
"=",
"None",
")",
":",
"if",
"fullname",
".",
"startswith",
"(",
"self",
".",
"package_prefix",
")",
":",
"for",
"path",
"in",
"self",
".",
"_get_paths",
"(",
"fullname",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"ModuleSpec",
"(",
"name",
"=",
"fullname",
",",
"loader",
"=",
"self",
".",
"loader_class",
"(",
"fullname",
",",
"path",
")",
",",
"origin",
"=",
"path",
",",
"is_package",
"=",
"(",
"path",
".",
"endswith",
"(",
"'__init__.ipynb'",
")",
"or",
"path",
".",
"endswith",
"(",
"'__init__.py'",
")",
")",
",",
")"
] | Claims modules that are under ipynb.fs | [
"Claims",
"modules",
"that",
"are",
"under",
"ipynb",
".",
"fs"
] | train | https://github.com/ipython/ipynb/blob/2f1526a447104d7d7b97e2a8ab66bee8d2da90ad/ipynb/fs/finder.py#L39-L51 |
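An end-user sketch of what the finder enables, assuming the package registers FSFinder under a prefix such as ipynb.fs.full; 'Notebook' is a hypothetical Notebook.ipynb on sys.path:

import ipynb.fs.full.Notebook as nbmod  # hypothetical notebook module
print(nbmod.__file__)  # resolved to Notebook.ipynb via find_spec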
sixty-north/python-transducers | transducer/_util.py | coroutine | def coroutine(func):
"""Decorator for priming generator-based coroutines.
"""
@wraps(func)
def start(*args, **kwargs):
g = func(*args, **kwargs)
next(g)
return g
return start | python | def coroutine(func):
"""Decorator for priming generator-based coroutines.
"""
@wraps(func)
def start(*args, **kwargs):
g = func(*args, **kwargs)
next(g)
return g
return start | [
"def",
"coroutine",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"start",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"g",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"next",
"(",
"g",
")",
"return",
"g",
"return",
"start"
] | Decorator for priming generator-based coroutines. | [
"Decorator",
"for",
"priming",
"generator",
"-",
"based",
"coroutines",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/_util.py#L16-L25 |
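A minimal demonstration of the coroutine decorator:

@coroutine
def printer():
    while True:
        item = (yield)
        print("got", item)

p = printer()
p.send(1)  # no priming next(p) needed; the decorator already advanced it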
sixty-north/python-transducers | examples/cooperative.py | ticker | async def ticker(delay, to):
"""Yield numbers from 0 to `to` every `delay` seconds."""
for i in range(to):
yield i
await asyncio.sleep(delay) | python | async def ticker(delay, to):
"""Yield numbers from 0 to `to` every `delay` seconds."""
for i in range(to):
yield i
await asyncio.sleep(delay) | [
"async",
"def",
"ticker",
"(",
"delay",
",",
"to",
")",
":",
"for",
"i",
"in",
"range",
"(",
"to",
")",
":",
"yield",
"i",
"await",
"asyncio",
".",
"sleep",
"(",
"delay",
")"
] | Yield numbers from 0 to `to` every `delay` seconds. | [
"Yield",
"numbers",
"from",
"0",
"to",
"to",
"every",
"delay",
"seconds",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/examples/cooperative.py#L7-L11 |
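A consumption sketch for ticker (Python 3.7+ for asyncio.run):

import asyncio

async def main():
    async for i in ticker(0.5, 3):  # prints 0, 1, 2, half a second apart
        print(i)

asyncio.run(main())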
sixty-north/python-transducers | transducer/sinks.py | rprint | def rprint(sep='\n', end='\n', file=sys.stdout, flush=False):
"""A coroutine sink which prints received items stdout
Args:
sep: Optional separator to be printed between received items.
end: Optional terminator to be printed after the last item.
file: Optional stream to which to print.
flush: Optional flag to force flushing after each item.
"""
try:
first_item = (yield)
file.write(str(first_item))
if flush:
file.flush()
while True:
item = (yield)
file.write(sep)
file.write(str(item))
if flush:
file.flush()
except GeneratorExit:
file.write(end)
if flush:
file.flush() | python | def rprint(sep='\n', end='\n', file=sys.stdout, flush=False):
"""A coroutine sink which prints received items stdout
Args:
sep: Optional separator to be printed between received items.
end: Optional terminator to be printed after the last item.
file: Optional stream to which to print.
flush: Optional flag to force flushing after each item.
"""
try:
first_item = (yield)
file.write(str(first_item))
if flush:
file.flush()
while True:
item = (yield)
file.write(sep)
file.write(str(item))
if flush:
file.flush()
except GeneratorExit:
file.write(end)
if flush:
file.flush() | [
"def",
"rprint",
"(",
"sep",
"=",
"'\\n'",
",",
"end",
"=",
"'\\n'",
",",
"file",
"=",
"sys",
".",
"stdout",
",",
"flush",
"=",
"False",
")",
":",
"try",
":",
"first_item",
"=",
"(",
"yield",
")",
"file",
".",
"write",
"(",
"str",
"(",
"first_item",
")",
")",
"if",
"flush",
":",
"file",
".",
"flush",
"(",
")",
"while",
"True",
":",
"item",
"=",
"(",
"yield",
")",
"file",
".",
"write",
"(",
"sep",
")",
"file",
".",
"write",
"(",
"str",
"(",
"item",
")",
")",
"if",
"flush",
":",
"file",
".",
"flush",
"(",
")",
"except",
"GeneratorExit",
":",
"file",
".",
"write",
"(",
"end",
")",
"if",
"flush",
":",
"file",
".",
"flush",
"(",
")"
] | A coroutine sink which prints received items stdout
Args:
sep: Optional separator to be printed between received items.
end: Optional terminator to be printed after the last item.
file: Optional stream to which to print.
flush: Optional flag to force flushing after each item. | [
"A",
"coroutine",
"sink",
"which",
"prints",
"received",
"items",
"stdout"
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/sinks.py#L14-L37 |
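A usage sketch for rprint, assuming it is primed by this module's coroutine decorator so that send() works immediately:

sink = rprint(sep=", ", end="!\n")
for x in (1, 2, 3):
    sink.send(x)
sink.close()  # triggers GeneratorExit, printing the terminator: '1, 2, 3!'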
sixty-north/python-transducers | transducer/sources.py | iterable_source | def iterable_source(iterable, target):
"""Convert an iterable into a stream of events.
Args:
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items.
"""
it = iter(iterable)
for item in it:
try:
target.send(item)
except StopIteration:
return prepend(item, it)
return empty_iter() | python | def iterable_source(iterable, target):
"""Convert an iterable into a stream of events.
Args:
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items.
"""
it = iter(iterable)
for item in it:
try:
target.send(item)
except StopIteration:
return prepend(item, it)
return empty_iter() | [
"def",
"iterable_source",
"(",
"iterable",
",",
"target",
")",
":",
"it",
"=",
"iter",
"(",
"iterable",
")",
"for",
"item",
"in",
"it",
":",
"try",
":",
"target",
".",
"send",
"(",
"item",
")",
"except",
"StopIteration",
":",
"return",
"prepend",
"(",
"item",
",",
"it",
")",
"return",
"empty_iter",
"(",
")"
] | Convert an iterable into a stream of events.
Args:
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items. | [
"Convert",
"an",
"iterable",
"into",
"a",
"stream",
"of",
"events",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/sources.py#L6-L22 |
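A sketch pairing iterable_source with the rprint sink shown above (again assumed primed by the coroutine decorator):

rest = iterable_source([1, 2, 3], rprint())
assert list(rest) == []  # the sink accepted every item; nothing left over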
sixty-north/python-transducers | transducer/sources.py | poisson_source | def poisson_source(rate, iterable, target):
"""Send events at random times with uniform probability.
Args:
rate: The average number of events to send per second.
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items.
"""
if rate <= 0.0:
raise ValueError("poisson_source rate {} is not positive".format(rate))
it = iter(iterable)
for item in it:
duration = random.expovariate(rate)
sleep(duration)
try:
target.send(item)
except StopIteration:
return prepend(item, it)
return empty_iter() | python | def poisson_source(rate, iterable, target):
"""Send events at random times with uniform probability.
Args:
rate: The average number of events to send per second.
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items.
"""
if rate <= 0.0:
raise ValueError("poisson_source rate {} is not positive".format(rate))
it = iter(iterable)
for item in it:
duration = random.expovariate(rate)
sleep(duration)
try:
target.send(item)
except StopIteration:
return prepend(item, it)
return empty_iter() | [
"def",
"poisson_source",
"(",
"rate",
",",
"iterable",
",",
"target",
")",
":",
"if",
"rate",
"<=",
"0.0",
":",
"raise",
"ValueError",
"(",
"\"poisson_source rate {} is not positive\"",
".",
"format",
"(",
"rate",
")",
")",
"it",
"=",
"iter",
"(",
"iterable",
")",
"for",
"item",
"in",
"it",
":",
"duration",
"=",
"random",
".",
"expovariate",
"(",
"rate",
")",
"sleep",
"(",
"duration",
")",
"try",
":",
"target",
".",
"send",
"(",
"item",
")",
"except",
"StopIteration",
":",
"return",
"prepend",
"(",
"item",
",",
"it",
")",
"return",
"empty_iter",
"(",
")"
] | Send events at random times with uniform probability.
Args:
rate: The average number of events to send per second.
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items. | [
"Send",
"events",
"at",
"random",
"times",
"with",
"uniform",
"probability",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/sources.py#L25-L47 |
sixty-north/python-transducers | transducer/functional.py | compose | def compose(f, *fs):
"""Compose functions right to left.
compose(f, g, h)(x) -> f(g(h(x)))
Args:
f, *fs: The head and rest of a sequence of callables. The
rightmost function passed can accept any arguments and
the returned function will have the same signature as
this last provided function. All preceding functions
must be unary.
Returns:
The composition of the argument functions. The returned
function will accept the same arguments as the rightmost
passed in function.
"""
rfs = list(chain([f], fs))
rfs.reverse()
def composed(*args, **kwargs):
return reduce(
lambda result, fn: fn(result),
rfs[1:],
rfs[0](*args, **kwargs))
return composed | python | def compose(f, *fs):
"""Compose functions right to left.
compose(f, g, h)(x) -> f(g(h(x)))
Args:
f, *fs: The head and rest of a sequence of callables. The
rightmost function passed can accept any arguments and
the returned function will have the same signature as
this last provided function. All preceding functions
must be unary.
Returns:
The composition of the argument functions. The returned
function will accept the same arguments as the rightmost
passed in function.
"""
rfs = list(chain([f], fs))
rfs.reverse()
def composed(*args, **kwargs):
return reduce(
lambda result, fn: fn(result),
rfs[1:],
rfs[0](*args, **kwargs))
return composed | [
"def",
"compose",
"(",
"f",
",",
"*",
"fs",
")",
":",
"rfs",
"=",
"list",
"(",
"chain",
"(",
"[",
"f",
"]",
",",
"fs",
")",
")",
"rfs",
".",
"reverse",
"(",
")",
"def",
"composed",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"reduce",
"(",
"lambda",
"result",
",",
"fn",
":",
"fn",
"(",
"result",
")",
",",
"rfs",
"[",
"1",
":",
"]",
",",
"rfs",
"[",
"0",
"]",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"composed"
] | Compose functions right to left.
compose(f, g, h)(x) -> f(g(h(x)))
Args:
f, *fs: The head and rest of a sequence of callables. The
rightmost function passed can accept any arguments and
the returned function will have the same signature as
this last provided function. All preceding functions
must be unary.
Returns:
The composition of the argument functions. The returned
function will accept the same arguments as the rightmost
passed in function. | [
"Compose",
"functions",
"right",
"to",
"left",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/functional.py#L5-L31 |
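Since compose's full source appears in the record above, its right-to-left semantics can be checked directly:

from transducer.functional import compose

add_one = lambda x: x + 1
double = lambda x: x * 2

f = compose(add_one, double)  # f(x) == add_one(double(x))
assert f(10) == 21            # double runs first (20), then add_one (21)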
sixty-north/python-transducers | transducer/transducers.py | reducing | def reducing(reducer, init=UNSET):
"""Create a reducing transducer with the given reducer.
Args:
reducer: A two-argument function which will be used to combine the
partial cumulative result in the first argument with the next
item from the input stream in the second argument.
Returns: A reducing transducer: A single argument function which,
when passed a reducing function, returns a new reducing function
which entirely reduces the input stream using 'reducer' before
passing the result to the reducing function passed to the
transducer.
"""
reducer2 = reducer
def reducing_transducer(reducer):
return Reducing(reducer, reducer2, init)
return reducing_transducer | python | def reducing(reducer, init=UNSET):
"""Create a reducing transducer with the given reducer.
Args:
reducer: A two-argument function which will be used to combine the
partial cumulative result in the first argument with the next
item from the input stream in the second argument.
Returns: A reducing transducer: A single argument function which,
when passed a reducing function, returns a new reducing function
which entirely reduces the input stream using 'reducer' before
passing the result to the reducing function passed to the
transducer.
"""
reducer2 = reducer
def reducing_transducer(reducer):
return Reducing(reducer, reducer2, init)
return reducing_transducer | [
"def",
"reducing",
"(",
"reducer",
",",
"init",
"=",
"UNSET",
")",
":",
"reducer2",
"=",
"reducer",
"def",
"reducing_transducer",
"(",
"reducer",
")",
":",
"return",
"Reducing",
"(",
"reducer",
",",
"reducer2",
",",
"init",
")",
"return",
"reducing_transducer"
] | Create a reducing transducer with the given reducer.
Args:
reducer: A two-argument function which will be used to combine the
partial cumulative result in the first argument with the next
item from the input stream in the second argument.
Returns: A reducing transducer: A single argument function which,
when passed a reducing function, returns a new reducing function
which entirely reduces the input stream using 'reducer' before
passing the result to the reducing function passed to the
transducer. | [
"Create",
"a",
"reducing",
"transducer",
"with",
"the",
"given",
"reducer",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/transducers.py#L99-L119 |
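A usage sketch for reducing. Only reducing itself is defined in the record above; transduce (an eager driver) and appending (a list-building reducer) are assumed helper names from elsewhere in the library:

from operator import add
from transducer.transducers import reducing
from transducer.eager import transduce      # assumed helper
from transducer.reducers import appending   # assumed helper

result = transduce(transducer=reducing(add), reducer=appending(),
                   iterable=range(5))
# Expected: [10] -- the stream is folded with add and the single final
# value is passed downstream once.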
sixty-north/python-transducers | transducer/transducers.py | scanning | def scanning(reducer, init=UNSET):
"""Create a scanning reducer."""
reducer2 = reducer
def scanning_transducer(reducer):
return Scanning(reducer, reducer2, init)
return scanning_transducer | python | def scanning(reducer, init=UNSET):
"""Create a scanning reducer."""
reducer2 = reducer
def scanning_transducer(reducer):
return Scanning(reducer, reducer2, init)
return scanning_transducer | [
"def",
"scanning",
"(",
"reducer",
",",
"init",
"=",
"UNSET",
")",
":",
"reducer2",
"=",
"reducer",
"def",
"scanning_transducer",
"(",
"reducer",
")",
":",
"return",
"Scanning",
"(",
"reducer",
",",
"reducer2",
",",
"init",
")",
"return",
"scanning_transducer"
] | Create a scanning transducer. | [
"Create",
"a",
"scanning",
"reducer",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/transducers.py#L136-L144 |
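A scanning sketch using the same assumed transduce/appending helpers; unlike reducing, every partial result is emitted:

from operator import add
from transducer.transducers import scanning
from transducer.eager import transduce      # assumed helper
from transducer.reducers import appending   # assumed helper

result = transduce(transducer=scanning(add), reducer=appending(),
                   iterable=[1, 2, 3, 4])
# Expected: [1, 3, 6, 10] -- each running total is sent downstream.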
sixty-north/python-transducers | transducer/transducers.py | taking | def taking(n):
"""Create a transducer which takes the first n items"""
if n < 0:
raise ValueError("Cannot take fewer than zero ({}) items".format(n))
def taking_transducer(reducer):
return Taking(reducer, n)
return taking_transducer | python | def taking(n):
"""Create a transducer which takes the first n items"""
if n < 0:
raise ValueError("Cannot take fewer than zero ({}) items".format(n))
def taking_transducer(reducer):
return Taking(reducer, n)
return taking_transducer | [
"def",
"taking",
"(",
"n",
")",
":",
"if",
"n",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Cannot take fewer than zero ({}) items\"",
".",
"format",
"(",
"n",
")",
")",
"def",
"taking_transducer",
"(",
"reducer",
")",
":",
"return",
"Taking",
"(",
"reducer",
",",
"n",
")",
"return",
"taking_transducer"
] | Create a transducer which takes the first n items | [
"Create",
"a",
"transducer",
"which",
"takes",
"the",
"first",
"n",
"items"
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/transducers.py#L207-L216 |
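A taking sketch (same assumed transduce/appending helpers as above):

from transducer.transducers import taking
from transducer.eager import transduce      # assumed helper
from transducer.reducers import appending   # assumed helper

result = transduce(transducer=taking(3), reducer=appending(),
                   iterable='abcdef')
# Expected: ['a', 'b', 'c'] -- downstream processing stops after three items.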
sixty-north/python-transducers | transducer/transducers.py | dropping | def dropping(n):
"""Create a transducer which drops the first n items"""
if n < 0:
raise ValueError("Cannot drop fewer than zero ({}) items".format(n))
def dropping_transducer(reducer):
return Dropping(reducer, n)
return dropping_transducer | python | def dropping(n):
"""Create a transducer which drops the first n items"""
if n < 0:
raise ValueError("Cannot drop fewer than zero ({}) items".format(n))
def dropping_transducer(reducer):
return Dropping(reducer, n)
return dropping_transducer | [
"def",
"dropping",
"(",
"n",
")",
":",
"if",
"n",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Cannot drop fewer than zero ({}) items\"",
".",
"format",
"(",
"n",
")",
")",
"def",
"dropping_transducer",
"(",
"reducer",
")",
":",
"return",
"Dropping",
"(",
"reducer",
",",
"n",
")",
"return",
"dropping_transducer"
] | Create a transducer which drops the first n items | [
"Create",
"a",
"transducer",
"which",
"drops",
"the",
"first",
"n",
"items"
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/transducers.py#L255-L264 |
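A dropping sketch (same assumed helpers):

from transducer.transducers import dropping
from transducer.eager import transduce      # assumed helper
from transducer.reducers import appending   # assumed helper

result = transduce(transducer=dropping(2), reducer=appending(),
                   iterable=range(5))
# Expected: [2, 3, 4] -- the first two items are swallowed, the rest pass.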
sixty-north/python-transducers | transducer/transducers.py | batching | def batching(size):
"""Create a transducer which produces non-overlapping batches."""
if size < 1:
raise ValueError("batching() size must be at least 1")
def batching_transducer(reducer):
return Batching(reducer, size)
return batching_transducer | python | def batching(size):
"""Create a transducer which produces non-overlapping batches."""
if size < 1:
raise ValueError("batching() size must be at least 1")
def batching_transducer(reducer):
return Batching(reducer, size)
return batching_transducer | [
"def",
"batching",
"(",
"size",
")",
":",
"if",
"size",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"batching() size must be at least 1\"",
")",
"def",
"batching_transducer",
"(",
"reducer",
")",
":",
"return",
"Batching",
"(",
"reducer",
",",
"size",
")",
"return",
"batching_transducer"
] | Create a transducer which produces non-overlapping batches. | [
"Create",
"a",
"transducer",
"which",
"produces",
"non",
"-",
"overlapping",
"batches",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/transducers.py#L360-L369 |
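A batching sketch (same assumed helpers; note the trailing-batch caveat):

from transducer.transducers import batching
from transducer.eager import transduce      # assumed helper
from transducer.reducers import appending   # assumed helper

result = transduce(transducer=batching(2), reducer=appending(),
                   iterable=range(5))
# Expected: [[0, 1], [2, 3], [4]] -- whether the final partial batch is
# flushed on completion is decided by the Batching class, not shown here.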
sixty-north/python-transducers | transducer/transducers.py | windowing | def windowing(size, padding=UNSET, window_type=tuple):
"""Create a transducer which produces a moving window over items."""
if size < 1:
raise ValueError("windowing() size {} is not at least 1".format(size))
def windowing_transducer(reducer):
return Windowing(reducer, size, padding, window_type)
return windowing_transducer | python | def windowing(size, padding=UNSET, window_type=tuple):
"""Create a transducer which produces a moving window over items."""
if size < 1:
raise ValueError("windowing() size {} is not at least 1".format(size))
def windowing_transducer(reducer):
return Windowing(reducer, size, padding, window_type)
return windowing_transducer | [
"def",
"windowing",
"(",
"size",
",",
"padding",
"=",
"UNSET",
",",
"window_type",
"=",
"tuple",
")",
":",
"if",
"size",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"windowing() size {} is not at least 1\"",
".",
"format",
"(",
"size",
")",
")",
"def",
"windowing_transducer",
"(",
"reducer",
")",
":",
"return",
"Windowing",
"(",
"reducer",
",",
"size",
",",
"padding",
",",
"window_type",
")",
"return",
"windowing_transducer"
] | Create a transducer which produces a moving window over items. | [
"Create",
"a",
"transducer",
"which",
"produces",
"a",
"moving",
"window",
"over",
"items",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/transducers.py#L398-L407 |
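A windowing sketch (same assumed helpers; edge behaviour is hedged in the comment):

from transducer.transducers import windowing
from transducer.eager import transduce      # assumed helper
from transducer.reducers import appending   # assumed helper

result = transduce(transducer=windowing(3), reducer=appending(),
                   iterable=range(5))
# With the default window_type=tuple, expect overlapping windows such as
# (0, 1, 2), (1, 2, 3), (2, 3, 4); leading partial windows and the padding
# argument are handled by the unshown Windowing class.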
sixty-north/python-transducers | transducer/transducers.py | first | def first(predicate=None):
"""Create a transducer which obtains the first item, then terminates."""
predicate = true if predicate is None else predicate
def first_transducer(reducer):
return First(reducer, predicate)
return first_transducer | python | def first(predicate=None):
"""Create a transducer which obtains the first item, then terminates."""
predicate = true if predicate is None else predicate
def first_transducer(reducer):
return First(reducer, predicate)
return first_transducer | [
"def",
"first",
"(",
"predicate",
"=",
"None",
")",
":",
"predicate",
"=",
"true",
"if",
"predicate",
"is",
"None",
"else",
"predicate",
"def",
"first_transducer",
"(",
"reducer",
")",
":",
"return",
"First",
"(",
"reducer",
",",
"predicate",
")",
"return",
"first_transducer"
] | Create a transducer which obtains the first item, then terminates. | [
"Create",
"a",
"transducer",
"which",
"obtains",
"the",
"first",
"item",
"then",
"terminates",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/transducers.py#L422-L430 |
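A first sketch (same assumed helpers):

from transducer.transducers import first
from transducer.eager import transduce      # assumed helper
from transducer.reducers import appending   # assumed helper

result = transduce(transducer=first(lambda x: x > 2), reducer=appending(),
                   iterable=range(10))
# Expected: [3] -- with no predicate, first() defaults to the library's
# `true` predicate and yields the very first item.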
sixty-north/python-transducers | transducer/transducers.py | last | def last(predicate=None):
"""Create a transducer which obtains the last item."""
predicate = true if predicate is None else predicate
def last_transducer(reducer):
return Last(reducer, predicate)
return last_transducer | python | def last(predicate=None):
"""Create a transducer which obtains the last item."""
predicate = true if predicate is None else predicate
def last_transducer(reducer):
return Last(reducer, predicate)
return last_transducer | [
"def",
"last",
"(",
"predicate",
"=",
"None",
")",
":",
"predicate",
"=",
"true",
"if",
"predicate",
"is",
"None",
"else",
"predicate",
"def",
"last_transducer",
"(",
"reducer",
")",
":",
"return",
"Last",
"(",
"reducer",
",",
"predicate",
")",
"return",
"last_transducer"
] | Create a transducer which obtains the last item. | [
"Create",
"a",
"transducer",
"which",
"obtains",
"the",
"last",
"item",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/transducers.py#L453-L461 |
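A last sketch (same assumed helpers):

from transducer.transducers import last
from transducer.eager import transduce      # assumed helper
from transducer.reducers import appending   # assumed helper

result = transduce(transducer=last(lambda x: x % 2 == 0), reducer=appending(),
                   iterable=range(10))
# Expected: [8] -- the final even item; the whole stream must be consumed.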
sixty-north/python-transducers | transducer/transducers.py | element_at | def element_at(index):
"""Create a transducer which obtains the item at the specified index."""
if index < 0:
raise IndexError("element_at used with illegal index {}".format(index))
def element_at_transducer(reducer):
return ElementAt(reducer, index)
return element_at_transducer | python | def element_at(index):
"""Create a transducer which obtains the item at the specified index."""
if index < 0:
raise IndexError("element_at used with illegal index {}".format(index))
def element_at_transducer(reducer):
return ElementAt(reducer, index)
return element_at_transducer | [
"def",
"element_at",
"(",
"index",
")",
":",
"if",
"index",
"<",
"0",
":",
"raise",
"IndexError",
"(",
"\"element_at used with illegal index {}\"",
".",
"format",
"(",
"index",
")",
")",
"def",
"element_at_transducer",
"(",
"reducer",
")",
":",
"return",
"ElementAt",
"(",
"reducer",
",",
"index",
")",
"return",
"element_at_transducer"
] | Create a transducer which obtains the item at the specified index. | [
"Create",
"a",
"transducer",
"which",
"obtains",
"the",
"item",
"at",
"the",
"specified",
"index",
"."
] | train | https://github.com/sixty-north/python-transducers/blob/575357e3a17ff3b4c757967afd396bf0ea042c08/transducer/transducers.py#L486-L495 |
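An element_at sketch (same assumed helpers):

from transducer.transducers import element_at
from transducer.eager import transduce      # assumed helper
from transducer.reducers import appending   # assumed helper

result = transduce(transducer=element_at(2), reducer=appending(),
                   iterable='xyz')
# Expected: ['z']; a negative index raises IndexError up front, as the
# record above shows.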
bjodah/pycompilation | pycompilation/compilation.py | compile_sources | def compile_sources(files, CompilerRunner_=None,
destdir=None, cwd=None,
keep_dir_struct=False,
per_file_kwargs=None,
**kwargs):
"""
Compile source code files to object files.
Parameters
----------
files: iterable of path strings
source files, if cwd is given, the paths are taken as relative.
CompilerRunner_: CompilerRunner subclass (optional)
could be e.g. pycompilation.FortranCompilerRunner
Will be inferred from filename extensions if missing.
destdir: path string
output directory, if cwd is given, the path is taken as relative
cwd: path string
working directory. Specify to have compiler run in other directory.
also used as root of relative paths.
keep_dir_struct: bool
Reproduce directory structure in `destdir`. default: False
per_file_kwargs: dict
dict mapping instances in `files` to keyword arguments
**kwargs: dict
default keyword arguments to pass to CompilerRunner_
"""
_per_file_kwargs = {}
if per_file_kwargs is not None:
for k, v in per_file_kwargs.items():
if isinstance(k, Glob):
for path in glob.glob(k.pathname):
_per_file_kwargs[path] = v
elif isinstance(k, ArbitraryDepthGlob):
for path in glob_at_depth(k.filename, cwd):
_per_file_kwargs[path] = v
else:
_per_file_kwargs[k] = v
# Set up destination directory
destdir = destdir or '.'
if not os.path.isdir(destdir):
if os.path.exists(destdir):
raise IOError("{} is not a directory".format(destdir))
else:
make_dirs(destdir)
if cwd is None:
cwd = '.'
for f in files:
copy(f, destdir, only_update=True, dest_is_dir=True)
# Compile files and return list of paths to the objects
dstpaths = []
for f in files:
if keep_dir_struct:
name, ext = os.path.splitext(f)
else:
name, ext = os.path.splitext(os.path.basename(f))
file_kwargs = kwargs.copy()
file_kwargs.update(_per_file_kwargs.get(f, {}))
dstpaths.append(src2obj(
f, CompilerRunner_, cwd=cwd,
**file_kwargs
))
return dstpaths | python | def compile_sources(files, CompilerRunner_=None,
destdir=None, cwd=None,
keep_dir_struct=False,
per_file_kwargs=None,
**kwargs):
"""
Compile source code files to object files.
Parameters
----------
files: iterable of path strings
source files, if cwd is given, the paths are taken as relative.
CompilerRunner_: CompilerRunner subclass (optional)
could be e.g. pycompilation.FortranCompilerRunner
Will be inferred from filename extensions if missing.
destdir: path string
output directory, if cwd is given, the path is taken as relative
cwd: path string
working directory. Specify to have compiler run in other directory.
also used as root of relative paths.
keep_dir_struct: bool
Reproduce directory structure in `destdir`. default: False
per_file_kwargs: dict
dict mapping instances in `files` to keyword arguments
**kwargs: dict
default keyword arguments to pass to CompilerRunner_
"""
_per_file_kwargs = {}
if per_file_kwargs is not None:
for k, v in per_file_kwargs.items():
if isinstance(k, Glob):
for path in glob.glob(k.pathname):
_per_file_kwargs[path] = v
elif isinstance(k, ArbitraryDepthGlob):
for path in glob_at_depth(k.filename, cwd):
_per_file_kwargs[path] = v
else:
_per_file_kwargs[k] = v
# Set up destination directory
destdir = destdir or '.'
if not os.path.isdir(destdir):
if os.path.exists(destdir):
raise IOError("{} is not a directory".format(destdir))
else:
make_dirs(destdir)
if cwd is None:
cwd = '.'
for f in files:
copy(f, destdir, only_update=True, dest_is_dir=True)
# Compile files and return list of paths to the objects
dstpaths = []
for f in files:
if keep_dir_struct:
name, ext = os.path.splitext(f)
else:
name, ext = os.path.splitext(os.path.basename(f))
file_kwargs = kwargs.copy()
file_kwargs.update(_per_file_kwargs.get(f, {}))
dstpaths.append(src2obj(
f, CompilerRunner_, cwd=cwd,
**file_kwargs
))
return dstpaths | [
"def",
"compile_sources",
"(",
"files",
",",
"CompilerRunner_",
"=",
"None",
",",
"destdir",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"keep_dir_struct",
"=",
"False",
",",
"per_file_kwargs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"_per_file_kwargs",
"=",
"{",
"}",
"if",
"per_file_kwargs",
"is",
"not",
"None",
":",
"for",
"k",
",",
"v",
"in",
"per_file_kwargs",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"k",
",",
"Glob",
")",
":",
"for",
"path",
"in",
"glob",
".",
"glob",
"(",
"k",
".",
"pathname",
")",
":",
"_per_file_kwargs",
"[",
"path",
"]",
"=",
"v",
"elif",
"isinstance",
"(",
"k",
",",
"ArbitraryDepthGlob",
")",
":",
"for",
"path",
"in",
"glob_at_depth",
"(",
"k",
".",
"filename",
",",
"cwd",
")",
":",
"_per_file_kwargs",
"[",
"path",
"]",
"=",
"v",
"else",
":",
"_per_file_kwargs",
"[",
"k",
"]",
"=",
"v",
"# Set up destination directory",
"destdir",
"=",
"destdir",
"or",
"'.'",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"destdir",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"destdir",
")",
":",
"raise",
"IOError",
"(",
"\"{} is not a directory\"",
".",
"format",
"(",
"destdir",
")",
")",
"else",
":",
"make_dirs",
"(",
"destdir",
")",
"if",
"cwd",
"is",
"None",
":",
"cwd",
"=",
"'.'",
"for",
"f",
"in",
"files",
":",
"copy",
"(",
"f",
",",
"destdir",
",",
"only_update",
"=",
"True",
",",
"dest_is_dir",
"=",
"True",
")",
"# Compile files and return list of paths to the objects",
"dstpaths",
"=",
"[",
"]",
"for",
"f",
"in",
"files",
":",
"if",
"keep_dir_struct",
":",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"f",
")",
"else",
":",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
")",
"file_kwargs",
"=",
"kwargs",
".",
"copy",
"(",
")",
"file_kwargs",
".",
"update",
"(",
"_per_file_kwargs",
".",
"get",
"(",
"f",
",",
"{",
"}",
")",
")",
"dstpaths",
".",
"append",
"(",
"src2obj",
"(",
"f",
",",
"CompilerRunner_",
",",
"cwd",
"=",
"cwd",
",",
"*",
"*",
"file_kwargs",
")",
")",
"return",
"dstpaths"
] | Compile source code files to object files.
Parameters
----------
files: iterable of path strings
source files, if cwd is given, the paths are taken as relative.
CompilerRunner_: CompilerRunner subclass (optional)
could be e.g. pycompilation.FortranCompilerRunner
Will be inferred from filename extensions if missing.
destdir: path string
output directory, if cwd is given, the path is taken as relative
cwd: path string
working directory. Specify to have compiler run in other directory.
also used as root of relative paths.
keep_dir_struct: bool
Reproduce directory structure in `destdir`. default: False
per_file_kwargs: dict
dict mapping instances in `files` to keyword arguments
**kwargs: dict
default keyword arguments to pass to CompilerRunner_ | [
"Compile",
"source",
"code",
"files",
"to",
"object",
"files",
"."
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L85-L150 |
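A usage sketch for compile_sources; the source file names and directories are hypothetical, and the top-level import path is assumed from the package name:

from pycompilation import compile_sources  # import path assumed

objs = compile_sources(['util.c', 'solver.f90'],  # hypothetical sources
                       destdir='build', cwd='src', only_update=True)
# Returns a list of object-file paths; the compiler runner for each file is
# inferred from its extension, and only_update is forwarded to src2obj.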
bjodah/pycompilation | pycompilation/compilation.py | link | def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
cwd=None, cplus=False, fort=False, **kwargs):
"""
Link object files.
Parameters
----------
obj_files: iterable of path strings
out_file: path string (optional)
path to executable/shared library, if missing
it will be deduced from the last item in obj_files.
shared: bool
Generate a shared library? default: False
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
If not given the `cplus` and `fort` flags will be inspected
(fallback is the C compiler)
cwd: path string
root of relative paths and working directory for compiler
cplus: bool
C++ objects? default: False
fort: bool
Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_
Returns
-------
The absolute path to the generated shared object / executable
"""
if out_file is None:
out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
if shared:
out_file += sharedext
if not CompilerRunner_:
if fort:
CompilerRunner_, extra_kwargs, vendor = \
get_mixed_fort_c_linker(
vendor=kwargs.get('vendor', None),
metadir=kwargs.get('metadir', None),
cplus=cplus,
cwd=cwd,
)
for k, v in extra_kwargs.items():
expand_collection_in_dict(kwargs, k, v)
else:
if cplus:
CompilerRunner_ = CppCompilerRunner
else:
CompilerRunner_ = CCompilerRunner
flags = kwargs.pop('flags', [])
if shared:
if '-shared' not in flags:
flags.append('-shared')
# mimic GNU linker behavior on OS X when using -shared
# (otherwise likely Undefined symbol errors)
dl_flag = '-undefined dynamic_lookup'
if sys.platform == 'darwin' and dl_flag not in flags:
flags.append(dl_flag)
run_linker = kwargs.pop('run_linker', True)
if not run_linker:
raise ValueError("link(..., run_linker=False)!?")
out_file = get_abspath(out_file, cwd=cwd)
runner = CompilerRunner_(
obj_files, out_file, flags,
cwd=cwd,
**kwargs)
runner.run()
return out_file | python | def link(obj_files, out_file=None, shared=False, CompilerRunner_=None,
cwd=None, cplus=False, fort=False, **kwargs):
"""
Link object files.
Parameters
----------
obj_files: iterable of path strings
out_file: path string (optional)
path to executable/shared library, if missing
it will be deduced from the last item in obj_files.
shared: bool
Generate a shared library? default: False
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
If not given the `cplus` and `fort` flags will be inspected
(fallback is the C compiler)
cwd: path string
root of relative paths and working directory for compiler
cplus: bool
C++ objects? default: False
fort: bool
Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_
Returns
-------
The absolute path to the generated shared object / executable
"""
if out_file is None:
out_file, ext = os.path.splitext(os.path.basename(obj_files[-1]))
if shared:
out_file += sharedext
if not CompilerRunner_:
if fort:
CompilerRunner_, extra_kwargs, vendor = \
get_mixed_fort_c_linker(
vendor=kwargs.get('vendor', None),
metadir=kwargs.get('metadir', None),
cplus=cplus,
cwd=cwd,
)
for k, v in extra_kwargs.items():
expand_collection_in_dict(kwargs, k, v)
else:
if cplus:
CompilerRunner_ = CppCompilerRunner
else:
CompilerRunner_ = CCompilerRunner
flags = kwargs.pop('flags', [])
if shared:
if '-shared' not in flags:
flags.append('-shared')
# mimic GNU linker behavior on OS X when using -shared
# (otherwise likely Undefined symbol errors)
dl_flag = '-undefined dynamic_lookup'
if sys.platform == 'darwin' and dl_flag not in flags:
flags.append(dl_flag)
run_linker = kwargs.pop('run_linker', True)
if not run_linker:
raise ValueError("link(..., run_linker=False)!?")
out_file = get_abspath(out_file, cwd=cwd)
runner = CompilerRunner_(
obj_files, out_file, flags,
cwd=cwd,
**kwargs)
runner.run()
return out_file | [
"def",
"link",
"(",
"obj_files",
",",
"out_file",
"=",
"None",
",",
"shared",
"=",
"False",
",",
"CompilerRunner_",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"cplus",
"=",
"False",
",",
"fort",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"out_file",
"is",
"None",
":",
"out_file",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"obj_files",
"[",
"-",
"1",
"]",
")",
")",
"if",
"shared",
":",
"out_file",
"+=",
"sharedext",
"if",
"not",
"CompilerRunner_",
":",
"if",
"fort",
":",
"CompilerRunner_",
",",
"extra_kwargs",
",",
"vendor",
"=",
"get_mixed_fort_c_linker",
"(",
"vendor",
"=",
"kwargs",
".",
"get",
"(",
"'vendor'",
",",
"None",
")",
",",
"metadir",
"=",
"kwargs",
".",
"get",
"(",
"'metadir'",
",",
"None",
")",
",",
"cplus",
"=",
"cplus",
",",
"cwd",
"=",
"cwd",
",",
")",
"for",
"k",
",",
"v",
"in",
"extra_kwargs",
".",
"items",
"(",
")",
":",
"expand_collection_in_dict",
"(",
"kwargs",
",",
"k",
",",
"v",
")",
"else",
":",
"if",
"cplus",
":",
"CompilerRunner_",
"=",
"CppCompilerRunner",
"else",
":",
"CompilerRunner_",
"=",
"CCompilerRunner",
"flags",
"=",
"kwargs",
".",
"pop",
"(",
"'flags'",
",",
"[",
"]",
")",
"if",
"shared",
":",
"if",
"'-shared'",
"not",
"in",
"flags",
":",
"flags",
".",
"append",
"(",
"'-shared'",
")",
"# mimic GNU linker behavior on OS X when using -shared",
"# (otherwise likely Undefined symbol errors)",
"dl_flag",
"=",
"'-undefined dynamic_lookup'",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
"and",
"dl_flag",
"not",
"in",
"flags",
":",
"flags",
".",
"append",
"(",
"dl_flag",
")",
"run_linker",
"=",
"kwargs",
".",
"pop",
"(",
"'run_linker'",
",",
"True",
")",
"if",
"not",
"run_linker",
":",
"raise",
"ValueError",
"(",
"\"link(..., run_linker=False)!?\"",
")",
"out_file",
"=",
"get_abspath",
"(",
"out_file",
",",
"cwd",
"=",
"cwd",
")",
"runner",
"=",
"CompilerRunner_",
"(",
"obj_files",
",",
"out_file",
",",
"flags",
",",
"cwd",
"=",
"cwd",
",",
"*",
"*",
"kwargs",
")",
"runner",
".",
"run",
"(",
")",
"return",
"out_file"
] | Link object files.
Parameters
----------
obj_files: iterable of path strings
out_file: path string (optional)
path to executable/shared library, if missing
it will be deduced from the last item in obj_files.
shared: bool
Generate a shared library? default: False
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
If not given the `cplus` and `fort` flags will be inspected
(fallback is the C compiler)
cwd: path string
root of relative paths and working directory for compiler
cplus: bool
C++ objects? default: False
fort: bool
Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_
Returns
-------
The absolute path to the generated shared object / executable | [
"Link",
"object",
"files",
"."
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L153-L225 |
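A link sketch; object paths are hypothetical and the import path is assumed:

from pycompilation import link  # import path assumed

so_path = link(['build/util.o', 'build/solver.o'],  # hypothetical objects
               shared=True, fort=True, cwd='.')
# out_file is deduced from the last object plus the platform's shared-library
# suffix, and fort=True selects a mixed Fortran/C linker.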
bjodah/pycompilation | pycompilation/compilation.py | link_py_so | def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
cplus=False, fort=False, **kwargs):
"""
Link python extension module (shared object) for importing
Parameters
----------
obj_files: iterable of path strings
object files to be linked
so_file: path string
Name (path) of shared object file to create. If
not specified it will have the basename of the last object
file in `obj_files` but with the extension '.so' (Unix) or
'.dll' (Windows).
cwd: path string
root of relative paths and working directory of linker.
libraries: iterable of strings
libraries to link against, e.g. ['m']
cplus: bool
Any C++ objects? default: False
fort: bool
Any Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto `link(...)`
Returns
-------
Absolute path to the generated shared object
"""
libraries = libraries or []
include_dirs = kwargs.pop('include_dirs', [])
library_dirs = kwargs.pop('library_dirs', [])
# from distutils/command/build_ext.py:
if sys.platform == "win32":
warnings.warn("Windows not yet supported.")
elif sys.platform == 'darwin':
# Don't use the default code below
pass
elif sys.platform[:3] == 'aix':
# Don't use the default code below
pass
else:
# LIBDIR/INSTSONAME should always point to libpython (dynamic or static)
pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
if os.path.exists(pylib):
libraries.append(pylib)
else:
if get_config_var('Py_ENABLE_SHARED'):
ABIFLAGS = get_config_var('ABIFLAGS')
pythonlib = 'python{}.{}{}'.format(
sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
ABIFLAGS or '')
libraries += [pythonlib]
else:
pass
flags = kwargs.pop('flags', [])
needed_flags = ('-pthread',)
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
# We want something like: gcc, ['-pthread', ...
# compilername, flags = cc.split()[0], cc.split()[1:]
# # Grab include_dirs
# include_dirs += list(filter(lambda x: x.startswith('-I'), flags))
# flags = list(filter(lambda x: not x.startswith('-I'), flags))
# # Grab library_dirs
# library_dirs += [x[2:] for x in filter(
# lambda x: x.startswith('-L'), flags)]
# flags = list(filter(lambda x: not x.startswith('-L'), flags))
# flags.extend(kwargs.pop('flags', []))
return link(obj_files, shared=True, flags=flags, cwd=cwd,
cplus=cplus, fort=fort, include_dirs=include_dirs,
libraries=libraries, library_dirs=library_dirs, **kwargs) | python | def link_py_so(obj_files, so_file=None, cwd=None, libraries=None,
cplus=False, fort=False, **kwargs):
"""
Link python extension module (shared object) for importing
Parameters
----------
obj_files: iterable of path strings
object files to be linked
so_file: path string
Name (path) of shared object file to create. If
not specified it will have the basename of the last object
file in `obj_files` but with the extension '.so' (Unix) or
'.dll' (Windows).
cwd: path string
root of relative paths and working directory of linker.
libraries: iterable of strings
libraries to link against, e.g. ['m']
cplus: bool
Any C++ objects? default: False
fort: bool
Any Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto `link(...)`
Returns
-------
Absolute path to the generated shared object
"""
libraries = libraries or []
include_dirs = kwargs.pop('include_dirs', [])
library_dirs = kwargs.pop('library_dirs', [])
# from distutils/command/build_ext.py:
if sys.platform == "win32":
warnings.warn("Windows not yet supported.")
elif sys.platform == 'darwin':
# Don't use the default code below
pass
elif sys.platform[:3] == 'aix':
# Don't use the default code below
pass
else:
# LIBDIR/INSTSONAME should always point to libpython (dynamic or static)
pylib = os.path.join(get_config_var('LIBDIR'), get_config_var('INSTSONAME'))
if os.path.exists(pylib):
libraries.append(pylib)
else:
if get_config_var('Py_ENABLE_SHARED'):
ABIFLAGS = get_config_var('ABIFLAGS')
pythonlib = 'python{}.{}{}'.format(
sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
ABIFLAGS or '')
libraries += [pythonlib]
else:
pass
flags = kwargs.pop('flags', [])
needed_flags = ('-pthread',)
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
# We want something like: gcc, ['-pthread', ...
# compilername, flags = cc.split()[0], cc.split()[1:]
# # Grab include_dirs
# include_dirs += list(filter(lambda x: x.startswith('-I'), flags))
# flags = list(filter(lambda x: not x.startswith('-I'), flags))
# # Grab library_dirs
# library_dirs += [x[2:] for x in filter(
# lambda x: x.startswith('-L'), flags)]
# flags = list(filter(lambda x: not x.startswith('-L'), flags))
# flags.extend(kwargs.pop('flags', []))
return link(obj_files, shared=True, flags=flags, cwd=cwd,
cplus=cplus, fort=fort, include_dirs=include_dirs,
libraries=libraries, library_dirs=library_dirs, **kwargs) | [
"def",
"link_py_so",
"(",
"obj_files",
",",
"so_file",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"libraries",
"=",
"None",
",",
"cplus",
"=",
"False",
",",
"fort",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"libraries",
"=",
"libraries",
"or",
"[",
"]",
"include_dirs",
"=",
"kwargs",
".",
"pop",
"(",
"'include_dirs'",
",",
"[",
"]",
")",
"library_dirs",
"=",
"kwargs",
".",
"pop",
"(",
"'library_dirs'",
",",
"[",
"]",
")",
"# from distutils/command/build_ext.py:",
"if",
"sys",
".",
"platform",
"==",
"\"win32\"",
":",
"warnings",
".",
"warn",
"(",
"\"Windows not yet supported.\"",
")",
"elif",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"# Don't use the default code below",
"pass",
"elif",
"sys",
".",
"platform",
"[",
":",
"3",
"]",
"==",
"'aix'",
":",
"# Don't use the default code below",
"pass",
"else",
":",
"# LIBDIR/INSTSONAME should always points to libpython (dynamic or static)",
"pylib",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_config_var",
"(",
"'LIBDIR'",
")",
",",
"get_config_var",
"(",
"'INSTSONAME'",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"pylib",
")",
":",
"libraries",
".",
"append",
"(",
"pylib",
")",
"else",
":",
"if",
"get_config_var",
"(",
"'Py_ENABLE_SHARED'",
")",
":",
"ABIFLAGS",
"=",
"get_config_var",
"(",
"'ABIFLAGS'",
")",
"pythonlib",
"=",
"'python{}.{}{}'",
".",
"format",
"(",
"sys",
".",
"hexversion",
">>",
"24",
",",
"(",
"sys",
".",
"hexversion",
">>",
"16",
")",
"&",
"0xff",
",",
"ABIFLAGS",
"or",
"''",
")",
"libraries",
"+=",
"[",
"pythonlib",
"]",
"else",
":",
"pass",
"flags",
"=",
"kwargs",
".",
"pop",
"(",
"'flags'",
",",
"[",
"]",
")",
"needed_flags",
"=",
"(",
"'-pthread'",
",",
")",
"for",
"flag",
"in",
"needed_flags",
":",
"if",
"flag",
"not",
"in",
"flags",
":",
"flags",
".",
"append",
"(",
"flag",
")",
"# We want something like: gcc, ['-pthread', ...",
"# compilername, flags = cc.split()[0], cc.split()[1:]",
"# # Grab include_dirs",
"# include_dirs += list(filter(lambda x: x.startswith('-I'), flags))",
"# flags = list(filter(lambda x: not x.startswith('-I'), flags))",
"# # Grab library_dirs",
"# library_dirs += [x[2:] for x in filter(",
"# lambda x: x.startswith('-L'), flags)]",
"# flags = list(filter(lambda x: not x.startswith('-L'), flags))",
"# flags.extend(kwargs.pop('flags', []))",
"return",
"link",
"(",
"obj_files",
",",
"shared",
"=",
"True",
",",
"flags",
"=",
"flags",
",",
"cwd",
"=",
"cwd",
",",
"cplus",
"=",
"cplus",
",",
"fort",
"=",
"fort",
",",
"include_dirs",
"=",
"include_dirs",
",",
"libraries",
"=",
"libraries",
",",
"library_dirs",
"=",
"library_dirs",
",",
"*",
"*",
"kwargs",
")"
] | Link python extension module (shared object) for importing
Parameters
----------
obj_files: iterable of path strings
object files to be linked
so_file: path string
Name (path) of shared object file to create. If
not specified it will have the basename of the last object
file in `obj_files` but with the extension '.so' (Unix) or
'.dll' (Windows).
cwd: path string
root of relative paths and working directory of linker.
libraries: iterable of strings
libraries to link against, e.g. ['m']
cplus: bool
Any C++ objects? default: False
fort: bool
Any Fortran objects? default: False
**kwargs: dict
keyword arguments passed onto `link(...)`
Returns
-------
Absolute path to the generated shared object | [
"Link",
"python",
"extension",
"module",
"(",
"shared",
"object",
")",
"for",
"importing"
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L228-L308 |
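A link_py_so sketch; the object path is hypothetical and the import path assumed:

from pycompilation import link_py_so  # import path assumed

so_file = link_py_so(['build/mymod.o'],  # hypothetical Cython-derived object
                     cwd='.', libraries=['m'])
# Returns the absolute path of a Python extension module; add its directory
# to sys.path to import it.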
bjodah/pycompilation | pycompilation/compilation.py | simple_cythonize | def simple_cythonize(src, destdir=None, cwd=None, logger=None,
full_module_name=None, only_update=False,
**cy_kwargs):
"""
Generates a C file from a Cython source file.
Parameters
----------
src: path string
path to Cython source
destdir: path string (optional)
Path to output directory (default: '.')
cwd: path string (optional)
Root of relative paths (default: '.')
logger: logging.Logger
logging messages are emitted at info level.
full_module_name: string
passed to cy_compile (default: None)
only_update: bool
Only cythonize if source is newer. default: False
**cy_kwargs:
second argument passed to cy_compile.
Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.
"""
from Cython.Compiler.Main import (
default_options, CompilationOptions
)
from Cython.Compiler.Main import compile as cy_compile
assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
cwd = cwd or '.'
destdir = destdir or '.'
ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
c_name = os.path.splitext(os.path.basename(src))[0] + ext
dstfile = os.path.join(destdir, c_name)
if only_update:
if not missing_or_other_newer(dstfile, src, cwd=cwd):
msg = '{0} newer than {1}, did not re-cythonize.'.format(
dstfile, src)
if logger:
logger.info(msg)
else:
print(msg)
return dstfile
if cwd:
ori_dir = os.getcwd()
else:
ori_dir = '.'
os.chdir(cwd)
try:
cy_options = CompilationOptions(default_options)
cy_options.__dict__.update(cy_kwargs)
if logger:
logger.info("Cythonizing {0} to {1}".format(
src, dstfile))
cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
if cy_result.num_errors > 0:
raise ValueError("Cython compilation failed.")
if os.path.abspath(os.path.dirname(
src)) != os.path.abspath(destdir):
if os.path.exists(dstfile):
os.unlink(dstfile)
shutil.move(os.path.join(os.path.dirname(src), c_name),
destdir)
finally:
os.chdir(ori_dir)
return dstfile | python | def simple_cythonize(src, destdir=None, cwd=None, logger=None,
full_module_name=None, only_update=False,
**cy_kwargs):
"""
Generates a C file from a Cython source file.
Parameters
----------
src: path string
path to Cython source
destdir: path string (optional)
Path to output directory (default: '.')
cwd: path string (optional)
Root of relative paths (default: '.')
logger: logging.Logger
logging messages are emitted at info level.
full_module_name: string
passed to cy_compile (default: None)
only_update: bool
Only cythonize if source is newer. default: False
**cy_kwargs:
second argument passed to cy_compile.
Generates a .cpp file if cplus=True in cy_kwargs, else a .c file.
"""
from Cython.Compiler.Main import (
default_options, CompilationOptions
)
from Cython.Compiler.Main import compile as cy_compile
assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
cwd = cwd or '.'
destdir = destdir or '.'
ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
c_name = os.path.splitext(os.path.basename(src))[0] + ext
dstfile = os.path.join(destdir, c_name)
if only_update:
if not missing_or_other_newer(dstfile, src, cwd=cwd):
msg = '{0} newer than {1}, did not re-cythonize.'.format(
dstfile, src)
if logger:
logger.info(msg)
else:
print(msg)
return dstfile
if cwd:
ori_dir = os.getcwd()
else:
ori_dir = '.'
os.chdir(cwd)
try:
cy_options = CompilationOptions(default_options)
cy_options.__dict__.update(cy_kwargs)
if logger:
logger.info("Cythonizing {0} to {1}".format(
src, dstfile))
cy_result = cy_compile([src], cy_options, full_module_name=full_module_name)
if cy_result.num_errors > 0:
raise ValueError("Cython compilation failed.")
if os.path.abspath(os.path.dirname(
src)) != os.path.abspath(destdir):
if os.path.exists(dstfile):
os.unlink(dstfile)
shutil.move(os.path.join(os.path.dirname(src), c_name),
destdir)
finally:
os.chdir(ori_dir)
return dstfile | [
"def",
"simple_cythonize",
"(",
"src",
",",
"destdir",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"full_module_name",
"=",
"None",
",",
"only_update",
"=",
"False",
",",
"*",
"*",
"cy_kwargs",
")",
":",
"from",
"Cython",
".",
"Compiler",
".",
"Main",
"import",
"(",
"default_options",
",",
"CompilationOptions",
")",
"from",
"Cython",
".",
"Compiler",
".",
"Main",
"import",
"compile",
"as",
"cy_compile",
"assert",
"src",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.pyx'",
")",
"or",
"src",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.py'",
")",
"cwd",
"=",
"cwd",
"or",
"'.'",
"destdir",
"=",
"destdir",
"or",
"'.'",
"ext",
"=",
"'.cpp'",
"if",
"cy_kwargs",
".",
"get",
"(",
"'cplus'",
",",
"False",
")",
"else",
"'.c'",
"c_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"src",
")",
")",
"[",
"0",
"]",
"+",
"ext",
"dstfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destdir",
",",
"c_name",
")",
"if",
"only_update",
":",
"if",
"not",
"missing_or_other_newer",
"(",
"dstfile",
",",
"src",
",",
"cwd",
"=",
"cwd",
")",
":",
"msg",
"=",
"'{0} newer than {1}, did not re-cythonize.'",
".",
"format",
"(",
"dstfile",
",",
"src",
")",
"if",
"logger",
":",
"logger",
".",
"info",
"(",
"msg",
")",
"else",
":",
"print",
"(",
"msg",
")",
"return",
"dstfile",
"if",
"cwd",
":",
"ori_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"else",
":",
"ori_dir",
"=",
"'.'",
"os",
".",
"chdir",
"(",
"cwd",
")",
"try",
":",
"cy_options",
"=",
"CompilationOptions",
"(",
"default_options",
")",
"cy_options",
".",
"__dict__",
".",
"update",
"(",
"cy_kwargs",
")",
"if",
"logger",
":",
"logger",
".",
"info",
"(",
"\"Cythonizing {0} to {1}\"",
".",
"format",
"(",
"src",
",",
"dstfile",
")",
")",
"cy_result",
"=",
"cy_compile",
"(",
"[",
"src",
"]",
",",
"cy_options",
",",
"full_module_name",
"=",
"full_module_name",
")",
"if",
"cy_result",
".",
"num_errors",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"Cython compilation failed.\"",
")",
"if",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"src",
")",
")",
"!=",
"os",
".",
"path",
".",
"abspath",
"(",
"destdir",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dstfile",
")",
":",
"os",
".",
"unlink",
"(",
"dstfile",
")",
"shutil",
".",
"move",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"src",
")",
",",
"c_name",
")",
",",
"destdir",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"ori_dir",
")",
"return",
"dstfile"
] | Generates a C file from a Cython source file.
Parameters
----------
src: path string
path to Cython source
destdir: path string (optional)
Path to output directory (default: '.')
cwd: path string (optional)
Root of relative paths (default: '.')
logger: logging.Logger
logging messages are emitted at info level.
full_module_name: string
passed to cy_compile (default: None)
only_update: bool
Only cythonize if source is newer. default: False
**cy_kwargs:
second argument passed to cy_compile.
Generates a .cpp file if cplus=True in cy_kwargs, else a .c file. | [
"Generates",
"a",
"C",
"file",
"from",
"a",
"Cython",
"source",
"file",
"."
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L311-L381 |
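A simple_cythonize sketch; the .pyx file name is hypothetical (the function itself is fully shown above):

from pycompilation.compilation import simple_cythonize  # path per the record

c_path = simple_cythonize('fast.pyx',  # hypothetical source
                          destdir='build', only_update=True)
# Writes build/fast.c (build/fast.cpp if cplus=True is passed in cy_kwargs)
# and skips the work when an up-to-date output already exists.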
bjodah/pycompilation | pycompilation/compilation.py | src2obj | def src2obj(srcpath, CompilerRunner_=None, objpath=None,
only_update=False, cwd=None, out_ext=None, inc_py=False,
**kwargs):
"""
Compiles a source code file to an object file.
Files ending with '.pyx' assumed to be cython files and
are dispatched to pyx2obj.
Parameters
----------
srcpath: path string
path to source file
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
Default: deduced from extension of srcpath
objpath: path string (optional)
path to generated object. default: deduced from srcpath
only_update: bool
only compile if source is newer than objpath. default: False
cwd: path string (optional)
working directory and root of relative paths. default: current dir.
out_ext: string
set when objpath is a dir and you want to override defaults
('.o'/'.obj' for Unix/Windows).
inc_py: bool
add Python include path to include_dirs. default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_ or pyx2obj
"""
name, ext = os.path.splitext(os.path.basename(srcpath))
if objpath is None:
if os.path.isabs(srcpath):
objpath = '.'
else:
objpath = os.path.dirname(srcpath)
objpath = objpath or '.' # avoid objpath == ''
out_ext = out_ext or objext
if os.path.isdir(objpath):
objpath = os.path.join(objpath, name+out_ext)
include_dirs = kwargs.pop('include_dirs', [])
if inc_py:
from distutils.sysconfig import get_python_inc
py_inc_dir = get_python_inc()
if py_inc_dir not in include_dirs:
include_dirs.append(py_inc_dir)
if ext.lower() == '.pyx':
return pyx2obj(srcpath, objpath=objpath,
include_dirs=include_dirs, cwd=cwd,
only_update=only_update, **kwargs)
if CompilerRunner_ is None:
CompilerRunner_, std = extension_mapping[ext.lower()]
if 'std' not in kwargs:
kwargs['std'] = std
# src2obj implies not running the linker...
run_linker = kwargs.pop('run_linker', False)
if run_linker:
raise CompilationError("src2obj called with run_linker=True")
if only_update:
if not missing_or_other_newer(objpath, srcpath, cwd=cwd):
msg = "Found {0}, did not recompile.".format(objpath)
if kwargs.get('logger', None):
kwargs['logger'].info(msg)
else:
print(msg)
return objpath
runner = CompilerRunner_(
[srcpath], objpath, include_dirs=include_dirs,
run_linker=run_linker, cwd=cwd, **kwargs)
runner.run()
return objpath | python | def src2obj(srcpath, CompilerRunner_=None, objpath=None,
only_update=False, cwd=None, out_ext=None, inc_py=False,
**kwargs):
"""
Compiles a source code file to an object file.
Files ending with '.pyx' assumed to be cython files and
are dispatched to pyx2obj.
Parameters
----------
srcpath: path string
path to source file
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
Default: deduced from extension of srcpath
objpath: path string (optional)
path to generated object. default: deduced from srcpath
only_update: bool
only compile if source is newer than objpath. default: False
cwd: path string (optional)
working directory and root of relative paths. default: current dir.
out_ext: string
set when objpath is a dir and you want to override defaults
('.o'/'.obj' for Unix/Windows).
inc_py: bool
add Python include path to include_dirs. default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_ or pyx2obj
"""
name, ext = os.path.splitext(os.path.basename(srcpath))
if objpath is None:
if os.path.isabs(srcpath):
objpath = '.'
else:
objpath = os.path.dirname(srcpath)
objpath = objpath or '.' # avoid objpath == ''
out_ext = out_ext or objext
if os.path.isdir(objpath):
objpath = os.path.join(objpath, name+out_ext)
include_dirs = kwargs.pop('include_dirs', [])
if inc_py:
from distutils.sysconfig import get_python_inc
py_inc_dir = get_python_inc()
if py_inc_dir not in include_dirs:
include_dirs.append(py_inc_dir)
if ext.lower() == '.pyx':
return pyx2obj(srcpath, objpath=objpath,
include_dirs=include_dirs, cwd=cwd,
only_update=only_update, **kwargs)
if CompilerRunner_ is None:
CompilerRunner_, std = extension_mapping[ext.lower()]
if 'std' not in kwargs:
kwargs['std'] = std
# src2obj implies not running the linker...
run_linker = kwargs.pop('run_linker', False)
if run_linker:
raise CompilationError("src2obj called with run_linker=True")
if only_update:
if not missing_or_other_newer(objpath, srcpath, cwd=cwd):
msg = "Found {0}, did not recompile.".format(objpath)
if kwargs.get('logger', None):
kwargs['logger'].info(msg)
else:
print(msg)
return objpath
runner = CompilerRunner_(
[srcpath], objpath, include_dirs=include_dirs,
run_linker=run_linker, cwd=cwd, **kwargs)
runner.run()
return objpath | [
"def",
"src2obj",
"(",
"srcpath",
",",
"CompilerRunner_",
"=",
"None",
",",
"objpath",
"=",
"None",
",",
"only_update",
"=",
"False",
",",
"cwd",
"=",
"None",
",",
"out_ext",
"=",
"None",
",",
"inc_py",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"srcpath",
")",
")",
"if",
"objpath",
"is",
"None",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"srcpath",
")",
":",
"objpath",
"=",
"'.'",
"else",
":",
"objpath",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"srcpath",
")",
"objpath",
"=",
"objpath",
"or",
"'.'",
"# avoid objpath == ''",
"out_ext",
"=",
"out_ext",
"or",
"objext",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"objpath",
")",
":",
"objpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"objpath",
",",
"name",
"+",
"out_ext",
")",
"include_dirs",
"=",
"kwargs",
".",
"pop",
"(",
"'include_dirs'",
",",
"[",
"]",
")",
"if",
"inc_py",
":",
"from",
"distutils",
".",
"sysconfig",
"import",
"get_python_inc",
"py_inc_dir",
"=",
"get_python_inc",
"(",
")",
"if",
"py_inc_dir",
"not",
"in",
"include_dirs",
":",
"include_dirs",
".",
"append",
"(",
"py_inc_dir",
")",
"if",
"ext",
".",
"lower",
"(",
")",
"==",
"'.pyx'",
":",
"return",
"pyx2obj",
"(",
"srcpath",
",",
"objpath",
"=",
"objpath",
",",
"include_dirs",
"=",
"include_dirs",
",",
"cwd",
"=",
"cwd",
",",
"only_update",
"=",
"only_update",
",",
"*",
"*",
"kwargs",
")",
"if",
"CompilerRunner_",
"is",
"None",
":",
"CompilerRunner_",
",",
"std",
"=",
"extension_mapping",
"[",
"ext",
".",
"lower",
"(",
")",
"]",
"if",
"'std'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'std'",
"]",
"=",
"std",
"# src2obj implies not running the linker...",
"run_linker",
"=",
"kwargs",
".",
"pop",
"(",
"'run_linker'",
",",
"False",
")",
"if",
"run_linker",
":",
"raise",
"CompilationError",
"(",
"\"src2obj called with run_linker=True\"",
")",
"if",
"only_update",
":",
"if",
"not",
"missing_or_other_newer",
"(",
"objpath",
",",
"srcpath",
",",
"cwd",
"=",
"cwd",
")",
":",
"msg",
"=",
"\"Found {0}, did not recompile.\"",
".",
"format",
"(",
"objpath",
")",
"if",
"kwargs",
".",
"get",
"(",
"'logger'",
",",
"None",
")",
":",
"kwargs",
"[",
"'logger'",
"]",
".",
"info",
"(",
"msg",
")",
"else",
":",
"print",
"(",
"msg",
")",
"return",
"objpath",
"runner",
"=",
"CompilerRunner_",
"(",
"[",
"srcpath",
"]",
",",
"objpath",
",",
"include_dirs",
"=",
"include_dirs",
",",
"run_linker",
"=",
"run_linker",
",",
"cwd",
"=",
"cwd",
",",
"*",
"*",
"kwargs",
")",
"runner",
".",
"run",
"(",
")",
"return",
"objpath"
] | Compiles a source code file to an object file.
Files ending with '.pyx' assumed to be cython files and
are dispatched to pyx2obj.
Parameters
----------
srcpath: path string
path to source file
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
Default: deduced from extension of srcpath
objpath: path string (optional)
path to generated object. default: deduced from srcpath
only_update: bool
only compile if source is newer than objpath. default: False
cwd: path string (optional)
working directory and root of relative paths. default: current dir.
out_ext: string
set when objpath is a dir and you want to override defaults
('.o'/'.obj' for Unix/Windows).
inc_py: bool
add Python include path to include_dirs. default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_ or pyx2obj | [
"Compiles",
"a",
"source",
"code",
"file",
"to",
"an",
"object",
"file",
".",
"Files",
"ending",
"with",
".",
"pyx",
"assumed",
"to",
"be",
"cython",
"files",
"and",
"are",
"dispatched",
"to",
"pyx2obj",
"."
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L398-L471 |
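A src2obj sketch; the file name is hypothetical, while the options list mirrors what pyx2obj itself passes (see the next record):

from pycompilation.compilation import src2obj  # path per the record

obj_path = src2obj('build/fast.c',  # hypothetical source
                   only_update=True, inc_py=True, options=['pic', 'warn'])
# The runner and language standard are deduced from the '.c' extension;
# inc_py adds the Python include directory needed for extension modules.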
bjodah/pycompilation | pycompilation/compilation.py | pyx2obj | def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
logger=None, full_module_name=None, only_update=False,
metadir=None, include_numpy=False, include_dirs=None,
cy_kwargs=None, gdb=False, cplus=None, **kwargs):
"""
Convenience function
If cwd is specified, pyxpath and dst are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
----------
pyxpath: path string
path to Cython source file
objpath: path string (optional)
path to object file to generate
interm_c_dir: path string (optional)
directory to put generated C file.
cwd: path string (optional)
working directory and root of relative paths
logger: logging.Logger (optional)
passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
passed onto `simple_cythonize`
only_update: bool (optional)
passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
passed onto src2obj
include_numpy: bool (optional)
Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
default: False
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
keyword arguments passed onto src2obj
Returns
-------
Absolute path of generated object file.
"""
assert pyxpath.endswith('.pyx')
cwd = cwd or '.'
objpath = objpath or '.'
interm_c_dir = interm_c_dir or os.path.dirname(objpath)
abs_objpath = get_abspath(objpath, cwd=cwd)
if os.path.isdir(abs_objpath):
pyx_fname = os.path.basename(pyxpath)
name, ext = os.path.splitext(pyx_fname)
objpath = os.path.join(objpath, name+objext)
cy_kwargs = cy_kwargs or {}
cy_kwargs['output_dir'] = cwd
if cplus is None:
cplus = pyx_is_cplus(pyxpath)
cy_kwargs['cplus'] = cplus
if gdb:
cy_kwargs['gdb_debug'] = True
if include_dirs:
cy_kwargs['include_path'] = include_dirs
interm_c_file = simple_cythonize(
pyxpath, destdir=interm_c_dir,
cwd=cwd, logger=logger,
full_module_name=full_module_name,
only_update=only_update, **cy_kwargs)
include_dirs = include_dirs or []
if include_numpy:
import numpy
numpy_inc_dir = numpy.get_include()
if numpy_inc_dir not in include_dirs:
include_dirs.append(numpy_inc_dir)
flags = kwargs.pop('flags', [])
needed_flags = ('-fwrapv', '-pthread')
if not cplus:
needed_flags += ('-Wstrict-prototypes',) # not really needed..
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
options = kwargs.pop('options', [])
if kwargs.pop('strict_aliasing', False):
raise CompilationError("Cython req. strict aliasing to be disabled.")
if 'pic' not in options:
options.append('pic')
if 'warn' not in options:
options.append('warn')
# Let's be explicit about standard
if cplus:
std = kwargs.pop('std', 'c++98')
else:
std = kwargs.pop('std', 'c99')
return src2obj(
interm_c_file,
objpath=objpath,
cwd=cwd,
only_update=only_update,
metadir=metadir,
include_dirs=include_dirs,
flags=flags,
std=std,
options=options,
logger=logger,
inc_py=True,
strict_aliasing=False,
**kwargs) | python | def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
logger=None, full_module_name=None, only_update=False,
metadir=None, include_numpy=False, include_dirs=None,
cy_kwargs=None, gdb=False, cplus=None, **kwargs):
"""
Convenience function
If cwd is specified, pyxpath and dst are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
----------
pyxpath: path string
path to Cython source file
objpath: path string (optional)
path to object file to generate
interm_c_dir: path string (optional)
directory to put generated C file.
cwd: path string (optional)
working directory and root of relative paths
logger: logging.Logger (optional)
passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
passed onto `simple_cythonize`
only_update: bool (optional)
passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
passed onto src2obj
include_numpy: bool (optional)
Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
default: False
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
keyword arguments passed onto src2obj
Returns
-------
Absolute path of generated object file.
"""
assert pyxpath.endswith('.pyx')
cwd = cwd or '.'
objpath = objpath or '.'
interm_c_dir = interm_c_dir or os.path.dirname(objpath)
abs_objpath = get_abspath(objpath, cwd=cwd)
if os.path.isdir(abs_objpath):
pyx_fname = os.path.basename(pyxpath)
name, ext = os.path.splitext(pyx_fname)
objpath = os.path.join(objpath, name+objext)
cy_kwargs = cy_kwargs or {}
cy_kwargs['output_dir'] = cwd
if cplus is None:
cplus = pyx_is_cplus(pyxpath)
cy_kwargs['cplus'] = cplus
if gdb:
cy_kwargs['gdb_debug'] = True
if include_dirs:
cy_kwargs['include_path'] = include_dirs
interm_c_file = simple_cythonize(
pyxpath, destdir=interm_c_dir,
cwd=cwd, logger=logger,
full_module_name=full_module_name,
only_update=only_update, **cy_kwargs)
include_dirs = include_dirs or []
if include_numpy:
import numpy
numpy_inc_dir = numpy.get_include()
if numpy_inc_dir not in include_dirs:
include_dirs.append(numpy_inc_dir)
flags = kwargs.pop('flags', [])
needed_flags = ('-fwrapv', '-pthread')
if not cplus:
needed_flags += ('-Wstrict-prototypes',) # not really needed..
for flag in needed_flags:
if flag not in flags:
flags.append(flag)
options = kwargs.pop('options', [])
if kwargs.pop('strict_aliasing', False):
raise CompilationError("Cython req. strict aliasing to be disabled.")
if 'pic' not in options:
options.append('pic')
if 'warn' not in options:
options.append('warn')
# Let's be explicit about standard
if cplus:
std = kwargs.pop('std', 'c++98')
else:
std = kwargs.pop('std', 'c99')
return src2obj(
interm_c_file,
objpath=objpath,
cwd=cwd,
only_update=only_update,
metadir=metadir,
include_dirs=include_dirs,
flags=flags,
std=std,
options=options,
logger=logger,
inc_py=True,
strict_aliasing=False,
**kwargs) | [
"def",
"pyx2obj",
"(",
"pyxpath",
",",
"objpath",
"=",
"None",
",",
"interm_c_dir",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"full_module_name",
"=",
"None",
",",
"only_update",
"=",
"False",
",",
"metadir",
"=",
"None",
",",
"include_numpy",
"=",
"False",
",",
"include_dirs",
"=",
"None",
",",
"cy_kwargs",
"=",
"None",
",",
"gdb",
"=",
"False",
",",
"cplus",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"pyxpath",
".",
"endswith",
"(",
"'.pyx'",
")",
"cwd",
"=",
"cwd",
"or",
"'.'",
"objpath",
"=",
"objpath",
"or",
"'.'",
"interm_c_dir",
"=",
"interm_c_dir",
"or",
"os",
".",
"path",
".",
"dirname",
"(",
"objpath",
")",
"abs_objpath",
"=",
"get_abspath",
"(",
"objpath",
",",
"cwd",
"=",
"cwd",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"abs_objpath",
")",
":",
"pyx_fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"pyxpath",
")",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"pyx_fname",
")",
"objpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"objpath",
",",
"name",
"+",
"objext",
")",
"cy_kwargs",
"=",
"cy_kwargs",
"or",
"{",
"}",
"cy_kwargs",
"[",
"'output_dir'",
"]",
"=",
"cwd",
"if",
"cplus",
"is",
"None",
":",
"cplus",
"=",
"pyx_is_cplus",
"(",
"pyxpath",
")",
"cy_kwargs",
"[",
"'cplus'",
"]",
"=",
"cplus",
"if",
"gdb",
":",
"cy_kwargs",
"[",
"'gdb_debug'",
"]",
"=",
"True",
"if",
"include_dirs",
":",
"cy_kwargs",
"[",
"'include_path'",
"]",
"=",
"include_dirs",
"interm_c_file",
"=",
"simple_cythonize",
"(",
"pyxpath",
",",
"destdir",
"=",
"interm_c_dir",
",",
"cwd",
"=",
"cwd",
",",
"logger",
"=",
"logger",
",",
"full_module_name",
"=",
"full_module_name",
",",
"only_update",
"=",
"only_update",
",",
"*",
"*",
"cy_kwargs",
")",
"include_dirs",
"=",
"include_dirs",
"or",
"[",
"]",
"if",
"include_numpy",
":",
"import",
"numpy",
"numpy_inc_dir",
"=",
"numpy",
".",
"get_include",
"(",
")",
"if",
"numpy_inc_dir",
"not",
"in",
"include_dirs",
":",
"include_dirs",
".",
"append",
"(",
"numpy_inc_dir",
")",
"flags",
"=",
"kwargs",
".",
"pop",
"(",
"'flags'",
",",
"[",
"]",
")",
"needed_flags",
"=",
"(",
"'-fwrapv'",
",",
"'-pthread'",
")",
"if",
"not",
"cplus",
":",
"needed_flags",
"+=",
"(",
"'-Wstrict-prototypes'",
",",
")",
"# not really needed..",
"for",
"flag",
"in",
"needed_flags",
":",
"if",
"flag",
"not",
"in",
"flags",
":",
"flags",
".",
"append",
"(",
"flag",
")",
"options",
"=",
"kwargs",
".",
"pop",
"(",
"'options'",
",",
"[",
"]",
")",
"if",
"kwargs",
".",
"pop",
"(",
"'strict_aliasing'",
",",
"False",
")",
":",
"raise",
"CompilationError",
"(",
"\"Cython req. strict aliasing to be disabled.\"",
")",
"if",
"'pic'",
"not",
"in",
"options",
":",
"options",
".",
"append",
"(",
"'pic'",
")",
"if",
"'warn'",
"not",
"in",
"options",
":",
"options",
".",
"append",
"(",
"'warn'",
")",
"# Let's be explicit about standard",
"if",
"cplus",
":",
"std",
"=",
"kwargs",
".",
"pop",
"(",
"'std'",
",",
"'c++98'",
")",
"else",
":",
"std",
"=",
"kwargs",
".",
"pop",
"(",
"'std'",
",",
"'c99'",
")",
"return",
"src2obj",
"(",
"interm_c_file",
",",
"objpath",
"=",
"objpath",
",",
"cwd",
"=",
"cwd",
",",
"only_update",
"=",
"only_update",
",",
"metadir",
"=",
"metadir",
",",
"include_dirs",
"=",
"include_dirs",
",",
"flags",
"=",
"flags",
",",
"std",
"=",
"std",
",",
"options",
"=",
"options",
",",
"logger",
"=",
"logger",
",",
"inc_py",
"=",
"True",
",",
"strict_aliasing",
"=",
"False",
",",
"*",
"*",
"kwargs",
")"
] | Convenience function
If cwd is specified, pyxpath and objpath are taken to be relative
If only_update is set to `True` the modification time is checked
and compilation is only run if the source is newer than the
destination
Parameters
----------
pyxpath: path string
path to Cython source file
objpath: path string (optional)
path to object file to generate
interm_c_dir: path string (optional)
directory to put generated C file.
cwd: path string (optional)
working directory and root of relative paths
logger: logging.Logger (optional)
passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
passed onto `simple_cythonize`
only_update: bool (optional)
passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
passed onto src2obj
include_numpy: bool (optional)
Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
Passed onto src2obj and via cy_kwargs['include_path']
to simple_cythonize.
cy_kwargs: dict (optional)
keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
convenience: cy_kwargs['gdb_debug'] is set True if gdb=True,
default: False
cplus: bool (optional)
Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
keyword arguments passed onto src2obj
Returns
-------
Absolute path of generated object file. | [
"Convenience",
"function"
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L474-L596 |
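A usage sketch for pyx2obj (not from the source repository; the file name 'wrapper.pyx' and the build directory are hypothetical placeholders):
>>> from pycompilation.compilation import pyx2obj  # doctest: +SKIP
>>> obj = pyx2obj('wrapper.pyx', objpath='./build', include_numpy=True,
...               only_update=True)  # doctest: +SKIP
>>> obj  # absolute path of the generated object file  # doctest: +SKIP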
bjodah/pycompilation | pycompilation/compilation.py | compile_link_import_py_ext | def compile_link_import_py_ext(
srcs, extname=None, build_dir=None, compile_kwargs=None,
link_kwargs=None, **kwargs):
"""
Compiles sources in `srcs` to a shared object (python extension)
which is imported. If shared object is newer than the sources, they
are not recompiled but instead it is imported.
Parameters
----------
srcs: list of strings
list of paths to sources
extname: string
name of extension (default: None)
(taken from the last file in `srcs` - without extension)
build_dir: string
path to directory in which objects files etc. are generated
compile_kwargs: dict
keyword arguments passed to compile_sources
link_kwargs: dict
keyword arguments passed to link_py_so
**kwargs:
additional keyword arguments overwrites to both compile_kwargs
and link_kwargs useful for convenience e.g. when passing logger
Returns
-------
the imported module
Examples
--------
>>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP
"""
build_dir = build_dir or '.'
if extname is None:
extname = os.path.splitext(os.path.basename(srcs[-1]))[0]
compile_kwargs = compile_kwargs or {}
compile_kwargs.update(kwargs)
link_kwargs = link_kwargs or {}
link_kwargs.update(kwargs)
try:
mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
except ImportError:
objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
cwd=build_dir, **compile_kwargs)
so = link_py_so(
objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
**link_kwargs)
mod = import_module_from_file(so)
return mod | python | def compile_link_import_py_ext(
srcs, extname=None, build_dir=None, compile_kwargs=None,
link_kwargs=None, **kwargs):
"""
Compiles sources in `srcs` to a shared object (python extension)
which is imported. If shared object is newer than the sources, they
are not recompiled but instead it is imported.
Parameters
----------
srcs: list of strings
list of paths to sources
extname: string
name of extension (default: None)
(taken from the last file in `srcs` - without extension)
build_dir: string
path to directory in which objects files etc. are generated
compile_kwargs: dict
keyword arguments passed to compile_sources
link_kwargs: dict
keyword arguments passed to link_py_so
**kwargs:
additional keyword arguments overwrites to both compile_kwargs
and link_kwargs useful for convenience e.g. when passing logger
Returns
-------
the imported module
Examples
--------
>>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP
"""
build_dir = build_dir or '.'
if extname is None:
extname = os.path.splitext(os.path.basename(srcs[-1]))[0]
compile_kwargs = compile_kwargs or {}
compile_kwargs.update(kwargs)
link_kwargs = link_kwargs or {}
link_kwargs.update(kwargs)
try:
mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
except ImportError:
objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
cwd=build_dir, **compile_kwargs)
so = link_py_so(
objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
**link_kwargs)
mod = import_module_from_file(so)
return mod | [
"def",
"compile_link_import_py_ext",
"(",
"srcs",
",",
"extname",
"=",
"None",
",",
"build_dir",
"=",
"None",
",",
"compile_kwargs",
"=",
"None",
",",
"link_kwargs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"build_dir",
"=",
"build_dir",
"or",
"'.'",
"if",
"extname",
"is",
"None",
":",
"extname",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"srcs",
"[",
"-",
"1",
"]",
")",
")",
"[",
"0",
"]",
"compile_kwargs",
"=",
"compile_kwargs",
"or",
"{",
"}",
"compile_kwargs",
".",
"update",
"(",
"kwargs",
")",
"link_kwargs",
"=",
"link_kwargs",
"or",
"{",
"}",
"link_kwargs",
".",
"update",
"(",
"kwargs",
")",
"try",
":",
"mod",
"=",
"import_module_from_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"build_dir",
",",
"extname",
")",
",",
"srcs",
")",
"except",
"ImportError",
":",
"objs",
"=",
"compile_sources",
"(",
"list",
"(",
"map",
"(",
"get_abspath",
",",
"srcs",
")",
")",
",",
"destdir",
"=",
"build_dir",
",",
"cwd",
"=",
"build_dir",
",",
"*",
"*",
"compile_kwargs",
")",
"so",
"=",
"link_py_so",
"(",
"objs",
",",
"cwd",
"=",
"build_dir",
",",
"fort",
"=",
"any_fort",
"(",
"srcs",
")",
",",
"cplus",
"=",
"any_cplus",
"(",
"srcs",
")",
",",
"*",
"*",
"link_kwargs",
")",
"mod",
"=",
"import_module_from_file",
"(",
"so",
")",
"return",
"mod"
] | Compiles sources in `srcs` to a shared object (python extension)
which is imported. If shared object is newer than the sources, they
are not recompiled but instead it is imported.
Parameters
----------
srcs: list of strings
list of paths to sources
extname: string
name of extension (default: None)
(taken from the last file in `srcs` - without extension)
build_dir: string
path to directory in which objects files etc. are generated
compile_kwargs: dict
keyword arguments passed to compile_sources
link_kwargs: dict
keyword arguments passed to link_py_so
**kwargs:
additional keyword arguments overwrites to both compile_kwargs
and link_kwargs useful for convenience e.g. when passing logger
Returns
-------
the imported module
Examples
--------
>>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP | [
"Compiles",
"sources",
"in",
"srcs",
"to",
"a",
"shared",
"object",
"(",
"python",
"extension",
")",
"which",
"is",
"imported",
".",
"If",
"shared",
"object",
"is",
"newer",
"than",
"the",
"sources",
"they",
"are",
"not",
"recompiled",
"but",
"instead",
"it",
"is",
"imported",
"."
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L617-L673 |
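A complementary sketch separating compile- and link-stage options (file names and flags are hypothetical; shared kwargs such as logger are merged into both stages):
>>> import logging
>>> mod = compile_link_import_py_ext(
...     ['fft.f90', 'fft_wrapper.pyx'], build_dir='./build',
...     compile_kwargs={'options': ['pic', 'warn']},
...     link_kwargs={'flags': ['-lblas']},
...     logger=logging.getLogger(__name__))  # doctest: +SKIP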
bjodah/pycompilation | pycompilation/compilation.py | compile_link_import_strings | def compile_link_import_strings(codes, build_dir=None, **kwargs):
"""
Creates a temporary directory and dumps, compiles and links
provided source code.
Parameters
----------
codes: iterable of name/source pair tuples
build_dir: string (default: None)
path to cache_dir. None implies use a temporary directory.
**kwargs:
keyword arguments passed onto `compile_link_import_py_ext`
"""
build_dir = build_dir or tempfile.mkdtemp()
if not os.path.isdir(build_dir):
raise OSError("Non-existent directory: ", build_dir)
source_files = []
if kwargs.get('logger', False) is True:
import logging
logging.basicConfig(level=logging.DEBUG)
kwargs['logger'] = logging.getLogger()
only_update = kwargs.get('only_update', True)
for name, code_ in codes:
dest = os.path.join(build_dir, name)
differs = True
md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
if only_update and os.path.exists(dest):
if os.path.exists(dest+'.md5'):
md5_on_disk = open(dest+'.md5', 'rt').read()
else:
md5_on_disk = md5_of_file(dest).hexdigest()
differs = md5_on_disk != md5_in_mem
if not only_update or differs:
with open(dest, 'wt') as fh:
fh.write(code_)
open(dest+'.md5', 'wt').write(md5_in_mem)
source_files.append(dest)
return compile_link_import_py_ext(
source_files, build_dir=build_dir, **kwargs) | python | def compile_link_import_strings(codes, build_dir=None, **kwargs):
"""
Creates a temporary directory and dumps, compiles and links
provided source code.
Parameters
----------
codes: iterable of name/source pair tuples
build_dir: string (default: None)
path to cache_dir. None implies use a temporary directory.
**kwargs:
keyword arguments passed onto `compile_link_import_py_ext`
"""
build_dir = build_dir or tempfile.mkdtemp()
if not os.path.isdir(build_dir):
raise OSError("Non-existent directory: ", build_dir)
source_files = []
if kwargs.get('logger', False) is True:
import logging
logging.basicConfig(level=logging.DEBUG)
kwargs['logger'] = logging.getLogger()
only_update = kwargs.get('only_update', True)
for name, code_ in codes:
dest = os.path.join(build_dir, name)
differs = True
md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
if only_update and os.path.exists(dest):
if os.path.exists(dest+'.md5'):
md5_on_disk = open(dest+'.md5', 'rt').read()
else:
md5_on_disk = md5_of_file(dest).hexdigest()
differs = md5_on_disk != md5_in_mem
if not only_update or differs:
with open(dest, 'wt') as fh:
fh.write(code_)
open(dest+'.md5', 'wt').write(md5_in_mem)
source_files.append(dest)
return compile_link_import_py_ext(
source_files, build_dir=build_dir, **kwargs) | [
"def",
"compile_link_import_strings",
"(",
"codes",
",",
"build_dir",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"build_dir",
"=",
"build_dir",
"or",
"tempfile",
".",
"mkdtemp",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"build_dir",
")",
":",
"raise",
"OSError",
"(",
"\"Non-existent directory: \"",
",",
"build_dir",
")",
"source_files",
"=",
"[",
"]",
"if",
"kwargs",
".",
"get",
"(",
"'logger'",
",",
"False",
")",
"is",
"True",
":",
"import",
"logging",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"kwargs",
"[",
"'logger'",
"]",
"=",
"logging",
".",
"getLogger",
"(",
")",
"only_update",
"=",
"kwargs",
".",
"get",
"(",
"'only_update'",
",",
"True",
")",
"for",
"name",
",",
"code_",
"in",
"codes",
":",
"dest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"build_dir",
",",
"name",
")",
"differs",
"=",
"True",
"md5_in_mem",
"=",
"md5_of_string",
"(",
"code_",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"if",
"only_update",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
"+",
"'.md5'",
")",
":",
"md5_on_disk",
"=",
"open",
"(",
"dest",
"+",
"'.md5'",
",",
"'rt'",
")",
".",
"read",
"(",
")",
"else",
":",
"md5_on_disk",
"=",
"md5_of_file",
"(",
"dest",
")",
".",
"hexdigest",
"(",
")",
"differs",
"=",
"md5_on_disk",
"!=",
"md5_in_mem",
"if",
"not",
"only_update",
"or",
"differs",
":",
"with",
"open",
"(",
"dest",
",",
"'wt'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"code_",
")",
"open",
"(",
"dest",
"+",
"'.md5'",
",",
"'wt'",
")",
".",
"write",
"(",
"md5_in_mem",
")",
"source_files",
".",
"append",
"(",
"dest",
")",
"return",
"compile_link_import_py_ext",
"(",
"source_files",
",",
"build_dir",
"=",
"build_dir",
",",
"*",
"*",
"kwargs",
")"
] | Creates a temporary directory and dumps, compiles and links
provided source code.
Parameters
----------
codes: iterable of name/source pair tuples
build_dir: string (default: None)
path to cache_dir. None implies use a temporary directory.
**kwargs:
keyword arguments passed onto `compile_link_import_py_ext` | [
"Creates",
"a",
"temporary",
"directory",
"and",
"dumps",
"compiles",
"and",
"links",
"provided",
"source",
"code",
"."
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L676-L717 |
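A minimal sketch of the name/source pair interface (assumes a working Cython and C toolchain; the module name and body are invented):
>>> codes = [('mymod.pyx', 'def double(x):\n    return 2 * x\n')]
>>> mod = compile_link_import_strings(codes, only_update=True)  # doctest: +SKIP
>>> mod.double(21)  # doctest: +SKIP
42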
reclosedev/lathermail | lathermail/storage/mongo.py | switch_db | def switch_db(name):
""" Hack to switch Flask-Pymongo db
:param name: db name
"""
with app.app_context():
app.extensions['pymongo'][mongo.config_prefix] = mongo.cx, mongo.cx[name] | python | def switch_db(name):
""" Hack to switch Flask-Pymongo db
:param name: db name
"""
with app.app_context():
app.extensions['pymongo'][mongo.config_prefix] = mongo.cx, mongo.cx[name] | [
"def",
"switch_db",
"(",
"name",
")",
":",
"with",
"app",
".",
"app_context",
"(",
")",
":",
"app",
".",
"extensions",
"[",
"'pymongo'",
"]",
"[",
"mongo",
".",
"config_prefix",
"]",
"=",
"mongo",
".",
"cx",
",",
"mongo",
".",
"cx",
"[",
"name",
"]"
] | Hack to switch Flask-Pymongo db
:param name: db name | [
"Hack",
"to",
"switch",
"Flask",
"-",
"Pymongo",
"db",
":",
"param",
"name",
":",
"db",
"name"
] | train | https://github.com/reclosedev/lathermail/blob/be006b4e4082002db31afea125c58345de1cd606/lathermail/storage/mongo.py#L22-L27 |
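A hedged usage sketch (assumes the module-level app and mongo objects of this package are already configured; the database name is arbitrary):
>>> switch_db('lathermail_test')  # doctest: +SKIP
>>> mongo.db.name  # doctest: +SKIP
'lathermail_test'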
BlueBrain/hpcbench | hpcbench/benchmark/osu.py | OSU.arguments | def arguments(self):
"""Dictionary providing the list of arguments for every
benchmark"""
if 'arguments' in self.attributes:
LOGGER.warning(
"WARNING: 'arguments' use in OSU yaml configuration file is deprecated. Please use 'options'!"
)
arguments = self.attributes['arguments']
if isinstance(arguments, dict):
return arguments
else:
return {k: arguments for k in self.categories}
elif 'options' in self.attributes:
options = self.attributes['options']
if isinstance(options, dict):
return options
else:
return {k: options for k in self.categories} | python | def arguments(self):
"""Dictionary providing the list of arguments for every
benchmark"""
if 'arguments' in self.attributes:
LOGGER.warning(
"WARNING: 'arguments' use in OSU yaml configuration file is deprecated. Please use 'options'!"
)
arguments = self.attributes['arguments']
if isinstance(arguments, dict):
return arguments
else:
return {k: arguments for k in self.categories}
elif 'options' in self.attributes:
options = self.attributes['options']
if isinstance(options, dict):
return options
else:
return {k: options for k in self.categories} | [
"def",
"arguments",
"(",
"self",
")",
":",
"if",
"'arguments'",
"in",
"self",
".",
"attributes",
":",
"LOGGER",
".",
"warning",
"(",
"\"WARNING: 'arguments' use in OSU yaml configuration file is deprecated. Please use 'options'!\"",
")",
"arguments",
"=",
"self",
".",
"attributes",
"[",
"'arguments'",
"]",
"if",
"isinstance",
"(",
"arguments",
",",
"dict",
")",
":",
"return",
"arguments",
"else",
":",
"return",
"{",
"k",
":",
"arguments",
"for",
"k",
"in",
"self",
".",
"categories",
"}",
"elif",
"'options'",
"in",
"self",
".",
"attributes",
":",
"options",
"=",
"self",
".",
"attributes",
"[",
"'options'",
"]",
"if",
"isinstance",
"(",
"options",
",",
"dict",
")",
":",
"return",
"options",
"else",
":",
"return",
"{",
"k",
":",
"options",
"for",
"k",
"in",
"self",
".",
"categories",
"}"
] | Dictionary providing the list of arguments for every
benchmark | [
"Dictionary",
"providing",
"the",
"list",
"of",
"arguments",
"for",
"every",
"benchmark"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/osu.py#L289-L306 |
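The fan-out in the non-dict branches can be reproduced with plain Python (the category names below are invented, not read from the benchmark):
>>> options = ['-x', '200', '-i', '100']
>>> categories = ['osu_bw', 'osu_latency']
>>> {k: options for k in categories}
{'osu_bw': ['-x', '200', '-i', '100'], 'osu_latency': ['-x', '200', '-i', '100']}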
portfoliome/foil | foil/serializers.py | _ | def _(obj):
"""ISO 8601 format. Interprets naive datetime as UTC with zulu suffix."""
tz_offset = obj.utcoffset()
if not tz_offset or tz_offset == UTC_ZERO:
iso_datetime = obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
else:
iso_datetime = obj.isoformat()
return iso_datetime | python | def _(obj):
"""ISO 8601 format. Interprets naive datetime as UTC with zulu suffix."""
tz_offset = obj.utcoffset()
if not tz_offset or tz_offset == UTC_ZERO:
iso_datetime = obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
else:
iso_datetime = obj.isoformat()
return iso_datetime | [
"def",
"_",
"(",
"obj",
")",
":",
"tz_offset",
"=",
"obj",
".",
"utcoffset",
"(",
")",
"if",
"not",
"tz_offset",
"or",
"tz_offset",
"==",
"UTC_ZERO",
":",
"iso_datetime",
"=",
"obj",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%S.%fZ'",
")",
"else",
":",
"iso_datetime",
"=",
"obj",
".",
"isoformat",
"(",
")",
"return",
"iso_datetime"
] | ISO 8601 format. Interprets naive datetime as UTC with zulu suffix. | [
"ISO",
"8601",
"format",
".",
"Interprets",
"naive",
"datetime",
"as",
"UTC",
"with",
"zulu",
"suffix",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/serializers.py#L26-L36 |
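Both branches can be exercised directly; this assumes UTC_ZERO equals timedelta(0), so naive and UTC datetimes take the strftime path:
>>> from datetime import datetime, timezone
>>> naive = datetime(2020, 1, 2, 3, 4, 5, 6)
>>> naive.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
'2020-01-02T03:04:05.000006Z'
>>> aware = datetime(2020, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
>>> aware.utcoffset().total_seconds()
0.0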
Metatab/metatab | metatab/resolver.py | WebResolver.get_row_generator | def get_row_generator(self, ref, cache=None):
"""Return a row generator for a reference"""
from inspect import isgenerator
from rowgenerators import get_generator
g = get_generator(ref)
if not g:
raise GenerateError("Cant figure out how to generate rows from {} ref: {}".format(type(ref), ref))
else:
return g | python | def get_row_generator(self, ref, cache=None):
"""Return a row generator for a reference"""
from inspect import isgenerator
from rowgenerators import get_generator
g = get_generator(ref)
if not g:
raise GenerateError("Cant figure out how to generate rows from {} ref: {}".format(type(ref), ref))
else:
return g | [
"def",
"get_row_generator",
"(",
"self",
",",
"ref",
",",
"cache",
"=",
"None",
")",
":",
"from",
"inspect",
"import",
"isgenerator",
"from",
"rowgenerators",
"import",
"get_generator",
"g",
"=",
"get_generator",
"(",
"ref",
")",
"if",
"not",
"g",
":",
"raise",
"GenerateError",
"(",
"\"Cant figure out how to generate rows from {} ref: {}\"",
".",
"format",
"(",
"type",
"(",
"ref",
")",
",",
"ref",
")",
")",
"else",
":",
"return",
"g"
] | Return a row generator for a reference | [
"Return",
"a",
"row",
"generator",
"for",
"a",
"reference"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/resolver.py#L34-L45 |
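A hedged happy-path sketch ('data.csv' is a placeholder reference; rowgenerators must be installed and able to resolve it):
>>> resolver = WebResolver()  # doctest: +SKIP
>>> gen = resolver.get_row_generator('data.csv')  # doctest: +SKIP
>>> rows = list(gen)  # doctest: +SKIP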
portfoliome/foil | foil/filters.py | create_key_filter | def create_key_filter(properties: Dict[str, list]) -> Iterator[Tuple]:
"""Generate combinations of key, value pairs for each key in properties.
Examples
--------
properties = {'ent': ['geo_rev', 'supply_chain'], 'own': ['fi']}
>> create_key_filter(properties)
--> [('ent', 'geo_rev'), ('ent', 'supply_chain'), ('own', 'fi')]
"""
combinations = (product([k], v) for k, v in properties.items())
return chain.from_iterable(combinations) | python | def create_key_filter(properties: Dict[str, list]) -> Iterator[Tuple]:
"""Generate combinations of key, value pairs for each key in properties.
Examples
--------
properties = {'ent': ['geo_rev', 'supply_chain'], 'own': ['fi']}
>> create_key_filter(properties)
--> [('ent', 'geo_rev'), ('ent', 'supply_chain'), ('own', 'fi')]
"""
combinations = (product([k], v) for k, v in properties.items())
return chain.from_iterable(combinations) | [
"def",
"create_key_filter",
"(",
"properties",
":",
"Dict",
"[",
"str",
",",
"list",
"]",
")",
"->",
"List",
"[",
"Tuple",
"]",
":",
"combinations",
"=",
"(",
"product",
"(",
"[",
"k",
"]",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"properties",
".",
"items",
"(",
")",
")",
"return",
"chain",
".",
"from_iterable",
"(",
"combinations",
")"
] | Generate combinations of key, value pairs for each key in properties.
Examples
--------
properties = {'ent': ['geo_rev', 'supply_chain'], 'own': ['fi']}
>> create_key_filter(properties)
--> [('ent', 'geo_rev'), ('ent', 'supply_chain'), ('own', 'fi')] | [
"Generate",
"combinations",
"of",
"key",
"value",
"pairs",
"for",
"each",
"key",
"in",
"properties",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/filters.py#L38-L50 |
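With create_key_filter and its itertools imports in scope, the corrected docstring example runs as-is; sorted() is used here only to make the output order deterministic:
>>> properties = {'ent': ['geo_rev', 'supply_chain'], 'own': ['fi']}
>>> sorted(create_key_filter(properties))
[('ent', 'geo_rev'), ('ent', 'supply_chain'), ('own', 'fi')]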
portfoliome/foil | foil/filters.py | create_indexer | def create_indexer(indexes: list):
"""Create indexer function to pluck values from list."""
if len(indexes) == 1:
index = indexes[0]
return lambda x: (x[index],)
else:
return itemgetter(*indexes) | python | def create_indexer(indexes: list):
"""Create indexer function to pluck values from list."""
if len(indexes) == 1:
index = indexes[0]
return lambda x: (x[index],)
else:
return itemgetter(*indexes) | [
"def",
"create_indexer",
"(",
"indexes",
":",
"list",
")",
":",
"if",
"len",
"(",
"indexes",
")",
"==",
"1",
":",
"index",
"=",
"indexes",
"[",
"0",
"]",
"return",
"lambda",
"x",
":",
"(",
"x",
"[",
"index",
"]",
",",
")",
"else",
":",
"return",
"itemgetter",
"(",
"*",
"indexes",
")"
] | Create indexer function to pluck values from list. | [
"Create",
"indexer",
"function",
"to",
"pluck",
"values",
"from",
"list",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/filters.py#L53-L60 |
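A small check of both branches, assuming create_indexer is in scope (single index versus multiple indexes):
>>> row = ['alpha', 'beta', 'gamma']
>>> create_indexer([1])(row)      # single index still yields a tuple
('beta',)
>>> create_indexer([0, 2])(row)   # multiple indexes delegate to itemgetter
('alpha', 'gamma')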
portfoliome/foil | foil/filters.py | AttributeFilter.including | def including(self, sequence) -> Generator:
"""Include the sequence elements matching the filter set."""
return (element for element in sequence
if self.indexer(element) in self.predicates) | python | def including(self, sequence) -> Generator:
"""Include the sequence elements matching the filter set."""
return (element for element in sequence
if self.indexer(element) in self.predicates) | [
"def",
"including",
"(",
"self",
",",
"sequence",
")",
"->",
"Generator",
":",
"return",
"(",
"element",
"for",
"element",
"in",
"sequence",
"if",
"self",
".",
"indexer",
"(",
"element",
")",
"in",
"self",
".",
"predicates",
")"
] | Include the sequence elements matching the filter set. | [
"Include",
"the",
"sequence",
"elements",
"matching",
"the",
"filter",
"set",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/filters.py#L27-L30 |
portfoliome/foil | foil/filters.py | AttributeFilter.excluding | def excluding(self, sequence) -> Generator:
"""Exclude the sequence elements matching the filter set."""
return (element for element in sequence
if self.indexer(element) not in self.predicates) | python | def excluding(self, sequence) -> Generator:
"""Exclude the sequence elements matching the filter set."""
return (element for element in sequence
if self.indexer(element) not in self.predicates) | [
"def",
"excluding",
"(",
"self",
",",
"sequence",
")",
"->",
"Generator",
":",
"return",
"(",
"element",
"for",
"element",
"in",
"sequence",
"if",
"self",
".",
"indexer",
"(",
"element",
")",
"not",
"in",
"self",
".",
"predicates",
")"
] | Exclude the sequence elements matching the filter set. | [
"Exclude",
"the",
"sequence",
"elements",
"matching",
"the",
"filter",
"set",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/filters.py#L32-L35 |
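A sketch tying including/excluding together; the AttributeFilter constructor signature is inferred from the attributes these methods use and may differ in the full source:
>>> rows = [('AAPL', 'equity'), ('TLT', 'bond')]
>>> f = AttributeFilter(predicates={('equity',)},
...                     indexer=create_indexer([1]))  # doctest: +SKIP
>>> list(f.including(rows))  # doctest: +SKIP
[('AAPL', 'equity')]
>>> list(f.excluding(rows))  # doctest: +SKIP
[('TLT', 'bond')]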
bruziev/security_interface | security_interface/api.py | Security.can | async def can(self, identity, permission) -> bool:
"""
Check user permissions.
:return: ``True`` if the identity is allowed the permission, else return ``False``.
"""
assert isinstance(permission, (str, enum.Enum)), permission
assert permission
identify = await self.identity_policy.identify(identity)
# a non-registered user may still have some permissions
access = await self.autz_policy.can(identify, permission)
return access | python | async def can(self, identity, permission) -> bool:
"""
Check user permissions.
:return: ``True`` if the identity is allowed the permission, else return ``False``.
"""
assert isinstance(permission, (str, enum.Enum)), permission
assert permission
identify = await self.identity_policy.identify(identity)
# a non-registered user may still have some permissions
access = await self.autz_policy.can(identify, permission)
return access | [
"async",
"def",
"can",
"(",
"self",
",",
"identity",
",",
"permission",
")",
"->",
"bool",
":",
"assert",
"isinstance",
"(",
"permission",
",",
"(",
"str",
",",
"enum",
".",
"Enum",
")",
")",
",",
"permission",
"assert",
"permission",
"identify",
"=",
"await",
"self",
".",
"identity_policy",
".",
"identify",
"(",
"identity",
")",
"# non-registered user still may has some permissions",
"access",
"=",
"await",
"self",
".",
"autz_policy",
".",
"can",
"(",
"identify",
",",
"permission",
")",
"return",
"access"
] | Check user permissions.
:return: ``True`` if the identity is allowed the permission, else return ``False``. | [
"Check",
"user",
"permissions",
"."
] | train | https://github.com/bruziev/security_interface/blob/ec1f30c8ac051291694b0099caa0a7fde97ddfe6/security_interface/api.py#L25-L36 |
bruziev/security_interface | security_interface/api.py | Security.check_authorized | async def check_authorized(self, identity):
"""
Works like :func:`Security.identify`, but raises
:func:`UnauthorizedError` when the check fails.
:param identity: Claim
:return: Checked claim
:raise: :func:`UnauthorizedError`
"""
identify = await self.identify(identity)
if identify is None:
raise UnauthorizedError()
return identify | python | async def check_authorized(self, identity):
"""
Works like :func:`Security.identify`, but raises
:func:`UnauthorizedError` when the check fails.
:param identity: Claim
:return: Checked claim
:raise: :func:`UnauthorizedError`
"""
identify = await self.identify(identity)
if identify is None:
raise UnauthorizedError()
return identify | [
"async",
"def",
"check_authorized",
"(",
"self",
",",
"identity",
")",
":",
"identify",
"=",
"await",
"self",
".",
"identify",
"(",
"identity",
")",
"if",
"identify",
"is",
"None",
":",
"raise",
"UnauthorizedError",
"(",
")",
"return",
"identify"
] | Works like :func:`Security.identify`, but raises
:func:`UnauthorizedError` when the check fails.
:param identity: Claim
:return: Checked claim
:raise: :func:`UnauthorizedError` | [
"Works",
"like",
":",
"func",
":",
"Security",
".",
"identity",
"but",
"when",
"check",
"is",
"failed",
":",
"func",
":",
"UnauthorizedError",
"exception",
"is",
"raised",
"."
] | train | https://github.com/bruziev/security_interface/blob/ec1f30c8ac051291694b0099caa0a7fde97ddfe6/security_interface/api.py#L49-L61 |
bruziev/security_interface | security_interface/api.py | Security.check_permission | async def check_permission(self, identity, permission):
"""
Works like :func:`Security.can`, but raises
:func:`ForbiddenError` when the check fails.
:param identity: Claim
:param permission: Permission
:return: Checked claim
:raise: :func:`ForbiddenError`
"""
await self.check_authorized(identity)
allowed = await self.can(identity, permission)
if not allowed:
raise ForbiddenError() | python | async def check_permission(self, identity, permission):
"""
Works like :func:`Security.can`, but raises
:func:`ForbiddenError` when the check fails.
:param identity: Claim
:param permission: Permission
:return: Checked claim
:raise: :func:`ForbiddenError`
"""
await self.check_authorized(identity)
allowed = await self.can(identity, permission)
if not allowed:
raise ForbiddenError() | [
"async",
"def",
"check_permission",
"(",
"self",
",",
"identity",
",",
"permission",
")",
":",
"await",
"self",
".",
"check_authorized",
"(",
"identity",
")",
"allowed",
"=",
"await",
"self",
".",
"can",
"(",
"identity",
",",
"permission",
")",
"if",
"not",
"allowed",
":",
"raise",
"ForbiddenError",
"(",
")"
] | Works like :func:`Security.can`, but raises
:func:`ForbiddenError` when the check fails.
:param identity: Claim
:param permission: Permission
:return: Checked claim
:raise: :func:`ForbiddenError` | [
"Works",
"like",
":",
"func",
":",
"Security",
".",
"can",
"but",
"when",
"check",
"is",
"failed",
":",
"func",
":",
"ForbiddenError",
"exception",
"is",
"raised",
"."
] | train | https://github.com/bruziev/security_interface/blob/ec1f30c8ac051291694b0099caa0a7fde97ddfe6/security_interface/api.py#L63-L76 |
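A hedged end-to-end sketch of the three checks; the policy objects, token, and the Security(identity_policy, autz_policy) constructor are assumptions, and the awaits must run inside a coroutine:
>>> sec = Security(identity_policy, autz_policy)  # doctest: +SKIP
>>> allowed = await sec.can(token, 'read')  # doctest: +SKIP
>>> claim = await sec.check_authorized(token)   # UnauthorizedError on failure  # doctest: +SKIP
>>> await sec.check_permission(token, 'admin')  # ForbiddenError on failure  # doctest: +SKIP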
jfear/sramongo | sramongo/sra2mongo.py | arguments | def arguments():
"""Pulls in command line arguments."""
DESCRIPTION = """\
"""
parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=Raw)
parser.add_argument("--email", dest="email", action='store', required=False, default=False,
help="An email address is required for querying Entrez databases.")
parser.add_argument("--api", dest="api_key", action='store', required=False, default=False,
help="A users ENTREZ API Key. Will speed up download.")
parser.add_argument("--query", dest="query", action='store', required=True,
help="Query to submit to Entrez.")
parser.add_argument("--host", dest="host", action='store', required=False, default='localhost',
help="Location of an already running database.")
parser.add_argument("--port", dest="port", action='store', type=int, required=False, default=27017,
help="Mongo database port.")
parser.add_argument("--db", dest="db", action='store', required=False, default='sramongo',
help="Name of the database.")
parser.add_argument("--debug", dest="debug", action='store_true', required=False,
help="Turn on debug output.")
parser.add_argument("--force", dest="force", action='store_true', required=False,
help="Forces clearing the cache.")
args = parser.parse_args()
if not (args.email or args.api_key):
logger.error('You must provide either an `--email` or `--api`.')
sys.exit()
return args | python | def arguments():
"""Pulls in command line arguments."""
DESCRIPTION = """\
"""
parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=Raw)
parser.add_argument("--email", dest="email", action='store', required=False, default=False,
help="An email address is required for querying Entrez databases.")
parser.add_argument("--api", dest="api_key", action='store', required=False, default=False,
help="A users ENTREZ API Key. Will speed up download.")
parser.add_argument("--query", dest="query", action='store', required=True,
help="Query to submit to Entrez.")
parser.add_argument("--host", dest="host", action='store', required=False, default='localhost',
help="Location of an already running database.")
parser.add_argument("--port", dest="port", action='store', type=int, required=False, default=27017,
help="Mongo database port.")
parser.add_argument("--db", dest="db", action='store', required=False, default='sramongo',
help="Name of the database.")
parser.add_argument("--debug", dest="debug", action='store_true', required=False,
help="Turn on debug output.")
parser.add_argument("--force", dest="force", action='store_true', required=False,
help="Forces clearing the cache.")
args = parser.parse_args()
if not (args.email or args.api_key):
logger.error('You must provide either an `--email` or `--api`.')
sys.exit()
return args | [
"def",
"arguments",
"(",
")",
":",
"DESCRIPTION",
"=",
"\"\"\"\\\n \"\"\"",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"DESCRIPTION",
",",
"formatter_class",
"=",
"Raw",
")",
"parser",
".",
"add_argument",
"(",
"\"--email\"",
",",
"dest",
"=",
"\"email\"",
",",
"action",
"=",
"'store'",
",",
"required",
"=",
"False",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"An email address is required for querying Entrez databases.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--api\"",
",",
"dest",
"=",
"\"api_key\"",
",",
"action",
"=",
"'store'",
",",
"required",
"=",
"False",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"A users ENTREZ API Key. Will speed up download.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--query\"",
",",
"dest",
"=",
"\"query\"",
",",
"action",
"=",
"'store'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Query to submit to Entrez.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--host\"",
",",
"dest",
"=",
"\"host\"",
",",
"action",
"=",
"'store'",
",",
"required",
"=",
"False",
",",
"default",
"=",
"'localhost'",
",",
"help",
"=",
"\"Location of an already running database.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--port\"",
",",
"dest",
"=",
"\"port\"",
",",
"action",
"=",
"'store'",
",",
"type",
"=",
"int",
",",
"required",
"=",
"False",
",",
"default",
"=",
"27017",
",",
"help",
"=",
"\"Mongo database port.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--db\"",
",",
"dest",
"=",
"\"db\"",
",",
"action",
"=",
"'store'",
",",
"required",
"=",
"False",
",",
"default",
"=",
"'sramongo'",
",",
"help",
"=",
"\"Name of the database.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--debug\"",
",",
"dest",
"=",
"\"debug\"",
",",
"action",
"=",
"'store_true'",
",",
"required",
"=",
"False",
",",
"help",
"=",
"\"Turn on debug output.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--force\"",
",",
"dest",
"=",
"\"force\"",
",",
"action",
"=",
"'store_true'",
",",
"required",
"=",
"False",
",",
"help",
"=",
"\"Forces clearing the cache.\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"not",
"(",
"args",
".",
"email",
"or",
"args",
".",
"api_key",
")",
":",
"logger",
".",
"error",
"(",
"'You must provide either an `--email` or `--api`.'",
")",
"sys",
".",
"exit",
"(",
")",
"return",
"args"
] | Pulls in command line arguments. | [
"Pulls",
"in",
"command",
"line",
"arguments",
"."
] | train | https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/sra2mongo.py#L24-L62 |
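A minimal parsing sketch; arguments() reads sys.argv, so it is patched here, and the e-mail and query values are placeholders:
>>> import sys
>>> sys.argv = ['sra2mongo', '--email', 'user@example.org',
...             '--query', 'SRP000001']  # doctest: +SKIP
>>> args = arguments()  # doctest: +SKIP
>>> (args.db, args.port)  # doctest: +SKIP
('sramongo', 27017)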
BlueBrain/hpcbench | hpcbench/net/__init__.py | BeNet.run | def run(self, *nodes):
"""Execute benchmarks on every node specified in arguments.
If none are given, then execute benchmarks on all nodes
specified in the ``network.nodes`` campaign configuration.
"""
nodes = nodes or self.nodes
self._prelude(*nodes)
@write_yaml_report
def _run():
self._build_installer()
runner = functools.partial(run_on_host, self.campaign)
if self.campaign.network.max_concurrent_runs > 1:
pool = Pool(self.campaign.network.max_concurrent_runs)
pool.map(runner, nodes)
else:
for node in nodes:
runner(node)
return nodes
with pushd(self.campaign_path):
_run() | python | def run(self, *nodes):
"""Execute benchmarks on every node specified in arguments.
If none are given, then execute benchmarks on all nodes
specified in the ``network.nodes`` campaign configuration.
"""
nodes = nodes or self.nodes
self._prelude(*nodes)
@write_yaml_report
def _run():
self._build_installer()
runner = functools.partial(run_on_host, self.campaign)
if self.campaign.network.max_concurrent_runs > 1:
pool = Pool(self.campaign.network.max_concurrent_runs)
pool.map(runner, nodes)
else:
for node in nodes:
runner(node)
return nodes
with pushd(self.campaign_path):
_run() | [
"def",
"run",
"(",
"self",
",",
"*",
"nodes",
")",
":",
"nodes",
"=",
"nodes",
"or",
"self",
".",
"nodes",
"self",
".",
"_prelude",
"(",
"*",
"nodes",
")",
"@",
"write_yaml_report",
"def",
"_run",
"(",
")",
":",
"self",
".",
"_build_installer",
"(",
")",
"runner",
"=",
"functools",
".",
"partial",
"(",
"run_on_host",
",",
"self",
".",
"campaign",
")",
"if",
"self",
".",
"campaign",
".",
"network",
".",
"max_concurrent_runs",
">",
"1",
":",
"pool",
"=",
"Pool",
"(",
"self",
".",
"campaign",
".",
"network",
".",
"max_concurrent_runs",
")",
"pool",
".",
"map",
"(",
"runner",
",",
"nodes",
")",
"else",
":",
"for",
"node",
"in",
"nodes",
":",
"runner",
"(",
"node",
")",
"return",
"nodes",
"with",
"pushd",
"(",
"self",
".",
"campaign_path",
")",
":",
"_run",
"(",
")"
] | Execute benchmarks on every node specified in arguments.
If none are given, then execute benchmarks on all nodes
specified in the ``network.nodes`` campaign configuration. | [
"Execute",
"benchmarks",
"on",
"every",
"node",
"specified",
"in",
"arguments",
".",
"If",
"none",
"are",
"given",
"then",
"execute",
"benchmarks",
"on",
"every",
"nodes",
"specified",
"in",
"the",
"network",
".",
"nodes",
"campaign",
"configuration",
"."
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/net/__init__.py#L97-L118 |
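A hedged driver sketch (the campaign file and node names are placeholders):
>>> net = BeNet('campaign.yaml')  # doctest: +SKIP
>>> net.run('node01', 'node02')  # or net.run() to use network.nodes  # doctest: +SKIP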
BlueBrain/hpcbench | hpcbench/net/__init__.py | BeNetHost.run | def run(self):
"""Execute benchmark on the specified node
"""
with self._scp_bensh_runner():
self._execute_bensh_runner()
path = self._retrieve_tarball()
try:
self._aggregate_tarball(path)
finally:
os.remove(path) | python | def run(self):
"""Execute benchmark on the specified node
"""
with self._scp_bensh_runner():
self._execute_bensh_runner()
path = self._retrieve_tarball()
try:
self._aggregate_tarball(path)
finally:
os.remove(path) | [
"def",
"run",
"(",
"self",
")",
":",
"with",
"self",
".",
"_scp_bensh_runner",
"(",
")",
":",
"self",
".",
"_execute_bensh_runner",
"(",
")",
"path",
"=",
"self",
".",
"_retrieve_tarball",
"(",
")",
"try",
":",
"self",
".",
"_aggregate_tarball",
"(",
"path",
")",
"finally",
":",
"os",
".",
"remove",
"(",
"path",
")"
] | Execute benchmark on the specified node | [
"Execute",
"benchmark",
"on",
"the",
"specified",
"node"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/net/__init__.py#L190-L199 |
BlueBrain/hpcbench | hpcbench/benchmark/imb.py | IMB.node_pairing | def node_pairing(self):
"""if "node" then test current node and next one
if "tag", then create tests for every pair of the current tag.
"""
value = self.attributes['node_pairing']
if value not in IMB.NODE_PAIRING:
msg = 'Unexpected {0} value: got "{1}" but valid values are {2}'
msg = msg.format('node_pairing', value, IMB.NODE_PAIRING)
raise ValueError(msg)
return value | python | def node_pairing(self):
"""if "node" then test current node and next one
if "tag", then create tests for every pair of the current tag.
"""
value = self.attributes['node_pairing']
if value not in IMB.NODE_PAIRING:
msg = 'Unexpected {0} value: got "{1}" but valid values are {2}'
msg = msg.format('node_pairing', value, IMB.NODE_PAIRING)
raise ValueError(msg)
return value | [
"def",
"node_pairing",
"(",
"self",
")",
":",
"value",
"=",
"self",
".",
"attributes",
"[",
"'node_pairing'",
"]",
"if",
"value",
"not",
"in",
"IMB",
".",
"NODE_PAIRING",
":",
"msg",
"=",
"'Unexpected {0} value: got \"{1}\" but valid values are {2}'",
"msg",
"=",
"msg",
".",
"format",
"(",
"'node_pairing'",
",",
"value",
",",
"IMB",
".",
"NODE_PAIRING",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"value"
] | if "node" then test current node and next one
if "tag", then create tests for every pair of the current tag. | [
"if",
"node",
"then",
"test",
"current",
"node",
"and",
"next",
"one",
"if",
"tag",
"then",
"create",
"tests",
"for",
"every",
"pair",
"of",
"the",
"current",
"tag",
"."
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/imb.py#L252-L261 |
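A validation sketch; the attribute is set directly for illustration, while real values come from the YAML campaign file:
>>> imb = IMB()  # doctest: +SKIP
>>> imb.attributes['node_pairing'] = 'tag'  # doctest: +SKIP
>>> imb.node_pairing  # doctest: +SKIP
'tag'
>>> imb.attributes['node_pairing'] = 'cluster'  # doctest: +SKIP
>>> imb.node_pairing  # raises ValueError listing IMB.NODE_PAIRING  # doctest: +SKIP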
PolyJIT/benchbuild | benchbuild/projects/gentoo/gentoo.py | write_makeconfig | def write_makeconfig(_path):
"""
Write a valid gentoo make.conf file to :path:.
Args:
path - The output path of the make.conf
"""
http_proxy = str(CFG["gentoo"]["http_proxy"])
ftp_proxy = str(CFG["gentoo"]["ftp_proxy"])
rsync_proxy = str(CFG["gentoo"]["rsync_proxy"])
path.mkfile_uchroot(local.path('/') / _path)
with open(_path, 'w') as makeconf:
lines = '''
PORTAGE_USERNAME=root
PORTAGE_GROUPNAME=root
CFLAGS="-O2 -pipe"
CXXFLAGS="${CFLAGS}"
FEATURES="nostrip -xattr"
CHOST="x86_64-pc-linux-gnu"
USE="bindist mmx sse sse2"
PORTDIR="/usr/portage"
DISTDIR="/mnt/distfiles"
PKGDIR="${PORTDIR}/packages"
'''
makeconf.write(lines)
mounts = CFG["container"]["mounts"].value
tmp_dir = str(CFG["tmp_dir"])
mounts.append({"src": tmp_dir, "tgt": "/mnt/distfiles"})
CFG["container"]["mounts"] = mounts
if http_proxy is not None:
http_s = "http_proxy={0}".format(http_proxy)
https_s = "https_proxy={0}".format(http_proxy)
makeconf.write(http_s + "\n")
makeconf.write(https_s + "\n")
if ftp_proxy is not None:
fp_s = "ftp_proxy={0}".format(ftp_proxy)
makeconf.write(fp_s + "\n")
if rsync_proxy is not None:
rp_s = "RSYNC_PROXY={0}".format(rsync_proxy)
makeconf.write(rp_s + "\n") | python | def write_makeconfig(_path):
"""
Write a valid gentoo make.conf file to :path:.
Args:
path - The output path of the make.conf
"""
http_proxy = str(CFG["gentoo"]["http_proxy"])
ftp_proxy = str(CFG["gentoo"]["ftp_proxy"])
rsync_proxy = str(CFG["gentoo"]["rsync_proxy"])
path.mkfile_uchroot(local.path('/') / _path)
with open(_path, 'w') as makeconf:
lines = '''
PORTAGE_USERNAME=root
PORTAGE_GROUPNAME=root
CFLAGS="-O2 -pipe"
CXXFLAGS="${CFLAGS}"
FEATURES="nostrip -xattr"
CHOST="x86_64-pc-linux-gnu"
USE="bindist mmx sse sse2"
PORTDIR="/usr/portage"
DISTDIR="/mnt/distfiles"
PKGDIR="${PORTDIR}/packages"
'''
makeconf.write(lines)
mounts = CFG["container"]["mounts"].value
tmp_dir = str(CFG["tmp_dir"])
mounts.append({"src": tmp_dir, "tgt": "/mnt/distfiles"})
CFG["container"]["mounts"] = mounts
if http_proxy is not None:
http_s = "http_proxy={0}".format(http_proxy)
https_s = "https_proxy={0}".format(http_proxy)
makeconf.write(http_s + "\n")
makeconf.write(https_s + "\n")
if ftp_proxy is not None:
fp_s = "ftp_proxy={0}".format(ftp_proxy)
makeconf.write(fp_s + "\n")
if rsync_proxy is not None:
rp_s = "RSYNC_PROXY={0}".format(rsync_proxy)
makeconf.write(rp_s + "\n") | [
"def",
"write_makeconfig",
"(",
"_path",
")",
":",
"http_proxy",
"=",
"str",
"(",
"CFG",
"[",
"\"gentoo\"",
"]",
"[",
"\"http_proxy\"",
"]",
")",
"ftp_proxy",
"=",
"str",
"(",
"CFG",
"[",
"\"gentoo\"",
"]",
"[",
"\"ftp_proxy\"",
"]",
")",
"rsync_proxy",
"=",
"str",
"(",
"CFG",
"[",
"\"gentoo\"",
"]",
"[",
"\"rsync_proxy\"",
"]",
")",
"path",
".",
"mkfile_uchroot",
"(",
"local",
".",
"path",
"(",
"'/'",
")",
"/",
"_path",
")",
"with",
"open",
"(",
"_path",
",",
"'w'",
")",
"as",
"makeconf",
":",
"lines",
"=",
"'''\nPORTAGE_USERNAME=root\nPORTAGE_GROUPNAME=root\nCFLAGS=\"-O2 -pipe\"\nCXXFLAGS=\"${CFLAGS}\"\nFEATURES=\"nostrip -xattr\"\nCHOST=\"x86_64-pc-linux-gnu\"\nUSE=\"bindist mmx sse sse2\"\nPORTDIR=\"/usr/portage\"\nDISTDIR=\"/mnt/distfiles\"\nPKGDIR=\"${PORTDIR}/packages\"\n'''",
"makeconf",
".",
"write",
"(",
"lines",
")",
"mounts",
"=",
"CFG",
"[",
"\"container\"",
"]",
"[",
"\"mounts\"",
"]",
".",
"value",
"tmp_dir",
"=",
"str",
"(",
"CFG",
"[",
"\"tmp_dir\"",
"]",
")",
"mounts",
".",
"append",
"(",
"{",
"\"src\"",
":",
"tmp_dir",
",",
"\"tgt\"",
":",
"\"/mnt/distfiles\"",
"}",
")",
"CFG",
"[",
"\"container\"",
"]",
"[",
"\"mounts\"",
"]",
"=",
"mounts",
"if",
"http_proxy",
"is",
"not",
"None",
":",
"http_s",
"=",
"\"http_proxy={0}\"",
".",
"format",
"(",
"http_proxy",
")",
"https_s",
"=",
"\"https_proxy={0}\"",
".",
"format",
"(",
"http_proxy",
")",
"makeconf",
".",
"write",
"(",
"http_s",
"+",
"\"\\n\"",
")",
"makeconf",
".",
"write",
"(",
"https_s",
"+",
"\"\\n\"",
")",
"if",
"ftp_proxy",
"is",
"not",
"None",
":",
"fp_s",
"=",
"\"ftp_proxy={0}\"",
".",
"format",
"(",
"ftp_proxy",
")",
"makeconf",
".",
"write",
"(",
"fp_s",
"+",
"\"\\n\"",
")",
"if",
"rsync_proxy",
"is",
"not",
"None",
":",
"rp_s",
"=",
"\"RSYNC_PROXY={0}\"",
".",
"format",
"(",
"rsync_proxy",
")",
"makeconf",
".",
"write",
"(",
"rp_s",
"+",
"\"\\n\"",
")"
] | Write a valid gentoo make.conf file to :path:.
Args:
path - The output path of the make.conf | [
"Write",
"a",
"valid",
"gentoo",
"make",
".",
"conf",
"file",
"to",
":",
"path",
":",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/projects/gentoo/gentoo.py#L139-L184 |
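A hedged sketch of driving the writer; CFG must be populated, the uchroot path helpers expect to run inside the container build, and the proxy URL is a placeholder:
>>> CFG["gentoo"]["http_proxy"] = "http://proxy.example.org:3128"  # doctest: +SKIP
>>> write_makeconfig("etc/portage/make.conf")  # doctest: +SKIP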
PolyJIT/benchbuild | benchbuild/projects/gentoo/gentoo.py | write_bashrc | def write_bashrc(_path):
"""
Write a valid gentoo bashrc file to :path:.
Args:
path - The output path of the bashrc
"""
cfg_mounts = CFG["container"]["mounts"].value
cfg_prefix = CFG["container"]["prefixes"].value
path.mkfile_uchroot("/etc/portage/bashrc")
mounts = uchroot.mounts("mnt", cfg_mounts)
p_paths, p_libs = uchroot.env(cfg_prefix)
paths, libs = uchroot.env(mounts)
paths = paths + p_paths
libs = libs + p_libs
with open(_path, 'w') as bashrc:
lines = '''
export PATH="{0}:${{PATH}}"
export LD_LIBRARY_PATH="{1}:${{LD_LIBRARY_PATH}}"
'''.format(path.list_to_path(paths), path.list_to_path(libs))
bashrc.write(lines) | python | def write_bashrc(_path):
"""
Write a valid gentoo bashrc file to :path:.
Args:
path - The output path of the bashrc
"""
cfg_mounts = CFG["container"]["mounts"].value
cfg_prefix = CFG["container"]["prefixes"].value
path.mkfile_uchroot("/etc/portage/bashrc")
mounts = uchroot.mounts("mnt", cfg_mounts)
p_paths, p_libs = uchroot.env(cfg_prefix)
paths, libs = uchroot.env(mounts)
paths = paths + p_paths
libs = libs + p_libs
with open(_path, 'w') as bashrc:
lines = '''
export PATH="{0}:${{PATH}}"
export LD_LIBRARY_PATH="{1}:${{LD_LIBRARY_PATH}}"
'''.format(path.list_to_path(paths), path.list_to_path(libs))
bashrc.write(lines) | [
"def",
"write_bashrc",
"(",
"_path",
")",
":",
"cfg_mounts",
"=",
"CFG",
"[",
"\"container\"",
"]",
"[",
"\"mounts\"",
"]",
".",
"value",
"cfg_prefix",
"=",
"CFG",
"[",
"\"container\"",
"]",
"[",
"\"prefixes\"",
"]",
".",
"value",
"path",
".",
"mkfile_uchroot",
"(",
"\"/etc/portage/bashrc\"",
")",
"mounts",
"=",
"uchroot",
".",
"mounts",
"(",
"\"mnt\"",
",",
"cfg_mounts",
")",
"p_paths",
",",
"p_libs",
"=",
"uchroot",
".",
"env",
"(",
"cfg_prefix",
")",
"paths",
",",
"libs",
"=",
"uchroot",
".",
"env",
"(",
"mounts",
")",
"paths",
"=",
"paths",
"+",
"p_paths",
"libs",
"=",
"libs",
"+",
"p_libs",
"with",
"open",
"(",
"_path",
",",
"'w'",
")",
"as",
"bashrc",
":",
"lines",
"=",
"'''\nexport PATH=\"{0}:${{PATH}}\"\nexport LD_LIBRARY_PATH=\"{1}:${{LD_LIBRARY_PATH}}\"\n'''",
".",
"format",
"(",
"path",
".",
"list_to_path",
"(",
"paths",
")",
",",
"path",
".",
"list_to_path",
"(",
"libs",
")",
")",
"bashrc",
".",
"write",
"(",
"lines",
")"
] | Write a valid gentoo bashrc file to :path:.
Args:
path - The output path of the bashrc | [
"Write",
"a",
"valid",
"gentoo",
"bashrc",
"file",
"to",
":",
"path",
":",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/projects/gentoo/gentoo.py#L187-L211 |
PolyJIT/benchbuild | benchbuild/projects/gentoo/gentoo.py | write_layout | def write_layout(_path):
"""
Write a valid gentoo layout file to :path:.
Args:
path - The output path of the layout.conf
"""
path.mkdir_uchroot("/etc/portage/metadata")
path.mkfile_uchroot("/etc/portage/metadata/layout.conf")
with open(_path, 'w') as layoutconf:
lines = '''masters = gentoo'''
layoutconf.write(lines) | python | def write_layout(_path):
"""
Write a valid gentoo layout file to :path:.
Args:
path - The output path of the layout.conf
"""
path.mkdir_uchroot("/etc/portage/metadata")
path.mkfile_uchroot("/etc/portage/metadata/layout.conf")
with open(_path, 'w') as layoutconf:
lines = '''masters = gentoo'''
layoutconf.write(lines) | [
"def",
"write_layout",
"(",
"_path",
")",
":",
"path",
".",
"mkdir_uchroot",
"(",
"\"/etc/portage/metadata\"",
")",
"path",
".",
"mkfile_uchroot",
"(",
"\"/etc/portage/metadata/layout.conf\"",
")",
"with",
"open",
"(",
"_path",
",",
"'w'",
")",
"as",
"layoutconf",
":",
"lines",
"=",
"'''masters = gentoo'''",
"layoutconf",
".",
"write",
"(",
"lines",
")"
] | Write a valid gentoo layout file to :path:.
Args:
path - The output path of the layout.conf | [
"Write",
"a",
"valid",
"gentoo",
"layout",
"file",
"to",
":",
"path",
":",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/projects/gentoo/gentoo.py#L214-L226 |
PolyJIT/benchbuild | benchbuild/projects/gentoo/gentoo.py | write_wgetrc | def write_wgetrc(_path):
"""
Write a valid gentoo wgetrc file to :path:.
Args:
path - The output path of the wgetrc
"""
http_proxy = str(CFG["gentoo"]["http_proxy"])
ftp_proxy = str(CFG["gentoo"]["ftp_proxy"])
path.mkfile_uchroot("/etc/wgetrc")
with open(_path, 'w') as wgetrc:
if http_proxy is not None:
http_s = "http_proxy = {0}".format(http_proxy)
https_s = "https_proxy = {0}".format(http_proxy)
wgetrc.write("use_proxy = on\n")
wgetrc.write(http_s + "\n")
wgetrc.write(https_s + "\n")
if ftp_proxy is not None:
fp_s = "ftp_proxy={0}".format(ftp_proxy)
wgetrc.write(fp_s + "\n") | python | def write_wgetrc(_path):
"""
Write a valid gentoo wgetrc file to :path:.
Args:
path - The output path of the wgetrc
"""
http_proxy = str(CFG["gentoo"]["http_proxy"])
ftp_proxy = str(CFG["gentoo"]["ftp_proxy"])
path.mkfile_uchroot("/etc/wgetrc")
with open(_path, 'w') as wgetrc:
if http_proxy is not None:
http_s = "http_proxy = {0}".format(http_proxy)
https_s = "https_proxy = {0}".format(http_proxy)
wgetrc.write("use_proxy = on\n")
wgetrc.write(http_s + "\n")
wgetrc.write(https_s + "\n")
if ftp_proxy is not None:
fp_s = "ftp_proxy={0}".format(ftp_proxy)
wgetrc.write(fp_s + "\n") | [
"def",
"write_wgetrc",
"(",
"_path",
")",
":",
"http_proxy",
"=",
"str",
"(",
"CFG",
"[",
"\"gentoo\"",
"]",
"[",
"\"http_proxy\"",
"]",
")",
"ftp_proxy",
"=",
"str",
"(",
"CFG",
"[",
"\"gentoo\"",
"]",
"[",
"\"ftp_proxy\"",
"]",
")",
"path",
".",
"mkfile_uchroot",
"(",
"\"/etc/wgetrc\"",
")",
"with",
"open",
"(",
"_path",
",",
"'w'",
")",
"as",
"wgetrc",
":",
"if",
"http_proxy",
"is",
"not",
"None",
":",
"http_s",
"=",
"\"http_proxy = {0}\"",
".",
"format",
"(",
"http_proxy",
")",
"https_s",
"=",
"\"https_proxy = {0}\"",
".",
"format",
"(",
"http_proxy",
")",
"wgetrc",
".",
"write",
"(",
"\"use_proxy = on\\n\"",
")",
"wgetrc",
".",
"write",
"(",
"http_s",
"+",
"\"\\n\"",
")",
"wgetrc",
".",
"write",
"(",
"https_s",
"+",
"\"\\n\"",
")",
"if",
"ftp_proxy",
"is",
"not",
"None",
":",
"fp_s",
"=",
"\"ftp_proxy={0}\"",
".",
"format",
"(",
"ftp_proxy",
")",
"wgetrc",
".",
"write",
"(",
"fp_s",
"+",
"\"\\n\"",
")"
] | Write a valid gentoo wgetrc file to :path:.
Args:
path - The output path of the wgetrc | [
"Write",
"a",
"valid",
"gentoo",
"wgetrc",
"file",
"to",
":",
"path",
":",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/projects/gentoo/gentoo.py#L229-L250 |
PolyJIT/benchbuild | benchbuild/projects/gentoo/gentoo.py | setup_benchbuild | def setup_benchbuild():
"""
Setup benchbuild inside a container.
This will query for an existing installation of benchbuild and
try to upgrade it to the latest version, if possible.
"""
LOG.debug("Setting up Benchbuild...")
venv_dir = local.path("/benchbuild")
prefixes = CFG["container"]["prefixes"].value
prefixes.append(venv_dir)
CFG["container"]["prefixes"] = prefixes
src_dir = str(CFG["source_dir"])
have_src = src_dir is not None
if have_src:
__mount_source(src_dir)
benchbuild = find_benchbuild()
if benchbuild and not requires_update(benchbuild):
if have_src:
__upgrade_from_source(venv_dir, with_deps=False)
return
setup_virtualenv(venv_dir)
if have_src:
__upgrade_from_source(venv_dir)
else:
__upgrade_from_pip(venv_dir) | python | def setup_benchbuild():
"""
Setup benchbuild inside a container.
This will query for an existing installation of benchbuild and
try to upgrade it to the latest version, if possible.
"""
LOG.debug("Setting up Benchbuild...")
venv_dir = local.path("/benchbuild")
prefixes = CFG["container"]["prefixes"].value
prefixes.append(venv_dir)
CFG["container"]["prefixes"] = prefixes
src_dir = str(CFG["source_dir"])
have_src = src_dir is not None
if have_src:
__mount_source(src_dir)
benchbuild = find_benchbuild()
if benchbuild and not requires_update(benchbuild):
if have_src:
__upgrade_from_source(venv_dir, with_deps=False)
return
setup_virtualenv(venv_dir)
if have_src:
__upgrade_from_source(venv_dir)
else:
__upgrade_from_pip(venv_dir) | [
"def",
"setup_benchbuild",
"(",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Setting up Benchbuild...\"",
")",
"venv_dir",
"=",
"local",
".",
"path",
"(",
"\"/benchbuild\"",
")",
"prefixes",
"=",
"CFG",
"[",
"\"container\"",
"]",
"[",
"\"prefixes\"",
"]",
".",
"value",
"prefixes",
".",
"append",
"(",
"venv_dir",
")",
"CFG",
"[",
"\"container\"",
"]",
"[",
"\"prefixes\"",
"]",
"=",
"prefixes",
"src_dir",
"=",
"str",
"(",
"CFG",
"[",
"\"source_dir\"",
"]",
")",
"have_src",
"=",
"src_dir",
"is",
"not",
"None",
"if",
"have_src",
":",
"__mount_source",
"(",
"src_dir",
")",
"benchbuild",
"=",
"find_benchbuild",
"(",
")",
"if",
"benchbuild",
"and",
"not",
"requires_update",
"(",
"benchbuild",
")",
":",
"if",
"have_src",
":",
"__upgrade_from_source",
"(",
"venv_dir",
",",
"with_deps",
"=",
"False",
")",
"return",
"setup_virtualenv",
"(",
"venv_dir",
")",
"if",
"have_src",
":",
"__upgrade_from_source",
"(",
"venv_dir",
")",
"else",
":",
"__upgrade_from_pip",
"(",
"venv_dir",
")"
] | Setup benchbuild inside a container.
This will query for an existing installation of benchbuild and
try to upgrade it to the latest version, if possible. | [
"Setup",
"benchbuild",
"inside",
"a",
"container",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/projects/gentoo/gentoo.py#L288-L317 |
BlueBrain/hpcbench | hpcbench/cli/bennett.py | main | def main(argv=None):
"""ben-nett entry point"""
arguments = cli_common(__doc__, argv=argv)
benet = BeNet(arguments['CAMPAIGN_FILE'])
benet.run()
if argv is not None:
return benet | python | def main(argv=None):
"""ben-nett entry point"""
arguments = cli_common(__doc__, argv=argv)
benet = BeNet(arguments['CAMPAIGN_FILE'])
benet.run()
if argv is not None:
return benet | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"arguments",
"=",
"cli_common",
"(",
"__doc__",
",",
"argv",
"=",
"argv",
")",
"benet",
"=",
"BeNet",
"(",
"arguments",
"[",
"'CAMPAIGN_FILE'",
"]",
")",
"benet",
".",
"run",
"(",
")",
"if",
"argv",
"is",
"not",
"None",
":",
"return",
"benet"
] | ben-nett entry point | [
"ben",
"-",
"nett",
"entry",
"point"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/bennett.py#L19-L25 |
BlueBrain/hpcbench | hpcbench/cli/benelastic.py | main | def main(argv=None):
"""ben-elastic entry point"""
arguments = cli_common(__doc__, argv=argv)
es_export = ESExporter(arguments['CAMPAIGN-DIR'], arguments['--es'])
es_export.export()
if argv is not None:
return es_export | python | def main(argv=None):
"""ben-elastic entry point"""
arguments = cli_common(__doc__, argv=argv)
es_export = ESExporter(arguments['CAMPAIGN-DIR'], arguments['--es'])
es_export.export()
if argv is not None:
return es_export | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"arguments",
"=",
"cli_common",
"(",
"__doc__",
",",
"argv",
"=",
"argv",
")",
"es_export",
"=",
"ESExporter",
"(",
"arguments",
"[",
"'CAMPAIGN-DIR'",
"]",
",",
"arguments",
"[",
"'--es'",
"]",
")",
"es_export",
".",
"export",
"(",
")",
"if",
"argv",
"is",
"not",
"None",
":",
"return",
"es_export"
] | ben-elastic entry point | [
"ben",
"-",
"elastic",
"entry",
"point"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/benelastic.py#L20-L26 |
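`ben-elastic` follows the same entry-point convention but feeds two inputs, the campaign directory and an `--es` endpoint, to an exporter. Purely as an illustration of what such an export step could look like (this is not hpcbench's `ESExporter`), a bulk-indexing sketch with the elasticsearch-py client:

```python
# Illustrative sketch only; assumes the elasticsearch-py package.
import json
from pathlib import Path

from elasticsearch import Elasticsearch, helpers

def export_campaign(campaign_dir: str, es_host: str,
                    index: str = "hpcbench") -> int:
    """Bulk-index every metrics JSON file found under campaign_dir."""
    # elasticsearch-py 8.x expects a full URL, e.g. "http://localhost:9200".
    es = Elasticsearch([es_host])
    actions = (
        {"_index": index, "_source": json.loads(path.read_text())}
        for path in Path(campaign_dir).rglob("*.json")
    )
    indexed, _errors = helpers.bulk(es, actions)
    return indexed
```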
KelSolaar/Manager | manager/QObject_component.py | QObjectComponent.name | def name(self, value):
"""
Setter for **self.__name** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format("name", value)
self.__name = value | python | def name(self, value):
"""
Setter for **self.__name** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format("name", value)
self.__name = value | [
"def",
"name",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"unicode",
",",
"\"'{0}' attribute: '{1}' type is not 'unicode'!\"",
".",
"format",
"(",
"\"name\"",
",",
"value",
")",
"self",
".",
"__name",
"=",
"value"
] | Setter for **self.__name** attribute.
:param value: Attribute value.
:type value: unicode | [
"Setter",
"for",
"**",
"self",
".",
"__name",
"**",
"attribute",
"."
] | train | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/QObject_component.py#L101-L111 |
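The setter above is Python 2 code: `unicode` does not exist in Python 3, and the type check is an assert rather than an exception. For comparison, a Python 3 sketch of the same guarded-property pattern (not part of Manager):

```python
class Component:
    """Python 3 rendering of the guarded name property."""
    def __init__(self, name=None):
        self.__name = None
        self.name = name  # route through the setter for validation

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, value):
        if value is not None:
            assert isinstance(value, str), (
                "'name' attribute: '{0}' type is not 'str'!".format(value))
        self.__name = value

component = Component()
component.name = "script_editor"
assert component.name == "script_editor"
```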
KelSolaar/Manager | manager/QObject_component.py | QObjectComponent.activated | def activated(self, value):
"""
Setter for **self.__activated** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("activated", value)
self.component_activated.emit() if value else self.component_deactivated.emit()
self.__activated = value | python | def activated(self, value):
"""
Setter for **self.__activated** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("activated", value)
self.component_activated.emit() if value else self.component_deactivated.emit()
self.__activated = value | [
"def",
"activated",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"bool",
",",
"\"'{0}' attribute: '{1}' type is not 'bool'!\"",
".",
"format",
"(",
"\"activated\"",
",",
"value",
")",
"self",
".",
"component_activated",
".",
"emit",
"(",
")",
"if",
"value",
"else",
"self",
".",
"component_deactivated",
".",
"emit",
"(",
")",
"self",
".",
"__activated",
"=",
"value"
] | Setter for **self.__activated** attribute.
:param value: Attribute value.
:type value: bool | [
"Setter",
"for",
"**",
"self",
".",
"__activated",
"**",
"attribute",
"."
] | train | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/QObject_component.py#L136-L147 |
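The `activated` setter doubles as the notification point: every non-`None` assignment emits `component_activated` or `component_deactivated` before the flag is stored, and it fires on every set, not only on a change of state. A self-contained sketch of that pattern, using a tiny callback list as a stand-in for Qt signals so the example runs without PyQt:

```python
class Signal:
    """Minimal stand-in for a Qt signal: an ordered list of callbacks."""
    def __init__(self):
        self._slots = []

    def connect(self, slot):
        self._slots.append(slot)

    def emit(self, *args):
        for slot in self._slots:
            slot(*args)

class Component:
    def __init__(self):
        self.component_activated = Signal()
        self.component_deactivated = Signal()
        self.__activated = False

    @property
    def activated(self):
        return self.__activated

    @activated.setter
    def activated(self, value):
        if value is not None:
            assert isinstance(value, bool), (
                "'activated' attribute: '{0}' type is not 'bool'!".format(value))
            # Notify listeners before storing the new state, as the original does.
            (self.component_activated if value
             else self.component_deactivated).emit()
        self.__activated = value

events = []
component = Component()
component.component_activated.connect(lambda: events.append("activated"))
component.activated = True
assert events == ["activated"] and component.activated
```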